| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e956d55a10ef8a9d0c043a3e8728f2bf2bc2c2b1
|
e82a624f09091868fb4a6a9e7e8654e9f7c06cd3
|
/scripts/calcElectronEnDiss_todo/calcElectronEnDiss.py
|
126cf555b617cfaa10f62ac7e697171bbb0d8df7
|
[] |
no_license
|
mj596/blazarpp
|
57b3a46f738c44e27a8b0a89d0509cd0ecf61132
|
af819f153cb69843988caea3560a78d9d45d9bbe
|
refs/heads/master
| 2020-05-17T22:51:46.214700
| 2014-12-15T15:39:08
| 2014-12-15T15:39:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
import numpy as np
import sys
import math
import matplotlib.pyplot as plt
def read_data(filename):
    x = []
    y = []
    file = open(filename, 'r')
    for line in file.readlines():
        x.append(float(line.split(" ")[0]))
        y.append(float(line.split(" ")[1]))
    file.close()
    return np.array([x, y])
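
# Example usage (hypothetical filename; each input line holds two
# space-separated numeric columns, as read_data above expects):
# arr = read_data('Injection_1')
# plt.loglog(arr[0], arr[1])
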
#def getEff( idin, idout ):
#
# ein = read_data( 'Injection_'+str(id) )
# eout = read_data( 'Ngamma_'+str(id) )
# ein[1] *= ein[0]*ein[0]
# eout[1] *= eout[0]*eout[0]
#
# def calcEff( xin, xout ):
# return np.trapz( xout[1] )/np.trapz( xin[1] )
## return np.trapz( xout[1], x=xout[0] )/np.trapz( xin[1], x=xin[0] )
#
# return calcEff( ein, eout )
#
#def plot_eff( ):
# import os
# r=[]
# eff=[]
# files = [f for f in os.listdir('.') if os.path.isfile(f)]
# for file in files:
# line = file.split("_")
# if line[0] == 'Ngamma':
# print line[1], getEff( line[1] )
# r.append( line[1] )
# eff.append( getEff( line[1] ) )
#
# plt.plot(r,eff,'*')
# plt.show()
#
#plot_eff()
#
##id=1
##ein = read_data( 'Injection_'+str(id) )
##eout = read_data( 'Ngamma_'+str(id) )
##ein[1] *= ein[0]*ein[0]
##eout[1] *= eout[0]*eout[0]
##plt.loglog(ein[0],ein[1])
##plt.loglog(eout[0],eout[1])
##plt.show()
|
[
"janiak.mateusz@gmail.com"
] |
janiak.mateusz@gmail.com
|
714f1d9dbb8bba30c389fbb980b5f7c47ddc0745
|
642ea5b46ef6796d8c965471862dcd0743b55c75
|
/DataValidation/myapp/migrations/0006_auto_20180310_1636.py
|
b6d59b663961f40c57ec6edb300b949e425d5472
|
[] |
no_license
|
TanjillaTina/OnlineEventRegistration
|
1ada50fbc15f3eaed192ffccb9941c30e14fff31
|
1965f9a5702185c7da5bfd82d124718fa4d02a22
|
refs/heads/master
| 2021-04-06T13:11:58.961533
| 2018-03-14T13:27:32
| 2018-03-14T13:27:32
| 124,711,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Generated by Django 2.0.2 on 2018-03-11 00:36
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('myapp', '0005_auto_20180310_1547'),
    ]

    operations = [
        migrations.AlterField(
            model_name='registrationinfo',
            name='roll_num',
            # max_length is not a valid IntegerField option (Django ignores it
            # with warning W122), so it is dropped here; unique=True is already
            # implied by primary_key=True.
            field=models.IntegerField(primary_key=True, serialize=False, unique=True),
        ),
    ]
|
[
"tanjilla.tina@gmail.com"
] |
tanjilla.tina@gmail.com
|
2cd8151b1c207d57fc29dedffec25f26e40e6006
|
14a61dcb8a2f0d64b9712806d0fb2563f98d9dad
|
/data_preprocess/dump_query2file.py
|
bd77ec9d659071cbf49ee006b7decebc3e364dfa
|
[] |
no_license
|
wang9702/Bert_MRC_NER
|
418c380a68dc3a6f9457157267967120b5bc0815
|
e4644f914ea5c6ebec433ab0f6313f46ef97f02a
|
refs/heads/master
| 2023-06-14T03:09:50.899856
| 2021-07-04T05:52:13
| 2021-07-04T05:52:13
| 382,774,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,285
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Xiaoy LI
# description:
#
import os
import json
msra = {
"default": {
"NR": "人名和虚构的人物形象",
"NS": "按照地理位置划分的国家,城市,乡镇,大洲",
"NT": "组织包括公司,政府党派,学校,政府,新闻机构"
},
"labels": [
"NS",
"NR",
"NT"
]
}
ace2005 = {
"default": {
"FAC": "facility entities are limited to buildings and other permanent man-made structures such as buildings, airports, highways, bridges.",
"GPE": "geographical political entities are geographical regions defined by political and or social groups such as countries, nations, regions, cities, states, government and its people. ",
"LOC": "location entities are limited to geographical entities such as geographical areas and landmasses, mountains, bodies of water, and geological formations.",
"ORG": "organization entities are limited to companies, corporations, agencies, institutions and other groups of people.",
"PER": "a person entity is limited to human including a single individual or a group.",
"VEH": "vehicle entities are physical devices primarily designed to move, carry, pull or push the transported object such as helicopters, trains, ship and motorcycles.",
"WEA": "weapon entities are limited to physical devices such as instruments for physically harming such as guns, arms and gunpowder."
},
"labels": [
"GPE",
"ORG",
"PER",
"FAC",
"VEH",
"LOC",
"WEA"
]
}
ace04 = {
"default": {
"FAC": "facility entities are limited to buildings and other permanent man-made structures such as buildings, airports, highways, bridges.",
"GPE": "geographical political entities are geographical regions defined by political and or social groups such as countries, nations, regions, cities, states, government and its people. ",
"LOC": "location entities are limited to geographical entities such as geographical areas and landmasses, mountains, bodies of water, and geological formations.",
"ORG": "organization entities are limited to companies, corporations, agencies, institutions and other groups of people.",
"PER": "a person entity is limited to human including a single individual or a group.",
"VEH": "vehicle entities are physical devices primarily designed to move, carry, pull or push the transported object such as helicopters, trains, ship and motorcycles.",
"WEA": "weapon entities are limited to physical devices such as instruments for physically harming such as guns, arms and gunpowder."
},
"labels": [
"GPE",
"ORG",
"PER",
"FAC",
"VEH",
"LOC",
"WEA"
]
}
zh_ontonotes4 = {
"default": {
"GPE": "按照国家,城市,州县划分的地理区域",
"LOC": "山脉,河流自然景观的地点",
"ORG": "组织包括公司,政府党派,学校,政府,新闻机构",
"PER": "人名和虚构的人物形象"
},
"labels": [
"LOC",
"PER",
"GPE",
"ORG"
]
}
ccks_task01 = {
"default": {
"dis": "疾病或综合症,中毒或受伤,器官或细胞受损",
"sym": "临床表现,病人在生病时的表现,例如:呼吸困难、阵发性喘憋,",
"pro": "检查或者治疗的过程",
"equ": "治疗过程中使用的设备",
"dru": "治疗疾病的医用药物",
"ite": "医学检验项目,例如:B超、渗透压、肾溶质负荷",
"bod": "身体的某一个部位,例如:脾、肝、胃、肠",
"dep": "医院的各职能科室,例如:内科、外科、儿科、妇科、眼科、耳鼻喉科、口腔科",
"mic": "微生物类,例如:大肠杆菌、寄生虫"
},
"labels": [
"dis",
"sym",
"pro",
"equ",
"dru",
"ite",
"bod",
"dep",
"mic"
]
}
if __name__ == "__main__":
repo_path = "/".join(os.path.realpath(__file__).split("/")[:-2])
with open(os.path.join(repo_path, "../data_preprocess/queries/zh_msra.json"), "w") as f:
json.dump(msra, f, sort_keys=True, indent=2, ensure_ascii=False)
with open(os.path.join(repo_path, "../data_preprocess/queries/zh_ontonotes4.json"), "w") as f:
json.dump(zh_ontonotes4, f, sort_keys=True, indent=2, ensure_ascii=False)
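
# For reference (a sketch of the output, not part of the original script):
# with sort_keys=True, indent=2 and ensure_ascii=False, zh_msra.json comes
# out roughly as:
# {
#   "default": {
#     "NR": "人名和虚构的人物形象",
#     ...
#   },
#   "labels": [
#     "NS",
#     "NR",
#     "NT"
#   ]
# }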
|
[
"862741851@qq.com"
] |
862741851@qq.com
|
a7535a3b8f5a5d17e638eec987a396de31a54613
|
27331020ff20d00f4e875f6a20ce0ca1c906da43
|
/Portpolio/Dog_Cat/Model_mobilenet/renewal/util.py
|
80c8a5f29d2770318e2baed63ce22103a91d2f74
|
[] |
no_license
|
MOOSUNGPARK/source
|
2ac7f24766c953cfafc632eb2812f713aa708944
|
50b2d169d5c64778d818decd564045d7f8e6c304
|
refs/heads/master
| 2021-01-20T14:29:02.015764
| 2019-01-09T06:37:35
| 2019-01-09T06:37:35
| 90,616,630
| 1
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,792
|
py
|
import numpy as np
from PIL import ImageGrab
import Portpolio.Dog_Cat.Model_mobilenet.renewal.config as cfg
# http://leechanho.tistory.com/16
# class CSV_reader():
# def __init__(self, sess):
# self.sess = sess
# self.batch_size = cfg.BATCH_SIZE
#
#
# def read_batch_data(self, file_list):
# filename_queue = tf.train.string_input_producer(file_list, shuffle=False, name='filename_queue')
# reader = tf.TextLineReader()
# _, value = reader.read(filename_queue)
# record_defaults = [[0.] for _ in range(126 * 126)] + [[0]] # X: 126 * 126 -> tf.float / Y: 1 -> tf.int
# xy = tf.decode_csv(value, record_defaults=record_defaults)
# x_batch, y_batch = tf.train.batch([xy[0:-1], xy[-1:]], batch_size=self.batch_size)
#
# return x_batch, y_batch
def data_setting(data):
    total_size = len(data)
    x = (np.array(data[:, 0:-1]) / 255).tolist()
    targets = data[:, -1].astype(np.int32)
    y = np.zeros((total_size, cfg.LABEL_CNT))
    y[np.arange(total_size), targets] = 1
    return x, y, total_size


def read_data(*filename):
    temp = []
    for file in filename:
        temp.append(np.loadtxt(file, delimiter=','))
    data = np.concatenate(temp, axis=0)
    # np.random.shuffle(data)
    return data_setting(data)
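
# Example usage (hypothetical CSV files; per the commented reader above each
# row is expected to hold 126*126 pixel values followed by an integer label):
# x, y, total = read_data('train_1.csv', 'train_2.csv')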
# def monitor_train_cost(mon_epoch_list, mon_value_list, mon_color_list, mon_label_list):
# for cost, color, label in zip(mon_value_list, mon_color_list[0:len(mon_label_list)], mon_label_list):
# plt.plot(mon_epoch_list, cost, c=color, lw=2, ls='--', marker='o', label=label)
# plt.title('Mobilenet on Dog_Cat')
# plt.legend(loc=1)
# plt.xlabel('Epoch')
# plt.ylabel('Value')
# plt.grid(True)
def image_screenshot():
    im = ImageGrab.grab()
    im.show()
|
[
"0911godqhr!"
] |
0911godqhr!
|
932542486756cd7d77a5296f2a32f02be8588538
|
3d6de3a8d1d0d8a117b6acc60ea495760eeade2a
|
/202-SP-Project1c/Token.py
|
34bb2cfb5f345695fe2538f2d78ef3155751e808
|
[] |
no_license
|
inddoni/ssu-system-programming
|
59c6c6ed6c327f183f920fc2be33447ff6527adf
|
bd2e9ab9b5c8916ab5380c6d86692d3168e66b16
|
refs/heads/master
| 2022-10-11T00:04:04.618521
| 2020-06-08T14:49:23
| 2020-06-08T14:49:23
| 257,866,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,310
|
py
|
from nltk.tokenize.regexp import RegexpTokenizer
from InstTable import *
from LiteralTable import *
from SymbolTable import *
from TokenTable import *

'''
* Defines the variables and operations used to split each stored line of code
* into words and interpret their meaning.
* Once the semantic analysis is done, it also stores the byte code produced
* when the line is transformed into object code in pass 2.
'''

class Token:
    """
    * Performs semantic analysis of the line as soon as the class is initialized.
    * @param line  program code stored line by line
    """
    def __init__(self):
        # initialize the variables used during the semantic analysis phase
        self.location = 0
        self.label = ""
        self.operator = ""
        self.operand = []  # list of strings
        # self.operand.append("")  # initialize operand[0] to ""
        self.comment = ""
        self.nixbpe = 0
        # variables used during the object code generation phase
        self.objectCode = ""
        self.byteSize = 0
        MAX_OPERAND = 3

    # Performs the actual analysis of the line and stores the results in the
    # Token's fields.
    # @param line  program code stored line by line.
    def parsing(self, line):
        line = line[:-1]  # strip the trailing '\n'
        # split the incoming line on tabs and store the pieces in the tokens list
        tokenizer = RegexpTokenizer("\t", gaps=True)
        tokens = tokenizer.tokenize(line)
        count = 0
        for token in tokens:
            count += 1
            if count == 1:
                self.label = token
            elif count == 2:
                self.operator = token
            elif count == 3:
                opnd = token
                tokenizer = RegexpTokenizer(",", gaps=True)
                opnds = tokenizer.tokenize(opnd)
                i = 0
                for op in opnds:
                    self.operand.append(op)
                    i += 1
            elif count == 4:
                self.comment = token
            else:
                print("[TokenTable.py] parsing() error")

    def setLocation(self, loc):
        self.location = loc

    def setNixbpe(self, nixbpe):
        self.nixbpe = nixbpe

    def setByteSize(self, num):
        self.byteSize = num

    def setObjectCode(self, str):
        self.objectCode = str

    '''
    * Sets the n, i, x, b, p, e flags.
    *
    * Usage example: setFlag(nFlag, 1)
    *            or: setFlag(TokenTable.nFlag, 1)
    *
    * @param flag : the desired bit position
    * @param value : the value to store, declared as 1 or 0
    '''
    def setFlag(self, flag, value):
        calc = flag * value
        self.nixbpe += calc

    '''
    * Reads the value of the requested flags. Several flags can be read at
    * once by combining them.
    *
    * Usage example: getFlag(nFlag)
    *            or: getFlag(nFlag | iFlag)
    *
    * @param flags : the bit positions to inspect
    * @return : the value stored at those bit positions; per flag this returns
    *           32, 16, 8, 4, 2 or 1 respectively.
    '''
    def getFlag(self, flags):
        # mask with the requested flags so only those bits are returned,
        # as the docstring describes (the original returned the whole nixbpe)
        return self.nixbpe & flags
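
# A minimal sketch of the flag helpers above; nFlag and iFlag are assumed to
# be power-of-two constants from TokenTable (e.g. nFlag = 32, iFlag = 16):
#
# t = Token()
# t.setFlag(32, 1)    # set the n bit
# t.setFlag(16, 1)    # set the i bit
# t.getFlag(32 | 16)  # -> 48: both bits are set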
|
[
"choinj97@gmail.com"
] |
choinj97@gmail.com
|
e28f1411b33d5b0a6a08aaec7c2d11dd51c282f9
|
28547f2d0f833fd35d98e93467b4b9396f26586d
|
/tickets_app/models.py
|
51935632de34b26325a65f897ccd3d24545f0de2
|
[] |
no_license
|
MariamKipshidze/tickets
|
a46beaa33ea144c0913e39d1c89f3a7ef3dc8159
|
94e33a09cd69d13840c7c30e7f008c5bc6913934
|
refs/heads/master
| 2023-04-19T08:59:43.507926
| 2023-04-07T08:05:52
| 2023-04-07T08:05:52
| 340,442,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from users.models import User


class Ticket(models.Model):
    name = models.CharField(max_length=100, verbose_name=_("Name"))
    price = models.DecimalField(max_digits=4, decimal_places=2, verbose_name=_('Price'))
    start_date = models.DateTimeField(verbose_name=_("Start Date"))
    end_date = models.DateTimeField(verbose_name=_("End date"))
    barcode = models.PositiveSmallIntegerField(verbose_name=_("Barcode"), unique=True)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _('Ticket')
        verbose_name_plural = _('tickets')


class Order(models.Model):
    user = models.ForeignKey(User, verbose_name=_("User"), on_delete=models.CASCADE, related_name="order")
    # verbose_name was a bare string here; wrapped in _() like the other fields
    ticket = models.ForeignKey(Ticket, verbose_name=_("Ticket"), on_delete=models.CASCADE, related_name="tk_order")

    def __str__(self):
        return self.user.email

    class Meta:
        verbose_name = _('Order')
        verbose_name_plural = _('Orders')
|
[
"mari.kifshidze@gmail.com"
] |
mari.kifshidze@gmail.com
|
11df941f9d8db32499f8f06857751bf83cc1b6d9
|
04f826abb0b6a030c4a80d452f46dc171f35c26d
|
/test/textday/days8/udp发送数据、接收数据.py
|
c27b649db16bcfb2591ffcbc3e899eb9e2dfe342
|
[] |
no_license
|
tClown11/Python-Student
|
dd62ca19ede0b70b13d86c60a52c75738d56b0c2
|
93bb2786213caae923aa48bcb8c558d331e66fbf
|
refs/heads/master
| 2022-04-30T14:21:13.250666
| 2018-06-24T14:09:52
| 2018-06-24T14:09:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
# coding=utf-8
from socket import *

# 1. Create a socket
udpSocket = socket(AF_INET, SOCK_DGRAM)

# 2. Prepare the receiver's address
sendAddr = ('192.168.1.103', 8080)

# 3. Get the data to send from the keyboard
sendData = raw_input("Enter the data to send: ")

# 4. Send the data to the target machine
udpSocket.sendto(sendData, sendAddr)

# 5. Wait for the data sent back by the other side
recvData = udpSocket.recvfrom(1024)  # 1024 is the maximum number of bytes to receive

# 6. Display the received data
print(recvData)

# 7. Close the socket
udpSocket.close()
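
# A minimal matching receiver for the script above (a sketch; binding to port
# 8080 on the receiving machine and echoing back are assumptions):
# from socket import *
# echoSocket = socket(AF_INET, SOCK_DGRAM)
# echoSocket.bind(('', 8080))
# data, addr = echoSocket.recvfrom(1024)
# echoSocket.sendto(data, addr)  # echo back so the sender's recvfrom() returns
# echoSocket.close()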
|
[
"tj1211keynote@outlook.com"
] |
tj1211keynote@outlook.com
|
8aabcf0d0b33a8d4d0da2bce1fa75f9cd75ab0a2
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py
|
dc03cf4938109b681f39653d014c2577193656c3
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageTag(Model):
    """An entity observation in the image, along with the confidence score.

    :param name: Name of the entity.
    :type name: str
    :param confidence: The level of confidence that the entity was observed.
    :type confidence: float
    :param hint: Optional hint/details for this tag.
    :type hint: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
        'hint': {'key': 'hint', 'type': 'str'},
    }

    def __init__(self, *, name: str=None, confidence: float=None, hint: str=None, **kwargs) -> None:
        super(ImageTag, self).__init__(**kwargs)
        self.name = name
        self.confidence = confidence
        self.hint = hint
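
# A minimal usage sketch (illustrative values, not service output):
# tag = ImageTag(name='dog', confidence=0.98, hint='domestic animal')
# print(tag.name, tag.confidence, tag.hint)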
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
ca9b40628be215ed9f3590a8f28f4987bf1726d2
|
6a903006ceb7e879a1033e077f8c8020fe9dba30
|
/Back/BuildderBack/users/models.py
|
b049a94d93c3083b47c777ad0ff7613b941b744b
|
[] |
no_license
|
daniel94lad/P-Buildder-V01
|
4bab6ff4eb13a8200a4af8ba10ec3c87690991f4
|
93809c5efc9100c9d914940cf008465b03c2b404
|
refs/heads/master
| 2022-09-08T00:53:52.624313
| 2020-06-02T21:53:15
| 2020-06-02T21:53:15
| 268,914,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
from __future__ import unicode_literals
from django.db import models
from django.urls import reverse
# Create your models here.
def upload_location(instance, filename):
    return "%s/%s" % (instance.user_id, filename)


class B_User(models.Model):
    user_id = models.AutoField(primary_key=True)
    first_name = models.CharField(max_length=120)
    last_name = models.CharField(max_length=120)
    email = models.EmailField(max_length=254)
    password = models.CharField(max_length=50, null=False)
    photo = models.ImageField(upload_to=upload_location, null=True, blank=True, width_field="width_field", height_field="height_field")
    height_field = models.IntegerField(default=0)
    width_field = models.IntegerField(default=0)
    joined = models.DateTimeField(auto_now=False, auto_now_add=True)
    # facebook_link = models.URLField(max_length=200)
    # twitter_link = models.URLField(max_length=200)
    # linkedin_link = models.URLField(max_length=200)
    # Projects_active = pending
    # Posts = pending
    # Comments = pending
    # P_Creator = models.BooleanField(default=False)
    # P_Collaborator = models.BooleanField(default=False)
    # P_Investor = models.BooleanField(default=False)

    def __unicode__(self):
        return self.first_name

    def __str__(self):
        return self.first_name

    def get_absolute_url(self):
        return reverse("users:detail", kwargs={"user_id": self.user_id})

    class Meta:
        ordering = ["joined"]
|
[
"daniel94lad@gmail.com"
] |
daniel94lad@gmail.com
|
21fd1eeefbeadb84c6cf879bce44a1567770bee2
|
73788c28a6c9742f0e7b4ee99ac4a7f854f40611
|
/scripts/climodat/check_database.py
|
83598a87e903898aa9fe779a2b63be56bedd4d0c
|
[] |
no_license
|
nbackas/iem
|
b8c7a356c68865a66b808962e1f09460b74df73f
|
d22e6d7b1b94db3bb081fb08619f83fb5b6784b7
|
refs/heads/master
| 2020-12-31T03:04:22.144156
| 2016-03-21T19:09:16
| 2016-03-21T19:09:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
"""
Check over the database and make sure we have what we need there to
make the climodat reports happy...
"""
import sys
import mx.DateTime
from pyiem.network import Table as NetworkTable
import constants
import psycopg2
state = sys.argv[1]
nt = NetworkTable("%sCLIMATE" % (state,))
COOP = psycopg2.connect(database='coop', host='iemdb')
ccursor = COOP.cursor()
today = mx.DateTime.now()
def fix_year(station, year):
    sts = mx.DateTime.DateTime(year, 1, 1)
    ets = mx.DateTime.DateTime(year + 1, 1, 1)
    interval = mx.DateTime.RelativeDateTime(days=1)
    now = sts
    while now < ets:
        ccursor.execute("""SELECT count(*) from alldata_%s where
        station = '%s' and day = '%s' """ % (state, station,
                                             now.strftime("%Y-%m-%d")))
        row = ccursor.fetchone()
        if row[0] == 0:
            print 'Adding Date: %s station: %s' % (now, station)
            ccursor.execute("""INSERT into alldata_%s (station, day, sday,
            year, month) VALUES ('%s', '%s', '%s', %s, %s)
            """ % (state, station, now.strftime("%Y-%m-%d"),
                   now.strftime("%m%d"), now.year, now.month))
        now += interval


for station in nt.sts.keys():
    sts = mx.DateTime.DateTime(constants.startyear(station), 1, 1)
    ets = constants._ENDTS
    # Check for obs total
    now = sts
    interval = mx.DateTime.RelativeDateTime(years=1)
    while now < (ets - interval):
        days = int(((now + interval) - now).days)
        ccursor.execute("""SELECT count(*) from alldata_%s WHERE
        year = %s and station = '%s'""" % (state, now.year, station))
        row = ccursor.fetchone()
        if row[0] != days:
            print ('Mismatch station: %s year: %s count: %s days: %s'
                   '') % (station, now.year, row[0], days)
            fix_year(station, now.year)
        now += interval

    # Check records database...
    sts = mx.DateTime.DateTime(2000, 1, 1)
    ets = mx.DateTime.DateTime(2001, 1, 1)
    interval = mx.DateTime.RelativeDateTime(days=1)
    for table in ['climate', 'climate51', 'climate71', 'climate81']:
        ccursor.execute("""SELECT count(*) from %s WHERE
        station = '%s'""" % (table, station))
        row = ccursor.fetchone()
        if row[0] == 366:
            continue
        now = sts
        while now < ets:
            ccursor.execute("""SELECT * from %s WHERE station = '%s'
            and day = '%s'""" % (table, station, now.strftime("%Y-%m-%d")))
            if ccursor.rowcount == 0:
                print "Add %s station: %s day: %s" % (table, station,
                                                      now.strftime("%Y-%m-%d"))
                ccursor.execute("""
                INSERT into %s (station, valid) values ('%s', '%s')
                """ % (table, station, now.strftime("%Y-%m-%d")))
            now += interval

ccursor.close()
COOP.commit()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
8600bf75d5692cb6437a34cfea048b13dcde0c7f
|
b4b9de175facf26ac29080c1b97db45754fb585c
|
/b3_custom_edition/b3/plugins/xlrstats/__init__.py
|
5c59465b5a8f3be57ae4e6df6282111ee496c734
|
[] |
no_license
|
Gimhan-minion/cod4
|
9ac592a4a74a819b29e584e8597f8a65a8bd8fac
|
88ce6a10db3ad009c04e69c1ec58cedaddc1b2e9
|
refs/heads/master
| 2022-05-23T00:30:44.086430
| 2020-04-24T10:41:00
| 2020-04-24T10:41:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117,043
|
py
|
#
# XLRstats plugin for BigBrotherBot (B3) (www.bigbrotherbot.net)
# (c) 2004 - 2005 Tim ter Laak (ttlogic@xlr8or.com)
# (c) 2005 - 2014 Mark Weirath (xlr8or@xlr8or.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = 'xlr8or & ttlogic'
__version__ = '3.0.0-beta.17'
import b3
import b3.events
import b3.plugin
import b3.cron
import b3.timezones
import datetime
import time
import os
import re
import thread
import threading
import urllib2
from b3.functions import escape
from b3.functions import getCmd
from b3.functions import right_cut
from ConfigParser import NoOptionError
KILLER = "killer"
VICTIM = "victim"
ASSISTER = "assister"
########################################################################################################################
# #
# MAIN PLUGIN XLRSTATS - HANDLES ALL CORE STATISTICS FUNCTIONALITY #
# #
########################################################################################################################
class XlrstatsPlugin(b3.plugin.Plugin):
_world_clientid = None
_ffa = ['dm', 'ffa', 'syc-ffa']
# on damage_able_games we'll only count assists when damage is 50 points or more
_damage_able_games = ['cod4', 'cod5', 'cod6', 'cod7', 'cod8']
_damage_ability = False
hide_bots = True # set client.hide to True so bots are hidden from the stats
exclude_bots = True # kills and damage to and from bots do not affect playerskill
# history management
_cronTabWeek = None
_cronTabMonth = None
_cronTabKillBonus = None
# webfront variables
webfront_version = 2 # maintain backward compatibility
webfront_url = ''
webfront_config_nr = 0
_minKills = 100
_minRounds = 10
_maxDays = 14
# config variables
defaultskill = 1000
minlevel = 0
onemaponly = False
Kfactor_high = 16
Kfactor_low = 4
Kswitch_confrontations = 50
steepness = 600
suicide_penalty_percent = 0.05
tk_penalty_percent = 0.1
action_bonus = 1.0
kill_bonus = 1.5
assist_bonus = 0.5
assist_timespan = 2 # on non damage based games: damage before death timespan
damage_assist_release = 10 # on damage based games: release the assist (will overwrite self.assist_timespan on startup)
prematch_maxtime = 70
announce = False # announces points gained/lost to players after confrontations
keep_history = True
keep_time = True
min_players = 2 # minimum number of players to collect stats
_xlrstats_active = False # parsing events based on min_players?
_current_nr_players = 0 # current number of players present
silent = False # Disables the announcement when collecting stats = stealth mode
provisional_ranking = True # First Kswitch_confrontations will not alter opponents stats (unless both are under the limit)
auto_correct = True # Auto correct skill points every two hours to maintain a healthy pool
_auto_correct_ignore_days = 60 # How many days before ignoring a players skill in the auto-correct calculation
auto_purge = False # Purge players and associated data automatically (cannot be undone!)
_purge_player_days = 365 # Number of days after which players will be auto-purged
# keep some private map data to detect prematches and restarts
_last_map = None
_last_roundtime = None
# names for various stats tables
playerstats_table = 'xlr_playerstats'
weaponstats_table = 'xlr_weaponstats'
weaponusage_table = 'xlr_weaponusage'
bodyparts_table = 'xlr_bodyparts'
playerbody_table = 'xlr_playerbody'
opponents_table = 'xlr_opponents'
mapstats_table = 'xlr_mapstats'
playermaps_table = 'xlr_playermaps'
actionstats_table = 'xlr_actionstats'
playeractions_table = 'xlr_playeractions'
clients_table = 'clients'
penalties_table = 'penalties'
# default tablenames for the history subplugin
history_monthly_table = 'xlr_history_monthly'
history_weekly_table = 'xlr_history_weekly'
# default table name for the ctime subplugin
ctime_table = 'ctime'
# default tablenames for the Battlestats subplugin
# battlestats_table = 'xlr_battlestats'
# playerbattles_table = 'xlr_playerbattles'
_defaultTableNames = True
_default_messages = {
'cmd_xlrstats': '^3XLR Stats: ^7$name ^7: K ^2$kills ^7D ^3$deaths ^7TK ^1$teamkills ^7Ratio ^5$ratio ^7Skill ^3$skill',
'cmd_xlr': '^3XLR Stats: ^7$name ^7: K ^2$kills ^7D ^3$deaths ^7TK ^1$teamkills ^7Ratio ^5$ratio ^7Skill ^3$skill',
'cmd_xlrtopstats': '^3# $number: ^7$name ^7: Skill ^3$skill ^7Ratio ^5$ratio ^7Kills: ^2$kills',
}
####################################################################################################################
# #
# STARTUP #
# #
####################################################################################################################
def __init__(self, console, config=None):
"""
Object constructor.
:param console: The console instance
:param config: The plugin configuration
"""
self._adminPlugin = None # admin plugin object reference
self._xlrstatsHistoryPlugin = None
self._ctimePlugin = None
self._xlrstatstables = [] # will contain a list of the xlrstats database tables
self._cronTabCorrectStats = None
self.query = None # shortcut to the storage.query function
b3.plugin.Plugin.__init__(self, console, config)
def onStartup(self):
"""
Initialize plugin.
"""
# get the admin plugin so we can register commands
self._adminPlugin = self.console.getPlugin('admin')
# build database schema if needed
self.build_database_schema()
# register our commands
if 'commands' in self.config.sections():
for cmd in self.config.options('commands'):
level = self.config.get('commands', cmd)
sp = cmd.split('-')
alias = None
if len(sp) == 2:
cmd, alias = sp
func = getCmd(self, cmd)
if func:
self._adminPlugin.registerCommand(self, cmd, level, func, alias)
# define a shortcut to the storage.query function
self.query = self.console.storage.query
# initialize tablenames
PlayerStats._table = self.playerstats_table
WeaponStats._table = self.weaponstats_table
WeaponUsage._table = self.weaponusage_table
Bodyparts._table = self.bodyparts_table
PlayerBody._table = self.playerbody_table
Opponents._table = self.opponents_table
MapStats._table = self.mapstats_table
PlayerMaps._table = self.playermaps_table
ActionStats._table = self.actionstats_table
PlayerActions._table = self.playeractions_table
# register the events we're interested in.
self.registerEvent('EVT_CLIENT_JOIN', self.onJoin)
self.registerEvent('EVT_CLIENT_KILL', self.onKill)
self.registerEvent('EVT_CLIENT_KILL_TEAM', self.onTeamKill)
self.registerEvent('EVT_CLIENT_SUICIDE', self.onSuicide)
self.registerEvent('EVT_GAME_ROUND_START', self.onRoundStart)
self.registerEvent('EVT_CLIENT_ACTION', self.onAction) # for game-events/actions
self.registerEvent('EVT_CLIENT_DAMAGE', self.onDamage) # for assist recognition
# get the Client.id for the bot itself (guid: WORLD or Server(bfbc2/moh/hf))
sclient = self.console.clients.getByGUID("WORLD")
if sclient is None:
sclient = self.console.clients.getByGUID("Server")
if sclient is not None:
self._world_clientid = sclient.id
self.debug('got client id for B3: %s; %s' % (self._world_clientid, sclient.name))
# make sure its hidden in the webfront
player = self.get_PlayerStats(sclient)
if player:
player.hide = 1
self.save_Stat(player)
# determine the ability to work with damage based assists
if self.console.gameName in self._damage_able_games:
self.assist_timespan = self.damage_assist_release
self._damage_ability = True
# investigate if we can and want to keep a history
self._xlrstatstables = [self.playerstats_table, self.weaponstats_table, self.weaponusage_table,
self.bodyparts_table, self.playerbody_table, self.opponents_table, self.mapstats_table,
self.playermaps_table, self.actionstats_table, self.playeractions_table]
if self.keep_history:
self._xlrstatstables = [self.playerstats_table, self.weaponstats_table, self.weaponusage_table,
self.bodyparts_table, self.playerbody_table, self.opponents_table,
self.mapstats_table, self.playermaps_table, self.actionstats_table,
self.playeractions_table, self.history_monthly_table, self.history_weekly_table]
self.verbose('starting subplugin XLRstats History')
self._xlrstatsHistoryPlugin = XlrstatshistoryPlugin(self.console, self.history_weekly_table,
self.history_monthly_table, self.playerstats_table)
self._xlrstatsHistoryPlugin.onStartup()
# let's try and get some variables from our webfront installation
if self.webfront_url and self.webfront_url != '':
self.debug('webfront set to: %s' % self.webfront_url)
thread1 = threading.Thread(target=self.getWebsiteVariables)
thread1.start()
else:
self.debug('no webfront url available: using default')
# Analyze the ELO pool of points
self.correctStats()
self._cronTabCorrectStats = b3.cron.PluginCronTab(self, self.correctStats, 0, '0', '*/2')
self.console.cron + self._cronTabCorrectStats
self.purgePlayers()
# set proper kill_bonus and crontab
self.calculateKillBonus()
self._cronTabKillBonus = b3.cron.PluginCronTab(self, self.calculateKillBonus, 0, '*/10')
self.console.cron + self._cronTabKillBonus
# start the ctime subplugin
if self.keep_time:
self._ctimePlugin = CtimePlugin(self.console, self.ctime_table)
self._ctimePlugin.onStartup()
#start the xlrstats controller
#p = XlrstatscontrollerPlugin(self.console, self.min_players, self.silent)
#p.onStartup()
# get the map we're in, in case this is a new map and we need to create a db record for it.
mapstats = self.get_MapStats(self.console.game.mapName)
if mapstats:
self.verbose('map %s ready' % mapstats.name)
# check number of online players (if available)
self.checkMinPlayers()
self.console.say('XLRstats v%s by %s started' % (__version__, __author__))
# end startup sequence
def onLoadConfig(self):
"""
Load plugin configuration.
"""
def validate_server_nr(x):
"""validate the server number and it it's wrong will leave a message in the log file"""
if x < 0:
raise ValueError("servernumber cannot be lower than 0")
return x
self.provisional_ranking = self.getSetting('settings', 'provisional_ranking', b3.BOOL, self.provisional_ranking)
self.auto_correct = self.getSetting('settings', 'auto_correct', b3.BOOL, self.auto_correct)
self.auto_purge = self.getSetting('settings', 'auto_purge', b3.BOOL, self.auto_purge)
self.silent = self.getSetting('settings', 'silent', b3.BOOL, self.silent)
self.hide_bots = self.getSetting('settings', 'hide_bots', b3.BOOL, self.hide_bots)
self.exclude_bots = self.getSetting('settings', 'exclude_bots', b3.BOOL, self.exclude_bots)
self.min_players = self.getSetting('settings', 'minplayers', b3.INT, self.min_players, lambda x: int(max(x, 0)))
self.webfront_version = self.getSetting('settings', 'webfrontversion', b3.STR, self.webfront_version)
self.webfront_url = self.getSetting('settings', 'webfronturl', b3.STR, self.webfront_url)
self.webfront_config_nr = self.getSetting('settings', 'servernumber', b3.INT, self.webfront_config_nr, validate_server_nr)
self.keep_history = self.getSetting('settings', 'keep_history', b3.BOOL, self.keep_history)
self.onemaponly = self.getSetting('settings', 'onemaponly', b3.BOOL, self.onemaponly)
self.minlevel = self.getSetting('settings', 'minlevel', b3.LEVEL, self.minlevel, lambda x: int(max(x, 0)))
self.defaultskill = self.getSetting('settings', 'defaultskill', b3.INT, self.defaultskill)
self.Kfactor_high = self.getSetting('settings', 'Kfactor_high', b3.INT, self.Kfactor_high)
self.Kfactor_low = self.getSetting('settings', 'Kfactor_low', b3.INT, self.Kfactor_low)
self.Kswitch_confrontations = self.getSetting('settings', 'Kswitch_confrontations', b3.INT, self.Kswitch_confrontations)
self.steepness = self.getSetting('settings', 'steepness', b3.INT, self.steepness)
self.suicide_penalty_percent = self.getSetting('settings', 'suicide_penalty_percent', b3.FLOAT, self.suicide_penalty_percent)
self.tk_penalty_percent = self.getSetting('settings', 'tk_penalty_percent', b3.FLOAT, self.tk_penalty_percent)
self.assist_timespan = self.getSetting('settings', 'assist_timespan', b3.INT, self.assist_timespan)
self.damage_assist_release = self.getSetting('settings', 'damage_assist_release', b3.INT, self.damage_assist_release)
self.prematch_maxtime = self.getSetting('settings', 'prematch_maxtime', b3.INT, self.prematch_maxtime)
self.announce = self.getSetting('settings', 'announce', b3.BOOL, self.announce)
self.keep_time = self.getSetting('settings', 'keep_time', b3.BOOL, self.keep_time)
# load custom table names
self.load_config_tables()
def build_database_schema(self):
"""
Build the database schema checking if all the needed tables have been properly created.
If not, it will attempt to create them automatically
"""
sql_main = os.path.join(b3.getAbsolutePath('@b3/plugins/xlrstats/sql'), self.console.storage.protocol)
xlr_tables = {x: getattr(self, x) for x in dir(self) if x.endswith('_table')}
current_tables = self.console.storage.getTables()
for k, v in xlr_tables.items():
if v not in current_tables:
sql_name = right_cut(k, '_table') + '.sql'
sql_path = os.path.join(sql_main, sql_name)
if os.path.isfile(sql_path):
try:
with open(sql_path, 'r') as sql_file:
query = self.console.storage.getQueriesFromFile(sql_file)[0]
self.console.storage.query(query % v)
except Exception, e:
self.error("could not create schema for database table '%s': %s", v, e)
else:
self.info('created database table: %s', v)
else:
self.error("could not create schema for database table '%s': missing SQL script '%s'", v, sql_path)
# EXECUTE SCHEMA UPDATE
update_schema = {
'mysql': {
'history_monthly-update-3.0.0.sql': self.history_monthly_table,
'history_weekly-update-3.0.0.sql': self.history_weekly_table,
'playerstats-update-3.0.0.sql': self.playerstats_table,
},
'sqlite': {
'playerstats-update-3.0.0.sql': self.playerstats_table,
},
'postgresql': {
# NO UPDATE NEEDED FOR THE MOMENT
}
}
for k, v in update_schema[self.console.storage.protocol].items():
sql_path = os.path.join(sql_main, k)
if os.path.isfile(sql_path):
with open(sql_path, 'r') as sql_file:
# execute statements separately since we need to substitute the table name
for q in self.console.storage.getQueriesFromFile(sql_file):
try:
self.console.storage.query(q % v)
except Exception:
# DONT LOG HERE!!! (schema might have already changed so executing the update query will
# raise an exception without actually changing the database table structure (which is OK!)
pass
def load_config_tables(self):
"""
Load config section 'tables'
"""
def load_conf(property_to_set, setting_option):
assert hasattr(self, property_to_set)
try:
table_name = self.config.get('tables', setting_option)
if not table_name:
raise ValueError("invalid table name for %s: %r" % (setting_option, table_name))
setattr(self, property_to_set, table_name)
self._defaultTableNames = False
except NoOptionError, err:
self.debug(err)
except Exception, err:
self.error(err)
self.info('using value "%s" for tables::%s' % (getattr(self, property_to_set), setting_option))
load_conf('playerstats_table', 'playerstats')
load_conf('actionstats_table', 'actionstats')
load_conf('weaponstats_table', 'weaponstats')
load_conf('weaponusage_table', 'weaponusage')
load_conf('bodyparts_table', 'bodyparts')
load_conf('playerbody_table', 'playerbody')
load_conf('opponents_table', 'opponents')
load_conf('mapstats_table', 'mapstats')
load_conf('playermaps_table', 'playermaps')
load_conf('playeractions_table', 'playeractions')
load_conf('history_monthly_table', 'history_monthly')
load_conf('history_weekly_table', 'history_weekly')
load_conf('ctime_table', 'ctime')
####################################################################################################################
# #
# EVENTS #
# #
####################################################################################################################
def onJoin(self, event):
"""
Handle EVT_CLIENT_JOIN
"""
self.checkMinPlayers()
self.join(event.client)
def onKill(self, event):
"""
Handle EVT_CLIENT_KILL
"""
if self._xlrstats_active:
self.kill(event.client, event.target, event.data)
def onTeamKill(self, event):
"""
Handle EVT_CLIENT_KILL_TEAM
"""
if self._xlrstats_active:
if self.console.game.gameType in self._ffa:
self.kill(event.client, event.target, event.data)
else:
self.teamkill(event.client, event.target, event.data)
def onDamage(self, event):
"""
Handle EVT_CLIENT_DAMAGE
"""
if self._xlrstats_active:
self.damage(event.client, event.target, event.data)
def onSuicide(self, event):
"""
Handle EVT_CLIENT_SUICIDE
"""
if self._xlrstats_active:
self.suicide(event.client, event.target, event.data)
def onRoundStart(self, _):
"""
Handle EVT_GAME_ROUND_START
"""
# disable k/d counting if minimum players are not met
self.checkMinPlayers(_roundstart=True)
self.roundstart()
def onAction(self, event):
"""
Handle EVT_CLIENT_ACTION
"""
if self._xlrstats_active:
self.action(event.client, event.data)
####################################################################################################################
# #
# OTHER METHODS #
# #
####################################################################################################################
def getWebsiteVariables(self):
"""
Thread that polls for XLRstats webfront variables
"""
if self.webfront_version == 2:
req = str(self.webfront_url.rstrip('/')) + '/?config=' + str(self.webfront_config_nr) + '&func=pluginreq'
else:
req = str(self.webfront_url.rstrip('/')) + '/' + str(self.webfront_config_nr) + '/pluginreq/index'
try:
f = urllib2.urlopen(req)
res = f.readline().split(',')
# Our webfront will present us 3 values, e.g.: 200,20,30 -> minKills,minRounds,maxDays
if len(res) == 3:
# Force the collected strings to their final type. If an error occurs they will fail the try statement.
self._minKills = int(res[0])
self._minRounds = int(res[1])
self._maxDays = int(res[2])
self.debug('successfully retrieved webfront variables: minkills: %i, minrounds: %i, maxdays: %i' % (
self._minKills, self._minRounds, self._maxDays))
except Exception:
self.debug('couldn\'t retrieve webfront variables: using defaults')
def checkMinPlayers(self, _roundstart=False):
"""
Checks if minimum amount of players are present.
If minimum amount of players is reached will enable stats collecting
and if not it disables stats counting on next roundstart
"""
self._current_nr_players = len(self.console.clients.getList())
self.debug('checking number of players online: minimum = %s, current = %s', self.min_players, self._current_nr_players)
if self._current_nr_players < self.min_players and self._xlrstats_active and _roundstart:
self.info('XLRstats disabled: not enough players online')
if not self.silent:
self.console.say('XLRstats disabled: not enough players online!')
self._xlrstats_active = False
elif self._current_nr_players >= self.min_players and not self._xlrstats_active:
self.info('XLRstats enabled: collecting Stats')
if not self.silent:
self.console.say('XLRstats enabled: now collecting stats!')
self._xlrstats_active = True
else:
if self._xlrstats_active:
_status = 'enabled'
else:
_status = 'disabled'
self.debug('nothing to do at the moment: XLRstats is already %s', _status)
def win_prob(self, player_skill, opponent_skill):
return 1 / (10 ** ((opponent_skill - player_skill) / self.steepness) + 1)
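# A quick worked example of the Elo-style expected score above: with the
# default steepness of 600, a 1000-skill player facing a 1300-skill opponent
# gets 1 / (10 ** (300 / 600.0) + 1), about 0.24, while two equally skilled
# players each get exactly 0.5.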
def get_PlayerStats(self, client=None):
"""
Retrieves an existing stats record for given client or makes a new one IFF client's level is high enough
Otherwise (also on error), it returns None.
"""
if client is None:
client_id = self._world_clientid
else:
client_id = client.id
q = """SELECT * from %s WHERE client_id = %s LIMIT 1""" % (self.playerstats_table, client_id)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s = PlayerStats()
s.id = r['id']
s.client_id = r['client_id']
s.kills = r['kills']
s.deaths = r['deaths']
# read deaths before computing the Kfactor; the original checked the
# confrontation count while deaths still held its default value
if (s.kills + s.deaths) > self.Kswitch_confrontations:
s.Kfactor = self.Kfactor_low
else:
s.Kfactor = self.Kfactor_high
s.teamkills = r['teamkills']
s.teamdeaths = r['teamdeaths']
s.suicides = r['suicides']
s.ratio = r['ratio']
s.skill = r['skill']
s.assists = r['assists']
s.assistskill = r['assistskill']
s.curstreak = r['curstreak']
s.winstreak = r['winstreak']
s.losestreak = r['losestreak']
s.rounds = r['rounds']
s.hide = r['hide']
s.fixed_name = r['fixed_name']
s.id_token = r['id_token']
return s
elif (client is None) or (client.maxLevel >= self.minlevel):
s = PlayerStats()
s._new = True
s.skill = self.defaultskill
s.Kfactor = self.Kfactor_high
s.client_id = client_id
return s
else:
return None
def get_PlayerAnon(self):
return self.get_PlayerStats(None)
def get_WeaponStats(self, name):
s = WeaponStats()
q = """SELECT * from %s WHERE name = '%s' LIMIT 1""" % (self.weaponstats_table, name)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.name = r['name']
s.kills = r['kills']
s.suicides = r['suicides']
s.teamkills = r['teamkills']
return s
else:
s._new = True
s.name = name
return s
def get_Bodypart(self, name):
s = Bodyparts()
q = """SELECT * from %s WHERE name = '%s' LIMIT 1""" % (self.bodyparts_table, name)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.name = r['name']
s.kills = r['kills']
s.suicides = r['suicides']
s.teamkills = r['teamkills']
return s
else:
s._new = True
s.name = name
return s
def get_MapStats(self, name):
assert name is not None
s = MapStats()
q = """SELECT * from %s WHERE name = '%s' LIMIT 1""" % (self.mapstats_table, name)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.name = r['name']
s.kills = r['kills']
s.suicides = r['suicides']
s.teamkills = r['teamkills']
s.rounds = r['rounds']
return s
else:
s._new = True
s.name = name
return s
def get_WeaponUsage(self, weaponid, playerid):
s = WeaponUsage()
q = """SELECT * from %s WHERE weapon_id = %s AND player_id = %s LIMIT 1""" % (self.weaponusage_table, weaponid, playerid)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.player_id = r['player_id']
s.weapon_id = r['weapon_id']
s.kills = r['kills']
s.deaths = r['deaths']
s.suicides = r['suicides']
s.teamkills = r['teamkills']
s.teamdeaths = r['teamdeaths']
return s
else:
s._new = True
s.player_id = playerid
s.weapon_id = weaponid
return s
def get_Opponent(self, killerid, targetid):
s = Opponents()
q = """SELECT * from %s WHERE killer_id = %s AND target_id = %s LIMIT 1""" % (self.opponents_table, killerid, targetid)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.killer_id = r['killer_id']
s.target_id = r['target_id']
s.kills = r['kills']
s.retals = r['retals']
return s
else:
s._new = True
s.killer_id = killerid
s.target_id = targetid
return s
def get_PlayerBody(self, playerid, bodypartid):
s = PlayerBody()
q = """SELECT * from %s WHERE bodypart_id = %s AND player_id = %s LIMIT 1""" % (self.playerbody_table, bodypartid, playerid)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.player_id = r['player_id']
s.bodypart_id = r['bodypart_id']
s.kills = r['kills']
s.deaths = r['deaths']
s.suicides = r['suicides']
s.teamkills = r['teamkills']
s.teamdeaths = r['teamdeaths']
return s
else:
s._new = True
s.player_id = playerid
s.bodypart_id = bodypartid
return s
def get_PlayerMaps(self, playerid, mapid):
if not mapid:
self.info('map not recognized: trying to initialise map...')
mapstats = self.get_MapStats(self.console.game.mapName)
if mapstats:
if hasattr(mapstats, '_new'):
self.save_Stat(mapstats)
self.verbose('map %s successfully initialised', mapstats.name)
mapid = mapstats.id
assert mapid is not None, "failed to get mapid from database for %s" % self.console.game.mapName
else:
return None
s = PlayerMaps()
q = """SELECT * from %s WHERE map_id = %s AND player_id = %s LIMIT 1""" % (self.playermaps_table, mapid, playerid)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.player_id = r['player_id']
s.map_id = r['map_id']
s.kills = r['kills']
s.deaths = r['deaths']
s.suicides = r['suicides']
s.teamkills = r['teamkills']
s.teamdeaths = r['teamdeaths']
s.rounds = r['rounds']
return s
else:
s._new = True
s.player_id = playerid
s.map_id = mapid
return s
def get_ActionStats(self, name):
s = ActionStats()
q = """SELECT * from %s WHERE name = '%s' LIMIT 1""" % (self.actionstats_table, name)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.name = r['name']
s.count = r['count']
return s
else:
s._new = True
s.name = name
return s
def get_PlayerActions(self, playerid, actionid):
s = PlayerActions()
q = """SELECT * from %s WHERE action_id = %s AND player_id = %s LIMIT 1""" % (self.playeractions_table, actionid, playerid)
cursor = self.query(q)
if cursor and not cursor.EOF:
r = cursor.getRow()
s.id = r['id']
s.player_id = r['player_id']
s.action_id = r['action_id']
s.count = r['count']
return s
else:
s._new = True
s.player_id = playerid
s.action_id = actionid
return s
def save_Stat(self, stat):
#self.verbose('*----> XLRstats: saving statistics for %s' % type(stat))
#self.verbose('*----> Contents: %s' %stat)
if hasattr(stat, '_new'):
q = stat._insertquery()
#self.debug('Inserting using: %r', q)
cursor = self.query(q)
if cursor.rowcount > 0:
stat.id = cursor.lastrowid
delattr(stat, '_new')
else:
q = stat._updatequery()
#self.debug('Updating using: %r', q)
self.query(q)
#print 'save_Stat: q= ', q
#self.query(q)
# we could not really do anything with error checking on saving.
# If it fails, that's just bad luck.
return
def check_Assists(self, client, target, data, etype=None):
# determine possible assists: an assist only counts if damage was done within assist_timespan seconds before the death
# it will also punish teammates that have a 'negative' assist!
_count = 0 # number of assists to return
_sum = 0 # sum of assistskill returned
_vsum = 0 # sum of victims skill deduction returned
self.verbose('----> XLRstats: %s killed %s (%s), checking for assists', client.name, target.name, etype)
try:
ainfo = target._attackers
except:
target._attackers = {}
ainfo = target._attackers
for k, v in ainfo.iteritems():
if k == client.cid:
# don't award the killer for the assist as well
continue
elif time.time() - v < self.assist_timespan:
assister = self.console.clients.getByCID(k)
self.verbose('----> XLRstats: assister = %s', assister.name)
anonymous = None
victimstats = self.get_PlayerStats(target)
assiststats = self.get_PlayerStats(assister)
# if both should be anonymous, we have no work to do
if (assiststats is None) and (victimstats is None):
self.verbose('----> XLRstats: check_Assists: %s & %s both anonymous, continuing', assister.name, target.name)
continue
if victimstats is None:
anonymous = VICTIM
victimstats = self.get_PlayerAnon()
if victimstats is None:
continue
if assiststats is None:
anonymous = ASSISTER
assiststats = self.get_PlayerAnon()
if assiststats is None:
continue
# calculate the win probability for the assister and victim
assist_prob = self.win_prob(assiststats.skill, victimstats.skill)
# performance patch provided by IzNoGod: ELO states that assist_prob + victim_prob = 1
#victim_prob = self.win_prob(victimstats.skill, assiststats.skill)
victim_prob = 1 - assist_prob
self.verbose('----> XLRstats: win probability for %s: %s', assister.name, assist_prob)
self.verbose('----> XLRstats: win probability for %s: %s', target.name, victim_prob)
# get applicable weapon replacement
actualweapon = data[1]
for r in data:
try:
actualweapon = self.config.get('replacements', r)
except:
pass
# get applicable weapon multiplier
try:
weapon_factor = self.config.getfloat('weapons', actualweapon)
except:
weapon_factor = 1.0
# calculate new skill for the assister
if anonymous != ASSISTER:
oldskill = assiststats.skill
if ( target.team == assister.team ) and not ( self.console.game.gameType in self._ffa ):
#assister is a teammate and needs skill and assists reduced
_assistbonus = self.assist_bonus * assiststats.Kfactor * weapon_factor * (0 - assist_prob)
assiststats.skill = float(assiststats.skill) + _assistbonus
assiststats.assistskill = float(assiststats.assistskill) + _assistbonus
assiststats.assists -= 1 # negative assist
self.verbose('----> XLRstats: assistpunishment deducted for %s: %s (oldsk: %.3f - '
'newsk: %.3f)', assister.name, assiststats.skill - oldskill, oldskill, assiststats.skill)
_count += 1
_sum += _assistbonus
if self.announce and not assiststats.hide:
assister.message('^5XLRstats:^7 Teamdamaged (%s) -> skill: ^1%.3f^7 -> ^2%.1f^7',
target.name, assiststats.skill - oldskill, assiststats.skill)
else:
# this is a real assist
_assistbonus = self.assist_bonus * assiststats.Kfactor * weapon_factor * (1 - assist_prob)
assiststats.skill = float(assiststats.skill) + _assistbonus
assiststats.assistskill = float(assiststats.assistskill) + _assistbonus
assiststats.assists += 1
self.verbose('----> XLRstats: assistbonus awarded for %s: %s (oldsk: %.3f - newsk: %.3f)',
assister.name, assiststats.skill - oldskill, oldskill, assiststats.skill)
_count += 1
_sum += _assistbonus
if self.announce and not assiststats.hide:
assister.message('^5XLRstats:^7 Assistbonus (%s) -> skill: ^2+%.3f^7 -> ^2%.1f^7',
target.name, assiststats.skill - oldskill, assiststats.skill)
self.save_Stat(assiststats)
# calculate new skill for the victim
oldskill = victimstats.skill
if target.team == assister.team and self.console.game.gameType not in self._ffa:
# assister was a teammate, this should not affect victims skill.
pass
else:
# this is a real assist
_assistdeduction = self.assist_bonus * victimstats.Kfactor * weapon_factor * (0 - victim_prob)
victimstats.skill = float(victimstats.skill) + _assistdeduction
self.verbose('----> XLRstats: assist skilldeduction for %s: %s (oldsk: %.3f - newsk: %.3f)',
target.name, victimstats.skill - oldskill, oldskill, victimstats.skill)
_vsum += _assistdeduction
self.save_Stat(victimstats)
# end of assist reward function, return the number of assists
return _count, _sum, _vsum
def kill(self, client, target, data):
"""
Handle situations where client killed target.
"""
if (client is None) or (client.id == self._world_clientid):
return
if target is None:
return
if data is None:
return
# exclude botkills?
if (client.bot or target.bot) and self.exclude_bots:
self.verbose('bot involved: do not process!')
return
_assists_count, _assists_sum, _victim_sum = self.check_Assists(client, target, data, 'kill')
_both_provisional = False
anonymous = None
killerstats = self.get_PlayerStats(client)
victimstats = self.get_PlayerStats(target)
# if both should be anonymous, we have no work to do
if (killerstats is None) and (victimstats is None):
return
if killerstats is None:
anonymous = KILLER
killerstats = self.get_PlayerAnon()
if killerstats is None:
return
killerstats.skill = self.defaultskill
if victimstats is None:
anonymous = VICTIM
victimstats = self.get_PlayerAnon()
if victimstats is None:
return
#_killer_confrontations = killerstats.kills + killerstats.deaths
#_victom_confrontations = victimstats.kills + victimstats.deaths
# calculate winning probabilities for both players
killer_prob = self.win_prob(killerstats.skill, victimstats.skill)
# performance patch provided by IzNoGod: ELO states that killer_prob + victim_prob = 1
# victim_prob = self.win_prob(victimstats.skill, killerstats.skill)
victim_prob = 1 - killer_prob
# get applicable weapon replacement
actualweapon = data[1]
for r in data:
try:
actualweapon = self.config.get('replacements', r)
except:
pass
# get applicable weapon multiplier
try:
weapon_factor = self.config.getfloat('weapons', actualweapon)
except:
weapon_factor = 1.0
# calculate new stats for the killer
if anonymous != KILLER:
oldskill = killerstats.skill
# pure skilladdition for a 100% kill
_skilladdition = self.kill_bonus * killerstats.Kfactor * weapon_factor * (1 - killer_prob)
# deduct the assists from the killers skill, but no more than 50%
if _assists_sum == 0:
pass
elif _assists_sum >= ( _skilladdition / 2 ):
_skilladdition /= 2
self.verbose('----> XLRstats: killer: assists > 50perc: %.3f - skilladd: %.3f', _assists_sum, _skilladdition)
else:
_skilladdition -= _assists_sum
self.verbose('----> XLRstats: killer: assists < 50perc: %.3f - skilladd: %.3f', _assists_sum, _skilladdition)
killerstats.skill = float(killerstats.skill) + _skilladdition
self.verbose('----> XLRstats: killer: oldsk: %.3f - newsk: %.3f', oldskill, killerstats.skill)
killerstats.kills = int(killerstats.kills) + 1
if int(killerstats.deaths) != 0:
killerstats.ratio = float(killerstats.kills) / float(killerstats.deaths)
else:
killerstats.ratio = 0.0
if int(killerstats.curstreak) > 0:
killerstats.curstreak = int(killerstats.curstreak) + 1
else:
killerstats.curstreak = 1
if int(killerstats.curstreak) > int(killerstats.winstreak):
killerstats.winstreak = int(killerstats.curstreak)
else:
killerstats.winstreak = int(killerstats.winstreak)
# first check if both players are in provisional ranking state. If true we need to save both players stats.
if (victimstats.kills + victimstats.deaths) < self.Kswitch_confrontations and \
(killerstats.kills + killerstats.deaths) < self.Kswitch_confrontations and \
self.provisional_ranking:
_both_provisional = True
self.verbose('----> XLRstats: both players in provisional ranking state!')
# implementation of provisional ranking 23-2-2014 MWe:
# we use the first Kswitch_confrontations to determine the victims skill,
# we don't adjust the killers skill just yet, unless the victim is anonymous (not participating in xlrstats)
if _both_provisional or (victimstats.kills + victimstats.deaths) > self.Kswitch_confrontations or \
not self.provisional_ranking or anonymous == VICTIM:
if self.announce and not killerstats.hide:
client.message('^5XLRstats:^7 Killed %s -> skill: ^2+%.2f^7 -> ^2%.2f^7',
target.name, (killerstats.skill - oldskill), killerstats.skill)
self.save_Stat(killerstats)
# calculate new stats for the victim
if anonymous != VICTIM:
oldskill = victimstats.skill
# pure skilldeduction for a 100% kill
_skilldeduction = victimstats.Kfactor * weapon_factor * (0 - victim_prob)
# deduct the assists from the victims skill deduction, but no more than 50%
if _victim_sum == 0:
pass
elif _victim_sum <= (_skilldeduction / 2):  # careful: negative numbers here
_skilldeduction /= 2
self.verbose('----> XLRstats: victim: assists > 50perc: %.3f - skilldeduct: %.3f', _victim_sum, _skilldeduction)
else:
_skilldeduction -= _victim_sum
self.verbose('----> XLRstats: victim: assists < 50perc: %.3f - skilldeduct: %.3f', _victim_sum, _skilldeduction)
victimstats.skill = float(victimstats.skill) + _skilldeduction
self.verbose('----> XLRstats: victim: oldsk: %.3f - newsk: %.3f', oldskill, victimstats.skill)
victimstats.deaths = int(victimstats.deaths) + 1
victimstats.ratio = float(victimstats.kills) / float(victimstats.deaths)
if int(victimstats.curstreak) < 0:
victimstats.curstreak = int(victimstats.curstreak) - 1
else:
victimstats.curstreak = -1
if victimstats.curstreak < int(victimstats.losestreak):
victimstats.losestreak = victimstats.curstreak
else:
victimstats.losestreak = int(victimstats.losestreak)
# first check if both players are in provisional ranking state.
# if true we need to save both players stats.
if (victimstats.kills + victimstats.deaths) < self.Kswitch_confrontations and \
(killerstats.kills + killerstats.deaths) < self.Kswitch_confrontations and self.provisional_ranking:
_both_provisional = True
self.verbose('----> XLRstats: both players in provisional ranking state!')
# implementation of provisional ranking 23-2-2014 MWe:
# a player's first Kswitch_confrontations only establish his provisional skill,
# so we don't save the victim's skill while the killer is still provisional, unless the killer is anonymous (not participating in xlrstats)
if _both_provisional or (killerstats.kills + killerstats.deaths) > self.Kswitch_confrontations or \
not self.provisional_ranking or anonymous == KILLER:
if self.announce and not victimstats.hide:
target.message('^5XLRstats:^7 Killed by %s -> skill: ^1%.2f^7 -> ^2%.2f^7',
client.name, (victimstats.skill - oldskill), victimstats.skill)
self.save_Stat(victimstats)
# make sure the record for anonymous is really created with an insert once
if anonymous:
if (anonymous == KILLER) and (hasattr(killerstats, '_new')):
self.save_Stat(killerstats)
elif (anonymous == VICTIM) and (hasattr(victimstats, '_new')):
self.save_Stat(victimstats)
# adjust the "opponents" table to register who killed who
opponent = self.get_Opponent(targetid=victimstats.id, killerid=killerstats.id)
retal = self.get_Opponent(targetid=killerstats.id, killerid=victimstats.id)
# the above should always succeed, but you never know...
if opponent and retal:
opponent.kills += 1
retal.retals += 1
self.save_Stat(opponent)
self.save_Stat(retal)
# adjust weapon statistics
weaponstats = self.get_WeaponStats(name=actualweapon)
if weaponstats:
weaponstats.kills += 1
self.save_Stat(weaponstats)
w_usage_killer = self.get_WeaponUsage(playerid=killerstats.id, weaponid=weaponstats.id)
w_usage_victim = self.get_WeaponUsage(playerid=victimstats.id, weaponid=weaponstats.id)
if w_usage_killer and w_usage_victim:
w_usage_killer.kills += 1
w_usage_victim.deaths += 1
self.save_Stat(w_usage_killer)
self.save_Stat(w_usage_victim)
# adjust bodypart statistics
bodypart = self.get_Bodypart(name=data[2])
if bodypart:
bodypart.kills += 1
self.save_Stat(bodypart)
bp_killer = self.get_PlayerBody(playerid=killerstats.id, bodypartid=bodypart.id)
bp_victim = self.get_PlayerBody(playerid=victimstats.id, bodypartid=bodypart.id)
if bp_killer and bp_victim:
bp_killer.kills += 1
bp_victim.deaths += 1
self.save_Stat(bp_killer)
self.save_Stat(bp_victim)
# adjust map statistics
mapstats = self.get_MapStats(self.console.game.mapName)
if mapstats:
mapstats.kills += 1
self.save_Stat(mapstats)
map_killer = self.get_PlayerMaps(playerid=killerstats.id, mapid=mapstats.id)
map_victim = self.get_PlayerMaps(playerid=victimstats.id, mapid=mapstats.id)
if map_killer and map_victim:
map_killer.kills += 1
map_victim.deaths += 1
self.save_Stat(map_killer)
self.save_Stat(map_victim)
# end of kill function
return
def damage(self, client, target, data):
"""
Handle situations where client damaged target.
"""
if client.id == self._world_clientid:
self.verbose('----> XLRstats: onDamage: WORLD-damage, moving on...')
return None
if client.cid == target.cid:
self.verbose('----> XLRstats: onDamage: self damage: %s damaged %s, continuing', client.name, target.name)
return None
# exclude botdamage?
if (client.bot or target.bot) and self.exclude_bots:
self.verbose('bot involved: do not process!')
return None
# if the game reports damage amounts (_damage_ability), 50 or more damage points are needed to award an assist
if self._damage_ability and data[0] < 50:
self.verbose('---> XLRstats: not enough damage done to award an assist')
return
try:
target._attackers[client.cid] = time.time()
except:
target._attackers = {client.cid: time.time()}
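# sketch (assumption): target._attackers maps the attacker's slot id to the
# timestamp of his last qualifying damage, e.g. {'4': 1391000000.1}; the
# check_Assists() call on the next kill/suicide/teamkill presumably consumes
# this dict to pay out the assist bonuses.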
self.verbose('----> XLRstats: onDamage: attacker added: %s (%s) damaged %s (%s)',
client.name, client.cid, target.name, target.cid)
self.verbose('----> XLRstats: Assistinfo: %s' % target._attackers)
def suicide(self, client, target, data):
"""
Handle situations where a client committed suicide.
"""
if client is None:
return
if target is None:
return
if data is None:
return
self.check_Assists(client, target, data, 'suicide')
playerstats = self.get_PlayerStats(client)
if playerstats is None:
# anonymous player. We're not interested :)
return
playerstats.suicides += 1
if playerstats.curstreak < 0:
playerstats.curstreak -= 1
else:
playerstats.curstreak = -1
if playerstats.curstreak < playerstats.losestreak:
playerstats.losestreak = playerstats.curstreak
oldskill = playerstats.skill
playerstats.skill = (1 - (self.suicide_penalty_percent / 100.0) ) * float(playerstats.skill)
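# worked example (values are illustrative): with suicide_penalty_percent = 5.0
# and a skill of 1000.0, the new skill is 0.95 * 1000.0 = 950.0.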
if self.announce and not playerstats.hide:
client.message('^5XLRstats:^7 Suicide -> skill: ^1%.3f^7 -> ^2%.1f^7',
playerstats.skill - oldskill, playerstats.skill)
self.save_Stat(playerstats)
# get applicable weapon replacement
actualweapon = data[1]
for r in data:
try:
actualweapon = self.config.get('replacements', r)
except:
pass
# update weapon stats
weaponstats = self.get_WeaponStats(name=actualweapon)
if weaponstats:
weaponstats.suicides += 1
self.save_Stat(weaponstats)
w_usage = self.get_WeaponUsage(playerid=playerstats.id, weaponid=weaponstats.id)
if w_usage:
w_usage.suicides += 1
self.save_Stat(w_usage)
# update bodypart stats
bodypart = self.get_Bodypart(name=data[2])
if bodypart:
bodypart.suicides += 1
self.save_Stat(bodypart)
bp_player = self.get_PlayerBody(playerid=playerstats.id, bodypartid=bodypart.id)
if bp_player:
bp_player.suicides = int(bp_player.suicides) + 1
self.save_Stat(bp_player)
# adjust map statistics
mapstats = self.get_MapStats(self.console.game.mapName)
if mapstats:
mapstats.suicides += 1
self.save_Stat(mapstats)
map_player = self.get_PlayerMaps(playerid=playerstats.id, mapid=mapstats.id)
if map_player:
map_player.suicides += 1
self.save_Stat(map_player)
# end of function suicide
return
def teamkill(self, client, target, data):
"""
Handle teamkill situations.
"""
if client is None:
return
if target is None:
return
if data is None:
return
anonymous = None
self.check_Assists(client, target, data, 'teamkill')
killerstats = self.get_PlayerStats(client)
victimstats = self.get_PlayerStats(target)
# if both should be anonymous, we have no work to do
if (killerstats is None) and (victimstats is None):
return
if killerstats is None:
anonymous = KILLER
killerstats = self.get_PlayerAnon()
if killerstats is None:
return
killerstats.skill = self.defaultskill
if victimstats is None:
anonymous = VICTIM
victimstats = self.get_PlayerAnon()
if victimstats is None:
return
victimstats.skill = self.defaultskill
if anonymous != KILLER:
# calculate new stats for the killer
oldskill = killerstats.skill
killerstats.skill = (1 - (self.tk_penalty_percent / 100.0) ) * float(killerstats.skill)
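# worked example (values are illustrative): with tk_penalty_percent = 10.0 and
# a skill of 800.0, the new skill is 0.9 * 800.0 = 720.0.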
killerstats.teamkills += 1
killerstats.curstreak = 0 # break off current streak as it is now "impure"
if self.announce and not killerstats.hide:
client.message('^5XLRstats:^7 Teamkill -> skill: ^1%.3f^7 -> ^2%.1f^7',
killerstats.skill - oldskill, killerstats.skill)
self.save_Stat(killerstats)
if anonymous != VICTIM:
# calculate new stats for the victim
victimstats.teamdeaths += 1
self.save_Stat(victimstats)
# do not register a teamkill in the "opponents" table
# get applicable weapon replacement
actualweapon = data[1]
for r in data:
try:
actualweapon = self.config.get('replacements', r)
except:
pass
# adjust weapon statistics
weaponstats = self.get_WeaponStats(name=actualweapon)
if weaponstats:
weaponstats.teamkills += 1
self.save_Stat(weaponstats)
w_usage_killer = self.get_WeaponUsage(playerid=killerstats.id, weaponid=weaponstats.id)
w_usage_victim = self.get_WeaponUsage(playerid=victimstats.id, weaponid=weaponstats.id)
if w_usage_killer and w_usage_victim:
w_usage_killer.teamkills += 1
w_usage_victim.teamdeaths += 1
self.save_Stat(w_usage_killer)
self.save_Stat(w_usage_victim)
# adjust bodypart statistics
bodypart = self.get_Bodypart(name=data[2])
if bodypart:
bodypart.teamkills += 1
self.save_Stat(bodypart)
bp_killer = self.get_PlayerBody(playerid=killerstats.id, bodypartid=bodypart.id)
bp_victim = self.get_PlayerBody(playerid=victimstats.id, bodypartid=bodypart.id)
if bp_killer and bp_victim:
bp_killer.teamkills += 1
bp_victim.teamdeaths += 1
self.save_Stat(bp_killer)
self.save_Stat(bp_victim)
# adjust map statistics
mapstats = self.get_MapStats(self.console.game.mapName)
if mapstats:
mapstats.teamkills += 1
self.save_Stat(mapstats)
map_killer = self.get_PlayerMaps(playerid=killerstats.id, mapid=mapstats.id)
map_victim = self.get_PlayerMaps(playerid=victimstats.id, mapid=mapstats.id)
if map_killer and map_victim:
map_killer.teamkills += 1
map_victim.teamdeaths += 1
self.save_Stat(map_killer)
self.save_Stat(map_victim)
# end of function teamkill
return
def join(self, client):
"""
Handle a client joining the game.
"""
if client is None:
return
player = self.get_PlayerStats(client)
if player:
player.rounds = int(player.rounds) + 1
if client.bot:
if self.hide_bots:
self.verbose('hiding bot')
player.hide = True
else:
self.verbose('unhiding bot')
player.hide = False
self.save_Stat(player)
mapstats = self.get_MapStats(self.console.game.mapName)
if mapstats:
playermap = self.get_PlayerMaps(player.id, mapstats.id)
if playermap:
playermap.rounds += 1
self.save_Stat(playermap)
return
def roundstart(self):
"""
Handle new round start.
"""
if self._last_map is None:
self._last_map = self.console.game.mapName
# self._last_roundtime = self.console.game._roundTimeStart
else:
if not self.onemaponly and ( self._last_map == self.console.game.mapName) and \
(self.console.game.roundTime() < self.prematch_maxtime):
# (self.console.game._roundTimeStart - self._last_roundtime < self.prematch_maxtime)):
return
else:
self._last_map = self.console.game.mapName
#self._last_roundtime = self.console.game._roundTimeStart
mapstats = self.get_MapStats(self.console.game.mapName)
if mapstats:
mapstats.rounds += 1
self.save_Stat(mapstats)
return
def action(self, client, data):
"""
Handle client actions.
"""
# self.verbose('----> XLRstats: entering actionfunc')
if client is None:
return
action = self.get_ActionStats(name=data)
if action:
action.count += 1
#self.verbose('----> XLRstats: Actioncount: %s' %action.count)
#self.verbose('----> XLRstats: Actionname: %s' %action.name)
#if hasattr(action, '_new'):
# self.verbose('----> XLRstats: insertquery: %s' %action._insertquery())
#else:
# self.verbose('----> XLRstats: updatequery: %s' %action._updatequery())
self.save_Stat(action)
# is it an anonymous client, stop here
playerstats = self.get_PlayerStats(client)
if playerstats is None:
#self.verbose('----> XLRstats: Anonymous client')
return
playeractions = self.get_PlayerActions(playerid=playerstats.id, actionid=action.id)
if playeractions:
playeractions.count += 1
#self.verbose('----> XLRstats: Players Actioncount: %s' %playeractions.count)
#if hasattr(playeractions, '_new'):
# self.verbose('----> XLRstats: insertquery: %s' %playeractions._insertquery())
#else:
# self.verbose('----> XLRstats: updatequery: %s' %playeractions._updatequery())
self.save_Stat(playeractions)
# get applicable action bonus
try:
_action_bonus = self.config.getfloat('actions', action.name)
#self.verbose('----> XLRstats: Found a bonus for %s: %s' %(action.name, action_bonus))
except:
_action_bonus = self.action_bonus
if _action_bonus:
#self.verbose('----> XLRstats: Old Skill: %s.' %playerstats.skill)
playerstats.skill += _action_bonus
#self.verbose('----> XLRstats: New Skill: %s.' %playerstats.skill)
self.save_Stat(playerstats)
return
def updateTableColumns(self):
self.verbose('checking if we need to update tables for version 2.0.0')
# v2.0.0 additions to the playerstats table:
self._addTableColumn('assists', PlayerStats._table, 'MEDIUMINT( 8 ) NOT NULL DEFAULT "0" AFTER `skill`')
self._addTableColumn('assistskill', PlayerStats._table, 'FLOAT NOT NULL DEFAULT "0" AFTER `assists`')
# alterations to columns in existing tables:
self._updateTableColumns()
return None
# end of update check
def _addTableColumn(self, c1, t1, specs):
try:
self.query("""SELECT %s FROM %s limit 1;""" % (c1, t1))
except Exception, e:
if e[0] == 1054:
self.console.debug('column does not yet exist: %s' % e)
self.query("""ALTER TABLE %s ADD %s %s ;""" % (t1, c1, specs))
self.console.info('created new column `%s` on %s' % (c1, t1))
else:
self.console.error('query failed - %s: %s' % (type(e), e))
def _updateTableColumns(self):
try:
# need to update the weapon-identifier columns in these tables for cod7.
# This game knows over 255 weapons/variations
self.query("""ALTER TABLE %s
CHANGE id id SMALLINT(5) UNSIGNED NOT NULL AUTO_INCREMENT;""" % WeaponStats._table)
self.query("""ALTER TABLE %s
CHANGE weapon_id weapon_id SMALLINT(5) UNSIGNED NOT NULL DEFAULT "0";""" % WeaponUsage._table)
except:
pass
def showTables(self, xlrstats=False):
_tables = []
for table in self.console.storage.getTables():
if xlrstats and table not in self._xlrstatstables:
pass
else:
_tables.append(table)
if xlrstats:
self.console.verbose('available XLRstats tables in this database: %s', _tables)
else:
self.console.verbose('available tables in this database: %s', _tables)
return _tables
def optimizeTables(self, t=None):
if not t:
t = self.showTables()
if isinstance(t, basestring):
_tables = str(t)
else:
_tables = ', '.join(t)
self.debug('optimizing table(s): %s', _tables)
try:
self.query('OPTIMIZE TABLE %s' % _tables)
self.debug('optimize success')
except Exception, msg:
self.error('optimizing table(s) failed: %s: trying to repair...', msg)
self.repairTables(t)
def repairTables(self, t=None):
if not t:
t = self.showTables()
if isinstance(t, basestring):
_tables = str(t)
else:
_tables = ', '.join(t)
self.debug('repairing table(s): %s' % _tables)
try:
self.query('REPAIR TABLE %s' % _tables)
self.debug('repair success')
except Exception, msg:
self.error('repairing table(s) failed: %s' % msg)
def calculateKillBonus(self):
self.debug('calculating kill_bonus')
# make sure _max and _diff end up as floating point numbers (may be redundant)
_oldkillbonus = self.kill_bonus
# query the max skill from players active in the last 20 days
seconds = 20 * 86400
q = """SELECT %s.time_edit, MAX(%s.skill) AS max_skill FROM %s, %s WHERE %s - %s.time_edit <= %s""" % (
self.clients_table, self.playerstats_table, self.clients_table, self.playerstats_table, int(time.time()),
self.clients_table, seconds)
cursor = self.query(q)
r = cursor.getRow()
_max = r['max_skill']
if _max is None:
_max = self.defaultskill
_max = float(_max)
self.verbose('max skill: %s' % _max)
_diff = _max - self.defaultskill
if _diff < 0:
self.kill_bonus = 2.0
elif _diff < 400:
self.kill_bonus = 1.5
else:
c = 200.0 / _diff + 1
self.kill_bonus = round(c, 1)
self.assist_bonus = self.kill_bonus / 3
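# worked example (values are illustrative): a pool whose best active player is
# 1000 points above defaultskill gives c = 200.0 / 1000 + 1 = 1.2, so
# kill_bonus = 1.2 and assist_bonus = 0.4; the bonus shrinks toward 1.0 as the
# skill spread of the pool grows.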
if self.kill_bonus != _oldkillbonus:
self.debug('kill_bonus changed to: %s', self.kill_bonus)
self.debug('assist_bonus changed to: %s', self.assist_bonus)
else:
self.verbose('kill_bonus: %s' % self.kill_bonus)
self.verbose('assist_bonus: %s' % self.assist_bonus)
def correctStats(self):
self.debug('gathering XLRstats statistics')
_seconds = self._auto_correct_ignore_days * 86400
q = """SELECT MAX(%s.skill) AS max_skill, MIN(%s.skill) AS min_skill, SUM(%s.skill) AS sum_skill,
AVG(%s.skill) AS avg_skill , COUNT(%s.id) AS cnt
FROM %s, %s
WHERE %s.id = %s.client_id
AND %s.client_id <> %s
AND (%s.kills + %s.deaths) > %s
AND %s - %s.time_edit <= %s""" \
% (self.playerstats_table, self.playerstats_table, self.playerstats_table,
self.playerstats_table, self.playerstats_table,
self.playerstats_table, self.clients_table,
self.clients_table, self.playerstats_table,
self.playerstats_table, self._world_clientid,
self.playerstats_table, self.playerstats_table, self.Kswitch_confrontations,
int(time.time()), self.clients_table, _seconds)
cursor = self.query(q)
# self.verbose(q)
r = cursor.getRow()
if r['cnt'] == 0:
return None
_acceptable_average = self.defaultskill + 100
_factor_decimals = 6
# self.verbose('%s; %s; %s' % (r['sum_skill'], _acceptable_average, r['cnt']))
_surplus = r['sum_skill'] - (r['cnt'] * _acceptable_average)
_correction = _surplus / r['cnt']
_correction_factor = (r['cnt'] * _acceptable_average) / r['sum_skill']
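# worked example (values are illustrative, assuming defaultskill = 1000): with
# 100 qualifying players and sum_skill = 130000, _acceptable_average = 1100,
# _surplus = 20000, _correction = 200 points per player and
# _correction_factor = 110000 / 130000, roughly 0.846154.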
self.verbose('------------------------------------------')
self.verbose('- Active pool parameters:')
self.verbose('- Players of last %d days', self._auto_correct_ignore_days)
self.verbose('- Players with minimal %d confrontations', self.Kswitch_confrontations)
self.verbose('------------------------------------------')
self.verbose('- Total players participating: %d', r['cnt'])
self.verbose('- Total skill points in pool: %.2f', r['sum_skill'])
self.verbose('------------------------------------------')
self.verbose('- Highest skill in pool: %.2f', r['max_skill'])
self.verbose('- Lowest skill in pool: %.2f', r['min_skill'])
self.verbose('------------------------------------------')
self.verbose('- Average skill: %.2f', r['avg_skill'])
self.verbose('- Acceptable average skill: %.2f', _acceptable_average)
self.verbose('------------------------------------------')
self.verbose('- Difference (total) with acceptable pool: %.2f', _surplus)
self.verbose('- Avg. points deviation p/player: %.3f', _correction)
self.verbose('- Deviation factor: %s', round(_correction_factor, _factor_decimals))
self.verbose('------------------------------------------')
if _correction_factor < 1:
self.verbose('- !!CORRECTION OF SKILL ADVISED!!')
else:
self.verbose('- pool has room for inflation... no action needed')
self.verbose('------------------------------------------')
if self.auto_correct and round(_correction_factor, _factor_decimals) < 1:
self.debug('correcting overall skill with factor %s...' % round(_correction_factor, _factor_decimals))
self.query("""UPDATE %s SET skill=(SELECT skill * %s ) WHERE %s.client_id <> %s""" % (
self.playerstats_table, _correction_factor, self.playerstats_table, self._world_clientid))
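# note: the multiplicative UPDATE above scales every non-world player's skill
# by the same factor, so absolute values deflate while the relative ranking
# order is preserved.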
def purgePlayers(self):
if not self.auto_purge:
return None
self.debug('purging players who haven\'t been online for %s days...', self._purge_player_days)
# find players who haven't been online for a long time
_seconds = self._purge_player_days * 86400
q = """SELECT %s.id, %s.time_edit, %s.client_id, %s.id as player_id FROM %s, %s
WHERE %s.id = %s.client_id
AND %s - %s.time_edit > %s""" % (
self.clients_table, self.clients_table, self.playerstats_table, self.playerstats_table,
self.clients_table, self.playerstats_table, self.clients_table, self.playerstats_table,
int(time.time()), self.clients_table, _seconds)
cursor = self.query(q)
if cursor and not cursor.EOF:
while not cursor.EOF:
r = cursor.getRow()
self.verbose(r)
self.purgePlayerStats(r['player_id'])
self.purgeAssociated(self.playeractions_table, r['player_id'])
self.purgeAssociated(self.playerbody_table, r['player_id'])
self.purgeAssociated(self.playermaps_table, r['player_id'])
self.purgeAssociated(self.weaponusage_table, r['player_id'])
cursor.moveNext()
def purgePlayerStats(self, _id):
self.query("""DELETE FROM %s WHERE id = %s""" % (self.playerstats_table, _id))
def purgeAssociated(self, _table, _id):
self.query("""DELETE FROM %s WHERE player_id = %s""" % (_table, _id))
####################################################################################################################
# #
# COMMANDS #
# #
####################################################################################################################
def cmd_xlrstats(self, data, client, cmd=None):
"""
[<name>] - list a players XLR stats
"""
if data:
sclient = self._adminPlugin.findClientPrompt(data, client)
if not sclient:
# a player matching the name was not found, a list of closest matches will be displayed
# we can exit here and the user will retry with a more specific player
return
else:
sclient = client
stats = self.get_PlayerStats(sclient)
if stats:
if stats.hide == 1:
client.message('^3XLR Stats: ^7stats for %s are not available (hidden)' % sclient.exactName)
else:
message_vars = {
'name': sclient.exactName,
'kills': stats.kills,
'deaths': stats.deaths,
'teamkills': stats.teamkills,
'ratio': '%1.02f' % stats.ratio,
'skill': '%1.02f' % stats.skill,
}
message = self.getMessage('cmd_xlrstats', message_vars)
cmd.sayLoudOrPM(client, message)
else:
client.message('^3XLR Stats: ^7could not find stats for %s' % sclient.exactName)
def cmd_xlr(self, data, client, cmd=None):
"""
[<name>] - list a players XLR stats
"""
if data:
sclient = self._adminPlugin.findClientPrompt(data, client)
if not sclient:
# a player matching the name was not found, a list of closest matches will be displayed
# we can exit here and the user will retry with a more specific player
return
else:
sclient = client
stats = self.get_PlayerStats(sclient)
if stats:
if stats.hide == 1:
client.message('^3XLR Stats: ^7stats for %s are not available (hidden)' % sclient.exactName)
else:
message_vars = {
'name': sclient.exactName,
'kills': stats.kills,
'deaths': stats.deaths,
'teamkills': stats.teamkills,
'ratio': '%1.02f' % stats.ratio,
'skill': '%1.02f' % stats.skill,
}
message = self.getMessage('cmd_xlr', message_vars)
cmd.sayLoudOrPM(client, message)
else:
client.message('^3XLR Stats: ^7could not find stats for %s' % sclient.exactName)
def cmd_xlrtopstats(self, data, client, cmd=None, ext=False):
"""
[<#>] - list the top # players of the last 14 days.
"""
thread.start_new_thread(self.doTopList, (data, client, cmd, ext))
def doTopList(self, data, client, cmd=None, ext=False):
"""
Retrieves the Top # Players.
"""
limit = 3
if data:
if re.match('^[0-9]+$', data, re.I):
limit = int(data)
if limit > 10:
limit = 10
q = 'SELECT %s.name, %s.time_edit, %s.id, kills, deaths, ratio, skill, winstreak, losestreak, rounds, fixed_name, ip \
FROM %s, %s \
WHERE (%s.id = %s.client_id) \
AND ((%s.kills > %s) \
AND (%s.rounds > %s)) \
AND (%s.hide = 0) \
AND (%s - %s.time_edit <= %s * 60 * 60 * 24) \
AND %s.id NOT IN \
( SELECT distinct(target.id) FROM %s as penalties, %s as target \
WHERE (penalties.type = "Ban" \
OR penalties.type = "TempBan") \
AND inactive = 0 \
AND penalties.client_id = target.id \
AND ( penalties.time_expire = -1 \
OR penalties.time_expire > %s ) ) \
ORDER BY %s.skill DESC LIMIT %s'\
% (self.clients_table, self.clients_table, self.playerstats_table, self.clients_table, self.playerstats_table,
self.clients_table, self.playerstats_table, self.playerstats_table, self._minKills, self.playerstats_table,
self._minRounds, self.playerstats_table, int(time.time()), self.clients_table, self._maxDays, self.clients_table,
self.penalties_table, self.clients_table,
int(time.time()),
self.playerstats_table, limit)
cursor = self.query(q)
if cursor and not cursor.EOF:
message = '^3XLR Stats Top %s Players:' % limit
if ext:
self.console.say(message)
else:
cmd.sayLoudOrPM(client, message)
c = 1
while not cursor.EOF:
r = cursor.getRow()
message = self.getMessage('cmd_xlrtopstats', {'number': c, 'name': r['name'], 'skill': '%1.02f' % r['skill'],
'ratio': '%1.02f' % r['ratio'], 'kills': r['kills']})
if ext:
self.console.say(message)
else:
cmd.sayLoudOrPM(client, message)
cursor.moveNext()
c += 1
time.sleep(1)
else:
self.debug('no players qualified for the toplist yet...')
message = 'Qualify for the toplist by making at least %i kills and playing %i rounds!' % (
self._minKills, self._minRounds)
if ext:
self.console.say(message)
else:
cmd.sayLoudOrPM(client, message)
def cmd_xlrhide(self, data, client, cmd=None):
"""
<player> <on/off> - hide/unhide a player from the stats
"""
# this will split the player name and the message
handle = self._adminPlugin.parseUserCmd(data)
if handle:
# input[0] is the player id
sclient = self._adminPlugin.findClientPrompt(handle[0], client)
if not sclient:
# a player matching the name was not found, a list of closest matches will be displayed
# we can exit here and the user will retry with a more specific player
return
else:
client.message('^7Invalid data, try !help xlrhide')
return
if not handle[1]:
client.message('^7Missing data, try !help xlrhide')
return
m = handle[1]
if m in ('on', '1', 'yes'):
if client != sclient:
sclient.message('^3You are invisible in xlrstats!')
client.message('^3%s INVISIBLE in xlrstats!' % sclient.exactName)
hide = 1
elif m in ('off', '0', 'no'):
if client != sclient:
sclient.message('^3You are visible in xlrstats!')
client.message('^3%s VISIBLE in xlrstats!' % sclient.exactName)
hide = 0
else:
client.message('^7Invalid or missing data, try !help xlrhide')
return
player = self.get_PlayerStats(sclient)
if player:
player.hide = int(hide)
self.save_Stat(player)
def cmd_xlrid(self, data, client, cmd=None):
"""
<player ID Token> - identify yourself to the XLRstats website, get your token in your profile on the xlrstats website (v3)
"""
handle = self._adminPlugin.parseUserCmd(data)
if handle:
# input[0] is the token
token = handle[0]
else:
client.message('^7Invalid/missing data, try !help xlrid')
return
player = self.get_PlayerStats(client)
if player:
player.id_token = token
self.verbose('saving identification token %s' % token)
self.save_Stat(player)
client.message('^3Token saved!')
def cmd_xlrstatus(self, data, client, cmd=None):
"""
- exposes current plugin status and major settings
"""
if not self._xlrstats_active:
_neededPlayers = len(self.console.clients.getList()) - self.min_players
client.message('^3XLRstats disabled: need %s more players' % abs(_neededPlayers))
else:
client.message('^3XLRstats enabled: collecting stats')
if self.provisional_ranking:
client.message('^3Provisional phase: %s confrontations' % self.Kswitch_confrontations)
client.message('^3auto_correct: %s, auto_purge: %s, k_b: %s, as_b: %s, ac_b: %s' %
(self.auto_correct, self.auto_purge, self.kill_bonus, self.assist_bonus, self.action_bonus))
def cmd_xlrinit(self, data, client, cmd=None):
"""
- initialize XLRstats database schema (!!!will remove all the collected stats!!!)
"""
xlr_tables = [getattr(self, x) for x in dir(self) if x.endswith('_table')]
current_tables = self.console.storage.getTables()
# truncate database tables
for table in xlr_tables:
if table in current_tables:
self.info('initializing table: %s', table)
self.console.storage.truncateTable(table)
# eventually rebuild missing tables
self.build_database_schema()
client.message('^3XLRstats database schema initialized')
def cmd_xlrreset(self, data, client, cmd=None):
"""
<player ID Token> - reset xlrstats data
"""
handle = self._adminPlugin.parseUserCmd(data)
if handle:
# input[0] is the player id
sclient = self._adminPlugin.findClientPrompt(handle[0], client)
if not sclient:
# a player matching the name was not found, a list of closest matches will be displayed
# we can exit here and the user will retry with a more specific player
return
else:
client.message('^7Invalid data.')
return
stats = self.get_PlayerStats(sclient)
if stats:
self.dataReset(sclient.id)
client.message('^3%s: ^1XLR data has been reset!' % sclient.exactName)
else:
client.message('^3%s: ^1no XLR data found!' % sclient.exactName)
def dataReset(self, pid):
q = """UPDATE xlr_playerstats SET kills=0, deaths=0, teamkills=0, teamdeaths=0, suicides=0, ratio=0, skill=0, assists=0, assistskill=0, curstreak=0, winstreak=0, losestreak=0, rounds=0, hide=0, fixed_name='', id_token='' WHERE id= %s""" % (pid)
self.query(q)
########################################################################################################################
# #
# SUB PLUGIN CONTROLLER - CONTROLS STARTING AND STOPPING OF MAIN XLRSTATS PLUGIN BASED ON PLAYERCOUNT #
# OBSOLETE! REMOVED SINCE IT ALSO AFFECTED THE COMMANDS BEING UNAVAILABLE WHEN INACTIVE #
# #
########################################################################################################################
# class XlrstatscontrollerPlugin(b3.plugin.Plugin):
# """This is a helper class/plugin that enables and disables the main XLRstats plugin
# It can not be called directly or separately from the XLRstats plugin!"""
#
# def __init__(self, console, min_players=3, silent=False):
# self.console = console
# self.console.debug('Initializing SubPlugin: XlrstatsControllerPlugin')
# self.min_players = min_players
# self.silent = silent
# # empty message cache
# self._messages = {}
# self.registerEvent(b3.events.EVT_STOP)
# self.registerEvent(b3.events.EVT_EXIT)
#
# def onStartup(self):
# self.console.debug('Starting SubPlugin: XlrstatsControllerPlugin')
# #get a reference to the main Xlrstats plugin
# self._xlrstatsPlugin = self.console.getPlugin('xlrstats')
# # register the events we're interested in.
# self.registerEvent(b3.events.EVT_CLIENT_JOIN)
# self.registerEvent(b3.events.EVT_GAME_ROUND_START)
#
# def onEvent(self, event):
# if event.type == b3.events.EVT_CLIENT_JOIN:
# self.checkMinPlayers()
# elif event.type == b3.events.EVT_GAME_ROUND_START:
# self.checkMinPlayers(_roundstart=True)
#
# def checkMinPlayers(self, _roundstart=False):
# """Checks if minimum amount of players are present
# if minimum amount of players is reached will enable stats collecting
# and if not it disables stats counting on next roundstart"""
# self._current_nr_players = len(self.console.clients.getList())
# self.debug(
# 'Checking number of players online. Minimum = %s, Current = %s' % (self.min_players, self._current_nr_players))
# if self._current_nr_players < self.min_players and self._xlrstatsPlugin.isEnabled() and _roundstart:
# self.info('Disabling XLRstats: Not enough players online')
# if not self.silent:
# self.console.say('XLRstats Disabled: Not enough players online!')
# self._xlrstatsPlugin.disable()
# elif self._current_nr_players >= self.min_players and not self._xlrstatsPlugin.isEnabled():
# self.info('Enabling XLRstats: Collecting Stats')
# if not self.silent:
# self.console.say('XLRstats Enabled: Now collecting stats!')
# self._xlrstatsPlugin.enable()
# else:
# if self._xlrstatsPlugin.isEnabled():
# _status = 'Enabled'
# else:
# _status = 'Disabled'
# self.debug('Nothing to do at the moment. XLRstats is already %s' % _status)
########################################################################################################################
## ##
## SUB PLUGIN HISTORY - SAVES HISTORY SNAPSHOTS, WEEKLY AND/OR MONTHLY ##
## ##
########################################################################################################################
class XlrstatshistoryPlugin(b3.plugin.Plugin):
"""
This is a helper class/plugin that saves history snapshots
It can not be called directly or separately from the XLRstats plugin!
"""
requiresConfigFile = False
_cronTab = None
_cronTabMonth = None
_cronTabWeek = None
_max_months = 12
_max_weeks = 12
_hours = 5
_minutes = 10
####################################################################################################################
# #
# PLUGIN STARTUP #
# #
####################################################################################################################
def __init__(self, console, weeklyTable, monthlyTable, playerstatsTable):
"""
Object constructor.
:param console: The console instance
:param weeklyTable: The history weekly database table name
:param monthlyTable: The history monthly database table name
:param playerstatsTable: The playerstats database table name
"""
b3.plugin.Plugin.__init__(self, console)
self.history_weekly_table = weeklyTable
self.history_monthly_table = monthlyTable
self.playerstats_table = playerstatsTable
# empty message cache
self._messages = {}
# define a shortcut to the storage.query function
self.query = self.console.storage.query
# purge crontab
tzName = self.console.config.get('b3', 'time_zone').upper()
tzOffset = b3.timezones.timezones[tzName]
hoursGMT = (self._hours - tzOffset) % 24
self.debug(u'%02d:%02d %s => %02d:%02d UTC' % (self._hours, self._minutes, tzName, hoursGMT, self._minutes))
self.info(u'every day at %02d:%02d %s, history info older than %s months and %s weeks will be deleted' % (
self._hours, self._minutes, tzName, self._max_months, self._max_weeks))
self._cronTab = b3.cron.PluginCronTab(self, self.purge, 0, self._minutes, hoursGMT, '*', '*', '*')
self.console.cron + self._cronTab
def onStartup(self):
"""
Initialize plugin.
"""
self.debug('starting subplugin...')
self.verbose('installing history crontabs')
# remove existing crontabs
try:
self.console.cron - self._cronTabMonth
except:
pass
try:
self.console.cron - self._cronTabWeek
except:
pass
try:
# install crontabs
self._cronTabMonth = b3.cron.PluginCronTab(self, self.snapshot_month, 0, 0, 0, 1, '*', '*')
self.console.cron + self._cronTabMonth
self._cronTabWeek = b3.cron.PluginCronTab(self, self.snapshot_week, 0, 0, 0, '*', '*', 1) # day 1 is monday
self.console.cron + self._cronTabWeek
except Exception, msg:
self.error('unable to install history crontabs: %s', msg)
# purge the tables on startup
self.purge()
####################################################################################################################
# #
# CRONJOBS #
# #
####################################################################################################################
def snapshot_month(self):
"""
Create the monthly snapshot.
"""
sql = """INSERT INTO %s (client_id, kills, deaths, teamkills, teamdeaths, suicides, ratio,
skill, assists, assistskill, winstreak, losestreak, rounds, year, month, week, day)
SELECT client_id, kills, deaths, teamkills, teamdeaths, suicides, ratio, skill, assists,
assistskill, winstreak, losestreak, rounds, YEAR(NOW()), MONTH(NOW()), WEEK(NOW(),3), DAY(NOW())
FROM %s""" % (self.history_monthly_table, self.playerstats_table)
try:
self.query(sql)
self.verbose('monthly XLRstats snapshot created')
except Exception, msg:
self.error('creating history snapshot failed: %s' % msg)
def snapshot_week(self):
"""
Create the weekly snapshot.
"""
sql = """INSERT INTO %s (client_id , kills, deaths, teamkills, teamdeaths, suicides, ratio,
skill, assists, assistskill, winstreak, losestreak, rounds, year, month, week, day)
SELECT client_id, kills, deaths, teamkills, teamdeaths, suicides, ratio, skill, assists,
assistskill, winstreak, losestreak, rounds, YEAR(NOW()), MONTH(NOW()), WEEK(NOW(),3), DAY(NOW())
FROM %s""" % (self.history_weekly_table, self.playerstats_table)
try:
self.query(sql)
self.verbose('weekly XLRstats snapshot created')
except Exception, msg:
self.error('creating history snapshot failed: %s', msg)
def purge(self):
"""
Purge history tables.
"""
# purge the months table
if not self._max_months:
self.warning(u'max_months is invalid [%s]' % self._max_months)
return False
self.info(u'purge of history entries older than %s months ...' % self._max_months)
maxMonths = self.console.time() - self._max_months*24*60*60*30
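# note: a month is approximated here as 30 days (24*60*60*30 seconds), so the
# cutoff can drift slightly against calendar months; the weekly purge below
# uses exact 7-day weeks.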
self.verbose(u'calculated maxMonths: %s' % maxMonths)
_month = datetime.datetime.fromtimestamp(int(maxMonths)).strftime('%m')
_year = datetime.datetime.fromtimestamp(int(maxMonths)).strftime('%Y')
if int(_month) < self._max_months:
_yearPrev = int(_year)-1
else:
_yearPrev = int(_year)
q = """DELETE FROM %s WHERE (month < %s AND year <= %s) OR year < %s""" % (self.history_monthly_table, _month, _year, _yearPrev)
self.debug(u'QUERY: %s ' % q)
self.console.storage.query(q)
# purge the weeks table
if not self._max_weeks:
self.warning(u'max_weeks is invalid [%s]' % self._max_weeks)
return False
self.info(u'purge of history entries older than %s weeks ...' % self._max_weeks)
maxWeeks = self.console.time() - self._max_weeks*24*60*60*7
self.verbose(u'calculated maxWeeks: %s' % maxWeeks)
_week = datetime.datetime.fromtimestamp(int(maxWeeks)).strftime('%W')
_year = datetime.datetime.fromtimestamp(int(maxWeeks)).strftime('%Y')
if int(_week) < self._max_weeks:
_yearPrev = int(_year)-1
else:
_yearPrev = int(_year)
q = """DELETE FROM %s WHERE (week < %s AND year <= %s) OR year < %s""" % (self.history_weekly_table, _week, _year, _yearPrev)
self.debug(u'QUERY: %s ' % q)
self.console.storage.query(q)
########################################################################################################################
# #
# SUB PLUGIN CTIME - REGISTERS JOIN AND LEAVE TIMES OF PLAYERS #
# #
########################################################################################################################
class TimeStats(object):
came = None
left = None
client = None
class CtimePlugin(b3.plugin.Plugin):
"""
This is a helper class/plugin that saves client join and disconnect time info
It can not be called directly or separately from the XLRstats plugin!
"""
requiresConfigFile = False
_clients = {}
_cronTab = None
_max_age_in_days = 31
_hours = 5
_minutes = 0
####################################################################################################################
# #
# PLUGIN STARTUP #
# #
####################################################################################################################
def __init__(self, console, cTimeTable):
"""
Object constructor.
:param console: The console instance
:param cTimeTable: The ctime database table name
"""
b3.plugin.Plugin.__init__(self, console)
self.ctime_table = cTimeTable
# define a shortcut to the storage.query function
self.query = self.console.storage.query
tzName = self.console.config.get('b3', 'time_zone').upper()
tzOffset = b3.timezones.timezones[tzName]
hoursGMT = (self._hours - tzOffset) % 24
self.debug(u'%02d:%02d %s => %02d:%02d UTC' % (self._hours, self._minutes, tzName, hoursGMT, self._minutes))
self.info(u'every day at %02d:%02d %s, connection info older than %s days will be deleted' % (self._hours,
self._minutes, tzName, self._max_age_in_days))
self._cronTab = b3.cron.PluginCronTab(self, self.purge, 0, self._minutes, hoursGMT, '*', '*', '*')
self.console.cron + self._cronTab
def onStartup(self):
"""
Initialize plugin.
"""
self.debug('starting subplugin...')
self.registerEvent('EVT_CLIENT_AUTH', self.onAuth)
self.registerEvent('EVT_CLIENT_DISCONNECT', self.onDisconnect)
####################################################################################################################
# #
# EVENTS #
# #
####################################################################################################################
def onAuth(self, event):
"""
Handle EVT_CLIENT_AUTH
"""
if not event.client or not event.client.id or event.client.cid is None or \
not event.client.connected or event.client.hide:
return
self.update_time_stats_connected(event.client)
def onDisconnect(self, event):
"""
Handle EVT_CLIENT_DISCONNECT
"""
self.update_time_stats_exit(event.data)
####################################################################################################################
# #
# OTHER METHODS #
# #
####################################################################################################################
def purge(self):
"""
Purge the ctime database table.
"""
if not self._max_age_in_days:
self.warning(u'max_age is invalid [%s]', self._max_age_in_days)
return False
self.info(u'purge of connection info older than %s days ...', self._max_age_in_days)
q = """DELETE FROM %s WHERE came < %i""" % (self.ctime_table, (self.console.time() - (self._max_age_in_days * 24 * 60 * 60)))
self.debug(u'CTIME QUERY: %s ' % q)
self.console.storage.query(q)
def update_time_stats_connected(self, client):
if client.cid in self._clients:
self.debug(u'CTIME CONNECTED: client exist! : %s', client.cid)
tmpts = self._clients[client.cid]
if tmpts.client.guid == client.guid:
self.debug(u'CTIME RECONNECTED: player %s connected again, but playing since: %s', client.exactName, tmpts.came)
return
else:
del self._clients[client.cid]
ts = TimeStats()
ts.client = client
ts.came = datetime.datetime.now()
self._clients[client.cid] = ts
self.debug(u'CTIME CONNECTED: player %s started playing at: %s', client.exactName, ts.came)
@staticmethod
def formatTD(td):
hours = td // 3600
minutes = (td % 3600) // 60
seconds = td % 60
return '%s:%s:%s' % (hours, minutes, seconds)
def update_time_stats_exit(self, clientid):
self.debug(u'CTIME LEFT:')
if clientid in self._clients:
ts = self._clients[clientid]
# Fail: Sometimes PB in cod4 returns 31 character guids, we need to dump them.
# Lets look ahead and do this for the whole codseries.
#if(self.console.gameName[:3] == 'cod' and self.console.PunkBuster and len(ts.client.guid) != 32):
# pass
#else:
ts.left = datetime.datetime.now()
diff = (int(time.mktime(ts.left.timetuple())) - int(time.mktime(ts.came.timetuple())))
self.debug(u'CTIME LEFT: player: %s played this time: %s sec', ts.client.exactName, diff)
self.debug(u'CTIME LEFT: player: %s played this time: %s', ts.client.exactName, self.formatTD(diff))
#INSERT INTO `ctime` (`guid`, `came`, `left`) VALUES ("6fcc4f6d9d8eb8d8457fd72d38bb1ed2", 1198187868, 1226081506)
q = """INSERT INTO %s (guid, came, gone, nick) VALUES (\"%s\", \"%s\", \"%s\", \"%s\")""" % (self.ctime_table,
ts.client.guid, int(time.mktime(ts.came.timetuple())), int(time.mktime(ts.left.timetuple())), ts.client.name)
self.query(q)
self._clients[clientid].left = None
self._clients[clientid].came = None
self._clients[clientid].client = None
del self._clients[clientid]
else:
self.debug(u'CTIME LEFT: player %s var not set!', clientid)
########################################################################################################################
# #
# SUB PLUGIN BATTLELOG - REGISTERS LAST PLAYED MATCHES #
# #
########################################################################################################################
class BattlestatsPlugin(b3.plugin.Plugin):
"""
This is a helper class/plugin that saves last played matches
It can not be called directly or separately from the XLRstats plugin!
"""
####################################################################################################################
# #
# PLUGIN STARTUP #
# #
####################################################################################################################
def __init__(self, console, battlelogGamesTable, battlelogClientsTable):
"""
Object constructor.
:param console: The console instance
:param battlelogGamesTable: The battlelog games table
:param battlelogClientsTable: The battlelog clients table
"""
b3.plugin.Plugin.__init__(self, console)
self.battlelog_games_table = battlelogGamesTable
self.battlelog_clients_table = battlelogClientsTable
self.query = self.console.storage.query
self.gameLog = None
self.clientsLog = None
def onStartup(self):
"""
Initialize plugin.
"""
self.console.debug('starting subplugin...')
self.registerEvent('EVT_CLIENT_AUTH', self.onAuth)
self.registerEvent('EVT_CLIENT_DISCONNECT', self.onDisconnect)
self.registerEvent('EVT_GAME_ROUND_START', self.onRoundStart)
self.registerEvent('EVT_GAME_ROUND_END', self.onRoundEnd)
####################################################################################################################
# #
# EVENTS #
# #
####################################################################################################################
def onAuth(self, event):
"""
Handle EVT_CLIENT_AUTH
"""
pass
def onDisconnect(self, event):
"""
Handle EVT_CLIENT_DISCONNECT
"""
pass
def onRoundStart(self, event):
"""
Handle EVT_GAME_ROUND_START
"""
pass
def onRoundEnd(self, event):
"""
Handle EVT_GAME_ROUND_END
"""
pass
def clearBattlelog(self):
self.gameLog = None
self.clientsLog = None
def setupBattlelog(self):
self.gameLog = BattleStats()
self.clientsLog = {}
########################################################################################################################
# #
# ABSTRACT CLASSES TO AID XLRSTATS PLUGIN CLASS #
# #
########################################################################################################################
class StatObject(object):
_table = None
def _insertquery(self):
return None
def _updatequery(self):
return None
class PlayerStats(StatObject):
# default name of the table for this data object
_table = 'playerstats'
# fields of the table
id = None
client_id = 0
Kfactor = 1
kills = 0
deaths = 0
teamkills = 0
teamdeaths = 0
suicides = 0
ratio = 0
skill = 0
assists = 0
assistskill = 0
curstreak = 0
winstreak = 0
losestreak = 0
rounds = 0
hide = 0
# the following fields are used only by the PHP presentation code
fixed_name = ""
id_token = "" # player identification token for webfront v3
def _insertquery(self):
q = """INSERT INTO %s (client_id, kills, deaths, teamkills, teamdeaths, suicides, ratio, skill, assists,
assistskill, curstreak, winstreak, losestreak, rounds, hide, fixed_name, id_token) VALUES (%s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, '%s', '%s')""" % (self._table, self.client_id, self.kills,
self.deaths, self.teamkills, self.teamdeaths, self.suicides, self.ratio, self.skill, self.assists,
self.assistskill, self.curstreak, self.winstreak, self.losestreak, self.rounds, self.hide,
escape(self.fixed_name, "'"), self.id_token)
return q
def _updatequery(self):
q = """UPDATE %s SET client_id=%s, kills=%s, deaths=%s, teamkills=%s, teamdeaths=%s, suicides=%s, ratio=%s,
skill=%s, assists=%s, assistskill=%s, curstreak=%s, winstreak=%s, losestreak=%s, rounds=%s, hide=%s,
fixed_name='%s', id_token='%s' WHERE id= %s""" % (self._table, self.client_id, self.kills, self.deaths,
self.teamkills, self.teamdeaths, self.suicides, self.ratio, self.skill, self.assists, self.assistskill,
self.curstreak, self.winstreak, self.losestreak, self.rounds, self.hide, escape(self.fixed_name, "'"),
self.id_token, self.id)
return q
class WeaponStats(StatObject):
# default name of the table for this data object
_table = 'weaponstats'
# fields of the table
id = None
name = ''
kills = 0
suicides = 0
teamkills = 0
def _insertquery(self):
q = """INSERT INTO %s (name, kills, suicides, teamkills) VALUES ('%s', %s, %s, %s)""" % (
self._table, escape(self.name, "'"), self.kills, self.suicides, self.teamkills)
return q
def _updatequery(self):
q = """UPDATE %s SET name='%s', kills=%s, suicides=%s, teamkills=%s WHERE id=%s""" % (
self._table, escape(self.name, "'"), self.kills, self.suicides, self.teamkills, self.id)
return q
class WeaponUsage(StatObject):
# default name of the table for this data object
_table = 'weaponusage'
# fields of the table
id = None
player_id = 0
weapon_id = 0
kills = 0
deaths = 0
suicides = 0
teamkills = 0
teamdeaths = 0
def _insertquery(self):
q = """INSERT INTO %s (player_id, weapon_id, kills, deaths, suicides, teamkills, teamdeaths)
VALUES (%s, %s, %s, %s, %s, %s, %s)""" % (self._table, self.player_id, self.weapon_id, self.kills,
self.deaths, self.suicides, self.teamkills, self.teamdeaths)
return q
def _updatequery(self):
q = """UPDATE %s SET player_id=%s, weapon_id=%s, kills=%s, deaths=%s, suicides=%s, teamkills=%s,
teamdeaths=%s WHERE id=%s""" % (self._table, self.player_id, self.weapon_id, self.kills, self.deaths,
self.suicides, self.teamkills, self.teamdeaths, self.id)
return q
class Bodyparts(StatObject):
# default name of the table for this data object
_table = 'bodyparts'
# fields of the table
id = None
name = ''
kills = 0
suicides = 0
teamkills = 0
def _insertquery(self):
q = """INSERT INTO %s (name, kills, suicides, teamkills) VALUES ('%s', %s, %s, %s)""" % (
self._table, escape(self.name, "'"), self.kills, self.suicides, self.teamkills)
return q
def _updatequery(self):
q = """UPDATE %s SET name='%s', kills=%s, suicides=%s, teamkills=%s WHERE id=%s""" % (
self._table, escape(self.name, "'"), self.kills, self.suicides, self.teamkills, self.id)
return q
class MapStats(StatObject):
# default name of the table for this data object
_table = 'mapstats'
# fields of the table
id = None
name = ''
kills = 0
suicides = 0
teamkills = 0
rounds = 0
def _insertquery(self):
q = """INSERT INTO %s (name, kills, suicides, teamkills, rounds) VALUES ('%s', %s, %s, %s, %s)""" % (
self._table, escape(self.name, "'"), self.kills, self.suicides, self.teamkills, self.rounds)
return q
def _updatequery(self):
q = """UPDATE %s SET name='%s', kills=%s, suicides=%s, teamkills=%s, rounds=%s WHERE id=%s""" % (
self._table, escape(self.name, "'"), self.kills, self.suicides, self.teamkills, self.rounds, self.id)
return q
class PlayerBody(StatObject):
# default name of the table for this data object
_table = 'playerbody'
# fields of the table
id = None
player_id = 0
bodypart_id = 0
kills = 0
deaths = 0
suicides = 0
teamkills = 0
teamdeaths = 0
def _insertquery(self):
q = """INSERT INTO %s (player_id, bodypart_id, kills, deaths, suicides, teamkills, teamdeaths)
VALUES (%s, %s, %s, %s, %s, %s, %s)""" % (self._table, self.player_id, self.bodypart_id, self.kills,
self.deaths, self.suicides, self.teamkills, self.teamdeaths)
return q
def _updatequery(self):
q = """UPDATE %s SET player_id=%s, bodypart_id=%s, kills=%s, deaths=%s, suicides=%s, teamkills=%s, teamdeaths=%s
WHERE id=%s""" % (self._table, self.player_id, self.bodypart_id, self.kills, self.deaths, self.suicides, self.teamkills,
self.teamdeaths, self.id)
return q
class PlayerMaps(StatObject):
# default name of the table for this data object
_table = 'playermaps'
# fields of the table
id = 0
player_id = 0
map_id = 0
kills = 0
deaths = 0
suicides = 0
teamkills = 0
teamdeaths = 0
rounds = 0
def _insertquery(self):
q = """INSERT INTO %s (player_id, map_id, kills, deaths, suicides, teamkills, teamdeaths, rounds)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)""" % (self._table, self.player_id, self.map_id, self.kills,
self.deaths, self.suicides, self.teamkills, self.teamdeaths, self.rounds)
return q
def _updatequery(self):
q = """UPDATE %s SET player_id=%s, map_id=%s, kills=%s, deaths=%s, suicides=%s, teamkills=%s,
teamdeaths=%s, rounds=%s WHERE id=%s""" % (self._table, self.player_id, self.map_id, self.kills,
self.deaths, self.suicides, self.teamkills, self.teamdeaths, self.rounds, self.id)
return q
class Opponents(StatObject):
# default name of the table for this data object
_table = 'opponents'
# fields of the table
id = None
killer_id = 0
target_id = 0
kills = 0
retals = 0
def _insertquery(self):
q = """INSERT INTO %s (killer_id, target_id, kills, retals) VALUES (%s, %s, %s, %s)""" % (
self._table, self.killer_id, self.target_id, self.kills, self.retals)
return q
def _updatequery(self):
q = """UPDATE %s SET killer_id=%s, target_id=%s, kills=%s, retals=%s WHERE id=%s""" % (
self._table, self.killer_id, self.target_id, self.kills, self.retals, self.id)
return q
class ActionStats(StatObject):
# default name of the table for this data object
_table = 'actionstats'
# fields of the table
id = None
name = ''
count = 0
def _insertquery(self):
q = """INSERT INTO %s (name, count) VALUES ('%s', %s)""" % (self._table, escape(self.name, "'"), self.count)
return q
def _updatequery(self):
q = """UPDATE %s SET name='%s', count=%s WHERE id=%s""" % (self._table, escape(self.name, "'"), self.count, self.id)
return q
class PlayerActions(StatObject):
# default name of the table for this data object
_table = 'playeractions'
# fields of the table
id = None
player_id = 0
action_id = 0
count = 0
def _insertquery(self):
q = """INSERT INTO %s (player_id, action_id, count) VALUES (%s, %s, %s)""" % (
self._table, self.player_id, self.action_id, self.count)
return q
def _updatequery(self):
q = """UPDATE %s SET player_id=%s, action_id=%s, count=%s WHERE id=%s""" % (
self._table, self.player_id, self.action_id, self.count, self.id)
return q
class BattleStats(StatObject):
# default name of the table for this data object
_table = 'battlestats'
id = 0
map_id = 0
game_type = ''
total_players = 0
start_time = time.time()
end_time = 0
scores = {}
def _insertquery(self):
q = """INSERT INTO %s (map_id, game_type, total_players, start_time, end_time, scores)
VALUES ('%s', %s, %s, %s, %s, '%s')""" % (self._table, self.map_id, self.game_type, self.total_players,
self.start_time, self.end_time, self.scores)
return q
def _updatequery(self):
q = """UPDATE %s SET map_id=%s, game_type='%s', total_players=%s, start_time=%s, end_time=%s,
scores='%s' """ % (self._table, self.map_id, self.game_type, self.total_players, self.start_time,
self.end_time, self.scores)
return q
class PlayerBattles(StatObject):
# default name of the table for this data object
_table = 'playerbattles'
battlestats_id = 0
start_skill = 0
end_skill = 0
kills = 0
teamKills = 0
deaths = 0
assists = 0
actions = 0
weapon_kills = {}
favorite_weapon_id = 0
if __name__ == '__main__':
print '\nThis is version ' + __version__ + ' by ' + __author__ + ' for BigBrotherBot.\n'
"""
Crontab:
* * * * * command to be executed
- - - - -
| | | | |
| | | | +----- day of week (0 - 6) (Sunday=0)
| | | +------- month (1 - 12)
| | +--------- day of month (1 - 31)
| +----------- hour (0 - 23)
+------------- min (0 - 59)
Query:
INSERT INTO xlr_history_weekly (`client_id` , `kills` , `deaths` , `teamkills` , `teamdeaths` , `suicides` , `ratio` , `skill` , `winstreak` , `losestreak` , `rounds`, `year`, `month`, `week`, `day`)
SELECT `client_id` , `kills` , `deaths` , `teamkills` , `teamdeaths` , `suicides` , `ratio` , `skill` , `winstreak` , `losestreak` , `rounds`, YEAR(NOW()), MONTH(NOW()), WEEK(NOW(),3), DAY(NOW())
FROM `xlr_playerstats`
"""
# ---- /Queue.py (SPYMASTER97/Data-structures, Python, no license) ----
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 1 22:42:28 2021
@author: saswa
"""
value_queue = []
value_queue.insert(0, 123.12)
value_queue.insert(0, 124)
value_queue.insert(0, 125.45)
print(value_queue)
print(value_queue.pop())
print(value_queue)
print(value_queue.pop())
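# note: a plain list used as a queue pays O(n) per insert(0, ...) because all
# elements shift right; collections.deque below does appendleft()/pop() in O(1).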
#%%
from collections import deque
q = deque()
q.appendleft(13)
q.appendleft(525)
q.appendleft(155)
q.appendleft(25)
print(q)
#%%
class Queue:
def __init__(self):
self.buffer = deque()
def enqueue(self, val):
self.buffer.appendleft(val)
def dequeue(self):
return self.buffer.pop()
def is_empty(self):
return len(self.buffer) == 0
def size(self):
return len(self.buffer)
if __name__ == "__main__":
q = Queue()
q.enqueue(5235)
q.enqueue(63563)
q.enqueue(2662)
q.enqueue(262)
print(q.buffer)
print(q.dequeue())
print(q.is_empty())
print(q.size())
# ---- /1501-1600/1591/1591_Python_1.py (ChangxingJiang/LeetCode, Python, no license) ----
import collections
from typing import List
# check whether a printing order is feasible (incremental cycle detection)
class Order:
class Node:
def __init__(self, i):
self.i = i
self.children = []
def __init__(self, n):
self.n = n
self.node_list = [self.Node(i) for i in range(n)]
def add(self, since, to):
if self._has_child(to, since):
return False
else:
self.node_list[since].children.append(self.node_list[to])
return True
def _has_child(self, i, aim):
waiting_nodes = collections.deque([self.node_list[i]])
while waiting_nodes:
node = waiting_nodes.popleft()
for child in node.children:
if child.i == aim:
return True
waiting_nodes.append(child)
return False
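# note: Order.add() rejects an edge since -> to whenever `to` can already
# reach `since` (BFS in _has_child), so the constraint graph stays acyclic;
# a valid printing order exists iff all color-over-color constraints form a DAG.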
class Solution:
def isPrintable(self, targetGrid: List[List[int]]) -> bool:
# scan the grid and record, for every color, its topmost, bottommost, leftmost and rightmost positions (in that order)
# O(N×M)
color_dict = {}
m = len(targetGrid)
n = len(targetGrid[0])
for i in range(m):
for j in range(n):
color = targetGrid[i][j]
if color not in color_dict:
color_dict[color] = [i, i, j, j]
else:
color_dict[color][0] = min(color_dict[color][0], i)
color_dict[color][1] = max(color_dict[color][1], i)
color_dict[color][2] = min(color_dict[color][2], j)
color_dict[color][3] = max(color_dict[color][3], j)
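# worked example: for targetGrid = [[1, 2], [2, 1]] both colors get the full
# grid as their bounding rectangle, producing the constraints (1, 2) and
# (2, 1), i.e. a cycle, so the grid is not printable.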
# for each color, record the printing constraints: every different color inside its bounding rectangle must be printed after it
order_list = set()
for color in color_dict:
position = color_dict[color]
for i in range(position[0], position[1] + 1):
for j in range(position[2], position[3] + 1):
if targetGrid[i][j] != color:
order = (color, targetGrid[i][j])
if order not in order_list:
order_list.add(order)
# print(order_list)
# check whether the collected printing order is actually realizable
order_monitor = Order(61)
for order in order_list:
if not order_monitor.add(order[0], order[1]):
return False
return True
if __name__ == "__main__":
print(Solution().isPrintable(targetGrid=[[1, 1, 1, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]])) # True
print(Solution().isPrintable(targetGrid=[[1, 1, 1, 1], [1, 1, 3, 3], [1, 1, 3, 4], [5, 5, 1, 4]])) # True
print(Solution().isPrintable(targetGrid=[[1, 2, 1], [2, 1, 2], [1, 2, 1]])) # False
print(Solution().isPrintable(targetGrid=[[1, 1, 1], [3, 1, 3]])) # False
print(Solution().isPrintable(targetGrid=[[6, 2, 2, 5], [2, 2, 2, 5], [2, 2, 2, 5], [4, 3, 3, 4]])) # True
|
[
"1278729001@qq.com"
] |
1278729001@qq.com
|
79f5267747a8e2e44aa9ba0bfb254c82189e1944
|
0bda8e47d72c033b5485429e76793f327f7f6faf
|
/rltk/blocking/canopy_block_generator.py
|
5d5d08ea812593f0564f5cb235c8a720f5c89a55
|
[
"MIT"
] |
permissive
|
TaiPhillips/rltk
|
2fbdb35ba851f30785b731b11962aedd7fef52d4
|
f89b0096e94216af93cb3cf37771f6bfc77c9206
|
refs/heads/master
| 2020-09-21T22:26:09.263982
| 2019-05-03T18:24:31
| 2019-05-03T18:24:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,277
|
py
|
import json
import random
from typing import Callable
from rltk.blocking.block_generator import BlockGenerator
from rltk.blocking.block import Block
from rltk.blocking.block_black_list import BlockBlackList
class CanopyBlockGenerator(BlockGenerator):
"""
Canopy based block generator.
Args:
t1 (float): The loose distance.
t2 (float): The tight distance.
distance_metric (Callable): Compute the distance between two vectors return from :meth:`block`.
The signature is `distance(v1: List, v2: List) -> float`
"""
def __init__(self, t1, t2, distance_metric):
if t1 <= t2:
raise ValueError('t1 should be greater than t2')
if t2 <= 0:
            raise ValueError('t1 and t2 should be greater than 0')
self._t1 = t1
self._t2 = t2
self._distance_metric = distance_metric
def block(self, dataset, function_: Callable = None, property_: str = None,
block: Block = None, block_black_list: BlockBlackList = None, base_on: Block = None):
"""
The return of `property_` or `function_` should be a vector (list).
"""
block = super()._block_args_check(function_, property_, block)
if base_on:
for block_id, dataset_id, record_id in base_on:
if dataset.id == dataset_id:
r = dataset.get_record(record_id)
value = function_(r) if function_ else getattr(r, property_)
if not isinstance(value, list):
raise ValueError('Return of the function or property should be a vector (list)')
value = block_id + '-' + value
k = self._encode_key(value)
if block_black_list and block_black_list.has(k):
continue
block.add(k, dataset.id, r.id)
if block_black_list:
block_black_list.add(k, block)
else:
for r in dataset:
value = function_(r) if function_ else getattr(r, property_)
if not isinstance(value, list):
raise ValueError('Return of the function or property should be a vector (list)')
k = self._encode_key(value)
if block_black_list and block_black_list.has(k):
continue
block.add(k, dataset.id, r.id)
if block_black_list:
block_black_list.add(k, block)
return block
@staticmethod
def _encode_key(obj):
return json.dumps(obj)
@staticmethod
def _decode_key(str_):
return json.loads(str_)
def generate(self, block1: Block, block2: Block, output_block: Block = None):
output_block = BlockGenerator._generate_args_check(output_block)
dataset = []
for key, _ in block1.key_set_adapter:
dataset.append(self._decode_key(key))
for key, _ in block2.key_set_adapter:
dataset.append(self._decode_key(key))
clusters = self._run_canopy_clustering(dataset, self._t1, self._t2, self._distance_metric)
for c in clusters:
for vec in c:
key = self._encode_key(vec)
set_ = block1.get(key)
if set_:
for ds_id, rid in set_:
output_block.add(key, ds_id, rid)
set_ = block2.get(key)
if set_:
for ds_id, rid in set_:
output_block.add(key, ds_id, rid)
return output_block
@staticmethod
def _run_canopy_clustering(dataset, t1, t2, distance_metric):
"""
The algorithm proceeds as follows, using two thresholds t1 (the loose distance) and t2 (the tight distance),
where t1 > t2.
1. Begin with the set of data points to be clustered.
2. Remove a point from the set, beginning a new 'canopy' containing this point.
3. For each point left in the set, assign it to the new canopy \
if its distance to the first point of the canopy is less than the loose distance t1.
4. If the distance of the point is additionally less than the tight distance t2,
remove it from the original set.
5. Repeat from step 2 until there are no more data points in the set to cluster.
"""
canopies = []
while len(dataset) > 0:
center_idx = random.randint(0, len(dataset) - 1)
center_vec = dataset[center_idx]
new_canopy = []
delete_list = []
del dataset[center_idx]
for d_idx in range(len(dataset)):
d = dataset[d_idx]
distance = distance_metric(center_vec, d)
if distance < t1:
new_canopy.append(d)
if distance < t2:
delete_list.append(d_idx)
# delete vector from dataset from backward
for d_idx in sorted(delete_list, reverse=True):
del dataset[d_idx]
new_canopy.append(center_vec) # add center
canopies.append(new_canopy)
return canopies
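
# --- Editor's hedged sketch (not part of the original rltk API): a minimal
# --- standalone demonstration of the canopy step above, assuming plain 2-D
# --- points and a Euclidean distance_metric; real callers go through generate().
if __name__ == '__main__':
    import math

    def euclidean(v1, v2):
        # straight-line distance between two equal-length vectors
        return math.sqrt(sum((a - b) ** 2 for a, b in zip(v1, v2)))

    points = [[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.1, 4.9], [10.0, 0.0]]
    # t1 (loose) must exceed t2 (tight), matching the constructor check above
    clusters = CanopyBlockGenerator._run_canopy_clustering(
        list(points), 2.0, 0.5, euclidean)
    print(clusters)  # nearby points land in the same canopy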
|
[
"bigyyx@gmail.com"
] |
bigyyx@gmail.com
|
ff939a7d308a8482f2f324f0f13b9406e3dfc016
|
5c82c0c270ad309ccbabeed34f3267939fb4bb42
|
/cart/apps.py
|
0c9081c7250576d0c2b89c78e42a27671671fbc1
|
[] |
no_license
|
EvgenKham/Course_python-django
|
eff07cd8525d412b031e02ff87af9aa5c2a4b5bc
|
10505f724b49750df1c8085e1214f179ef02a501
|
refs/heads/master
| 2020-04-12T14:03:39.628645
| 2019-02-11T12:24:52
| 2019-02-11T12:25:04
| 162,540,696
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
from django.apps import AppConfig
class CatrConfig(AppConfig):
name = 'cart'
|
[
"hamitcevich@gmail.com"
] |
hamitcevich@gmail.com
|
86f609fb77da71f760f594ab622909ac4e3f2270
|
00102344a5242d2399d733916937c4de355bdcc5
|
/shop/urls.py
|
f9f2ab5a08266d0924c19855e6b3eb6120867af0
|
[] |
no_license
|
KyutaeLee/onlineshop2.0
|
07ef51e1f7ef2c04c8963e5269b3f0863855f8f1
|
44b839c8164eab96e413a400d995a44a99f9efd3
|
refs/heads/master
| 2020-03-07T10:08:20.523280
| 2018-04-05T14:48:38
| 2018-04-05T14:48:38
| 127,424,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
from django.urls import path
from .views import *
app_name = 'shop'
urlpatterns = [
path('', product_list, name='product_list'),
path('<str:category_slug>', product_list, name='product_list_by_category'),
]
|
[
"rbxofjqn@gmail.com"
] |
rbxofjqn@gmail.com
|
e3708403a43f2cca36b946a00919482d012dface
|
b0ab693a632eccf390cb6c5f8997823515440b6c
|
/Python/Tasks/anagram.py
|
f4f123e5837a68a3f8b40b57dab5f17006fa859c
|
[] |
no_license
|
CinematicGenius007/HacktoberFest2021-BVP-CG007
|
8a2932442685492f7abea9de63a6d06d14c9c5c7
|
96143d6d7c4e5acf04cd6a4dcd120d491608cf89
|
refs/heads/master
| 2023-08-15T18:31:05.943506
| 2021-10-09T13:21:41
| 2021-10-09T13:21:41
| 414,681,561
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
def anagram(s, t):
    string = len(s)
    substring = len(t)
    # edge conditions
    if substring == 0:
        return True
    if substring > string:
        return False
    # store the character counts of t in a dictionary
    substringdiction = dict()
    for c in t:
        substringdiction[c] = substringdiction.get(c, 0) + 1
    # slide a window of length len(t) over s
    for i in range(string - substring + 1):
        stringdiction = {}
        for j in range(i, i + substring):
            # a character absent from t rules this window out
            if s[j] not in substringdiction:
                break
            # store the window's character counts in stringdiction
            stringdiction[s[j]] = stringdiction.get(s[j], 0) + 1
        # equal counts mean an anagram of t occurs as a substring of s
        if stringdiction == substringdiction:
            return True
    # no window matched in the loop
    return False

print(anagram("udacity", "ty"))
print(anagram("udacity", " "))
print(anagram("udacity", "mnbgfdfdsfsdfgsgdfg12121"))
|
[
"sohilkhattar123@gmail.com"
] |
sohilkhattar123@gmail.com
|
60708da1a81492fc5a92b6bd9022513905a696f7
|
53392f0896171e9a3091fa1d6eaf55c41e5fd5b1
|
/yandex/cloud/ai/translate/v2/translation_service_pb2_grpc.py
|
2daf99d88feb2f36ae47e79a8b5778f0e0f812a1
|
[
"MIT"
] |
permissive
|
IIKovalenko/python-sdk
|
7698094ee69bcf9f1ac6bd798244c4f4d843854d
|
980e2c5d848eadb42799132b35a9f58ab7b27157
|
refs/heads/master
| 2020-06-01T09:11:41.031356
| 2019-05-11T14:00:50
| 2019-05-11T14:00:50
| 190,727,136
| 1
| 0
|
MIT
| 2019-06-07T10:39:33
| 2019-06-07T10:39:33
| null |
UTF-8
|
Python
| false
| false
| 3,979
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from yandex.cloud.ai.translate.v2 import translation_service_pb2 as yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2
class TranslationServiceStub(object):
"""A set of methods for the Yandex Translate service.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Translate = channel.unary_unary(
'/yandex.cloud.ai.translate.v2.TranslationService/Translate',
request_serializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.TranslateRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.TranslateResponse.FromString,
)
self.DetectLanguage = channel.unary_unary(
'/yandex.cloud.ai.translate.v2.TranslationService/DetectLanguage',
request_serializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.DetectLanguageRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.DetectLanguageResponse.FromString,
)
self.ListLanguages = channel.unary_unary(
'/yandex.cloud.ai.translate.v2.TranslationService/ListLanguages',
request_serializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.ListLanguagesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.ListLanguagesResponse.FromString,
)
class TranslationServiceServicer(object):
"""A set of methods for the Yandex Translate service.
"""
def Translate(self, request, context):
"""Translates the text to the specified language.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DetectLanguage(self, request, context):
"""Detects the language of the text.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListLanguages(self, request, context):
"""Retrieves the list of supported languages.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TranslationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Translate': grpc.unary_unary_rpc_method_handler(
servicer.Translate,
request_deserializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.TranslateRequest.FromString,
response_serializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.TranslateResponse.SerializeToString,
),
'DetectLanguage': grpc.unary_unary_rpc_method_handler(
servicer.DetectLanguage,
request_deserializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.DetectLanguageRequest.FromString,
response_serializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.DetectLanguageResponse.SerializeToString,
),
'ListLanguages': grpc.unary_unary_rpc_method_handler(
servicer.ListLanguages,
request_deserializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.ListLanguagesRequest.FromString,
response_serializer=yandex_dot_cloud_dot_ai_dot_translate_dot_v2_dot_translation__service__pb2.ListLanguagesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.ai.translate.v2.TranslationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
[
"alex@alexkuk.ru"
] |
alex@alexkuk.ru
|
e6208f6111576bb12f7497e3601bdde288165729
|
8a16fc8c45e92bee17dbcf207aced1f63733636a
|
/uas/penghitungan/Pembayaran.py
|
ec6e53b2f8e32045f800885f05c38a9510b0735d
|
[] |
no_license
|
muchsyaifudin/tugas-uas-pemrograman
|
41f8ec06245b11dd31b69cece23e75407d6cb8e9
|
12ccd09ceb863ddce1f91f216d7f9536b6db058e
|
refs/heads/master
| 2020-05-09T17:06:58.347144
| 2019-04-14T14:43:41
| 2019-04-14T14:43:41
| 181,296,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
def pembayaran():
    from texttable import Texttable
    table1 = Texttable()
    no1 = 0
    jawab = "y"
    while jawab == 'y':
        nama = input("masukan nama: ")
        nim = input("masukan nim: ")
        kelas = input("masukan kelas: ")
        pilih1 = input("bayar bulanan (y/t): ")
        if pilih1 == 'y':
            bulanan = 500000
        else:
            bulanan = 0
        pilih2 = input("bayar uts (y/t): ")
        if pilih2 == 'y':
            uts = 50000
        else:
            uts = 0
        pilih3 = input("bayar uas (y/t): ")
        if pilih3 == 'y':
            uas = 50000
        else:
            uas = 0
        pilih4 = input("bayar seminar (y/t): ")
        if pilih4 == 'y':
            seminar = 100000
        else:
            seminar = 0
        pilih5 = input("bayar kas (y/t): ")
        if pilih5 == 'y':
            kas = 20000
        else:
            kas = 0
        print("pembayaran admin 5000")
        admin = 5000
        total = bulanan + uts + uas + seminar + kas + admin
        no1 += 1
        table1.add_rows([['no', 'nama', 'nim', 'kelas', 'bulanan', 'uts', 'uas', 'seminar', 'kas', 'admin', 'total'],
                         [no1, nama, nim, kelas, bulanan, uts, uas, seminar, kas, admin, total]])
        print(table1.draw())
        jawab = input("tambakan pembayaran (y/t): ")
|
[
"noreply@github.com"
] |
muchsyaifudin.noreply@github.com
|
661b8acdb1f0c2c84b60871b0a1990b0525e6c88
|
800d2325996f400101fb1217305a1934cd5657a2
|
/weatherreport/settings/docker.py
|
f9245c443dd287441fffb8788e56bdb450c11fb3
|
[
"MIT"
] |
permissive
|
sarahboyce/weatherreport
|
eebe32562dd15af64a54fca27c0798ab109d6359
|
075b61f775bc2dd9d1d148317707a0684043d66e
|
refs/heads/main
| 2023-04-04T15:58:05.419282
| 2021-01-30T11:17:59
| 2021-01-30T11:17:59
| 333,877,184
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
import os
from weatherreport.settings.common import *
DEBUG = int(os.environ.get("DEBUG", default=0))
STATIC_ROOT = os.path.join(SITE_ROOT, "staticfiles")
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
INSTALLED_APPS += ["whitenoise.runserver_nostatic"]
MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware")
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.environ.get("POSTGRES_DB"),
"USER": os.environ.get("POSTGRES_USER"),
"PASSWORD": os.environ.get("POSTGRES_PASSWORD"),
"HOST": os.environ.get("POSTGRES_HOST"),
"PORT": os.environ.get("POSTGRES_PORT"),
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": os.environ.get("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"CONNECTION_POOL_KWARGS": {"max_connections": 30},
},
"KEY_PREFIX": "prod",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
[
"sarah.boyce@quickrelease.de"
] |
sarah.boyce@quickrelease.de
|
11947359099c737dbd516c6546c6de926e7438ae
|
6f014c26ad9773b71db2c30642d5f65481913f69
|
/blog/views.py
|
12007f631248292eaaf6d27e34b094a2d990ac18
|
[] |
no_license
|
Rubel-Mahmud/BLOG_SITE
|
8f9188b0cd3e622a3bdb100615b1365bfaaff2bd
|
9b39de55d43ce2d7520c2f7dbbea362d34a078a6
|
refs/heads/main
| 2023-02-05T08:34:47.118681
| 2020-12-22T18:41:50
| 2020-12-22T18:41:50
| 322,625,816
| 0
| 0
| null | 2020-12-22T18:41:51
| 2020-12-18T15:00:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,976
|
py
|
from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from blog.models import Post, Comment
from blog.forms import PostForm, CommentForm
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blog/post_list.html', {
'posts':posts
})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
@login_required
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {
'form':form
})
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {
'form':form
})
@login_required
def post_draft_list(request):
posts = Post.objects.filter(published_date__isnull = True).order_by('created_date')
return render(request, 'blog/post_draft_list.html', {
'posts':posts
})
@login_required
def post_publish(request, pk):
post = get_object_or_404(Post, pk=pk)
post.publish()
return redirect('post_detail', pk=pk)
@login_required
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('post_list')
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'blog/add_comment_to_post.html', {'form': form})
@login_required
def comment_approve(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.approve()
return redirect('post_detail', pk=comment.post.pk)
@login_required
def comment_remove(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.delete()
return redirect('post_detail', pk=comment.post.pk)
|
[
"rmsoft.com@gmail.com"
] |
rmsoft.com@gmail.com
|
098e7dc48212ce0a6ffe3e4ac15d1b9fffe0fa71
|
7069c4dbc65c88144d2f89bfe433febaf0a57e2a
|
/scripts/make_relative_path_tests.py
|
907388deed2c73f49effc459c87380cdbde01681
|
[
"MIT"
] |
permissive
|
tomcodes/appleseed
|
4d39965f168be1fd8540b635b5f32c2e8da188b6
|
e8ae4823158d7d40beb35c745eb6e9bee164dd2d
|
refs/heads/master
| 2021-01-17T22:45:50.384490
| 2012-01-20T17:54:49
| 2012-01-20T17:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
import os.path
def make_relative_path(file_path, base):
npath = os.path.normpath(os.path.normcase(file_path))
nbase = os.path.normpath(os.path.normcase(base))
if npath.startswith(nbase):
result = npath[len(nbase):]
if result.startswith("/") or result.startswith("\\"):
result = result[1:]
return result
else:
return file_path
def expect(expected, received):
if expected != received:
print("Unit test failed!")
print(" Expected: {0}".format(expected))
print(" Received: {0}".format(received))
def run_tests():
expect("c:\\dir\\file.ext", make_relative_path("c:\\dir\\file.ext", ""))
expect("/dir/file.ext", make_relative_path("/dir/file.ext", ""))
expect("dir\\file.ext", make_relative_path("c:\\dir\\file.ext", "c:\\"))
expect("dir\\file.ext", make_relative_path("/dir/file.ext", "/"))
expect("dir\\file.ext", make_relative_path("c:\\dir\\file.ext", "c:/"))
expect("dir\\file.ext", make_relative_path("/dir/file.ext", "\\"))
expect("file.ext", make_relative_path("c:\\dir\\file.ext", "c:\\dir"))
expect("file.ext", make_relative_path("/dir/file.ext", "/dir"))
expect("file.ext", make_relative_path("c:\\dir\\file.ext", "c:\\dir\\"))
expect("file.ext", make_relative_path("/dir/file.ext", "/dir/"))
expect("c:\\dir\\file.ext", make_relative_path("c:\\dir\\file.ext", "c:\\rep"))
expect("/dir/file.ext", make_relative_path("/dir/file.ext", "/rep"))
run_tests()
|
[
"beaune@aist.enst.fr"
] |
beaune@aist.enst.fr
|
db9bfe7302043aef1f099ce17901999d4ec86593
|
81913b3a18b9ee35205b6af99ee3eb0e730bc646
|
/simulation_code/set_up_parameters_distributed_t_ref.py
|
99f2cdca528a3e41f4af80e624d3723cf3d1fe51
|
[] |
no_license
|
CINPLA/Skaar_et_al_2020_PLoS_Comput_Biol
|
2ccd940b5010426a2e7e6250c4af27db4c91aeaa
|
26a7f93fc1875e086c4d23c3f5d6e54d48451432
|
refs/heads/master
| 2021-03-08T05:16:16.998855
| 2020-05-25T11:13:34
| 2020-05-25T11:13:34
| 246,319,780
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,034
|
py
|
'''Sets up simulation directories and parameters for NEST simulations
including LFP approximations'''
import os
import parameters as ps
import numpy as np
from nest_parameters import get_unique_id, NEST_PSET
if __name__ == '__main__':
## Add the random varying parameters
PSET = ps.ParameterSpace(NEST_PSET)
PSET['eta'] = ps.ParameterRange(np.linspace(0.8, 4.0, 9))
PSET['g'] = ps.ParameterRange(np.linspace(3.5, 8.0, 10))
PSET['J'] = ps.ParameterRange(np.linspace(0.05, 0.4, 8))
PSET['sigma_factor'] = ps.ParameterRange([0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
PSET['simtime'] = 3000.
PSET['tauMem_gaussian'] = False
PSET['delay_gaussian'] = False
PSET['J_gaussian'] = False
PSET['t_ref_gaussian'] = True
PSET['theta_gaussian'] = False
# set up directory structure
savefolder = os.path.join('./lfp_simulations_gaussian_t_ref/')
parameterset_dest = os.path.join(savefolder, 'parameters')
log_dir = os.path.join(savefolder, 'logs')
nest_jobscript_dest = os.path.join(savefolder, 'nest_jobs')
nest_output = os.path.join(savefolder, 'nest_output')
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
if not os.path.isdir(parameterset_dest):
os.mkdir(parameterset_dest)
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
if not os.path.isdir(nest_output):
os.mkdir(nest_output)
print('Start parameter iteration')
for i, paramset in enumerate(PSET.iter_inner()):
# unique id for each parameter set, constructed from the parameset dict
# converted to a sorted list of tuples
paramset = paramset.as_dict()
paramset.update({'nest_seed': paramset['nest_seed'] + i})
paramset.update({'numpy_seed': paramset['numpy_seed'] + i})
paramset.update({'random_seed': paramset['random_seed'] + i})
ps_id = get_unique_id(paramset)
print(ps_id)
## Add parameters to string listing all process IDs by parameters
with open(os.path.join(savefolder, 'id_parameters.txt'), 'a') as f:
f.write(ps_id + '\n')
f.write('%.3f, %.3f, %.3f, %.3f'%(paramset['eta'], paramset['g'], paramset['J'], paramset['sigma_factor']) + '\n')
        # put output_path into the dictionary, now that we have a unique ID for
        # the parameter set; this will not affect the parameter space object PSET
spike_output_path = os.path.join(nest_output, ps_id)
if not os.path.isdir(spike_output_path):
os.mkdir(spike_output_path)
paramset.update({
'ps_id': ps_id,
'spike_output_path': spike_output_path,
'savefolder': savefolder
})
# write using ps.ParemeterSet native format
parameterset_file = os.path.join(parameterset_dest, '{}.pset'.format(ps_id))
ps.ParameterSet(paramset).save(url=parameterset_file)
# specify where to save output and errors
nest_output_file = os.path.join(log_dir, ps_id + '.txt')
|
[
"jewskaar@gmail.com"
] |
jewskaar@gmail.com
|
675c1ad677c8def9f95919a0d25245f256705d78
|
6899130a29e498405c4594727cb9fdc944ea04f0
|
/madlibs.py
|
8a58569256c7336eafaee9999e243ee81d96b59c
|
[] |
no_license
|
teganbroderick/madlibs
|
fbc2a16b27f3ecec8c9aca39717c6104fcdb86bf
|
871c08d1a09d05a305e6ccb749ea1f0502e7e32b
|
refs/heads/master
| 2021-06-27T11:10:33.219523
| 2019-10-17T00:55:45
| 2019-10-17T00:55:45
| 215,620,911
| 0
| 0
| null | 2021-03-20T01:55:43
| 2019-10-16T18:49:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,314
|
py
|
"""A madlib game that compliments its users."""
from random import choice
from flask import Flask, render_template, request
# "__name__" is a special Python variable for the name of the current module.
# Flask wants to know this to know what any imported things are relative to.
app = Flask(__name__)
AWESOMENESS = [
'awesome', 'terrific', 'fantastic', 'neato', 'fantabulous', 'wowza',
'oh-so-not-meh', 'brilliant', 'ducky', 'coolio', 'incredible', 'wonderful',
'smashing', 'lovely',
]
@app.route('/')
def start_here():
"""Display homepage."""
return "Hi! This is the home page."
@app.route('/hello')
def say_hello():
"""Say hello to user."""
return render_template("hello.html")
@app.route('/greet')
def greet_person():
"""Greet user with compliment."""
player = request.args.get("person")
compliment = choice(AWESOMENESS)
return render_template("compliment.html",
person=player,
compliment=compliment)
@app.route('/game')
def show_madlib_form():
""" Show madlib form"""
yesno = request.args.get("yesno")
if yesno == "yes":
return render_template("game.html")
else:
return render_template("goodbye.html")
@app.route('/madlib')
def show_madlib():
"""Show madlib output"""
person = request.args.get("person")
color = request.args.get("color")
noun = request.args.get("noun")
adjective = request.args.get("adjective")
animal = request.args.get("animal")
country = request.args.get("country")
return render_template("madlib.html",
person=person,
color=color,
noun=noun,
adjective=adjective,
animal=animal,
country=country)
@app.route('/continue')
def continue_game():
"""continue game if input is yes"""
continue_game = request.args.get("continue")
if continue_game == "yes":
return render_template("game.html")
else:
return render_template("goodbye.html")
if __name__ == '__main__':
# Setting debug=True gives us error messages in the browser and also
# "reloads" our web app if we change the code.
app.run(debug=True)
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
33c5995e21277b8da0e2948f6352d5b1ee5dd530
|
1aee624ea66bb8da8a165325a69781953f43d969
|
/build/common_msgs/stereo_msgs/catkin_generated/pkg.installspace.context.pc.py
|
fca3d434222dac757de8791e56b26d8b346d024f
|
[] |
no_license
|
ravarmaa/robotex
|
fa7cd98df1ef0902ff8b42f66b868d84ee87284d
|
db58c28d3003451186a06c1d331aecbac8414f92
|
refs/heads/master
| 2020-03-28T10:41:36.871241
| 2018-09-10T15:00:48
| 2018-09-10T15:00:48
| 148,134,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/randalf/robotex/install/include".split(';') if "/home/randalf/robotex/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;sensor_msgs;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "stereo_msgs"
PROJECT_SPACE_DIR = "/home/randalf/robotex/install"
PROJECT_VERSION = "1.12.6"
|
[
"avarmaarando@.com"
] |
avarmaarando@.com
|
75339ed792b3e57c3e7a15c717224dfb8695b42e
|
053b436a21874cb35d5376f7c45d046d80e08e04
|
/Chapter 9 Excercise 3.py
|
57468ae0ecd69877468f186d4c4e773b23b9dbb1
|
[] |
no_license
|
braeden-smith/Chapter-8-9-10
|
4b34f813e1c6f8fc0c7ca1c87062d96e4c851dc9
|
c8d575dcc941f8dd121074096a37d0966e52fc33
|
refs/heads/master
| 2021-05-02T09:15:24.292906
| 2018-02-09T18:05:35
| 2018-02-09T18:05:35
| 120,820,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
#Braeden Smith Chapter 9 Excercise 3 2/5/18
# Exercise 3
# Write a program to read through a mail log, build a histogram using a dictionary to count
# how many messages have come from each email address, and print the dictionary.
email_sender = {}
messages = []
with open('mboxshort.txt') as f:
for line in f:
messages = line.split()
if len(messages) > 3 and line.startswith('From'):
address = messages[1]
if address not in email_sender:
email_sender[address] = 1
else:
email_sender[address] += 1
print(email_sender)
|
[
"noreply@github.com"
] |
braeden-smith.noreply@github.com
|
07c417be70d57393ffb81b79c8364adb9e8259a6
|
06945d7581bee2baa5a5a727f6d3e32001671cfd
|
/eval_trades.py
|
fc63a6cce0382b28a04cff4b5109522f0aae274e
|
[] |
no_license
|
Asber777/lafeat
|
1342304730db66b58a53c5c89a625516881f2577
|
13017aef299a92d7ec64da8d9287058def4d343c
|
refs/heads/master
| 2023-06-04T23:27:20.484390
| 2021-06-23T03:46:07
| 2021-06-23T03:46:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,344
|
py
|
"""
CIFAR-10 evaluation script from TRADES.
Reference:
https://github.com/yaodongyu/TRADES/blob/master/evaluate_attack_cifar10.py
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
import torch.optim as optim
from torchvision import datasets, transforms
from wideresnet import *
import numpy as np
parser = argparse.ArgumentParser(description='PyTorch CIFAR Attack Evaluation')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--epsilon', default=0.031, type=float,
                    help='perturbation')
parser.add_argument('--model-path',
default='./models/model_cifar_wrn.pt',
help='model for white-box attack evaluation')
parser.add_argument('--data-attak-path',
default='./attacks/cifar10_X_adv.npy',
help='adversarial data for white-box attack evaluation')
parser.add_argument('--data-path',
default='./attacks/cifar10_X.npy',
help='data for white-box attack evaluation')
parser.add_argument('--target-path',
default='./attacks/cifar10_Y.npy',
help='target for white-box attack evaluation')
args = parser.parse_args()
# settings
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
def image_check(min_delta, max_delta, min_image_adv, max_image_adv):
    # the perturbation must stay within +/-epsilon and the image within [0, 1]
    if min_delta < -args.epsilon:
        print(f'{min_delta} < -{args.epsilon}')
        return False
    if max_delta > args.epsilon:
        print(f'{max_delta} > {args.epsilon}')
        return False
    if min_image_adv < 0.0:
        print(f'{min_image_adv} < 0')
        return False
    if max_image_adv > 1.0:
        print(f'{max_image_adv} > 1')
        return False
    return True
def eval_adv_test_whitebox(model, device, X_adv_data, X_data, Y_data):
"""
evaluate model by white-box attack
"""
model.eval()
robust_err_total = 0
with torch.no_grad():
for idx in range(len(Y_data)):
# load original image
image = np.array(np.expand_dims(X_data[idx], axis=0), dtype=np.float32)
image = np.transpose(image, (0, 3, 1, 2))
# load adversarial image
image_adv = np.array(np.expand_dims(X_adv_data[idx], axis=0), dtype=np.float32)
image_adv = np.transpose(image_adv, (0, 3, 1, 2))
# load label
label = np.array(Y_data[idx], dtype=np.int64)
# check bound
image_delta = image_adv - image
min_delta, max_delta = image_delta.min(), image_delta.max()
min_image_adv, max_image_adv = image_adv.min(), image_adv.max()
valid = image_check(min_delta, max_delta, min_image_adv, max_image_adv)
if not valid:
print('not valid adversarial image')
break
# transform to torch.tensor
data_adv = torch.from_numpy(image_adv).to(device)
target = torch.from_numpy(label).to(device)
            # evaluation
X, y = Variable(data_adv, requires_grad=True), Variable(target)
out = model(X)
err_robust = (out.data.max(1)[1] != y.data).float().sum()
robust_err_total += err_robust
print(
f'{1 - robust_err_total / (idx + 1):.2%} '
f'({idx + 1 - int(robust_err_total)} / {idx + 1})')
if not valid:
print('not valid adversarial image')
else:
print('robust_err_total: ', robust_err_total * 1.0 / len(Y_data))
def main():
# white-box attack
# load model
model = WideResNet().to(device)
model.load_state_dict(torch.load(args.model_path, map_location=device))
# load data
X_adv_data = np.load(args.data_attak_path)
X_data = np.load(args.data_path)
Y_data = np.load(args.target_path)
eval_adv_test_whitebox(model, device, X_adv_data, X_data, Y_data)
if __name__ == '__main__':
main()
|
[
"gxtfmx@gmail.com"
] |
gxtfmx@gmail.com
|
f4096be78fc2a68ade4f26c201e6170b5b55ed3b
|
38231aa5e30143795bdb40ea7436b6dcdea837dc
|
/FirstFunction.py
|
4793c66ba6320562437afb3d1c16158c2cafabe8
|
[] |
no_license
|
DamonBritt/Cognixia_Python
|
34aa787b7792d46fdf0e12dec7ff0f8f5e183492
|
a7057311983b8439c1a2527d35ccce50b681cbae
|
refs/heads/main
| 2023-03-08T02:53:41.014550
| 2021-02-24T18:22:00
| 2021-02-24T18:22:00
| 341,976,185
| 0
| 5
| null | 2021-02-24T18:05:31
| 2021-02-24T17:15:42
|
Python
|
UTF-8
|
Python
| false
| false
| 88
|
py
|
def weird_arithmetic(x, y, z):
print((x**x + y**z) // z)
weird_arithmetic(5, 6, 7)
|
[
"damonbritt87@yahoo.com"
] |
damonbritt87@yahoo.com
|
79000db8b56b251b19090baf05ee17dc21f7920c
|
de0ae900ba8b423064b6a4b978a654752d3e7b74
|
/PracticeExam/P4/hash_map.py
|
3a021f1ceb2d63d3e065425992127c2b112f9356
|
[
"MIT"
] |
permissive
|
GudniNathan/SC-T-201-GSKI
|
dab2e59cc0fe50f4dd7797b42748006d97e0ac53
|
1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2
|
refs/heads/master
| 2022-12-12T02:01:06.127226
| 2019-04-18T22:01:18
| 2019-04-18T22:01:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
from bucket import *
class HashMap:
def __init__(self, *args, **kwargs):
self.buckets = [Bucket() for i in range(16)]
self.size = 0
def _get_bucket(self, key):
return self.buckets[hash(key) % 16]
def __getitem__(self, key):
try:
return self._get_bucket(key).find(key)
except NotFoundException:
return None
def __setitem__(self, key, value):
bucket = self._get_bucket(key)
try:
bucket.insert(key, value)
self.size += 1
except ItemExistsException:
bucket.update(key, value)
def __delitem__(self, key):
try:
self._get_bucket(key).remove(key)
self.size -= 1
except NotFoundException:
pass
def __len__(self):
return self.size
if __name__ == "__main__":
print("\nTESTING HASHMAP - MAKE BETTER TESTS!!")
m = HashMap()
m[3] = "Value for key: 3"
m[6] = "Value for key: 6"
m[2] = "Value for key: 2"
print("")
print(str(m[2]))
print(str(m[3]))
print(str(m[4]))
print(str(m[5]))
print(str(m[6]))
print("Size of collection: " + str(len(m)))
del m[3]
print("")
print(str(m[2]))
print(str(m[3]))
print(str(m[4]))
print(str(m[5]))
print(str(m[6]))
print("Size of collection: " + str(len(m)))
del m[4]
print("")
print(str(m[2]))
print(str(m[3]))
print(str(m[4]))
print(str(m[5]))
print(str(m[6]))
print("Size of collection: " + str(len(m)))
|
[
"1493259+GudniNatan@users.noreply.github.com"
] |
1493259+GudniNatan@users.noreply.github.com
|
2e0db497cc97e4d00634767b6ec807cb485863ac
|
e528ccd4baf8826339c19b3c3938ef2c389ff628
|
/django_mako_plus/router/router_exception.py
|
ce8baf8f37a1a4e434492848df363ea8f657738d
|
[
"Apache-2.0"
] |
permissive
|
knowsuchagency/django-mako-plus
|
02665ab02f0fed86d47354bcd38b702f5b4dbf69
|
e737be6a2db6e9e897cc804c660494415c4ea180
|
refs/heads/master
| 2021-01-20T03:11:30.194686
| 2018-05-10T23:53:24
| 2018-05-10T23:53:24
| 132,975,292
| 0
| 0
|
Apache-2.0
| 2018-05-11T01:53:22
| 2018-05-11T01:53:22
| null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
from django.http import Http404
from .base import Router
class RegistryExceptionRouter(Router):
'''Router for a registry exception (i.e. view not found).'''
def __init__(self, exc):
self.exc = exc
def get_response(self, request, *args, **kwargs):
raise Http404(str(self.exc))
def message(self, request, descriptive=True):
if descriptive:
return 'RegistryExceptionRouter: {}'.format(self.exc)
return str(self.exc)
|
[
"doconix@gmail.com"
] |
doconix@gmail.com
|
cdbe0603d2755f3c3409875ec142445f54691589
|
a1b1b2f573cedf34148694b30ff496b3cb9fc3ab
|
/watcher.py
|
48fdded1f8f5426170d6f7007555c4f2be389000
|
[] |
no_license
|
g4rcez/Watcher
|
d4b13647e39ee2557f0b7e0b191eda42761c598d
|
e0fff515f0cae5dcb94e71b736ed4592dde83e67
|
refs/heads/master
| 2021-06-26T21:33:27.937680
| 2017-09-12T00:41:09
| 2017-09-12T00:41:09
| 103,200,124
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
#!/usr/bin/python3
import hashlib
from src.Message import Message
from src.Connection import Connection
from src.CredentialsAndDirectory import CredentialsAndDirectory
def banner():
print("""
██╗ ██╗ █████╗ ████████╗ ██████╗██╗ ██╗███████╗██████╗
██║ ██║██╔══██╗╚══██╔══╝██╔════╝██║ ██║██╔════╝██╔══██╗
██║ █╗ ██║███████║ ██║ ██║ ███████║█████╗ ██████╔╝
██║███╗██║██╔══██║ ██║ ██║ ██╔══██║██╔══╝ ██╔══██╗
╚███╔███╔╝██║ ██║ ██║ ╚██████╗██║ ██║███████╗██║ ██║
╚══╝╚══╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
""")
banner()
notifications = Message(open('message.json', 'r').read())
manipulate = CredentialsAndDirectory()
# if not manipulate.status_configuration():
# exit()
manipulate.set_all_directories()
manipulate.set_all_files()
print(notifications.get_message('dirs') + str(len(manipulate.get_all_directories())))
print(notifications.get_message('files') + str(len(manipulate.get_all_files())))
connection = Connection(
manipulate.get_user(), manipulate.get_password(),
manipulate.get_server(), manipulate.get_port()
)
for file in manipulate.get_all_files():
local_hash = hashlib.md5(open(file, 'rb').read()).hexdigest()
file_server = file.replace(manipulate.get_directory(),
manipulate.directory_server() + '/')
signal = "md5sum " + file_server + "| cut -d ' ' -f1"
hash_server = connection.command(signal).decode().replace('\n', '')
if hash_server != local_hash:
connection.put(file, file_server)
print('[!] ' + notifications.get_message('thefile') + file_server + notifications.get_message('rescue') + file)
connection.close()
|
[
"allan.f.garcez@gmail.com"
] |
allan.f.garcez@gmail.com
|
1ed0266f8e0f5e0e603d2b6d4907210233a0df82
|
0723929f8de0448cc9918b0ab8c43b990cb4199a
|
/meiduo_mall/meiduo_mall/settings/prod.py
|
a30ddfcc8a527c22f366bb3e7f32ecbfb2cb03c5
|
[] |
no_license
|
Jumsion05/web_meiduo
|
31bb8e573b5e616fdcc40c4b64e977f3e4f47855
|
9762f0c5d61e8cd1ed6cff9283762d8158c93630
|
refs/heads/master
| 2020-04-12T22:32:59.707625
| 2019-01-15T16:05:58
| 2019-01-15T16:05:58
| 162,715,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
"""
Django settings for meiduo_mall project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
# Production environment settings
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_#!7g1x7^+mhv6g)s)3#9!ts2qye(2dj)6-qt(p1y_r+zndt8k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo_mall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'meiduo_mall/../templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduo_mall.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '../../db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"871888414@qq.com"
] |
871888414@qq.com
|
971885ac68bdb16f9a9cd430074db4a5d4cbaee8
|
923bbbcbc6aaada1ddad8b3c006aea0d9038ccb6
|
/Sort_BinarySearch.py
|
e3542911b6dbcaf9f84e9217787acd2d29c0e560
|
[] |
no_license
|
chenfpp/leetcode-exercise
|
b14229877e6b9b6120a1fa00903513f0c2f8a9ad
|
5b9ac601efdccfbd6f89db1ca56cc6bbff0efb32
|
refs/heads/master
| 2020-06-29T10:24:48.162743
| 2019-08-12T15:23:07
| 2019-08-12T15:23:07
| 200,510,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# Maximum value of each sliding window
def get_sliding_window_max(nums, k):
length = len(nums)
result = []
for i in range(length):
temp = []
for j in range(k):
if i + k - 1 < length:
temp.append(nums[i + j])
else:
break
if len(temp) > 0:
result.append(max(temp))
return result
if __name__ == "__main__":
nums = [1, 2, 3, 2, 5, 4, 6, 7]
k = 3
print(get_sliding_window_max(nums, k))
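
# --- Editor's hedged sketch (an addition): the classic O(n) alternative with a
# --- monotonically decreasing deque of indices, for comparison with the
# --- O(n*k) version above.
from collections import deque

def get_sliding_window_max_linear(nums, k):
    result = []
    dq = deque()  # indices whose values stay in decreasing order
    for i, v in enumerate(nums):
        while dq and nums[dq[-1]] <= v:
            dq.pop()              # drop values dominated by the new element
        dq.append(i)
        if dq[0] <= i - k:
            dq.popleft()          # the front index slid out of the window
        if i >= k - 1:
            result.append(nums[dq[0]])
    return result

if __name__ == "__main__":
    # should match get_sliding_window_max([1, 2, 3, 2, 5, 4, 6, 7], 3)
    print(get_sliding_window_max_linear([1, 2, 3, 2, 5, 4, 6, 7], 3))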
|
[
"noreply@github.com"
] |
chenfpp.noreply@github.com
|
aba907056cfe72fefb4e7c3884b2a31848de692a
|
8a2fb0f7a01c2b198ed1c8e650878297855d4e18
|
/app/admin/roles.py
|
fbff99bec28df6235f4556328fd0e1139c97ef21
|
[] |
no_license
|
best-upm/IBST
|
b7d8d32e8598ede95925301dbd4b30be6cb1b6b3
|
b5a21621759e3d2729b20cb0b3cf30357cf4a495
|
refs/heads/master
| 2022-12-09T09:40:33.141220
| 2020-05-25T13:34:43
| 2020-05-25T13:34:43
| 246,377,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
from app import db
from flask import render_template
from flask_login import login_required, current_user
from app.admin import bp
@bp.route('/role', methods=['GET'])
@login_required
def role():
title='Roles'
return render_template('roles-archive.html', title=title)
|
[
"lukasgdanietz@gmail.com"
] |
lukasgdanietz@gmail.com
|
199056dacd5bd9433faeeb41a3179a9a69da101c
|
c7b2f599de5bc85690a2e978cc0ba1df7802d4fa
|
/dice.py
|
3c62c922f911e00f93b24881b0a62a1ee5212f94
|
[] |
no_license
|
meettaraviya/Mine-search-AI
|
8416265396ab17eaf52662820157f86ec1a1560d
|
1169b721030f12edc06de71542de70eff07064bc
|
refs/heads/master
| 2022-11-20T22:51:14.065061
| 2020-06-18T06:35:17
| 2020-06-18T06:35:17
| 257,130,486
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,521
|
py
|
from minesweeper import MineSweeper, WindowsMineSweeper
from player import GreedyPlayer, OptimizedGreedyPlayer
import numpy as np
import argparse
import os
import time
helper_funcs_f = """
fun sum1(a: bool) { if a then int(INT_MAX, 1) else int(INT_MAX, 0) }
fun sum2(a: bool, b: bool) { sum1(a) + sum1(b) }
fun sum3(a: bool, b: bool, c: bool) { sum1(a) + sum2(b, c) }
fun sum4(a: bool, b: bool, c: bool, d: bool) { sum2(a, b) + sum2(c, d) }
fun sum5(a: bool, b: bool, c: bool, d: bool, e: bool) { sum2(a, b) + sum3(c, d, e) }
fun sum6(a: bool, b: bool, c: bool, d: bool, e: bool, f: bool) { sum3(a, b, c) + sum3(d, e, f) }
fun sum7(a: bool, b: bool, c: bool, d: bool, e: bool, f: bool, g: bool) { sum3(a, b, c) + sum4(d, e, f, g) }
fun sum8(a: bool, b: bool, c: bool, d: bool, e: bool, f: bool, g: bool, h: bool) { sum4(a, b, c, d) + sum4(e, f, g, h) }
"""
def to_program_dice(ms: MineSweeper, outvar: str, outfile:str = None):
INT_MAX = ms.N + 2
program = helper_funcs_f.replace("INT_MAX", str(INT_MAX))
# var_names = ""
prev = None
# for i in range(ms.H):
# for j in range(ms.W):
# program += f"let x_{i}_{j} = flip(0.5) in\n"
for i in range(ms.H):
for j in range(ms.W):
program += f"let x_{i}_{j} = flip(0.5) in\n"
if prev is None:
program += f"let count_{i}_{j} = if x_{i}_{j} then int({INT_MAX}, 1) else int({INT_MAX}, 0) in\n"
else:
program += f"let count_{i}_{j} = if x_{i}_{j} then count_{prev[0]}_{prev[1]} + int({INT_MAX}, 1) else count_{prev[0]}_{prev[1]} in\n"
program += f"let _ = observe(count_{i}_{j} != int({INT_MAX}, {INT_MAX-1})) in\n"
prev = i, j
for i in range(ms.H):
for j in range(ms.W):
if ms.revealed[i, j]:
nbrs = ms.neighbors(i, j)
func = f"sum{len(nbrs)}"
func_args = ", ".join([f"x_{ni}_{nj}" for ni, nj in nbrs])
count = int(ms.get(i, j))
program += f"let _ = observe({func}({func_args}) == int({INT_MAX}, {count})) in\n"
program += f"let _ = observe(!x_{i}_{j}) in\n"
program += f"let _ = observe(count_{ms.H-1}_{ms.W-1} == int({INT_MAX}, {ms.N})) in\n"
program += f"{outvar}\n"
if outfile is None:
print(program)
else:
with open(outfile, 'w') as out:
out.write(program)
call_id_dict = {}
def to_probs_dice(ms: MineSweeper):
call_id = call_id_dict.get(ms.seed, 0)
probs = np.zeros((ms.H, ms.W))
print()
for i in range(ms.H):
for j in range(ms.W):
# print(" . ", end="")
if not ms.revealed[i, j]:
fieldfile = f"programs/dice/ms_{ms.seed}_{call_id}.ml"
codefile = f"programs/dice/ms_{ms.seed}_{call_id}.ml"
to_program_dice(ms, f"x_{i}_{j}", outfile=codefile)
output = os.popen(f"bin/Dice.native {codefile}").read()
try:
prob = float(output.split("\n")[1].split("\t")[1])
except Exception:
print(f"Cannot parse output of dice when run on {codefile}.")
exit(1)
probs[i, j] = prob
call_id += 1
# print()
call_id_dict[ms.seed] = call_id
return probs # return probabilities of having a mine
call_id_dict = {}
def to_probs_dice_optimized(ms: MineSweeper):
INT_MAX = ms.N + 2
program = helper_funcs_f.replace("INT_MAX", str(INT_MAX))
# var_names = ""
prev = None
program += f"let count = int({INT_MAX}, 0) in\n"
for i in range(ms.H):
for j in range(ms.W):
program += f"let x_{i}_{j} = flip(0.5) in\n"
for i in range(ms.H):
for j in range(ms.W):
# program += f"let x_{i}_{j} = flip(0.5) in\n"
program += f"let count = if x_{i}_{j} then count + int({INT_MAX}, 1) else count in\n"
program += f"let _ = observe(count != int({INT_MAX}, {INT_MAX-1})) in\n"
prev = i, j
for i in range(ms.H):
for j in range(ms.W):
if ms.revealed[i, j]:
nbrs = ms.neighbors(i, j)
func = f"sum{len(nbrs)}"
func_args = ", ".join([f"x_{ni}_{nj}" for ni, nj in nbrs])
count = int(ms.get(i, j))
program += f"let _ = observe({func}({func_args}) == int({INT_MAX}, {count})) in\n"
program += f"let _ = observe(!x_{i}_{j}) in\n"
program += f"let _ = observe(count == int({INT_MAX}, {ms.N})) in\n"
outvar = ""
for i in range(ms.H):
for j in range(ms.W):
if outvar == "":
outvar = "x_0_0"
else:
outvar = f"({outvar}, x_{i}_{j})"
program += f"{outvar}\n"
call_id = call_id_dict.get(ms.seed, 0)
codefile = f"programs/dice/ms_{ms.seed}_{call_id}.ml"
call_id_dict[ms.seed] = call_id + 1
with open(codefile, 'w') as out:
out.write(program)
try:
output = os.popen(f"bin/Dice2.native -skip-table -show-marginals {codefile}").read()
probs = [float(w.split("\t")[1]) for w in output.split("\n")[2::4]]
probs = np.array(probs).reshape(ms.H, ms.W)
return probs
except Exception:
print(f"Cannot parse output of dice when run on {codefile}.")
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Solve MineSweeper using DICE.')
parser.add_argument('mine_count', metavar='N', type=int, help='Number of mines.')
parser.add_argument('width', metavar='W', type=int, help='Minefield width.')
parser.add_argument('height', metavar='H', type=int, help='Minefield height.')
parser.add_argument('--game_count', metavar='G', type=int, help='Number of games.', default=1)
parser.add_argument('--player', type=str, help="Player's algorithm.", choices=["greedy", "optimized_greedy"], default="optimized_greedy")
parser.add_argument('--seed', type=int, help="Seed for RNG.", default=np.random.randint(100))
parser.add_argument('--variant', type=str, help="Game variant.", default="simple", choices=["windows", "simple"])
args = parser.parse_args()
N, H, W = args.mine_count, args.height, args.width
# Player = globals()[args.player]
if args.player == "greedy":
Player = GreedyPlayer
elif args.player == "optimized_greedy":
Player = OptimizedGreedyPlayer
if args.variant == "simple":
Variant = MineSweeper
elif args.variant == "windows":
Variant = WindowsMineSweeper
player = Player(to_probs_dice)
n_won = 0
np.random.seed(args.seed)
scores = []
start_time = time.time()
for game_id in range(args.game_count):
print(f"\n-"+"------"*W+"\n")
print(f"GAME #{game_id+1}")
print(f"\n-"+"------"*W+"\n")
ms = Variant(N, H, W)
# ms = MineSweeper(N, H, W)
result, score = player.play(ms, debug=True)
n_won += result
scores.append(score)
end_time = time.time()
print(f"\n-"+"------"*W+"\n")
print(f"\nAI won {n_won}/{args.game_count} games.")
print(f"\nAverage score: {np.mean(scores):.2f}.")
print(f"\nAverage time taken: {(end_time - start_time)/args.game_count} seconds.")
|
[
"meet.taraviya@cs.ucla.edu"
] |
meet.taraviya@cs.ucla.edu
|
0b4b089cb2421dbf051adbae3a366054e449ef04
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_135/ch134_2020_04_01_11_47_07_593166.py
|
404e99e3745e5ecb39c09e326bee9e7237c916e0
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
def verifica_quadrado_perfeito(n):
    m = n
    i = 0
    while m >= 0:
        m = m - i
        i = i + 2
    m = m * (-1)
    m = m**2
    if m == n:
        return True
    else:
        return False
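
# --- Editor's hedged sketch (an addition): quick sanity checks. For positive n,
# --- the loop subtracts 0, 2, 4, ... until m turns negative; the squared
# --- leftover equals n exactly when n is a perfect square.
print(verifica_quadrado_perfeito(16))  # True
print(verifica_quadrado_perfeito(15))  # False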
|
[
"you@example.com"
] |
you@example.com
|
59e66ba132f900a8328c8b884f9778b786e4bf89
|
c3261fb977ae26d7a40e619dace8fa06df738882
|
/src/nlpia/book/examples/ch09_imdb_sentiment_lstm_v1.py
|
06e9b981abc2962a84f747c3b90f970c2140b9a1
|
[
"MIT"
] |
permissive
|
totalgood/nlpia3
|
8dc471eae184d30077506339bfa99b0f42ac7082
|
c5898e010b04aff72215ba8549859703857698fb
|
refs/heads/master
| 2022-11-05T14:57:40.082068
| 2019-10-14T19:38:30
| 2019-10-14T19:38:30
| 193,171,162
| 5
| 0
|
MIT
| 2022-11-01T01:05:46
| 2019-06-21T23:36:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,033
|
py
|
# coding: utf-8
# In[1]:
import keras
# In[1]:
'''Trains a LSTM on the IMDB sentiment classification task.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF + LogReg.
Notes:
- RNNs are tricky. Choice of batch size is important,
choice of loss and optimizer is critical, etc.
Some configurations won't converge.
- LSTM loss decrease patterns during training can be quite different
from what you see with CNNs/MLPs/etc.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding
from keras.layers import LSTM
from keras.datasets import imdb
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, dropout=0.2))
model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) # try using a GRU instead, for fun
model.add(Dense(1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test,
batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
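# --- Editor's note (hedged): this script targets the Keras 1.x API. Under
# --- Keras 2 the equivalents are imdb.load_data(num_words=...),
# --- LSTM(dropout=..., recurrent_dropout=...) and model.fit(..., epochs=...).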
|
[
"hobs+github@totalgood.com"
] |
hobs+github@totalgood.com
|
f3d82a111f84cead46652286a4ffcd99a2af1ffe
|
3ec92f3e7f037d02af74f79b0e0f5f3fe6a25863
|
/Code/MergePredictions.py
|
db1607a5675fae1bae76d33eddd6566ae41175b8
|
[] |
no_license
|
muditjai/CMAP
|
26a2eefbdea3ef7da11e2e09bfc4bd00442670f6
|
343a924f9bf04ba3feae62445a881088325a5bc2
|
refs/heads/master
| 2021-01-20T20:28:56.536969
| 2016-07-21T20:39:45
| 2016-07-21T20:39:45
| 63,392,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,664
|
py
|
"""
1. Take 10 optional inputs
2. Find the available predictions from headers
3. Construct final prediction file with above predictions and rest as 1
4. convert this to exe
"""
import numpy as np
from typing import Tuple, Dict
import argparse
# TODO Use numRows=1650 for final test, and 1000 for local test
# Arguments - --geneStartId 971 --geneEndId 12320 --numRows 1650,1000 --genFullFile False --inputFileList MergePredictionData\Gene971.txt,MergePredictionData\Gene971.txt,MergePredictionData\Gene971.txt --outputFile MergePredictionData\Gene971_972_973.txt --outputFileColFormat MergePredictionData\Gene971_972_973_col.txt
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--geneStartId", required=True, type=int, nargs='?', default=971)
parser.add_argument("--geneEndId", required=True, type=int, nargs='?', default=12320)
parser.add_argument("--numRows", required=True, type=int, nargs='?', default=1650)
parser.add_argument("--genFullFile", required=True, nargs='?')
parser.add_argument("--inputFileList", required=True, nargs='?', default="")
parser.add_argument("--outputFile", required=True, nargs='?', default="")
parser.add_argument("--outputFileColFormat", required=True, nargs='?', default="")
args = parser.parse_args()
gene_start = args.geneStartId
gene_end = args.geneEndId
num_rows = args.numRows
gen_full_file = (args.genFullFile == "True")
output_file = args.outputFile
output_file_col_format = args.outputFileColFormat
# Get and read all input files
input_file_list = args.inputFileList.split(",", )
input_file_list = list(filter(None, input_file_list))
file_content_dict = {} # type: Dict[str, np.ndarray]
geneid_filename_dict = {} # type: Dict[int, Tuple[str,int]]
for file_name in input_file_list:
with open(file_name) as fp:
# Parse header lines
header = fp.readline()
genes_in_file = header.split(sep=",")
gene_col_idx = 0
# Build a list of tuples (geneid, filename). Detect duplicate geneid
for gene_header in genes_in_file:
geneid = int(gene_header.split("_")[1])
if geneid in geneid_filename_dict:
print("**Duplicate gene found. Geneid - {0} in both \nFile1-{1}\nFile2-{2}\nExiting.".format(
geneid, geneid_filename_dict[geneid][0], file_name))
exit()
geneid_filename_dict[geneid] = (file_name, gene_col_idx)
gene_col_idx += 1
# Read all file contents into dict
file_data_arr = np.loadtxt(file_name, delimiter=",", skiprows=1)
file_content_dict[file_name] = file_data_arr.reshape(num_rows, -1)
# Output to console the source of genes. Output continuous sources as 1 range.
# Merge the genes in sequence. Use valid gene predictions from files. Rest keep as 1
output_prediction = np.asarray([])
output_header = ""
prev_match_file = ""
for geneid in range(gene_start, gene_end + 1):
pred_list = []
match_file = ""
if geneid in geneid_filename_dict:
match_file = geneid_filename_dict[geneid][0]
match_file_col_idx = geneid_filename_dict[geneid][1]
pred_list = (file_content_dict[match_file])[:, [match_file_col_idx]]
elif gen_full_file: # if full file requested, generate all 1s
pred_list = np.ones([num_rows, 1])
# Print gene source status
if match_file != prev_match_file:
if match_file == "":
print("Gene_{0} generated as all 1 - IsFinalGen-{1}".format(geneid, gen_full_file))
else:
print("Gene_{0} from file- {1}".format(geneid, match_file))
prev_match_file = match_file
if len(pred_list) != 0:
output_header += ",Gene_" + str(geneid)
if (len(output_prediction)) == 0:
output_prediction = pred_list
else:
output_prediction = np.hstack((output_prediction, pred_list))
print("GeneId final value Gene_{0}".format(geneid))
output_header = output_header.strip(',')
# Print the shape of merge. Transpose and do file out.
print("\nOutput Header - " + output_header)
print("Output transpose no header shape-{0}".format(output_prediction.shape))
np.savetxt(output_file_col_format, output_prediction, fmt="%.2f", delimiter=",", header=output_header, comments="")
np.savetxt(output_file, output_prediction.transpose(), fmt="%.2f", delimiter=",", header="", comments="")
|
[
"muditjai@gmail.com"
] |
muditjai@gmail.com
|
197cf6bcbab5b706dca85de94314294cee9862c1
|
db459d978578e726c961fe5423d9495490a8c4c1
|
/tests/unit/test_aws.py
|
4b8f70cc9b81588223eaa0775221aacf963c6a01
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
KOSASIH/openeew-python
|
dafdd56797a404ac61ddde419a772b1088448a43
|
5851b6291713649c342e8a970632153e9c72f760
|
refs/heads/main
| 2023-04-20T17:57:48.623726
| 2021-03-02T16:44:21
| 2021-03-02T16:44:21
| 349,296,940
| 0
| 0
|
Apache-2.0
| 2021-05-13T14:13:34
| 2021-03-19T04:10:46
| null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
# =============================================================================
# Copyright 2019 Grillo Holdings Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import pytest
from openeew.data.aws import AwsDataClient
def test_initialize_country_code_all_caps():
data_client = AwsDataClient('AB')
assert data_client.country_code == 'ab'
def test_change_country_code_all_caps():
data_client = AwsDataClient('AB')
data_client.country_code = 'CD'
assert data_client.country_code == 'cd'
|
[
"michael@grillo.io"
] |
michael@grillo.io
|
944bd3cb19c90482ca15d6db0a3737ed077344fa
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/datahub/databus/shippers/hdfs/shipper.py
|
c11e592351d3e7703c150d3b0a44697e00f147c9
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from datahub.common.const import (
BK_BIZ_ID,
DATA_TYPE,
GEOG_AREA,
HDFS,
ICEBERG,
STORAGES,
)
from datahub.databus.settings import MODULE_SHIPPER
from datahub.databus.shippers.base_shipper import BaseShipper
from datahub.storekit.iceberg import construct_hdfs_conf
from datahub.databus import common_helper, rt
class HdfsShipper(BaseShipper):
storage_type = "hdfs"
module = MODULE_SHIPPER
def _get_shipper_task_conf(self, cluster_name):
data_type = rt.get_rt_fields_storages(self.rt_id)[STORAGES][HDFS][DATA_TYPE]
        # build the HDFS configuration parameters from the HDFS connection info
custom_conf = construct_hdfs_conf(self.sink_conn_info, self.rt_info[GEOG_AREA], data_type)
if data_type == ICEBERG:
return self.config_generator.build_hdfs_iceberg_config_param(
cluster_name,
self.rt_id,
self.source_channel_topic,
self.task_nums,
self.physical_table_name,
custom_conf,
)
else:
            # physical_table_name may be a full path on HDFS; take its last segment as the table name
return self.config_generator.build_hdfs_config_param(
cluster_name,
self.rt_id,
self.task_nums,
self.source_channel_topic,
custom_conf,
self.rt_info[BK_BIZ_ID],
self.physical_table_name.split("/")[-1],
)
@classmethod
def _compare_connector_conf(cls, cluster_name, connector_name, conf, running_conf):
return common_helper.check_keys_equal(
conf,
running_conf,
[
"rt.id",
"topics",
"tasks.max",
"topics.dir",
"logs.dir",
"hdfs.url",
"hadoop.conf.dir",
"flush.size",
"table.name",
],
)
|
[
"terrencehan@tencent.com"
] |
terrencehan@tencent.com
|
0375a5bd9133eaff907c627f486f7274b1ed169e
|
5db1e54b4d3cfcb441703cdb6a6e4e04dbc2e1d7
|
/n_geq_n.py
|
420a0153076e10705c8ba87a5281c3afdb9660c1
|
[] |
no_license
|
vyanphan/codingchallenges
|
10b0701acbf15bfee9e10c2b2bba3ac4fe5993d2
|
ee2bd2459638f0718199c370f9e81fa44b531d58
|
refs/heads/master
| 2020-03-27T06:47:45.266347
| 2018-10-31T05:20:41
| 2018-10-31T05:20:41
| 146,135,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
import math
'''
Given an unsorted array of integers, return the
maximum possible n such that at least n values in
the array are >= n.
[1,2,3,4]
>>> output 2, as 2,3,4 >= 2 but only 3,4 >= 3
[900,2,901,3,1000]
>>> output 3, as 900,901,1000 >= 3
'''
def stupid_n_geq_n(arr): # nlogn solution
    arr = sorted(arr, reverse=True)  # sorted copy; the original mutated the caller's list
    best = 0
    for i in range(len(arr)):
        if arr[i] >= i + 1:  # at least i+1 values are >= i+1
            best = i + 1
    return best
def n_geq_n(arr):
    # O(n) solution: bucket-count the values, capped at len(arr) since the
    # answer can never exceed the number of elements (assumes non-negative ints)
    n = len(arr)
    counts = [0] * (n + 1)
    for v in arr:
        counts[min(v, n)] += 1
    total = 0
    for k in range(n, 0, -1):
        total += counts[k]  # running count of values >= k
        if total >= k:
            return k
    return 0
print(n_geq_n([1,2,3,4]))
print(n_geq_n([900,2,901,3,1000]))
print(n_geq_n([1,3,1,1,1,4,5,1,1,1]))
|
[
"vyan.n.phan@gmail.com"
] |
vyan.n.phan@gmail.com
|
26cddebff4c3497ed19984a48cd9ab463737f2ad
|
857d8f44ee11e7bf6972486e6be875aec9fff819
|
/docs/conf.py
|
851b2272a2b3fa1af493d0e40e9948c563e5a7d7
|
[
"MIT"
] |
permissive
|
s0b0lev/mythx-cli
|
dfe45adfb41163098d08bf1849c48083241b22e5
|
27dc1c4ce1d87fbd02be4d32c5fbb4281da7c53c
|
refs/heads/master
| 2022-04-10T21:18:20.506815
| 2020-03-27T10:32:23
| 2020-03-27T10:32:23
| 250,260,634
| 0
| 0
|
MIT
| 2020-03-26T13:05:12
| 2020-03-26T13:05:11
| null |
UTF-8
|
Python
| false
| false
| 4,950
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mythx_cli documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import mythx_cli # isort:skip
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.coverage"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"MythX CLI"
copyright = u"2019, Dominik Muhs"
author = u"Dominik Muhs"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mythx_cli.__version__
# The full version, including alpha/beta/rc tags.
release = mythx_cli.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["justify.css"]
html_logo = "_static/img/logo.png"
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "mythx_clidoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "mythx_cli.tex", u"MythX CLI Documentation", u"Dominik Muhs", "manual")
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "mythx_cli", u"MythX CLI Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"mythx_cli",
u"MythX CLI Documentation",
author,
"mythx_cli",
"One line description of project.",
"Miscellaneous",
)
]
|
[
"dmuhs@protonmail.ch"
] |
dmuhs@protonmail.ch
|
25be26b35e718200556781820285d30fe312d3c2
|
16bee720f0cd275f848d6b52f14754cccef5b793
|
/01-OOP/14_oop_customClass.py
|
4453be2e5ed7399481179226c08dc032f89fd8cb
|
[] |
no_license
|
lloyd108/python_start
|
d9b979db3efd08929d47d6c82a7d8101eecf7d18
|
6d0162fd44999049d434ba7d595df808f777d741
|
refs/heads/master
| 2020-03-28T19:27:47.845190
| 2019-03-05T15:43:31
| 2019-03-05T15:43:31
| 148,977,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
from types import MethodType
class A:
    pass
def say(self):
    print("I'm saying...")
say(1)  # plain function call; nothing is bound, `self` is just 1
A.say = say  # attach the function to the class: it becomes a method of A
A.say(1)  # called via the class, so `self` must be passed explicitly
a = A()
a.say()  # called on an instance, `self` is bound automatically
print("*" * 10)
class B:
    pass
def talk(s):
    print("I'm talking...")
b = B()
b.talk = MethodType(talk, B)  # bind `talk` with B as its first argument; only this instance gets .talk
b.talk()
print(type(b))  # still <class '__main__.B'>; patching the instance does not change its type
print("*" * 10)
def eat(self):
    print("I'm eating...")
def drink(self):
    print("I'm drinking...")
# type(name, bases, namespace) creates a class dynamically
X = type("NewClass", (object, ), {"new_eat": eat, "new_drink": drink})
x = X()
x.new_eat()
x.new_drink()
|
[
"lloyd108@163.com"
] |
lloyd108@163.com
|
0bca003a900431f0ed27f023690564c73a37e175
|
b19768d7ef7d55cbc3c1f2aba2d6de30fb6328cd
|
/validate_pan.py
|
800be8eb196078c9131d3e9a061223b28d2200cb
|
[] |
no_license
|
MalarSankar/demo
|
6e62e5bd76e3793a1e80d8e6347351d359f1b879
|
125ce2e949a89d50724033824e2c1488f5aab07d
|
refs/heads/master
| 2023-02-18T13:18:26.330249
| 2021-01-18T07:30:43
| 2021-01-18T07:30:43
| 267,773,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import re
pan_num = input("enter pan number: ")
# fullmatch anchors the pattern at both ends; the original re.match also
# accepted strings with trailing characters, e.g. "ABCDE1234FX"
if re.fullmatch(r"[A-Z]{5}[0-9]{4}[A-Z]", pan_num):
    print("True")
else:
    print("False")
|
[
"smalar1998@gmail.com"
] |
smalar1998@gmail.com
|
ac07a4c6e985985142cc1cbbd9cc704b292e741b
|
6e108bc5c4ff3c45d9da6ca2ff62a44a34246169
|
/kumoko/strategies/rfind.py
|
4674acf74fba13e28de553a30aac3bc27a41e8f8
|
[] |
no_license
|
hav4ik/rock-paper-scissors
|
13bcf1728e2ccdf2d6bc7a82eb7b128938981e75
|
397e2cd44b7c882563371edad40336601a83a841
|
refs/heads/main
| 2023-02-25T00:32:04.487564
| 2021-02-01T18:29:54
| 2021-02-01T18:29:54
| 328,952,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,637
|
py
|
import random
from kumoko.kumoko_base import *
from kumoko.kumoko import Kumoko
from kumoko.scoring import SCORINGS
from kumoko.action_choice import ACTION_CHOICES
from functools import partial
class RFindStrategy(BaseAtomicStrategy):
def __init__(self, limit=None, src='his', shenanigans=True):
super().__init__()
self.limit = limit
self.src = src
self.shenanigans = shenanigans
def name(self):
name = f'RFind_{self.limit}_{self.src}'
if self.shenanigans:
name += '_with_shen'
return name
def __call__(self, history):
if len(history) == 0:
return NUM_TO_MOVE[random.randint(0, 2)]
# Type of lookback sequence
if self.src == 'his':
sequence = history.his_moves
elif self.src == 'our':
sequence = history.our_moves
elif self.src == 'dna':
sequence = history.dna_moves
else:
raise ValueError(f'Invalid `src` value (got {self.src}')
# Define lookback window
length = len(history)
    if self.limit is None:
lb = length
else:
lb = min(length, self.limit)
# RFind choose action
    while lb >= 1 and \
        sequence[length - lb:length] not in sequence[0:length - 1]:
lb -= 1
if lb >= 1:
if self.shenanigans:
if random.random() < 0.6:
idx = sequence.rfind(
sequence[length - lb:length], 0, length - 1)
elif random.random() < 0.5:
idx = sequence.rfind(
sequence[length - lb:length], 0, length - 1)
idx2 = sequence.rfind(
sequence[length - lb:length], 0, idx)
if idx2 != -1:
idx = idx2
else:
idx = sequence.find(
sequence[length - lb:length], 0, length - 1)
else:
idx = sequence.rfind(
sequence[length - lb:length], 0, length - 1)
return BEAT[history.his_moves[idx + lb]]
else:
return random.choice('RPS')
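# Summary comment (added for clarity; not in the original): RFind searches for
# the longest recent suffix of the chosen move sequence that also occurred
# earlier in history, locates that earlier occurrence (rfind/find, with the
# optional randomized "shenanigans" variations), and plays the move that beats
# whatever the opponent played immediately after that earlier occurrence.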
class WrappedRFindStrategy(BaseAtomicStrategy):
"""A strategy that contains a Kumoko inside!
"""
class _RFindInnerEnsemble:
"""Only Rfind, nothing else!
"""
def __init__(self, limits, sources, shenanigans=True):
self.limits = limits
self.sources = sources
self.shenanigans = shenanigans
def generate(self):
"""List of strategies (including mirror strategies)
"""
strategies = []
# Add RFind strategies (2 meta-strategies P0 and P'0 for each)
limits=[50, 20, 10]
sources = ['his', 'our', 'dna']
for limit in limits:
for source in sources:
strategies.extend(
generate_meta_strategy_pair(
RFindStrategy,
limit=limit,
src=source,
shenanigans=False,
))
do_rotations = [True for _ in strategies]
return strategies, do_rotations
def __init__(self, limits, sources, shenanigans=True):
ensemble_cls = partial(self._RFindInnerEnsemble,
limits=limits,
sources=sources,
shenanigans=shenanigans)
scoring_cls = SCORINGS['std_dllu_v1']
action_choice_cls = ACTION_CHOICES['best']
self.kumoko = Kumoko(ensemble_cls=ensemble_cls,
scoring_cls=scoring_cls,
action_choice_cls=action_choice_cls)
def __call__(self, history):
if len(history) > 0:
our_last_move = history.our_moves[-1]
his_last_move = history.his_moves[-1]
else:
our_last_move = None
his_last_move = None
return self.kumoko.next_action(our_last_move, his_last_move)
|
[
"hav4ik@users.noreply.github.com"
] |
hav4ik@users.noreply.github.com
|
a65fc101bcb4ea26beea8a74df41edf9f2e715fc
|
c4fe28e6240b59a9e225d0ff5b683dcbb532e992
|
/PankeyevaSD/validator/lib.py
|
7a68150736afc83475243e6d6b3b0250efa7942e
|
[] |
no_license
|
ArtOliinyk/datascience
|
6e1f0db4265f07118d2fa2780b6d583646d4f287
|
fb40edef738a217a1cf4fed874489261f1257db2
|
refs/heads/master
| 2020-09-28T21:09:52.500504
| 2019-12-23T07:08:41
| 2019-12-23T07:08:41
| 226,865,478
| 0
| 0
| null | 2019-12-19T16:17:27
| 2019-12-09T12:25:34
|
Python
|
UTF-8
|
Python
| false
| false
| 436
|
py
|
import re
def id_validator(id):
    # bug fix: the original took no parameter and referenced an undefined `id`
    return bool(re.fullmatch(r'\d{6}', id))
def zip_code_validator(zip):
    return bool(re.fullmatch(r'\d{5}', zip))
def percent_of_beneficiaries_with_copd_validator(copd):
    # accepts empty, 0-99, or exactly 100
    return bool(re.fullmatch(r'([0-9]{1,2})?', copd)) or bool(re.fullmatch(r'100', copd))
def percent_of_beneficiaries_with_hypertension_validator(hypertension):
    # kept as in the original; note this pattern also admits 101-199
    return bool(re.fullmatch(r'([1]?[0-9]{1,2})?', hypertension))
|
[
"pankeeva@gmail.com"
] |
pankeeva@gmail.com
|
f8a89df9fcf70f57de1df85699a6f92fcc9e5bdf
|
81f6fd135813f3727576bd5d74acaf0469b53615
|
/phrase_api/models/styleguide_details.py
|
23a8976d2316a9a97303395082950e7632c16e34
|
[] |
no_license
|
rlisowski/phrase-python
|
cb65ded1e80d1985aa95a4403c7aa3f012bd33b4
|
cbd6bf580a74140928b7536bb9b466d43276cc29
|
refs/heads/master
| 2023-06-18T09:24:43.916142
| 2021-07-15T14:21:58
| 2021-07-15T14:21:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,392
|
py
|
# coding: utf-8
"""
Phrase API Reference
The version of the OpenAPI document: 2.0.0
Contact: support@phrase.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from phrase_api.configuration import Configuration
class StyleguideDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'title': 'str',
'created_at': 'datetime',
'updated_at': 'datetime',
'public_url': 'str',
'audience': 'str',
'target_audience': 'str',
'grammatical_person': 'str',
'vocabulary_type': 'str',
'business': 'str',
'company_branding': 'str',
'formatting': 'str',
'glossary_terms': 'str',
'grammar_consistency': 'str',
'literal_translation': 'str',
'overall_tone': 'str',
'samples': 'str'
}
attribute_map = {
'id': 'id',
'title': 'title',
'created_at': 'created_at',
'updated_at': 'updated_at',
'public_url': 'public_url',
'audience': 'audience',
'target_audience': 'target_audience',
'grammatical_person': 'grammatical_person',
'vocabulary_type': 'vocabulary_type',
'business': 'business',
'company_branding': 'company_branding',
'formatting': 'formatting',
'glossary_terms': 'glossary_terms',
'grammar_consistency': 'grammar_consistency',
'literal_translation': 'literal_translation',
'overall_tone': 'overall_tone',
'samples': 'samples'
}
def __init__(self, id=None, title=None, created_at=None, updated_at=None, public_url=None, audience=None, target_audience=None, grammatical_person=None, vocabulary_type=None, business=None, company_branding=None, formatting=None, glossary_terms=None, grammar_consistency=None, literal_translation=None, overall_tone=None, samples=None, local_vars_configuration=None): # noqa: E501
"""StyleguideDetails - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._title = None
self._created_at = None
self._updated_at = None
self._public_url = None
self._audience = None
self._target_audience = None
self._grammatical_person = None
self._vocabulary_type = None
self._business = None
self._company_branding = None
self._formatting = None
self._glossary_terms = None
self._grammar_consistency = None
self._literal_translation = None
self._overall_tone = None
self._samples = None
self.discriminator = None
if id is not None:
self.id = id
if title is not None:
self.title = title
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if public_url is not None:
self.public_url = public_url
if audience is not None:
self.audience = audience
if target_audience is not None:
self.target_audience = target_audience
if grammatical_person is not None:
self.grammatical_person = grammatical_person
if vocabulary_type is not None:
self.vocabulary_type = vocabulary_type
if business is not None:
self.business = business
if company_branding is not None:
self.company_branding = company_branding
if formatting is not None:
self.formatting = formatting
if glossary_terms is not None:
self.glossary_terms = glossary_terms
if grammar_consistency is not None:
self.grammar_consistency = grammar_consistency
if literal_translation is not None:
self.literal_translation = literal_translation
if overall_tone is not None:
self.overall_tone = overall_tone
if samples is not None:
self.samples = samples
@property
def id(self):
"""Gets the id of this StyleguideDetails. # noqa: E501
:return: The id of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this StyleguideDetails.
:param id: The id of this StyleguideDetails. # noqa: E501
:type: str
"""
self._id = id
@property
def title(self):
"""Gets the title of this StyleguideDetails. # noqa: E501
:return: The title of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this StyleguideDetails.
:param title: The title of this StyleguideDetails. # noqa: E501
:type: str
"""
self._title = title
@property
def created_at(self):
"""Gets the created_at of this StyleguideDetails. # noqa: E501
:return: The created_at of this StyleguideDetails. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this StyleguideDetails.
:param created_at: The created_at of this StyleguideDetails. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this StyleguideDetails. # noqa: E501
:return: The updated_at of this StyleguideDetails. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this StyleguideDetails.
:param updated_at: The updated_at of this StyleguideDetails. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def public_url(self):
"""Gets the public_url of this StyleguideDetails. # noqa: E501
:return: The public_url of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._public_url
@public_url.setter
def public_url(self, public_url):
"""Sets the public_url of this StyleguideDetails.
:param public_url: The public_url of this StyleguideDetails. # noqa: E501
:type: str
"""
self._public_url = public_url
@property
def audience(self):
"""Gets the audience of this StyleguideDetails. # noqa: E501
:return: The audience of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._audience
@audience.setter
def audience(self, audience):
"""Sets the audience of this StyleguideDetails.
:param audience: The audience of this StyleguideDetails. # noqa: E501
:type: str
"""
self._audience = audience
@property
def target_audience(self):
"""Gets the target_audience of this StyleguideDetails. # noqa: E501
:return: The target_audience of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._target_audience
@target_audience.setter
def target_audience(self, target_audience):
"""Sets the target_audience of this StyleguideDetails.
:param target_audience: The target_audience of this StyleguideDetails. # noqa: E501
:type: str
"""
self._target_audience = target_audience
@property
def grammatical_person(self):
"""Gets the grammatical_person of this StyleguideDetails. # noqa: E501
:return: The grammatical_person of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._grammatical_person
@grammatical_person.setter
def grammatical_person(self, grammatical_person):
"""Sets the grammatical_person of this StyleguideDetails.
:param grammatical_person: The grammatical_person of this StyleguideDetails. # noqa: E501
:type: str
"""
self._grammatical_person = grammatical_person
@property
def vocabulary_type(self):
"""Gets the vocabulary_type of this StyleguideDetails. # noqa: E501
:return: The vocabulary_type of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._vocabulary_type
@vocabulary_type.setter
def vocabulary_type(self, vocabulary_type):
"""Sets the vocabulary_type of this StyleguideDetails.
:param vocabulary_type: The vocabulary_type of this StyleguideDetails. # noqa: E501
:type: str
"""
self._vocabulary_type = vocabulary_type
@property
def business(self):
"""Gets the business of this StyleguideDetails. # noqa: E501
:return: The business of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._business
@business.setter
def business(self, business):
"""Sets the business of this StyleguideDetails.
:param business: The business of this StyleguideDetails. # noqa: E501
:type: str
"""
self._business = business
@property
def company_branding(self):
"""Gets the company_branding of this StyleguideDetails. # noqa: E501
:return: The company_branding of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._company_branding
@company_branding.setter
def company_branding(self, company_branding):
"""Sets the company_branding of this StyleguideDetails.
:param company_branding: The company_branding of this StyleguideDetails. # noqa: E501
:type: str
"""
self._company_branding = company_branding
@property
def formatting(self):
"""Gets the formatting of this StyleguideDetails. # noqa: E501
:return: The formatting of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._formatting
@formatting.setter
def formatting(self, formatting):
"""Sets the formatting of this StyleguideDetails.
:param formatting: The formatting of this StyleguideDetails. # noqa: E501
:type: str
"""
self._formatting = formatting
@property
def glossary_terms(self):
"""Gets the glossary_terms of this StyleguideDetails. # noqa: E501
:return: The glossary_terms of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._glossary_terms
@glossary_terms.setter
def glossary_terms(self, glossary_terms):
"""Sets the glossary_terms of this StyleguideDetails.
:param glossary_terms: The glossary_terms of this StyleguideDetails. # noqa: E501
:type: str
"""
self._glossary_terms = glossary_terms
@property
def grammar_consistency(self):
"""Gets the grammar_consistency of this StyleguideDetails. # noqa: E501
:return: The grammar_consistency of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._grammar_consistency
@grammar_consistency.setter
def grammar_consistency(self, grammar_consistency):
"""Sets the grammar_consistency of this StyleguideDetails.
:param grammar_consistency: The grammar_consistency of this StyleguideDetails. # noqa: E501
:type: str
"""
self._grammar_consistency = grammar_consistency
@property
def literal_translation(self):
"""Gets the literal_translation of this StyleguideDetails. # noqa: E501
:return: The literal_translation of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._literal_translation
@literal_translation.setter
def literal_translation(self, literal_translation):
"""Sets the literal_translation of this StyleguideDetails.
:param literal_translation: The literal_translation of this StyleguideDetails. # noqa: E501
:type: str
"""
self._literal_translation = literal_translation
@property
def overall_tone(self):
"""Gets the overall_tone of this StyleguideDetails. # noqa: E501
:return: The overall_tone of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._overall_tone
@overall_tone.setter
def overall_tone(self, overall_tone):
"""Sets the overall_tone of this StyleguideDetails.
:param overall_tone: The overall_tone of this StyleguideDetails. # noqa: E501
:type: str
"""
self._overall_tone = overall_tone
@property
def samples(self):
"""Gets the samples of this StyleguideDetails. # noqa: E501
:return: The samples of this StyleguideDetails. # noqa: E501
:rtype: str
"""
return self._samples
@samples.setter
def samples(self, samples):
"""Sets the samples of this StyleguideDetails.
:param samples: The samples of this StyleguideDetails. # noqa: E501
:type: str
"""
self._samples = samples
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StyleguideDetails):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StyleguideDetails):
return True
return self.to_dict() != other.to_dict()
|
[
"support@phrase.com"
] |
support@phrase.com
|
0fac9d80c79c6023fa73a3539cd28456e6c95ebe
|
9ce0159c84ac4e63dbd7d4f8593ff1f7a5c3ae9d
|
/jblog/bin/jupyter-troubleshoot
|
ea022a4476e73ab9f8523fbc5d6fb668aebe0413
|
[] |
no_license
|
cvanderbush/cvanderbush.github.io
|
8c7af21f4bbbfcbcc14bccb2d207eea883a41e45
|
89958814e8b5cd24f23981e0bfcf5f05d89f9c9a
|
refs/heads/master
| 2021-08-14T13:33:33.615612
| 2017-11-15T21:01:50
| 2017-11-15T21:01:50
| 110,868,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
#!/Users/carl/github/cvanderbush.github.io/jblog/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.troubleshoot import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"carl.vanderbush@gmail.com"
] |
carl.vanderbush@gmail.com
|
|
f5c648b356bc2ff4784eaa2f50b6a1305dc09e47
|
4662a4f0af562f3fb38ebaad9c5f00f507a6e573
|
/vou_viajar/conta/migrations/0004_auto_20200406_1239.py
|
7229b4585c34cf4a619d2246dd10662cd997e97c
|
[] |
no_license
|
mikefarias/vou_viajar_django
|
462a5a5866ad80f29e6aa9babf1cb70bb7727fcc
|
0e0dc35a20572159f67e3a46a221542f7810b3ba
|
refs/heads/master
| 2022-04-21T16:49:46.188275
| 2020-04-21T11:15:36
| 2020-04-21T11:15:36
| 164,842,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
# Generated by Django 2.1.3 on 2020-04-06 15:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('conta', '0003_auto_20200406_1230'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='user_id',
new_name='user',
),
migrations.RenameField(
model_name='travelagency',
old_name='contact_id',
new_name='contact',
),
migrations.RenameField(
model_name='travelagency',
old_name='owner_id',
new_name='owner',
),
]
|
[
"mike@shoppingonlinearacaju.com.br"
] |
mike@shoppingonlinearacaju.com.br
|
956c2fe8549228dbf1ad1cd269cbbfcf8729bf7b
|
774af61392db87e417b65fcbc28b7976dbd5f9f3
|
/gtrends_input.py
|
589e762c2f67f38282460007a34a6aa218d7d0c5
|
[] |
no_license
|
ajw278/GTrendsPy
|
7c906b589e2f6f73fff1930ad05708999025ccc6
|
b14db013cb63f9989706a462281da51eeef962cd
|
refs/heads/master
| 2021-07-05T14:03:32.751450
| 2020-09-18T07:51:10
| 2020-09-18T07:51:10
| 175,021,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
from __future__ import print_function
"""
Fetch search terms from the file, but associate them in a dictionary
by line number
"""
def fetch_search_terms(fname):
with open(fname) as f:
content = f.readlines()
sterms = []
sassoc = {}
iass = 0
for line in content:
if len(line)>2:
sassoc[iass] = []
lsplit = line.split(',')
for il in lsplit:
sterms.append(il.strip())
sassoc[iass].append(sterms[-1])
iass+=1
return sterms, sassoc
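# Illustrative example (not in the original): for an input file containing
#   python tutorial, learn python
#   machine learning
# fetch_search_terms returns
#   sterms == ['python tutorial', 'learn python', 'machine learning']
#   sassoc == {0: ['python tutorial', 'learn python'], 1: ['machine learning']}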
|
[
"andy@dhcp-10-249-178-116.eduroam.wireless.private.cam.ac.uk"
] |
andy@dhcp-10-249-178-116.eduroam.wireless.private.cam.ac.uk
|
2ae44ed23fd75808b97066a9a6faae55af9aa1af
|
328dfeed14f331d3c64b67dcaced73d6a9a3bde8
|
/src/kernel/tools/gen.py
|
6bc1811775f07579d253b3b949b82151c566d725
|
[
"MIT"
] |
permissive
|
priyanr/simba
|
f339930505c17265908e6d07869bf2b7291640c6
|
49c56af04bd0307df2248acb3b0e69f88c50d24f
|
refs/heads/master
| 2021-01-22T02:34:19.685652
| 2017-02-05T20:14:54
| 2017-02-05T20:14:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
#!/usr/bin/env python
import sys
import json
import time
import re
import getpass
file_fmt = """/**
* @section License
*
* The MIT License (MIT)
*
* Copyright (c) 2014-2016, Erik Moqvist
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* This file is part of the Simba project.
*/
/**
* This file was generated by gen.py {major}.{minor} {date}.
*/
#include "simba.h"
#include <stdarg.h>
{sysinfo}
"""
SYSINFO_FMT = """const FAR char sysinfo[] = "app: {name}-{version} built {date} by {user}.\\r\\n"
"board: {board}\\r\\n"
"mcu: {mcu}\\r\\n";
"""
MAJOR = 1
MINOR = 0
if __name__ == '__main__':
name = sys.argv[1]
version = sys.argv[2]
board = sys.argv[3]
mcu = sys.argv[4]
outfile = sys.argv[5]
now = time.strftime("%Y-%m-%d %H:%M %Z")
sysinfo = SYSINFO_FMT.format(name=name,
version=version,
date=now,
user=getpass.getuser(),
board=board,
mcu=mcu)
    # use a context manager so the file is closed; also reuse `now` instead of
    # calling time.strftime a second time
    with open(outfile, 'w') as fout:
        fout.write(file_fmt.format(major=MAJOR,
                                   minor=MINOR,
                                   date=now,
                                   sysinfo=sysinfo))
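# Example invocation (illustrative; the argument values are made up):
#   python gen.py myapp 1.2.3 arduino_due sam3x8e sysinfo_gen.c
# which writes sysinfo_gen.c containing the formatted sysinfo string.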
|
[
"erik.moqvist@gmail.com"
] |
erik.moqvist@gmail.com
|
844c18e08ce70e1d240a0f8e6ac30bc6f8ee713d
|
fd3d46fcbbbf4a1c5bca359feaca9fc7332921b1
|
/epaas_com/erpnext_com/doctype/conference_participant/conference_participant.py
|
0fd965950d11fe72ed9ad2dc675912e9ade7b2a6
|
[] |
no_license
|
dataent/epaas_com
|
31723de4d3c6e3d0a3d911c2aa8035cdd4a7db44
|
cb6a9995dfce3392b5abd38bb11ed9decfe5a8c1
|
refs/heads/master
| 2020-03-20T22:46:16.057801
| 2018-06-18T22:50:41
| 2018-06-18T22:50:41
| 137,814,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Dataent and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from dataent.model.document import Document
class ConferenceParticipant(Document):
def on_payment_authorized(self, status_changed_to=None):
self.paid = 1
self.save(ignore_permissions=True)
|
[
"38303879+dataent@users.noreply.github.com"
] |
38303879+dataent@users.noreply.github.com
|
c061e28cb24da83390d14a5d04167d2b00d2da38
|
eec1718131dab77fdc0fb94064432b8a2f5492a6
|
/test_models.py
|
266340281e27dc27c3173536768c9d9575fb9da1
|
[] |
no_license
|
Tim-Birk/flask-bogly
|
ef0a258f7d3d50614bd0707a0be5f65de1089caf
|
a17e6195c30abd899049df465b9815d515f3a4d7
|
refs/heads/master
| 2023-02-23T05:36:16.721494
| 2021-01-23T20:31:30
| 2021-01-23T20:31:30
| 331,452,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
from unittest import TestCase
from app import app
from models import db, User, Post
from datetime import datetime
# Use test database and don't clutter tests with SQL
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly_test'
app.config['SQLALCHEMY_ECHO'] = False
db.drop_all()
db.create_all()
class UserModelTestCase(TestCase):
"""Tests for model for Users."""
def setUp(self):
"""Clean up any existing users."""
Post.query.delete()
User.query.delete()
def tearDown(self):
"""Clean up any fouled transaction."""
db.session.rollback()
def test_get_full_name(self):
user = User(first_name="TestFirstName", last_name="TestLastName")
        self.assertEqual(user.get_full_name(), "TestFirstName TestLastName")
def test_show_est_formatted_date(self):
post = Post(title="Test Title", content="Some Content...", created_at=datetime(2021, 1, 1, 9, 00))
        self.assertEqual(post.show_est_formatted_date(), "Jan 01 2021 09:00:00 AM")
|
[
"timbirkmire@yahoo.com"
] |
timbirkmire@yahoo.com
|
8e3aeedca99b9abff8282876fbf5b655c1f10a76
|
cf2fd8ffb4777e44aa8c0b0522385ed86f12d799
|
/Homework 3/hw3q4.py
|
cdee46b5ac0e5e26d9949334b544ec3e9eef99b8
|
[] |
no_license
|
mtouhidchowdhury/CS1114-Intro-to-python
|
2b622db0116842d13377d1eae3c744f5a7595275
|
31f311a1991f2d6affcbe13e6b8cd0c154bc59b6
|
refs/heads/master
| 2020-07-02T01:09:31.543145
| 2019-08-09T02:50:28
| 2019-08-09T02:50:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 787
|
py
|
#homework 3 question 4
#getting the input of the sides
a = float(input("Length of first side: "))
b = float(input("Length of second side: "))
c = float(input("Length of third side: "))
if a == b and b == c:  # all three sides equal: equilateral
    print("This is an equilateral triangle")
elif (a == b or a == c or b == c) and (a**2 + b**2 == c**2 or a**2 + c**2 == b**2 or c**2 + b**2 == a**2):
    # this branch must come before the plain isosceles test, otherwise it is unreachable
    print("this is an isosceles right triangle")
elif a == b or a == c or b == c:  # exactly two sides equal: isosceles
    print("this is an isosceles triangle")
else:  # none of the conditions apply
    print("neither equilateral nor isosceles right triangle")
|
[
"noreply@github.com"
] |
mtouhidchowdhury.noreply@github.com
|
bee95a3694329278bd0d6da10efe94897ee1528f
|
49664326b09adc7deefbf98503357a3504d055c5
|
/informatics/String/2.py
|
2905847718dfe97ca6155fc6554e75d78a77d394
|
[] |
no_license
|
erkkke/Python2020
|
70f005e5fc0f386919d22d8a31941d22e90ee870
|
c201c0d228ec8f8229c1be6190c8802875c45215
|
refs/heads/master
| 2023-03-01T10:46:37.076082
| 2021-01-31T12:53:18
| 2021-01-31T12:53:18
| 334,650,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
a = input()
print(a.split())
|
[
"matilda@MacBook-Air-Erkebulan.local"
] |
matilda@MacBook-Air-Erkebulan.local
|
b16b068d989de795df1d3025e670b20b0e94feb0
|
a0cd325b7def592dedf9120847cba2447a714fef
|
/hyperplan/index.py
|
83fe88be0876dfea8ae2d89fbb71c70e22179618
|
[
"MIT"
] |
permissive
|
hyperplan-io/cli
|
0bc2a03947979a45cb4ea2d1518c92d63853c6c0
|
dc7d407701fd78d9065d60c35b0f2674b28c86bb
|
refs/heads/master
| 2023-05-26T04:49:36.069566
| 2019-08-27T17:38:26
| 2019-08-27T17:38:26
| 198,371,338
| 1
| 0
|
MIT
| 2023-05-22T22:29:45
| 2019-07-23T06:58:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,578
|
py
|
#!/usr/bin/env python
import keyring
import getpass
import os
import requests
from http.client import RemoteDisconnected
import sys, getopt
import logging
from hyperplan.hpcmd import HyperplanPrompt
from hyperplan.errors import InvalidCredentials
from hyperplan.api import Api
default_service = "hyperplan-cli"
def get_health(root_api_url):
try:
response = requests.get(
'{}/_health'.format(root_api_url)
)
if response.status_code == 200:
return True
else:
return False
except Exception:
return False
def get_api(service, root_api_url, login, password):
return Api(service, root_api_url, login, password)
def create_api_and_start_cmd(service, logger, root_api_url, login, password):
api = get_api(service, root_api_url, login, password)
start_cmd(api, logger)
def start_cmd(api, logger):
try:
HyperplanPrompt(api, logger).cmdloop()
except InvalidCredentials:
login, password = prompt_credentials()
create_api_and_start_cmd(api.service, logger, api.root_api_url, login, password)
except KeyboardInterrupt:
pass
def get_password(service):
username = keyring.get_password(service, "username")
password = keyring.get_password(service, "password")
return (username, password)
def prompt_credentials():
login= input("Login: ")
password = getpass.getpass()
return (login, password)
def help():
print('hyperplan -loglevel <loglevel>')
def main():
argv = sys.argv[1:]
try:
        # "s:" needs the colon so that -s consumes its server argument
        opts, args = getopt.getopt(argv, "hl:s:", ["loglevel=", "server=", "help"])
except getopt.GetoptError:
help()
sys.exit(2)
log_level = logging.INFO
server = "http://localhost:8080"
for opt, arg in opts:
if opt in('-h', '--help'):
help()
sys.exit()
elif opt in ("-s", "--server"):
server = arg
elif opt in ("-l", "--loglevel"):
if arg.lower() == 'debug':
log_level = logging.DEBUG
print('Using log level debug')
elif arg.lower() == 'info':
log_level = logging.INFO
print('Using log level info')
elif arg.lower() == 'warn':
log_level = logging.WARN
print('Using log level warn')
elif arg.lower() == 'error':
log_level = logging.ERROR
print('Using log level error')
logger = logging.getLogger()
logger.setLevel(level=log_level)
logging.basicConfig(level=log_level)
health = get_health(server)
    if not health:
print('Server is not reachable, is it running on "{}" ?'.format(server))
else:
login, password = get_password(default_service)
        if login is not None and password is not None:
create_api_and_start_cmd(default_service, logger, server, login, password)
else:
try:
login, password = prompt_credentials()
api = get_api(default_service, server, login, password)
api.authenticate(logger, save_credentials=True, log_error=False)
start_cmd(api, logger)
except InvalidCredentials:
print('Invalid credentials')
except Exception as error:
print(error)
if __name__ == "__main__":
try:
main()
except Exception as err:
print('Unhandled error: {}'.format(err))
try:
sys.exit(0)
except:
os._exit(0)
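# Example invocation (illustrative; flag values are made up):
#   python -m hyperplan.index --server http://localhost:8080 --loglevel debug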
|
[
"sauray.antoine@gmail.com"
] |
sauray.antoine@gmail.com
|
3735fa99634f45f77e74570d306e0c432f784408
|
c19be3d81e29b2268bda70b0dda2df989331940f
|
/MenuProject/MenuProject/urls.py
|
34219d772e05d19cec3a1ece47a16c9db3ee76c0
|
[] |
no_license
|
Sanjaymali36/Restaurant-Menu-Project
|
2d6fe659e78796124b3ca0aa05082934e2bab418
|
5040d97076f66ce46cdd7b070ff138e37177ff38
|
refs/heads/master
| 2020-03-21T13:21:14.254520
| 2018-11-22T07:53:26
| 2018-11-22T07:53:26
| 138,601,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
"""MenuProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from MenuItem import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.CreateData),
url(r'^disp.*', views.DisplayMenu, name='display'),
url(r'^Del.*', views.DeleteMenuWithItems),
url(r'^rem.*', views.RemoveMenuWithItems)
]
|
[
"noreply@github.com"
] |
Sanjaymali36.noreply@github.com
|
63ea869d69aca38d061182bfb2f448e14df05ec3
|
33c2341e49c0317ce02b06d9618449442877cf48
|
/old code/game_driver_old.py
|
a2972849f2566963352dbbe32cc0e674e03660e2
|
[] |
no_license
|
inferrinizzard/game-driver
|
3ff83d114054693a2e4d48471e4e622f79acfb45
|
c117fd59285ada25436ccd0e74c1f64708bd0885
|
refs/heads/master
| 2020-12-07T19:38:18.440815
| 2020-01-16T19:52:19
| 2020-01-16T19:52:19
| 232,783,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,112
|
py
|
from IPython.display import display, clear_output, Image
from base64 import b64decode
from json import loads
from time import sleep
from random import choice
from abc import ABC, abstractmethod
class GameDriver():
def __init__(self, cdp):
self.cdp = cdp
targetId = self.cdp.send('Target.createTarget',url='http://ipchicken.com',enableBeginFrameControl=True)['targetId']
sessionId = self.cdp.send('Target.attachToTarget',targetId=targetId,flatten=True)['sessionId']
self.sensors = {}
def add_sensor(self, name, sensor):
if(name not in self.sensors):
self.sensors[name] = sensor
sensor.install(self)
return self.sensors
def remove_sensor(self, name):
if(name in self.sensors):
self.sensors[name].remove(self)
self.sensors.pop(name)
return self.sensors
def navigate(self, address):
nav_output = self.cdp.send('Page.navigate', url=address)
if 'itch.io' in address:
sleep(1)
self.navigate(self.cdp.send("Runtime.evaluate", expression="document.getElementsByTagName('iframe')[0].src")["result"]["value"])
self.inject_event_script()
return nav_output
def inject_event_script(self):
collection_script = '''(() => {
let events = [];
const inputs = (key, e) => [key, key.startsWith('mouse') ? [e.clientX, e.clientY, e.button] : String.fromCharCode(e.keyCode)];
['mousedown', 'mousemove', 'mouseup', 'keyup', 'keydown'].forEach(k => window.addEventListener(k, e => events.push({event: inputs(k,e), time: +new Date()}),true));
window._getRecentEvents = () => events.splice(0, events.length);
})();'''
self.cdp.send("Runtime.evaluate", expression=collection_script)
def get_browser_version(self):
return self.cdp.send('Browser.getVersion')
def pause(self):
return self.cdp.send('Emulation.setVirtualTimePolicy', policy='pause')
def step(self, actions=[], dur = None):
for a in actions:
a.apply(self)
if dur is not None:
for i in range(dur):
self.cdp.send('HeadlessExperimental.beginFrame')
self.cdp.send('Emulation.setVirtualTimePolicy', policy='advance', budget=1000/60)
return {name: sensor.observe(self) for name,sensor in self.sensors.items()}
def get_events(self):
res = self.cdp.send("Runtime.evaluate", expression='JSON.stringify(window._getRecentEvents());')
return loads(res['result']['value'])
def press_key(self, key):
def text(letter):
self.cdp.send("Input.dispatchKeyEvent", type="keyDown", windowsVirtualKeyCode=ord(letter), nativeVirtualKeyCode=ord(letter), key=letter)
self.cdp.send("Input.dispatchKeyEvent", type="char", text= letter, key= letter)
self.cdp.send("Input.dispatchKeyEvent", type="keyUp", windowsVirtualKeyCode=ord(letter), nativeVirtualKeyCode=ord(letter), key= letter)
def ascii(char):
self.cdp.send("Input.dispatchKeyEvent", type="keyDown", windowsVirtualKeyCode=ord(char), nativeVirtualKeyCode=ord(char), key=char)
self.cdp.send("Input.dispatchKeyEvent", type="keyUp", windowsVirtualKeyCode=ord(char), nativeVirtualKeyCode=ord(char), key=char)
success = False
        if key.isalnum() or key == " ":  # `is " "` compared identity, not equality
if(len(key)==1):
text(key)
else:
for k in key:
text(k)
success = True
elif(key.isascii()):
if(len(key)==1):
ascii(key)
else:
for k in key:
ascii(k)
success=True
return success
def press_mouse(self, pos, button = 0, type="Moved"):
buttons = ["left", "middle", "right"]
return self.cdp.send('Input.dispatchMouseEvent', type="mouse" + type, x=pos["x"], y=pos["y"], button=buttons[button])
def get_screenshot(self):
res = self.cdp.send('HeadlessExperimental.beginFrame', screenshot={'format':'jpeg'})#, noDisplayUpdates=True)
# res = self.cdp.send("Page.captureScreenshot")
# print(res)
if 'screenshotData' in res:
clear_output()
display(Image(b64decode(res['screenshotData'])))
return res
def run(self):
self.event_log = []
self.event_keys = []
self.event_mouse = []
read_mode = True
print("record mode on")
exit = False
while not exit:
if read_mode:
for e in self.get_events():
# print(e)
if e['event'][0] == "keydown" and ord(e['event'][1]) == 220:
print("record mode off, playback mode on")
self.event_keys = [key["event"][1] for key in filter(lambda x: "keydown" in x["event"][0], self.event_log)]
self.event_log = []
read_mode = False
with open("keys.out", 'w') as log:
log.write(("_").join(self.event_keys))
break
elif e['event'][0] == "keydown" and ord(e['event'][1]) == 221:
self.event_log = []
print("clearing event log")
break
elif e['event'][0] == "keydown" and ord(e['event'][1]) == 219:
exit = True
print("exiting")
break
else:
self.event_log.append(e)
else:
with open('keys.out' , 'r') as log:
e_k = log.read()
self.event_keys = e_k.split("_")
if(not self.event_keys):
read_mode = True
print("playback mode off, record mode on")
continue
# time.sleep(.1)
e = choice(self.event_keys)
self.press_key(e)
# print("pressed '"+e+"'")
last = (lambda x: x[-1] if x else None)(self.get_events())
if last and last['event'][0] == "keyup" and ord(last['event'][1]) == 220:
print("playback mode off, record mode on")
read_mode = True
self.event_keys = []
def close(self):
self.cdp.close()
print("game driver closed")
def send(self, *args, **kwargs):
return self.cdp.send(*args, **kwargs)
def __del__(self):
self.close()
class Sensor(ABC):
@abstractmethod
def install(self, driver):
pass
@abstractmethod
def observe(self, driver):
pass
@abstractmethod
def remove(self, driver):
pass
class Action(ABC):
@abstractmethod
def apply(self, driver):
pass
|
[
"inferrinizzard@gmail.com"
] |
inferrinizzard@gmail.com
|
46163b40ac62876c08a145a4a6690ab5d0d54d7d
|
0cdf05e4456234b271b48f6ff2c6f49b75f021de
|
/rootofchaos/login_app/migrations/0001_initial.py
|
ee2f5b4cd5eedc42ea1b7302ec159ab8fb9ef988
|
[] |
no_license
|
HauslerMatthew/Root-of-Chaos
|
b8aa96e04324407e327ed1cc7c189d7ed5864623
|
926e0aabda365dfc89cf4a9ac5b74fad696cef00
|
refs/heads/main
| 2023-04-01T05:37:54.405958
| 2021-04-07T16:33:43
| 2021-04-07T16:33:43
| 355,613,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
# Generated by Django 2.2 on 2021-01-21 20:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('email_address', models.CharField(max_length=255)),
('username', models.CharField(max_length=255)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"hauslermatthew@gmail.com"
] |
hauslermatthew@gmail.com
|
79af04779687a3dc390f42312330277a6a9a2f95
|
9d0e8f9b6ac9d7ddf4781da21ae741c5e3d0dbe8
|
/venv/bin/easy_install
|
789c44929c85e90ec90a5ea1ff743478e1b30005
|
[] |
no_license
|
ElizBth/Reactive-Navigation-Under-a-Fuzzy-Rules-Based-Scheme-and-Reinforcement-Learning
|
6bdcd33a01bf6bf3032063874c6975387921142a
|
396e5b8fe07a9e75757c9f239f2bc49198a26dac
|
refs/heads/main
| 2023-03-22T03:46:17.840796
| 2021-02-26T21:51:35
| 2021-02-26T21:51:35
| 308,754,052
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
#!/home/lizzy/PycharmProjects/RobotPathPlanningBasedOnBatteryBehaviour/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"lizzy_silverthorns@hotmail.com"
] |
lizzy_silverthorns@hotmail.com
|
|
f28734a018bbe06950d5f86c605fff1aec45e9e4
|
6f52bdc94219b7a72db975276cc94b929e59cdb1
|
/notes.py
|
2547f8c40dea3944fedd7f1653d817d475ef0794
|
[] |
no_license
|
juned8236/Head_pythonbook_comprehanded_data
|
294e377d18897ecb47d510c7b0e81a034036c207
|
9f8de208c30c5fe7ab0b14d6dc9f1712e9284f3e
|
refs/heads/master
| 2020-05-23T19:13:17.598909
| 2019-05-15T21:42:50
| 2019-05-15T21:42:50
| 186,906,611
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
"""That data.strip().split(',') line looks a little weird. Can you explain what’s going on?
A: That’s called method chaining. The first method, strip(), is applied to the line in data, which removes any unwanted whitespace
from the string. Then, the results of the stripping are processed by the second method, split(','), creating a list. The resulting list is
then applied to the target identifier in the previous code. In this way, the methods are chained together to produce the required result. It helps
if you read method chains from left to right.
"""
"""By default, both the sort() method and the sorted()
BIF order your data in ascending order. To order your data in
descending order, pass the reverse=True argument to
either sort() or sorted() and Python will take care of
things for you."""
"""The sort() method changes the
ordering of lists in-place.
The sorted() BIF sorts most any data
structure by providing copied sorting.
Pass reverse=True to either
sort() or sorted() to arrange your
data in descending order.
When you have code like this:
new_l = []
for t in old_l:
    new_l.append(len(t))
rewrite it to use a list comprehension, like this:
new_l = [len(t) for t in old_l]
To access more than one data item from
a list, use a slice. For example:
my_list[3:6]
accesses the items from index location 3
up-to-but-not-including index location 6.
Create a set using the set() factory
function."""
|
[
"juned8236@gmail.com"
] |
juned8236@gmail.com
|
87d51aea49dff60922cde0febfc68ef74267abb7
|
3547780f38f53031a8eba2a054b2b6f068bb0515
|
/insertion_sort.py
|
38d31155fbe6ab603313395e30c6cf51dd126b81
|
[] |
no_license
|
SSJ007/Week1
|
53abe1834dbfe9cbafa9e500845850a525cf2352
|
f2986d11856db14ffa35a75e64b1d4ecb4e690cf
|
refs/heads/master
| 2022-11-29T21:53:29.100850
| 2020-08-17T19:39:16
| 2020-08-17T19:39:16
| 273,901,551
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
def insertionSort(arr):
for i in range(1, len(arr)):
key = arr[i]
j = i-1
        while j >= 0 and key < arr[j]:
arr[j+1] = arr[j]
j -= 1
arr[j+1] = key
input_array=list(map(int,input("Enter the values you need to sort my friend\n").split()))
insertionSort(input_array)
print("Sorted values are:",*input_array)
|
[
"noreply@github.com"
] |
SSJ007.noreply@github.com
|
4ff925119237dc850ad565976791e59d1857f28e
|
e1e3561ca1ed2f4902394893f48f99246fee832a
|
/test/unit/rules/functions/test_sub_not_join.py
|
6c8f6807b936c2d77390537d4ef920851f4899a7
|
[
"MIT-0"
] |
permissive
|
trav-c/cfn-python-lint
|
7dbdae4893871fbb5868c2146faedb75feec2212
|
55ea16d94800f05fbe01d44018bf5f96be5cca62
|
refs/heads/master
| 2023-07-06T18:52:48.303514
| 2023-06-30T17:51:51
| 2023-06-30T17:51:51
| 266,032,828
| 0
| 0
|
NOASSERTION
| 2020-05-22T06:12:37
| 2020-05-22T06:12:37
| null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.functions.SubNotJoin import SubNotJoin # pylint: disable=E0401
class TestSubNotJoin(BaseRuleTestCase):
"""Test Rules Get Att"""
def setUp(self):
"""Setup"""
super(TestSubNotJoin, self).setUp()
self.collection.register(SubNotJoin())
self.success_templates = [
"test/fixtures/templates/good/functions/subnotjoin.yaml",
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative(
"test/fixtures/templates/bad/functions/subnotjoin.yaml", 1
)
|
[
"noreply@github.com"
] |
trav-c.noreply@github.com
|
ee5bb8b702ae0d2fd77b6409278aee395309f2b4
|
2f46c6ce5de573ba765a901fbd9e96b53350a657
|
/utils/service_discovery/config.py
|
ee0e90c7e1e7eb91b6941ddda78b4a7d59c956c5
|
[
"BSD-2-Clause"
] |
permissive
|
jsh2134/dd-agent
|
193f65d79cc659b7a7e89af0458e71e8b879439f
|
1551a4f3446eca7ed500dd1858d663a485555404
|
refs/heads/master
| 2020-12-25T22:36:22.602056
| 2016-04-20T21:46:21
| 2016-04-20T21:46:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
# std
import logging
# project
from utils.service_discovery.sd_backend import SD_BACKENDS
from utils.service_discovery.config_stores import extract_sd_config, SD_CONFIG_BACKENDS
log = logging.getLogger(__name__)
def extract_agent_config(config):
# get merged into the real agentConfig
agentConfig = {}
backend = config.get('Main', 'service_discovery_backend')
agentConfig['service_discovery'] = True
conf_backend = None
if config.has_option('Main', 'sd_config_backend'):
conf_backend = config.get('Main', 'sd_config_backend')
if backend not in SD_BACKENDS:
log.error("The backend {0} is not supported. "
"Service discovery won't be enabled.".format(backend))
agentConfig['service_discovery'] = False
if conf_backend is None:
log.warning('No configuration backend provided for service discovery. '
'Only auto config templates will be used.')
elif conf_backend not in SD_CONFIG_BACKENDS:
log.error("The config backend {0} is not supported. "
"Only auto config templates will be used.".format(conf_backend))
conf_backend = None
agentConfig['sd_config_backend'] = conf_backend
additional_config = extract_sd_config(config)
agentConfig.update(additional_config)
return agentConfig
|
[
"haissam@datadoghq.com"
] |
haissam@datadoghq.com
|
68a3581e53ae6b573073542bdfe4b1e020cfd600
|
337dad29f3c740d86ce472bce720dab3c8a501b4
|
/tests/test_excelAddinGenerator.py
|
3af18af5c1104b654f7177c336d0aa2090a6775a
|
[] |
no_license
|
Beakerboy/Excel-Addin-Generator
|
6e87646446f51f258dd6d558a92e12bb216cbb1f
|
bca3e139a377b0050ff73f064e9158afe519b226
|
refs/heads/master
| 2023-02-01T00:42:41.819117
| 2023-01-07T00:04:38
| 2023-01-07T00:04:38
| 183,014,209
| 15
| 2
| null | 2023-01-07T00:04:39
| 2019-04-23T12:51:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
# test_excelAddinGenerator.py
import pytest
from excelAddinGenerator.main import *
from os.path import exists
from filehash import FileHash
def test_success_from_bin():
"""Test that xlam is successfully generated from a OLE file"""
createFromBin("tests/vbaProject.bin", "src/data", "success_bin.xlam")
# Assert that xlam file is created
assert exists("success_bin.xlam")
#assert that bin file within success_bin.xlam matches tests/vbaProject.bin
extractBinFromZip("success_bin.xlam")
md5hasher = FileHash('md5')
assert md5hasher.hash_file("tests/vbaProject.bin") == md5hasher.hash_file("xl/vbaProject.bin")
createFromZip("success_bin.xlam", "src/data", "success_xlam.xlam")
assert exists("success_xlam.xlam")
#assert that bin file within success_xlam.xlam matches bin file within success_bin.xlam
extractBinFromZip("success_xlam.xlam")
assert md5hasher.hash_file("tests/vbaProject.bin") == md5hasher.hash_file("xl/vbaProject.bin")
def test_not_bin_exception():
""" Test that an exception is thrown if the bin file is not an OLE file"""
with pytest.raises(Exception) as e_info:
createFromBin("tests/blank.bin", "src/data", "./fail.xlam")
def test_xlam_not_zip():
""" Test that an exception is thrown if the zip is not a zip archive"""
with pytest.raises(Exception) as e_info:
createFromZip("tests/blank.bin", "src/data", "./fail.xlam")
def test_main():
main(["./excelAddinGenerator", "./tests/vbaProject.bin", "success_bin.xlam"])
main(["./excelAddinGenerator", "success_bin.xlam", "success_xlam.xlam"])
def test_main_incorrect_type():
""" Test that an exception is thrown if the zip is not a zip archive"""
with pytest.raises(Exception) as e_info:
main(["./excelAddinGenerator", "./src/data/xl/styles.xml", "fail.xlam"])
|
[
"noreply@github.com"
] |
Beakerboy.noreply@github.com
|
a81d7941436bbe43ae43215d63788e766ae61278
|
ed93ecbd61387e03e903d84dc9dc89ff13efacf9
|
/pyvdk/tools/mention.py
|
fc289aa26db9f043728325a61974d4641ff5810a
|
[
"MIT"
] |
permissive
|
UT1C/pyvdk
|
070a50ddad1b493b54bac60c7937a5dd60b0eddc
|
168177c4006acc7f57be36f189bee8101e10253d
|
refs/heads/master
| 2023-03-30T05:20:42.900502
| 2020-12-19T18:35:22
| 2020-12-19T18:35:22
| 310,387,311
| 18
| 2
|
MIT
| 2020-11-23T22:06:51
| 2020-11-05T18:40:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
import re
import typing
class Mention:
user: bool
club: bool
id: int
text: str
regex = re.compile(r"\[(id|club)(\d{1,16})\|(.+?)\]")
def __init__(
self,
id: int,
text: str = "mention",
user: bool = False,
club: bool = False,
):
self.id = id
self.text = text
self.user = user
self.club = club
if not self.user and not self.club:
if self.id > 0:
self.user = True
elif self.id < 0:
self.club = True
@classmethod
def find(cls, text: str) -> typing.Optional["Mention"]:
match = cls.regex.search(text)
if match is not None:
return cls(
id=int(match[2]),
user=match[1] == "id",
club=match[1] == "club",
text=match[3]
)
return None
@classmethod
def finditer(cls, text: str) -> typing.Iterator["Mention"]:
iterator = cls.regex.finditer(text)
for i in iterator:
yield cls(
id=int(i[2]),
user=i[1] == "id",
club=i[1] == "club",
text=i[3]
)
def __str__(self) -> str:
return f"[{'id' if self.user else 'club'}{self.id}|{self.text}]"
|
[
"liteman1000@gmail.com"
] |
liteman1000@gmail.com
|
8566d05a5bfb8c6017d82184d07769a03c3882a6
|
a1b86a23feddff385d3397e231bb55a06afc1a49
|
/knnTest.py
|
7236b71aed60ecc92ac942d3266c60e91ea8321f
|
[] |
no_license
|
jc814/assignment1
|
d15eb86b975c030826109e4a01b91885a467ac4a
|
b1df29d8bce0b8bbda037db8a67e7810978807a3
|
refs/heads/master
| 2022-10-10T09:21:23.792691
| 2019-06-02T15:42:28
| 2019-06-02T15:42:28
| 189,863,605
| 0
| 1
| null | 2022-09-30T11:57:16
| 2019-06-02T15:31:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,151
|
py
|
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print(X_train.shape, X_test.shape)
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_no_loops (the fully vectorized version called below).
# Test your implementation:
dists = classifier.compute_distances_no_loops(X_test)
print(dists.shape)
print(dists)
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
# Now implement the function predict_labels and run the code below:
# We use k = 3 here (k = 1 would be plain Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=3)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
# y_test_pred = classifier.predict_labels(dists, k=5)
# num_correct = np.sum(y_test_pred == y_test)
# accuracy = float(num_correct) / num_test
# print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
|
[
"1054539066@qq.com"
] |
1054539066@qq.com
|
68ed5c05ba913abae9695427a4393f6f29bd2c22
|
243506601c77122e78e512f01a613da2bcb199e6
|
/venv/Scripts/django-admin.py
|
ee0202462f80b5efc334cca402894417407a8330
|
[] |
no_license
|
DevilsZone/Doctors_App
|
186439a5f08f36134b4e1d07a7a0958224fcc937
|
8aebd69e0254414bc78d3541c4630f15aabf3a6c
|
refs/heads/master
| 2022-11-10T17:18:53.653069
| 2020-07-01T06:07:18
| 2020-07-01T06:07:18
| 276,288,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
#!C:\Users\Akash\Desktop\DJANO_APPS\Doctors_App\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"akashkumarqoou@iitkgp.ac.in"
] |
akashkumarqoou@iitkgp.ac.in
|
6c44b6721d398f1c50351fdf82faea3298fb9134
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/2621.py
|
61590502c74929df447eb930b37a2ba432ed2837
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
'''
Created on Apr 12, 2014
@author: fuellerm
'''
import sys
# Read one 4x4 block of cards
def read_cards(f):
row = int(f.readline())
for j in range(1, 5):
if j == row:
result = set(f.readline().rstrip('\n').split(' '))
else:
f.readline()
return result
f = open(sys.argv[1], 'r')
n = int(f.readline())
for i in range(1, n+1):
cards1 = read_cards(f)
cards2 = read_cards(f)
common = list(cards1 & cards2)
if len(common) == 0:
result = "Volunteer cheated!"
elif len(common) == 1:
result = common[0]
else:
result = "Bad magician!"
print "Case #" + str(i) + ": " + result
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
fc1532bd93c30059c3fe87eeb8a1c316b6b3480c
|
d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3
|
/chromium/testing/scripts/generators/gen_run_web_tests_script.py
|
7f9c1aa18a53ae9be90c2632a7bed5a9552a3558
|
[
"BSD-3-Clause"
] |
permissive
|
Csineneo/Vivaldi
|
4eaad20fc0ff306ca60b400cd5fad930a9082087
|
d92465f71fb8e4345e27bd889532339204b26f1e
|
refs/heads/master
| 2022-11-23T17:11:50.714160
| 2019-05-25T11:45:11
| 2019-05-25T11:45:11
| 144,489,531
| 5
| 4
|
BSD-3-Clause
| 2022-11-04T05:55:33
| 2018-08-12T18:04:37
| null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
#!/usr/bin/env vpython
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a script to run blink tests using
blinkpy/web_tests/run_webkit_tests.py
"""
import argparse
import os
import re
import stat
import sys
CHROMIUM_SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
BASH_SCRIPT_TEMPLATE = """\
#!/bin/bash
#
# Generated by //testing/scripts/generators/gen_run_web_tests_script.py
{executable_path} {build_config_arg} "$@"
"""
BAT_SCRIPT_TEMPLATE = """\
@echo off
REM Generated by //testing/scripts/generators/gen_run_web_tests_script.py
CALL python {executable_path} {build_config_arg} %*
"""
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'--script-output-path', required=True, help='path to write the file to')
parser.add_argument(
'--build-type', required=True, choices=['debug', 'release'])
options = parser.parse_args(args)
if options.build_type == 'debug':
build_config_arg = '--debug'
else:
build_config_arg = '--release'
executable_path = os.path.join(
CHROMIUM_SRC_DIR, 'third_party', 'blink', 'tools', 'run_web_tests.py')
if options.script_output_path.endswith('.bat'):
contents = BAT_SCRIPT_TEMPLATE.format(
executable_path=executable_path, build_config_arg=build_config_arg)
else:
contents = BASH_SCRIPT_TEMPLATE.format(
executable_path=executable_path, build_config_arg=build_config_arg)
with open(options.script_output_path, 'w') as fp:
fp.write(contents)
os.chmod(options.script_output_path,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"csineneo@gmail.com"
] |
csineneo@gmail.com
|
e7b9f39fd86fc7c99e16077b75ed51bee1d50467
|
0fe0d8a938760af472a22e311e1f6339f5e02430
|
/health_inspector/health_inspector/settings.py
|
232f9766515779f36cfbcaad291fbd1edebd4bea
|
[
"MIT"
] |
permissive
|
rochakgupta/health-inspector
|
023d6d5e12bd7ab80bd6162fa70fb480513c6386
|
8caa8cadd1d70216c4f0242bd27f3377e669e7b4
|
refs/heads/master
| 2021-06-22T18:01:24.387094
| 2020-02-23T09:23:24
| 2020-02-23T09:23:24
| 165,119,265
| 2
| 0
|
MIT
| 2021-06-10T21:08:16
| 2019-01-10T19:34:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,216
|
py
|
"""
Django settings for health_inspector project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import secrets
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = secrets.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'material',
# 'material.frontend',
# 'material.admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'account'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'health_inspector.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'health_inspector.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': secrets.DATABASE_NAME,
'HOST': secrets.DATABASE_HOST,
'USER': secrets.DATABASE_USER,
'PASSWORD': secrets.DATABASE_PASSWORD,
'PORT': secrets.DATABASE_PORT
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = secrets.TIME_ZONE
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'account.CustomUser'
LOGIN_URL = 'login'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = secrets.EMAIL_HOST
EMAIL_HOST_USER = secrets.EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = secrets.EMAIL_HOST_PASSWORD
EMAIL_PORT = secrets.EMAIL_PORT
EMAIL_USE_TLS = True
TWILIO_PHONE_NUMBER = secrets.TWILIO_PHONE_NUMBER
TWILIO_ACCOUNT_SID = secrets.TWILIO_ACCOUNT_SID
TWILIO_AUTH_TOKEN = secrets.TWILIO_AUTH_TOKEN
OPTIONS = {
'init_command': ("SET time_zone='%s'" % TIME_ZONE),
}
|
[
"rochak84@gmail.com"
] |
rochak84@gmail.com
|
65fc261aaa35fb987361a62350d5aab65fd42494
|
0d6a45c42d26e15d999ef1ff008dbcb78b48a704
|
/manage.py
|
296543a5a32f43f67f3cde7cfa9f7559a7034859
|
[] |
no_license
|
Navaneethdi99/gdstore
|
6f0400dc638d56eed299921ea5e4e8a028098bef
|
1ec3382c97ad57316c0f571a67fcce8a7f84f045
|
refs/heads/master
| 2023-08-10T08:59:35.858396
| 2021-09-12T07:13:06
| 2021-09-12T07:13:06
| 405,571,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gdshop.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"navaneethdi99@gmail.com"
] |
navaneethdi99@gmail.com
|
bbc3953f4cdd3bf3a3f03fb20619baf613b07f14
|
d52149a6f5537a9e53b2204c7175616211631872
|
/eshop/urls.py
|
364baa84b5e60653754dbd535b396dd87be774eb
|
[] |
no_license
|
deceptikon/eshop_demo
|
ebcc132bb1ec1b3f9ec0ba99cea5efa59f2e64f0
|
a4d368cd1e4e8f010441eb4c17d75ce5919d5e3d
|
refs/heads/master
| 2022-05-09T04:44:38.898536
| 2020-01-30T13:50:01
| 2020-01-30T13:50:01
| 224,675,309
| 0
| 0
| null | 2022-04-22T22:49:57
| 2019-11-28T14:52:55
|
CSS
|
UTF-8
|
Python
| false
| false
| 772
|
py
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import TemplateView
from . import views
urlpatterns = [
path('', TemplateView.as_view(template_name='main.html'), name='index'),
path('category', views.CatsView.as_view(), name='products_category_all'),
path('category/<slug:slug>', views.CatsView.as_view(), name='products_category'),
path('cart', views.CartView.as_view(), name='cart'),
path('login', views.LoginView.as_view(), name='login'),
path('logout', views.log_out, name='logout'),
path('register', views.register, name='register'),
path('protected', views.protected, name="test"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"lexx.kg@gmail.com"
] |
lexx.kg@gmail.com
|
12538a4f886b449a2ba1d3ee5d68c2ae2bb78c13
|
7bd890e680830bbf0656d8694d9a1bbdc9abe5da
|
/weather/models.py
|
ccfb1b195b806dcbce718bc7a29902a00efc2030
|
[] |
no_license
|
duy2779/weather-app-django
|
7a7df023e344e7ab88b9669dfc9b3fc13524631f
|
28fec7e6d9b89054319275cd38366fad9c8b7a12
|
refs/heads/main
| 2023-03-29T15:34:32.651707
| 2021-03-25T03:52:44
| 2021-03-25T03:52:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
from django.db import models
# Create your models here.
class City(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'City'
verbose_name_plural = 'Cities'
|
[
"jultqd99@gmail.com"
] |
jultqd99@gmail.com
|
e29eb91c4128e991fb23ad4aaf45d04365908d96
|
caea797032af8b294071b1f9024f53d92ae7e811
|
/manage.py
|
d8a90c9f462184be82fc8d59124cbbb0afa7dc70
|
[] |
no_license
|
hummans/SMSHubServer
|
b28860dcfdd9c9a519e634eba46108f0a14ab1d1
|
37a714ab061baf099c761eed44fedf5bfccbd390
|
refs/heads/master
| 2022-01-06T04:37:15.450133
| 2019-05-18T18:54:06
| 2019-05-18T18:54:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SMSHubServer.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"you@example.com"
] |
you@example.com
|
49f9c790641e65005be9d718ff1fd573d74dac8c
|
b7fd93d46cafc329810192a869e1bb85ba69efd1
|
/leParisien.py
|
27fb099967cf7148373990ae743d028ddb0badf1
|
[] |
no_license
|
saprykins/News_scrapers_different_languages
|
f91ac54fbbe158daeb9f78c540dff0eabc8b1bf5
|
4a59732a7de4fad72a9bf86147b1b75ae4891705
|
refs/heads/master
| 2023-06-29T07:35:57.889052
| 2021-07-21T06:15:14
| 2021-07-21T06:15:14
| 388,008,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
# -*- coding: utf-8 -*-
import scrapy
class QuotesSpider(scrapy.Spider):
name = 'LeParisien_news'
start_urls = ['https://www.leparisien.fr/elections/presidentielle/']
def parse(self, response):
        for quote in response.css('.story-headline'):  # doesn't work
print ('header:', quote.css('::text').get())
"""
yield {
#'author': quote.xpath('span/small/text()').get(),
#'text': quote.css('span.text::text').get(),
'header': quote.css('::text').get()
}
"""
"""
next_page = response.css('li.next a::attr("href")').get()
if next_page is not None:
yield response.follow(next_page, self.parse)
"""
|
[
"palatatot@gmail.com"
] |
palatatot@gmail.com
|
8bfce2a7a6aca6b9e16fd8db4fe3fc04d9be1f15
|
fda896eac15fd03fc28aa15e0ba14b76def2e2cd
|
/PyCharm/3_strings.py
|
d81c9a2e65543c2e5abd663de962ada3856d5609
|
[] |
no_license
|
laurencebarin27/code-review
|
82108a2f841f7d8bab047e77df1ad9c9ffeca19d
|
7eddedd7630e3a24957989aa1c4938004e5f4a78
|
refs/heads/master
| 2020-08-04T21:28:27.621230
| 2019-10-02T08:00:57
| 2019-10-02T08:00:57
| 212,284,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
phrase = "De La Salle University"
print(phrase.upper())
print(phrase.isupper())
print(phrase.lower())
print(phrase.islower())
print(len(phrase))
print(phrase.index("S"))
print(phrase.replace("e","u"))
|
[
"noreply@github.com"
] |
laurencebarin27.noreply@github.com
|
d3b5bd512446d05d5576f2ee81934506c0d6840a
|
64e9565bd7ca0cab875f2549fa40781061926568
|
/source/representation.py
|
814b9d475984a0cd99f98cc8330c2075f0d3e463
|
[] |
no_license
|
carlyrobison/sonnetspoofer
|
9dcf0a5d1193fe6623ab80c61f2b2adae2d7889f
|
3b18004b9e5c6c679cb49664500b1fb878c0484d
|
refs/heads/master
| 2021-01-20T03:17:30.783296
| 2017-02-28T01:55:56
| 2017-02-28T01:55:56
| 82,864,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
def word_to_number(word, word_dict):
return word_dict[word]
def words_to_numbers(sonnets):
'''Converts a sonnet to a list of lists
with each word represented as a number'''
word_dict = {}
counter = 0
sonnet_nums = []
for sonnet in sonnets:
sonnet_num = []
for line in sonnet:
sonnet_line = []
for word in line:
l_word = word.lower()
if l_word not in word_dict:
word_dict[l_word] = counter
counter += 1
sonnet_line += [word_dict[l_word]]
sonnet_num += [sonnet_line]
sonnet_nums += [sonnet_num]
return sonnet_nums, word_dict
def number_to_word(num, word_dict):
'''Converts a number to a word'''
for word in word_dict:
if word_dict[word] == num:
return word
return None
def numbers_to_words(line, word_dict):
'''Converts a generated sonnet of numbers into a sonnet with
words'''
ret = ""
for i in range(len(line)):
num = line[i]
ret += number_to_word(num, word_dict)
if i != len(line)-1:
ret += " "
return ret
|
[
"sharikak54@gmail.com"
] |
sharikak54@gmail.com
|
c959b8b7bed8fd3dddde436661861c689c0d7c64
|
1f5f8f95530003c6c66419519d78cb52d21f65c0
|
/projects/golem_integration/tests/browser/wait_for_element_present.py
|
22acbae98f8b9d9e129667624b9b2f208bd90431
|
[] |
no_license
|
golemhq/golem-tests
|
c5d3ab04b1ea3755d8b812229feb60f513d039ac
|
dff8fd3a606c3d1ef8667aece6fddef8ac441230
|
refs/heads/master
| 2023-08-17T23:05:26.286718
| 2021-10-04T20:34:17
| 2021-10-04T20:34:17
| 105,579,436
| 4
| 1
| null | 2018-11-19T00:14:24
| 2017-10-02T20:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
from golem import actions
from projects.golem_integration.utils import expected_exception
description = 'Verify webdriver.wait_for_element_present method'
def test(data):
actions.navigate(data.env.url+'dynamic-elements/?delay=3')
actions.get_browser().wait_for_element_present('#button-five', 5)
actions.verify_element_present('#button-five')
actions.navigate(data.env.url + 'dynamic-elements/?delay=5')
msg = "timeout waiting for element #button-five to be present"
with expected_exception(Exception, msg):
actions.get_browser().wait_for_element_present('#button-five', 3)
|
[
"feo.luciano@gmail.com"
] |
feo.luciano@gmail.com
|
f013fa41d0f4c8046cf3ca3d48870d6debda0bc0
|
d40694bda47fac8b148dfbb2c7fdbf508de33050
|
/botocore/utils.py
|
1d9185c37e60b0852bf39b330eb0acc5f84cf972
|
[
"MIT"
] |
permissive
|
coingraham/serverless_rds_auto_parking
|
f01f022d4aca34dd0bbc688d5e49570eb2c32e95
|
985f0a962b6a07d4f23b51e5a5a76f5d1d37718a
|
refs/heads/master
| 2020-06-24T03:18:49.133513
| 2017-07-14T21:08:05
| 2017-07-14T21:08:05
| 96,917,872
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,005
|
py
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
import dateutil.parser
from dateutil.tz import tzlocal, tzutc
import botocore
from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.exceptions import InvalidDNSNameError, ClientError
from botocore.exceptions import MetadataRetrievalError
from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit
from botocore.vendored import requests
from botocore.compat import OrderedDict, six
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_SECURITY_CREDENTIALS_URL = (
'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
)
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
RESTRICTED_REGIONS = [
'us-gov-west-1',
'fips-us-gov-west-1',
]
RETRYABLE_HTTP_ERRORS = (requests.Timeout, requests.ConnectionError)
S3_ACCELERATE_WHITELIST = ['dualstack']
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
def is_json_value_header(shape):
"""Determines if the provided shape is the special header type jsonvalue.
:type shape: botocore.shape
:param shape: Shape to be inspected for the jsonvalue trait.
:return: True if this type is a jsonvalue, False otherwise
:rtype: Bool
"""
return (hasattr(shape, 'serialization') and
shape.serialization.get('jsonvalue', False) and
shape.serialization.get('location') == 'header' and
shape.type_name == 'string')
def get_service_module_name(service_model):
"""Returns the module name for a service
This is the value used in both the documentation and client class name
"""
name = service_model.metadata.get(
'serviceAbbreviation',
service_model.metadata.get(
'serviceFullName', service_model.service_name))
name = name.replace('Amazon', '')
name = name.replace('AWS', '')
name = re.sub(r'\W+', '', name)
return name
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def remove_dot_segments(url):
# RFC 3986, section 5.2.4 "Remove Dot Segments"
# Also, AWS services require consecutive slashes to be removed,
# so that's done here as well
if not url:
return ''
input_url = url.split('/')
output_list = []
for x in input_url:
if x and x != '.':
if x == '..':
if output_list:
output_list.pop()
else:
output_list.append(x)
if url[0] == '/':
first = '/'
else:
first = ''
if url[-1] == '/' and output_list:
last = '/'
else:
last = ''
return first + '/'.join(output_list) + last
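# Illustrative examples (not from the original source):
#     remove_dot_segments('/a/b/../c/./d')  ->  '/a/c/d'
#     remove_dot_segments('a//b/')          ->  'a/b/'   (consecutive slashes collapsed)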
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a
# value based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
if current_key not in source:
# We've got something in the expression that's not present in the
# source (new key). If there's any more bits, we'll set the key
# with an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
class InstanceMetadataFetcher(object):
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, url=METADATA_SECURITY_CREDENTIALS_URL):
self._timeout = timeout
self._num_attempts = num_attempts
self._url = url
def _get_request(self, url, timeout, num_attempts=1):
for i in range(num_attempts):
try:
response = requests.get(url, timeout=timeout)
except RETRYABLE_HTTP_ERRORS as e:
logger.debug("Caught exception while trying to retrieve "
"credentials: %s", e, exc_info=True)
else:
if response.status_code == 200:
return response
raise _RetriesExceededError()
    def retrieve_iam_role_credentials(self, url=None, timeout=None,
                                      num_attempts=None):
        # Fall back to the instance defaults unless explicit values are
        # given; the recursive call below passes them through (the original
        # zero-argument signature would have made that call raise TypeError).
        data = {}
        if url is None:
            url = self._url
        if timeout is None:
            timeout = self._timeout
        if num_attempts is None:
            num_attempts = self._num_attempts
try:
r = self._get_request(url, timeout, num_attempts)
if r.content:
fields = r.content.decode('utf-8').split('\n')
for field in fields:
if field.endswith('/'):
data[field[0:-1]] = self.retrieve_iam_role_credentials(
url + field, timeout, num_attempts)
else:
val = self._get_request(
url + field,
timeout=timeout,
num_attempts=num_attempts).content.decode('utf-8')
if val[0] == '{':
val = json.loads(val)
data[field] = val
else:
logger.debug("Metadata service returned non 200 status code "
"of %s for url: %s, content body: %s",
r.status_code, url, r.content)
except _RetriesExceededError:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
num_attempts)
# We sort for stable ordering. In practice, this should only consist
# of one role, but may need revisiting if this expands in the future.
final_data = {}
for role_name in sorted(data):
final_data = {
'role_name': role_name,
'access_key': data[role_name]['AccessKeyId'],
'secret_key': data[role_name]['SecretAccessKey'],
'token': data[role_name]['Token'],
'expiry_time': data[role_name]['Expiration'],
}
return final_data
def merge_dicts(dict1, dict2, append_lists=False):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
:param append_lists: If true, instead of clobbering a list with the new
value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key])
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(dict2[key])
else:
dict1[key] = dict2[key]
else:
# At scalar types, we iterate and merge the
# current dict that we're on.
dict1[key] = dict2[key]
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
    If any value in the input ``mapping`` is a list type,
    then each list element will be serialized. This is equivalent
    to ``urlencode``'s ``doseq=True`` argument.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
if isinstance(value, list):
for element in value:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(element)))
else:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
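# Illustrative example (not from the original source):
#     percent_encode_sequence([('prefix', 'a b'), ('keys', ['x', 'y z'])])
#     # -> 'prefix=a%20b&keys=x&keys=y%20z'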
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
If given the binary type, will simply URL encode it. If given the
text type, will produce the binary type by UTF-8 encoding the
    text. If given something else, will convert it to the text type
first.
"""
# If its not a binary or text string, make it a text string.
if not isinstance(input_str, (six.binary_type, six.text_type)):
input_str = six.text_type(input_str)
# If it's not bytes, make it bytes by UTF-8 encoding it.
if not isinstance(input_str, six.binary_type):
input_str = input_str.encode('utf-8')
return quote(input_str, safe=safe)
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
"""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzlocal())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzlocal())
except (TypeError, ValueError):
pass
try:
# In certain cases, a timestamp marked with GMT can be parsed into a
# different time zone, so here we provide a context which will
# enforce that GMT == UTC.
return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
of the integer, i.e '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
# I think a case would be made that if no time zone is provided,
# we should use the local time. However, to restore backwards
# compat, the previous behavior was to assume UTC, which is
# what we're going to do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
def datetime2timestamp(dt, default_timezone=None):
"""Calculate the timestamp based on the given datetime instance.
:type dt: datetime
:param dt: A datetime object to be converted into timestamp
:type default_timezone: tzinfo
:param default_timezone: If it is provided as None, we treat it as tzutc().
But it is only used when dt is a naive datetime.
:returns: The timestamp
"""
epoch = datetime.datetime(1970, 1, 1)
if dt.tzinfo is None:
if default_timezone is None:
default_timezone = tzutc()
dt = dt.replace(tzinfo=default_timezone)
d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
if hasattr(d, "total_seconds"):
return d.total_seconds() # Works in Python 2.7+
return (d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6) / 10**6
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
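# Hedged usage sketch (not part of the original file):
#
#     class Report(object):
#         @CachedProperty
#         def totals(self):
#             return compute_totals()   # hypothetical expensive call
#
# The first access of report.totals invokes compute_totals(); the result is
# stored in the instance __dict__, which shadows the (non-data) descriptor,
# so later accesses never call the function again.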
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input/output shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input/output shape.
The specific values used are place holder values. For strings either an
empty string or the member name can be used, for numbers 0 or 0.0 is used.
The intended usage of this class is to generate the *shape* of the input
structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self, use_member_names=False):
self._use_member_names = use_member_names
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack, name=''):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
if self._use_member_names:
return name
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name == 'float':
return 0.0
elif shape.type_name == 'boolean':
return True
elif shape.type_name == 'timestamp':
return datetime.datetime(1970, 1, 1, 0, 0, 0)
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(
member_shape, stack, name=member_name)
return skeleton
def _generate_type_list(self, shape, stack):
# For list elements we've arbitrarily decided to
# return two elements for the skeleton list.
name = ''
if self._use_member_names:
name = shape.member.name
return [
self._generate_skeleton(shape.member, stack, name),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
def is_valid_endpoint_url(endpoint_url):
"""Verify the endpoint_url is valid.
:type endpoint_url: string
:param endpoint_url: An endpoint_url. Must have at least a scheme
and a hostname.
:return: True if the endpoint url is valid. False otherwise.
"""
parts = urlsplit(endpoint_url)
hostname = parts.hostname
if hostname is None:
return False
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile(
r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
re.IGNORECASE)
return allowed.match(hostname)
def check_dns_name(bucket_name):
"""
Check to see if the ``bucket_name`` complies with the
restricted DNS naming conventions necessary to allow
access via virtual-hosting style.
Even though "." characters are perfectly valid in this DNS
naming scheme, we are going to punt on any name containing a
"." character because these will cause SSL cert validation
problems if we try to use virtual-hosting style addressing.
"""
if '.' in bucket_name:
return False
n = len(bucket_name)
if n < 3 or n > 63:
# Wrong length
return False
if n == 1:
if not bucket_name.isalnum():
return False
match = LABEL_RE.match(bucket_name)
if match is None or match.end() != len(bucket_name):
return False
return True
def fix_s3_host(request, signature_version, region_name,
default_endpoint_url='s3.amazonaws.com', **kwargs):
"""
This handler looks at S3 requests just before they are signed.
If there is a bucket name on the path (true for everything except
ListAllBuckets) it checks to see if that bucket name conforms to
the DNS naming conventions. If it does, it alters the request to
use ``virtual hosting`` style addressing rather than ``path-style``
addressing. This allows us to avoid 301 redirects for all
bucket names that can be CNAME'd.
"""
# By default we do not use virtual hosted style addressing when
# signed with signature version 4.
if signature_version is not botocore.UNSIGNED and \
's3v4' in signature_version:
return
elif not _allowed_region(region_name):
return
try:
switch_to_virtual_host_style(
request, signature_version, default_endpoint_url)
except InvalidDNSNameError as e:
bucket_name = e.kwargs['bucket_name']
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)
def switch_to_virtual_host_style(request, signature_version,
default_endpoint_url=None, **kwargs):
"""
This is a handler to force virtual host style s3 addressing no matter
the signature version (which is taken in consideration for the default
case). If the bucket is not DNS compatible an InvalidDNSName is thrown.
:param request: A AWSRequest object that is about to be sent.
:param signature_version: The signature version to sign with
:param default_endpoint_url: The endpoint to use when switching to a
virtual style. If None is supplied, the virtual host will be
constructed from the url of the request.
"""
if request.auth_path is not None:
# The auth_path has already been applied (this may be a
# retried request). We don't need to perform this
# customization again.
return
elif _is_get_bucket_location_request(request):
# For the GetBucketLocation response, we should not be using
# the virtual host style addressing so we can avoid any sigv4
# issues.
logger.debug("Request is GetBucketLocation operation, not checking "
"for DNS compatibility.")
return
parts = urlsplit(request.url)
request.auth_path = parts.path
path_parts = parts.path.split('/')
# Retrieve what the endpoint we will be prepending the bucket name to.
if default_endpoint_url is None:
default_endpoint_url = parts.netloc
if len(path_parts) > 1:
bucket_name = path_parts[1]
if not bucket_name:
# If the bucket name is empty we should not be checking for
# dns compatibility.
return
logger.debug('Checking for DNS compatible bucket for: %s',
request.url)
if check_dns_name(bucket_name):
# If the operation is on a bucket, the auth_path must be
# terminated with a '/' character.
if len(path_parts) == 2:
if request.auth_path[-1] != '/':
request.auth_path += '/'
path_parts.remove(bucket_name)
# At the very least the path must be a '/', such as with the
# CreateBucket operation when DNS style is being used. If this
# is not used you will get an empty path which is incorrect.
path = '/'.join(path_parts) or '/'
global_endpoint = default_endpoint_url
host = bucket_name + '.' + global_endpoint
new_tuple = (parts.scheme, host, path,
parts.query, '')
new_uri = urlunsplit(new_tuple)
request.url = new_uri
logger.debug('URI updated to: %s', new_uri)
else:
raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def _allowed_region(region_name):
return region_name not in RESTRICTED_REGIONS
def instance_cache(func):
"""Method decorator for caching method calls to a single instance.
**This is not a general purpose caching decorator.**
In order to use this, you *must* provide an ``_instance_cache``
attribute on the instance.
This decorator is used to cache method calls. The cache is only
scoped to a single instance though such that multiple instances
will maintain their own cache. In order to keep things simple,
this decorator requires that you provide an ``_instance_cache``
attribute on your instance.
"""
func_name = func.__name__
@functools.wraps(func)
def _cache_guard(self, *args, **kwargs):
cache_key = (func_name, args)
if kwargs:
kwarg_items = tuple(sorted(kwargs.items()))
cache_key = (func_name, args, kwarg_items)
result = self._instance_cache.get(cache_key)
if result is not None:
return result
result = func(self, *args, **kwargs)
self._instance_cache[cache_key] = result
return result
return _cache_guard
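# Hedged usage sketch (not part of the original file): the decorated class
# must create ``_instance_cache`` itself.
#
#     class Resolver(object):
#         def __init__(self):
#             self._instance_cache = {}
#
#         @instance_cache
#         def resolve(self, name):
#             return expensive_lookup(name)   # hypothetical helper
#
# Repeated resolve('foo') calls on one instance hit the cache; a second
# instance keeps its own independent cache.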
def switch_host_s3_accelerate(request, operation_name, **kwargs):
"""Switches the current s3 endpoint with an S3 Accelerate endpoint"""
# Note that when registered the switching of the s3 host happens
# before it gets changed to virtual. So we are not concerned with ensuring
# that the bucket name is translated to the virtual style here and we
# can hard code the Accelerate endpoint.
parts = urlsplit(request.url).netloc.split('.')
parts = [p for p in parts if p in S3_ACCELERATE_WHITELIST]
endpoint = 'https://s3-accelerate.'
if len(parts) > 0:
endpoint += '.'.join(parts) + '.'
endpoint += 'amazonaws.com'
if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
return
_switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
"""Switches the host using a parameter value from a JSON request body"""
request_json = json.loads(request.data.decode('utf-8'))
if request_json.get(param_name):
new_endpoint = request_json[param_name]
_switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
final_endpoint = _get_new_endpoint(
request.url, new_endpoint, use_new_scheme)
request.url = final_endpoint
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
new_endpoint_components = urlsplit(new_endpoint)
original_endpoint_components = urlsplit(original_endpoint)
scheme = original_endpoint_components.scheme
if use_new_scheme:
scheme = new_endpoint_components.scheme
final_endpoint_components = (
scheme,
new_endpoint_components.netloc,
original_endpoint_components.path,
original_endpoint_components.query,
''
)
final_endpoint = urlunsplit(final_endpoint_components)
logger.debug('Updating URI from %s to %s' % (
original_endpoint, final_endpoint))
return final_endpoint
def deep_merge(base, extra):
"""Deeply two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
"""
for key in extra:
# If the key represents a dict on both given dicts, merge the sub-dicts
if key in base and isinstance(base[key], dict)\
and isinstance(extra[key], dict):
deep_merge(base[key], extra[key])
continue
# Otherwise, set the key on the base to be the value of the extra.
base[key] = extra[key]
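# Illustrative example (not from the original source):
#     base = {'a': {'x': 1}, 'b': 2}
#     deep_merge(base, {'a': {'y': 3}, 'b': 4})
#     # base is now {'a': {'x': 1, 'y': 3}, 'b': 4}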
class S3RegionRedirector(object):
def __init__(self, endpoint_bridge, client, cache=None):
self._endpoint_resolver = endpoint_bridge
self._cache = cache
if self._cache is None:
self._cache = {}
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
def register(self, event_emitter=None):
emitter = event_emitter or self._client.meta.events
emitter.register('needs-retry.s3', self.redirect_from_error)
emitter.register('before-call.s3', self.set_request_url)
emitter.register('before-parameter-build.s3',
self.redirect_from_cache)
def redirect_from_error(self, request_dict, response, operation, **kwargs):
"""
An S3 request sent to the wrong region will return an error that
contains the endpoint the request should be sent to. This handler
will add the redirect information to the signing context and then
redirect the request.
"""
if response is None:
# This could be none if there was a ConnectionError or other
# transport error.
return
error = response[1].get('Error', {})
error_code = error.get('Code')
if error_code == '301':
# A raw 301 error might be returned for several reasons, but we
# only want to try to redirect it if it's a HeadObject or
# HeadBucket because all other operations will return
# PermanentRedirect if region is incorrect.
if operation.name not in ['HeadObject', 'HeadBucket']:
return
elif error_code != 'PermanentRedirect':
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
# Return 0 so it doesn't wait to retry
return 0
def get_bucket_region(self, bucket, response):
"""
There are multiple potential sources for the new region to redirect to,
but they aren't all universally available for use. This will try to
find region from response elements, but will fall back to calling
HEAD on the bucket if all else fails.
:param bucket: The bucket to find the region for. This is necessary if
the region is not available in the error response.
:param response: A response representing a service request that failed
due to incorrect region configuration.
"""
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
def set_request_url(self, params, context, **kwargs):
endpoint = context.get('signing', {}).get('endpoint', None)
if endpoint is not None:
params['url'] = _get_new_endpoint(params['url'], endpoint, False)
def redirect_from_cache(self, params, context, **kwargs):
"""
This handler retrieves a given bucket's signing context from the cache
and adds it into the request context.
"""
bucket = params.get('Bucket')
signing_context = self._cache.get(bucket)
if signing_context is not None:
context['signing'] = signing_context
else:
context['signing'] = {'bucket': bucket}
class ContainerMetadataFetcher(object):
TIMEOUT_SECONDS = 2
RETRY_ATTEMPTS = 3
SLEEP_TIME = 1
IP_ADDRESS = '169.254.170.2'
_ALLOWED_HOSTS = [IP_ADDRESS, 'localhost', '127.0.0.1']
def __init__(self, session=None, sleep=time.sleep):
if session is None:
session = requests.Session()
self._session = session
self._sleep = sleep
def retrieve_full_uri(self, full_url, headers=None):
"""Retrieve JSON metadata from container metadata.
:type full_url: str
:param full_url: The full URL of the metadata service.
This should include the scheme as well, e.g
"http://localhost:123/foo"
"""
self._validate_allowed_url(full_url)
return self._retrieve_credentials(full_url, headers)
def _validate_allowed_url(self, full_url):
parsed = botocore.compat.urlparse(full_url)
is_whitelisted_host = self._check_if_whitelisted_host(
parsed.hostname)
if not is_whitelisted_host:
raise ValueError(
"Unsupported host '%s'. Can only "
"retrieve metadata from these hosts: %s" %
(parsed.hostname, ', '.join(self._ALLOWED_HOSTS)))
def _check_if_whitelisted_host(self, host):
if host in self._ALLOWED_HOSTS:
return True
return False
def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self.full_url(relative_uri)
return self._retrieve_credentials(full_url)
def _retrieve_credentials(self, full_url, extra_headers=None):
headers = {'Accept': 'application/json'}
if extra_headers is not None:
headers.update(extra_headers)
attempts = 0
while True:
try:
return self._get_response(full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"container metadata: %s", e, exc_info=True)
self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
def _get_response(self, full_url, headers, timeout):
try:
response = self._session.get(full_url, headers=headers,
timeout=timeout)
if response.status_code != 200:
raise MetadataRetrievalError(
error_msg="Received non 200 response (%s) from ECS metadata: %s"
% (response.status_code, response.text))
try:
return json.loads(response.text)
except ValueError:
raise MetadataRetrievalError(
error_msg=("Unable to parse JSON returned from "
"ECS metadata: %s" % response.text))
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
def full_url(self, relative_uri):
return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
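# Hedged usage sketch (not part of botocore): how a caller might drive
# ContainerMetadataFetcher. The relative URI below is hypothetical; in
# practice it comes from the AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
# environment variable.
#
#   fetcher = ContainerMetadataFetcher()
#   creds = fetcher.retrieve_uri('/v2/credentials/<task-credential-id>')
#
# retrieve_uri() prefixes the link-local IP (169.254.170.2), retries up to
# RETRY_ATTEMPTS times with SLEEP_TIME seconds between attempts, and returns
# the parsed JSON body.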
|
[
"cgraham@2ndwatch.com"
] |
cgraham@2ndwatch.com
|
3af2f2b6fe9a1ee356e68e7df54220cbae9a8228
|
5c5ded612c2f041c6950fff15768411f4f6b66e1
|
/samples/cityscape/coco.py
|
e7ff5414591dd73bb49374d8633bf4e7c7b28f9b
|
[
"MIT"
] |
permissive
|
Zhuoyao1012/Mask_RCNN
|
09d3a2b1fc28b3abe361a1ae872052af4d51a1c9
|
0f7e99dd35c2eeae5f12503b7c1d910f345c0c69
|
refs/heads/master
| 2022-06-06T15:16:55.100909
| 2018-09-28T03:17:35
| 2018-09-28T03:17:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,594
|
py
|
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights. Also auto download COCO dataset
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet --download=True
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
# Run COCO evaluation on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import sys
import time
import numpy as np
import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug)
#Set GPU
os.environ["CUDA_VISIBLE_DEVICES"]='1'
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2017"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "coco"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Uncomment to train on 8 GPUs (default is 1)
# GPU_COUNT = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
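# Hedged sketch (not part of the original file): shrinking the config for a
# smaller GPU. IMAGE_MIN_DIM/IMAGE_MAX_DIM are assumed to exist on the base
# Config class, as in the stock Mask R-CNN implementation.
# class SmallGPUCocoConfig(CocoConfig):
#     IMAGES_PER_GPU = 1    # one image per batch for ~6GB of GPU memory
#     IMAGE_MIN_DIM = 512
#     IMAGE_MAX_DIM = 512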
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
class_map=None, return_coco=False, auto_download=False):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (train, val, minival, valminusminival)
year: What dataset year to load (2014, 2017) as a string, not an integer
class_ids: If provided, only loads images that have the given classes.
class_map: TODO: Not implemented yet. Supports mapping classes from
different datasets to the same class ID.
return_coco: If True, returns the COCO object.
auto_download: Automatically download and unzip MS-COCO images and annotations
"""
if auto_download is True:
self.auto_download(dataset_dir, subset, year)
coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
if subset == "minival" or subset == "valminusminival":
subset = "val"
image_dir = "{}/{}{}".format(dataset_dir, subset, year)
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco.getCatIds())
# All images or a subset?
if class_ids:
image_ids = []
for id in class_ids:
image_ids.extend(list(coco.getImgIds(catIds=[id])))
# Remove duplicates
image_ids = list(set(image_ids))
else:
# All images
image_ids = list(coco.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco
def auto_download(self, dataDir, dataType, dataYear):
"""Download the COCO dataset/annotations if requested.
dataDir: The root directory of the COCO dataset.
dataType: What to load (train, val, minival, valminusminival)
dataYear: What dataset year to load (2014, 2017) as a string, not an integer
Note:
For 2014, use "train", "val", "minival", or "valminusminival"
For 2017, only "train" and "val" annotations are available
"""
# Setup paths and file names
if dataType == "minival" or dataType == "valminusminival":
imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
else:
imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
# print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
# Create main folder if it doesn't exist yet
if not os.path.exists(dataDir):
os.makedirs(dataDir)
# Download images if not available locally
if not os.path.exists(imgDir):
os.makedirs(imgDir)
print("Downloading images to " + imgZipFile + " ...")
with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + imgZipFile)
with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
zip_ref.extractall(dataDir)
print("... done unzipping")
print("Will use images in " + imgDir)
# Setup annotations data paths
annDir = "{}/annotations".format(dataDir)
if dataType == "minival":
annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
annFile = "{}/instances_minival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
unZipDir = annDir
elif dataType == "valminusminival":
annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
annFile = "{}/instances_valminusminival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
unZipDir = annDir
else:
annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
unZipDir = dataDir
# print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
# Download annotations if not available locally
if not os.path.exists(annDir):
os.makedirs(annDir)
if not os.path.exists(annFile):
if not os.path.exists(annZipFile):
print("Downloading zipped annotations to " + annZipFile + " ...")
with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + annZipFile)
with zipfile.ZipFile(annZipFile, "r") as zip_ref:
zip_ref.extractall(unZipDir)
print("... done unzipping")
print("Will use annotations in " + annFile)
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "coco":
return super(CocoDataset, self).load_mask(image_id)
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"coco.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() < 1:
continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
# smaller than the given dimensions. If so, resize it.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
mask = np.stack(instance_masks, axis=2).astype(bool)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(CocoDataset, self).load_mask(image_id)
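# Shape sanity check for load_mask (illustrative values): an image of size
# 480x640 with 3 instances yields mask.shape == (480, 640, 3) and
# class_ids.shape == (3,); crowd instances carry negative class IDs.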
def image_reference(self, image_id):
"""Return a link to the image in the COCO Website."""
info = self.image_info[image_id]
if info["source"] == "coco":
return "http://cocodataset.org/#explore?id={}".format(info["id"])
else:
super(CocoDataset, self).image_reference(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, height, width)
m = maskUtils.decode(rle)
return m
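# Hedged example of the polygon -> RLE -> mask path above (annotation
# invented for illustration):
#   ann = {'segmentation': [[1, 1, 5, 1, 5, 5, 1, 5]], 'iscrowd': 0}
#   annToMask(ann, height=8, width=8)
# returns an 8x8 uint8 array whose 1s approximately fill the square polygon
# spanning (1, 1) to (5, 5).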
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
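# For reference, one entry produced above looks like (values invented):
#   {"image_id": 42, "category_id": 18, "bbox": [x, y, width, height],
#    "score": 0.97, "segmentation": <compressed RLE dict>}
# Note the index shuffling: model ROIs are [y1, x1, y2, x2] while COCO
# expects [x, y, w, h], hence bbox = [x1, y1, x2 - x1, y2 - y1].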
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
"""Runs official COCO evaluation.
dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Prediction time: {}. Average {}/image".format(
t_prediction, t_prediction / len(image_ids)))
print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate' on MS COCO")
parser.add_argument('--dataset', required=True,
metavar="/path/to/coco/",
help='Directory of the MS-COCO dataset')
parser.add_argument('--year', required=False,
default=DEFAULT_DATASET_YEAR,
metavar="<year>",
help='Year of the MS-COCO dataset (2014 or 2017) (default=2017)')
parser.add_argument('--model', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=500,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip MS-COCO files (default=False)',
type=bool)
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = CocoConfig()
else:
class InferenceConfig(CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.model.lower() == "coco":
model_path = COCO_MODEL_PATH
elif args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()
elif args.model.lower() == "imagenet":
# Start from ImageNet trained weights
model_path = model.get_imagenet_weights()
else:
model_path = args.model
# Load weights
print("Loading weights ", model_path)
model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
# validation set, as in the Mask R-CNN paper.
dataset_train = CocoDataset()
dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download)
if args.year in '2014':
dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
dataset_train.prepare()
# Validation dataset
dataset_val = CocoDataset()
val_type = "val" if args.year in '2017' else "minival"
dataset_val.load_coco(args.dataset, val_type, year=args.year, auto_download=args.download)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=40,
layers='heads',
augmentation=augmentation)
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=120,
layers='4+',
augmentation=augmentation)
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=160,
layers='all',
augmentation=augmentation)
elif args.command == "evaluate":
# Validation dataset
dataset_val = CocoDataset()
val_type = "val" if args.year in '2017' else "minival"
coco = dataset_val.load_coco(args.dataset, val_type, year=args.year, return_coco=True, auto_download=args.download)
dataset_val.prepare()
print("Running COCO evaluation on {} images.".format(args.limit))
evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.limit))
else:
print("'{}' is not recognized. "
"Use 'train' or 'evaluate'".format(args.command))
|
[
"you@example.com"
] |
you@example.com
|
8d447231b19c3b5d99fbaaa3d41d5e0ed349f686
|
b909af8c6b33e51fbf74534f473dc10b6bf9ab02
|
/src/Python/transmitter/utilities/__init__.py
|
83252fd6fd2baa7e04b5ba97c1ace8612b23cfc1
|
[] |
no_license
|
abbad/NetProb
|
a0243a2d258cb3e0f0eb33a158b2a4b94fe2415d
|
7b8234625bbc654e5a80b8acde2cacd2b1abc3d4
|
refs/heads/master
| 2021-01-18T21:44:23.403438
| 2017-08-12T03:07:10
| 2017-08-12T03:07:10
| 9,431,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
from tcp_udp_server_unnamed_pipes_utilities import sendMessage, getOsFileHandle, getHandleDuplicate
from udp_server_win32_named_pipes import readFromPipe
from tcp_server_win32_named_pipes import writeToPipe
from file_io import *
|
[
"ahm.abbad@gmail.com"
] |
ahm.abbad@gmail.com
|
8f8f500352c74c4664b97cf6d4c9dfc3267e7db8
|
cd8d2740d040a10b9d07cc4cd36a86f03a7e8f5b
|
/python/strings/ceasar_cipher.py
|
d6bcbcc44a100c29bccb25cf516270a2f6122d84
|
[
"MIT"
] |
permissive
|
sglavoie/code-snippets
|
9d9cf14c237ee6e7484fbf88385d427207c626f6
|
d867810e1f9bfd57acac7e02a4c02a9d905f121a
|
refs/heads/main
| 2023-05-31T20:56:54.655187
| 2023-05-17T03:04:28
| 2023-05-17T03:04:28
| 178,996,026
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
"""Ceasar Cipher"""
import string
def build_shift_dict(shift) -> dict:
"""
Creates a dictionary that can be used to apply a cipher to a letter.
The dictionary maps every uppercase and lowercase letter to the
character shifted down the alphabet by the input shift. The dictionary
has exactly 52 keys: all the uppercase letters plus all the lowercase
letters, and nothing else.
shift (integer): the amount by which to shift every letter of the
alphabet. 0 <= shift < 26
Returns: a dictionary mapping a letter (string) to
another letter (string).
"""
lower_letters = string.ascii_lowercase
lower_dict = {}
for i in range(1, 27):
lower_dict[i] = lower_letters[i - 1]
lower_shifted_dict = lower_dict.copy()
for key in lower_dict:
if shift + key > 26:
lower_shifted_dict[key] = lower_dict[shift + key - 26]
else:
lower_shifted_dict[key] = lower_dict[shift + key]
for key, value in lower_dict.items():
lower_shifted_dict[value] = lower_shifted_dict.pop(key)
upper_letters = string.ascii_uppercase
upper_dict = {}
for i in range(1, 27):
upper_dict[i] = upper_letters[i - 1]
upper_shifted_dict = upper_dict.copy()
for key in upper_dict:
if shift + key > 26:
upper_shifted_dict[key] = upper_dict[shift + key - 26]
else:
upper_shifted_dict[key] = upper_dict[shift + key]
for key, value in upper_dict.items():
upper_shifted_dict[value] = upper_shifted_dict.pop(key)
lower_shifted_dict.update(upper_shifted_dict)
return lower_shifted_dict
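# A shorter equivalent of build_shift_dict, shown as a sketch only: build the
# mapping directly with modular arithmetic instead of re-keying two dicts.
#   {c: chr((ord(c) - base + shift) % 26 + base)
#    for base in (ord('a'), ord('A'))
#    for c in (chr(base + i) for i in range(26))}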
def apply_shift(string_to_encrypt, shift) -> str:
"""
Applies the Caesar Cipher to the string with the input shift.
Creates a new string that is shifted down the alphabet by some number of
characters determined by the input shift.
shift (integer): the shift with which to encrypt the message.
0 <= shift < 26
Returns: the message text (string) in which every character is shifted
down the alphabet by the input shift
"""
shifted_message = []
shifted_dict = build_shift_dict(shift)
for char in string_to_encrypt:
if char in shifted_dict:
shifted_message.append(shifted_dict[char])
else:
shifted_message.append(char)
return "".join(shifted_message)
def read_and_solve():
string_to_change = input()
shift_to_apply = int(input())
encrypted_string = apply_shift(string_to_change, shift_to_apply)
return encrypted_string
if __name__ == "__main__":
print(read_and_solve())
|
[
"sgdlavoie@gmail.com"
] |
sgdlavoie@gmail.com
|
ad62f729834cd96eac8c6af29169f718ab4f846f
|
b15ff9e2d0f6d8a74a19beb93c07943f706e3b8e
|
/events/migrations/0001_initial.py
|
998f4256b16c901ed6cb4fbc172f45d282da1432
|
[] |
no_license
|
solheng/govtrack.us-web
|
24f0f6cc7c21a0924d0b5c3479bbb05adec62337
|
fa34ad121437aa83fc7fba20018117614a3bc2e5
|
refs/heads/master
| 2020-03-11T01:48:36.154547
| 2018-04-14T19:03:34
| 2018-04-14T19:09:05
| 129,702,267
| 1
| 0
| null | 2018-04-16T07:25:54
| 2018-04-16T07:25:54
| null |
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-02 07:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('source_object_id', models.PositiveIntegerField()),
('eventid', models.CharField(max_length=32)),
('when', models.DateTimeField(db_index=True)),
('seq', models.IntegerField()),
],
options={
'ordering': ['-id'],
},
),
migrations.CreateModel(
name='Feed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('feedname', models.CharField(db_index=True, max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='SubscriptionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('is_default', models.BooleanField(default=False)),
('email', models.IntegerField(choices=[(0, b'No Email Updates'), (1, b'Daily'), (2, b'Weekly')], default=0)),
('last_event_mailed', models.IntegerField(blank=True, null=True)),
('last_email_sent', models.DateTimeField(blank=True, null=True)),
('public_id', models.CharField(blank=True, db_index=True, max_length=16, null=True)),
('trackers', models.ManyToManyField(related_name='tracked_in_lists', to='events.Feed')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_lists', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='event',
name='feed',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Feed'),
),
migrations.AddField(
model_name='event',
name='source_content_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
),
migrations.AlterUniqueTogether(
name='subscriptionlist',
unique_together=set([('user', 'name')]),
),
migrations.AlterUniqueTogether(
name='event',
unique_together=set([('feed', 'id'), ('feed', 'when', 'source_content_type', 'source_object_id', 'eventid'), ('when', 'source_content_type', 'source_object_id', 'seq', 'feed'), ('source_content_type', 'source_object_id', 'eventid', 'feed')]),
),
]
|
[
"jt@occams.info"
] |
jt@occams.info
|
2d1768280b834feee4f6b28b65ef8b07253a49c1
|
eaf84f5f98b274d0d03a97d7c3abe78b3f8c7a07
|
/autoServiceCostMenu.py
|
6b2eae82e17bbd055d8cd8f91bb734645ee5a2a2
|
[] |
no_license
|
mikeCenters/_pythonTinyThings
|
84845c52a7c679472f79e84f567fa12971545ed4
|
8c763c153d5b1b8977effd56cf73c9579c03d131
|
refs/heads/master
| 2020-04-02T14:57:24.742841
| 2018-11-06T22:01:28
| 2018-11-06T22:01:28
| 154,545,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,450
|
py
|
# Auto service cost.
# This program requires variables to store the costs of various services.
# The best practice is to keep the code simple and easy to understand.
oil_change = 35
tire_rotation = 19
car_wax = 12
car_wash = 7
# Print service menu and input option.
# This program requires an interactive menu for user to determine needs.
print('Davy\'s auto shop services')
print('Oil change -- $35')
print('Tire rotation -- $19')
print('Car wash -- $7')
print('Car wax -- $12')
print()
option1 = input('Select first service: \n\n')
option2 = input('Select second service: ')
print()
print()
print()
# Print user options with cost and assign costs to new variables.
# This program requires the confirmation of choice and the creation of variables
# to allow a calculation of the total cost.
# The best practice is to keep the code simple and easy to understand, also
# to test each option.
option1cost = 0
option2cost = 0
print('Davy\'s auto shop invoice\n')
if option1 == 'Oil change':
print('Service 1: Oil change, $%i' % (oil_change))
elif option1 == 'Tire rotation':
print('Service 1: Tire rotation, $%i' % (tire_rotation))
elif option1 == 'Car wax':
print('Service 1: Car wax, $%i' % (car_wax))
elif option1 == 'Car wash':
print('Service 1: Car wash, $%i' % (car_wash))
elif option1 == '-':
print('Service 1: No service')
# Assign cost for option1 to new variable
if option1 == 'Oil change':
option1cost = oil_change
elif option1 == 'Tire rotation':
option1cost = tire_rotation
elif option1 == 'Car wax':
option1cost = car_wax
elif option1 == 'Car wash':
option1cost = car_wash
# Print the cost of option2
if option2 == 'Oil change':
print('Service 2: Oil change, $%i' % (oil_change))
elif option2 == 'Tire rotation':
print('Service 2: Tire rotation, $%i' % (tire_rotation))
elif option2 == 'Car wax':
print('Service 2: Car wax, $%i' % (car_wax))
elif option2 == 'Car wash':
print('Service 2: Car wash, $%i' % (car_wash))
elif option2 == '-':
print('Service 2: No service')
# Assign cost for option2 to new variable
if option2 == 'Oil change':
option2cost = oil_change
elif option2 == 'Tire rotation':
option2cost = tire_rotation
elif option2 == 'Car wax':
option2cost = car_wax
elif option2 == 'Car wash':
option2cost = car_wash
# Total cost of service.
total_cost = int(option1cost + option2cost)
print('\nTotal: $%i' % (total_cost))
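# A more compact alternative to the if/elif chains above (sketch only, not
# wired into this script): look prices up in a dict, defaulting to 0 for '-'.
#   prices = {'Oil change': 35, 'Tire rotation': 19, 'Car wax': 12, 'Car wash': 7}
#   option1cost = prices.get(option1, 0)
#   option2cost = prices.get(option2, 0)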
|
[
"michael.centers@gmail.com"
] |
michael.centers@gmail.com
|
16ffd54c84dbe8d23a393b2e76c4bba477b84d89
|
7cbae4f3d7c1258d8d91606f0b69d39192200df7
|
/venv/wash.py
|
6f9f67731fd0d91109fa58240f69cae3209d1bf0
|
[] |
no_license
|
githubssj/NaiveBayes
|
4b970d4eeb3127003127c4ae128ae97ccaa2a453
|
4bba318fbb2426a1220cdcc2bcc3b12c0085fbdc
|
refs/heads/master
| 2023-02-12T18:43:18.104477
| 2021-01-07T06:40:42
| 2021-01-07T06:52:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
import os
NUM=5000
def wash_all_file(rootpath):
def wash_file(path, type):
files = os.listdir(path)
i = 1
for file in files:
try:
used_name = path + '\\' + file
## the file name includes its extension, so keep it when renaming
new_name = path + '\\' + type + str(i) + '.' + file.split('.')[1]
except IndexError:
## skip hidden/system files that have no extension
continue
if i <= NUM:
os.rename(used_name, new_name)
else:
os.remove(used_name)
i += 1
contents=os.listdir(rootpath) # category folders: computers, worries, health, ...
# print("contents=",contents)
for each in contents: # each is one category, e.g. computers, worries, health
if os.path.isdir(rootpath+'\\'+each): # if it is a directory, descend into it
types=os.listdir(rootpath+'\\'+each)
for type in types: # type is either 'test' or 'train'
if os.path.isdir(rootpath+'\\'+each+'\\'+type):
# articles=os.listdir(type)
wash_file(rootpath+'\\'+each+'\\'+type,type)
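# Example of the renaming scheme above: the files in <root>\<category>\train
# become train1.<ext>, train2.<ext>, ... up to NUM; any files beyond NUM are
# deleted.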
if __name__ == '__main__':
wash_all_file("D:\机器学习数据\sy发的数据\clean_data")
|
[
"929946014@qq.com"
] |
929946014@qq.com
|
31d4ad5bb495fdc6d0ce9030a888b6692ed38dfe
|
df068be75706b25a61609a857161eda3ba951dae
|
/sbh/gshj/dw_Pg_sql/__init__.py
|
513b766dba997e33e7408c07683aa3f64588ce71
|
[] |
no_license
|
songweirong19931020/tensorflow
|
2a24372de47ed72ee8627fa0c6a2cfac7439dc82
|
e42ddc319089f0c872548c046615399dbc3ccec7
|
refs/heads/master
| 2022-12-10T03:46:31.632772
| 2020-09-04T01:15:11
| 2020-09-04T01:15:12
| 261,907,962
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: __init__.py
Description :
Author : CBH
date: 2020/5/23 09: 46
Ide: PyCharm
-------------------------------------------------
Change Activity:
2020/5/23 09: 46:
-------------------------------------------------
"""
|
[
"wrsong@cenboomh.com"
] |
wrsong@cenboomh.com
|
6dc923ae93ea58d12f86d61b115c21de1bb36376
|
adc3c961b2e89c46b4ca9dcabae9d36d63fc472a
|
/Interview_Prep_Kit/Dynamic_Programming/Maximum_Array_Sum.py
|
042a6fb771d28883304dc7f42c1ccf80626fe2dc
|
[] |
no_license
|
hall500/hackerrank-codes
|
289cc1f913570cddea3574b1519a801c0cd15dd2
|
2fcbde7c8e1c4fe7f46cab9803a281da29732126
|
refs/heads/master
| 2023-08-24T20:37:56.349795
| 2023-08-15T16:47:59
| 2023-08-15T16:47:59
| 295,524,311
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the maxSubsetSum function below.
def maxSubsetSum(arr):
dp = {} # key : max index of subarray, value = sum
if len(arr) == 1: # guard: the dp seed below needs at least two elements
return arr[0]
dp[0], dp[1] = arr[0], max(arr[0], arr[1])
for i, num in enumerate(arr[2:], start=2):
dp[i] = max(dp[i-1], dp[i-2]+num, dp[i-2], num)
return dp[len(arr)-1]
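# Worked example of the recurrence (HackerRank-style input):
#   arr = [3, 7, 4, 6, 5]
#   dp[0]=3, dp[1]=7, dp[2]=max(7, 3+4, 3, 4)=7,
#   dp[3]=max(7, 7+6, 7, 6)=13, dp[4]=max(13, 7+5, 7, 5)=13
# Answer 13 = 7 + 6, a maximal sum over non-adjacent elements.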
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = list(map(int, input().rstrip().split()))
res = maxSubsetSum(arr)
fptr.write(str(res) + '\n')
fptr.close()
|
[
"noreply@github.com"
] |
hall500.noreply@github.com
|
14733fb62e738733a2e27ef980ecc577db195029
|
41d92f05924672e7138e14d8daefbf6850b7aeb2
|
/task1/settings.py
|
e7b271f81a9c5fe5676a8bd182924753c1ccfb99
|
[] |
no_license
|
shaunybrown/SMB_tasks
|
161d48c0879c68794d7bf133aded6e07b251b729
|
feb13d9d0ff4db6fee30c4f4d9c1a4ddb179a6bc
|
refs/heads/master
| 2020-08-21T22:48:19.640183
| 2019-10-30T19:23:08
| 2019-10-30T19:23:08
| 216,265,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
"""
Django settings for task1 project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#$k%77awa((6l4rd)o4z@)jox)_2qd)+mc*vl(%7vv48efl^2a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'task1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'task1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"Shaun.Murray-Brown@os.uk"
] |
Shaun.Murray-Brown@os.uk
|
acf7f16e26fa7ee61ef3572e21836a8c7e3eb878
|
1a97f91088be127845b1bf537c6e964ba524371d
|
/morl/core/__init__.py
|
e3edcefa82f0841212da5e1f5009a4dee5f94131
|
[
"Apache-2.0"
] |
permissive
|
universea/MoRL
|
d2f178e05cdcc9272f2458b065e4d0503e63a4d5
|
499311cf41f9ef45b6941289ecbfb77cfd9288ec
|
refs/heads/main
| 2023-03-02T14:29:02.531037
| 2021-02-16T09:32:46
| 2021-02-16T09:32:46
| 335,837,095
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
from morl.core.agent_base import *
from morl.core.model_base import *
from morl.core.algorithm_base import *
|
[
"287827688@qq.com"
] |
287827688@qq.com
|
cdaefa05c63a331e9d9b06d9c2cfae2beb33afcf
|
edcf205314644a368c81ae273a352db410cf7124
|
/setup.py
|
77ffcf1360fa9e6ecbc81246a7acfe738b2c4fa4
|
[
"MIT"
] |
permissive
|
STUDITEMPS/django_panel_views
|
6ae1c75d069f0aca1900d08b026d7662134ec0d1
|
4c308ce513362be5f3344c425f8eb5404b7ff651
|
refs/heads/master
| 2021-01-10T15:34:57.894647
| 2015-12-02T12:46:27
| 2015-12-02T12:46:27
| 46,286,429
| 3
| 0
| null | 2015-12-02T12:46:30
| 2015-11-16T16:15:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,808
|
py
|
# -*- coding: utf-8 -*-
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='panelviews',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0',
description='studitemps panelviews',
long_description=long_description,
# The project's main homepage.
url='http://studitemps.tech',
# Author details
author='studitemps.tech',
author_email='kiss-develompent@studitemps.de',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='django panelviews',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['django<1.9'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
# 'dev': ['check-manifest'],
'test': ['coverage', 'django<1.7'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'sample': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
},
)
|
[
"paul.wachendorf@jobmensa.de"
] |
paul.wachendorf@jobmensa.de
|
64a1013439e23ab7f9b94cda28275eb78c9269a1
|
bf70e5e39b1eb5fa8be83fec87f1957493c84e87
|
/Python/189 Rotate Array.py
|
032b56faf9ab9c124a550bf07353ecb8f1c80eed
|
[] |
no_license
|
ericsyc/leetcode
|
76273ba23b6513e83b6042599ea07902a769cff9
|
8256a65ef25530c34f42f1f98f1e9f9fe634ece1
|
refs/heads/master
| 2020-04-19T14:41:41.610041
| 2019-04-09T17:36:34
| 2019-04-09T17:36:34
| 152,906,586
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
# Rotate Array
class Solution(object):
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
k %= len(nums)
nums[:] = nums[-k:]+nums[:-k]
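# Example of the slicing trick: nums = [1,2,3,4,5,6,7], k = 3
#   nums[-3:] = [5,6,7]; nums[:-3] = [1,2,3,4] -> [5,6,7,1,2,3,4]
# k %= len(nums) also makes k == 0 safe: nums[-0:] is the whole list and
# nums[:-0] == nums[:0] is empty, so the array is left unchanged.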
|
[
"noreply@github.com"
] |
ericsyc.noreply@github.com
|
a097fcfb7b812f08fe9988fd32627e87816bf570
|
d0cc49f673eeb0126c9730d8e20b932bbb489547
|
/StockPriceStreamlitWebApp.py
|
628c5321c8212816b382e02d643c504b70df079f
|
[] |
no_license
|
OfficialNMN/Time-Series-Analysis
|
b755179300e6fb2e9fb38619d873bef5f6e97f0a
|
bc8ab5914319f2dace8cc8f414e77d9aa24c450e
|
refs/heads/main
| 2023-04-29T05:48:21.685523
| 2021-05-25T18:24:14
| 2021-05-25T18:24:14
| 323,315,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
import streamlit as st
from datetime import date
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
START = "2015-01-01"
TODAY = date.today().strftime("%Y-%m-%d")
st.title('Stock Forecast App')
stocks = ('GOOG', 'AAPL', 'MSFT', 'GME')
selected_stock = st.selectbox('Select dataset for prediction', stocks)
n_years = st.slider('Years of prediction:', 1, 4)
period = n_years * 365
@st.cache
def load_data(ticker):
data = yf.download(ticker, START, TODAY)
data.reset_index(inplace=True)
return data
data_load_state = st.text('Loading data...')
data = load_data(selected_stock)
data_load_state.text('Loading data... done!')
st.subheader('Raw data')
st.write(data.tail())
# Plot raw data
def plot_raw_data():
fig = go.Figure()
fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True)
st.plotly_chart(fig)
plot_raw_data()
# Predict forecast with Prophet.
df_train = data[['Date','Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
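# Prophet requires exactly these column names in the training frame:
# 'ds' (the datestamp) and 'y' (the numeric series to forecast).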
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
# Show and plot forecast
st.subheader('Forecast data')
st.write(forecast.tail())
st.write(f'Forecast plot for {n_years} years')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write("Forecast components")
fig2 = m.plot_components(forecast)
st.write(fig2)
|
[
"noreply@github.com"
] |
OfficialNMN.noreply@github.com
|
4e5e62465005abc49f96395bbfd2d23b49bc60de
|
577c0ca01734c23f24c2f42844890a040723c883
|
/compare_gan/src/gans/GAN.py
|
fe8e6c3aa91b47ecf894c10738071209d7ff6db5
|
[
"Apache-2.0"
] |
permissive
|
xiao7199/compare_gan
|
488b2ebf0528e0fb52908e4da01e3dfba81b0a28
|
ac821a979ccb5ed46e0a441f87abc9bfd3c37417
|
refs/heads/master
| 2020-06-06T13:07:18.155959
| 2019-07-25T20:13:48
| 2019-07-25T20:13:48
| 192,748,689
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,490
|
py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the MM-GAN and NS-GAN (https://arxiv.org/abs/1406.2661)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.src.gans.abstract_gan import AbstractGAN
import tensorflow as tf
class ClassicGAN(AbstractGAN):
"""Original Generative Adverserial Networks."""
def __init__(self, model_name, **kwargs):
super(ClassicGAN, self).__init__(model_name, **kwargs)
def build_model(self, is_training=True):
image_dims = [self.input_height, self.input_width, self.c_dim]
batch_size = self.batch_size
# Input images.
self.inputs = tf.placeholder(
tf.float32, [batch_size] + image_dims, name="real_images")
# Noise vector.
self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z")
# Discriminator output for real images.
D_real, D_real_logits, _ = self.discriminator(
self.inputs, is_training=is_training, reuse=False)
# Discriminator output for fake images.
G = self.generator(self.z, is_training=is_training, reuse=False)
D_fake, D_fake_logits, _ = self.discriminator(
G, is_training=is_training, reuse=True)
# Loss on real and fake data.
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_real_logits, labels=tf.ones_like(D_real)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_fake_logits, labels=tf.zeros_like(D_fake)))
# Total discriminator loss.
self.d_loss = d_loss_real + d_loss_fake
# Total generator loss.
if self.model_name == "GAN":
self.g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_fake_logits, labels=tf.ones_like(D_fake)))
elif self.model_name == "GAN_MINMAX":
self.g_loss = -d_loss_fake
else:
assert False, "Unknown GAN model_name: %s" % self.model_name
# Divide trainable variables into a group for D and group for G.
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if "discriminator" in var.name]
g_vars = [var for var in t_vars if "generator" in var.name]
self.check_variables(t_vars, d_vars, g_vars)
# Define optimization ops.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self.d_optim = tf.train.AdamOptimizer(
self.learning_rate, beta1=self.beta1, name="d_adam").minimize(
self.d_loss, var_list=d_vars)
self.g_optim = tf.train.AdamOptimizer(
self.learning_rate, beta1=self.beta1, name="g_adam").minimize(
self.g_loss, var_list=g_vars)
# Store testing images.
self.fake_images = self.generator(self.z, is_training=False, reuse=True)
# Setup summaries.
d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])
class GAN(ClassicGAN):
"""Non-saturating Generative Adverserial Networks.
The loss for the generator is computed using the log trick. That is,
G_loss = -log(D(fake_images)) [maximizes log(D)]
"""
def __init__(self, **kwargs):
super(GAN, self).__init__(model_name="GAN", **kwargs)
class GAN_MINMAX(ClassicGAN):
"""Generative Adverserial Networks with the standard min-max loss.
The loss for the generator is computed as:
G_loss = - ( (1-0) * -log(1 - D(fake_images))
= log (1 - D(fake_images)) [ minimize log (1 - D) ]
"""
def __init__(self, **kwargs):
super(GAN_MINMAX, self).__init__(model_name="GAN_MINMAX", **kwargs)
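# Quick numeric check of the two generator losses (illustrative): if
# D(fake) = 0.1, the NS-GAN loss is -log(0.1) ~= 2.30 (a strong gradient),
# while the min-max loss is log(1 - 0.1) ~= -0.105, which is nearly
# saturated -- the usual argument for preferring the non-saturating variant.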
|
[
"michalski@google.com"
] |
michalski@google.com
|
5564e15f7a77079f4415767b2fbea87616a3950f
|
fac03fe7baa7bd172de96b4f284791a37fe36a41
|
/moocup/users/test/test_views.py
|
aa836d4f832076477b965d4a60b8a20f3e5ce7fd
|
[] |
no_license
|
jessicapaz/moocup
|
998d07dbf86228ecb6bcbe2092912120b9b2f6a0
|
3c64d2f267522ec44977e493292e5a3aba491da3
|
refs/heads/master
| 2020-05-03T16:35:21.738746
| 2019-04-28T15:13:09
| 2019-04-28T15:13:09
| 177,447,643
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,997
|
py
|
from django.urls import reverse
from django.forms.models import model_to_dict
from django.contrib.auth.hashers import check_password
from django.utils import timezone
from nose.tools import ok_, eq_
from rest_framework.test import APITestCase
from rest_framework import status
from .factories import UserFactory
from ..models import User
class TestUserListTestCase(APITestCase):
"""
Tests /users list operations.
"""
def setUp(self):
self.url = reverse('user-list')
self.user_data = model_to_dict(UserFactory.build())
def test_post_request_with_no_data_fails(self):
response = self.client.post(self.url, {})
eq_(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_request_with_valid_data_succeeds(self):
response = self.client.post(self.url, self.user_data)
eq_(response.status_code, status.HTTP_201_CREATED)
user = User.objects.get(pk=response.data.get('id'))
eq_(user.email, self.user_data.get('email'))
ok_(check_password(self.user_data.get('password'), user.password))
class TestUserDetailTestCase(APITestCase):
"""
Tests /users detail operations.
"""
def setUp(self):
self.user = UserFactory()
self.url = reverse('user-detail', kwargs={'pk': self.user.pk})
self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.user.auth_token}')
def test_get_request_returns_a_given_user(self):
response = self.client.get(self.url)
eq_(response.status_code, status.HTTP_200_OK)
def test_put_request_updates_a_user(self):
new_email = 'test@gmail.com'
payload = {'email': new_email,
'is_active': True,
'is_superuser': True,
'timezone': 'America/Sao_Paulo'}
response = self.client.put(self.url, payload)
eq_(response.status_code, status.HTTP_200_OK)
user = User.objects.get(pk=self.user.id)
eq_(user.email, new_email)
|
[
"jessicamorim.42@gmail.com"
] |
jessicamorim.42@gmail.com
|
6280254084a901126c4a4ff3ebdf4cca75a837a8
|
8597eddefd259e514443144fe001bf8b38b3d899
|
/backend/service/data_source_writer_service.py
|
a4687645b1a8405f5b2160060b95e6efa5ac07d0
|
[] |
no_license
|
tonussi/fake-primary-backup
|
546687acb2f515db62533f5edceb7b4acaa4f203
|
d5f5a1ecae643867e96e8eba044d5e10c0807cba
|
refs/heads/master
| 2023-01-19T01:57:45.228342
| 2020-11-30T02:57:18
| 2020-11-30T02:57:18
| 313,307,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,250
|
py
|
import os
import csv
from model.helpers import DirGetter
from model.primary_backup import PrimaryBackup
class DataSourceWriterService(object):
"""
Fake simulator of a DB CSV writing service
"""
def __init__(self):
self.dir_getter = DirGetter()
self.csv_columns = ["No", "Name", "Country"]
self.csv_file = self.dir_getter.source_db_file_path()
self.file_directory = os.path.split(self.csv_file)[0]
self.file_name = os.path.split(self.csv_file)[1]
if not os.path.isdir(self.file_directory):
os.makedirs(self.file_directory)
def perform(self, params):
if len(params) == 0: return "nothing to insert"
if not self._write(params): return "io problem during the write stage"
if self._invoke_primary_backup_management(): return "successfully changed data and your data was replicated to other nodes"
return "processes failed to replicate data"
# private
def _invoke_primary_backup_management(self):
try:
PrimaryBackup().perform()
except IOError:
return False
return True
def _write(self, params):
success_check = False
if os.path.isfile(self.csv_file):
success_check = self._append_data(params)
else:
success_check = self._create_file() and self._append_data(params)
return success_check # boolean, so the truthiness check in perform() works
def _append_data(self, params):
try:
with open(self.csv_file, 'a+', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=self.csv_columns)
for row_tuple in params:
writer.writerow(row_tuple)
csvfile.close()
except IOError:
return False
return True
def _create_file(self):
try:
with open(self.csv_file, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=self.csv_columns)
writer.writeheader()
csvfile.close()
except IOError:
return False
return True
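# Hedged usage sketch (row values invented): each dict's keys must match
# self.csv_columns for DictWriter to accept it.
#   service = DataSourceWriterService()
#   service.perform([{"No": 1, "Name": "Ada", "Country": "UK"}])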
|
[
"lucas@prevision.com.br"
] |
lucas@prevision.com.br
|
644d311efc8df693020faec11b4438d3a424fef7
|
0b7907a93d26008bd5de43d0032d70ced86a5e34
|
/test/checkMiniTree_MuonPt.py
|
bc01860c2d48d7930b52c221436b5c2e1a22164e
|
[] |
no_license
|
UMN-CMS/cms-WR
|
ad66f39beb90ac534ba734ed5158b01a6884006b
|
31510cb8ebb7ddba0bf321353a19cc7f75215ed6
|
refs/heads/master
| 2021-01-23T09:42:06.512732
| 2016-11-29T05:01:16
| 2016-11-29T05:01:16
| 29,871,199
| 5
| 2
| null | 2016-10-04T13:53:54
| 2015-01-26T16:54:33
|
Python
|
UTF-8
|
Python
| false
| false
| 532
|
py
|
import ROOT
configfile = open("configs/2015-v1.conf")
config = dict( [ line.strip().split('=') for line in configfile])
datasets = {"DYJets_amctnlo":"", "DYJets_madgraph":"", "DYToEE_powheg":""}
for dataset in datasets:
print dataset
f = ROOT.TFile.Open("root://eoscms//eos/cms/store//user/shervin/ntuples/%s%s/unmerged-allRange.root" % (dataset, config["productionTAG"]))
tree = f.Get("miniTree_dytagandprobe/t")
for event in tree:
for mu_p4 in event.muons_p4:
if mu_p4.Pt() > 1000.0:
print event.event, mu_p4.Pt()
|
[
"phansen@physics.umn.edu"
] |
phansen@physics.umn.edu
|
75f8c87aa5d0485817a80b7ac8bea6eae894dc34
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/5011/104005011.py
|
2425f88aaf2b4a67d2fa2313a8628ec1a6580eee
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
from bots.botsconfig import *
from records005011 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'SA',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'N1', MIN: 1, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 10},
{ID: 'PER', MIN: 0, MAX: 1},
{ID: 'P1', MIN: 0, MAX: 1},
{ID: 'G47', MIN: 0, MAX: 1},
{ID: 'F9', MIN: 0, MAX: 1},
{ID: 'FOB', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'SL1', MIN: 1, MAX: 1},
{ID: 'N9', MIN: 1, MAX: 10},
{ID: 'TD4', MIN: 0, MAX: 10},
{ID: 'H1', MIN: 0, MAX: 1},
{ID: 'H2', MIN: 0, MAX: 1},
{ID: 'H3', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'M1', MIN: 0, MAX: 1},
{ID: 'C3', MIN: 0, MAX: 1},
{ID: 'X1', MIN: 0, MAX: 1},
{ID: 'X2', MIN: 0, MAX: 1},
{ID: 'NTE', MIN: 0, MAX: 10},
{ID: 'N1', MIN: 1, MAX: 2, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 10},
{ID: 'PER', MIN: 0, MAX: 1},
]},
{ID: 'L5', MIN: 1, MAX: 100, LEVEL: [
{ID: 'L0', MIN: 0, MAX: 10},
{ID: 'L1', MIN: 0, MAX: 10},
{ID: 'L4', MIN: 0, MAX: 10},
]},
{ID: 'ACS', MIN: 0, MAX: 100},
]},
{ID: 'L3', MIN: 0, MAX: 1},
{ID: 'NTE', MIN: 0, MAX: 10},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
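# Reading the structure above: each dict describes one X12 segment; MIN/MAX
# bound how many times it may occur, and LEVEL nests the segments that make
# up a loop (here the FOB loop contains N1 and L5 sub-loops).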
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
893a6e59c074c24a607c32e8131943c3df32e0bd
|
6472c4553c49a8c05103355ff53b1cbb7f025e8f
|
/pava/implementation/natives/sun/java2d/opengl/OGLSurfaceData.py
|
245ed034d789f6b8454a5b4fdb9cad2070e5893b
|
[
"MIT"
] |
permissive
|
laffra/pava
|
0b012e27c207a3e0f3ca772667b0c32168fe3123
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
refs/heads/master
| 2021-01-23T04:23:22.887146
| 2020-12-21T23:14:09
| 2020-12-21T23:14:09
| 86,191,143
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
def add_native_methods(clazz):
    # Native OpenGL surface-data entry points; each stub raises until a real
    # binding is supplied by the runtime.
    def initTexture__long__boolean__boolean__boolean__int__int__(a0, a1, a2, a3, a4, a5, a6):
raise NotImplementedError()
def initFBObject__long__boolean__boolean__boolean__int__int__(a0, a1, a2, a3, a4, a5, a6):
raise NotImplementedError()
def initFlipBackbuffer__long__(a0, a1):
raise NotImplementedError()
clazz.initTexture__long__boolean__boolean__boolean__int__int__ = initTexture__long__boolean__boolean__boolean__int__int__
clazz.initFBObject__long__boolean__boolean__boolean__int__int__ = initFBObject__long__boolean__boolean__boolean__int__int__
clazz.initFlipBackbuffer__long__ = initFlipBackbuffer__long__
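
# --- Editor's note (not part of the original file): add_native_methods wires
# the stubs onto a class object, so any call fails loudly until real OpenGL
# bindings are installed. A minimal usage sketch with a hypothetical
# placeholder class:
class _StubOGLSurfaceData(object):
    pass

add_native_methods(_StubOGLSurfaceData)
# _StubOGLSurfaceData.initFlipBackbuffer__long__(None, 0)  # -> NotImplementedError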
|
[
"iV29VQzQVT11"
] |
iV29VQzQVT11
|
3361d704c1b803e7aa88422c06d8f0e9057f951c
|
734a552186aece647e84f0e324bf1a0735ea45c6
|
/code/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_noDeepSupervision.py
|
8d46f170f9f65a96fe1ecb84a6f1c4835e8612f7
|
[] |
no_license
|
jyniki/experiments
|
59ff5df5022555efcb4b0b0f8eed6668b8c45148
|
8a02027c63ea52667d75665a40318289a4d567fb
|
refs/heads/master
| 2023-02-17T09:11:42.557733
| 2021-01-14T02:27:11
| 2021-01-14T02:27:11
| 327,834,493
| 1
| 0
| null | 2021-01-10T07:51:32
| 2021-01-08T07:50:04
|
Python
|
UTF-8
|
Python
| false
| false
| 8,131
|
py
|
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from network_architecture.generic_UNet import Generic_UNet
from network_architecture.initialization import InitWeights_He
from network_architecture.neural_network import SegmentationNetwork
from training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_patch_size, get_moreDA_augmentation
from training.dataloading.dataset_loading import unpack_dataset
from loss_functions.dice_loss import DC_and_CE_loss
from training.network_training.nnUNetTrainer import nnUNetTrainer
from training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from utils import softmax_helper
from torch import nn
import torch
class nnUNetTrainerV2_noDeepSupervision(nnUNetTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
def setup_DA_params(self):
"""
we leave out the creation of self.deep_supervision_scales, so it remains None
:return:
"""
if self.threeD:
self.data_aug_params = default_3D_augmentation_params
self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
if self.do_dummy_2D_aug:
self.data_aug_params["dummy_2D"] = True
self.print_to_log_file("Using dummy2d data augmentation")
self.data_aug_params["elastic_deform_alpha"] = \
default_2D_augmentation_params["elastic_deform_alpha"]
self.data_aug_params["elastic_deform_sigma"] = \
default_2D_augmentation_params["elastic_deform_sigma"]
self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
else:
self.do_dummy_2D_aug = False
if max(self.patch_size) / min(self.patch_size) > 1.5:
default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
self.data_aug_params = default_2D_augmentation_params
self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
if self.do_dummy_2D_aug:
self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
patch_size_for_spatialtransform = self.patch_size[1:]
else:
self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
patch_size_for_spatialtransform = self.patch_size
self.data_aug_params["scale_range"] = (0.7, 1.4)
self.data_aug_params["do_elastic"] = False
self.data_aug_params['selected_seg_channels'] = [0]
self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
def initialize(self, training=True, force_load_plans=False):
"""
removed deep supervision
:return:
"""
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
assert self.deep_supervision_scales is None
self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
classes=None,
pin_memory=self.pin_memory)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def initialize_network(self):
"""
changed deep supervision to False
:return:
"""
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def run_online_evaluation(self, output, target):
return nnUNetTrainer.run_online_evaluation(self, output, target)
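
# --- Editor's note (not part of the original file): the rotation ranges set in
# setup_DA_params convert degrees to radians as deg / 360 * 2 * pi. A minimal
# sketch of that conversion, with rotation_range as a hypothetical helper:
import math

def rotation_range(degrees):
    """Return the symmetric (-x, +x) rotation range in radians for +/- degrees."""
    radians = degrees / 360.0 * 2.0 * math.pi
    return (-radians, radians)

# rotation_range(30.0) -> approximately (-0.5236, 0.5236), matching the
# (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) tuples above.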
|
[
"1067087283@qq.com"
] |
1067087283@qq.com
|