Dataset schema — 29 fields per record, in the order they appear in the records below:

- blob_id: string, 40 chars
- directory_id: string, 40 chars
- path: string, 3–281 chars
- content_id: string, 40 chars
- detected_licenses: list, 0–57 items
- license_type: string, 2 classes
- repo_name: string, 6–116 chars
- snapshot_id: string, 40 chars
- revision_id: string, 40 chars
- branch_name: string, 313 classes
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 18.2k–668M, nullable
- star_events_count: int64, 0–102k
- fork_events_count: int64, 0–38.2k
- gha_license_id: string, 17 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 107 classes
- src_encoding: string, 20 classes
- language: string, 1 class
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 4–6.02M
- extension: string, 78 classes
- content: string, 2–6.02M chars
- authors: list, 1 item
- author: string, 0–175 chars
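A minimal sketch of how records with this schema might be consumed, assuming they are published as a Hugging Face dataset (the dataset id below is a placeholder):

import itertools
from datasets import load_dataset  # assumes the `datasets` package is installed

ds = load_dataset("org/python-files", split="train", streaming=True)  # placeholder id
for row in itertools.islice(ds, 100):
    # keep only permissively licensed, human-written, non-vendored files
    if row["license_type"] == "permissive" and not (row["is_generated"] or row["is_vendor"]):
        print(row["repo_name"], row["path"], row["length_bytes"])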
e3656c3a8b753864e8154ec4f8a46ac7e789e3b0
|
9decd5901a491d08e9235abc7fb8dade362d215e
|
/pastepwn/database/__init__.py
|
d67e426275658725e14ea82b809b9e95828cb0b9
|
[
"MIT"
] |
permissive
|
jonahrosenblum/pastepwn
|
b4e7644fefd289d8ffb2a1cc6e77224dd1545c46
|
26c9e426a195d403894f00638eca6c5687cbd959
|
refs/heads/master
| 2021-01-02T22:03:26.922322
| 2020-02-04T23:36:08
| 2020-02-04T23:36:08
| 239,809,524
| 0
| 0
|
MIT
| 2020-02-11T16:27:06
| 2020-02-11T16:27:05
| null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# -*- coding: utf-8 -*-
from .abstractdb import AbstractDB
from .mongodb import MongoDB
from .mysqldb import MysqlDB
from .sqlitedb import SQLiteDB
__all__ = ('AbstractDB', 'MongoDB', 'SQLiteDB', 'MysqlDB')
|
[
"d-Rickyy-b@users.noreply.github.com"
] |
d-Rickyy-b@users.noreply.github.com
|
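Given the `__all__` above, downstream code imports any backend from the package root — a one-line usage sketch:

from pastepwn.database import SQLiteDB, MongoDB  # re-exported by the __init__ above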
8b309898363353e6bd452f929f6c8eefda2058ae
|
3ad6bae048a9b9fad9ec11750171cefb68fca682
|
/amigaextractor.py
|
f07a9fb2de9e8e7f7b671a83a8c6e8c3d40e5a45
|
[] |
no_license
|
sonnenscheinchen/lha-tools
|
7e0432496a9f09401f61ad47d7cff0157fa3a3fd
|
d0c7dada9c7cedc48e9270b57376e1d270115a0b
|
refs/heads/master
| 2021-01-02T08:45:52.435986
| 2015-09-26T17:31:42
| 2015-09-26T17:31:42
| 24,298,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,863
|
py
|
#!/usr/bin/env python3
from lhafile import LhaFile
import os.path
class LhaExtractor(LhaFile):
def __init__(self, lha_file_name, *args, **kwargs):
super().__init__(lha_file_name, *args, **kwargs)
self.total_file_size = sum(
[ f.file_size for f in self.filelist ])
self.total_compress_size = sum(
[ f.compress_size for f in self.filelist ])
self.total_ratio = self.total_compress_size * 100 / (
self.total_file_size if self.total_file_size else 1)
def __write_metadata(self, file_to_extract, target_file, force):
filenote = self.NameToInfo[file_to_extract].comment or ''
protection_flags = self.NameToInfo[
file_to_extract].flag_bits or '----rwed'
if force is not True:
if filenote == '' and protection_flags == '----rwed':
return
file_date = self.NameToInfo[
file_to_extract].date_time.strftime('%F %T.00')
uaem_string = '{0} {1} {2}\n'.format(
protection_flags, file_date, filenote)
with open('{0}.uaem'.format(target_file), 'wt') as uaem_file:
uaem_file.write(uaem_string)
def list_files(self, verbose=False):
if not verbose:
for item in self.namelist():
print(item)
return
num_files = len(self.filelist)
print(' PACKED SIZE RATIO METHOD CRC STAMP PERM NAME')
print('------- ------- ------ ---------- ------------------- -------- --------------')
for item in self.filelist:
ratio = item.compress_size * 100 / (
item.file_size if item.file_size else 1)
print(str(item.compress_size).rjust(7), end=' ')
print(str(item.file_size).rjust(7), end=' ')
print('{0:.2f}'.format(ratio).rjust(5), end='% ')
print(item.compress_type.decode(), end=' ')
print(hex(item.CRC)[2:].zfill(4), end=' ')
print(item.date_time.strftime('%F %T'), end=' ')
print(item.flag_bits, end=' ')
print(item.filename)
if item.comment:
print(' : {0}'.format(item.comment))
print('------- ------- ------ ---------- ------------------- -------- --------------')
print(str(self.total_compress_size).rjust(7), end=' ')
print(str(self.total_file_size).rjust(7), end=' ')
print('{0:.2f}'.format(self.total_ratio).rjust(5), end='% ')
print('Created on: {0}'.format(item.create_system).center(30), end=' ')
print(str(num_files).rjust(10), end=' file(s)\n')
def extract(self, filename=None, uaem='auto',
dest='.', use_paths=True, overwrite=False, verbose=False):
if filename and filename not in self.namelist():
return False, 'File not found in archive'
if uaem not in ('auto', 'always', 'never'):
raise ValueError('uaem must be auto, always or never')
target_dir = os.path.realpath(dest)
if not os.path.isdir(target_dir):
return False, 'Target directory does not exist.'
if not filename:
files_to_extract = self.namelist()
else:
files_to_extract = [filename]
for file_to_extract in files_to_extract:
xfile = file_to_extract.replace('\\', os.sep).replace('/', os.sep)
if not use_paths:
target_file = os.path.join(target_dir, os.path.basename(xfile))
else:
target_file = os.path.join(target_dir, xfile)
os.makedirs(os.path.dirname(target_file), exist_ok=True)
if verbose:
print(target_file)
if not overwrite and os.path.isfile(target_file):
continue
if not self.NameToInfo[file_to_extract].directory == file_to_extract:
try:
data = self.read(file_to_extract)
except Exception as e:
return False, e.args[0]
with open(target_file, 'wb') as output_file:
output_file.write(data)
if uaem == 'always':
self.__write_metadata(file_to_extract, target_file, force=True)
elif uaem == 'auto':
self.__write_metadata(file_to_extract, target_file, force=False)
return True, ''
def testlha(self):
is_ok = True
for file_to_test in self.namelist():
print(file_to_test, end=' --> ')
try:
data = self.read(file_to_test)
except Exception as e:
print(e.args[0])
is_ok = False
else:
print('OK')
return is_ok
def printdir(self):
self.list_files(verbose=False)
if __name__ == '__main__':
pass
|
[
"jbl007@gmail.com"
] |
jbl007@gmail.com
|
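A hypothetical usage sketch for LhaExtractor (the archive name and destination directory are placeholders, and the destination directory must already exist):

lha = LhaExtractor('demo.lha')          # placeholder archive name
lha.list_files(verbose=True)            # tabular listing with sizes and ratios
ok, message = lha.extract(dest='out', uaem='auto', verbose=True)
if not ok:
    print('extraction failed:', message)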
5916cab7601c0e5c624fc9ceba4c6c08f6c28f4b
|
fa8a3be42546079930987e61dd24f417838d26f8
|
/torch_fcn/train_fcn.py
|
2ae4d66696bd40a1275cddf80f47614b27a17597
|
[] |
no_license
|
fsong666/nn
|
2f3fd29d25086f6922ea86d5427f5e22ec9fd61b
|
49b6cc7b76fd7558d60d23b6a9dd67e358d3c0d0
|
refs/heads/master
| 2023-01-18T22:14:02.651038
| 2020-11-29T11:35:36
| 2020-11-29T11:35:36
| 264,522,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,714
|
py
|
from FCN import FCN
import torch
import torch.nn as nn
import random
class TrainModel:
def __init__(self, training_data=None,
test_data=None, validation_data=None,
learning_rate=1.0, mini_batch_size=4, epochs=1, num_class=1):
self.num_class = num_class
self.training_data = training_data
print("mini_batch.shape=\n", len(training_data))
self.test_data = test_data
self.validation_data = validation_data
self.mini_batch_size = mini_batch_size
self.learning_rate = learning_rate
self.epochs = epochs
self.model = FCN()
self.loss_fn = nn.MSELoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.1)
self.optimizer2 = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
# early-stopping
self.patience = 5000
self.validation_frequency = 500 # [0, 499] = 500
def train(self):
n = len(self.training_data)
n_train_batches = int(n / self.mini_batch_size) + 1
best_validation_accuracy = 0
        # if accuracy improves by more than 0.1% (factor 1.001), raise self.patience
improvement_threshold = 1.001
        # consecutive drops in validation accuracy eventually trigger the stop
patience_increase = 5
stop = False
epoch = 0
while epoch < self.epochs and (not stop):
epoch = epoch + 1
random.shuffle(self.training_data)
mini_batches = [self.training_data[k:k + self.mini_batch_size]
for k in range(0, n, self.mini_batch_size)]
mini_batch_index = -1
for mini_batch in mini_batches:
mini_batch_index = mini_batch_index + 1
t = epoch * n_train_batches + mini_batch_index
losses = torch.zeros(1, requires_grad=True)
self.model.train()
for x, y in mini_batch:
x.requires_grad_(True)
activation = self.model(x)
loss = self.loss_fn(activation, y)
losses = losses + loss
losses = losses / self.mini_batch_size
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
if (t + 1) % self.validation_frequency == 0:
self.model.eval()
this_validation_accuracy = self.evaluate(self.validation_data)
print("iteration {0} accuracy: {1}".format(t, this_validation_accuracy))
if this_validation_accuracy > best_validation_accuracy:
if this_validation_accuracy > best_validation_accuracy * improvement_threshold:
self.patience = max(self.patience, t + patience_increase * self.validation_frequency)
print("patience increase:", self.patience)
best_validation_accuracy = this_validation_accuracy
if t >= self.patience:
stop = True
print("early-stop")
break
print("Epoch {0} Test accuracy : {1}".format(epoch, self.evaluate(self.test_data)))
def evaluate(self, test_data):
        # np.argmax() returns the index of the largest value in a multi-dimensional
        # array; the index is a flattened (1-D) index and the result is a scalar
        # in test_data, the label y is a scalar
test_results = [(torch.argmax(self.model(x)), y)
for (x, y) in test_data]
        # sum over the list of comparison results, e.g. [1, 0, 1, ...]
sum_value = sum(int(x == y) for (x, y) in test_results)
return sum_value / len(test_data)
|
[
"602079852@qq.com"
] |
602079852@qq.com
|
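A hypothetical wiring of this trainer (a sketch: the splits are assumed to be lists of `(x, y)` tensor pairs, and the variables train/val/test are placeholders):

# Placeholder data; each split is assumed to be a list of (x, y) tensor pairs.
trainer = TrainModel(training_data=train, validation_data=val, test_data=test,
                     learning_rate=0.01, mini_batch_size=4, epochs=10, num_class=10)
trainer.train()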
902324c23e786d6f2fd449172d60caee96d15dce
|
0a64c5f696235206ec493586a985c126283d96ec
|
/src/test.py
|
2c9631779f62c96ac0b911ff6a534c6d0823ad51
|
[] |
no_license
|
1987mxy/Jarvis
|
c59f48039aafd9817b9e3d1df1e2fb2f853f7ae6
|
011d2f6f578daac9af95916207867307b8deb8f8
|
refs/heads/master
| 2020-12-23T20:55:13.109994
| 2017-02-11T15:49:40
| 2017-02-11T15:49:40
| 58,322,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
# encoding: utf-8
'''
Created on 2016-01-09
@author: xiaoyong.mo
'''
class test(object):
a = None
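    # `a` is a class attribute shared by every instance: setA() rebinds it for
    # all objects, so after t1.setA(2) runs last, t2.showA() prints 2, not 3.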
def __init__(self):
pass
def setA(self, av):
test.a = av
def showA(self):
        print(test.a)
if __name__ == '__main__':
t1 = test()
t2 = test()
t2.setA(3)
t1.setA(2)
t2.showA()
|
[
"1987mxy@gmail.com"
] |
1987mxy@gmail.com
|
99e20206326032b464229b458f20e97520f807a9
|
39c78c15add32734d30eb1c27721b76994cb26d8
|
/healthy_salad_chooser/tests/test_views.py
|
aade7be3ed1f0aaff7f62e56d5045e3fdde7a4a0
|
[] |
no_license
|
Ooblioob/salad-bar-showcase
|
4812b17b5033f117f59ea0669a6d50279286f272
|
c406a249304d673458b1a7bbed52b3a106b69e51
|
refs/heads/master
| 2021-01-19T14:30:09.716877
| 2014-05-10T03:12:23
| 2014-05-10T03:12:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from django.test import TestCase
from django.core.urlresolvers import reverse
class TestViews(TestCase):
def test_home_page(self):
"""
Tests the home page appears with no errors
"""
resp = self.client.get(reverse('index'))
self.assertEqual(resp.status_code, 200)
|
[
"mike.brown@excella.com"
] |
mike.brown@excella.com
|
ade677f8e988685507a1c948ac73be652ce39b49
|
f0d3b759d9b0d2000cea2c291a4974e157651216
|
/apps/goods/migrations/0001_initial.py
|
303ea309f8cf6f7ee582bdc2901bd642b7490841
|
[] |
no_license
|
PYBPYB/Fresh-every-day
|
526265ae0a9b1fe8e8f8944e0320ea8a47b8571c
|
5b62fda9effe327a5da9ce45644bf44ee9d7108f
|
refs/heads/master
| 2020-04-12T14:39:31.325736
| 2019-05-31T02:31:54
| 2019-05-31T02:31:54
| 162,558,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,310
|
py
|
# Generated by Django 2.1.3 on 2018-11-26 09:10
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Goods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='商品SPU名称')),
('detail', tinymce.models.HTMLField(blank=True, verbose_name='商品详情')),
],
options={
'verbose_name': '商品SPU',
'verbose_name_plural': '商品SPU',
'db_table': 'df_goods',
},
),
migrations.CreateModel(
name='GoodsImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('image', models.ImageField(upload_to='goods', verbose_name='图片路径')),
],
options={
'verbose_name': '商品图片',
'verbose_name_plural': '商品图片',
'db_table': 'df_goods_image',
},
),
migrations.CreateModel(
name='GoodsSKU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='商品名称')),
('desc', models.CharField(max_length=250, verbose_name='商品简介')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='商品价格')),
('unite', models.CharField(max_length=20, verbose_name='商品单位')),
('image', models.ImageField(upload_to='goods', verbose_name='商品图片')),
('stock', models.IntegerField(default=1, verbose_name='商品库存')),
('sales', models.IntegerField(default=0, verbose_name='商品销量')),
('status', models.SmallIntegerField(choices=[(0, '下架'), (1, '上架')], default=1, verbose_name='是否上架')),
('goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品SPU')),
],
options={
'verbose_name': '商品',
'verbose_name_plural': '商品',
'db_table': 'df_goods_sku',
},
),
migrations.CreateModel(
name='GoodsType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='种类名称')),
('logo', models.CharField(max_length=20, verbose_name='标识')),
('image', models.ImageField(upload_to='type', verbose_name='商品类型图片')),
],
options={
'verbose_name': '商品种类',
'verbose_name_plural': '商品种类',
'db_table': 'df_goods_type',
},
),
migrations.CreateModel(
name='IndexGoodsBanner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('image', models.ImageField(upload_to='banner', verbose_name='图片')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
('sku', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsSKU', verbose_name='商品')),
],
options={
'verbose_name': '首页轮播商品',
'verbose_name_plural': '首页轮播商品',
'db_table': 'df_index_banner',
},
),
migrations.CreateModel(
name='IndexPromotionBanner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='活动名称')),
('url', models.URLField(verbose_name='活动链接')),
('image', models.ImageField(upload_to='banner', verbose_name='活动图片')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
],
options={
'verbose_name': '主页促销活动',
'verbose_name_plural': '主页促销活动',
'db_table': 'df_index_promotion',
},
),
migrations.CreateModel(
name='IndexTypeBanner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('display_type', models.SmallIntegerField(choices=[(0, '不展示'), (1, '展示')], default=1, verbose_name='展示表示')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
('sku', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsSKU', verbose_name='商品SKU')),
('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsType', verbose_name='商品类型')),
],
options={
'verbose_name': '主页分类展示商品',
'verbose_name_plural': '主页分类展示商品',
'db_table': 'df_index_type_goods',
},
),
migrations.AddField(
model_name='goodssku',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsType', verbose_name='商品种类'),
),
migrations.AddField(
model_name='goodsimage',
name='sku',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsSKU', verbose_name='商品'),
),
]
|
[
"you@example.com"
] |
you@example.com
|
44762a4814983fa8bf97ed7e32c784edabcc09aa
|
2c3714ea2421272fb816d7a18cfb3cce8f742c85
|
/webpages/urls.py
|
8af47ab5233c6b5adaefb9d72e53b0cc657697df
|
[] |
no_license
|
RishabhSheyoran/Flight_Price
|
c23e92ec57c7bdd97127e0eb65047292bba55138
|
a042fbfe2dc81c7954a6ea372d408ebe142ad0e2
|
refs/heads/main
| 2023-04-08T08:55:27.764535
| 2021-04-16T09:50:39
| 2021-04-16T09:50:39
| 358,377,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
from django.urls import path
from . import views
urlpatterns = [
    path('', views.home, name='home'),
    path('about', views.about, name='about'),
    path('services', views.services, name='services'),
    path('contact', views.contact, name='contact'),
]
|
[
"chaudharydavidson@gmail.com"
] |
chaudharydavidson@gmail.com
|
a1cd8b70bf39f6855eb613d7d0e46b7d75fb9390
|
f6c2b98e2b4a247bcd5b3dc1da6a268391a554aa
|
/anagram/anagram.py
|
f035124d987d7fb90db3a9532dd2c8aa049ce070
|
[] |
no_license
|
Zer0-lab/EpreuvesDuFeu
|
a28793ff4d8607ae637b82f6db8ff2151ddd0041
|
6d6efa9227c6b85263ac4a2031cefea4fb46b716
|
refs/heads/master
| 2022-10-18T04:56:18.852495
| 2020-06-07T15:54:28
| 2020-06-07T15:54:28
| 267,621,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
#!/usr/bin/python3
import sys
wordList = list(open("fr.txt"))
# sys.argv[1:] is a list; str() of a list yields "['word']", which can never
# match a dictionary word, so take the first command-line argument directly
anagram = sys.argv[1] if len(sys.argv) > 1 else ''
#anagram = "arbre"
def removeSpace(words):  # parameter renamed from `list` to avoid shadowing the built-in
    newList = []
    for i in words:
        newList.append(i.strip())
    return newList
def toSepare(word):
    # canonical form of a word: its characters in sorted order
    return ''.join(sorted(word))
wordList = removeSpace(wordList)
i = 0
while i < len(wordList):
if toSepare(wordList[i]) == toSepare(anagram):
if(wordList[i] != anagram):
print("L'anagrame de %s est %s " % (anagram, wordList[i]))
i += 1
|
[
"kevin.dhoust@gmail.com"
] |
kevin.dhoust@gmail.com
|
1eec26df7970c6497bf3752cec174cf6f8b1cc80
|
55d5bacab6eca6e1b02e5e5172e4eddf6fe981be
|
/hackKUReadGeneric.py
|
e2d86289cdd838b9a3a2e992b37bee509d2b49e1
|
[] |
no_license
|
harryl6798/HackKU_RFID
|
1ca7b3b81c6fd6e944fac730dc33a4951e47a2f5
|
7e284fe0eccaaafdb1f52fabcb40334315a128f9
|
refs/heads/master
| 2020-04-16T09:25:57.661997
| 2019-01-13T07:03:32
| 2019-01-13T07:03:32
| 165,463,966
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
import RPi.GPIO as GPIO
import SimpleMFRC522
import time
from squid import *
reader = SimpleMFRC522.SimpleMFRC522()
led = Squid(18, 23, 24)
print("Hold a tag near the reader")
def flash(color, times, delay):
for i in range(0, times):
led.set_color(color)
time.sleep(delay)
led.set_color(OFF)
time.sleep(delay)
try:
while True:
led.set_color(GREEN)
id, text = reader.read()
if id:
values = text.split(",")
allergies = ""
if len(values) == 8:
#Will print the values according to the card given
if values[1] == "1":
allergies += "Vegetarian, "
if values[2] == "1":
allergies += "Vegan, "
if values[3] == "1":
allergies += "Halal, "
if values[4] == "1" :
allergies += "Pork, "
if values[5] == "1":
allergies += "Lactose, "
if values[6] == "1":
allergies += "Peanut, "
if values[7] != "":
allergies += values[7]
print("Name: " + values[0] )
print("Allergies: " + allergies)
flash(RED, 6, .08)
finally:
print("cleaning up")
GPIO.cleanup()
|
[
"harryl6798@gmail.com"
] |
harryl6798@gmail.com
|
23af418c31e44728eb36720b928cfc71e6a061e2
|
7955043be474031601135404cb85b0f25c1df8e3
|
/cno/uc2_settings.py
|
3df298f94d9bbe3ecd64e036ea3d4ffd08e2f21d
|
[] |
no_license
|
redblade/5gmedia
|
ec70b206cd3c76bb89fbe83c88c05cdbcb255e05
|
6630f283baa2d1256df101d1267ff52cab300992
|
refs/heads/master
| 2020-08-03T15:37:15.310357
| 2019-09-25T09:59:38
| 2019-09-25T09:59:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,883
|
py
|
from os import path, pardir
#PROJECT_ROOT = "C:\\Users\\magni_000\\Documents\\PyCharm\\fivegmedia\\monitoring-data-translator"
#PROJECT_ROOT = '/opt/monitoring-data-translator'
PROJECT_ROOT = '/opt/sim'
# =================================
# WHITE LIST OF MONITORING METRICS
# =================================
METRICS_WHITE_LIST = {
"openstack": ['memory', 'memory.usage', 'memory.resident', 'memory.bandwidth.total', 'memory.bandwidth.local',
'memory.swap.in', 'memory.swap.out', 'cpu', 'cpu_util', 'cpu.delta', 'vcpus', 'cpu_l3_cache',
'network.incoming.bytes', 'network.incoming.bytes.rate', 'network.outgoing.bytes',
'network.outgoing.bytes.rate', 'network.incoming.packets', 'network.incoming.packets.rate',
'network.outgoing.packets', 'network.outgoing.packets.rate', 'network.incoming.packets.drop',
'network.outgoing.packets.drop', 'network.incoming.packets.error', 'network.outgoing.packets.error'],
"vmware": ['', ],
"opennebula": ['', ],
"unikernels": ['', ],
"kubernetes": ['', ],
"elk": ['', ],
}
# =================================
# KAFKA SETTINGS
# =================================
#KAFKA_SERVER = '217.172.11.188:9092'
#KAFKA_CLIENT_ID = 'monitoring-data-translator'
KAFKA_SERVER = '217.172.11.173:9092'
KAFKA_CLIENT_ID = 'CNO_UC2_UCL'
KAFKA_API_VERSION = (0, 10, 1)
KAFKA_PREFIX_TOPIC = "devstack" # "devstack.*"
#KAFKA_MONITORING_TOPICS = {"openstack": "nvfi.eng.openstack", "vmware": "vmware", }
KAFKA_MONITORING_TOPICS = {"uc2_tm":"trafficmanager.uc2.metrics", "uc2_qoe": "app.uc2.qoe", \
"uc2_vce":"nfvi.tid-onlife.opennebula", 'uc3_load': 'ns.instances.trans',\
"uc2_mon_exec":'ns.instances.exec', "uc2_mon_conf": 'ns.instances.conf'}
KAFKA_TRANSLATION_TOPIC_SUFFIX = "trans"
KAFKA_EXECUTION_TOPIC = {"uc2_exec": 'ns.instances.exec', "uc2_conf" : 'ns.instances.conf'}
# =================================
# OSM SETTINGS
# =================================
OSM_IP = "192.168.1.171"
OSM_ADMIN_CREDENTIALS = {"username": "admin", "password": "admin"}
OSM_COMPONENTS = {"UI": 'https://{}:8443'.format(OSM_IP),
"SO-API": 'https://{}:8008'.format(OSM_IP),
"RO-API": 'http://{}:9090'.format(OSM_IP)}
# =================================
# TEMPLATE OF MONITORING METRIC
# =================================
METRIC_TEMPLATE = {
"vim": {
"name": "dev-openstack",
"type": "openstack"
},
"mano": {
"vdu": {
"id": "da6849d7-663f-4520-8ce0-78eaacf74f08",
"flavor": {
"name": "ubuntuvnf_vnfd-VM-flv",
"ram": 4096,
"id": "c46aecab-c6a9-4775-bde2-070377b8111f",
"vcpus": 1,
"disk": 10,
"swap": 0,
"ephemeral": 0
},
"image_id": "a46178eb-9f69-4e44-9058-c5eb5ded0aa3",
"status": "running"
},
"vnf": {
"id": "00fa217e-cf90-447a-975f-6d838e7c20ed",
"nvfd_id": None
},
"ns": {
"id": "abf07959-053a-4443-a96f-78164913277b",
"nsd_id": None
},
"metric": {
"name": "network.incoming.packets.rate",
"value": None,
"timestamp": None,
"type": "gauge",
"unit": "Mbps"
}
}
}
METRIC_TEMPELATE_UC2 = {
"mano": {
"vdu": {
"ip_address": "str",
"name": "vdu_name",
"mgmt-interface": "null",
"image_id": "null",
"status": "ACTIVE",
"flavor": {
"disk": "null",
"ram": "null",
"vcpus": "null"
},
"id": "integer"
},
"ns": {
"name": "uuid",
"nsd_name": "str",
"nsd_id": "null",
"id": "uuid"
},
"vim": {
"type": "opennebula",
"uuid": "uuid",
"name": "str",
"url": "ip",
"tag": "kubernetes"
},
"vnf": {
"name": "null",
"vnfd_name": "str",
"vnfd_id": "1uuid",
"short_name": "null",
"id": "uuid"
}
},
"metric": {
"timestamp": "2019-02-13T15:45:54.553000Z",
"type": "counter",
"name": "diskrdbytes",
"value": "349236528",
"unit": "bytes"
},
# "metric": {
# "timestamp": "2019-03-28T10:20:05.000000Z",
# "vdu_uuid": 1203,
# "value": "349236528",
# "type": "diskrdbytes",
# "unit": "bytes"
# },
"analysis": {
"action": "true"
},
"execution": {
"planning": "set_vce_bitrate",
"value": "number"
}
}
# Kafka topic: ns.instances.exec
METRIC_TEMPLATE_UC2_EXEC = {
"analysis": {
"action": "true"
},
"execution": {
"planning": "set_vce_bitrate",
"value": "the value of the bitrate"
}
# the rest needs to be completed
}
# Kafka topic: ns.instances.conf
METRIC_TEMPLATE_UC2_CONF = {
"vce": {
#"mac": "string",
#"vdu_uuid": "string (VM id)",
"action": {"bitrate": "integer|kbps"}
}
}
# Kafka topic: ns.instances.exec
METRIC_TEMPLATE_UC3_EXEC = {
"analysis": {
"action": "true"
},
"execution": {
"planning": 'vnf_scale_out',
"value": "null"
},
"mano": {
"ns": {
"id": "13987ea3-054a-459b-a24d-f4c76679edaf",
"name": "ns_takis",
"nsd_name": "cirros_2vnf_ns",
"nsd_id": "d5c99561-ec46-4480-8377-b5b218b8b1e5"
},
"vnf": {
"id": "abd00f09-dff1-40f1-be83-637a456ed400",
"short_name": "null",
"vnfd_name": "cirros_vnfd",
"name": "null",
"vnfd_id": "16c40d2e-7a1b-4f22-9e50-3f7ede3e9fc4"
},
"vdu": {
"id": "99f76771-3a39-42ae-a09c-2f79f459a9c9",
"image_id": "a46178eb-9f69-4e44-9058-c5eb5ded0aa3",
"ip_address": "192.168.207.2",
"flavor": {
"id": "c46aecab-c6a9-4775-bde2-070377b8111f",
"disk": 10,
"swap": 0,
"vcpus": 1,
"ram": 4096,
"name": "ubuntuvnf_vnfd-VM-flv",
"ephemeral": 0
},
"mgmt-interface": "null",
"name": "instance-00000009",
"status": "running"
},
"vim": {
"uuid": "48eb5bd0-feaa-48ed-b0d7-6d2b8ad0385e",
"type": "openstack",
"tag": "openstack",
"name": "devstack-ocata",
"url": "http://192.168.1.147/identity/v3"
}
}
}
# ==================================
# LOGGING SETTINGS
# ==================================
# See more: https://docs.python.org/3.5/library/logging.config.html
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'detailed': {
'class': 'logging.Formatter',
'format': "[%(asctime)s] - [%(name)s:%(lineno)s] - [%(levelname)s] %(message)s",
},
'simple': {
'class': 'logging.Formatter',
'format': '%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'simple',
},
'translator': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': "{}/logs/translator.log".format(PROJECT_ROOT),
'mode': 'w',
'formatter': 'detailed',
'level': 'DEBUG',
'maxBytes': 2024 * 2024,
'backupCount': 5,
},
'soapi': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': "{}/logs/soapi.log".format(PROJECT_ROOT),
'mode': 'w',
'formatter': 'detailed',
'level': 'DEBUG',
'maxBytes': 2024 * 2024,
'backupCount': 5,
},
'errors': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': "{}/logs/error.log".format(PROJECT_ROOT),
'mode': 'w',
'level': 'ERROR',
'formatter': 'detailed',
'maxBytes': 2024 * 2024,
'backupCount': 5,
},
},
'loggers': {
'translator': {
'handlers': ['translator']
},
'soapi': {
'handlers': ['soapi']
},
'errors': {
'handlers': ['errors']
}
},
'root': {
'level': 'DEBUG',
'handlers': ['console']
},
}
|
[
"m.kheirkhah@ucl.ac.uk"
] |
m.kheirkhah@ucl.ac.uk
|
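A minimal sketch of consuming the LOGGING dict above with dictConfig (the import path is assumed from the file's location, and the logs/ directory under PROJECT_ROOT must exist):

import logging
import logging.config

from cno import uc2_settings  # import path assumed from /cno/uc2_settings.py

logging.config.dictConfig(uc2_settings.LOGGING)
logging.getLogger('translator').info('translator logger configured')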
b8c56a5dc06d9862a2e84b2c0c240d7aff30a454
|
2baaee5ddc12ac6da88c5485a7eaa6e33902610b
|
/CaseStudies/noPCM/src/Python/Testing/compareMatlabTest.py
|
6cfe00815d0ffe858ae39d620464ae7968c0d62e
|
[
"BSD-2-Clause"
] |
permissive
|
smiths/caseStudies
|
39b4374ba37a2cc68a22e0accc1657f041272f4d
|
d50527d4cbeec88848b4505c71271970ca835e82
|
refs/heads/master
| 2023-07-08T22:11:55.202669
| 2023-06-27T19:37:42
| 2023-06-27T19:37:42
| 110,840,691
| 3
| 2
| null | 2018-12-21T18:21:26
| 2017-11-15T14:01:05
|
C
|
UTF-8
|
Python
| false
| false
| 4,050
|
py
|
#Commented sections pending removal.
#No valid comparison tests are available for NoPCM, hence the lack of action. If/when usable data becomes available,
#apply the same setup as compareFortranTest.py, as seen in test_CM5.
import sys
sys.path.insert(0, '.')
import unittest
import PCM_Error
class TestCompareMatlab(unittest.TestCase):
def setUp(self):
self.delta = 0.000005
# def test_CM1(self):
# errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'TWat')
# errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'TPCM')
# errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'EWat')
# errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'EPCM')
# self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
# self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
# self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
# self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
# def test_CM2(self):
# errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'TWat')
# errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'TPCM')
# errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'EWat')
# errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'EPCM')
# self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
# self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
# self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
# self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
# def test_CM3(self):
# errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'TWat')
# errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'TPCM')
# errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'EWat')
# errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'EPCM')
# self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
# self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
# self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
# self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
# def test_CM4(self):
# errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'TWat')
# errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'TPCM')
# errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'EWat')
# errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'EPCM')
# self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
# self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
# self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
# self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
##def test_CF5(self):##
##errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P05.out', 'TWatNoP')##
##errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P05.out', 'EWatNoP')##
##self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)##
##self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)##
class CompareMatlabSuite:
def suite(self):
suite = unittest.TestLoader().loadTestsFromTestCase(TestCompareMatlab)
return suite
|
[
"moria@mcmaster.ca"
] |
moria@mcmaster.ca
|
30f66664bec93fdb54ccc45aa6bf98513ac891b5
|
b35831afb2c0ad70f3e24f185f3d23e89786afe3
|
/read/extremes.py
|
c05d43e56ca46b0c68b47a5406aca6f5b59f92e2
|
[
"MIT"
] |
permissive
|
lapic-ufjf/evolutionary-ACS-benchmark
|
daf4c59f6c4e267daa7fed46495be17e237465a9
|
0c13f3493819db3baa512268d80b099f9d31b952
|
refs/heads/master
| 2022-11-19T02:37:52.386132
| 2022-11-09T20:28:42
| 2022-11-09T20:28:42
| 249,778,084
| 2
| 1
|
MIT
| 2022-11-09T20:28:44
| 2020-03-24T17:48:52
|
Python
|
UTF-8
|
Python
| false
| false
| 881
|
py
|
import os
import pickle
def create_extremes_name_list(instances, num_objectives_list):
extremes_name = [(instance, num_objectives) for instance in instances for num_objectives in num_objectives_list]
return extremes_name
def get_extremes_name(instance, num_objectives):
name = '%s_%s.pickle' % (instance, num_objectives)
return name
def open_extremes(name, base_folder='results/extremes'):
file_path = os.path.join(base_folder, name)
with open(file_path, 'rb') as file:
file_results = pickle.load(file)
return file_results
def get_extremes_info(extremes):
return extremes['info']
def get_extremes_data(extremes):
return extremes['data']
def get_extremes_worst_point(extremes):
return extremes['data']['worst_point']
def get_extremes_nondominated_population(extremes):
return extremes['data']['nondominated_population']
|
[
"martins.adw@gmail.com"
] |
martins.adw@gmail.com
|
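A hypothetical end-to-end use of the helpers above (the instance name and objective count are placeholders; open_extremes expects the pickle under results/extremes/):

name = get_extremes_name('instanceA', 3)   # -> 'instanceA_3.pickle' (placeholder args)
extremes = open_extremes(name)
print(get_extremes_info(extremes))
print(get_extremes_worst_point(extremes))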
6578ffb4c1cedb97046b4b8c9f593ef686292eda
|
bfa901539e3e8f7deccf34d4e448598906cfa1bb
|
/action-Search-tvshow.py
|
287e731ab5d0eef1e7d243e656aef536d78a817b
|
[] |
no_license
|
Ianouu/snips-kodi-actions
|
3fcfd5be394cad309f0c542d7daf07de5a03d6c3
|
177b7e18cb4d2949705b8f451be898bbf727baa6
|
refs/heads/master
| 2020-04-16T07:19:23.292473
| 2019-01-18T14:44:45
| 2019-01-18T14:44:45
| 165,381,960
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,568
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import ConfigParser
from hermes_python.hermes import Hermes
from hermes_python.ontology import *
import io
import simplejson
import requests
import time
CONFIGURATION_ENCODING_FORMAT = "utf-8"
CONFIG_INI = "config.ini"
class SnipsConfigParser(ConfigParser.SafeConfigParser):
def to_dict(self):
return {section : {option_name : option for option_name, option in self.items(section)} for section in self.sections()}
def read_configuration_file(configuration_file):
try:
with io.open(configuration_file, encoding=CONFIGURATION_ENCODING_FORMAT) as f:
conf_parser = SnipsConfigParser()
conf_parser.readfp(f)
return conf_parser.to_dict()
except (IOError, ConfigParser.Error) as e:
return dict()
def subscribe_intent_callback(hermes, intentMessage):
conf = read_configuration_file(CONFIG_INI)
action_wrapper(hermes, intentMessage, conf)
def action_wrapper(hermes, intentMessage, conf):
addon_name = conf['global']['favorite_addon1']
movie_name = intentMessage.slots.tv_name.first().value
addr_ = conf['global']['ip']
    port_ = conf['global']['port']
def openAddon():
request = "{\"jsonrpc\": \"2.0\", \"method\": \"Addons.ExecuteAddon\", \"params\": { \"addonid\": \"plugin.video." + addon_name + "\", \"params\":{\"action\":\"alert\"}}, \"id\": \"1\"}"
url = "http://" + addr_ + ":" + port_ + "/jsonrpc?request=" + request
response = requests.get(url)
json_data = simplejson.loads(response.text)
def searchMovie():
print("o")
request = "{\"jsonrpc\": \"2.0\", \"method\": \"Files.GetDirectory\", \"params\": { \"directory\": \"plugin://plugin.video.exodus/?action=tvSearchterm%26name=" + movie_name + "\"}, \"id\": 1 }"
url = "http://" + addr_ + ":" + port_ + "/jsonrpc?request=" + request
response = requests.get(url)
json_data = simplejson.loads(response.text)
try:
openAddon()
time.sleep(3)
searchMovie()
hermes.publish_end_session(intentMessage.session_id, "")
except requests.exceptions.RequestException:
hermes.publish_end_session(intentMessage.session_id, "Erreur de connection.")
except Exception:
hermes.publish_end_session(intentMessage.session_id, "Erreur de l'application.")
if __name__ == "__main__":
with Hermes("localhost:1883") as h:
h.subscribe_intent("Ianou:Search-tvshow", subscribe_intent_callback) \
.start()
|
[
"clementian067@gmail.com"
] |
clementian067@gmail.com
|
27c0b921e96a11906286be5d2fb8bac1c678ad1c
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc152/abc152_f/9661160.py
|
040bdabbffd7805e7f362fb6eff11285789dc375
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886
| 2021-03-01T12:52:26
| 2021-03-01T12:52:26
| 227,364,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
import collections
from itertools import combinations
N = int(input())
ab = [tuple(map(int, input().split())) for i in range(N-1)]
M = int(input())
uv = [tuple(map(int, input().split())) for i in range(M)]
def popcount(x):
    # SWAR popcount for 64-bit integers: sum adjacent bits pairwise, then fold
    # the partial counts into nibbles, bytes, and finally the low byte
    x = x - ((x >> 1) & 0x5555555555555555)
    x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
    x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
    x = x + (x >> 8)
    x = x + (x >> 16)
    x = x + (x >> 32)
    return x & 0x0000007f
G = [[-1]*N for i in range(N)]
for i in range(N-1):
a, b = ab[i]
a -= 1
b -= 1
G[a][b] = i
G[b][a] = i
q = collections.deque()
G2 = [[0 for j in range(N)] for i in range(N)]
for i in range(N):
q.append((i, 0))
reached = [False]*N
reached[i] = True
while(q):
x, s = q.popleft()
for y in range(N):
if G[x][y] == -1 or reached[y]:
continue
else:
G2[i][y] = s | (1 << G[x][y])
q.append((y, s | 1 << G[x][y]))
reached[y] = True
ans = 2**(N-1)
ans2 = 0
for i in range(1, 2**M):
tmp = 2**(N-1) - 1
for j in range(M):
if (i >> j) % 2 == 1:
u, v = uv[j]
u -= 1
v -= 1
tmp &= ~G2[u][v]
ans2 += ((-1)**(popcount(i)-1)) * (1 << popcount(tmp))
# print(ans2, i)
print(ans-ans2)
|
[
"kouhei.k.0116@gmail.com"
] |
kouhei.k.0116@gmail.com
|
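A quick sanity check for the SWAR popcount above (a sketch; the test values are arbitrary 64-bit integers):

for v in (0, 1, 0xdeadbeef, 2**63 - 1):
    assert popcount(v) == bin(v).count('1')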
08503202388d0af78b6d0d168bb172fdbc48da95
|
f008634e359ea39e209dcb29eaa00a0637e25e03
|
/TT/TT/asgi.py
|
0fa14606b0ca280ec474d6b891702090a42d9fc8
|
[] |
no_license
|
Maxzkp/TT
|
5c57599290cc1e7b33f4fb9ac4dcd41897e1a92a
|
f9760b3304af6ccc617d61896be7998e646a1451
|
refs/heads/main
| 2023-05-18T21:17:33.354160
| 2021-06-11T02:23:43
| 2021-06-11T02:23:43
| 369,038,098
| 0
| 0
| null | 2021-06-07T02:51:24
| 2021-05-20T00:46:47
|
Python
|
UTF-8
|
Python
| false
| false
| 381
|
py
|
"""
ASGI config for TT project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TT.settings')
application = get_asgi_application()
|
[
"max.zkp@gmail.com"
] |
max.zkp@gmail.com
|
fa3b4ee4882f9584a72b51f66e015f7c15963038
|
e6c944d9f03bf8f7d99f9ba0b50f1374c8903e54
|
/base.py
|
6ed2b3e8d7518690b25ffddbd2f808d6ff259331
|
[] |
no_license
|
ukolov-pavel/AIST
|
4fe5d113e1a8439d6f4a1858bf75b7c233b515c8
|
d0f44bfb057d7748851a79aaf9009be0ba12b757
|
refs/heads/master
| 2020-03-09T23:26:37.510021
| 2018-05-18T15:01:38
| 2018-05-18T15:01:38
| 115,633,839
| 0
| 0
| null | 2018-01-16T16:00:25
| 2017-12-28T15:03:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import requests
class Driver(object):
__instance = None
@classmethod
def get(cls, type='ff'):
if not cls.__instance:
cls.__instance = webdriver.Firefox()
return cls.__instance
class Session(object):
__instance = None
@classmethod
def get(cls):
if not cls.__instance:
cls.__instance = requests.Session()
return cls.__instance
class BaseTest(object):
stand = 'http://10.32.200.142'
login = 'autotest'
password = '77@dm1nA'
timeout = 15
@classmethod
def setup_class(cls):
cls.driver = Driver.get()
#cls.driver.implicitly_wait(0)
cls.driver.maximize_window()
cls.driver.get(cls.stand)
cls.driver.find_element_by_id('Login').send_keys(cls.login)
cls.driver.find_element_by_id('Password').send_keys(cls.password)
cls.driver.find_element_by_class_name('submit').click()
WebDriverWait(Driver.get(), 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, ".donor-logo")))
@classmethod
def teardown_class(cls):
Driver.get().close()
Session.get().close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
cc18d34559e1246f1965d54091acbefdaad2f778
|
4668ab207f426d48db7d42d47238db02e4e78c31
|
/Finalized Weight Converter.py
|
4cdf6d795a2fee02605e02167a32a35566e367a8
|
[] |
no_license
|
Alee49/this_school_project
|
de734f7f2a38b01c3c8a9e2ff5c4a4d291ad04c7
|
5578a49b583a214abe7ac73589bf47c2b19114e4
|
refs/heads/master
| 2021-07-13T00:54:32.243554
| 2017-10-17T15:20:46
| 2017-10-17T15:20:46
| 105,920,549
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
print("Welcome please use the abriviated units for the following")
print("We support Pounds(lb), Kilogram(kg), Gram(g), Milligram(mg), Ounce(oz)")
unit1 = input ("Which unit would you like to convert from: ")
unit2 = input("Which unit would you like to convert to: ")
num1 = input("Enter value: ")
Ref = {'lb':0.453592,'mg':0.001,'oz':16}
if unit1 == "lb" and unit2 == "kg":
ans = float(num1)*Ref['lb']
ans = round(ans,3)
print(ans,'kg')
elif unit1 == "kg" and unit2 == "lb":
ans = float(num1)/Ref['lb']
ans = round(ans,3)
print(ans,'lb')
elif unit1 == "lb" and unit2 == "g":
ans = float(num1)*Ref['lb']/Ref['mg']
ans = round(ans,3)
print(ans,'g')
elif unit1 == "g" and unit2 == "lb":
ans = float(num1)/Ref['lb']*Ref['mg']
ans = round(ans,3)
print(ans,'lb')
elif unit1 == "lb" and unit2 == "mg":
ans = float(num1)*Ref['lb']/Ref['mg']/Ref['mg']
ans = round(ans,3)
print(ans,'mg')
elif unit1 == "mg" and unit2 == "lb":
ans = float(num1)/Ref['lb']*Ref['mg']*Ref['mg']
ans = round(ans,4)
print(ans,'lb')
elif unit1 == "kg" and unit2 == "g":
ans = float(num1)/Ref['mg']
ans = round(ans,3)
print(ans,'g')
elif unit1 == "g" and unit2 == "kg":
ans = float(num1)*Ref['mg']
ans = round(ans,3)
print(ans,'kg')
elif unit1 == "g" and unit2 == "mg":
ans = float(num1)/Ref['mg']
ans = round(ans,3)
print(ans,'mg')
elif unit1 == "mg" and unit2 == "g":
ans = float(num1)*Ref['mg']
ans = round(ans,3)
print(ans,'g')
elif unit1 == "kg" and unit2 == "mg":
ans = float(num1)/Ref['mg']/Ref['mg']
ans = round(ans,3)
print(ans,'mg')
elif unit1 == "mg" and unit2 == "kg":
ans = float(num1)*Ref['mg']*Ref['mg']
ans = round(ans,4)
print(ans,'kg')
elif unit1 == "lb" and unit2 == "oz":
ans = float(num1)*Ref['oz']
ans = round(ans,3)
print(ans,'oz')
elif unit1 == "oz" and unit2 == "lb":
ans = float(num1)/Ref['oz']
ans = round(ans,3)
print(ans,'lb')
elif unit1 == "kg" and unit2 == "oz":
ans = float(num1)/Ref['lb']*Ref['oz']
ans = round(ans,3)
print(ans,'oz')
elif unit1 == "oz" and unit2 == "kg":
ans = float(num1)*Ref['lb']/Ref['oz']
ans = round(ans,3)
print(ans,'kg')
elif unit1 == "g" and unit2 == "oz":
ans = float(num1)*Ref['mg']/Ref['lb']*Ref['oz']
ans = round(ans,3)
print(ans,'oz')
elif unit1 == "oz" and unit2 == "g":
ans = float(num1)/Ref['mg']*Ref['lb']/Ref['oz']
ans = round(ans,3)
print(ans,'g')
elif unit1 == "mg" and unit2 == "oz":
ans = float(num1)*Ref['mg']*Ref['mg']/Ref['lb']*Ref['oz']
ans = round(ans,3)
print(ans,'oz')
elif unit1 == "oz" and unit2 == "mg":
ans = float(num1)/Ref['mg']/Ref['mg']*Ref['lb']/Ref['oz']
ans = round(ans,3)
print(ans,'mg')
|
[
"noreply@github.com"
] |
noreply@github.com
|
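The if/elif chain above spells out every unit pair by hand; a compact alternative (a sketch, not part of the original program) routes every conversion through grams with one factor table:

# Sketch only; names are hypothetical. Factors give grams per unit.
TO_GRAMS = {'lb': 453.592, 'kg': 1000.0, 'g': 1.0, 'mg': 0.001, 'oz': 28.3495}

def convert(value, unit_from, unit_to):
    return round(value * TO_GRAMS[unit_from] / TO_GRAMS[unit_to], 3)

print(convert(2, 'lb', 'kg'))  # 0.907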
2de3b0defebf228a952abc69a7eaf5fad8fea8db
|
30a98dfa909dafc237423cbae47076287740de4b
|
/app/my_geo_utils.py
|
5ecb2dbc001f11999c84a6cb9162471a695bb652
|
[] |
no_license
|
pgaval/moveable-weather
|
da3c55b080b16da99fa1cb43fe04cb9994f171f9
|
333b1a40298ab7f2baaa6a662ac29726f5cafbc0
|
refs/heads/master
| 2020-06-03T11:29:39.143624
| 2010-12-12T13:22:03
| 2010-12-12T13:22:03
| 32,232,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
from google.appengine.ext import webapp
# the distance functions below use these names unqualified, so import them explicitly
from math import sin, cos, sqrt, atan2
import math
# For each co-ordinate system we do, what are the A, B and E2 values?
# List is A, B, E^2 (E^2 calculated after)
abe_values = {
'wgs84': [ 6378137.0, 6356752.3141, -1 ],
'osgb' : [ 6377563.396, 6356256.91, -1 ],
'osie' : [ 6377340.189, 6356034.447, -1 ]
}
# The earth's radius, in meters, as taken from an average of the WGS84
# a and b parameters (should be close enough)
earths_radius = (abe_values['wgs84'][0] + abe_values['wgs84'][1]) / 2.0
class MyGeoUtils (webapp.RequestHandler):
def approx_ellipsoid_dist (self, (lat1, lon1), (lat2, lon2)):
"""approx_ellipsoid_dist((lat1, lon1), (lat2, lon2)):
Input: lat1, lon1, lat2, lon2: latitude and longitude (in degrees) of two points on Earth.
Output: distance in kilometers "crow fly" between the two points.
        If you want less precision use spherical_distance.
If you want more precision use ellipsoid_distance."""
# http://www.codeguru.com/Cpp/Cpp/algorithms/article.php/c5115/
# By Andy McGovern, translated to Python
DE2RA = 0.01745329252 # Degrees to radians
ERAD = 6378.137
FLATTENING = 1.000000 / 298.257223563 # Earth flattening (WGS84)
EPS = 0.000000000005
if lon1 == lon2 and lat1 == lat2:
return 0.0
lat1 = DE2RA * lat1
lon1 = -DE2RA * lon1
lat2 = DE2RA * lat2
lon2 = -DE2RA * lon2
F = (lat1 + lat2) / 2.0
G = (lat1 - lat2) / 2.0
L = (lon1 - lon2) / 2.0
sing = sin(G)
cosl = cos(L)
cosf = cos(F)
sinl = sin(L)
sinf = sin(F)
cosg = cos(G)
S = sing*sing*cosl*cosl + cosf*cosf*sinl*sinl
C = cosg*cosg*cosl*cosl + sinf*sinf*sinl*sinl
W = atan2(sqrt(S), sqrt(C))
R = sqrt(S*C) / W
H1 = (3 * R - 1.0) / (2.0 * C)
H2 = (3 * R + 1.0) / (2.0 * S)
D = 2 * W * ERAD
return D * (1 + FLATTENING * H1 * sinf*sinf*cosg*cosg - FLATTENING*H2*cosf*cosf*sing*sing)
# See http://gagravarr.org/code/ for updates and information
#
# GPL
#
# Nick Burch - v0.06 (30/05/2007)
def calculate_distance_and_bearing(self, from_lat_dec,from_long_dec,to_lat_dec,to_long_dec):
"""Uses the spherical law of cosines to calculate the distance and bearing between two positions"""
# Turn them all into radians
from_theta = float(from_lat_dec) / 360.0 * 2.0 * math.pi
from_landa = float(from_long_dec) / 360.0 * 2.0 * math.pi
to_theta = float(to_lat_dec) / 360.0 * 2.0 * math.pi
to_landa = float(to_long_dec) / 360.0 * 2.0 * math.pi
d = math.acos(
math.sin(from_theta) * math.sin(to_theta) +
math.cos(from_theta) * math.cos(to_theta) * math.cos(to_landa-from_landa)
) * earths_radius
bearing = math.atan2(
math.sin(to_landa-from_landa) * math.cos(to_theta),
math.cos(from_theta) * math.sin(to_theta) -
math.sin(from_theta) * math.cos(to_theta) * math.cos(to_landa-from_landa)
)
bearing = bearing / 2.0 / math.pi * 360.0
return [d,bearing]
|
[
"egilchri@gmail.com@3cd864e4-babe-1b84-cbc2-118daab562f7"
] |
egilchri@gmail.com@3cd864e4-babe-1b84-cbc2-118daab562f7
|
be0d795ee4a482be60cebd7782452cdb1ec3243e
|
5593b35f326748f18053e7ea042c98fe6b70a850
|
/tqt/function/_utils.py
|
fcfe437056c27c6c9f5efbfe6e9d8517486bdff4
|
[
"BSD-3-Clause"
] |
permissive
|
sicdl/TQT
|
7dfe3bce2bb5dace9a467945512e65525a0c3be9
|
27b73fcf27ddfb67cd28f6ed27e49341f27c9f16
|
refs/heads/main
| 2023-04-14T18:28:23.224689
| 2021-04-22T14:46:46
| 2021-04-22T14:46:46
| 362,503,682
| 0
| 0
|
BSD-3-Clause
| 2021-04-28T14:45:14
| 2021-04-28T14:45:13
| null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
import torch
def number_to_tensor(x, t):
r'''
    Turn x into a tensor with the same data type as tensor t.
'''
return torch.tensor(x).type_as(t)
|
[
"you@example.com"
] |
you@example.com
|
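A minimal usage sketch for number_to_tensor (the dtype is chosen arbitrarily):

import torch

t = torch.zeros(2, dtype=torch.float16)
x = number_to_tensor(3, t)
assert x.dtype == torch.float16  # x inherits t's dtype via type_as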
8175e5f799827ea47e4107f34fc78482b2f29126
|
8b4fd96a11afc15359403c1f2c4f63f03648e5b7
|
/21_extract_full_name/extract_full_name.py
|
4d98315245c36ff71bf5cd703705230c8783f972
|
[] |
no_license
|
SpencerPulliam/pythonDsPractice
|
3d321f5e938887447a8f3455dcfcd408b1b9204a
|
f41e92c87a42c395542af7758f6fbd850d6cdb5a
|
refs/heads/main
| 2023-07-14T05:09:48.568081
| 2021-08-28T21:26:01
| 2021-08-28T21:26:01
| 400,893,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
def extract_full_names(people):
"""Return list of names, extracting from first+last keys in people dicts.
- people: list of dictionaries, each with 'first' and 'last' keys for
first and last names
Returns list of space-separated first and last names.
>>> names = [
... {'first': 'Ada', 'last': 'Lovelace'},
... {'first': 'Grace', 'last': 'Hopper'},
... ]
>>> extract_full_names(names)
['Ada Lovelace', 'Grace Hopper']
"""
names = []
for full_name in people:
name = ''
name += full_name['first']
name += ' '
name += full_name['last']
names.append(name)
return names
|
[
"spencer_pulliam@yahoo.com"
] |
spencer_pulliam@yahoo.com
|
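The loop builds each name by string concatenation; an equivalent list comprehension (a sketch with identical behavior) is:

def extract_full_names_compact(people):
    return [f"{p['first']} {p['last']}" for p in people]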
9b689b192e40c5c293e15d203c6ae833f5cb0a70
|
e31f84c20af7be8646f03faf22ac55ad041444a3
|
/tests/test_selection/test_drop_duplicate_features.py
|
5d33b3d5d837340cc6fbd7cb372ec75f6777f0eb
|
[
"BSD-3-Clause"
] |
permissive
|
feature-engine/feature_engine
|
564aa2f298bb1beb0606bd5d51261b4d1085a8df
|
3343305a01d1acfeff846b65d33a5686c6e8c84f
|
refs/heads/main
| 2023-08-07T09:19:24.315277
| 2023-06-08T06:27:45
| 2023-06-08T06:27:45
| 163,630,824
| 874
| 105
|
BSD-3-Clause
| 2023-09-13T14:02:23
| 2018-12-31T01:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,967
|
py
|
import numpy as np
import pandas as pd
import pytest
from feature_engine.selection import DropDuplicateFeatures
@pytest.fixture(scope="module")
def df_duplicate_features():
data = {
"Name": ["tom", "nick", "krish", "jack"],
"dob2": pd.date_range("2020-02-24", periods=4, freq="T"),
"City": ["London", "Manchester", "Liverpool", "Bristol"],
"Age": [20, 21, 19, 18],
"Marks": [0.9, 0.8, 0.7, 0.6],
"dob": pd.date_range("2020-02-24", periods=4, freq="T"),
"City2": ["London", "Manchester", "Liverpool", "Bristol"],
"dob3": pd.date_range("2020-02-24", periods=4, freq="T"),
"Age2": [20, 21, 19, 18],
}
df = pd.DataFrame(data)
return df
@pytest.fixture(scope="module")
def df_duplicate_features_with_na():
data = {
"Name": ["tom", "nick", "krish", "jack", np.nan],
"dob2": pd.date_range("2020-02-24", periods=5, freq="T"),
"City": ["London", "Manchester", "Liverpool", "Bristol", np.nan],
"Age": [20, 21, np.nan, 18, 34],
"Marks": [0.9, 0.8, 0.7, 0.6, 0.5],
"dob": pd.date_range("2020-02-24", periods=5, freq="T"),
"City2": ["London", "Manchester", "Liverpool", "Bristol", np.nan],
"dob3": pd.date_range("2020-02-24", periods=5, freq="T"),
"Age2": [20, 21, np.nan, 18, 34],
}
df = pd.DataFrame(data)
return df
@pytest.fixture(scope="module")
def df_duplicate_features_with_different_data_types():
data = {
"A": pd.Series([5.5] * 3).astype("float64"),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": pd.Series([1.0] * 3).astype("float32"),
"F": False,
"G": pd.Series([1] * 3, dtype="int8"),
}
df = pd.DataFrame(data)
return df
def test_drop_duplicates_features(df_duplicate_features):
transformer = DropDuplicateFeatures()
X = transformer.fit_transform(df_duplicate_features)
# expected result
df = pd.DataFrame(
{
"Name": ["tom", "nick", "krish", "jack"],
"dob2": pd.date_range("2020-02-24", periods=4, freq="T"),
"City": ["London", "Manchester", "Liverpool", "Bristol"],
"Age": [20, 21, 19, 18],
"Marks": [0.9, 0.8, 0.7, 0.6],
}
)
pd.testing.assert_frame_equal(X, df)
def test_fit_attributes(df_duplicate_features):
transformer = DropDuplicateFeatures()
transformer.fit(df_duplicate_features)
assert transformer.features_to_drop_ == {"dob", "dob3", "City2", "Age2"}
assert transformer.duplicated_feature_sets_ == [
{"dob", "dob2", "dob3"},
{"City", "City2"},
{"Age", "Age2"},
]
def test_with_df_with_na(df_duplicate_features_with_na):
transformer = DropDuplicateFeatures()
X = transformer.fit_transform(df_duplicate_features_with_na)
# expected result
df = pd.DataFrame(
{
"Name": ["tom", "nick", "krish", "jack", np.nan],
"dob2": pd.date_range("2020-02-24", periods=5, freq="T"),
"City": ["London", "Manchester", "Liverpool", "Bristol", np.nan],
"Age": [20, 21, np.nan, 18, 34],
"Marks": [0.9, 0.8, 0.7, 0.6, 0.5],
}
)
pd.testing.assert_frame_equal(X, df)
assert transformer.features_to_drop_ == {"dob", "dob3", "City2", "Age2"}
assert transformer.duplicated_feature_sets_ == [
{"dob", "dob2", "dob3"},
{"City", "City2"},
{"Age", "Age2"},
]
def test_with_different_data_types(df_duplicate_features_with_different_data_types):
transformer = DropDuplicateFeatures()
X = transformer.fit_transform(df_duplicate_features_with_different_data_types)
df = pd.DataFrame(
{
"A": pd.Series([5.5] * 3).astype("float64"),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"F": False,
}
)
pd.testing.assert_frame_equal(X, df)
|
[
"noreply@github.com"
] |
noreply@github.com
|
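Outside the test suite the transformer works the same way; a minimal sketch with a toy frame (column names are arbitrary):

import pandas as pd
from feature_engine.selection import DropDuplicateFeatures

df = pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [0, 1, 0]})
dedup = DropDuplicateFeatures().fit(df)
print(dedup.features_to_drop_)               # {'b'}
print(dedup.transform(df).columns.tolist())  # ['a', 'c']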
15995f5051114e73ac2f6967098ef13f81fb0bf6
|
934d6deb551b03a657e5fb56ff12ef45ed062752
|
/util/parse_snp_pos.py
|
84848d0d32fac4f55b6474d4cc5d0961561feda3
|
[] |
no_license
|
ChuShin/bioinfo
|
dd3bd6ccad48f7bab4a73cd3ce094c428b4c75bd
|
ed1171af31fc96a445b2d8ce5fae2c30724e4e66
|
refs/heads/master
| 2021-12-24T08:10:42.388544
| 2021-12-10T18:32:34
| 2021-12-10T18:32:34
| 30,745,191
| 2
| 0
| null | 2015-07-16T01:57:20
| 2015-02-13T07:22:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,027
|
py
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import argparse
import sys
import pysam
"""parse_snp_pos.py: Calculate where a base is located on the reference
given read alignment (SAM) file and base positions(BED) on the reads, ."""
HI_CONF = 95.0
def load_pos_in_bed_file(pos_file):
"""
load_pos_in_bed_file
:param pos_file: input bed filename
"""
queries = {}
with open(pos_file, 'r') as infile:
for line in infile:
q = line.strip().split('\t')
try:
queries[q[0]] = [q[1], q[2]]
except IndexError:
pass
return queries
def get_confidence_level(perc):
if perc >= HI_CONF:
return '100'
elif perc < HI_CONF:
return '50'
def parse_sam(input_sam_file, output_file, queries):
    bam_file = pysam.Samfile(input_sam_file, "rb")
gff_file = open(output_file, 'w')
log_file = open(output_file+'.log', 'w')
for read in bam_file:
if not read.is_unmapped: #if it's mapped
pos_on_query, alleles = queries[read.query_name]
pos_on_query = int(pos_on_query) - 1 # pysam is zero-based
ref_positions = read.get_reference_positions(full_length=True)
strand = '+'
if read.is_reverse:
pos_on_query = read.query_length - pos_on_query - 1
strand = '-'
ref_snp_pos = ref_positions[pos_on_query]
prop_aligned = (( len(read.get_reference_positions()) -
read.get_tag('NM')) / read.query_length ) * 100
            if ref_snp_pos is not None:
## gff output 1-based
ref_snp_pos = ref_snp_pos + 1
gff_line = "%s\tUofS\tmarker\t%d\t%d\t%s\t%s\t.\t%s\n" %(
bam_file.getrname(read.reference_id), ref_snp_pos,
ref_snp_pos, get_confidence_level(prop_aligned), strand,
'Name='+read.query_name+';Note=PID:'+'%.2f' % prop_aligned)
gff_file.write(gff_line)
            elif ref_snp_pos is None:
log_line = "[No SNP position error]\t%s\t%s\t%s\t%s\t%s\n" %(
read.query_name, pos_on_query, alleles,
bam_file.getrname(read.reference_id), read.cigarstring)
log_file.write(log_line)
## log_file.write(str(ref_positions)+'\n') ## debug
def main():
parser = argparse.ArgumentParser(
description="Given a read alignment file and a base position on reads, "
"produce a GFF file of where each base pos is located on the reference.")
parser.add_argument('-b', '--base_pos_info_file', type=str)
parser.add_argument('-o', '--output_file', type=str)
parser.add_argument('input_sam_file', type=str)
args = parser.parse_args()
queries = load_pos_in_bed_file(args.base_pos_info_file)
parse_sam(args.input_sam_file, args.output_file, queries)
if __name__ == "__main__":
main()
|
[
"ChuShin.Koh@gmail.com"
] |
ChuShin.Koh@gmail.com
|
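A hypothetical invocation matching the argparse flags defined in main() (all file names are placeholders):

python parse_snp_pos.py -b read_positions.bed -o markers.gff aligned_reads.bam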
f7322bfe24f366e1da7e22987d6cb7ed70e9b213
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-SecurityInterface/PyObjCTest/test_sfchooseidentitypanel.py
|
128a8fe463da4b1756ea2b16a7730993712ab6e7
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
from PyObjCTools.TestSupport import *
import SecurityInterface
class TestSFChooseIdentityPanelHelper (SecurityInterface.NSObject):
def chooseIdentityPanelShowHelp_(self, v): return 1
class TestSFChooseIdentityPanel (TestCase):
def test_classes(self):
SecurityInterface.SFChooseIdentityPanel
def test_methods(self):
self.assertArgIsSEL(SecurityInterface.SFChooseIdentityPanel.beginSheetForWindow_modalDelegate_didEndSelector_contextInfo_identities_message_, 2, b'v@:@'+objc._C_NSInteger+b'^v')
self.assertArgIsBOOL(SecurityInterface.SFChooseIdentityPanel.setShowsHelp_, 0)
self.assertResultIsBOOL(SecurityInterface.SFChooseIdentityPanel.showsHelp)
self.assertResultIsBOOL(TestSFChooseIdentityPanelHelper.chooseIdentityPanelShowHelp_)
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
d3159cc0e9ff7137a95d0711e6f9b502070a0fda
|
0667af1539008f9c6c0dcde2d3f50e8bbccf97f3
|
/source/rttov_test/profile-datasets-py/div52_zen50deg/036.py
|
a78064452fb7611b980d7bf251e9df26e12cb8da
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bucricket/projectMAScorrection
|
bc6b90f07c34bf3e922225b2c7bd680955f901ed
|
89489026c8e247ec7c364e537798e766331fe569
|
refs/heads/master
| 2021-01-22T03:54:21.557485
| 2019-03-10T01:47:32
| 2019-03-10T01:47:32
| 81,468,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,625
|
py
|
"""
Profile ../profile-datasets-py/div52_zen50deg/036.py
file automatically created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div52_zen50deg/036.py"
self["Q"] = numpy.array([ 1.60776800e+00, 4.99817300e+00, 4.07414300e+00,
6.67667600e+00, 8.13295200e+00, 7.49870800e+00,
4.95508500e+00, 7.24563000e+00, 5.96519400e+00,
4.81720300e+00, 5.67166500e+00, 5.44611200e+00,
5.89519200e+00, 3.89188700e+00, 4.59972100e+00,
4.77842400e+00, 4.27284700e+00, 4.33680400e+00,
4.22628600e+00, 3.86177400e+00, 4.34807400e+00,
4.12327700e+00, 3.82283400e+00, 4.05055800e+00,
4.04113600e+00, 3.89531200e+00, 4.07803400e+00,
4.26075700e+00, 3.93224200e+00, 3.64382500e+00,
3.69615800e+00, 3.74678700e+00, 3.58848600e+00,
3.42693800e+00, 3.36090700e+00, 3.33667800e+00,
3.40327200e+00, 3.65646200e+00, 3.90278800e+00,
3.68729900e+00, 3.43068400e+00, 3.27563100e+00,
3.23161100e+00, 3.23582300e+00, 5.48813900e+00,
7.68749100e+00, 1.12740300e+01, 1.58383200e+01,
1.87049700e+01, 1.11524200e+01, 3.76257500e+00,
3.06379300e+01, 7.11222300e+01, 8.68246900e+01,
4.55990700e+01, 5.19350000e+00, 1.12110900e+02,
2.31250900e+02, 2.15328600e+02, 9.40471000e+01,
2.87742000e+00, 7.10424400e+00, 1.12544100e+01,
2.41607900e+02, 4.85529300e+02, 1.16292500e+03,
2.05753200e+03, 2.78321900e+03, 3.28114300e+03,
3.78251900e+03, 4.31976100e+03, 4.85350100e+03,
5.47782800e+03, 6.09185800e+03, 6.74157700e+03,
7.38341400e+03, 8.12011400e+03, 8.85724900e+03,
9.79029700e+03, 1.07286700e+04, 1.20882700e+04,
1.35251300e+04, 1.54343700e+04, 1.68775400e+04,
1.76879400e+04, 1.85843900e+04, 1.95199900e+04,
2.05302800e+04, 2.16585600e+04, 2.29002800e+04,
2.42792600e+04, 2.57725100e+04, 2.74135100e+04,
2.90645600e+04, 3.04254300e+04, 3.19978400e+04,
3.40990700e+04, 3.55844500e+04, 3.46182900e+04,
3.36907600e+04, 3.27995700e+04])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56504000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61259000e+01, 6.09895000e+01, 6.61252000e+01,
7.15398000e+01, 7.72395000e+01, 8.32310000e+01,
8.95203000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17777000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23441000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90892000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53627000e+02, 7.77789000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31523000e+02, 9.58591000e+02,
9.86066000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 341.2305, 341.2293, 341.2296, 341.2287, 341.2282, 341.2284,
341.2293, 341.2285, 341.229 , 341.2294, 341.2291, 341.2291,
341.229 , 341.2297, 341.2294, 341.2294, 341.2295, 341.2295,
341.2296, 341.2297, 341.2295, 341.2296, 341.2297, 341.2296,
341.2296, 341.2297, 341.2296, 341.2295, 341.2297, 341.2298,
341.2297, 341.8187, 342.4488, 343.1208, 343.8348, 344.5929,
345.3958, 346.2457, 347.1426, 348.0887, 349.0838, 350.1309,
351.2299, 351.2299, 351.2291, 351.2283, 351.227 , 351.2254,
351.2244, 351.2271, 351.2297, 351.2202, 351.206 , 351.2005,
351.215 , 351.2292, 351.1916, 351.1498, 351.1554, 351.198 ,
351.23 , 351.2285, 351.227 , 351.1461, 351.0605, 350.8225,
350.5083, 350.2534, 350.0786, 349.9025, 349.7138, 349.5263,
349.307 , 349.0914, 348.8631, 348.6377, 348.379 , 348.1201,
347.7923, 347.4628, 346.9852, 346.4806, 345.81 , 345.3031,
345.0184, 344.7036, 344.375 , 344.0201, 343.6238, 343.1877,
342.7034, 342.1789, 341.6025, 341.0226, 340.5446, 339.9924,
339.2543, 338.7326, 339.072 , 339.3978, 339.7108])
self["T"] = numpy.array([ 172.975, 188.884, 204.715, 216.336, 228.403, 234.187,
241.954, 252.632, 263.577, 265.794, 263.43 , 259.566,
253.694, 249.53 , 247.304, 243.998, 242.007, 241.244,
238.968, 234.894, 232.906, 229.919, 226.865, 224.546,
223.304, 222.7 , 223.263, 223.76 , 221.243, 218.834,
216.745, 214.724, 212.733, 210.802, 209.159, 207.665,
206.16 , 204.582, 203.048, 200.011, 196.896, 194.45 ,
192.73 , 191.145, 193.967, 196.723, 199.161, 201.357,
203.588, 206.319, 208.992, 211.874, 214.808, 217.411,
219.322, 221.195, 223.676, 226.172, 230.264, 235.587,
239.854, 240.822, 241.772, 243.613, 245.492, 247.817,
250.353, 252.693, 254.779, 256.778, 258.546, 260.285,
261.97 , 263.628, 265.182, 266.706, 268.272, 269.821,
271.562, 273.297, 275.39 , 277.511, 279.901, 281.829,
283.132, 284.507, 285.898, 287.291, 288.692, 290.073,
291.423, 292.803, 294.252, 295.567, 296.707, 297.862,
299.18 , 300.86 , 300.86 , 300.86 , 300.86 ])
self["O3"] = numpy.array([ 5.01755700e-02, 1.15813100e-01, 2.47679900e-01,
4.75340700e-01, 9.45566100e-01, 1.46851900e+00,
1.87795000e+00, 2.17465400e+00, 2.43277600e+00,
2.92066800e+00, 3.57626700e+00, 4.40444000e+00,
5.45986700e+00, 6.33738900e+00, 7.10775900e+00,
7.82629700e+00, 8.43338300e+00, 8.94155800e+00,
9.35094900e+00, 9.64592600e+00, 9.70494000e+00,
9.62606900e+00, 9.43127400e+00, 9.00999400e+00,
8.35107300e+00, 7.56506600e+00, 6.81864500e+00,
6.09690800e+00, 5.33800900e+00, 4.59311800e+00,
3.73551900e+00, 2.90568800e+00, 2.18988100e+00,
1.49945100e+00, 1.05707600e+00, 7.27611500e-01,
4.60667100e-01, 3.12258400e-01, 1.67879100e-01,
1.23013600e-01, 8.91367700e-02, 7.08934100e-02,
6.97595400e-02, 6.86654900e-02, 6.81548200e-02,
6.76556200e-02, 6.54202300e-02, 6.19483600e-02,
5.86753800e-02, 5.62659800e-02, 5.39084300e-02,
5.14477100e-02, 4.89749200e-02, 4.62010700e-02,
4.26514900e-02, 3.91723600e-02, 3.73109700e-02,
3.56372300e-02, 3.45162900e-02, 3.38305000e-02,
3.31925400e-02, 3.26846700e-02, 3.21860900e-02,
3.17491200e-02, 3.13242300e-02, 3.09235200e-02,
3.05388900e-02, 3.01843900e-02, 2.98684600e-02,
2.95764800e-02, 2.93577500e-02, 2.91488800e-02,
2.90530000e-02, 2.89587700e-02, 2.90996900e-02,
2.92517800e-02, 2.96474400e-02, 3.00649500e-02,
3.06183500e-02, 3.11761600e-02, 3.16856500e-02,
3.21589500e-02, 3.24871200e-02, 3.27531600e-02,
3.29365300e-02, 3.29429600e-02, 3.28500800e-02,
3.25754900e-02, 3.20801900e-02, 3.13033800e-02,
3.01042200e-02, 2.82785300e-02, 2.58376700e-02,
2.26970500e-02, 1.84285000e-02, 1.32521800e-02,
7.32499400e-03, 3.38092700e-03, 3.38431400e-03,
3.38756600e-03, 3.39069000e-03])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 300.86
self["S2M"]["Q"] = 35766.26576
self["S2M"]["O"] = 0.00338028981898
self["S2M"]["P"] = 1008.79
self["S2M"]["U"] = -2.07465
self["S2M"]["V"] = -6.63631
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 302.671
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 50.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = -8.40953
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([1993, 5, 1])
self["TIME"] = numpy.array([12, 0, 0])
|
[
"bucricket@gmail.com"
] |
bucricket@gmail.com
|
8c3c13cc482d32dd464ff37915995f1722df6615
|
4420e702da9a18462a33374bfe9f28b6a60f5963
|
/shapes.py
|
f2eeb8be080aaed6dfb4e62e736f505bedb9c030
|
[] |
no_license
|
NickMateus11/Wireframe-Cube-Rotation
|
15ddc66edd51f725d0eebc59c48cf0386a240a6a
|
9aa1affcde3106304160795446c1d028856175a9
|
refs/heads/main
| 2023-04-21T20:49:32.306623
| 2021-05-15T19:05:24
| 2021-05-15T19:05:24
| 367,680,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,892
|
py
|
import pygame
import numpy as np
screen = None
BLACK = pygame.Color("black")
WHITE = pygame.Color("white")
GREY = pygame.Color("grey")
def init(surface):
global screen
screen = surface
class Cube():
def __init__(self, center_pos, s):
self.s = s
if len(center_pos) < 3:
center_pos = center_pos + (0,)
self.c = np.array(center_pos)
self.diag = np.sqrt(3*s**2)
self.points = np.array([
(self.c + ( self.s/2, self.s/2, self.s/2)),
(self.c + ( self.s/2, self.s/2, -self.s/2)),
(self.c + ( self.s/2, -self.s/2, -self.s/2)),
(self.c + ( self.s/2, -self.s/2, self.s/2)),
(self.c + (-self.s/2, self.s/2, self.s/2)),
(self.c + (-self.s/2, self.s/2, -self.s/2)),
(self.c + (-self.s/2, -self.s/2, -self.s/2)),
(self.c + (-self.s/2, -self.s/2, self.s/2)),
])
def draw(self, draw_face=False):
projected_points = [p[:2] for p in self.points]
z_avg1 = np.average(self.points[:4][:,2])
color1 = ((z_avg1+self.s/2)*225/self.s,)*3
z_avg2 = np.average(self.points[4:][:,2])
color2 = ((z_avg2+self.s/2)*225/self.s,)*3
if z_avg1 < 0:
if draw_face:
pygame.draw.polygon(screen, color1, projected_points[:4])
for p in projected_points[:4]:
pygame.draw.circle(screen, GREY, p, self.s//20)
if z_avg2 < 0:
if draw_face:
pygame.draw.polygon(screen, color2, projected_points[4:])
for p in projected_points[4:]:
pygame.draw.circle(screen, GREY, p, self.s//20)
pygame.draw.lines(screen, GREY, True, projected_points[:4])
pygame.draw.lines(screen, GREY, True, projected_points[4:])
pygame.draw.lines(screen, GREY, True, projected_points[:4:3] + projected_points[4::3][::-1])
pygame.draw.lines(screen, GREY, True, projected_points[1:3] + projected_points[5:7][::-1])
if z_avg1 >= 0:
if draw_face:
pygame.draw.polygon(screen, color1, projected_points[:4])
for p in projected_points[:4]:
pygame.draw.circle(screen, GREY, p, self.s//20)
if z_avg2 >= 0:
if draw_face:
pygame.draw.polygon(screen, color2, projected_points[4:])
for p in projected_points[4:]:
pygame.draw.circle(screen, GREY, p, self.s//20)
def rotate(self, pitch=0, roll=0, yaw=0):
if pitch:
theta = np.deg2rad(-pitch)
r_mat = np.array([
[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]
])
self.points = np.array([self.c - np.matmul(r_mat, (self.c - p).T) for p in self.points])
if roll:
theta = np.deg2rad(-roll)
r_mat = np.array([
[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]
])
self.points = np.array([self.c - np.matmul(r_mat, (self.c - p).T) for p in self.points])
if yaw:
theta = np.deg2rad(-yaw)
r_mat = np.array([
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]
])
self.points = np.array([self.c - np.matmul(r_mat, (self.c - p).T) for p in self.points])
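# A minimal usage sketch (hypothetical window size and rotation rates):
#   pygame.init()
#   init(pygame.display.set_mode((640, 480)))
#   cube = Cube((320, 240), 100)
#   # per frame:
#   cube.rotate(pitch=1, yaw=2)
#   cube.draw(draw_face=True)
#   pygame.display.flip()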
class Prism():
def __init__(self, f1, f2):
if not (len(f1.points) == len(f2.points)):
raise IndexError
self.face1 = f1
self.face2 = f2
def draw(self):
self.face2.draw()
self.face1.draw()
for i in range(len(self.face1.points)):
p1x,p1y = self.face1.points[i]
p2x,p2y = self.face2.points[i]
pygame.draw.line(screen, WHITE, (p1x,p1y), (p2x,p2y), width=2)
def rotate(self, deg):
self.face1.rotate(deg)
self.face2.rotate(deg)
class Rect():
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
self.centre = (x+w/2,y+h/2)
self.points = [(x,y), (x+w,y), (x+w,y+h), (x,y+h)]
def draw(self):
pygame.draw.lines(screen, WHITE, True, self.points, width=2)
for p in self.points:
pygame.draw.circle(screen, WHITE, p, radius=min(self.w,self.h)//20)
def rotate(self, deg):
x0,y0 = self.centre
for i in range(len(self.points)):
x1,y1 = self.points[i]
r = np.sqrt((x1-x0)**2 + (y1-y0)**2)
phi = np.arctan2((y1-y0),(x1-x0)) + np.deg2rad(deg)
self.points[i] = (x0 + r*np.cos(phi), y0 + r*np.sin(phi))
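# Rect and Prism can be combined into a flat wireframe "prism" (hypothetical sizes):
#   front = Rect(100, 100, 80, 80)
#   back = Rect(140, 140, 80, 80)
#   prism = Prism(front, back)
#   prism.rotate(15)
#   prism.draw()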
|
[
"mateus.nicholas@gmail.com"
] |
mateus.nicholas@gmail.com
|
b13dbaba2116bbe5a4c5edca8e578cbcee9beada
|
a07906f5b93cb01e74ca90fdd0f8411787dfbc96
|
/py_sandbox/requests_sandbox.py
|
786edc0164ee2dfd26cc80367ec9923a2e825f65
|
[] |
no_license
|
a22wong/sandbox
|
7ff9b747c75ee9bdbe8e1035f8a293a4089523bf
|
d666ea7b95c6e8f001f828082703bdfec907c242
|
refs/heads/master
| 2020-12-02T03:55:33.004303
| 2019-12-30T08:43:48
| 2019-12-30T08:43:48
| 230,880,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
import requests
import json
# url = "https://en.wikipedia.org/w/api.php"
# querystring = {"action":"query","format":"json","prop":"description","list":"search","srsearch":"accenture","srlimit":"1","utf8":"1","formatversion":"2"}
# payload = ""
# headers = {
# 'Accept': "application/json",
# 'cache-control': "no-cache",
# 'Postman-Token': "03624171-cc8f-462b-bd89-c57174d857d3"
# }
# response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
# print(response.text)
# response_json = json.loads(response.text)
# response_json['query']['search'][0]['snippet']
# def build_url(args):
# url = ""
# for arg in args:
# url += arg
# return url
# url = build_url([
# "base/",
# "relative_path"
# ])
# print(url)
|
[
"alexander.wong@accenture.com"
] |
alexander.wong@accenture.com
|
31ed6c30bfa05d19e3016c09f17692f3adb24092
|
ed80f898ae27ade7be78163b80ee8f294f147301
|
/src/algo.py
|
87b2be19c0d04fdde2e303498609003928a1c2a1
|
[] |
no_license
|
Dec1mo/Routing-Robots
|
c82a388b69ad869e700d786181bf11cf536889bf
|
df9e60b6d61d065996f4b05b0642e029900c1b16
|
refs/heads/master
| 2020-06-20T12:22:26.155767
| 2019-07-17T11:19:05
| 2019-07-17T11:19:05
| 197,120,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,058
|
py
|
import queue
INFINITY = 99999999
def manhattan_distance(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])
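# Example: manhattan_distance((0, 0), (2, 3)) == 2 + 3 == 5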
class Algo():
def __init__(self, warehouse):
self.warehouse = warehouse
def find_dest(self, requests):
for request in requests:
min_dis = INFINITY
goal_pos = list(request.keys())[0]
if goal_pos in self.warehouse.goods:
#Distances
last_pos = None
                for robot_pos, robot_stat in self.warehouse.robots.items():
                    if robot_stat is None:
                        if manhattan_distance(robot_pos, goal_pos) < min_dis:
                            min_dis = manhattan_distance(robot_pos, goal_pos)
                            if last_pos is not None:
                                self.warehouse.robots[last_pos] = None
                            self.warehouse.robots[robot_pos] = request
                            last_pos = robot_pos
#requests.remove(request)
#Need some ways to remove done requests
    def small_BFS(self, start, goal, mode=0):
        # mode = 0 -> have not picked up goods
        # mode = 1 -> picked up goods
        if mode == 0:
            self.warehouse.grid.walls = [x for x in self.warehouse.robots.keys() if x not in self.warehouse.goals]
        elif mode == 1:
            self.warehouse.grid.walls = [x for x in (list(self.warehouse.robots.keys()) + list(self.warehouse.goods)) if x not in self.warehouse.goals]
'''
print (type)
print ('walls = ', self.warehouse.grid.walls)
'''
frontier = queue.Queue()
frontier.put(start)
came_from = {}
came_from[start] = None
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in self.warehouse.grid.neighbors(current):
if next not in came_from:
frontier.put(next)
came_from[next] = current
current = goal
path = []
while current != start:
path.append(current)
current = came_from[current]
if not path:
return None
else:
return path[-1]
def move(self, robots_next_mov, goods_next_mov):
updated_robots = {}
updated_goods = {}
remaining_robots = {}
remaining_goods = {}
for here, next in robots_next_mov.items():
# print ('here = ', here)
# print ('update = ', updated_robots)
if next not in updated_robots:
updated_robots[next] = here
self.warehouse.robots[next] = self.warehouse.robots.pop(here)
else:
remaining_robots[here] = next
# print ('update = ', updated_robots)
for here, next in goods_next_mov.items():
if next not in updated_goods:
updated_goods[next] = here
# print ('here = ', here)
# print ('next = ', next)
# print ('goods = ', self.warehouse.goods)
self.warehouse.goods.remove(here)
self.warehouse.goods.add(next)
# print ('goods = ', self.warehouse.goods)
for robot, request in self.warehouse.robots.items():
if request != None:
if here in request.keys():
dest2 = list(request.items())[0][1]
new_dest = {}
new_dest[next] = dest2
self.warehouse.robots[robot] = new_dest
else:
remaining_goods[here] = next
return remaining_robots, remaining_goods
def is_covered(self):
for robot, dest in self.warehouse.robots.items():
if dest != None:
return False
return True
def BFS(self, requests):
'''
        Note: this is just an approximate algorithm - not too good.
        A more effective algorithm is needed here.
Known things: Robots' positions, Goods' positions and Requests
'''
self.find_dest(requests) # Changes directly to self.warehouse.robots
robots_next_mov = {}
goods_next_mov = {}
one_state_robots = [robot for robot in self.warehouse.robots.keys()]
one_state_goods = [good for good in self.warehouse.goods]
robots_states = [one_state_robots]
goods_states = [one_state_goods]
while True:
for robot, dest in list(self.warehouse.robots.items()):
# (r1,r2):{(d11,d12), (d21, d22)}
# robot 1st_dest 2nd_dest
if dest != None:
dest1, dest2 = list(dest.items())[0]
next_mov = None
if robot == dest1 and robot == dest2:
print ('Done picked for positions = ', robot)
print ("It's time for you to rest my loyal robot ", robot)
self.warehouse.robots.pop(robot)
print ("And you too, some goods, your journey has ended!")
self.warehouse.goods.remove(robot)
elif robot == dest1 and robot != dest2: # Picked goods
next_mov = self.small_BFS(robot, dest2, 1)
if next_mov != None:
robots_next_mov[robot] = next_mov
goods_next_mov[robot] = next_mov
else:
next_mov = self.small_BFS(robot, dest1, 0)
if next_mov != None:
robots_next_mov[robot] = next_mov
robots_next_mov, goods_next_mov = self.move(robots_next_mov, goods_next_mov)
one_state_robots = [robot for robot in self.warehouse.robots.keys()]
one_state_goods = [good for good in self.warehouse.goods]
robots_states.append(one_state_robots)
goods_states.append(one_state_goods)
#print (self.warehouse.robots)
if self.is_covered():
break
'''
for goods_state in goods_states:
print (goods_state)
print ('len = ', len(goods_state))
'''
return robots_states, goods_states
|
[
"thai.dec1mo@gmail.com"
] |
thai.dec1mo@gmail.com
|
2230054695ed61325071dcdfbeabe36fae8ac99a
|
80656f239628f9b3a3c815cb18e972c15f41ea2a
|
/Python Path Planning/heightmap_planner.py
|
418ea6e48cbf32913826108a9d6fa08f9f493257
|
[] |
no_license
|
BoveyRao/Rover
|
c70cb4116f6d4956b9095cde3433d2ae77061b03
|
ab2b0cb23118822eb60d3b040798d514c1ab296e
|
refs/heads/master
| 2021-08-23T06:59:27.742160
| 2017-12-03T23:53:43
| 2017-12-03T23:53:43
| 112,981,297
| 0
| 0
| null | 2017-12-04T01:46:46
| 2017-12-04T01:46:45
| null |
UTF-8
|
Python
| false
| false
| 5,577
|
py
|
import util
import visualizer as vis
import copy
import math
# load heightmap file as list of [x, y, height]
inputfile = open('coordinates.txt')
lines = inputfile.read()[2:-2].split('), (')
height_map = dict()
for line in lines:
val = line.split(', ')
x = int(float(val[0])-0.5)
y = int(float(val[1])+0.5)
h = int(val[2])
height_map[(x, y)] = h
# visualization of landscape
size = int(math.sqrt(len(lines)))
data = vis.map_to_array(height_map, size)
with open("landscape.png","wb") as f:
f.write(vis.makeGrayPNG(data))
# scaling factor for distance in x, y, z
x_scale = 50
y_scale = 50
z_scale = 20
goal_state = (64, -64)
delta = {'N': (0, 1),
'S': (0, -1),
'E': (1, 0),
'W': (-1, 0),
'NW': (-1, 1),
'SW': (-1, -1),
'NE': (1, 1),
'SE': (1, -1),
'STOP': (0, 0)}
def getStartState():
return (0, 0) # middle of board
def goal(state):
return (state == goal_state)
def distance(state1, state2):
x1, y1 = state1
x2, y2 = state2
h1, h2 = height_map[state1], height_map[state2]
return math.sqrt(
(x_scale*(x1-x2))**2 + (y_scale*(y1-y2))**2 + (z_scale*(h1-h2))**2)
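# Example: for two horizontally adjacent cells at equal height the distance is
# sqrt((x_scale*1)**2) == 50; a height difference of 1 additionally contributes
# a z_scale*1 == 20 term inside the square root.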
# gives neighboring states as a list given robot's state
def getSuccessors(state):
result = []
actions = ['N', 'S', 'E', 'W', 'NW', 'SW', 'NE', 'SE']
x, y = state
for action in actions:
dx, dy = delta[action][0], delta[action][1]
new_state = (x + dx, y + dy)
if new_state in height_map:
result.append((new_state, action, distance(new_state, state)))
return result
def depthFirstSearch():
"""
Search the deepest nodes in the search tree first.
"""
state = getStartState()
nodes = util.Stack()
path = util.Stack()
nodes.push((state, path))
states_visited = set()
while True:
if nodes.isEmpty(): # no nodes to expand
return util.raiseNotDefined()
while state in states_visited: # expand nodes that aren't visited
(state, path) = nodes.pop()
if goal(state): # check if node is goal
return path.list
children = getSuccessors(state) # get children nodes
# create new path for each child
for child in children:
new_path = copy.deepcopy(path)
new_path.push(child[1])
nodes.push((child[0], new_path))
states_visited.add(state) # add expanded node as visited
# print depthFirstSearch()
def breadthFirstSearch():
"""Search the shallowest nodes in the search tree first."""
# similar to above except fringe is stored as Queue instead of Stack
state = getStartState()
nodes = util.Queue()
path = util.Stack()
nodes.push((state, path))
states_visited = set()
while True:
if nodes.isEmpty():
return util.raiseNotDefined()
while state in states_visited:
(state, path) = nodes.pop()
if goal(state):
return path.list
children = getSuccessors(state)
for child in children:
new_path = copy.deepcopy(path)
new_path.push(child[1])
nodes.push((child[0], new_path))
states_visited.add(state)
# print breadthFirstSearch()
def heuristic(node):
return distance(node[0], goal_state)
def uniformCostSearch():
"""Search the node of least total cost first."""
# give the cost of getting to the node
# def priority(node):
# return len(node[1].list)
    # similar to above except the fringe is stored as a PriorityQueue; note that
    # ordering by heuristic() makes this greedy best-first rather than true
    # uniform-cost search, since the accumulated path cost is not tracked here
state = getStartState()
nodes = util.PriorityQueueWithFunction(heuristic)
path = util.Stack()
nodes.push((state, path))
states_visited = set()
while True:
if nodes.isEmpty():
return util.raiseNotDefined()
while state in states_visited:
(state, path) = nodes.pop()
if goal(state):
return path.list
children = getSuccessors(state)
for child in children:
new_path = copy.deepcopy(path)
new_path.push(child[1])
nodes.push((child[0], new_path))
states_visited.add(state)
# print uniformCostSearch()
def aStarSearch():
"""Search the node that has the lowest combined cost and heuristic first."""
    # priority = accumulated path cost (g) plus heuristic estimate to the goal (h)
def priority(node):
h = heuristic(node)
def f1(x): return x[1]
g = sum(map(f1, node[1].list))
return h + g
# similar to above
state = getStartState()
nodes = util.PriorityQueueWithFunction(priority)
path = util.Stack()
nodes.push((state, path))
states_visited = set()
while True:
if nodes.isEmpty():
return util.raiseNotDefined()
while state in states_visited:
(state, path) = nodes.pop()
if goal(state):
def f0(x): return x[0]
return map(f0, path.list)
children = getSuccessors(state)
for child in children:
new_path = copy.deepcopy(path)
new_path.push((child[1], child[2]))
nodes.push((child[0], new_path))
states_visited.add(state)
# visualize path on landscape
# data = vis.map_to_array(height_map, size)
data_uc = copy.deepcopy(data)    # independent copies, so drawing one search's
data_astar = copy.deepcopy(data)  # path does not bleed into the other image
cur_state = getStartState()
actions = uniformCostSearch()
# actions = aStarSearch()
while actions:
    x, y = cur_state[0] + 64, 64 - cur_state[1]
    data_uc[y][x] = 255  # draw into the buffer matching the active search
    x, y = cur_state
    dx, dy = delta[actions.pop(0)]
    cur_state = (x + dx, y + dy)
with open("landscape_uc.png","wb") as f:
f.write(vis.makeGrayPNG(data_uc))
# with open("landscape_astar.png","wb") as f:
# f.write(vis.makeGrayPNG(data_astar))
|
[
"albertchien@Alberts-MBP-2.local.tld"
] |
albertchien@Alberts-MBP-2.local.tld
|
a86e68874b93cdcfc9b7faeb9e7b9c2c6d6080a7
|
73b8a5ade0e13b7ba5e6cc31c2b2f1eb29cf74e1
|
/servicionacidosvivos/manage.py
|
941363f3ffd6a5029996dfb3960cbc49c60e6121
|
[] |
no_license
|
jblandonsv/nacidos-vivos
|
d377af7c3f7f0fc12172803a85e5d6140c6a1f54
|
81ecbde6881dd6f87ec6e84b6dadb946f6b19be4
|
refs/heads/master
| 2020-05-27T08:58:33.736988
| 2013-07-12T16:43:11
| 2013-07-12T16:43:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "servicionacidosvivos.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"blandon.jaime@gmail.com"
] |
blandon.jaime@gmail.com
|
a463d23256ed3b7f0178434ea5256ff915ef0430
|
4bb1a23a62bf6dc83a107d4da8daefd9b383fc99
|
/work/abc034_d2.py
|
4afb3860d7f983c4de267f774fec7425d98c023d
|
[] |
no_license
|
takushi-m/atcoder-work
|
0aeea397c85173318497e08cb849efd459a9f6b6
|
f6769f0be9c085bde88129a1e9205fb817bb556a
|
refs/heads/master
| 2021-09-24T16:52:58.752112
| 2021-09-11T14:17:10
| 2021-09-11T14:17:10
| 144,509,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
n,k = map(int, input().split())
wpl = [list(map(int, input().split())) for _ in range(n)]
def f(w,p,x):
p = p/100
return p*w - w*x
def check(x):
l = [f(wpl[i][0], wpl[i][1], x) for i in range(n)]
l.sort(reverse=True)
return sum(l[:k])>=0
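# check(x) asks: can some choice of k items reach concentration >= x?
# Assuming w and p are an item's weight and concentration, f(w, p, x) is
# (solute amount) - x*(total weight), so the k largest terms sum to >= 0
# exactly when such a mixture exists; this monotone predicate is what the
# binary search below exploits.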
ok = 0
ng = 1
while abs(ng-ok)>10**-7:
mid = (ok+ng)/2
if check(mid):
ok = mid
else:
ng = mid
print(ok*100)
|
[
"takushi-m@users.noreply.github.com"
] |
takushi-m@users.noreply.github.com
|
38f612204aaf7a5bb92d2ddfc8514649d07bdcad
|
a73cc710aa370be94b70248f2268d9c3b14059d0
|
/server/src/weblab/core/web/quickadmin.py
|
1fba2f6ba62d2380539e03e37e0669230626b289
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
weblabdeusto/weblabdeusto
|
05692d4cc0a36287191544551d4a1113b3d95164
|
62e488afac04242a68efa4eb09fd91d7e999d4dd
|
refs/heads/master
| 2023-05-10T23:14:05.407266
| 2022-08-31T14:16:23
| 2022-08-31T14:16:23
| 5,719,299
| 19
| 23
|
BSD-2-Clause
| 2023-05-01T20:18:53
| 2012-09-07T16:24:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,417
|
py
|
from __future__ import print_function, unicode_literals
import datetime
import calendar
from flask import render_template, request, send_file, Response, url_for
from functools import wraps, partial
from weblab.core.web import weblab_api
from weblab.core.db import UsesQueryParams
def check_credentials(func):
@wraps(func)
def wrapper(*args, **kwargs):
expected_token = weblab_api.config.get('quickadmin_token', None)
if expected_token:
token = request.args.get('token')
if not token:
return Response("You must provide a token like ?token=something")
if token != expected_token:
return Response("Invalid token")
return func(*args, **kwargs)
return wrapper
def get_url_for():
existing_args = dict(request.args)
existing_args.pop('page', None)
my_url_for = partial(url_for, **existing_args)
if 'token' in request.args:
return partial(my_url_for, token = request.args['token'])
return my_url_for
def create_query_params(**kwargs):
params = {}
for potential_arg in 'login', 'experiment_name', 'category_name', 'ip', 'country':
if potential_arg in request.args:
params[potential_arg] = request.args[potential_arg]
for potential_arg in 'start_date', 'end_date':
if potential_arg in request.args:
try:
params[potential_arg] = datetime.datetime.strptime(request.args[potential_arg], "%Y-%m-%d").date()
except ValueError:
pass
for potential_arg in 'page',:
if potential_arg in request.args:
try:
params[potential_arg] = int(request.args[potential_arg])
except ValueError:
pass
if 'page' not in params or params['page'] <= 0:
params['page'] = 1
for potential_arg in 'date_precision',:
if potential_arg in request.args:
if request.args[potential_arg] in ('day', 'month', 'year', 'week'):
params[potential_arg] = request.args[potential_arg]
if 'date_precision' not in params:
params['date_precision'] = 'month'
params.update(kwargs)
query_params = UsesQueryParams(**params)
metadata = weblab_api.db.quickadmin_uses_metadata(query_params)
params['count'] = metadata['count']
if 'start_date' in params:
params['min_date'] = params['start_date']
else:
params['min_date'] = metadata['min_date']
if 'end_date' in params:
params['max_date'] = params['end_date']
else:
params['max_date'] = metadata['max_date']
return UsesQueryParams(**params)
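# A hypothetical request such as
#   /quickadmin/uses?login=jdoe&start_date=2020-01-01&page=2
# yields UsesQueryParams(login='jdoe', start_date=date(2020, 1, 1), page=2,
# date_precision='month'), plus count/min_date/max_date taken from the
# quickadmin_uses_metadata() call above.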
@weblab_api.route_web('/quickadmin/')
@check_credentials
def index():
return render_template("quickadmin/index.html", url_for = get_url_for())
LIMIT = 20
@weblab_api.route_web('/quickadmin/uses')
@check_credentials
def uses():
query_params = create_query_params()
uses = weblab_api.db.quickadmin_uses(LIMIT, query_params)
return render_template("quickadmin/uses.html", limit = LIMIT, uses = uses, filters = query_params.filterdict(), arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Uses', endpoint = '.uses')
@weblab_api.route_web('/quickadmin/use/<int:use_id>')
@check_credentials
def use(use_id):
return render_template("quickadmin/use.html", param_url_for = get_url_for(), **weblab_api.db.quickadmin_use(use_id = use_id))
@weblab_api.route_web('/quickadmin/file/<int:file_id>')
@check_credentials
def file(file_id):
file_path = weblab_api.db.quickadmin_filepath(file_id = file_id)
if file_path is None:
return "File not found", 404
return send_file(file_path, as_attachment = True)
@weblab_api.route_web('/quickadmin/uses/map')
@check_credentials
def uses_map():
query_params = create_query_params()
per_country = weblab_api.db.quickadmin_uses_per_country(query_params)
per_time = _per_country_by_to_d3(weblab_api.db.quickadmin_uses_per_country_by(query_params))
return render_template("quickadmin/uses_map.html", per_country = per_country, per_time = per_time, arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Uses map', endpoint = '.uses_map')
@weblab_api.route_web('/quickadmin/demos')
@check_credentials
def demos():
group_names = weblab_api.config.get_value('login_default_groups_for_external_users', [])
query_params = create_query_params(group_names = group_names)
uses = weblab_api.db.quickadmin_uses(LIMIT, query_params)
return render_template("quickadmin/uses.html", limit = LIMIT, uses = uses, arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Demo uses', endpoint = '.demos')
@weblab_api.route_web('/quickadmin/demos/map')
@check_credentials
def demos_map():
group_names = weblab_api.config.get_value('login_default_groups_for_external_users', [])
query_params = create_query_params(group_names = group_names)
per_country = weblab_api.db.quickadmin_uses_per_country(query_params)
per_time = _per_country_by_to_d3(weblab_api.db.quickadmin_uses_per_country_by(query_params))
return render_template("quickadmin/uses_map.html", per_country = per_country, per_time = per_time, arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Demo uses map', endpoint = '.demos_map')
def _per_country_by_to_d3(per_time):
new_per_time = [
# {
# key : country,
# values : [
# [
# time_in_milliseconds,
# value
# ]
# ]
# }
]
total_per_country = [
# (country, number)
]
for country in per_time:
total_per_country.append( (country, sum([ value for key, value in per_time[country] ]) ))
    total_per_country.sort(key=lambda x: x[1], reverse=True)
top_countries = [ country for country, value in total_per_country[:10] ]
max_value = max([value for country, value in total_per_country[:10] ] or [0])
key_used = 'month'
times_in_millis = {
# millis : datetime
}
for country in top_countries:
for key in [ key for key, value in per_time[country] ]:
if len(key) == 1:
if isinstance(key[0], datetime.date):
key_used = 'day'
date_key = key[0]
else:
key_used = 'year'
date_key = datetime.date(year = key[0], month = 1, day = 1)
elif len(key) == 2:
key_used = 'month'
date_key = datetime.date(year = key[0], month = key[1], day = 1)
else:
continue
time_in_millis = calendar.timegm(date_key.timetuple()) * 1000
times_in_millis[time_in_millis] = key
for country in per_time:
if country not in top_countries:
continue
country_data = {'key' : country, 'values' : []}
country_time_data = dict(per_time[country])
for time_in_millis in sorted(times_in_millis):
key = times_in_millis[time_in_millis]
value = country_time_data.get(key, 0)
country_data['values'].append([time_in_millis, value])
new_per_time.append(country_data)
return { 'key_used' : key_used, 'per_time' : new_per_time, 'max_value' : max_value}
|
[
"pablo.orduna@deusto.es"
] |
pablo.orduna@deusto.es
|
307afce7174d1f60914d4a08060660c34b82e628
|
794be26e4ab7bdd9af017ce1d0c6ce1f087d968d
|
/functional_tests/test_create.py
|
b0b5e0c8678060723a0834273615afdbe0ad3866
|
[
"Apache-2.0",
"LGPL-3.0-only"
] |
permissive
|
jasinner/elliott
|
02fcc2f67b56d4e16eef28f0323d276fbd954593
|
67d77913517d0f7954dc02d918eb96ba78ec1ea8
|
refs/heads/master
| 2021-06-18T19:59:45.878716
| 2021-04-29T21:33:51
| 2021-04-29T21:33:51
| 215,217,286
| 0
| 0
|
Apache-2.0
| 2019-10-15T05:52:13
| 2019-10-15T05:52:13
| null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
from __future__ import absolute_import, print_function, unicode_literals
import unittest
import subprocess
from functional_tests import constants
class CreateTestCase (unittest.TestCase):
def test_create_rhba(self):
out = subprocess.check_output(
constants.ELLIOTT_CMD
+ [
"--group=openshift-4.2", "create", "--type=RHBA", "--impetus=standard", "--kind=rpm",
"--date=2020-Jan-1", "--assigned-to=openshift-qe-errata@redhat.com", "--manager=vlaad@redhat.com", "--package-owner=lmeyer@redhat.com"
]
)
self.assertIn("Would have created advisory:", out.decode("utf-8"))
|
[
"yuxzhu@redhat.com"
] |
yuxzhu@redhat.com
|
c806ba4082228cc8c2b36ecdef168a9341dec251
|
647d3d4c32940ccbebf1c00fc0dbf74ec70ba8c2
|
/SoSR/Training/data/util.py
|
a84c825c5f85b2aa746cdfef07f8c5a4c7ab5199
|
[] |
no_license
|
AlanZhang1995/TwoStreamSR
|
e1f8b02e9d0cc680df9d297ce45929f61ef5ca59
|
6b3857075ccca6cd7b620c8ad580be494cba13e9
|
refs/heads/master
| 2020-04-08T15:20:19.231186
| 2020-02-11T05:12:54
| 2020-02-11T05:12:54
| 159,474,176
| 21
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,790
|
py
|
import os
import math
import pickle
import random
import numpy as np
import lmdb
import torch
import cv2
import logging
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
####################
# Files & IO
####################
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
    if os.path.isfile(keys_cache_file):
        logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
        with open(keys_cache_file, 'rb') as cache_file:
            keys = pickle.load(cache_file)
    else:
        with env.begin(write=False) as txn:
            logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
            keys = [key.decode('ascii') for key, _ in txn.cursor()]
        with open(keys_cache_file, 'wb') as cache_file:
            pickle.dump(keys, cache_file)
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def _read_lmdb_img_HR(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.float32)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path):
# read image by cv2 or from lmdb
# return: Numpy float32, HWC, BGR, [0,1]
if env is None: # img
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
else:
img = _read_lmdb_img(env, path)
img = img.astype(np.float32) / 255.
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
# some images have 4 channels
if img.shape[2] > 3:
img = img[:, :, :3]
return img
def read_img_HR(env, path):
# read image by cv2 or from lmdb
# return: Numpy float32, HWC, BGR, [0,1]
if env is None: # img
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
else:
img = _read_lmdb_img_HR(env, path)
# images and weight map totally have 5 channels
if img.shape[2] == 5:
img_grb = img[:, :, :3]
img_weight = img[:, :, 3:5]
img_grb = img_grb.astype(np.float32) / 255.
result= np.dstack((img_grb,img_weight))
elif img.shape[2] == 3:
result = img.astype(np.float32) / 255.
else:
raise NotImplementedError('Wrong input channel')
return result
####################
# image processing
# process on numpy image
####################
def augment(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
def _augment(img):
if hflip: img = img[:, ::-1, :]
if vflip: img = img[::-1, :, :]
if rot90: img = img.transpose(1, 0, 2)
return img
return [_augment(img) for img in img_list]
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)  # astype() returns a copy; assign it or the cast is lost
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True):
'''bgr version of rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def ycbcr2rgb(img):
'''same as matlab ycbcr2rgb
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
####################
# Functions
####################
# MATLAB 'imresize' function; currently only supports 'bicubic'
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
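# Note: the piecewise polynomial above is the Keys bicubic kernel with a = -0.5,
# the same kernel MATLAB's imresize uses, which is why the resized outputs are
# expected to match MATLAB's 'bicubic' mode.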
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias - larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
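    # e.g. for scale = 0.5 this reduces to u = 2*x - 0.5, so output pixel x = 1
    # is centred at input coordinate 1.5, matching the half-pixel convention above.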
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply cubic kernel
if (scale < 1) and (antialiasing):
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True):
# Now the scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
kernel_width = 4
kernel = 'cubic'
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
kernel_width = 4
kernel = 'cubic'
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
return out_2.numpy()
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)
|
[
"277212623@qq.com"
] |
277212623@qq.com
|
9c5f16cd2a1ba89806e4e52d73c3d4589d700dee
|
d5816b32da217392cfdca4aa07ca46529c3487e4
|
/invoices/migrations/0004_auto_20140930_0106.py
|
ea9ba813e442303aff699b0f70e25c5a39edc2d3
|
[] |
no_license
|
devthrashnull/hamventory
|
032102ab5ef744271ab94874a7df2a0bf4a09945
|
7508f48e7201743c0b0d3083c4bcfe772b721168
|
refs/heads/master
| 2016-09-05T12:01:38.494061
| 2014-09-30T01:20:08
| 2014-09-30T01:20:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('invoices', '0003_auto_20140930_0106'),
]
operations = [
migrations.AlterField(
model_name='item',
name='price',
field=models.DecimalField(null=True, max_digits=32, decimal_places=2, blank=True),
),
]
|
[
"fautz@jetastudio.com"
] |
fautz@jetastudio.com
|
edb3f6fcb237274c99a4d039bed953c644093ac5
|
c2e4ecc177c04e4672767da97da5d9f8c892e7fc
|
/littlenn/layers/dropout.py
|
74d7f24f3dcd70bc2d8233a99a4f868ab153993c
|
[
"MIT"
] |
permissive
|
qbeer/littlenn
|
828a313507dead5dea8fcade32453b912af8c7f0
|
e783cb5d713f5669449fa993b3001848c028309b
|
refs/heads/master
| 2021-11-05T01:53:11.632316
| 2021-10-28T18:15:59
| 2021-10-28T18:15:59
| 238,507,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
import numpy as np
from littlenn.layers.abstract_layer import Layer
class Dropout(Layer):
def __init__(self, keep_prob):
super(Dropout, self).__init__()
self.keep_prob = keep_prob
def _create_weights(self, dim_in):
self.dim_out = dim_in
def _init_optimizers(self, optimizer_factory, params):
pass
def _get_weights(self):
return self.W.reshape(-1, 1)
def _get_trainable_params(self):
return 0
def __call__(self, x, training):
if training:
self.W = (np.random.rand(x.shape[0], x.shape[1]) <= self.keep_prob) / self.keep_prob
act = self.W * x
else:
act = x
return act
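    # Inverted dropout: dividing the kept activations by keep_prob preserves the
    # expected value of the output, so no rescaling is needed at inference time
    # (the else branch above passes x through unchanged).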
def grads(self, grads):
dprev, *z = grads
return self.W * dprev
def _apply_grads(self, grads):
pass
def __str__(self):
return "Dropout : (keep_prob = %.1f)" % self.keep_prob
|
[
"olaralex@phys-gs.elte.hu"
] |
olaralex@phys-gs.elte.hu
|
b0d4e0f15425ad655928fbe37779c555a3728775
|
48d995e31da9cc2dd63a1cc7561a274cb32dc38f
|
/MyBlueButterfly/MyBlueButterfly/settings.py
|
f1529f583dd7e60763e510cb21091723a044065c
|
[] |
no_license
|
Blu3M0nkey/MyBlueButterfly
|
2fbeeff6ef1936a11d50ec096fc140879d4f269a
|
345823ebef4484729136a9fc7a9d0c7aba892717
|
refs/heads/master
| 2020-04-14T10:47:51.327237
| 2019-01-05T03:52:20
| 2019-01-05T03:52:20
| 163,796,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,323
|
py
|
"""
Django settings for MyBlueButterfly project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import posixpath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c4e18954-c68d-4c00-a6ed-1558e432c991'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'Main',
# Add your apps here to enable them
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyBlueButterfly.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MyBlueButterfly.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
|
[
"samedina86@gmail.com"
] |
samedina86@gmail.com
|
594eaa6cce6464e3ce1165188820b67175525a11
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/opsworks_write_f/rds-db-instance_update.py
|
3257b4557a10fe6a781cb6c9086bdfa3b85a8b86
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
deregister-rds-db-instance : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/opsworks/deregister-rds-db-instance.html
describe-rds-db-instances : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/opsworks/describe-rds-db-instances.html
register-rds-db-instance : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/opsworks/register-rds-db-instance.html
"""
write_parameter("opsworks", "update-rds-db-instance")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
061e1a704629d8949be1743454ac0c89316349fb
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc025/C/2618079.py
|
c2927f9e67892662b56299b7a9fff478e70376c2
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
n = int(input())
l = []
r = []
for i in range(n):
    lt, rt = map(int, input().split())
    l.append(lt)
    r.append(rt)
# sentinel interval at the origin, where the walk starts and ends
l.append(0)
r.append(0)
l.sort()
r.sort()
l.reverse()
ans = 0
i = 0
# greedily pair the largest left endpoints with the smallest right endpoints;
# each crossing pair costs a round trip of 2 * (l[i] - r[i])
while r[i] < l[i]:
    ans += 2 * (l[i] - r[i])
    i += 1
print(ans)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
e0b51af08de583fc6d2449bff3c69e61e59ce414
|
3f3f2b3eaab992d3cc8f49fcd03e4824a11fddab
|
/diamond.releng.jenkins/job.scripts/email_owners_of_submittable_changes.py
|
7bfd821419fa2722938f0131ed624a2ce5f2ba3e
|
[] |
no_license
|
DiamondLightSource/diamond-releng
|
7bff1926e3fd2f9df3c056d8af5521b4e74aaf41
|
ba15336e7f7d3c160d3c3bc28316817cb4585305
|
refs/heads/master
| 2021-01-25T03:19:25.403769
| 2019-01-02T16:05:28
| 2019-01-02T16:05:28
| 19,986,689
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,159
|
py
|
#!/usr/bin/env python3
###
### Requires Python 3
###
'''
Identify Gerrit changes that are ready to submit, and email the owners
'''
from email.message import EmailMessage
from email.headerregistry import Address
import datetime
import itertools
import json
import logging
import operator
import os
import os.path
import smtplib
import stat
import sys
import time
import urllib.request
import urllib.parse
import urllib.error
GERRIT_HOST = 'gerrit.diamond.ac.uk'
# define module-wide logging
logger = logging.getLogger(__name__)
def setup_logging():
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s", "%Y-%m-%d %H:%M:%S")
# create console handler
logging_console_handler = logging.StreamHandler()
logging_console_handler.setFormatter(formatter)
logger.addHandler(logging_console_handler)
logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
class SubmittableChangesProcessor():
def __init__(self):
setup_logging()
self.logger = logger
self.gerrit_url_base = 'https://' + GERRIT_HOST + '/' # when using the REST API, this is the base URL to use
self.gerrit_url_browser = self.gerrit_url_base # when generating links, this is the base URL to use
# since the Gerrit REST API has been secured, then we need to use basic authentication
self.gerrit_url_base += 'a/'
        handler = urllib.request.HTTPBasicAuthHandler()
        handler.add_password('Gerrit Code Review', self.gerrit_url_base, *self.get_gerrit_http_username_password())
        opener = urllib.request.build_opener(handler)
        urllib.request.install_opener(opener)
@staticmethod
def get_gerrit_http_username_password():
''' the token required to authenticate to Gerrit is stored in a file
the file, in addition to comment and empty lines, contains a single line of the format
username:password
'''
token_filename = os.path.abspath(os.path.expanduser('~/passwords/http-password_Gerrit_for-REST.txt'))
assert os.path.isfile(token_filename)
assert os.stat(token_filename).st_mode == stat.S_IRUSR + stat.S_IFREG # permissions must be user-read + regular-file
last_nonempty_line = ''
with open(token_filename, 'r') as token_file:
for line in token_file: # standard OS terminator is converted to \n
line = line.rstrip('\n') # remove trailing newline
if line:
last_nonempty_line = line
if last_nonempty_line:
return last_nonempty_line.split(':', 1)
raise Exception('File %s appears empty' % token_filename)
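    # Example of the expected token file contents (placeholder values only,
    # not real credentials):
    #
    #     # HTTP password generated in the Gerrit web UI
    #     jbloggs:GeneratedHttpPassword123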
def gerrit_REST_api(self, relative_url, accept404=False):
''' Call the Gerrit REST API
'''
url = self.gerrit_url_base + relative_url
request = urllib.request.Request(url, headers={'Accept': 'application/json', 'Accept-Charset': 'utf-8'}) # header specifies compact json, which is more efficient
self.logger.debug('gerrit_REST_api retrieving: %s' % (url,))
try:
rest_json = urllib.request.urlopen(request).read()
except (urllib.error.HTTPError) as err:
if accept404 and (err.code == 404):
self.logger.debug('Invalid response from Gerrit server reading %s: %s' % (url, err))
return None
self.logger.critical('Invalid response from Gerrit server reading %s: %s' % (url, err))
return None
gerrit_magic_prefix_line = b")]}'\n"
if not rest_json[:len(gerrit_magic_prefix_line)] == gerrit_magic_prefix_line:
self.logger.critical('Invalid response from Gerrit server reading %s: magic prefix line not found' % (url,))
return None
standard_json = json.loads(rest_json[len(gerrit_magic_prefix_line):].decode('utf-8')) # strip off the magic prefix line returned by Gerrit
# self.logger.debug(json.dumps(standard_json, indent=2))
return standard_json
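    # Illustration with a hypothetical payload: Gerrit prefixes every JSON
    # response with )]}' to defeat cross-site script inclusion, so a raw body of
    #     )]}'\n[{"_number": 42, "project": "demo"}]
    # is stripped down to [{"_number": 42, "project": "demo"}] before being
    # handed to json.loads() above.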
def get_submittable_changes_from_gerrit(self):
''' Queries Gerrit to get a list of ChangeInfo records for the changes that can be submitted
'''
url = 'changes/?q=%s&o=CURRENT_REVISION&o=DETAILED_ACCOUNTS' % (urllib.parse.quote('is:open label:Code-Review+2 label:Verified+1 NOT label:Code-Review-2 NOT label:Verified-1'),)
changeinfos = self.gerrit_REST_api(url)
longest_string = {}
longest_string['_number'] = max(itertools.chain((len(str(ci['_number'])) for ci in changeinfos), (len('Change'),)))
longest_string['project'] = max(itertools.chain((len(ci['project']) for ci in changeinfos), (len('Project'),)))
longest_string['branch'] = max(itertools.chain((len(ci['branch']) for ci in changeinfos), (len('Branch'),)))
longest_string['owner'] = max(itertools.chain((len(ci['owner']['name']) for ci in changeinfos), (len('Owner'),)))
format = ('%' + str(longest_string['_number']) + 's ' +
'%-' + str(longest_string['project']) + 's ' +
'%-' + str(longest_string['branch']) + 's ' +
'%-' + str(longest_string['owner']) + 's ' +
'%-16s ' + # for the time last updated
'%s\n') # for the subject
emails = set()
report = format % ('Change', 'Project', 'Branch', 'Owner', 'Updated', 'Subject')
# use a sort key that transforms Firstname.Lastname@example.com to lastname.firstname
for ci in sorted(changeinfos, key=lambda ci:
'.'.join(operator.itemgetter(2,0)(ci['owner']['email'].partition('@')[0].lower().partition('.'))) +
os.path.basename(ci['project'])): # there can be multiple changeinfos
report += format % (ci['_number'], ci['project'], ci['branch'], ci['owner']['name'], ci['updated'][:16], ci['subject'])
emails.add(ci['owner']['email'])
self.emails = sorted(emails)
self.report = report
return
def make_email(self):
body = 'Below is a list of changes in Gerrit that have been verified and reviewed, but are still waiting for the change owner to submit them' + \
', as of ' + time.strftime("%a, %Y/%m/%d %H:%M:%S %Z") + '.\n'
body += '''
PLEASE CONSIDER EITHER:
Submit your change, if you still want it
Abandon your change, if it is no longer required
'''
body += self.report
body += '\n<<end report>>\n'
# we are going to create an email message with ASCII characters, so convert any non-ASCII ones
# note that this is really a hack, we should be smarter about constructing an email message
body = body.replace("’", "'").replace('“', '"').replace('”', '"')
message = EmailMessage()
message['Subject'] = 'Report on Gerrit changes waiting for the owner to submit'
message['From'] = Address('Jenkins Build Server (Diamond Light Source)', 'gerrit-no-reply@diamond.ac.uk')
message['List-Id'] = 'Gerrit awaiting submit <gerrit-awaiting-submit.jenkins.diamond.ac.uk>'
# use a sort key that transforms Firstname.Lastname@example.com to lastname.firstname
message['To'] = [Address(addr_spec=committer) for committer in sorted(
self.emails,
key=lambda email_addr: '.'.join(operator.itemgetter(2,0)(email_addr.partition('@')[0].lower().partition('.')))
) if '@' in committer]
message['CC'] = ('matthew.webber@diamond.ac.uk',)
message.set_content(body)
email_expires_days = 5
if email_expires_days:
message['Expiry-Date'] = (datetime.datetime.utcnow() + datetime.timedelta(days=email_expires_days)).strftime("%a, %d %b %Y %H:%M:%S +0000")
self.logger.info("Sending email ...")
with smtplib.SMTP('localhost') as smtp:
smtp.send_message(message)
return message
if __name__ == '__main__':
scp = SubmittableChangesProcessor()
scp.get_submittable_changes_from_gerrit()
message = scp.make_email()
print(message)
sys.exit(0)
|
[
"matthew.webber@diamond.ac.uk"
] |
matthew.webber@diamond.ac.uk
|
0f8e80f05d97ef7fe563582b689f04d41f18e890
|
a2387d301fb7da453ecc84765debdcf42b1696d0
|
/ansible/modules/storage/netapp/vasa_storage_systems_remove.py
|
4012f037474ead09c8d8e4cd65e8537e7dd540d3
|
[
"Apache-2.0"
] |
permissive
|
sapcc/vasa
|
9730949ba7b6a830e1561f46882cebc1b428806f
|
acb6fd7e1e211f6040f94a535699e3af46e1e2b1
|
refs/heads/master
| 2022-07-17T09:26:26.829656
| 2019-06-12T09:34:15
| 2019-06-12T09:34:15
| 132,451,367
| 0
| 0
|
Apache-2.0
| 2022-04-22T12:29:49
| 2018-05-07T11:32:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author Hannes Ebelt <hannes.ebelt@sap.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
from ansible.module_utils.basic import AnsibleModule
from pyvasa.storage_systems import StorageSystems
from pyvasa.user_authentication import UserAuthentication
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
module: vasa_storage_systems_remove
short_description: storage systems of netapp vasa unified appliance
author: Hannes Ebelt (hannes.ebelt@sap.com)
description:
- remove storage system from netapp unified vasa appliance
options:
host:
description:
- The ip or name of the vasa unified appliance to manage.
required: true
port:
description:
- The port of the vasa unified appliance to manage.
required: false
default: '8143'
vc_user:
description:
- vcenter username
required: true
vc_password:
description:
- vcenter user password
required: true
cluster_id:
description:
- id of the storage cluster
required: true
'''
EXAMPLES = '''
- name: "remove storage system from netapp unified vasa appliance"
local_action:
module: vasa_storage_systems_remove
host: "{{ inventory_hostname }}"
port: "{{ appliance_port }}"
vc_user: "{{ vcenter_username }}"
vc_password: "{{ vcenter_password }}"
cluster_id: "{{ cluster_id }}"
'''
RETURN = '''
{
"responseMessage": "string",
"return_code": "int"
}
'''
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True, type='str'),
vc_user=dict(required=True, type='str'),
vc_password=dict(required=True, type='str', no_log='true'),
port=dict(required=False, default='8143'),
cluster_id=dict(required=True, type='str')
),
supports_check_mode=True
)
host = module.params['host']
port = module.params['port']
vc_user = module.params['vc_user']
vc_password = module.params['vc_password']
cluster_id = module.params['cluster_id']
result = dict(changed=False)
connect = UserAuthentication(
port=port,
url=host,
vcenter_user=vc_user,
vcenter_password=vc_password
)
token = connect.login()
token_id = token.get('vmwareApiSessionId')
if not token_id:
module.fail_json(msg="No Token!")
vp = StorageSystems(
port=port,
url=host,
token=token_id
)
res = vp.delete_storage_system(
controller_id=cluster_id
)
try:
if res['status_code'] == 200:
result.update(result=res)
result.update(changed=True)
else:
result.update(result=res)
result.update(changed=False)
result.update(failed=True)
    except Exception as e:
        module.fail_json(msg=str(e))
module.exit_json(**result)
if __name__ == '__main__':
    main()
|
[
"hannes.ebelt@sap.com"
] |
hannes.ebelt@sap.com
|
f7d5c61567a7f79b5b7b6781b04b1cfb36979cca
|
240f66f2d212532f778bf1d5ab00f78c9bce2d8e
|
/trumpplan/blog/apps.py
|
17f8fbd6fb3e740e2556d39bcf09573ab4fad961
|
[] |
no_license
|
ddfunt/trumpplan
|
317edad24d73edcfd1cdfc7ac73a8bb49d594a63
|
623ee2961095a6e3a3c5fd060102de40422f8c04
|
refs/heads/master
| 2021-06-07T20:22:37.535628
| 2016-12-08T02:23:26
| 2016-12-08T02:23:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class BlogConfig(AppConfig):
name = 'trumpplan.blog'
|
[
"matt.muckle@brightspec.com"
] |
matt.muckle@brightspec.com
|
62626f1793367e14eb35b3f0c5c118c92e7da3f2
|
0bd4ea611bcfb2ce5ab08889b97dd13dd5cbb0a5
|
/mysite/mysite/settings.py
|
ca603c9c966197e7f9d2c3b46f0da241055f72ed
|
[] |
no_license
|
sparsh0204/djangoblog
|
27ff316f370480b4ef15fb98bc39c32006c9c8e0
|
12f022d1b362c335e7c2889d9a59c1acb1184cbc
|
refs/heads/master
| 2021-01-25T11:57:41.575673
| 2017-06-23T17:48:23
| 2017-06-23T17:48:23
| 93,953,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,096
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^h(nf+(u@wmehr4a7bh*v*t4+6(vze#ttvf(dh=k4!^*m^g^gt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"sparshcode@gmail.com"
] |
sparshcode@gmail.com
|
2793957c54e342208e9431576ae1c201b305deef
|
6020701c2929eb0d5ace7aefd1aaa88853403830
|
/3-Looping/for1.py
|
dab13e9b60fafea1e48887e8def1fa9db9b4bbda
|
[] |
no_license
|
AnaTrzeciak/Curso_Python
|
e24d06f2d23de6943e14515e07f12f3eb380ad99
|
78c8528fed34f7eb3f5d735ebdecd07836e39631
|
refs/heads/master
| 2020-03-29T06:32:49.862308
| 2019-11-21T16:30:56
| 2019-11-21T16:30:56
| 149,629,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
# coding: utf-8
# Create a script that prints every day of the week on the screen
semana = ['segunda', 'terça', 'quarta', 'quinta', 'sexta', 'sabado', 'domingo']
for dia in semana:
print(dia)
|
[
"amgtrzeciak@gmail.com"
] |
amgtrzeciak@gmail.com
|
0004d354c35de36b3b80793b9bad3091b1cef4e3
|
8ffe9be9c89881fe85a74d80ae7154e7bbf9045e
|
/api/views.py
|
47e5c29f40a06ebaac5552b3266d7408b0e837b9
|
[] |
no_license
|
fabriccio27/djangorestframework-react
|
4c48ea117d4e99b1488d3a9d3565b0d72af1b7ae
|
eb541a157dfa7c93a842735cf22a6610f75f9111
|
refs/heads/main
| 2023-06-19T08:44:25.612174
| 2021-07-17T13:29:42
| 2021-07-17T13:29:42
| 381,804,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,602
|
py
|
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import TaskSerializer
from .models import Task
# Create your views here.
""" JsonResponse lo que hace es que el browser interprete lo que mando como un objeto JSON """
@api_view(["GET"])
def apiOverview(request):
api_urls = {
"List":"/task-list/",
"Detail View":"/task-detail/<str:pk>/",
"Create":"/task-create/",
"Update":"/task-update/<str:pk>/",
"Delete":"/task-delete/<str:pk>/"
}
# REST framework also introduces a Response object, which is a type of TemplateResponse that takes unrendered content
# and uses content negotiation to determine the correct content type to return to the client.
return Response(api_urls)
@api_view(["GET"])
def taskList(request):
tasks = Task.objects.order_by('-id')
serializer = TaskSerializer(tasks, many=True)
return Response(serializer.data)
# the serializer takes the models abstraction and turns it into something
# easily exchangeable, such as JSON. It also validates request.data against the model
@api_view(["GET"])
def taskDetail(request, pk):
tasks = Task.objects.get(id=pk)
""" aca podria haber usado get_object_or_404 """
serializer = TaskSerializer(tasks, many=False)
return Response(serializer.data)
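# Minimal sketch of the get_object_or_404 variant mentioned above (equivalent
# lookup, but it returns HTTP 404 instead of raising Task.DoesNotExist):
#
#     from django.shortcuts import get_object_or_404
#
#     @api_view(["GET"])
#     def taskDetail(request, pk):
#         task = get_object_or_404(Task, id=pk)
#         return Response(TaskSerializer(task).data)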
@api_view(["POST"])
def taskCreate(request):
    # using the api_view decorator for POST generates a browsable view with a form to fill in content
    # request.data is JSON; this is a feature of the framework
    # REST framework provides a new request object that simplifies access to the request data dict
    # previously one had to check whether the request was POST and then use request.POST
    serializer = TaskSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=400)
@api_view(["POST"])
def taskUpdate(request, pk):
    # passing the instance kwarg tells the serializer not to create a new object,
    # but to overwrite task/instance with whatever came in through data
    task = Task.objects.get(id=pk)
    serializer = TaskSerializer(instance=task, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=400)
@api_view(["DELETE"])
def taskDelete(request, pk):
task = Task.objects.get(id=pk)
task.delete()
return Response(f"Item {pk} was successfully deleted!")
|
[
"fabricio_inc27@hotmail.com"
] |
fabricio_inc27@hotmail.com
|
56e7042919359660f1204a7150332bfc1ea4c9ad
|
66320eda70f67cda9aa351a6f6bf5817a04b9e57
|
/uuv_docs/package.py
|
9543c3d06ded03a19660c7f68e5145d76a10ebaa
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
FangYuanFYO/uuv_simulator_docs
|
1789dd2d9266fbd0d253f22d767d70a7bd730353
|
2c4e2ca91c1b9381387659e5d742b2824dbd0215
|
refs/heads/master
| 2020-06-05T02:20:38.588402
| 2019-05-30T12:05:25
| 2019-05-30T12:05:25
| 192,280,057
| 1
| 0
|
Apache-2.0
| 2019-06-17T05:24:45
| 2019-06-17T05:24:44
| null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
# Copyright (c) 2016-2019 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xmltodict
import rospkg
class PackageConfigLoader:
def __init__(self, catkin_pkg, repository_name, git_root_url):
rp = rospkg.RosPack()
self._catkin_pkg = catkin_pkg
self._catkin_pkg_path = rp.get_path(catkin_pkg)
self._repository = repository_name
self._git_root_url = git_root_url
def update_data(self):
output = dict()
output.update(self._get_package_version())
return output
def _get_package_version(self):
with open(os.path.join(self._catkin_pkg_path, 'package.xml'), 'r') as package_file:
pkg_config = xmltodict.parse(package_file.read())
version = pkg_config['package']['version']
description = pkg_config['package']['description']
return dict(version=version, description=description)
def get_markdown(self):
output = self.update_data()
        output_str = '> Version: {}\n\n'.format(output['version'])
output_str += '> Link to the `{}` repository [here]({})\n\n'.format(
self._repository, self._git_root_url + '/' + self._repository)
output_str += '# Description\n\n'
output_str += output['description'] + '\n\n'
return output_str
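# Example of the markdown this produces (illustrative values only; the version
# and repository URL depend on the package and arguments passed in):
#
#     > Version: 0.6.13
#
#     > Link to the `uuv_simulator` repository [here](https://github.com/uuvsimulator/uuv_simulator)
#
#     # Description
#     ...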
|
[
"musa.marcusso@de.bosch.com"
] |
musa.marcusso@de.bosch.com
|
e1a24bee538f55419b12446f7f37bc4f25bc8e38
|
03c8d75d11dd34a253d265ce5b44bf7984311bab
|
/root2yoda
|
ddf417c19d865e0e7c684d83a4ebafd3e9738188
|
[] |
no_license
|
raggleton/QGAnalysisRIVET
|
e8a57fbfa1380e1c67365b0d5a944119f715813b
|
0703bdf81bf27f5fc91d8eedb6e44651d978749a
|
refs/heads/master
| 2021-06-08T19:29:53.683282
| 2021-04-06T07:22:56
| 2021-04-06T07:22:56
| 142,179,672
| 0
| 1
| null | 2020-11-03T17:19:58
| 2018-07-24T15:40:48
|
Gnuplot
|
UTF-8
|
Python
| false
| false
| 1,656
|
#! /usr/bin/env python
"""\
%prog rootfile [yodafile]
Convert a ROOT data file to the YODA data format.
"""
import yoda, os, sys, optparse
from yoda.script_helpers import parse_x2y_args, filter_aos
parser = optparse.OptionParser(usage=__doc__)
parser.add_option("-m", "--match", dest="MATCH", metavar="PATT", default=None,
help="Only write out histograms whose path matches this regex")
parser.add_option("-M", "--unmatch", dest="UNMATCH", metavar="PATT", default=None,
help="Exclude histograms whose path matches this regex")
opts, args = parser.parse_args()
in_out = parse_x2y_args(args, ".root", ".yoda")
if not in_out:
sys.stderr.write("You must specify the ROOT and YODA file names\n")
sys.exit(1)
import ROOT
for i, o in in_out:
print "opening", i
rf = ROOT.TFile(i)
rootobjects_raw = list(yoda.root.getall(rf))
rootobjects = [(path, ro) for (path, ro) in rootobjects_raw if not isinstance(ro, ROOT.TH1F)]
th1f = [(path, ro) for (path, ro) in rootobjects_raw if isinstance(ro, ROOT.TH1F)]
print rootobjects
print th1f
# Conversion of TH1F into TH1D
for path, ro in th1f:
temp = ROOT.TH1D()
ro.Copy(temp)
rootobjects.append((path, temp))
def to_yoda(path, ro):
print path, ro
ao = yoda.root.to_yoda(ro)
ao.path = path
return ao
analysisobjects = [to_yoda(path, ro) for (path, ro) in rootobjects]
rf.Close()
analysisobjects = [ao for ao in analysisobjects if ao is not None]
filter_aos(analysisobjects, opts.MATCH, opts.UNMATCH)
yoda.writeYODA(analysisobjects, o)
|
[
"robin.aggleton@cern.ch"
] |
robin.aggleton@cern.ch
|
|
49eb497ffcfb331fc89dc4d4e403951ac1ca37c3
|
e0d38dc7dee7efab7bf80c56fccea6475ba02bc0
|
/condicionais.py
|
81115284bbf1e042b10f55519fa60456c5ce3d4b
|
[] |
no_license
|
franciscocesar/python-lp
|
9696856e4a7d73d77098766009e4aca1bf5fbb0a
|
db3cd770559bff923dbc3589f84a6c822e014ecf
|
refs/heads/main
| 2023-08-25T19:58:07.580532
| 2021-11-03T07:34:20
| 2021-11-03T07:34:20
| 424,115,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
"""
Condições IF, ELIF e ELSE
"""
if False:
print('Verdadeiro')
elif True:
print('Agora é verdadeiro')
nome = input('Qual o seu nome? ')
print(f'Seu nome é {nome}')
else:
print('Não é verdadeiro')
print('Ola')
|
[
"francisco.mateus@souunit.com.br"
] |
francisco.mateus@souunit.com.br
|
241ad1f6e201d8905fd6858c9f73078ea0a50482
|
7d6a3f76525a1bddb2f9d74dd752edf65ccb8f2d
|
/M4/lgb_un_p.py
|
2352495db6a13e1263df53b9e23783923e611732
|
[] |
no_license
|
ImChenxii/DC-SWUFE
|
d1ba170fbb7ea03eca1cc1d7c24c8c5bcb1c50a4
|
34fa79a2a920b0be8bb11b7ddcc42cbd16379a89
|
refs/heads/master
| 2020-04-02T13:11:19.261277
| 2018-10-24T09:22:37
| 2018-10-24T09:22:37
| 154,471,017
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,106
|
py
|
import pandas as pd
import numpy as np
import gc
import datetime
import os
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from imblearn.combine import SMOTETomek
from sklearn.metrics import auc, roc_auc_score
import xgboost as xgb
import lightgbm as lgb
import warnings
warnings.filterwarnings('ignore')
# load the data
train_1 = pd.read_csv('../raw_data/train_xy.csv')
test_1 = pd.read_csv('../raw_data/test_all.csv')
print("Load data over")
# drop the group column and pull out the id columns
train1_id = train_1.pop('cust_id')
test1_id = test_1.pop('cust_id')
test_1['y'] = -1
data_1 = pd.concat([train_1, test_1], axis=0, ignore_index=True)
data_1.drop('cust_group', axis=1, inplace=True)
data_1.replace({-99 : np.nan}, inplace=True)
# build the lists of numeric and categorical feature names
num_feature = ['x_' + str(i) for i in range(1, 96)]
cat_feature = ['x_' + str(i) for i in range(96, 158)]
# list of the top-20 important features
top_20_features = ['x_80', 'x_2', 'x_95', 'x_52', 'x_81', 'x_93', 'x_40', 'x_1', 'x_157', 'x_58',
'x_72', 'x_63', 'x_43', 'x_97', 'x_19', 'x_45', 'x_29', 'x_62', 'x_42', 'x_64']
top_20_cat = ['x_97', 'x_157']
top_20_num = []
for i in top_20_features:
if i not in top_20_cat:
top_20_num.append(i)
# drop constant features
unique_df = data_1.nunique().reset_index()
unique_df.columns = ["col_name", "unique_count"]
constant_df = unique_df[unique_df["unique_count"] == 1]
constant_feature = constant_df.col_name.tolist()
data_1.drop(constant_feature, axis=1, inplace=True)
for item in constant_feature:
if item in num_feature:
num_feature.remove(item)
if item in cat_feature:
cat_feature.remove(item)
print("drop ", len(constant_feature), " constant feature(s)")
# drop features that are more than 80% missing
mis_80_list = []
for col in data_1.columns:
mis_val_percent = 100 * data_1[col].isnull().sum() / len(data_1)
if (mis_val_percent >= 80.0):
mis_80_list.append(col)
data_1.drop(mis_80_list, axis=1, inplace=True)
for item in mis_80_list:
if item in num_feature:
num_feature.remove(item)
if item in cat_feature:
cat_feature.remove(item)
print("drop ", len(mis_80_list), "missing feature(s)")
# use the missing-value count as a feature measuring how complete a record is
data_1['null_num'] = data_1.isna().sum(axis=1)
# drop an outlier whose missing count is far higher in train than in test
data_1.drop(data_1[data_1['null_num'] > 90].index, inplace=True)
# drop training rows that miss too many of the top-20 important features
threshold_20 = 10
data_1['null_20_num'] = data_1[top_20_features].isna().sum(axis=1)
drop_20_index = list(data_1[data_1['null_20_num'] > threshold_20].index)
drop_20_train = []
for i in drop_20_index:
if int(i) < 15000:
drop_20_train.append(i)
data_1.drop(drop_20_train, inplace=True)
print("Because top20 feature importance drop %d rows" %len(drop_20_train))
# drop continuous features whose standard deviation is below 0.1
std_df = data_1.std().reset_index()
std_df.columns = ["col_name", "std"]
low_std = std_df[std_df["std"] < 0.1]
low_std_list = low_std.col_name.tolist()
low_std_num_list = [i for i in low_std_list if i in num_feature]
data_1.drop(low_std_num_list, axis=1, inplace=True)
for item in low_std_num_list:
if item in num_feature:
num_feature.remove(item)
if item in cat_feature:
cat_feature.remove(item)
print("Because low standard deviation drop %d continuous feature" %len(low_std_num_list))
# fill missing values with -99 for now
data_1.fillna(-99, inplace=True)
gc.collect()
# one-hot encode the categorical features and label-encode the binary ones
unique_df = data_1.nunique().reset_index()
unique_df.columns = ["col_name", "unique_count"]
category2_df = unique_df[unique_df["unique_count"] == 2]
category2_feature = category2_df.col_name.tolist()
if 'y' in category2_feature:
category2_feature.remove('y')
le = LabelEncoder()
for col in category2_feature:
le.fit(data_1[col])
data_1[col] = le.transform(data_1[col])
for item in category2_feature:
if item in cat_feature:
cat_feature.remove(item)
def one_hot_encode(data, column_name):
dummies = pd.get_dummies(data[column_name], prefix=column_name)
combined = data.join(dummies)
combined.drop(column_name, axis=1, inplace=True)
return combined
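# For example (hypothetical values), one_hot_encode(data_1, 'x_97') replaces a
# column holding the values {1, 2, 3} with indicator columns x_97_1, x_97_2
# and x_97_3, one per observed category.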
for col_name in cat_feature:
data_1 = one_hot_encode(data_1, col_name)
print(col_name, " one-hot is over.")
train = data_1[(data_1['y'] != -1) & (data_1['y'] != -2)]
test = data_1[data_1['y'] == -1]
label = train.pop('y')
test.drop('y', axis=1, inplace=True)
del data_1
gc.collect()
print("train shape is ", train.shape)
print("test shape is", test.shape)
train.replace({-99 : np.nan}, inplace=True)
test.replace({-99 : np.nan}, inplace=True)
X = train.values
y = label.values
test = test.values
del train
del label
gc.collect()
RANDOM_SEED = 1225
N = 5
skf = StratifiedKFold(n_splits=N, shuffle=False, random_state=RANDOM_SEED)
cv_result = []
pre_result = []
for k, (train_index, test_index) in enumerate(skf.split(X, y)):
print('*' * 20 + 'Start Round ' + str(k + 1) + ' Split' + '*' * 20)
X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'num_leaves': 25,
'max_depth': 7,
'learning_rate': 0.05,
'feature_fraction': 0.52,
'bagging_fraction': 0.52,
'min_child_weight': 0,
'min_child_samples': 55,
'verbose': 50,
'reg_alpha': 0,
'reg_lambda': 0.001,
'seed': 0,
# 'is_unbalance': True
}
print('*' * 20 + 'Start Round' + str(k + 1) + ' Training'+ '*' * 20)
# train
model_lgb = lgb.train(
params,
lgb_train,
num_boost_round = 5000,
valid_sets = lgb_eval,
early_stopping_rounds = 50,
verbose_eval = 50
)
# predict
print('*' * 20 + 'start predict'+ '*' *20)
y_pred = model_lgb.predict(X_test, num_iteration=model_lgb.best_iteration)
cv_result.append(roc_auc_score(y_test, y_pred))
print('Round ', str(k + 1),'fold AUC score is ', cv_result[k])
pre_result.append(model_lgb.predict(test, num_iteration=model_lgb.best_iteration))
print('Finished Round ' + str(k + 1) + '!')
five_pre = []
print('offline: cv_score: ', np.mean(cv_result))
for k, i in enumerate(pre_result):
if k == 0:
five_pre = np.array(i).reshape(-1,1)
else:
five_pre = np.hstack((five_pre, np.array(i).reshape(-1,1)))
result = []
for i in five_pre:
result.append(np.mean(i))
sub = pd.DataFrame()
sub['cust_id'] = list(test1_id.values)
sub['pred_prob'] = list(result)
sub.to_csv('./lgb_un_p.csv', index=False, encoding='utf-8')
|
[
"im.chenxi@hotmail.com"
] |
im.chenxi@hotmail.com
|
c8e9d0c8bd3bb3f4b56908f31da123b5dd79705c
|
71e0cb631066f840df17fc6df0700a1d76cb1121
|
/django_imgur/storage.py
|
1ced32e91322f21a7733119a14b63fc6762394eb
|
[] |
no_license
|
preetamherald/django-imgur
|
1a33ad7f8c56c2c79cd44f42e88e3e946c874913
|
2741958a9d31593331b0ec22c6c9222b15592ad9
|
refs/heads/master
| 2020-04-05T16:21:07.462882
| 2018-11-10T18:24:03
| 2018-11-10T18:24:03
| 157,008,266
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,859
|
py
|
import os.path
import base64
try:
    from cStringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
from django.core.cache import cache
from django.core.files import File
from django.core.files.storage import Storage
from django.utils.encoding import filepath_to_uri
import requests
from imgurpython import ImgurClient
from imgurpython.helpers.error import ImgurClientError
from django.utils.deconstruct import deconstructible
from .settings import (CONSUMER_ID,
CONSUMER_SECRET,
ACCESS_TOKEN,
ACCESS_TOKEN_REFRESH,
USERNAME)
import logging
logger = logging.getLogger(__name__)
@deconstructible
class ImgurStorage(Storage):
"""
A storage class providing access to resources in a Dropbox Public folder.
"""
def __init__(self, location='/'):
self.client = ImgurClient(
CONSUMER_ID,
CONSUMER_SECRET,
ACCESS_TOKEN,
ACCESS_TOKEN_REFRESH)
logger.info("Logged in Imgur storage")
self.account_info = self.client.get_account(USERNAME)
self.albums = self.client.get_account_albums(USERNAME)
self.location = location
self.base_url = 'https://api.imgur.com/3/account/{url}/'.format(url=self.account_info.url)
def _get_abs_path(self, name):
return os.path.join(self.location, name)
    def _open(self, name, mode='rb'):
        remote_file = ImgurFile(name, self, mode)
        return remote_file
def _save(self, name, content):
name = self._get_abs_path(name)
directory = os.path.dirname(name)
logger.info([a.title for a in self.albums])
logger.info(name)
logger.info(directory)
if not self.exists(directory) and directory:
album = self.client.create_album({"title": directory})
self.albums = self.client.get_account_albums(USERNAME)
album = [a for a in self.albums if a.title == directory][0]
#if not response['is_dir']:
# raise IOError("%s exists and is not a directory." % directory)
response = self._client_upload_from_fd(content, {"album": album.id, "name": name, "title": name}, False)
return response["name"]
def _client_upload_from_fd(self, fd, config=None, anon=True):
""" use a file descriptor to perform a make_request """
if not config:
config = dict()
contents = fd.read()
b64 = base64.b64encode(contents)
data = {
'image': b64,
'type': 'base64',
}
data.update({meta: config[meta] for meta in set(self.client.allowed_image_fields).intersection(list(config.keys()))})
return self.client.make_request('POST', 'upload', data, anon)
def delete(self, name):
name = self._get_abs_path(name)
self.client.delete_image(name)
def exists(self, name):
name = self._get_abs_path(name)
if len([a for a in self.albums if a.title == name]) > 0:
return True
try:
album = [a for a in self.albums if a.title == os.path.dirname(name)][0]
images = self.client.get_album_images(album.id)
metadata = self.client.get_image(name)
if len([im for im in images if im.name == name]) > 0:
logger.info(dir(metadata))
return True
except ImgurClientError as e:
if e.status_code == 404: # not found
return False
raise e
except IndexError as e:
return False
else:
return True
return False
def listdir(self, path):
path = self._get_abs_path(path)
response = self.client.get_image(path)
directories = []
files = []
for entry in response.get('contents', []):
if entry['is_dir']:
directories.append(os.path.basename(entry['path']))
else:
files.append(os.path.basename(entry['path']))
return directories, files
def size(self, path):
cache_key = 'django-imgur-size:%s' % filepath_to_uri(path)
size = cache.get(cache_key)
if not size:
directory = os.path.dirname(path)
name = os.path.basename(path)
album = [a for a in self.albums if a.title == directory][0]
images = self.client.get_album_images(album.id)
image = [im for im in images if im.name == path][0]
size = self.client.get_image(image.id).size
cache.set(cache_key, size)
return size
def url(self, path):
cache_key = 'django-imgur-url:%s' % filepath_to_uri(path)
url = cache.get(cache_key)
if not url:
directory = os.path.dirname(path)
name = os.path.basename(path)
album = [a for a in self.albums if a.title == directory][0]
images = self.client.get_album_images(album.id)
image = [im for im in images if im.name == path][0]
url = self.client.get_image(image.id).link
cache.set(cache_key, url)
return url
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
#name = self._get_abs_path(name)
#dir_name, file_name = os.path.split(name)
#file_root, file_ext = os.path.splitext(file_name)
## If the filename already exists, add an underscore and a number (before
## the file extension, if one exists) to the filename until the generated
## filename doesn't exist.
#count = itertools.count(1)
#while self.exists(name):
# # file_ext includes the dot.
# name = os.path.join(dir_name, "%s_%s%s" % (file_root, count.next(), file_ext))
return name
@deconstructible
class ImgurFile(File):
def __init__(self, name, storage, mode):
self._storage = storage
self._mode = mode
self._is_dirty = False
self.file = StringIO()
self.start_range = 0
self._name = name
@property
def size(self):
if not hasattr(self, '_size'):
self._size = self._storage.size(self._name)
return self._size
def read(self, num_bytes=None):
return requests.get(self._storage.url(self._name)).content
def write(self, content):
if 'w' not in self._mode:
raise AttributeError("File was opened for read-only access.")
self.file = StringIO(content)
self._is_dirty = True
def close(self):
#if self._is_dirty:
# self._storage.client.put_file(self._name, self.file.getvalue())
self.file.close()
|
[
"preetamherald@gmail.com"
] |
preetamherald@gmail.com
|
ded2b610f5f4fd5f3d8166d7da825b28689454ef
|
9b748c24629089268dc8c6f84e13ca4af77ebc58
|
/fanmelegacy/social/migrations/0020_auto__add_field_notificacion_descripcion__add_field_notificacion_nombr.py
|
a1ac2ce63f63b0e15f31a15fa469ed6b9dcb89af
|
[] |
no_license
|
emilioramirez/fanme
|
5f01215c208d5c612357ffd8036621d959e73829
|
60ede12c0af8082d0f778429e5ea30b73c60daf8
|
refs/heads/master
| 2021-01-16T19:14:19.301946
| 2014-02-18T19:17:48
| 2014-02-18T19:17:48
| 32,593,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,079
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Notificacion.descripcion'
db.add_column('social_notificacion', 'descripcion', self.gf('django.db.models.fields.TextField')(default='', max_length=300), keep_default=False)
# Adding field 'Notificacion.nombre'
db.add_column('social_notificacion', 'nombre', self.gf('django.db.models.fields.CharField')(default='', max_length=30), keep_default=False)
def backwards(self, orm):
# Deleting field 'Notificacion.descripcion'
db.delete_column('social_notificacion', 'descripcion')
# Deleting field 'Notificacion.nombre'
db.delete_column('social_notificacion', 'nombre')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'social.evento': {
'Meta': {'object_name': 'Evento'},
'creador': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eventos_creados'", 'to': "orm['auth.User']"}),
'descripcion': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'fecha_creacion': ('django.db.models.fields.DateField', [], {}),
'fecha_fin': ('django.db.models.fields.DateField', [], {}),
'fecha_inicio': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitados': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'eventos_invitado'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'localizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['support.Localizacion']"})
},
'social.mensaje': {
'Meta': {'object_name': 'Mensaje'},
'fecha': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mensaje': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'user_from': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mensajes_enviados'", 'to': "orm['auth.User']"}),
'user_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mensajes_recibidos'", 'to': "orm['auth.User']"})
},
'social.notificacion': {
'Meta': {'object_name': 'Notificacion'},
'descripcion': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'empresa': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notificaciones_enviadas'", 'to': "orm['auth.User']"}),
'fecha_creacion': ('django.db.models.fields.DateTimeField', [], {}),
'fecha_expiracion': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'usuarios_to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'notificaciones_recibidas'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'support.localidad': {
'Meta': {'object_name': 'Localidad'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'provincia': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['support.Provincia']"})
},
'support.localizacion': {
'Meta': {'object_name': 'Localizacion'},
'barrio': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'calle': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'localidad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['support.Localidad']"}),
'numero': ('django.db.models.fields.IntegerField', [], {}),
'numero_local': ('django.db.models.fields.IntegerField', [], {})
},
'support.pais': {
'Meta': {'object_name': 'Pais'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'support.provincia': {
'Meta': {'object_name': 'Provincia'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['support.Pais']"})
}
}
complete_apps = ['social']
|
[
"munozmatiasn@gmail.com"
] |
munozmatiasn@gmail.com
|
4de0f9cafa6f1fa9712f8f63f4d5c125cf87f748
|
512d5801157204f9e57b8e2b27f4df9a405ec771
|
/0x04-python-more_data_structures/9-multiply_by_2.py
|
8cef95fa4b7cb1d4c6c4ac889afb97d291f51e52
|
[] |
no_license
|
CSant04y/holbertonschool-higher_level_programming
|
f27c223107e342b5da2cd2e1b23c044de0302424
|
016c416c8060f8caab1664adeaa3ee23de670a75
|
refs/heads/master
| 2023-04-23T15:35:52.192397
| 2021-05-12T20:56:55
| 2021-05-12T20:56:55
| 319,377,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
#!/usr/bin/python3
def multiply_by_2(a_dictionary):
new_dic = a_dictionary.copy()
for i in new_dic:
new_dic[i] *= 2
return new_dic
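# Usage sketch (not part of the original task file):
#
#     >>> multiply_by_2({'a': 1, 'b': 3})
#     {'a': 2, 'b': 6}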
|
[
"esquivelcarlos27893@gmail.com"
] |
esquivelcarlos27893@gmail.com
|
0ea698c8cea14b0b8495c8fd1b0de1862d550635
|
2dc7d42e73ce37716156fc71651d0cddbe6ad890
|
/script/test_loginihrm.py
|
2ad63c5bcbc1b44c740ce47dc5cdcd028932d692
|
[] |
no_license
|
nihao545720/apiTestIHRM
|
31ce7b18731235f76a8aab968fda4f9b81ac5689
|
02a32c50a98b72ccd35128581fc8ca793a097377
|
refs/heads/master
| 2020-09-27T08:15:23.348759
| 2019-12-07T07:29:24
| 2019-12-07T07:29:24
| 226,472,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,720
|
py
|
# imports
import logging
import unittest
import app
from api.loginapi import LoginApi
from utils import assert_common


class TestLoginIhrm(unittest.TestCase):
    def setUp(self):
        pass

    @classmethod
    def setUpClass(cls):
        cls.login_api = LoginApi()

    def tearDown(self):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    # test case: successful login
    def test01_login_success(self):
        # send the login request
        response = self.login_api.login("13800000002", "123456")
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert (the expected message strings are the API's own return values)
        assert_common(self, response, 200, True, 10000, "操作成功")
        # get the json payload
        jsonData = response.json()
        # build the token used as a global header value
        token = "Bearer " + jsonData.get("data")
        # save the token into the global module app.py
        # (a HEADERS variable must first be created in app.py for this to work)
        app.HEADERS = {"Content-Type": "application/json", "Authorization": token}
        logging.info("saved login token and content-type: {}".format(app.HEADERS))

    # test case: account does not exist
    def test02_mobile_is_error(self):
        # send the login request
        response = self.login_api.login("13900000002", "123456")
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert
        assert_common(self, response, 200, False, 20001, "用户名或密码错误")

    # test case: wrong password
    def test03_password_is_error(self):
        # send the login request
        response = self.login_api.login("13800000002", "error")
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert
        assert_common(self, response, 200, False, 20001, "用户名或密码错误")

    # test case: no parameters
    def test04_none_params(self):
        # send the login request
        response = self.login_api.login_none_params()
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert
        assert_common(self, response, 200, False, 99999, "抱歉,系统繁忙,请稍后重试")

    # test case: empty username
    def test05_mobile_is_null(self):
        # send the login request
        response = self.login_api.login("", "error")
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert
        assert_common(self, response, 200, False, 20001, "用户名或密码错误")

    # test case: empty password
    def test06_password_is_null(self):
        # send the login request
        response = self.login_api.login("13800000002", "")
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert
        assert_common(self, response, 200, False, 20001, "用户名或密码错误")

    # test case: extra parameters
    def test07_extra_params(self):
        # send the login request
        response = self.login_api.login_extra_parmas()
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert
        assert_common(self, response, 200, True, 10000, "操作成功")

    # test case: missing parameters
    def test08_less_parmas(self):
        # send the login request
        response = self.login_api.login_less_parmas()
        # log the response
        logging.info("login API response: {}".format(response.json()))
        # assert
        assert_common(self, response, 200, False, 99999, "抱歉,系统繁忙,请稍后重试")
|
[
"stan@123.com"
] |
stan@123.com
|
cc66e6b71b2aeb27a3f682be7c8f5e6662f88ec1
|
370c6f73e69b361d2921c297fbe3f503ccea292c
|
/seq_gan.py
|
d66f4cbbb9d9d5046746d697efe7010755836934
|
[] |
no_license
|
jackakarafotas/MarketAlgorithms
|
4380dd79ec36f9a4a7caa1afc4cec8d0a634fe8c
|
83ac34fada85fcc34857500684f3eb7c9ec6631e
|
refs/heads/master
| 2020-03-17T19:02:32.905752
| 2018-11-19T23:08:38
| 2018-11-19T23:08:38
| 133,843,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,216
|
py
|
import tensorflow as tf
import numpy as np
import sys
from math import ceil
from generator import Generator
from discriminator import Discriminator
class SeqGan():
def __init__(self,data,start_return,batch_size,mle_epochs,adv_epochs,pt_steps,pt_epochs,at_steps,at_epochs,g_hidden_size,d_hidden_size):
self.data = data
self.n_samples = len(data)
self.seq_length = len(data[0])
self.start_return = start_return
self.batch_size = batch_size
self.mle_epochs = mle_epochs
self.adv_epochs = adv_epochs
self.pt_steps = pt_steps
self.pt_epochs = pt_epochs
self.at_steps = at_steps
self.at_epochs = at_epochs
self.generator = Generator(g_hidden_size,self.seq_length)
self.discriminator = Discriminator(d_hidden_size,self.seq_length,dropout=0.8)
# build graphs
print('\nbuilding graphs...',end='')
print(' mle...',end='')
self._build_mle_graph()
print(' pg...',end='')
self._build_pg_graph()
print(' dis...',end='')
self._build_dis_graph()
# trainers
g_vars = [var for var in tf.trainable_variables() if 'g_' in var.name]
d_vars = [var for var in tf.trainable_variables() if 'd_' in var.name]
self.mle_train = tf.train.AdamOptimizer(1e-2).minimize(self.mle_loss,var_list=g_vars)
self.pg_train = tf.train.AdamOptimizer(1e-2).minimize(self.pg_loss,var_list=g_vars)
self.dis_train = tf.train.AdagradOptimizer(1e-3).minimize(self.dis_loss,var_list=d_vars)
# initialize
print(' initializing variables...',end='')
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
print(' Done.')
## GRAPHS
def _build_mle_graph(self):
self.mle_x = tf.placeholder(tf.float32,[self.batch_size,self.seq_length],name='mle_x')
self.mle_y = tf.placeholder(tf.float32,[self.batch_size,self.seq_length],name='mle_y')
self.mle_loss = self.generator.batch_mse_loss(self.mle_x,self.mle_y)
def _build_pg_graph(self):
self.pg_samples = self.generator.sample(self.batch_size*2)
self.pg_inp, self.pg_target = self._prep_generator_batch_tf(self.pg_samples)
self.pg_rewards = self.discriminator.batch_classify(self.pg_target)
self.pg_loss = self.generator.batch_pg_loss(self.pg_inp, self.pg_target, self.pg_rewards)
def _build_dis_graph(self):
self.dis_x = tf.placeholder(tf.float32,[self.batch_size,self.seq_length],name='dis_x')
self.dis_y = tf.placeholder(tf.float32,[self.batch_size],name='dis_y')
self.dis_d_out = self.discriminator.batch_classify(self.dis_x)
self.dis_n_correct = tf.equal(tf.round(self.dis_d_out),self.dis_y)
self.dis_accuracy = tf.reduce_mean(tf.cast(self.dis_n_correct, tf.float32))
self.dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.dis_d_out,labels=self.dis_y))
## TRAINERS
def train_generator_mle(self):
"""
Maximum Likelihood pretraining for generator
"""
print('\nStarting Generator MLE Training...')
for epoch in range(self.mle_epochs):
print('epoch {} :'.format(epoch+1),end='')
sys.stdout.flush()
total_loss = 0
n_batches = 0
for i in range(0,self.n_samples-self.batch_size,self.batch_size):
n_batches += 1
batches_data = self.data[i:i+self.batch_size]
inp, target = self._prep_generator_batch_py(batches_data)
feed_dict = {
self.mle_x : inp,
self.mle_y : target,
}
_, batch_loss = self.sess.run([self.mle_train,self.mle_loss],feed_dict)
total_loss += batch_loss
if (i / self.batch_size) % ceil(int(self.n_samples/self.batch_size) / 20) == 0:
print('.',end='')
sys.stdout.flush()
## PRINT SAMPLES
total_loss = total_loss / n_batches / self.seq_length
print('Average MSE Loss per sample: {}'.format(total_loss))
print('Done!')
def pretrain_discriminator(self):
print('\nStarting Discriminator Pre-Training...')
self.train_discriminator(self.pt_steps,self.pt_epochs)
print('Done!')
def adversarial_training(self):
for epoch in range(self.adv_epochs):
print('\n--------\nEPOCH {}\n--------'.format(epoch+1))
# Generator
print('\nAdversarial Training Generator : ', end='')
sys.stdout.flush()
self.train_generator_PG(1)
# Discriminator
print('\nAdversarial Training Discriminator : ')
self.train_discriminator(self.at_steps,self.at_epochs)
def train_generator_PG(self, num_batches):
"""
The generator is trained using policy gradients, using the reward from the discriminator.
Training is done for num_batches batches.
"""
for batch in range(num_batches):
self.sess.run(self.pg_train)
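    # Rough sketch of the objective assumed to live behind batch_pg_loss
    # (illustrative only; the actual implementation is in generator.py):
    #
    #     loss = -mean(log_prob(sampled_sequence) * discriminator_reward)
    #
    # so sequences the discriminator scores as "real" push the generator's
    # log-probabilities up, and low-reward sequences push them down.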
def train_discriminator(self,d_steps,epochs):
# Train
for d_step in range(d_steps):
print('Retrieving Samples...')
samples = self.sample(self.batch_size*int(self.n_samples/self.batch_size))
dis_inp, dis_target = self._prep_discriminator_data(self.data, samples)
for epoch in range(epochs):
print('d-step {0} epoch {1} : '.format(d_step + 1, epoch + 1), end='')
sys.stdout.flush()
total_loss = 0
total_acc = 0
n_batches = 0
for i in range(0,2*(self.n_samples-self.batch_size),self.batch_size):
n_batches += 1
inp, target = dis_inp[i:i + self.batch_size], dis_target[i:i + self.batch_size]
feed_dict = {
self.dis_x : inp,
self.dis_y : target,
}
_, batch_loss, acc = self.sess.run([self.dis_train,self.dis_loss,self.dis_accuracy],feed_dict=feed_dict)
total_loss += batch_loss
total_acc += acc
if (i / self.batch_size) % ceil(int(2*self.n_samples/self.batch_size) / 10) == 0:
print('.', end='')
sys.stdout.flush()
total_loss /= n_batches
total_acc /= n_batches
print(' average_loss = {0}, train_acc = {1}'.format(total_loss, total_acc))
def sample(self,num_samples):
return self.sess.run(self.generator.sample(num_samples))
## HELPERS
def _prep_generator_batch_py(self,samples):
inp = np.array(samples)
target = np.array(samples)
inp[:, 0] = self.start_return
inp[:, 1:] = target[:, :-1]
return inp, target
def _prep_generator_batch_tf(self,samples):
n, seq_length = self._shape(samples)
target = samples
init_returns = tf.constant(self.start_return,shape=[n],dtype=tf.float32)
init_returns = tf.reshape(init_returns,shape=[n,1])
inp = tf.concat([init_returns, target[:, :seq_length - 1]],1)
return inp, target
def _batchwise_sample(self,num_samples):
samples = []
for i in range(int(num_samples/self.batch_size)):
sample = self.sample(self.batch_size) # : batch_size * seq_length
samples += sample
return samples # ~num_samples * seq_length
def _prep_discriminator_data(self,pos_samples,neg_samples):
neg_size = len(neg_samples)
pos_samples = pos_samples[:neg_size]
pos_size = len(pos_samples)
pos_samples = np.array(pos_samples)
neg_samples = np.array(neg_samples)
inp = np.concatenate((pos_samples,neg_samples))
target_pos = [1 for i in range(pos_size)]
target_neg = [0 for i in range(neg_size)]
target = np.array(target_pos+target_neg)
shuffle_indices = np.random.permutation(np.arange(len(target)))
inp = inp[shuffle_indices]
target = target[shuffle_indices]
return inp, target
def _shape(self,tensor):
size_0, size_1 = tensor.get_shape()
arr = np.array([size_0,size_1], dtype=np.int32)
return arr[0], arr[1]
|
[
"jackakarafotas@gmail.com"
] |
jackakarafotas@gmail.com
|
9d931265bebb430bda611c707c72bb659e21d359
|
fc820d29cdb776de29e2dcd10f47dbd84ac66f42
|
/tests/utils/HttpUtilsTest.py
|
59889ea66b97c4931bda1ffcac7334df0ef6f208
|
[] |
no_license
|
tw-wh-devops-community/homework-tracker-pyapi
|
c027153e8f3e2e62ccdafffccd66b4c6befaec28
|
54132f93d667d8fdc168e909219f0695c9c349b3
|
refs/heads/master
| 2020-03-22T05:08:39.443293
| 2018-07-06T06:28:38
| 2018-07-06T06:28:38
| 139,545,114
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
import unittest
from app.constants import WxConstants as wxconstants
from app.model.ResponseBean import ResponseBean
class HttpUtilsTest(unittest.TestCase):
def test_create_assignment_message(self):
message = {'interviewwxId': '陈玮', 'interviewName': '陈玮', 'candidateRole': 'DEV', 'candidateName': '胡红翔', 'publishDate': '2018-07-03T03:25:53.724Z', 'expireDate': '2018-07-06T03:25:53.724Z'}
self.assertEqual('陈玮', message['interviewName'])
def test_assignment_format(self):
data = {'interviewwxId': '陈玮', 'interviewName': '陈玮', 'candidateRole': 'DEV', 'candidateName': '胡红翔', 'publishDate': '2018-07-03T03:25:53.724Z', 'expireDate': '2018-07-06T03:25:53.724Z'}
message = wxconstants.CREATE_ASSIGNMENT_TEMPLATE.format(
data['interviewName'],
data['candidateName'],
data['candidateRole'],
data['publishDate'],
data['expireDate'])
print(message)
def test_create_class_with_none(self):
bean = ResponseBean('0000', 'success')
print(bean)
if __name__ == '__main__':
unittest.main()
|
[
"gohuhx@163.com"
] |
gohuhx@163.com
|
766973ba9748fa74c5378e42398721badd887cf3
|
2612f336d667a087823234daf946f09b40d8ca3d
|
/python/lib/Lib/site-packages/django/utils/decorators.py
|
17f2ea30b337f624c0f984698e31aebbb19f6d37
|
[
"Apache-2.0"
] |
permissive
|
tnorbye/intellij-community
|
df7f181861fc5c551c02c73df3b00b70ab2dd589
|
f01cf262fc196bf4dbb99e20cd937dee3705a7b6
|
refs/heads/master
| 2021-04-06T06:57:57.974599
| 2018-03-13T17:37:00
| 2018-03-13T17:37:00
| 125,079,130
| 2
| 0
|
Apache-2.0
| 2018-03-13T16:09:41
| 2018-03-13T16:09:41
| null |
UTF-8
|
Python
| false
| false
| 4,290
|
py
|
"Functions that help with dynamically creating decorators for views."
try:
from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
except ImportError:
from django.utils.functional import wraps, update_wrapper, WRAPPER_ASSIGNMENTS # Python 2.4 fallback.
class classonlymethod(classmethod):
def __get__(self, instance, owner):
if instance is not None:
raise AttributeError("This method is available only on the view class.")
return super(classonlymethod, self).__get__(instance, owner)
def method_decorator(decorator):
"""
Converts a function decorator into a method decorator
"""
    # 'func' is a function at the time it is passed to _dec, but will eventually
    # be a method of the class it is defined in.
def _dec(func):
def _wrapper(self, *args, **kwargs):
@decorator
def bound_func(*args2, **kwargs2):
return func(self, *args2, **kwargs2)
# bound_func has the signature that 'decorator' expects i.e. no
# 'self' argument, but it is a closure over self so it can call
# 'func' correctly.
return bound_func(*args, **kwargs)
# In case 'decorator' adds attributes to the function it decorates, we
# want to copy those. We don't have access to bound_func in this scope,
# but we can cheat by using it on a dummy function.
@decorator
def dummy(*args, **kwargs):
pass
update_wrapper(_wrapper, dummy)
# Need to preserve any existing attributes of 'func', including the name.
update_wrapper(_wrapper, func)
return _wrapper
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
_dec.__name__ = 'method_decorator(%s)' % decorator.__name__
return _dec
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but returns a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), returns a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This is required as a workaround for http://bugs.python.org/issue3445.
"""
return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
def make_middleware_decorator(middleware_class):
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception, e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(middleware, 'process_response'):
result = middleware.process_response(request, response)
if result is not None:
return result
return response
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view)
return _decorator
return _make_decorator
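# Usage sketch (comment only, not part of Django's source): method_decorator
# adapts a function decorator such as login_required so it can wrap a method.
#   from django.contrib.auth.decorators import login_required
#   from django.utils.decorators import method_decorator
#   class ProtectedView(object):
#       @method_decorator(login_required)
#       def dispatch(self, request, *args, **kwargs):
#           ...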
|
[
"dmitry.trofimov@jetbrains.com"
] |
dmitry.trofimov@jetbrains.com
|
5a505771f17c609e5d1161fc4855299fd1f0388d
|
7e7944f4d5546b4c28b87aa38ddcb7bf76bcc1f2
|
/python_control/up_control.py
|
9caaf2d46541871d3aa6905b8b6dcd26012ac6d5
|
[] |
no_license
|
TubeChip404/105_rov_undersea
|
f45819bc7e837723644afa2f009b3ab5ad9bba85
|
36fef8129365dfe4413911587f5e4a29f790fcb8
|
refs/heads/master
| 2020-03-27T16:36:28.616678
| 2018-08-30T19:05:03
| 2018-08-30T19:05:03
| 146,794,654
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,163
|
py
|
from socket import *
import time
import serial
from threading import Thread
#Configure
raspi_addr='192.168.0.112'
raspi_port=6666
power_addr='192.168.0.7'
power_port=20108
joypad_com='COM6'
joypad_band=115200
network_timeout=1
serial_timeout=1
joypad_data_frame_length=24
network_send_delay=0.05
thruster_horiz_left_front_max_min_joypad_max_min=[368,184,0,1024]
thruster_horiz_right_front_max_min_joypad_max_min=[368,184,0,1024]
thruster_horiz_left_behind_max_min_joypad_max_min=[368,184,0,1024]
thruster_horiz_right_behind_max_min_joypad_max_min=[368,184,0,1024]
thruster_verti_left_middle_max_min_joypad_max_min=[368,184,0,1024]
thruster_verti_right_middle_max_min_joypad_max_min=[368,184,0,1024]
arm_front_servo_max_min_joypad_max_min=[368,184,0,1024]
arm_behind_servo_max_min_joypad_max_min=[368,184,0,1024]
camera_servo_max_min_joypad_max_min=[368,184,0,1024]
#End
raspi_sk=socket(AF_INET,SOCK_STREAM)
raspi_sk.connect((raspi_addr,raspi_port))
power_sk=socket(AF_INET,SOCK_STREAM)
power_sk.connect((power_addr,power_port))
joypad_dev=serial.Serial(joypad_com,joypad_band,timeout=serial_timeout)
#global values
joypad_analog_value=[512,512,512,512,512,512,512,512]
joypad_digital_value=[0,0,0]
main_temp=0
power_temp=0
power_data_frame=''
#end
def read_joypad_data_to_global_value():
global joypad_dev,joypad_analog_value,joypad_digital_value,joypad_data_frame_length
while 1:
try:
joypad_data=joypad_dev.read(joypad_data_frame_length*2)
for pos in range(0,joypad_data_frame_length):
if ord(joypad_data[pos])==0xaa:
chksum=0
for offset in range(0,joypad_data_frame_length-1):
chksum+=ord(joypad_data[pos+offset])
if chksum%256==ord(joypad_data[pos+23]):
for i in range(0,8):
joypad_analog_value[i]=ord(joypad_data[pos+i*2+1])*256+ord(joypad_data[pos+i*2+2])
joypad_digital_value[0]=ord(joypad_data[pos+17])
joypad_digital_value[1]=ord(joypad_data[pos+18])
joypad_digital_value[2]=ord(joypad_data[pos+19])
except:
print 'joypad read error occur.'
joypad_dev.close()
try:
joypad_dev=serial.Serial(joypad_com,joypad_band,timeout=serial_timeout)
except:
time.sleep(0.5)
continue
continue
def map_value(module,invalue):
invalue=float(invalue)
source_differ=invalue-module[2]
source_scale=source_differ/(module[3]-module[2])
if source_scale<0 :
source_scale=0
if source_scale>1 :
source_scale=1
target_value=module[0]+(module[1]-module[0])*source_scale
return round(target_value)
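# Worked example (comment only): with module = [368, 184, 0, 1024], a centred
# joystick reading of 512 gives source_scale = 0.5 and maps to
# 368 + (184 - 368) * 0.5 = 276, the midpoint of the thruster range.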
def send_data_to_power_controller():
global power_sk,joypad_analog_value,power_temp,power_data_frame
while 1 :
try:
power_data_frame='\xaa'
rl_differ=abs(512-joypad_analog_value[2])
real_x1=(joypad_analog_value[0]-rl_differ)*0.5+512
real_x2=(joypad_analog_value[0]+rl_differ)*0.5
real_y1=(joypad_analog_value[1]-rl_differ)*0.5+512
real_y2=(joypad_analog_value[1]+rl_differ)*0.5
thruster_horiz_left_front=map_value(thruster_horiz_left_front_max_min_joypad_max_min,real_y1)
thruster_horiz_right_front=map_value(thruster_horiz_right_front_max_min_joypad_max_min,real_x1)
thruster_horiz_left_behind=map_value(thruster_horiz_left_behind_max_min_joypad_max_min,real_x2)
thruster_horiz_right_behind=map_value(thruster_horiz_right_behind_max_min_joypad_max_min,real_y2)
thruster_verti_left_middle=map_value(thruster_verti_left_middle_max_min_joypad_max_min,joypad_analog_value[3])
thruster_verti_right_middle=map_value(thruster_verti_right_middle_max_min_joypad_max_min,joypad_analog_value[4])
arm_front_servo=map_value(arm_front_servo_max_min_joypad_max_min,joypad_analog_value[4])
arm_behind_servo=map_value(arm_behind_servo_max_min_joypad_max_min,joypad_analog_value[5])
camera_servo=map_value(camera_servo_max_min_joypad_max_min,joypad_analog_value[6])
power_data_frame+=chr(int(thruster_horiz_left_front/256))#make package started
power_data_frame+=chr(int(thruster_horiz_left_front%256))
power_data_frame+=chr(int(thruster_horiz_right_front/256))
power_data_frame+=chr(int(thruster_horiz_right_front%256))
power_data_frame+=chr(int(thruster_horiz_left_behind/256))
power_data_frame+=chr(int(thruster_horiz_left_behind%256))
power_data_frame+=chr(int(thruster_horiz_right_behind/256))
power_data_frame+=chr(int(thruster_horiz_right_behind%256))
power_data_frame+=chr(int(thruster_verti_left_middle/256))
power_data_frame+=chr(int(thruster_verti_left_middle%256))
power_data_frame+=chr(int(thruster_verti_right_middle/256))
power_data_frame+=chr(int(thruster_verti_right_middle%256))
power_data_frame+=chr(int(arm_front_servo/256))
power_data_frame+=chr(int(arm_front_servo%256))
power_data_frame+=chr(int(arm_behind_servo/256))
power_data_frame+=chr(int(arm_behind_servo%256))
power_data_frame+=chr(int(camera_servo/256))
power_data_frame+=chr(int(camera_servo%256))
power_data_frame+='\x00'*(32-len(power_data_frame))+chr(joypad_digital_value[0])+chr(joypad_digital_value[1])
power_data_frame+='\x00'*(47-len(power_data_frame))
chksum=0
for i in range(0,len(power_data_frame)):
chksum+=ord(power_data_frame[i])
power_data_frame+=chr(chksum%256) #make package finished
power_sk.send(power_data_frame*5)
time.sleep(network_send_delay)
power_temp=ord(power_sk.recv(1))
except:
power_sk.close()
print 'network error occur.'
try:
power_sk.connect((power_addr,power_port))
except:
power_sk.close()
time.sleep(0.5)
continue
continue
def send_data_to_raspi():
global power_data_frame,raspi_sk,main_temp
while 1 :
try :
raspi_sk.send(power_data_frame*30)
main_temp=ord(raspi_sk.recv(1))
except :
continue
t_read_joypad=Thread(target=read_joypad_data_to_global_value)
t_send_data_to_power=Thread(target=send_data_to_power_controller)
t_send_data_to_raspi=Thread(target=send_data_to_raspi)
t_read_joypad.setDaemon(True)
t_send_data_to_power.setDaemon(True)
t_send_data_to_raspi.setDaemon(True)
t_read_joypad.start()
t_send_data_to_power.start()
t_send_data_to_raspi.start()
while 1 :
print ('power temp=%d C \t main temp=%d C'%(power_temp,main_temp))
time.sleep(1)
|
[
"42848004+TubeChip404@users.noreply.github.com"
] |
42848004+TubeChip404@users.noreply.github.com
|
c880c38f9b96a1ed61de4fabbf9406f0315f03bc
|
14b8c915ba6cbd867c7af7a7e43aaa60a7eefc9d
|
/blogs/views.py
|
dca9f3ca69ab12512ec1ab98e644ac850958bf15
|
[] |
no_license
|
champ1567/DjangoDemo
|
68e251821f322b6d83ad554a03cabab269bb2492
|
29efc56ca6b602737822e738859540d3dc7337d1
|
refs/heads/main
| 2023-06-12T19:08:41.012717
| 2021-07-05T01:12:20
| 2021-07-05T01:12:20
| 382,603,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request,"frontend/index.html")
|
[
"champ.inwe@gmail.com"
] |
champ.inwe@gmail.com
|
36aa9e739f1297f2c9b50e04747db5d5837c0382
|
a4352b7436fc17794ba7e5b6435f16ddf9b9acae
|
/programação dinamica.py
|
3b4345b133768377a6168a1f57b1a52dbc6d4f3e
|
[] |
no_license
|
pedfalcao/Algoritmos
|
9b7773cd8b6d572958076e2052f3627ff044ce6f
|
dfe7833ebd684c20ea345613793a948136f3e4a5
|
refs/heads/main
| 2023-03-14T01:21:21.038089
| 2021-03-02T01:04:55
| 2021-03-02T01:04:55
| 343,580,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
'''
legend:
d - amount of money available for signings
n - number of players available to sign
j, js = player, players
p, ps = score, scores
c, cs = cost, costs
r = result
v = value
'''
def dinamica(d, n, js, ps, cs):
    '''
    performs the computation using dynamic programming
    '''
k = [[(0, []) for i in range(d+1)] for i in range(n+1)]
for i in range(n+1):
for c in range(d+1):
if i == 0 or c == 0:
k[i][c] = 0, []
elif cs[i-1] <= c:
s1 = (ps[i-1] + k[i-1][c-cs[i-1]][0], k[i-1][c-cs[i-1]][1] + [js[i-1]] + [cs[i-1]])
s2 = k[i-1][c]
k[i][c] = max(s1, s2, key=lambda item: item[0])
else:
k[i][c] = k[i-1][c]
return k[n][d]
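# Illustrative comment: this is the 0/1 knapsack recurrence. With d = 10 and
# two players, A (cost 6, score 8) and B (cost 5, score 5), signing both would
# cost 11 > 10, so the table keeps A alone for a best score of 8.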
def main():
    js = [] # player names
    cs = [] # player costs
    ps = [] # player scores
d = int(input())
n = int(input())
for i in range(n):
j = input()
j = j.split(';')
js.append(j[0])
cs.append(int(j[1]))
ps.append(int(j[2]))
r = dinamica(d, n, js, ps, cs)
print(r[0])
v = 0
js = []
for i in range(len(r[1])):
if i%2 == 1:
v+=r[1][i]
else:
js.append(r[1][i])
print(v)
for i in js:
print(i)
if __name__ == '__main__':
main()
|
[
"paaf2@cin.ufpe.br"
] |
paaf2@cin.ufpe.br
|
1e80f914cc4276ed069e4a1b44beb5b6f1b0141e
|
17e6d820f00b9904c3ece76aab758439d2db3452
|
/mysite/settings.py
|
3eb4de28ae25321c378da76c34a2cab35ee53b1d
|
[] |
no_license
|
AnubisMx26/my-first-blog
|
ca88ca6045706cac0835aca85b535fca2d60f8f8
|
cc454f3495ae0da1f35770c548b53532c5952587
|
refs/heads/master
| 2021-08-28T10:07:10.483357
| 2017-12-11T23:49:28
| 2017-12-11T23:49:28
| 113,909,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '80xrb_-dl0%zt!u+qbe&_o4-7yuw2xyrs-w#(^i0j1h(=4z76f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-mx'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"rsandoval@planseguro.com.mx"
] |
rsandoval@planseguro.com.mx
|
4dee0daf77fa48f37448dd8cf7d857f94c9426d5
|
e91ba13a71dc8757e4c6f483d300bb32db8947d4
|
/kubernetes-mastery/slides/markmaker.py
|
d7ef7a0356e368ba4cf696f9414f7f69f63ba6cc
|
[
"Apache-2.0"
] |
permissive
|
sijoonlee/kubernetes_study
|
752788d4ecf542072436e13ad98b9c67c3b3db2c
|
668abacf4f855b55f23562486e420d29397bbe6d
|
refs/heads/master
| 2022-12-22T06:52:51.224364
| 2020-09-30T17:38:18
| 2020-09-30T17:38:18
| 276,719,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,687
|
py
|
#!/usr/bin/env python
# transforms a YAML manifest into a HTML workshop file
import glob
import logging
import os
import re
import string
import subprocess
import sys
import yaml
logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))
class InvalidChapter(ValueError):
def __init__(self, chapter):
ValueError.__init__(self, "Invalid chapter: {!r}".format(chapter))
def anchor(title):
title = title.lower().replace(' ', '-')
title = ''.join(c for c in title if c in string.ascii_letters+'-')
return "toc-" + title
def interstitials_generator():
images = [url.strip() for url in open("interstitials.txt") if url.strip()]
while True:
for image in images:
yield image
interstitials = interstitials_generator()
def insertslide(markdown, title):
title_position = markdown.find("\n# {}\n".format(title))
slide_position = markdown.rfind("\n---\n", 0, title_position+1)
logging.debug("Inserting title slide at position {}: {}".format(slide_position, title))
before = markdown[:slide_position]
toclink = "toc-chapter-{}".format(title2path[title][0])
_titles_ = [""] + all_titles + [""]
currentindex = _titles_.index(title)
previouslink = anchor(_titles_[currentindex-1])
nextlink = anchor(_titles_[currentindex+1])
interstitial = interstitials.next()
extra_slide = """
---
class: pic
.interstitial[]
---
name: {anchor}
class: title
{title}
.nav[
[Previous section](#{previouslink})
|
[Back to table of contents](#{toclink})
|
[Next section](#{nextlink})
]
.debug[(automatically generated title slide)]
""".format(anchor=anchor(title), interstitial=interstitial, title=title, toclink=toclink, previouslink=previouslink, nextlink=nextlink)
after = markdown[slide_position:]
return before + extra_slide + after
def flatten(titles):
for title in titles:
if isinstance(title, list):
for t in flatten(title):
yield t
else:
yield title
def generatefromyaml(manifest, filename):
manifest = yaml.load(manifest)
markdown, titles = processchapter(manifest["chapters"], filename)
logging.debug("Found {} titles.".format(len(titles)))
toc = gentoc(titles)
markdown = markdown.replace("@@TOC@@", toc)
for title in flatten(titles):
markdown = insertslide(markdown, title)
exclude = manifest.get("exclude", [])
logging.debug("exclude={!r}".format(exclude))
if not exclude:
logging.warning("'exclude' is empty.")
exclude = ",".join('"{}"'.format(c) for c in exclude)
# Insert build info. This is super hackish.
markdown = markdown.replace(
".debug[",
".debug[\n```\n{}\n```\n\nThese slides have been built from commit: {}\n\n".format(dirtyfiles, commit),
1)
markdown = markdown.replace("@@TITLE@@", manifest["title"].replace("\n", "<br/>"))
html = open("workshop.html").read()
html = html.replace("@@MARKDOWN@@", markdown)
html = html.replace("@@EXCLUDE@@", exclude)
html = html.replace("@@CHAT@@", manifest["chat"])
html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
return html
# Maps a section title (the string just after "^# ") to its position
# in the table of content (as a (chapter,part,subpart,...) tuple).
title2path = {}
path2title = {}
all_titles = []
# "tree" is a list of titles, potentially nested.
def gentoc(tree, path=()):
if not tree:
return ""
if isinstance(tree, str):
title = tree
title2path[title] = path
path2title[path] = title
all_titles.append(title)
logging.debug("Path {} Title {}".format(path, title))
return "- [{}](#{})".format(title, anchor(title))
if isinstance(tree, list):
if len(path) == 0:
return "\n---\n".join(gentoc(subtree, path+(i+1,)) for (i,subtree) in enumerate(tree))
elif len(path) == 1:
chapterslide = "name: toc-chapter-{n}\n\n## Chapter {n}\n\n".format(n=path[0])
for (i,subtree) in enumerate(tree):
chapterslide += gentoc(subtree, path+(i+1,)) + "\n\n"
chapterslide += ".debug[(auto-generated TOC)]"
return chapterslide
else:
return "\n\n".join(gentoc(subtree, path+(i+1,)) for (i,subtree) in enumerate(tree))
# Arguments:
# - `chapter` is a string; if it has multiple lines, it will be used as
# a markdown fragment; otherwise it will be considered as a file name
# to be recursively loaded and parsed
# - `filename` is the name of the file that we're currently processing
#   (to generate inline comments to facilitate editing)
# Returns: (expandedmarkdown, [list of titles])
# The list of titles can be nested.
def processchapter(chapter, filename):
if isinstance(chapter, unicode):
return processchapter(chapter.encode("utf-8"), filename)
if isinstance(chapter, str):
if "\n" in chapter:
titles = re.findall("^# (.*)", chapter, re.MULTILINE)
slidefooter = ".debug[{}]".format(makelink(filename))
chapter = chapter.replace("\n---\n", "\n{}\n---\n".format(slidefooter))
chapter += "\n" + slidefooter
return (chapter, titles)
if os.path.isfile(chapter):
return processchapter(open(chapter).read(), chapter)
if isinstance(chapter, list):
chapters = [processchapter(c, filename) for c in chapter]
markdown = "\n---\n".join(c[0] for c in chapters)
titles = [t for (m,t) in chapters if t]
return (markdown, titles)
raise InvalidChapter(chapter)
# Try to figure out the URL of the repo on GitHub.
# This is used to generate "edit me on GitHub"-style links.
try:
if "REPOSITORY_URL" in os.environ:
repo = os.environ["REPOSITORY_URL"]
else:
repo = subprocess.check_output(["git", "config", "remote.origin.url"])
repo = repo.strip().replace("git@github.com:", "https://github.com/")
if "BRANCH" in os.environ:
branch = os.environ["BRANCH"]
else:
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
branch = branch.strip()
base = subprocess.check_output(["git", "rev-parse", "--show-prefix"])
base = base.strip().strip("/")
urltemplate = ("{repo}/tree/{branch}/{base}/{filename}"
.format(repo=repo, branch=branch, base=base, filename="{}"))
except:
logging.exception("Could not generate repository URL; generating local URLs instead.")
urltemplate = "file://{pwd}/{filename}".format(pwd=os.environ["PWD"], filename="{}")
try:
commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
except:
logging.exception("Could not figure out HEAD commit.")
commit = "??????"
try:
dirtyfiles = subprocess.check_output(["git", "status", "--porcelain"])
except:
logging.exception("Could not figure out repository cleanliness.")
dirtyfiles = "?? git status --porcelain failed"
def makelink(filename):
if os.path.isfile(filename):
url = urltemplate.format(filename)
return "[{}]({})".format(filename, url)
else:
return filename
if len(sys.argv) != 2:
logging.error("This program takes one and only one argument: the YAML file to process.")
else:
filename = sys.argv[1]
if filename == "-":
filename = "<stdin>"
manifest = sys.stdin
else:
manifest = open(filename)
logging.info("Processing {}...".format(filename))
sys.stdout.write(generatefromyaml(manifest, filename))
logging.info("Processed {}.".format(filename))
|
[
"shijoonlee@gmail.com"
] |
shijoonlee@gmail.com
|
c839093885f967b6ea909ba7c8bab8b319dde49d
|
30de1d37044cfba76b18cb0ecb1bb543ba4a4e22
|
/Lec3/ascii/ascii.py
|
84f63460df233477e91d15ee0f54436597bcae20
|
[] |
no_license
|
cocoaaa/WebDev
|
28574990d4bd1b6cc0e8023b3eb0a642d122ede8
|
21e18dc0f4d1f99c59c7b63db610a7df0991a8b9
|
refs/heads/master
| 2016-08-11T15:51:06.777475
| 2016-02-04T23:39:34
| 2016-02-04T23:39:34
| 49,907,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,717
|
py
|
import os, json, urllib2, collections
import webapp2 #python webframe
import jinja2 #python template engine
import logging
import time
import tools
from google.appengine.ext import db
#Templates directory: relative to the directory this current python file is in.
template_dir = os.path.join(os.path.dirname(__file__), "templates")
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape=True)
#Global Variables
#requestIP = "76.119.233.230"
API_KEY = "AIzaSyC2oZC2Vd5lRnAgPQ_Svv2JTtkXVD6MR4w"
URL_BASE = "https://maps.googleapis.com/maps/api/staticmap?%(markers_str)s&size=%(width)sx%(height)s&key=%(key)s"
CACHE = {}
#Database
class Art(db.Model):
title = db.StringProperty(required=True)
art = db.TextProperty(required=True)
location = db.TextProperty()
created = db.DateTimeProperty(auto_now_add=True)
#def get_top10(update = False):
# """Returns the result of top ten latest art object created in the datastore"""
# key = 'top10'
# if not update and key in CACHE:
# return CACHE[key]
# else:
# print "!!!!!!!!!" #debug
# logging.info("DB Query") #debug
# arts = Art.gql("ORDER BY created DESC").fetch(10)
# arts = list(arts)
# CACHE[key] = arts
# return arts
# write directly to the cache when updating with a new art that was just submitted
def get_top10(update = False, newArt=None):
"""Returns the result of top ten latest art object created in the datastore"""
key = 'top10'
print "Length of cache: ", len(CACHE.get(key,[]))
if not update and key in CACHE:
logging.info("Cache hit")
return CACHE[key]
elif key in CACHE: #CACHE needs to be updated
assert (update and newArt is not None)
#Remove the oldest
arts_queue = CACHE[key]
arts_queue.pop()
arts_queue.appendleft(newArt)
#Update the cache
CACHE[key] = arts_queue
logging.info("Cache updated without entire DB read!")
return arts_queue
else:
logging.info("Entire DB read. argh") #debug
arts_queue = collections.deque()
arts = list(Art.gql("ORDER BY created DESC").fetch(10))
arts_queue.extend(arts)
CACHE[key] = arts_queue
return arts_queue
class Handler(webapp2.RequestHandler):
def write(self,*a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params ):
t= jinja_env.get_template(template)
return t.render(params)
def render(self, template, **params):
self.response.out.write(self.render_str(template, **params))
def write_front(self, title="", art="", error_title="", error_art=""):
arts = get_top10()
print 'number of arts on the front: ', len(arts)
for a in arts:
print a.location
mapURL = None
if arts:
locations = filter(None, [a.location for a in arts])
print locations
if locations:
mapURL = tools.getMapURL(locations, API_KEY, URL_BASE)
logging.info("locations: %s", str(locations))
print "location url: ", locations
# if arts: #debug
print 'total arts submitted: ', len(arts)
print 'latest submitted: ', arts[0].title
print 'location: ', arts[0].location
self.render("front.html", title=title, art=art, error_title=error_title, error_art=error_art, arts=arts,mapURL=mapURL)
class MainPage(Handler):
def get(self):
self.write_front()
def post(self):
title = self.request.get("title")
art = self.request.get("art")
# requestIP= self.request.remote_addr
requestIP = tools.getRandomIP() #string type
logging.info("This is the logging info test")
logging.info("Printing the request's ip...")
logging.info("ip: %s", requestIP)
# print "HEREERERERERERERERER IS THE IP"
# print requestIP
if (tools.isValid_title(title) and tools.isValid_art(art)):
#Get the location of the one who just submitted the artwork
location = tools.getLocation(requestIP) #string or None
print "location of the new art: ", location
#Store to the db
art = Art(title = title, art = art, location=location)
art.put()
#clear the cache to prevent the stale cache problem
# CACHE.clear() #deletes everything in the dictionary or CACHE['top'] = None
#To handle cache stampede by multiple concurrent user requests
print "new art put into database"
# print "waiting for the update to be valid"
# time.sleep(1)
get_top10(update=True, newArt = art)
# print "Updated the cache based on the new database"
self.redirect("/success")
return
else:
error_title = ""
error_art = ""
if not tools.isValid_title(title):
#Rerender the form with error_title
error_title = "Give your art a title!"
if not tools.isValid_art(art):
#Rerender the form with error_art
error_art = "Please submit your art!"
self.write_front(title, art, error_title, error_art)
return
class SuccessHandler(Handler):
def get(self):
self.render('success.html')
app = webapp2.WSGIApplication([('/', MainPage),
('/success', SuccessHandler)],
debug=True)
|
[
"hjsong@mit.edu"
] |
hjsong@mit.edu
|
6c12e13fb624ce346c269a8345b61228787d6dd0
|
b5177ab489d2c137b1204833bb20d6545c6fe409
|
/Week5/TODO/main/models.py
|
f1a94d4044bf7982d0305b7a8eadf47cce1d66ce
|
[] |
no_license
|
Aru09/JangoRepo
|
816dd2e58df9e93c6c54908542814dda5612f20e
|
5ebbde23fe83481aba39cf487bbb7cea50b65bc2
|
refs/heads/master
| 2020-03-27T21:26:44.553035
| 2018-11-27T14:05:33
| 2018-11-27T14:05:33
| 147,144,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from django.db import models
class Task_list(models.Model):
name = models.CharField(max_length = 255)
def __str__(self):
return self.name
class Task(models.Model):
task_name = models.CharField(max_length = 255)
created = models.DateTimeField(auto_now_add = True)
due_on = models.DateTimeField()
owner = models.CharField( max_length = 255, default= "Admin")
mark = models.BooleanField( default = False)
task_list = models.ForeignKey(Task_list, on_delete=models.CASCADE, default = None)
def __str__(self):
return self.task_name
|
[
"noreply@github.com"
] |
noreply@github.com
|
6000a2d804783a5d4220d63e4e1a31dae8e8d95a
|
f47a4199c4a67eeaaf52faa64105d84f3996b15a
|
/ICP2/source/1.py
|
57e9fcdf76d2ab560fb6492b42e6f563c704c8c0
|
[] |
no_license
|
gowtham304/python
|
a7f7aa555a8cfe189ef7addd1ec6b0e8a5c19d4c
|
c76a190c4d7ea6945dc6407735804d02ce4b71ab
|
refs/heads/master
| 2021-08-27T22:55:21.468012
| 2021-08-12T22:18:42
| 2021-08-12T22:18:42
| 190,480,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
n = int(input("How many numbers do you have? "))
lb=[]
for i in range(n):
s = int(input("Enter a number >> "))
lb.append(s)
print(lb)
kg = [(x/2.2046525820) for x in lb]
print(kg)
|
[
"47073384+gowtham304@users.noreply.github.com"
] |
47073384+gowtham304@users.noreply.github.com
|
131b266d1db9276b440977307e8f0b3feee3d07f
|
c5e6f0445ecbfa128c1149393740f650f3d8102b
|
/1/2/2.2.2.py
|
96e0aa29e73d11f157af042fa6151f3e2fe67136
|
[] |
no_license
|
xyz-to/Imagetoken
|
6eb4cd35a40b002f69c04820a5c8b43b91a9537e
|
19d61d9c826bdfeb58eeb5523c8fd6433a5bd3ee
|
refs/heads/master
| 2023-08-01T02:42:18.860506
| 2021-09-12T15:44:26
| 2021-09-12T15:44:26
| 405,266,390
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,860
|
py
|
"""将灰度值转换为RGB值的图像"""
from matplotlib import pyplot as plt
import numpy as np
from skimage import data, color
L = 255
def GetR(gray):
    """Map a gray level to the R channel"""
    if gray < L / 2:
        return 0
    elif gray > 3 * L / 4:
        return L
    else:
        # ramp from 0 at L/2 up to L at 3L/4 (keeps the mapping continuous
        # and within uint8 range)
        return 4 * gray - 2 * L
def GetG(gray):
    """Map a gray level to the G channel"""
    if gray < L / 4:
        return 4 * gray
    elif gray > 3 * L / 4:
        return 4 * L - 4 * gray
    else:
        return L
def GetB(gray):
    """Map a gray level to the B channel"""
    if gray < L / 4:
        return L
    elif gray > L / 2:
        return 0
    else:
        return 2 * L - 4 * gray
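# Worked example (comment only): with L = 255, a gray level of 200 maps to
# R = 255 (since 200 > 3L/4), G = 4*255 - 4*200 = 220, and B = 0
# (since 200 > L/2), i.e. the pseudo-color (255, 220, 0).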
# Configure fonts for matplotlib
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.size'] = 15
plt.rcParams['axes.unicode_minus'] = False
# Sample gray levels for the x axis
x = [0, 64, 127, 191, 255]
R = []
for i in x:
    R.append(GetR(i))
# Plot the R-channel mapping
plt.figure()
plt.plot(x, R, 'r--', label='red transform')
plt.legend(loc='best')
R = []
for i in x:
    R.append(GetB(i))
# Plot the B-channel mapping
plt.figure()
plt.plot(x, R, 'b--', label='blue transform')
plt.legend(loc='best')
R = []
for i in x:
    R.append(GetG(i))
# Plot the G-channel mapping
plt.figure()
plt.plot(x, R, 'g--', label='green transform')
plt.legend(loc='best')
plt.show()
"""将灰度图像转换为彩色图像"""
graying = color.rgb2gray(data.coffee())*255
coloring = np.zeros(data.coffee().shape, dtype='uint8')
for i in range(coloring.shape[0]):
for j in range(coloring.shape[1]):
v = graying[i, j]
r, g, b = GetR(v), GetG(v), GetB(v)
coloring[i, j, :] = (r, g, b)
# Show the original grayscale image
plt.figure()
plt.axis('off')
plt.imshow(graying, cmap='gray')
# Show the converted color image
plt.figure()
plt.imshow(coloring)
plt.axis('off')
plt.show()
|
[
"2282661074@qq.com"
] |
2282661074@qq.com
|
43b9efcb67283c12ab78d41bf4a139edda32f6a5
|
8101c599bdf68e0fcc2dbc8188640abfebc4a790
|
/test/test.py
|
f651e500372ecdf139f269049f79c37f139d61d8
|
[
"BSD-3-Clause"
] |
permissive
|
symbooglix/boogie-runner
|
2a39ddc86d1fee8e3750db6c07f3d20363195390
|
01e1fe993d5eacf7055f1d950a209583c0405fd6
|
refs/heads/master
| 2021-01-21T04:37:04.636241
| 2016-04-05T16:28:27
| 2016-04-05T16:28:27
| 28,610,541
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,484
|
py
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import logging
import os
import pprint
import re
import shutil
import subprocess
import sys
import yaml
testDir = os.path.dirname(os.path.abspath(__file__))
repoDir = os.path.dirname(testDir)
# Hack
sys.path.insert(0, repoDir)
from BoogieRunner import ProgramListLoader
# Another Hack
sys.path.insert(0, os.path.join(repoDir, 'analysis'))
from br_util import FinalResultType, classifyResult
class RunnerTool:
def __init__(self, configFile, listFile, relativePathPrefix, workDir, yamlOutput):
self.configFile = configFile
self.listLocation = listFile
self.relativePathPrefix = relativePathPrefix
self.workDir = workDir
self.yamlOutput = yamlOutput
assert os.path.exists(self.listLocation)
def doCleanUp(self):
shutil.rmtree(self.workDir)
os.remove(self.yamlOutput)
def getFileList(self):
return ProgramListLoader.load(self.listLocation, self.relativePathPrefix)
class BatchRunnerTool(RunnerTool):
def __init__(self, configFile, listFile, relativePathPrefix, workDir, yamlOutput):
super(BatchRunnerTool, self).__init__(configFile, listFile, relativePathPrefix, workDir, yamlOutput)
self.numJobs = 1
def setNumJobs(self, count):
assert count > 0
self.numJobs = count
def getResults(self, testFiles, clean=True):
if os.path.exists(self.yamlOutput):
os.remove(self.yamlOutput)
exitCode = subprocess.call([self.tool,
"--jobs={}".format(self.numJobs),
self.configFile,
self.listLocation,
self.workDir,
self.yamlOutput
])
if exitCode != 0:
logging.error('Tool failed')
sys.exit(1)
if not os.path.exists(self.yamlOutput):
logging.error('cannot find yaml output')
sys.exit(1)
results = None
with open(self.yamlOutput, 'r') as y:
results = yaml.load(y)
if clean:
self.doCleanUp()
return results
@property
def tool(self):
return os.path.join(repoDir, 'boogie-batch-runner.py')
class SingleRunTool(RunnerTool):
def getResults(self, testFiles, clean=False):
logging.warning('clean directive ignored')
# Run over the tests
results = [ ]
for testFile in testFiles.keys():
exitCode = subprocess.call([self.tool,
self.configFile,
testFile,
self.workDir,
self.yamlOutput
])
if exitCode != 0:
logging.error('Tool failed')
sys.exit(1)
if not os.path.exists(self.yamlOutput):
logging.error('Yaml output is missing')
sys.exit(1)
with open(self.yamlOutput, 'r') as f:
results.extend(yaml.load(f))
self.doCleanUp()
return results
@property
def tool(self):
return os.path.join(repoDir, 'boogie-runner.py')
def main(args):
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument("-j", "--jobs", type=int, default=1,
help='jobs to run in parallel. Only works when using batch mode')
parser.add_argument("-k", "--keep-files", dest='keep_files',
action='store_true', default=False)
parser.add_argument("-l", "--list-file", dest='list_file',
type=str, default="simple_boogie_programs.txt")
parser.add_argument("config_file")
parser.add_argument("mode", choices=['single', 'batch'], help="Front end to use. Valid options %(choices)s")
pargs = parser.parse_args(args)
if pargs.mode != 'batch' and pargs.jobs > 1:
logging.error('Can only specify jobs when using "batch" mode')
return 1
# Compute some paths
workDir = os.path.join(testDir, 'working_dir')
yamlOutput = os.path.join(testDir, 'result.yml')
if not os.path.exists(pargs.config_file):
logging.error('Could not find config_file {}'.format(pargs.config_file))
return 1
listFile = os.path.join(testDir, pargs.list_file)
if not os.path.exists(listFile):
logging.error('Could not find list file "{}".'.format(listFile))
return 1
if pargs.mode == 'single':
runnerConstructor = SingleRunTool
elif pargs.mode == 'batch':
runnerConstructor = BatchRunnerTool
else:
logging.error('Invalid mode')
return 1
runner = runnerConstructor(pargs.config_file, listFile, testDir, workDir, yamlOutput)
if pargs.jobs > 1:
runner.setNumJobs(pargs.jobs)
if not os.path.exists(runner.tool):
logging.error('Cannot find {}'.format(runner.tool))
return 1
if os.path.exists(yamlOutput):
logging.error('Yaml output file "{}" exists. Remove it'.format(yamlOutput))
return 1
# Find all the tests
testFiles = {}
filenames = runner.getFileList()
for potentialTest in filenames:
if not os.path.exists(potentialTest):
logging.error('Could not find file "{}"'.format(potentialTest))
return 1
r = re.compile(r'^//\s*(\w+)')
# Read expected test result from first line of file
with open(potentialTest, 'r') as testFile:
line = testFile.readline()
m = r.match(line)
if m == None:
logging.error('Failed to find result on first line of file {}'.format(potentialTest))
return 1
expectedResultStr = m.group(1)
expectedResultEnum = FinalResultType[expectedResultStr]
logging.info('Found test:{} - {}'.format(potentialTest, expectedResultEnum))
testFiles[potentialTest] = expectedResultEnum
# Run tests
if os.path.exists(workDir):
logging.info('removing {}'.format(workDir))
shutil.rmtree(workDir)
os.mkdir(workDir)
results = runner.getResults(testFiles, clean= not pargs.keep_files)
# Check the results against the testFiles
logging.info('Got results:\n{}'.format(pprint.pformat(results)))
for result in results:
filename = result['program']
actualClassification = classifyResult(result)
expectedClassification = testFiles[filename]
if actualClassification != expectedClassification:
logging.error('Result mismatch for {}, expected {}, got {}'.format(
filename, expectedClassification, actualClassification))
return 1
logging.info('SUCCESS!')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"daniel.liew@imperial.ac.uk"
] |
daniel.liew@imperial.ac.uk
|
0abd2fdc02ec2dc86d229d0283aa88cfd9cdb26e
|
083f753d872fb735ac1f76465b1aefe239a8ab38
|
/conc_HTTP_server.py
|
7ef6643606091b0bd882a5e3da723907b1318ae4
|
[
"MIT"
] |
permissive
|
miramzz/network_tools-1
|
fc3c8168f373b7c1c1364b763230bfef5a1657d9
|
88aa23868d4bc8d00a568844f203e926e31fa76d
|
refs/heads/master
| 2021-01-15T17:59:41.873258
| 2014-06-18T20:43:43
| 2014-06-18T20:43:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,922
|
py
|
import socket
import os
import mimetypes
import SocketServer
from HTTPExceptions import HTTPException
from HTTPExceptions import HTTP400Error, HTTP404Error, HTTP405Error
def request_parser(raw_request):
raw_request = raw_request.split('\r\n')
keys = ('method', 'URI', 'protocol')
request = dict(zip(keys, raw_request[0].split()))
for element in raw_request[1:]:
if element.lower().startswith('host:'):
request['host'] = element.split()[1]
break
return request
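# Illustrative comment: a raw request such as
#   'GET /index.html HTTP/1.1\r\nHost: localhost\r\n\r\n'
# parses to {'method': 'GET', 'URI': '/index.html',
#            'protocol': 'HTTP/1.1', 'host': 'localhost'}.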
def check_request_method(request):
if request['method'] != 'GET':
raise HTTP405Error('Method Not Allowed')
def check_request_URI(request):
if ".." in request['URI']:
raise HTTP400Error('Bad Request')
if not request['URI'].startswith('/'):
##need to add something to check if this is an existing directory/filename
raise HTTP400Error('Bad Request')
def check_request_protocol(request):
if request['protocol'] != "HTTP/1.1":
raise HTTP400Error('Bad Request')
def check_request_host(request):
if 'host' not in request:
raise HTTP400Error('Bad Request')
def resource_locator(uri):
root = os.path.abspath(os.path.dirname(__file__))
root = os.path.join(root, "webroot")
dir_to_check = root + uri
if not os.path.exists(dir_to_check):
uri = ""
return 'HTTP404Error'
if os.path.isdir(dir_to_check):
dir_contents = os.listdir(dir_to_check)
return directory_formatter(dir_contents, uri)
else:
open_file = open(dir_to_check, 'r+')
file_contents = open_file.read()
return file_contents
def request_validator(request, content=""):
try:
check_request_method(request)
check_request_URI(request)
check_request_protocol(request)
check_request_host(request)
if content == 'HTTP404Error':
return ('404', 'Resource Not Found', '<html><h1>404 - Resource Not Found</h1></html>')
return ('200', 'OK', '{}'.format(content))
except HTTPException as err:
content = '<h1>{} - {}</h1>'.format(err.code, err.message)
return (err.code, err.message, content)
def response_builder(response, content):
mimetype = mimetypes.guess_type(content)[0]
content_type = 'Content-Type: {}'.format(mimetype)
template = '\r\n'.join(['HTTP/1.1 {} {}', content_type, '', '{}'])
return template.format(*response)
def directory_formatter(content, dir_uri):
output_list = "<html><ul>"
for item in content:
path = "{}/{}".format(dir_uri[1:], item)
output_list += '<li><a href="{}">{}</a></li>'.format(path, item)
output_list += "</ul></html>"
return output_list
def echo(conn, addr):
buffsize = 32
print('Waiting for message...')
while True:
final_output = ''
done = False
while not done:
msg_part = conn.recv(buffsize)
final_output += msg_part
if len(msg_part) < buffsize:
done = True
request = request_parser(final_output)
content = resource_locator(request["URI"])
response = request_validator(request, content)
response = response_builder(response, request["URI"])
conn.sendall(response)
conn.close()
break
def http_server():
SERVER_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
SERVER_SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
SERVER_SOCKET.bind(('127.0.0.1', 50000))
SERVER_SOCKET.listen(1)
    # accept() returns a (conn, addr) tuple; unpack it for echo()
    conn, addr = SERVER_SOCKET.accept()
    final_output = echo(conn, addr)
SERVER_SOCKET.close()
return final_output
if __name__ == '__main__':
from gevent.server import StreamServer
from gevent.monkey import patch_all
patch_all()
server = StreamServer(('127.0.0.1', 50000), echo)
print('Starting echo server on port 50000')
server.serve_forever()
|
[
"corinne.hutchinson@gmail.com"
] |
corinne.hutchinson@gmail.com
|
a577a93ea9080e600a4aba8dc03b1a09b3d3ccd3
|
0c3fa93ed9c3929061f667fe574fb75e04f4f6ff
|
/find_number_sentence.py
|
ec3b9e2b9c94727e6ff7765eb7da98ac632f641b
|
[] |
no_license
|
nandu7180/Python
|
21af5c40a6edb1d73c7d444d3b2070816442bf59
|
16471f95669aa3acd37583a9b29b1fb2caca0c55
|
refs/heads/master
| 2020-03-11T22:37:27.788604
| 2018-04-21T07:51:07
| 2018-04-21T07:51:07
| 130,297,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
import re
a = "Iam2ragh34va21"
lis = map(int, re.findall('\d+', a))
nsum = sum(lis)
print nsum
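# re.findall('\d+', a) yields ['2', '34', '21'], so this prints 57.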
|
[
"noreply@github.com"
] |
noreply@github.com
|
a7ec1d0348a449cc52d594b3b29f306f779fd8e4
|
a5292f369caff6fc1b9c0384031042a745fb44c5
|
/Templates/Deck of cards.py
|
d3835508a925f2e5a20e742cdbe10cdbd6f2c49e
|
[] |
no_license
|
yukiya727/pycharm
|
56f0c3011c9ecb9dc6d6802f8819e77129c5a589
|
3f0b78fdfec7d9698d65584f4b050a22f2161a02
|
refs/heads/master
| 2022-11-21T16:04:39.493448
| 2020-07-19T07:58:00
| 2020-07-19T07:58:00
| 278,785,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
import random
import time
import threading
class Card():
def __init__(self, suit, value):
self.suit = suit
self.value = value
self.values = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
def __str__(self):
return "%s of %s" % (self.value, self.suit)
    def __gt__(self, othercard):
        suits = ['Hearts', 'Diamonds', 'Clubs', 'Spades']
        if self.suit == othercard.suit:
            if self.values.index(self.value) > self.values.index(othercard.value):
                return True
            else:
                return False
        if self.values.index(self.value) == self.values.index(othercard.value):
            if suits.index(self.suit) > suits.index(othercard.suit):
                return True
            else:
                return False
        # fall back to value comparison when both suit and value differ
        return self.values.index(self.value) > self.values.index(othercard.value)
class Deck():
def __init__(self):
self.suits = ['Hearts', 'Diamonds', 'Clubs', 'Spades']
self.values = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
self.cards = [Card(s, v) for s in self.suits for v in self.values]
def __str__(self):
description = "Deck: \n"
for s in self.cards:
description += str(s) + "\n"
return description
def shuffle(self):
random.shuffle(self.cards)
def deal(self):
if len(self.cards) > 0:
print("Deal")
return self.cards.pop()
def getCard(self):
card = self.cards[0]
del self.cards[0]
return card
def checkDeck(self):
if len(self.cards) == 0:
return False
return True
def newDeck(self):
self.cards = [Card(s, v) for s in self.suits for v in self.values]
player_decks = [[] for i in range(4)]
def player_cards(id, mode):
    global d
    player_deck = []
    st = 0.0
    while d.checkDeck():
        if st != 0.0:
            st = 0.01 - st
            time.sleep(st)
        try:
            player_deck.append(d.getCard())
        except IndexError:
            # another thread took the last card between the check and the get
            break
        st = random.uniform(0, 0.01)
        time.sleep(st)
    player_decks[id] = player_deck
    print("Player {0}'s deck is:\n{1}".format(id + 1, [str(c) for c in player_deck]))
d = Deck()
d.shuffle()
players = []
for i in range(4):
player = threading.Thread(target=player_cards, args=(i, False))
player.start()
players.append(player)
for p in players:
p.join()
for i in range(4):
    print("Player {0}'s deck is:".format(i + 1))
    for card in player_decks[i]:
        print(card)
|
[
"yukiya727p@gmail.com"
] |
yukiya727p@gmail.com
|
3092c08a731b61558189665e7d2e63d08603ab03
|
d9eafd325ab775b7b32af2dd0b63afc7310be53d
|
/pfwra/home/migrations/0004_auto_20210323_0728.py
|
3678a7c488fe83d6dd909f1c2f80b1f809a9fe79
|
[
"MIT"
] |
permissive
|
johnkellehernz/pfwra
|
54b0db7debaed629d6003e0826a15bde2fd4a197
|
5b8c718bb2f1aaa34e9a718e07baf270294f7ba6
|
refs/heads/main
| 2023-05-01T14:39:42.419993
| 2021-05-13T11:00:07
| 2021-05-13T11:00:07
| 353,514,688
| 0
| 0
|
MIT
| 2021-03-31T23:15:32
| 2021-03-31T23:15:31
| null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
# Generated by Django 3.0.11 on 2021-03-23 07:28
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0003_auto_20210219_0827'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='featured',
field=wagtail.core.fields.StreamField([('cards', wagtail.core.blocks.StructBlock([('link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link label', required=True)), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Choose a page to link to', label='Page', required=False)), ('external_url', wagtail.core.blocks.URLBlock(help_text='Or choose an external URL to link to', label='External URL', required=False))], required=False)), ('header', wagtail.core.blocks.CharBlock(label='Header text')), ('text', wagtail.core.blocks.TextBlock(help_text='Write an introduction for the card', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(required=False))]))], blank=True, help_text='Featured cards'),
),
migrations.AlterField(
model_name='homepage',
name='hero_cta',
field=models.CharField(blank=True, help_text='Text to display on Call to Action', max_length=255, null=True, verbose_name='Hero CTA'),
),
migrations.AlterField(
model_name='homepage',
name='hero_text',
field=models.CharField(blank=True, help_text='Write an introduction for the homepage', max_length=255, null=True),
),
]
|
[
"jordi.joan@gmail.com"
] |
jordi.joan@gmail.com
|
814bbf98eeee530f21372492d0a0d9f8a9ce62d1
|
d8f7b9943049bd483189fe58fd4abf37163866dd
|
/GUI Code/search.py
|
9d83c91ad738a58d3a07107996a978d96e19663f
|
[] |
no_license
|
NagahShinawy/python-data-structures-algorithms
|
d14ecd478caa13e36c4f2dcdf942e5f9e9f351e5
|
c254f12dca78444e3b2bbd667d4508a699b9fb89
|
refs/heads/main
| 2023-05-12T17:26:23.477742
| 2021-05-10T07:08:30
| 2021-05-10T07:08:30
| 365,436,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
"""
Python Data Structures - A Game-Based Approach
Robin Andrews - https://compucademy.net/
Search Algorithms for use in GUI.
"""
import config
import heapq
import helper_functions as helpers
from collections import deque
class PriorityQueue:
def __init__(self):
self.elements = []
def is_empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
def dfs(board, start, goal):
stack = [start]
visited = set()
full_path = []
while stack:
current = stack.pop()
full_path.append(current)
if current == goal:
return full_path
for direction in ["up", "right", "down", "left"]: # Other orders are fine too.
row_offset, col_offset = config.offsets[direction]
neighbour = (current[0] + row_offset, current[1] + col_offset)
if helpers.is_legal_pos(board, neighbour) and neighbour not in visited:
stack.append(neighbour)
visited.add(neighbour)
def bfs(board, start, goal):
queue = deque()
queue.append(start)
visited = set()
full_path = []
while queue:
current = queue.popleft()
full_path.append(current)
if current == goal:
return full_path
for direction in ["up", "right", "down", "left"]:
row_offset, col_offset = config.offsets[direction]
neighbour = (current[0] + row_offset, current[1] + col_offset)
if helpers.is_legal_pos(board, neighbour) and neighbour not in visited:
queue.append(neighbour)
visited.add(neighbour)
def heuristic(a, b):
x1, y1 = a
x2, y2 = b
return abs(x1 - x2) + abs(y1 - y2)
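# Illustrative comment: heuristic((0, 0), (2, 3)) == 5, the Manhattan
# distance; it never overestimates on a 4-connected grid, so A* stays optimal.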
def a_star(board, start_pos, goal_pos):
pq = PriorityQueue()
pq.put(start_pos, 0)
g_values = {}
g_values[start_pos] = 0
full_path = []
while not pq.is_empty():
current_cell_pos = pq.get()
full_path.append(current_cell_pos)
if current_cell_pos == goal_pos:
return full_path
for direction in ["up", "right", "down", "left"]:
row_offset, col_offset = config.offsets[direction]
neighbour = (
current_cell_pos[0] + row_offset,
current_cell_pos[1] + col_offset,
)
new_cost = (
g_values[current_cell_pos] + 1
) # Would be edge weight in a weighted graph
if helpers.is_legal_pos(board, neighbour):
# Second check only applies to weighted graph.
if neighbour not in g_values or new_cost < g_values[neighbour]:
g_values[neighbour] = new_cost
f_value = new_cost + heuristic(goal_pos, neighbour)
pq.put(neighbour, f_value)
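# Minimal usage sketch (comment only; assumes a board plus the project's
# config.offsets and helpers.is_legal_pos are available):
#   path = a_star(board, (0, 0), (2, 3))
#   if path:
#       print("expanded", len(path), "cells; reached", path[-1])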
|
[
"E-n.Shinawy@lean.sa"
] |
E-n.Shinawy@lean.sa
|
15612575ea6312e20252678f637619af3da8dd28
|
7625f99ef06958df0d69668f612841e6f9749344
|
/manage.py
|
c2c9664e6b4b1e3875c108ec866b39fa468ffe03
|
[] |
no_license
|
KewinEvers96/tournamentManagerNWK
|
d9fa11059c3de4b8da43932a35c0053feb647867
|
98fa632768ee3b1b2520284b97994d7c210d2ce6
|
refs/heads/main
| 2023-07-26T19:41:49.565687
| 2021-08-10T04:56:40
| 2021-08-10T04:56:40
| 394,169,787
| 0
| 0
| null | 2023-07-16T07:20:42
| 2021-08-09T06:05:24
|
Python
|
UTF-8
|
Python
| false
| false
| 673
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tournamentManager.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"keversy@unal.edu.co"
] |
keversy@unal.edu.co
|
599edb1ee8ffd3b0dc26ea6189d552c48e759329
|
b71482c9e035ade758bbd0275794154b1318cefc
|
/Library/deployModels/test/pipeline.py
|
44aed53cded90ed507536f9a4a76820c21fd12d4
|
[] |
no_license
|
UCSD-SEELab/smarthomeDeploymentCode
|
9615fcab9b1dd58889d1767eb523c390b8a62163
|
7d9be41b5229f84402c25e7d66b9b3626af5dad6
|
refs/heads/master
| 2020-04-08T23:19:44.662756
| 2018-12-01T04:50:48
| 2018-12-01T04:50:48
| 159,819,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
import sys
sys.path.append('../../')
from deployModels.HLdeployer.communicate import *
import json
def blah(mosq, obj, msg):
print(str(msg.payload))
with open("./config.json", 'r') as confFile:
conf = json.load(confFile)["local"]
ab = communicate(conf, [blah])
ab.sendData("crk", str(12))
ab.startListening()
while True:
pass
|
[
"rishikanthc@gmail.com"
] |
rishikanthc@gmail.com
|
d33e66fbe2c3010ddfa1f442b382780ecfe03b90
|
3edcb7f381d6acda50bf8bd9f8b25ddeb2dfa71a
|
/PracticeStuff/ex20.py
|
d5c571203f523414f9322d61b4655144a6490fb8
|
[] |
no_license
|
Mr-Clean-Code/Python
|
e7273e3689e940e00d7a0bde7bbbe69ddd764a3c
|
11145d2939cefa84b4afe2469f2bd336c2987ca5
|
refs/heads/main
| 2023-06-12T03:45:09.663939
| 2021-07-08T08:54:58
| 2021-07-08T08:54:58
| 384,059,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def rewind(f):
    f.seek(0)
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
|
[
"noreply@github.com"
] |
noreply@github.com
|
b1509a8fd4ca04578be6136ee76e723603784297
|
3e1e3acbc40ee3b52b90043c6b72d6069471d09a
|
/lc153_rotatedArray.py
|
68f7c6c30a353356708821b5df28b20f75a6c138
|
[] |
no_license
|
WagonsWest/Leetcode
|
ef84b8ccbf87e37d0fc1cc16a859d514c8348bc1
|
2bf72dd56a8d1d7f56289ddc54176ab9765d69c9
|
refs/heads/master
| 2023-06-05T16:39:18.862260
| 2021-06-29T08:48:38
| 2021-06-29T08:48:38
| 371,731,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left = 0
right = len(nums)-1
while left < right:
            mid = left + (right-left)//2
if nums[left] < nums[mid] and nums[mid] < nums[right]:
return nums[left]
elif nums[left] <= nums[mid] and nums[mid] > nums[right]:
left = mid+1
else:
right = mid
return nums[left]
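# Example (comment only): Solution().findMin([4, 5, 6, 7, 0, 1, 2]) returns 0;
# each step keeps the half of the array that still contains the rotation point.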
|
[
"z667zhan@uwaterloo.ca"
] |
z667zhan@uwaterloo.ca
|
85e8c8ce2254a08de1b00abc2d3dc1954d1d61fc
|
543ae8c2829892f2dc0383cf52478526acb3213f
|
/0301-0400/0301-0350/0318MaximumProductOfWordLengths/demo.py
|
9cf8e6aea3666b964973a2216a6f4a41f5ad6eb0
|
[] |
no_license
|
BLZbanme/leetcode
|
04795331acf4191f64555c472f8003756287bdcf
|
26ba5bdc254366ecf80dfabcbbc9320757e17c34
|
refs/heads/master
| 2022-02-07T12:06:11.633897
| 2022-01-29T12:13:03
| 2022-01-29T12:13:03
| 185,048,357
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
from typing import List
class Solution:
def maxProduct(self, words: List[str]) -> int:
maskMap = dict()
if not words or not len(words):
return 0
N = len(words)
valueList = [0] * N
aCode = ord('a')
for i in range(N):
word = words[i]
for s in word:
valueList[i] |= 1 << (ord(s) - aCode)
if (len(word) >= maskMap.get(valueList[i], 0)):
maskMap[valueList[i]] = len(word)
maxVal = 0
for mask1 in maskMap:
for mask2 in maskMap:
if (mask1 & mask2 == 0) and maskMap[mask1] * maskMap[mask2] > maxVal:
maxVal = maskMap[mask1] * maskMap[mask2]
return maxVal
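# Example (comment only): for ["abcw", "baz", "foo", "bar", "xtfn", "abcdef"]
# the answer is 16, since "abcw" and "xtfn" share no letters and 4 * 4 = 16.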
|
[
"swordcome@gmail.com"
] |
swordcome@gmail.com
|
8635a03985fe50c6d62a963dd2a677a3d2382066
|
ee9d1da814af64064de49e777e6fe983f92e7621
|
/old/vdh_script.py
|
26da9294add192a677d965804ba76f9e1ecad294
|
[] |
no_license
|
nm96/MSc_project_code
|
8506fab47b291cffb4a9558da52a486a4dca290f
|
7b8b02b6b5dce0c09adf8e5ca2c3f464c4e6eff1
|
refs/heads/master
| 2022-12-14T20:34:53.432790
| 2020-09-13T15:06:31
| 2020-09-13T15:06:31
| 274,463,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
# Van der Heijden Script
# ----------------------
import numpy as np
from scipy.integrate import solve_ivp
from scipy.fft import rfft
import matplotlib.pyplot as plt
import time
from solutions import *
from models import *
t0 = time.time()
fn = 0 # Initialize figure number for plotting
# Rename basic functions and constants for clarity
cos = np.cos
sin = np.sin
tanh = np.tanh
pi = np.pi
# Define parameter values:
eps = 0.14 # Rotor eccentricity
Om = 3.1 # Driving frequency
m = 10 # Mass (per unit length)
c = 0.05 # Damping coefficient
k = 1 # Stiffness coefficient
h = 1 # Gap width
k_c = 50 # Stator stiffness parameter for VdH model
Om_nat = (k/m)**0.5 # Shaft natural frequency
Om_nat_c = (k_c/m)**0.5 # 'In-contact' natural frequency
# Define the model:
model = (VdH,(h,k_c))
params = (eps,Om,m,c,k,model)
# Integrate
tspan = (0,2**9)
N = tspan[1]*2**6
tt = np.linspace(*tspan,N)
X0 = [0.01,0,0,0]
sol = solve_ivp(dXdt,tspan,X0,t_eval=tt,args=params,method='Radau')
# Plot solution in stationary frame:
fn += 1; fig = plt.figure(fn); ax = fig.add_axes([.1,.1,.8,.8])
ax.plot(*solxy(sol))
ax.plot(h*cos(np.linspace(0,2*pi,1000)),h*sin(np.linspace(0,2*pi,1000)),c='r')
ax.set_aspect('equal')
ax.set_title("Solution trajectory in the stationary frame")
# Plot solution in corotating frame:
fn += 1; fig = plt.figure(fn); ax = fig.add_axes([.1,.1,.8,.8])
ax.plot(*rotsolxy(sol,Om))
ax.plot(h*cos(np.linspace(0,2*pi,1000)),h*sin(np.linspace(0,2*pi,1000)),c='r')
ax.set_aspect('equal')
ax.set_title("Solution trajectory in the co-rotating frame")
# Plot spectrum:
fn += 1; fig = plt.figure(fn); ax = fig.add_axes([.1,.1,.8,.8])
ax.axvline(Om_nat,ls='--',c='g')
ax.axvline(Om_nat_c,ls='--',c='b')
ax.axvline(Om,ls='--',c='r')
ax.plot(*transformed(sol),c='k')
ax.set_title("Log-fft spectrum for a solution with the Van der Heijden model")
ax.set_xlabel("Frequency (Hz)")
ax.set_ylabel("Log(fft(sol))")
ax.grid("on")
tf = time.time()
print("T = {:.2f}s".format(tf-t0))
plt.show()
|
[
"as19538@bristol.ac.uk"
] |
as19538@bristol.ac.uk
|
44a16f28b318d131dbeefaf200012cfa5e1bd8de
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/typeshed/tests/pytype_test.py
|
ee7ac0bb9cb9d9175b955f913e9188cc8bbc75a2
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 7,915
|
py
|
#!/usr/bin/env python3
"""Test runner for typeshed.
Depends on pytype being installed.
If pytype is installed:
1. For every pyi, do nothing if it is in pytype_blacklist.txt.
2. Otherwise, call 'pytype.io.parse_pyi'.
Option two will load the file and all the builtins, typeshed dependencies. This
will also discover incorrect usage of imported modules.
"""
import argparse
import itertools
import os
import re
import subprocess
import traceback
from typing import List, Match, Optional, Sequence, Tuple
from pytype import config as pytype_config, io as pytype_io
TYPESHED_SUBDIRS = ["stdlib", "third_party"]
TYPESHED_HOME = "TYPESHED_HOME"
UNSET = object() # marker for tracking the TYPESHED_HOME environment variable
def main() -> None:
args = create_parser().parse_args()
typeshed_location = args.typeshed_location or os.getcwd()
subdir_paths = [os.path.join(typeshed_location, d) for d in TYPESHED_SUBDIRS]
check_subdirs_discoverable(subdir_paths)
check_python_exes_runnable(python27_exe_arg=args.python27_exe, python36_exe_arg=args.python36_exe)
files_to_test = determine_files_to_test(typeshed_location=typeshed_location, subdir_paths=subdir_paths)
run_all_tests(
files_to_test=files_to_test,
typeshed_location=typeshed_location,
python27_exe=args.python27_exe,
python36_exe=args.python36_exe,
print_stderr=args.print_stderr,
dry_run=args.dry_run,
)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Pytype/typeshed tests.")
parser.add_argument("-n", "--dry-run", action="store_true", default=False, help="Don't actually run tests")
# Default to '' so that symlinking typeshed subdirs in cwd will work.
parser.add_argument("--typeshed-location", type=str, default="", help="Path to typeshed installation.")
# Set to true to print a stack trace every time an exception is thrown.
parser.add_argument(
"--print-stderr", action="store_true", default=False, help="Print stderr every time an error is encountered."
)
# We need to invoke python2.7 and 3.6.
parser.add_argument("--python27-exe", type=str, default="python2.7", help="Path to a python 2.7 interpreter.")
parser.add_argument("--python36-exe", type=str, default="python3.6", help="Path to a python 3.6 interpreter.")
return parser
class PathMatcher:
def __init__(self, patterns: Sequence[str]) -> None:
self.matcher = re.compile(r"({})$".format("|".join(patterns))) if patterns else None
def search(self, path: str) -> Optional[Match[str]]:
if not self.matcher:
return None
return self.matcher.search(path)
def load_blacklist(typeshed_location: str) -> List[str]:
filename = os.path.join(typeshed_location, "tests", "pytype_blacklist.txt")
skip_re = re.compile(r"^\s*([^\s#]+)\s*(?:#.*)?$")
skip = []
with open(filename) as f:
for line in f:
skip_match = skip_re.match(line)
if skip_match:
skip.append(skip_match.group(1))
return skip
def run_pytype(*, filename: str, python_version: str, python_exe: str, typeshed_location: str) -> Optional[str]:
"""Runs pytype, returning the stderr if any."""
options = pytype_config.Options.create(
filename,
module_name=_get_module_name(filename),
parse_pyi=True,
python_version=python_version,
python_exe=python_exe)
old_typeshed_home = os.environ.get(TYPESHED_HOME, UNSET)
os.environ[TYPESHED_HOME] = typeshed_location
try:
pytype_io.parse_pyi(options)
except Exception:
stderr = traceback.format_exc()
else:
stderr = None
if old_typeshed_home is UNSET:
del os.environ[TYPESHED_HOME]
else:
os.environ[TYPESHED_HOME] = old_typeshed_home
return stderr
def _get_relative(filename: str) -> str:
top = 0
for d in TYPESHED_SUBDIRS:
try:
top = filename.index(d)
except ValueError:
continue
else:
break
return filename[top:]
def _get_module_name(filename: str) -> str:
"""Converts a filename {subdir}/m.n/module/foo to module.foo."""
return ".".join(_get_relative(filename).split(os.path.sep)[2:]).replace(".pyi", "").replace(".__init__", "")
def can_run(exe: str, *, args: List[str]) -> bool:
try:
subprocess.run([exe] + args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except OSError:
return False
else:
return True
def _is_version(path: str, version: str) -> bool:
return any("{}{}{}".format(d, os.path.sep, version) in path for d in TYPESHED_SUBDIRS)
def check_subdirs_discoverable(subdir_paths: List[str]) -> None:
for p in subdir_paths:
if not os.path.isdir(p):
raise SystemExit("Cannot find typeshed subdir at {} (specify parent dir via --typeshed-location)".format(p))
def check_python_exes_runnable(*, python27_exe_arg: str, python36_exe_arg: str) -> None:
for exe, version_str in zip([python27_exe_arg, python36_exe_arg], ["27", "36"]):
if can_run(exe, args=["--version"]):
continue
formatted_version = ".".join(list(version_str))
script_arg = "--python{}-exe".format(version_str)
raise SystemExit(
"Cannot run Python {version}. (point to a valid executable via {arg})".format(
version=formatted_version, arg=script_arg
)
)
def determine_files_to_test(*, typeshed_location: str, subdir_paths: Sequence[str]) -> List[Tuple[str, int]]:
"""Determine all files to test, checking if it's in the blacklist and which Python versions to use.
Returns a list of pairs of the file path and Python version as an int."""
skipped = PathMatcher(load_blacklist(typeshed_location))
files = []
for root, _, filenames in itertools.chain.from_iterable(os.walk(p) for p in subdir_paths):
for f in sorted(f for f in filenames if f.endswith(".pyi")):
f = os.path.join(root, f)
rel = _get_relative(f)
if skipped.search(rel):
continue
if _is_version(f, "2and3"):
files.append((f, 2))
files.append((f, 3))
elif _is_version(f, "2"):
files.append((f, 2))
elif _is_version(f, "3"):
files.append((f, 3))
else:
print("Unrecognized path: {}".format(f))
return files
def run_all_tests(
*,
files_to_test: Sequence[Tuple[str, int]],
typeshed_location: str,
python27_exe: str,
python36_exe: str,
print_stderr: bool,
dry_run: bool
) -> None:
bad = []
errors = 0
total_tests = len(files_to_test)
print("Testing files with pytype...")
for i, (f, version) in enumerate(files_to_test):
stderr = (
run_pytype(
filename=f,
python_version="2.7" if version == 2 else "3.6",
python_exe=python27_exe if version == 2 else python36_exe,
typeshed_location=typeshed_location,
)
if not dry_run
else None
)
if stderr:
if print_stderr:
print(stderr)
errors += 1
stacktrace_final_line = stderr.rstrip().rsplit("\n", 1)[-1]
bad.append((_get_relative(f), stacktrace_final_line))
runs = i + 1
if runs % 25 == 0:
print(" {:3d}/{:d} with {:3d} errors".format(runs, total_tests, errors))
print("Ran pytype with {:d} pyis, got {:d} errors.".format(total_tests, errors))
for f, err in bad:
print("{}: {}".format(f, err))
if errors:
raise SystemExit("\nRun again with --print-stderr to get the full stacktrace.")
if __name__ == "__main__":
main()
|
[
"srusskih@users.noreply.github.com"
] |
srusskih@users.noreply.github.com
|
ae0680848978d297a36259c8c88173bd004503b1
|
327e45972e9c36433e9b17c377833b783793162b
|
/Drafts/ASASASAD.py
|
84a9c7e6806b1deddf01ff3db7755df7115cdacc
|
[] |
no_license
|
SashaPoraiko/GitTest
|
44ea5018078682f50b07fba95417830207b10958
|
8d183382169a0c88a8e0e6af3b1d5a7db204e07b
|
refs/heads/master
| 2020-06-02T14:59:06.749132
| 2019-09-19T14:53:43
| 2019-09-19T14:53:43
| 191,198,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
commands = ['end', 'write', 'read', 'calculate']
def calculate_things_semester(file_name):
semester = input('Enter the semester number: ')
disc_list = listed_disciplines(file_name)
total_hours = 0
for discipline in filter(lambda x: x[1] == semester, disc_list):
total_hours += int(discipline[2])
return total_hours
def listed_disciplines(file_name):
with open(file_name, 'r') as items:
return list(map(lambda row: row[:-1].split('\t'), items.readlines()))
def calculate_unique_lecturers(file_name):
disc_list = listed_disciplines(file_name)
lecturers_list = []
for discipline in filter(lambda x: x[4] not in lecturers_list, disc_list):
lecturers_list.append(discipline[4])
return lecturers_list
def read(file_name):
with open(file_name, 'r') as thing_list:
print('\n' + thing_list.read())
def write(file_name):
with open(file_name, 'a') as thing_list:
inc_thing = input('Enter the Thing: ')
if not validate(inc_thing.split('\t')):
print('Wrong data!')
return
thing_list.write(inc_thing + '\n')
def validate(data):
return len(data) == 5
def run():
file = 'Things.txt'
while True:
command = input('Enter the command: ')
if command == 'end':
break
if command not in commands:
print('Wrong command! ')
continue
if command == 'write':
write(file)
elif command == 'read':
read(file)
elif command == 'calculate':
print('The total hours for current semester is: {}'.format(calculate_things_semester(file)))
print('The unique lecturers list is: {}'.format(calculate_unique_lecturers(file)))
if __name__ == '__main__':
run()
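# A hypothetical Things.txt line (the code expects 5 tab-separated fields; from the
# indices used above, field [1] is the semester, [2] the hours and [4] the lecturer;
# fields [0] and [3] are stored but not read by the calculations):
#   Algebra<TAB>1<TAB>60<TAB>lecture<TAB>Dr. Ivanova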
|
[
"poraiko.alexandr@gmail.com"
] |
poraiko.alexandr@gmail.com
|
0082cc077df1735c971c44820d6af60113964d09
|
3f3aa41bbb5dac460318c214b2e06a9a4d384f93
|
/current_model_code/decisions.py
|
8db36e97485cc79c06f344e299a16f34f2a8aebc
|
[] |
no_license
|
quao627/ABM_py
|
f576e7cfc93961a2844e19b95b1a76278ef3bcd6
|
15cbf6cc8316b86a463c4c5caade208f54360c89
|
refs/heads/master
| 2023-03-18T13:17:05.913734
| 2021-03-15T21:53:11
| 2021-03-15T21:53:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Working definition of decision class for ABM
of environmental migration
@author: kelseabest
"""
#import packages
import random
import math
import numpy as np
import matplotlib.pyplot as plt
class decision :
#method decide returns True or False
#subclass of decisions
def __init__(self): #initialize outcome
self.outcome = False
def decide(self):
pass
class utility_max(decision):
def __init__(self): #initialize utilities
super().__init__()
def decide(self, household):
if household.total_utility < household.total_util_w_migrant:
self.outcome = True
class push_threshold(decision):
def __init__(self): #initialize utilities
super().__init__()
def decide(self, household):
if household.secure == False:
self.outcome = True
elif household.total_utility < household.total_util_w_migrant:
self.outcome = True
class tpb(decision):
def __init__(self): #initialize utilities
super().__init__()
def decide(self):
pass
class pmt(decision):
def __init__(self): #initialize utilities
super().__init__()
def decide(self):
pass
class mobility_potential(decision):
def __init__(self): #initialize utilities
super().__init__()
def decide(self):
pass
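# A hypothetical usage sketch (households come from elsewhere in the ABM; this
# stand-in carries only the attributes the decision classes actually read):
if __name__ == "__main__":
    class FakeHousehold:
        secure = False
        total_utility = 10.0
        total_util_w_migrant = 12.0

    rule = push_threshold()
    rule.decide(FakeHousehold())
    print(rule.outcome)  # True: the household is insecure, so migration is chosen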
|
[
"kelsea.b.best@vanderbilt.edu"
] |
kelsea.b.best@vanderbilt.edu
|
0c78396cacf3dcb777ca52b8bb646c14114b8fd8
|
b323fe5968aea700322428ba6bd239b45bc88c00
|
/sohpen/website/migrations/0004_auto_20170518_0707.py
|
9cbdfebe44d099d22afdb59741aada8fb2fc3ec3
|
[] |
no_license
|
aakashres/sophen
|
a1862be0fe4aaac51a03f111c1943c1e44f517cb
|
d84b8e8640f10eef22a79b8afba3e226405f9e5d
|
refs/heads/master
| 2022-11-08T01:46:05.697691
| 2017-11-06T11:10:22
| 2017-11-06T11:10:22
| 273,651,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-18 07:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0003_auto_20170518_0544'),
]
operations = [
migrations.AlterField(
model_name='menu',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='website.Menu'),
),
]
|
[
"aakash.shres@gmail.com"
] |
aakash.shres@gmail.com
|
b794a12562f904bc4635f5b9ed68facb83139732
|
a7da58ad91b007b3650003708eb91928f1e3684a
|
/bt5/erp5_safeimage/DocumentTemplateItem/portal_components/document.erp5.TileImageTransformed.py
|
466787ccecd19c9b734940556b1aa542d84022ef
|
[] |
no_license
|
jgpjuniorj/j
|
042d1bd7710fa2830355d4312a6b76103e29639d
|
dc02bfa887ffab9841abebc3f5c16d874388cef5
|
refs/heads/master
| 2021-01-01T09:26:36.121339
| 2020-01-31T10:34:17
| 2020-02-07T04:39:18
| 239,214,398
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
from Products.ERP5.Document.Image import Image
from zLOG import LOG,INFO,ERROR,WARNING
class TileImageTransformed(Image):
"""
    Tile images split an image into many small parts and store the information as sub-objects.
"""
def _setFile(self, *args, **kw):
"""Set the file content and reset image information."""
if "TileGroup0" in self.objectIds():
self.manage_delObjects("TileGroup0")
if "ImageProperties.xml" in self.objectIds():
self.manage_delObjects("ImageProperties.xml")
self._update_image_info()
processor = self.Image_getERP5ZoomifyProcessor(self,True)
processor.ZoomifyProcess(self.getId(),*args)
|
[
"seb@nexedi.com"
] |
seb@nexedi.com
|
617d7e2db0c09938a95dc1544a51167769b0c787
|
a22464ad72a7e0cf346e599452b41840d2fec0fc
|
/CodeJam/2019/2019Qualification/testing_tool.py
|
f91474cf7f1dde11a33d4d6b0c294513f025d3f3
|
[] |
no_license
|
lzy960601/Google_Coding_Competitions
|
997ed05f611ee0f1947001d74860bb98adbdb11e
|
67e3235a7388c1dcda2e3b7589b91000aa3a2d5d
|
refs/heads/master
| 2022-10-11T07:06:06.325829
| 2022-10-04T16:22:04
| 2022-10-04T16:22:04
| 177,498,727
| 156
| 15
| null | 2020-10-18T17:14:53
| 2019-03-25T02:17:19
|
C++
|
UTF-8
|
Python
| false
| false
| 7,157
|
py
|
# Usage: `testing_tool.py test_number`, where the argument test_number
# is 0 for Test Set 1 or 1 for Test Set 2.
from __future__ import print_function
import random
import sys
import re
# Use raw_input in Python2.
try:
input = raw_input
except NameError:
pass
_ERROR_MSG_EXTRA_NEW_LINES = "Input has extra newline characters."
_ERROR_MSG_INCORRECT_ARG_NUM = "Answer has wrong number of tokens."
_ERROR_MSG_NOT_SORTED = "Worker IDs in answer must be sorted."
_ERROR_MSG_NOT_UNIQUE = "Worker IDs in answer must be distinct."
_ERROR_MSG_INVALID_TOKEN = "Input has invalid token."
_ERROR_MSG_OUT_OF_RANGE = "Input includes an out-of-range value."
_ERROR_MSG_READ_FAILURE = "Read for input fails."
_QUERY_LIMIT_EXCEEDED_MSG = "Query Limit Exceeded."
_WRONG_ANSWER_MSG = "Wrong Answer."
_ERROR_MSG_INTERNAL_FAILURE = ("The judge failed due to an internal error. "
"This should not happen, please raise an issue "
"to the Code Jam team.")
class Case:
def __init__(self, bad_set, N, F):
self.__bad_set = set(bad_set) # The set of broken computers
self.__N = N # The total number of computers
self.__max_num_tries = F # The number of allowed guesses
self.__raw_input = input
def _parse_contestant_query(self, bitstring):
"""Tries to parse a contestant's input as if it were a query bitstring.
Returns:
(string, string): The first argument is the bitstring, the second is
the error string in case of error.
If the parsing succeeds, the return value should be (str, None).
If the parsing fails, the return value should be (None, str).
"""
# Must be of length exactly N
if len(bitstring) != self.__N:
return (None, _ERROR_MSG_INVALID_TOKEN)
# Bitstring must contain only 0 and 1
if not all([x in '01' for x in bitstring]):
return (None, _ERROR_MSG_INVALID_TOKEN)
return (bitstring, None)
def _parse_contestant_answer(self, tokens, res):
"""Tries to parse a contestant's input as if it were answering a testcase.
Returns:
(list string): The first argument is the answer, the second is
the error string in case of error.
If the parsing succeeds, the return value should be (list, None).
If the parsing fails, the return value should be (None, str).
"""
if len(tokens) != len(self.__bad_set):
print(self.__N, file = sys.stderr)
print(self.__bad_set, file = sys.stderr)
return (None, _ERROR_MSG_INCORRECT_ARG_NUM)
try:
contestant_answer = list(map(int, tokens))
except Exception:
return (None, _ERROR_MSG_INVALID_TOKEN)
if sorted(contestant_answer) != contestant_answer:
return (None, _ERROR_MSG_NOT_SORTED)
if len(set(contestant_answer)) != len(contestant_answer):
return (None, _ERROR_MSG_NOT_UNIQUE)
for x in contestant_answer:
if (x < 0) or (x >= self.__N):
return (None, _ERROR_MSG_OUT_OF_RANGE)
return (contestant_answer, None)
def _parse_contestant_input(self, response):
"""Parses contestant's input.
Parse contestant's input which should be either a string of N bits or
a list of len(bad_set) space-separated integers.
Args:
response: (str or list) one-line of input given by the contestant.
Returns:
(int or list, string): The bitstring sent by the contestant if making
a query, or a list of ints if the contestant is answering the test case.
the second argument is an error string in case of error.
If the parsing succeeds, the return value should be (int or list, None).
If the parsing fails, the return value should be (None, str).
"""
if ("\n" in response) or ("\r" in response):
return None, _ERROR_MSG_EXTRA_NEW_LINES
    if not re.match(r"^[\s0-9-]+$", response):
return None, _ERROR_MSG_INVALID_TOKEN
print('! : ' + response, file = sys.stderr)
tokens = response.split()
if len(tokens) == 1 and len(tokens[0]) == self.__N:
# If there is exactly one token and it has length N, it must be a query.
# A number with N digits has to be at least 10**N which is always > N,
# so there is no way for a valid answer to be mistaken as a query.
return self._parse_contestant_query(tokens[0])
else:
# It's not a query, so it must parse as an answer.
return self._parse_contestant_answer(tokens, response)
def _answer_query(self, bitstring):
answer = ""
for i in range(self.__N):
if i not in self.__bad_set:
answer += bitstring[i]
return answer
def Judge(self):
"""Judge one single case; should only be called once per test case.
Returns:
An error string, or None if the attempt was correct.
"""
print(self.__N, len(self.__bad_set), self.__max_num_tries)
sys.stdout.flush()
# +1 for the answer they have to give
for queries in range(self.__max_num_tries + 1):
try:
contestant_input = self.__raw_input()
except Exception:
return _ERROR_MSG_READ_FAILURE
contestant_input, err = self._parse_contestant_input(contestant_input)
if err is not None:
return err
if type(contestant_input) is str:
# Query
if queries == self.__max_num_tries:
# Too many queries
return _QUERY_LIMIT_EXCEEDED_MSG
else:
print(self._answer_query(contestant_input))
sys.stdout.flush()
else:
# Answer
assert(type(contestant_input) is list)
if set(contestant_input) == self.__bad_set:
# Testcase answered correctly
print(1)
sys.stdout.flush()
return None
else:
return _WRONG_ANSWER_MSG
return _QUERY_LIMIT_EXCEEDED_MSG
def getTestCases(test_number):
F = (10, 5)[test_number]
# You can edit or add your own test cases here.
cases = [Case([1, 2, 3], 4, F), Case([2, 3, 5], 6, F), Case([1000], 1024, F)]
return cases
def JudgeAllCases(test_number):
"""Sends input to contestant and judges contestant output.
In the case of any error (other than extra input after all testcases are
finished), -1 is printed to stdout.
Returns:
An error string, or None if the attempt was correct.
"""
try:
cases = getTestCases(test_number)
except Exception:
return _ERROR_MSG_INTERNAL_FAILURE
print(len(cases))
sys.stdout.flush()
for idx, case in enumerate(cases):
err = case.Judge()
if err is not None:
print(-1)
sys.stdout.flush()
return "Case #{} fails:\n{}".format(idx+1, err)
# Make sure nothing other than EOF is printed after all cases finish.
try:
response = input()
except EOFError:
return None
except:
return "Exception raised while reading input after all cases finish."
return "Additional input after all cases finish: {}".format(response[:1000])
def main():
random.seed(379009)
test_number = int(sys.argv[1])
if test_number != 1:
test_number = 0
result = JudgeAllCases(test_number)
if result is not None:
print(result, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
|
[
"21952652+lzy960601@users.noreply.github.com"
] |
21952652+lzy960601@users.noreply.github.com
|
c2448e93ec7f3156bcafb8652128d8a28a335313
|
00da97c7d54ae6666ea202db7c7391ef5599e2c0
|
/guoyu/migrations/0002_auto_20180806_1646.py
|
25d402aa62d9e5fa97e40385de678b4705738073
|
[] |
no_license
|
Sean609/Learn-Chinas-
|
c986327479ab225fd768f3b43ee0765e5947df8a
|
d052a295d11a7ff1f324206f3ffc01a3454b9ac0
|
refs/heads/master
| 2022-12-10T03:47:28.346088
| 2018-10-04T12:37:11
| 2018-10-04T12:37:11
| 151,568,133
| 0
| 0
| null | 2022-11-22T02:52:50
| 2018-10-04T12:29:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-08-06 16:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('guoyu', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tempinfo',
name='awaz',
field=models.FileField(default=0, upload_to='sitatic/audio', verbose_name='awaz'),
preserve_default=False,
),
migrations.AlterField(
model_name='tempinfo',
name='fanyi',
field=models.CharField(max_length=40, verbose_name='\u7ffb\u8bd1'),
),
migrations.AlterField(
model_name='tempinfo',
name='font',
field=models.CharField(max_length=20, verbose_name='\u62fc\u97f3'),
),
migrations.AlterField(
model_name='tempinfo',
name='juzi',
field=models.CharField(max_length=100, verbose_name='\u53e5\u5b50'),
),
]
|
[
"abliz@bluemouse.com.cn"
] |
abliz@bluemouse.com.cn
|
e9af0ba3018a1b4b710debbc65ec77f0108e0de3
|
9a8472865830b5f4fe39c69e4235da4b3272170f
|
/shower/migrations/0017_eye.py
|
d84381299080b6a4e4c85672dfd9ec0d6bd7e4cb
|
[] |
no_license
|
KimberleyLawrence/babybook
|
ed9b2fb599fda0e8567a556c2e18b9bbe28bf4b2
|
a03753822cafd49d0c247029e2fab4c9365890b4
|
refs/heads/master
| 2021-01-10T16:44:12.940468
| 2016-04-19T03:52:38
| 2016-04-19T03:52:38
| 51,638,144
| 1
| 1
| null | 2016-03-23T03:39:06
| 2016-02-13T08:23:21
|
Python
|
UTF-8
|
Python
| false
| false
| 828
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-02 06:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('shower', '0016_auto_20160226_0557'),
]
operations = [
migrations.CreateModel(
name='Eye',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('guess', models.CharField(max_length=6)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"kimberleynedic@gmail.com"
] |
kimberleynedic@gmail.com
|
74587b67f842719cafc4fa195d739f55ed2fa651
|
7d75f44a9486b6e2bb0e58c72d22be436cc308af
|
/EventSelector/share/skimZtautau_r17.py
|
b31398f3d40146374158a5dfb51487f1b8a5eba0
|
[] |
no_license
|
zinon/RawData
|
f95dbef4c12eb9db73adbae8e94c031781bc3bef
|
38743ed4d52f532a03e53dfee774abfa49e82988
|
refs/heads/master
| 2021-01-19T20:18:15.971916
| 2015-02-25T11:30:18
| 2015-02-25T11:30:18
| 31,310,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,166
|
py
|
#!/usr/bin/env python
"""
Author: E. Feng (Chicago) <Eric.Feng@cern.ch>
Modified: B. Samset (UiO) <b.h.samset@fys.uio.no> for use with SUSYD3PDs
Modified: C. Young (Oxford) for use with a trigger
Modified: A. Larner (Oxford) & C.Young to filter on lepton Pt and for use with TauD3PDs to filter on lepton Pt
Modified: R. Reece (Penn) <ryan.reece@cern.ch> - added event counting histogram
Modified: J. Griffiths (UW-Seattle) griffith@cern.ch -- added duplicate event filtering
Usage:
./skim_D3PDs.py file1.root,file2.root,...
with pathena:
prun --exec "skim_D3PDs.py %IN" --athenaTag=15.6.9 --outputs susy.root --inDS myinDSname --outDS myoutDSname --nFilesPerJob=50
"""
from Skimmer import Skimmer
from TauSelector import TauSelector
from MuonSelector import MuonSelector
from ElectronSelector import ElectronSelector
from ZtautauSelector import ZtautauSelector
import optparse
####################################################
#
# Process Command Line Options
#
####################################################
usage = "usage: %prog files"
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
if not len(args):
parser.print_help()
    raise Exception('ERROR - must pass input files on command line')
files = []
for arg in args: files += arg.split(',')
#####################################################
#
# CONFIGURATION
#
#####################################################
## ----------------- Selectors ------------------ ##
# Tau Selector
tau_selector = TauSelector()
tau_selector.min_pt = 15000.
tau_selector.max_eta = 2.47
tau_selector.req_bdt_t = True
tau_selector.veto_loose_muon = True
# Muon Selector
muon_selector = MuonSelector()
muon_selector.min_pt = 20000.
muon_selector.max_eta = 2.4
muon_selector.req_tight = False
muon_selector.req_combined = True
muon_selector.max_z0 = 10.
muon_selector.req_trt_cleaning = True
muon_selector.min_BLHits = 1
muon_selector.min_PixHits = 2
muon_selector.min_SCTHits = 6
muon_selector.max_SCTHoles = 1
# Electron Selector
ele_selector = ElectronSelector()
ele_selector.min_pt = 15000.
ele_selector.max_eta = 2.47
ele_selector.excluded_eta_regions = [ [1.37, 1.52] ]
ele_selector.allowed_authors = [ 1, 3 ]
ele_selector.req_medium = False
ele_selector.req_tight = True
ele_selector.req_cleaning = True
# Ztautau Selector
ztautau_selector = ZtautauSelector()
ztautau_selector.tau_selector = tau_selector
ztautau_selector.muon_selector = muon_selector
ztautau_selector.ele_selector = ele_selector
ztautau_selector.channel = 2 # 1 - electron, 2 - muon
ztautau_selector.req_dilepton_veto = True
ztautau_selector.min_sum_cos_dphi = -0.15
ztautau_selector.max_trans_mass = 50.e3
ztautau_selector.tag_max_nucone40 = 0
ztautau_selector.tag_max_etcone20onpt = 0.04
ztautau_selector.req_os = True
## ------------ Skimming and Slimming ------------ ##
skimmer = Skimmer()
skimmer.selectors.append(ztautau_selector)
skimmer.switch_off_branches = [
'tau_cluster_*',
'jet_*',
'ph_*',
'mu_muid_*',
'trk_*',
]
skimmer.switch_on_branches = [
'jet_AntiKt4TopoEM_*'
]
skimmer.max_events = -1
skimmer.skim_hist_name = 'h_n_events'
skimmer.output_filename = 'tauskim.root'
skimmer.main_tree_name = 'tau'
skimmer.meta_tree_details = [['tauMeta','TrigConfTree']]
skimmer.input_files = files
skimmer.lumi_dir = 'Lumi'
skimmer.lumi_obj_name = 'tau'
skimmer.lumi_outfile_base = 'lumi'
skimmer.skim_hist = 'h_n_events'
#####################################################
#
# EXECUTE
#
#####################################################
skimmer.initialise()
skimmer.ch_new.Branch( 'TagIndex', ztautau_selector.tag_index, 'TagIndex/I' )
skimmer.ch_new.Branch( 'ProbeIndex', ztautau_selector.probe_index, 'ProbeIndex/I' )
skimmer.ch_new.Branch( 'VisMass', ztautau_selector.vis_mass, 'VisMass/D' )
skimmer.execute()
|
[
"zinon123@gmail.com"
] |
zinon123@gmail.com
|
75c8f6a38a04378c5ffdf3154ad83a7999aaaf00
|
ceb35ffff93e0dd0cc305af46f51bd8015f0849a
|
/book/algorithms/algorithms_one_liner_05.py
|
0ce50b263e196278a381be2f5eb8cdf91a423886
|
[] |
no_license
|
shoaibdipu/PythonOneLiners
|
76d35129641414acf60d08fb6fff81fe075dfeb5
|
7e216558c5c6c8009dbc51a39808237f451b185d
|
refs/heads/master
| 2023-08-04T12:50:00.030175
| 2021-09-22T19:44:32
| 2021-09-22T19:44:32
| 409,334,647
| 0
| 0
| null | 2021-09-22T19:41:46
| 2021-09-22T19:41:46
| null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Calculating the Powerset by Using Functional Programming
# Dependencies
from functools import reduce
# The Data
s = {1, 2, 3}
# The One-Liner
ps = lambda s: reduce(lambda P, x: P + [subset | {x} for subset in P], s, [set()])
# The Result
print(ps(s))
'''
[set(), {1}, {2}, {1, 2}, {3}, {1, 3}, {2, 3}, {1, 2, 3}]
'''
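# How the reduce step grows the powerset, traced by hand (not in the original):
#   start:       [set()]
#   after x=1:   [set(), {1}]
#   after x=2:   [set(), {1}, {2}, {1, 2}]
#   after x=3:   the eight subsets shown above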
|
[
"noreply@github.com"
] |
noreply@github.com
|
647e8796a012abf04250fb60a973bca50dd8a759
|
4b058d88820f1443f279569bb85288c54d35f67a
|
/src/services/hotel-v1-python/app/routes/hotels.py
|
6a8ca207ed324b057972521a8ca33cc065c7fc0c
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
1154046/instana-openshift
|
ccd658b000cdb563ec34fe924d14d54db93e8fc1
|
90a56dcffebf795fd0967f4e630527db55c75035
|
refs/heads/main
| 2023-09-02T17:44:03.219543
| 2021-11-21T19:43:14
| 2021-11-21T19:43:14
| 430,455,801
| 0
| 0
|
Apache-2.0
| 2021-11-21T19:05:11
| 2021-11-21T19:05:10
| null |
UTF-8
|
Python
| false
| false
| 4,798
|
py
|
from app.services import data_handler
from app.errors import tag_not_found, item_not_found
from app.jaeger import Jaeger
from flask import jsonify, request, Blueprint
from datetime import datetime
from pybreaker import CircuitBreaker
hotels_blueprint = Blueprint("hotels_blueprint", __name__)
info_breaker = CircuitBreaker(fail_max=5, reset_timeout=30)
id_breaker = CircuitBreaker(fail_max=5, reset_timeout=30)
breaker = CircuitBreaker(fail_max=5, reset_timeout=30)
context = Jaeger()
def string_to_array(string):
return string.split(",")
def get_query_param(key, query_data, func):
if key in query_data.keys():
if query_data[key] == "NaN":
return None
return func(query_data[key])
return None
@hotels_blueprint.route("/info/<tag>", methods=["GET"])
def filter_list(tag):
"""
/**
* GET /api/v1/hotels/info/{filter}
* @tag Hotel
* @summary Get filter list
* @description Gets list of a type to filter Hotel data by.
* @pathParam {FilterType} filter - The name of the filter to get options for.
* @response 200 - OK
* @response 400 - Filter Not Found Error
* @response 500 - Internal Server Error
*/
"""
context.start("info", request)
try:
data = info_breaker.call(data_handler.get_filter_list, tag, context)
status_code = 200
except tag_not_found.TagNotFoundException as e:
data = {"error": e.args[0]}
status_code = 400
except Exception as e:
data = {"error": e.args[0]}
status_code = 500
finally:
context.stop(status_code)
return jsonify(data), status_code
@hotels_blueprint.route("/<id>", methods=["GET"])
def get_id(id):
"""
/**
* GET /api/v1/hotels/{id}
* @tag Hotel
* @summary Get hotel by id
* @description Gets data associated with a specific Hotel ID.
* @pathParam {string} id - id of the Hotel
* @queryParam {string} dateFrom - Date From
* @queryParam {string} dateTo - Date To
* @response 200 - OK
* @response 404 - not found
* @response 500 - Internal Server Error
*/
"""
context.start("id", request)
try:
query_data = request.args
data = id_breaker.call(
data_handler.get_hotel_by_id,
id,
{
"date_from": get_query_param("dateFrom", query_data, parse_date),
"date_to": get_query_param("dateTo", query_data, parse_date),
},
context,
)
status_code = 200
except item_not_found.ItemNotFoundException:
data = {"error": "not found"}
status_code = 404
except Exception as e:
data = {"error": e.args[0]}
status_code = 500
finally:
context.stop(status_code)
return jsonify(data), status_code
@hotels_blueprint.route("/<country>/<city>", methods=["GET"])
def get_city(country, city):
"""
/**
* GET /api/v1/hotels/{country}/{city}
* @tag Hotel
* @summary Get list of hotels
* @description Gets data associated with a specific city.
* @pathParam {string} country - Country of the hotel using slug casing.
* @pathParam {string} city - City of the hotel using slug casing.
* @queryParam {string} dateFrom - Date From
* @queryParam {string} dateTo - Date To
* @queryParam {string} [superchain] - Hotel superchain name.
* @queryParam {string} [hotel] - Hotel Name.
* @queryParam {string} [type] - Hotel Type.
* @queryParam {number} [mincost] - Min Cost.
* @queryParam {number} [maxcost] - Max Cost.
* @response 200 - OK
* @response 500 - Internal Server Error
*/
"""
context.start("city", request)
try:
query_data = request.args
data = breaker.call(
data_handler.get_hotels,
country,
city,
{
"superchain": get_query_param(
"superchain", query_data, string_to_array
),
"hotel": get_query_param("hotel", query_data, string_to_array),
"type": get_query_param("type", query_data, string_to_array),
"min_cost": get_query_param("mincost", query_data, int),
"max_cost": get_query_param("maxcost", query_data, int),
"date_from": get_query_param("dateFrom", query_data, parse_date),
"date_to": get_query_param("dateTo", query_data, parse_date),
},
context,
)
status_code = 200
except Exception as e:
print(e)
data = {"error": "Error"}
status_code = 500
finally:
context.stop(status_code)
return jsonify(data), status_code
def parse_date(date):
return datetime.strptime(date, "%Y-%m-%d")
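# Hypothetical example requests against these routes (paths are relative to
# wherever hotels_blueprint is mounted, e.g. /api/v1/hotels; values are placeholders):
#   GET /info/superchain
#   GET /united-states/new-york?dateFrom=2022-01-01&dateTo=2022-01-05&mincost=100&maxcost=300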
|
[
"magicmax33@gmail.com"
] |
magicmax33@gmail.com
|
35db2a331b65c35385546534a82e6d1381874fc8
|
5141e92d15c1493b85a45b7ba7b3723874345fe3
|
/repos/system_upgrade/el7toel8/actors/opensshpermitrootlogincheck/libraries/opensshpermitrootlogincheck.py
|
0cb90819c7cdcad2c62488ee74b8c52dbbabe8ce
|
[
"Apache-2.0"
] |
permissive
|
zdohnal/leapp-repository
|
ccb1cf734cd40413f7b2d3b3e6ae689563fd153e
|
8220b707c9b576712f1289d3aaa8767cadeeb788
|
refs/heads/master
| 2022-01-19T09:40:05.194100
| 2022-01-07T17:15:15
| 2022-01-07T17:15:15
| 183,820,028
| 0
| 1
|
Apache-2.0
| 2020-01-02T13:35:48
| 2019-04-27T20:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
def semantics_changes(config):
globally_enabled = False
in_match_disabled = False
for opt in config.permit_root_login:
if opt.value != "yes" and opt.in_match is not None \
and opt.in_match[0].lower() != 'all':
in_match_disabled = True
if opt.value == "yes" and (opt.in_match is None or
opt.in_match[0].lower() == 'all'):
globally_enabled = True
return not globally_enabled and in_match_disabled
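# A hypothetical illustration (not part of leapp): the real `config` comes from
# leapp's OpenSSH model; this stand-in mimics only the two attributes read above.
if __name__ == '__main__':
    from collections import namedtuple
    Opt = namedtuple('Opt', ['value', 'in_match'])
    Config = namedtuple('Config', ['permit_root_login'])
    # Root login disabled only inside a non-'All' Match block and never enabled
    # globally, so the upgrade changes the effective semantics -> True
    print(semantics_changes(Config(permit_root_login=[Opt('no', ['User', 'foo'])])))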
|
[
"vsokol@redhat.com"
] |
vsokol@redhat.com
|
9e18e432aa9b22ce8b954341a277acd526ffeb4f
|
0f454697abe311b0398ce106cf896c7781417b41
|
/old/backup/script/simplenet.py
|
f2929c992d834d936a804d244987b0c7ddb3cd49
|
[] |
no_license
|
securitysystem/SecuritySystem
|
c4e51edbf156559ecad2a31908f78240818bd1ba
|
ee56a045972199dba9d07f748c81f86aa1bfcac5
|
refs/heads/master
| 2021-01-20T22:41:13.381400
| 2015-01-23T00:27:46
| 2015-01-23T00:27:46
| 29,707,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,659
|
py
|
#########################################################
#Code Author: OmniBean
#Go to:
#http://www.omnibean.96.lt/
#for more by OmniBean.
#########################################################
"""
The SimpleNet Library for Python 3 is a library written by OmniBean
that allows anyone to easily set up networking in Python using sockets.
Everything is predefined, check out the demos for more information.
Read the Documentation for more information and a tutorial.
"""
#SimpleNet by OmniBean
import os
import socket # Import socket module
s = socket.socket() # Create a socket object
client = 'Nx'
address = 'Nx'
waddress = 'Nx'
cport = 0
localhost = 'Nx'
def host(port, showdata=None, backlog=None):
global s,client, address, waddress, cport, localhost
cport = int(port)
mhost = host = socket.gethostname()
s = socket.socket() # Create a new socket object
s.bind((host,port))
#print('Port is ',cport)
if showdata != None:
print('Waiting for connection at '+str(host)+' on port '+str(port))
if backlog != None:
s.listen(backlog)
else:
s.listen(5)
client, address = s.accept()
waddress = address
if showdata != None:
print('Got Connection from: '+str(address)+' on port '+str(cport))
def connect(thost, port, showdata=None):
global s, client, address, waddress, cport, localhost
port = int(port)
cport = port
#print('Port is ',cport)
localhost = host = socket.gethostname()
s = socket.socket() # Create a new socket object
if showdata != None:
print('Waiting for connection at: '+str(thost)+' on port '+str(cport))
s.connect((thost, port))
waddress = thost
if showdata != None:
print('Connected to: '+str(waddress)+' on port '+str(cport))
def send(msg, showdata=None):
global s,client, address, waddress, cport, localhost
if showdata != None:
print('Target Address:',waddress)
print('Target Port:',cport)
#print()
#s.connect((waddress, cport))
data = str.encode(str(msg))
s.send(data)
if showdata != None:
print('Sent '+waddress+':',str(msg))
def receive(size=None, showdata=None):
global s,client, address, waddress, cport, localhost
if showdata != None:
print('Target Address:',waddress)
print('Target Port:',cport)
#s.connect((waddress, cport))
if size != None:
data = s.recv(size)
else:
data = s.recv(5000)
if showdata != None:
        print('Received from '+str(waddress)+':', bytes.decode(data))
return bytes.decode(data)
def server_send(msg, showdata=None):
global s, client, address, waddress, cport, localhost
if showdata != None:
print('Target Address:',waddress)
print('Target Port:',cport)
#print()
#s.connect(waddress, cport))
data = str.encode(str(msg))
client.send(data)
if showdata != None:
print('Sent '+waddress[0]+':',str(msg))
def server_receive(size=None, showdata=None):
global s,client, address, waddress, cport, localhost
if showdata != None:
print('Target Address:',waddress)
print('Target Port:',cport)
#s.connect((waddress, cport))
if size != None:
data = client.recv(size)
else:
data = client.recv(5000)
if showdata != None:
        print('Received from '+str(address)+':', bytes.decode(data))  # address is a (host, port) tuple
return bytes.decode(data)
def close(showdata=None):
global s,client, address, waddress, cport, localhost
client.close()
if showdata != None:
        print('Closed connection with '+str(waddress)+' on port '+str(cport))  # fixed undefined 'addr' and int concatenation
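# A hypothetical two-terminal usage sketch (not a bundled demo; the port is a
# placeholder). Note host() binds to socket.gethostname(), so the client should
# connect to that same name:
#
#   # terminal 1 (server)
#   import simplenet
#   simplenet.host(5000, showdata=True)     # blocks until a client connects
#   print(simplenet.server_receive())
#   simplenet.server_send('pong')
#
#   # terminal 2 (client)
#   import simplenet, socket
#   simplenet.connect(socket.gethostname(), 5000)
#   simplenet.send('ping')
#   print(simplenet.receive())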
|
[
"pwnetwork.pwteam@outlook.com"
] |
pwnetwork.pwteam@outlook.com
|
de8015a0be9d5dab9f49de71d3750d89eff1ad29
|
c839a6780637cc2fe7f22bde1fab50d848adfd92
|
/NavdharaTech/asgi.py
|
35aab73c5da90968f3dd47c40ff0b75eae09b66f
|
[] |
no_license
|
Ganesh-Thorat-01/NavdharaTech
|
3412ee415d4aa385f7f64471212f0fd5cce13357
|
b303a2e67b24d5dc30f7beb461fe5a3106a1a5a5
|
refs/heads/main
| 2023-03-16T14:58:58.305297
| 2021-03-16T18:10:43
| 2021-03-16T18:10:43
| 314,438,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
ASGI config for NavdharaTech project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NavdharaTech.settings')
application = get_asgi_application()
|
[
"thorat.ganeshscoe@gmail.com"
] |
thorat.ganeshscoe@gmail.com
|
df63bca0d301825b33a9357cf855adc7db3c3879
|
eaad54d45fbffe978479ee4bca15ec5457fa5f51
|
/scraping/bin/pylint
|
4af42c18e077012d0f10d2536b0f7505258c4742
|
[] |
no_license
|
able-leopard/recipe_scraping
|
8fc6eb0d53c2f5ea53824b9bb30dde7d29279517
|
ddf6cfbad9cc46f38dc8e8d6d11cb84f6b01e365
|
refs/heads/master
| 2021-06-27T15:11:17.665357
| 2019-12-03T16:00:11
| 2019-12-03T16:00:11
| 224,886,436
| 0
| 0
| null | 2021-06-02T00:43:28
| 2019-11-29T16:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
#!/home/hzren/Environment_Scraping/scraping/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pylint())
|
[
"alexren268@gmail.com"
] |
alexren268@gmail.com
|
|
d1ef866323ca68278fc0640ad5012b626ffbcc97
|
0e0cc706ec416a8488742b31079bb9ce5291ca2a
|
/texttemplates/selectivediff.q.py
|
f846c008c5ed2fc416730c7ed0c2969aaf356eda
|
[] |
no_license
|
serviceprototypinglab/fipe
|
444473a0c2a04f38c8deca455e8f32b80eac3c22
|
3f22115def870c8852cbba66a918178c274b18cd
|
refs/heads/master
| 2023-08-10T21:17:56.637654
| 2021-09-28T12:20:43
| 2021-09-28T12:20:43
| 344,916,608
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
"""
Das nachfolgende Skript führt eine mathematische Berechnung gemäss einer
spezifischen Formel durch. Die Formel muss nicht weiter verstanden werden.
Vervollständigen Sie das Skript an den kommentierten Stellen, so dass
es lauffähig wird und schliesslich das Resultat ausgibt.
Etwas 'Tüfteln' und aufmerksames Lesen von Python-Fehlermeldungen
ist dabei hilfreich.
(20 Punkte - je 2 Punkte pro korrekter Vervollständigung)
"""
import math
# Vervollständigung V1: Modul für numerisches Rechnen importieren (2P)
fixpoint_array = [
1.0036784723520398,
1.0046843389945894,
1.0042146074100489,
0.9976506126739353,
1.004628260985105,
0.99796357703{VARY:448}79,
1.0043510357919885,
0.9951575774279858,
1.0026486757629631,
1.0011646109142465
]
# Completion V2: keyword for classes (2P)
ConvergenceCalculation:
    # Completion V3: parameters for the constructor method (2P)
def __init__:
self.const_a = 1.492582
self.const_b = 2994.13952094
self.const_c = 999324.24942325669
self.const_d = math.e + math.sin(self.const_a) - 0.23599352
    # Completion V4: method definition, see the calls below (2P)
def :
self.array = np.array(fixpoint_array)
def calculate(self):
x = self.const_a * self.const_b + self.const_b
y = self.const_a * self.const_d + self.const_b
        # Completion V5: result attribute (2P)
self. = (x + y) * np.mean(self.array)
def validate(self):
        # Completions V6/V7: use assertions to check whether the result lies in the valid range (2 x 2P)
self.result > 10000
self.result < 11000
def output(self):
        # Completion V8: write the result to standard output (2P)
(round(self.result))
if __name__ == "__main__":
    # Completion V9: instantiate the object (2P)
c = ()
c.precalculate()
c.calculate()
c.validate()
    # Completion V10: output the result (2P)
|
[
"spio@tougener2"
] |
spio@tougener2
|
5493039aa43679a7833c4d327c208b17172a747b
|
15a530b8fd2fbf34ff00e292958128b62d292d18
|
/tsp/population_search/acs/__init__.py
|
4fa1e000a157a9f43bd7cceacaa828e544cfde7a
|
[] |
no_license
|
mikitachab/pea
|
2dfb9cddc642eff64d9647020262473143dd4125
|
46aab99445b0cd4fbefdbe42d19eb502654e820b
|
refs/heads/master
| 2021-07-16T12:51:29.252980
| 2020-05-16T15:52:42
| 2020-05-16T15:52:42
| 155,883,320
| 0
| 0
| null | 2019-07-21T14:14:06
| 2018-11-02T15:13:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 64
|
py
|
from .acs import AntColonySystem
__all__ = ['AntColonySystem']
|
[
"mikita.chabatarovich@gmail.com"
] |
mikita.chabatarovich@gmail.com
|
2a4411ff798f42c21fba6e8035ff401a33c2dbe3
|
9555a30ff8e19c48c51a6eaa9cbc6ca8ed629391
|
/Algorithms/Implementations/Encription.py
|
8d450f79cdfb108a46d840f43763609feb1931d4
|
[] |
no_license
|
Mister-eX/Hackerrank-solutions
|
f3636d35c35cd22b3b4102b7650add1ca3a2a078
|
d786b3043de04efada296bad7e4424eb897ced72
|
refs/heads/master
| 2022-12-21T12:37:55.462937
| 2020-09-28T14:15:04
| 2020-09-28T14:15:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import math

def encryption(s):
    n = len(s)
    x = math.sqrt(n)
    r = math.floor(x)
    c = math.ceil(x)
    if r * c < n:
        r += 1
    string = ''  # accumulator was used before assignment in the original
    for i in range(c):
        j = i
        while j < n:
            string = string + s[j]
            j += c
        string = string + ' '
    return string
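# A minimal check (not part of the original file), using the sample from the
# HackerRank "Encryption" statement; note the function returns a trailing space:
if __name__ == '__main__':
    print(encryption('haveaniceday'))  # 'hae and via ecy '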
|
[
"sapanravidas@gmail.com"
] |
sapanravidas@gmail.com
|
1d4da51a32831508e08ce8b958a79530db4fe284
|
6d65bac3ee30bd32aaa1d143b878556bfe7d7bb1
|
/Cloud Data Warehouses/Data Warehouse Project/sql_queries.py
|
94bf5d85a4d7a611ad257d6af9ce0d0531c04035
|
[] |
no_license
|
iopkelvin/Data_Engineer_Nanodegree
|
d870d552a8f9e7057dbf8a6406d8c85b43a59700
|
edc631f7e29c57ff7a477700e91f5832bf97db88
|
refs/heads/master
| 2022-07-03T18:26:27.039059
| 2020-05-16T09:39:53
| 2020-05-16T09:39:53
| 260,390,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,008
|
py
|
import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs"
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# stating_songs = "DROP TABLE IF EXISTS stating_songs"
# CREATE TABLES
staging_events_table_create= ("""
CREATE TABLE staging_events (
artist VARCHAR,
auth VARCHAR,
firstName VARCHAR,
gender VARCHAR,
itemInSession INTEGER,
lastName VARCHAR,
length FLOAT,
level VARCHAR,
location VARCHAR,
method VARCHAR,
page VARCHAR,
registration FLOAT,
sessionId INTEGER,
song VARCHAR,
status INTEGER,
ts TIMESTAMP,
userAgent VARCHAR,
userId INTEGER
);
""")
staging_songs_table_create = ("""
CREATE TABLE staging_songs(
num_songs INTEGER,
artist_id VARCHAR,
artist_latitude FLOAT,
artist_longitude FLOAT,
artist_location VARCHAR,
artist_name VARCHAR,
song_id VARCHAR,
title VARCHAR,
duration FLOAT,
year INTEGER
);
""")
songplay_table_create = ("""
CREATE TABLE songplays(
songplay_id INTEGER IDENTITY(0,1) PRIMARY KEY,
start_time TIMESTAMP NOT NULL SORTKEY,
user_id INTEGER NOT NULL,
level VARCHAR,
song_id VARCHAR NOT NULL,
artist_id VARCHAR NOT NULL,
session_id INTEGER NOT NULL,
location VARCHAR,
user_agent VARCHAR
);
""")
user_table_create = ("""
CREATE TABLE users(
user_id INTEGER SORTKEY PRIMARY KEY,
first_name VARCHAR NOT NULL,
last_name VARCHAR,
gender VARCHAR,
level VARCHAR
);
""")
song_table_create = ("""
CREATE TABLE songs(
song_id VARCHAR SORTKEY PRIMARY KEY,
title VARCHAR NOT NULL,
artist_id VARCHAR NOT NULL,
year INTEGER,
duration FLOAT
);
""")
artist_table_create = ("""
CREATE TABLE artists(
artist_id VARCHAR SORTKEY PRIMARY KEY,
name VARCHAR NOT NULL,
location VARCHAR,
latitude FLOAT,
longitude FLOAT
);
""")
time_table_create = ("""
CREATE TABLE time(
start_time TIMESTAMP SORTKEY PRIMARY KEY,
hour INTEGER,
day INTEGER,
week INTEGER,
month INTEGER,
year INTEGER,
weekday VARCHAR
);
""")
# STAGING TABLES
staging_events_copy = ("""
copy staging_events from {data_bucket}
credentials 'aws_iam_role={role_arn}'
region 'us-west-2' format as JSON {log_json_path}
timeformat as 'epochmillisecs';
""").format(data_bucket=config['S3']['LOG_DATA'], role_arn=config['IAM_ROLE']['ARN'], log_json_path=config['S3']['LOG_JSONPATH'])
staging_songs_copy = ("""
copy staging_songs from {data_bucket}
credentials 'aws_iam_role={role_arn}'
region 'us-west-2' format as JSON 'auto';
""").format(data_bucket=config['S3']['SONG_DATA'], role_arn=config['IAM_ROLE']['ARN'])
# FINAL TABLES
songplay_table_insert = ("""
INSERT INTO songplays (start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)
SELECT DISTINCT(se.ts) AS start_time,
se.userId AS user_id,
se.level AS level,
ss.song_id AS song_id,
ss.artist_id AS artist_id,
se.sessionId AS session_id,
se.location AS location,
se.userAgent AS user_agent
FROM staging_events se
JOIN staging_songs ss ON se.song=ss.title AND se.artist=ss.artist_name
WHERE se.page = 'NextSong';
""")
user_table_insert = ("""
INSERT INTO users(user_id, first_name, last_name, gender, level)
SELECT DISTINCT(userId) AS user_id,
firstName AS first_name,
lastName AS last_name,
gender,
level
FROM staging_events
WHERE user_id IS NOT NULL
AND page = 'NextSong';
""")
song_table_insert = ("""
INSERT INTO songs (song_id, title, artist_id, year, duration)
SELECT DISTINCT(song_id) AS song_id,
title,
artist_id,
year,
duration
FROM staging_songs
WHERE song_id IS NOT NULL;
""")
artist_table_insert = ("""
INSERT INTO artists (artist_id, name, location, latitude, longitude)
SELECT DISTINCT(artist_id) AS artist_id,
artist_name AS name,
artist_location AS location,
artist_latitude AS latitude,
artist_longitude AS longitude
FROM staging_songs
WHERE artist_id IS NOT NULL;
""")
time_table_insert = ("""
INSERT INTO time (start_time, hour, day, week, month, year, weekday)
SELECT DISTINCT(start_time) AS start_time,
EXTRACT(hour FROM start_time) AS hour,
EXTRACT(day FROM start_time) AS day,
EXTRACT(week FROM start_time) AS week,
EXTRACT(month FROM start_time) AS month,
EXTRACT(year FROM start_time) AS year,
EXTRACT(dayofweek FROM start_time) AS weekday
FROM songplays;
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, staging_songs_table_create, songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
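# A hypothetical execution sketch (the project's create_tables.py / etl.py are
# the usual consumers of these lists; connection parameters are placeholders):
#
#   import psycopg2
#   conn = psycopg2.connect("host=<redshift-endpoint> dbname=<db> user=<user> password=<pw> port=5439")
#   cur = conn.cursor()
#   for query in drop_table_queries + create_table_queries:
#       cur.execute(query)
#       conn.commit()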
|
[
"iopkelvin@gmail.com"
] |
iopkelvin@gmail.com
|
8850d3ec8a458b5fc000f3faba63ac1042c98579
|
d1e96475c745ca11915593e92311404b7666f6a2
|
/simpleapi/database/crud.py
|
2d44722d728e54e91bd6d55b2aa7f8b4d5968518
|
[] |
no_license
|
noFrostoo/simpleApi
|
7b6f611198dddae374f151dd13445be05b330c7d
|
48ee19e5cc132a993249fad5c045676f03dce0fe
|
refs/heads/main
| 2023-04-26T01:48:12.551267
| 2021-05-21T14:36:31
| 2021-05-21T14:36:31
| 368,916,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,098
|
py
|
from datetime import datetime, timezone
from typing import List, Optional
import uuid
from sqlalchemy.orm import Session
from simpleapi.api import schemas
from . import models
def create_message(db: Session, message: schemas.MessageBase) -> models.Message:
new_message = models.Message(**message.dict())
db.add(new_message)
db.commit()
db.refresh(new_message)
return new_message
def edit_message(db: Session, msg: schemas.Message, id: int) -> models.Message:
    message = models.Message()
    message.id = id
    message.views_count = 0
    message.content = msg.content
    # SQLAlchemy models have no .dict(); pass the new column values explicitly
    db.query(models.Message).filter(models.Message.id == id).update(
        {"content": msg.content, "views_count": 0})
    db.commit()
    return message
def delete_message(db: Session, id: int) -> bool:
rows = db.query(models.Message).filter(models.Message.id == id).delete()
db.commit()
return rows != 0
def view_message(db: Session, id: int) -> models.Message:
    rows = db.query(models.Message).filter(models.Message.id == id).all()
    if len(rows) == 0:
        raise NoMessageOfThisId
    message = rows[0]
    message.views_count += 1
    db.commit()  # the instance is session-tracked, so the increment is persisted
    return message
def get_message_by_id(db: Session, id: int) -> models.Message:
    rows = db.query(models.Message).filter(models.Message.id == id).all()
    if len(rows) == 0:
        raise NoMessageOfThisId
    return rows[0]
def get_user(db: Session, username: str) -> models.User:
rows = db.query(models.User).filter(models.User.username == username).all()
if len(rows) == 0:
return None
return rows[0]
def add_user(db: Session, username: str, password: str) -> models.User:
new_user = models.User()
new_user.username = username
new_user.password = password
db.add(new_user)
db.commit()
db.refresh(new_user)
return new_user
def check_msg_exists(db: Session, id: int) -> bool:
rows = db.query(models.Message).filter(models.Message.id == id).all()
return len(rows) != 0
class NoMessageOfThisId(Exception):
pass
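# A hypothetical usage sketch (SessionLocal comes from the project's database
# setup, which is not shown here; assumes schemas.MessageBase carries a
# `content` field):
#   db = SessionLocal()
#   msg = create_message(db, schemas.MessageBase(content="hello"))
#   print(view_message(db, msg.id).views_count)  # 1 after the first view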
|
[
"nofrost1a@gmail.com"
] |
nofrost1a@gmail.com
|
c9f286b5ec419b0973157fc14f2d484d6c5346d3
|
e8d3b04a19ba1b6373877068c3200e91f5142932
|
/lastfour_main/main.py
|
1cfe3e63008b494f2f398b341fbc8fe73958ea72
|
[] |
no_license
|
kpavankumar623/cricket-project
|
46005ebc7586979deef4d9071441c13b726b095a
|
d83ed4400080d1bf8f40f086cd88d5e71957e3ba
|
refs/heads/master
| 2020-07-16T14:15:03.475153
| 2019-09-20T13:08:06
| 2019-09-20T13:08:06
| 205,804,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
import sys
sys.path.append('..')
from player import Player
from innings import Inning
from match_rules import MatchRules
import helper
player1 = Player("Kirat Boli", prob=[5, 30, 25, 10, 15, 1, 9, 5])
player2 = Player("N.S Nodi", prob=[10, 40, 20, 5, 10, 1, 4, 10])
player3 = Player("R Rumrah", prob=[20, 30, 15, 5, 5, 1, 4, 20])
player4 = Player("Shashi Henra", prob=[30, 25, 5, 0, 5, 1, 4, 30])
team ="Lengaburu"
BATEMEN = {team: []}
for player in Player.player:
BATEMEN[player.team].append(player)
inn = Inning()
match = MatchRules(40,3,4)
BALLS_IN_OVER = 6
RUN_ODD = (1, 3, 5)
striker = BATEMEN[team][0]
non_striker = BATEMEN[team][1]
print('\n{} overs left, {} runs to win\n'.format(match.OVERS_MAX - inn.overs, match.WIN_SCORE - inn.score))
while not match.stop_condition(inn.score, inn.overs, inn.wickets):
try:
run_scored = striker.rand_score()
if run_scored != 'OUT':
striker.increase_score(run_scored)
inn.increase_score(run_scored,striker)
else:
striker.player_out()
inn.increase_wicket(striker)
striker = BATEMEN[team][inn.wickets + 1] #index error if wickets reached max
if inn.balls == BALLS_IN_OVER:
inn.increase_over(match.OVERS_MAX,match.WIN_SCORE)
if inn.balls == BALLS_IN_OVER or run_scored in RUN_ODD:
striker , non_striker = non_striker, striker
except IndexError:
break
except ValueError:
print("Player Probability of Scoring runs not clear")
print('\nLengaburu {}-{}({}.{})\n'.format(inn.score,inn.wickets,inn.overs,inn.balls))
helper.cal_winners(inn,match)
match.personal_scores(BATEMEN)
|
[
"kpavankumar623@hotmail.com"
] |
kpavankumar623@hotmail.com
|
582d8df350455a0ac4ead2662303513df51bc4e8
|
9dfc5bf5d286c7b7f13ce4c17a8def1eb829d2b4
|
/engine/fut/engine/fut_strategyAberration_1.py
|
148503356e2de5a64ce79e517a46464f60ac4fbf
|
[] |
no_license
|
chenzhenhu-yeah/nature
|
a463058fb4cc600fbcbd6a41edb7df485008aad6
|
368f52181f1ac7c0c8b06623c15faf77b7fc5e36
|
refs/heads/master
| 2021-06-24T10:20:03.796435
| 2021-01-16T06:40:31
| 2021-01-16T06:40:31
| 193,628,719
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,747
|
py
|
# encoding: UTF-8
import os
import pandas as pd
from csv import DictReader
from collections import OrderedDict, defaultdict
from nature import to_log, get_dss, get_contract
from nature import DIRECTION_LONG,DIRECTION_SHORT,OFFSET_OPEN,OFFSET_CLOSE,OFFSET_CLOSETODAY,OFFSET_CLOSEYESTERDAY
from nature import ArrayManager, Signal, Portfolio, TradeData, SignalResult
########################################################################
class Fut_AberrationSignal_Duo(Signal):
#----------------------------------------------------------------------
def __init__(self, portfolio, vtSymbol):
self.type = 'duo'
        # Strategy parameters
        self.bollWindow = 80                     # Bollinger channel window length
        self.bollDev = 2                         # Bollinger channel deviation
        self.fixedSize = 1                       # quantity traded each time
        self.initBars = 90                       # days of data used for initialization
        self.minx = 'min5'
        # Transient strategy variables
        self.bollUp = 0                          # Bollinger channel upper band
        self.bollDown = 0                        # Bollinger channel lower band
        # Variables that must be persisted
        self.stop = 0                            # long-side stop
Signal.__init__(self, portfolio, vtSymbol)
#----------------------------------------------------------------------
def load_param(self):
filename = get_dss() + 'fut/cfg/signal_aberration_'+self.type+'_param.csv'
if os.path.exists(filename):
df = pd.read_csv(filename)
df = df[ df.pz == get_contract(self.vtSymbol).pz ]
if len(df) > 0:
rec = df.iloc[0,:]
self.bollWindow = rec.bollWindow
self.bollDev = rec.bollDev
                print('Strategy parameters loaded:', self.bollWindow, self.bollDev)
#----------------------------------------------------------------------
def set_param(self, param_dict):
if 'bollWindow' in param_dict:
self.bollWindow = param_dict['bollWindow']
            print('Strategy parameter set, self.bollWindow:', self.bollWindow)
if 'bollDev' in param_dict:
self.bollDev = param_dict['bollDev']
            print('Strategy parameter set, self.bollDev:', self.bollDev)
#----------------------------------------------------------------------
def onBar(self, bar, minx='min5'):
"""新推送过来一个bar,进行处理"""
self.bar = bar
if minx == 'min1':
self.on_bar_min1(bar)
else:
self.on_bar_minx(bar)
# r = [[minx,bar.date,bar.time,bar.open,bar.close]]
# df = pd.DataFrame(r)
# filename = get_dss() + 'fut/check/bar_' + self.vtSymbol + '.csv'
# df.to_csv(filename, index=False, mode='a', header=False)
def on_bar_min1(self, bar):
pass
def on_bar_minx(self, bar):
self.am.updateBar(bar)
if not self.am.inited:
return
#print('here')
        self.calculateIndicator()                # compute technical indicators
        self.generateSignal(bar)                 # trigger signals and generate trade orders
#----------------------------------------------------------------------
def calculateIndicator(self):
"""计算技术指标"""
self.bollUp, self.bollDown = self.am.boll(self.bollWindow, self.bollDev)
self.stop = (self.bollUp + self.bollDown)/2
#----------------------------------------------------------------------
def generateSignal(self, bar):
        # currently flat (no position)
if self.unit == 0:
if bar.close > self.bollUp:
self.buy(bar.close, self.fixedSize)
        # holding a long position
elif self.unit > 0:
if bar.close < self.stop:
self.sell(bar.close, abs(self.unit))
#----------------------------------------------------------------------
def load_var(self):
filename = get_dss() + 'fut/check/signal_aberration_'+self.type+'_var.csv'
if os.path.exists(filename):
df = pd.read_csv(filename)
df = df[df.vtSymbol == self.vtSymbol]
if len(df) > 0:
                rec = df.iloc[-1,:]          # take the record with the most recent date
self.unit = rec.unit
if rec.has_result == 1:
self.result = SignalResult()
self.result.unit = rec.result_unit
self.result.entry = rec.result_entry
self.result.exit = rec.result_exit
self.result.pnl = rec.result_pnl
#----------------------------------------------------------------------
def save_var(self):
r = []
if self.result is None:
r = [ [self.portfolio.result.date,self.vtSymbol, self.unit, \
0, 0, 0, 0, 0 ] ]
else:
r = [ [self.portfolio.result.date,self.vtSymbol, self.unit, \
1, self.result.unit, self.result.entry, self.result.exit, self.result.pnl ] ]
df = pd.DataFrame(r, columns=['datetime','vtSymbol','unit', \
'has_result','result_unit','result_entry','result_exit', 'result_pnl'])
filename = get_dss() + 'fut/check/signal_aberration_'+self.type+'_var.csv'
df.to_csv(filename, index=False, mode='a', header=False)
#----------------------------------------------------------------------
def open(self, price, change):
"""开仓"""
self.unit += change
if not self.result:
self.result = SignalResult()
self.result.open(price, change)
        # CSV data values kept as-is: '多' = long, '空' = short, '开' = open
        r = [ [self.bar.date+' '+self.bar.time, '多' if change>0 else '空', '开', \
               abs(change), price, 0 ] ]
df = pd.DataFrame(r, columns=['datetime','direction','offset','volume','price','pnl' ])
filename = get_dss() + 'fut/deal/signal_aberration_'+self.type+'_' + self.vtSymbol + '.csv'
df.to_csv(filename, index=False, mode='a', header=False)
#----------------------------------------------------------------------
def close(self, price):
"""平仓"""
self.unit = 0
self.result.close(price)
r = [ [self.bar.date+' '+self.bar.time, '', '平', \
0, price, self.result.pnl ] ]
df = pd.DataFrame(r, columns=['datetime','direction','offset','volume','price','pnl' ])
filename = get_dss() + 'fut/deal/signal_aberration_'+self.type+'_' + self.vtSymbol + '.csv'
df.to_csv(filename, index=False, mode='a', header=False)
self.result = None
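# Illustrative sketch (not part of the original file): generateSignal() above
# implements a classic Aberration/Bollinger breakout -- enter long when the
# close breaks the upper band, exit at the channel midline. The helper below
# shows how bands like those returned by ArrayManager.boll() could be
# computed, assuming closes arrive as a numpy array; it is NOT the actual
# ArrayManager implementation.
import numpy as np

def _boll_sketch(closes, window=80, dev=2):
    """Return (upper, lower) Bollinger bands over the last `window` closes."""
    mid = closes[-window:].mean()
    std = closes[-window:].std()
    return mid + dev * std, mid - dev * std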
########################################################################
class Fut_AberrationSignal_Kong(Signal):
#----------------------------------------------------------------------
def __init__(self, portfolio, vtSymbol):
        self.type = 'kong'
        # Strategy parameters
        self.bollWindow = 80                     # Bollinger channel window length
        self.bollDev = 2                         # Bollinger channel deviation (in standard deviations)
        self.fixedSize = 1                       # quantity traded per order
        self.initBars = 90                       # number of days of data used for initialization
        self.minx = 'min5'
        # Transient strategy variables
        self.bollUp = 0                          # Bollinger channel upper band
        self.bollDown = 0                        # Bollinger channel lower band
        # Variables that must be persisted
        self.stop = 0                            # short-side stop level (channel midline)
        Signal.__init__(self, portfolio, vtSymbol)
#----------------------------------------------------------------------
def load_param(self):
filename = get_dss() + 'fut/cfg/signal_aberration_'+self.type+'_param.csv'
if os.path.exists(filename):
df = pd.read_csv(filename)
df = df[ df.pz == get_contract(self.vtSymbol).pz ]
if len(df) > 0:
rec = df.iloc[0,:]
self.bollWindow = rec.bollWindow
self.bollDev = rec.bollDev
                print('Loaded strategy parameters', self.bollWindow, self.bollDev)
#----------------------------------------------------------------------
def set_param(self, param_dict):
        if 'bollWindow' in param_dict:
            self.bollWindow = param_dict['bollWindow']
            print('Set strategy parameter self.bollWindow:', self.bollWindow)
        if 'bollDev' in param_dict:
            self.bollDev = param_dict['bollDev']
            print('Set strategy parameter self.bollDev:', self.bollDev)
#----------------------------------------------------------------------
def onBar(self, bar, minx='min5'):
"""新推送过来一个bar,进行处理"""
self.bar = bar
if minx == 'min1':
self.on_bar_min1(bar)
else:
self.on_bar_minx(bar)
# r = [[minx,bar.date,bar.time,bar.open,bar.close]]
# df = pd.DataFrame(r)
# filename = get_dss() + 'fut/check/bar_' + self.vtSymbol + '.csv'
# df.to_csv(filename, index=False, mode='a', header=False)
def on_bar_min1(self, bar):
pass
def on_bar_minx(self, bar):
self.am.updateBar(bar)
if not self.am.inited:
return
#print('here')
        self.calculateIndicator()     # compute indicators
        self.generateSignal(bar)      # trigger signals and generate trading instructions
#----------------------------------------------------------------------
def calculateIndicator(self):
"""计算技术指标"""
self.bollUp, self.bollDown = self.am.boll(self.bollWindow, self.bollDev)
self.stop = (self.bollUp + self.bollDown)/2
#----------------------------------------------------------------------
def generateSignal(self, bar):
        # No current position
        if self.unit == 0:
            if bar.close < self.bollDown:
                self.short(bar.close, self.fixedSize)
        # Holding a short position
        elif self.unit < 0:
            if bar.close > self.stop:
                self.cover(bar.close, abs(self.unit))
#----------------------------------------------------------------------
def load_var(self):
filename = get_dss() + 'fut/check/signal_aberration_'+self.type+'_var.csv'
if os.path.exists(filename):
df = pd.read_csv(filename)
df = df[df.vtSymbol == self.vtSymbol]
if len(df) > 0:
                rec = df.iloc[-1,:]          # take the record with the most recent date
self.unit = rec.unit
if rec.has_result == 1:
self.result = SignalResult()
self.result.unit = rec.result_unit
self.result.entry = rec.result_entry
self.result.exit = rec.result_exit
self.result.pnl = rec.result_pnl
#----------------------------------------------------------------------
def save_var(self):
r = []
if self.result is None:
r = [ [self.portfolio.result.date,self.vtSymbol, self.unit, \
0, 0, 0, 0, 0 ] ]
else:
r = [ [self.portfolio.result.date,self.vtSymbol, self.unit, \
1, self.result.unit, self.result.entry, self.result.exit, self.result.pnl ] ]
df = pd.DataFrame(r, columns=['datetime','vtSymbol','unit', \
'has_result','result_unit','result_entry','result_exit', 'result_pnl'])
filename = get_dss() + 'fut/check/signal_aberration_'+self.type+'_var.csv'
df.to_csv(filename, index=False, mode='a', header=False)
#----------------------------------------------------------------------
def open(self, price, change):
"""开仓"""
self.unit += change
if not self.result:
self.result = SignalResult()
self.result.open(price, change)
        # CSV data values kept as-is: '多' = long, '空' = short, '开' = open
        r = [ [self.bar.date+' '+self.bar.time, '多' if change>0 else '空', '开', \
               abs(change), price, 0 ] ]
df = pd.DataFrame(r, columns=['datetime','direction','offset','volume','price','pnl' ])
filename = get_dss() + 'fut/deal/signal_aberration_'+self.type+'_' + self.vtSymbol + '.csv'
df.to_csv(filename, index=False, mode='a', header=False)
#----------------------------------------------------------------------
def close(self, price):
"""平仓"""
self.unit = 0
self.result.close(price)
r = [ [self.bar.date+' '+self.bar.time, '', '平', \
0, price, self.result.pnl ] ]
df = pd.DataFrame(r, columns=['datetime','direction','offset','volume','price','pnl' ])
filename = get_dss() + 'fut/deal/signal_aberration_'+self.type+'_' + self.vtSymbol + '.csv'
df.to_csv(filename, index=False, mode='a', header=False)
self.result = None
########################################################################
class Fut_AberrationPortfolio(Portfolio):
#----------------------------------------------------------------------
def __init__(self, engine, symbol_list, signal_param={}):
#Portfolio.__init__(self, Fut_AberrationSignal_Duo, engine, symbol_list, signal_param, Fut_AberrationSignal_Kong, signal_param)
#Portfolio.__init__(self, Fut_AberrationSignal_Duo, engine, symbol_list, signal_param, None, None)
Portfolio.__init__(self, Fut_AberrationSignal_Kong, engine, symbol_list, signal_param, None, None)
self.name = 'aberration'
#----------------------------------------------------------------------
def _bc_newSignal(self, signal, direction, offset, price, volume):
"""
对交易信号进行过滤,符合条件的才发单执行。
计算真实交易价格和数量。
"""
        multiplier = self.portfolioValue * 0.01 / get_contract(signal.vtSymbol).size
        multiplier = int(round(multiplier, 0))
        #print(multiplier)
        multiplier = 1        # position sizing disabled: the value computed above is overridden
        #print(self.posDict)
        # Update the contract position
if direction == DIRECTION_LONG:
self.posDict[signal.vtSymbol] += volume*multiplier
else:
self.posDict[signal.vtSymbol] -= volume*multiplier
#print(self.posDict)
        # Round the price to the nearest price tick
        priceTick = get_contract(signal.vtSymbol).price_tick
        price = int(round(price/priceTick, 0)) * priceTick
        self.engine._bc_sendOrder(signal.vtSymbol, direction, offset, price, volume*multiplier, self.name)
        # Record the trade
trade = TradeData(self.result.date, signal.vtSymbol, direction, offset, price, volume*multiplier)
# l = self.tradeDict.setdefault(self.result.date, [])
# l.append(trade)
self.result.updateTrade(trade)
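# Hypothetical usage sketch (not part of the original file). The engine object,
# the contract code 'rb2105', and the way bars are fed in are all assumptions
# made purely for illustration:
#
#   engine = ...  # a nature trading engine exposing _bc_sendOrder()
#   portfolio = Fut_AberrationPortfolio(engine, ['rb2105'],
#                                       {'bollWindow': 80, 'bollDev': 2})
#   # each incoming bar is then routed to the matching signal's onBar(bar, minx)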
|
[
"chenzhenhu@yeah.net"
] |
chenzhenhu@yeah.net
|
e747773591efce5226afdebb21cdbb4fc8ae5846
|
54e0c677471942aa35386e810e7fa54753bbecce
|
/Assignment 3 submission/main.py
|
972f9cae5b5f4cd5c77246ef33e85972329b07e1
|
[] |
no_license
|
itsuncheng/COMP4901I_Assignments
|
b27dd86d4e5a438c54c1298478bcc07efe2ea879
|
3bbe115056900515a4fa8fee1973219f9efdf334
|
refs/heads/master
| 2021-05-16T21:39:59.170470
| 2020-03-27T08:43:26
| 2020-03-27T08:43:26
| 250,479,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,652
|
py
|
import pandas as pd
import re
import numpy as np
import pickle
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
from sklearn.metrics import f1_score, classification_report, accuracy_score
from model import WordCNN
def trainer(train_loader,dev_loader, model, optimizer, criterion, epoch=25, early_stop=3, scheduler=None):
best_acc = 0
for e in range(epoch):
loss_log = []
model.train()
pbar = tqdm(enumerate(train_loader),total=len(train_loader))
for i, (X, y, ind) in pbar:
############################################
#TO DO
#write a trainer to train your CNN model
#evaluate your model on development set every epoch
#you are expected to achieve between 0.50 to 0.70 accuracy on development set
############################################
optimizer.zero_grad()
logit = model(X)
loss = criterion(logit, y)
loss.backward()
optimizer.step()
loss_log.append(loss.item())
pbar.set_description("(Epoch {}) TRAIN LOSS:{:.4f}".format((e+1), np.mean(loss_log)))
model.eval()
logits = []
ys = []
for X,y,ind in dev_loader:
logit = model(X)
logits.append(logit.data.cpu().numpy())
ys.append(y.data.cpu().numpy())
logits = np.concatenate(logits, axis=0)
preds = np.argmax(logits, axis=1)
ys = np.concatenate(ys, axis=0)
acc = accuracy_score(y_true=ys, y_pred=preds)
label_names = ['rating 0', 'rating 1','rating 2']
report = classification_report(ys, preds, digits=3,
target_names=label_names)
        if acc > best_acc:
            best_acc = acc
        else:
            early_stop -= 1   # decremented on every non-improving epoch; never reset
print("current validation report")
print("\n{}\n".format(report))
print()
print("epoch: {}, current accuracy:{}, best accuracy:{}".format(e+1,acc,best_acc))
if early_stop==0:
break
if scheduler is not None:
scheduler.step()
return model, best_acc
def predict(model, test_loader, save_file="submission.csv"):
logits = []
inds = []
model.eval()
for X,ind in test_loader:
logit = model(X)
logits.append(logit.data.cpu().numpy())
inds.append(ind.data.cpu().numpy())
logits = np.concatenate(logits, axis=0)
inds = np.concatenate(inds, axis=0)
preds = np.argmax(logits, axis=1)
result = {'id':list(inds), "rating":preds}
df = pd.DataFrame(result, index=result['id'])
df.to_csv(save_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=0.1)
parser.add_argument("--dropout", type=float, default=0.3)
parser.add_argument("--kernel_num", type=int, default=100)
parser.add_argument("--kernel_sizes", type=str, default='3,4,5')
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--early_stop", type=int, default=3)
parser.add_argument("--embed_dim", type=int, default=100)
parser.add_argument("--max_len", type=int, default=200)
parser.add_argument("--class_num", type=int, default=3)
parser.add_argument("--lr_decay", type=float, default=0.5)
parser.add_argument('-dpad','--dynamic_pad', help='True to use dynamic padding, default is False', action='store_true', default=False)
parser.add_argument("--file_name", type=str, default='submission.csv')
args = parser.parse_args()
# check if dynamic padding flag is true
if args.dynamic_pad:
from preprocess_dpad import get_dataloaders
else:
from preprocess import get_dataloaders
#load data
train_loader, dev_loader, test_loader, vocab_size = get_dataloaders(args.batch_size, args.max_len)
#build model
# try to use pretrained embedding here
# embedding_matrix = np.loadtxt('w_emb_mat.txt')
model = WordCNN(args, vocab_size, embedding_matrix=None)
#loss function
criterion = nn.CrossEntropyLoss()
#choose optimizer
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,model.parameters()), lr=args.lr)
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=args.lr_decay)
model, best_acc = trainer(train_loader, dev_loader, model, optimizer, criterion, early_stop = args.early_stop)
print('best_dev_acc:{}'.format(best_acc))
predict(model, test_loader, args.file_name)
if __name__=="__main__":
main()
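# Example invocations (all flags are defined in the argparse setup above):
#
#   python main.py                                   # defaults: lr=0.1, batch_size=16
#   python main.py --lr 0.01 --kernel_sizes 2,3,4    # different LR and kernel sizes
#   python main.py -dpad --file_name out.csv         # dynamic padding, custom output file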
|
[
"itsuncheng2000@gmail.com"
] |
itsuncheng2000@gmail.com
|
30aec9891a46dbbe643a92b765ac61393ad4a416
|
56bd9b3518f21080a0493f5330249bf5e85289fd
|
/engineering/common/econstants.py
|
6c98788eeb4a4ac58e6294df9a553babe863a6f2
|
[
"Apache-2.0"
] |
permissive
|
kevin-zhangsen/badam
|
da680bf8669722b5bc922381537bc4762fa5c228
|
6823f7dcd7c1b54c3b38edeffe59c16317598a2c
|
refs/heads/master
| 2020-04-01T13:43:03.300155
| 2015-10-29T01:07:46
| 2015-10-29T01:07:46
| 45,371,347
| 2
| 0
| null | 2015-11-02T04:02:50
| 2015-11-02T04:02:47
| null |
UTF-8
|
Python
| false
| false
| 7,858
|
py
|
__author__ = 'nash.xiejun'
import os
class OperationType(object):
CFG_ALL_IN_ONE = 'cfg-all-in-one'
CFG_HOST_NAME = 'cfg-hostname'
DEPLOY_CASCADING = 'deploy-cascade-openstack'
DEPLOY_HYBRID_CLOUD = 'deploy-hybrid-cloud'
class EndpointType(object):
COMPUTE = 'compute'
VOLUME = 'volume'
VOLUME2 = 'volumev2'
IMAGE = 'image'
NETWORK = 'network'
ORCHESTRATION = 'orchestration'
EC2 = 'ec2'
METERING = 'metering'
class EndpointURL(object):
COMPUTE = 'http://%s:8774/v2/$(tenant_id)s'
VOLUME = 'http://%s:8776/v1/$(tenant_id)s'
VOLUME2 = 'http://%s:8776/v2/$(tenant_id)s'
IMAGE = 'http://%s:9292/'
NETWORK = 'http://%s:9696/'
ORCHESTRATION = 'http://%s:8004/v1/$(tenant_id)s'
EC2 = 'http://%s:8773/services/Cloud'
METERING = 'http://%s:8777/'
class ServiceName(object):
NOVA = 'nova'
CINDER = 'cinder'
GLANCE = 'glance'
NEUTRON = 'neutron'
KEYSTONE = 'keystone'
class PathConfigFile(object):
ROOT = os.path.sep
ETC = 'etc'
PLUGINS = 'plugins'
ML_2 = 'ml2'
ML2_CONF = 'ml2_conf.ini'
NOVA_CONF = 'nova.conf'
#etc/nova/nova.conf
NOVA = os.path.join(ETC, ServiceName.NOVA, NOVA_CONF)
NOVA_COMPUTE_CONF = 'nova-compute.conf'
#etc/nova/nova-compute.conf
NOVA_COMPUTE = os.path.join(ETC, ServiceName.NOVA, NOVA_COMPUTE_CONF)
NEUTRON_CONF = 'neutron.conf'
#etc/neutron/neutron.conf
NEUTRON = os.path.join(ETC, ServiceName.NEUTRON, NEUTRON_CONF)
# etc/neutron/plugins/ml2/ml2_conf.ini
ML2 = os.path.join(ETC, ServiceName.NEUTRON, PLUGINS, ML_2, ML2_CONF)
L3_PROXY_INI = 'l3_proxy.ini'
# etc/neutron/l3_proxy.ini
L3_PROXY = os.path.join(ETC, ServiceName.NEUTRON, L3_PROXY_INI)
#etc/keystone/keystone.conf
KEYSTONE_CONF = 'keystone.conf'
KEYSTONE = os.path.join(ETC, ServiceName.KEYSTONE, KEYSTONE_CONF)
#etc/glance/glance.conf
GLANCE_CONF = 'glance.conf'
GLANCE = os.path.join(ETC, ServiceName.GLANCE, GLANCE_CONF)
#etc/cinder/cinder.conf
CINDER_CONF = 'cinder.conf'
CINDER = os.path.join(ETC, ServiceName.CINDER, CINDER_CONF)
class PathTriCircle(object):
TRICIRCLE = 'tricircle-master'
JUNO_PATCHES = 'juno-patches'
NOVA_PROXY = 'novaproxy'
CINDER_PROXY = 'cinderproxy'
NEUTRON_PROXY = 'neutronproxy'
L2_PROXY = 'l2proxy'
L3_PROXY = 'l3proxy'
GLANCE_SYNC = 'glancesync'
GLANCE_STORE = 'glance_store'
PATCH_CINDER_CASCADED_TIMESTAMP = 'timestamp-query-patch'
PATCH_GLANCE_LOCATION = 'glance_location_patch'
PATCH_GLANCE_STORE = 'glance_store_patch'
PATCH_NEUTRON_CASCADED_BIG2LAYER = 'neutron_cascaded_big2layer_patch'
PATCH_NEUTRON_CASCADED_L3 = 'neutron_cascaded_l3_patch'
PATCH_NEUTRON_CASCADED_TIMESTAMP = 'neutron_timestamp_cascaded_patch'
PATCH_NEUTRON_CASCADING_BIG2LAYER = 'neutron_cascading_big2layer_patch'
PATCH_NEUTRON_CASCADING_L3 = 'neutron_cascading_l3_patch'
PATCH_NOVA_SCHEDULING = 'nova_scheduling_patch'
# tricircle-master/glancesync
PATH_CASCADING_GLANCE_SYNC = os.path.join(TRICIRCLE, GLANCE_SYNC)
# tricircle-master/cinderproxy
PATH_PROXY_CINDER = os.path.join(TRICIRCLE, CINDER_PROXY)
# tricircle-master/neutronproxy/l2proxy
PATH_PROXY_NEUTRON_L2 = os.path.join(TRICIRCLE, NEUTRON_PROXY, L2_PROXY)
# tricircle-master/neutronproxy/l3proxy
PATH_PROXY_NEUTRON_L3 = os.path.join(TRICIRCLE, NEUTRON_PROXY, L3_PROXY)
# tricircle-master/novaproxy
PATH_PROXY_NOVA = os.path.join(TRICIRCLE, NOVA_PROXY)
# tricircle-master/juno-patches/cinder/timestamp-query-patch
PATH_PATCH_CINDER_CASCADED_TIMESTAMP = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.CINDER, PATCH_CINDER_CASCADED_TIMESTAMP)
# tricircle-master/juno-patches/glance/glance_location_patch
PATH_PATCH_GLANCE_LOCATION = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.GLANCE, PATCH_GLANCE_LOCATION)
# tricircle-master/juno-patches/glance_store/glance_store_patch/
PATH_PATCH_GLANCE_STORE = os.path.join(TRICIRCLE, JUNO_PATCHES, GLANCE_STORE, PATCH_GLANCE_STORE)
# tricircle-master/juno-patches/neutron/neutron_cascaded_big2layer_patch
PATH_PATCH_NEUTRON_CASCADED_BIG2LAYER = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADED_BIG2LAYER)
# tricircle-master/juno-patches/neutron/neutron_cascaded_l3_patch
PATH_PATCH_NEUTRON_CASCADED_L3 = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADED_L3)
# tricircle-master/juno-patches/neutron/neutron_cascading_big2layer_patch
PATH_PATCH_NEUTRON_CASCADING_BIG2LAYER = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADING_BIG2LAYER)
# tricircle-master/juno-patches/neutron/neutron_cascading_l3_patch
PATH_PATCH_NEUTRON_CASCADING_L3 = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADING_L3)
# tricircle-master/juno-patches/neutron/neutron_timestamp_cascaded_patch
PATH_PATCH_NEUTRON_CASCADED_TIMESTAMP = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADED_TIMESTAMP)
# tricircle-master/juno-patches/nova/nova_scheduling_patch
PATH_PATCH_NOVA_SCHEDULING = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NOVA, PATCH_NOVA_SCHEDULING)
PATCH_TO_PATH = {
PATCH_NOVA_SCHEDULING : PATH_PATCH_NOVA_SCHEDULING,
PATCH_NEUTRON_CASCADING_BIG2LAYER : PATH_PATCH_NEUTRON_CASCADING_BIG2LAYER,
PATCH_NEUTRON_CASCADING_L3 : PATH_PATCH_NEUTRON_CASCADING_L3,
PATCH_NEUTRON_CASCADED_BIG2LAYER : PATH_PATCH_NEUTRON_CASCADED_BIG2LAYER,
PATCH_NEUTRON_CASCADED_L3 : PATH_PATCH_NEUTRON_CASCADED_L3,
PATCH_NEUTRON_CASCADED_TIMESTAMP : PATH_PATCH_NEUTRON_CASCADED_TIMESTAMP,
PATCH_CINDER_CASCADED_TIMESTAMP : PATH_PATCH_CINDER_CASCADED_TIMESTAMP,
NOVA_PROXY : PATH_PROXY_NOVA,
CINDER_PROXY : PATH_PROXY_CINDER,
L2_PROXY : PATH_PROXY_NEUTRON_L2,
L3_PROXY : PATH_PROXY_NEUTRON_L3
}
class PathHybridCloud(object):
HYBRID_CLOUD_PATCHES = 'hybrid_cloud_patches'
THIRD_LIB = '3rd_lib'
PYTHON = 'python'
JAVA = 'java'
OPENSTACK_DASHBOARD = 'openstack_dashboard'
WSGI = 'wsgi'
ROOT = os.path.sep
#/usr/share/openstack-dashboard/openstack_dashboard/
# hybrid_cloud_patches/3rd_lib/java
PATH_THIRD_LIB_JAVA = os.path.join(HYBRID_CLOUD_PATCHES, THIRD_LIB, JAVA)
# hybrid_cloud_patches/3rd_lib/python
PATH_THIRD_LIB_PYTHON = os.path.join(HYBRID_CLOUD_PATCHES, THIRD_LIB, PYTHON)
# hybrid_cloud_patches/java
PATH_PATCHES_JAVA = os.path.join(HYBRID_CLOUD_PATCHES, JAVA)
# hybrid_cloud_patches/python
PATH_PATCHES_PYTHON = os.path.join(HYBRID_CLOUD_PATCHES, PYTHON)
# hybrid_cloud_patches/wsgi
PATH_PATCHES_OPENSTACK_DASHBOARD = os.path.join(HYBRID_CLOUD_PATCHES, WSGI)
# /usr/share/openstack-dashboard/
PATH_INSTALL_PATCH_OPENSTACK_DASHBOARD = ''.join([ROOT, os.path.join('usr', 'share', 'openstack-dashboard')])
class PathTricircleConfigFile(object):
PROXY_CINDER = os.path.join(PathTriCircle.PATH_PROXY_CINDER, PathConfigFile.CINDER)
PROXY_NEUTRON_L2 = os.path.join(PathTriCircle.PATH_PROXY_NEUTRON_L2, PathConfigFile.ML2)
PROXY_NEUTRON_L3 = os.path.join(PathTriCircle.PATH_PROXY_NEUTRON_L3, PathConfigFile.L3_PROXY)
PROXY_NOVA_COMPUTE = os.path.join(PathTriCircle.PATH_PROXY_NOVA, PathConfigFile.NOVA_COMPUTE)
PROXY_NOVA = os.path.join(PathTriCircle.PATH_PROXY_NOVA, PathConfigFile.NOVA)
class ConfigReplacement(object):
REGION_NAME = 'region_name'
CASCADED_NODE_IP = 'cascaded_node_ip'
CASCADING_NODE_IP = 'cascading_node_ip'
CINDER_TENANT_ID = 'cinder_tenant_id'
AVAILABILITY_ZONE = 'availability_zone'
CASCADING_OS_REGION_NAME = 'cascading_os_region_name'
ML2_LOCAL_IP = 'ml2_local_ip'
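# Illustrative sketch (not part of the original file): how the constants above
# compose. PATCH_TO_PATH maps a patch name to its relative source directory,
# which can be joined onto a checkout root; '/opt/tricircle' is a hypothetical
# install location used only for this example.
if __name__ == '__main__':
    checkout_root = '/opt/tricircle'
    for patch_name, rel_path in PathTriCircle.PATCH_TO_PATH.items():
        print(patch_name, '->', os.path.join(checkout_root, rel_path))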
|
[
"nash.xiejun@gmail.com"
] |
nash.xiejun@gmail.com
|