blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 2-616 | content_id stringlengths 40-40 | detected_licenses listlengths 0-69 | license_type stringclasses 2 values | repo_name stringlengths 5-118 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringlengths 4-63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k-686M ⌀ | star_events_count int64 0-209k | fork_events_count int64 0-110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2-10.3M | extension stringclasses 246 values | content stringlengths 2-10.3M | authors listlengths 1-1 | author_id stringlengths 0-212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7fa376127790af494936c9aab5e68155b1e0b12d
|
2942ccbb87ceee789337cf946fc878b530889b41
|
/train.py
|
327bbccce02f69a36fcf97eaa57bf49dd4706ffe
|
[] |
no_license
|
ducthangqd1998/Deeplabv3plus-Tensorflow
|
6b774caf34d432fb6a5f870b29092f4f34294da3
|
278d07711bd38f5f936d30351a29424e50b97cf4
|
refs/heads/master
| 2021-01-04T18:36:53.905049
| 2019-05-22T07:43:02
| 2019-05-22T07:43:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,858
|
py
|
#coding=utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
import os
import cv2
import datetime
slim = tf.contrib.slim
import deeplab_model
import input_data
import utils.utils as Utils
flags = tf.app.flags
FLAGS = flags.FLAGS
# For dataset
BATCH_SIZE_OS_16 = 8
BATCH_SIZE_OS_8 = 2
CROP_HEIGHT = input_data.HEIGHT
CROP_WIDTH = input_data.WIDTH
CHANNELS = 3
CLASSES = deeplab_model.CLASSES
_IGNORE_LABEL = input_data._IGNORE_LABEL
PRETRAINED_MODEL_PATH = deeplab_model.PRETRAINED_MODEL_PATH
# For training steps
SAMPLES_AUG = 10582
SAMPLES_TRAIN = 1464
EPOCHES = 42
MAX_STEPS_FAST = (SAMPLES_AUG) // BATCH_SIZE_OS_16 * EPOCHES
MAX_STEPS_SLOW = (SAMPLES_TRAIN) // BATCH_SIZE_OS_8 * EPOCHES
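# A quick worked check of the step budgets above:
#   MAX_STEPS_FAST = 10582 // 8 * 42 = 1322 * 42 = 55524 steps
#   MAX_STEPS_SLOW = 1464 // 2 * 42 = 732 * 42 = 30744 steps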
SAVE_CHECKPOINT_STEPS = 5000
SAVE_SUMMARY_STEPS = 1000
PRINT_STEPS = 200
# For training config
_POWER = 0.9
_WEIGHT_DECAY = 1e-4
initial_lr_fast = 7e-3
end_lr_fast = 1e-5
decay_steps_fast = 50000
initial_lr_slow = 1e-5
end_lr_slow = 1e-6
decay_steps_slow = 30000
flags.DEFINE_integer('output_stride', 16, 'output stride used in the resnet model')
if FLAGS.output_stride == 16:
MAX_STEPS = MAX_STEPS_FAST
initial_lr = initial_lr_fast
end_lr = end_lr_fast
decay_steps = decay_steps_fast
BATCH_SIZE = BATCH_SIZE_OS_16
train_data = input_data.read_train_data()
val_data = input_data.read_val_data()
elif FLAGS.output_stride == 8:
MAX_STEPS = MAX_STEPS_SLOW
initial_lr = initial_lr_slow
end_lr = end_lr_slow
decay_steps = decay_steps_slow
BATCH_SIZE = BATCH_SIZE_OS_8
train_data = input_data.read_train_raw_data()
val_data = input_data.read_val_data()
# for saved path
saved_ckpt_path = './checkpoint/'
saved_summary_train_path = './summary/train/'
saved_summary_test_path = './summary/test/'
def cal_loss(logits, y, loss_weight=1.0):
'''
raw_prediction = tf.reshape(logits, [-1, CLASSES])
raw_gt = tf.reshape(y, [-1])
indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, CLASSES - 1)), 1)
gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
prediction = tf.gather(raw_prediction, indices)
# Pixel-wise softmax loss.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
'''
y = tf.reshape(y, shape=[-1])
not_ignore_mask = tf.to_float(tf.not_equal(y, _IGNORE_LABEL)) * loss_weight
one_hot_labels = tf.one_hot(
y, CLASSES, on_value=1.0, off_value=0.0)
logits = tf.reshape(logits, shape=[-1, CLASSES])
loss = tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels, logits=logits, weights=not_ignore_mask)
return tf.reduce_mean(loss)
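# In brief, how the masking above plays out: pixels whose label equals
# _IGNORE_LABEL (the value comes from input_data; 255 under the usual
# PASCAL VOC convention) get weight 0 in the weighted cross-entropy, and
# tf.one_hot yields an all-zero row for any label outside [0, CLASSES),
# so ignored pixels contribute nothing to the loss.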
with tf.name_scope('input'):
x = tf.placeholder(dtype=tf.float32, shape=[None, CROP_HEIGHT, CROP_WIDTH, CHANNELS], name='x_input')
y = tf.placeholder(dtype=tf.int32, shape=[None, CROP_HEIGHT, CROP_WIDTH], name='ground_truth')
logits = deeplab_model.deeplab_v3_plus(x, is_training=True, output_stride=FLAGS.output_stride, pre_trained_model=PRETRAINED_MODEL_PATH)
#logits = deeplab_model.deeplabv3_plus_model_fn(x)
with tf.name_scope('regularization'):
train_var_list = [v for v in tf.trainable_variables()
if 'beta' not in v.name and 'gamma' not in v.name]
# Add weight decay to the loss.
with tf.variable_scope("total_loss"):
l2_loss = _WEIGHT_DECAY * tf.add_n(
[tf.nn.l2_loss(v) for v in train_var_list])
with tf.name_scope('loss'):
loss = cal_loss(logits, y)
tf.summary.scalar('loss', loss)
loss_all = loss + l2_loss
tf.summary.scalar('loss_all', loss_all)
with tf.name_scope('learning_rate'):
global_step = tf.Variable(0, trainable=False)
lr = tf.train.polynomial_decay(
learning_rate=initial_lr,
global_step=global_step,
decay_steps=decay_steps,
end_learning_rate=end_lr,
power=_POWER,
cycle=False,
name=None
)
tf.summary.scalar('learning_rate', lr)
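    # For reference, polynomial_decay with cycle=False computes
    #   step = min(global_step, decay_steps)
    #   lr = (initial_lr - end_lr) * (1 - step / decay_steps) ** _POWER + end_lr
    # i.e. the usual "poly" schedule with power 0.9.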
with tf.name_scope("opt"):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss_all, var_list=train_var_list, global_step=global_step)
with tf.name_scope("mIoU"):
predictions = tf.argmax(logits, axis=-1, name='predictions')
train_mIoU = tf.Variable(0, dtype=tf.float32, trainable=False)
tf.summary.scalar('train_mIoU', train_mIoU)
test_mIoU = tf.Variable(0, dtype=tf.float32, trainable=False)
tf.summary.scalar('test_mIoU',test_mIoU)
merged = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# if os.path.exists(saved_ckpt_path):
ckpt = tf.train.get_checkpoint_state(saved_ckpt_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
sess.run(tf.assign(global_step, 0))
print("Model restored...")
# saver.restore(sess, './checkpoint/deeplabv3plus.model-30000')
train_summary_writer = tf.summary.FileWriter(saved_summary_train_path, sess.graph)
test_summary_writer = tf.summary.FileWriter(saved_summary_test_path, sess.graph)
for i in range(0, MAX_STEPS + 1):
image_batch_0, image_batch, anno_batch, filename = train_data.next_batch(BATCH_SIZE, is_training=True)
image_batch_val_0, image_batch_val, anno_batch_val, filename_val = val_data.next_batch(BATCH_SIZE, is_training=True)
_ = sess.run(optimizer, feed_dict={x: image_batch, y: anno_batch})
if i % SAVE_SUMMARY_STEPS == 0:
train_summary = sess.run(merged, feed_dict={x: image_batch, y: anno_batch})
train_summary_writer.add_summary(train_summary, i)
test_summary = sess.run(merged, feed_dict={x: image_batch_val, y: anno_batch_val})
test_summary_writer.add_summary(test_summary, i)
if i % PRINT_STEPS == 0:
train_loss_val_all = sess.run(loss_all, feed_dict={x: image_batch, y: anno_batch})
print(datetime.datetime.now().strftime("%Y.%m.%d-%H:%M:%S"), " | Step: %d, | Train loss all: %f" % (i, train_loss_val_all))
if i % SAVE_SUMMARY_STEPS == 0:
learning_rate = sess.run(lr)
pred_train, train_loss_val_all, train_loss_val = sess.run([predictions, loss_all, loss],
feed_dict={x: image_batch, y: anno_batch})
pred_test, test_loss_val_all, test_loss_val = sess.run([predictions, loss_all, loss],
feed_dict={x: image_batch_val, y: anno_batch_val})
train_mIoU_val, train_IoU_val = Utils.cal_batch_mIoU(pred_train, anno_batch, CLASSES)
test_mIoU_val, test_IoU_val = Utils.cal_batch_mIoU(pred_test, anno_batch_val, CLASSES)
sess.run(tf.assign(train_mIoU, train_mIoU_val))
sess.run(tf.assign(test_mIoU, test_mIoU_val))
print('------------------------------')
print(datetime.datetime.now().strftime("%Y.%m.%d-%H:%M:%S"), " | Step: %d, | Lr: %f, | train loss all: %f, | train loss: %f, | train mIoU: %f, | test loss all: %f, | test loss: %f, | test mIoU: %f" % (
i, learning_rate, train_loss_val_all, train_loss_val, train_mIoU_val, test_loss_val_all, test_loss_val, test_mIoU_val))
print('------------------------------')
print(train_IoU_val)
print(test_IoU_val)
print('------------------------------')
#prediction = tf.argmax(logits, axis=-1, name='predictions')
if i % SAVE_CHECKPOINT_STEPS == 0:
if not os.path.exists('images'):
os.mkdir('images')
for j in range(BATCH_SIZE):
cv2.imwrite('images/%d_%s_train_img.png' %(i, filename[j].split('.')[0]), image_batch[j])
cv2.imwrite('images/%d_%s_train_anno.png' %(i, filename[j].split('.')[0]), Utils.color_gray(anno_batch[j]))
cv2.imwrite('images/%d_%s_train_pred.png' %(i, filename[j].split('.')[0]), Utils.color_gray(pred_train[j]))
cv2.imwrite('images/%d_%s_test_img.png' %(i, filename_val[j].split('.')[0]), image_batch_val[j])
cv2.imwrite('images/%d_%s_test_anno.png' %(i, filename_val[j].split('.')[0]), Utils.color_gray(anno_batch_val[j]))
cv2.imwrite('images/%d_%s_test_pred.png' %(i, filename_val[j].split('.')[0]), Utils.color_gray(pred_test[j]))
if i % SAVE_CHECKPOINT_STEPS == 0:
saver.save(sess, os.path.join(saved_ckpt_path, 'deeplabv3plus.model'), global_step=i)
if __name__ == '__main__':
tf.app.run()
|
[
"zhulf0804@gmail.com"
] |
zhulf0804@gmail.com
|
8783fd225862f98980ed7674ab0421a2ba7fa60b
|
00fee81437a42ecbee4934ee82a8deebc8c5d827
|
/query_api.py
|
83513efee92cdc94399f65dff8b4d8d8679051bf
|
[] |
no_license
|
ahbauer/nyt_puppet
|
042e835ce134cbf4ce7238be8845f99296cbf289
|
2f5332a4acaf3ed587530982f4a5d8b801be4996
|
refs/heads/master
| 2020-04-06T04:55:54.818645
| 2015-04-05T05:01:46
| 2015-04-05T05:01:46
| 33,414,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
import sys
import os
import datetime
import json
import wget
def main():
if len(sys.argv) != 2:
        print('Usage: query_api outfilename')
exit(1)
logfilename = 'templog.txt'
call = 'http://api.nytimes.com/svc/search/v2/articlesearch.json?sort=newest&fq=source%3A%22The+New+York+Times%22+AND+document_type%3A%22article%22&api-key=8cbd506758608f03f71e955c07315a98%3A4%3A69419709'
wget.download(call,out=logfilename)
json_file = open(logfilename)
json_data = json.load(json_file)
json_file.close()
os.remove(logfilename)
outfile = open(sys.argv[1], 'a')
if len(json_data['response']['docs']) == 0:
outfile.write( '{0}: No documents received!\n'.format(datetime.datetime.now()) )
else:
article = json_data['response']['docs'][0]
outfile.write( '{2}: The most recent article published on NYTimes.com is "{1}", publication date {0}\n<br>\n'.format(article['pub_date'], article['headline']['main'], datetime.datetime.now()) )
outfile.close()
if __name__ == '__main__':
main()
|
[
"ahbauer01@gmail.com"
] |
ahbauer01@gmail.com
|
fd629182c12d4d84061fd95250e082bfad17438e
|
5971c280956cb1ce53d4bab75ac90443ced39218
|
/Individual Questions/BinarySearchArray.py
|
a8173dc1771c1aa6c513d33027ad3f8a85777ea0
|
[
"MIT"
] |
permissive
|
dvasavda/Tech-Interview-Preparation
|
e1a19e0820938aa3abe221d21eba6127829cdaee
|
42930ccc0bf86ce72162b3ffe186bd912b0997cb
|
refs/heads/master
| 2022-11-10T17:50:18.009863
| 2020-07-01T18:48:11
| 2020-07-01T18:48:11
| 171,165,034
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
def binarySearchSortedArray(nums, s):
"""
Args:
{List<int>} nums
{int} s
Returns:
{boolean} Whether s is in nums.
"""
# Write your code here.
beginning = 0
end = len(nums) - 1
found = False
    while beginning <= end and not found:
        mid = (beginning + end) // 2
        if nums[mid] == s:
            found = True
        elif s < nums[mid]:
            end = mid - 1  # search the left half
        else:
            beginning = mid + 1  # search the right half
return found
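# Example usage (nums must already be sorted in ascending order):
#   binarySearchSortedArray([2, 3, 6, 8, 9], 8) -> True
#   binarySearchSortedArray([2, 3, 6, 8, 9], 5) -> False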
|
[
"d_vasavda@u.pacific.edu"
] |
d_vasavda@u.pacific.edu
|
73a1fbf3232070516bfe940f3763fe2da8c37e9f
|
e63a36870512edb7fd947b809631cf153b028997
|
/surveil/tests/api/controllers/v2/auth/test_auth.py
|
f826fd19633457892325f655e1f92337498b9a8b
|
[
"Apache-2.0"
] |
permissive
|
titilambert/surveil
|
632c7e65d10e03c675d78f278822015346f5c47a
|
8feeb64e40ca2bd95ebd60506074192ecdf627b6
|
refs/heads/master
| 2020-05-25T13:36:59.708227
| 2015-06-29T14:07:07
| 2015-06-29T14:07:07
| 38,249,530
| 1
| 0
| null | 2015-06-29T13:38:04
| 2015-06-29T13:38:03
| null |
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
# Copyright 2015 - Savoir-Faire Linux inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from surveil.tests.api import functionalTest
class TestAuthController(functionalTest.FunctionalTest):
def test_auth_login(self):
auth = {
"auth": {
"tenantName": "demo",
"passwordCredentials": {
"username": "demo",
"password": "secretsecret"
}
}
}
response = self.post_json('/v2/auth/tokens', params=auth)
expected = {
"access": {
"token": {
"issued_at": "2014-01-30T15:30:58.819584",
"expires": "2014-01-31T15:30:58Z",
"id": "aaaaa-bbbbb-ccccc-dddd",
"tenant": {
"enabled": True,
"description": "Hey!",
"name": "demo",
"id": "fc394f2ab2df4114bde39905f800dc57"
}
}
}
}
self.assertEqual(json.loads(response.body.decode()), expected)
|
[
"alexandre.viau@savoirfairelinux.com"
] |
alexandre.viau@savoirfairelinux.com
|
ddb9cad69a65e5191579ada19f9b910281d97041
|
ff6f60d02ed8d024f7b2db5c9eb4b1196ebf166b
|
/my_flask/app/web/book.py
|
ea9c29ced2fe5f2b7754770161dd96e6bcc73744
|
[] |
no_license
|
cekong/learnit
|
43b707e347ff552754b6592e01dd106c98cd0cc5
|
b4111d6fee95960f7b7ca5421b7159cb6122ad2a
|
refs/heads/master
| 2020-03-25T13:53:37.848843
| 2019-08-29T06:46:48
| 2019-08-29T06:46:48
| 143,848,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,707
|
py
|
''''''
'''
Split the view functions out of the main module
and register them on a blueprint:
https://coding.imooc.com/lesson/194.html#mid=12773
Thread isolation:
https://coding.imooc.com/lesson/194.html#mid=12630
Using templates:
https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/0014320129740415df73bf8f81e478982bf4d5c8aa3817a000
'''
from app.spider.my_flask_test_2_book import yushubook
from app.libs.my_flask_test_2_help import is_key_or_isbn
from flask import jsonify, request, render_template,flash
from flask_login import current_user
from . import web
from app.forms.book import searchform
from app.view_models.book import BookViewModel,BookCollection
import json
from app.models.gift import Gift
from app.models.wish import Wish
from app.view_models.trade import TradeInfo
from app.models.base import db
from app.models.book import Book
'''------------------'''
'''
q: a plain keyword (a book title or an ISBN)
ISBN numbers: isbn13 (13 digits 0-9), isbn10 (10 digits 0-9, possibly with some '-')
page: the page number
Accessed via http://127.0.0.1:5000/book/search?q=<keyword>&page=<page>
'''
@web.route("/book/search")
def search():
    form=searchform(request.args)  # validate the query parameters
books=BookCollection()
if form.validate():
q=form.q.data.strip()
page=form.page.data
        # decide whether q is a keyword or an ISBN
key_or_isbn = is_key_or_isbn(q)
yushu_book = yushubook()
if key_or_isbn == 'isbn':
yushu_book.search_by_isbn(q)
else:
yushu_book.search_by_keyword(q,page)
books.fill(yushu_book,q)
# return json.dumps(books,default=lambda o:o.__dict__)
# return json.dumps(books),200,{'content-type':'application/json'}
        # return jsonify(books) # same effect as the line above
        # `return` sends back a string, while the result is JSON (i.e. a dict) and needs to be serialized first
else:
        # return jsonify({'msg':'parameter validation failed'})
        flash('Parameter validation failed')
        return jsonify(form.errors)  # show the detailed error messages
return render_template('search_result.html', books=books)
'''------------------'''
'''------------------'''
'''
Accessed via http://127.0.0.1:5000/book/search/<keyword>/<page>
'''
# @web.route("/book/search/<q>/<page>")
# def search(q,page):  # the view function
# ...
'''------------------'''
@web.route('/book/<isbn>/detail')
def book_detail(isbn):
has_in_gifts = False
has_in_wishes = False
    # fetch the book detail data
yushu_book = yushubook()
yushu_book.search_by_isbn(isbn)
book = BookViewModel(yushu_book.first)
    # MVC: model-view-controller
if current_user.is_authenticated:
if Gift.query.filter_by(uid=current_user.id, isbn=isbn,
launched=False).first():
has_in_gifts = True
if Wish.query.filter_by(uid=current_user.id, isbn=isbn,
launched=False).first():
has_in_wishes = True
trade_gifts = Gift.query.filter_by(isbn=isbn, launched=False).all()
trade_wishes = Wish.query.filter_by(isbn=isbn, launched=False).all()
trade_wishes_model = TradeInfo(trade_wishes)
trade_gifts_model = TradeInfo(trade_gifts)
return render_template('book_detail.html',
book=book, wishes=trade_wishes_model,
gifts=trade_gifts_model, has_in_wishes=has_in_wishes,
has_in_gifts=has_in_gifts)
'''Download using send_static_file'''
# @web.route('/download')
# def download():
# if user:
# send_static_file
# pass
'''request is thread-isolated as well'''
@web.route('/test_xiancheng')
def test_xiancheng():
    from flask import request  # thread-isolated
    from app.libs.none_local import n  # not thread-isolated
    print(n.v)
    n.v=4  # no thread isolation here
    print('------------------')
    print(getattr(request,'v',None))
    setattr(request,'v',3)  # thread isolation in effect
print('------------------')
return ''
'''Templates: Flask's default template engine is Jinja2'''
@web.route('/my_flask_register_login')
def test():
r={
'name':'a',
'action':'hello a'
}
r1={
'name':'b',
'action':'hello b'
}
    # render the HTML template
return render_template('test.html',data=r,data1=r1)
'''Templates: base template layout.html + test1.html'''
@web.route('/test1')
def test1():
r={
'name':'a',
'action':'hello a'
}
r1={
'name':'b',
'action':'hello b'
}
    flash('How beautiful', category='error')
    flash('Just that beautiful', category='warning')
return render_template('test1.html',data=r,data1=r1)
|
[
"noreply@github.com"
] |
cekong.noreply@github.com
|
93246b6bddbe88b106b29b48b5c969490f4097c2
|
97d5563bcbd95987c51fe3fe6390b2542169f715
|
/SniperPolicy/cmt/fragments/install.py
|
cb848f995af2cb6dbc7699f822b107216d502de6
|
[] |
no_license
|
SNiPER-Framework/cmt4sniper
|
6d48b2c0ad2dd73e909fd956a000d596fe6dd1a6
|
6cc33e8ca8b8170febbb96424c022a67f14b1a0b
|
refs/heads/master
| 2023-04-08T12:36:53.568797
| 2021-04-02T16:09:02
| 2021-04-02T16:09:02
| 354,012,979
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,647
|
py
|
#!/usr/bin/env python
"""
Script used to install files while keeping track of the files that have
been installed, so that at the next installation the files removed
from the source directory are also removed from the destination
directory.
The script also provides an "uninstall" mode that removes all,
and only, the files it installed for the package.
Command line:
install.py [-x exclusion1 [-x exclusion2 ...]] [-l logfile] source1 [source2 ...] dest
install.py -u [-l logfile] [dest1 ...]
@author: Marco Clemencic <marco.clemencic@cern.ch>
"""
# Needed for the local copy of the function os.walk, introduced in Python 2.3
# It must be removed when the support for Python 2.2 is dropped
# should be at the first line to please Python 2.5
_version = "$Id: install.py,v 1.15 2008/10/28 17:24:39 marcocle Exp $"
def main():
try:
# optparse is available only since Python 2.3
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-x","--exclude",action="append",
metavar="PATTERN", default = [],
dest="exclusions", help="which files/directories to avoid to install")
parser.add_option("-l","--log",action="store",
dest="logfile", default="install.log",
help="where to store the informations about installed files [default: %default]")
parser.add_option("-d","--destname",action="store",
dest="destname", default=None,
help="name to use when installing the source into the destination directory [default: source name]")
parser.add_option("-u","--uninstall",action="store_true",
dest="uninstall", default=False,
help="do uninstall")
parser.add_option("-s","--symlink",action="store_true",
dest="symlink", default=False,
help="create symlinks instead of copy")
#parser.add_option("-p","--permission",action="store",
# metavar="PERM",
# dest="permission",
# help="modify the permission of the destination file (see 'man chown'). Unix only.")
(opts,args) = parser.parse_args()
except ImportError:
# Old style option parsing
# It must be removed when the support for Python 2.2 is dropped
from getopt import getopt, GetoptError
from sys import argv,exit
class _DummyParserClass:
def __init__(self):
self.usage = "usage: install.py [options]"
self.help = """options:
-h, --help show this help message and exit
-x PATTERN, --exclude=PATTERN
which files/directories to avoid to install
-l LOGFILE, --log=LOGFILE
                        where to store the information about installed files
[default: install.log]
-d DESTNAME, --destname=DESTNAME
name to use when installing the source into the
destination directory [default: source name]
-u, --uninstall do uninstall
-s, --symlink create symlinks instead of copy"""
def error(self,msg=None):
print(self.usage + "\n")
if not msg:
msg = self.help
print(msg)
exit(1)
parser = _DummyParserClass()
try:
            optlist, args = getopt(argv[1:],"hx:l:d:us",
                ["help","exclude=","log=","destname=","uninstall","symlink"])
except GetoptError:
# print help information and exit:
parser.error()
# Dummy option class
class _DummyOptionsClass:
def __init__(self):
# defaults
self.exclusions = []
self.uninstall = False
self.logfile = "install.log"
self.destname = None
self.symlink = False
opts = _DummyOptionsClass()
for opt,val in optlist:
if opt in [ "-h", "--help" ]:
parser.error()
elif opt in [ "-x", "--exclude" ]:
opts.exclusions.append(val)
elif opt in [ "-l", "--log" ]:
opts.logfile = val
elif opt in [ "-d", "--destname" ]:
opts.destname = val
elif opt in [ "-u", "--uninstall" ]:
opts.uninstall = True
elif opt in [ "-s", "--symlink" ]:
opts.symlink = True
# options consistency check
from pickle import dump,load
from os.path import realpath
if opts.uninstall:
if opts.exclusions:
parser.error("Exclusion list does not make sense for uninstall")
opts.destination = args
try:
log = load(open(opts.logfile,"rb"))
except:
log = LogFile()
uninstall(log,opts.destination,realpath(dirname(opts.logfile)))
if log:
dump(log,open(opts.logfile,"wb"))
else:
from os import remove
try:
remove(opts.logfile)
except OSError as x:
if x.errno != 2 : raise
else : # install mode
if len(args) < 2:
parser.error("Specify at least one source and (only) one destination")
opts.destination = args[-1]
opts.sources = args[:-1]
try:
log = load(open(opts.logfile,"rb"))
except:
log = LogFile()
if opts.symlink :
if len(opts.sources) != 1:
parser.error("no more that 2 args with --symlink")
opts.destination, opts.destname = split(opts.destination)
install(opts.sources,opts.destination,
log,opts.exclusions,opts.destname,
opts.symlink, realpath(dirname(opts.logfile)))
dump(log,open(opts.logfile,"wb"))
from os import makedirs, listdir, rmdir
from os.path import exists, isdir, getmtime, split, join, realpath, dirname
try:
from os import walk
except ImportError:
def walk(top, topdown=True, onerror=None):
"""Copied from Python 2.3 os.py (see original file for copyright)
This function has been introduced in Python 2.3, and this copy should
be removed once the support for Python 2.2 is dropped.
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
        try:
            # os.py relied on the module-global name `error` (brought in by
            # an earlier import-*); this copy catches OSError directly.
            names = listdir(top)
        except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if not islink(path):
for x in walk(path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
class LogFile:
"""
    Class to encapsulate the logfile functionality.
"""
def __init__(self):
self._installed_files = {}
def get_dest(self,source):
try:
return self._installed_files[source]
except KeyError:
return None
def set_dest(self,source,dest):
self._installed_files[source] = dest
def get_sources(self):
return list(self._installed_files.keys())
def remove(self,source):
try:
del self._installed_files[source]
except KeyError:
pass
def __len__(self):
return self._installed_files.__len__()
def filename_match(name,patterns,default=False):
"""
Check if the name is matched by any of the patterns in exclusions.
"""
from fnmatch import fnmatch
for x in patterns:
if fnmatch(name,x):
return True
return default
def expand_source_dir(source, destination, exclusions = [],
destname = None, logdir = realpath(".")):
"""
Generate the list of copies.
"""
expansion = {}
src_path,src_name = split(source)
if destname:
to_replace = source
replacement = join(destination,destname)
else:
to_replace = src_path
replacement = destination
for dirname, dirs, files in walk(source):
if to_replace:
dest_path=dirname.replace(to_replace,replacement)
else:
dest_path=join(destination,dirname)
# remove excluded dirs from the list
dirs[:] = [ d for d in dirs if not filename_match(d,exclusions) ]
# loop over files
for f in files:
if filename_match(f,exclusions): continue
key = getRelativePath(dest_path, join(dirname,f))
value = getRelativePath(logdir, join(dest_path,f))
expansion[key] = value
return expansion
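# A small worked example (paths are illustrative): installing source dir
# "src/pkg" into "dest" with no destname walks "src/pkg/sub/f.py" and maps
# the source path (made relative to its destination directory) to
# "dest/pkg/sub/f.py" (made relative to the log directory).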
def remove(file, logdir):
from os import remove
from os.path import normpath, splitext, exists
file = normpath(join(logdir, file))
try:
print("Remove '%s'"%file)
remove(file)
# For python files, remove the compiled versions too
if splitext(file)[-1] == ".py":
for c in ['c', 'o']:
if exists(file + c):
print("Remove '%s'" % (file+c))
remove(file+c)
file_path = split(file)[0]
while file_path and (len(listdir(file_path)) == 0):
print("Remove empty dir '%s'"%file_path)
rmdir(file_path)
file_path = split(file_path)[0]
except OSError as x: # ignore file-not-found errors
if x.errno in [2, 13] :
print("Previous removal ignored")
else:
raise
def getCommonPath(dirname, filename):
from os import sep
from os.path import splitdrive
# if the 2 components are on different drives (windows)
if splitdrive(dirname)[0] != splitdrive(filename)[0]:
return None
dirl = dirname.split(sep)
filel = filename.split(sep)
commpth = []
for d, f in zip(dirl, filel):
if d == f :
commpth.append(d)
else :
break
commpth = sep.join(commpth)
if not commpth:
commpth = sep
elif commpth[-1] != sep:
commpth += sep
return commpth
def getRelativePath(dirname, filename):
""" calculate the relative path of filename with regards to dirname """
import os.path
# Translate the filename to the realpath of the parent directory + basename
filepath,basename = os.path.split(filename)
filename = os.path.join(os.path.realpath(filepath),basename)
    # Get the absolute path of the destination directory
dirname = os.path.realpath(dirname)
commonpath = getCommonPath(dirname, filename)
# for windows if the 2 components are on different drives
if not commonpath:
return filename
relname = filename[len(commonpath):]
reldir = dirname[len(commonpath):]
if reldir:
relname = (os.path.pardir+os.path.sep)*len(reldir.split(os.path.sep)) \
+ relname
return relname
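# A worked example (POSIX paths, assuming no symlinks are involved):
#   getCommonPath('/a/b/c', '/a/b/d/f.txt')   -> '/a/b/'
#   getRelativePath('/a/b/c', '/a/b/d/f.txt') -> '../d/f.txt'
# i.e. from directory /a/b/c the file is reached by going one level up.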
def update(src,dest,old_dest = None, syml = False, logdir = realpath(".")):
from shutil import copy2
from sys import platform
from os.path import normpath
if platform != "win32":
from os import symlink
realdest = normpath(join(logdir, dest))
dest_path = split(realdest)[0]
realsrc = normpath(join(dest_path,src))
if (not exists(realdest)) or (getmtime(realsrc) > getmtime(realdest)):
if not isdir(dest_path):
print("Create dir '%s'"%(dest_path))
makedirs(dest_path)
# the destination file is missing or older than the source
if syml and platform != "win32" :
if exists(realdest):
remove(realdest,logdir)
print("Create Link to '%s' in '%s'"%(src,dest_path))
symlink(src,realdest)
else:
print("Copy '%s' -> '%s'"%(src,realdest))
copy2(realsrc,realdest) # do the copy (cp -p src dest)
#if old_dest != dest: # the file was installed somewhere else
# # remove the old destination
# if old_dest is not None:
# remove(old_dest,logdir)
def install(sources, destination, logfile, exclusions = [],
destname = None, syml = False, logdir = realpath(".")):
"""
Copy sources to destination keeping track of what has been done in logfile.
The destination must be a directory and sources are copied into it.
If exclusions is not empty, the files matching one of its elements are not
copied.
"""
for s in sources:
src_path, src_name = split(s)
if not exists(s):
continue # silently ignore missing sources
elif not isdir(s): # copy the file, without logging (?)
if destname is None:
dest = join(destination,src_name)
else:
dest = join(destination,destname)
src = getRelativePath(destination,s)
dest = getRelativePath(logdir,dest)
old_dest = logfile.get_dest(src)
update(src,dest,old_dest,syml,logdir)
logfile.set_dest(src,dest) # update log
else: # for directories
# expand the content of the directory as a dictionary
# mapping sources to destinations
to_do = expand_source_dir(s,destination,exclusions,destname, logdir)
src = getRelativePath(destination,s)
last_done = logfile.get_dest(src)
if last_done is None: last_done = {}
for k in to_do:
try:
old_dest = last_done[k]
del last_done[k]
except KeyError:
old_dest = None
update(k,to_do[k],old_dest,syml,logdir)
# remove files that were copied but are not anymore in the list
for old_dest in list(last_done.values()):
remove(old_dest,logdir)
logfile.set_dest(src,to_do) # update log
def uninstall(logfile, destinations = [], logdir=realpath(".")):
"""
Remove copied files using logfile to know what to remove.
If destinations is not empty, only the files/directories specified are
removed.
"""
for s in logfile.get_sources():
dest = logfile.get_dest(s)
if type(dest) is str:
if filename_match(dest,destinations,default=True):
remove(dest, logdir)
logfile.remove(s)
else:
for subs in list(dest.keys()):
subdest = dest[subs]
if filename_match(subdest,destinations,default=True):
remove(subdest,logdir)
del dest[subs]
if not dest:
logfile.remove(s)
if __name__ == "__main__":
main()
|
[
"zoujh@ihep.ac.cn"
] |
zoujh@ihep.ac.cn
|
c4c3253a6b07f19944021bbecf697a2cb8e6faec
|
168f705d7283ca060fbcbbd93cf58ff71d001aff
|
/2/Arbitrage/test_american.py
|
4c7fbfa9eb8039cbdcc5da107e7d3dda593b5e3f
|
[] |
no_license
|
jaceiverson/assign2
|
cc403271d64844b3476140e19f6d6d62acd76c8f
|
2576fd6d822dd9c8806ce0a63cb656328ed1b428
|
refs/heads/master
| 2020-08-26T19:24:52.054175
| 2019-12-09T20:33:33
| 2019-12-09T20:33:33
| 217,119,759
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
from payoffs import VanillaOption, call_payoff, put_payoff, american_binomial, european_binomial
spot = 41.0
strike = 40.0
rate = 0.08
vol = 0.30
div = 0.0
expiry = 1.0
steps = 3
the_call = VanillaOption(strike, expiry, call_payoff)
price = american_binomial(the_call, spot, rate, vol, div, steps)
price2 = european_binomial(the_call, spot, rate, vol, div, steps)
print(f"The Call Option Price is: {price : 0.3f}")
|
[
"jaceiverson@Jaces-MacBook-Pro.local"
] |
jaceiverson@Jaces-MacBook-Pro.local
|
0013b1e16967cbeac62eae6ea8f98578c863e5bb
|
f1cc96e94ca7b39e37a2def3a144b8737e402c6b
|
/dingtalk_sdk_gmdzy2010/base_request.py
|
d57ef4f6f136ecb02db1bcfb2008c9b9cdf17b5b
|
[
"BSD-2-Clause"
] |
permissive
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
f3d6fe70d457166d796fef53d86a7f24c32bf25d
|
f8c9fb130977993daf38137f37edfa3ee3eb4b66
|
refs/heads/master
| 2022-12-23T01:18:54.563214
| 2022-12-16T03:18:59
| 2022-12-16T03:18:59
| 152,196,653
| 7
| 4
|
BSD-2-Clause
| 2022-12-16T03:19:00
| 2018-10-09T06:09:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,244
|
py
|
import logging
import os
import requests
class BaseRequest(object):
"""The base request for dingtalk"""
logs_path = os.path.dirname(os.path.abspath(__file__))
request_url = None
request_methods_valid = [
"get", "post", "put", "delete", "head", "options", "patch"
]
def __init__(self, **kwargs):
self.kwargs = kwargs
self.logger = self.set_logger()
self.response = None
self.json_response = None
self.call_status = False
self._request_method = "get"
def set_logger(self):
"""Method to build the base logging system. By default, logging level
is set to INFO."""
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
logger_handler = logging.FileHandler(logger_file)
logger_handler.setLevel(logging.INFO)
logger_formatter = logging.Formatter(
'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
)
logger_handler.setFormatter(logger_formatter)
logger.addHandler(logger_handler)
return logger
@property
def request_method(self):
"""Mostly, the get method is used to request wanted json data, as a
result, the property of request_method is set to get by default."""
return self._request_method
@request_method.setter
def request_method(self, method_str):
request_method_lower = method_str.lower()
if request_method_lower in self.request_methods_valid:
self._request_method = request_method_lower
else:
            raise ValueError(
                "%s is not a valid HTTP request method; please choose one "
                "of %s to perform the request."
                % (method_str, ",".join(self.request_methods_valid))
            )
def get_response(self):
"""Get the original response of requests"""
request = getattr(requests, self.request_method, None)
        if request is None:
            raise ValueError("An effective HTTP request method must be set")
if self.request_url is None:
raise ValueError(
"Fatal error occurred, the class property \"request_url\" is"
"set to None, reset it with an effective url of dingtalk api."
)
response = request(self.request_url, **self.kwargs)
self.response = response
return response
def get_json_response(self):
"""This method aims at catching the exception of ValueError, detail:
http://docs.python-requests.org/zh_CN/latest/user/quickstart.html#json
"""
self.json_response = self.get_response().json()
if self.json_response is not None:
error_code = self.json_response.get("errcode", None)
self.call_status = True if error_code == 0 else False
return self.json_response
def get_call_status(self):
"""The global status of api calling."""
return self.call_status
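# A minimal usage sketch (the subclass name and URL below are illustrative,
# not part of this module):
#
#   class TokenRequest(BaseRequest):
#       request_url = "https://oapi.dingtalk.com/gettoken"
#
#   req = TokenRequest(params={"appkey": "...", "appsecret": "..."})
#   req.request_method = "get"        # validated by the property setter
#   data = req.get_json_response()    # also sets req.call_status from "errcode"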
|
[
"noreply@github.com"
] |
gmdzy2010.noreply@github.com
|
45aaca22b2d1ee117fe7f1cc3ed059e91eb08b11
|
c9dc59c06d67eaffd59e924886ae8d33a7c22c18
|
/monitor.py
|
27fd4acfd5accd8b11f0a18e72614225244e3954
|
[] |
no_license
|
arvind181998ad/AWS-RESOURCE-MONITOR
|
0659e49e9c954cddbb9cd9ef128d2c1dbc688916
|
600a369271ba4e35dea79e245f31401d8fa89735
|
refs/heads/master
| 2021-05-10T08:00:00.721156
| 2018-01-25T06:52:00
| 2018-01-25T06:52:00
| 118,871,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,853
|
py
|
#import statements
import argparse
import boto3
import os
import sys
from pathlib import Path
#regionsCode
regions=['us-east-1','us-east-2','us-west-1','us-west-2','eu-west-1','eu-west-2','eu-west-3',
'ca-central-1','eu-central-1','ap-northeast-1','ap-northeast-2','ap-southeast-1',
'ap-southeast-2','ap-south-1','sa-east-1'];
#regionsName
regionsName=['US East\n(N. Virginia)','US East (Ohio)','US West \n(N.California)','US West (Oregon)','EU (Ireland)','EU (London)','EU (Paris)',
'Canada (Central)','EU (Frankfurt)','Asia Pacific\n(Tokyo)','Asia Pacific\n(Seoul)','Asia Pacific\n(Singapore)',
'Asia Pacific\n(Sydney)','Asia Pacific\n(Mumbai)','South America\n(São Paulo)'];
total=0;
#userName=os.getenv('username');
home=Path.home();
#methods
def StatusInstances(input): #instances state (running,stopped,terminated)
try:
count=0;
zone=connect.describe_availability_zones();
for zones in zone['AvailabilityZones']:
zname=zones["ZoneName"]
getStaIns=connects.instances.filter(Filters=[
{
'Name':'instance-state-name',
'Values':[input]
},
{
'Name':'availability-zone',
'Values':[zname]
}
]
);
for staIns in getStaIns:
print(regionsName[i]+"\t"+zname+"\t"+staIns.instance_type+"\t\t"+staIns.id+"\t"+staIns.public_ip_address+"\t\t\t"+staIns.key_name);
count=count+1;
if count >=1:
print("\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"+str(count)+" "+input);
total=count;
except:
print("");
def filterByTag(getNum): #filter by tags
zone=connect.describe_availability_zones();
for zones in zone['AvailabilityZones']:
zname=zones["ZoneName"]
getStaIns=connects.instances.filter(Filters=[
{
'Name':'instance-state-name',
'Values':['running','stopped','terminated']
},
{
'Name':'availability-zone',
'Values':[zname]
}
]
);
if getNum == 0: #all [tagged and untagged]
for ins in getStaIns:
if ins.tags:
print(regionsName[i]+"\t"+zname+"\t"+ins.instance_type+"\t\t"+ins.id+"\t"+ins.public_ip_address+"\t\t\t"+ins.key_name+"\t\tTAGGED");
else:
print("\n\n"+regionsName[i]+"\t"+zname+"\t"+ins.instance_type+"\t\t"+ins.id+"\t"+ins.public_ip_address+"\t\t\t"+ins.key_name+"\t\tUNTAGGED");
if getNum == 1: #tagged
for ins in getStaIns:
if ins.tags:
print("\n\n"+regionsName[i]+"\t"+zname+"\t"+ins.instance_type+"\t\t"+ins.id+"\t"+ins.public_ip_address);
for tag in ins.tags:
print("\t\t\t\t\t\t\t\t\t\t\t\t\t\t"+tag['Key']+"\t\t\t"+tag['Value']);
if getNum == 2: #untagged
for ins in getStaIns:
if not ins.tags:
print("\n\n"+regionsName[i]+"\t"+zname+"\t"+ins.instance_type+"\t\t"+ins.id+"\t"+ins.public_ip_address);
def configure(): #creating configuration files
path=str(home)+"\\.aws\\";
accessKey=input("Enter the AWS ACCESS KEY ID:");
secretKey=input("Enter the AWS SECRET ACCESS KEY:");
if not os.path.exists(path):
os.mkdir(path);
createFile=open(path+"credentials","wb+");
send="[default]\naws_access_key_id = "+accessKey+"\naws_secret_access_key = "+secretKey+"";
createFile.write(send.encode());
backupFile=open(path+".bak","wb+");
backupFile.write(send.encode());
print("\nConfig file updated.\n");
def checkConfig(): #checking for configuration files
access=0;
path=str(home)+"\\.aws\\";
if not os.path.exists(path):
access=access+1;
return access;
def createProfile(): #create new profile
profilename=input("PROFILE NAME: ");
accessKey=input("AWS ACCESS KEY ID: ");
secretKey=input("AWS SECRET ACCESS KEY: ");
path=str(home)+"\\.ResourceMonitor\\";
if not os.path.exists(path):
os.mkdir(path);
createProfile=open(path+profilename,"wb+");
send="[default]\naws_access_key_id = "+accessKey+"\naws_secret_access_key = "+secretKey+"";
createProfile.write(send.encode());
print("\nProfile created successfully.\n");
def restore(): #restoring the backup file
path=str(home)+"\\.aws\\";
backupFile=open(path+".bak","r");
createFile=open(path+"credentials","wb+");
default=backupFile.read();
createFile.write(default.encode());
def profile(paths): #change profile
path=str(home)+"\\.aws\\";
if not os.path.exists(path):
os.mkdir(path);
openFile=open(str(home)+"\\.ResourceMonitor\\"+paths,"r");
fileText=openFile.read();
sendFile=open(path+"credentials","wb+");
sendText=sendFile.write(fileText.encode());
print("\nprofile changed.\n");
def deleteProfile(profileName): #delete profile
os.remove(str(home)+"\\.ResourceMonitor\\"+profileName);
print("\nProfile deleted successfully");
#argument parser
parser=argparse.ArgumentParser(description="Resource Monitor - AWS EC2",formatter_class=argparse.RawTextHelpFormatter);
parser.add_argument("options",help="instance state [ running | stopped | terminated | all ] \ntags [ tagged | untagged | tags ]\nconfiguration [ configure | createprofile | deleteprofile ]"
"\n\n[instance state]\n\nrunning = display all running instances\nstopped = display all stopped instances\nterminated = display all terminated instances"
"\nall = diplay all instances"
"\n\n[tags]\n\ntagged = display all tagged instances\nuntagged = display all untagged instances\ntags = display all tagged and untagged instances\n\n[Configuration]\n\nconfigure = configure authentication keys\ncreateprofile = create new profile"
"\ndeleteprofile = delete the created profile\n\n[ Config file location : C:\\Users\\..\\.ResourceMonitor ]");
parser.add_argument("--region",help="Regions");
parser.add_argument("--profile",help="Profile Name");
get=checkConfig();
args=parser.parse_args();
if args.options=="configure": #configure authentication keys
configure();
elif args.options=="createprofile": #create new profile
createProfile();
elif args.options=="deleteprofile": #delete profile
profileName=input("PROFILE NAME: ");
deleteProfile(profileName);
elif not get==0: #checking config files
print("No config file detected. type > monitor configure");
sys.exit();
#action for arguments
if args.options=="running": #running
if args.profile:
profile(args.profile);
if args.region:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
connects=boto3.resource('ec2',args.region);
connect=boto3.client('ec2',args.region);
i=regions.index(args.region);
StatusInstances("running");
else:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
for i in range(0,10):
connects=boto3.resource('ec2',regions[i]);
connect=boto3.client('ec2',regions[i]);
StatusInstances("running");
if args.options=="stopped": #stopped
if args.profile:
profile(args.profile);
if args.region:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
connects=boto3.resource('ec2',args.region);
connect=boto3.client('ec2',args.region);
i=regions.index(args.region);
StatusInstances("stopped");
else:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
for i in range(0,10):
connects=boto3.resource('ec2',regions[i]);
connect=boto3.client('ec2',regions[i]);
StatusInstances("stopped");
if args.options=="terminated": #terminated
if args.profile:
profile(args.profile);
if args.region:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
connects=boto3.resource('ec2',args.region);
connect=boto3.client('ec2',args.region);
i=regions.index(args.region);
StatusInstances("terminated");
else:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
for i in range(0,10):
connects=boto3.resource('ec2',regions[i]);
connect=boto3.client('ec2',regions[i]);
StatusInstances("terminated");
if args.options=="all": #all [running,stopped,terminated]
if args.profile:
profile(args.profile);
if args.region:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
connects=boto3.resource('ec2',args.region);
connect=boto3.client('ec2',args.region);
i=regions.index(args.region);
StatusInstances("running");
StatusInstances("stopped");
StatusInstances("terminated");
else:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY PAIR NAME\t\tTOTAL\n");
for i in range(0,10):
connects=boto3.resource('ec2',regions[i]);
connect=boto3.client('ec2',regions[i]);
StatusInstances("running");
StatusInstances("stopped");
StatusInstances("terminated");
if args.options=="tagged": #tagged
if args.profile:
profile(args.profile);
if args.region:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY NAME\t\tKEY VALUE\n");
i=regions.index(args.region);
connects=boto3.resource('ec2',args.region);
connect=boto3.client('ec2',args.region);
filterByTag(1);
else:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY NAME\t\tKEY VALUE\n");
for i in range(0,10):
connects=boto3.resource('ec2',regions[i]);
connect=boto3.client('ec2',regions[i]);
filterByTag(1);
if args.options=="untagged": #untagged
if args.profile:
profile(args.profile);
if args.region:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY NAME\t\tKEY VALUE\n");
i=regions.index(args.region);
connects=boto3.resource('ec2',args.region);
connect=boto3.client('ec2',args.region);
filterByTag(2);
else:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY NAME\t\tKEY VALUE\n");
for i in range(0,10):
connects=boto3.resource('ec2',regions[i]);
connect=boto3.client('ec2',regions[i]);
filterByTag(2);
if args.options=="tags": #tags[tagged and untagged]
if args.profile:
profile(args.profile);
if args.region:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY NAME\t\tKEY VALUE\n");
i=regions.index(args.region);
connects=boto3.resource('ec2',args.region);
connect=boto3.client('ec2',args.region);
filterByTag(0);
else:
print("\nREGION\t\tZONE\t\tINSTANCE TYPE\t\tINSTANCE ID\t\tPUBLIC IP ADDRESS\t\tKEY NAME\t\tKEY VALUE\n");
for i in range(0,10):
connects=boto3.resource('ec2',regions[i]);
connect=boto3.client('ec2',regions[i]);
filterByTag(0);
restore();
|
[
"arvindkec18@gmail.com"
] |
arvindkec18@gmail.com
|
c4f6f2ac9cf581a356a86df581ca76f783be41d4
|
cae30a18b2329ed1c3113b48dbe5095096e7d297
|
/sample/app/settings.py
|
905864248f2ccd8e175e82812b67645348153001
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
hdknr/djpress
|
a477c0d8996ee378d27db055361778a5e018daa1
|
9f482d87f2e1f40938dac313bf4c7c3c61ed812f
|
refs/heads/master
| 2020-12-10T21:03:28.982405
| 2017-05-31T12:27:02
| 2017-05-31T12:27:02
| 40,709,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,258
|
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(qdtn4=ec!2+orr86226&m3@-i@lnx+n*4o**u9xjz8k9l8c1('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
####
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
INSTALLED_APPS += (
'djpress',
)
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
STATIC_URL = '/assets/'
try:
from local_settings import configure
configure(globals())
except:
pass
try:
from app import logs
LOGGING = logs.LOGGING
except:
pass
try:
from app.databases import DatabaseRouter
DATABASES.update(DatabaseRouter.confs())
DATABASE_ROUTERS = [DatabaseRouter.router()]
except:
pass
DJPRESS_KEY = 'this is the secret.'
MIDDLEWARE_CLASSES += (
'djpress.middleware.Middleware',
'app.middleware.SettingsMiddleware',
)
|
[
"gmail@hdknr.com"
] |
gmail@hdknr.com
|
1e998c835edcbb4c3b2f76f6cc444af4a37474dd
|
ef5b59fe56c3a0b68578b58580f769e3e5713c3a
|
/scripts/gen_index.py
|
46438ccadb4feee62cca3c4b1ddfd10072e58db8
|
[] |
no_license
|
noahsug/space
|
7472f270d6005dc2ee9db0d5f085deaaa7609c1c
|
abe68e06708eb05d19c1fb2e365258ebc323c8ce
|
refs/heads/master
| 2020-12-24T15:58:25.345015
| 2015-10-22T21:57:54
| 2015-10-22T21:57:54
| 25,186,464
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,697
|
py
|
import fnmatch
import os.path
import re
excluded_files = [
'di/*',
'util/*'
]
impl_files = []
test_impl_files = []
for root, dirnames, filenames in os.walk('app/js'):
for filename in filenames:
path = os.path.join(root, filename)[len('app/js/'):]
should_add = True;
for excluded_file in excluded_files:
if fnmatch.fnmatch(path, excluded_file):
should_add = False;
break
if not should_add:
continue
impl_files.append('<script src="js/%s"></script>' % (path))
test_impl_files.append('<script src="../app/js/%s"></script>' % (path))
test_files = []
for root, dirnames, filenames in os.walk('test/js'):
for filename in filenames:
path = os.path.join(root, filename)[len('test/'):]
test_files.append('<script src="%s"></script>' % (path))
mock_files = []
for root, dirnames, filenames in os.walk('test/mock'):
for filename in filenames:
if not fnmatch.fnmatch(filename, 'mock_manager.js'):
path = os.path.join(root, filename)[len('test/'):]
mock_files.append('<script src="%s"></script>' % (path))
## Write index.html
index_file = open('app/index.html', 'r')
index_html = index_file.read();
index_file.close();
impl_files_pattern = re.compile('/di\.js.*?' +
'(<script.*</script>)' +
'.*?<body', re.M | re.S)
match = re.search(impl_files_pattern, index_html);
index_html = index_html.replace(match.group(1), '\n '.join(impl_files));
index_file = open('app/index.html', 'w')
index_file.write(index_html);
index_file.close();
## Write test_runner.html
test_file = open('test/test_runner.html', 'r')
test_html = test_file.read();
test_file.close();
# test impl files
test_impl_files_pattern = re.compile('/di\.js.*?' +
'(<script.*</script>)' +
'.*<!-- Mocks -->', re.M | re.S)
match = re.search(test_impl_files_pattern, test_html);
test_html = test_html.replace(match.group(1), '\n '.join(test_impl_files));
# mock files
mock_files_pattern = re.compile('/mock_manager\.js.*?' +
'(<script.*</script>)' +
'.*<!-- Tests -->', re.M | re.S)
match = re.search(mock_files_pattern, test_html);
test_html = test_html.replace(match.group(1), '\n '.join(mock_files));
# test files
test_files_pattern = re.compile('\"test_environment\.js.*?' +
'(<script.*</script>)', re.M | re.S)
match = re.search(test_files_pattern, test_html);
test_html = test_html.replace(match.group(1), '\n '.join(test_files));
test_file = open('test/test_runner.html', 'w')
test_file.write(test_html);
test_file.close();
|
[
"sugarman@google.com"
] |
sugarman@google.com
|
3fb3276f5ca2daccf98e9e4f20943e8700298d82
|
f81b8a10249df65e7e242717a53913432716c3b5
|
/testinvenio/records/permissions.py
|
6657d874bd74d9099fc88c4fd4b3ff22aef0e44f
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
Alzpeta/testinvenio
|
68f14d2b90e9a7524a1e72051098ec2af2a93e07
|
210e4b6ce6b1edcb2ce9ded1bc683745e7eb421a
|
refs/heads/master
| 2023-01-04T13:18:52.704975
| 2020-11-02T17:51:35
| 2020-11-02T17:51:35
| 300,592,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 alzp.
#
# testInvenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Permissions for testInvenio."""
from invenio_access import Permission, any_user
def files_permission_factory(obj, action=None):
"""Permissions factory for buckets."""
return Permission(any_user)
|
[
"alzbeta.pokorna@cesnet.cz"
] |
alzbeta.pokorna@cesnet.cz
|
25950a0c8324fe9133c09093b521017005ebb5e3
|
49663f8e687b6df7f8662a7c5d2d56099c73234b
|
/crawl/company.py
|
514bf0d3d63042c52548bf8283b440e2a5b74b6d
|
[] |
no_license
|
mohammadyousuf/crawler
|
afb91f19b9c3ce4b59876886841ad33b4baa6654
|
92c5ea30a24e31b4401b811336fac80cf5e20c85
|
refs/heads/master
| 2020-12-31T01:23:01.416204
| 2016-06-14T11:32:27
| 2016-06-14T11:32:27
| 65,561,917
| 0
| 0
| null | 2016-08-12T15:05:46
| 2016-08-12T15:05:45
| null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
import requests
url='http://www.crowdfunder.com/browse/deals&template=false'
data={
'entity_only':'true',
'page':2
}
html_post=requests.post(url,data=data)
# then parse the HTML with a regular expression
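# A minimal sketch of that parsing step (the pattern is illustrative; the
# page's real markup may differ):
# import re
# deals = re.findall(r'<h2[^>]*>(.*?)</h2>', html_post.text, re.S)
# print(deals)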
|
[
"zouxuan0626@gmail.com"
] |
zouxuan0626@gmail.com
|
a09507c7b2dec29ed37933703d1aa4d4a44e9713
|
cc774d03112e39e0a6233317e24734c216fa9156
|
/index.py
|
d2e34ce85710f3d57dd98be96d35422ad9d189eb
|
[] |
no_license
|
PinkyJangir/list
|
d38a4e6b52d5012706778d7f58d29b991d8db2d7
|
7ae5527afffd557040d9d335f115d7e68389bffd
|
refs/heads/main
| 2023-08-23T17:07:16.976942
| 2021-11-05T17:05:34
| 2021-11-05T17:05:34
| 419,428,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
num=[4,6,8,3,9,2,10,22,35,]
i=0
sum=0
while i<len(num):
    pos=i  # num.index(num[i]) would return the first match, which breaks with duplicate values
if pos%2==0:
sum+=num[i]
i+=1
print(pos)
print(sum)
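# Worked example: the even-indexed elements are 4, 8, 9, 10 and 35,
# so this prints pos = 8 (the last even index) and sum = 66.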
|
[
"noreply@github.com"
] |
PinkyJangir.noreply@github.com
|
ceaab22f98a359b5e28ae89ceb85a659c9b128cc
|
24ae9cbef6534da4f43f897c1f03251e47676412
|
/blog/migrations/0001_initial.py
|
e963207b8ca0a7249ec1544e77ba3b3e829b348b
|
[] |
no_license
|
kermit71/my-first-blog
|
f726381c907ba8bfca53507edd379049aa9aa80d
|
4088d35444bf591720a216cbe1831c9d6c2c6e5b
|
refs/heads/master
| 2020-08-27T06:28:58.768371
| 2019-10-28T06:47:47
| 2019-10-28T06:47:47
| 217,270,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.2.6 on 2019-10-24 09:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"katsushi@gmail.com"
] |
katsushi@gmail.com
|
9a0d948f397408058c143ca7451ef115e421dacc
|
9d48fcc536cf53ae5b37028012b43b2fe07f0e8a
|
/backend/projects/models.py
|
52b01b7eef3dfa65b71ae4ccc37085d69eb605b0
|
[] |
no_license
|
gda2048/Collections
|
51ac348ebf93583bce109b75d3f3511255ee5e59
|
6e00f54e512e4e16945f87b5f70df3d12e80f844
|
refs/heads/master
| 2022-12-11T06:24:29.612513
| 2020-01-10T06:33:52
| 2020-01-10T06:33:52
| 223,648,250
| 1
| 0
| null | 2022-12-08T06:59:38
| 2019-11-23T20:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,863
|
py
|
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from profiles.models import Team, User
class Backlog(models.Model):
team = models.OneToOneField(Team, on_delete=models.CASCADE, primary_key=True, related_name='backlog')
def __str__(self):
return self.team.name
class Meta:
db_table = 'backlogs'
verbose_name = 'Backlog'
verbose_name_plural = 'Backlogs'
@receiver(post_save, sender=Team)
def create_favorites(sender, instance, created, **kwargs):
if created:
Backlog.objects.create(team=instance)
class Collection(models.Model):
team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='collections')
name = models.CharField(max_length=255)
description = models.CharField(max_length=1023, blank=True, default='')
date_created = models.DateTimeField(default=timezone.now)
def __str__(self):
return f'{self.team.name}:{self.name}'
class Meta:
db_table = 'collections'
verbose_name = 'Collection'
verbose_name_plural = 'Collections'
ordering = ['-date_created']
class List(models.Model):
name = models.CharField(max_length=255)
collection = models.ForeignKey(Collection, on_delete=models.CASCADE, related_name='lists')
date_created = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
class Meta:
db_table = 'lists'
verbose_name = 'List'
verbose_name_plural = 'Lists'
ordering = ['-date_created']
class Item(models.Model):
name = models.CharField(max_length=255)
description = models.CharField(max_length=1023, blank=True, default='')
start_date = models.DateTimeField('Date created', default=timezone.now)
last_change = models.DateTimeField('Last change date', auto_now=True)
end_date = models.DateTimeField('Deadline date', default=timezone.now, null=True)
units = models.PositiveIntegerField('Number of units', default=0)
assigned_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='assigned_items', null=True)
creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='created_items')
backlog = models.ForeignKey(Backlog, on_delete=models.CASCADE, related_name='items')
list = models.ForeignKey(List, on_delete=models.CASCADE, related_name='items', null=True)
def __str__(self):
return self.name
def clean(self):
if self.list and self.list.collection.team != self.backlog.team:
raise ValidationError('Items can be added only from the team backlog')
class Meta:
db_table = 'items'
verbose_name = 'Item'
verbose_name_plural = 'Items'
|
[
"goncharovdma@gmail.com"
] |
goncharovdma@gmail.com
|
933b91b45f6fecffc5ae724e69228611d5645c2a
|
8a2188817aa77badb3f2b1effdf51687da437a55
|
/Biblioteca/Biblioteca/apps/estudiantes/views.py
|
5fff51dae47b855fb4b0fbb56e9abe08590b3d7f
|
[] |
no_license
|
juanjavierlimachi/Biblioteca
|
9e3d41b130ed77e72424c35e6e62832704ee5858
|
7b839220e9b2ea67c965b722d94467acadb37aae
|
refs/heads/master
| 2016-09-09T22:19:56.334965
| 2015-07-17T03:26:47
| 2015-07-17T03:26:47
| 39,229,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
from django.shortcuts import render, render_to_response
from django.views.generic import CreateView,TemplateView,ListView
from django.http import HttpResponseRedirect
from django.template import RequestContext
from .formularios import *
from Biblioteca.apps.libro.models import Libro
# Create your views here.
def Reservrlibros(request,id):
id_libro=id
if request.method=='POST':
form=reservaForm(request.POST)
if form.is_valid():
estu=Estudiante()
estu.nombre=request.POST['nombre']
estu.establecimiento=request.POST['establecimiento']
estu.ci=request.POST['ci']
estu.Libro_id=id_libro
estu.save()
return HttpResponseRedirect('/reservaExitosa/')
else:
form=reservaForm()
return render_to_response('libros/reservas.html',{'form':form},context_instance=RequestContext(request))
def MostrarReservas(request):
reservas=Estudiante.objects.all()
libros=Libro.objects.all()
return render_to_response('estudiantes/MostrarReservas.html',{'reservas':reservas,'libros':libros},context_instance=RequestContext(request))
class ConsultaLibros(ListView):
template_name='estudiantes/ConsultaLibros.html'
model = Libro
context_object_name = 'libros'
|
[
"juan@hotmail.com"
] |
juan@hotmail.com
|
9fa22717f26d1ef4a14e25f2e2d3c08434fd32a0
|
1af1f89eb9a178b95d1ba023b209b7538fb151f0
|
/Algorithms/139. Word Break.py
|
577d67934255597ccca105fd503a93467072e38f
|
[] |
no_license
|
0xtinyuk/LeetCode
|
77d690161cc52738e63a4c4b6595a6012fa5c21e
|
08bc96a0fc2b672282cda348c833c02218c356f1
|
refs/heads/master
| 2023-02-21T16:58:39.881908
| 2021-01-25T08:00:13
| 2021-01-25T08:00:13
| 292,037,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
from typing import List  # needed for the List[str] annotation when run outside LeetCode's harness

class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
f=[False]*(len(s)+1)
f[0]=True
for i in range(len(s)):
if f[i]:
for word in wordDict:
l=len(word)
if (i+l<=len(s)) and (word==s[i:i+l]):
f[i+l]=True
return f[len(s)]
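
# Hedged usage sketch (added, not part of the original solution): exercising the DP above.
if __name__ == "__main__":
    print(Solution().wordBreak("leetcode", ["leet", "code"]))  # True: f[4], then f[8], become True
    print(Solution().wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False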
|
[
"xliu301@uottawa.ca"
] |
xliu301@uottawa.ca
|
9fd1fc7708f649c1fe1972ea6d7e4f1dda742923
|
9de33c12dc876c0f5acf766aee49689e3ab76a97
|
/281a.py
|
52a292b2dac442531f89662aec87ec43b3aa6098
|
[] |
no_license
|
PinkFromTheFuture/codeforces
|
91061d8a35e995566e752d290bc264505eee3bc8
|
bf512e92f985467281fd7a36be9ca9197ec37b3c
|
refs/heads/master
| 2022-05-27T06:54:40.546999
| 2018-09-26T05:05:50
| 2018-09-26T05:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# http://codeforces.com/problemset/problem/281/A
# Capitalization is writing a word with its first letter as a capital letter.
# Input
# A single line contains a non-empty word. This word consists of lowercase and uppercase English letters. The length of the word will not exceed 10^3.
# Your task is to capitalize the given word.
# Note that during capitalization all the letters except the first one remain unchanged.
# Output
# Output the given word after capitalization.
word = input()
word = word[0].upper() + word[1:]
print(word)
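# Example (added): the input "konjac" prints "Konjac"; "ApPLe" prints "ApPLe",
# since only the first letter is capitalized and the rest stay unchanged.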
|
[
"eduardoxfurtado@gmail.com"
] |
eduardoxfurtado@gmail.com
|
4150bb6a4a34ce887a39ed8abcde93662b505e47
|
262635145ffd34696c42dec3970b0523a2081f8b
|
/simple-cipher/simple_cipher.py
|
c4b5e4f6063b7d729adba5168e5e4c8f5b23bf54
|
[
"MIT"
] |
permissive
|
amalshehu/exercism-python
|
9ff347ab9dde7338d01ccc94edbe0aa538f14a03
|
eb469246504fb22463e036a989dc9b44e0a83410
|
refs/heads/master
| 2020-04-17T17:28:02.482703
| 2016-10-25T17:51:54
| 2016-10-25T17:51:54
| 66,532,839
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
# File: simple_cipher.py
# Purpose: Implement a simple shift cipher like Caesar and a more secure substitution cipher
# Programmer: Amal Shehu
# Course: Exercism
# Date: Monday 26 September 2016, 02:00 AM
import random
from string import ascii_lowercase
letters = ascii_lowercase
class Cipher():
"""Generate a key for Cipher if not provided."""
def __init__(self, key=None):
if not key:
key = ''.join(random.SystemRandom().choice(ascii_lowercase) for _ in range(150))
elif not key.isalpha() or not key.islower():
raise ValueError('Invalid key')
self.key = key
def encode(self, text):
key = self.key
while len(key) < len(text):
key += self.key
cipher = ""
for i in range(len(text)):
letter = text.lower()[i]
if letter in letters:
cipher += letters[(letters.index(letter)+letters.index(key[i])) % 26]
return cipher
def decode(self, ciph):
key = self.key
while len(key) < len(ciph):
key += self.key
txt = ""
for i in range(len(ciph)):
letter = ciph.lower()[i]
if letter in letters:
txt += letters[(letters.index(letter)-letters.index(key[i])) % 26]
return txt
class Caesar():
def encode(self, text):
return ''.join([letters[(letters.index(letter)+3) % 26] \
for letter in text.lower() if letter in letters])
def decode(self, ciph):
return ''.join([letters[(letters.index(letter)-3) % 26] \
for letter in ciph.lower() if letter in letters])
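
# Hedged usage sketch (added, not in the original file):
#   Caesar().encode("hello") -> "khoor" and Caesar().decode("khoor") -> "hello".
#   Cipher("abc").encode("abc") -> "ace" (each letter is shifted by the key letter's alphabet index).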
|
[
"amalshehu@gmail.com"
] |
amalshehu@gmail.com
|
0f527b4f593714362ae575fb6b0d141dc375912e
|
55a411a77f23fd827cfbf86524dc2317b67a0097
|
/File2.py
|
1a75839138907e68ddb3bb5a0384db80ad48a34b
|
[] |
no_license
|
imradhetiwari/python
|
83896d79f7d47fa767b09f22719fbf5111877c02
|
b302082bf076d9d1e610c2b38371c22fc8c7d562
|
refs/heads/master
| 2022-04-25T02:43:51.054655
| 2020-04-26T04:43:59
| 2020-04-26T04:43:59
| 258,942,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
import File1
print("File2 __name__ = %s" %__name__)
if __name__ == "__main__":
print("File2 is being run directly")
else:
print("File2 is being imported")
|
[
"radhetiwari001@gmail.com"
] |
radhetiwari001@gmail.com
|
eedec90b012beec34fb98a45089fbfea5ae876f5
|
c8270ad3297129247451b06102c85b88b8aacc89
|
/Hangman- Angel Vasquez.py
|
fa56ee644d5df979f9f48f40007660dc0401cbc3
|
[] |
no_license
|
AngelVasquez20/CSE
|
99a73d1d8e0c53c8dc63a49fced0bf37f29fafc9
|
db2c67a935f21bcd47cfd60a25cf6620ec3008c8
|
refs/heads/master
| 2020-04-02T07:50:58.341218
| 2019-05-23T21:26:33
| 2019-05-23T21:26:33
| 154,217,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
import random
import string
guess = 8
pun = list(string.punctuation)
alphabet = list(string.ascii_letters)
words = ["Apple",
"Word",
"Backpack",
"Pencil",
"Legs",
"Mouse",
"Dog",
"Cat",
"Myth",
"Shoe",
"I love dogs!"]
random = random.choice(words)
random_word = list(random)
letters_guessed = []
win = False
for i in range(len(random)):
if random[i] in alphabet:
random_word.pop(i)
random_word.insert(i, "_")
if random[i] in pun:
random_word.insert(i, "!")
print(' '.join(random_word))
while guess > 0 and not win:
guess1 = input("Guess a letter: ")
print(guess)
letters_guessed.append(guess1.lower())
for i in range(len(random)):
if random[i].lower() in letters_guessed:
random_word.pop(i)
random_word.insert(i, random[i])
if guess1.lower() not in random_word and guess1.upper() not in random_word:
guess -= 1
print("".join(random_word))
if "_" not in random_word:
win = True
print("Well done winner you have guessed the word.")
|
[
"42391751+AngelVasquez20@users.noreply.github.com"
] |
42391751+AngelVasquez20@users.noreply.github.com
|
98ed281e779a38d3a97dcf894060c68eaf321f8c
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC1095.py
|
ba9aa4a123022af05dc1e50f899c06c927cc5564
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,536
|
py
|
# qubit number=5
# total number=54
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use the U1 (P) gate with lambda = pi (i.e. 180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[1]) # number=29
prog.cz(input_qubit[3],input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=31
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.h(input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=52
prog.h(input_qubit[0]) # number=53
prog.cx(input_qubit[1],input_qubit[0]) # number=48
prog.x(input_qubit[0]) # number=49
prog.cx(input_qubit[1],input_qubit[0]) # number=50
prog.cx(input_qubit[1],input_qubit[0]) # number=47
prog.h(input_qubit[4]) # number=41
prog.cx(input_qubit[1],input_qubit[0]) # number=37
prog.x(input_qubit[1]) # number=10
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.x(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=42
prog.cz(input_qubit[0],input_qubit[3]) # number=43
prog.h(input_qubit[3]) # number=44
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1095.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
d22c9363b518f701800c8d0f1854dce1b79d6bbb
|
6f4c4c4ae395d8d4ced87d3a287925eb4c3eb887
|
/webAdminClient/wsgi.py
|
69d87ddbde55b0135c69f10ec0549ee8c41faf82
|
[] |
no_license
|
vipinbharti121/webAdminClient
|
279bca56c96e88a261c4331f6c5be2a439c7b1cc
|
2cd8e870a07b8f44bd5d12ee906ca59e3b08a907
|
refs/heads/master
| 2021-01-19T09:58:28.837284
| 2017-04-27T03:35:20
| 2017-04-27T03:35:20
| 87,802,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for webAdminClient project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webAdminClient.settings")
application = get_wsgi_application()
|
[
"vipinbharti121@gmail.com"
] |
vipinbharti121@gmail.com
|
a8076f0df1c8c43943182d080ee3a2e0e7acc6e7
|
4fc357805e30cd634c753073f8b9e4750ba6eac7
|
/SPCOR/example1.py
|
fafea848d5f828e8517a8d57ae144d9a6b34b6a7
|
[] |
no_license
|
MUDDASICK/CodeSamples
|
e970634e5b50e93e39261d9af335652da37f60c4
|
1af073e3ad61cf9efe0e26f7097a8414d4bf4739
|
refs/heads/main
| 2023-08-17T07:34:33.203566
| 2023-06-17T15:42:45
| 2023-06-17T15:42:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
import json
import requests
from rich import print as rprint
requests.packages.urllib3.disable_warnings()
def get_token():
token_url = "https://10.10.20.65/api/fdm/v5/fdm/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
payload = {"grant_type": "password", "username": "admin", "password": "Cisco1234"}
token_response = requests.post(
token_url, headers=headers, data=json.dumps(payload), verify=False
)
token_response.raise_for_status()
if token_response.status_code == 200:
rprint("Token Received...\n")
token = token_response.json()["access_token"]
return token
def create_network(token):
network_url = "https://10.10.20.65/api/fdm/v5/object/networks"
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {token}",
}
payload = {
"name": "CBTN1",
"description": "NUGGETS",
"subType": "NETWORK",
"value": "99.88.77.0/24",
"dnsResolution": "IPV4_ONLY",
"type": "networkobject",
}
create_response = requests.post(
network_url, headers=headers, data=json.dumps(payload), verify=False
)
create_response.raise_for_status()
if create_response.status_code == 200:
rprint("[green]SUCCESS[/green]: New Object Created")
rprint(create_response.text)
if __name__ == "__main__":
token = get_token()
create_network(token=token)
|
[
"noreply@github.com"
] |
MUDDASICK.noreply@github.com
|
440761660bf6ce25743676c1383266710cbdb766
|
4af090efabd08ef73c411a00ce4972a1c6f30a22
|
/python_100days/9day/game_02.py
|
828ddc089fc9572c0a06e392bca6079ad8d54afa
|
[] |
no_license
|
predatory123/byhytest
|
e52bca664f9461c9309aaa9bf779c02368ed937c
|
578206c9ec9253d0d9325e72cdc13dde6eeb2fc1
|
refs/heads/master
| 2023-04-26T13:33:14.462408
| 2021-05-20T13:33:37
| 2021-05-20T14:26:22
| 369,213,148
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,135
|
py
|
import collections
from collections.abc import MutableSequence  # Python 3; older code may have used `from collections import MutableSequence`
from itertools import groupby
from random import choice, shuffle

Card = collections.namedtuple('Card', ['rank', 'suit'])
class Poker(MutableSequence):
    # poker-deck definitions
    ranks = [str(n) for n in range(2, 11)] + list('JQKA')
    suits = 'spades hearts diamonds clubs'.split()  # spades, hearts, diamonds, clubs
    suit_values = dict(spades=3, hearts=2, diamonds=1, clubs=0)  # spades rank highest, then hearts, then diamonds; clubs lowest
def __init__(self):
self._cards = [Card(rank, suit) for rank in self.ranks
for suit in self.suits]
def __len__(self):
return len(self._cards)
    def __getitem__(self, position):  # implementing __getitem__ alone already makes the object iterable
return self._cards[position]
def __setitem__(self, position, value):
self._cards[position] = value
def __delitem__(self, position):
del self._cards[position]
    def insert(self, position, value):
        self._cards.insert(position, value)  # must actually insert; overwriting breaks the MutableSequence contract
Own_Poker = collections.namedtuple('Own_Poker', ['id', 'rank', 'suit', 'score'])
class Player():
    '''
    Hand types -- Trips: three cards of the same rank. Straight flush: three consecutive cards of the same suit.
    Flush: three cards of the same suit. Straight: three consecutive cards whose suits are not all the same.
    Pair: two of the three cards share a rank. High card: any hand not covered above.
    '''
    def __init__(self, id, poker):
        self.id = id
        self.poker = poker  # a full deck of cards
        self.pokers = []  # the cards in the player's hand
        self.type = 0  # every player starts out assumed to hold three unrelated cards, i.e. a "high card" hand
    def set_card_score(self, card):
        '''
        Score a card by its point value.
        :param card: a playing card
        :return: the card's point score
        '''
        rank_value = Poker.ranks.index(card.rank)
        suit_values = Poker.suit_values
        return rank_value * len(suit_values) + suit_values[card.suit]
    def sort_card_index(self, rank_index_list):
        '''
        Group values by value-minus-index; consecutive values land in the same group g.
        E.g. with ll = [3, 4, 5, 7, 8], enumerate(ll) = [(0,3), (1,4), (2,5), (3,7), (4,8)]; fun subtracts
        the index from the value, and items with equal results are put into the same group.
        So three consecutive cards all fall into one group g, in which case we return True, otherwise False.
        :param rank_index_list:
        :return:
        '''
        fun = lambda x: x[1] - x[0]
        for k, g in groupby(enumerate(rank_index_list), fun):  # iterator over the groups produced by fun
            if len([v for i, v in g]) == 3:
                return True
        return False
    def judge_type(self):
        '''
        After the player has been dealt three random cards, classify the hand
        according to the game's hand types.
        :return:
        '''
        suit_list = []
        rank_list = []
        score_list = []
        for poker in self.pokers:
            suit_list.append(poker.suit)
            rank_list.append(poker.rank)
            score_list.append(poker.score)
        rank_index_list = []  # index of each card's rank in Poker.ranks
        for rank in rank_list:
            index = self.poker.ranks.index(rank)
            rank_index_list.append(index)
        if len(set(rank_list)) == 1:
            self.type = 5  # trips
        elif len(set(suit_list)) == 1:
            if self.sort_card_index(rank_index_list):
                self.type = 4  # straight flush
            else:
                self.type = 3  # flush
        elif self.sort_card_index(rank_index_list):
            self.type = 2  # straight
        elif len(set(rank_list)) == 2:
            self.type = 1  # pair
def play(self):
self.judge_type()
class Winner():
def __init__(self, player1, player2):
self.player1 = player1
self.player2 = player2
    def get_max_card(self, player):
        '''
        Pick out the largest of the three cards; returns its index in ranks.
        :param player:
        :return:
        '''
        ranks = Poker.ranks
        rank_index_list = []  # index of each card's rank in Poker.ranks
        for poker in player.pokers:
            index = ranks.index(poker.rank)
            rank_index_list.append(index)
        return max(rank_index_list)
    def get_card_suit(self, player):
        '''
        Return the suit value of the player's first card.
        :param player:
        :return:
        '''
        suit_values = Poker.suit_values
        suit = player.pokers[0].suit
        return suit_values[suit]
    def get_card_value(self, player):
        '''
        For a pair hand, match up the cards to find the paired card and the single
        card; returns their rank indices so they are easy to compare.
        :param player:
        :return:
        '''
        ranks = Poker.ranks
        rank_index_dict = {}  # index of each card's rank in Poker.ranks
        repeat_rank_value = 0  # rank of the two paired cards
        single_rank_value = 0  # rank of the single card
        for poker in player.pokers:
            index = ranks.index(poker.rank)
            if index in rank_index_dict:
                rank_index_dict[index] += 1
            else:
                rank_index_dict[index] = 1
        rank_index_dict = sorted(rank_index_dict.items(), key=lambda d: d[1], reverse=True)
        n = 0
        for key, count in rank_index_dict:  # unpack (rank index, count); the pair comes first after sorting
            if n == 0:
                repeat_rank_value = key
            else:
                single_rank_value = key
            n += 1
        return repeat_rank_value, single_rank_value
    def get_player_score(self, player):
        '''
        For a high-card hand, compute the total point value of the cards in hand.
        :param player:
        :return:
        '''
        ranks = Poker.ranks
        score = 0
        for poker in player.pokers:
            index = ranks.index(poker.rank)  # index of the card's rank in Poker.ranks
            score += index
        return score
    def get_winner(self):
        player1, player2 = self.player1, self.player2
        # Compare the players' hand types first; the higher type wins. The rules do not
        # yet involve suit, so adjust here if that changes.
        # trips > straight flush > flush > straight > pair > high card
        if player1.type > player2.type:
            return player1
        elif player1.type < player2.type:
            return player2
        else:  # both players hold the same hand type, so apply the per-type tie-break rules
            if player1.type == 5 or player1.type == 4 or player1.type == 2:  # trips, straight flush, straight: compare the high card
                if self.get_max_card(player1) > self.get_max_card(player2):
                    return player1
                else:
                    return player2
            elif player1.type == 1:  # pair: compare the paired cards first; if they match, compare the single card
                repeat_rank_value1, single_rank_value1 = self.get_card_value(player1)
                repeat_rank_value2, single_rank_value2 = self.get_card_value(player2)  # was player1: a copy-paste bug
                if repeat_rank_value1 > repeat_rank_value2:
                    return player1
                elif repeat_rank_value1 < repeat_rank_value2:
                    return player2
                else:
                    if single_rank_value1 > single_rank_value2:
                        return player1
                    elif single_rank_value1 < single_rank_value2:
                        return player2
                    else:
                        return None  # a draw: both hands are equally strong
            else:  # high card or flush: compare the total point value of all cards, ignoring suit
                if self.get_player_score(player1) > self.get_player_score(player2):
                    return player1
                elif self.get_player_score(player1) < self.get_player_score(player2):
                    return player2
                else:
                    return None
def compare_card(card1, card2):
    '''
    Check whether two cards are the same.
    :param card1:
    :param card2:
    :return: True if they match, otherwise False
    '''
    if card1.rank == card2.rank and card1.suit == card2.suit:
        return True
    return False
def dutch_official_work(poker, player1, player2):
    '''
    The dealer (croupier) deals cards to the two players in turn; every dealt card
    must be removed from the deck.
    :param poker: the deck of cards
    :param player1: player 1
    :param player2: player 2
    :return: the remaining deck
    '''
    def distribute_card(player):
        card = choice(poker)  # deal a card
        player.pokers.append(Own_Poker(player.id, card.rank, card.suit, player.set_card_score(card)))
        for i in range(len(poker)):
            if compare_card(card, poker[i]):
                poker.__delitem__(i)
                break
    shuffle(poker)  # shuffle the deck
    for k in range(3):
        distribute_card(player1)
        distribute_card(player2)
    return poker
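
# Hedged usage sketch (added, not in the original file): deal and score one round.
#   poker = Poker()
#   p1, p2 = Player(1, poker), Player(2, poker)
#   dutch_official_work(poker, p1, p2)
#   p1.play(); p2.play()
#   winner = Winner(p1, p2).get_winner()  # None means a draw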
|
[
"13310948808@163.com"
] |
13310948808@163.com
|
88a17af2c77b278374873db91472bee81840c2fd
|
915ac708aeac53125f29bef90c2c047eaed4940e
|
/Anaconda/Lib/site-packages/blaze/server/__init__.py
|
cc45dd552a001665948f97c616e525f2a682c66e
|
[] |
no_license
|
bopopescu/newGitTest
|
c8c480ddd585ef416a5ccb63cbc43e3019f92534
|
5a19f7d01d417a34170a8f760a76e6a8bb7c9274
|
refs/heads/master
| 2021-05-31T17:00:26.656450
| 2016-06-08T06:43:52
| 2016-06-08T06:43:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from __future__ import absolute_import, division, print_function
from .server import Server, to_tree, from_tree, api
from .client import ExprClient, Client
|
[
"arvindchari88@gmail.com"
] |
arvindchari88@gmail.com
|
8fb531fec8d28f2ab789a855a44fa97599d92fdd
|
49fe43b6626be1a47a1b05bb16b606c582ba84d7
|
/tests/message.py
|
a1294789830dc75dfa578e3caee6d641bfd4ab56
|
[
"Apache-2.0"
] |
permissive
|
TheUncleKai/pyeasyb
|
3804e066dddef492cbf0471a4aafec3464336572
|
6237d67bfb9ad7599c7706f3ccbe83aa1806464e
|
refs/heads/master
| 2020-11-29T16:28:23.444037
| 2020-02-08T23:07:11
| 2020-02-08T23:07:11
| 230,167,888
| 0
| 1
|
Apache-2.0
| 2020-02-08T23:07:12
| 2019-12-26T00:18:29
|
Python
|
UTF-8
|
Python
| false
| false
| 6,618
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2017, Kai Raphahn <kai.raphahn@laburec.de>
#
import unittest
import easyb.command
import easyb.message
from easyb.definitions import Direction, Length, Priority
__all__ = [
"TestMessage"
]
# noinspection DuplicatedCode
class TestMessage(unittest.TestCase):
"""Testing class for message coding and decoding module."""
def setUp(self):
"""set up test.
"""
return
def tearDown(self):
"""tear down test.
"""
return
def test_message_1(self):
message = easyb.message.Message()
self.assertNotEqual(message, None)
self.assertIs(message.address, 0)
self.assertIs(message.code, 0)
self.assertEqual(message.priority, Priority.NoPriority)
self.assertEqual(message.length, Length.Byte3)
self.assertEqual(message.direction, Direction.FromMaster)
self.assertEqual(message.param, [])
self.assertIsNone(message.stream)
return
def test_message_2(self):
message = easyb.message.Message(address=1, code=1, priority=Priority.Priority,
length=Length.Byte6, direction=Direction.FromSlave,
param=[1, 0])
self.assertNotEqual(message, None)
self.assertIs(message.address, 1)
self.assertIs(message.code, 1)
self.assertEqual(message.priority, Priority.Priority)
self.assertEqual(message.length, Length.Byte6)
self.assertEqual(message.direction, Direction.FromSlave)
self.assertEqual(message.param, [1, 0])
self.assertIsNone(message.stream)
return
def test_command_1(self):
message = easyb.message.Message()
command = easyb.command.Command(name="Test", address=1, code=1, length=Length.Byte6, param=[2, 0])
message.command(command)
self.assertNotEqual(message, None)
self.assertIs(message.address, 1)
self.assertIs(message.code, 1)
self.assertEqual(message.priority, Priority.NoPriority)
self.assertEqual(message.length, Length.Byte6)
self.assertEqual(message.direction, Direction.FromMaster)
self.assertEqual(message.param, [2, 0])
self.assertIsNone(message.stream)
return
def test_encode_1(self):
message = easyb.message.Message(address=1, code=15, priority=Priority.NoPriority,
length=Length.Byte6, direction=Direction.FromMaster,
param=[202, 0])
send = bytes([0xfe, 0xf2, 0xed, 0x35, 0x00, 0x47])
check = message.encode()
stream = message.stream
self.assertTrue(check)
self.assertEqual(stream.bytes, send)
return
def test_encode_2(self):
message = easyb.message.Message(address=1, code=15, priority=Priority.NoPriority,
length=Length.Byte3, direction=Direction.FromMaster)
send = bytes([0xfe, 0xf0, 0xe3])
check = message.encode()
stream = message.stream
self.assertTrue(check)
self.assertEqual(stream.bytes, send)
return
def test_encode_3(self):
message = easyb.message.Message(address=1, code=15, priority=Priority.NoPriority,
length=Length.Byte6, direction=Direction.FromMaster,
param=[202, 0, 0])
check = message.encode()
self.assertFalse(check)
return
def test_encode_4(self):
message = easyb.message.Message(address=1, code=15, priority=Priority.NoPriority,
length=Length.Byte6, direction=Direction.FromMaster,
param=[202, 0, 0, 0])
check = message.encode()
self.assertFalse(check)
return
def test_encode_5(self):
message = easyb.message.Message(address=1, code=15, priority=Priority.NoPriority,
length=Length.Byte9, direction=Direction.FromMaster,
param=[202, 0, 1, 0])
send = bytes([254, 244, 255, 53, 0, 71, 254, 0, 61])
check = message.encode()
self.assertTrue(check)
self.assertEqual(message.stream.bytes, send)
return
def test_encode_6(self):
message = easyb.message.Message(address=1, code=15, priority=Priority.NoPriority,
length=Length.Byte9, direction=Direction.FromMaster,
param=[202, 0, 1, 0, 0, 0])
check = message.encode()
self.assertFalse(check)
return
def test_encode_7(self):
message = easyb.message.Message(address=1, code=15, priority=Priority.NoPriority,
length=Length.Variable, direction=Direction.FromMaster,
param=[202, 0, 1, 0, 0, 0])
check = message.encode()
self.assertTrue(check)
return
def test_decode_1(self):
header = [0xfe, 0xf5, 0xf8]
message = easyb.message.Message()
check = message.decode(bytes(header))
message.info("TEST")
self.assertTrue(check)
self.assertEqual(message.address, 1)
self.assertEqual(message.code, 15)
self.assertEqual(message.priority, Priority.NoPriority)
self.assertEqual(message.length, Length.Byte9)
self.assertEqual(message.direction, Direction.FromSlave)
return
def test_decode_2(self):
header = [0xfe, 0xf5, 0xfc]
message = easyb.message.Message()
check = message.decode(bytes(header))
self.assertFalse(check)
return
def test_decode_3(self):
header = [0xfe, 0xf5]
message = easyb.message.Message()
check = message.decode(bytes(header))
self.assertFalse(check)
return
|
[
"kai.raphahn@laburec.de"
] |
kai.raphahn@laburec.de
|
3c2c415e894e5277ca78bcbfeb452d75e08a1e80
|
0d1b38738bf8d3a46efdf44ef6dd1fd061a0ff3e
|
/python/xformer/loading.py
|
4d5aa713432cbcc7a16a2e1d8eaee5660d113609
|
[
"MIT"
] |
permissive
|
nelhage/taktician
|
aea285e49c97d9212390075239abf4816b0023ee
|
8ab398ad8ce65a7615da476c6e99c3f6d5d24d76
|
refs/heads/main
| 2022-12-08T23:43:26.458026
| 2022-11-06T23:31:12
| 2022-11-06T23:31:12
| 57,939,961
| 60
| 14
|
MIT
| 2022-11-06T23:31:13
| 2016-05-03T03:54:39
|
Go
|
UTF-8
|
Python
| false
| false
| 848
|
py
|
import os.path
import torch
from torch import nn
import yaml
from .model import Transformer
def load_config(save_dir):
with open(os.path.join(save_dir, "config.yaml")) as fh:
return yaml.unsafe_load(fh)
def load_snapshot(model: nn.Module, save_dir: str):
state = torch.load(os.path.join(save_dir, "model.pt"), map_location="cpu")
model.load_state_dict(state)
def load_model(save_dir, device="cpu"):
config = load_config(save_dir)
model = Transformer(config, device=device)
load_snapshot(model, save_dir)
return model
def save_model(model: Transformer, save_dir: str):
os.makedirs(save_dir, exist_ok=True)
torch.save(
model.state_dict(),
os.path.join(save_dir, "model.pt"),
)
with open(os.path.join(save_dir, "config.yaml"), "w") as fh:
yaml.dump(model.cfg, fh)
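
# Hedged usage sketch (added): a save/load round trip, assuming `model.cfg` holds the
# same config object that load_config() later reads back:
#   save_model(model, "checkpoints/run1")
#   model2 = load_model("checkpoints/run1", device="cpu")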
|
[
"nelhage@nelhage.com"
] |
nelhage@nelhage.com
|
e91f2530231ab7cbdb7a09e7e41c5b7853538c00
|
985be2d2d979c1d5ffbd6cd73d9da711951e4f1c
|
/django_channels/asgi.py
|
2b6813d966c147f9309c5e271d9d2c4f4d244138
|
[] |
no_license
|
sreesh-mallya/django-channels-demo
|
6a1492c2ffe3a8f37782ced19562c629fa65ee8f
|
8a3ac7d3e04ecd8c5053009f760d84e3b9415882
|
refs/heads/master
| 2021-01-23T22:16:04.353634
| 2017-09-19T03:44:00
| 2017-09-19T03:44:00
| 102,924,295
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
import os
import channels.asgi
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_channels.settings")
channel_layer = channels.asgi.get_channel_layer()
|
[
"sreeshsmallya@gmail.com"
] |
sreeshsmallya@gmail.com
|
74dab3259df5726216ab245a698f4ad633b56a07
|
6d14641ff3806da2ba5a3087e36c475b7e07a336
|
/text_mining_code.py
|
b963126d00a9467c1f36f95a44e716da2f1bd9a2
|
[] |
no_license
|
jamiecayley/ALS
|
32c4e20f0ee9cd7bc04c0a7870dd20b8ad98a175
|
20a00595adb952eb4ccf74a221cee376d4028900
|
refs/heads/master
| 2021-05-27T12:39:25.868830
| 2014-08-27T02:19:03
| 2014-08-27T02:19:03
| 15,090,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import gzip
class TextMinning:
    def __init__(self, fileName):
        self.fileName = fileName
    def read_gz_file_in_chunks(self, file_object, chunk_size=1024):
        '''Lazy function to read a gz file piece by piece.
        Default chunk size: 1K.'''
        while True:
            data = file_object.read(chunk_size)
            if not data:
                break
            yield data
    def create_subsetted_version(self):
        f = gzip.open(self.fileName, 'rb')  # 'rb' to read; the original 'wb' would have truncated the file
        for piece in self.read_gz_file_in_chunks(f):
            process_data(piece)  # process_data is assumed to be defined elsewhere
    fieldnames = ['entrez_id', 'word', 'average_count']
def main():
    fileName = "/Users/mtchavez/Downloads/gene_word_matrix_2011.gz"
    tm = TextMinning(fileName)
    tm.create_subsetted_version()
|
[
"materechm@gmail.com"
] |
materechm@gmail.com
|
1db31bea4b6b755733c4f518e1ca07de05251809
|
77f8bb38f9dd100406365a96fea8940cf8eecfa5
|
/django_pro2/wsgi.py
|
e01dcc3e7ee630f93446db8208db692fa99bfd24
|
[] |
no_license
|
yadukrishnan369/cybersquare
|
553280825d0852dd72879b135bc33fc5efcbcd77
|
356b7e6f430cc813f47c8f94a7e9bfc346cfdc30
|
refs/heads/master
| 2023-08-22T11:16:09.047228
| 2021-10-01T04:04:02
| 2021-10-01T04:04:02
| 411,560,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for django_pro2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_pro2.settings')
application = get_wsgi_application()
|
[
"yadukrish9656@gmail.com"
] |
yadukrish9656@gmail.com
|
467094675e5091fc349d381f86d9afaa14d38086
|
f3db8bd591ab2cdffc926436190dc8f31a37bfde
|
/project/settings.py
|
73eed63fb9e8861e55a15e763c61b3609071f84f
|
[] |
no_license
|
ravikaran1/civilmain
|
5180e88b845aa8ede9ee0b3322dbc6e7e7bfa12b
|
9994e1a62ebe8d30b3561f6ff811ff3b8fc4fa08
|
refs/heads/master
| 2023-06-02T00:57:03.882666
| 2021-06-10T03:06:54
| 2021-06-10T03:06:54
| 375,542,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,250
|
py
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from django.contrib.messages import constants as messages
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fwg0mp5t-foh^sv%-rh*&4mai)$d2ch$@vp@7v1#1ihm!*=4mt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MESSAGE_TAGS = {
messages.ERROR:'danger',
}
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static')
]
|
[
"ravi5sandhu775@gmail.com"
] |
ravi5sandhu775@gmail.com
|
1665a7e801e38198331918e02cc942dbca6af308
|
edc399cbe15f6b0c8dff622e2e2445f1913615e4
|
/train/cnn.py
|
91b8359226b0493097f9808a574836ab7ead58e8
|
[] |
no_license
|
mannysinghh11/Colorify
|
1e9e488df5f14cf94fb8adb0b4b64f3e180c95b1
|
936ce5538c1a36366e1bddfe2936b078146a4b2a
|
refs/heads/master
| 2020-09-15T05:38:57.935914
| 2019-11-22T22:58:03
| 2019-11-22T22:58:03
| 223,359,227
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,928
|
py
|
from keras.models import Sequential
from keras.layers import Conv2D, UpSampling2D, InputLayer, Conv2DTranspose
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage.color import rgb2lab, lab2rgb, rgb2gray, xyz2lab
from keras.layers import Dense, Dropout, Flatten, Activation, BatchNormalization
from keras import regularizers  # regularizers lives under keras, not keras.layers
from keras.datasets import cifar10 #to find out more about the dataset
from skimage.io import imsave
from skimage.transform import rescale, resize
from scipy import ndimage, misc
from sklearn.model_selection import train_test_split
import numpy as np
import os
import re
class CNNModel(object):
def __init__(self):
self.training_images = []
self.model = self.compile_model()
self.traindir2 = "/trainingdata/"
    # Here we load images from our training-images directory, sort the files, and load those images into an array
def set_images(self):
# Get images
images = os.listdir(self.traindir2)
images.sort(key=lambda var: [int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
filelen = len(images)-1
print(len(images)-1)
print("This is the length")
# Split up training image from dataset
self.training_images = images[:filelen]
    # Here we perform the training by taking all the images, converting them into a NumPy array
    # representation, and loading those into an array.
    # Here we also convert images from RGB to Lab colorspace.
def train(self):
xSpace = []
ySpace = []
for image in self.training_images:
if(".JPEG" in image):
train_image = img_to_array(load_img(self.traindir2+image))
train_image = train_image / 255 # We divide by 255 to normalize the representation so its from 0-1 instead of 0-255
xSpace.append(rgb2lab(train_image)[:, :, 0]) # this gives us the L Channel
ySpace.append((rgb2lab(train_image)[:, :, 1:]) / 128) # We divide by 128 since ab are represented in -128 - 128
xSpace = np.array(xSpace, dtype=float)
ySpace = np.array(ySpace, dtype=float)
xSpace = xSpace.reshape(len(xSpace), 64, 64, 1) # Here we reshape our array so it will fit into our model
ySpace = ySpace.reshape(len(ySpace), 64, 64, 2)
# Train Model. This is the function we use to perform training.
# the variable 'history' is used for getting metrics into floydhub
history = self.model.fit(x=xSpace, y=ySpace, validation_split=.1, batch_size=1000, epochs=100, verbose=1)
def initialModel(self):
# building model using keras - Floyd Model
model = Sequential()
model.add(InputLayer(input_shape=(64, 64, 1)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(2, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
# Finish model
model.compile(optimizer='rmsprop', loss='mse', metrics=["accuracy"])
print("Created model!")
return model
def compile_model(self):
# building model using keras - Floyd Model
model = Sequential()
model.add(InputLayer(input_shape=(64, 64, 1)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
model.add(UpSampling2D((2, 2)))
# Finish model
model.compile(optimizer='rmsprop', loss='mse', metrics=["accuracy"])
print("Created model!")
return model
    def ann_model(self, data_dir):  # data_dir replaces the undefined `dir`, which shadowed the built-in
        X = []
        Y = []
        for image in os.listdir(data_dir):
            if '.jpg' in image:
                train_image = img_to_array(load_img(data_dir + image))
                # train_image = scipy.misc.imresize(train_image, (64, 64))
                train_image = train_image / 255
                X.append(rgb2lab(train_image)[:, :, 0])
                Y.append((rgb2lab(train_image)[:, :, 1:]) / 128)
        X = np.array(X, dtype=float)  # this converts it into a huge vector
        Y = np.array(Y, dtype=float)
        X = X.reshape(len(X), 255, 255, 1)
        Y = Y.reshape(len(Y), 255, 255, 2)
        dimData_input = np.prod(X.shape[1:])
        dimData_output = np.prod(Y.shape[1:])
        X = X.reshape(X.shape[0], dimData_input)
        Y = Y.reshape(Y.shape[0], dimData_output)
        train_images, test_images, train_labels, test_labels = train_test_split(X, Y, test_size = 0.20)
        model2 = Sequential()
        model2.add(Dense(1024, activation='relu', input_shape=(dimData_input,)))  # was `input`, the built-in
        # model4.add(Dense(1024, activation='relu'))
        # model4.add(Dense(1024, activation='relu'))
        model2.add(Dense(8192, activation='tanh'))
        model2.add(Dense(8192, activation='tanh'))
        model2.summary()
def save_model(self):
# Save model to disk
model_json = self.model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
self.model.save_weights("model.h5")
print("Saved model to disk")
ac = CNNModel()
ac.set_images()
ac.train()
ac.save_model()
|
[
"manpreetdot11@gmail.com"
] |
manpreetdot11@gmail.com
|
379486d00f0e987e17171fdb512a37eede8de8be
|
2b485c67c723151f73ec96da9f6337a0c9857dae
|
/easy/q844 backspaceStrCmp.py
|
14e5a37a7c60bca57bec20299809afd8391389f6
|
[] |
no_license
|
Anupya/leetcode
|
c7792e6ac61b655491a1c734f9167281356471d3
|
cb45e66a41e0c6a8583bb9c4bf846b470ef4bc0f
|
refs/heads/master
| 2022-10-10T14:01:22.189414
| 2022-09-07T21:36:24
| 2022-09-07T21:36:24
| 151,865,310
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
# Given two strings s and t, return true if they are equal when both are typed into empty text editors. '#' means a backspace character.
# Note that after backspacing an empty text, the text will continue empty.
class Solution:
def backspaceCompare(self, s: str, t: str) -> bool:
# preprocess s
newS = ""
for x in s:
if x == '#' and len(newS):
newS = newS[:-1]
if x != '#':
newS += x
# preprocess t
newT = ""
for x in t:
if x == '#' and len(newT):
newT = newT[:-1]
if x != '#':
newT += x
return newS == newT
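
# Examples (added): Solution().backspaceCompare("ab#c", "ad#c") -> True (both reduce to "ac");
# Solution().backspaceCompare("a#c", "b") -> False ("a#c" reduces to "c").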
|
[
"anupya@hotmail.ca"
] |
anupya@hotmail.ca
|
7b0b6659a3a83edf47364333170527adf0b1588a
|
4fc33bc0b47557f93458e597bbfaa66d625b9573
|
/FaceDetectCamerav3Multiface.py
|
9632b2a5f904e20776ea76d183b6c6c93be543b9
|
[] |
no_license
|
pedroZenone/FaceDetection
|
06eaec9e491359f70bc0f5c0fc740beb53b78b69
|
56fb2e0ba9e9d97d222365f20402c4d45c843aab
|
refs/heads/master
| 2020-04-14T00:10:48.220111
| 2018-12-29T17:09:13
| 2018-12-29T17:09:13
| 163,526,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,328
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 22 14:44:59 2018
@author: pedzenon
"""
import cv2
from imutils.video import FPS
from google.cloud import vision
from google.cloud.vision import types
import numpy as np
from queue import Queue
from threading import Thread
import operator
import pandas as pd
# initialize the variable used for communication between the thread and main
sentiment = pd.DataFrame([['NoSentiment',0,0,0]],columns=['sentiment','x','y','dist'])
def main():
# fps.stop()
# print(fps.fps())
cam = cv2.VideoCapture(0)
window = []
FPSxseg = 12
client = vision.ImageAnnotatorClient()
    q = Queue()  # thread-to-main communication queue
    q.put("Finish")  # initialization
global sentiment
face_img = {'NoSentiment':cv2.imread("noSentiment.png",-1), 'anger': cv2.imread("anger.png",-1) ,
'joy': cv2.imread("joy.png",-1) , 'surprise': cv2.imread("surprise.png",-1) ,
'sorrow':cv2.imread("sorrow.png",-1)}
# fps = FPS().start()
while True:
ret_val, img = cam.read()
img = cv2.flip(img, 1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        # record whether a face appeared in this frame
if (len(faces) >= 1):
window.append(1)
else:
window.append(0)
        # every 2 seconds, test whether a face is present
if(len(window) == FPSxseg*2 + 1):
if(sum(window) > 0.75*FPSxseg):
image = types.Image(content= np.array(cv2.imencode('.jpg', img)[1]).tobytes())
# Start thread if not started before
if(q.empty() == False):
if(q.get() == "Finish"):
in_thread = {'client':client,'image':image,'queue':q}
t = Thread(target=GoogleCall, args=(in_thread,))
t.start()
else:
sentiment['sentiment'] = 'NoSentiment'
window.clear()
        # for each face OpenCV detects, match the Google face to the OpenCV face
for (x_cv,y_cv,w,h) in faces:
x_cv = np.float64(x_cv)
y_cv = np.float64(y_cv)
sentiment['dist'] = 0
sentiment['dist'] = np.power(sentiment.x - x_cv,2) + np.power(sentiment.y - y_cv,2)
senti = sentiment.iloc[sentiment['dist'].idxmin(),].sentiment
face_add(face_img[senti],img,np.int32(x_cv),np.int32(y_cv),w,h)
cv2.imshow('my webcam', img)
if cv2.waitKey(1) == 27:
break # esc to quit
# fps.update()
cv2.destroyAllWindows()
# =============================================================================
# @fn: face_add
# @s_img: emoji image
# @l_img: image onto which the emoji must be pasted
# @x,y: coordinates of the person's face
# @w,h: dimensions of the face
# @brief: overlays the emoji image onto the base image
# =============================================================================
def face_add(s_img,l_img,x,y,w,h):
rec_dim = max([h,w])
dim = (rec_dim,rec_dim)
s_img = cv2.resize(s_img, dim, interpolation = cv2.INTER_AREA)
x_offset= x
y_offset= y
y1, y2 = y_offset, y_offset + s_img.shape[0]
x1, x2 = x_offset, x_offset + s_img.shape[1]
alpha_s = s_img[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
l_img[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +
alpha_l * l_img[y1:y2, x1:x2, c])
# =============================================================================
# @fn: GoogleCall
# @in_param: dict with the Google client, the image to process, and the queue for thread communication
# @brief: calls the Google API and loads the sentiment the API returned into the global variable `sentiment`
# =============================================================================
def GoogleCall(in_param):
print('Google Call!')
client = in_param['client']
image = in_param['image']
queue = in_param['queue']
response = client.face_detection(image=image)
faces_ = response.face_annotations
global sentiment
    sentiment = sentiment_vote(faces_)  # load the global variable with the result
    queue.put("Finish")  # tell main the thread has finished!
# =============================================================================
# @fn: sentiment_vote
# @faces: Google response JSON
# @brief: returns the resulting sentiment Google sent, together with the left-eye position used to track faces
# @out: a list of sentiment + eye-position entries
# =============================================================================
def sentiment_vote(faces):
sentiment = []
    likelihood_name = (0, 0, 1, 2, 3, 4)
google_vision = {}
for face in faces:
aux = {}
google_vision['anger'] = likelihood_name[face.anger_likelihood]
google_vision['joy'] = likelihood_name[face.joy_likelihood]
google_vision['surprise'] = likelihood_name[face.surprise_likelihood]
google_vision['sorrow'] = likelihood_name[face.sorrow_likelihood]
        aux['y'] = face.landmarks[0].position.y
        aux['x'] = face.landmarks[0].position.x
sentiment_aux = max(google_vision.items(), key=operator.itemgetter(1))[0]
if(google_vision[sentiment_aux] >= 2): # likely or very_likely
aux['sentiment'] = sentiment_aux
else:
aux['sentiment'] = 'NoSentiment'
sentiment.append(aux)
    return pd.DataFrame(sentiment)  # return a dataframe with the sentiment and the position
##############################################################################
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
pedroZenone.noreply@github.com
|
6cc91dd9608a49fe3e92eb0154fc330d8290deca
|
154c2668551ed260e1277bf95b2225f9de7365a7
|
/Ejercicios.py/Ahorcados.py
|
df5bb04509e8920f9b6f4675d69d2e7fc437c4ba
|
[] |
no_license
|
felipegarciab9601/Ejercicios.py
|
8ab5efd749b7edecfa8193ccd3d68e721952772d
|
b6db8bde34f246e78883c7a97ee26f529f70ac71
|
refs/heads/master
| 2022-04-24T13:20:37.146167
| 2020-04-22T16:12:02
| 2020-04-22T16:12:02
| 257,684,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,155
|
py
|
import random
IMAGES = ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
| |
|
=========''', '''
+---+
| |
O |
/|\ |
| |
/ |
=========''', '''
+---+
| |
O |
/|\ |
| |
/ \ |
=========''', '''
''']
WORDS = [
'lavadora',
'secadora',
'sofa',
'gobierno',
'diputado',
'democracia',
'computadora',
'teclado'
]
def random_word():
idx = random.randint(0, len(WORDS) - 1)
return WORDS[idx]
def display_board(hidden_word, tries):
print(IMAGES[tries])
print('')
print(hidden_word)
print('--- * --- * --- * --- * --- * --- ')
def run():
word = random_word()
hidden_word = ['-'] * len(word)
tries = 0
while True:
display_board(hidden_word, tries)
        current_letter = str(input('Escoge una letra: '))
letter_indexes = []
for idx in range(len(word)):
if word[idx] == current_letter:
letter_indexes.append(idx)
if len(letter_indexes) == 0:
tries += 1
if tries == 7:
display_board(hidden_word, tries)
print('')
print('¡Perdiste! La palabra correcta era {}'.format(word))
break
else:
for idx in letter_indexes:
hidden_word[idx] = current_letter
letter_indexes = []
try:
hidden_word.index('-')
except ValueError:
print('')
print('¡Felicidades! Ganaste. La palabra es: {}'.format(word))
break
if __name__ == '__main__':
print('B I E N V E N I D O S A A H O R C A D O S')
run()
|
[
"felipegarcia9631@hotmail.com"
] |
felipegarcia9631@hotmail.com
|
85af1d64e52abeaebaed17dddf17a473c39de16f
|
cb59b3eac9c01ee126077b3d20c474b89fd479ee
|
/Argishti/Question.py
|
8d5b7a9f383428db6e5bedcd642b32c679f110a8
|
[] |
no_license
|
Levon187/ufar-python
|
0648ee7711c46b1df00d1d04b7b1cbe3c5437c55
|
82c934c7cc017ecac457a339f9ac8ca4d9658431
|
refs/heads/master
| 2023-04-15T10:56:32.778755
| 2021-04-21T10:28:14
| 2021-04-21T10:28:14
| 335,051,877
| 0
| 0
| null | 2021-02-08T06:56:22
| 2021-02-01T18:53:27
|
Python
|
UTF-8
|
Python
| false
| false
| 120
|
py
|
class Question:
def __init__(self, question, answer):
self.question = question
self.answer = answer
|
[
"tigranyan.argishti@gmail.com"
] |
tigranyan.argishti@gmail.com
|
3708487f4168be67105fc290391be7c47ce0d308
|
6706c2dc2291a1530d14df2647aa79f787caeb49
|
/Assignment_3/Code7.py
|
0907b9eefc161512508cc6b2062893bbcb1145bf
|
[] |
no_license
|
Tanishksingh/cyber_security
|
f31ecb7b25ed2099f92f614b9664c652d17c5cb7
|
cfade3623f921be3905f2df636409998dba71ec5
|
refs/heads/master
| 2023-01-22T22:12:07.339996
| 2020-12-04T05:01:59
| 2020-12-04T05:01:59
| 293,023,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
def summer_69(l):
    total = 0  # renamed from `sum`, which shadowed the built-in
    n = len(l)  # use len() directly instead of rebinding the name `len`
    if n == 0:
        return 0
    for i in l:
        if i != 6:
            total = total + i
        else:
            break
    for i in range(n):
        if l[i] == 9:
            break
    for j in range(i + 1, n):
        total = total + l[j]
    return total
l=[2,1,6,9,11]
print(summer_69(l))
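# Expected output (added): 14 -- the 2 and 1 before the 6 count, the 6..9 block is
# skipped, and the 11 after the 9 counts.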
|
[
"noreply@github.com"
] |
Tanishksingh.noreply@github.com
|
c1d2c127f9400ecd79337517d74aaa2a3243a246
|
8ae2f13c2860ec2c21e0f839fa044cdb12b26059
|
/random_lut_generate/LUT1DHist.py
|
4b500b63b2fdfe972633a45a10a34e087b7b6f60
|
[] |
no_license
|
inyukwo1/LUTFinder
|
b5fcfc0960b4680926eebc23eb992bebafda88fe
|
b851e483ce8c8f46f8f77b677c33ed475b71f1f1
|
refs/heads/master
| 2020-03-19T00:57:28.425952
| 2018-07-30T07:52:55
| 2018-07-30T07:52:55
| 135,512,415
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
import os
import sys
import random
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utils.QuadraticFunction import QuadraticFunction
from utils.LinearFunction import get_linearfunction_from_two_point
from random_lut_generate.LUT1D import LUT1D
def random_0_to_1():
val = random.uniform(0., 1.)
    # the loop below rejects mid-range values to make the curve more dynamic
while(0.2 < val and val < 0.8):
val = random.uniform(0., 1.)
return val
def make_fixed_hists(floats):
    hists = []
    for i in range(len(floats)):
        value = floats[i]  # renamed from `float`, which shadowed the built-in type
        point_pair = (1. / (len(floats) - 1) * i, value)
        hists.append(point_pair)
    return hists
def make_random_hists(hist_num):
hists = []
for i in range(hist_num):
random_float = random_0_to_1()
point_pair = (1. / (hist_num - 1) * i, random_float)
hists.append(point_pair)
return hists
def make_linear_functions_from_hists(hists):
linear_funcs = []
for i in range(len(hists) - 1):
linear_func = get_linearfunction_from_two_point(hists[i], hists[i + 1])
x_range = (hists[i][0], hists[i+1][0])
linear_funcs.append((x_range, linear_func))
return linear_funcs
def integrate_linear_funcs(linear_funcs):
integrated_funcs = []
start_y = 0
for x_range, linear_func in linear_funcs:
integrated = linear_func.get_integrate()
start_point = (x_range[0], start_y)
integrated.vertical_move(start_point)
integrated_funcs.append((x_range, integrated))
start_y = integrated.calc(x_range[1])
end_y = start_y
return integrated_funcs, end_y
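
# Note (added): assuming get_integrate() returns the antiderivative and vertical_move()
# shifts a piece to start at the given point, the chained result is continuous and,
# because the histogram values are nonnegative, monotonically nondecreasing;
# _init_color_map later divides by end_y so the final LUT entry lands at 256.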
class LUT1DRandomHist(LUT1D):
def __init__(self, hist_num):
LUT1D.__init__(self)
for channel in ['r', 'g', 'b']:
self._init_color_map(channel, hist_num)
def _init_color_map(self, channel, hist_num):
hists = make_random_hists(hist_num)
linear_funcs = make_linear_functions_from_hists(hists)
integrated_funcs, end_y = integrate_linear_funcs(linear_funcs)
for x_range, integrated_func in integrated_funcs:
denormalized_start_x = int(x_range[0] * 256)
denormalized_end_x = int(x_range[1] * 256)
for x in range(denormalized_start_x, denormalized_end_x):
y = integrated_func.calc(x / 256.)
normalized_y = y * 256. / end_y
self.color_map[channel].append(normalized_y)
assert len(self.color_map[channel]) == 256
if __name__ == "__main__":
lut = LUT1DRandomHist(5)
lut.plot()
|
[
"ihna@voyagerx.com"
] |
ihna@voyagerx.com
|
f3561d81c6add357eae34a128836a3ea8c0c8073
|
45be19034693c00dbcc87dc2c0e8c3a30f456abb
|
/bin/rst2s5.py
|
bb57e73256925cc6efc1a0d676e82c3b4be23d04
|
[] |
no_license
|
yochidros/Huffman-Server
|
282ad898cdf8637965c2323ad8a6175e2ff4ee69
|
2eeb6a457c8befc44b7ad645ceae53a64ca3c93c
|
refs/heads/master
| 2022-09-03T05:00:22.730865
| 2018-02-04T10:17:18
| 2018-02-04T10:17:18
| 103,617,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/Users/yotio/yochio/python/huffman/bin/python
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
|
[
"mm9.movement.trb@gmail.com"
] |
mm9.movement.trb@gmail.com
|
561046dc8ce00c3f0a9c89eaa8757544bf3c445f
|
d54ff297c945f1400418493307036b7d6514c4b8
|
/draw_cost.py
|
67759eb196def69d6b3691aac2fa97d50e8b9da8
|
[] |
no_license
|
whoyao/python-drawing-exercise
|
b0f54f28776cbd8deb4f75cd0b2aa1b39ecc5e96
|
4fcf3c76659b5cfe29be8ec90a4fe7ef45aadd85
|
refs/heads/master
| 2020-04-15T03:59:18.138540
| 2019-01-07T02:29:28
| 2019-01-07T02:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
import os
import time
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from io import StringIO
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
cm = plt.get_cmap('rainbow')
read_path = "/tmp/cost_fifo.pipe"
def draw_cost_image(data):
df = pd.read_csv(StringIO(data.decode('utf-8')), delimiter=";", header=None)
df = df.dropna()
df_collision = df[np.abs(df[3]) > 0.5]
sc = plt.scatter(df[0], df[1], c=df[2], vmin=min(df[2]), vmax=max(df[2]), cmap=cm)
print(df_collision)
sc2 = plt.scatter(df_collision[0], df_collision[1], c='k', marker='^', s=50)
plt.colorbar(sc)
plt.xlabel("dis")
plt.ylabel("vel")
plt.draw()
plt.pause(0.0001)
plt.clf()
def show_cost():
try:
os.mkfifo(read_path)
except OSError as e:
print("Pipe error:", e)
rf = os.open(read_path, os.O_RDONLY)
plt.ion()
while True:
# try:
data = os.read(rf, 4096)
if len(data) == 0:
print("No data!")
else:
# print(data)
draw_cost_image(data)
time.sleep(0.2)
show_cost()
|
[
"qsct9501@126.com"
] |
qsct9501@126.com
|
74b42f0bcfe5607557cba09fb5f0be4ade0a28ad
|
ee0fdbf9616fba410ccdf34f2c8e3f8364eb6f09
|
/MultiView/noisyMNIST/MNIST_FG_mv.py
|
d699f82e5bc224dcd11252585dc02b3ec6237e01
|
[] |
no_license
|
zhangqi19880501/CorrelatedEmbeddings
|
1bc11d28956c85a8a091d5d0d77e72aee5958857
|
1d899dc341c347847e9da790f98b7577047faa76
|
refs/heads/master
| 2023-08-25T04:22:36.423074
| 2020-03-09T12:06:01
| 2020-03-09T12:06:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,096
|
py
|
# -*- coding: utf-8 -*-
""""
Convoutional NN script for MNIST based on Tensorflow Tutorial.
Author: fdcalmon@us.ibm.com
"""
import tensorflow as tf
import pickle
import gzip
import numpy as np
import scipy as sp
import time
import pandas as pd
# from util import load_data
from models_mv import *
# Initialize the input of F and G nets
X = tf.placeholder(tf.float32, shape=[None, 784], name='X1')
Y = tf.placeholder(tf.float32, shape=[None, 784], name='X2')
with open('noisy_mnist', 'rb') as f1:
    n_mnist = pickle.load(f1)
with open('noisy_rotate_mnist', 'rb') as f2:
    nr_mnist = pickle.load(f2)
train_x = n_mnist[0][0]
train_y = nr_mnist[0][0]
test_x = n_mnist[2][0]
test_y = nr_mnist[2][0]
train_labels = n_mnist[0][1].reshape((len(n_mnist[0][1]), 1))
test_labels = n_mnist[2][1].reshape((len(n_mnist[2][1]), 1))
def train(var_name, mb_size, n_steps, d):
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Construct networks
F_Net = F_net(X, d)
    G_Net = G_net(Y, d)
# Loss functions
F_loss, _ = F_loss_svd(G_Net, F_Net, mb_size)
G_loss = F_loss
# Gradient-based solver
learning_rate = 0.01
F_solver = tf.train.AdagradOptimizer(learning_rate).minimize(F_loss)
G_solver = tf.train.AdagradOptimizer(learning_rate).minimize(G_loss)
# run model
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# saver = tf.train.Saver()
file = open('Data/'+var_name+'_log.txt','w')
file.write('Iter\t sec/iter \t total sec\n')
t0 = time.time()
for it in range(n_steps):
randidx = np.random.randint(len(train_x), size=mb_size)
X_mb = train_x[randidx]
Y_mb = train_y[randidx]
_, F_loss_curr = sess.run([F_solver, F_loss], feed_dict={X: X_mb, Y: Y_mb})
_, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={X: X_mb, Y: Y_mb})
if (it+1)%1000 == 0:
t1 =time.time()
total_time = t1-t0
time_per_iter = total_time/(it+1)
file.write('{:.0f}\t {:.5f} \t {:.5f}\n'.format(it+1,time_per_iter,total_time))
file.flush()
# evaluate on train set
print('Reconstruct the results')
F_output_train = sess.run(F_Net, feed_dict={X: train_x})
G_output_train = sess.run(G_Net, feed_dict={Y: train_y})
# A, a, B, b = normalizeFG(F_output_train, G_output_train)
# wF_train, wG_train = normalization(F_output_train, G_output_train, A, B, a, b)
# trueCorr_train, corrG_train, newCorr_train, Anorm_train, Bnorm_train, wF_train, wG_train = computeMetrics(F_output_train, G_output_train)
# mnist_train_labels = np.zeros((len(F_output_train), 1))
# for k in range(len(F_output_train)):
# mnist_train_labels[k] = list(mnist.train.labels[k]).index(1)
# print(F_output_train.shape)
# print(G_output_train.shape)
# print(len(train_labels))
df_data_train = pd.DataFrame(np.hstack((F_output_train, G_output_train, train_labels)))
df_data_train.to_csv('Data_FG_new/'+var_name+'_train.csv')
    # evaluate on the test set
F_output_test = sess.run(F_Net, feed_dict={X: test_x})
G_output_test = sess.run(G_Net, feed_dict={Y: test_y})
# wF_test, wG_test = normalization(F_output_test, G_output_test, A, B, a, b)
# trueCorr_test, corrG_test, newCorr_test, Anorm_test, Bnorm_test, wF_test, wG_test = computeMetrics(F_output_test, G_output_test)
# mnist_test_labels = np.zeros((len(F_output_test), 1))
# for k in range(len(F_output_test)):
# mnist_test_labels[k] = list(mnist.test.labels[k]).index(1)
df_data_test = pd.DataFrame(np.hstack((F_output_test, G_output_test, test_labels)))
df_data_test.to_csv('Data_FG_new/'+var_name+'_test.csv')
# save_path = saver.save(sess, var_name+'.ckpt')
file.close()
sess.close()
## creates and saves tensorflow models for each of the digits
if __name__ == '__main__':
var_name = 'MNIST_FG_mv'
d_list = [40]
for i in range(len(d_list)):
print(i)
train(var_name+str(d_list[i]), 2048, 50000, d_list[i])
|
[
"noreply@github.com"
] |
zhangqi19880501.noreply@github.com
|
1dac47c4a4a311a2a8edb0ee7423e2b80b7a07d5
|
a0fdab5bea482ece570ae798dca5fe19307e8610
|
/python/cloneobj.py
|
e182624c4f07998e8f3cf133a6eb532fbad29059
|
[] |
no_license
|
velppa/oracle-scripts
|
9cf9345fd291293b3b54cd1374a4e1f3b03e93ef
|
ee22c9158bf1dd0524e0f6d2c80e74292c7895d1
|
refs/heads/master
| 2021-05-31T23:31:51.041367
| 2016-06-10T08:50:56
| 2016-06-10T08:50:56
| 8,296,513
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,367
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cloneobj.py
A module for clone objects from one Oracle Database to another.
history:
0.1.0 (2013-08-01): Initial version
0.1.1 (2013-08-01): + Added ability to create object if it doesn't exists
on target DB
0.1.2 (2013-08-02): + Added 'select' attribute to Cloner class
which used as select statement if set.
If not set then usual SELECT * FROM is used.
+ Cloner logs total number of inserted rows instead
number of insert rows on current step
~ str() replaced to {!s} in string formatting
0.1.3 (2013-08-12): ~ Added parameter names to format strings
+ Passwords are now hidden in __repr__
+ Added `insert` function to Cloner.clone to provide
regular insert if bulk insert fails with TypeError
0.1.4 (2013-08-12): ~ Minor format strings improvements
0.2.0 (2013-08-14): + Added Cloner.columns and Cloner.where attributes
+ INSERT in Cloner is now column-aware of cursor it
takes to insert -- ability to insert into some columns
0.2.1 (2013-11-25): ~ Changes in module header, follow PEP-257
+ Added logging instead of print
0.2.2 (2014-02-11): ~ Python3 compatibility
0.3 (2014-02-19): ~ Fixed bug with TypeError
+ Connection now connects on execute if not active
"""
import re
import datetime
import logging
import cx_Oracle
__version__ = '0.3'
__author__ = 'Pavel Popov'
__email__ = 'pavelpopov@outlook.com'
__date__ = '2014-02-19'
__license__ = 'GPLv3'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def setup_logger():
FORMAT = ''
formatter = logging.Formatter(fmt=FORMAT)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
setup_logger()
class Connection:
"""Provides connection to Oracle DB and corresponding methods."""
def __init__(self, connection_string):
cs = connection_string
self.__connection_string = cs
self.connection_string = '{user}@{db}'.format(user=cs[0:cs.index('/')], db=cs[cs.rindex('@') + 1:])
self.conn = None
self.cursor = None
self.active = False
def __repr__(self):
return "{status} connection to '{conn}'".format(status=self.status(), conn=self.connection_string)
def connect(self):
"""Establishes connection."""
if not self.active:
self.conn = cx_Oracle.connect(self.__connection_string)
self.cursor = self.conn.cursor()
self.active = True
def close(self):
"""Closes connection."""
if self.active:
self.cursor.close()
self.conn.close()
self.active = False
def status(self):
return 'Active' if self.active else 'Not active'
def commit(self):
"""Commits transaction on connection level."""
self.conn.commit()
# todo: add logger instead of print
print('Commit complete.')
def object_exists(self, obj):
q = """SELECT 1
FROM all_objects
WHERE owner = upper(:owner)
AND object_name = upper(:name)
AND object_type = upper(:type)
"""
params = {'owner': obj.owner, 'name': obj.name, 'type': obj.type}
self.cursor.execute(q, params)
return len(self.cursor.fetchall()) == 1
def ddl(self, obj):
q = """SELECT dbms_metadata.get_ddl(upper(:type), upper(:name), upper(:owner)) FROM dual"""
params = {'owner': obj.owner, 'name': obj.name, 'type': obj.type}
self.execute(q, params)
return self.cursor.fetchone()[0].read()
def ddl_target(self, ddl, from_obj, to_obj):
# removing schema name from DDL
ddl = ddl.replace(' {type} "{owner}"."{name}"'.format(type=from_obj.type, owner=from_obj.owner.upper(),
name=from_obj.name.upper()),
' {type} "{name}"'.format(type=to_obj.type, name=to_obj.name.upper()))
# remapping tablespace
r = re.compile('TABLESPACE ".*"')
tablespace = to_obj.opts['tablespace']
ddl = r.sub('TABLESPACE "{name}"'.format(name=tablespace) if tablespace is not None else '', ddl)
return ddl
def log(self, query, params=None):
if params is None:
print("ISSUING '{query}' ON '{db}'".format(query=query, db=self.connection_string))
else:
print("ISSUING '{query}' WITH PARAMS {params!s} ON '{db}'".format(query=query, params=params,
db=self.connection_string))
def execute(self, query, params=None, print_output=False):
"""
Execute statement at the connection.
If connection is not active tries to connect first.
Arguments:
query -- statement to be executed
params -- dictionary with bind variables
print_output -- boolean flag to print output to stdout
"""
if not self.active:
self.connect()
if isinstance(params, dict):
self.log(query, params)
self.cursor.execute(query, params)
else:
self.log(query)
self.cursor.execute(query)
if print_output:
for row in self.cursor:
# todo: tab-separated print instead of built-one
print(row)
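# Connection usage sketch (the connect string below is hypothetical):
#
#     conn = Connection('scott/tiger@orcl')
#     conn.execute('SELECT sysdate FROM dual', print_output=True)
#     conn.close()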
class DBObject:
"""Describe Oracle Database object"""
def __init__(self, name=None, owner=None, type='TABLE', opts=None):
# todo: maybe combine owner and object_name together?
self.owner = owner.lower() if owner is not None else None
self.name = name.lower() if name is not None else None
self.type = type
self.opts = {'tablespace': None, 'truncate': False, 'create_if_not_exists': False}
if isinstance(opts, dict):
self.opts.update(opts)
def __repr__(self):
return '{type} {owner}.{name}'.format(type=self.type.lower(), owner=self.owner, name=self.name)
class Cloner:
"""Copies content of one object to another"""
BULK_ROWS = 25000
# BULK_ROWS = 100000
def __init__(self, from_db, from_obj, to_db, to_obj):
self.from_db = from_db
self.to_db = to_db
self.from_obj = from_obj
self.to_obj = to_obj
self.select = None
self.columns = None
self.where = None
if not (self.from_obj.type == 'TABLE' and self.to_obj.type == 'TABLE'):
raise Exception('Currently only tables are supported')
if self.from_obj.name is None:
raise Exception('Specify object name for source object')
if self.to_obj.name is None:
self.to_obj.name = self.from_obj.name
if (self.from_db.connection_string == self.to_db.connection_string and
self.from_obj.name == self.to_obj.name and
self.from_obj.owner == self.to_obj.owner and
self.from_obj.type == self.to_obj.type):
raise Exception('Objects are equal')
def connect(self):
self.from_db.connect()
self.set_owner(self.from_obj, self.from_db)
self.to_db.connect()
self.set_owner(self.to_obj, self.to_db)
def close(self):
"""Close connections to Databases"""
self.from_db.close()
self.to_db.close()
@staticmethod
def set_owner(obj, conn):
if obj.owner is None:
conn.execute('SELECT LOWER(user) FROM dual')
obj.owner = conn.cursor.fetchone()[0]
def clone(self):
"""Clone object from_obj to to_obj"""
# todo: measure time spent on transfer
self.connect()
if not self.to_db.object_exists(self.to_obj):
if self.to_obj.opts['create_if_not_exists']:
self.from_obj.opts['tablespace'] = self.to_obj.opts['tablespace']
from_ddl = self.from_db.ddl(self.from_obj)
to_ddl = self.to_db.ddl_target(from_ddl, self.from_obj, self.to_obj)
# todo: create target objects on cursor basis instead of object basis
self.to_db.execute(to_ddl)
else:
raise Exception('First, create {obj} at {db}'.format(obj=self.to_obj, db=self.to_db.connection_string))
if self.select is None:
where = '1=1' if self.where is None else self.where
columns = '*' if self.columns is None else self.columns
self.select = '''SELECT {columns}
FROM {owner}.{name}
WHERE 1=1
AND {where}
'''.format(owner=self.from_obj.owner, name=self.from_obj.name,
columns=columns, where=where)
if self.to_obj.opts['truncate'] and self.to_obj.type == 'TABLE':
self.to_db.execute('TRUNCATE TABLE {owner}.{name}'.format(owner=self.to_obj.owner, name=self.to_obj.name))
self.from_db.execute(self.select)
desc = self.from_db.cursor.description
columns = ', '.join([x[0] for x in desc]).lower()
placeholders = ', '.join([':{!s}'.format(x) for x in range(len(desc))])
insert = 'INSERT INTO {owner}.{table}({columns}) VALUES({placeholders})'
insert = insert.format(owner=self.to_obj.owner, table=self.to_obj.name,
columns=columns, placeholders=placeholders)
self.to_db.log(insert)
self.to_db.cursor.prepare(insert)
def insert(rows):
rowcount = 0
for row in rows:
try:
self.to_db.cursor.execute(None, row)
except TypeError as e:
print('TypeError on row occurred: {}'.format(e))
print(row)
rowcount += 1
return rowcount
def bulk_insert(rows, total_rows=0):
if rows:
try:
self.to_db.cursor.executemany(None, rows)
rowcount = self.to_db.cursor.rowcount
except TypeError as e:
print('TypeError occurred: {}'.format(e))
print('Trying to insert row-by-row')
rowcount = insert(rows)
print('{time}: {x} rows processed'.format(time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
x=total_rows + rowcount))
return rowcount
else:
print('Empty set - nothing to insert')
return 0
total_rows = 0
i = 0
rows = []
for row in self.from_db.cursor:
i += 1
rows.append(row)
if i == Cloner.BULK_ROWS:
total_rows += bulk_insert(rows, total_rows)
i = 0
rows = []
bulk_insert(rows, total_rows)
self.to_db.commit()
def __repr__(self):
return 'Cloner from {from_obj} at {from_db} to '\
'{to_obj} at {to_db}'.format(from_obj=self.from_obj, from_db=self.from_db.connection_string,
to_obj=self.to_obj, to_db=self.to_db.connection_string)
def example():
# todo: accept command line params
from_db = Connection('user/pass@qwer')
from_obj = DBObject(owner='SCHEME', name='SOME_TABLE')
to_db = Connection('user2/pass2@qwer2')
to_obj = DBObject(name='SOME_TABLE2',
opts={'truncate': True, 'create_if_not_exists': False})
cloner = Cloner(from_db=from_db, from_obj=from_obj,
to_db=to_db, to_obj=to_obj)
cloner.select = """SELECT table_name , owner
FROM all_tables p
WHERE 1=1
AND rownum < 20"""
cloner.connect()
print(cloner)
cloner.clone()
cloner.close()
if __name__ == '__main__':
example()
|
[
"schmooser@gmail.com"
] |
schmooser@gmail.com
|
9faf595fcc5468767901d55f56ff17d44f768a1b
|
9d5f249748163c22cc69ab5bf34a29b455976f61
|
/Product_Hunt/manage.py
|
89265361936832c02978d5fc633c75f15cbcef0e
|
[] |
no_license
|
javed2214/Product-Hunt
|
b572e12c2040e6e2c7b13e09839493597e2a5edb
|
f982a6546ef7958f84c2bb59e387376a934821bf
|
refs/heads/master
| 2020-06-22T05:24:59.396923
| 2020-02-13T11:40:47
| 2020-02-13T11:40:47
| 197,644,961
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Product_Hunt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"javed.ansari4122@gmail.com"
] |
javed.ansari4122@gmail.com
|
4877a8a4c7be4b0f61b169fea50729f49ce0471a
|
8036b0ee78c96dbc78191bfe48e5dbec71593533
|
/developer/src/developer/__init__.py
|
3052023287ff556db4ea840219538a90b94f13e0
|
[] |
no_license
|
jimhansson/tellstick-server
|
bb4bcf2c249956855af5031a923c508ea734bbde
|
040fbb28e9df728f13f0a306bfa6b4d075a433e3
|
refs/heads/master
| 2021-01-19T03:42:21.653259
| 2017-03-02T15:28:32
| 2017-03-02T15:28:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
# -*- coding: utf-8 -*-
from Developer import Developer
|
[
"micke.prag@telldus.se"
] |
micke.prag@telldus.se
|
ba9874b9f8af165cdc730447c935f81fbb148ff4
|
eab00469f2c29d4747e8f93ee9dde776184e583b
|
/bearded_web/extra/bearded/tools/nmap.py
|
8af0591f3a6ab81e5fb2ffb492a6328aee6881d6
|
[] |
no_license
|
slonoed/bearded
|
a36a2b00ba2ae598a181a308d242cde44a520424
|
db94c88b0d4b91f728b51d6fbea82acd48ba84fe
|
refs/heads/master
| 2021-01-20T23:47:31.226165
| 2013-06-09T10:59:11
| 2013-06-09T10:59:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
# -*- coding: utf-8 -*-
from bearded.tools.cl import CommandLineTool
class NmapTool(CommandLineTool):
pass
|
[
"m0sth8@gmail.com"
] |
m0sth8@gmail.com
|
9388636bc2e2092d6c1bbed3be9ec9c6c7e4326d
|
aa4c21a5eef13136d97030d3aa6a487f3c587800
|
/flask/models/shift_index.py
|
3e3cd4aa033c19d076f4747a0ca05848a906de41
|
[] |
no_license
|
nicolaseschneider/Revel_Take_Home
|
85ea7a24567387c14bf9946f628ec928fad3c9ce
|
9bd1a9bc3a8aa420e880f4f66bdab3ccc04976ec
|
refs/heads/master
| 2022-12-19T01:41:11.041668
| 2020-09-10T16:00:30
| 2020-09-10T16:00:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from datetime import datetime
from db import db
from marshmallow import Schema
class ShiftIndex(db.Model):
id = db.Column(db.Integer, primary_key=True)
shift_id = db.Column(db.Integer, db.ForeignKey('shift.id'))
next_vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicle.id'), nullable=True)
created_at = db.Column(db.DateTime, default=datetime.now)
updated_at = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
class ShiftIndexSchema(Schema):
class Meta:
model = ShiftIndex
        fields = ('id', 'shift_id', 'next_vehicle_id', 'created_at', 'updated_at')
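# A minimal serialization sketch (assumes an application context and a
# persisted row; `shift` is a hypothetical ShiftIndex instance):
#
#     schema = ShiftIndexSchema()
#     payload = schema.dump(shift)  # -> dict with the five declared fields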
|
[
"nicolas.e.schneider@vanderbilt.edu"
] |
nicolas.e.schneider@vanderbilt.edu
|
c2d3e57a5a6b5a10ff258e9de90b260b54892d58
|
a09f4fc0051160f264c0aa0a3d060b23161901a4
|
/blog/migrations/0001_initial.py
|
a1a7f23a6b9c6315c597cc03b5fba48d73dd5a85
|
[] |
no_license
|
todo-2mari5/my-second-djangogirls
|
8ad92c3f3146a1208fa70a389a9822ac59388edc
|
0e911f17e73a768b05ad235cb3a463d46899ab2e
|
refs/heads/master
| 2020-06-06T21:14:13.255906
| 2019-06-21T06:51:11
| 2019-06-21T06:51:11
| 192,854,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.2.2 on 2019-06-20 05:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"ha1ma2chi3@yahoo.co.jp"
] |
ha1ma2chi3@yahoo.co.jp
|
d06917809a1c413d98e8f20232aac174f4a82c3b
|
63d1d69021448811e7d42b0c087cbca116f99a86
|
/web/web/middlewares/auth.py
|
4dd3513c9050d7c2cdbfea66e3062ca7e00d17ba
|
[] |
no_license
|
jackfrued/xpc
|
47ffb9b0059933589c71716e75f99f255758c140
|
92160b05cf4d877aa435a426cef883347b002671
|
refs/heads/master
| 2020-03-28T20:23:04.961050
| 2018-07-06T09:44:25
| 2018-07-06T09:44:25
| 149,064,323
| 2
| 1
| null | 2018-09-17T03:33:07
| 2018-09-17T03:33:07
| null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
from django.http import HttpResponseRedirect
from django.conf import settings
from web.models import Composer
from web.helpers.composer import md5_pwd
need_login = ['/']
class AuthMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
if request.path in need_login:
cid = request.COOKIES.get('cid')
token = request.COOKIES.get('token')
if not cid or md5_pwd(cid, settings.SECRET_KEY) != token:
return HttpResponseRedirect('/login/')
request.composer = Composer.get(cid=cid)
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
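# Registration sketch (an assumption about this project's settings module;
# the exact position in the middleware stack is a project choice):
#
#     MIDDLEWARE = [
#         'django.middleware.common.CommonMiddleware',
#         'web.middlewares.auth.AuthMiddleware',
#     ]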
|
[
"guye999@gmail.com"
] |
guye999@gmail.com
|
e2fa980526ee9288ede168a8cdba489ba891011e
|
b05761d771bb5a85d39d370c649567c1ff3eb089
|
/venv/lib/python3.10/site-packages/jedi/third_party/typeshed/stdlib/3/email/mime/audio.pyi
|
f098016bcd2b5810dfaacbc15e020cb1ea16dc2e
|
[] |
no_license
|
JawshyJ/Coding_Practice
|
88c49cab955eab04609ec1003b6b8c20f103fc06
|
eb6b229d41aa49b1545af2120e6bee8e982adb41
|
refs/heads/master
| 2023-02-19T10:18:04.818542
| 2023-02-06T21:22:58
| 2023-02-06T21:22:58
| 247,788,631
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
pyi
|
/home/runner/.cache/pip/pool/86/c8/f6/db3c49c0b945e933d70f344c92cbb8a14a319f2a54b2161e9f15d2904f
|
[
"37465112+JawshyJ@users.noreply.github.com"
] |
37465112+JawshyJ@users.noreply.github.com
|
7cdc5eafeefeca965fbcc94d556980727053266c
|
48f76de5f7f2055ce5a79a350af6ad5920bb57c4
|
/utils.py
|
e12ac6b28c9ea7a2db236f8ded57cbbb02a150bc
|
[] |
no_license
|
RahulSundar/DL-ROM-Meth
|
5d736e4f70117fb754dd3e5d0ecdff6f93b01897
|
ab6edd36eb96a936647283210ddd1b101aab8c69
|
refs/heads/master
| 2023-03-10T04:38:56.950448
| 2021-02-24T21:09:22
| 2021-02-24T21:09:22
| 432,648,678
| 0
| 1
| null | 2021-11-28T07:37:22
| 2021-11-28T07:37:21
| null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
"""
Stefania Fresca, MOX Laboratory, Politecnico di Milano
February 2019
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
import scipy.io as sio
import h5py
def read_data(mat):
data = sio.loadmat(mat)
S = data['S'].squeeze()
S = np.transpose(S)
return S
def read_large_data(mat):
file = h5py.File(mat, 'r')
S = file['S'][:]
return S
def read_params(mat):
params = sio.loadmat(mat)
params = params['I'].squeeze()
return params
def max_min(S_train, n_train):
S_max = np.max(np.max(S_train[:n_train], axis = 1), axis = 0)
S_min = np.min(np.min(S_train[:n_train], axis = 1), axis = 0)
return S_max, S_min
def scaling(S, S_max, S_min):
S[ : ] = (S - S_min)/(S_max - S_min)
def inverse_scaling(S, S_max, S_min):
S[ : ] = (S_max - S_min) * S + S_min
def zero_pad(S, n):
paddings = np.zeros((S.shape[0], n))
S = np.hstack((S, paddings))
return S
def safe_mkdir(path):
try:
os.mkdir(path)
except OSError:
pass
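# A minimal end-to-end sketch (the file name and sizes are hypothetical):
#
#     S = read_data('snapshots.mat')          # snapshot matrix, (n_samples, n_dof)
#     S_max, S_min = max_min(S, 100)          # stats over the first 100 samples
#     scaling(S, S_max, S_min)                # in-place min-max scaling to [0, 1]
#     S = zero_pad(S, 4)                      # append 4 zero-valued columns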
|
[
"stefania.fresca@polimi.it"
] |
stefania.fresca@polimi.it
|
c9273b245d2a6a3149b716d51969d7382a6673a6
|
d36e503b4f951824f9ce0f5e5bce244b695e6bac
|
/guildwars2/database.py
|
a4cbe59a15906b924432275f62f4850c4ac0eb3c
|
[
"MIT"
] |
permissive
|
greaka/GW2Bot
|
126ab28b919bd3ed9e238ad6292e30172cdd93bc
|
103300d5121f2fe92f25081e429fbff60aed652a
|
refs/heads/master
| 2020-03-07T04:39:25.338173
| 2018-06-22T14:06:49
| 2018-06-22T14:06:49
| 127,272,629
| 0
| 0
|
MIT
| 2018-03-29T09:58:26
| 2018-03-29T09:58:26
| null |
UTF-8
|
Python
| false
| false
| 17,111
|
py
|
import asyncio
import collections
import re
import time
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from pymongo.errors import BulkWriteError
from .exceptions import APIKeyError
class DatabaseMixin:
@commands.command()
@commands.cooldown(1, 5, BucketType.user)
async def skillinfo(self, ctx, *, skill):
"""Information about a given skill"""
user = ctx.author
skill_sanitized = re.escape(skill)
search = re.compile(skill_sanitized + ".*", re.IGNORECASE)
cursor = self.db.skills.find({"name": search})
number = await cursor.count()
if not number:
await ctx.send(
"Your search gave me no results, sorry. Check for typos.")
return
if number > 20:
await ctx.send(
"Your search gave me {} results. Please be more specific".
format(number))
return
items = []
msg = "Which one of these interests you? Type it's number```"
async for item in cursor:
items.append(item)
if number != 1:
for c, m in enumerate(items):
msg += "\n{}: {}".format(c, m["name"])
msg += "```"
message = await ctx.send(msg)
def check(m):
return m.channel == ctx.channel and m.author == user
try:
answer = await self.bot.wait_for(
"message", timeout=120, check=check)
except asyncio.TimeoutError:
                await message.edit(content="No response in time")
return None
try:
num = int(answer.content)
choice = items[num]
except:
await message.edit(content="That's not a number in the list")
return None
try:
await answer.delete()
except:
pass
else:
message = await ctx.send("Searching far and wide...")
choice = items[0]
data = await self.skill_embed(choice)
try:
await message.edit(content=None, embed=data)
except discord.HTTPException:
await ctx.send("Need permission to embed links")
async def skill_embed(self, skill):
# Very inconsistent endpoint, playing it safe
description = None
if "description" in skill:
description = skill["description"]
url = "https://wiki.guildwars2.com/wiki/" + skill["name"].replace(
' ', '_')
async with self.session.head(url) as r:
if not r.status == 200:
url = None
data = discord.Embed(
title=skill["name"], description=description, url=url)
if "icon" in skill:
data.set_thumbnail(url=skill["icon"])
if "professions" in skill:
if skill["professions"]:
professions = skill["professions"]
                if len(professions) == 9:
                    data.add_field(name="Professions", value="All")
                elif len(professions) != 1:
                    data.add_field(
                        name="Professions", value=", ".join(professions))
                else:
                    data.add_field(
                        name="Profession", value=", ".join(professions))
if "facts" in skill:
for fact in skill["facts"]:
try:
if fact["type"] == "Recharge":
data.add_field(name="Cooldown", value=fact["value"])
if fact["type"] == "Distance" or fact["type"] == "Number":
data.add_field(name=fact["text"], value=fact["value"])
if fact["type"] == "ComboField":
data.add_field(
name=fact["text"], value=fact["field_type"])
except:
pass
return data
@commands.group()
@commands.is_owner()
async def database(self, ctx):
"""Commands related to DB management"""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
return
@database.command(name="create")
async def db_create(self, ctx):
"""Create a new database
"""
await self.rebuild_database()
@database.command(name="statistics")
async def db_stats(self, ctx):
"""Some statistics
"""
cursor = self.bot.database.get_users_cursor({
"key": {
"$ne": None
}
}, self)
result = await cursor.count()
await ctx.send("{} registered users".format(result))
cursor_updates = self.bot.database.get_guilds_cursor({
"updates.on": True
})
cursor_daily = self.bot.database.get_guilds_cursor({"daily.on": True})
cursor_news = self.bot.database.get_guilds_cursor({"news.on": True})
result_updates = await cursor_updates.count()
result_daily = await cursor_daily.count()
result_news = await cursor_news.count()
await ctx.send("{} guilds for update notifs\n{} guilds for daily "
"notifs\n{} guilds for news "
"feed".format(result_updates, result_daily,
result_news))
async def get_title(self, title_id):
try:
results = await self.db.titles.find_one({"_id": title_id})
title = results["name"]
except:
return ""
return title
async def get_world_name(self, wid):
try:
doc = await self.db.worlds.find_one({"_id": wid})
name = doc["name"]
except:
name = None
return name
async def get_world_id(self, world):
world = re.escape(world)
world = "^" + world + "$"
search = re.compile(world, re.IGNORECASE)
if world is None:
return None
doc = await self.db.worlds.find_one({"name": search})
if not doc:
return None
return doc["_id"]
async def fetch_statname(self, item):
statset = await self.db.itemstats.find_one({"_id": item})
try:
name = statset["name"]
except:
name = ""
return name
async def fetch_item(self, item):
return await self.db.items.find_one({"_id": item})
async def fetch_key(self, user, scopes=None):
doc = await self.bot.database.get_user(user, self)
if not doc or "key" not in doc or not doc["key"]:
raise APIKeyError(
"No API key associated with {.mention}. "
"Add your key using `$key add` command. If you don't know "
"how, the command includes a tutorial.".format(user))
if scopes:
missing = []
for scope in scopes:
if scope not in doc["key"]["permissions"]:
missing.append(scope)
if missing:
missing = ", ".join(missing)
raise APIKeyError(
"{.mention}, your API key is missing the following "
"permissions to use this command: `{}`\nConsider adding "
"a new key with those permissions "
"checked".format(user, missing))
return doc["key"]
async def cache_dailies(self):
try:
results = await self.call_api("achievements/daily")
await self.cache_endpoint("achievements")
except:
return
try:
doc = {}
for category, dailies in results.items():
daily_list = []
for daily in dailies:
if not daily["level"]["max"] == 80:
continue
daily_doc = await self.db.achievements.find_one({
"_id":
daily["id"]
})
if not daily_doc:
continue
name = daily_doc["name"]
if category == "fractals":
if name.startswith(
"Daily Tier"
) and not name.startswith("Daily Tier 4"):
continue
daily_list.append(name)
doc[category] = sorted(daily_list)
doc["psna"] = [self.get_psna()]
doc["psna_later"] = [self.get_psna(offset_days=1)]
await self.bot.database.set_cog_config(self,
{"cache.dailies": doc})
except Exception as e:
self.log.exception("Exception caching dailies: ", exc_info=e)
async def cache_raids(self):
raids = []
raids_index = await self.call_api("raids")
for raid in raids_index:
raids.append(await self.call_api("raids/" + raid))
await self.bot.database.set_cog_config(self, {"cache.raids": raids})
async def get_raids(self):
config = await self.bot.database.get_cog_config(self)
return config["cache"].get("raids")
async def cache_endpoint(self, endpoint, all_at_once=False):
async def bulk_write(item_group):
bulk = self.db[endpoint].initialize_unordered_bulk_op()
            for item in item_group:
item["_id"] = item.pop("id")
bulk.find({"_id": item["_id"]}).upsert().replace_one(item)
try:
await bulk.execute()
except BulkWriteError as e:
self.log.exception(
"BWE while caching {}".format(endpoint), exc_info=e)
items = await self.call_api(endpoint)
if not all_at_once:
counter = 0
total = len(items)
while True:
percentage = (counter / total) * 100
print("Progress: {0:.1f}%".format(percentage))
ids = ",".join(str(x) for x in items[counter:counter + 200])
if not ids:
print("{} done".format(endpoint))
break
itemgroup = await self.call_api("{}?ids={}".format(
endpoint, ids))
await bulk_write(itemgroup)
counter += 200
else:
itemgroup = await self.call_api("{}?ids=all".format(endpoint))
await bulk_write(itemgroup)
async def rebuild_database(self):
start = time.time()
self.bot.available = False
await self.bot.change_presence(
activity=discord.Game(name="Rebuilding API cache"),
status=discord.Status.dnd)
endpoints = [["items"], ["achievements"], ["itemstats", True], [
"titles", True
], ["recipes"], ["skins"], ["currencies", True], ["skills", True],
["specializations", True], ["traits",
True], ["worlds", True]]
for e in endpoints:
try:
await self.cache_endpoint(*e)
except:
msg = "Caching {} failed".format(e)
                self.log.warning(msg)
owner = self.bot.get_user(self.bot.owner_id)
await owner.send(msg)
await self.db.items.create_index("name")
await self.db.achievements.create_index("name")
await self.db.titles.create_index("name")
await self.db.recipes.create_index("output_item_id")
await self.db.skins.create_index("name")
await self.db.currencies.create_index("name")
await self.db.skills.create_index("name")
await self.db.worlds.create_index("name")
await self.cache_raids()
end = time.time()
self.bot.available = True
print("Done")
self.log.info(
"Database done! Time elapsed: {} seconds".format(end - start))
async def itemname_to_id(self,
destination,
item,
user,
*,
flags=[],
filters={},
database="items",
group_duplicates=False): # TODO cleanup
def consolidate_duplicates(items):
unique_items = collections.OrderedDict()
for item in items:
item_tuple = item["name"], item["rarity"]
if item_tuple not in unique_items:
unique_items[item_tuple] = []
unique_items[item_tuple].append(item["_id"])
unique_list = []
for k, v in unique_items.items():
unique_list.append({"name": k[0], "rarity": k[1], "ids": v})
return unique_list
def check(m):
if isinstance(destination,
(discord.abc.User, discord.abc.PrivateChannel)):
chan = isinstance(m.channel, discord.abc.PrivateChannel)
else:
chan = m.channel == destination.channel
return m.author == user and chan
item_sanitized = re.escape(item)
search = re.compile(item_sanitized + ".*", re.IGNORECASE)
cursor = self.db[database].find({
"name": search,
"flags": {
"$nin": flags
},
**filters
})
number = await cursor.count()
if not number:
await destination.send(
"Your search gave me no results, sorry. Check for "
"typos.\nAlways use singular forms, e.g. Legendary Insight")
return None
if number > 25:
await destination.send("Your search gave me {} item results. "
"Try exact match "
"search? `Y/N`".format(number))
try:
answer = await self.bot.wait_for(
"message", timeout=120, check=check)
except asyncio.TimeoutError:
return None
if answer.content.lower() != "y":
return
exact_match = "^" + item_sanitized + "$"
search = re.compile(exact_match, re.IGNORECASE)
cursor = self.db[database].find({
"name": search,
"flags": {
"$nin": flags
},
**filters
})
number = await cursor.count()
if not number:
await destination.send(
"Your search gave me no results, sorry. Check for "
"typos.\nAlways use singular forms, e.g. Legendary Insight"
)
return None
if number > 25:
await destination.send(
"Your search gave me {} item results. "
"Please be more specific".format(number))
return None
items = []
async for item in cursor:
items.append(item)
items.sort(key=lambda i: i["name"])
longest = len(max([item["name"] for item in items], key=len))
msg = [
"Which one of these interests you? Simply type it's number "
"into the chat now:```ml", "INDEX NAME {}RARITY".format(
" " * (longest)), "-----|------{}|-------".format(
"-" * (longest))
]
if group_duplicates:
distinct_items = consolidate_duplicates(items)
else:
for item in items:
item["ids"] = [item["_id"]]
distinct_items = items
if number != 1:
for c, m in enumerate(distinct_items, 1):
msg.append(" {} {}| {} {}| {}".format(
c, " " * (2 - len(str(c))), m["name"].upper(),
" " * (4 + longest - len(m["name"])), m["rarity"]))
msg.append("```")
message = await destination.send("\n".join(msg))
try:
answer = await self.bot.wait_for(
"message", timeout=120, check=check)
except asyncio.TimeoutError:
await message.edit(content="No response in time")
return None
try:
num = int(answer.content) - 1
choice = distinct_items[num]
except:
await message.edit(content="That's not a number in the list")
return None
try:
await message.delete()
await answer.delete()
except:
pass
else:
choice = distinct_items[0]
for item in items:
if item["_id"] in choice["ids"]:
if item["type"] == "UpgradeComponent":
choice["is_upgrade"] = True
return choice
|
[
"Maselkov@users.noreply.github.com"
] |
Maselkov@users.noreply.github.com
|
669188eac36b8293c12dd5899a45b26c2a0b8884
|
0377ead8058df100bd415cadc152d6f95688f925
|
/system/urls.py
|
af5771d26a0d26a9e841234d2372de005415e7ca
|
[] |
no_license
|
laojiuh/DjangoMall
|
b676112528e8167fdb48153606d68caa298ffdac
|
778ed847bee6f5ad36efbbff6cbf18839ad38138
|
refs/heads/master
| 2022-12-10T18:13:14.950126
| 2020-09-14T08:01:08
| 2020-09-14T08:01:08
| 295,344,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
from django.conf.urls import url, include
from system import views
urlpatterns = [
    # News list
    url(r'^news/$', views.news_list, name='news_list'),
    # News detail
    url(r'^news/(?P<pk>\d+)/$', views.news_detail, name='news_detail'),
    # Verification code
    url(r'^verify/code/$', views.verify_code, name='verify_code'),
]
|
[
"huangzeb@kean.edu"
] |
huangzeb@kean.edu
|
c823246cc4de9cfcf3137a03ec96fe34984eb630
|
d25d6368011a6a41e4346330e500d2a93d87981e
|
/tests/simpletest_camsreg.py
|
4301daaaf91348cb86bd0597499bd390bd164f0f
|
[
"MIT"
] |
permissive
|
andreas-h/mss-chem
|
f8e0f94850c1ba01f896fb79058632d1a81855dc
|
ae91f4f5b4c19a4ff0ce8fd342fe0aed4ce2b9ab
|
refs/heads/master
| 2021-01-19T09:24:34.736706
| 2017-07-13T15:21:23
| 2017-07-13T15:23:35
| 82,105,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,889
|
py
|
import datetime
import os.path
import unittest
import urllib
from msschem.models import CAMSRegDriver
from msschem.download import CAMSRegDownload
import msschem_settings
class TestCAMSRegDriver(unittest.TestCase):
def setUp(self):
self.driver = msschem_settings.register_datasources['CAMSReg_ENSEMBLE']
        yesterday = datetime.date.today() - datetime.timedelta(0)  # NOTE: timedelta(0) actually yields today, despite the variable name
self.fcinit = datetime.datetime(yesterday.year, yesterday.month, yesterday.day)
def test_get_nt(self):
dr = self.driver
init = datetime.datetime(2017, 2, 15, 0, 0)
start = datetime.datetime(2017, 2, 15, 0, 0)
end = datetime.datetime(2017, 2, 15, 1, 0)
self.assertEqual(dr.get_nt(init, start, end), 25)
start = datetime.datetime(2017, 2, 15, 0, 0)
end = datetime.datetime(2017, 2, 16, 0, 0)
self.assertEqual(dr.get_nt(init, start, end), 25)
start = datetime.datetime(2017, 2, 15, 0, 0)
end = datetime.datetime(2017, 2, 16, 1, 0)
self.assertEqual(dr.get_nt(init, start, end), 49)
start = datetime.datetime(2017, 2, 16, 0, 0)
end = datetime.datetime(2017, 2, 17, 0, 0)
self.assertEqual(dr.get_nt(init, start, end), 49)
start = datetime.datetime(2017, 2, 16, 1, 0)
end = datetime.datetime(2017, 2, 17, 0, 0)
self.assertEqual(dr.get_nt(init, start, end), 24)
start = datetime.datetime(2017, 2, 15, 3, 0)
end = datetime.datetime(2017, 2, 18, 7, 0)
self.assertEqual(dr.get_nt(init, start, end), 97)
#@unittest.skip('This takes too long ...')
#def test_postprocess(self):
# dr = self.driver
# dr.postprocess('NO2',
# datetime.datetime(2017, 3, 31),
# ['/home2/hilboll/tmp/msschem/cams_regional/0H24H.nc',
# '/home2/hilboll/tmp/msschem/cams_regional/25H48H.nc',
# '/home2/hilboll/tmp/msschem/cams_regional/49H72H.nc',
# '/home2/hilboll/tmp/msschem/cams_regional/73H96H.nc',
# ])
#def test_download(self):
# self.filenames_tmp = self.driver.download('NO2', self.fcinit)
def test_get(self):
self.driver.get('NO2', self.fcinit)
#class TestCAMSRegDownload(unittest.TestCase):
#
# def setUp(self):
# self.driver = msschem_settings.register_datasources['CAMSReg_ENSEMBLE']
#
# def test_download(self):
# yesterday = datetime.date.today() - datetime.timedelta(1)
# fcinit = datetime.datetime(yesterday.year, yesterday.month, yesterday.day)
# fcstart = fcinit
# fcend = fcstart + datetime.timedelta(4)
# dl = self.driver.dldriver
# dl.get('NO2', fcinit, fcstart, fcend, 'blatest1.nc')
## def get(self, species, fcinit, fcstart, fcend, fn_out):
if __name__ == '__main__':
unittest.main()
|
[
"hilboll@uni-bremen.de"
] |
hilboll@uni-bremen.de
|
1943786d90c3e350147505fb37ab3c28ffdba700
|
1f882cf88e201906e0188d97da69bdf9393f4a8c
|
/sara/main.py
|
39216c24749b3ece7c2bb96f0e6e2f63d498b4b3
|
[] |
no_license
|
kwarwp/ida
|
8d5fb4e893e6958cafbe8a91bcb75167634ff391
|
6f8d783e0fd750d164c676d1c20024e891b8be5b
|
refs/heads/master
| 2022-09-13T14:57:35.198924
| 2022-09-06T13:38:24
| 2022-09-06T13:38:24
| 129,903,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,830
|
py
|
# ida.sara.main.py
from _spy.vitollino.main import Cena, Elemento, Texto, STYLE
from _spy.vitollino.main import INVENTARIO as inv
STYLE["width"]=900
PANTANO = "https://1.bp.blogspot.com/-jALpqBrfBW4/VvvS6bpUnVI/AAAAAAAAATo/sd2gRUbk3tc-rdp8iPeEXCgz6LSQGjGzQ/s1600/Swamp%2BHouse.jpg"
ALICE = "https://3.bp.blogspot.com/-o7Y78sYGkjY/V6O1G7WysSI/AAAAAAAAMO8/IG0Q7cJKKcYA70fLNINSaLG02t9fQT52QCLcB/s1600/ALICE%2B%25283%2529.png"
TARZAN = "https://vignette.wikia.nocookie.net/vsbattles/images/6/68/Tarzan.png/revision/latest?cb=20170117061234"
YODA = "https://vignette.wikia.nocookie.net/disney/images/9/95/Master_Yoda.png/revision/latest?cb=20161024220430&path-prefix=pt-br"
AVENIDA = "https://3.bp.blogspot.com/-L6J4CqwyXWY/W2XD_sCVqhI/AAAAAAAA1Ck/dGexwCTuNsA3YCrC8vaQTb9lsY7dHDdugCLcBGAs/s1600/Ciclovia%2BAmaral%2BPeixoto%2B2.png"
def criarcenas():
    pantano = Cena(img=PANTANO)
    avenida = Cena(img=AVENIDA)
    pantano.direita = avenida
    avenida.esquerda = pantano
    alice = Elemento(img=ALICE, tit="Alice", style=dict(left=150, top=150, width=60, height=200))
    alice.entra(pantano)
    falaalice = Texto(pantano, "On a moonlit night, Ms. ALICE went out for a stroll. Along the way, however, she lost her path and could see nothing ahead of her but a frightening swamp. Wondering how to get out of that predicament, little Alice had no choice but to pray, when suddenly...")
    alice.vai = falaalice.vai
    tarzan = Elemento(img=TARZAN, tit="Tarzan", style=dict(left=150, top=150, width=60, height=200))
    tarzan.entra(pantano)
    falatarzan = Texto(pantano, "-Oooo oooo ooooo ooooo!!!!- Out of nowhere comes Tarzan, outraged at the hour the girl was out on the street. -What are you doing here? It's late!")
    tarzan.vai = falatarzan.vai
    pantano.vai()
criarcenas()
|
[
"38007182+kwarwp@users.noreply.github.com"
] |
38007182+kwarwp@users.noreply.github.com
|
3b01f164671274ebbba3a96bf4a791187d891367
|
640a9e83152b32f2e84f73cf8271215e5fa15c57
|
/oscar/bin/tox-quickstart
|
b8d067fa48faf81e894467d393f42dfcb33545b1
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
WillisXChen/django-oscar
|
b230fbf618cd5c5e49d51b8e699202c605ac0dd2
|
78c8516b4dd49a25d6d493287919d033dd674298
|
refs/heads/master
| 2021-01-20T21:37:29.070560
| 2016-02-09T21:44:26
| 2016-02-09T21:44:26
| 42,301,086
| 0
| 0
| null | 2015-09-11T09:55:07
| 2015-09-11T09:55:07
| null |
UTF-8
|
Python
| false
| false
| 240
|
#!/root/git/django-oscar/oscar/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tox._quickstart import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"misweyu2007@gmail.com"
] |
misweyu2007@gmail.com
|
|
a2d5bc16a0ada0db1486395bebcfc11c5ed9229b
|
ca4846107d9fc74a963f544881f4b2bf0c9af825
|
/faceRank.py
|
32f99e0e3579aa06342ac4dc5cef51aaa71d596f
|
[] |
no_license
|
marqui5/py
|
54dd6a0b566e4461f591d407baea8c15661e2dd9
|
6613b1a0df5d153d11cea871f81d8d4a9a03e708
|
refs/heads/master
| 2023-03-08T19:52:31.929533
| 2021-02-23T10:05:46
| 2021-02-23T10:05:46
| 294,984,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,160
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
pip install pillow
pip install baidu-aip
pip install tkinter
"""
import PIL
import time
import base64
import tkinter as tk
from PIL import Image
from PIL import ImageTk
from aip import AipFace
from tkinter.filedialog import askopenfilename
# Configure the Baidu AIP parameters
APP_ID = '15768642'
API_KEY = 'xhiiGmGPRCRj10XIqVlVeCky'
SECRET_KEY = 'ZDMMAO7StwTKzW8BspVQxvoGtdgSW4yI'
a_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
image_type = 'BASE64'
options = {'face_field': 'age,gender,beauty'}
def get_file_content(file_path):
    """Read a file and return its base64-encoded content."""
    with open(file_path, 'rb') as fr:
        content = base64.b64encode(fr.read())
        return content.decode('utf8')
def face_score(file_path):
    """Run face detection and return age, beauty score, and gender."""
    result = a_face.detect(get_file_content(file_path), image_type, options)
    print(result)
    age = result['result']['face_list'][0]['age']
    beauty = result['result']['face_list'][0]['beauty']
    gender = result['result']['face_list'][0]['gender']['type']
    return age, beauty, gender
class ScoreSystem():
    """Face-scoring GUI system."""
    root = tk.Tk()
    # Set the window size
    root.geometry('800x500')
    # Set the window title
    root.title('女神/男神颜值打分系统')
    # Set the background color
    canvas = tk.Canvas(root,
                       width=800,  # Canvas width
                       height=500,  # Canvas height
                       bg='#E6E6FA')  # Canvas background color
    canvas.pack()
    def start_interface(self):
        """Main entry point for the UI."""
        self.title()
        self.time_component()
        # Open a local file
        tk.Button(self.root, text='打开文件', command=self.show_original_pic).place(x=50, y=150)
        # Run the beauty scoring
        tk.Button(self.root, text='运行程序', command=self.open_files2).place(x=50, y=230)
        # Show the help document
        tk.Button(self.root, text='帮助文档', command=self.show_help).place(x=50, y=310)
        # Exit the application
        tk.Button(self.root, text='退出软件', command=self.quit).place(x=50, y=390)
        # Show the image frame title
        tk.Label(self.root, text='原图', font=10).place(x=380, y=120)
        # Label that will hold the image
        self.label_img_original = tk.Label(self.root)
        # Set the display frame background
        self.cv_orinial = tk.Canvas(self.root, bg='white', width=270, height=270)
        # Draw the display frame border
        self.cv_orinial.create_rectangle(8, 8, 260, 260, width=1, outline='red')
        # Set the frame position
        self.cv_orinial.place(x=265, y=150)
        # Set the image position
        self.label_img_original.place(x=265, y=150)
        # Score labels
        tk.Label(self.root, text='性别', font=10).place(x=680, y=150)
        self.text1 = tk.Text(self.root, width=10, height=2)
        tk.Label(self.root, text='年龄', font=10).place(x=680, y=250)
        self.text2 = tk.Text(self.root, width=10, height=2)
        tk.Label(self.root, text='评分', font=10).place(x=680, y=350)
        self.text3 = tk.Text(self.root, width=10, height=2)
        # Place the text boxes
        self.text1.place(x=680, y=175)
        self.text2.place(x=680, y=285)
        self.text3.place(x=680, y=385)
        # Start the main loop
        self.root.mainloop()
    def show_original_pic(self):
        """Load and display the selected image."""
        self.path_ = askopenfilename(title='选择文件')
        # Process the file
        img = Image.open(fr'{self.path_}')
        img = img.resize((270, 270), PIL.Image.ANTIALIAS)  # Resize the image to 270x270
        # Create a tkinter image object
        img_png_original = ImageTk.PhotoImage(img)
        # Attach the image object to the label
        self.label_img_original.config(image=img_png_original)
        self.label_img_original.image = img_png_original
        self.cv_orinial.create_image(5, 5, anchor='nw', image=img_png_original)
    def open_files2(self):
        # Fetch the age, score, and gender returned by the Baidu API
        age, score, gender = face_score(self.path_)
        # Clear the text boxes and insert the new values
        self.text1.delete(1.0, tk.END)
        self.text1.tag_config('red', foreground='RED')
        self.text1.insert(tk.END, gender, 'red')
        self.text2.delete(1.0, tk.END)
        self.text2.tag_config('red', foreground='RED')
        self.text2.insert(tk.END, age, 'red')
        self.text3.delete(1.0, tk.END)
        self.text3.tag_config('red', foreground='RED')
        self.text3.insert(tk.END, score, 'red')
    def show_help(self):
        """Show the help document."""
        pass
    def quit(self):
        """Quit the application."""
        self.root.quit()
    def get_time(self, lb):
        """Fetch and display the current time."""
        time_str = time.strftime("%Y-%m-%d %H:%M:%S")  # Current time as a string
        lb.configure(text=time_str)  # Update the label text
        self.root.after(1000, self.get_time, lb)  # Call get_time again after 1 s
    def time_component(self):
        """Clock widget."""
        lb = tk.Label(self.root, text='', fg='blue', font=("黑体", 15))
        lb.place(relx=0.75, rely=0.90)
        self.get_time(lb)
    def title(self):
        """Title banner."""
        lb = tk.Label(self.root, text='女神/男神颜值打分系统',
                      bg='#6495ED',
                      fg='lightpink', font=('华文新魏', 32),
                      width=20,
                      height=2,
                      # relief=tk.SUNKEN
                      )
        lb.place(x=200, y=10)
score_system = ScoreSystem()
score_system.start_interface()
|
[
"marquisv587@gmail.com"
] |
marquisv587@gmail.com
|
a6ab0392aad5709344249a58c8fb7114bd548332
|
9497f6239025668668206d98c9872b96e84bf182
|
/utils/config.py
|
fa2bc8556e1e6782b0a3e23ebb0f4f15190a9c45
|
[] |
no_license
|
richardruancw/StarcraftElite
|
7ff33d4adfc6668c550cb4b8ba76c1e870b1fcd2
|
b77b3c2721fd9edce79b19b214e7d01820c269e7
|
refs/heads/master
| 2021-04-28T05:53:36.723189
| 2018-03-15T18:40:09
| 2018-03-15T18:40:09
| 122,189,323
| 2
| 1
| null | 2018-03-10T05:04:43
| 2018-02-20T11:25:03
|
Python
|
UTF-8
|
Python
| false
| false
| 992
|
py
|
import tensorflow as tf
class config():
# env specific
evaluate = False
# output config
output_path = "../results/policy_gradient/"
model_output = output_path + "model.weights/"
log_path = output_path + "log.txt"
plot_output = output_path + "Policy_gradient.png"
summary_freq = 1
save_freq = 5
# model and training config
mode = "MC" # value can be "TD" or "MC"
num_batches = 100 # number of batches trained on
batch_size = 100 # number of steps used to compute each policy update, default is 1000
max_ep_len = 100 # maximum episode length
eval_batch_size = batch_size
rand_begin = 0.2
rand_end = 0
rand_steps = num_batches
learning_rate = 3e-2
gamma = 0.98 # the discount factor
use_baseline = True
normalize_advantage = True
history_mul = 1
restore = False
# since we start new episodes for each batch
assert max_ep_len <= batch_size
assert rand_steps <= num_batches
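# Consumption sketch (import path assumed from this repo layout; the options
# are plain class attributes, so no instance is needed):
#
#     from utils.config import config
#     print(config.batch_size, config.learning_rate)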
|
[
"vinyao1994@gmail.com"
] |
vinyao1994@gmail.com
|
ca8e18abaf220c482618359d83c2e717ef3daf09
|
d842a95213e48e30139b9a8227fb7e757f834784
|
/gcloud/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudscheduler/v1beta1/cloudscheduler_v1beta1_messages.py
|
acf9d468339b98a481b54644189c37358b460936
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/JobSniperRails
|
f37a15edb89f54916cc272884b36dcd83cdc868a
|
39e7f871887176770de0f4fc6789e9ddc7f32b1f
|
refs/heads/master
| 2022-11-22T18:12:37.972441
| 2019-09-20T22:43:14
| 2019-09-20T22:43:14
| 282,293,504
| 0
| 0
|
MIT
| 2020-07-24T18:47:35
| 2020-07-24T18:47:34
| null |
UTF-8
|
Python
| false
| false
| 45,215
|
py
|
"""Generated message classes for cloudscheduler version v1beta1.
Creates and manages jobs run on a regular recurring schedule.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'cloudscheduler'
class AppEngineHttpTarget(_messages.Message):
r"""App Engine target. The job will be pushed to a job handler by means of
an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job
is acknowledged by means of an HTTP response code in the range [200 - 299].
Error 503 is considered an App Engine system error instead of an application
error. Requests returning error 503 will be retried regardless of retry
configuration and not counted against retry counts. Any other response code,
or a failure to receive a response before the deadline, constitutes a failed
attempt.
Enums:
HttpMethodValueValuesEnum: The HTTP method to use for the request. PATCH
and OPTIONS are not permitted.
Messages:
HeadersValue: HTTP request headers. This map contains the header field
names and values. Headers can be set when the job is created. Cloud
Scheduler sets some headers to default values: * `User-Agent`: By
default, this header is `"AppEngine-Google;
(+http://code.google.com/appengine)"`. This header can be modified,
but Cloud Scheduler will append `"AppEngine-Google;
(+http://code.google.com/appengine)"` to the modified `User-Agent`. *
      `X-CloudScheduler`: This header will be set to true. If the job has a
body, Cloud Scheduler sets the following headers: * `Content-Type`: By
default, the `Content-Type` header is set to `"application/octet-
stream"`. The default can be overridden by explictly setting `Content-
Type` to a particular media type when the job is created. For
example, `Content-Type` can be set to `"application/json"`. * `Content-
Length`: This is computed by Cloud Scheduler. This value is output
only. It cannot be changed. The headers below are output only. They
cannot be set or overridden: * `X-Google-*`: For Google internal use
only. * `X-AppEngine-*`: For Google internal use only. In addition,
some App Engine headers, which contain job-specific information, are
      also sent to the job handler.
Fields:
appEngineRouting: App Engine Routing setting for the job.
body: Body. HTTP request body. A request body is allowed only if the HTTP
method is POST or PUT. It will result in invalid argument error to set a
body on a job with an incompatible HttpMethod.
headers: HTTP request headers. This map contains the header field names
and values. Headers can be set when the job is created. Cloud Scheduler
sets some headers to default values: * `User-Agent`: By default, this
header is `"AppEngine-Google; (+http://code.google.com/appengine)"`.
This header can be modified, but Cloud Scheduler will append
`"AppEngine-Google; (+http://code.google.com/appengine)"` to the
modified `User-Agent`. * `X-CloudScheduler`: This header will be set to
      true. If the job has a body, Cloud Scheduler sets the following
headers: * `Content-Type`: By default, the `Content-Type` header is set
to `"application/octet-stream"`. The default can be overridden by
      explicitly setting `Content-Type` to a particular media type when the
job is created. For example, `Content-Type` can be set to
`"application/json"`. * `Content-Length`: This is computed by Cloud
Scheduler. This value is output only. It cannot be changed. The
headers below are output only. They cannot be set or overridden: *
`X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For
Google internal use only. In addition, some App Engine headers, which
      contain job-specific information, are also sent to the job handler.
httpMethod: The HTTP method to use for the request. PATCH and OPTIONS are
not permitted.
relativeUri: The relative URI. The relative URL must begin with "/" and
must be a valid HTTP relative URL. It can contain a path, query string
arguments, and `#` fragments. If the relative URL is empty, then the
root path "/" will be used. No spaces are allowed, and the maximum
length allowed is 2083 characters.
"""
class HttpMethodValueValuesEnum(_messages.Enum):
r"""The HTTP method to use for the request. PATCH and OPTIONS are not
permitted.
Values:
HTTP_METHOD_UNSPECIFIED: HTTP method unspecified. Defaults to POST.
POST: HTTP POST
GET: HTTP GET
HEAD: HTTP HEAD
PUT: HTTP PUT
DELETE: HTTP DELETE
PATCH: HTTP PATCH
OPTIONS: HTTP OPTIONS
"""
HTTP_METHOD_UNSPECIFIED = 0
POST = 1
GET = 2
HEAD = 3
PUT = 4
DELETE = 5
PATCH = 6
OPTIONS = 7
@encoding.MapUnrecognizedFields('additionalProperties')
class HeadersValue(_messages.Message):
r"""HTTP request headers. This map contains the header field names and
values. Headers can be set when the job is created. Cloud Scheduler sets
some headers to default values: * `User-Agent`: By default, this header
is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This
header can be modified, but Cloud Scheduler will append `"AppEngine-
Google; (+http://code.google.com/appengine)"` to the modified `User-
Agent`. * `X-CloudScheduler`: This header will be set to true. If the job
    has a body, Cloud Scheduler sets the following headers:    * `Content-
Type`: By default, the `Content-Type` header is set to `"application
    /octet-stream"`. The default can be overridden by explicitly setting
`Content-Type` to a particular media type when the job is created. For
example, `Content-Type` can be set to `"application/json"`. * `Content-
Length`: This is computed by Cloud Scheduler. This value is output only.
It cannot be changed. The headers below are output only. They cannot be
set or overridden: * `X-Google-*`: For Google internal use only. *
`X-AppEngine-*`: For Google internal use only. In addition, some App
Engine headers, which contain job-specific information, are also be sent
to the job handler.
Messages:
AdditionalProperty: An additional property for a HeadersValue object.
Fields:
additionalProperties: Additional properties of type HeadersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a HeadersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
appEngineRouting = _messages.MessageField('AppEngineRouting', 1)
body = _messages.BytesField(2)
headers = _messages.MessageField('HeadersValue', 3)
httpMethod = _messages.EnumField('HttpMethodValueValuesEnum', 4)
relativeUri = _messages.StringField(5)
class AppEngineRouting(_messages.Message):
r"""App Engine Routing. For more information about services, versions, and
instances see [An Overview of App
Engine](https://cloud.google.com/appengine/docs/python/an-overview-of-app-
engine), [Microservices Architecture on Google App
Engine](https://cloud.google.com/appengine/docs/python/microservices-on-app-
engine), [App Engine Standard request
routing](https://cloud.google.com/appengine/docs/standard/python/how-
requests-are-routed), and [App Engine Flex request
routing](https://cloud.google.com/appengine/docs/flexible/python/how-
requests-are-routed).
Fields:
host: Output only. The host that the job is sent to. For more information
about how App Engine requests are routed, see
[here](https://cloud.google.com/appengine/docs/standard/python/how-
      requests-are-routed). The host is constructed as: * `host =
      [application_domain_name]` `| [service] + '.' + [application_domain_name]`
      `| [version] + '.' + [application_domain_name]`
      `| [version_dot_service] + '.' + [application_domain_name]`
      `| [instance] + '.' + [application_domain_name]`
      `| [instance_dot_service] + '.' + [application_domain_name]`
      `| [instance_dot_version] + '.' + [application_domain_name]`
      `| [instance_dot_version_dot_service] + '.' + [application_domain_name]`
      * `application_domain_name` = The
domain name of the app, for example <app-id>.appspot.com, which is
associated with the job's project ID. * `service =` service *
`version =` version * `version_dot_service =` version `+ '.' +`
service * `instance =` instance * `instance_dot_service =` instance
`+ '.' +` service * `instance_dot_version =` instance `+ '.' +`
version * `instance_dot_version_dot_service =` instance `+ '.' +`
version `+ '.' +` service If service is empty, then the job will be
sent to the service which is the default service when the job is
attempted. If version is empty, then the job will be sent to the
version which is the default version when the job is attempted. If
instance is empty, then the job will be sent to an instance which is
available when the job is attempted. If service, version, or instance
is invalid, then the job will be sent to the default version of the
default service when the job is attempted.
instance: App instance. By default, the job is sent to an instance which
is available when the job is attempted. Requests can only be sent to a
specific instance if [manual scaling is used in App Engine
Standard](https://cloud.google.com/appengine/docs/python/an-overview-of-
app-engine?hl=en_US#scaling_types_and_instance_classes). App Engine Flex
does not support instances. For more information, see [App Engine
Standard request
routing](https://cloud.google.com/appengine/docs/standard/python/how-
requests-are-routed) and [App Engine Flex request
routing](https://cloud.google.com/appengine/docs/flexible/python/how-
requests-are-routed).
service: App service. By default, the job is sent to the service which is
the default service when the job is attempted.
version: App version. By default, the job is sent to the version which is
the default version when the job is attempted.
"""
host = _messages.StringField(1)
instance = _messages.StringField(2)
service = _messages.StringField(3)
version = _messages.StringField(4)
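# --- Illustrative sketch (not part of the generated module). Shows how the
# AppEngineRouting host described above is assembled: the non-empty routing
# parts are joined most-specific-first (instance, version, service) and
# prefixed to the app's domain name. The helper name and example values are
# assumptions for demonstration.
def _example_routing_host(application_domain_name, service='', version='', instance=''):
    parts = [p for p in (instance, version, service) if p]
    if not parts:
        return application_domain_name
    return '.'.join(parts) + '.' + application_domain_name
# _example_routing_host('my-app.appspot.com', service='worker', version='v2')
# returns 'v2.worker.my-app.appspot.com'.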
class CloudschedulerProjectsLocationsGetRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsGetRequest object.
Fields:
name: Resource name for the location.
"""
name = _messages.StringField(1, required=True)
class CloudschedulerProjectsLocationsJobsCreateRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsCreateRequest object.
Fields:
job: A Job resource to be passed as the request body.
parent: Required. The location name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID`.
"""
job = _messages.MessageField('Job', 1)
parent = _messages.StringField(2, required=True)
class CloudschedulerProjectsLocationsJobsDeleteRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsDeleteRequest object.
Fields:
name: Required. The job name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
"""
name = _messages.StringField(1, required=True)
class CloudschedulerProjectsLocationsJobsGetRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsGetRequest object.
Fields:
name: Required. The job name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
"""
name = _messages.StringField(1, required=True)
class CloudschedulerProjectsLocationsJobsListRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsListRequest object.
Fields:
pageSize: Requested page size. The maximum page size is 500. If
unspecified, the page size will be the maximum. Fewer jobs than
requested might be returned, even if more jobs exist; use
next_page_token to determine if more jobs exist.
pageToken: A token identifying a page of results the server will return.
To request the first page results, page_token must be empty. To request
the next page of results, page_token must be the value of
next_page_token returned from the previous call to ListJobs. It is an
error to switch the value of filter or order_by while iterating through
pages.
parent: Required. The location name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
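# --- Illustrative sketch (not part of the generated module). Drains every
# page of ListJobs using the page_token/next_page_token contract described
# above. `client` is assumed to be the generated apitools client exposing a
# projects_locations_jobs service with a List method.
def _example_list_all_jobs(client, parent):
    jobs, token = [], None
    while True:
        response = client.projects_locations_jobs.List(
            CloudschedulerProjectsLocationsJobsListRequest(
                parent=parent, pageToken=token))
        jobs.extend(response.jobs)
        token = response.nextPageToken
        if not token:  # an empty token means there are no more pages
            return jobs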
class CloudschedulerProjectsLocationsJobsPatchRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsPatchRequest object.
Fields:
job: A Job resource to be passed as the request body.
name: Optionally caller-specified in CreateJob, after which it becomes
output only. The job name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. * `PROJECT_ID`
can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons
(:), or periods (.). For more information, see [Identifying
projects](https://cloud.google.com/resource-manager/docs/creating-
managing-projects#identifying_projects) * `LOCATION_ID` is the canonical
ID for the job's location. The list of available locations can be
obtained by calling ListLocations. For more information, see
https://cloud.google.com/about/locations/. * `JOB_ID` can contain only
letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_).
The maximum length is 500 characters.
updateMask: A mask used to specify which fields of the job are being
updated.
"""
job = _messages.MessageField('Job', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class CloudschedulerProjectsLocationsJobsPauseRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsPauseRequest object.
Fields:
name: Required. The job name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
pauseJobRequest: A PauseJobRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
pauseJobRequest = _messages.MessageField('PauseJobRequest', 2)
class CloudschedulerProjectsLocationsJobsResumeRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsResumeRequest object.
Fields:
name: Required. The job name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
resumeJobRequest: A ResumeJobRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
resumeJobRequest = _messages.MessageField('ResumeJobRequest', 2)
class CloudschedulerProjectsLocationsJobsRunRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsJobsRunRequest object.
Fields:
name: Required. The job name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
runJobRequest: A RunJobRequest resource to be passed as the request body.
"""
name = _messages.StringField(1, required=True)
runJobRequest = _messages.MessageField('RunJobRequest', 2)
class CloudschedulerProjectsLocationsListRequest(_messages.Message):
r"""A CloudschedulerProjectsLocationsListRequest object.
Fields:
filter: The standard list filter.
name: The resource that owns the locations collection, if applicable.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class HttpTarget(_messages.Message):
r"""Http target. The job will be pushed to the job handler by means of an
HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is
acknowledged by means of an HTTP response code in the range [200 - 299]. A
failure to receive a response constitutes a failed execution. For a
redirected request, the response returned by the redirected request is
considered.
Enums:
HttpMethodValueValuesEnum: Which HTTP method to use for the request.
Messages:
HeadersValue: The user can specify HTTP request headers to send with the
job's HTTP request. This map contains the header field names and values.
Repeated headers are not supported, but a header value can contain
commas. These headers represent a subset of the headers that will
accompany the job's HTTP request. Some HTTP request headers will be
ignored or replaced. A partial list of headers that will be ignored or
      replaced is below: * `Host`: This will be computed by Cloud Scheduler and
derived from uri. * `Content-Length`: This will be computed by Cloud
Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-
Scheduler"`. * `X-Google-*`: Google internal use only. *
`X-AppEngine-*`: Google internal use only. The total size of headers
must be less than 80KB.
Fields:
body: HTTP request body. A request body is allowed only if the HTTP method
is POST, PUT, or PATCH. It is an error to set body on a job with an
incompatible HttpMethod.
headers: The user can specify HTTP request headers to send with the job's
HTTP request. This map contains the header field names and values.
Repeated headers are not supported, but a header value can contain
commas. These headers represent a subset of the headers that will
accompany the job's HTTP request. Some HTTP request headers will be
ignored or replaced. A partial list of headers that will be ignored or
      replaced is below: * `Host`: This will be computed by Cloud Scheduler and
derived from uri. * `Content-Length`: This will be computed by Cloud
Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-
Scheduler"`. * `X-Google-*`: Google internal use only. *
`X-AppEngine-*`: Google internal use only. The total size of headers
must be less than 80KB.
httpMethod: Which HTTP method to use for the request.
oauthToken: If specified, an [OAuth
token](https://developers.google.com/identity/protocols/OAuth2) will be
generated and attached as an `Authorization` header in the HTTP request.
This type of authorization should generally only be used when calling
Google APIs hosted on *.googleapis.com.
oidcToken: If specified, an
[OIDC](https://developers.google.com/identity/protocols/OpenIDConnect)
token will be generated and attached as an `Authorization` header in the
HTTP request. This type of authorization can be used for many
scenarios, including calling Cloud Run, or endpoints where you intend to
validate the token yourself.
uri: Required. The full URI path that the request will be sent to. This
string must begin with either "http://" or "https://". Some examples of
valid values for uri are: `http://acme.com` and
`https://acme.com/sales:8080`. Cloud Scheduler will encode some
characters for safety and compatibility. The maximum allowed URL length
is 2083 characters after encoding.
"""
class HttpMethodValueValuesEnum(_messages.Enum):
r"""Which HTTP method to use for the request.
Values:
HTTP_METHOD_UNSPECIFIED: HTTP method unspecified. Defaults to POST.
POST: HTTP POST
GET: HTTP GET
HEAD: HTTP HEAD
PUT: HTTP PUT
DELETE: HTTP DELETE
PATCH: HTTP PATCH
OPTIONS: HTTP OPTIONS
"""
HTTP_METHOD_UNSPECIFIED = 0
POST = 1
GET = 2
HEAD = 3
PUT = 4
DELETE = 5
PATCH = 6
OPTIONS = 7
@encoding.MapUnrecognizedFields('additionalProperties')
class HeadersValue(_messages.Message):
r"""The user can specify HTTP request headers to send with the job's HTTP
request. This map contains the header field names and values. Repeated
headers are not supported, but a header value can contain commas. These
headers represent a subset of the headers that will accompany the job's
HTTP request. Some HTTP request headers will be ignored or replaced. A
    partial list of headers that will be ignored or replaced is below: * `Host`:
This will be computed by Cloud Scheduler and derived from uri. * `Content-
Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This
will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal
use only. * `X-AppEngine-*`: Google internal use only. The total size of
headers must be less than 80KB.
Messages:
AdditionalProperty: An additional property for a HeadersValue object.
Fields:
additionalProperties: Additional properties of type HeadersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a HeadersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
body = _messages.BytesField(1)
headers = _messages.MessageField('HeadersValue', 2)
httpMethod = _messages.EnumField('HttpMethodValueValuesEnum', 3)
oauthToken = _messages.MessageField('OAuthToken', 4)
oidcToken = _messages.MessageField('OidcToken', 5)
uri = _messages.StringField(6)
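# --- Illustrative sketch (not part of the generated module). An HttpTarget
# that POSTs to an HTTPS endpoint with an OIDC token, as the oidcToken field
# above describes; the URL and service account are examples only.
def _example_http_target():
    return HttpTarget(
        uri='https://example.com/tasks/run',
        httpMethod=HttpTarget.HttpMethodValueValuesEnum.POST,
        body=b'{"task": "nightly"}',
        oidcToken=OidcToken(
            serviceAccountEmail='scheduler@my-project.iam.gserviceaccount.com'))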
class Job(_messages.Message):
r"""Configuration for a job. The maximum allowed size for a job is 100KB.
Enums:
StateValueValuesEnum: Output only. State of the job.
Fields:
appEngineHttpTarget: App Engine HTTP target.
attemptDeadline: The deadline for job attempts. If the request handler
does not respond by this deadline then the request is cancelled and the
attempt is marked as a `DEADLINE_EXCEEDED` failure. The failed attempt
can be viewed in execution logs. Cloud Scheduler will retry the job
according to the RetryConfig. The allowed duration for this deadline
is: * For HTTP targets, between 15 seconds and 30 minutes. * For App
Engine HTTP targets, between 15 seconds and 24 hours. * For PubSub
targets, this field is ignored.
description: Optionally caller-specified in CreateJob or UpdateJob. A
human-readable description for the job. This string must not contain
more than 500 characters.
httpTarget: HTTP target.
lastAttemptTime: Output only. The time the last job attempt started.
name: Optionally caller-specified in CreateJob, after which it becomes
output only. The job name. For example:
`projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. * `PROJECT_ID`
can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons
(:), or periods (.). For more information, see [Identifying
projects](https://cloud.google.com/resource-manager/docs/creating-
managing-projects#identifying_projects) * `LOCATION_ID` is the canonical
ID for the job's location. The list of available locations can be
obtained by calling ListLocations. For more information, see
https://cloud.google.com/about/locations/. * `JOB_ID` can contain only
letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_).
The maximum length is 500 characters.
pubsubTarget: Pub/Sub target.
retryConfig: Settings that determine the retry behavior.
schedule: Required, except when used with UpdateJob. Describes the
schedule on which the job will be executed. The schedule can be either
of the following types: *
[Crontab](http://en.wikipedia.org/wiki/Cron#Overview) * English-like
[schedule](https://cloud.google.com/scheduler/docs/configuring/cron-job-
schedules) As a general rule, execution `n + 1` of a job will not begin
until execution `n` has finished. Cloud Scheduler will never allow two
simultaneously outstanding executions. For example, this implies that if
the `n+1`th execution is scheduled to run at 16:00 but the `n`th
execution takes until 16:15, the `n+1`th execution will not start until
`16:15`. A scheduled start time will be delayed if the previous
execution has not ended when its scheduled time occurs. If retry_count
> 0 and a job attempt fails, the job will be tried a total of
retry_count times, with exponential backoff, until the next scheduled
start time.
scheduleTime: Output only. The next time the job is scheduled. Note that
this may be a retry of a previously failed attempt or the next execution
time according to the schedule.
state: Output only. State of the job.
status: Output only. The response from the target for the last attempted
execution.
timeZone: Specifies the time zone to be used in interpreting schedule. The
value of this field must be a time zone name from the [tz
database](http://en.wikipedia.org/wiki/Tz_database). Note that some
time zones include a provision for daylight savings time. The rules for
daylight saving time are determined by the chosen tz. For UTC use the
string "utc". If a time zone is not specified, the default will be in
UTC (also known as GMT).
userUpdateTime: Output only. The creation time of the job.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. State of the job.
Values:
STATE_UNSPECIFIED: Unspecified state.
ENABLED: The job is executing normally.
PAUSED: The job is paused by the user. It will not execute. A user can
intentionally pause the job using PauseJobRequest.
DISABLED: The job is disabled by the system due to error. The user
cannot directly set a job to be disabled.
UPDATE_FAILED: The job state resulting from a failed
CloudScheduler.UpdateJob operation. To recover a job from this state,
retry CloudScheduler.UpdateJob until a successful response is
received.
"""
STATE_UNSPECIFIED = 0
ENABLED = 1
PAUSED = 2
DISABLED = 3
UPDATE_FAILED = 4
appEngineHttpTarget = _messages.MessageField('AppEngineHttpTarget', 1)
attemptDeadline = _messages.StringField(2)
description = _messages.StringField(3)
httpTarget = _messages.MessageField('HttpTarget', 4)
lastAttemptTime = _messages.StringField(5)
name = _messages.StringField(6)
pubsubTarget = _messages.MessageField('PubsubTarget', 7)
retryConfig = _messages.MessageField('RetryConfig', 8)
schedule = _messages.StringField(9)
scheduleTime = _messages.StringField(10)
state = _messages.EnumField('StateValueValuesEnum', 11)
status = _messages.MessageField('Status', 12)
timeZone = _messages.StringField(13)
userUpdateTime = _messages.StringField(14)
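# --- Illustrative sketch (not part of the generated module). A minimal Job
# message with a cron schedule, an explicit time zone, and an HTTP target,
# per the field docstrings above; all values are examples only.
def _example_job():
    return Job(
        name='projects/my-project/locations/us-central1/jobs/nightly-sync',
        schedule='0 2 * * *',  # every day at 02:00
        timeZone='utc',
        httpTarget=HttpTarget(uri='https://example.com/tasks/run'))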
class ListJobsResponse(_messages.Message):
r"""Response message for listing jobs using ListJobs.
Fields:
jobs: The list of jobs.
nextPageToken: A token to retrieve next page of results. Pass this value
in the page_token field in the subsequent call to ListJobs to retrieve
the next page of results. If this is empty it indicates that there are
no more results through which to paginate. The page token is valid for
only 2 hours.
"""
jobs = _messages.MessageField('Job', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListLocationsResponse(_messages.Message):
r"""The response message for Locations.ListLocations.
Fields:
locations: A list of locations that matches the specified filter in the
request.
nextPageToken: The standard List next-page token.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Location(_messages.Message):
r"""A resource that represents Google Cloud Platform location.
Messages:
LabelsValue: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
MetadataValue: Service-specific metadata. For example the available
capacity at the given location.
Fields:
displayName: The friendly name for this location, typically a nearby city
name. For example, "Tokyo".
labels: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
locationId: The canonical id for this location. For example: `"us-east1"`.
metadata: Service-specific metadata. For example the available capacity at
the given location.
name: Resource name for the location, which may vary between
implementations. For example: `"projects/example-project/locations/us-
east1"`
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata. For example the available capacity at the
given location.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
displayName = _messages.StringField(1)
labels = _messages.MessageField('LabelsValue', 2)
locationId = _messages.StringField(3)
metadata = _messages.MessageField('MetadataValue', 4)
name = _messages.StringField(5)
class OAuthToken(_messages.Message):
r"""Contains information needed for generating an [OAuth
token](https://developers.google.com/identity/protocols/OAuth2). This type
of authorization should generally only be used when calling Google APIs
hosted on *.googleapis.com.
Fields:
scope: OAuth scope to be used for generating OAuth access token. If not
specified, "https://www.googleapis.com/auth/cloud-platform" will be
used.
serviceAccountEmail: [Service account
email](https://cloud.google.com/iam/docs/service-accounts) to be used
for generating OAuth token. The service account must be within the same
project as the job. The caller must have iam.serviceAccounts.actAs
permission for the service account.
"""
scope = _messages.StringField(1)
serviceAccountEmail = _messages.StringField(2)
class OidcToken(_messages.Message):
r"""Contains information needed for generating an [OpenID Connect
token](https://developers.google.com/identity/protocols/OpenIDConnect). This
type of authorization can be used for many scenarios, including calling
Cloud Run, or endpoints where you intend to validate the token yourself.
Fields:
audience: Audience to be used when generating OIDC token. If not
specified, the URI specified in target will be used.
serviceAccountEmail: [Service account
email](https://cloud.google.com/iam/docs/service-accounts) to be used
for generating OIDC token. The service account must be within the same
project as the job. The caller must have iam.serviceAccounts.actAs
permission for the service account.
"""
audience = _messages.StringField(1)
serviceAccountEmail = _messages.StringField(2)
class PauseJobRequest(_messages.Message):
r"""Request message for PauseJob."""
class PubsubMessage(_messages.Message):
r"""A message that is published by publishers and consumed by subscribers.
The message must contain either a non-empty data field or at least one
attribute. Note that client libraries represent this object differently
  depending on the language. See the corresponding [client library
  documentation](https://cloud.google.com/pubsub/docs/reference/libraries)
  for more information. See [Quotas and
  limits](https://cloud.google.com/pubsub/quotas) for more information about
  message limits.
Messages:
AttributesValue: Optional attributes for this message.
Fields:
attributes: Optional attributes for this message.
data: The message data field. If this field is empty, the message must
contain at least one attribute.
messageId: ID of this message, assigned by the server when the message is
published. Guaranteed to be unique within the topic. This value may be
read by a subscriber that receives a `PubsubMessage` via a `Pull` call
or a push delivery. It must not be populated by the publisher in a
`Publish` call.
publishTime: The time at which the message was published, populated by the
server when it receives the `Publish` call. It must not be populated by
the publisher in a `Publish` call.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class AttributesValue(_messages.Message):
r"""Optional attributes for this message.
Messages:
AdditionalProperty: An additional property for a AttributesValue object.
Fields:
additionalProperties: Additional properties of type AttributesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a AttributesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
attributes = _messages.MessageField('AttributesValue', 1)
data = _messages.BytesField(2)
messageId = _messages.StringField(3)
publishTime = _messages.StringField(4)
class PubsubTarget(_messages.Message):
r"""Pub/Sub target. The job will be delivered by publishing a message to the
given Pub/Sub topic.
Messages:
AttributesValue: Attributes for PubsubMessage. Pubsub message must
contain either non-empty data, or at least one attribute.
Fields:
attributes: Attributes for PubsubMessage. Pubsub message must contain
either non-empty data, or at least one attribute.
data: The message payload for PubsubMessage. Pubsub message must contain
either non-empty data, or at least one attribute.
topicName: Required. The name of the Cloud Pub/Sub topic to which messages
will be published when a job is delivered. The topic name must be in the
same format as required by PubSub's [PublishRequest.name](https://cloud.
google.com/pubsub/docs/reference/rpc/google.pubsub.v1#publishrequest),
for example `projects/PROJECT_ID/topics/TOPIC_ID`. The topic must be in
the same project as the Cloud Scheduler job.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class AttributesValue(_messages.Message):
r"""Attributes for PubsubMessage. Pubsub message must contain either non-
empty data, or at least one attribute.
Messages:
AdditionalProperty: An additional property for a AttributesValue object.
Fields:
additionalProperties: Additional properties of type AttributesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a AttributesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
attributes = _messages.MessageField('AttributesValue', 1)
data = _messages.BytesField(2)
topicName = _messages.StringField(3)
class ResumeJobRequest(_messages.Message):
r"""Request message for ResumeJob."""
class RetryConfig(_messages.Message):
r"""Settings that determine the retry behavior. By default, if a job does
  not complete successfully (meaning that an acknowledgement is not received
  from the handler), then it will be retried with exponential backoff according
to the settings in RetryConfig.
Fields:
maxBackoffDuration: The maximum amount of time to wait before retrying a
job after it fails. The default value of this field is 1 hour.
maxDoublings: The time between retries will double `max_doublings` times.
A job's retry interval starts at min_backoff_duration, then doubles
      `max_doublings` times, then increases linearly, and finally retries at
      intervals of max_backoff_duration up to retry_count times. For example,
      if min_backoff_duration is 10s, max_backoff_duration is 300s, and
      `max_doublings` is 3, then a job will first be retried in
10s. The retry interval will double three times, and then increase
linearly by 2^3 * 10s. Finally, the job will retry at intervals of
max_backoff_duration until the job has been attempted retry_count times.
Thus, the requests will retry at 10s, 20s, 40s, 80s, 160s, 240s, 300s,
300s, .... The default value of this field is 5.
maxRetryDuration: The time limit for retrying a failed job, measured from
time when an execution was first attempted. If specified with
retry_count, the job will be retried until both limits are reached. The
default value for max_retry_duration is zero, which means retry duration
is unlimited.
minBackoffDuration: The minimum amount of time to wait before retrying a
job after it fails. The default value of this field is 5 seconds.
retryCount: The number of attempts that the system will make to run a job
using the exponential backoff procedure described by max_doublings. The
default value of retry_count is zero. If retry_count is zero, a job
attempt will *not* be retried if it fails. Instead the Cloud Scheduler
system will wait for the next scheduled execution time. If retry_count
is set to a non-zero number then Cloud Scheduler will retry failed
attempts, using exponential backoff, retry_count times, or until the
next scheduled execution time, whichever comes first. Values greater
than 5 and negative values are not allowed.
"""
maxBackoffDuration = _messages.StringField(1)
maxDoublings = _messages.IntegerField(2, variant=_messages.Variant.INT32)
maxRetryDuration = _messages.StringField(3)
minBackoffDuration = _messages.StringField(4)
retryCount = _messages.IntegerField(5, variant=_messages.Variant.INT32)
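# --- Illustrative sketch (not part of the generated module). Computes the
# retry schedule described in the max_doublings docstring above: double for
# `max_doublings` attempts, then grow linearly by 2**max_doublings *
# min_backoff, capped at max_backoff. The helper name is an assumption.
def _example_retry_intervals(min_backoff, max_backoff, max_doublings, retry_count):
    intervals, interval = [], min_backoff
    for attempt in range(retry_count):
        intervals.append(min(interval, max_backoff))
        if attempt < max_doublings:
            interval *= 2                                   # doubling phase
        else:
            interval += (2 ** max_doublings) * min_backoff  # linear phase
    return intervals
# _example_retry_intervals(10, 300, 3, 8) gives
# [10, 20, 40, 80, 160, 240, 300, 300], matching the 10s, 20s, 40s, 80s,
# 160s, 240s, 300s, 300s sequence above.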
class RunJobRequest(_messages.Message):
r"""Request message for forcing a job to run now using
RunJob."""
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
|
[
"luizfper@gmail.com"
] |
luizfper@gmail.com
|
4ec4a5bfddb39a5eb3fa4043c7cb581d9dabed25
|
62fb0a3f284c8247a0575d3bdc7d209632577fcd
|
/handywsgi/context/request.py
|
fbb1b735a49913e016c35f5f100391b5d613c623
|
[] |
no_license
|
haxwithaxe/handywsgi
|
f8b001e137a71faa4b5037b1fbc2f83c888542b4
|
0cd451033de35868ca846f2d9269230fbbf172cd
|
refs/heads/master
| 2021-01-10T11:36:30.470930
| 2016-01-19T00:44:22
| 2016-01-19T00:45:09
| 45,813,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,972
|
py
|
import cgi
import urllib.parse
class Request:
""" Encapsulation of the data in the request to the server.
Attributes:
        environment (dict): os.environ
wsgi (WSGIData): PEP-3333 wsgi environ
netloc (str): Server name (and port for nonstandard ports).
url (str): The full request URL (cleaned up).
content (object): CONTENT_* CGI variables.
http (object): HTTP_* CGI variables.
query (object): QUERY_* CGI variables.
client (object): CLIENT_* CGI variables.
script (object): SCRIPT_*, PATH_*, and REQUEST_* CGI variables.
server (object): SERVER_* CGI variables.
"""
def __init__(self, environment):
self.environment = environment.copy()
self.wsgi = WSGIData(environment.copy())
self._get_request_parts(environment.copy())
if self.server.port not in (None, '', 80, 443):
self.netloc = '{}:{}'.format(self.server.name, self.server.port)
else:
self.netloc = self.server.name
self.url = urllib.parse.urlunparse(
(self.wsgi.url_scheme, self.netloc, self.query.path, self.query.param, '', '')
)
def _get_request_parts(self, environment):
""" Build up the object model of the incoming data. """
class Content:
length = environment.get('CONTENT_LENGTH')
mime = environment.get('CONTENT_TYPE')
self.content = Content()
class HTTP:
accept = environment.get('HTTP_ACCEPT')
accept_encoding = environment.get('HTTP_ACCEPT_ENCODING')
accept_language = environment.get('HTTP_ACCEPT_LANGUAGE')
cache_control = environment.get('HTTP_CACHE_CONTROL')
connection = environment.get('HTTP_CONNECTION')
hostname = environment.get('HTTP_HOST')
user_agent = environment.get('HTTP_USER_AGENT')
self.http = HTTP()
class Query:
path = environment.get('PATH_INFO')
param = environment.get('QUERY_STRING')
method = environment.get('REQUEST_METHOD')
if param:
full_path = '{}?{}'.format(path, param)
else:
full_path = path
_params = cgi.FieldStorage(fp=environment['wsgi.input'], environ=environment.copy())
params = {p: _params.getvalue(p) for p in _params.keys()}
self.query = Query()
class Client:
address = environment.get('REMOTE_ADDR')
hostname = environment.get('REMOTE_HOST')
self.client = Client()
class Script:
name = environment.get('SCRIPT_NAME')
pwd = environment.get('PWD')
gateway_interface = environment.get('GATEWAY_INTERFACE')
self.script = Script()
class Server:
name = environment.get('SERVER_NAME')
port = environment.get('SERVER_PORT')
protocol = environment.get('SERVER_PROTOCOL')
software = environment.get('SERVER_SOFTWARE')
shlvl = environment.get('SHLVL')
self.server = Server()
class WSGIData:
""" A model of the "wsgi.*" fields in the request data. """
def __init__(self, environment):
self.errors = environment.get('wsgi.errors') # <_io.TextIOWrapper name='<stderr>' mode='w' encoding='UTF-8'>,
self.file_wrapper = environment.get('wsgi.file_wrapper') # <class 'wsgiref.util.FileWrapper'>
self.input_file = environment.get('wsgi.input') # <_io.BufferedReader name=5>
self.post_data = cgi.FieldStorage(fp=environment.get('wsgi.input'), environ=environment, keep_blank_values=True)
self.multiprocess = environment.get('wsgi.multiprocess')
self.multithread = environment.get('wsgi.multithread')
self.run_once = environment.get('wsgi.run_once')
self.url_scheme = environment.get('wsgi.url_scheme')
self.version = environment.get('wsgi.version')
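# --- Illustrative usage sketch (not part of the original module). A minimal
# WSGI app that wraps the environ in Request and echoes the reconstructed
# request URL back to the client.
def example_app(environ, start_response):
    request = Request(environ)
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [request.url.encode('utf-8')]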
|
[
"me@haxwithaxe.net"
] |
me@haxwithaxe.net
|
9663dd080e2c85d7828e4b5a15985d44f99590d1
|
16cbb9cd6105b97587c2421a9d6e84e7f3ce3a05
|
/UBTAutotest_Linebus_Platform_0708/BusProject/Linebus_Platform_API/common/read_datas.py
|
cbffee2328c880f180f203e74579637ff94bf245
|
[] |
no_license
|
Smallblackincontrast/linebus_auto
|
88a433a91b6fd8e93c5c61d5ce7738af18f8afa5
|
ae9b33e00122a9510116fbcd4d3408c22d4e0673
|
refs/heads/main
| 2023-07-21T08:55:05.230244
| 2021-08-24T09:57:08
| 2021-08-24T09:57:08
| 399,413,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,827
|
py
|
# -*- coding:utf-8 -*-
# @Time :2019/4/8 10:07
# @Author :Tester_Liang
# @Email :649626809@qq.com
# @File :read_datas.py
# @software :PyCharm
from openpyxl import load_workbook
from BusProject.Linebus_Platform_API.common.MyLog import MyLogs
import os
class ReadDatas:
def __init__(self, filename):
self.log = MyLogs()
if not os.path.exists(filename):
self.log.error("找不到文件")
else:
self.filename = filename
def read_datas(self, sheetname):
try:
wb = load_workbook(self.filename)
except Exception as e:
self.log.error("打开文件异常{}".format(e))
else:
if sheetname not in wb.sheetnames:
self.log.error("找不到表单")
else:
sheet = wb[sheetname]
test_name = []
for column in range(1, sheet.max_column):
test_name.append(sheet.cell(1, column).value)
testdata = []
for i in range(2, sheet.max_row + 1):
testdict = {}
for j in range(1, 9):
testdict[test_name[j - 1]] = sheet.cell(i, j).value
testdata.append(testdict)
return testdata
def write_back(self, sheetname, row, result, passed):
try:
wb = load_workbook(self.filename)
except Exception as e:
self.log.error("打开文件异常{}".format(e))
else:
if sheetname not in wb.sheetnames:
self.log.error("找不到文件")
else:
sheet = wb[sheetname]
sheet.cell(row, 9).value = result
sheet.cell(row, 10).value = passed
wb.save(self.filename)
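# --- Illustrative usage sketch (not part of the original module). Reads the
# data rows from one sheet and writes a result back to row 2; the workbook
# path and sheet name below are examples only.
# reader = ReadDatas('testdata/api_cases.xlsx')
# cases = reader.read_datas('login')
# reader.write_back('login', 2, '{"code": 0}', 'PASS')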
|
[
"ruanzhe666@163.com"
] |
ruanzhe666@163.com
|
8b791177c20c6544035723587cac13679ca5be30
|
4331b28f22a2efb12d462ae2a8270a9f666b0df1
|
/.history/dvdstore/webapp/views_20190913160121.py
|
76f1fffb46dcfc898dc3c617d665465394a1c248
|
[] |
no_license
|
ZiyaadLakay/csc312.group.project
|
ba772a905e0841b17478eae7e14e43d8b078a95d
|
9cdd9068b5e24980c59a53595a5d513c2e738a5e
|
refs/heads/master
| 2020-07-26T23:30:22.542450
| 2019-09-16T11:46:41
| 2019-09-16T11:46:41
| 200,703,160
| 0
| 0
| null | 2019-08-05T17:52:37
| 2019-08-05T17:52:37
| null |
UTF-8
|
Python
| false
| false
| 5,640
|
py
|
from django.shortcuts import render
from .models import DVD, Transaction, Customer
from django.core.paginator import EmptyPage,PageNotAnInteger, Paginator
from django.db.models import Q
from django.contrib.auth.models import User, auth
from django.shortcuts import render, redirect
from django.contrib import messages
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.decorators import login_required, permission_required
from .form import DocumentForm
#This is the homepage for the User
def home(request):
dvds = DVD.objects.all() #imports dvds from database
query = request.GET.get("query")
gen = request.GET.get("gen")
if query:
dvds = DVD.objects.filter(Q(Title__icontains=query))#Search Function according to name
elif gen:
dvds = DVD.objects.filter(Q(genre__icontains=gen))#Search Function according to name
paginator = Paginator(dvds, 3) # Show 3 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
genre = {'Action', 'Comedy', 'Drama', 'Family', 'Romance'}
    return render(request, 'home.html', {'dvds': dvds, 'genre': genre}) # render the page with both context entries
#This is the page for clerks
@login_required
def clerk(request):
dvds = DVD.objects.all() #imports dvds from database
trans = Transaction.objects.all() #imports dvds from database
users = User.objects.all() #imports dvds from database
customer = Customer.objects.all() #imports dvds from database
query = request.GET.get("query")
if query:
dvds = DVD.objects.filter(Q(Title__icontains=query)) #Search Function according to name
    paginator = Paginator(dvds, 6) # Show 6 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
form=DocumentForm()
context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
return render(request, 'clerk.html',context_dict)
@login_required
def userstbl(request):
dvds = DVD.objects.all() #imports dvds from database
trans = Transaction.objects.all() #imports dvds from database
users = User.objects.all() #imports dvds from database
customer = Customer.objects.all() #imports dvds from database
query = request.GET.get("query")
if query:
users = User.objects.filter(Q(username__icontains=query)) #Search Function according to name
    paginator = Paginator(dvds, 6) # Show 6 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
form=DocumentForm()
context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
return render(request, 'userstbl.html',context_dict)
@login_required
def transactions(request):
dvds = DVD.objects.all() #imports dvds from database
trans = Transaction.objects.all() #imports dvds from database
users = User.objects.all() #imports dvds from database
customer = Customer.objects.all() #imports dvds from database
query = request.GET.get("query")
if query:
trans = Transaction.objects.filter(Q(TransactionNumber__icontains=query)) #Search Function according to name
    paginator = Paginator(dvds, 6) # Show 6 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
form=DocumentForm()
context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
return render(request, 'transactions.html',context_dict)
def register2(request):
    if request.method == 'POST':
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        username = request.POST['username']
        email = request.POST['email']
        password1 = first_name[0] + last_name
        if User.objects.filter(username=username).exists():
            messages.info(request, 'Username Taken')
            return redirect('clerk')
        elif User.objects.filter(email=email).exists():
            messages.info(request, 'Email Taken')
            return redirect('clerk')
        # create_user belongs to the User manager, not Transaction
        user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
        user.save()
        messages.info(request, 'User Created')
    return redirect('/clerk')
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('/clerk')
def booking(request):
username= request.POST['username']
dvdID= request.POST['dvdID']
DVD.objects.filter(id=dvdID).update(BookingPickup=username)
return redirect('home')
def checkout(request):
    dvdID = request.POST['dvdID']
    numOfDays = int(request.POST['numDaysBooked'])
    dvdPrice = float(request.POST['dvdPrice'])
    bill = numOfDays * dvdPrice  # POST values are strings, so cast before multiplying
    DVD.objects.filter(id=dvdID).update(NumDaysBooked=numOfDays, InStock=False)
    # TODO: record the rental. The original source left every Transaction
    # field blank, so the values below still need to be supplied:
    # trans = Transaction(users_ID=..., TransactionNumber=..., RentDate=...,
    #                     DueDate=..., MovieTitle=..., Payment_Method=...,
    #                     Amount=bill)
    # trans.save()
    return render(request, 'clerk.html', {'bill': bill})
def checkoutProceed(request):
dvdID= request.POST['dvdID']
numOfDays=request.POST['numDaysBooked']
DVD.objects.filter(id=dvdID).update(NumDaysBooked=numOfDays,InStock=False)
return redirect('/clerk')
def checkin(request):
dvdID= request.POST['dvdID']
DVD.objects.filter(id=dvdID).update(BookingPickup='None',InStock=True,NumDaysBooked=0)
return redirect('home')
|
[
"uzairjoneswolf@gmail.com"
] |
uzairjoneswolf@gmail.com
|
b32100a480b61370780b3e6aa401b2b07809dc8b
|
0ac4c32a29cfee6ffe18a85304bb8d61a774d9d0
|
/ShowTweetData.py
|
8159c415c0fac1661ebe8752c656795683b03e82
|
[] |
no_license
|
neondude/TweetInfoExtractor
|
32d1c4d95874ef97dd9d4850076c72ab66ee4334
|
c15cf0f8ac904a71dd94b32144b66a52e3bf7837
|
refs/heads/master
| 2021-01-10T07:05:09.009297
| 2016-03-08T01:57:57
| 2016-03-08T01:57:57
| 53,334,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
#!/usr/bin/env python
# Python 2 script (print statements, urllib.urlencode).
import sys
import json
from urllib import urlencode
from auth_and_Secret import TweetOuth
c = None
tweet_id = None
def load_tweets(**kwargs):
args = dict(trim_user=1)
args.update(**kwargs)
url = 'https://api.twitter.com/1.1/statuses/show.json?' + urlencode(args)
user_timeline = TweetOuth.tweet_req(url)
tweet=json.loads(user_timeline)
if type(tweet) == dict and tweet.has_key(u'errors'):
raise Exception(tweet[u'errors'])
print "text: ",tweet[u'text']
print "hashtags: "
for twit in range(len(tweet['entities']['hashtags'])):
print tweet['entities']['hashtags'][twit][u'text']
print "references: "
for twit in range(len(tweet['entities']['user_mentions'])):
print tweet['entities']['user_mentions'][twit][u'screen_name']
print "Favorite Count: ", tweet['favorite_count']
print "Retweet Count: ", tweet['retweet_count']
def print_help(args):
print >>sys.stderr, '''
Usage:
%s <tweet_id>
show data of tweet
''' % args[0]
def main(*args):
global c, tweet_id
if len(args) != 2:
print_help(args)
else:
tweet_id=args[1]
load_tweets(id=tweet_id)
if __name__ == '__main__':
main(*sys.argv)
|
[
"siddharth.nagrajann@gmail.com"
] |
siddharth.nagrajann@gmail.com
|
9859269d8fba660b528d9985ec913e031aa96775
|
ab3d5ea4bf0e48914ed14fcf16e5b1d752f199ba
|
/pcg_libraries/src/pcg_gazebo/parsers/sdf/stiffness.py
|
e542a13796da4c2fcd0890a4bfb85285f9bbdecf
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
boschresearch/pcg_gazebo_pkgs
|
5f1004d0de874d4d1abc4eb695777013027158b2
|
1c112d01847ca4f8da61ce9b273e13d13bc7eb73
|
refs/heads/master
| 2020-06-11T06:28:36.228431
| 2020-02-07T13:05:28
| 2020-02-07T13:05:28
| 193,876,180
| 44
| 3
|
NOASSERTION
| 2020-02-07T12:00:55
| 2019-06-26T09:45:05
|
Python
|
UTF-8
|
Python
| false
| false
| 970
|
py
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLScalar
class Stiffness(XMLScalar):
_NAME = 'stiffness'
_TYPE = 'sdf'
def __init__(self, default=1e8):
XMLScalar.__init__(self, default)
def _set_value(self, value):
assert value > 0
XMLScalar._set_value(self, value)
|
[
"Musa.Marcusso@de.bosch.com"
] |
Musa.Marcusso@de.bosch.com
|
9d06d08ee4d60841884a62b2cda2fcf89dfc02d6
|
a4a8bbec98320a09ecaf33e26278bbc228aa5efc
|
/timer_start.py
|
d8b8d5e5b74bad30f8ddf4f526128b809d515618
|
[] |
no_license
|
ird87/biosynthesis-timer
|
f78990fb3729d0c8622fee2dc35339d12978dedf
|
7d98b7532547c56f4fff8f3ab1f6f36eb9fa31a5
|
refs/heads/master
| 2020-12-20T13:09:12.429825
| 2020-01-24T21:17:01
| 2020-01-24T21:17:01
| 236,086,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,860
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import PyQt5
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QDesktopWidget, QApplication
from timer import Ui_wBiosynthesisTimer
import sys
from conf import Configure
from process import TimerMode, ValveStatus, Process
class TimerWindow(PyQt5.QtWidgets.QMainWindow):
progressbar_change = PyQt5.QtCore.pyqtSignal(int)
progressbar_set = PyQt5.QtCore.pyqtSignal(int)
valve_status_set = PyQt5.QtCore.pyqtSignal(ValveStatus)
time_change = PyQt5.QtCore.pyqtSignal(str)
def __init__(self):
super(TimerWindow, self).__init__()
self.ui = Ui_wBiosynthesisTimer()
self.ui.setupUi(self)
self.setFixedSize(360, 260)
self.center()
self.setWindowFlags(
Qt.Window |
Qt.CustomizeWindowHint |
Qt.WindowTitleHint |
Qt.WindowCloseButtonHint |
Qt.WindowStaysOnTopHint |
Qt.WindowMinimizeButtonHint
)
self.conf=Configure()
try:
from gpio import GPIO
self.test_environment = False
except (ImportError, RuntimeError):
from gpio_test import GPIO
self.test_environment = True
print('TestModeON')
self.gpio=GPIO([self.conf.port])
self.process=Process(self)
self.mode=TimerMode.ManualMode
self.status=ValveStatus.ValveOpen
        # Hide the manual-mode control group by default
        self.ui.gbManualMode.setVisible(False)
        self.ui.lblManualTimer.setVisible(False)
        # Wire up the button commands
self.ui.btnAutoStart.clicked.connect(self.auto_mode_start)
self.ui.btnAutoStop.clicked.connect(self.auto_mode_stop)
self.ui.btnManualStart.clicked.connect(self.manual_mode_start)
self.ui.btnManualStop.clicked.connect(self.manual_mode_stop)
self.ui.cmbCurrentMode.currentIndexChanged.connect(self.current_mode_changed)
self.progressbar_change.connect(self.change_progressbar, PyQt5.QtCore.Qt.QueuedConnection)
self.progressbar_set.connect(self.set_progressbar, PyQt5.QtCore.Qt.QueuedConnection)
self.valve_status_set.connect(self.set_status, PyQt5.QtCore.Qt.QueuedConnection)
self.time_change.connect(self.change_time, PyQt5.QtCore.Qt.QueuedConnection)
def center(self):
# geometry of the main window
qr = self.frameGeometry()
# center point of screen
cp = QDesktopWidget().availableGeometry().center()
# move rectangle's center point to screen's center point
qr.moveCenter(cp)
# top left of rectangle becomes top left of window centering it
self.move(qr.topLeft())
def current_mode_changed(self):
        # Switch to manual mode: stop automatic mode / swap the visible UI group / set the mode
if self.ui.cmbCurrentMode.currentIndex()==0:
self.auto_mode_stop()
self.ui.gbAutoMode.setVisible(False)
self.ui.gbManualMode.setVisible(True)
self.set_mode(TimerMode.ManualMode)
        # Switch to automatic mode: stop manual mode / swap the visible UI group / set the mode
if self.ui.cmbCurrentMode.currentIndex()==1:
self.manual_mode_stop()
self.ui.gbAutoMode.setVisible(True)
self.ui.gbManualMode.setVisible(False)
self.set_mode(TimerMode.AutoMode)
def auto_mode_start(self):
self.process.start()
def auto_mode_stop(self):
self.process.process_break()
def manual_mode_start(self):
self.set_status(ValveStatus.ValveClose)
self.gpio.port_on(self.conf.port)
def manual_mode_stop(self):
self.set_status(ValveStatus.ValveOpen)
self.gpio.port_off(self.conf.port)
def set_mode(self, mode):
self.mode=mode
self.set_status(ValveStatus.ValveOpen)
def set_status(self, status):
self.status=status
if self.status == ValveStatus.ValveOpen:
            self.ui.txtCurrentStatus.setText('valve open')
if self.status == ValveStatus.ValveClose:
            self.ui.txtCurrentStatus.setText('valve closed')
def set_progressbar(self, maximum):
self.ui.pbrProgressBar.setValue(0)
self.ui.pbrProgressBar.setMaximum(maximum)
def change_time(self, t):
self.ui.lblTimer.setText(t)
def change_progressbar(self, t):
if self.ui.pbrProgressBar.value() < self.ui.pbrProgressBar.maximum():
self.ui.pbrProgressBar.setValue(self.ui.pbrProgressBar.value() + t)
def closeEvent(self, event):
self.set_status(ValveStatus.ValveOpen)
self.gpio.port_off(self.conf.port)
def main():
    app = PyQt5.QtWidgets.QApplication(sys.argv)  # new QApplication instance
    window = TimerWindow()  # create the main window object
    window.show()  # show the window
    app.exec_()
if __name__ == '__main__':  # run main() only when executed directly, not imported
    main()
|
[
"ird87.post.ru@gmail.com"
] |
ird87.post.ru@gmail.com
|
437eaf6e18296f938461bc6c976833dfc6d431ab
|
6c72880df5cd3b105d0e2a9024d67ad7f0000bf9
|
/day5.py
|
2c9de2c9012ed19ca96a4232cfd5a1446f23a86b
|
[] |
no_license
|
cheesepuff90/code-kata
|
6d2f5b6de2989ee74054afab42d4a99dca6a90ce
|
df49df041ba0741ebafbb4b59f6797afec1c7c69
|
refs/heads/main
| 2023-08-14T19:29:36.788791
| 2021-10-06T01:57:53
| 2021-10-06T01:57:53
| 406,581,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
def get_prefix(strs):
ans = ''
if len(strs) == 0:
return ''
strs = sorted(strs)
for idx,i in enumerate(strs[0]):
if i == strs[-1][idx]:
ans += i
else:
break
return ans
# strs is an array of words.
# Return the common starting prefix of all the words.
# For example:
# strs = ['start', 'stair', 'step']
# returns 'st'
# strs = ['start', 'wework', 'today']
# returns ''
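# Quick self-check of the examples above (illustrative):
assert get_prefix(['start', 'stair', 'step']) == 'st'
assert get_prefix(['start', 'wework', 'today']) == ''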
|
[
"kimdh1004yo@gmail.com"
] |
kimdh1004yo@gmail.com
|
21cbfa5ebdcf652d23f9830273248d00894b0745
|
d01d546af45ad870ebe38a8c21ac05785bed1d85
|
/devel/lib/python2.7/dist-packages/service_demo/srv/__init__.py
|
b1c2959b36aa7e19764a43a7b899d9273be6a66e
|
[] |
no_license
|
A201124253/c_s_u
|
56adfab067edb179201faa858377c11baa19a59b
|
fc104e267eeee90e23c98ce3bac25143a9c3d5e7
|
refs/heads/master
| 2020-03-24T17:14:08.360088
| 2018-07-30T13:45:24
| 2018-07-30T13:45:24
| 142,853,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
/home/fapsros/c_s_u/devel/.private/service_demo/lib/python2.7/dist-packages/service_demo/srv/__init__.py
|
[
"jimmy-lee_93.06@hotmail.com"
] |
jimmy-lee_93.06@hotmail.com
|
20fa02a294482db1f1dcaa6f11277daf40316d68
|
ecd31d9ab634d6150a8f96b09e4b716e0273d8e6
|
/server/models/city_model.py
|
282985af0646c183b5f84aaaab2270479566263e
|
[] |
no_license
|
mtyiska/move-rate-recommeder
|
409debd0aeb995b275934772f831fe83945d2b32
|
4cc8e715f5c25372fb4a935d92c4f0532103b6c0
|
refs/heads/master
| 2020-03-09T00:50:31.041340
| 2018-04-07T05:19:33
| 2018-04-07T05:19:33
| 128,498,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
from db import db
class CityModel(db.Model):
__tablename__ = 'cities'
    id = db.Column(db.Integer, primary_key=True)
cityname = db.Column(db.String(80))
rating = db.Column(db.Integer)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
image = db.Column(db.String(80))
# user_id = db.Column(db.Integer)
user = db.relationship('UserModel')
def __init__(self, cityname, rating, user_id, image):
self.cityname = cityname
self.rating = rating
self.user_id = user_id
self.image = image
def json(self):
return {'city_id':self.id,'cityname': self.cityname, 'rating': self.rating, 'user_id': self.user_id, 'image':self.image}
@classmethod
def find_by_name(cls, cityname):
return cls.query.filter_by(cityname=cityname).first()
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
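# --- Illustrative usage sketch (not part of the original module). Creates,
# persists, and looks up a city; all values are examples only and assume an
# active Flask-SQLAlchemy app context.
# city = CityModel(cityname='Denver', rating=4, user_id=1, image='denver.png')
# city.save_to_db()
# CityModel.find_by_name('Denver').json()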
|
[
"mtyiska@gmail.com"
] |
mtyiska@gmail.com
|
152f5094d355552012d8c6c65694e2647845bc90
|
1556527f9077813490366d1fef284bc0ae1e02ba
|
/sdk/lusid/models/corporate_action_source.py
|
802af594affa766e4ab9e78cd627b1fcc7d80526
|
[
"MIT"
] |
permissive
|
timbarrass/lusid-sdk-python-preview
|
a04ce8887c7001bd7ddf099027ab94c97d8fa400
|
9a54e98bf748d87469aa7c797607550fe65ba6ba
|
refs/heads/master
| 2020-08-03T20:02:37.282370
| 2019-09-30T10:51:19
| 2019-09-30T10:51:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,424
|
py
|
# coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://api.lusid.com/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can streamline their data. One of the primary tools to extend the data model is through using properties. Properties can be associated with amongst others: - * Transactions * Instruments * Portfolios The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is the primary container for transactions and holdings. * **Derived Portfolios** Derived portfolios allow portfolios to be created based on other portfolios, by overriding or overlaying specific items * **Holdings** A holding is a position account for a instrument within a portfolio. Holdings can only be adjusted via transactions. * **Transactions** A Transaction is a source of transactions used to manipulate holdings. * **Corporate Actions** A corporate action is a market event which occurs to a instrument, for example a stock split * **Instruments** A instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** Several entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. This section describes the data model that LUSID exposes via the APIs. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. 
In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to look up these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. `CCY_GBP` ## Instrument Prices (Analytics) Instrument prices are stored in LUSID's Analytics Store | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|Unique instrument identifier | | Value|decimal|Value of the analytic, eg price | | Denomination|string|Underlying unit of the analytic, eg currency, EPS etc. | ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | ## Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional model data. Portfolio properties can be changed over time as well. For example, to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Reference Portfolios Reference portfolios are portfolios that contain only weights, as opposed to transactions, and are designed to represent entities such as indices. ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in the derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created in a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ### Movements Engine The Movements engine sits on top of the immutable event store and is used to manage the relationship between input trading actions and their associated portfolio holdings. 
The movements engine reads in the following entity types:- * Posting Transactions * Applying Corporate Actions * Holding Adjustments These are converted to one or more movements and used by the movements engine to calculate holdings. At the same time it also calculates running balances and realised P&L. The outputs from the movements engine are holdings and transactions. ## Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted; however, you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. | | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this is the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. | From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate ### Example Transactions #### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. 
| Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] #### A Forward FX Example LUSID has a flexible transaction modelling system, and there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Other types and behaviours can be configured as required. Using these transaction types, the holdings query will report two forward positions. One in each currency. Since an FX trade is an exchange of one currency for another, the following two 6 month forward transactions are equivalent: | Column | Forward 'Sell' Trade | Forward 'Buy' Trade | | ----- | ----- | ----- | | TransactionId | FBN00004 | FBN00005 | | Type | FwdFxSell | FwdFxBuy | | InstrumentIdentifiers | { \"CCY\", \"CCY_GBP\" } | { \"CCY\", \"CCY_USD\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | | SettlementDate | 2019-02-06 | 2019-02-06 | | Units | 10000.00 | 13142.00 | | TransactionPrice |1 | 1 | | TradeCurrency | GBP | USD | | ExchangeRate | 1.3142 | 0.760919 | | TotalConsideration.Amount | 13142.00 | 10000.00 | | TotalConsideration.Currency | USD | GBP | | Trade/default/TradeToPortfolioRate | 1.0 | 0.760919 | ## Holdings A holding represents a position in an instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unique Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. | | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. 
### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Property Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. 
# Schemas The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"102\">102</a>|VersionNotFound| | | <a name=\"104\">104</a>|InstrumentNotFound| | | <a name=\"105\">105</a>|PropertyNotFound| | | <a name=\"106\">106</a>|PortfolioRecursionDepth| | | <a name=\"108\">108</a>|GroupNotFound| | | <a name=\"109\">109</a>|PortfolioNotFound| | | <a name=\"110\">110</a>|PropertySchemaNotFound| | | <a name=\"111\">111</a>|PortfolioAncestryNotFound| | | <a name=\"112\">112</a>|PortfolioWithIdAlreadyExists| | | <a name=\"113\">113</a>|OrphanedPortfolio| | | <a name=\"119\">119</a>|MissingBaseClaims| | | <a name=\"121\">121</a>|PropertyNotDefined| | | <a name=\"122\">122</a>|CannotDeleteSystemProperty| | | <a name=\"123\">123</a>|CannotModifyImmutablePropertyField| | | <a name=\"124\">124</a>|PropertyAlreadyExists| | | <a name=\"125\">125</a>|InvalidPropertyLifeTime| | | <a name=\"127\">127</a>|CannotModifyDefaultDataType| | | <a name=\"128\">128</a>|GroupAlreadyExists| | | <a name=\"129\">129</a>|NoSuchDataType| | | <a name=\"132\">132</a>|ValidationError| | | <a name=\"133\">133</a>|LoopDetectedInGroupHierarchy| | | <a name=\"135\">135</a>|SubGroupAlreadyExists| | | <a name=\"138\">138</a>|PriceSourceNotFound| | | <a name=\"139\">139</a>|AnalyticStoreNotFound| | | <a name=\"141\">141</a>|AnalyticStoreAlreadyExists| | | <a name=\"143\">143</a>|ClientInstrumentAlreadyExists| | | <a name=\"144\">144</a>|DuplicateInParameterSet| | | <a name=\"147\">147</a>|ResultsNotFound| | | <a name=\"148\">148</a>|OrderFieldNotInResultSet| | | <a name=\"149\">149</a>|OperationFailed| | | <a name=\"150\">150</a>|ElasticSearchError| | | <a name=\"151\">151</a>|InvalidParameterValue| | | <a name=\"153\">153</a>|CommandProcessingFailure| | | <a name=\"154\">154</a>|EntityStateConstructionFailure| | | <a name=\"155\">155</a>|EntityTimelineDoesNotExist| | | <a name=\"156\">156</a>|EventPublishFailure| | | <a name=\"157\">157</a>|InvalidRequestFailure| | | <a name=\"158\">158</a>|EventPublishUnknown| | | <a name=\"159\">159</a>|EventQueryFailure| | | <a name=\"160\">160</a>|BlobDidNotExistFailure| | | <a name=\"162\">162</a>|SubSystemRequestFailure| | | <a name=\"163\">163</a>|SubSystemConfigurationFailure| | | <a name=\"165\">165</a>|FailedToDelete| | | <a name=\"166\">166</a>|UpsertClientInstrumentFailure| | | <a name=\"167\">167</a>|IllegalAsAtInterval| | | <a name=\"168\">168</a>|IllegalBitemporalQuery| | | <a name=\"169\">169</a>|InvalidAlternateId| | | <a name=\"170\">170</a>|CannotAddSourcePortfolioPropertyExplicitly| | | <a name=\"171\">171</a>|EntityAlreadyExistsInGroup| | | <a name=\"173\">173</a>|EntityWithIdAlreadyExists| | | <a name=\"174\">174</a>|DerivedPortfolioDetailsDoNotExist| | | <a name=\"176\">176</a>|PortfolioWithNameAlreadyExists| | | <a name=\"177\">177</a>|InvalidTransactions| | | <a name=\"178\">178</a>|ReferencePortfolioNotFound| | | <a name=\"179\">179</a>|DuplicateIdFailure| | | <a name=\"180\">180</a>|CommandRetrievalFailure| | | <a name=\"181\">181</a>|DataFilterApplicationFailure| | | <a name=\"182\">182</a>|SearchFailed| | | <a 
name=\"183\">183</a>|MovementsEngineConfigurationKeyFailure| | | <a name=\"184\">184</a>|FxRateSourceNotFound| | | <a name=\"185\">185</a>|AccrualSourceNotFound| | | <a name=\"186\">186</a>|AccessDenied| | | <a name=\"187\">187</a>|InvalidIdentityToken| | | <a name=\"188\">188</a>|InvalidRequestHeaders| | | <a name=\"189\">189</a>|PriceNotFound| | | <a name=\"190\">190</a>|InvalidSubHoldingKeysProvided| | | <a name=\"191\">191</a>|DuplicateSubHoldingKeysProvided| | | <a name=\"192\">192</a>|CutDefinitionNotFound| | | <a name=\"193\">193</a>|CutDefinitionInvalid| | | <a name=\"194\">194</a>|TimeVariantPropertyDeletionDateUnspecified| | | <a name=\"195\">195</a>|PerpetualPropertyDeletionDateSpecified| | | <a name=\"196\">196</a>|TimeVariantPropertyUpsertDateUnspecified| | | <a name=\"197\">197</a>|PerpetualPropertyUpsertDateSpecified| | | <a name=\"200\">200</a>|InvalidUnitForDataType| | | <a name=\"201\">201</a>|InvalidTypeForDataType| | | <a name=\"202\">202</a>|InvalidValueForDataType| | | <a name=\"203\">203</a>|UnitNotDefinedForDataType| | | <a name=\"204\">204</a>|UnitsNotSupportedOnDataType| | | <a name=\"205\">205</a>|CannotSpecifyUnitsOnDataType| | | <a name=\"206\">206</a>|UnitSchemaInconsistentWithDataType| | | <a name=\"207\">207</a>|UnitDefinitionNotSpecified| | | <a name=\"208\">208</a>|DuplicateUnitDefinitionsSpecified| | | <a name=\"209\">209</a>|InvalidUnitsDefinition| | | <a name=\"210\">210</a>|InvalidInstrumentIdentifierUnit| | | <a name=\"211\">211</a>|HoldingsAdjustmentDoesNotExist| | | <a name=\"212\">212</a>|CouldNotBuildExcelUrl| | | <a name=\"213\">213</a>|CouldNotGetExcelVersion| | | <a name=\"214\">214</a>|InstrumentByCodeNotFound| | | <a name=\"215\">215</a>|EntitySchemaDoesNotExist| | | <a name=\"216\">216</a>|FeatureNotSupportedOnPortfolioType| | | <a name=\"217\">217</a>|QuoteNotFoundFailure| | | <a name=\"218\">218</a>|InvalidQuoteIdentifierFailure| | | <a name=\"219\">219</a>|InvalidInstrumentDefinition| | | <a name=\"221\">221</a>|InstrumentUpsertFailure| | | <a name=\"222\">222</a>|ReferencePortfolioRequestNotSupported| | | <a name=\"223\">223</a>|TransactionPortfolioRequestNotSupported| | | <a name=\"224\">224</a>|InvalidPropertyValueAssignment| | | <a name=\"230\">230</a>|TransactionTypeNotFound| | | <a name=\"231\">231</a>|TransactionTypeDuplication| | | <a name=\"232\">232</a>|PortfolioDoesNotExistAtGivenDate| | | <a name=\"233\">233</a>|QueryParserFailure| | | <a name=\"234\">234</a>|DuplicateConstituentFailure| | | <a name=\"235\">235</a>|UnresolvedInstrumentConstituentFailure| | | <a name=\"236\">236</a>|UnresolvedInstrumentInTransitionFailure| | | <a name=\"300\">300</a>|MissingRecipeFailure| | | <a name=\"301\">301</a>|DependenciesFailure| | | <a name=\"304\">304</a>|PortfolioPreprocessFailure| | | <a name=\"310\">310</a>|ValuationEngineFailure| | | <a name=\"311\">311</a>|TaskFactoryFailure| | | <a name=\"312\">312</a>|TaskEvaluationFailure| | | <a name=\"350\">350</a>|InstrumentFailure| | | <a name=\"351\">351</a>|CashFlowsFailure| | | <a name=\"360\">360</a>|AggregationFailure| | | <a name=\"370\">370</a>|ResultRetrievalFailure| | | <a name=\"371\">371</a>|ResultProcessingFailure| | | <a name=\"371\">371</a>|ResultProcessingFailure| | | <a name=\"372\">372</a>|VendorResultProcessingFailure| | | <a name=\"373\">373</a>|VendorResultMappingFailure| | | <a name=\"374\">374</a>|VendorLibraryUnauthorisedFailure| | | <a name=\"390\">390</a>|AttemptToUpsertDuplicateQuotes| | | <a name=\"391\">391</a>|CorporateActionSourceDoesNotExist| | | <a 
name=\"392\">392</a>|CorporateActionSourceAlreadyExists| | | <a name=\"393\">393</a>|InstrumentIdentifierAlreadyInUse| | | <a name=\"394\">394</a>|PropertiesNotFound| | | <a name=\"395\">395</a>|BatchOperationAborted| | | <a name=\"400\">400</a>|InvalidIso4217CurrencyCodeFailure| | | <a name=\"410\">410</a>|IndexDoesNotExist| | | <a name=\"411\">411</a>|SortFieldDoesNotExist| | | <a name=\"413\">413</a>|NegativePaginationParameters| | | <a name=\"414\">414</a>|InvalidSearchSyntax| | | <a name=\"-10\">-10</a>|ServerConfigurationError| | | <a name=\"-1\">-1</a>|Unknown error| | # noqa: E501
The version of the OpenAPI document: 0.10.739
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CorporateActionSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'href': 'str',
'id': 'ResourceId',
'version': 'Version',
'display_name': 'str',
'description': 'str',
'links': 'list[Link]'
}
attribute_map = {
'href': 'href',
'id': 'id',
'version': 'version',
'display_name': 'displayName',
'description': 'description',
'links': 'links'
}
def __init__(self, href=None, id=None, version=None, display_name=None, description=None, links=None): # noqa: E501
"""CorporateActionSource - a model defined in OpenAPI""" # noqa: E501
self._href = None
self._id = None
self._version = None
self._display_name = None
self._description = None
self._links = None
self.discriminator = None
if href is not None:
self.href = href
self.id = id
self.version = version
if display_name is not None:
self.display_name = display_name
if description is not None:
self.description = description
if links is not None:
self.links = links
@property
def href(self):
"""Gets the href of this CorporateActionSource. # noqa: E501
:return: The href of this CorporateActionSource. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this CorporateActionSource.
:param href: The href of this CorporateActionSource. # noqa: E501
:type: str
"""
self._href = href
@property
def id(self):
"""Gets the id of this CorporateActionSource. # noqa: E501
:return: The id of this CorporateActionSource. # noqa: E501
:rtype: ResourceId
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CorporateActionSource.
:param id: The id of this CorporateActionSource. # noqa: E501
:type: ResourceId
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def version(self):
"""Gets the version of this CorporateActionSource. # noqa: E501
:return: The version of this CorporateActionSource. # noqa: E501
:rtype: Version
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this CorporateActionSource.
:param version: The version of this CorporateActionSource. # noqa: E501
:type: Version
"""
if version is None:
raise ValueError("Invalid value for `version`, must not be `None`") # noqa: E501
self._version = version
@property
def display_name(self):
"""Gets the display_name of this CorporateActionSource. # noqa: E501
:return: The display_name of this CorporateActionSource. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this CorporateActionSource.
:param display_name: The display_name of this CorporateActionSource. # noqa: E501
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""Gets the description of this CorporateActionSource. # noqa: E501
:return: The description of this CorporateActionSource. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CorporateActionSource.
:param description: The description of this CorporateActionSource. # noqa: E501
:type: str
"""
self._description = description
@property
def links(self):
"""Gets the links of this CorporateActionSource. # noqa: E501
:return: The links of this CorporateActionSource. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this CorporateActionSource.
:param links: The links of this CorporateActionSource. # noqa: E501
:type: list[Link]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CorporateActionSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
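# --- Illustrative sketch (not part of the generated model) -------------------
# The module docstring documents two derived transaction values:
#   value in transaction currency = TotalConsideration / ExchangeRate
#   value in portfolio currency   = value in transaction ccy * TradeToPortfolioRate
# Checked here against the docstring's FBN00003 example figures; the field
# names follow the docstring's table, not any SDK class.
def _transaction_value_in_transaction_ccy(total_consideration, exchange_rate):
    return total_consideration / exchange_rate

# 500 IBM shares settled directly in Sterling: 54846.60 GBP / 0.7673 = 71480 USD
assert round(_transaction_value_in_transaction_ccy(54846.60, 0.7673)) == 71480
# and back to portfolio currency via TradeToPortfolioRate:
assert round(71480.00 * 0.7673, 2) == 54846.60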
|
[
"concourse@finbourne.com"
] |
concourse@finbourne.com
|
5da273e3dcf3aa4e91ee91896e18b031439f6c03
|
87dfd4ac2a43802fc6e01955e527e2c7e637d0ca
|
/examples/nested/mog4.py
|
216a3bb4e5683fece5f2db8c983c5eb34da16c97
|
[
"MIT"
] |
permissive
|
mattpitkin/nnest
|
c5d8e75a655a33129ee14f9d5149afc1e7e62828
|
1f81caba63514349baf6fa06d8db7d6511d08cd8
|
refs/heads/master
| 2022-07-02T18:32:32.655652
| 2019-12-11T17:19:59
| 2019-12-11T17:19:59
| 260,881,536
| 0
| 0
| null | 2020-05-03T10:25:46
| 2020-05-03T10:25:46
| null |
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
import os
import sys
import argparse
import copy
import numpy as np
import scipy.special
import torch
sys.path.append(os.getcwd())
def log_gaussian_pdf(theta, sigma=1, mu=0, ndim=None):
if ndim is None:
try:
ndim = len(theta)
except TypeError:
assert isinstance(theta, (float, int)), theta
ndim = 1
logl = -(np.sum((theta - mu) ** 2) / (2 * sigma ** 2))
logl -= np.log(2 * np.pi * (sigma ** 2)) * ndim / 2.0
return logl
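# Sanity check (illustrative): for an isotropic Gaussian the expression above
# equals a sum of independent 1-D normal log-densities, e.g.
#   from scipy import stats
#   np.isclose(log_gaussian_pdf(np.zeros(3), sigma=2.0),
#              stats.norm.logpdf(np.zeros(3), scale=2.0).sum())  # -> True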
class Gaussian(object):
def __init__(self, sigma=1.0, nderived=0):
self.sigma = sigma
self.nderived = nderived
def __call__(self, theta):
logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)
return logl, [0.0] * self.nderived
class GaussianMix(object):
def __init__(self, sep=4, weights=(0.4, 0.3, 0.2, 0.1), sigma=1, nderived=0):
assert len(weights) in [2, 3, 4], ('Weights must have 2, 3 or 4 components. Weights=' + str(weights))
assert np.isclose(sum(weights), 1), ('Weights must sum to 1! Weights=' + str(weights))
self.nderived = nderived
self.weights = weights
self.sigmas = [sigma] * len(weights)
positions = []
positions.append(np.asarray([0, sep]))
positions.append(np.asarray([0, -sep]))
positions.append(np.asarray([sep, 0]))
positions.append(np.asarray([-sep, 0]))
self.positions = positions[:len(weights)]
def __call__(self, theta):
thetas = []
for pos in self.positions:
thetas.append(copy.deepcopy(theta))
thetas[-1][:2] -= pos
logls = [(Gaussian(sigma=self.sigmas[i])(thetas[i])[0]
+ np.log(self.weights[i])) for i in range(len(self.weights))]
logl = scipy.special.logsumexp(logls)
return logl, [0.0] * self.nderived
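# GaussianMix.__call__ above evaluates log p(theta) =
# logsumexp_i( log(w_i) + log N(theta; mu_i, sigma^2 I) ), where mu_i places
# the i-th offset in the first two coordinates and zeros elsewhere;
# scipy.special.logsumexp keeps the mixture sum numerically stable.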
def main(args):
from nnest import NestedSampler
from nnest.distributions import GeneralisedNormal
g = GaussianMix()
def loglike(z):
return np.array([g(x)[0] for x in z])
def transform(x):
return 10. * x
if args.base_dist == 'gen_normal':
base_dist = GeneralisedNormal(torch.zeros(args.x_dim), torch.ones(args.x_dim), torch.tensor(args.beta))
else:
base_dist = None
sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,
hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,
use_gpu=args.use_gpu, base_dist=base_dist, scale=args.scale)
sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=args.switch, noise=args.noise)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=5,
help="Dimensionality")
parser.add_argument('--train_iters', type=int, default=2000,
help="number of train iters")
parser.add_argument("--mcmc_steps", type=int, default=0)
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('-use_gpu', action='store_true')
parser.add_argument('--flow', type=str, default='nvp')
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument("--test_samples", type=int, default=0)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--num_slow', type=int, default=0)
parser.add_argument('--log_dir', type=str, default='logs/mog4')
parser.add_argument('--base_dist', type=str, default='')
parser.add_argument('--scale', type=str, default='constant')
parser.add_argument('--beta', type=float, default=8.0)
args = parser.parse_args()
main(args)
    # A uniform prior over [-10, 10]^x_dim (the transform maps the unit cube to
    # that box, covering the modes at +/-4) has volume 20^x_dim, so for the
    # normalised mixture likelihood the analytic evidence is log Z = -x_dim * log(20).
    print('Expected log Z: %5.4f' % (-args.x_dim * np.log(20)))
|
[
"adam.moss@nottingham.ac.uk"
] |
adam.moss@nottingham.ac.uk
|
a0b3c7b72cb58599fa7cca1862baa3edbb6e27b5
|
0d4b4fa2aa5113bcadac6d5c8f95a95dba07944f
|
/Variables.py
|
d4e45d296dc583cc535ca9be4891d353079817f0
|
[
"MIT"
] |
permissive
|
imran-ice/iMessage-Visualizer
|
559ef103fc48e02ca88b89438dd0b1f1cd865c74
|
933664cc4072eac0e43bd874dede792370ae3673
|
refs/heads/master
| 2021-10-11T10:49:08.165407
| 2019-01-24T21:52:14
| 2019-01-24T21:52:14
| 271,576,502
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
#############################################################################
#Replace with the person's name that you were texting
leftName = "Bob"
#Replace with your name
rightName = "John"
#############################################################################
|
[
"noreply@github.com"
] |
imran-ice.noreply@github.com
|
e7962ac37ed33844de4f64f61dc9db4739ae414a
|
ae0408fbe442438cf3a569662eb64c9ca3e41fe4
|
/portafolios.py
|
f4a0c2c330f0e87b08060ee5128c8fbef3f36bbf
|
[] |
no_license
|
danilhramon/simulacion-Poryecto2
|
c9fa15bf38feae5d15cd282ed202dd4cbe3a2489
|
03479847ecec07dd6af9165ead0cd6bcd654f0f8
|
refs/heads/master
| 2020-12-24T19:04:28.887472
| 2016-04-11T21:53:31
| 2016-04-11T21:53:31
| 55,984,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,821
|
py
|
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
# The block below downloaded price histories and estimated the mean vector and
# covariance matrix of the log returns. It is kept commented out because it
# needs network access (and pandas.io.data has since moved to the separate
# pandas-datareader package).
# from pandas_datareader.data import DataReader
# act1 = DataReader("GENTERA.MX", "yahoo", datetime(2015,1,1), datetime(2016,1,1))
# act1 = np.log(act1["Adj Close"])
# act2 = DataReader("GFNORTEO.MX", "yahoo", datetime(2015,1,1), datetime(2016,1,1))
# act2 = np.log(act2["Adj Close"])
# act3 = DataReader("GFREGIOO.MX", "yahoo", datetime(2015,1,1), datetime(2016,1,1))
# act3 = np.log(act3["Adj Close"])
# act4 = DataReader("GRUMAB.MX", "yahoo", datetime(2015,1,1), datetime(2016,1,1))
# act4 = np.log(act4["Adj Close"])
# act5 = DataReader("GSANBORB-1.MX", "yahoo", datetime(2015,1,1), datetime(2016,1,1))
# act5 = np.log(act5["Adj Close"])
# activos = act1, act2, act3, act4, act5
# activos = np.diff(activos, axis=1)
# mean = np.mean(activos, axis=1)
# cov = np.cov(activos)
# Precomputed estimates for a two-asset portfolio.
mean = 0.0002456, 0.00023456
cov = [[0.000032, 0.0000158619040471187], [0.0000158619040471187, 0.00003145]]
def portafolios(mean, cov, numerPorta):
    # Generates numerPorta random feasible portfolios and returns the standard
    # deviation and mean return of each one.
    #   mean: vector of the mean log returns of the portfolio's assets
    #   cov: variance-covariance matrix of the assets' log returns
    #   numerPorta: number of portfolios to generate
    Zt = np.random.uniform(size=(numerPorta, 2))
    Zt = Zt / np.sum(Zt, axis=1)[:, None]  # normalise every row so the weights sum to 1
    mean = np.asarray(mean)
    cov = np.asarray(cov)
    portmean = np.dot(Zt, mean)  # w . mu for each portfolio
    # sqrt(w' Sigma w) for each row; einsum computes just the diagonal instead of
    # materialising the numerPorta x numerPorta matrix of diag(sqrt(Z Sigma Z'))
    portvar = np.sqrt(np.einsum('ij,jk,ik->i', Zt, cov, Zt))
    x, y = portvar, portmean
    return (x, y)
x, y = portafolios(mean, cov, 10000)
plt.plot(x, y, 'bo')
plt.axis([0.0045, .006, 0.00023, .00025])
plt.xlabel('Standard deviation')
plt.ylabel('Return')
plt.title('Portfolio cloud')
plt.show()
|
[
"danielhramon@hotmail.com"
] |
danielhramon@hotmail.com
|
53f9953fe16687b2ea912c67e71e5bb8b7141b59
|
97ee29c877e09388e34669d8a93a516d70ff8192
|
/tests/robocrys/test_query_operators.py
|
4f98da21900140c853389a9513dfd73464be3da8
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-hdf5",
"BSD-2-Clause"
] |
permissive
|
hhaoyan/api
|
fb4057c502c35e9495894e17d0b692efd4a2ac2b
|
e2dc71934baecd1a85621f7f7f6ff85f96cbd684
|
refs/heads/main
| 2023-06-26T05:18:38.225857
| 2021-07-23T16:22:35
| 2021-07-23T16:22:35
| 361,736,552
| 0
| 0
|
NOASSERTION
| 2021-04-26T12:10:02
| 2021-04-26T12:10:01
| null |
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
from mp_api.routes.robocrys.query_operators import RoboTextSearchQuery
from monty.tempfile import ScratchDir
from monty.serialization import loadfn, dumpfn
def test_robocrys_search_query():
op = RoboTextSearchQuery()
pipeline = [
{
"$search": {
"index": "description",
"regex": {
"query": ["cubic", "octahedra"],
"path": "description",
"allowAnalyzedField": True,
},
}
},
{
"$facet": {
"total_doc": [{"$count": "count"}],
"results": [
{
"$project": {
"_id": 0,
"task_id": 1,
"description": 1,
"condensed_structure": 1,
"last_updates": 1,
"search_score": {"$meta": "searchScore"},
}
}
],
}
},
{"$unwind": "$results"},
{"$unwind": "$total_doc"},
{
"$replaceRoot": {
"newRoot": {
"$mergeObjects": ["$results", {"total_doc": "$total_doc.count"}]
}
}
},
{"$sort": {"search_score": -1}},
{"$skip": 0},
{"$limit": 10},
]
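    # The pipeline above is a MongoDB Atlas Search aggregation: $search runs a
    # regex query over the 'description' field, $facet computes the total hit
    # count alongside the projected results, and $replaceRoot merges that count
    # into each result document before sorting, skipping and limiting.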
assert op.query(keywords="cubic, octahedra", skip=0, limit=10) == {
"pipeline": pipeline
}
with ScratchDir("."):
dumpfn(op, "temp.json")
new_op = loadfn("temp.json")
assert new_op.query(keywords="cubic, octahedra", skip=0, limit=10) == {
"pipeline": pipeline
}
assert op.post_process([{"total_doc": 10}]) == [{"total_doc": 10}]
assert op.meta() == {"total_doc": 10}
|
[
"noreply@github.com"
] |
hhaoyan.noreply@github.com
|
3b048cb8842f249f36c5b37d58c96a3e9178f74e
|
659e3af28a29b68e997d5cabdfb6b3b6ade85b6d
|
/api/facebook.py
|
953e1436d57d34bf9421b25df87c08c20ab75cb6
|
[] |
no_license
|
siengsotheara/findmepitch
|
4696d90895428632cedb6be34a9a9c3e273ab23a
|
01c7a9d4b6c32fe773e417c60482cea737fc65a0
|
refs/heads/master
| 2022-12-11T22:29:06.241150
| 2017-09-10T15:40:02
| 2017-09-10T15:40:02
| 102,928,791
| 0
| 0
| null | 2022-12-08T00:37:47
| 2017-09-09T05:12:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
from flask import Flask, redirect, url_for, session, request
from flask_oauth import OAuth
import requests
import os
import sys
SECRET_KEY = 'development key'
DEBUG = False
FACEBOOK_APP_ID = '159985207843693'
FACEBOOK_APP_SECRET = '7b111fdeb43cf6889725a849013d387c'
app = Flask(__name__)
app.debug = DEBUG
app.secret_key = SECRET_KEY
oauth = OAuth()
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_APP_ID,
consumer_secret=FACEBOOK_APP_SECRET,
request_token_params={'scope': 'email'}
)
@app.route('/')
def index():
return 'Welcome to FindMePitch'
@app.route('/login')
def login():
return facebook.authorize(callback=url_for('facebook_authorized',
next=request.args.get('next') or request.referrer or None,
_external=True))
@app.route('/login/authorized')
@facebook.authorized_handler
def facebook_authorized(resp):
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
#print (resp['access_token'], '')
session['oauth_token'] = (resp['access_token'], '')
me = facebook.get('/me?fields=id,name,email')
return 'Logged in as id=%s name=%s email=%s redirect=%s' % \
(me.data['id'], me.data['name'], me.data['email'], request.args.get('next'))
@facebook.tokengetter
def get_facebook_oauth_token():
return session.get('oauth_token')
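# OAuth flow (for reference): /login redirects the browser to Facebook's
# authorisation dialog; Facebook redirects back to /login/authorized, where
# flask_oauth exchanges the code for an access token; the tokengetter above
# then supplies that token to subsequent facebook.get() calls such as /me.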
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run('0.0.0.0', port=port)
|
[
"sieng.sotheara.7@gmail.com"
] |
sieng.sotheara.7@gmail.com
|
78fe285f28b682aad7bc6b10277f25fcb6cddbb4
|
175f14c6f94614d8c8532ac3bf6a4dbc027cf48c
|
/app.py
|
9d0329e5d9d6db8d7bc95fa27f6e3d188b4b636a
|
[] |
no_license
|
iamavailable/uber_rides
|
7bc7016a02e411e5a4a8c0b84c1b8a18ec8fd3de
|
6d0b5771b510941b7ebb5ddcfef79c1998dcbd21
|
refs/heads/master
| 2021-07-12T20:02:07.875540
| 2020-02-16T02:27:26
| 2020-02-16T02:27:26
| 240,818,671
| 0
| 0
| null | 2021-03-20T02:56:00
| 2020-02-16T02:23:08
|
CSS
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import math
app = Flask(__name__)
model = pickle.load(open('taxi.pkl','rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0],2)
return render_template('index.html', prediction_text = "Number of Weekly Rides Should be {}".format(math.floor(output)))
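# Illustrative request (assumption: the actual form field names are defined in
# templates/index.html, which is not part of this file):
#   curl -X POST -d 'feature1=80' -d 'feature2=1800' http://localhost:5000/predict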
if __name__ == '__main__':
app.run(debug=True)
|
[
"noreply@github.com"
] |
iamavailable.noreply@github.com
|
08bb02864efb4d8ef55a1ca6d6d165fbff28ec0a
|
8a5cb95a574f22a26f726d6bf4c3c4bc45993a03
|
/Server/db_firestore_admin.py
|
b4b9b98f964cd6ff95c28bbb24451add905a8739
|
[] |
no_license
|
dharamk/daas
|
43fa2996272332bbb523e9b95bbc3c4ff8da4a2d
|
30ca6dd3d606b0238e2b243fa60c3737343ccce2
|
refs/heads/master
| 2022-03-06T01:41:55.875501
| 2019-10-15T02:06:19
| 2019-10-15T02:06:19
| 206,477,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,097
|
py
|
#!/bin/python3
import firebase_admin
from firebase_admin import auth
from firebase_admin import credentials
from firebase_admin import firestore
import db_interface
FIRESTORE_DB_TEST_AGENTS_COLLECTION = "testing_agents"
FIRESTORE_DB_PRODUCTION_AGENTS_COLLECTION = "production_agents"
FIRESTORE_DB_ALL_AGENTS_MAIN_DOC = "all_agents"
FIRESTORE_DB_ALL_AGENTS_INFO_DOC = "all_agents_info"
FIRESTORE_DB_ALL_AGENTS_SECURITY_DOC = "all_agents_secure_data"
FIRESTORE_DB_ONLINE_AGENTS_DOC = "online_agents"
FIRESTORE_DB_ONLINE_AGENTS_DATA_COLL = 'data'
FIRESTORE_DB_ALL_DEVICES_MAIN_DOC = "devices"
DEFAULT_SERVICE_ACCOUNT_JSON_NAME = "db_firebase_admin.json"
# Schema of document 'all_agents'
# {
# "metadata" : {
# "count": 500,
# "agent_ids":[]
# },
# }
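# Example instance (illustrative) after two agents have been onboarded:
# {
#     "metadata": {"count": 2, "agent_ids": ["12567", "12568"]}
# }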
ALL_AGENTS_MAIN_METADATA_KEY = 'metadata'
ALL_AGENTS_MAIN_AGENT_IDS = 'metadata.agent_ids'
ALL_AGENTS_MAIN_AGENT_COUNT = 'metadata.count'
ALL_AGENTS_SECURITY_SCHEME = 'security_scheme'
ALL_AGENTS_SECURITY_PRIV_KEY_ID = 'private_key_id'
ALL_AGENTS_SECURITY_PRIV_KEY = 'private_key'
ALL_AGENTS_INFO_AGENT_NAME = 'name'
ALL_AGENTS_INFO_AGENT_OWNER_ID = 'owner_id'
ALL_AGENTS_INFO_AGENT_CREATION_TIMESTAMP = 'created_at'
ALL_AGENTS_INFO_AGENT_SHARED_BY_USERS = "is_shared"
ONLINE_AGENTS_METADATA_KEY = 'metadata'
ONLINE_AGENTS_AGENT_COUNT = 'metadata.count'
ONLINE_AGENTS_AGENT_IDS = 'metadata.agent_ids'
ONLINE_AGENTS_DATA_AGENT_IP_ADDR = 'ip_address'
ONLINE_AGENTS_DATA_AGENT_PORT = 'port'
class FirestoreAdminClient(db_interface.AbstractDbClient):
def __init__(self, server_obj, root_collection=None, use_test=True):
"""
initialize and set-up the database client
"""
self.server = server_obj
if root_collection:
self.root_collection = root_collection
elif not use_test:
raise ValueError
else:
self.root_collection = FIRESTORE_DB_TEST_AGENTS_COLLECTION
from pathlib import Path
self.admin_creds = credentials.Certificate(
str(Path.cwd() / DEFAULT_SERVICE_ACCOUNT_JSON_NAME))
self.admin_app = firebase_admin.initialize_app(self.admin_creds)
self.client = firestore.client()
self.root_collection_ref = self.client.collection(self.root_collection)
self.all_agents = self.root_collection_ref.document(
FIRESTORE_DB_ALL_AGENTS_MAIN_DOC)
self.online_agents = self.root_collection_ref.document(
FIRESTORE_DB_ONLINE_AGENTS_DOC)
self.online_agents_data = self.online_agents.collection(
FIRESTORE_DB_ONLINE_AGENTS_DATA_COLL)
# self.online_agents_data = self.client.collection(
# self.root_collection, FIRESTORE_DB_ONLINE_AGENTS_DOC, FIRESTORE_DB_ONLINE_AGENTS_DATA_COLL)
self.all_agents_info = self.root_collection_ref.document(
FIRESTORE_DB_ALL_AGENTS_INFO_DOC)
# self.root_collection_ref.document.document(FIRESTORE_DB_ALL_AGENTS_SECURITY_DOC)
self.all_agents_security = self.root_collection_ref.document(
FIRESTORE_DB_ALL_AGENTS_SECURITY_DOC)
def _is_agent_id_in_use(self, aid):
"""
Checks if given Agent-id has already been registered(onboarded)
with the Server.
Args:
aid (str): Agent unique id
Returns:
Boolean : True if the agent-id is already registered with Server
else return False
"""
if not self.all_agents:
raise AttributeError
if not aid:
raise ValueError
doc = self.all_agents
array_agent_ids = doc.get([ALL_AGENTS_MAIN_AGENT_IDS]).get(
ALL_AGENTS_MAIN_AGENT_IDS)
if array_agent_ids and aid in array_agent_ids:
return True
return False
def _add_agent_main(self, aid):
if not aid or not self.all_agents:
raise ValueError
doc = self.all_agents
t = {}
t['count'] = 1
t['agent_ids'] = [aid]
snapshot = doc.get().to_dict()
if not snapshot:
doc.set({ALL_AGENTS_MAIN_METADATA_KEY: t})
else:
doc.update({ALL_AGENTS_MAIN_AGENT_COUNT: firestore.Increment(1)})
doc.update(
{ALL_AGENTS_MAIN_AGENT_IDS: firestore.ArrayUnion([aid])})
def _add_agent_security(self, aid, security_options=None):
if not aid or not self.all_agents_security:
raise ValueError
doc = self.all_agents_security
# TODO: enhance this once we figure out what all schemes we've to use
snapshot = doc.get()
if not snapshot.to_dict():
doc.set({aid: {ALL_AGENTS_SECURITY_SCHEME: None}})
else:
doc.update({aid: {ALL_AGENTS_SECURITY_SCHEME: None}})
def _delete_agent_main(self, aid):
if not aid or not self.all_agents:
raise ValueError
doc = self.all_agents
snapshot = doc.get()
print(snapshot.to_dict())
if not snapshot.to_dict():
print("agent_main: Document empty. aid:{}".format(aid))
return
array_ids = snapshot.get(ALL_AGENTS_MAIN_AGENT_IDS)
print(array_ids)
if array_ids and aid in array_ids:
doc.update({ALL_AGENTS_MAIN_AGENT_COUNT: firestore.Increment(-1)})
doc.update(
{ALL_AGENTS_MAIN_AGENT_IDS: firestore.ArrayRemove([aid])})
else:
print("Agent-id not found")
def _delete_agent_security(self, aid):
if not aid or not self.all_agents_security:
raise ValueError
doc = self.all_agents_security
snapshot = doc.get()
print(snapshot.to_dict())
if not snapshot.to_dict():
print("agent_security: Document empty. aid:{}".format(aid))
return
# agent_security = snapshot.
# print(agent_security)
doc.update({aid: firestore.DELETE_FIELD})
pass
def _add_agent_info(self, aid, name, uid, is_shared=True):
if not aid or not name or not uid:
raise ValueError
coll = self.all_agents_info.collection('data')
agent_doc = coll.document(aid)
t = {}
t[ALL_AGENTS_INFO_AGENT_NAME] = name
t[ALL_AGENTS_INFO_AGENT_CREATION_TIMESTAMP] = firestore.SERVER_TIMESTAMP
t[ALL_AGENTS_INFO_AGENT_SHARED_BY_USERS] = is_shared
t[ALL_AGENTS_INFO_AGENT_OWNER_ID] = uid
# this method should reset the full document.
# ideally this would be called once by the Server at very beginning.
agent_doc.set(t)
def _delete_agent_info(self, aid):
if not aid:
raise ValueError
coll = self.all_agents_info.collection('data')
agent_doc = coll.document(aid).delete()
def _add_agent_connection(self, aid, ip_addr, port):
if not aid:
raise ValueError
pdoc = self.online_agents
psnapshot = pdoc.get()
if not psnapshot.to_dict():
t = {}
t['count'] = 0
t['agent_ids'] = []
print("yes-parent-doc is empty")
pdoc.set({ONLINE_AGENTS_METADATA_KEY: t})
else:
# do-nothing if a new agent-connection info is received
# agent will update the 'connection-status' i.e. its live status
# once the agent-service is online. This function is called
# very first time the agent is created(and not ONLINE).
            # so these fields will be updated in a different flow.
pass
ag = {}
ag[ONLINE_AGENTS_DATA_AGENT_PORT] = port
ag[ONLINE_AGENTS_DATA_AGENT_IP_ADDR] = ip_addr
child_collection = self.online_agents_data
doc = child_collection.document(aid)
doc.set(ag)
pass
def _delete_agent_connection(self, aid):
if not aid:
raise ValueError
parent_doc = self.online_agents
p_snapshot = parent_doc.get()
if p_snapshot.to_dict():
online_agents = p_snapshot.get(ONLINE_AGENTS_AGENT_IDS)
if online_agents and aid in online_agents:
parent_doc.update(
{ONLINE_AGENTS_AGENT_IDS: firestore.ArrayRemove([aid])})
parent_doc.update(
{ONLINE_AGENTS_AGENT_COUNT: firestore.Increment(-1)})
else:
# do nothing if there is no live-information about this agent
# with server.
pass
doc = self.online_agents_data.document(aid)
snapshot = doc.get()
print(snapshot.to_dict())
if not snapshot.to_dict():
# .document(aid) creates the document by default if it does not exist.
# if it has been deleted in the past we have inadvertently created it
# so just-delete it - i.e. no return from this if-check
print("agent_connection: Document empty. aid:{}".format(aid))
self.online_agents_data.document(aid).delete()
pass
def _get_agent_by_name(self, name_args, owner_id='default'):
if not name_args:
return None
full_name = None
if isinstance(name_args, dict):
# name_prefix = name_json['prefix']
# name_token = name_json['token']
full_name = name_args['name']
elif isinstance(name_args, str):
full_name = name_args
coll = self.all_agents_info.collection('data')
docs = coll.where('name.name', '==', full_name).where(
'owner_id', '==', owner_id).stream()
count = 0
t = None
if docs:
for doc in docs:
if count == 1:
# if we ever have two documents(agents) with same full-name
# it would be a disaster - so report it.
raise ValueError
count += 1
# print(doc.get().to_dict())
t = dict()
t["aid"] = doc.id
return t
def _get_agent_by_id(self):
pass
def _get_agent_by_uri(self, ip_addr, port):
"""
Gets the list of Agent(s) matching with given ip-address and port.
There MUST be only one Agent with a given ip-address and port.
If port is null - return all the agents(list of agent-ids)
with given ip-address.
"""
if not ip_addr:
raise ValueError
coll = self.online_agents_data
ip_docs = coll.where('ip_address', '==', ip_addr)
if not ip_docs:
return None
t = None
# if port is not given, just fetch a list of all
# the agents with this IP-Address
if not port:
agents_list = list()
for doc in ip_docs.stream():
agents_list.append(doc.id)
t = dict()
t['aid'] = agents_list
return t
port_docs = ip_docs.where('port', '==', port).stream()
count = 0
if port:
if port_docs:
for doc in port_docs:
if count == 1:
# if we ever have two agents using same ip-address
# and same port then raise an exception.
raise ValueError
count += 1
t = dict()
t["aid"] = [doc.id]
return t
# Public Methods on Database class
def create_new_agent(self, aid, name, owner_id='default', ip_addr=None, port=None):
if not name or not aid:
raise ValueError
if self._is_agent_id_in_use(aid):
print("hello- agent is already available")
raise ValueError
self._add_agent_main(aid)
self._add_agent_security(aid)
self._add_agent_info(aid, name, owner_id)
self._add_agent_connection(aid, ip_addr, port)
pass
def delete_agent(self, aid):
if not aid:
raise ValueError
self._delete_agent_main(aid)
self._delete_agent_security(aid)
self._delete_agent_info(aid)
self._delete_agent_connection(aid)
pass
def connect(self):
"""
connects to Database Server
"""
pass
def disconnect(self):
"""
disconnects to Database server
"""
pass
def get_agent(self, **kargs):
# 'name'=<some-agent-name>
# 'aid'=<some-agent-id>
# 'ip_address'=<some-ip-address>
pass
def get_agent_connection_info(self, agent_id):
if not agent_id:
raise ValueError
agent_doc = self.online_agents_data.document(agent_id)
snapshot = agent_doc.get(['ip_address', 'port'])
return snapshot.to_dict()
def has_agent_id(self, aid):
return self._is_agent_id_in_use(aid)
def has_agent_name(self, agent_name, owner_id='default'):
if not agent_name:
raise ValueError
d = self._get_agent_by_name(agent_name, owner_id)
if not d:
print("agent by Name not found {}".format(agent_name))
return False
return True
def has_agent_ip_addr_and_port(self, ip_addr, port):
if not ip_addr:
raise ValueError
d = self._get_agent_by_uri(ip_addr, port)
if not d:
return False
return True
def get_agent_by_ipaddr_and_port(self, ip_addr, port):
if not ip_addr:
raise ValueError
d = self._get_agent_by_uri(ip_addr, port)
return d
def set_agent_ipaddr_port(self, aid, ip_addr, port):
if not aid:
raise ValueError
agent_doc = self.online_agents_data.document(aid)
agent_doc.update({
'ip_address': ip_addr,
'port': port,
})
def verify_token(self, token_id, current_user=None):
if not token_id or not current_user:
raise ValueError
try:
decoded_token = auth.verify_id_token(token_id, check_revoked=True)
uid = decoded_token['uid']
            if current_user:
                if uid != current_user:  # 'is not' checks identity; use != for equality
                    raise ValueError('Token does not belong to User')
except auth.RevokedIdTokenError:
# Token revoked, inform the user to reauthenticate or signOut()
# return status and whether it needs to sign-in again
return (False, True)
        except auth.InvalidIdTokenError:
            # Token is invalid
            return (False, False)  # a tuple, not a set, matching the other return paths
return (True, False)
def update_device_status(self, device_id, device_dict=None, detected=False):
pass
    def update_agent(self, agent_id, agent_dict):
        # NOTE: self.agents is never defined in __init__, so this method (and
        # update_agent_fields / find_agent_by_ip_address below) look like
        # unfinished stubs left over from an older schema.
        sub_coll = self.agents.collection(agent_dict["aid"])
        doc = sub_coll.document(agent_dict["aid"]).set(agent_dict)
def update_agent_fields(self, agent_id, **kwargs):
if not agent_id:
raise ValueError
sub_coll = self.agents.collection(agent_id)
doc = sub_coll.document(agent_id)
for key, value in kwargs.items():
pass
def find_agent_by_ip_address(self, ip_address, port):
if not ip_address:
raise ValueError
all_agents = self.agents.collections().where('ip-address', '==', ip_address)
pass
def test_create_delete_agents(db):
db.create_new_agent("12567", "agent-name-12567",
"default", "192.168.1.15", 12121)
db.create_new_agent("12568", "agent-name-12568",
"default", "192.168.1.16", 12122)
db.delete_agent("12569")
# print(db._get_agent_by_name("agent-name-12568").to_dict())
t = db._get_agent_by_name("agent-name-12568")
print("agent-details for id: {} => {}".format("agent-name-12568", t))
t = db._get_agent_by_name("agent-name-12569")
print("agent-details for id: {} => {}".format("agent-name-12569", t))
t = db._get_agent_by_name("agent-name-12567")
print("agent-details for id: {} => {}".format("agent-name-12567", t))
# db.delete_agent("12567")
# db.delete_agent("12568")
# db.delete_agent("12568")
if __name__ == '__main__':
agent_obj = 'dummy'
db = FirestoreAdminClient(
agent_obj, root_collection="foo_testing")
db.create_new_agent(
"asdfaksdfkasdf", "agent-name-asdfad12567", "default", "192.168.1.15", 12121)
# test_create_delete_agents(db)
# db.create_new_agent("12569", "agent-name-12569", "default", "192.168.1.17", 12121)
# agent_dict = {"aid": "123456",
# "name": "agent-dummy-name-1", "host": "ubuntu"}
# gcp_db.update_agent(agent_dict["aid"], agent_dict)
# agent_dict = {"aid": "123455",
# "name": "agent-dummy-name-2", "host": "ubuntu"}
# gcp_db.update_agent(agent_dict["aid"], agent_dict)
# agent_dict = {"aid": "123458",
# "name": "agent-dummy-name-3", "host": "ubuntu"}
# gcp_db.update_agent(agent_dict["aid"], agent_dict)
|
[
"dharam.kumar.gupta@gmail.com"
] |
dharam.kumar.gupta@gmail.com
|
010bbeb1cf60a81058716701fda29d14b538f0c1
|
0d70e49189a95454ef22f1a21cbac02899e8accd
|
/controller/users.py
|
d00ac6fec9ba6b31f83d7faa0a48ad22fb07f7e1
|
[] |
no_license
|
EmilieM71/Projet-5-startup-Pur-Beurre
|
dd6ab2267333521a439b1cc8fdd0e9fa64487449
|
105e7080adfb14fb69b9b404cfda6621d5b7e8ec
|
refs/heads/master
| 2021-07-19T09:18:35.434100
| 2019-10-22T10:05:05
| 2019-10-22T10:05:05
| 197,351,182
| 0
| 1
| null | 2020-07-21T17:32:01
| 2019-07-17T08:46:27
|
Python
|
UTF-8
|
Python
| false
| false
| 738
|
py
|
from view.view_home_connect import ViewHomeConnect
class User:
""" This class is responsible for :
- display a hom window with welcome message and ask the user to
log in """
def __init__(self):
pass
@staticmethod
def open_home_connect_window():
"""This feature allows the home window to be displayed"""
# Opening the home connect window
view_home_connect = ViewHomeConnect()
view_home_connect.window.mainloop()
    def log_in(self):
        # open a window with a form to log in
        # check whether the entered credentials exist
        pass
    def create_an_account(self):
        # open the account-creation window
        pass
|
[
"martelemilie@hotmail.fr"
] |
martelemilie@hotmail.fr
|
6e0e8afd8e60cd83815c5e451171d6fc4ca0eaf1
|
9edb6c478c1c9769c989276834a9fb6aa32dbea9
|
/exercise/lesson7-4-prime.py
|
6ce90ad3701d65fce634105628140e948cfd3098
|
[] |
no_license
|
dsuz/techacademy-python3
|
67cbfeb3ee0803ee3de5e3470e1f647728686ca8
|
8d6f026002e0732a0819e5dadd1abe5efec042c1
|
refs/heads/master
| 2020-03-23T23:58:16.397310
| 2019-03-19T01:57:49
| 2019-03-19T01:57:49
| 142,266,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
# pattern 1: implement the assignment's conditions exactly as given.
# This is fine up to 100, but it breaks once the range grows: trial divisors
# 2, 3, 5 and 7 only catch composites up to 120 (11*11 = 121 slips through).
print('pattern 1')
for i in range(2, 101):
    if i != 2 and i % 2 == 0:
        continue
    if i != 3 and i % 3 == 0:
        continue
    if i != 5 and i % 5 == 0:
        continue
    if i != 7 and i % 7 == 0:
        continue
    print(i)
# pattern 2: a short, idiomatic Python version (for-else), though it wastes
# work by trying every divisor below n.
print('pattern 2')
for n in range(2, 101):
    for p in range(2, n):
        if n % p == 0:
            break
    else:
        print(n)
# pattern 3: only test odd candidates, and only trial-divide by odd numbers
# up to sqrt(n): if n had a divisor above sqrt(n), it would also have one
# below it, so stopping at odd**2 <= n is enough. Far fewer divisions.
print('pattern 3')
for n in range(1, 101):
    if n == 2:  # 2 is the only even prime; handle it directly
        print(n)
    elif n % 2 != 0 and n > 2:  # test odd n >= 3 for primality
        odd = 3  # current odd trial divisor
        while odd**2 <= n:
            if n % odd == 0:
                break
            odd += 2  # advance to the next odd number
        else:
            print(n)
|
[
"bboydaisuke@gmail.com"
] |
bboydaisuke@gmail.com
|
be5d3eb119948666460f7f37aaa237d499484b27
|
5451676b68c2c85bcfef46c23f596300cef137c4
|
/_source/conf.py
|
69b13df9efd7f0ed2a1343aa91094c8080c31104
|
[] |
no_license
|
telnetning/sphinx-notebook
|
16d19ed1ca7080eb773c7916433bbd7cb3b06a5c
|
32de101a4371b5f9dd0ed15a689243cbd70dc26a
|
refs/heads/master
| 2020-03-19T11:37:17.951625
| 2018-06-28T15:15:43
| 2018-06-28T15:15:43
| 136,445,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,504
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'telnetning\'s Notebook'
copyright = '2018, telnetning'
author = 'telnetning'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'testdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '12pt',
    'classoptions': ',oneside',
    'babel': '',      # required for the xeCJK setup below
    'inputenc': '',   # required
    'utf8extra': '',  # required
# Additional stuff for the LaTeX preamble.
'preamble': r"""
\usepackage{xeCJK}
\usepackage{indentfirst}
\setlength{\parindent}{2em}
\setCJKmainfont{WenQuanYi Micro Hei}
\setCJKmonofont[Scale=0.9]{WenQuanYi Micro Hei Mono}
\setCJKfamilyfont{song}{WenQuanYi Micro Hei}
\setCJKfamilyfont{sf}{WenQuanYi Micro Hei}
\XeTeXlinebreaklocale "zh"
\XeTeXlinebreakskip = 0pt plus 1pt
"""}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'test.tex', 'test Documentation',
'telnetning', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'test', 'test Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'test', 'test Documentation',
author, 'test', 'One line description of project.',
'Miscellaneous'),
]
|
[
"telnetning@gmail.com"
] |
telnetning@gmail.com
|
0489440d43a55fc3a209170cab154c65853eb696
|
c2a46158a91d3dd41e962230d182c80bfc88886e
|
/test/test_contacts_api.py
|
d2b476ce812d2c52031cb34f82bc33c081bc6e71
|
[] |
no_license
|
martinsauve/doli-swagger-client-python
|
e5f4308b6a38c34c4c0bcc796f6863e983b6d7da
|
b2606e6f9f3064fe55f81ab90ec524921086a159
|
refs/heads/master
| 2023-08-14T00:04:02.228383
| 2021-10-06T15:34:02
| 2021-10-06T15:34:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
# coding: utf-8
"""
Restler API Explorer
Live API Documentation # noqa: E501
OpenAPI spec version: 1
Contact: arul@luracast.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.contacts_api import ContactsApi # noqa: E501
from swagger_client.rest import ApiException
class TestContactsApi(unittest.TestCase):
"""ContactsApi unit test stubs"""
def setUp(self):
self.api = ContactsApi() # noqa: E501
def tearDown(self):
pass
def test_contacts_add_category(self):
"""Test case for contacts_add_category
Add a category to a contact 🔐 # noqa: E501
"""
pass
def test_contacts_create_user(self):
"""Test case for contacts_create_user
Create an user account object from contact (external user) 🔐 # noqa: E501
"""
pass
def test_contacts_remove_category(self):
"""Test case for contacts_remove_category
Remove the link between a category and a contact 🔐 # noqa: E501
"""
pass
def test_contacts_retrieve_by_email(self):
"""Test case for contacts_retrieve_by_email
Get properties of a contact object by Email 🔐 # noqa: E501
"""
pass
def test_contacts_retrieve_categories(self):
"""Test case for contacts_retrieve_categories
Get categories for a contact 🔐 # noqa: E501
"""
pass
def test_create_contacts(self):
"""Test case for create_contacts
Create contact object 🔐 # noqa: E501
"""
pass
def test_list_contacts(self):
"""Test case for list_contacts
List contacts 🔐 # noqa: E501
"""
pass
def test_remove_contacts(self):
"""Test case for remove_contacts
Delete contact 🔐 # noqa: E501
"""
pass
def test_retrieve_contacts(self):
"""Test case for retrieve_contacts
Get properties of a contact object 🔐 # noqa: E501
"""
pass
def test_update_contacts(self):
"""Test case for update_contacts
Update contact 🔐 # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"martin@billesandco.paris"
] |
martin@billesandco.paris
|
65bfbb32587539be33f925373460965abd890331
|
814420c64f39265378f414e32dffcd7fdec32a6e
|
/lab/util.py
|
308f6e2387468b8d40576a97e1516b0fb78edade
|
[
"MIT"
] |
permissive
|
Lagikna/QuLab
|
02210b35b830b25735a00197827b5d0daa351685
|
57d3e52feced73dcf2466e8582a5c2f32e0266bc
|
refs/heads/master
| 2020-03-18T19:44:22.785660
| 2018-12-13T07:26:11
| 2018-12-13T07:26:11
| 135,174,561
| 1
| 3
|
MIT
| 2019-12-16T06:10:19
| 2018-05-28T14:49:59
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
# -*- coding: utf-8 -*-
from qulab.util import (FWHM_of_normal_distribution, Std_of_norm_from_FWHM,
WTodBm, dBmToW, get_probility, get_projection_axes,
get_threshold_visibility, get_unit_prefix, kurtosis,
project_data_into_axes, skew, step_t, step_t_finite,
step_t_finite_overshoot, threshold)
|
[
"xfh1987@gmail.com"
] |
xfh1987@gmail.com
|
0bccfa94dce8e7de2c0cba1832bf7c7f84cb96d7
|
6228aec0cb85f6ca24ebb746adc5fddedfd11037
|
/pinhole/common/tests/test_utils.py
|
b5d2ceb62148b526a99f0f7f00ac4e533c9cb111
|
[] |
no_license
|
freyes/pinhole
|
dbadada04f569623de06dd426eb7e27cb15667c5
|
bb42647e7c20aed37e5288636285beaca9cce0bf
|
refs/heads/master
| 2016-09-05T10:42:05.485693
| 2014-01-20T22:16:13
| 2014-01-20T22:16:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from __future__ import absolute_import
from nose.tools import assert_equal
from ..utils import convert
class TestConvert(object):
def test(self):
l = [('CamelCase', 'camel_case'),
('CamelCamelCase', 'camel_camel_case'),
('Camel2Camel2Case', 'camel2_camel2_case'),
('getHTTPResponseCode', 'get_http_response_code'),
('get2HTTPResponseCode', 'get2_http_response_code'),
('HTTPResponseCode', 'http_response_code'),
('HTTPResponseCodeXYZ', 'http_response_code_xyz')]
for a, b in l:
yield self.check, a, b
def check(self, value, expected):
assert_equal(convert(value), expected)
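# A reference implementation that passes the cases above; a sketch only, since
# the real convert lives in ..utils and may differ in detail:
import re
def _reference_convert(name):
    s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()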
|
[
"freyes@tty.cl"
] |
freyes@tty.cl
|
a6541180d6c98c1e419752242e46052594b5af0a
|
7ce62f37c121499e630d4c74dd4e150701793c65
|
/project2/nothotdog/nothotdog/asgi.py
|
70f68e0fe4bed76b1e771d5009406b7a770661a7
|
[] |
no_license
|
jesper-trell/trell-hell
|
8c0ebf889ebccb4cb0d87d461dc33217d4e521e5
|
dc3fabeb7e9367ec2f6a4b4d7069f8b29bebac28
|
refs/heads/master
| 2023-03-16T13:15:09.595108
| 2021-03-09T21:20:27
| 2021-03-09T21:20:27
| 330,919,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for nothotdog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nothotdog.settings')
application = get_asgi_application()
|
[
"jesper@trell.se"
] |
jesper@trell.se
|
ad69431da5f2601f25b6cd966f3faee816fe0fad
|
8496c77dc1bb820d9b28d703697257cc508320bc
|
/experiments/predict.py
|
10de34998b7c799065018f59e3b93ee992ed8e48
|
[
"MIT"
] |
permissive
|
TomWattsChem/MTExplainer
|
bec980894328c094bcc6ee961a247939c14952a4
|
3018ca14dd0aa538e868505939826a89c9c57aa2
|
refs/heads/master
| 2023-06-17T01:29:02.629119
| 2021-07-16T11:40:20
| 2021-07-16T11:40:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from __future__ import print_function
from rdkit import Chem
smiles='B.C1CCOC1.CC(C)(C)C1=CCCc2occc21'
def smi_tokenizer(smi):
"""
Tokenize a SMILES molecule or reaction
"""
import re
    pattern = r"(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
regex = re.compile(pattern)
tokens = [token for token in regex.findall(smi)]
assert smi == ''.join(tokens)
return ' '.join(tokens)
m = Chem.MolToSmiles(Chem.MolFromSmiles(smiles))
print(smi_tokenizer(m))
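# hand-checked illustrations of the tokenizer output (not in the original):
# smi_tokenizer('CC(=O)O')    -> 'C C ( = O ) O'
# smi_tokenizer('c1ccccc1Br') -> 'c 1 c c c c c 1 Br'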
|
[
"davkovacs97@gmail.com"
] |
davkovacs97@gmail.com
|
9e3be7ab50d312c9e78cd72d21c20c101af1d795
|
99bf56586dbc1c9227b3edbbac49c1ef60ce1a4d
|
/const.py
|
4f6e16d3201ac501b96fad2f069dabea792d3737
|
[] |
no_license
|
StarryYJ/optimizer
|
9d8500adf751ed8b7ab4eb9604ebe6d85a0e326f
|
f006d6a6f62c405752f3b6cf156839bcf0b40a8a
|
refs/heads/master
| 2023-04-13T16:11:15.237702
| 2021-04-22T03:22:22
| 2021-04-22T03:22:22
| 357,015,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
"""
CovModel 协方差模型:
FACTOR_MODEL_DAILY 日度协方差模型
FACTOR_MODEL_MONTHLY 月度协方差模型
FACTOR_MODEL_QUARTERLY 季度协方差模型
IndustryClassification 行业分类标准:
SWS 申万一级
ZX 中信一级
SWS_1 申万一级,对非银金融进一步细分
风格因子:
beta 贝塔
book_to_price 账面市值比
earnings_yield 盈利率
growth 成长性
leverage 杠杆率
liquidity 流动性
momentum 动量
non_linear_size 非线性市值
residual_volatility 残余波动率
size 市值
"""
from enum import Enum
class CovModel(Enum):
FACTOR_MODEL_DAILY = 'factor_model/daily'
FACTOR_MODEL_MONTHLY = 'factor_model/monthly'
FACTOR_MODEL_QUARTERLY = 'factor_model/quarterly'
class IndustryClassification(Enum):
"""行业分类标准"""
SWS = 'shenwan' # 申万一级
ZX = 'zhongxin' # 中信一级
SWS_1 = 'shenwan_non_bank_financial_breakdown' # 申万一级,对非银金融进一步细分
STYLE_FACTORS = [
'beta', 'book_to_price', 'earnings_yield', 'growth', 'leverage', 'liquidity', 'momentum',
'non_linear_size', 'residual_volatility', 'size'
]
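# a minimal usage sketch (assumes this module is run directly):
if __name__ == '__main__':
    print(CovModel.FACTOR_MODEL_DAILY.value)  # 'factor_model/daily'
    print(IndustryClassification.SWS.value)   # 'shenwan'
    print(len(STYLE_FACTORS))                 # 10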
|
[
"yjin23@stevens.edu"
] |
yjin23@stevens.edu
|
34291b2488193e1a2f4ab57818aa28ba397c7b4c
|
96a23bee9d6948c652fd8697f9b37c83a5dd676f
|
/core/migrations/0006_auto_20170506_1307.py
|
9136534c6e38ea6925ad98e91a908a3151adb07b
|
[] |
no_license
|
Lok-tar-ogar/golden
|
3a115e5722badefc32e56e7507e051b1d2c5c760
|
b7aafd797897fd96bcfa46001a0054403fbfd0c2
|
refs/heads/master
| 2021-01-19T12:09:21.227211
| 2020-12-29T03:23:30
| 2020-12-29T03:23:30
| 88,020,785
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-06 05:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_contact'),
]
operations = [
migrations.RemoveField(
model_name='facility',
name='content',
),
        migrations.AddField(
            model_name='facility',
            name='madefac',
            field=models.CharField(max_length=50, null=True, verbose_name='制作厂商'),  # "manufacturer"
        ),
        migrations.AddField(
            model_name='facility',
            name='num',
            field=models.CharField(max_length=50, null=True, verbose_name='数量'),  # "quantity"
        ),
        migrations.AddField(
            model_name='facility',
            name='para',
            field=models.CharField(max_length=100, null=True, verbose_name='参数'),  # "parameters"
        ),
        migrations.AddField(
            model_name='facility',
            name='unit',
            field=models.CharField(max_length=50, null=True, verbose_name='单位'),  # "unit"
        ),
        migrations.AddField(
            model_name='facility',
            name='usage',
            field=models.CharField(max_length=50, null=True, verbose_name='用途'),  # "purpose"
        ),
]
|
[
"418586403@qq.com"
] |
418586403@qq.com
|
9b7d589c2fd574f22bdb413a8c7ffd98650369b2
|
d2a89523297d8973c4e68f59b5c7fb32e224c4b1
|
/cloud_sender.py
|
6f2779ea765285b0323fc72c9cb593823b22fdd4
|
[] |
no_license
|
IoBeer/brewbot-pi
|
8acdb8f2e782cd838529786860c2cb5f02145951
|
cf17393f724e649ad057eb9e6c17700d54dcd093
|
refs/heads/master
| 2021-01-20T07:18:07.166916
| 2017-05-02T04:09:35
| 2017-05-02T04:09:35
| 89,988,276
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,814
|
py
|
import dbus.mainloop.glib
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
from gi.repository import GObject
import paho.mqtt.client as mqtt
from google.cloud import pubsub
from commons import Commons
import sqlite3 as lite
from network_status import NetworkStatus
import NetworkManager
import json
import requests
from threading import Thread
import ConfigParser
__devices = {}
__d_args = ('sender', 'destination', 'interface', 'member', 'path')
__d_args = dict([(x + '_keyword', 'd_' + x) for x in __d_args])
TEMP_TOPIC = "smarthomebrew/sensor/temperatura"
SERIAL_NUMBER = Commons.getserial()
__network_status = NetworkStatus()
__db_con = None
__db_cur = None
__topic = None
def on_connect(client, data, rc):
client.subscribe([(TEMP_TOPIC, 0)])
def on_message(client, userdata, msg):
if __network_status.is_connected():
publish_temp_cloud(msg.payload)
else:
save_temperature(msg.payload, False)
def save_temperature(msg, sent):
global __db_con
global __db_cur
msg_data = msg.split("#")
__db_cur.execute('INSERT INTO temperatures (timestamp, temp, sent) VALUES(?, ?, ?)',
(msg_data[0], msg_data[1], sent))
__db_con.commit()
def publish_temp_cloud(msg):
global __topic
    # Data must be a bytestring; encode returns a new object, so keep the result
    cloud_msg = SERIAL_NUMBER + "#" + msg
    cloud_msg = cloud_msg.encode('utf-8')
message_id = __topic.publish(cloud_msg)
save_temperature(msg, True)
print('Message {} published.'.format(message_id))
def device_add_remove(*args, **kwargs):
global __d_args
global __devices
msg = kwargs['d_member']
if msg == "DeviceAdded":
# Argument will be the device, which we want to monitor now
args[0].connect_to_signal('StateChanged', device_state_change, **__d_args)
return
if msg == "DeviceRemoved":
if args[0].object_path in __devices:
del args[0].object_path
def device_state_change(*args, **kwargs):
global __devices
global __network_status
msg = kwargs['d_member']
path = kwargs['d_path']
device = NetworkManager.Device(path)
newState = NetworkManager.const('device_state', args[0])
    connectionType = None  # guard: the device may be neither Ethernet nor Wifi
    try:
if device.DeviceType == NetworkManager.NM_DEVICE_TYPE_ETHERNET:
connectionType = "Ethernet"
elif device.DeviceType == NetworkManager.NM_DEVICE_TYPE_WIFI:
connectionType = "Wifi"
except:
# D-Bus likely doesn't know about the device any longer,
# this is typically a removable Wifi stick
path = kwargs['d_path']
if path in __devices:
connectionType = __devices[path]["type"]
if newState == "activated":
path = kwargs['d_path']
__devices[path] = {"type": connectionType,
"active": True}
if connectionType == "Ethernet":
__network_status.ethernet = True
if connectionType == "Wifi":
__network_status.wifi = True
send_unsent_batch()
else:
if connectionType == "Ethernet":
__network_status.ethernet = False
if connectionType == "Wifi":
__network_status.wifi = False
def send_unsent_batch():
global __db_con
global __db_cur
try:
__db_con.row_factory = lite.Row
__db_cur = __db_con.cursor()
rows = __db_cur.execute("SELECT timestamp, temp AS temperature FROM temperatures WHERE sent = 0").fetchall()
json_history = json.dumps([dict(ix) for ix in rows])
        req = requests.post("<YOUR_FIREBASE_FUNCTIONS_ENDPOINT>/batchTemperatureUpdate",
                            headers={'content-type': 'application/json', 'x-serial': '%s' % Commons.getserial()},
                            data=json_history)
if req.status_code == requests.codes.ok:
__db_cur.execute("UPDATE temperatures SET sent = 1 WHERE sent = 0")
__db_con.commit()
else:
print("Erro ao enviar batch")
except lite.Error, e:
print "Error %s:" % e.args[0]
def send_fermentation_settings():
config = ConfigParser.RawConfigParser()
try:
config.read('fermentation.properties')
except ConfigParser.Error:
print("Error opening fermentation.properties. Missing?")
settings_data = json.dumps({Commons.getserial() : dict(config.items('Settings'))})
req = requests.post("<YOUR_FIREBASE_FUNCTIONS_ENDPOINT>/fermentationData",
headers={'content-type': 'application/json'},
data=settings_data)
if req.status_code == requests.codes.ok:
print "Fermentation settings sent!"
else:
print("Error sending fermentation data :(")
def start_sender():
try:
global __network_status
global __db_con
global __db_cur
global __topic
global __devices
global __network_status
################################################################################################################
# database connection #
################################################################################################################
__db_con = lite.connect('fermentation.db')
__db_cur = __db_con.cursor()
################################################################################################################
# Network d-bus communitcation #
################################################################################################################
NetworkManager.NetworkManager.connect_to_signal('DeviceAdded', device_add_remove, **__d_args)
NetworkManager.NetworkManager.connect_to_signal('DeviceRemoved', device_add_remove, **__d_args)
for dev in NetworkManager.NetworkManager.GetDevices():
print("DEVICE!!")
dev.connect_to_signal('StateChanged', device_state_change, **__d_args)
__devices[dev.object_path] = {}
if dev.DeviceType == NetworkManager.NM_DEVICE_TYPE_ETHERNET and \
NetworkManager.const('device_state', dev.State) == "activated":
__devices[dev.object_path]["active"] = True
__devices[dev.object_path]["type"] = "Ethernet"
__network_status.ethernet = True
if dev.DeviceType == NetworkManager.NM_DEVICE_TYPE_WIFI and \
NetworkManager.const('device_state', dev.State) == "activated":
__devices[dev.object_path]["active"] = True
__devices[dev.object_path]["type"] = "Wifi"
__network_status.wifi = True
print "Starting network manager thread"
thread = Thread(target = start_network_manager_loop)
thread.start()
################################################################################################################
# Send fermentation settings #
################################################################################################################
settings_thread = Thread(target=send_fermentation_settings)
settings_thread.start()
################################################################################################################
# PubSub Cloud topic setup #
################################################################################################################
print "Starting cloud connection"
pubsub_client = pubsub.Client()
__topic = pubsub_client.topic('temperature')
################################################################################################################
# Local MQTT server #
################################################################################################################
print "Starting local MQTT connection"
client = mqtt.Client(client_id='PUBSUB',
protocol=mqtt.MQTTv31)
client.on_connect = on_connect
client.on_message = on_message
client.connect("127.0.0.1", 1883)
client.loop_forever()
except KeyboardInterrupt:
print("Pressed CTRL+C! :(")
finally:
if __db_con:
__db_con.close()
def start_network_manager_loop():
print "Starting network manager loop"
loop = GObject.MainLoop()
loop.run()
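# a minimal entry-point sketch, not in the original module (which is presumably
# imported and driven by a supervisor script):
if __name__ == '__main__':
    start_sender()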
|
[
"netomarin@google.com"
] |
netomarin@google.com
|
2720a33f422a856681d8a8db81aaea996e93161a
|
52911a335c2c0d156987f18ded9d1e6d71b59eb5
|
/src/testspislave/test_rpigpioaccess.py
|
8f1f6b317159bdabbef985f2fc03a7772b9091ec
|
[
"MIT"
] |
permissive
|
anetczuk/SpiSlave
|
140343e533357f3a599c442163dfc18971cfb3f5
|
030cadf5cde8e71a9530e1e549854d60c4a8b3b3
|
refs/heads/master
| 2021-01-24T00:44:23.762501
| 2018-02-24T22:47:15
| 2018-02-24T22:47:15
| 122,780,155
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
# MIT License
#
# Copyright (c) 2017 Arkadiusz Netczuk <dev.arnet@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import unittest
imported_gpio = False
try:
from spislave.rpigpioaccess import RPiGPIOAccess
imported_gpio = True
except ImportError:
## ignore error
pass
#__scriptdir__ = os.path.dirname(os.path.realpath(__file__))
# logging.basicConfig(level=logging.INFO)
##
## Hardware layer -- no need to be tested
##
class RPiGPIOAccessTest(unittest.TestCase):
def setUp(self):
# Called before execution of each testfunction
pass
def test_instantiate(self):
try:
RPiGPIOAccess()
self.assertTrue( imported_gpio )
except NameError:
## happens when could not import GPIO access module
self.assertFalse( imported_gpio )
if __name__ == "__main__":
unittest.main()
|
[
"anetczuk@o2.pl"
] |
anetczuk@o2.pl
|
43011224b64a05abed58be208483403e34ffb0cb
|
51640458ffd9583a2c4839e1a3d94cac17a25692
|
/modes/display_mode/menu_top.py
|
eee544c100a871cc7ad54a50d170963b338c06b9
|
[
"MIT"
] |
permissive
|
ZhengPeng7/HCI_lite
|
eb67efceb0de2a5932eb24ebbecfd67089eb1bf0
|
9fff182d85ff5910dcfa9f6d8d8d7bcde38caa49
|
refs/heads/master
| 2020-03-16T15:36:23.541154
| 2018-06-27T13:20:42
| 2018-06-27T13:20:42
| 132,751,040
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
import cv2
import numpy as np
def attach_menu(frame_bg_with_menu, menu_dict, icon_len_side=80):
"""
Description: Attach menu on the top of frame_bg.
To choose some function, just move your hand over the corresponding icon.
Params:
frame_bg_with_menu: Input from web camera.
menu_dict: Icons to be attached on the top of a frame_bg_with_menu,
                   which consists of a color panel, a thickness regulator, and some icons with other special usages.
icon_len_side: The length of icon side.
"""
for idx, icon in enumerate(menu_dict):
val = menu_dict[icon]
frame_bg_with_menu[
:icon_len_side, idx*icon_len_side:(idx+1)*icon_len_side
] = val if isinstance(val, tuple) else cv2.imread(val)
return frame_bg_with_menu
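# a minimal usage sketch; the icon path and colors below are illustrative only:
if __name__ == '__main__':
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    menu = {
        'red_pen': (0, 0, 255),        # a BGR tuple is painted as a solid block
        'eraser': 'icons/eraser.png',  # a string path is loaded via cv2.imread
    }
    frame = attach_menu(frame, menu)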
|
[
"15732115701@163.com"
] |
15732115701@163.com
|
22aeb9101445feebf84676bdca207c9de219fff3
|
3507377718c6f0f04a57ca400c370d2434f057e7
|
/qe2e/tests/test_case.py
|
86164a3911fef3277d2306c7a29c3921522d3f06
|
[
"MIT"
] |
permissive
|
joshmarlow/qe2e
|
6092cc4ae2f2a30f60d30f1bd2b4fa2bf48063f0
|
2980dcd296a11c531ef7895c4cb35d9d9dd5c600
|
refs/heads/main
| 2023-04-17T01:38:11.323265
| 2021-04-24T20:57:50
| 2021-04-24T20:57:50
| 361,186,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,248
|
py
|
import unittest
from unittest import mock
import requests
from qe2e.core import AssertContains, AssertEq, Case, Exec, GetUrl, RunState
class TestCase(unittest.TestCase):
def setUp(self):
self.case = Case(
name="Login screen",
tags=["no-auth"],
steps=[
Exec(command="ls -a"),
GetUrl(
response_name="login_response",
url="localhost:8000/login",
),
AssertEq(
actual="login_response.status_code",
expected=200,
),
AssertEq(
actual="login_response.html.title",
expected="Login to continue",
),
AssertContains(
container="login_response.html.content",
content="You really want to login",
),
],
)
@mock.patch.object(requests, "get")
def test_execute(self, mock_get):
mock_get.return_value = mock.Mock(
status_code=200,
content="""
<html>
<title>Login to continue</title>
<body>
You really want to login
</body>
</html>
""",
)
expected_run_state: RunState = {
0: {"success": True},
1: {"success": True},
2: {"success": True},
3: {"success": True},
4: {"success": True},
"login_response": {
"html": {
"content": "\n"
"\n"
"Login to continue\n"
"\n"
" You really want "
"to login\n"
" \n"
"\n",
"title": "Login to continue",
},
"status_code": 200,
},
}
actual_run_result = self.case.evaluate()
assert actual_run_result == expected_run_state
def test_load(self):
json = {
"name": "Login screen",
"steps": [
{
"type": "exec",
"command": "ls -a",
},
{
"type": "get_url",
"url": "localhost:8000/login",
"response_name": "login_response",
},
{
"type": "assert_eq",
"actual": "login_response.status_code",
"expected": 200,
},
{
"type": "assert_eq",
"actual": "login_response.html.title",
"expected": "Login to continue",
},
{
"type": "assert_contains",
"container": "login_response.html.content",
"content": "You really want to login",
},
],
"tags": ["no-auth"],
}
actual_case = Case.from_dict(json)
assert actual_case == self.case
|
[
"joshmarlow@gmail.com"
] |
joshmarlow@gmail.com
|
f8c77cb5d3a65a103000fec35f91f539b362a2ca
|
050586709c1b728a31e6d273553fab925791e5c8
|
/test.py
|
ad416f1b26d3ee67fd2ce0bfdc66e219718b5efb
|
[] |
no_license
|
SuphakornHomnan/image_processing_hw1
|
745a982153c5980dfd2285a8d1d9d796d19e1471
|
b47a2a00c0de5caee84012ff32823daef212a40c
|
refs/heads/master
| 2021-01-26T13:33:51.913721
| 2020-02-26T17:51:46
| 2020-02-26T17:51:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
for i in range(255):
print(chr(i))
|
[
"kotokikung@gmail.com"
] |
kotokikung@gmail.com
|
fac03fb7685e4cadaa0707684ffa3ab42dc2eb53
|
00d4b3a28e7638ac7e5767e8ce1fef5c339a194e
|
/models/AttrNet.py
|
5efaf898e03bb0bf1060e0fe5e473022b6317c20
|
[] |
no_license
|
peternara/FashionPredictor-roi-pooling
|
a1b957975deeaf188bba2bb418a0b26247d0aacf
|
7a6aa7b3b957b99eba910ff393aa0e1137b4c916
|
refs/heads/master
| 2021-09-26T10:02:07.354728
| 2018-10-28T23:36:14
| 2018-10-28T23:36:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,531
|
py
|
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from config import cfg
from layer_utils.Pool import Pooling
import numpy as np
class AttrNet(nn.Module):
def __init__(self, num_classes=88, init_weights=True):
super(AttrNet, self).__init__()
# self.rois = rois
# the first 4 shared conv layers
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
# the first branch-- global image enter the 5th conv layer and fc1
self.conv5 = nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1,affine=True),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc1 = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.ReLU(True),
nn.Dropout(),
)
self.fc2 = nn.Sequential(
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
)
self.second_branch_fc1 = nn.Sequential(
nn.Linear(512*7*7, 512), #tbd
nn.ReLU(True),
nn.Dropout(),
)
self.second_branch_fc2 = nn.Sequential(
nn.Linear(11264, 4096),
nn.ReLU(True),
nn.Dropout(),
)
self.fc3_fine_tune = nn.Linear(8192, cfg.num_classes)
    # forward pass over a batch; u and v are the extra coordinate inputs
    # consumed by the Pooling layer
def forward(self, x, u, v):
# share first 4 conv layers
x = self.conv1(x) # 112
x = self.conv2(x) # 56
x = self.conv3(x) # 28
x = self.conv4(x) # 14
# first branch-- continue to enter 5th conv layer
first_branch_conv = self.conv5(x)
first_branch_conv = first_branch_conv.view(first_branch_conv.size(0), -1)
first_branch_out = self.fc1(first_branch_conv) # 4096D
first_branch_out = self.fc2(first_branch_out) # 4096D
pool_out = Pooling(x, u,v, self.second_branch_fc1, self.second_branch_fc2,first_branch_out) #4096D
# concat the output from the first and the second branch
both_branch = torch.cat((first_branch_out, pool_out), 1) # 8192D
output = self.fc3_fine_tune(both_branch)
# for attribute prediction: return output
# for image retrieval: return both_branch
#return both_branch
return output
def initialize_weights(layers):
    # use the in-place init functions; the non-underscore forms are deprecated
    if isinstance(layers, nn.Linear):
        nn.init.normal_(layers.weight, 0, 0.01)
        nn.init.constant_(layers.bias, 0)
    else:
        for m in layers:
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def build_network():
pretrained_weights = torch.load(cfg.VGG16_Weights)
pretrained_list = list(pretrained_weights.items())
my_model = AttrNet( num_classes=cfg.num_classes)
my_model_kvpair = my_model.state_dict()
    # load ImageNet-trained vgg16_bn weights
    count = 0
    # copy all conv layers (conv1-conv5) and fc1 from the pretrained ImageNet
    # weights (the first 82 entries of the vgg16_bn state dict)
    for key, value in my_model_kvpair.items():
        if count < 82:  # this is for vgg16 pretrained weights
            my_model_kvpair[key] = pretrained_list[count][1]  # take the tensor, not the (name, tensor) pair
        count += 1
    my_model.load_state_dict(my_model_kvpair)  # without this the copied weights never reach the model
# initialize fc2,fc3 and second_branch fc
initialize_weights(my_model.second_branch_fc1)
initialize_weights(my_model.second_branch_fc2)
initialize_weights(my_model.fc3_fine_tune)
return my_model
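# a minimal sanity-check sketch, assuming cfg.VGG16_Weights points at a
# vgg16_bn checkpoint on disk:
if __name__ == '__main__':
    model = build_network()
    n_params = sum(p.numel() for p in model.parameters())
    print('AttrNet built with %d parameters' % n_params)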
|
[
"zwliu.hust@gmail.com"
] |
zwliu.hust@gmail.com
|
dafcbc06fa2fa0b0b7683913a3764286f52d34cf
|
25a4ea33eefc1840c730c9d99a03f56c8fe841b3
|
/nre.py
|
02c51165ecc83ae8a8d669cbd53241d114da845d
|
[] |
no_license
|
mtfrigo/DL-NLU
|
81956a641edd9ba52ffc8d67891e0c526f5e768a
|
6600c7a7fc53ff17a4390eaf5bc30df1447703ff
|
refs/heads/master
| 2020-12-11T13:14:39.468377
| 2020-01-20T12:48:33
| 2020-01-20T12:48:33
| 233,857,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,955
|
py
|
import numpy as np
from numpy import zeros
import pandas as pd
from nltk.tokenize import WhitespaceTokenizer
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import text, sequence
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, Input
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, SpatialDropout1D, Bidirectional
from sklearn.model_selection import train_test_split
import dataset as Dataset
from dataset import DataParser, DatasetHandler
# set parameters:
VOCAB_SIZE = 8000
MAXLEN = 50
BATCH_SIZE = 32
EMBEDDING_DIM = 100
FILTERS = 250
KERNEL_SIZE = 3
HIDDEN_DIM = 100
EPOCHS = 10
VALIDATION_SPLIT = 0.2
def train(X_tr, y_tr):
output_dim = 50
word_input = Input(shape=(MAXLEN,))
model = Embedding(input_dim=VOCAB_SIZE, output_dim=output_dim, input_length=MAXLEN)(word_input)
model = SpatialDropout1D(0.1)(model)
model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(len(tags), activation="softmax"))(model)
model = Model(word_input, out)
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
checkpointer = ModelCheckpoint(filepath = 'NER_model.h5',
verbose = 0,
mode = 'auto',
save_best_only = True,
monitor='val_loss')
history = model.fit(X_tr, y_tr.reshape(*y_tr.shape, 1),
batch_size=BATCH_SIZE, epochs=EPOCHS, shuffle=True,
validation_split=VALIDATION_SPLIT, verbose=1, callbacks=[checkpointer])
return history
if __name__ == "__main__":
dataset = Dataset.load()
handler = DatasetHandler(dataset)
getter = DataParser(handler.data, handler.tags, handler.intent_labels)
sentences = getter.sentences
tags = getter.tags
labels = [[s[1] for s in sent] for sent in getter.sentences]
sentences = [" ".join([s[0] for s in sent]) for sent in getter.sentences]
tokenizer = Dataset.getTokenizer()
# --- Vocabulary as dict. Key => word, value => word index
print('Found %s unique tokens.' % len(tokenizer.word_index))
sequences = tokenizer.texts_to_sequences(handler.texts) # ---- Transforms each text to a sequence of numbers
    # NOTE: the intent model did not use padding="post"
X = pad_sequences(sequences, maxlen=MAXLEN, padding="post") # --- Pads sequence to same length
# ONLY FOR NER MODEL!!
# --- Transforms all TAGS to NUMBERS and padds to maxlen
y_ner = [[getter.tag2idx[l_i] for l_i in l] for l in labels]
y_ner = pad_sequences(maxlen=MAXLEN, sequences=y_ner, value=getter.tag2idx["O"])
# --- Split data into train and test
# NER MODEL
X_tr, X_te, y_tr, y_te = train_test_split(X, y_ner, test_size=0.2, shuffle=True)
train(X_tr, y_tr)
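    # a minimal inference sketch (an addition, assuming the checkpoint above
    # was written by the ModelCheckpoint callback):
    from keras.models import load_model
    ner_model = load_model('NER_model.h5')
    y_pred = ner_model.predict(X_te).argmax(axis=-1)
    print(y_pred.shape)  # (num_test_sentences, MAXLEN) array of tag indices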
|
[
"matheus.tfrigo@gmail.com"
] |
matheus.tfrigo@gmail.com
|
fe5319852c04b489d41757d452c90772be5a0a24
|
4d0d162ead661f641b0ed76b47b7bba3a769d34d
|
/tienda/api/tests.py
|
582bd0f63dac69e06d687dffd77f59a5ad39e950
|
[
"MIT"
] |
permissive
|
BaniMontoya/Django_api_store
|
d44f92f4c33e6fe4b6203ebb92ff595ab325fc6d
|
9e58bce3efafdd821367522336caee0531a5410c
|
refs/heads/main
| 2023-03-17T05:35:05.544741
| 2021-02-25T04:13:20
| 2021-02-25T04:13:20
| 342,119,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,342
|
py
|
import tempfile
from django.contrib.auth import get_user_model
from PIL import Image
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from api import models as tienda_models
import logging
logging.disable(logging.CRITICAL)
class TestCaseStock(APITestCase):
def setUp(self):
self.user = self.setup_user()
self.token = Token.objects.create(user=self.user)
self.token.save()
self.client.defaults['HTTP_AUTHORIZATION'] = 'Token {}'.format(
self.token.key)
@staticmethod
def setup_user():
User = get_user_model()
return User.objects.create_user(
'test',
email='testuser@test.com',
password='test'
)
def image(self):
super().setUp()
self.file = tempfile.NamedTemporaryFile(suffix='.jpg')
image = Image.new('RGB', (100, 100))
image.save(self.file.name)
return self.file
def test_producto(self):
'''
Testing Producto Model
'''
data = {
"nombre": "test",
"presentacion": "test",
"marca": "test",
"fabricante": "test",
"foto": self.image(),
"descripcion": "test",
"nivel_azucar": "test",
"nivel_sal": "test",
"nivel_grasa": "test",
"estado": "test"
}
producto = self.client.post('/api/producto/', data, format='multipart')
data = {
"nombre": "test",
"presentacion": "test",
"marca": "test",
"fabricante": "test",
"foto": self.image(),
"descripcion": "test",
"nivel_azucar": "test",
"nivel_sal": "test",
"nivel_grasa": "test",
"estado": "test"
}
producto = self.client.post('/api/producto/', data, format='multipart')
ret = self.client.get('/api/producto/', format='json',
HTTP_AUTHORIZATION='Token {}'.format(self.token.key))
self.assertEqual(tienda_models.Producto.objects.all().count(), 2)
ret = self.client.get(
f'/api/producto/{producto.data["id"]}/', format='json')
self.assertEqual(ret.data['id'], 2)
'''
Testing Categoria Model
'''
data = {
"nombre": "test",
"descripcion": "test",
"icono": self.image(),
}
categoria = self.client.post(
'/api/categoria/', data, format='multipart')
ret = self.client.get(
f'/api/categoria/{categoria.data["id"]}/', format='json')
self.assertEqual(tienda_models.Categoria.objects.all().count(), 1)
self.assertEqual(ret.data['id'], 1)
data = {
"nombre": "test",
"descripcion": "test",
"icono": self.image(),
}
categoria = self.client.post(
'/api/categoria/', data, format='multipart')
ret = self.client.get(
f'/api/categoria/{categoria.data["id"]}/', format='json')
self.assertEqual(tienda_models.Categoria.objects.all().count(), 2)
self.assertEqual(ret.data['id'], 2)
'''
Testing SubCategoria Model
'''
data = {
"nombre": "test",
"descripcion": "test",
"icono": self.image(),
"categoria": categoria.data["id"]
}
subcategoria = self.client.post(
'/api/subcategoria/', data, format='multipart')
ret = self.client.get(
f'/api/subcategoria/{subcategoria.data["id"]}/', format='json')
self.assertEqual(tienda_models.SubCategoria.objects.all().count(), 1)
self.assertEqual(ret.data['id'], 1)
data = {
"nombre": "test",
"descripcion": "test",
"icono": self.image(),
"categoria": categoria.data["id"]
}
subcategoria = self.client.post(
'/api/subcategoria/', data, format='multipart')
ret = self.client.get(
f'/api/subcategoria/{subcategoria.data["id"]}/', format='json')
self.assertEqual(tienda_models.SubCategoria.objects.all().count(), 2)
self.assertEqual(ret.data['id'], 2)
'''
Testing Tienda Model
'''
data = {
"id_ciudad": "test",
"nombre": "test",
"logo": self.image(),
}
tienda = self.client.post('/api/tienda/', data, format='multipart',
HTTP_AUTHORIZATION='Token {}'.format(self.token.key))
ret = self.client.get(
f'/api/tienda/{tienda.data["id"]}/', format='json')
self.assertEqual(tienda_models.Tienda.objects.all().count(), 1)
self.assertEqual(ret.data['id'], 1)
data = {
"id_ciudad": "test",
"nombre": "test",
"logo": self.image(),
}
tienda = self.client.post('/api/tienda/', data, format='multipart',
HTTP_AUTHORIZATION='Token {}'.format(self.token.key))
ret = self.client.get(
f'/api/tienda/{tienda.data["id"]}/', format='json')
self.assertEqual(tienda_models.Tienda.objects.all().count(), 2)
self.assertEqual(ret.data['id'], 2)
'''
Testing Stock_En_Tienda Model
'''
data = []
how_many_elem = 150000
        import time
for index in range(0, how_many_elem):
data.append({
"pvp": 1.0,
"tiene_iva": True,
"estado": "published",
"precio_compra": 1.0,
"margen_ganancia": 1.0,
"id_tienda": tienda.data["id"],
"id_producto": producto.data["id"],
"categoria_id": categoria.data["id"]
})
tiempo_inicial = time.time()
self.client.post(
'/api/stock_en_tienda/', data=data, format='json')
        print(time.time()-tiempo_inicial, ", to send the POST with 150,000 Stock_En_Tienda objects")
self.assertEqual(
tienda_models.Stock_En_Tienda.objects.all().count(), how_many_elem)
'''
Testing Delete
'''
ret = self.client.delete(
f'/api/stock_en_tienda/{tienda_models.Stock_En_Tienda.objects.all().first().id}/', format='json')
self.assertEqual(
tienda_models.Stock_En_Tienda.objects.all().count(), how_many_elem-1)
ret = self.client.delete(
f'/api/subcategoria/{subcategoria.data["id"]}/', format='json')
self.assertEqual(tienda_models.SubCategoria.objects.all().count(), 1)
tienda_models.SubCategoria.objects.all().delete()
ret = self.client.delete(
f'/api/categoria/{categoria.data["id"]}/', format='json')
self.assertEqual(tienda_models.Categoria.objects.all().count(), 1)
ret = self.client.delete(
f'/api/producto/{producto.data["id"]}/', format='json')
self.assertEqual(tienda_models.Producto.objects.all().count(), 1)
ret = self.client.delete(
f'/api/tienda/{tienda.data["id"]}/', format='json')
self.assertEqual(tienda_models.Tienda.objects.all().count(), 1)
|
[
"banimontoya@gmail.com"
] |
banimontoya@gmail.com
|
2431e59efa5919189179d9c5146283b232285086
|
ff4b578ae48434cd656fa8f3a88fe332590ad02f
|
/buffer.py
|
03e02c08d3308eec7941c4d9aa6cfad62114fc89
|
[
"MIT"
] |
permissive
|
abefetterman/rl-testing
|
1944729cc76b47cbbcade0758201d1bfde377585
|
557eadd5895e629632e701a27cdafde96e210215
|
refs/heads/master
| 2020-03-17T16:55:56.867948
| 2018-10-15T19:02:07
| 2018-10-15T19:02:07
| 133,768,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
import numpy as np
import random
class ReplayBuffer(object):
    def __init__(self, maxlen):
        self.buf = []
        self.maxlen = maxlen
    def __len__(self):
        return len(self.buf)
    def add(self, new):
        if len(self.buf) >= self.maxlen:
            self.buf.pop(0)  # O(n); a collections.deque(maxlen=...) would be cheaper
        self.buf.append(new)
def sample(self, count):
s = random.sample(self.buf, count)
return [x for x in zip(*s)]
class PriorityBuffer(object):
    def __init__(self, maxlen, alpha=1, beta=1):
        self.buf = []
        self.priorities = []
        self.priorities_max = 1
        self.alpha = alpha
        self.beta = beta
        self.maxlen = maxlen
    def __len__(self):
        return len(self.buf)
    def add(self, new):
        if len(self.buf) >= self.maxlen:
            self.buf.pop(0)
            self.priorities.pop(0)
        self.buf.append(new)
        self.priorities.append(self.priorities_max)
def sample(self, count):
buffer_size = len(self.buf)
p = np.array(self.priorities) ** self.alpha
p = p / np.sum(p)
idxs = np.random.choice(buffer_size, count, p=p)
p_choice = p[idxs]
buf_choice = [self.buf[x] for x in idxs]
is_weights = (count * p_choice) ** ( - self.beta)
is_weights = is_weights / np.max(is_weights)
sample = [x for x in zip(*buf_choice)]
return sample + [idxs, is_weights]
def update_priorities(self, idxs, td_error):
for i,e in zip(idxs,td_error):
self.priorities[i] = e
self.priorities_max = max(self.priorities)
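# a minimal usage sketch of prioritized replay; the transition layout below
# (state, action, reward) is illustrative only:
if __name__ == '__main__':
    buf = PriorityBuffer(1000, alpha=0.6, beta=0.4)
    for i in range(100):
        buf.add((np.random.randn(4), i % 2, float(i)))
    states, actions, rewards, idxs, is_weights = buf.sample(8)
    buf.update_priorities(idxs, np.abs(np.random.randn(8)) + 1e-6)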
|
[
"abe@nomiku.com"
] |
abe@nomiku.com
|
c68931669cfa6c4fe750ddb797f8060f030a8daf
|
48d79f14fc307022913217b4dd1ea3abbaa01c65
|
/Stepper_Motor_Control/inverse_kinematics_control.py
|
21dd46681ff5d0a098fce9c1e96807318c5e77ea
|
[] |
no_license
|
zmykevin/SPP2_Kevin
|
506d2e12f456f470b2383bc27cd106d5c3d250ed
|
6aa11db1d267a2a1f0c1b38c6604bc80fb757b83
|
refs/heads/master
| 2020-04-06T03:40:48.392814
| 2015-12-15T05:19:00
| 2015-12-15T05:19:00
| 38,252,741
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,544
|
py
|
__author__ = 'kevin'
from stepper_motor_setup import *
from camera_setup import *
from time import sleep
import math
def Forward_Kinematics(theta_1,theta_2):
h = 27
z = 1521
r = 32
assert (theta_2 != 0),"theta_2 cannot equal to 0"
X = math.cos(theta_1)*h-math.sin(theta_1)*(z+r*math.cos(-theta_2))/(math.tan(-theta_2))+35
Y = h*math.sin(theta_1)+math.cos(theta_1)*(z+r*math.cos(-theta_2))/math.tan(-theta_2)
real_word_coordinate = [X,Y]
return real_word_coordinate
if __name__ == "__main__":
print('The inverse_kinematics_control start')
###################Initialize Motor#####################
[Stepper_1,Stepper_2] = stepper_init()
#Setup the current position as zero position
Stepper_1.setCurrentPosition(0,0)
Stepper_2.setCurrentPosition(0,0)
#Engage the Stepper Motors
Stepper_1.setEngaged(0,True)
Stepper_2.setEngaged(0,True)
#Setup speed, acceleration, and current
setup_limit(Stepper_1,0,120000,1.2,40000)#acceleration,current & velocity
setup_limit(Stepper_2,0,240000,0.6,120000)
sleep(2)
###################Initialize Camera#####################
Flycam = flycamera_init()
Flycam.start_capture()
#The main program is here
try:
while(True):
angle_1 = float(raw_input("Please enter the angle for motor 1"))
angle_2 = float(raw_input("Please enter the angle for motor 2"))
theta_1 = (angle_1-2.5)/180.0*math.pi
theta_2 = angle_2/180.0*math.pi
            print(theta_1)
            print(theta_2)
floor_coord = Forward_Kinematics(theta_1,theta_2)
print("X:%f, Y:%f"%(floor_coord[0],floor_coord[1]))
step_1 = angel2step(angle_1,1)
step_2 = angel2step(angle_2,2)
Stepper_1.setTargetPosition(0,step_1)
Stepper_2.setTargetPosition(0,step_2)
sleep(1)
except KeyboardInterrupt:
pass
################Terminate camera and Motors#####################
Flycam.stop_capture()
Flycam.disconnect()
try:
#Move to the original spot
Stepper_1.setTargetPosition(0,0)
Stepper_2.setTargetPosition(0,0)
sleep(3)
#Turn off the engage of the motors
Stepper_1.setEngaged(0,False)
Stepper_2.setEngaged(0,False)
sleep(1)
Stepper_1.closePhidget()
Stepper_2.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
exit(0)
|
[
"zmykevin@umich.edu"
] |
zmykevin@umich.edu
|
81bfffd21903b2d8852165aa09091831171679bf
|
62a20685d1044f75efb13de25db97bfac7e05236
|
/build/icontrol/catkin_generated/pkg.develspace.context.pc.py
|
ab6a8fe5b2a1f02a87d6bbc10d69c3a559abf643
|
[] |
no_license
|
Irakli359/obst_avoidance_Irakli
|
184dc2dfd115b964ab12efae0bd6f723e440a12f
|
8503c69df885eb34d26733ba21335357db6f7654
|
refs/heads/master
| 2020-05-27T02:10:06.707618
| 2019-05-24T18:22:11
| 2019-05-24T18:22:11
| 188,448,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "icontrol"
PROJECT_SPACE_DIR = "/home/irakli/mybot_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"ikelb14@freeuni.edu.ge"
] |
ikelb14@freeuni.edu.ge
|
b99388307239664522c8b4196709221a72b6ab45
|
f14c198ad1b8f6324956e5bcdb4cd910c67eb3e1
|
/exercises/migrations/0006_exercise_relevant.py
|
723e7b4f8656890e09a7dc417e0bcff931632395
|
[
"Unlicense"
] |
permissive
|
rattletat/python-homework-server
|
8150e9059d43cc24677a632fbac503856a66e971
|
abfac831ed45cc567a6a1610edee934200ffada7
|
refs/heads/master
| 2022-12-26T02:25:48.455504
| 2020-10-01T11:08:24
| 2020-10-01T11:08:24
| 258,362,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Generated by Django 3.0.5 on 2020-05-13 18:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exercises', '0005_auto_20200512_1116'),
]
operations = [
migrations.AddField(
model_name='exercise',
name='relevant',
field=models.BooleanField(default=True, verbose_name='Gibt an ob die Aufgabe in die Wertung einfließt.'),
),
]
|
[
"michael.brauweiler@posteo.de"
] |
michael.brauweiler@posteo.de
|
1d9088244fedb844902f6ec924f6f0df2dbc23d0
|
4d5f94dc96edc28d550b187fea51a89a8a5ba5f9
|
/products/admin.py
|
de0b450bec84d197b28524ba1cb88d9b395a1bbe
|
[] |
no_license
|
curiousTauseef/Django-e-Commerce
|
f4c36c416150cef63d614739ae309542c33a8bb6
|
6221b83966c6a488fdd7c13a443bc58aee4b8d6f
|
refs/heads/master
| 2022-09-17T23:49:28.406391
| 2019-09-18T20:40:32
| 2019-09-18T20:40:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
from django.contrib import admin
from .models import Category, Product
# Register your models here.
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'slug']
prepopulated_fields = {'slug': ('name',)}
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'price', 'available', 'created', 'updated']
list_filter = ['available', 'created', 'updated']
list_editable = ['price', 'available']
prepopulated_fields = {'slug': ('name',)}
|
[
"hunter.me33@gmail.com"
] |
hunter.me33@gmail.com
|