Dataset schema (column: type and observed range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2 to 616
- content_id: string, length 40
- detected_licenses: list, length 0 to 69
- license_type: string, 2 classes
- repo_name: string, length 5 to 118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4 to 63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k to 686M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 2 to 10.3M
- extension: string, 246 classes
- content: string, length 2 to 10.3M
- authors: list, length 1
- author_id: string, length 0 to 212
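Each row below is one source-file record; the content column holds the full file text. A minimal sketch of how such a table could be inspected, assuming the rows have been exported to a local Parquet file (the file name is a placeholder, not part of the dataset):

import pandas as pd

# 'files.parquet' is a hypothetical local export of this table.
df = pd.read_parquet('files.parquet')
print(df[['repo_name', 'path', 'license_type', 'length_bytes']].head())
print(df.loc[0, 'content'][:200])  # first 200 characters of the first file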
ecc9df8a0632fe31e32b4f814d1e56f458ca38fc
|
77515dad095d1e8254ccc05fee4f20b019759142
|
/project/views.py
|
faca923b1f4d649fab8af807162bdbf993cdbc90
|
[] |
no_license
|
vesperalwall860/fin_o
|
1dea92b1094ca6d2d0476f472c0a6a35787c59b0
|
170d34d7968e197f3569ecdc8f8b52153410dd83
|
refs/heads/master
| 2020-12-25T10:36:18.687041
| 2016-07-05T07:01:21
| 2016-07-05T07:01:21
| 62,416,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
from flask import render_template, abort
from jinja2 import TemplateNotFound
from project import app
@app.route('/')
def index():
try:
return render_template('index.html')
except TemplateNotFound:
abort(404)
|
[
"vesperalwall860@gmail.com"
] |
vesperalwall860@gmail.com
|
7d4f9cdc84e95ca0e8f5f3ead8f8e11eb445f36a
|
e44a24cfe359bc161f285096ee0e0829fedc6ad3
|
/natlet/novatlet/migrations/0006_auto_20200228_0806.py
|
dc54a9a39fdf17dfcdd4453396f4b43dacb6b607
|
[] |
no_license
|
palachintosh/natlet-project
|
ef1fe03ed1cbf6fdf75011453e91e315b040a6d0
|
e5fe0785123a6d1a5c9cbad8e2689112ea672862
|
refs/heads/master
| 2022-09-22T01:11:23.942003
| 2020-05-27T07:54:34
| 2020-05-27T07:54:34
| 264,766,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# Generated by Django 2.2.10 on 2020-02-28 08:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('novatlet', '0005_auto_20200228_0805'),
]
operations = [
migrations.AlterField(
model_name='location',
name='dir_object',
field=models.CharField(blank=True, default='', max_length=100, verbose_name='Location'),
),
]
|
[
"parser9000@gmail.com"
] |
parser9000@gmail.com
|
057f167b065d2afdc42adc634c407aa2f6d99b86
|
72c19aa3a168f217421afba80b59e8dfb1d5fb47
|
/dishonest/spiders/gsxt.py
|
5aab01f3031edef248999c2f44ca71f265b8b685
|
[] |
no_license
|
BJLIYI/dishonest
|
435819a9a2369f7bcd06d16b5f083e8765c04981
|
249d9cfb9d7841f8985c3cdc4643aaed78b791b1
|
refs/heads/master
| 2020-04-23T15:25:34.881011
| 2019-02-18T10:40:02
| 2019-02-18T10:40:02
| 171,264,797
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,322
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import json
from dishonest.items import DishonestItem
import re
from datetime import datetime
"""
5.2.4 完善爬虫
步骤:
1. 解析页面中的城市名称和id, 构建公告信息的URL
2. 解析失信企业公告信息
"""
class GsxtSpider(scrapy.Spider):
name = 'gsxt'
allowed_domains = ['gsxt.gov.cn']
start_urls = ['http://www.gsxt.gov.cn/corp-query-entprise-info-xxgg-100000.html']
    # URL for the dishonest-company announcement info
data_url = 'http://www.gsxt.gov.cn/affiche-query-area-info-paperall.html?noticeType=21&areaid=100000¬iceTitle=®Org={}'
def parse(self, response):
# print(response.status)
# print(response.text)
        # get the list of div tags holding the province/municipality names and ids
        divs = response.xpath('//div[@class="label-list"]/div')
        # iterate over the divs, pulling out each province/municipality name and id
for div in divs:
area = div.xpath('./label/text()').extract_first()
id = div.xpath('./@id').extract_first()
# print(area)
# print(id)
data_url = self.data_url.format(id)
for i in range(0, 50, 10):
data = {
'start':str(i),
'length':'10'
}
yield scrapy.FormRequest(data_url, formdata=data, callback=self.parse_data, meta={'area': area})
def parse_data(self, response):
"""取出传递过来的区域"""
area = response.meta['area']
# print(response.text)
        # parse the JSON string into a dict
results = json.loads(response.text)
        # get the list of announcements
datas = results['data']
        # iterate over datas, handling each announcement
for data in datas:
item = DishonestItem()
            # notice title
            notice_title = data['noticeTitle']
            # notice content
notice_content = data['noticeContent']
            # dishonest party name
names = re.findall('关?于?(.+?)的?列入.*', notice_title)
item['name'] = names[0] if len(names) != 0 else ''
name_card_num_s = re.findall('经?查?,?(.+?)\s*(统一社会信用代码/注册号:(\w+)):.*', notice_content)
if len(name_card_num_s) != 0:
item['name'] = name_card_num_s[0][0]
                # dishonest party registration number
item['card_num'] = name_card_num_s[0][1]
            # dishonest party age; these are all companies, so age is always 0
item['age'] = 0
            # area
item['area'] = area
            # legal representative (a company here)
item['business_entity'] = ''
            # dishonest-conduct content
item['content'] = notice_content
            # publish date
publish_ms = data['noticeDate']
publish_date = datetime.fromtimestamp(publish_ms / 1000)
item['publish_date'] = publish_date.strftime('%Y-%m-%d')
            # publishing/enforcing authority
item['publish_unit'] = data['judAuth_CN']
            # creation date
item['create_date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            # update date
item['update_date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# print(item)
yield item
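# Illustration (not part of the original spider): what the notice-title regex
# above extracts; the sample title is made up.
#   >>> re.findall('关?于?(.+?)的?列入.*', '关于某某公司列入严重违法失信企业名单的公告')
#   ['某某公司']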
|
[
"18911594826@163.com"
] |
18911594826@163.com
|
353da4dae3088c0582f0548c7cbac5f55304498e
|
ee47140dc1cd015f783f5674201a5fa0a5765f74
|
/clitool/utils/xmind_sdk/xmind.py
|
67c13ad6b4438098d244c752144355558c8b70b3
|
[
"MIT"
] |
permissive
|
baimingjiang/clitool
|
6d5597072e62b2ad068114b5cbfdc21d833b8a9f
|
100e3f54cd313a3d7609b9bad73da85a4d54318e
|
refs/heads/master
| 2020-07-08T14:18:21.785813
| 2019-08-22T06:22:45
| 2019-08-22T06:22:45
| 203,698,786
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .core.loader import WorkbookLoader
from .core.saver import WorkbookSaver
class xmind(object):
    @classmethod
    def load(cls, path):
        loader = WorkbookLoader(path)
        return loader.get_workbook()
    @classmethod
    def save(cls, workbook, path=None):
        saver = WorkbookSaver(workbook)
        saver.save(path)
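# Hypothetical usage sketch (the file names are placeholders, not part of the
# original module):
#   workbook = xmind.load('plan.xmind')
#   xmind.save(workbook, 'plan_copy.xmind')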
|
[
"baimingjiang@pwrd.com"
] |
baimingjiang@pwrd.com
|
75e69d2795658c8d45410571adc4f419eee9b539
|
8d3ac2bee2d8ad19ffee55eaf80986e748ac59e2
|
/selfmodule/datetimeTest.py
|
9edf7d00ed20dd71284edf297375e7e2e012511d
|
[] |
no_license
|
TianDehua/pythonLearn
|
58c2aae8a8b95a6424a63c53cde1245fb2811a5d
|
84e6eb98fc1389b3123b17ff749fecd4cb78577a
|
refs/heads/master
| 2021-05-24T10:39:43.309036
| 2020-05-09T03:54:16
| 2020-05-09T03:54:16
| 253,523,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
from datetime import datetime, timedelta, timezone
# test datetime
now = datetime.now()
print(now)
print('type now=', type(now))
# create a datetime from numbers
dt = datetime(1970, 2, 28, 20, 10, 50)
print(dt)
# timestamp
timestamp = dt.timestamp()
print(timestamp)  # 5055050.0 here; unit is seconds, the digits after the decimal point are milliseconds
# create a datetime from a timestamp
dt = datetime.fromtimestamp(timestamp)
print(dt)
print(datetime.utcfromtimestamp(timestamp))
# create a datetime from a string
dt = datetime.strptime('2015/06/01 18:19:59', '%Y/%m/%d %H:%M:%S')
print(dt)
print(dt.strftime('%a, %d %H:%M'))
# datetime arithmetic
print(now + timedelta(hours=10))
print(now + timedelta(days=2, hours=10))
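# Added illustration (not part of the original script): with an explicit UTC
# tzinfo the timestamp no longer depends on the local timezone. The 5055050.0
# noted above is what the naive version prints in UTC+8.
utc_dt = datetime(1970, 2, 28, 20, 10, 50, tzinfo=timezone.utc)
print(utc_dt.timestamp())  # 5083850.0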
|
[
"tiandehua@ellabook.cn"
] |
tiandehua@ellabook.cn
|
2cb256696adb3ff4636f5e1f72d14249d395432d
|
9a00baf648a1135916141cfa8d6ed87665dd827a
|
/Day3m.py
|
aaac53c9a1599ae56ef6f86c5eea555d279367c7
|
[] |
no_license
|
JonathanSum/Deep-Learning-Happy-Sugar-Life-Weekly-Training
|
10786bd62853aedeb4d06a452c9ec597b25d3a0f
|
37b0ef4669e32eb9ffb74413f96f6a533270e141
|
refs/heads/master
| 2022-12-31T08:05:16.657932
| 2020-10-12T02:51:12
| 2020-10-12T02:51:12
| 292,529,405
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,414
|
py
|
import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
"""GPT-1 like network roughtly 125m parmas """
n_layer = 12
n_head = 12
n_embd = 768
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.
"""
    def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projection for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("mask", torch.tril(
torch.ones(config.block_size, config.block_size)))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C //
self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C //
self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C //
self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:, :, :T, :T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
# re-assemble all head output side by side
y = y.transpose(1, 2).contiguous().view(B, T, C)
# output projection
y = self.resid_drop(self.proj(y))
return y
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
            nn.Linear(4 * config.n_embd, config.n_embd),
            nn.Dropout(config.resid_pdrop)
)
def forward(self, x):
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(
1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config)
for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
logger.info("number of parameters: %e", sum(p.numel()
for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
This long function is unfortunately doing something very simple and is very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn  # full param name
                if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('pos_emb')
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(
inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
        # create the PyTorch optimizer object
optim_groups = [
{"paramas": [param_dict[pn] for pn in sorted(
list(decay))], "weight_decay": train_config.weight_decay},
{"paramas": [param_dict[pn]
for pn in sorted(list(no_decay))], "weight_decay":0.0}
]
        optimizer = torch.optim.AdamW(
optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
return optimizer
    def forward(self, idx, targets=None):
b, t = idx.size()
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
# forward the GPT model
# each index maps to a (learnable) vector
token_embeddings = self.tok_emb(idx)
# each position maps to a (learnable) vector
position_embeddings = self.pos_emb[:, :t, :]
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(
logits.view(-1, logits.size(-1)), targets.view(-1))
return logits, loss
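# Minimal smoke test (illustration, not part of the original file); the config
# values are arbitrary small numbers chosen for speed.
if __name__ == '__main__':
    config = GPTConfig(vocab_size=100, block_size=16, n_layer=2, n_head=2, n_embd=32)
    model = GPT(config)
    idx = torch.randint(0, 100, (2, 16))  # (batch, sequence) of token ids
    logits, loss = model(idx)             # loss is None when no targets are given
    print(logits.shape)                   # torch.Size([2, 16, 100])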
|
[
"noreply@github.com"
] |
JonathanSum.noreply@github.com
|
03f4906d5608d54bc061086dc9511077b6fb6acd
|
9d291ff8ad0a455b8b38065546e1c9a6617d36cf
|
/python/tkinter-test/global.py
|
25359a44216154de6b564a58f6c0eb507ca1a196
|
[
"BSD-2-Clause"
] |
permissive
|
seekindark/helloworld
|
7128b8d8b6ebdc79c2ec3aa17137fb2a8e18885e
|
3f36e040399a998e43c63bca0586fe517b7fef69
|
refs/heads/master
| 2023-05-26T22:04:08.714917
| 2023-05-13T01:31:06
| 2023-05-13T01:31:06
| 193,409,142
| 0
| 0
|
BSD-2-Clause
| 2019-09-27T08:07:02
| 2019-06-24T00:31:59
|
C
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
def testGlobal(var):
    global x
    print("before assignment: x = ", x)
    x = 2
    print("after assignment: x = ", x)
x = 100
print("before testGlobal, x=100")
testGlobal(x)
print("after testGlobal: x = ", x)
|
[
"fei_n_chen@163.com"
] |
fei_n_chen@163.com
|
3b9c6df380abb62eca139e0921fb212b689b32f4
|
49273a7e6e0d4726f38fab1c430b86dbfc4b2345
|
/codingbat/p2_triangle.py
|
01524a16b9b97f430f2e248bb393a32eb057c4a4
|
[] |
no_license
|
adqz/interview_practice
|
d16d8c56436dde1f7fa96dc0d8dcc827295e0ff0
|
f55fb9c0a39c2482c98cc452c185a938a59ad57c
|
refs/heads/master
| 2023-01-11T01:30:03.353498
| 2023-01-03T14:48:08
| 2023-01-03T14:48:08
| 207,520,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
'''
------- Start: Problem -------
We have a triangle made of blocks. The topmost row has 1 block, the next row down has 2 blocks,
the next row has 3 blocks, and so on. Compute recursively (no loops or multiplication) the total
number of blocks in such a triangle with the given number of rows.
triangle(0) → 0
triangle(1) → 1
triangle(2) → 3
------- End: Problem -------
'''
class Solution:
def triangle(self, num_rows):
return self.triangle_helper(num_rows, 0)
def triangle_helper(self, curr_row, num_blocks):
if curr_row == 0:
return num_blocks
else:
return self.triangle_helper(curr_row-1, num_blocks + curr_row)
if __name__ == "__main__":
sol = Solution()
print(sol.triangle(0)) #0
print(sol.triangle(1)) #1
print(sol.triangle(2)) #3
print(sol.triangle(4)) #10
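    # Sanity check against the closed form n*(n+1)//2 (illustration, not part
    # of the original file):
    assert all(sol.triangle(n) == n * (n + 1) // 2 for n in range(10))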
|
[
"ashahpur@eng.ucsd.edu"
] |
ashahpur@eng.ucsd.edu
|
024d9dd310fa67c101a745994a8b3f57c66e2c76
|
d8aa9e13534db5aeb328a439fe1bc14718a5cbb3
|
/manage.py
|
818e1183b8c65261939a2594d999283bf4040753
|
[] |
no_license
|
mati23/mtg-django
|
ec742d46cc2ca5402f82ddb7d746a42d8a720dd1
|
e342faa59ab58342106275d52865c4475179b6c1
|
refs/heads/master
| 2021-05-19T17:02:11.881148
| 2020-07-01T01:58:26
| 2020-07-01T01:58:26
| 253,079,948
| 3
| 0
| null | 2021-01-05T23:39:11
| 2020-04-04T19:14:11
|
Python
|
UTF-8
|
Python
| false
| false
| 905
|
py
|
#!/usr/bin/env python
import os
import sys
home = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(home, "mtgblueprint/model"))
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mtgblueprint.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"mateusarruda@alu.ufc.br"
] |
mateusarruda@alu.ufc.br
|
0b91ae681bcb8e985d62e92073255c5a45b82b87
|
c0c29c1c3c8550536c2a16dc3b5ef5b2be4b77e2
|
/DynamicProgramming/ClimbingStairs.py
|
087881865993fcde4fe8942de403e10cd4f3fcc1
|
[] |
no_license
|
safkat33/LeetCodeProblems
|
0d404c07f7ffd631afc9ae65cc28b6e847991dd0
|
3f443e90a97be81c18aebfc9fd5bad047016e9ee
|
refs/heads/main
| 2023-07-11T07:15:33.651447
| 2021-08-27T13:05:24
| 2021-08-27T13:05:24
| 388,070,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
"""
You are climbing a staircase. It takes n steps to reach the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
Example 1:
Input: n = 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps
practice
steps = |1|2|3|5|8
"""
class Solution:
def climbStairs(self, n: int) -> int:
res = [1] * n
for i in range(1, n):
if i == 1:
res[i] += res[i - 1]
else:
res[i] = res[i - 1] + res[i - 2]
return res[-1]
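# Quick check (illustrative, not part of the original file): for n = 5 the
# method returns 8, following the Fibonacci-like sequence 1, 2, 3, 5, 8
# sketched in the docstring. Note the code assumes n >= 1; res[-1] would fail
# for n = 0.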
|
[
"safkat.33@gmail.com"
] |
safkat.33@gmail.com
|
965a646233dd47098e1893b396f1ea31c88b0a4b
|
290a083092f26a0faa1b277848a330f1e241e25b
|
/count_Ts.py
|
e55a8207749acbb2736748f88f37f4da60efbf05
|
[] |
no_license
|
Ayogee/Python
|
34334ca97871cb8cd6fdde46c3c371a795acfe5f
|
a05c66f9bf86989e0d5923f2cdb8885486ea27d2
|
refs/heads/main
| 2023-02-05T07:16:32.734019
| 2020-12-29T23:49:50
| 2020-12-29T23:49:50
| 324,831,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
# This program counts the number of times
# the letter T (uppercase or lowercase)
# appears in the string.
def main():
# Create a variable to use to hold the count.
# The accumulator must start with 0.
count = 0
# Get a string from the user.
my_string = input('Enter a sentence: ')
# Count the Ts.
for ch in my_string:
if ch == 'T' or ch == 't':
count += 1
# Print the result.
print('The letter T appears', count, 'times.')
# Call the main function.
main()
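# Equivalent one-liner (illustrative, not part of the original program):
# my_string.lower().count('t')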
|
[
"noreply@github.com"
] |
Ayogee.noreply@github.com
|
e9e1724afbec3386c9ad9fe57c0c6fb3dd2bf831
|
7a49dda91c75aac8189e64aa849a88bfd982af0b
|
/src/segmentation/model_components/OutConv.py
|
c26f08c9937bc4426fbf9b36739b3a9acfeac894
|
[] |
no_license
|
cmtrom01/Al-SiC_Segmentation
|
9e64c33ee6779dd4ca5061f844da106d6471b96d
|
c99c790bf0f5a82d3a8d075c0523718777ea85ad
|
refs/heads/main
| 2023-08-16T17:28:56.453647
| 2021-09-10T20:23:16
| 2021-09-10T20:23:16
| 392,110,804
| 0
| 0
| null | 2021-08-03T01:47:12
| 2021-08-02T22:23:29
|
Python
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
import torch.nn as nn
class OutConv(nn.Module):
def __init__(self, in_ch, out_ch):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=1)
def forward(self, x):
x = self.conv(x)
return x
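# Note (illustration, not part of the original file): a 1x1 convolution mixes
# channels only, so OutConv(64, 2) maps a (N, 64, H, W) feature map to
# (N, 2, H, W) per-pixel class logits.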
|
[
"cmtrom01@louisville.edu"
] |
cmtrom01@louisville.edu
|
80b8e1e6ed59b8d4333ecef3f712286bc2453d3d
|
f554acc4c261c6a7a8ef16847005d5f7758da44c
|
/gui/qt/console.py
|
5016e45144c49aca252ee2140a9c3819facfaed3
|
[
"MIT"
] |
permissive
|
lbtcio/lbtc-lightwallet-client
|
aee15de0defffd1255641d307414bb49c1b39100
|
49ae12e5603eeb0dea2f2dd65b16414be2891d9e
|
refs/heads/master
| 2022-12-12T12:15:12.850115
| 2021-07-20T03:16:42
| 2021-07-20T03:16:42
| 124,226,294
| 23
| 13
|
MIT
| 2022-12-07T23:46:56
| 2018-03-07T11:29:34
|
Python
|
UTF-8
|
Python
| false
| false
| 10,166
|
py
|
# source: http://stackoverflow.com/questions/2758159/how-to-embed-a-python-interpreter-in-a-pyqt-widget
import sys, os, re
import traceback, platform
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from electrum import util
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
class Console(QtWidgets.QPlainTextEdit):
def __init__(self, prompt='>> ', startup_message='', parent=None):
QtWidgets.QPlainTextEdit.__init__(self, parent)
self.prompt = prompt
self.history = []
self.namespace = {}
self.construct = []
self.setGeometry(50, 75, 600, 400)
self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
self.setUndoRedoEnabled(False)
self.document().setDefaultFont(QtGui.QFont(MONOSPACE_FONT, 10, QtGui.QFont.Normal))
self.showMessage(startup_message)
self.updateNamespace({'run':self.run_script})
self.set_json(False)
def set_json(self, b):
self.is_json = b
def run_script(self, filename):
with open(filename) as f:
script = f.read()
# eval is generally considered bad practice. use it wisely!
result = eval(script, self.namespace, self.namespace)
def updateNamespace(self, namespace):
self.namespace.update(namespace)
def showMessage(self, message):
self.appendPlainText(message)
self.newPrompt()
def clear(self):
self.setPlainText('')
self.newPrompt()
def newPrompt(self):
if self.construct:
prompt = '.' * len(self.prompt)
else:
prompt = self.prompt
self.completions_pos = self.textCursor().position()
self.completions_visible = False
self.appendPlainText(prompt)
self.moveCursor(QtGui.QTextCursor.End)
def getCommand(self):
doc = self.document()
curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
curr_line = curr_line.rstrip()
curr_line = curr_line[len(self.prompt):]
return curr_line
def setCommand(self, command):
if self.getCommand() == command:
return
doc = self.document()
curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
self.moveCursor(QtGui.QTextCursor.End)
for i in range(len(curr_line) - len(self.prompt)):
self.moveCursor(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().insertText(command)
self.moveCursor(QtGui.QTextCursor.End)
def show_completions(self, completions):
if self.completions_visible:
self.hide_completions()
c = self.textCursor()
c.setPosition(self.completions_pos)
completions = map(lambda x: x.split('.')[-1], completions)
t = '\n' + ' '.join(completions)
if len(t) > 500:
t = t[:500] + '...'
c.insertText(t)
self.completions_end = c.position()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = True
def hide_completions(self):
if not self.completions_visible:
return
c = self.textCursor()
c.setPosition(self.completions_pos)
l = self.completions_end - self.completions_pos
for x in range(l): c.deleteChar()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = False
def getConstruct(self, command):
if self.construct:
prev_command = self.construct[-1]
self.construct.append(command)
if not prev_command and not command:
ret_val = '\n'.join(self.construct)
self.construct = []
return ret_val
else:
return ''
else:
if command and command[-1] == (':'):
self.construct.append(command)
return ''
else:
return command
def getHistory(self):
return self.history
    def setHistory(self, history):
self.history = history
def addToHistory(self, command):
if command[0:1] == ' ':
return
if command and (not self.history or self.history[-1] != command):
self.history.append(command)
self.history_index = len(self.history)
def getPrevHistoryEntry(self):
if self.history:
self.history_index = max(0, self.history_index - 1)
return self.history[self.history_index]
return ''
def getNextHistoryEntry(self):
if self.history:
hist_len = len(self.history)
self.history_index = min(hist_len, self.history_index + 1)
if self.history_index < hist_len:
return self.history[self.history_index]
return ''
def getCursorPosition(self):
c = self.textCursor()
return c.position() - c.block().position() - len(self.prompt)
def setCursorPosition(self, position):
self.moveCursor(QtGui.QTextCursor.StartOfLine)
for i in range(len(self.prompt) + position):
self.moveCursor(QtGui.QTextCursor.Right)
def register_command(self, c, func):
methods = { c: func}
self.updateNamespace(methods)
def runCommand(self):
command = self.getCommand()
self.addToHistory(command)
command = self.getConstruct(command)
if command:
tmp_stdout = sys.stdout
class stdoutProxy():
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
def flush(self):
pass
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
QtCore.QCoreApplication.processEvents()
self.skip = not self.skip
if type(self.namespace.get(command)) == type(lambda:None):
self.appendPlainText("'%s' is a function. Type '%s()' to use it in the Python console."%(command, command))
self.newPrompt()
return
sys.stdout = stdoutProxy(self.appendPlainText)
try:
try:
# eval is generally considered bad practice. use it wisely!
result = eval(command, self.namespace, self.namespace)
                    if result is not None:
if self.is_json:
util.print_msg(util.json_encode(result))
else:
self.appendPlainText(repr(result))
except SyntaxError:
# exec is generally considered bad practice. use it wisely!
exec(command, self.namespace, self.namespace)
except SystemExit:
self.close()
except Exception:
traceback_lines = traceback.format_exc().split('\n')
# Remove traceback mentioning this file, and a linebreak
for i in (3,2,1,-1):
traceback_lines.pop(i)
self.appendPlainText('\n'.join(traceback_lines))
sys.stdout = tmp_stdout
self.newPrompt()
self.set_json(False)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Tab:
self.completions()
return
self.hide_completions()
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
self.runCommand()
return
if event.key() == QtCore.Qt.Key_Home:
self.setCursorPosition(0)
return
if event.key() == QtCore.Qt.Key_PageUp:
return
elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
if self.getCursorPosition() == 0:
return
elif event.key() == QtCore.Qt.Key_Up:
self.setCommand(self.getPrevHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_Down:
self.setCommand(self.getNextHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
self.clear()
super(Console, self).keyPressEvent(event)
def completions(self):
cmd = self.getCommand()
lastword = re.split(' |\(|\)',cmd)[-1]
beginning = cmd[0:-len(lastword)]
path = lastword.split('.')
ns = self.namespace.keys()
if len(path) == 1:
ns = ns
prefix = ''
else:
obj = self.namespace.get(path[0])
prefix = path[0] + '.'
ns = dir(obj)
completions = []
for x in ns:
if x[0] == '_':continue
xx = prefix + x
if xx.startswith(lastword):
completions.append(xx)
completions.sort()
if not completions:
self.hide_completions()
elif len(completions) == 1:
self.hide_completions()
self.setCommand(beginning + completions[0])
else:
# find common prefix
p = os.path.commonprefix(completions)
if len(p)>len(lastword):
self.hide_completions()
self.setCommand(beginning + p)
else:
self.show_completions(completions)
welcome_message = '''
---------------------------------------------------------------
Welcome to a primitive Python interpreter.
---------------------------------------------------------------
'''
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
console = Console(startup_message=welcome_message)
console.updateNamespace({'myVar1' : app, 'myVar2' : 1234})
console.show()
sys.exit(app.exec_())
|
[
"sunshine.benjamin.smith@gmail.com"
] |
sunshine.benjamin.smith@gmail.com
|
72bdd81923ec647703686dbfafe202baf7b5ef88
|
db0e8aa3a92a30c9b1cc8da03725e951ff64f3f1
|
/lenv/lib/python3.6/site-packages/django/db/backends/oracle/compiler.py
|
9aa4acc0fe57684499af5f84260d87d743225f06
|
[
"BSD-3-Clause"
] |
permissive
|
shrey-c/DataLeakageDjango
|
ffeef61caa347520747fc70cf3f7f8b84a9610cf
|
a827c5a09e5501921f9fb97b656755671238dd63
|
refs/heads/master
| 2022-11-30T03:30:12.313025
| 2020-07-12T06:47:44
| 2020-07-12T06:47:44
| 242,569,637
| 6
| 1
|
BSD-3-Clause
| 2022-11-22T05:20:22
| 2020-02-23T18:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,246
|
py
|
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list
of parameters. This is overridden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset with Oracle.
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
if not do_offset:
sql, params = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=with_col_aliases,
)
else:
sql, params = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=True,
)
# Wrap the base query in an outer SELECT * with boundaries on
# the "_RN" column. This is the canonical way to emulate LIMIT
# and OFFSET on Oracle.
high_where = ''
if self.query.high_mark is not None:
high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
if self.query.low_mark:
sql = (
'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
'"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
)
else:
# Simplify the query to support subqueries if there's no offset.
sql = (
'SELECT * FROM (SELECT "_SUB".* FROM (%s) "_SUB" %s)' % (sql, high_where)
)
return sql, params
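# Illustration (not from the original file): with low_mark=10 and high_mark=20
# the wrapped query has the shape
#   SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (<base sql>) "_SUB"
#                  WHERE ROWNUM <= 20) WHERE "_RN" > 10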
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
|
[
"shreyansh.chheda@gmail.com"
] |
shreyansh.chheda@gmail.com
|
4c22848bb06b78c312c0e2e564ca37517aea883d
|
0a830ace2253924ca216f1bcca5a61327a710d78
|
/DiseasePrediction/migrations/0003_auto_20201230_2247.py
|
8b5bdd5870605124cd3bcb38a69b266e63da0996
|
[] |
no_license
|
AyushSolanki-17/HealthGaurd-Server
|
39f0161e54f3a70d2014aafc723a6a52217277d3
|
64661afecbd226c0962ff7181c6038690b2fb0b6
|
refs/heads/main
| 2023-05-04T10:14:17.024867
| 2021-05-25T13:59:26
| 2021-05-25T13:59:26
| 370,714,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Generated by Django 3.1.3 on 2020-12-30 17:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('DiseasePrediction', '0002_auto_20201230_2141'),
]
operations = [
migrations.RemoveField(
model_name='testreport',
name='desc',
),
migrations.DeleteModel(
name='TestReportDescription',
),
]
|
[
"ayush17solanki@gmail.com"
] |
ayush17solanki@gmail.com
|
0bb039607a408a124099b2a8c1061279109510fd
|
8b9e473478c080765271d7606a93c0c10df93602
|
/property_manager/blog/views.py
|
6fa0b985298d8272cdf7276ec186d01d976136d6
|
[] |
no_license
|
BigRLab/python-web
|
e5d2eaca08a81c8698783fa0fd58f76c85f4dfe5
|
8d01d2d2e2a64c12df723526226e09714f77fe0a
|
refs/heads/master
| 2020-04-12T22:25:49.018664
| 2018-11-19T07:48:57
| 2018-11-19T07:48:57
| 162,789,406
| 2
| 1
| null | 2018-12-22T07:01:44
| 2018-12-22T07:01:44
| null |
UTF-8
|
Python
| false
| false
| 23,271
|
py
|
# Create your views here.
# -*- coding: utf-8 -*-
from django.shortcuts import render,render_to_response,loader,redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext,Context
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.contrib.auth.models import User
from django.db.models import Count, Sum
from django.contrib.auth.models import User
from blog.models import Property_operation_history
from blog.models import Property_content
import blog.ansible_property as ansible_property
from email.mime.text import MIMEText
from email.header import Header
import smtplib
import time
import tempfile
import json
import os
import sys
import shutil
import re
def home(request):
return render_to_response('login.html')
@login_required
def index(request):
return render_to_response('index.html')
@login_required
def chpasswd(request):
return render_to_response('chpasswd.html')
@login_required
def property_add(request):
return render_to_response('property_add.html')
#######################
# property/asset code
#######################
@login_required
def property_write(request):
if request.method == "POST":
user_name = request.COOKIES.get('username','')
staff_name = user_info(user_name)
ip_name = request.POST.get('ip_name','')
all_name = request.POST.get('all_name','')
project_name = request.POST.get('project_name','')
team_name_html = request.POST.get('team_name_html','')
domain_name_html = request.POST.get('domain_name_html','')
ip_remark_html = request.POST.get('ip_remark_html','')
app_name_html = request.POST.get('app_name_html','')
system_name_html = request.POST.get('system_name_html','')
cpu_name_html = request.POST.get('cpu_name_html','')
memory_name_html = request.POST.get('memory_name_html','')
disk_name_html = request.POST.get('disk_name_html','')
model_name_html = request.POST.get('model_name_html','')
principal_name_html = request.POST.get('principal_name_html','')
xen_name_html = request.POST.get('xen_name_html','')
room_name_html = request.POST.get('room_name_html','')
xen_ip_html = request.POST.get('xen_ip_html','')
update_operation = request.POST.get('update_operation','')
print "+++" * 200
print project_name
if update_operation == "update_operation":
ip_name = ip_name.encode('utf-8')
ip_name = ''.join(ip_name.split())
ip_name = ''.join(ip_name.split("<nobr>"))
ip_name = '\n'.join(ip_name.split("</nobr>"))
print ip_name
file_dir="./upload/property_tempfile"
if project_name == "pool_all":
operation_content = "添加池"
print operation_content
for ip_name in ip_name.split("\n"):
baseDir = os.path.dirname(os.path.abspath(__name__));
hosts_filedir = os.path.join(baseDir,'upload', 'property_tempfile', time.strftime('%Y'), time.strftime('%m'), time.strftime('%d'));
if not os.path.exists(hosts_filedir):
os.makedirs(hosts_filedir)
property_hosts_list=tempfile.NamedTemporaryFile(prefix=ip_name, suffix="hosts",dir=file_dir)
property_hosts_list.writelines(['[pool-ip]\n', '%s ansible_ssh_user=root ansible_ssh_pass=Zh@0P1n!123\n' % ip_name])
property_hosts_list.seek(0)
property_ip_file = ansible_property.run_adhoc(property_hosts_list.name, "script", "./blog/pool_info.sh")
property_copy = ansible_property.run_adhoc(property_hosts_list.name, "synchronize", "mode=pull src=/tmp/%s dest=%s" % (ip_name, hosts_filedir))
property_hosts_list.close()
if property_copy == 0:
hosts_file_name = hosts_filedir+"/"+ip_name
property_info = property_write_db(hosts_file_name)
elif project_name == "add_list":
operation_content = "添加主机"
print operation_content
for i in ip_name.split("\n"):
ip_exist = Property_content.objects.filter(ip_name=i)
print "==="
print i
print "==="
if len(ip_exist) == 0:
property_info = property_db_write(Property_content(), team_name=team_name_html, domain_name=domain_name_html, ip_name=i, ip_remark=ip_remark_html, app_name=app_name_html, system_name=system_name_html, principal_name=principal_name_html, xen_name=xen_name_html, room_name=room_name_html, xen_ip=xen_ip_html, host_cpu=cpu_name_html, host_memory=memory_name_html, host_disk=disk_name_html, server_model=model_name_html)
elif project_name == "room_list":
operation_content = "更改机房位置"
print operation_content
for i in ip_name.split("\n"):
ip_exist = Property_content.objects.filter(ip_name=i)
if len(ip_exist) != 0:
blog = Property_content.objects.get(ip_name=i)
blog.room_name=all_name
blog.save()
property_info = "0"
elif project_name == "domain_list":
operation_content = "更改域名"
print operation_content
for i in ip_name.split("\n"):
ip_exist = Property_content.objects.filter(ip_name=i)
if len(ip_exist) != 0:
blog = Property_content.objects.get(ip_name=i)
blog.domain_name=all_name
blog.save()
property_info = "0"
elif project_name == "principal_list":
operation_content = "更改负责人"
print operation_content
for i in ip_name.split("\n"):
ip_exist = Property_content.objects.filter(ip_name=i)
if len(ip_exist) != 0:
blog = Property_content.objects.get(ip_name=i)
blog.principal_name=all_name
blog.save()
property_info = "0"
elif project_name == "app_list":
operation_content = "更改应用"
print operation_content
for i in ip_name.split("\n"):
ip_exist = Property_content.objects.filter(ip_name=i)
if len(ip_exist) != 0:
blog = Property_content.objects.get(ip_name=i)
blog.app_name=all_name
blog.save()
property_info = "0"
elif project_name == "system_list":
operation_content = "更改系统"
print operation_content
for i in ip_name.split("\n"):
ip_exist = Property_content.objects.filter(ip_name=i)
if len(ip_exist) != 0:
blog = Property_content.objects.get(ip_name=i)
blog.system_name=all_name
blog.save()
property_info = "0"
elif project_name == "team_list":
operation_content = "更改团队"
print operation_content
for i in ip_name.split("\n"):
ip_exist = Property_content.objects.filter(ip_name=i)
if len(ip_exist) != 0:
blog = Property_content.objects.get(ip_name=i)
blog.team_name=all_name
blog.save()
property_info = "0"
elif project_name == "change_hosts":
print "=======" * 20
print ip_name,team_name_html,domain_name_html,ip_remark_html,app_name_html,system_name_html,principal_name_html,xen_name_html,room_name_html,xen_ip_html, cpu_name_html, memory_name_html, disk_name_html, model_name_html
print "=======" * 20
operation_content = "更改记录"
print operation_content
for i in ip_name.split("\n"):
print "=======" * 20
print i
print "=======" * 20
blog = Property_content.objects.get(ip_name=i)
print "=======" * 20
print blog
print "=======" * 20
blog.team_name=team_name_html
blog.domain_name=domain_name_html
blog.ip_remark=ip_remark_html
blog.app_name=app_name_html
blog.system_name=system_name_html
blog.principal_name=principal_name_html
blog.xen_name=xen_name_html
blog.room_name=room_name_html
blog.xen_ip=xen_ip_html
blog.host_cpu=cpu_name_html
blog.host_memory=memory_name_html
blog.host_disk=disk_name_html
blog.server_model=model_name_html
blog.save()
property_info = "0"
else:
return render_to_response('property_add.html')
print "===" * 100
print property_info
print "===" * 100
if property_info == "0":
result_operation="%s : %s" % (operation_content, str(all_name.encode('utf-8')))
property_db_history(Property_operation_history(), staff_name, ip_name, operation_name=result_operation)
return HttpResponse(json.dumps(property_info))
else:
return HttpResponse(json.dumps(property_info))
else:
return render_to_response('property_add.html')
def property_write_db(hosts_file_name):
f = open(hosts_file_name, "r")
line = f.readlines()
host_list = []
for i in line:
host_list.append(i.strip())
f.close()
##########
pool_name=host_list[0]
host_list = host_list[1:]
xe_name = ""
dic_name = {}
for i in host_list:
if i[:2] == "xe":
host_name = ""
xe_name = i
dic_name[xe_name] = host_name
else:
host_name = i
if dic_name[xe_name] == "":
dic_name[xe_name] = host_name
else:
list_name = dic_name[xe_name]
dic_name[xe_name] = list_name+"#####"+host_name
#########
dic_key = dic_name.keys()
ip_failed=[]
for xe_ip in dic_key:
if dic_name[xe_ip] == "":
continue
else:
for host_ip in dic_name[xe_ip].split("#####"):
try:
ip_name = host_ip.split("-",1)[0]
ip_info = host_ip.split("-",1)[1]
host_info = ip_info.split('===')
ip_remark=host_info[0]
ip_cpu=host_info[1]
ip_memory=str(round(float(host_info[2])/1024/1024/1024))
ip_disk_all=host_info[3].split()
ip_disk=""
for i in ip_disk_all:
ip_disk=ip_disk+str(int(round(float(i)/1024/1024/1024)))+" "
ip_exist = Property_content.objects.filter(ip_name=ip_name)
if len(ip_exist) == 0:
property_db_write(Property_content(), ip_name=ip_name, ip_remark=ip_remark, xen_name=pool_name, xen_ip=xe_ip[3:], host_cpu=ip_cpu, host_memory=ip_memory, host_disk=ip_disk, server_model="虚拟机")
except IndexError:
ip_name = host_ip.split("===",1)[0]
ip_failed.append(ip_name)
except ValueError:
ip_name = host_ip.split("===",1)[0]
ip_failed.append(ip_name)
if len(ip_failed) != 0:
property_send_mail(ip_failed)
return "0"
def property_send_mail(ip_name):
property_send_info = "以下IP信息在xenserver中命名规范不符合标准,标准为:命名不能有空格,并且以ip开头加上-用处,如:(172.30.1.100-nginx),赶紧去补,不然每天都发邮件,烦死你!!!\n\n"
for i in ip_name:
property_send_info = property_send_info + " --- " + i + "\n"
print property_send_info
sender = 'cmdb@test.com.cn'
    receivers = ['ligh@test.com.cn']  # recipients; can be set to your QQ mailbox or another mailbox
    # three arguments: the text content, 'plain' for the text format, 'utf-8' for the encoding
mail_msg = """
<p>%s</p>
""" % (property_send_info)
message = MIMEText('%s' % property_send_info, 'plain', 'utf-8')
message['From'] = Header("cmdb")
message['To'] = Header("ligh")
subject = '命名不规范IP'
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(sender, receivers, message.as_string())
print "邮件发送成功"
except smtplib.SMTPException:
print "Error: 无法发送邮件"
@login_required
def property_details(request):
if request.method == "GET":
user_name = request.COOKIES.get('username','')
staff_name = user_info(user_name)
search_team_name = request.GET.get('search_team','')
search_domain_name = request.GET.get('search_domain','')
search_ip_name = request.GET.get('search_ip','')
search_remark_name = request.GET.get('search_remark','')
search_app_name = request.GET.get('search_app','')
search_system_name = request.GET.get('search_system','')
search_xen_name = request.GET.get('search_xen_name','')
search_xen_ip = request.GET.get('search_xen_ip','')
rank_ip = request.GET.get('rank_ip')
rank_domain = request.GET.get('rank_domain')
rank_name = ""
if rank_ip == "rank_ip":
rank_name = "ip_name"
elif rank_domain == "rank_domain":
rank_name = "domain_name"
property_condition = {}
if search_team_name != "":
property_condition["team_name__contains"] = search_team_name
if search_domain_name != "":
property_condition["domain_name__contains"] = search_domain_name
if search_ip_name != "":
property_condition["ip_name__contains"] = search_ip_name
if search_remark_name != "":
property_condition["ip_remark__contains"] = search_remark_name
if search_app_name != "":
property_condition["app_name__contains"] = search_app_name
if search_system_name != "":
property_condition["system_name__contains"] = search_system_name
if search_xen_name != "":
property_condition["xen_name__contains"] = search_xen_name
if search_xen_ip != "":
property_condition["xen_ip__contains"] = search_xen_ip
if not property_condition :
if rank_name != "":
property_his = Property_content.objects.all().order_by(rank_name)
else:
property_his = Property_content.objects.all().order_by("-update_time")
else:
if rank_name != "":
property_his = Property_content.objects.filter(**property_condition).order_by(rank_name)
else:
property_his = Property_content.objects.filter(**property_condition).order_by("-update_time")
property_number=len(property_his)
page = request.GET.get('page')
if page == "all":
t = loader.get_template("property_details.html")
c = Context({'property_his': property_his, 'search_team_name': search_team_name, 'search_domain_name': search_domain_name, 'search_ip_name': search_ip_name, 'search_remark_name': search_remark_name, 'search_app_name': search_app_name, 'search_system_name': search_system_name, 'search_xen_name': search_xen_name, 'search_xen_ip': search_xen_ip, 'rank_ip': rank_ip, 'rank_domain': rank_domain})
return HttpResponse(t.render(c))
else:
limit = 20
paginator = Paginator(property_his, limit)
try:
show_details = paginator.page(page)
except PageNotAnInteger:
show_details = paginator.page(1)
except EmptyPage:
show_details = paginator.page(paginator.num_pages)
t = loader.get_template("property_details.html")
c = Context({'property_his': show_details, 'search_team_name': search_team_name, 'search_domain_name': search_domain_name, 'search_ip_name': search_ip_name, 'search_remark_name': search_remark_name, 'search_app_name': search_app_name, 'search_system_name': search_system_name, 'search_xen_name': search_xen_name, 'search_xen_ip': search_xen_ip, 'rank_ip': rank_ip, 'rank_domain': rank_domain, 'property_number': property_number})
return HttpResponse(t.render(c))
@login_required
def property_operation(request):
if request.method == "GET":
user_name = request.COOKIES.get('username','')
staff_name = user_info(user_name)
search_name = request.GET.get('search','')
if search_name != "":
property_his = Property_operation_history.objects.filter(ip_name__contains=search_name).order_by("-update_time")
else:
property_his = Property_operation_history.objects.all().order_by("-update_time")
limit = 10
page = request.GET.get('page')
paginator = Paginator(property_his, limit)
try:
show_details = paginator.page(page)
except PageNotAnInteger:
show_details = paginator.page(1)
except EmptyPage:
show_details = paginator.page(paginator.num_pages)
t = loader.get_template("property_operation_history.html")
c = Context({'property_his': show_details, 'search_ip_name': search_name})
return HttpResponse(t.render(c))
@login_required
def property_record_delete(request):
if request.method == "POST":
user_name = request.COOKIES.get('username','')
staff_name = user_info(user_name)
operation_content = "删除数据"
ip_address = request.POST.get('ip_name','').replace('\t','').replace('\n','').replace(' ','')
print "delete property !!!%s!!!" % ip_address
property_db_history(Property_operation_history(), staff_name, ip_address, operation_name=operation_content)
db_delete = Property_content.objects.filter(ip_name=ip_address).delete()
#result_operation="%s : %s" % (operation_content, str(all_name))
if db_delete[0] == 0:
return HttpResponse(json.dumps(1))
else:
return HttpResponse(json.dumps(0))
else:
return render_to_response('property_details.html')
@login_required
def property_record_query(request):
if request.method == "POST":
user_name = request.COOKIES.get('username','')
ip_address = request.POST.get('ip_name','').replace('\t','').replace('\n','').replace(' ','')
print "query property !!!%s!!!" % ip_address
db_query = Property_content.objects.filter(ip_name=ip_address).values()
for i in db_query:
db_dict = i
if "update_time" in db_dict.keys():
del db_dict["update_time"]
print db_dict
#t=template.Template('[%s, {{ team_name }}, {{ domain_name }}, {{ ip_remark }}, {{ app_name }}, {{ system_name }}, {{ principal_name }}, {{ xen_name }}, {{ room_name }}, {{ xen_ip }}]' % ip_address)
#c=Context(db_result)
#print t.render(c)
return HttpResponse(json.dumps(db_dict))
else:
return render_to_response('property_details.html')
def property_db_history(table, staff_name=None, ip_name=None, operation_name=None):
print "DB write history"
print staff_name, ip_name, operation_name
blog = table
blog.staff_name = staff_name
blog.ip_name = ip_name
blog.operation_name = operation_name
blog.save()
return HttpResponse(json.dumps("ok"))
def property_db_write(table, team_name=None, domain_name=None, ip_name=None, ip_remark=None, app_name=None, system_name=None, principal_name=None, xen_name=None, room_name=None, xen_ip=None, host_cpu=None, host_memory=None, host_disk=None, server_model=None):
print "DB write"
print team_name, domain_name, ip_name, ip_remark, app_name, system_name, host_cpu, host_memory, host_disk, server_model, principal_name, xen_name, room_name, xen_ip
blog = table
blog.team_name = team_name
blog.domain_name = domain_name
blog.ip_name = ip_name
blog.ip_remark = ip_remark
blog.app_name = app_name
blog.system_name = system_name
blog.principal_name = principal_name
blog.xen_name = xen_name
blog.room_name = room_name
blog.xen_ip = xen_ip
blog.host_cpu = host_cpu
blog.host_memory = host_memory
blog.host_disk = host_disk
blog.server_model = server_model
blog.save()
return "0"
#return HttpResponse(json.dumps("ok"))
def user_info(user_name):
if user_name == "admin":
Staff_info = User.objects.all()
Staff_name = []
for i in Staff_info:
staff_info = User.objects.filter(username=str(i)).values()
for staff_name in staff_info:
Staff_name.append(staff_name['first_name'])
return Staff_name
else:
staff_info = User.objects.filter(username=user_name).values()
for staff_name in staff_info:
return staff_name['first_name']
#######################
#######################
# login code
#######################
def login_view(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
#response = HttpResponseRedirect('/index.html')
response = HttpResponseRedirect('/property_details.html')
response.set_cookie('username',username,86400)
return response
else:
return HttpResponse('Please check the user and password, login again!')
else:
return HttpResponse('With the registered password is not correct!')
else:
return render_to_response('login.html')
def logout_view(request):
logout(request)
return render_to_response('login.html')
#######################
# password-change code
#######################
@login_required
def changepwd(request):
if request.method == 'POST':
user_name = request.COOKIES.get('username','')
pass_name = request.POST.get('pass_name','')
print pass_name
u = User.objects.get(username__exact=user_name)
u.set_password(pass_name)
u_passwd = u.save()
if u_passwd == None:
return HttpResponse(json.dumps(0))
else:
return HttpResponse(json.dumps(1))
else:
return render_to_response('index.html')
# Create your views here.
|
[
"root@centos-linux.shared"
] |
root@centos-linux.shared
|
70e36484c35ec2e55406087c861d54bc88665295
|
56c136635f0f22e5933d00c540907bc8570e88af
|
/Shell/__init__.py
|
1c0bd0761ca98eb9469f933131b6ace79b8a1d9a
|
[
"Apache-2.0"
] |
permissive
|
githubforwuchaofan/SandyP2019
|
8e072c54e27a8392f650362a3b093cc1019e0abf
|
10f344a9e1b403096c5fedb93aafdf49cfae6cbe
|
refs/heads/master
| 2020-04-10T20:53:59.399547
| 2018-12-17T08:41:49
| 2018-12-17T08:41:49
| 161,281,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
# coding=utf8
"""
_author: wcf
_date: 2018/12/11, 6:32 p.m.
_desc: //ToDo
"""
|
[
"wuchaofan@soyoung.com"
] |
wuchaofan@soyoung.com
|
bc11e7852724f014cbf5e79d7002ac8fc179d61f
|
ea1d655f539dcf55d8b6cbe0e2178e553e1a6e8b
|
/oguz/examples/sendhtmlmail.py
|
e195ca9eecfd5aeaa0ea3093429c6b487675f2cf
|
[] |
no_license
|
COMU/android
|
9dc274f540f960087b794f8d1fd7e05e6f214d24
|
c260a2dab62257e7177251f2b2fcf95692b92ffa
|
refs/heads/master
| 2021-01-01T19:45:59.662328
| 2011-10-11T17:22:12
| 2011-10-11T17:22:12
| 3,224,570
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,955
|
py
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
def createhtmlmail(html, text, subject, fromEmail):
"""Create a mime-message that will render HTML in popular
MUAs, text in better ones"""
import MimeWriter
import mimetools
import cStringIO
out = cStringIO.StringIO() # output buffer for our message
htmlin = cStringIO.StringIO(html)
txtin = cStringIO.StringIO(text)
writer = MimeWriter.MimeWriter(out)
#
# set up some basic headers... we put subject here
# because smtplib.sendmail expects it to be in the
# message body
#
writer.addheader("From", fromEmail)
writer.addheader("Subject", subject)
writer.addheader("MIME-Version", "1.0")
#
# start the multipart section of the message
# multipart/alternative seems to work better
# on some MUAs than multipart/mixed
#
writer.startmultipartbody("alternative")
writer.flushheaders()
#
# the plain text section
#
subpart = writer.nextpart()
subpart.addheader("Content-Transfer-Encoding", "quoted-printable")
pout = subpart.startbody("text/plain", [("charset", 'utf-8')])
mimetools.encode(txtin, pout, 'quoted-printable')
txtin.close()
#
# start the html subpart of the message
#
subpart = writer.nextpart()
subpart.addheader("Content-Transfer-Encoding", "quoted-printable")
#
# returns us a file-ish object we can write to
#
pout = subpart.startbody("text/html", [("charset", 'utf-8')])
mimetools.encode(htmlin, pout, 'quoted-printable')
htmlin.close()
#
# Now that we're done, close our writer and
# return the message body
#
writer.lastpart()
msg = out.getvalue()
out.close()
#print msg
return msg
def mailsend(user, link, to):
import smtplib
# Create the body of the message (a plain-text and an HTML version).
link = "http://ldap.comu.edu.tr/akademik/bilgi/?id=" + link
text = u"Sayın " + user + "," + "\r\n\r\n"
text += u"Üniversitemizde kullanılmaya başlanan kablosuz ağın Eduroam'a geçişinde sizin için bir kullanıcı adı ve parola oluşturulmuştur.\r\n"
text += u"Bu geçiş ile tek kullanıcı adı ve parola ile sunulan servislere erişim hedeflenmektedir.\r\n"
text += u"Lütfen aşağıdaki linki tarayıcınızda açarak kaydınızı tamamlayınız!" + "\r\n"
text += link + "\r\n\r\n"
text += u"Eduroam, Education Roaming (Eğitim Gezintisi) kelimelerinin kısaltmasıdır.\r\n"
text += u"RADIUS tabanlı altyapı üzerinden 802.1x güvenlik standartlarını kullanarak, eduroam üyesi kurumların kullanıcılarının diğer eğitim kurumlarında da sorunsuzca ağ kullanımını amaçlamaktadır.\r\n"
text += u"Daha fazla bilgiye http://bidb.comu.edu.tr/eduroam/eduroam.htm veya http://www.eduroam.org/ web adreslerinden ulaşabilirsiniz.\r\n"
text += "\r\n\r\n"
text += u"Çanakkale Onsekiz Mart Üniversitesi\r\n\
Bilgi İşlem Dairesi Başkanlığı\r\n\
Tel : +90 286 218 00 18\r\n\
Tel - Fax : +90 286 218 05 18\r\n"
text = text.encode("utf-8")
html = u"""\
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
<p>Sayın, """ + user + u"""<br> \
Üniversitemiz kullanılmaya başlanan kablosuz ağın Eduroam'a geçişinde sizin için bir kullanıcı adı ve parola oluşturulmuştur. <br>
Bu geçiş ile tek kullanıcı adı ve parola ile sunulan servislere erişim hedeflenmektedir. <br>
Lütfen aşağıdaki linki tarayıcınızda açarak kaydınızı tamamlayınız!
<a href=\"""" + link + u"""\">link</a>
</p>
<p>
Eduroam, Education Roaming (Eğitim Gezintisi) kelimelerinin kısaltmasıdır.
RADIUS tabanlı altyapı üzerinden 802.1x güvenlik standartlarını
kullanarak, eduroam üyesi kurumların kullanıcılarının diğer eğitim
kurumlarında da sorunsuzca ağ kullanımını amaçlamaktadır.
Daha fazla bilgiye http://bidb.comu.edu.tr/eduroam/eduroam.htm veya
http://www.eduroam.org/ web adreslerinden ulaşabilirsiniz.
</p>
<br>
<p>
Çanakkale Onsekiz Mart Üniversitesi<br>
Bilgi İşlem Dairesi Başkanlığı<br>
Tel : +90 286 218 00 18<br>
Tel - Fax : +90 286 218 05 18<br>
</p>
</body>
</html>
"""
html = html.encode("utf-8")
subject = "Eduroam Bilgi Giris Form Linki"
message = createhtmlmail(html, text, subject, 'Bilgi Islem Dairesi Baskanligi <yardim@comu.edu.tr>')
server = smtplib.SMTP('mail.comu.edu.tr', 587)
#server.set_debuglevel(1)
server.starttls()
server.login('yardim@comu.edu.tr', 'artemis@nso')
rtr_code = server.verify(to)
server.sendmail('test@comu.edu.tr', to, message)
server.quit()
#print rtr_code
return rtr_code[0]
if __name__ == "__main__":
mailsend("OGUZ YARIMTEPE", "http://www.google.com", "oguzy@comu.edu.tr")
|
[
"oguzyarimtepe@gmail.com"
] |
oguzyarimtepe@gmail.com
|
336f92367959770043fa467c424ddefc1eca152b
|
1a86d8329ae3b863ca68f0f53f79f87e52ead6f9
|
/conf.py
|
4c11e7c806f35c50927e784c6d1b57e3bd4fd0a7
|
[] |
no_license
|
adamrpah/npcompleteheart
|
a1d3d1494e7e322ac4515095f9a65fd58dc783b4
|
cc9a54956a081e9a6ccad738a4709512ad257348
|
refs/heads/master
| 2021-01-19T14:34:47.767996
| 2020-09-20T21:56:47
| 2020-09-20T21:56:47
| 34,268,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55,171
|
py
|
# -*- coding: utf-8 -*-
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "Adam Pah" # (translatable)
BLOG_TITLE = "NP Complete Heart" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link. Don't forget the protocol (http/https)!
SITE_URL = "http://www.npcompleteheart.com/"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "https://example.com/"
BLOG_EMAIL = "adamrpah@gmail.com"
BLOG_DESCRIPTION = "The musings, thoughts, and technical notes of Adam Pah. But really, probably just an out of date Vitae." # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# af Afrikaans
# ar Arabic
# az Azerbaijani
# bg Bulgarian
# bs Bosnian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# fur Friulian
# gl Galician
# he Hebrew
# hi Hindi
# hr Croatian
# hu Hungarian
# ia Interlingua
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# lt Lithuanian
# ml Malayalam
# nb Norwegian (Bokmål)
# nl Dutch
# pa Punjabi
# pl Polish
# pt Portuguese
# pt_br Portuguese (Brazil)
# ru Russian
# sk Slovak
# sl Slovene
# sq Albanian
# sr Serbian (Cyrillic)
# sr_latin Serbian (Latin)
# sv Swedish
# te Telugu
# th Thai
# tr Turkish [NOT tr_TR]
# uk Ukrainian
# ur Urdu
# vi Vietnamese
# zh_cn Chinese (Simplified)
# zh_tw Chinese (Traditional)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('https://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('https://apple.com/', 'Apple'),
# ('https://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 4 theme,
# may present issues if the menu is too large.
# (in Bootstrap, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/pages/about/index.html", "<strong>About</strong>"),
("/pdf/curriculumvitae.pdf", "<strong>Curriculum Vitae</strong>"),
("/pages/research/index.html", "<strong>Research</strong>"),
("/pages/publications/publications/index.html", "<strong>Publications</strong>"),
("/pages/socialdna/index.html", "<strong>SocialDNA</strong>"),
),
}
# Alternative navigation links. Works the same way NAVIGATION_LINKS does,
# although themes may not always support them. (translatable)
# (Bootstrap 4: right-side of navbar, Bootblog 4: right side of title)
NAVIGATION_ALT_LINKS = {
DEFAULT_LANG: ()
}
# Name of the theme to use.
THEME = "bootstrap3"
# Primary color of your theme. This will be used to customize your theme.
# Must be a HEX value.
THEME_COLOR = '#000000'
# Theme configuration. Fully theme-dependent. (translatable)
# Examples below are for bootblog4.
# bootblog4 supports: featured_large featured_small featured_on_mobile
# featured_large_image_on_mobile featured_strip_html sidebar
# bootstrap4 supports: navbar_light (defaults to False)
THEME_CONFIG = {
DEFAULT_LANG: {
# Show the latest featured post in a large box, with the previewimage as its background.
'featured_large': False,
# Show the first (remaining) two featured posts in small boxes.
'featured_small': False,
# Show featured posts on mobile.
'featured_on_mobile': False,
# Show image in `featured_large` on mobile.
# `featured_small` displays them only on desktop.
'featured_large_image_on_mobile': False,
# Strip HTML from featured post text.
'featured_strip_html': False,
# Contents of the sidebar, If empty, the sidebar is not displayed.
'sidebar': ''
}
}
# POSTS and PAGES contains (wildcard, destination, template) tuples.
# (translatable)
#
# The wildcard is used to generate a list of source files
# (whatever/thing.rst, for example).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.es.rst and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output/TRANSLATIONS[lang]/destination/pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
# The page might also be placed in /destination/pagename/index.html
# if PRETTY_URLS are enabled.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds, indexes, tag lists and archives and are considered part
# of a blog, while PAGES are just independent HTML pages.
#
# Finally, note that destination can be translated, i.e. you can
# specify a different translation folder per language. Example:
# PAGES = (
# ("pages/*.rst", {"en": "pages", "de": "seiten"}, "page.tmpl"),
# ("pages/*.md", {"en": "pages", "de": "seiten"}, "page.tmpl"),
# )
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("pages/*.rst", "pages", "page.tmpl"),
("pages/*.md", "pages", "page.tmpl"),
("pages/*.txt", "pages", "page.tmpl"),
("pages/*.html", "pages", "page.tmpl"),
)
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "America/Chicago"  # "CDT" is not a valid tz database name; use the IANA zone
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates. (translatable)
# Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time
# You can also use 'full', 'long', 'medium', or 'short'
# DATE_FORMAT = 'yyyy-MM-dd HH:mm'
# Date format used to display post dates, if local dates are used. (translatable)
# Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting
# Example for presets: {'preset': True, 'format': 'DATE_FULL'}
# LUXON_DATE_FORMAT = {
# DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'},
# }
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE (without JS)
# 1 = using LUXON_DATE_FORMAT and local user time (JS, using Luxon)
# 2 = using a string like “2 days ago” (JS, using Luxon)
#
# Your theme must support it, Bootstrap already does.
# DATE_FANCINESS = 0
# Customize the locale/region used for a language.
# For example, to use British instead of US English: LOCALES = {'en': 'en_GB'}
# LOCALES = {}
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing code listings to be processed and published on
# the site. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# The default compiler for `new_post` is the first entry in the POSTS tuple.
#
# 'rest' is reStructuredText
# 'markdown' is Markdown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ['.rst', '.txt'],
"markdown": ['.md', '.mdown', '.markdown'],
"textile": ['.textile'],
"txt2tags": ['.t2t'],
"bbcode": ['.bb'],
"wiki": ['.wiki'],
"ipynb": ['.ipynb'],
"html": ['.html', '.htm'],
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ['.php'],
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ['.rst', '.md', '.txt'],
}
# Enable reST directives that insert the contents of external files such
# as "include" and "raw." This maps directly to the docutils file_insertion_enabled
# config. See: http://docutils.sourceforge.net/docs/user/config.html#file-insertion-enabled
# REST_FILE_INSERTION_ENABLED = True
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# Preferred metadata format for new posts
# "Nikola": reST comments, wrapped in a HTML comment if needed (default)
# "YAML": YAML wrapped in "---"
# "TOML": TOML wrapped in "+++"
# "Pelican": Native markdown metadata or reST docinfo fields. Nikola style for other formats.
# METADATA_FORMAT = "Nikola"
# Use date-based path when creating posts?
# Can be enabled on a per-post basis with `nikola new_post -d`.
# The setting is ignored when creating pages.
# NEW_POST_DATE_PATH = False
# What format to use when creating posts with date paths?
# Default is '%Y/%m/%d', other possibilities include '%Y' or '%Y/%m'.
# NEW_POST_DATE_PATH_FORMAT = '%Y/%m/%d'
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# When linking posts to social media, Nikola provides Open Graph metadata
# which is used to show a nice preview. This includes an image preview
# taken from the post's previewimage metadata field.
# This option lets you use an image to be used if the post doesn't have it.
# The default is None, valid values are URLs or output paths like
# "/images/foo.jpg"
# DEFAULT_PREVIEW_IMAGE = None
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag RSS_EXTENSION (RSS feed for a tag)
# (translatable)
# TAG_PATH = "categories"
# By default, the list of tags is stored in
# output / TRANSLATION[lang] / TAG_PATH / index.html
# (see explanation for TAG_PATH). This location can be changed to
# output / TRANSLATION[lang] / TAGS_INDEX_PATH
# with an arbitrary relative path TAGS_INDEX_PATH.
# (translatable)
# TAGS_INDEX_PATH = "tags.html"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for tag pages. The default is "Posts about TAG".
# TAG_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ['mathjax']
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# A list of dictionaries specifying tags which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# For example:
# [
# {'en': 'private', 'de': 'Privat'},
# {'en': 'work', 'fr': 'travail', 'de': 'Arbeit'},
# ]
# TAG_TRANSLATIONS = []
# If set to True, a tag in a language will be treated as a translation
# of the literally same tag in all other languages. Enable this if you
# do not translate tags, for example.
# TAG_TRANSLATIONS_ADD_DEFAULTS = True
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category RSS_EXTENSION (RSS feed for a category)
# (translatable)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# By default, the list of categories is stored in
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html
# (see explanation for CATEGORY_PATH). This location can be changed to
# output / TRANSLATION[lang] / CATEGORIES_INDEX_PATH
# with an arbitrary relative path CATEGORIES_INDEX_PATH.
# (translatable)
# CATEGORIES_INDEX_PATH = "categories.html"
# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in
# hierarchies. For a post, the whole path in the hierarchy must be specified,
# using a forward slash ('/') to separate paths. Use a backslash ('\') to escape
# a forward slash or a backslash (i.e. '\//\\' is a path specifying the
# subcategory called '\' of the top-level category called '/').
CATEGORY_ALLOW_HIERARCHIES = False
# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output
# contains only the name of the leaf category and not the whole path.
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for category pages. The default is "Posts about CATEGORY".
# CATEGORY_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
HIDDEN_CATEGORIES = []
# A list of dictionaries specifying categories which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# See TAG_TRANSLATIONS example above.
# CATEGORY_TRANSLATIONS = []
# If set to True, a category in a language will be treated as a translation
# of the literally same category in all other languages. Enable this if you
# do not translate categories, for example.
# CATEGORY_TRANSLATIONS_ADD_DEFAULTS = True
# If no category is specified in a post, the destination path of the post
# can be used in its place. This replaces the sections feature. Using
# category hierarchies is recommended.
# CATEGORY_DESTPATH_AS_DEFAULT = False
# If True, the prefix will be trimmed from the category name, eg. if the
# POSTS destination is "foo/bar", and the path is "foo/bar/baz/quux",
# the category will be "baz/quux" (or "baz" if only the first directory is considered).
# Note that prefixes coming from translations are always ignored.
# CATEGORY_DESTPATH_TRIM_PREFIX = False
# If True, only the first directory of a path will be used.
# CATEGORY_DESTPATH_FIRST_DIRECTORY_ONLY = True
# Map paths to prettier category names. (translatable)
# CATEGORY_DESTPATH_NAMES = {
# DEFAULT_LANG: {
# 'webdev': 'Web Development',
# 'webdev/django': 'Web Development/Django',
# 'random': 'Odds and Ends',
# },
# }
# By default, category indexes will appear in CATEGORY_PATH and use
# CATEGORY_PREFIX. If this is enabled, those settings will be ignored (except
# for the index) and instead, they will follow destination paths (eg. category
# 'foo' might appear in 'posts/foo'). If the category does not come from a
# destpath, first entry in POSTS followed by the category name will be used.
# For this setting, category hierarchies are required and cannot be flattened.
# CATEGORY_PAGES_FOLLOW_DESTPATH = False
# If ENABLE_AUTHOR_PAGES is set to True and there is more than one
# author, author pages are generated.
# ENABLE_AUTHOR_PAGES = True
# Path to author pages. Final locations are:
# output / TRANSLATION[lang] / AUTHOR_PATH / index.html (list of authors)
# output / TRANSLATION[lang] / AUTHOR_PATH / author.html (list of posts by an author)
# output / TRANSLATION[lang] / AUTHOR_PATH / author RSS_EXTENSION (RSS feed for an author)
# (translatable)
# AUTHOR_PATH = "authors"
# If AUTHOR_PAGES_ARE_INDEXES is set to True, each author's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# AUTHOR_PAGES_ARE_INDEXES = False
# Set descriptions for author pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the author list or index page’s title.
# AUTHOR_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "Juanjo Conti": "Python coder and writer.",
# "Roberto Alsina": "Nikola father."
# },
# }
# If you do not want to display an author publicly, you can mark it as hidden.
# The author will not be displayed on the author list page and posts.
# Tag pages will still be generated.
HIDDEN_AUTHORS = ['Guest']
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# (translatable)
# INDEX_PATH = ""
# Optional HTML that displayed on “main” blog index.html files.
# May be used for a greeting. (translatable)
FRONT_INDEX_HEADER = {
DEFAULT_LANG: ''
}
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Create previous, up, next navigation links for archives
# CREATE_ARCHIVE_NAVIGATION = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# (translatable)
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# Extension for RSS feed files
# RSS_EXTENSION = ".xml"
# RSS filename base (without extension); used for indexes and galleries.
# (translatable)
# RSS_FILENAME_BASE = "rss"
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / RSS_FILENAME_BASE RSS_EXTENSION
# (translatable)
# RSS_PATH = ""
# Final location for the blog main Atom feed is:
# output / TRANSLATION[lang] / ATOM_PATH / ATOM_FILENAME_BASE ATOM_EXTENSION
# (translatable)
# ATOM_PATH = ""
# Atom filename base (without extension); used for indexes.
# (translatable)
ATOM_FILENAME_BASE = "feed"
# Extension for Atom feed files
# ATOM_EXTENSION = ".atom"
# Slug the Tag URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# Slug the Author URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_AUTHOR_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ joe@my.site:/srv/www/site",
# ]
# }
DEPLOY_COMMANDS = {
'default': [
"cd output; nikola build; rm */*/*.md; rm */*/*/*.md; git add --all; git commit -m 'standard commit'; git push origin master",
]
}
# github_deploy configuration
# For more details, read the manual:
# https://getnikola.com/handbook.html#deploying-to-github
# You will need to configure the deployment branch on GitHub.
GITHUB_SOURCE_BRANCH = 'master'
GITHUB_DEPLOY_BRANCH = 'gh-pages'
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = 'origin'
OUTPUT_FOLDER = 'output'
# Whether or not github_deploy should commit to the source branch automatically
# before deploying.
GITHUB_COMMIT_SOURCE = True
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <https://getnikola.com/handbook.html#post-processing-filters>
#
# from nikola import filters
# FILTERS = {
# ".html": [filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Executable for the "yui_compressor" filter (defaults to 'yui-compressor').
# YUI_COMPRESSOR_EXECUTABLE = 'yui-compressor'
# Executable for the "closure_compiler" filter (defaults to 'closure-compiler').
# CLOSURE_COMPILER_EXECUTABLE = 'closure-compiler'
# Executable for the "optipng" filter (defaults to 'optipng').
# OPTIPNG_EXECUTABLE = 'optipng'
# Executable for the "jpegoptim" filter (defaults to 'jpegoptim').
# JPEGOPTIM_EXECUTABLE = 'jpegoptim'
# Executable for the "html_tidy_withconfig", "html_tidy_nowrap",
# "html_tidy_wrap", "html_tidy_wrap_attr" and "html_tidy_mini" filters
# (defaults to 'tidy5').
# HTML_TIDY_EXECUTABLE = 'tidy5'
# List of XPath expressions which should be used for finding headers
# ({hx} is replaced by headers h1 through h6).
# You must change this if you use a custom theme that does not use
# "e-content entry-content" as a class for post and page contents.
# HEADER_PERMALINKS_XPATH_LIST = ['*//div[@class="e-content entry-content"]//{hx}']
# Include *every* header (not recommended):
# HEADER_PERMALINKS_XPATH_LIST = ['*//{hx}']
# File blacklist for header permalinks. Contains output path
# (eg. 'output/index.html')
# HEADER_PERMALINKS_FILE_BLACKLIST = []
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.atom', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return an "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
# Use a thumbnail (defined by ".. previewimage:" in the gallery's index) in
# list of galleries for each gallery
GALLERIES_USE_THUMBNAIL = False
# Image to use as thumbnail for those galleries that don't have one
# None: show a grey square
# '/url/to/file': show the image in that url
GALLERIES_DEFAULT_THUMBNAIL = None
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
# If set to True, EXIF data will be copied when an image is thumbnailed or
# resized. (See also EXIF_WHITELIST)
# PRESERVE_EXIF_DATA = False
# If you have enabled PRESERVE_EXIF_DATA, this option lets you choose EXIF
# fields you want to keep in images. (See also PRESERVE_EXIF_DATA)
#
# For a full list of field names, please see here:
# http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf
#
# This is a dictionary of lists. Each key in the dictionary is the
# name of an IFD (EXIF Image File Directory), and each list item is a field
# you want to preserve. If you have an IFD with only a '*' item, *EVERY*
# item in it will be preserved. If you don't want to preserve anything in
# an IFD, remove it
# from the setting. By default, no EXIF information is kept.
# Setting the whitelist to anything other than {} implies
# PRESERVE_EXIF_DATA is set to True
# To preserve ALL EXIF data, set EXIF_WHITELIST to {"*": "*"}
# EXIF_WHITELIST = {}
# Some examples of EXIF_WHITELIST settings:
# Basic image information:
# EXIF_WHITELIST['0th'] = [
# "Orientation",
# "XResolution",
# "YResolution",
# ]
# If you want to keep GPS data in the images:
# EXIF_WHITELIST['GPS'] = ["*"]
# Embedded thumbnail information:
# EXIF_WHITELIST['1st'] = ["*"]
# If set to True, any ICC profile will be copied when an image is thumbnailed or
# resized.
# PRESERVE_ICC_PROFILES = False
# Folders containing images to be used in normal posts or pages.
# IMAGE_FOLDERS is a dictionary of the form {"source": "destination"},
# where "source" is the folder containing the images to be published, and
# "destination" is the folder under OUTPUT_PATH containing the images copied
# to the site. Thumbnail images will be created there as well.
# To reference the images in your posts, include a leading slash in the path.
# For example, if IMAGE_FOLDERS = {'images': 'images'}, write
#
# .. image:: /images/tesla.jpg
#
# See the Nikola Handbook for details (in the “Embedding Images” and
# “Thumbnails” sections)
# Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE
# options, but will have to be referenced manually to be visible on the site
# (the thumbnail has ``.thumbnail`` added before the file extension by default,
# but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT).
IMAGE_FOLDERS = {'images': 'images'}
# IMAGE_THUMBNAIL_SIZE = 400
# IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}'
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
# INDEXES_PAGES_MAIN = False
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# prettier URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this setting are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
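#
# For example (editor's illustration, not from the Nikola docs):
# INDEXES_PRETTY_PAGE_URL = ['page', '{number}', '{index_file}']
# would place the second blog index page at page/2/index.html.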
#
# If the following is true, a page range navigation will be inserted to indices.
# Please note that this will undo the effect of INDEXES_STATIC, as all index pages
# must be recreated whenever the number of pages changes.
# SHOW_INDEX_PAGE_NAVIGATION = False
# If the following is True, a meta name="generator" tag is added to pages. The
# generator tag is used to specify the software used to generate the page
# (it promotes Nikola).
# META_GENERATOR_TAG = True
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored. Set to None to disable.
# Can be any of:
# algol, algol_nu, autumn, borland, bw, colorful, default, emacs, friendly,
# fruity, igor, lovelace, manni, monokai, murphy, native, paraiso-dark,
# paraiso-light, pastie, perldoc, rrt, tango, trac, vim, vs, xcode
# This list MAY be incomplete since pygments adds styles every now and then.
# Check with list(pygments.styles.get_all_styles()) in an interpreter.
#
# CODE_COLOR_SCHEME = 'default'
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = (
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# )
# Show teasers (instead of full posts) in indexes? Defaults to False.
INDEX_TEASERS = True
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The amount of paragraphs in the post.
# {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser.
# {post_title} The title of the post.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)
FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced
# option used for traffic source tracking.
# Minimum example for use with Piwik: "pk_campaign=feed"
# The following tags exist and are replaced for you:
# {feedRelUri} A relative link to the feed.
# {feedFormat} The name of the syndication format.
# Example using replacement for use with Google Analytics:
# "utm_source={feedRelUri}&utm_medium=nikola_feed&utm_campaign={feedFormat}_feed"
FEED_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# https://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello'), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# If you need to use the literal braces '{' and '}' in your footer text, use
# '{{' and '}}' to escape them (str.format is used)
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
# A simple copyright tag for inclusion in RSS feeds that works just
# like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS
RSS_COPYRIGHT = 'Contents © {date} <a href="mailto:{email}">{author}</a> {license}'
RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}'
RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, intensedebate, isso, muut, commento
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = ""
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = ""
# Create index.html for page folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the PAGE_INDEX
# will not be generated for that directory.
# PAGE_INDEX = False
# Enable comments on pages (i.e. not posts)?
# COMMENTS_IN_PAGES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead map /foo/default.html => /foo)
STRIP_INDEXES = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in <slug>/index.html.
# No web server configuration is required. Also enables STRIP_INDEXES.
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata.
PRETTY_URLS = True
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts (not pages!) by default
# SCHEDULE_ALL = False
# Do you want to add a Mathjax config file?
# MATHJAX_CONFIG = ""
# If you want support for the $.$ syntax (which may conflict with running
# text!), just use this config:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'center', // Change this to 'left' if you want left-aligned equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Want to use KaTeX instead of MathJax? While KaTeX may not support every
# feature yet, it's faster and the output looks better.
# USE_KATEX = False
# KaTeX auto-render settings. If you want support for the $.$ syntax (which may
# conflict with running text!), just use this config:
# KATEX_AUTO_RENDER = """
# delimiters: [
# {left: "$$", right: "$$", display: true},
# {left: "\\\\[", right: "\\\\]", display: true},
# {left: "\\\\begin{equation*}", right: "\\\\end{equation*}", display: true},
# {left: "$", right: "$", display: false},
# {left: "\\\\(", right: "\\\\)", display: false}
# ]
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter': {'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# Defaults are markdown.extensions.(fenced_code|codehilite|extra)
# markdown.extensions.meta is required for Markdown metadata.
MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra']
# Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/)
# Default is {} (no config at all)
# MARKDOWN_EXTENSION_CONFIGS = {}
# Extra options to pass to the pandoc command.
# by default, it's empty, is a list of strings, for example
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# Pandoc does not demote headers by default. To enable this, you can use, for example
# ['--base-header-level=2']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty (which is
# the default right now)
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
SHOW_SOURCELINK = False
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
COPY_SOURCES = False
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# By default, Nikola does not generate Atom files for indexes or link to
# them. Generate Atom for tags by setting TAG_PAGES_ARE_INDEXES to True.
# Atom feeds are built based on INDEX_DISPLAY_POST_COUNT and not FEED_LENGTH
# Switch between plain-text summaries and full HTML content using the
# FEED_TEASER option. FEED_LINKS_APPEND_QUERY is also respected. Atom feeds
# are generated even for old indexes and have pagination link relations
# between each other. Old Atom feeds with no changes are marked as archived.
# GENERATE_ATOM = False
# Only include teasers in Atom and RSS feeds. Disabling this includes the
# full content. Defaults to True.
# FEED_TEASERS = True
# Strip HTML from Atom and RSS feed summaries and content. Defaults to False.
# FEED_PLAIN = False
# Number of posts in Atom and RSS feeds.
# FEED_LENGTH = 10
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a FeedBurner feed or something else.
# RSS_LINK = None
# A search form to search this site, for the sidebar. You can use a Google
# custom search (https://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- DuckDuckGo custom search -->
# <form method="get" id="search" action="https://duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s">
# <input type="hidden" name="k8" value="#444444">
# <input type="hidden" name="k9" value="#D51920">
# <input type="hidden" name="kt" value="h">
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;">
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Google custom search -->
# <form method="get" action="https://www.google.com/search" class="navbar-form navbar-right" role="search">
# <div class="form-group">
# <input type="text" name="q" class="form-control" placeholder="Search">
# </div>
# <button type="submit" class="btn btn-primary">
# <span class="glyphicon glyphicon-search"></span>
# </button>
# <input type="hidden" name="sitesearch" value="%s">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
# Use content distribution networks for jQuery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '.*\/(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.rst'
# (Note the '.*\/' in the beginning -- matches source paths relative to conf.py)
# FILE_METADATA_REGEXP = None
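# Worked example (editor's illustration): with the regexp above, the source
# file "posts/2018-10-22-hello-Hello World.rst" would yield
# date=2018-10-22, slug=hello, title=Hello World.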
# Should titles fetched from file metadata be unslugified (made prettier?)
# FILE_METADATA_UNSLUGIFY_TITLES = True
# If enabled, extract metadata from docinfo fields in reST documents.
# If your text files start with a level 1 heading, it will be treated as the
# document title and will be removed from the text.
# USE_REST_DOCINFO_METADATA = False
# If enabled, hide docinfo fields in reST document output
# HIDE_REST_DOCINFO = False
# Map metadata from other formats to Nikola names.
# Supported formats: yaml, toml, rest_docinfo, markdown_metadata
# METADATA_MAPPING = {}
#
# Example for Pelican compatibility:
# METADATA_MAPPING = {
# "rest_docinfo": {"summary": "description", "modified": "updated"},
# "markdown_metadata": {"summary": "description", "modified": "updated"}
# }
# Other examples: https://getnikola.com/handbook.html#mapping-metadata-from-other-formats
# Map metadata between types/values. (Runs after METADATA_MAPPING.)
# Supported formats: nikola, yaml, toml, rest_docinfo, markdown_metadata
# The value on the right should be a dict of callables.
# METADATA_VALUE_MAPPING = {}
# Examples:
# METADATA_VALUE_MAPPING = {
# "yaml": {"keywords": lambda value: ', '.join(value)}, # yaml: 'keywords' list -> str
# "nikola": {
# "widgets": lambda value: value.split(', '), # nikola: 'widgets' comma-separated string -> list
# "tags": str.lower # nikola: force lowercase 'tags' (input would be string)
# }
# }
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# Uncomment and modify to following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# Bundle JS and CSS into single files to make site loading faster in an
# HTTP/1.1 environment; not recommended for HTTP/2.0 when caching is used.
# Defaults to True.
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Special settings to disable only parts of the indexes plugin.
# Use with care.
# DISABLE_INDEXES = False
# DISABLE_MAIN_ATOM_FEED = False
# DISABLE_MAIN_RSS_FEED = False
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Add the absolute paths to directories containing themes to use them.
# For example, the `v7` directory of your clone of the Nikola themes
# repository.
# EXTRA_THEMES_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# Enabling hyphenation has been shown to break math support in some cases,
# use with caution.
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# If you don’t like slugified file names ([a-z0-9] and a literal dash),
# and would prefer to use all the characters your file system allows.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# If set to True, the tags 'draft', 'mathjax' and 'private' have special
# meaning. If set to False, these tags are handled like regular tags.
USE_TAG_METADATA = False
# If set to True, a warning is issued if one of the 'draft', 'mathjax'
# and 'private' tags are found in a post. Useful for checking that
# migration was successful.
WARN_ABOUT_TAG_METADATA = False
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
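# Example (editor's sketch; the filter itself is made up):
# TEMPLATE_FILTERS = {'shout': lambda s: s.upper()}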
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
# Add any post types here that you want to be displayed without a title.
# If your theme supports it, the titles will not be shown.
TYPES_TO_HIDE_TITLE = []
|
[
"adamrpah@gmail.com"
] |
adamrpah@gmail.com
|
bfeffd867ec839b6d4c31f22392bd1fe0e3ba636
|
3b571a72cfd71bd1085b85f25f8613c5bed09883
|
/music/pipelines.py
|
27c8a98c62ea3d80f92a311fe558893ba49e14fa
|
[] |
no_license
|
IsaacTay/Music-Crawler-Thing
|
cd8356ecc5d73f6ca6a7d778700ae8d0b238cc7d
|
0ead8d8af6306c16926cfb21188cc34ef7951261
|
refs/heads/master
| 2020-03-31T22:50:23.131942
| 2018-10-22T04:51:00
| 2018-10-22T04:51:00
| 152,633,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class MusicPipeline(object):
def __init__(self):
self.albums = {}
    def process_item(self, item, spider):
        # Key albums by lowercased title + main artist so the same album
        # scraped from different chart pages merges into a single record.
        hash_string = item["title"].lower() + item["main_artist"].lower()
        existing = self.albums.get(hash_string)
        if existing is not None:
            if item.get("position") is not None:
                if existing["position"] < item["position"]:
                    # Only overwrite with a numerically larger position
                    # (the original's behavior).
                    existing["position"] = item["position"]
                else:
                    # Otherwise merge every field from the new item.
                    for k in item:
                        existing[k] = item[k]
            else:
                # No chart position on this item: merge all of its fields.
                for k in item:
                    existing[k] = item[k]
        else:
            # First sighting of this album: store the item wholesale.
            self.albums[hash_string] = item
return item
    def close_spider(self, spider):
        # Report how many unique albums were collected, then dump them to disk.
        print(len(self.albums))
        with open("billboard.json", "w") as f:
            f.write(json.dumps(list(self.albums.values())))
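# A minimal sketch (editor's note) of enabling this pipeline in the Scrapy
# project's settings.py; "music" as the package name follows the file path:
# ITEM_PIPELINES = {
#     "music.pipelines.MusicPipeline": 300,
# }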
|
[
"IsaacTay@programmer.net"
] |
IsaacTay@programmer.net
|
e953460e42f7c6af06c7b08ae2ad739a665a57a5
|
1721904383464c44eea23455fe0a8f9e78894087
|
/copysite/main_app/views.py
|
04dafecef156d24a206770858859bb022e8a7680
|
[] |
no_license
|
cmz3ro/new_rep
|
da1952ac84be281cc228be109fe6b8b108eb9296
|
4702f15a3b05b43b9eb1475741d6c313344a11b4
|
refs/heads/master
| 2020-08-06T04:11:39.496736
| 2019-10-04T14:08:01
| 2019-10-04T14:08:01
| 212,828,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def main_app1(request):
return render(request, 'index.html')
def net_gh(request):
return render(request, 'new.html')
def treni(request):
return render(request, 'treni.html')
def rapid(request):
return render(request, 'rapidshot.html')
def tumba(request):
return render(request, 'tumba.html')
def dorojka(request):
return render(request, 'dorijka.html')
def vorota(request):
return render(request, 'vorota.html')
def sopernik(request):
return render(request, 'sopernik.html')
def duga(request):
return render(request, 'duga.html')
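# A minimal urls.py sketch wiring these views (editor's illustration; the
# route strings are assumptions, not part of the original project):
# from django.urls import path
# from main_app import views
#
# urlpatterns = [
#     path("", views.main_app1),
#     path("treni/", views.treni),
# ]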
|
[
"mr.madzero@gmail.com"
] |
mr.madzero@gmail.com
|
3f3e5da40772c8618d50f6231921c209c3688c0c
|
443610f8a32c2e5a9b1022e99830d3ebd451dae2
|
/finance/application.py
|
35d945bdc12a260ffabb75325be7af4dd968ded7
|
[] |
no_license
|
pr0grammers0/Web-sites
|
3efac0e7be479aa588910917328592fa83c2be4e
|
069e8423795ee31803d189f72632a82c64deb427
|
refs/heads/master
| 2020-12-23T00:28:46.347117
| 2020-01-29T12:25:55
| 2020-01-29T12:25:55
| 236,976,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,990
|
py
|
import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
cash = db.execute("SELECT cash FROM users WHERE id = :id", id=session["user_id"])
stocks = db.execute("SELECT stock, shares FROM history WHERE id = :id", id=session["user_id"])
dec = {}
dc = {}
lis = []
ls = []
total = cash[0]["cash"]
for raw in stocks:
if raw["stock"] in dec:
dec[raw["stock"]] += raw["shares"]
else:
dec[raw["stock"]] = raw["shares"]
for raw in stocks:
if raw["stock"] not in ls:
ls.append(raw["stock"])
name = lookup(raw["stock"])
dc["name"] = name["name"]
dc["price"] = name["price"]
dc["symbol"] = raw["stock"]
dc["shares"] = dec[raw["stock"]]
if dc["shares"] == 0:
dc = {}
continue
dc["total"] = dc["price"] * dc["shares"]
lis.append(dc)
total += dc["total"]
dc = {}
return render_template("index.html", cash=cash[0]["cash"], lis=lis, total=total)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == "POST":
if not request.form.get("symbol") or not request.form.get("shares"):
return apology("blank field", 400)
elif int(request.form.get("shares")) < 1:
return apology("enter a positive nubmer", 400)
symbol = lookup(request.form.get("symbol"))
if not symbol:
return apology("invalid symbol", 400)
cash = db.execute("SELECT * FROM users WHERE id = :id", id=session["user_id"])
if symbol["price"] * int(request.form.get("shares")) > cash[0]["cash"]:
return apology("u don't have enough cash", 400)
db.execute("UPDATE users SET cash = cash - :symbol WHERE id = :id",
symbol=symbol["price"] * int(request.form.get("shares")), id=session["user_id"])
db.execute("INSERT INTO history (id, stock, shares, price) VALUES(:id, :stock, :shares, :price)",
id=session["user_id"], stock=request.form.get("symbol").upper(),
shares=int(request.form.get("shares")), price=symbol["price"])
return redirect("/")
else:
return render_template("buy.html")
@app.route("/check", methods=["GET"])
def check():
"""Return true if username available, else false, in JSON format"""
name = db.execute("SELECT username FROM users")
for i in name:
if i["username"] == request.args.get("username"):
return jsonify(False)
return jsonify(True)
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
history = db.execute("SELECT * FROM history WHERE id = :id", id=session["user_id"])
return render_template("history.html", history=history)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
if request.method == "POST":
stock = lookup(request.form.get("symbol"))
if not stock:
return apology("invalid stock symbol", 400)
return render_template("stock.html", stock=stock)
else:
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
if request.method == "POST":
if not request.form.get("username"):
return apology("must provide username", 400)
elif not request.form.get("password"):
return apology("must provide password", 400)
elif not request.form.get("confirmation"):
return apology("must confirm password", 400)
elif request.form.get("confirmation") != request.form.get("password"):
return apology("password don't match", 400)
hash = generate_password_hash(request.form.get("password"))
result = db.execute("INSERT INTO users (username, hash) VALUES(:username, :hash)",
username=request.form.get("username"),
hash=hash)
if not result:
return apology("username already exist", 400)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
stocks = db.execute("SELECT * FROM history WHERE id = :id", id=session["user_id"])
if request.method == "POST":
if not request.form.get("symbol") or not request.form.get("shares"):
return apology("please input things", 403)
elif int(request.form.get("shares")) < 1:
return apology("please enter positive number")
symbol = lookup(request.form.get("symbol"))
shares = db.execute("SELECT shares FROM history WHERE id= :id AND stock = :stock",
id=session["user_id"], stock=symbol["symbol"])
i = 0
for share in shares:
i += share["shares"]
if int(request.form.get("shares")) > i:
return apology("u don't have enough shares")
db.execute("UPDATE users SET cash = cash + :symbol WHERE id = :id",
symbol=symbol["price"] * int(request.form.get("shares")), id=session["user_id"])
db.execute("INSERT INTO history (id, stock, shares, price) VALUES(:id, :stock, :shares, :price)",
id=session["user_id"], stock=request.form.get("symbol"),
shares=-int(request.form.get("shares")), price=symbol["price"])
return redirect("/")
else:
ls = []
for raw in stocks:
if raw["stock"] not in ls:
ls.append(raw["stock"])
return render_template("sell.html", ls=ls)
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
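# To run locally, a sketch (the API_KEY value and FLASK_APP setting are
# placeholders / environment-specific assumptions, not part of this file):
#   export API_KEY=your_iex_token
#   export FLASK_APP=application.py
#   flask run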
|
[
"noreply@github.com"
] |
pr0grammers0.noreply@github.com
|
6d7e01af8ef92a837eda42fa9617c824a1c7d238
|
d0eff281dea9c254b05d3858837d2f84b85c4967
|
/comp110/Lectures/Lecture17.py
|
52de7cb308f835542c40a236d7689db9f46377f7
|
[] |
no_license
|
chadsixt/USD
|
3f98587bdcc7ac18ccddebc2570623f38deeb1df
|
e08421988021a2e3efe524bc57e5e20421db8d9e
|
refs/heads/main
| 2023-08-26T15:08:09.059596
| 2021-04-03T20:20:48
| 2021-04-03T20:20:48
| 354,385,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
# # animals = open('animals.txt', 'w')
# # animals.
# # done = False
# # while not done:
# # a = input("Enter an animal or STOP to finish:")
# # if a == "STOP":
# # done = True
# # else:
# # animals.write(a + '\n')
# # Append-adds at last
# file1 = open("animals.txt", "a") # append mode
# file1.write("Today \n")
# file1.close()
# file1 = open("animals.txt", "r")
# print("Output of Readlines after appending")
# print(file1.read())
# print()
# file1.close()
# def foo(in_name, out_name, n) :
# print(type(in_name))
# in_file = open(in_name, 'r')
# out_file = open(out_name, 'w')
# i = 0
# count = 0
# line = in_file.readline()
# print(type(in_file))
# while ((i < n) and (line != "")):
# count +=1
# if int(line) < 0:
# out_file.write(line)
# i += 1
# line = in_file.readline()
# print(count)
# in_file.close()
# out_file.close()
# foo("nums.txt", "nums2.txt", 2)
def is_prime(n):
if n < 2:
return False
i = 2
while i < n:
if n % i == 0:
return False
i += 1
return True
def is_prime(n):
if n < 2:
return False
for i in range(2,n):
if n % i == 0:
return False
return True
def NumberOfPrimes(n):
i = 2
numOfPrimes = 0
total = 0
while total < n:
if is_prime(i):
total += i
print(i)
numOfPrimes += 1
i += 1
return numOfPrimes
print(NumberOfPrimes(100))
# def bar(n):
# i = 2
# j = 0
# sumOfPrimes = 0
# totalPrimes = 0
# while sumOfPrimes < n:
# if is_prime(i):
# sumOfPrimes += i
# totalPrimes += 1
# i += 1
# return j
|
[
"chadsixt@gmail.com"
] |
chadsixt@gmail.com
|
2f2ee6dfe338018229d53382f6b0a744a6bdc60d
|
1257783c225775e4c01754a4f6ded76ca7d0a080
|
/bin/easy_install-2.7
|
913c53facb098c3efa74176140b413a9cc15132f
|
[] |
no_license
|
Cosmin-Parvulescu/BLoto649
|
8da54d1772527ec93c63bd314aa49bfb80bdbacf
|
53008dd378d5642f05566aafd1ccd009e83dcc71
|
refs/heads/master
| 2021-03-12T23:34:54.518719
| 2013-08-16T09:23:01
| 2013-08-16T09:23:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
7
|
#!/home/jabber/Dev/ParseME/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.9.7','console_scripts','easy_install-2.7'
__requires__ = 'setuptools==0.9.7'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('setuptools==0.9.7', 'console_scripts', 'easy_install-2.7')()
)
|
[
"Cosmin.Parvulescu@gmail.com"
] |
Cosmin.Parvulescu@gmail.com
|
547782c1c7f1062f8ca8d9cf8a7763a41703c0d1
|
5fc72b3a6f3e180867d216335faee14626331bd5
|
/custom_dockerfile.py
|
1b8c9e93f347d23f6d02bc38f2ac1964cfc3e086
|
[] |
no_license
|
pombredanne/dummy-flows
|
14eaaba225e5e593fe3b32c2fe9b3c37b7a27329
|
a9e56d027f8327ccc698ead6bfec8d80ae925c69
|
refs/heads/master
| 2020-09-23T00:01:03.901673
| 2019-11-21T15:38:31
| 2019-11-21T15:38:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
from prefect import task, Flow
from prefect.environments.storage import Docker
@task
def add(x, y):
return x + y
with Flow(
"dockerfile-test",
storage=Docker(
prefect_version="fix-custom-dockerfiles", dockerfile="Dockerfiles/Dockerfile"
),
) as flow:
add(1, 2)
flow.deploy(project_name="Demo")
|
[
"josh@prefect.io"
] |
josh@prefect.io
|
25a6fdb6017333cd3bf73ebe7ad545cd251d6a9c
|
160c7e90ba06ec3b7a2c5fee07536341a15ab2a0
|
/command/common.py
|
67d45d07b932e4e1ef075ef6427ecdd272ebd656
|
[] |
no_license
|
nginer/WenbenSys
|
4296844e74a6c361072c1c500a61f1950699306c
|
135d4b173c56dac567029e629bc8c3b27864e1a5
|
refs/heads/master
| 2021-01-10T06:28:13.569627
| 2016-03-22T08:54:35
| 2016-03-22T08:54:35
| 54,448,522
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
__author__ = 'Steven'
import re
def start_end_str_split(start, end, base_str):
if base_str is None:
return None
if start is not None and end is not None:
if re.search(start, base_str) is not None and re.search(end, base_str) is None:
str_list = re.split(start, base_str)
return str_list[1]
if re.search(start, base_str) is None and re.search(end, base_str) is not None:
str_list = re.split(end, base_str)
return str_list[0]
if re.search(start, base_str) is not None and re.search(end, base_str) is not None:
str_list = re.split(start, base_str)
str_list = re.split(end, str_list[1])
return str_list[0]
return None
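# Illustrative examples (hypothetical inputs; note that start/end are
# treated as regular expressions by re.search/re.split):
#   start_end_str_split('<b>', '</b>', 'x<b>y</b>z')  -> 'y'
#   start_end_str_split('<b>', '</b>', '<b>y')        -> 'y'   (only start found)
#   start_end_str_split('<b>', '</b>', 'y</b>z')      -> 'y'   (only end found)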
|
[
"1125082967@qq.com"
] |
1125082967@qq.com
|
0e06e88b79e2ed5e7dddf73795ab1db8b60f2920
|
dc7523a441e649c1f5e669dc3feb802e025c4fc8
|
/mysite/settings.py
|
e3e4e26d401f1e57397bd1210ccb7f0fbc4d9e04
|
[] |
no_license
|
amirGG138/djangolessons
|
0461e30a38be55bdbe4346bd0a6ad90afe3469a3
|
cfc27fa0fb2000b4b5d1424535a53df1d674fa1a
|
refs/heads/master
| 2022-11-13T03:56:43.690937
| 2020-07-06T16:07:08
| 2020-07-06T16:07:08
| 277,575,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,190
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v9%_-&l824n^-36m&0js4i=if=tr-n=v*8273+d#&$mw*t2g_b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','localhost', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"amir3265hdh@gmail.com"
] |
amir3265hdh@gmail.com
|
143ff1899e2587c560a214ed859ca4e54878ddad
|
7b13e6acb2a1f26936462ed795ee4508b4088042
|
/算法题目/算法题目/贪心/LeetCode435不重叠的区间个数.py
|
cacd337e8dadd87d2f66fcbdb7a499ddc00dc09d
|
[] |
no_license
|
guojia60180/algorithm
|
ed2b0fd63108f30cd596390e64ae659666d1c2c6
|
ea81ff2722c7c350be5e1f0cd6d4290d366f2988
|
refs/heads/master
| 2020-04-19T08:25:55.110548
| 2019-05-13T13:29:39
| 2019-05-13T13:29:39
| 168,076,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
#Author guo
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
def eraseOverlapIntervals(self, intervals):
        # Answer = total intervals - max number of non-overlapping intervals,
        # i.e. the minimum number of intervals to remove.
        # Sort by start point; when two intervals overlap, greedily keep the
        # one with the smaller end point.
        if not intervals:
            return 0
        intervals.sort(key=lambda x: x.start)  # sort intervals by start point
        curr, cnt = intervals[0].end, 0  # initialize with the first interval's end
        for x in intervals[1:]:
            if x.start < curr:
                cnt += 1
                curr = min(curr, x.end)  # keep the smaller end each time
            else:
                curr = x.end
return cnt
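# Added usage sketch: a quick hand-checked sanity test of the method above,
# using the well-known LeetCode 435 example (removing [1,3] suffices).
if __name__ == "__main__":
    ivs = [Interval(1, 2), Interval(2, 3), Interval(3, 4), Interval(1, 3)]
    assert Solution().eraseOverlapIntervals(ivs) == 1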
|
[
"44565715+guojia60180@users.noreply.github.com"
] |
44565715+guojia60180@users.noreply.github.com
|
9ecc63658ca30a6ca035c9ca8b1d9e0404aaeae2
|
59e874a27b798dd1898932c57c1e52331a9e1f46
|
/main/line_selector.py
|
abcf2d7fa931f8998279bd5f1cce976fdf3379d2
|
[] |
no_license
|
cm-hirano-shigetoshi/fzfyml3
|
d1b8673134abb51b5bed5ac599b02ec6ec8ba9ab
|
7757cba7ebe075e06ba9d514d0fa76affab022e2
|
refs/heads/master
| 2023-04-20T10:14:12.121062
| 2021-02-19T00:16:20
| 2021-02-19T00:16:20
| 312,772,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
#!/usr/bin/env python3
import os
import sys
import argparse
sys.stdout.reconfigure(line_buffering=True)
p = argparse.ArgumentParser()
p.add_argument('target_file')
p.add_argument('-0', '--zero', action='store_true')
p.add_argument('-o', '--fzf_output', action='store_true')
args = p.parse_args()
try:
stdin_lines = sys.stdin.readlines()
if args.fzf_output:
# query
print(stdin_lines.pop(0), end='')
# key
print(stdin_lines.pop(0), end='')
if len(stdin_lines) == 0:
sys.exit()
index_queue = [int(x) for x in stdin_lines.pop(0).split(' ')]
index_set = set(index_queue)
max_index = max(index_set)
lines = {}
with open(args.target_file, 'r') as f:
i = 0 if not args.zero else -1
while len(index_queue) > 0:
index = index_queue.pop(0)
if index in lines:
print(lines[index])
continue
while True:
i += 1
line = f.readline()
if i in index_set:
line = line.strip("\n")
lines[i] = line
if i == index:
print(line)
break
except BrokenPipeError:
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(1)
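# Usage sketch (assumed invocation; file names are hypothetical):
#   printf '3 1 3\n' | ./line_selector.py target.txt
# reads one line of space-separated 1-based line indices from stdin and
# prints the corresponding lines of target.txt (repeated indices are served
# from the in-memory cache). With -0 the indices are 0-based; with -o the
# first two stdin lines (fzf query and key) are echoed through first.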
|
[
"hirano.shigetoshi@classmethod.jp"
] |
hirano.shigetoshi@classmethod.jp
|
257edf239d8281df07b1f4dea826287042555ddf
|
cc90321cc6750ab2ca939545b26ed0c82e7aed48
|
/Algorithms/res_manager.py
|
751d0a9148844d0e74e24ad60a8f94a2e409cab2
|
[] |
no_license
|
MatPont/Deep_Dimensionality_Reduction
|
da74af98b9635ce528361e35c4694ad0c19759ba
|
9e1e04ea6a17f693c318b4d2449158cf6723daea
|
refs/heads/master
| 2020-09-27T15:56:45.844705
| 2020-03-02T18:27:05
| 2020-03-02T18:27:05
| 226,550,521
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
import sys
import pandas as pd
from statistics import mean, stdev
df = pd.read_csv(sys.argv[1], header=None)
print(df.shape)
for i in range(df.shape[1]):
print("=====================")
print(mean(df.iloc[:, i]))
print(stdev(df.iloc[:, i]))
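# Usage sketch (hypothetical file name):
#   python res_manager.py results.csv
# prints the CSV's shape, then the mean and standard deviation of each column.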
|
[
"matthieu.pont@hotmail.fr"
] |
matthieu.pont@hotmail.fr
|
5a46453ae23df0b34ebbd21aea03b23826841bcf
|
06a863150a7a3a7bfc0c341b9c3f267727606464
|
/packages/Mock/DebugInfo/__init__.py
|
aff74a93b017d77ea82d359e419b302063003e6e
|
[
"MIT"
] |
permissive
|
brucelevis/gii
|
c843dc738a958b4a2ffe42178cff0dd04da44071
|
03624a57cf74a07e38bfdc7f53c50bd926b7b5a7
|
refs/heads/master
| 2020-10-02T00:41:02.723597
| 2016-04-08T07:44:45
| 2016-04-08T07:44:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17
|
py
|
import DebugInfo
|
[
"tommo.zhou@gmail.com"
] |
tommo.zhou@gmail.com
|
9547290fc4d8dc92507084af21e16b149380bfbb
|
023eff43972f8a5635dace98a95fa52a0aef8a80
|
/classproject/manage.py
|
b47b2ddfca9b4e3ce017c1d609bcdce59715e6d1
|
[] |
no_license
|
blackjackal982/summer2019_smec_saisirisha
|
9b430714c29ca378d9a3d0d378295c9f6900bb8b
|
ff5413d168b34d45080075e5a158ace1feb79fbc
|
refs/heads/master
| 2020-06-01T22:57:09.847612
| 2019-07-16T05:44:56
| 2019-07-16T05:44:56
| 190,958,117
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE',"classproject.settings")
django.setup()
def main():
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"ssirisha639@gmail.com"
] |
ssirisha639@gmail.com
|
0fbb7d8e6ebac1dbda9ad2f4bf13286215789b11
|
e50fb7ffcd848b018aa875124b500c4240f92701
|
/config.py
|
daf523429d4373133b35d3f19f3c19e3b7af0c60
|
[] |
no_license
|
GameBuilders/student-gdc-2014
|
711ec4c43c7c6068f06fc106e268da36ac0a6214
|
8d930d3335eafb078ed9349be27c74a101b19507
|
refs/heads/master
| 2021-01-23T12:04:43.837237
| 2014-11-23T01:46:25
| 2014-11-23T01:46:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# A class for storing any configuration values needed by multiple classes.
class Config():
# Width and height of game window (in pixels)
WIDTH = 1024
HEIGHT = 768
# Map scroll speed (px/sec)
SCROLL_SPEED = 200
|
[
"norby2@illinois.edu"
] |
norby2@illinois.edu
|
0c7787c64433d6b03e6b55db2b8c6bae50bc9ef9
|
edf6909d67c332bdc62f3e7d26591e736b90358c
|
/books/urls.py
|
fb3e5306d51e9ed2acc20647a48f5ac807f888e4
|
[] |
no_license
|
Ibrohim9862/books
|
843de0e02a6d667abcf804f8e2dce97db0ae1ab1
|
052c94828a0284aaf06570afd3e46a51f6521d64
|
refs/heads/master
| 2023-08-19T15:58:04.315520
| 2021-10-31T10:03:18
| 2021-10-31T10:03:18
| 423,108,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
from django.urls import path
from .views import (BookListView, AllBooks, BookdetailView, HomePageView, Muqova)
urlpatterns = [
path('',HomePageView.as_view(),name='home'),
path('allbooks/',AllBooks.as_view(),name='allbooks'),
path('muqova/',Muqova.as_view(),name='muqova'),
path('<int:pk>', BookListView.as_view(),name='book_list'),
path("<slug:slug>", BookdetailView.as_view(), name="book_detail")
]
|
[
"ibrohim9862@gmail.com"
] |
ibrohim9862@gmail.com
|
1403bd3e81a977b7bcd5b27bae127eba0fab073a
|
76afd0db1899350f775e3248d181e7c11e1fd662
|
/Jun 2021/src/catboost_model.py
|
701b1e10417706b4fd58201dbb462131db85ac3e
|
[
"MIT"
] |
permissive
|
Bakar31/Kaggle-Tabular-Playground-Series
|
7720393a6dec26963f5bae4804c0141a5063cdf0
|
bf4e6c38cae4d995411c9703b441fc8bac8044ed
|
refs/heads/master
| 2023-08-11T03:04:45.779587
| 2021-09-22T19:35:45
| 2021-09-22T19:35:45
| 407,869,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 27 16:11:24 2021
@author: Abu Bakar
"""
# public Score: 1.75047
import pickle
import pandas as pd
from sklearn.metrics import log_loss
from catboost import CatBoostClassifier
from data_preprocessing import x_train, x_test, y_train, y_test
from input import test, sample_sub
cat_model = CatBoostClassifier(depth=8,
iterations=1000,
learning_rate=0.02,
eval_metric='MultiClass',
loss_function='MultiClass',
bootstrap_type= 'Bernoulli',
leaf_estimation_method='Gradient',
random_state=13)
cat_model.fit(x_train, y_train, verbose=100)
cat_prediction = cat_model.predict_proba(x_test)
print(log_loss(y_test, cat_prediction)) #1.7477
pickle.dump(cat_model, open('catboost model.h5', 'wb'))
#predicting on test set
cat_preds = cat_model.predict_proba(test)
# output to csv
cat_submission = pd.concat([sample_sub.id, pd.DataFrame(cat_preds,
columns=["Class_1", "Class_2", "Class_3","Class_4", "Class_5", "Class_6", "Class_7", "Class_8", "Class_9"])], axis = 1)
cat_submission.to_csv('cat_submission.csv', index = False)
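# Later, the saved model can be reloaded for inference (a sketch; the path
# matches the pickle.dump call above):
#   loaded_model = pickle.load(open('catboost model.h5', 'rb'))
#   loaded_preds = loaded_model.predict_proba(test)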
|
[
"abubakar1808031@gmail.com"
] |
abubakar1808031@gmail.com
|
194786fcd5a14548ba9caecb895d18781f1722c7
|
23b34a9a25755648d637f8f88bc2e19ec8569d7d
|
/pokes/migrations/0008_auto_20210130_1614.py
|
da301a15fa800a0d30e316eb10682b276cb69e9d
|
[] |
no_license
|
Mateusallz1/ProjetoFinalPI2
|
8e31bd027d0b67b624c4745c645f85ffe162be14
|
25fb5ace20467cce461716a206409c85fdfc7f97
|
refs/heads/master
| 2023-02-23T15:53:26.921719
| 2021-02-02T21:25:31
| 2021-02-02T21:25:31
| 334,478,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
# Generated by Django 3.0 on 2021-01-30 19:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pokes', '0007_auto_20210130_1614'),
]
operations = [
migrations.AlterField(
model_name='pokemon',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pokemonowner', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='trainer',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='trainers', to=settings.AUTH_USER_MODEL),
),
]
|
[
"trafalgarmateus@gmail.com"
] |
trafalgarmateus@gmail.com
|
9978e5150fc697dcbc874158d958d0f62d313ef9
|
1d7256a89de883ddd45a7731d309aae404491035
|
/information_boards/old_codes/a_overall_analysis/a1_intermediate/a1_i1_trip_modes.py
|
f3c8c96e607c5c237be7598004686027f76594e0
|
[] |
no_license
|
M20190649/taxi_projects
|
a559329366fe2357b826909543a54b12b6d98301
|
b910e2308d4725e88fb00714e489b1291cfeb215
|
refs/heads/master
| 2021-06-17T15:14:55.849298
| 2017-06-01T10:17:23
| 2017-06-01T10:17:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,197
|
py
|
#
import csv
import datetime
from information_boards import AM2, AM5
from information_boards import DIn_PIn, DIn_POut, DOut_PIn, DOut_POut
from information_boards import IN, OUT
from information_boards import ap_poly, ns_poly
from information_boards import error_hours
from information_boards import taxi_home
from information_boards.old_codes.a_overall_analysis import trips_dpath, trip_prefix
from taxi_common.file_handling_functions import check_dir_create
from taxi_common.multiprocess import init_multiprocessor, put_task, end_multiprocessor
def run():
check_dir_create(trips_dpath)
#
init_multiprocessor(11)
count_num_jobs = 0
for y in xrange(9, 11):
for m in xrange(1, 13):
yymm = '%02d%02d' % (y, m)
if yymm in ['0912', '1010']:
                # data for both of these months is corrupted
continue
put_task(tripMode_prevTripTime, [yymm])
count_num_jobs += 1
end_multiprocessor(count_num_jobs)
def tripMode_prevTripTime(yymm):
print 'handle the file; %s' % yymm
yy, mm = yymm[:2], yymm[-2:]
yyyy = str(2000 + int(yy))
normal_file = taxi_home + '/%s/%s/trips/trips-%s-normal.csv' % (yyyy, mm, yymm)
ext_file = taxi_home + '/%s/%s/trips/trips-%s-normal-ext.csv' % (yyyy, mm, yymm)
#
vehicle_prev_trip_position_time = {}
with open('%s/%s%s.csv' % (trips_dpath, trip_prefix, yymm), 'wt') as w_csvfile:
writer = csv.writer(w_csvfile, lineterminator='\n')
new_headers = ['tid', 'vid', 'did', 'start-time', 'end-time',
'duration', 'fare',
'ap-trip-mode', 'ns-trip-mode',
'prev-trip-end-time']
writer.writerow(new_headers)
#
with open(normal_file, 'rb') as r_csvfile1:
reader1 = csv.reader(r_csvfile1)
headers1 = reader1.next()
# {'trip-id': 0, 'job-id': 1, 'start-time': 2, 'end-time': 3,
# 'start-long': 4, 'start-lat': 5, 'end-long': 6, 'end-lat': 7,
# 'vehicle-id': 8, 'distance': 9, 'fare': 10, 'duration': 11,
# 'start-dow': 12, 'start-day': 13, 'start-hour': 14, 'start-minute': 15,
# 'end-dow': 16, 'end-day': 17, 'end-hour': 18, 'end-minute': 19}
hid1 = {h : i for i, h in enumerate(headers1)}
with open(ext_file, 'rb') as r_csvfile2:
reader2 = csv.reader(r_csvfile2)
headers2 = reader2.next()
# {'start-zone': 0, 'end-zone': 1, 'start-postal': 2, 'driver-id': 4, 'end-postal': 3}
hid2 = {h : i for i, h in enumerate(headers2)}
for row1 in reader1:
row2 = reader2.next()
tid, vid = row1[hid1['trip-id']], row1[hid1['vehicle-id']]
st_ts, et_ts = row1[hid1['start-time']], row1[hid1['end-time']]
dur, fare = row1[hid1['duration']], row1[hid1['fare']]
s_long, s_lat = eval(row1[hid1['start-long']]), eval(row1[hid1['start-lat']])
e_long, e_lat = eval(row1[hid1['end-long']]), eval(row1[hid1['end-lat']])
c_sl_ap, c_el_ap = ap_poly.is_including((s_long, s_lat)), ap_poly.is_including((e_long, e_lat))
c_sl_ns, c_el_ns = ns_poly.is_including((s_long, s_lat)), ns_poly.is_including((e_long, e_lat))
did = row2[hid2['driver-id']]
#
if not vehicle_prev_trip_position_time.has_key(vid):
# ASSUMPTION
# If this trip is the driver's first trip in a month,
# let's assume that the previous trip occurred at outside of the airport and Night safari
# and also assume that the previous trip's end time is the current trip's start time
# False means the trip occur at outside of the airport or Night safari
vehicle_prev_trip_position_time[vid] = (OUT, OUT, st_ts)
pt_el_ap, pt_el_ns, pt_time = vehicle_prev_trip_position_time[vid]
ap_trip_mode, ns_trip_mode = None, None
#
if pt_el_ap == IN and c_sl_ap == IN: ap_trip_mode = DIn_PIn
elif pt_el_ap == IN and c_sl_ap == OUT: ap_trip_mode = DIn_POut
elif pt_el_ap == OUT and c_sl_ap == IN: ap_trip_mode = DOut_PIn
elif pt_el_ap == OUT and c_sl_ap == OUT: ap_trip_mode = DOut_POut
else: assert False
#
if pt_el_ns == IN and c_sl_ns == IN: ns_trip_mode = DIn_PIn
elif pt_el_ns == IN and c_sl_ns == OUT: ns_trip_mode = DIn_POut
elif pt_el_ns == OUT and c_sl_ns == IN: ns_trip_mode = DOut_PIn
elif pt_el_ns == OUT and c_sl_ns == OUT: ns_trip_mode = DOut_POut
else: assert False
#
vehicle_prev_trip_position_time[vid] = (c_el_ap, c_el_ns, et_ts)
#
                    # Skip trips whose start hour falls between AM2 and AM5 (inclusive),
                    # i.e. only keep trips starting before 2 AM or after 5 AM
#
t = eval(row1[hid1['start-time']])
cur_dt = datetime.datetime.fromtimestamp(t)
if AM2 <= cur_dt.hour and cur_dt.hour <= AM5:
continue
need2skip = False
for ys, ms, ds, hs in error_hours:
yyyy0 = 2000 + int(ys)
mm0, dd0, hh0 = map(int, [ms, ds, hs])
if (cur_dt.year == yyyy0) and (cur_dt.month == mm0) and (cur_dt.day == dd0) and (
cur_dt.hour == hh0):
need2skip = True
if need2skip: continue
#
new_row = [tid, vid, did,
st_ts, et_ts,
dur, fare,
ap_trip_mode, ns_trip_mode, pt_time]
writer.writerow(new_row)
if __name__ == '__main__':
run()
|
[
"jerryhan88@gmail.com"
] |
jerryhan88@gmail.com
|
b1f17c0fb429a5cd9197e863ed673ee111735cd5
|
027053eafb8930d58df937d93016feb80d7eb7b1
|
/taf/testlib/linux/etcd_helper.py
|
aab25ad4259ae7e6f1a3e875fd8afa28f3a47d4a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
mskrocki/taf
|
6b6131c770b1b48976931f093fad37a55a2f8dd3
|
c17986c99102c252fbef1e33ea441aedd7320130
|
refs/heads/master
| 2021-01-11T23:26:50.007851
| 2017-03-23T20:15:42
| 2017-03-23T20:34:51
| 78,583,195
| 0
| 0
| null | 2017-01-10T23:12:40
| 2017-01-10T23:12:40
| null |
UTF-8
|
Python
| false
| false
| 3,597
|
py
|
"""
@copyright Copyright (c) 2016 - 2017, Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@file: etcd_helper.py
"""
import itertools
from contextlib import suppress
import time
import etcd
from plugins import loggers
from testlib.linux.utils import wait_for
ROOT_KEY = '/intel.com/tests'
class EtcdHelperException(Exception):
pass
class EtcdHelper(object):
CLASS_LOGGER = loggers.ClassLogger()
def __init__(self, endpoint):
if isinstance(endpoint, str):
etcd_protocol, address_port = endpoint.split('://')
etcd_address, etcd_port = address_port.split(':')
self.etcd_config = {
'host': etcd_address,
'port': int(etcd_port),
'protocol': etcd_protocol
}
elif isinstance(endpoint, dict):
self.etcd_config = endpoint
self.etcd = etcd.Client(**self.etcd_config)
self._root_key = ROOT_KEY
self._cwd = ROOT_KEY
self._latest_id_key = '/'.join([ROOT_KEY, 'latest'])
def init_etcd(self):
self.CLASS_LOGGER.debug("Initializing etcd test entries")
self.etcd.write(self._latest_id_key, "0")
def change_dir(self, directory):
self._cwd = '/'.join([self._root_key, directory])
def _get_key(self, item):
return '/'.join(itertools.chain([self._cwd], item.split('__')[1:]))
def _get_root_key(self, item):
return '/'.join(itertools.chain([self._root_key], item.split('__')[1:]))
def __getattr__(self, item):
if item.startswith('key__'):
return self._get_key(item)
elif item.startswith('rootvalue__'):
return self.etcd.read(self._get_root_key(item))
elif item.startswith('value__'):
return self.etcd.read(self._get_key(item))
raise AttributeError('Unknown attribute {}'.format(item))
def __setattr__(self, item, value):
if item.startswith('rootvalue__'):
self.etcd.write(self._get_root_key(item), value)
elif item.startswith('value__'):
self.etcd.write(self._get_key(item), value)
else:
super().__setattr__(item, value)
@property
def latest_id(self):
with suppress(AttributeError):
return self._latest_id
for _ in range(2):
with suppress(etcd.EtcdKeyNotFound):
self._latest_id = int(self.etcd.read(self._latest_id_key).value) # pylint: disable=no-member
return self._latest_id
self.init_etcd()
raise EtcdHelperException("Failed to find test_id")
def read_list(self, key):
return self.etcd.read(key).leaves
def wait_for_key_count(self, key, count, timeout=15):
def get_key_count():
with suppress(etcd.EtcdKeyNotFound):
return len(list(self.read_list(key)))
return 0
self.CLASS_LOGGER.info('Waiting for %s to give %d. Timeout is %d.', key, count, timeout)
wait_for(iter(get_key_count, count), timeout)
self.CLASS_LOGGER.debug('%s gave %d', key, count)
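# Usage sketch for the magic "value__" / "rootvalue__" attributes handled by
# __getattr__/__setattr__ above (the endpoint is a placeholder; keys resolve
# under /intel.com/tests):
#
#   helper = EtcdHelper("http://127.0.0.1:2379")
#   helper.value__status = "running"     # writes /intel.com/tests/status
#   print(helper.value__status.value)    # reads it back (an etcd.EtcdResult)
#   helper.change_dir("run42")
#   helper.value__status = "done"        # now writes /intel.com/tests/run42/status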
|
[
"ross.b.brattain@intel.com"
] |
ross.b.brattain@intel.com
|
0b3a8a35fce42091a9057025ff22471931c9812f
|
1e619d03b1a237dce91bc6dff7df5ec4384c6d01
|
/BTRE/contacts/models.py
|
9ea452d7e6f6f556020f6d12697dafae5c893831
|
[] |
no_license
|
afsana1210/BT_RealState_Django_Project
|
10405df7bfbcea3d840adfbcd56ce78d48ba20bd
|
787ad0a0e84d45d5b2bf6a837c6ed406677a81d8
|
refs/heads/master
| 2023-01-06T04:24:47.061839
| 2020-11-02T16:28:55
| 2020-11-02T16:28:55
| 298,036,312
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
from django.db import models
from datetime import datetime
# Create your models here.
class Contact(models.Model):
listing=models.CharField(max_length=200)
listing_id=models.IntegerField()
name=models.CharField(max_length=200)
email=models.CharField(max_length=100)
phone=models.CharField(max_length=100)
message=models.TextField(blank=True)
contact_date=models.DateTimeField(default=datetime.now, blank=True)
user_id=models.IntegerField(blank=True)
def __str__(self):
return self.name
|
[
"afsanaansari1210@gmail.com"
] |
afsanaansari1210@gmail.com
|
b86457b5bb6a53902a3c11b837c941d8a9d25d12
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/INTERVIEW-PREP-COMPLETE/notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/File_Transfer_Protocol/ftp_send_receive.py
|
8fe11e88cab788737b6eb99068d2b507c94cdb2e
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
"""
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root username & password for security reasons
Create a seperate user and provide access to a home directory of the user
Use login id and password of the user created
cwd here stands for current working directory
"""
from ftplib import FTP
ftp = FTP("xxx.xxx.x.x") # Enter the ip address or the domain name here
ftp.login(user="username", passwd="password")
ftp.cwd("/Enter the directory here/")
"""
The file which will be received via the FTP server
Enter the location of the file where the file is received
"""
def ReceiveFile():
FileName = "example.txt" """ Enter the location of the file """
LocalFile = open(FileName, "wb")
ftp.retrbinary("RETR " + FileName, LocalFile.write, 1024)
ftp.quit()
LocalFile.close()
"""
The file which will be sent via the FTP server
The file send will be send to the current working directory
"""
def SendFile():
FileName = "example.txt" """ Enter the name of the file """
ftp.storbinary("STOR " + FileName, open(FileName, "rb"))
ftp.quit()
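"""
Minimal usage sketch (the server address, credentials and file names above
are placeholders): call ReceiveFile() to download example.txt from the
server's current working directory, or SendFile() to upload it. Note that
ftp.quit() closes the connection, so reconnect before issuing further
commands. Also note the original trailing triple-quoted strings on the
FileName lines silently concatenated into the file name; they are comments
now.
"""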
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
f2409fcabd671cdb993f85ac86c011d4f843389b
|
fe75ee7acae299d23825f83abf7492b3b8b9518c
|
/171_car_trialling/SConstruct
|
8a54ec90720fb9e63905ccf94452c4d36b67d063
|
[] |
no_license
|
doctormanhattan/uva-cpp
|
e9aea4aa55b063981cb4743042e4b7c093962034
|
17861799ee3340b18863ef6303406a7c60851813
|
refs/heads/master
| 2021-01-22T21:28:11.018228
| 2013-06-06T10:39:21
| 2013-06-06T10:39:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
Program('171_car_trialling.bin', Glob('*.cpp'))
|
[
"drmanhattan@gmx.us"
] |
drmanhattan@gmx.us
|
|
6fec9f33fd7ed9421dc0dcecc2103c6060ff66dd
|
00d930ffaa543604f2bcd6cabbdedabeeaf09215
|
/riboSeed/riboTry.py
|
d0aabe89f995e5ef0a14c9d4dd6bd2967272e25b
|
[
"MIT"
] |
permissive
|
nickp60/riboSeed
|
ebf2817151310bf55e70946403e4248ad4b0232f
|
636eaf78a1bbe4517c43ddb120e5ca5bb2b97212
|
refs/heads/master
| 2021-05-01T18:38:47.939543
| 2020-06-30T17:37:34
| 2020-06-30T17:37:34
| 68,617,544
| 9
| 3
| null | 2017-09-13T16:09:15
| 2016-09-19T15:10:06
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,211
|
py
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
# Copyright 2017, National University of Ireland and The James Hutton Institute
# Author: Nicholas Waters
#
# This code is part of the riboSeed package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
import pkg_resources
import sys
import os
import shutil
import subprocess
import argparse
from .shared_methods import set_up_logging
helpstring = """
Welcome to the ribo try! Here we test the integration of several parts of the
riboSeed pipeline. First, `ribo run` is performed on the included test
dataset. Then, essentially the same thing is done, but calling the
individual subcommands (`ribo scan`, `ribo select`, etc)
If all goes well, no errors should occur, and you should essentially have
two "identical" riboSeed assemblies (although due to random assignments
of mapping duplicates, the nature of error correction, etc, I can't
guarantee that you will get the exact same result
Have fun!
"""
def get_args(test_args=None): # pragma: no cover
parser = argparse.ArgumentParser(
prog="ribo try",
description=helpstring,
add_help=False) # to allow for custom help
parser.prog = "ribo try"
parser.add_argument("-o", "--output", dest='output', action="store",
help="output directory; " +
"default: %(default)s",
default=os.path.join(
os.getcwd(), "riboSeed_sample_results"),
type=str)
parser.add_argument("-v", "--verbosity", dest='verbosity',
action="store",
default=2, type=int, choices=[1, 2, 3, 4, 5],
help="Logger writes debug to file in output dir; " +
"this sets verbosity level sent to stderr. " +
" 1 = debug(), 2 = info(), 3 = warning(), " +
"4 = error() and 5 = critical(); " +
"default: %(default)s")
parser.add_argument("-c", "--cores", dest='cores', action="store",
default=2, type=int,
help="cores to be used" +
"; default: %(default)s")
parser.add_argument("-t", "--threads", dest='threads',
action="store",
default=1, type=int,
choices=[1, 2, 4],
help="if your cores are hyperthreaded, set number" +
" threads to the number of threads per processer." +
"If unsure, see 'cat /proc/cpuinfo' under 'cpu " +
"cores', or 'lscpu' under 'Thread(s) per core'." +
": %(default)s")
parser.add_argument("-m", "--memory", dest='memory', action="store",
default=8, type=int,
help="system memory available" +
"; default: %(default)s")
parser.add_argument("-h", "--help",
action="help", default=argparse.SUPPRESS,
help="Displays this help message")
args = parser.parse_args(sys.argv[2:])
return args
def main(args, logger=None):
output_root = os.path.abspath(os.path.expanduser(args.output))
try:
os.makedirs(output_root, exist_ok=False)
except OSError:
print("Output directory %s already exists; exiting..." % output_root)
sys.exit(1)
log_path = os.path.join(output_root, "riboTry.log")
if logger is None:
logger = set_up_logging(verbosity=args.verbosity,
outfile=log_path,
name=__name__)
logger.info("Testing your installation of riboSeed on some test data")
# here we locate the test data we packaged with riboSeed -
# some reads and a reference
resource_package = pkg_resources.Requirement.parse("riboSeed")
logger.debug(resource_package)
# this looks like I should be using os.path.join, but the package resource
# stuff needs unix-style path seps
resource_path_fasta = '/'.join(('riboSeed',
'integration_data', 'concatenated_seq.fasta'))
resource_path_reffasta = '/'.join(('riboSeed',
'integration_data', 'NC_000913.3.fasta'))
resource_path_1 = '/'.join(('riboSeed',
'integration_data', 'test_reads1.fq'))
resource_path_2 = '/'.join(('riboSeed',
'integration_data', 'test_reads2.fq'))
logger.debug(resource_path_fasta)
fasta = pkg_resources.resource_filename(resource_package, resource_path_fasta)
reffasta = pkg_resources.resource_filename(resource_package,
resource_path_reffasta)
fastq1 = pkg_resources.resource_filename(resource_package, resource_path_1)
fastq2 = pkg_resources.resource_filename(resource_package, resource_path_2)
# fasta_path = pkg_resources.resource_string("/", resource_path)
logger.debug(fasta)
logger.debug(reffasta)
logger.debug(fastq1)
logger.debug(fastq2)
for i in ["blastn", "spades.py", "bwa", "mafft",
"samtools", "barrnap"]:
assert shutil.which(i) is not None, \
"{0} executable not found in PATH!".format(i)
ribo_run_cmd = str(
"ribo run -r {0} -o {1} -F {2} -R {3} --serialize -v 1 " +
"--subassembler skesa " +
"--stages stack score spec --cores {4} --threads {5} --memory {6}"
).format(
fasta,
os.path.join(output_root, "run"),
fastq1,
fastq2,
args.cores,
args.threads,
args.memory)
logger.info("running " + ribo_run_cmd)
logger.info("This usually take about ~4-5 minutes to run all the modules")
subprocess.run([ribo_run_cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
logger.info("finished running integration test with ribo run!")
|
[
"nickp60@gmail.com"
] |
nickp60@gmail.com
|
dcd8d140d3cdc66d0d6c05ac8c4de0b0b4d7dc71
|
030a3f09173ac54645bd2e22e8966539bb9baa25
|
/dserv1.py
|
117b74f647bf98c627e6ce60017e30ccde37fc95
|
[] |
no_license
|
mstram/SATK-redis-nodeJS-DasdServer
|
a1c1d203e0773035a9307802a1c39798e7754569
|
c0466b53483991822156229f921b69b1d18a49f2
|
refs/heads/master
| 2020-05-15T20:06:16.010358
| 2014-08-02T12:22:58
| 2014-08-02T12:22:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
#!/home/action/python3/bin/python
#http://www.openlogic.com/wazi/bid/315666/Developing-a-pubsub-application-in-Redis
import redis
import sys
import time
import ckdutil
from ckdutil import *
r = redis.Redis("localhost")
p = r.pubsub()
#p.subscribe(sys.argv[1])
key = "herc"
p.subscribe("herc")
def doRead():
print("---- doRead ---")
print("r.hvals cmd03:")
print(r.hvals('cmd03:'))
def pt(n,ob):
print("type(" +n +")")
print(type(ob))
#print("Waiting for redis: %s '" % sys.argv[1] +"'")
smsg = "SATK-Dasd Server : Waiting for msg: %s '" % key +"'"
#print("(SATK-Dasd Server : Waiting for msg: %s '" % key +"'")
print(smsg)
while True:
message = p.get_message()
if message:
# do something with the message
print("---------- new msg ---------------------")
print(message)
#print("message['data']")
#print(message['data'])
print(smsg)
#print("len(message) :%d" % len(message))
#print("len(message['data']) :%d" % len(message['data']))
bdata = message['data']
print("bdata:")
print(bdata)
try:
cmd = bdata.decode("utf-8")
except:
cmd = bdata
if(cmd == "rdrec"):
print("calling rd")
doRead()
else:
print("unknown cmd : '%s' " % cmd)
#print("len(data]) :%d" % len(data))
# pt('bdata',bdata)
# pt('data',data)
#for item in message:
#print(item['data'])
#time.sleep(0.001) # be nice to the system :)
time.sleep(0.101)
#for item in ps_obj.listen():
# print(item['data'])
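# Usage sketch: trigger the read handler from another shell (assumes a
# redis server running on localhost):
#   redis-cli publish herc rdrec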
|
[
"mikestramba@gmail.com"
] |
mikestramba@gmail.com
|
3141406ac3e7f9f622238235b76ca965bee2f067
|
a0c962e7cf0b58ebf17ef5d2c1e1b3a7ecd7c246
|
/SMUnews-1.py
|
58bb5880d73e7bada59ff138c9dc4a58c10fc13b
|
[] |
no_license
|
Crew-Spider/EIE152_XCX
|
56214194c5a079149736db29566fac2ce8f31959
|
5d5c97212f8a4380cd5f1a453128ef090e9663eb
|
refs/heads/master
| 2020-03-16T08:30:07.603674
| 2018-06-03T13:22:03
| 2018-06-03T13:22:03
| 132,597,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
#coding:utf-8
# import the required modules
import requests
from bs4 import BeautifulSoup
url = "http://news.baidu.com/ns?cl=2&rn=20&tn=news&word=%E4%B8%8A%E6%B5%B7%E6%B5%B7%E4%BA%8B%E5%A4%A7%E5%AD%A6"
# request the Baidu News search URL for the keyword "Shanghai Maritime University"
# and get the response text
response = requests.get(url)
html = response.text
soup = BeautifulSoup(html, features='lxml')  # build a BeautifulSoup object from the HTML string
news=soup.find_all('div', {"class": "result"})
#times=soup.find_all('p',{"class":"c-author"})
#href=soup.find_all('h3',{"class":"c-title"})
'''
for l in href:
    hs = l.find_all('a')  # the title is inside the <a> tag
for h in hs:
title=h.text
url=h['href']
for t in news:
time = t.find("p").get_text() #时间在p标签中
data = {'标题':title,
'链接':url,
'时间':time}
print(data)
'''
for t in news:
data = {
"标题":t.find('a').text,
"链接":t.find('a')['href'],
"时间":t.find('p').get_text()
}
print(data)
|
[
"chunxiao199732@163.com"
] |
chunxiao199732@163.com
|
bbf12cd7e02681f5ab47f090fde7f18e4214ec74
|
a106a279fd189102d21cab22c9169e83e3503860
|
/geometric_progression.py
|
3f239e1f5aba88c58c0366bcbc095b67b71ddb9f
|
[] |
no_license
|
kofi-py/kingkofi
|
b0db85c5434269f5d024d3805a12e771686521e7
|
f015e88acb274e3aca7bc453d0159fbec2823852
|
refs/heads/main
| 2023-06-15T23:40:16.458043
| 2021-07-02T22:48:32
| 2021-07-02T22:48:32
| 382,479,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
def printGP(a, r, n):
for i in range(0, n):
curr_term = a * pow(r, i)
print(curr_term, end=" ")
# a is starting number
# r is common ratio
# n is number of terms
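# Example: the first five terms of the geometric progression starting at 2
# with common ratio 3 (hand-computed):
#   printGP(2, 3, 5)  ->  2 6 18 54 162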
|
[
"noreply@github.com"
] |
kofi-py.noreply@github.com
|
3a444cb89197836f0cec8f3682c62dde00a19f03
|
9188e6d36a850c1f890ffaebf4c18a74f53c0ea3
|
/com/a/example42/variable_scope.py
|
0b8f515afa2072c2a6470adf84bdf75becff8ba6
|
[] |
no_license
|
IamA1536/Execrise
|
e3f1a1037f97fdf7a43e04f2df2347ad15c433b9
|
325e8c5c2d6045e1b33d0b78ba38791e6747c875
|
refs/heads/master
| 2020-03-31T02:17:35.414037
| 2019-02-12T05:59:24
| 2019-02-12T05:59:24
| 151,816,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
i = 0
n = 0
def dummy():
i = 0
print(i)
i += 1
def dummy2():
global n
print(n)
n += 1
if __name__ == "__main__":
    print('same-name local variable inside a function')
for j in range(20):
print(i)
dummy()
i += 1
    print('same-name variable declared with global')
for k in range(20):
print(n)
dummy2()
n += 10
|
[
"1694522669@QQ.COM"
] |
1694522669@QQ.COM
|
adbf7405edd5903ea18f7cefc84f692c4853ce30
|
571d36f865b545c0a72134e586fbcddd6953a68b
|
/eng/save.py
|
68401ec448371c02f32fe108147957f27298d016
|
[] |
no_license
|
andrew-turner/Ditto
|
a5a79faaf31cc44d08ac5f70fa2ac51e51d1b60f
|
72841fc503c716ac3b524e42f2311cbd9d18a092
|
refs/heads/master
| 2020-12-24T14:19:01.164846
| 2015-05-20T08:42:26
| 2015-05-20T08:42:26
| 35,935,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,643
|
py
|
import pickle
from . import script_engine
# assumed sibling module providing DittoIOException (referenced below)
from . import error
def readSave(fn):
#open the file for reading, load the save game, and close
try:
with open(fn, "rb") as f:
return pickle.load(f)
except IOError:
raise error.DittoIOException("Save file", fn)
def writeSave(savegame):
#open the file for writing, dump the save game, and close
try:
with open(savegame.fn, "wb") as f:
pickle.dump(savegame, f)
except IOError:
raise error.DittoIOException("Save file", fn)
class Save(script_engine.ScriptableObject):
"""
Save object to store game save data.
Gets pickled and unpickled to act as a save.
"""
def __init__(self, fn):
"""
Create a blank save.
fn - the filename where it will be saved.
"""
#store fn
self.fn = fn
#initialise world position data
self.currentMap = None
self.currentPosition = None
self.currentLevel = None
self.currentDirection = None
#initialise player data
self.party = None
self.bag = None
self.playtime = 0
#initialise script engine save variables
self.variables = {}
self.variables["PLAYERNAME"] = "(No name)"
self.variables["MONEY"] = 0
self.variables["GENDER"] = "MALE"
#scripting functions
def getVar(self, name):
#return the value if it exists, else raise an error
try:
return self.variables[name]
except KeyError:
raise script_engine.DLookupError(name)
def setVar(self, name, val):
#even if the value isn't yet specified, set it
self.variables[name] = val
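# Usage sketch (the file name is a placeholder):
#   save = Save("player.sav")
#   save.setVar("PLAYERNAME", "Red")
#   writeSave(save)
#   loaded = readSave("player.sav")
#   loaded.getVar("PLAYERNAME")  # -> "Red"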
|
[
"andrew.turner@merton.ox.ac.uk"
] |
andrew.turner@merton.ox.ac.uk
|
f49f70846efddaa2c9e266139901a8b7d3fae374
|
fab215713c1b72974a0dc7db73a20e4b5abefe4a
|
/简明python教程/复习/类/objvar.py
|
aaaefaa7be1a0fb5cc303614daee120b8bfa0ce1
|
[] |
no_license
|
cwdgit/learn-python
|
e6774dcea506cfa461bfccc001205bf75a1d126b
|
c5ba97a917bd2b8d7b767ce704ca5ff441b9dfee
|
refs/heads/master
| 2020-03-24T16:41:47.837953
| 2018-08-23T03:13:04
| 2018-08-23T03:13:04
| 142,832,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
#!/usr/bin/python
class person:
'''represents a person. '''
population = 0
def __init__(self,name):
self.name=name
print '(initializing %s)' %self.name
person.population += 1
def __del__(self):
'''I am dying.'''
print '%s says bye.' %self.name
person.population -= 1
if person.population ==0:
print 'I am the last one.'
else:
print 'There are still %d people left.' %person.population
def sayhi(self):
'''greeting by the person.'''
print 'Hi, my name is %s.' %self.name
def howmany(self):
'''prints the current population.'''
if person.population == 1:
print 'I am the only person here.'
else:
print 'we have %d persons here.' %person.population
swaroop = person('swaroop')
swaroop.sayhi()
swaroop.howmany()
kalam=person('abdul kalam')
kalam.sayhi()
kalam.howmany()
swaroop.sayhi()
swaroop.howmany()
|
[
"you@example.com"
] |
you@example.com
|
1bfd24bb27e65b8ae3302cebdf2d66a5b93239ce
|
d70256c671e567b916ca9467927856539205a044
|
/generator/unity_tolua_wrapper_parser_ldt.py
|
3f5a1abcdb651a085c6afd7f141ad16416bc8ebf
|
[] |
no_license
|
kekexinshuai/tolua-ldtdoc
|
aa7a47ffc3a6fd4cc366d3d92ada09042252a108
|
0b9dc4b12b893a61c4768b69f68f95b5a804e6d2
|
refs/heads/master
| 2021-01-20T01:11:22.712320
| 2017-04-24T11:27:23
| 2017-04-24T11:27:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,553
|
py
|
# -*- coding: utf-8 -*-
import re
import os
# i gave up (T_T)
override = {
"System.Array": {
"ToTable": {
"return_type": "#list<System_Object#Object>"
}
}
}
# this parser can't handle these files yet
ignore_files = [
"LuaInterface_LuaOutWrap.cs",
"System_Collections_Generic_DictionaryWrap.cs",
"System_Collections_Generic_Dictionary_KeyCollectionWrap.cs",
"System_Collections_Generic_Dictionary_ValueCollectionWrap.cs",
"System_Collections_Generic_KeyValuePairWrap.cs",
"System_Collections_Generic_ListWrap.cs",
"System_Collections_ObjectModel_ReadOnlyCollectionWrap.cs"]
builtin_types_map = {
"int": "#number",
"number": "#number",
"string": "#string",
"bool": "#boolean",
"boolean": "#boolean",
"float": "#number",
"integer": "#number",
"ushort": "#number",
"sbyte": "#number",
"unit": "#number",
"byte": "#number",
"long": "#number",
"lightuserdata": "#number"
}
def get_class_name_from_file_name(ifile):
file_name = os.path.basename(ifile)
if file_name.endswith("Wrap.cs"):
return file_name[:-7].replace("_",".")
def cstype_map_to_ldttype(cs_type):
ldt_type = None
if cs_type is not None:
if cs_type in builtin_types_map:
ldt_type = builtin_types_map[cs_type]
elif cs_type.endswith("[]"):
cs_type = cs_type[:-2]
ldt_type = "System_Array#Array"
else:
module = cs_type.replace(".","_")
_type = cs_type.split(".")[-1]
ldt_type = module + "#" + _type
return ldt_type
def parse(ifile,odir):
parsing_class = None
function_defs = {}
filed_defs = {}
with open(ifile,encoding="utf-8",mode="r") as f:
brace_level = 0
cs_function_def_parsing_func_name = None
cs_function_def_breace_level = -1
cs_function_def_max_args = 0
cs_function_def_min_args = 11
cs_function_def_is_static = True
cs_function_def_return_type = None
for line in f:
if line.find("{") > 0: brace_level = brace_level + 1
if line.find("}") > 0:
brace_level = brace_level - 1
if brace_level == cs_function_def_breace_level and cs_function_def_parsing_func_name is not None:
# will out c# function
def_func_name = cs_function_def_parsing_func_name
if cs_function_def_parsing_func_name.startswith("_CreateUnityEngine_"):
def_func_name = "New"
cs_function_def_is_static = True
cs_function_def_max_args = 0
cs_function_def_return_type = parsing_class["name"]
elif cs_function_def_parsing_func_name.startswith("get_"):
def_func_name = cs_function_def_parsing_func_name[len("get_"):]
if def_func_name in function_defs:
function_def = function_defs[def_func_name]
function_def["param_count"] = 0 if cs_function_def_min_args == 11 else cs_function_def_min_args
function_def["return_type"] = cstype_map_to_ldttype(cs_function_def_return_type)
function_def["is_static"] = cs_function_def_is_static
function_def["valid"] = True
override_class = override.get(parsing_class["name"], {})
override_func_def = override_class.get(def_func_name, {})
override_return_type = override_func_def.get("return_type", "")
if override_return_type:
function_defs[def_func_name]["return_type"] = override_return_type
elif def_func_name in filed_defs:
filed_def = filed_defs[def_func_name]
filed_def["type"] = cstype_map_to_ldttype(cs_function_def_return_type)
filed_def["valid"] = True
cs_function_def_breace_level = -1
if cs_function_def_breace_level >= 0:
# in c# function
cs_function_def_arg_match = re.search(r'count == (\d+)', line)
argc = None
if cs_function_def_arg_match:
argc = int(cs_function_def_arg_match.group(1))
cs_function_def_arg_match = re.search(r'CheckArgsCount\(L, (\d+)\)', line)
if cs_function_def_arg_match:
argc = int(cs_function_def_arg_match.group(1))
if argc:
if cs_function_def_max_args < argc:
cs_function_def_max_args = argc
if cs_function_def_min_args > argc:
cs_function_def_min_args = argc
cs_function_def_instance_method_match = re.search(r' obj = ', line)
if cs_function_def_instance_method_match:
cs_function_def_is_static = False
if cs_function_def_max_args < 1:
cs_function_def_max_args = 1
if cs_function_def_return_type is None:
cs_function_def_return_match = re.match(r'^\s*([^\s]+?) o = [^n].*;$', line)
if cs_function_def_return_match is None:
cs_function_def_return_match = re.match(r'^\s*(.*?) ret = .*;$', line)
if cs_function_def_return_match is None:
cs_function_def_return_match = re.match(r'^\s*LuaDLL\.lua_push(.*?)\(', line)
# kinda bad
if cs_function_def_return_match is None:
try_match = re.match(r'^\s*ToLua\.Push\(L, (.*?)\)', line)
if try_match is not None and try_match.group(1) != "ret":
fs = try_match.group(1).split(".")
fs = fs[:-1]
k = ".".join(fs)
cs_function_def_return_type = k
if cs_function_def_return_match:
cs_function_def_return_type = cs_function_def_return_match.group(1)
continue
class_def_match = re.match(r'^\s*L\.BeginClass\(typeof\((.*?)\), typeof\((.*?)[,\)]', line)
if class_def_match:
assert(parsing_class == None)
parsing_class = {"name": get_class_name_from_file_name(ifile),
"parent": class_def_match.group(2)}
class_def_match = re.match(r'^\s*L\.BeginClass\(typeof\((.*?)\), null[,\)]', line)
if class_def_match:
assert(parsing_class == None)
parsing_class = {"name": get_class_name_from_file_name(ifile)}
class_def_match = re.match(r'^\s*L\.BeginStaticLibs\("(.*?)"\)', line)
if class_def_match:
assert(parsing_class == None)
parsing_class = {"name": get_class_name_from_file_name(ifile)}
class_def_match = re.match(r'^\s*L\.BeginEnum\(typeof\((.*?)\)', line)
if class_def_match:
assert(parsing_class == None)
parsing_class = {"name": class_def_match.group(1)}
function_def_match = re.match(r'^\s*L\.RegFunction\("(.*?)"', line)
if function_def_match:
function_name = function_def_match.group(1)
function_defs[function_name] = {"name": function_name}
field_def_match = re.match(r'^\s*L\.RegVar\("(.*?)"', line)
if field_def_match:
field_name = field_def_match.group(1)
filed_defs[field_name] = {"name":field_name}
cs_function_def_match = re.match(r'^\s*static int (.*?)\(', line)
if cs_function_def_match:
cs_function_def_parsing_func_name = cs_function_def_match.group(1)
cs_function_def_breace_level = brace_level
cs_function_def_max_args = 0
cs_function_def_min_args = 11
cs_function_def_is_static = True
cs_function_def_return_type = None
# output
ldt_type = cstype_map_to_ldttype(parsing_class["name"])
parsing_module = ldt_type.split("#")[0]
parsing_type = ldt_type.split("#")[1]
with open(os.path.join(odir,parsing_module+".doclua"),"w") as of:
of.write("---\n")
of.write("-- @module %s\n\n" % parsing_module)
of.write("---\n")
of.write("-- @type %s\n" % parsing_type)
if "parent" in parsing_class:
of.write("-- @extends %s\n" % cstype_map_to_ldttype(parsing_class["parent"]))
of.write("\n")
for _, func in function_defs.items():
if not "valid" in func: continue
of.write("---\n")
of.write("-- @function [parent=#%s] %s\n" % (parsing_type, func["name"]))
if not func["is_static"]:
of.write("-- @param self\n")
for i in range(func["param_count"] - (0 if func["is_static"] else 1)):
of.write("-- @param arg%d\n" % i)
if func["return_type"] is not None:
of.write("-- @return %s\n" % func["return_type"])
of.write("\n")
for _, field in filed_defs.items():
if not "valid" in field: continue
of.write("---\n")
_type = field["type"] + " " if field["type"] is not None else ""
of.write("-- @field [parent=#%s] %s%s\n\n" % (parsing_type, _type, field["name"]))
of.write("return nil\n")
if __name__ == "__main__":
srcdir1 = r"D:\develop\projects\Test Unity Project\Assets\3rd\tolua\Source\Generate"
srcdir2 = r"D:\develop\projects\Test Unity Project\Assets\3rd\tolua\ToLua\BaseType"
destdir = r"D:\develop\projects\tolua-ldtdoc\generated_doclua"
flist1 = [os.path.join(srcdir1,f) for f in os.listdir(srcdir1)]
flist2 = [os.path.join(srcdir2,f) for f in os.listdir(srcdir2)]
for fpath in flist1 + flist2:
fname = os.path.basename(fpath)
if fpath.endswith("Wrap.cs") and fname not in ignore_files:
parse(fpath, destdir)
# generate doclua for root module (ex: UnityEngine.doclua)
root_module_to_fields = {} # {module_name: {field_name:{type: }, ...}, ...}
for fpath in flist1 + flist2:
module_name = get_class_name_from_file_name(fpath)
if module_name:
module_paths = module_name.split(".")
if len(module_paths) == 2: # field directly under root module
root_module, filed_name = module_paths[0], module_paths[1]
root_module_fields = root_module_to_fields.setdefault(root_module,{})
root_module_fields[filed_name] = {"type": cstype_map_to_ldttype(module_name)}
for module,fields in root_module_to_fields.items():
with open(os.path.join(destdir, module + ".doclua"),"w") as of:
of.write("---\n-- @module %s\n\n" % module)
for field, field_info in fields.items():
of.write("---\n-- @field [parent=#%s] %s %s\n\n" % (module, field_info["type"], field))
of.write("return nil\n")
|
[
"ps5mhg@gmail.com"
] |
ps5mhg@gmail.com
|
7466ce6e92d074ebe5cdcdffa64afe71726173e0
|
83a44062b3f469a5c58813e40dd0ba9a71934ec3
|
/yugteatr/wsgi.py
|
3571473408d31858930c214941aab58794614330
|
[] |
no_license
|
aliismayilov/yugteatr
|
5c41d58195539f71ba8c3e782f1053f8e261ad16
|
ea6107f0505e55f2155e646ea2cdbfa80f23a586
|
refs/heads/master
| 2016-09-09T20:33:37.103846
| 2013-05-11T16:11:21
| 2013-05-11T16:11:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
"""
WSGI config for yugteatr project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "yugteatr.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yugteatr.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
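# A minimal sketch of such a wrapper, kept commented out like the example
# above (the header name is purely illustrative):
# def add_header_middleware(app):
#     def wrapped(environ, start_response):
#         def custom_start_response(status, headers, exc_info=None):
#             headers.append(("X-Served-By", "yugteatr"))
#             return start_response(status, headers, exc_info)
#         return app(environ, custom_start_response)
#     return wrapped
# application = add_header_middleware(application)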
|
[
"ali@ismailov.info"
] |
ali@ismailov.info
|
50b2f16b5a10bdc41ef814a5790c032d227a5dca
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/slxos/v17s_1_02/vrf/address_family/ipv6/unicast/ipv6/route/static_route_oif/route_attributes/__init__.py
|
4b26a6d7ec10f8fd1ad380e2be4e9d37e1abf5bb
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,960
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class route_attributes(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-vrf - based on the path /vrf/address-family/ipv6/unicast/ipv6/route/static-route-oif/route-attributes. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__metric','__distance','__tag',)
_yang_name = 'route-attributes'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__distance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
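    # Three config leaves are modelled here, all YANG uint32: metric is
    # restricted to 1..16, distance to 1..254, and tag is an unrestricted
    # 32-bit value.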
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'vrf', u'address-family', u'ipv6', u'unicast', u'ipv6', u'route', u'static-route-oif', u'route-attributes']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'vrf', u'address-family', u'ipv6', u'unicast', u'ipv6', u'route', u'static-route-oif']
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/static_route_oif/route_attributes/metric (uint32)
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/static_route_oif/route_attributes/metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """metric must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)""",
})
self.__metric = t
if hasattr(self, '_set'):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
def _get_distance(self):
"""
Getter method for distance, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/static_route_oif/route_attributes/distance (uint32)
"""
return self.__distance
def _set_distance(self, v, load=False):
"""
Setter method for distance, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/static_route_oif/route_attributes/distance (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_distance() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """distance must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)""",
})
self.__distance = t
if hasattr(self, '_set'):
self._set()
def _unset_distance(self):
self.__distance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
def _get_tag(self):
"""
Getter method for tag, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/static_route_oif/route_attributes/tag (uint32)
YANG Description: Tag can be configured to filter the static routes
for route redistribution.
Default value is 0, indicating no tag.
"""
return self.__tag
def _set_tag(self, v, load=False):
"""
Setter method for tag, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/static_route_oif/route_attributes/tag (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_tag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tag() directly.
YANG Description: Tag can be configured to filter the static routes
for route redistribution.
Default value is 0, indicating no tag.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tag must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)""",
})
self.__tag = t
if hasattr(self, '_set'):
self._set()
def _unset_tag(self):
self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
metric = __builtin__.property(_get_metric, _set_metric)
distance = __builtin__.property(_get_distance, _set_distance)
tag = __builtin__.property(_get_tag, _set_tag)
_pyangbind_elements = {'metric': metric, 'distance': distance, 'tag': tag, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
c8f037d1c232478ee45c226ab4e1ffd739091272
|
3d0351d1834c3e4dfea6c1b01b0dd6269110983d
|
/localtest/download/filter_docu.py
|
edd535c4a982cd771aed8af50bc80f9d83d8f4ac
|
[] |
no_license
|
SUNCHAO1212/announcement-pipeline
|
13f0ee0b1d39458b4d8a464265a0d81a4c3f7091
|
096d05e5c8de476ae857a5250bd221afe6c0910a
|
refs/heads/master
| 2020-03-14T01:54:06.058605
| 2018-06-22T09:09:57
| 2018-06-22T09:09:57
| 131,387,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pymongo import MongoClient
client = MongoClient(host='192.168.1.251', port=27017)
db = client.SecurityAnnouncement
coll_from = db.pledge
coll_to = db.pledge_filtered
# copy pledge announcements of 信邦制药 whose title does not contain 解,
# i.e. skip the pledge-release (解除) notices
for i, document in enumerate(coll_from.find({'crawOpt.secName': '信邦制药', 'title': {'$regex': '^[^解]*$'}})):
    print(i, document['title'])
    coll_to.save(document)  # Collection.save() is deprecated in PyMongo 3+; replace_one(..., upsert=True) is the modern equivalent
|
[
"qjzhu@quant-chi.com"
] |
qjzhu@quant-chi.com
|
88ca5a1c8fe4b91fae725e5c1ec75ce17b634be3
|
69a84314e21db2349fb9267dc52baa5fe49bf5a3
|
/model/User.py
|
60bd7182eee61e9277523fc663bd23b3afff1dfa
|
[] |
no_license
|
hakhyunkim123/fmp-serving
|
2feb0c542419af23954083e7f46d8f77568d8e3b
|
4d1aac723e291a19f57a1c75246e7a2ffd4e6a04
|
refs/heads/main
| 2023-01-23T09:14:10.364903
| 2020-11-28T05:47:22
| 2020-11-28T05:47:22
| 312,974,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
class User:
def __init__(self, employ_num=None, name=None, email=None, position=None,
comp_division=None, comp_division_detail=None, pwd=None, user_dict=None):
if user_dict is None:
self.employ_num = employ_num
self.name = name
self.email = email
self.position = position
self.comp_division = comp_division
self.comp_division_detail = comp_division_detail
self.pwd = pwd
else:
self.employ_num = user_dict['employ_num']
self.name = user_dict['name']
self.email = user_dict['email']
self.position = user_dict['position']
self.comp_division = user_dict['comp_division']
self.comp_division_detail = user_dict['comp_division_detail']
self.pwd = user_dict['pwd']
def to_dict(self):
user_to_dict = {
'employ_num': self.employ_num,
'name': self.name,
'email': self.email,
'position': self.position,
'comp_division': self.comp_division,
'comp_division_detail': self.comp_division_detail
}
return user_to_dict
def print_user_info(self):
        print('Employee number:', self.employ_num)
        print('Name:', self.name)
        print('Email:', self.email)
        print('Position:', self.position)
        print('Department:', self.comp_division)
        print('Team:', self.comp_division_detail)
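    # Usage sketch (values are illustrative):
    #   u = User(user_dict=row)        # hydrate from a dict-like DB row
    #   payload = u.to_dict()          # note: pwd is deliberately excluded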
|
[
"hyun24436@gmail.com"
] |
hyun24436@gmail.com
|
2e5e09553134fb539521dd55ed4e42edcf62b99a
|
93badab6ebd099f140e545f63814df18b93ab611
|
/painter/fund_trade_history_graph.py
|
27304f0efb8b392e2ec0fc38620894f51873a9e3
|
[] |
no_license
|
rovinyu/fund_hold_assist
|
002e95758afdf78516439bfeb27619a862d6435f
|
e361ffe5e744fbe9fcfdaa0eec8525877b01f571
|
refs/heads/master
| 2022-12-10T18:19:10.449346
| 2020-09-01T12:01:58
| 2020-09-01T12:01:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,093
|
py
|
# Python 3
# -*- coding:utf-8 -*-
from utils import *
from user import *
from painter import FundHistoryGraph
from decimal import Decimal
import matplotlib.pyplot as plt
class FundTradeHistoryGraph(FundHistoryGraph):
"""draw fund trade history graph"""
def __init__(self, sqldb, userfund, allTrade = False):
super(FundTradeHistoryGraph, self).__init__(sqldb, userfund.code)
self.ppg = 1 if not ppgram.__contains__(self.code) else ppgram[self.code]
self.userfund = userfund
self.allTrade = allTrade
self.dates_buy = None
self.dates_buy_sold = None
self.dates_sell = None
def postProcessData(self):
self.average = 0 if not self.userfund.average else Decimal(str(self.userfund.average))
buytable = self.userfund.buy_table
sDate = None
if self.sqldb.isExistTable(buytable):
dates_buy = self.sqldb.select(buytable, "min(%s)" % column_date, "%s = 0" % column_soldout if not self.allTrade else "")
if dates_buy:
((sDate,),) = dates_buy
if not sDate:
sDate = ""
if self.dates.__contains__(sDate):
sDateIdx = self.dates.index(sDate)
self.dates = self.dates[sDateIdx:]
self.values = self.values[sDateIdx:]
if not self.ppg == 1:
self.values = [self.ppg * v for v in self.values]
self.average = self.ppg * Decimal(str(self.average))
if not self.average == 0:
self.earn_percent = str(((Decimal(str(self.values[-1])) - self.average) * 100/self.average).quantize(Decimal("0.0000"))) + "%"
if self.sqldb.isExistTable(buytable):
dates_buy = self.sqldb.select(buytable, [column_date], ["%s >= '%s'" % (column_date, sDate), "%s = 0" % column_soldout])
self.dates_buy = [d for (d,) in dates_buy]
self.values_buy = [self.values[self.dates.index(d)] for d in self.dates_buy]
dates_buy_sold = self.sqldb.select(buytable, [column_date], ["%s >= '%s'" % (column_date, sDate), "%s = 1" % column_soldout])
self.dates_buy_sold = [d for (d,) in dates_buy_sold]
self.values_buy_sold = [self.values[self.dates.index(d)] for d in self.dates_buy_sold]
selltable = self.userfund.sell_table
if self.sqldb.isExistTable(selltable):
dates_sell = self.sqldb.select(selltable, [column_date], "%s >= '%s'" % (column_date, sDate))
self.dates_sell = [d for (d,) in dates_sell]
self.values_sell = [self.values[self.dates.index(d)] for d in self.dates_sell]
def drawAdditionalLines(self):
info_posx = self.dates[self.cursXidx]
latestVal = Decimal(str(self.values[-1]))
if not self.average == 0:
plt.axhline(y=self.average, ls = '-', lw = 0.75, color = 'r', alpha = 0.5)
plt.gca().text(self.dates[0], self.average, str(self.average))
plt.axhline(y=self.values[-1], ls = '-', lw = 0.75, color = 'r', alpha = 0.5)
plt.axvline(x=self.dates[-1], ls = '-.', lw = 0.5, color='r', alpha = 0.8)
plt.gca().text(self.dates[-1], (self.average + latestVal)/2, self.earn_percent)
cursY = self.values[self.cursXidx]
if not self.average == 0 and not cursY == 0:
plt.gca().text(info_posx, (Decimal(cursY) + self.average)/2, str((((Decimal(cursY) - self.average) * 100/self.average)).quantize(Decimal("0.0000"))) + "%")
if not self.values[-1] == cursY:
plt.gca().text(info_posx, (Decimal(cursY) + latestVal)/2, str(((latestVal - Decimal(cursY)) * 100/Decimal(cursY)).quantize(Decimal("0.0000"))) + "%")
if self.dates_buy:
plt.scatter(self.dates_buy, self.values_buy, c = 'r')
if self.dates_buy_sold:
plt.scatter(self.dates_buy_sold, self.values_buy_sold, c = 'w', edgecolors = 'r')
if self.dates_sell:
plt.scatter(self.dates_sell, self.values_sell, c = 'k')
def show_distribute(self):
print("call FundHistoryGraph.show_distribute")
|
[
"zhcbfly@qq.com"
] |
zhcbfly@qq.com
|
4b1c11c9440ab0a42b1e77da03ceb072935e5c9b
|
db75e02b0aadb81c4d55369018dcf50bbe0dfb56
|
/run_sourceanalyzer.py
|
4c27e96c8a38885b1d302cce1e7dd25a94856603
|
[] |
no_license
|
relativeabsolute/rowanducks
|
10c4a7c4df38f16ec53251b685dea1bb9ced6159
|
4a956dca0df0158e246d34e0c0ce3eb267569144
|
refs/heads/dev
| 2021-01-19T09:25:20.592887
| 2017-05-08T17:35:15
| 2017-05-08T17:35:15
| 82,107,924
| 4
| 1
| null | 2017-05-08T17:35:15
| 2017-02-15T21:19:50
|
Python
|
UTF-8
|
Python
| false
| false
| 108
|
py
|
import sys
from src import SourceAnalyzer
if __name__ == "__main__":
SourceAnalyzer.main(sys.argv[1:])
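# Usage sketch (the expected arguments are defined by SourceAnalyzer.main,
# not shown here):
#   python run_sourceanalyzer.py <args...>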
|
[
"jburke4126@gmail.com"
] |
jburke4126@gmail.com
|
53ba31c2f17ff6fb8924df64bb4c7706f0094743
|
48551bc4557e4488ec0d8bbb1fed49830a7a7d55
|
/src/profiles/admin.py
|
1a67111d97c3af46198808e836e5b2a6340da462
|
[] |
no_license
|
ishani33/ecommerce-website-django
|
b68554600a7746f8d84b796068c5050d55023b17
|
e19f7311a69c73d87df5612572351f56bb498cab
|
refs/heads/master
| 2020-05-17T08:33:49.005926
| 2019-04-26T12:09:39
| 2019-04-26T12:09:39
| 183,605,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
from django.contrib import admin
from .models import profile
# Register your models here.
class profileAdmin(admin.ModelAdmin):
    # ModelAdmin ignores an inner Meta.model; the model is bound through
    # admin.site.register(profile, profileAdmin) below
    pass
admin.site.register(profile,profileAdmin)
|
[
"ishanichauhan781@gmail.com"
] |
ishanichauhan781@gmail.com
|
dbae681e911dd6388492d15db5ab5ee37e24fc02
|
8a240c8d3c057f506447fe18407263d110f4d6c9
|
/jobplus/handlers/front.py
|
30ca83bedd759bce106e7bf170aa8055056dd4d7
|
[] |
no_license
|
starhappy/jobplus
|
e7bc9ad4be14fe4ea8d6e541e9361c6c9802ef83
|
b6d7fa6bf09963bd57b71d5fe01c4be13b86a3c9
|
refs/heads/master
| 2022-12-12T17:29:52.286079
| 2018-12-03T07:49:16
| 2018-12-03T07:49:16
| 159,120,063
| 0
| 1
| null | 2022-12-07T23:53:03
| 2018-11-26T06:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
from flask import Blueprint, render_template, redirect, url_for, flash
from jobplus.models import User, db, Job
from jobplus.forms import RegisterForm, LoginForm
from flask_login import login_user, logout_user, login_required
front = Blueprint('front', __name__)
@front.route('/')
def index():
newest_jobs = Job.query.filter(Job.is_disable.is_(False)).order_by(Job.created_at.desc()).limit(9)
newest_companies = User.query.filter(
User.role==User.ROLE_COMPANY
).order_by(User.created_at.desc()).limit(8)
return render_template(
'index.html',
active='index',
newest_jobs=newest_jobs,
newest_companies=newest_companies,
)
@front.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # guard against a missing user as well as a disabled one
        if user is None or user.is_disable:
            flash('This account has been disabled')
            return redirect(url_for('front.login'))
else:
login_user(user, form.remember_me.data)
next = 'user.profile'
if user.is_admin:
next = 'admin.index'
elif user.is_company:
next = 'company.profile'
return redirect(url_for(next))
return render_template('login.html', form=form)
@front.route('/userregister', methods=['GET', 'POST'])
def userregister():
form = RegisterForm()
if form.validate_on_submit():
form.create_user()
        flash('Registration successful, please log in!', 'success')
return redirect(url_for('.login'))
return render_template('userregister.html', form=form)
@front.route('/companyregister', methods=['GET', 'POST'])
def companyregister():
form = RegisterForm()
    form.name.label = u'Company name'
if form.validate_on_submit():
company_user = form.create_user()
company_user.role = User.ROLE_COMPANY
db.session.add(company_user)
db.session.commit()
        flash('Registration successful, please log in!', 'success')
return redirect(url_for('.login'))
return render_template('companyregister.html', form=form)
@front.route('/logout')
@login_required
def logout():
logout_user()
    flash('You have been logged out!', 'success')
return redirect(url_for('.index'))
|
[
"konglingxing@126.com"
] |
konglingxing@126.com
|
93adae40653efd455c7f5cc03cb0f03b3be76427
|
c42d80ed2e6d313fd4d340dfa50881447b824968
|
/PLOTS/PlotJKblocks.py
|
ad50dbedfb26e4f39d02b9458a636c2970339d05
|
[] |
no_license
|
mbaityje/STRUCTURAL-GLASS
|
3adcd324f5c13411ba9d6ed48be44655383bb516
|
6d8dc9e6f82618a65fc09f82901a25533d5afb59
|
refs/heads/master
| 2021-06-24T22:50:19.024502
| 2019-04-25T18:09:54
| 2019-04-25T18:09:54
| 111,572,772
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,924
|
py
|
#!/usr/bin/env python
import sys
import numpy as np
import argparse
from matplotlib import pyplot as plt
parser = argparse.ArgumentParser(prog='python '+sys.argv[0]+' [--hoomd-flags] --user=" HERE YOU PUT THE FOLLOWING ARGUMENTS"', add_help=True)
# parser.add_argument('-f','--filename', default="", help='name of the file you want to plot')
parser.add_argument('-T','--T', default='5.0', help='Temperature')
parser.add_argument('-N','--N', default=1080, help='number of particles')
parser.add_argument('-t','--thermostat', default='NVT', help='thermostat')
parser.add_argument('-o','--obs', choices=['CPP','CFP','CFF','K'], default='CPP', help='What to plot')
parser.add_argument('-M','--M', default=3, help='M of Gaver-Stehfest (only for K)')
parser.add_argument('--nomean', action='store_true', help='if activated, mean is not plotted')
args = parser.parse_args()
# LOAD DATA JK
path='../OUTPUT/T{}/N{}/'.format(args.T, args.N)
times=np.load(path+'times_{}.npy'.format(args.thermostat))
if args.obs=='CPP' or args.obs=='CFP' or args.obs=='CFF':
filename=path+'/{}JK_{}.npy'.format(args.obs, args.thermostat)
corr=np.load(filename).item()
elif args.obs=='K':
filename=path+'/noisecorrJK_{}_M{}.npy'.format(args.thermostat,args.M)
corr=np.load(filename).item()['combine']
av=np.loadtxt(path+'noisecorr_{}_combine_M{}.txt'.format(args.thermostat,args.M))
# LOAD DATA AVERAGE (average is different than mean because of the order of the operations)
#PLOT
fig=plt.subplot(111, xscale='log')
if args.obs=='K':
plt.ylim(top=1.1*corr['mean'][0])
plt.title(args.obs+', T='+args.T+' (all JK blocks + mean)')
nblo=len(corr['blocksJK'])
for iblo in range(nblo):
plt.plot(times,corr['blocksJK'][iblo])
if not args.nomean:
plt.errorbar(times, corr['mean'], yerr=corr['errJK'], linewidth=3, color='black')
try:
plt.plot(av[:,0],av[:,1], label="Average", color='grey', linewidth=3)
except NameError:
pass
plt.show()
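# Example invocation (values are illustrative):
#   python PlotJKblocks.py --T 2.0 --N 1080 -t NVT --obs K --M 3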
|
[
"marcoan266bb@hotmail.com"
] |
marcoan266bb@hotmail.com
|
e5f249cde5c028d3de1713180f7dd53c8868c394
|
75c3a5e1ec4b2e5b4c2fc876b1515e15d077e20d
|
/Preprocessing part/10.22.py
|
a057b2c2aac638ff7efdf39c238fde7573788e35
|
[] |
no_license
|
William0111/Speech-separation
|
1a2b2c2c0115ee7e054999e1c79dadaafb710413
|
c60b37fd978624ac13a7ec55a303644d559e51ca
|
refs/heads/master
| 2020-05-01T08:51:14.331098
| 2019-05-10T20:39:32
| 2019-05-10T20:39:32
| 177,387,174
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,514
|
py
|
# Purpose: review
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.2 + 0.7
# plt.plot(x_data, y_data)
# plt.show()
#Structure
Weights = tf.Variable(tf.random_uniform([1],-1,1)) # shape [1], uniform random init in [-1, 1]
Biases = tf.Variable(tf.zeros([1])) # shape [1], initialized to zero
y = Weights*x_data + Biases # y = Wx + b
loss = tf.reduce_mean(tf.square(y-y_data)) # compute the loss
optimizer = tf.train.GradientDescentOptimizer(0.4) # the most basic optimizer, learning rate 0.4
train = optimizer.minimize(loss)
init = tf.initialize_all_variables() # this step is essential
#Structure
sess = tf.Session()
sess.run(init) # don't forget: the init op itself must be run
for step in range(100): # train for 100 steps, printing W & b every 10; reading values also goes through sess.run(), which acts like dereferencing a pointer
sess.run(train)
if step%10 == 0:
print(step,sess.run(Weights),sess.run(Biases))
matrix1 = tf.constant([[4],[4]]) # 2x1 matrix
matrix2 = tf.constant([[8,8]]) # 1x2 matrix
product = tf.matmul(matrix2, matrix1) # matrix multiplication
#method 1
result = sess.run(product)
print(result)
sess.close()
#method 2
with tf.Session() as sesss:
result2 = sesss.run(product)
print(result2)
# two ways to evaluate the matrix product; the second needs no explicit close()
state = tf.Variable(0,name='counter') # step counter
#print(state.name)
one = tf.constant(1) # the constant tensor 1
new_value = tf.add(state,one) # add one to the counter
update = tf.assign(state,new_value) # assign new_value back into state
init = tf.initialize_all_variables()
with tf.Session() as sess: # increment the counter 9 times, printing after each step
sess.run(init)
for _ in range(9):
sess.run(update)
print(sess.run(state))
input1 = tf.placeholder(tf.float32) # float32 is the most widely used dtype in TF
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1,input2) # element-wise multiplication
with tf.Session() as sess:
    print(sess.run(output,feed_dict={input1:[8],input2:[3]}))
# placeholders must be fed their values through feed_dict
def add_layer(input,in_size,out_size, n_layer, activation_function=None):
    # add_layer builds one layer; it takes the input tensor, its size, the
    # output size, a layer name, and the activation function
layer_name = 'layer%s'% n_layer
with tf.name_scope(layer_name):
with tf.name_scope('Weights'):
Weights = tf.Variable(tf.random_uniform([in_size,out_size]), name='W')
            # W has shape in_size x out_size: the row count must match the input
            # so the matmul works, and the column count sets the output width
tf.summary.histogram(layer_name+'Weights', Weights)
with tf.name_scope('biases'):
biases = tf.Variable(tf.zeros([1,out_size]) + 0.1, name='b')
            # one bias per output connection, so the bias count follows out_size
tf.summary.histogram(layer_name + 'biases', biases)
with tf.name_scope('Wx_plus_b'):
Wx_plus_b = tf.matmul(input,Weights) + biases
            # matrix-multiply the input by W, then add the bias
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
tf.summary.histogram(layer_name + 'outputs', outputs)
return outputs
# synthetic data
x_data = np.linspace(-1,1,300)[:,np.newaxis]
noise = np.random.normal(0,0.1,x_data.shape).astype(np.float32)
y_data = np.square(x_data) - 0.8 + noise
with tf.name_scope('inputs'):
xs = tf.placeholder(tf.float32,[None,1], name='x_input')
ys = tf.placeholder(tf.float32,[None,1], name='y_input')
# one of the steps for the TensorBoard visualization
l1 = add_layer(xs,1,10, n_layer=111, activation_function=tf.nn.relu)
# first layer: 1 input, 10 outputs, ReLU activation
prediction = add_layer(l1,10,1, n_layer=222, activation_function=None)
# second layer: 10 inputs, 1 output, no activation
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))
    # compute the loss, a crucial step
tf.summary.scalar('loss', loss)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
    # gradient descent optimizer with learning rate 0.1
init = tf.global_variables_initializer()
sess = tf.Session()
merge = tf.summary.merge_all()
# merge all the summary ops defined above
writer = tf.summary.FileWriter('/Users/admin/Desktop/tensorgraph/',sess.graph)
# this writes the generated graph into the given directory
sess.run(init)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(x_data,y_data)
plt.ion()
plt.show()
# still not fluent with matplotlib; need to study it systematically
for i in range(1000):
sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
if i%50 == 0:
result = sess.run(merge,feed_dict={xs:x_data,ys:y_data})
writer.add_summary(result,i)
print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
try:
ax.lines.remove(lines[0])
except Exception:
pass
prediction_value = sess.run(prediction,feed_dict={xs:x_data})
lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
plt.plot(x_data,prediction_value)
plt.pause(0.1)
|
[
"44120449+William0111@users.noreply.github.com"
] |
44120449+William0111@users.noreply.github.com
|
a43541954127dd7b9cdee21d29cdddb1e9efb836
|
4ab4fdb108d6bdf3c6dfff8a07902b4a6d0be2cf
|
/fb_terminal.py
|
643e5158df8ba285218397b4719720fd71039484
|
[] |
no_license
|
karthikshathiri/scripts
|
6f5fc195f89464ad5555ef9f9e7d6125a70f8f1e
|
3a01742c1c52549c219d3cdfc0552acc6855ec6e
|
refs/heads/master
| 2021-01-22T06:18:32.428545
| 2017-05-26T18:03:14
| 2017-05-26T18:03:14
| 92,535,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,390
|
py
|
import os
import sys
import time
import zipfile
import re
from urllib.request import urlretrieve
from selenium import webdriver
from getpass import getpass
try:
from configparser import SafeConfigParser
except ImportError:
from ConfigParser import SafeConfigParser
chrome_options = webdriver.ChromeOptions()
prefs = {"profile.default_content_setting_values.notifications" : 2}
chrome_options.add_experimental_option("prefs",prefs)
driver = webdriver.Chrome(chrome_options = chrome_options)
driver.set_window_size(1080,800) #Required, removes the "element not found" bug
replyButton = imgList = fileList = None
customCommands = {}
try:
input = raw_input
except NameError:
pass
def clear():
if os.name == 'nt':
os.system('cls')
else :
os.system('clear')
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
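# zipdir walks `path` recursively and writes every file into the already-open
# ZipFile handle `ziph`; the caller is responsible for closing the archive.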
def waitForNextMessage():
driver.implicitly_wait(30)
messageList = driver.find_elements_by_css_selector('.null')
command = ''
while True:
driver.implicitly_wait(30)
element = driver.find_elements_by_css_selector('.null')
if element != messageList:
command = element[-1].find_elements_by_css_selector("*")[0].text
if not(command.split('\n')[0] == '@CLI'):
print('\033[94m {} \033[0m'.format(command))
runCommand(command)
break
time.sleep(0.50)
def runCommand(command):
driver.implicitly_wait(30)
output = os.popen(command).read()
url = fpath = ''
cmd = command.lower().split(' ')
fileButton = driver.find_elements_by_xpath('//input[@type="file"]')[0]
if (len(cmd) >= 2):
fpath = os.getcwd() + '/' + ' '.join(cmd[1:])
urlIden = cmd[1].split(':')[0]
if urlIden == 'http' or urlIden == 'https':
url = cmd[1]
if (len(cmd) >= 4):
if cmd[0] == 'set' and cmd[2] == 'as':
global customCommands
if cmd[1] not in customCommands:
final = ' '.join(cmd[3:])
with open('commands.txt','a') as foo:
foo.write(cmd[1] + ' ' + final + '\n')
customCommands[cmd[1]] = final
output = 'Command set : {} = {}'.format(cmd[1], final)
else:
output = 'ERROR\nCommand already defined : {}'.format(cmd[1])
if cmd[0] == 'save':
path = os.getcwd() + '/' + ''.join(cmd[2:])
global imgList
global fileList
if cmd[1] == 'img':
newImgList = driver.find_elements_by_css_selector('._4yp9')
if imgList != newImgList:
urlretrieve(newImgList[-1].get_attribute('style').split("\"")[1],path)
output = 'Image saved as ' + path
imgList = newImgList
else: output = 'ERROR\nImage not found'
if cmd[1] == 'file':
newFileList = [x for x in driver.find_elements_by_tag_name('a') if x.get_attribute('rel') == 'nofollow']
if newFileList != fileList:
urlretrieve(newFileList[-1].get_attribute('href'),path)
output = 'File saved as ' + path
fileList = newFileList
else : output = 'ERROR\nFile not found'
if cmd[0] in customCommands:
output = os.popen(customCommands[cmd[0]]).read()
if cmd[0] == 'senddir':
name = ''.join(cmd[1:])+'.zip'
if os.path.isdir(fpath):
zipf = zipfile.ZipFile(name, 'w')
zipdir(fpath, zipf)
zipf.close()
fileButton.send_keys(os.getcwd()+'/'+name)
output = fpath
else:
output = 'ERROR\nNo such directory: {}'.format(fpath)
if cmd[0] == 'cd':
if os.path.isdir(fpath):
os.chdir(fpath)
output = os.getcwd()
else :
output = 'ERROR\nNo such directory: {}'.format(fpath)
if cmd[0] == 'send':
if os.path.isfile(fpath):
fileButton.send_keys(fpath)
output = fpath
else:
output = 'ERROR\nFile not found : {}'.format(fpath)
if cmd[0] == 'quit':
print('Session Ended')
driver.quit()
sys.exit(0)
if cmd[0] == 'show':
dr = webdriver.Chrome()
foo = True
if url:
dr.get(url)
elif os.path.isfile(fpath):
dr.get('file:///'+fpath)
else :
output = 'ERROR\nInvalid Path/URL : '
foo = False
if foo:
dr.save_screenshot('ss.png')
dr.quit()
if url:
output = url
else:
                output = fpath
fileButton.send_keys(os.getcwd() + '/ss.png')
if cmd[0] == 'memory':
if os.name == 'nt':
output = 'ERROR\nCurrently, the memory command is only supported for UNIX-based machines'
else:
output = os.popen('top -l 1 -s 0 | grep PhysMem').read()
if cmd[0] == 'help':
output = 'help : Displays this\n\nquit : Ends current session\n\nsend __filePath : Sends the file at the path specfied\n\nsenddir __dirPath : Sends directory after coverting to .zip\n\nmemory : Gives current memory stats of system\n\nshow __filePath/URL : Previews file/url \n\nset *NewCommandName* as *actualCommand* : Define alias name for command\n\n------USER DEFINED ALIAS------\n\n' + '\n'.join(customCommands.keys()) + '\n\n------------\n\nRun any other command as you would on your CLI'
if not output:
output = '(Y)'
driver.find_element_by_css_selector('.uiTextareaNoResize.uiTextareaAutogrow._1rv').send_keys('@CLI\n\n'+output)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
replyButton.click()
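# Chat-command protocol handled above: send, senddir, save img/file, cd,
# show, memory, "set X as Y", help, quit, plus any user-defined alias;
# anything else is piped straight to the shell via os.popen(), and the
# output (prefixed with @CLI so it is not re-executed) is posted back into
# the conversation.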
def init():
cont = False
clear()
credentials_from_file = False
credentials = SafeConfigParser();
if os.path.isfile('settings.txt') and os.name != 'nt':
os.system('chmod +r settings.txt')
credentials.read('settings.txt')
if (credentials.has_option('main','email')
and credentials.has_option('main','password')):
credentials_from_file = True
while not cont:
driver.get('https://www.facebook.com/')
if credentials_from_file:
email = credentials.get('main', 'email')
password = credentials.get('main', 'password')
else:
email = input('Email : ')
password = getpass('Password : ')
inputs=driver.find_elements_by_tag_name('input')
inputs[1].send_keys(email)
inputs[2].send_keys(password)
driver.implicitly_wait(30)
inputs[3].click()
if str(driver.current_url).split('=')[0] == 'https://www.facebook.com/login.php?login_attempt':
clear()
print('Invalid Email/Password')
if credentials_from_file:
print('Switching to manual input')
credentials_from_file = False
else:
cont = True
print('Loading...\n')
if os.path.isfile('settings.txt') and os.name != 'nt':
os.system('chmod -r settings.txt')
profile_url = [x for x in driver.find_elements_by_tag_name('a') if x.get_attribute('title') == 'Profile'][0].get_attribute('href')
re_search = re.search(r'(\?id=\d+)$', profile_url)
profile = ''
if re_search:
profile = re_search.group(0)
profile = profile.replace('?id=', '')
else:
profile = profile_url[profile_url.rfind('/')+1:]
driver.get('https://www.facebook.com/messages/' + profile)
global replyButton
replyButton = [x for x in driver.find_elements_by_tag_name('input') if x.get_attribute('value') == 'Reply'][0]
if not(replyButton.is_displayed()):
driver.find_element_by_css_selector('._1s0').click()
if os.path.isfile(os.getcwd() + '/commands.txt'):
with open('commands.txt','r') as foo:
for a in foo.read().split('\n'):
ls = a.split(' ')
if len(ls) >= 2:
global customCommands
customCommands[ls[0]] = ' '.join(ls[1:])
print('\033[92mReady!\033[0m\n\n-------------- COMMANDS --------------')
if __name__ == '__main__':
init()
imgList = driver.find_elements_by_css_selector('._4yp9')
fileList = [x for x in driver.find_elements_by_tag_name('a') if x.get_attribute('rel') == 'nofollow']
while True:
waitForNextMessage()
time.sleep(0.50)
|
[
"noreply@github.com"
] |
karthikshathiri.noreply@github.com
|
70ada97be94c89d47931fdfd5e67ac45db59b12e
|
5356d7171ca957ed89b0361df601c5bf2be60de7
|
/djangoProject/profile_user/apps.py
|
58dc283a7be33f54a955016d890f0d7d5c4b6131
|
[] |
no_license
|
tawsifshahriar7/imdb
|
cfb29233e12c0fedce3064b4563cdfc29d822e12
|
37775606b061b9c959e46e5e5aec85b91c9175ab
|
refs/heads/master
| 2023-02-18T03:26:29.872042
| 2020-12-16T16:00:58
| 2020-12-16T16:00:58
| 303,738,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
from django.apps import AppConfig
class ProfileUserConfig(AppConfig):
name = 'djangoProject.profile_user'
|
[
"tawsifshahriar7@gmail.com"
] |
tawsifshahriar7@gmail.com
|
02551032c65dbbbd8d416c3d3717cc9d1146ec48
|
2b1b9612882a8a8d180522f0d51f956296e5f594
|
/youTube/myproject/settings.py
|
b2a10ad3eb618af2a04f841a2f3642823cff5926
|
[] |
no_license
|
sebastianmaxwell1/youtube_clone_django
|
7a9379e30117ad472818fd1b585025b660ebc6cc
|
c4a0b51043e9e240978a5f8e51f962d9fc5e7082
|
refs/heads/main
| 2023-04-10T11:43:28.703062
| 2021-04-27T14:20:47
| 2021-04-27T14:20:47
| 361,862,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,282
|
py
|
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=p5)u+8b-3vd6wqrg(azde@an@crg)ibb7!)dc!7+8ej5$(v*q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'youTube.apps.YoutubeConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'NAME': 'you_tube',
'USER': 'root',
'PASSWORD': 'Rmck@2319',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {
'autocommit': True
}
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"79651450+RobinHeath-Albuquerque@users.noreply.github.com"
] |
79651450+RobinHeath-Albuquerque@users.noreply.github.com
|
9be0008a4ca37a11244fcdf7c68e919c3e8e3528
|
4b99bc230f8c88e9116cd76a76bc636fc550c574
|
/Projet_Gestion/EMA/admin.py
|
c2faadaf00725c9b9197cf663da1c3ac8a637eba
|
[] |
no_license
|
rigosias/Gestion-Administration-Ecole---Django
|
afe6bb12e2cc1bfd9d287e91d87ecd3a764e140c
|
3bcac27fe3b42f94cd519f3b0504b84d92c3b43c
|
refs/heads/master
| 2021-05-29T03:01:08.082144
| 2015-04-19T19:56:14
| 2015-04-19T19:56:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from django.contrib import admin
from .models import Eleve,Matiere,Notes
# Register your models here.
admin.site.register(Eleve)
admin.site.register(Matiere)
admin.site.register(Notes)
|
[
"osiasrigobert@gmail.com"
] |
osiasrigobert@gmail.com
|
d664f3e84f89caa714f05a270ee948e90844669d
|
364d97a3888062711b47cd0ba883c4e1ba76736f
|
/utils/time_helpers.py
|
a7e03439d07bb7034f477ac64dc1358fe95e2d24
|
[] |
no_license
|
Ruii-Wang/my-twitter
|
75f608e501584b6fc031b5a7d48e80a108ca28bb
|
e88aaae243d5e7311e0eb6f442c46591bc0cb66f
|
refs/heads/main
| 2023-07-09T20:32:58.941923
| 2021-08-12T03:24:06
| 2021-08-12T03:24:06
| 359,600,436
| 0
| 0
| null | 2021-08-12T03:23:59
| 2021-04-19T21:15:09
|
Python
|
UTF-8
|
Python
| false
| false
| 128
|
py
|
from datetime import datetime
# python timezone
import pytz
def utc_now():
    # datetime.now() is naive local time; request an aware UTC timestamp directly
    return datetime.now(pytz.utc)
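# Usage sketch:
#   now = utc_now()
#   assert now.tzinfo is pytz.utc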
|
[
"ruiwang.bh@hotmail.com"
] |
ruiwang.bh@hotmail.com
|
e9627b7298229c5fb631207ec0b69ac4549bc2ef
|
00c7658ee35ffc9ab7cba7bf2a7c33e0c9ca16e6
|
/functions.py
|
44d2a22126741cf1ac618b3a8fbcf9de3ab9c902
|
[] |
no_license
|
klemen-nedelko/Quiz-App
|
fa9f139c07298852048477c12e5c6d09c9d62f14
|
90676707f36de8438fdf01e270b85c10449b3eb7
|
refs/heads/main
| 2023-06-13T00:45:42.921707
| 2021-07-08T11:33:58
| 2021-07-08T11:33:58
| 384,088,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,348
|
py
|
from PyQt5.QtWidgets import QGridLayout, QLabel, QPushButton
from PyQt5.QtGui import QPixmap, QCursor
from PyQt5 import QtCore
from urllib.request import urlopen
import json
import pandas as pd
import random
#https://opentdb.com/api.php?amount=50&category=9&difficulty=medium&type=multiple
with urlopen("https://opentdb.com/api.php?amount=50&category=9&difficulty=easy&type=multiple") as webpage:
data = json.loads(webpage.read().decode())
df = pd.DataFrame(data["results"])
#preload the questions
def preload_data(indx):
question = df["question"][indx]
correct = df["correct_answer"][indx]
wrong = df["incorrect_answers"][indx]
    # map the HTML entities in the API strings back to plain characters
    # (the entity list was garbled in this copy; reconstructed from the
    # obvious intent: the ', ", < and > escapes sent by the Open Trivia DB)
    formatting = [
        ("&#039;", "'"),
        ("&quot;", '"'),
        ("&lt;", "<"),
        ("&gt;", ">")
    ]
    #replace inside the strings
    for pair in formatting:
        question = question.replace(pair[0], pair[1])
        correct = correct.replace(pair[0], pair[1])
    #replace inside the list of wrong answers
    for pair in formatting:
        wrong = [char.replace(pair[0], pair[1]) for char in wrong]
    #.append adds to the list
parameters["question"].append(question)
parameters["correct"].append(correct)
all_answers = wrong + [correct]
random.shuffle(all_answers)
parameters["answer1"].append(all_answers[0])
parameters["answer2"].append(all_answers[1])
parameters["answer3"].append(all_answers[2])
parameters["answer4"].append(all_answers[3])
#global game-state parameters
parameters = {
"question":[],
"answer1":[],
"answer2":[],
"answer3":[],
"answer4":[],
"correct":[],
"score":[],
"index":[]
}
#global widget references
widgets = {
"logo": [],
"button": [],
"score": [],
"question": [],
"answer1": [],
"answer2": [],
"answer3": [],
"answer4": [],
"message": [],
"message2": []
}
# grid layout
grid = QGridLayout()
#clear the previously added widgets (buttons, questions)
def clear_widgets():
''' hide all existing widgets and erase
them from the global dictionary'''
for widget in widgets:
if widgets[widget] != []:
widgets[widget][-1].hide()
for i in range(0, len(widgets[widget])):
widgets[widget].pop()
# reset the game parameters at the end of a game
def clear_parameters():
for parm in parameters:
if parameters[parm] !=[]:
for i in range(0, len(parameters[parm])):
parameters[parm].pop()
parameters["index"].append(random.randint(0,49))
parameters["score"].append(0)
#start a new game
def start_game():
    #reset all widgets and parameters before starting
clear_widgets()
clear_parameters()
preload_data(parameters["index"][-1])
frame2()
#answer-button factory
def create_buttons(answer, l_margin, r_margin):
    #create identical buttons with custom left/right margins
button = QPushButton(answer)
button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
button.setFixedWidth(485)
button.setStyleSheet(
#setting variable margins
"*{margin-left: " + str(l_margin) +"px;"+
"margin-right: " + str(r_margin) +"px;"+
'''
border: 4px solid '#176744';
color: white;
font-family: 'shanti';
font-size: 16px;
border-radius: 25px;
padding: 15px 0;
margin-top: 20px;
}
*:hover{
background: '#176744';
}
'''
)
button.clicked.connect(lambda x: is_correct(button))
return button
#check whether the clicked answer is correct
def is_correct(btn):
if btn.text() == parameters["correct"][-1]:
        print(btn.text() + " is correct")
temp_score = parameters["score"][-1]
        #.pop() removes a single element from the list
parameters["score"].pop()
parameters["score"].append(temp_score + 10)
parameters["index"].pop()
parameters["index"].append(random.randint(0,49))
preload_data(parameters["index"][-1])
widgets["score"][-1].setText(str(parameters["score"][-1]))
widgets["question"][0].setText(parameters["question"][-1])
widgets["answer1"][0].setText(parameters["answer1"][-1])
widgets["answer2"][0].setText(parameters["answer2"][-1])
widgets["answer3"][0].setText(parameters["answer3"][-1])
widgets["answer4"][0].setText(parameters["answer4"][-1])
if parameters["score"][-1]==100:
clear_widgets()
frame3()
else:
clear_widgets()
frame4()
# the start-menu frame
def frame1():
clear_widgets()
#logo widget
image = QPixmap("logo.png")
logo = QLabel()
logo.setPixmap(image)
logo.setAlignment(QtCore.Qt.AlignCenter)
logo.setStyleSheet("margin-top: 100px;")
widgets["logo"].append(logo)
#button widget
button = QPushButton("PLAY")
button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
button.setStyleSheet(
'''
*{
border: 4px solid '#176744';
border-radius: 45px;
font-size: 35px;
color: 'white';
padding: 25px 0;
margin: 100px 200px;
}
*:hover{
background: '#176744';
}
'''
)
#button callback
button.clicked.connect(start_game)
widgets["button"].append(button)
#place global widgets
grid.addWidget(widgets["logo"][-1], 0, 0, 1, 2)
grid.addWidget(widgets["button"][-1], 1, 0, 1, 2)
#FRAME 2
#the gameplay frame
def frame2():
#score widget
score = QLabel(str(parameters["score"][-1]))
score.setAlignment(QtCore.Qt.AlignRight)
score.setStyleSheet(
'''
font-size: 35px;
color: 'white';
padding: 15px 10px;
margin: 20px 200px;
background: '#64A314';
border: 1px solid '#64A314';
border-radius: 35px;
'''
)
widgets["score"].append(score)
#question widget
question = QLabel(parameters["question"][-1])
question.setAlignment(QtCore.Qt.AlignCenter)
question.setWordWrap(True)
question.setStyleSheet(
'''
font-family: 'shanti';
font-size: 25px;
color: 'white';
padding: 75px;
'''
)
widgets["question"].append(question)
#answer button widgets
button1 = create_buttons(parameters["answer1"][-1], 85, 5)
button2 = create_buttons(parameters["answer2"][-1], 5, 85)
button3 = create_buttons(parameters["answer3"][-1], 85, 5)
button4 = create_buttons(parameters["answer4"][-1], 5, 85)
widgets["answer1"].append(button1)
widgets["answer2"].append(button2)
widgets["answer3"].append(button3)
widgets["answer4"].append(button4)
#place widget
grid.addWidget(widgets["score"][-1], 0, 1)
grid.addWidget(widgets["question"][-1], 1, 0, 1, 2)
grid.addWidget(widgets["answer1"][-1], 2, 0)
grid.addWidget(widgets["answer2"][-1], 2, 1)
grid.addWidget(widgets["answer3"][-1], 3, 0)
grid.addWidget(widgets["answer4"][-1], 3, 1)
#FRAME 3 - WIN GAME
#shown after 10 correct answers: displays the score and a congratulation
def frame3():
#congradulations widget
message = QLabel("Congratulations! your score is:")
message.setAlignment(QtCore.Qt.AlignRight)
message.setStyleSheet(
"font-family: 'Shanti'; font-size: 25px; color: 'white'; margin: 100px 0px;"
)
widgets["message"].append(message)
#score widget
score = QLabel("100")
score.setStyleSheet("font-size: 100px; color: #8FC740; margin: 0 75px 0px 75px;")
widgets["score"].append(score)
#go back to work widget
message2 = QLabel("Nice work")
message2.setAlignment(QtCore.Qt.AlignCenter)
message2.setStyleSheet(
"font-family: 'Shanti'; font-size: 30px; color: 'white'; margin-top:0px; margin-bottom:75px;"
)
widgets["message2"].append(message2)
#button widget
button = QPushButton('TRY AGAIN')
button.setStyleSheet(
"*{background:'#176744'; padding:25px 0px; border: 1px solid '#176744'; color: 'white'; font-family: 'Arial'; font-size: 25px; border-radius: 40px; margin: 10px 300px;} *:hover{background:'#176744';}"
)
button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
button.clicked.connect(frame1)
widgets["button"].append(button)
#logo widget
pixmap = QPixmap('logo_bottom.png')
logo = QLabel()
logo.setPixmap(pixmap)
logo.setAlignment(QtCore.Qt.AlignCenter)
logo.setStyleSheet(
"padding :10px; margin-top:75px; margin-bottom: 20px;"
)
widgets["logo"].append(logo)
#place widgets on the grid
grid.addWidget(widgets["message"][-1], 2, 0)
grid.addWidget(widgets["score"][-1], 2, 1)
grid.addWidget(widgets["message2"][-1], 3, 0, 1, 2)
grid.addWidget(widgets["button"][-1], 4, 0, 1, 2)
grid.addWidget(widgets["logo"][-1], 5, 0, 2, 2)
# FRAME 4 - FAIL
#frame shown when an answer was wrong
def frame4():
#sorry widget
message = QLabel("Sorry, this answer \nwas wrong\n your score is:")
message.setAlignment(QtCore.Qt.AlignRight)
message.setStyleSheet(
"font-family: 'Shanti'; font-size: 35px; color: 'white'; margin: 75px 5px; padding:20px;"
)
widgets["message"].append(message)
#score widget
score = QLabel(str(parameters["score"][-1]))
score.setStyleSheet("font-size: 100px; color: white; margin: 0 75px 0px 75px;")
widgets["score"].append(score)
#button widget
button = QPushButton('TRY AGAIN')
button.setStyleSheet(
'''*{
padding: 25px 0px;
background: '#176744';
color: 'white';
font-family: 'Arial';
font-size: 35px;
border-radius: 40px;
margin: 10px 200px;
}
*:hover{
background: '#176744';
}'''
)
button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
button.clicked.connect(frame1)
widgets["button"].append(button)
    #place widgets on the screen
grid.addWidget(widgets["message"][-1], 1, 0)
grid.addWidget(widgets["score"][-1], 1, 1)
grid.addWidget(widgets["button"][-1], 2, 0, 1, 2)
#grid.addWidget(widgets["logo"][-1], 3, 0, 1, 2)
|
[
"56438480+symtc@users.noreply.github.com"
] |
56438480+symtc@users.noreply.github.com
|
a6a256d2eaf295080f2c0d4a2367b72c1d59aed7
|
ba3531caeb4822ec3bd82a60ea13c00a8179f26c
|
/basis/quick_sort.py
|
96e469b96f05ba1d2687ffde73090b6e5af44d98
|
[] |
no_license
|
XuHaoIgeneral/PyTest
|
d52cf723f0685cc6db405169f7ade6e915de6e59
|
ae09f3890798cb91b1daeaef1151728ff5406919
|
refs/heads/master
| 2020-03-25T03:44:53.389089
| 2018-09-28T08:33:57
| 2018-09-28T08:33:57
| 143,358,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
def quick_sort(nums, left, right):
    """In-place quicksort using the 'dig a hole, fill a hole' partition."""
    if left >= right:
        return
    low = left
    high = right
    key = nums[low]
    while right > left:
        # scan from the right for an element <= pivot
        while right > left and nums[right] > key:
            right = right - 1
        nums[left] = nums[right]
        # scan from the left for an element > pivot
        while right > left and nums[left] <= key:
            left = left + 1
        nums[right] = nums[left]
    nums[right] = key
    quick_sort(nums, low, left - 1)
    quick_sort(nums, left + 1, high)

nums = [6, 5, 8, 9, 7, 2, 3, 4, 5, 6]
quick_sort(nums, 0, len(nums) - 1)
print(nums)

def quicksort(items):
    """Functional quicksort: returns a new sorted list, input unchanged."""
    if len(items) < 2:
        return items
    pivot = items[0]
    less = [i for i in items[1:] if i <= pivot]
    greater = [i for i in items[1:] if i > pivot]
    return quicksort(less) + [pivot] + quicksort(greater)

nums = [6, 5, 8, 9, 7, 2, 3, 4, 5, 6]
sorted_nums = quicksort(nums)
print(nums)
print(sorted_nums)
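
# Quick sanity check (illustrative addition): both variants should agree
# with Python's built-in sorted() on random input.
import random
sample = [random.randint(0, 99) for _ in range(20)]
in_place = sample[:]
quick_sort(in_place, 0, len(in_place) - 1)
assert in_place == sorted(sample) == quicksort(sample)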
|
[
"noreply@github.com"
] |
XuHaoIgeneral.noreply@github.com
|
6e1e36143324892f629b7f48298f2c2bdf067b6e
|
a83aaa311792fe8ff03726758afe211606feb15c
|
/spatial_experiment/spatial_run.py
|
84a7cbbb5d73aa9befeb02ad8ab0816831448901
|
[
"MIT"
] |
permissive
|
muskanmahajan37/jiant
|
10cf0850604cfd8d5bc3b251a1bb1906b234c418
|
e76f58e274e0e554ce43b9563085436861e8f3a6
|
refs/heads/master
| 2023-05-20T23:37:01.065016
| 2021-06-10T19:08:43
| 2021-06-10T19:08:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
import jiant.proj.main.runscript as main_runscript
run_args = main_runscript.RunConfiguration(
jiant_task_container_config_path="./spatial_experiment/run_configs/spatial_run_config.json",
output_dir="./spatial_experiment/runs/spatial",
hf_pretrained_model_name_or_path="bert-base-uncased",
model_path="./spatial_experiment/models/bert/model/model.p",
model_config_path="./spatial_experiment/models/bert/model/config.json",
learning_rate=1e-5,
eval_every_steps=500,
do_train=False,
do_val=True,
do_save=True,
force_overwrite=True,
)
main_runscript.run_loop(run_args)
|
[
"lukeholman@my.unt.edu"
] |
lukeholman@my.unt.edu
|
e99ce3d607576d6aafdc6083c2a214c496b8cf41
|
8496679260155744119ebcd1834a7c7017fc1281
|
/app/seeds/users.py
|
72de125e9d0a51e171cde28eddc748bc2f49ad37
|
[] |
no_license
|
laurengus17/HomeGrown
|
b696af8bf690d61fdc4c85e44c774c367e50f9fe
|
642c0046040b86554cea20b7c6e213440c4e5836
|
refs/heads/main
| 2023-06-29T19:47:37.353781
| 2021-08-04T19:09:10
| 2021-08-04T19:09:10
| 381,188,381
| 11
| 0
| null | 2021-08-04T19:09:11
| 2021-06-28T23:48:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,126
|
py
|
from app.models import db, User
# Adds a demo user, you can add other users here if you want
def seed_users():
demo = User(
first_name='Demo', last_name='McDemo', username='Demo', email='demoMcdemo@demo.io',
password='password', imgURL='https://1.bp.blogspot.com/-gGzDUMqfv4o/WN7wk3zHm3I/AAAAAAAAlR4/h2XjcsBmyJY5WXV-aJ6YA8uYe1XxmLhAwCLcB/s1600/Picture%2B9.png')
david = User(
first_name='David', last_name='Attenborough', username='davidA', email='davidA@borough.com',
password='password', imgURL='https://cdn.britannica.com/83/136183-050-28D77230/David-Attenborough-2008.jpg')
jane = User(
first_name='Jane', last_name='Goodall', username='janeGood', email='jGood@all.com',
password='password', imgURL='https://bloximages.chicago2.vip.townnews.com/idahopress.com/content/tncms/assets/v3/editorial/5/f1/5f1f5910-1762-5b42-a771-eac424e304f0/5f1f5910-1762-5b42-a771-eac424e304f0.preview-300.jpg?crop=201%2C201%2C49%2C0&resize=1200%2C1200&order=crop%2Cresize')
ansel = User(
first_name='Ansel', last_name='Adams', username='aAdams', email='ansel@Adams.com',
password='password', imgURL='https://i0.wp.com/www.anseladams.com/wp-content/uploads/2018/10/AA-moonrise-768x768-1.jpg?ssl=1')
john = User(
first_name='John', last_name='Muir', username='jMuir', email='john@mountains.com',
password='password', imgURL='https://www.rucsacs.com/wp-content/uploads/2020/08/ciclwarefblogspot.com-_Young_John__Muir.jpg')
mary = User(
first_name='Mary', last_name='Oliver', username='mOliver', email='maryO@poems.com',
password='password', imgURL='https://bostonglobe-prod.cdn.arcpublishing.com/resizer/s9E-QY5SFm8bhLCkd_01CdzXcwg=/1440x0/arc-anglerfish-arc2-prod-bostonglobe.s3.amazonaws.com/public/BKY27MQ2QII6TKCUYBGA6AGH3Q.jpg')
alice = User(
first_name='Alice', last_name='Waters', username='aWaters', email='alice@freshfood.com',
password='password', imgURL='https://i0.wp.com/slowfoodnations.org/wp-content/uploads/Alice-Waters_sq.jpg?fit=500%2C500&ssl=1')
michael = User(
first_name='Michael', last_name='Pollan', username='pollanSeason', email='mike@pollan.com',
password='password', imgURL='https://news.northeastern.edu/wp-content/uploads/2019/09/091019_AG_Michael_Pollan_00222-590x886.jpg')
stephen = User(
first_name='Stephen', last_name='Satterfield', username='stephenS', email='satterfield@onthehog.com',
password='password', imgURL='https://www.moadsf.org/wp-content/uploads/2018/04/Stephen-Satterfield.jpg')
users = [demo, david, jane, ansel, john, mary, alice, michael, stephen]
for user in users:
db.session.add(user)
db.session.commit()
# Uses a raw SQL query to TRUNCATE the users table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and RESET IDENTITY
# resets the auto incrementing primary key, CASCADE deletes any
# dependent entities
def undo_users():
db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
db.session.commit()
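
# Illustrative wiring (hypothetical, not from this repo): seed functions like
# these are typically exposed through a Flask CLI group, e.g.
#
#   from flask.cli import AppGroup
#   seed_commands = AppGroup('seed')
#
#   @seed_commands.command('all')
#   def seed():
#       seed_users()
#
#   @seed_commands.command('undo')
#   def undo():
#       undo_users()
#
# and registered on the app with app.cli.add_command(seed_commands).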
|
[
"lauren.gustafson17@gmail.com"
] |
lauren.gustafson17@gmail.com
|
d52d90deed52cf2c8c2a57e3e6dd3c538eef5629
|
0af238daa690fcbb075fd75a5742ff880d7262ef
|
/OptionPricingPython/OptionPricingPython/BlackScholesPricing.py
|
5f1b4a6ed2a69d4293b470ce190bf5e282fecc2b
|
[] |
no_license
|
BRutan/OptionPricingPython
|
aabc34b00279de76829720df094f4f103c826ee0
|
4e14dc91e76b1e43bfd9eeb60e67f3df1011a2ac
|
refs/heads/master
| 2020-09-22T20:21:15.902209
| 2019-12-11T19:27:22
| 2019-12-11T19:27:22
| 225,310,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,786
|
py
|
#############################################
## BlackScholesPricing.py
#############################################
## Description:
## * Computes the price of a European stock option
## using Black-Scholes formula.
import itertools
import math as m
import numpy
from scipy.stats import norm
import matplotlib.pyplot as plotter
from mpl_toolkits.mplot3d import Axes3D
import threading
__all__ = ['BlackScholes']
class BlackScholes(object):
"""
* Computes the price of a European stock option
using Black-Scholes formula.
"""
# All valid y arguments for plotter (must be one of):
__yArgsValid = {'price' : True, 'delta' : True, 'gamma' : True, 'vega' : True, 'rho' : True, 'theta' : True, 'expectedreturn' : True, 'optionvol' : True}
# Valid x arguments for plotter (variable -> (StartVal, EndVal):
__xArgsPlotting = {'k' : (0, 0), 'q' : (0, 0), 'r' : (0, 0), 's' : (0, 0), 'sigma' : (0, 0), 't' : (0, 0)}
__xArgsConstructor = {'k' : 0, 'q' : 0, 'r' : 0, 's' : 0, 't' : 0, 'sigma' : 0, 'type' : ''}
def __init__(self, args):
"""
* Construct object to compute price of European stock option
under risk-neutral pricing framework.
Inputs:
* args: Expecting dictionary containing all of the following keys:
{
* r: Yearly risk-free rate (numeric).
* sigma: Yearly standard deviation of prices (numeric, > 0).
            * k: Strike price for option (numeric, >= 0).
* s: Starting price (numeric, >= 0).
* q: Continuous dividend rate (numeric, >= 0).
* T: Years until expiry (numeric, > 0).
* type: 'call' or 'put' (String, case insensitive).
}
"""
self.__reqArgs = BlackScholes.__xArgsConstructor.copy()
for arg in self.__reqArgs:
self.__reqArgs[arg] = False
BlackScholes.__ValidateAndSetConstructor(self, args)
###################################
# Properties:
###################################
@property
def S_0(self):
return self.__s_0
@property
def Strike(self):
return self.__strike
@property
def DivRate(self):
return self.__divRate
@property
def RiskFree(self):
return self.__riskFree
@property
def Sigma(self):
return self.__sigma
@property
def Type(self):
return self.__type
@property
def T(self):
return self.__expiry
@property
def D_1(self):
"""
* Calculate and return d_1 used in option pricing.
"""
d_1 = m.log(self.S_0/self.Strike) + (self.RiskFree - self.DivRate + .5 * self.Sigma * self.Sigma) * self.T
return d_1 / (self.Sigma * m.sqrt(self.T))
@property
def D_2(self):
"""
* Calculate and return d_2 used in option pricing.
"""
return self.D_1 - self.Sigma * m.sqrt(self.T)
@property
def Price(self):
"""
* Compute price of option.
"""
r = self.__riskFree
s_0 = self.__s_0
k = self.__strike
T = self.__expiry
q = self.__divRate
sig = self.__sigma
d_1 = self.D_1
d_2 = self.D_2
price = m.exp(-q * T) * s_0 * norm.cdf(d_1) - m.exp(-r * T) * k * norm.cdf(d_2)
if self.__type == 'call':
# Return price of call using Black-Scholes formula:
return price
else:
# Compute put option price using put-call parity:
return price + k * m.exp(-r * T) - s_0 * m.exp(-q * T)
@property
def Delta(self):
"""
* Return Delta of option (linear change in price wrt S_0).
"""
q = self.__divRate
T = self.__expiry
d_1 = self.D_1
delta = m.exp(-q * T) * norm.cdf(d_1)
if self.__type == 'call':
return delta
else:
# Calculate delta using put-call parity:
return delta - m.exp(-q * T)
@property
def Gamma(self):
"""
* Return Gamma of option (second order derivative wrt S_0).
"""
        q = self.__divRate
        s_0 = self.__s_0
        T = self.__expiry
        d_1 = self.D_1
        sig = self.__sigma
        # Option gamma is same for puts and calls (includes dividend discounting):
        return m.exp(-q * T) * norm.pdf(d_1) / (s_0 * sig * m.sqrt(T))
@property
def Rho(self):
"""
* Calculate the Rho of option (linear change in price wrt risk-free rate).
"""
k = self.__strike
T = self.__expiry
r = self.__riskFree
d_2 = self.D_2
rho = k * T * m.exp(-r * T)
if self.__type == 'call':
return rho * norm.cdf(d_2)
else:
return -rho * norm.cdf(-d_2)
@property
def Theta(self):
"""
* Calculate the Theta of option (first order derivative wrt T).
"""
q = self.__divRate
s_0 = self.__s_0
T = self.__expiry
r = self.__riskFree
k = self.__strike
sig = self.__sigma
d_1 = self.D_1
d_2 = self.D_2
theta = -m.exp(-q * T) * s_0 * norm.pdf(d_1) * sig / (2 * m.sqrt(T))
        if self.__type == 'call':
            theta += q * s_0 * m.exp(-q * T) * norm.cdf(d_1)
            theta -= r * k * m.exp(-r * T) * norm.cdf(d_2)
        else:
            # Calculate theta using put-call parity:
            # theta_put = theta_call + r*K*e^(-r*T) - q*S*e^(-q*T)
            theta -= q * s_0 * m.exp(-q * T) * norm.cdf(-d_1)
            theta += r * k * m.exp(-r * T) * norm.cdf(-d_2)
return theta
@property
def Vega(self):
"""
* Calculate the Vega of option (linear change in price wrt sigma).
"""
# Vega is the same for puts and calls:
q = self.__divRate
s_0 = self.__s_0
T = self.__expiry
d_1 = self.D_1
return s_0 * m.exp(-q * T) * norm.pdf(d_1) * m.sqrt(T)
@property
def OptionVol(self):
"""
* Return option volatility.
"""
return self.__s_0 * self.Delta * self.__sigma / self.Price
@property
def ParamsString(self):
"""
* Return parameters in string format.
"""
args = [self.GetProperty('type')]
for arg in BlackScholes.__xArgsConstructor:
val = self.GetProperty(arg)
if isinstance(val, int):
args.append(arg + ":" + str(int(val)))
elif isinstance(val, float):
args.append(arg + ":{0:.2f}".format(val))
return ','.join(args)
@property
def Greeks(self):
"""
* Return dictionary containing all greeks given current
parameters.
"""
return {'Delta:' : self.Delta, 'Gamma:': self.Gamma, 'Rho:' : self.Rho, 'Vega:': self.Vega, 'Theta:' : self.Theta}
@property
def AttributeString(self):
greeks = self.Greeks
strs = [''.join(['European {', self.ParamsString, '}'])]
for key in greeks.keys():
strs.append(''.join([key, ':', str(greeks[key])]))
strs.append(''.join(['ExpectedReturn:', str(self.ExpectedReturn())]))
strs.append(''.join(['Volatility:', str(self.OptionVol)]))
return '\n'.join(strs)
###################################
# Setters:
###################################
@S_0.setter
def S_0(self, s_0):
if not isinstance(s_0, float) and not isinstance(s_0, int):
raise Exception("S_0 must be numeric.")
elif s_0 <= 0:
raise Exception("S_0 must be positive.")
self.__reqArgs["s"] = True
self.__s_0 = s_0
@Strike.setter
def Strike(self, strike):
if not isinstance(strike, float) and not isinstance(strike, int):
raise Exception("Strike must be numeric.")
elif strike < 0:
raise Exception("Strike must be non-negative.")
self.__reqArgs["k"] = True
self.__strike = strike
@DivRate.setter
def DivRate(self, divRate):
if not isinstance(divRate, float) and not isinstance(divRate, int):
raise Exception("DivRate must be numeric.")
elif divRate < 0:
raise Exception("DivRate must be non-negative")
self.__reqArgs["q"] = True
self.__divRate = divRate
@RiskFree.setter
def RiskFree(self, riskFree):
if not isinstance(riskFree, float) and not isinstance(riskFree, int):
raise Exception("riskFree must be numeric.")
self.__reqArgs["r"] = True
self.__riskFree = riskFree
@Sigma.setter
def Sigma(self, sigma):
if not isinstance(sigma, float) and not isinstance(sigma, int):
raise Exception("sigma must be numeric.")
elif sigma <= 0:
raise Exception("sigma must be positive.")
self.__reqArgs["sigma"] = True
self.__sigma = sigma
@Type.setter
def Type(self, type_in):
if not isinstance(type_in, str):
raise Exception("type must be a string.")
elif type_in.lower() != 'call' and type_in.lower() != 'put':
raise Exception("type must be 'call' or 'put'.")
self.__reqArgs["type"] = True
self.__type = type_in.lower()
@T.setter
def T(self, T_in):
if not isinstance(T_in, float) and not isinstance(T_in, int):
raise Exception("T must be numeric.")
elif T_in <= 0:
raise Exception("T must be positive.")
self.__reqArgs["t"] = True
self.__expiry = T_in
###################################
# Interface Methods:
###################################
def InstantaneousChg(self, mu = None):
"""
* Compute instantaneous change in price of option, given risk free rate (mu).
Inputs:
* mu: P measure risk premium (numeric or None). If None, then uses current
risk-free rate (Q measure mu).
"""
if not mu:
mu = self.RiskFree
if not isinstance(mu, int) and not isinstance(mu, float):
raise Exception("mu must be numeric.")
r_orig = self.RiskFree
sig = self.__sigma
s = self.__s_0
chg = .5 * (( sig * s )** 2) * self.Gamma + mu * s * self.Delta + self.Theta
# Reset the risk-free rate to original value:
self.RiskFree = r_orig
return chg
def ExpectedReturn(self, mu = None):
"""
* Compute expected return of option, given risk free rate (mu).
Inputs:
* mu: Expecting numeric value, or None. If not specified, then uses current
risk-free rate.
"""
if mu and not isinstance(mu, int) and not isinstance(mu, float):
raise Exception("mu must be numeric.")
return self.InstantaneousChg(mu) / self.Price
def PlotRelationships(self, yArg, xArgs, numPts = 100):
"""
* Plot one relationship of
y = [ Price, Delta, Gamma, Vega, Rho, Theta, ExpectedReturn, OptionVolatility] (one, case insensitive)
vs x = [ s, r, q, t, k, sigma ] (max of 3, case insensitive).
Inputs:
* yArg: Expecting string denoting which y variable to map x values to.
* xArgs: Expecting { xStr -> tuple() } mapping of { max of 3 x listed above -> (StartVal, EndVal) }.
* numPts: Expecting integer denoting # of points to generate grid with. 100 by default. > 100 will result
in significantly slower speeds.
"""
# Ensure all parameters were valid, throw exception if not:
BlackScholes.__ValidatePlotting(yArg, xArgs, numPts)
yArg = BlackScholes.__ConvertYArg(yArg)
# Plot data for each sensitivity:
data = {}
xArgNames = []
# Generate all x data:
for arg in xArgs.keys():
origVal = self.GetProperty(arg)
xArgNames.append([arg, origVal])
data[arg] = []
startVal = xArgs[arg][0]
endVal = xArgs[arg][1]
stepSize = (endVal - startVal) / numPts
currStep = startVal
while currStep <= endVal:
data[arg].append(currStep)
currStep += stepSize
# Handle floating point issues:
while len(data[arg]) > numPts:
data[arg].pop()
# If doing multidimensional plot, generate all cross varying maps to y given
# all possible 2 combinations of x dimensions:
xParams = list(xArgs.keys())
combins = []
if len(xParams) > 1:
# Get all combinations of 2 X parameters
for subset in itertools.combinations(xParams, 2):
combins.append(subset)
else:
combins.append([xParams[0]])
# Generate all y data given x data (or all possible 2-combinations thereof):
pt = 0
meshes = {}
if len(xArgNames) > 1:
# Generate meshes for each combination of parameters, and mesh for output:
for combin in combins:
xArg1 = combin[0]
xArg2 = combin[1]
combinKey = xArg1 + xArg2
X_1, X_2 = numpy.meshgrid(data[xArg1], data[xArg2])
Y = numpy.zeros((numPts, numPts))
row = 0
# Calculate all y values using mesh values:
while row < numPts:
col = 0
while col < numPts:
self.SetProperty(xArg1, X_1[row][col])
self.SetProperty(xArg2, X_2[row][col])
                        yval = getattr(self, yArg)
                        # 'expectedreturn' maps to the ExpectedReturn method, not a property:
                        Y[row][col] = yval() if callable(yval) else yval
col += 1
row += 1
meshes[combinKey] = (X_1, X_2, Y)
else:
# Generate y value for every x value for 2D plot:
xArg = xArgNames[0][0]
Y = []
X = data[xArg]
while pt < numPts:
self.SetProperty(xArg, data[xArg][pt])
                yval = getattr(self, yArg)
                # 'expectedreturn' maps to the ExpectedReturn method, not a property:
                Y.append(yval() if callable(yval) else yval)
pt += 1
meshes[xArg] = (X, Y)
# Reset all x argument values to original values:
for vals in xArgNames:
self.SetProperty(vals[0], vals[1])
##################
# Plot all of the sensitivities:
##################
plotObj = plotter.figure()
title = ''.join(['European {', self.ParamsString, '}'])
if len(xArgs.keys()) > 1:
for combin in combins:
# Use 3-D plot:
combinKey = combin[0] + combin[1]
X_1, X_2, Y = meshes[combinKey]
axes = plotObj.add_subplot(111, projection='3d')
axes.plot_wireframe(X_1, X_2, Y)
axes.set_xlabel(combin[0])
axes.set_ylabel(combin[1])
axes.set_zlabel(yArg)
axes.title.set_text(title)
else:
# Use 2-D plot:
xArg = xArgNames[0][0]
X, Y = meshes[xArg]
plotObj.suptitle(title, fontsize = 10)
            axis = plotObj.add_subplot(111)
axis.plot(X, Y)
axis.set_ylabel(yArg)
axis.set_xlabel(xArg)
plotObj.show()
return plotObj
def PrintAttributes(self):
"""
* Print all option attributes (price info, greeks, etc) to stdout.
"""
print(self.AttributeString)
###################################
# Static Helpers:
###################################
@staticmethod
def XArgsPlotting():
"""
* Return copy x arg map for use in plotting.
"""
return BlackScholes.__xArgsPlotting.copy()
@staticmethod
def RequiredConstructorArgs():
"""
* Return copy of all required arguments for this class' constructor.
"""
return BlackScholes.__xArgsConstructor.copy()
###################################
# Private Helpers:
###################################
def __ValidateAndSetConstructor(self, args):
"""
* Validate all passed parameters to the constructor. Raise
exception if any are invalid. Set if acceptable.
"""
errMsgs = []
invalidArgs = []
# Validate all passed parameters:
for arg in args.keys():
try:
_lower = str(arg).lower()
if _lower not in self.__reqArgs.keys():
invalidArgs.append(arg)
elif _lower == 'k':
self.Strike = args[arg]
elif _lower == 'q':
self.DivRate = args[arg]
elif _lower == 'r':
self.RiskFree = args[arg]
elif _lower == 's':
self.S_0 = args[arg]
elif _lower == 'sigma':
self.Sigma = args[arg]
elif _lower == 't':
self.T = args[arg]
elif _lower == 'type':
self.Type = args[arg]
except Exception as ex:
                errMsgs.append(str(ex))  # Exception objects have no .message attribute in Python 3
# List all invalid arguments (not in the required arguments dictionary):
if len(invalidArgs) > 0:
            errMsgs.append(''.join(['The following args were invalid:', ','.join(invalidArgs)]))
# Ensure all required parameters were passed:
missingArgs = []
for arg in self.__reqArgs.keys():
if self.__reqArgs[arg] == False:
missingArgs.append(arg)
if len(missingArgs) > 0:
errMsgs.append(''.join(['The following required args were missing:', ','.join(missingArgs)]))
# Raise exception if any parameters were invalid:
if len(errMsgs) > 0:
raise Exception('\n'.join(errMsgs))
@staticmethod
def __ConvertYArg(yArg):
"""
* Convert case insensitive y argument to case sensitive version to allow use with
getattr().
        Valid values: price, delta, gamma, vega, rho, theta, expectedreturn, optionvol.
"""
if yArg == 'price':
return 'Price'
if yArg == 'delta':
return 'Delta'
if yArg == 'gamma':
return 'Gamma'
if yArg == 'vega':
return 'Vega'
if yArg == 'rho':
return 'Rho'
if yArg == 'theta':
return 'Theta'
if yArg == 'expectedreturn':
return 'ExpectedReturn'
if yArg == 'optionvol':
return 'OptionVol'
@staticmethod
def __ValidatePlotting(yArg, xArgs, numPts):
"""
* Validate all input arguments to the plotting method.
"""
invalidArgs = []
messages = []
# Ensure that input arguments are valid:
if not isinstance(yArg, str):
invalidArgs.append("yArg must be a string.")
elif not yArg.lower() in BlackScholes.__yArgsValid.keys():
validYArgs = ','.join(list(BlackScholes.__yArgsValid.keys()))
temp = ["yArg {", yArg.lower(), "} must be one of [", validYArgs, "]"]
invalidArgs.append(''.join(temp))
if not isinstance(numPts, int) and not isinstance(numPts, float):
invalidArgs.append("numPts must be numeric.")
if not isinstance(xArgs, dict):
invalidArgs.append("xArgs must be a dictionary.")
elif len(xArgs.keys()) == 0 or len(xArgs.keys()) > 3:
invalidArgs.append("xArgs must have at least one key, max of 3.")
else:
# Ensure that tuples of correct dimension were provided for each argument:
invalidMap = []
invalidXArgs = []
for arg in xArgs.keys():
_arg = arg.lower()
tup = xArgs[arg]
if _arg not in BlackScholes.__xArgsPlotting.keys():
invalidXArgs.append(_arg)
elif not isinstance(tup, tuple):
invalidMap.append(arg)
elif len(tup) != 2:
invalidMap.append(arg)
if len(invalidXArgs) > 0:
invalidXArgs = ['The following xArgs keys are invalid:{', ','.join(invalidXArgs), '}']
invalidArgs.append(''.join(invalidXArgs))
if len(invalidMap) > 0:
invalidMap = ['The following xArgs were not mapped to tuples of length 2:{', ','.join(invalidMap), '}']
invalidArgs.append(''.join(invalidMap))
if len(invalidArgs) > 0:
raise Exception('\n'.join(invalidArgs))
def SetProperty(self, arg, val):
"""
* Set the requested property using string and value.
Inputs:
* arg: Expecting a string.
* val: Expecting a numeric value or a string (if for type).
"""
arg = str(arg).lower()
if arg == 'k':
self.Strike = val
elif arg == 'q':
self.DivRate = val
elif arg == 'r':
self.RiskFree = val
elif arg == 's':
self.S_0 = val
elif arg == 'sigma':
self.Sigma = val
elif arg == 't':
self.T = val
elif arg == 'type':
self.Type = val
else:
raise Exception(arg + ' is invalid.')
def GetProperty(self, arg):
"""
* Return requested property using string.
"""
arg = str(arg).lower()
if arg == 'k':
return self.Strike
if arg == 'q':
return self.DivRate
if arg == 'r':
return self.RiskFree
if arg == 's':
return self.S_0
if arg == 'sigma':
return self.Sigma
if arg == 't':
return self.T
if arg == 'type':
return self.Type
else:
raise Exception(arg + ' is invalid.')
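

# Illustrative usage (sketch only; parameter values are assumptions, not from
# the original project):
if __name__ == '__main__':
    args = {'r': 0.05, 'sigma': 0.2, 'k': 100.0, 's': 105.0, 'q': 0.01,
            't': 1.0, 'type': 'call'}
    option = BlackScholes(args)
    option.PrintAttributes()
    # 2-D plot of option price against the underlying price S_0 in [50, 150]:
    option.PlotRelationships('price', {'s': (50.0, 150.0)}, numPts=50)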
|
[
"rutan.benjamin@gmail.com"
] |
rutan.benjamin@gmail.com
|
3ba52608ffeb61c615f02a14ad5a8f39235e6c6f
|
cc052460adb5bc1617cc2dba65427af665c9e7da
|
/accounts/migrations/0002_book_category.py
|
2452e6dcccfc00b20924747c11cbe76417688cba
|
[] |
no_license
|
Howl31/student-API-v1
|
7dc260db225e65b852b09694b1c97288180318ba
|
94b929537e8dc230bff7156d5f66d2d55050aef7
|
refs/heads/master
| 2023-06-02T08:26:22.622255
| 2021-06-18T09:30:52
| 2021-06-18T09:30:52
| 377,963,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
# Generated by Django 3.2.4 on 2021-06-17 09:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=100)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.category')),
],
),
]
|
[
"armakshay31@gmail.com"
] |
armakshay31@gmail.com
|
27e78548c52dd44b6f02bdcd3fc54c0d153d669d
|
3154395d10ff2f8d85d90758f2fa6d2dc9addfd0
|
/autotest/test_gwt_mst05.py
|
b21f8d0fbae93fe52d440bc9da5fdc9539a00be8
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
jtwhite79/modflow6
|
40507e5ce5a08f56e8a8029094b4cf147048e5c0
|
95ff014a1bddcee2d5d35859350571ba4fd9fb2f
|
refs/heads/master
| 2023-05-27T11:29:53.421941
| 2021-02-18T18:00:23
| 2021-02-18T18:00:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,633
|
py
|
"""
MODFLOW 6 Autotest
Test isotherms.
"""
import os
import sys
import numpy as np
try:
import flopy
except:
msg = 'Error. FloPy package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install flopy'
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
from binary_file_writer import write_head, write_budget, uniform_flow_field
ex = ['mst05a', 'mst05b']
isotherm = ['freundlich', 'langmuir']
distcoef = [0.3, 100.]
sp2 = [0.7, 0.003]
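# Parameter mapping for reference (per the MODFLOW 6 MST input conventions):
# with SORPTION FREUNDLICH, distcoef is Kf and sp2 is the exponent a in
# Cbar = Kf * C**a; with LANGMUIR, distcoef is Kl and sp2 is Sbar in
# Cbar = Kl * Sbar * C / (1 + Kl * C).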
xmax_plot = [1500, 500]
ymax_plot = [0.5, 1.0]
exdirs = []
for s in ex:
exdirs.append(os.path.join('temp', s))
ddir = 'data'
def get_model(idx, dir):
nlay, nrow, ncol = 1, 1, 101
perlen = [160., 1340.]
nper = len(perlen)
tslength = 10.
    nstp = [int(p / tslength) for p in perlen]  # number of time steps must be an integer
tsmult = nper * [1.]
delr = 0.16
delc = 0.16
top = 1.
botm = 0.
velocity = 0.1
porosity = 0.37
bulk_density = 1.587
dispersivity = 1.0
source_concentration = 0.05
specific_discharge = velocity * porosity
inflow_rate = specific_discharge * delc * (top - botm)
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-6, 1e-6, 1.
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(sim_name=name, version='mf6',
exe_name='mf6',
sim_ws=ws)
# create tdis package
tdis = flopy.mf6.ModflowTdis(sim, time_units='SECONDS',
nper=nper, perioddata=tdis_rc)
# create gwt model
gwtname = 'gwt_' + name
gwt = flopy.mf6.MFModel(sim, model_type='gwt6', modelname=gwtname,
model_nam_file='{}.nam'.format(gwtname))
gwt.name_file.save_flows = True
# create iterative model solution and register the gwt model with it
imsgwt = flopy.mf6.ModflowIms(sim, print_option='SUMMARY',
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation='NONE',
inner_maximum=ninner,
inner_dvclose=hclose, rcloserecord=rclose,
linear_acceleration='BICGSTAB',
scaling_method='NONE',
reordering_method='NONE',
relaxation_factor=relax,
filename='{}.ims'.format(gwtname))
sim.register_ims_package(imsgwt, [gwt.name])
dis = flopy.mf6.ModflowGwtdis(gwt, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc,
top=top, botm=botm)
ic = flopy.mf6.ModflowGwtic(gwt, strt=0.)
adv = flopy.mf6.ModflowGwtadv(gwt, scheme="TVD")
dsp = flopy.mf6.ModflowGwtdsp(gwt, xt3d_off=True, alh=dispersivity,
ath1=dispersivity)
mst = flopy.mf6.ModflowGwtmst(gwt,
sorption=isotherm[idx],
porosity=porosity,
bulk_density=bulk_density,
distcoef=distcoef[idx],
sp2=sp2[idx])
# sources
sourcerecarray = [('WEL-1', 'AUX', 'CONCENTRATION')]
ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray)
# create a heads file with head equal top
fname = os.path.join(ws, 'myhead.hds')
with open(fname, 'wb') as fbin:
kstp = 0
totim = 0
for kper in range(nper):
totim += perlen[kper]
write_head(fbin, top * np.ones((nrow, ncol)),
kstp=kstp + 1, kper=kper + 1,
pertim=perlen[kper], totim=totim)
# create a budget file
qx = specific_discharge
qy = 0.
qz = 0.
shape = (nlay, nrow, ncol)
spdis, flowja = uniform_flow_field(qx, qy, qz, shape, delr=delr, delc=delc)
dt = np.dtype([('ID1', np.int32),
('ID2', np.int32),
('FLOW', np.float64),
('CONCENTRATION', np.float64),
])
wel = [np.array([(0 + 1, 0 + 1, inflow_rate, source_concentration)], dtype=dt),
np.array([(0 + 1, 0 + 1, inflow_rate, 0.)], dtype=dt)]
chd = np.array([(ncol - 1 + 1, ncol - 1 + 1, -inflow_rate, 0.)], dtype=dt)
dt = np.dtype([('ID1', np.int32),
('ID2', np.int32),
('FLOW', np.float64),
('SATURATION', np.float64),
])
sat = np.array([(i, i, 0., 1.) for i in range(nlay * nrow * ncol)], dtype=dt)
fname = os.path.join(ws, 'mybudget.bud')
with open(fname, 'wb') as fbin:
kstp = 0
totim = 0
for kper in range(nper):
totim += perlen[kper]
delt = perlen[kper] / nstp[kper]
write_budget(fbin, flowja, kstp=kstp + 1, kper=kper + 1,
pertim=perlen[kper], totim=totim, delt=delt)
write_budget(fbin, spdis, text=' DATA-SPDIS', imeth=6,
kstp=kstp + 1, kper=kper + 1,
pertim=perlen[kper], totim=totim, delt=delt)
write_budget(fbin, sat, text=' DATA-SAT', imeth=6,
kstp=kstp + 1, kper=kper + 1,
pertim=perlen[kper], totim=totim, delt=delt)
write_budget(fbin, wel[kper], text=' WEL', imeth=6,
text2id2=' WEL-1',
kstp=kstp + 1, kper=kper + 1,
pertim=perlen[kper], totim=totim, delt=delt)
write_budget(fbin, chd, text=' CHD', imeth=6,
text2id2=' CHD-1',
kstp=kstp + 1, kper=kper + 1,
pertim=perlen[kper], totim=totim, delt=delt)
fbin.close()
# flow model interface
packagedata = [('GWFBUDGET', 'mybudget.bud', None),
                   ('GWFHEAD', 'myhead.hds', None)]  # matches the head file name written above
fmi = flopy.mf6.ModflowGwtfmi(gwt, packagedata=packagedata)
# output control
oc = flopy.mf6.ModflowGwtoc(gwt,
budget_filerecord='{}.cbc'.format(gwtname),
concentration_filerecord='{}.ucn'.format(gwtname),
concentrationprintrecord=[
('COLUMNS', 10, 'WIDTH', 15,
'DIGITS', 6, 'GENERAL')],
saverecord=[('CONCENTRATION', 'LAST'),
('BUDGET', 'LAST')],
printrecord=[('CONCENTRATION', 'LAST'),
('BUDGET', 'LAST')])
obs_data = {'conc_obs.csv': [
('X008', 'CONCENTRATION', (0, 0, 50)),
]}
obs_package = flopy.mf6.ModflowUtlobs(gwt, pname='conc_obs',
filename='{}.obs'.format(gwtname),
digits=10, print_input=True,
continuous=obs_data)
return sim
def build_models():
for idx, dir in enumerate(exdirs):
sim = get_model(idx, dir)
sim.write_simulation()
return
def eval_transport(sim):
print('evaluating transport...')
name = ex[sim.idxsim]
gwtname = 'gwt_' + name
fpth = os.path.join(sim.simpath, '{}.ucn'.format(gwtname))
try:
cobj = flopy.utils.HeadFile(fpth, precision='double',
text='CONCENTRATION')
conc = cobj.get_data()
except:
assert False, 'could not load data from "{}"'.format(fpth)
fpth = os.path.join(sim.simpath, 'conc_obs.csv')
try:
obs = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, 'could not load data from "{}"'.format(fpth)
cnorm = obs['X008'] / 0.05
cnorm_max = [0.32842034, 0.875391418]
msg = '{} /= {}'.format(cnorm_max[sim.idxsim], cnorm.max())
assert np.allclose(cnorm_max[sim.idxsim], cnorm.max(), atol=0.001), msg
savefig = False
if savefig:
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(obs['time'], obs['X008'] / 0.05, 'bo-')
plt.xlim(0, xmax_plot[sim.idxsim])
plt.ylim(0, ymax_plot[sim.idxsim])
plt.xlabel("Time, in seconds")
plt.ylabel('Normalized Concentration')
plt.title(isotherm[sim.idxsim])
fname = os.path.join(sim.simpath, 'results.png')
plt.savefig(fname)
return
# - No need to change any code below
def test_mf6model():
# initialize testing framework
test = testing_framework()
# build the models
build_models()
# run the test models
for idx, dir in enumerate(exdirs):
yield test.run_mf6, Simulation(dir, exfunc=eval_transport, idxsim=idx)
return
def main():
# initialize testing framework
test = testing_framework()
# build the models
build_models()
# run the test models
for idx, dir in enumerate(exdirs):
sim = Simulation(dir, exfunc=eval_transport, idxsim=idx)
test.run_mf6(sim)
return
if __name__ == "__main__":
# print message
print('standalone run of {}'.format(os.path.basename(__file__)))
# run main routine
main()
|
[
"noreply@github.com"
] |
jtwhite79.noreply@github.com
|
998a6446831189fb3a5e8df5deac0510483c271d
|
60458d3b777475f0e23b03a09e387966d0a1bced
|
/shop/migrations/0002_auto_20200723_0315.py
|
be5ec0f2094f4dd4e1f31055f93e48f2cd059a38
|
[] |
no_license
|
NouKD/creto-master
|
9dcf1f46a386cd9c8f2c8eb8b8a4d1970271f66f
|
c29a9b7a1ecf6b268a5a352d2cbaca7a0ce8082b
|
refs/heads/master
| 2022-11-19T17:07:32.463466
| 2020-07-23T06:53:39
| 2020-07-23T06:53:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
# Generated by Django 2.2.10 on 2020-07-23 03:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='produit',
old_name='price',
new_name='prix',
),
]
|
[
"franck@gmail.com"
] |
franck@gmail.com
|
5b16dbbaf44a3db17ccb3f121b89b3260ba19ecb
|
6ef006114c16b8ad4fa288a26cd1a15bd95d6b56
|
/resources/post.py
|
61c51acb1b7fff06dc4b0154328ed790320ae69c
|
[] |
no_license
|
a-soliman/flask-blog-api
|
8cac592da1b14703a6b41a8a9bad6908afe58a66
|
c6914cdea4da91db9d23711b7b8af65c4f985cf1
|
refs/heads/master
| 2020-03-19T10:45:58.995325
| 2018-06-07T16:44:05
| 2018-06-07T16:44:05
| 136,399,203
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,717
|
py
|
from flask_restful import Resource, reqparse
from flask_jwt import JWT, jwt_required, current_identity
from models.post import PostModel
class Post(Resource):
parser = reqparse.RequestParser()
parser.add_argument('title', type=str, required=True, help='title is required.')
parser.add_argument('body', type=str, required=True, help='body is required.')
    parser.add_argument('category_id', type=int, required=True, help='Category is required.')
@jwt_required()
def get(self):
print(current_identity.id)
return{'message': 'hey'}
@jwt_required()
def delete(self, id):
# get the current user's id
user_id = current_identity.id
post = PostModel.find_by_id(id)
if post is None:
return {'success': False, 'message': 'Post was not found'}, 404
# check if the current user is the owner of the post
if post.user != user_id:
return {
'success': False,
'message': 'Not Authorized to delete this post'
}, 401
# try to delete the post or 500
try:
post.delete_from_db()
except:
return {'message': 'Something went wrong'}, 500
return {
'success': True,
'message': 'Post was deleted successfully.'
}, 200
@jwt_required()
def put(self, id):
# get the current user's id
user_id = current_identity.id
post = PostModel.find_by_id(id)
if post is None:
return {
'success': False,
'message': 'Post was not found'
}, 404
# check if the current user is the owner of the post
if post.user != user_id:
return {
'success': False,
'message': 'Not Authorized to Edit this post'
}, 401
data = Post.parser.parse_args()
post.title = data['title']
post.body = data['body']
post.category_id = data['category_id']
        # try to save the post or 500
try:
post.save_to_db()
except:
return {'message': 'Something went wrong'}, 500
return {
'success': True,
'message': 'Post was edited successfully.'
}, 200
class AddPost(Resource):
parser = reqparse.RequestParser()
parser.add_argument('title', type=str, required=True, help='title is required.')
parser.add_argument('body', type=str, required=True, help='body is required.')
    parser.add_argument('category_id', type=int, required=True, help='Category is required.')
@jwt_required()
def post(self):
# get the current user's id
user_id = current_identity.id
# get the post data
data = AddPost.parser.parse_args()
# Create a new post using the data and user_id
post = PostModel(None, data['title'], data['body'], user_id, data['category_id'])
# Try saving the post
try:
post.save_to_db()
except:
return {'success': False, 'message': 'Something went wrong'}, 500
        return {'success': 'Created successfully'}, 201
class ListPosts(Resource):
@jwt_required()
def get(self):
# store current user id
user_id = current_identity.id
posts = [post for post in PostModel.query.all()]
# check to see if the current user is the owner of the post
for post in posts:
if post.user == user_id:
post.owner = True
else:
post.owner = False
return {'posts': [post.json() for post in posts]}
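
# Illustrative wiring (hypothetical; the actual routes live elsewhere in the
# app): these resources would typically be registered on a flask_restful Api:
#
#   from flask_restful import Api
#   api = Api(app)
#   api.add_resource(ListPosts, '/posts')
#   api.add_resource(AddPost, '/posts/new')
#   api.add_resource(Post, '/posts/<int:id>')
#
# (Post.get takes no id argument, so as written it would need a separate
# route from the id-based delete/put handlers.)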
|
[
"ahmed.soliman@programmer.net"
] |
ahmed.soliman@programmer.net
|
4c80b9a24642a4f8b3b47861780be734a9a24b03
|
4d8c6d4831d8958b7836c99c877d89a257691c5f
|
/app/schemas.py
|
1d7bba7c0f647c6683c8c32278b92d06c84332c6
|
[] |
no_license
|
vutran1710/FlaskFun
|
4dac0e8ca5f14dae01746c33f4f480337db6aa98
|
f5bb1e7e8b9e234045bea66d800c0c289f058146
|
refs/heads/master
| 2023-05-10T12:07:50.607919
| 2023-02-26T07:00:45
| 2023-02-26T07:00:45
| 213,142,610
| 4
| 1
| null | 2023-05-02T18:35:46
| 2019-10-06T09:42:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
user_schema = {
'email': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 128,
'valid_email': True
},
'name': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 128
},
'password': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 128,
'valid_password': True
}
}
password_schema = {
'new_password': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 128,
'valid_password': True
}
}
reset_schema = {
'email': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 128,
'valid_email': True
},
}
login_schema = {
'name': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 128
},
'password': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 128,
'valid_password': True
}
}
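
# Illustrative consumption sketch (assumption: these schemas are validated
# with a Cerberus Validator subclass that implements the custom rules):
#
#   import re
#   from cerberus import Validator
#
#   class AppValidator(Validator):
#       def _validate_valid_email(self, constraint, field, value):
#           """ {'type': 'boolean'} """
#           if constraint and not re.match(r'[^@]+@[^@]+\.[^@]+', value):
#               self._error(field, 'must be a valid email address')
#
#   AppValidator(login_schema).validate({'name': 'vu', 'password': 'S3cret!'})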
|
[
"me@vutr.io"
] |
me@vutr.io
|
b941539adb3781f158456ada9b43fb10c2ed3e80
|
8ebc8e9905687a88ae519e768d9ecb84a9bcc1a2
|
/libro/problemas_resueltos/capitulo2/problema2_8.py
|
4c7388b2933e86be1ad4324e5c4018113b39fa6a
|
[] |
no_license
|
alexsunseg/CYPAlejandroDG
|
b1d09e2efbfee77eba6af5e1767ebb93ac9e5880
|
531ec3536db408cbb5155821ef5cc6a76db2db2a
|
refs/heads/master
| 2020-07-23T18:47:03.496794
| 2019-11-14T23:16:51
| 2019-11-14T23:16:51
| 207,672,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
NUM = float(input("Enter the amount: "))
if NUM < 500:
    print("Pay the amount entered")
elif NUM <= 1000:
    DES = NUM - NUM*0.05
    print(f"Pay the new amount with a 5% discount: ${DES}")
elif NUM <= 7000:
    DES = NUM - NUM*0.11
    print(f"Pay the new amount with an 11% discount: ${DES}")
elif NUM <= 15000:
    DES = NUM - NUM*0.18
    print(f"Pay the new amount with an 18% discount: ${DES}")
else:
    DES = NUM - NUM*0.25
    print(f"Pay the new amount with a 25% discount: ${DES}")
print("End of program")
|
[
"alejandrodelao411@gmail.com"
] |
alejandrodelao411@gmail.com
|
a43e176268464b92c9a26d063990ecf73e6ca3df
|
4c48a5052d772ac472a99b34d4eb65e330e9810e
|
/14.py
|
c83097b0cc256b65db0474f2c96ecdb3c0137b6a
|
[] |
no_license
|
jack21997/C109156253
|
52f3ad788ec3ae422cb036beebd65d1476f0ee73
|
1033d6b5e5812e4aad806cfdb57276ebd4cf0e32
|
refs/heads/main
| 2023-04-15T02:36:23.662612
| 2021-05-06T12:47:47
| 2021-05-06T12:47:47
| 364,514,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
s = input("Enter a string: ")
print("There are %s characters" %len(s))
|
[
"noreply@github.com"
] |
jack21997.noreply@github.com
|
9246d5acd392ab1ae2820e78f0dd068372dfdd71
|
a69db990501a86e8154898855733ea3a3c6a593d
|
/test/base1.py
|
2df801234201c5370e603e624ac0e0c6a8b38084
|
[] |
no_license
|
cahthuranag/AoI_RL
|
118ddc5b615b0e5cf3e3ffc15cc40ff603061378
|
3cf52b367ee8c84ed5b32c8955364d9b22de013b
|
refs/heads/master
| 2023-05-27T21:44:58.256318
| 2019-01-21T14:57:13
| 2019-01-21T14:57:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,676
|
py
|
import os
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
import tensorflow as tf
import fixed_env as env
import a3c
import load_trace
import matplotlib.pyplot as plt
import time
S_INFO = 12 # state: ages of the 10 sensors, the last delay, and a normalized packet-size term
S_LEN = 5 # take how many frames in the past
A_DIM = 10
ACTOR_LR_RATE = 0.001
CRITIC_LR_RATE = 0.001
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
M_IN_K = 200.0
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> 4.3 Mbps
SMOOTH_PENALTY = 1
DEFAULT_QUALITY = 1 # default video quality without agent
RANDOM_SEED = 42
RAND_RANGE = 1000
SUMMARY_DIR = './results'
LOG_FILE = './results/log_sim_rl'
TRAIN_SEQ_LEN = 100
# log in format of time_stamp bit_rate buffer_size rebuffer_time chunk_size download_time reward
NN_MODEL = './models/nn_model_ep_9100.ckpt'
DEFAULT_SELECTION = 0
age = np.zeros((A_DIM,10000000))
#gamma = [0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99, 1]#, 1,1,0.9,0.85,0.8,0.7,0.6,0.6,0.6,0.5]
gamma = [1,1,0.9,0.85,0.8,0.7,0.6,0.6,0.6,0.5]
violation = np.zeros((A_DIM,1))
violation_n_k = np.zeros((A_DIM,10000000))
violation_hist = np.zeros((A_DIM,1))
lamba = 1000
mu = 0.0#10*lamba
#tau = [50,100,150,200,250,300,350,400,450,500]
tau = [30,50,70,90,110,130,150,170,190,210]
PACKET_SIZE = [50,100,150,200,250,300,350,400,450,500]
#PACKET_SIZE = [500,550,600,650,700,750,800,850,900,950]
epsilon = [0.001,0.001,0.0015,0.002,0.0025,0.003,0.0035,0.004,0.0045,0.005]
hamza = np.zeros((A_DIM,1))
anis = np.zeros((A_DIM,10000000))
j = np.zeros((A_DIM,1))
expected_age = np.zeros((A_DIM,1))
expected_age_n = np.zeros((A_DIM,1))
exp_queue = []
prob = [0.2079 ,0.1599, 0.1247, 0.1040, 0.0891, 0.0780, 0.0693, 0.0624, 0.0567, 0.0520]
def main():
np.random.seed(RANDOM_SEED)
assert len(PACKET_SIZE) == A_DIM
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace()
net_env = env.Environment(all_cooked_time=all_cooked_time,
all_cooked_bw=all_cooked_bw)
log_path = LOG_FILE + '_' + 'base1' + '_' + all_file_names[net_env.trace_idx]
log_file = open(log_path, 'wb')
with tf.Session() as sess:
actor = a3c.ActorNetwork(sess,
state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
learning_rate=ACTOR_LR_RATE)
critic = a3c.CriticNetwork(sess,
state_dim=[S_INFO, S_LEN],
learning_rate=CRITIC_LR_RATE)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver() # save neural net parameters
# restore neural net parameters
nn_model = NN_MODEL
if nn_model is not None: # nn_model is the path to file
saver.restore(sess, nn_model)
print("Model restored.")
time_stamp = 0
sensor_selection = DEFAULT_SELECTION
action_vec = np.zeros(A_DIM)
prob_violation = np.zeros(A_DIM)
violation_n = np.zeros(A_DIM)
action_vec[sensor_selection] = 1
s_batch = [np.zeros((S_INFO, S_LEN))]
a_batch = [action_vec]
r_batch = []
entropy_record = []
video_count = 0
k = 0
sum_age = 0
sum_violation = 0
objective = 0
        while k < 30000: # run for 30,000 scheduling decisions
# the action is from the last decision
# this is to make the framework similar to the real
delay, sleep_time, video_chunk_size = net_env.get_video_chunk(sensor_selection)
#time_stamp += delay # in ms
#time_stamp += sleep_time # in ms
#for n in range(0,A_DIM):
# violation[n] = 0
# if n == sensor_selection:
# age[n,k] = delay
# else:
# age[n,k] = age[n,k-1] + delay
# if age[n,k] > tau[n]:
# violation[n] += 1
# sum_age = np.sum(age[:,:])
# sum_violation = np.sum(violation)
# expected_age=sum_age/(k*A_DIM)
sum_age_before = np.sum(age[:,k])
current_violation = 0
for n in range(0,A_DIM):
#for k in range (1,TRAIN_SEQ_LEN):
if n == sensor_selection:
#print (j)
#time.sleep(2)
dummy = int(j[n])
j[n] += 1
age[n,k] = delay
anis[n,dummy]= age[n,k]
#violation[n] = 0
else:
age[n,k] = age[n,k-1] + delay
dummy = int(j[n])
anis[n,dummy]= age[n,k]
if age[n,k] > tau[n]:
violation[n] += 1
current_violation =current_violation+(10-n/10)
violation_n_k[n,k] += 1
prob_violation = violation/(k+1)
#print violation_n
#time.sleep(2)
for n in range(0,A_DIM):
#expected_age[n] = gamma[n]*np.sum((anis[n,:int(j[n])+1])/(int(j[n])+1))
expected_age_n[n]=np.sum(age[n,:])/((k+1))
if violation_n[n] > epsilon[n]:
hamza[n] = 1
else:
hamza[n] = 0
expected_age = np.sum(expected_age_n[:])/A_DIM
#prob_violation = violation/k
#reward = (-np.sum(age[:,k]) - lamba*np.sum(violation_n_k[:,k]) - mu*np.sum(hamza[:]))/100
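            # Reward decomposition: negative sum of the current ages, minus
            # lamba (=1000) times the weighted count of deadline violations in
            # this step, minus mu (=0 here) times the number of sensors whose
            # long-run violation rate exceeds its epsilon threshold.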
reward = (-np.sum(age[:,k]) - lamba*current_violation - mu*np.sum(hamza[:]))/100
sum_age += np.sum(age)
if k == 29999:
for n in range(0,A_DIM):
violation_n[n] = 1000*(10-n/10)*violation[n]/(k+1)
sum_age = sum_age/((k+1)*A_DIM)
sum_violation = np.sum(violation_n)
print(sum_age+sum_violation)
print(100*violation[:]/(k+1))
print(expected_age_n[:])
r_batch.append(reward)
log_file.write(str(time_stamp) + '\t' +
str(PACKET_SIZE[sensor_selection]) + '\t' +
str(delay) + '\t' +
str(reward) + '\t' +
str(age[0,k]) + '\t' +
str(age[1,k]) + '\t'+
str(age[2,k]) + '\t'+
str(age[3,k]) + '\t' +
str(age[4,k]) + '\t' +
str(age[5,k]) + '\t' +
str(age[6,k]) + '\t'+
str(age[7,k]) + '\t' +
str(age[8,k]) + '\t' +
str(age[9,k]) + '\n')
log_file.flush()
# retrieve previous state
if len(s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(s_batch[-1], copy=True)
# dequeue history record
state = np.roll(state, -1, axis=1)
state[0, -1] = float(age[0,k])/M_IN_K
state[1, -1] = float(age[1,k])/M_IN_K
state[2, -1] = float(age[2,k])/M_IN_K
state[3, -1] = float(age[3,k])/M_IN_K
state[4, -1] = float(age[4,k])/M_IN_K
state[5, -1] = float(age[5,k])/M_IN_K
state[6, -1] = float(age[6,k])/M_IN_K
state[7, -1] = float(age[7,k])/M_IN_K
state[8, -1] = float(age[8,k])/M_IN_K
state[9, -1] = float(age[9,k])/M_IN_K
#state[10, -1] = float(PACKET_SIZE[0])/float(PACKET_SIZE[9])
#state[11, -1] = float(PACKET_SIZE[1])/float(PACKET_SIZE[9])
#state[12, -1] = float(PACKET_SIZE[2])/float(PACKET_SIZE[9])
#state[13, -1] = float(PACKET_SIZE[3])/float(PACKET_SIZE[9])
#state[14, -1] = float(PACKET_SIZE[4])/float(PACKET_SIZE[9])
#state[15, -1] = float(PACKET_SIZE[5])/float(PACKET_SIZE[9])
#state[16, -1] = float(PACKET_SIZE[6])/float(PACKET_SIZE[9])
#state[17, -1] = float(PACKET_SIZE[7])/float(PACKET_SIZE[9])
#state[18, -1] = float(PACKET_SIZE[8])/float(PACKET_SIZE[9])
#state[19, -1] = float(PACKET_SIZE[9])/float(PACKET_SIZE[9])
state[10, -1] = float(delay)/100
state[11, -1] = float(PACKET_SIZE[sensor_selection])/(100*float(delay)*float(PACKET_SIZE[9]))
# compute action probability vector
action_prob = actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
#action_cumsum = np.cumsum(action_prob)
            # sample a sensor from the fixed probability vector prob: pick the
            # first index whose cumulative probability reaches the uniform draw
            random_value = np.random.rand()
            sensor_selection = int(np.searchsorted(np.cumsum(prob), random_value))
# Note: we need to discretize the probability into 1/RAND_RANGE steps,
# because there is an intrinsic discrepancy in passing single state and batch states
entropy_record.append(a3c.compute_entropy(action_prob[0]))
time_stamp += 1
# log time_stamp, bit_rate, buffer_size, reward
#if end_of_video:
# del s_batch[:]
# del a_batch[:]
# del r_batch[:]
# del entropy_record[:]
#k = 0
#for n in range(0,A_DIM):
# violation[n] = 0
# age[n,:] = 0
#sensor_selection = DEFAULT_SELECTION
#log_file.write('\n') # so that in the log we know where video ends
s_batch.append(state)
action_vec = np.zeros(A_DIM)
action_vec[sensor_selection] = 1
a_batch.append(action_vec)
#log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]
#log_file = open(log_path, 'wb')
k += 1
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
cahthuranag.noreply@github.com
|
de89e13c8f0334377898a604c9807d3a9a46ecdd
|
11ecbb9e99d93c3fd4f4a530e66f02769aa67b9e
|
/my_site/redact/views.py
|
afcdba7e5b5f90989cd29c06469bc5dfdecfa977
|
[] |
no_license
|
AlexanderEganov/Test_BD_site
|
a3b0bd021b6952ae4fd1998a4958c760912023a7
|
a0c76c882d438bcd6b28c66de17924ed323ed500
|
refs/heads/master
| 2020-04-27T12:00:53.364979
| 2019-06-26T14:40:24
| 2019-06-26T14:40:24
| 174,318,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return render(request, 'redact.html')
|
[
"48315596+AlexanderEganov@users.noreply.github.com"
] |
48315596+AlexanderEganov@users.noreply.github.com
|
aeb5d323de2e52d26432d831b5260498c79306f1
|
4acdda2a7e17c8cfbaa8fe2cb6c39db964dc25d2
|
/app/auth/views.py
|
8651cb98b746e0b51f56340d4b3aaba3ff32aeb9
|
[
"MIT"
] |
permissive
|
Vitalis-Kirui/Personal-Blog
|
c5dafb0aac197effac92d191ca39a25495108140
|
49af71b70f32ff4a588df26cd38091a6d80eb805
|
refs/heads/master
| 2023-07-31T00:49:04.553066
| 2021-09-26T19:47:50
| 2021-09-26T19:47:50
| 409,132,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from . import auth
from ..email import mail_message
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to F-Society","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',registration_form = form)
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "watchlist login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
|
[
"vitalis.kirui@student.moringaschool.com"
] |
vitalis.kirui@student.moringaschool.com
|
140129092a25295712a41e42b7da332f70196910
|
9a21a1bebe41118a62a7b7933103fa6fe8ebef11
|
/code/political_bias_bert_extractive_summarizer.py
|
4aa2e3b52eaeea6c071ac32276ea2af861599f0a
|
[
"MIT"
] |
permissive
|
Itz-Antaripa/bias-in-political-news
|
bc143b7efea03791f6e2a30f2a6fd4c60f8ede1b
|
3b69b981e01c7ee354e57739f68bc955ca615fd1
|
refs/heads/main
| 2023-03-16T20:41:56.331270
| 2021-03-06T17:05:22
| 2021-03-06T17:05:22
| 345,143,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,590
|
py
|
# -*- coding: utf-8 -*-
"""political_bias_bert_extractive_summarizer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1W0CsR4WdlJkKoBTBrtzh2nD0kKGXd1L7
"""
from google.colab import files
files.upload()
# importing libraries
import nltk
nltk.download('punkt')
nltk.download('stopwords')
import pandas as pd
import re
import string
df = pd.read_csv('political_bias_labeled_binary.csv')
df.sample(5)
df.shape
# adding a separate column to count total words in the articles
def word_count(text):
count = len(text.split())
return count
df['word_count'] = df['article_text'].apply(lambda x: word_count(x))
df.info()
df['word_count'].describe()
!pip install -q bert-extractive-summarizer
!pip install -q spacy==2.1.3
!pip install -q transformers==2.2.2
!pip install -q neuralcoref
from summarizer import Summarizer
from pprint import pprint
model = Summarizer() # instantiating bert-extractive-summarizer object
def summarize(text):
result = model(text, num_sentences=5, min_length=60)
summary = ''.join(result)
return summary
# adding summary of article_text
df['summary'] = df['article_text'].apply(lambda x: summarize(x))
df['summary_word_count'] = df['summary'].apply(lambda x: word_count(str(x)))  # count words, not characters
# reordering the columns
df=df[['article_text', 'word_count', 'summary', 'summary_word_count', 'label']]
df.sample(5)
df.loc[340]['summary']
df['label'].value_counts()
df.to_csv('political_bias_summarized_binary_levels.csv', index=False)
files.download('political_bias_summarized_binary_levels.csv')
|
[
"noreply@github.com"
] |
Itz-Antaripa.noreply@github.com
|
3a2f9afe79c847c9ef2af1cd551af5ef44cdfbe0
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_12_01/aio/operations/_subnets_operations.py
|
5e45e86c6802a0c8a4d25f223f92b6a82b80e83b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 21,550
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations:
"""SubnetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.Subnet":
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_12_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs
) -> "_models.Subnet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs
) -> AsyncLROPoller["_models.Subnet"]:
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2016_12_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_12_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def list(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> AsyncIterable["_models.SubnetListResult"]:
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
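# Usage sketch (hedged; not part of the generated file). A minimal example of
# driving these operations through the public client for this API version; it
# assumes the azure-identity package is installed and uses placeholder resource
# names ("my-rg", "my-vnet", "my-subnet").
async def _subnets_usage_sketch(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential  # assumed installed
    from azure.mgmt.network.v2016_12_01.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, subscription_id) as client:
            # begin_* methods return an AsyncLROPoller; await .result() for the outcome
            poller = await client.subnets.begin_create_or_update(
                "my-rg", "my-vnet", "my-subnet",
                _models.Subnet(address_prefix="10.0.0.0/24"))
            subnet = await poller.result()
            print(subnet.name, subnet.provisioning_state)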
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
c86036ae46c5abf01a1796cbffc7794ece43cd5c
|
26560e0322a5919209327fc3d1b1ff4877378949
|
/boundingbox.py
|
447b5e15e201799f77ed19ceb8acb8ab977c0c60
|
[] |
no_license
|
xolbynz/bounding_box_test
|
515551de8f866e9ba6a97640c4d02a5762162c67
|
e7436de68b04d15d21626815ba0fd8b0c5e13ee6
|
refs/heads/master
| 2022-12-10T08:39:29.388631
| 2020-09-11T01:28:44
| 2020-09-11T01:28:44
| 293,472,806
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
## Using the provided images and label files, draw a bounding box on top of each image.
img0=cv2.imread('000000.png')
df0=pd.read_csv('000000.txt',sep=' ',header=None)
img1=cv2.imread('000001.png')
df1=pd.read_csv('000001.txt',sep=' ',header=None)
img2=cv2.imread('000002.png')
df2=pd.read_csv('000002.txt',sep=' ',header=None)
img3=cv2.imread('000003.png')
df3=pd.read_csv('000003.txt',sep=' ',header=None)
img4=cv2.imread('000004.png')
df4=pd.read_csv('000004.txt',sep=' ',header=None)
img5=cv2.imread('000005.png')
df5=pd.read_csv('000005.txt',sep=' ',header=None)
def boundingbox(img, df):
    # each row: class label in column 0; columns 4-7 hold the 2D box corners
    # (x1, y1, x2, y2), as in KITTI-style label files
    for X in range(0, len(df)):
        label = df[0][X]
        x1 = round(float(df[4][X]))
        y1 = round(float(df[5][X]))
        x2 = round(float(df[6][X]))
        y2 = round(float(df[7][X]))
        img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
        img = cv2.putText(img, label, (x1 - 10, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    return img
img0=boundingbox(img0,df0)
img1=boundingbox(img1,df1)
img2=boundingbox(img2,df2)
img3=boundingbox(img3,df3)
img4=boundingbox(img4,df4)
img5=boundingbox(img5,df5)
cv2.imshow('img',img0)
cv2.waitKey(0)
cv2.imshow('img',img1)
cv2.waitKey(0)
cv2.imshow('img',img2)
cv2.waitKey(0)
cv2.imshow('img',img3)
cv2.waitKey(0)
cv2.imshow('img',img4)
cv2.waitKey(0)
cv2.imshow('img',img5)
cv2.waitKey(0)
cv2.destroyAllWindows()
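# Hedged sketch of a loop-based variant (assumes the image/label pairs keep the
# zero-padded numeric names used above, e.g. 000000.png with 000000.txt):
import glob
import os
def show_all(pattern='*.png'):
    for img_path in sorted(glob.glob(pattern)):
        label_path = os.path.splitext(img_path)[0] + '.txt'
        boxed = boundingbox(cv2.imread(img_path), pd.read_csv(label_path, sep=' ', header=None))
        cv2.imshow('img', boxed)
        cv2.waitKey(0)
    cv2.destroyAllWindows()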
|
[
"noreply@github.com"
] |
xolbynz.noreply@github.com
|
b813456b9ffefbcabbaf62af7fbbd853102b02f0
|
4b6f3a93366fa7e8760ae0ac3183a0ec9d67c67c
|
/zeromq/req-broker-rep/request.py
|
94099f1f2a558d5a668b5a4c7e78ef6bbb65d931
|
[] |
no_license
|
ztenv/python
|
456e4b73542716698594e0cfd1974c4942ec81f5
|
fe4bfd92b8c19bda1707b3cfab6d9da6e53f2782
|
refs/heads/master
| 2022-12-09T23:58:37.638697
| 2020-08-02T11:56:04
| 2020-08-02T11:56:04
| 71,128,766
| 1
| 0
| null | 2022-11-22T03:14:28
| 2016-10-17T10:57:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
# -*- coding: utf-8 -*-
# @file : request.py
# @author : shlian
# @date : 2019/6/10
# @version: 1.0
# @desc :
import zmq
import zmq.asyncio
import asyncio
import time
import datetime
import json
import signal
import sys
import random
run_flag=True
async def start(name):
context=zmq.asyncio.Context(io_threads=2)
req_socket=context.socket(socket_type=zmq.REQ)
poller=zmq.asyncio.Poller()
poller.register(req_socket)
req_socket.connect("tcp://127.0.0.1:55558")
operators=["+","-","*","/"]
global run_flag
while(run_flag):
for event in await poller.poll(timeout=1):
if event[1] & zmq.POLLIN:
data=json.loads(await event[0].recv_json())
print("recv:{0}".format(data))
elif event[1] & zmq.POLLOUT:
data={"number1":random.randint(0,100),"operator":operators[random.randint(0,3)],"number2":random.randint(0,100),
"name":name,"timestamp":datetime.datetime.now().timestamp()}
data_str=json.dumps(data,ensure_ascii=False)
await event[0].send_json(data_str)
print("send:{0}".format(data_str))
#await asyncio.sleep(1)
elif event[1] & zmq.POLLERR:
print("poll error:{0},{1}".format(event[0],event[1]))
def sig_handler(signum,frame):
    global run_flag
    run_flag=False
    print(signum,run_flag)
    time.sleep(2)  # give the polling loop a moment to notice run_flag (asyncio.sleep cannot be awaited inside a signal handler)
    try:
        asyncio.get_event_loop().stop()
        asyncio.get_event_loop().close()
    except Exception as ee:
        print(ee)
if __name__=="__main__":
signal.signal(signal.SIGINT,sig_handler)
signal.signal(signal.SIGTERM,sig_handler)
name=sys.argv[0] if len(sys.argv)<=1 else sys.argv[1]
asyncio.get_event_loop().run_until_complete(future=start(name))
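# Hedged sketch of the matching REP side (not in this snippet): the REQ socket
# above connects to port 55558, and the payload carries number1, operator and
# number2. A minimal synchronous responder for direct testing could look like
# this (defined only, never called here):
def _rep_sketch():
    ctx = zmq.Context()
    rep = ctx.socket(zmq.REP)
    rep.bind("tcp://127.0.0.1:55558")
    ops = {"+": lambda a, b: a + b, "-": lambda a, b: a - b,
           "*": lambda a, b: a * b, "/": lambda a, b: a / b if b else None}
    while True:
        req = json.loads(rep.recv_json())  # the sender double-encodes via send_json(json.dumps(...))
        req["result"] = ops[req["operator"]](req["number1"], req["number2"])
        rep.send_json(json.dumps(req, ensure_ascii=False))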
|
[
"shaohua.lian@xuncetech.com"
] |
shaohua.lian@xuncetech.com
|
1cd32f72ad8218d822b458f0feceec85ce3248c2
|
ffc5722dd49bb1e8f268a5459f6bae1fd6139679
|
/Common/test/generateProjects
|
65990105e440e08db68aa253d7d721c355d63c26
|
[] |
no_license
|
javarias/scheduling
|
a5672d6ae9c44df34f04501e8bb1aa5ac6e7ff08
|
f7d4e40672993d4b786e979a22e89d13849c7023
|
refs/heads/master
| 2021-10-25T00:51:49.954776
| 2014-09-07T23:50:05
| 2014-09-07T23:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,387
|
#!/usr/bin/env python
#
# generateProject.py - Rafael Hiriart (rhiriart@nrao.edu)
#
# Generates ObsProjects with parameters following suitable distributions.
#
import math
import random
from users import users
from sensitivity import pointSourceSensitivity
###############################################################################
#
#
# Number of SchedBlocks that this script will generate
nschedblks = 500
pis = []
for user in users:
pis.append(user[2])
#
# RA, Dec distribution, uniformly distributed in
# 0.0 <= RA <= 360.0
# -90.0 <= Dec <= 47.0
#
deltaRA = 1.0
startRA = 0.0
endRA = 360.0
deltaDec = 1.0
startDec = -90.0
endDec = 47.0
K = 10
points = []
ra = startRA
while ra <= (endRA - deltaRA):
dec = startDec
while dec <= (endDec - deltaDec):
npoints = int( K * deltaRA * deltaDec * math.cos(math.radians(dec)) )
for i in range(npoints):
pra = ra + random.random() * deltaRA
pdec = dec + random.random() * deltaDec
points.append((pra, pdec))
dec = dec + deltaDec
ra = ra + deltaRA
#
# Receiver band distribution
#
# Receiver band limits (GHz)
bandLimits = { 'band3' : (84.0, 116.0),
'band4' : (125.0, 163.0),
'band5' : (163.0, 211.0),
'band6' : (211.0, 275.0),
'band7' : (275.0, 373.0),
'band8' : (385.0, 500.0),
'band9' : (602.0, 720.0) }
# Receiver band distribution
bandPercent = { 'band3' : 0.26,
'band4' : 0.018,
'band5' : 0.015,
'band6' : 0.33,
'band7' : 0.221,
'band8' : 0.078,
'band9' : 0.078 }
# Frequency distribution
numFreqs = 1000
freqs = []
for band in bandPercent.keys():
nfreq = int( bandPercent[band] * numFreqs )
for i in range(nfreq):
f = bandLimits[band][0] + random.random() * ( bandLimits[band][1] - bandLimits[band][0] )
freqs.append(f)
#
# Observation time distribution
#
times = []
# distribution shape parameters (hours)
t1 = 6.0 # 3-8 hours
d1 = 10.0
t2 = 30.0
d2 = 5.0
tmax = 40.0 # 40-50 hours
deltat = 0.1
t = 0
while t < t1:
n = (d1/t1)*t
for i in range(int(n)):
times.append(t + random.random() * deltat)
t = t + deltat
while t < t2:
n = d1 + (t-t1)*(d2-d1)/(t2-t1)
for i in range(int(n)):
times.append(t + random.random() * deltat)
t = t + deltat
while t < tmax:
n = d2 - d2*(t-t2)/(tmax-t2)
for i in range(int(n)):
times.append(t + random.random() * deltat)
t = t + deltat
#
# Array requested distribution
#
ACA = 0.25
BL = 1.0 - ACA
arrayRequestedDist = []
for i in range(int(100*ACA)): arrayRequestedDist.append("ACA")
for i in range(int(100*BL)): arrayRequestedDist.append("TWELVE_M")
#
# Sensitivity goal
#
def sensitivity(freq, obsTime, decl):
exposureTime = 0.5 * obsTime
bandwidth = 4.0
numberAntennas = 50
antennaDiameter = 12.0
latitude = -23.0
opacity = 0.2
atmBrightnessTemp = 0.26
return pointSourceSensitivity(exposureTime, freq, bandwidth, decl,
numberAntennas, antennaDiameter, latitude, opacity, atmBrightnessTemp)
#
# Science score and rank
#
scores = []
min_score = 1.0
max_score = 7.0
for i in range(nschedblks):
scores.append(random.uniform(min_score, max_score))
scores.sort()
prjHeader = """<?xml version="1.0" encoding="UTF-8"?>
<ObsProject xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ArchiveUID>%uid%</ArchiveUID>
<Code>%code%</Code>
<Name>%name%</Name>
<PrincipalInvestigator>%piid%</PrincipalInvestigator>
<ScientificScore>%score%</ScientificScore>
<ScientificRank>%rank%</ScientificRank>
<ObsUnitSet>
"""
prjFooter = """ </ObsUnitSet>
</ObsProject>
"""
sbTemplate = """ <SchedBlock>
<ArchiveUID>%uid%</ArchiveUID>
<Name>%name%</Name>
<WeatherConstraints maxWindVelocity="0.0" maxOpacity="0.0" minPhaseStability="0.0"
maxSeeing="0.0"/>
<SchedulingConstraints representativeTargetIdRef="T1" maxAngularResolution="%resolution%" representativeFrequency="%freq%"/>
<Preconditions maxAllowedHA="4.0" minAllowedHA="-4.0"/>
<ObsParameters id="OP1">
<ScienceParameters duration="180.0" representativeBandwidth="4.0"
representativeFrequency="%freq%" sensitivityGoal="%sensitivity%"/>
</ObsParameters>
<InstrumentSpec id="SS1">
<SpectralSpec/>
</InstrumentSpec>
<Target id="T1" sourceIdRef="S1" instrumentSpecIdRef="SS1"/>
<FieldSource id="S1" name="%srcname%" RA="%ra%" Dec="%dec%" pmRA="0.0" pmDec="0.0"/>
<SchedBlockControl>
<MaximumTime>%maxtime%</MaximumTime>
<EstimatedExecutionTime>%exectime%</EstimatedExecutionTime>
<ArrayRequested>%array_requested%</ArrayRequested>
<IndefiniteRepeat>false</IndefiniteRepeat>
</SchedBlockControl>
</SchedBlock>
"""
def createObsProjectFile(i, params):
sbTime = 0.5
fileName = "GenObsProject%04d.xml" % i
file = open(fileName, 'w')
srcName = "%.4f-%.3f" % (params[0][0], params[0][1])
prjxml = prjHeader
prjxml = prjxml.replace("%uid%", "uid://X0/X%d/X1" % i)
prjxml = prjxml.replace("%code%", "2910.1.%d.N" % i)
prjxml = prjxml.replace("%name%", "Test Project %03d" % i)
prjxml = prjxml.replace("%piid%", pis[random.randint(0, len(pis)-1)])
prjxml = prjxml.replace("%score%", str(params[6]))
prjxml = prjxml.replace("%rank%", str(int(params[7])))
sbxml = sbTemplate
sbxml = sbxml.replace("%uid%", "uid://X0/X%d/X2" % i)
sbxml = sbxml.replace("%name%", "Test SchedBlock %03d" % i)
sbxml = sbxml.replace("%srcname%", srcName)
sbxml = sbxml.replace("%ra%", str(params[0][0]))
sbxml = sbxml.replace("%dec%", str(params[0][1]))
sbxml = sbxml.replace("%freq%", str(params[1]))
sbxml = sbxml.replace("%maxtime%", str(params[2]))
sbxml = sbxml.replace("%exectime%", str(sbTime))
sbxml = sbxml.replace("%sensitivity%", str(params[4]))
sbxml = sbxml.replace("%resolution%", str(params[3]))
sbxml = sbxml.replace("%array_requested%", str(params[5]))
# prjTime = params[2]
# while prjTime > 0:
# prjxml = prjxml + sbxml
# prjTime = prjTime - 0.5
prjxml = prjxml + sbxml
prjxml = prjxml + prjFooter
file.write(prjxml)
file.close()
# createObsProjectFile(1, sbparams[0])
#
# SchedBlock generation
#
# List of tuples with SB parameters
# 0 - tuple with RA, Dec coordinates (degrees, degrees)
# 1 - frequency (GHz)
# 2 - project total observation time (hours)
# 3 - resolution (arcseconds)
# 4 - sensitivity goal (Jy)
# sbparams = []
for i in range(nschedblks):
src_coords = points[random.randint(0, len(points)-1)]
freq = freqs[random.randint(0, len(freqs)-1)]
obsTime = times[random.randint(0, len(times)-1)]
resolution = random.uniform(0.001, 3.0)
arrayReq = arrayRequestedDist[random.randint(0, len(arrayRequestedDist)-1)]
# sbparams.append((src_coords, freq, obsTime, resolution, sensitivity(freq, obsTime)))
createObsProjectFile(i, (src_coords, freq, obsTime, resolution, sensitivity(freq, obsTime, src_coords[1]), arrayReq, scores[i], i))
# print sbparams
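# Running this script as-is writes nschedblks (500) files named
# GenObsProject0000.xml ... GenObsProject0499.xml into the current directory,
# one SchedBlock per ObsProject (the per-project multi-SB loop is left commented out above).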
|
[
"rhiriart@523d945c-050c-4681-91ec-863ad3bb968a"
] |
rhiriart@523d945c-050c-4681-91ec-863ad3bb968a
|
|
827bd0efeae805a3e97813fb60fedffd62025ab3
|
af2ceb24cd4b9d00d0e25beed00a04da119842c0
|
/소수구하기.py
|
b5fbda48da6bc35d6aa06c7f55cc59ad4c25d043
|
[] |
no_license
|
100jy/algorithm
|
f3ed175f705561cd30390310fc7ca21abfbbe578
|
21acc9ac663c3a70fa175b342c212baaf4f8f2f6
|
refs/heads/master
| 2023-03-30T01:32:19.693019
| 2021-04-13T04:15:44
| 2021-04-13T04:15:44
| 323,490,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
import math
def is_prime(k):
if k == 1:
return False
for i in range(2, int(math.sqrt(k)) + 1):
if k % i == 0:
return False
return True
start, end = list(map(int, input().split()))
for i in range(start, end+1):
if is_prime(i):
print(i)
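# Example run: the input "10 20" prints the primes in [10, 20]:
# 11
# 13
# 17
# 19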
|
[
"wnduq08217@naver.com"
] |
wnduq08217@naver.com
|
76bdbb0fe0cc41a49570b67005d8f89ce7235ddf
|
243f64a13a1f496a08d05508ccf73c61e03a69de
|
/max_even_seq/subs/2017B/98.py
|
27c9f1212ac347a6dd0ced0ec52f0adb9fe87ba5
|
[] |
no_license
|
RazLandau/pybryt
|
5f3e946a99338fb159d6044098380bce2aacdc6f
|
8973b15fc48d1f278e7b8a3990d2f73a3bffb128
|
refs/heads/main
| 2023-08-19T12:30:46.622088
| 2021-10-14T18:35:21
| 2021-10-14T18:35:21
| 350,324,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
def max_even_seq(n):
    count = 0
    max_count = 0
    for i in str(n):
        if int(i) % 2 != 0 and count > 0:
            if count > max_count:
                max_count = count
            count = 0
        elif int(i) % 2 == 0:
            count = count + 1
    if count > max_count:
        max_count = count
    return max_count
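# Examples (longest run of even digits in the decimal representation):
# max_even_seq(124683) -> 4   (the run 2, 4, 6, 8)
# max_even_seq(13579)  -> 0   (no even digit)
# max_even_seq(2468)   -> 4   (the final check catches a run that ends the number)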
|
[
"landau.raz@gmail.com"
] |
landau.raz@gmail.com
|
e3526409f06dc1174eb50b59d35056c81db5e60a
|
4ee1fdc3b7369bbf872a092084e105760af99f34
|
/Week14/todo-back/demo/api/migrations/0030_auto_20190506_1703.py
|
e3eb0779ec35079c7610b22459337eec8267acd9
|
[] |
no_license
|
ukozhakova/webdev2019
|
55ba3417b7e6976969ca2fe89f733870d32ea88e
|
02f1289e374a46f01e10c06037ccbf458976be38
|
refs/heads/master
| 2022-11-08T10:37:51.424810
| 2019-05-06T12:55:55
| 2019-05-06T12:55:55
| 167,842,221
| 0
| 1
| null | 2022-10-18T19:36:49
| 2019-01-27T18:19:26
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
# Generated by Django 2.2 on 2019-05-06 11:03
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0029_auto_20190506_1527'),
]
operations = [
migrations.AlterField(
model_name='task',
name='created_at',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 5, 6, 17, 3, 40, 9406)),
),
migrations.AlterField(
model_name='task',
name='due_on',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 5, 8, 17, 3, 40, 9406)),
),
]
|
[
"you@example.com"
] |
you@example.com
|
630e0c0147d040f5a67bed01779e5ef723a8fe05
|
8af2850a23159f89adf18e13a0bb2f817c01df56
|
/HamroRecipe/recipies/migrations/0004_auto_20200212_2021.py
|
09dce2328681ad2981707bd183e03f81960f5bfe
|
[] |
no_license
|
MunaShrestha/HamroRecipes
|
878e7243c5e17cbffc01b38c641bd913fc9e0586
|
ce617beef13d08de9e04bf5b2fcd322e876cfe29
|
refs/heads/master
| 2021-01-04T00:02:56.581853
| 2020-02-13T14:36:19
| 2020-02-13T14:36:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
# Generated by Django 3.0.2 on 2020-02-12 14:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipies', '0003_auto_20200212_1837'),
]
operations = [
migrations.RemoveField(
model_name='recipe',
name='incredient1',
),
migrations.RemoveField(
model_name='recipe',
name='incredient10',
),
migrations.RemoveField(
model_name='recipe',
name='incredient11',
),
migrations.RemoveField(
model_name='recipe',
name='incredient12',
),
migrations.RemoveField(
model_name='recipe',
name='incredient13',
),
migrations.RemoveField(
model_name='recipe',
name='incredient14',
),
migrations.RemoveField(
model_name='recipe',
name='incredient15',
),
migrations.RemoveField(
model_name='recipe',
name='incredient16',
),
migrations.RemoveField(
model_name='recipe',
name='incredient17',
),
migrations.RemoveField(
model_name='recipe',
name='incredient2',
),
migrations.RemoveField(
model_name='recipe',
name='incredient3',
),
migrations.RemoveField(
model_name='recipe',
name='incredient4',
),
migrations.RemoveField(
model_name='recipe',
name='incredient5',
),
migrations.RemoveField(
model_name='recipe',
name='incredient6',
),
migrations.RemoveField(
model_name='recipe',
name='incredient7',
),
migrations.RemoveField(
model_name='recipe',
name='incredient8',
),
migrations.RemoveField(
model_name='recipe',
name='incredient9',
),
migrations.AddField(
model_name='recipe',
name='incredients',
field=models.TextField(default=1),
),
]
|
[
"bijay.simkhada26@gmail.com"
] |
bijay.simkhada26@gmail.com
|
129ad0ae42a5127e1e2441625589fcb11df25df4
|
bcb6a520fb71ce56a671743c5dd6cc5a138cecb0
|
/apps/usuarios/models.py
|
de526f4ebe231e65942e1da6b41cac58360ccaef
|
[] |
no_license
|
mendozalvarito/proyectosafin
|
8bb840796586f2067f60b3475d19f246693919a8
|
ce1c2519dd88a0636ff3b70b771bb11c5d93759f
|
refs/heads/master
| 2020-04-20T22:47:15.754723
| 2019-01-28T20:52:02
| 2019-01-28T20:52:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from django.db import models
# Create your models here.
class TimeStampModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
#TimeStampModel
|
[
"ajerhy@gmail.com"
] |
ajerhy@gmail.com
|
7c736a7f77475871807bec7ae1398458622380c3
|
2b9205984018538b96390692500b3d9fd4622096
|
/photos/migrations/0001_initial.py
|
02310dbb5e0e27df0a385a4d7a8e286e5af80ef6
|
[
"MIT"
] |
permissive
|
JECINTA534521/The-photo-gallery-project
|
e1b64152dbf700b97859cfa2c1eaecd4dfc7c17b
|
2187cc4cdc7cd3d003ed3221a795e052bd0dfb32
|
refs/heads/master
| 2021-09-09T18:19:22.114668
| 2019-11-11T14:46:26
| 2019-11-11T14:46:26
| 220,175,371
| 0
| 0
| null | 2021-09-08T01:25:21
| 2019-11-07T07:19:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-11-07 14:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_name', models.CharField(max_length=30)),
('image_description', models.TextField()),
('image_path', models.ImageField(upload_to='photos/')),
('image_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.Category')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location_name', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='image',
name='image_location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.Location'),
),
]
|
[
"jecintawanjirug@gmail.com"
] |
jecintawanjirug@gmail.com
|
9f74386bfd8d6201ad4afbb4c3e05cb520769647
|
d9fb9c229740eafe890d5ebc4c87cebce8c354d1
|
/garudaHack2020Service/package/bin/rst2html5.py
|
15f3cf8d74522ae83b2834fc3d92cefca3ef11e5
|
[] |
no_license
|
sutjin/garuda-hacks-2020
|
9f96bc543bc9ea946a7b35c3332e3c89134db871
|
c8d1a4c0946b25703a41812d49ad669d2895a0ac
|
refs/heads/master
| 2022-12-02T12:35:43.602849
| 2020-08-15T14:40:52
| 2020-08-15T14:40:52
| 287,497,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
#!d:\development\garuda-hacks-2020\venv\scripts\python.exe
# -*- coding: utf8 -*-
# :Copyright: © 2015 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
#
# Revision: $Revision: 7847 $
# Date: $Date: 2015-03-17 18:30:47 +0100 (Di, 17. Mär 2015) $
"""
A minimal front end to the Docutils Publisher, producing HTML 5 documents.
The output also conforms to XHTML 1.0 transitional
(except for the doctype declaration).
"""
try:
import locale # module missing in Jython
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
from docutils.core import publish_cmdline, default_description
description = (u'Generates HTML 5 documents from standalone '
u'reStructuredText sources '
+ default_description)
publish_cmdline(writer_name='html5', description=description)
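# Typical invocation, following the standard Docutils front-end conventions
# (positional source and destination; stdin/stdout when omitted):
#   python rst2html5.py document.rst document.html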
|
[
"sutjin@hotmail.com"
] |
sutjin@hotmail.com
|
bd7a30f240768161198e1692602d698c9989af03
|
011799be8bb4efb5220be04d6b853a695740d2bb
|
/generate_chunks.py
|
0361e7daf7cec0e51da2fd38378d114b4707149b
|
[] |
no_license
|
codedearta/arctic-demo
|
d163a192a647d26bddfe84fc49e5b15c44b7bce8
|
68e15ef2812d731d5de13a0ece79a74dd8a0d97d
|
refs/heads/master
| 2021-05-07T07:32:38.296083
| 2017-11-06T15:39:41
| 2017-11-06T15:39:41
| 109,137,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,465
|
py
|
#!/usr/bin/python
import random
from arctic import CHUNK_STORE, Arctic
from datetime import datetime
import datetime as dt
import pandas as pd
import sys, getopt
import time
print(sys.path)
dateTimeFormat = '%Y-%m-%d %H:%M'
def parseOptions():
libraryName = 'ticks'
symbol = 'mdb'
beginDate = datetime.strptime('2013-01-01 09:30', dateTimeFormat)
numberOfDays = 366*5
host = 'localhost:27017'
chunkSize = 'M'
try:
opts, args = getopt.getopt(sys.argv[1:],"l:s:b:n:h:c:",["libraryName=","symbol=", "beginDate=", "numberOfDays=", "host=", "chunkSize="])
except getopt.GetoptError as err:
print(err)
printOptions()
sys.exit(2)
for opt, arg in opts:
if opt in ("-l", "--libraryName"):
libraryName = arg
elif opt in ("-s", "--symbol"):
symbol = arg
elif opt in ("-b", "--beginDate"):
beginDate = datetime.strptime(arg, dateTimeFormat)
elif opt in ("-n", "--numberOfDays"):
numberOfDays = int(arg)
elif opt in ("-h", "--host"):
host = arg
elif opt in ("-c", "--chunkSize"):
chunkSize = arg
return (libraryName,symbol,beginDate,numberOfDays,host,chunkSize)
def printOptions():
print(' -l [ --libraryName ] arg name of the library in MongoDB')
print(' -s [ --symbol ] arg symbol of the time series')
    print(' -b [ --beginDate ] arg begin date of the time series. Format "2013-01-01 09:30", i.e. "%Y-%m-%d %H:%M"')
print(' -n [ --numberOfDays ] arg number of days of the time series')
print(' -h [ --host ] arg MongoDB hostname')
print(' -c [ --chunkSize ] arg possible: D, M, Y for (Day, Month, Year)')
libraryName,symbol,beginDate,numberOfDays,host,chunkSize = parseOptions()
def printUsedOptions():
print("using argument '1:libraryName': " + libraryName)
print("using argument '2:symbol': " + symbol)
print("using argument '3:beginDate': " + str(beginDate))
print("using argument '4:numberOfDays': " + str(numberOfDays))
print("using argument '5:host': " + host)
print("using argument '6:chunkSize': " + chunkSize)
printUsedOptions()
def generateDayDataFrame(tradingDateTime):
def genRecord(seconds):
date = tradingDateTime + dt.timedelta(milliseconds=seconds)
askBid = 'ASK' if random.random() < 0.5 else 'BID'
price = random.uniform(24, 100)
amount = random.randrange(10000)
return (date, askBid, price, amount)
records = map(genRecord, range(0, 25200 * 1000, 333))
dates,askBids,prices,amounts = map(list, zip(*records))
df = pd.DataFrame(data={'askBid': askBids, 'amount': amounts, 'price': prices, 'date': dates})
return df
dates = pd.date_range(beginDate, periods=numberOfDays).tolist()
frames = map(generateDayDataFrame, dates)
store = Arctic(host)
#
store.initialize_library(libraryName, lib_type=CHUNK_STORE)
#
# # Access the library
library = store[libraryName]
# singdf = pd.DataFrame(data={'data': [1], 'date': [beginDate] })
# library.write(symbol, singdf)
start = time.time()
isFirst = True
for frame in frames:
if isFirst:
library.write(symbol, frame, chunk_size=chunkSize)
isFirst = False
else:
library.append(symbol, frame)
# library.append(symbol, frame, chunk_size=chunkSize)
end = time.time()
print("elapsed time to write data: " + str(end - start))
|
[
"sepp.renfer@gmail.com"
] |
sepp.renfer@gmail.com
|
d7c711a4ceceeea34d54d0627cbd96a2efb4f943
|
b6d48319f4f49b8384da640d57cb1dc5b75fa30d
|
/weather_report.py
|
512ab89b88402114ac45c5a132d2b7d5cda539eb
|
[] |
no_license
|
ksaswin/Weather-data-using-python
|
e2160b7413e50b5e31717b783126c12c2b112f23
|
a3a6bbe1af5cae753f42ef1b2193cb3a2255f67d
|
refs/heads/master
| 2023-02-15T17:28:11.822645
| 2021-01-11T18:38:49
| 2021-01-11T18:38:49
| 293,256,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
import json
import urllib.request
import urllib.parse
baseurl = 'https://api.openweathermap.org/data/2.5/weather?q='
api_key = 'API_KEY' #Create an account in https://home.openweathermap.org/users/sign_in and paste your personal API_KEY here
location = input("Enter location: ")
complete_url = baseurl + location + '&APPID=' + api_key + '&units=metric'
info = json.loads(urllib.request.urlopen(complete_url).read().decode('utf-8'))
print("Weather status in", info["name"])
print("Weather: ", info["weather"][0]["main"])
print("Weather description: ", info["weather"][0]["description"].capitalize())
print("Temperature: ", str(info["main"]["temp"]) + "°C")
print("Feels: ", str(info["main"]["feels_like"]) + "°C")
print("Wind speed: ", str(info["wind"]["speed"]) + "m/s")
|
[
"noreply@github.com"
] |
ksaswin.noreply@github.com
|
bf3ba99acdc16be621c35be9ff9704ab091e23bd
|
e081eebc37aef48084fa62a1b36443f03b9e2abe
|
/Collisions.py
|
113b2fcfd23ceb133859a9239a70ecfc22ae212b
|
[] |
no_license
|
S-C-U-B-E/CodeChef-Practise-Beginner-Python
|
93fa202eede83cf4f58177bffb4ecc4ddb7f19bc
|
78a02303b3cdd7eb7b0c45be59a1c282234f8719
|
refs/heads/master
| 2021-03-24T01:10:43.913150
| 2020-03-16T12:49:12
| 2020-03-16T12:49:12
| 247,501,633
| 16
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# cook your dish here
for _ in range(int(input())):
    n, m = [int(x) for x in input().split()]
    a = []
    c = 0
    for _ in range(n):
        a.append([int(i) for i in input()])
    for i in range(m):
        for j in range(n):
            if a[j][i] == 1:
                for k in range(j + 1, n):
                    if a[k][i] == 1:
                        c += 1
    print(c)
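# For each column, the nested j/k loops enumerate every unordered pair of rows
# that both hold a 1, so a column containing k ones contributes k*(k-1)/2 to c.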
|
[
"sssanyal10@gmail.com"
] |
sssanyal10@gmail.com
|
dbe03be4bbe0d278643dfe46f4d5a7358eb1e8a2
|
8d98bcb23037c7a10d088a82b0645b28a17e27a3
|
/loop/break.py
|
d6f861ca676d5aec6af068c4c37c8c131a08b81d
|
[] |
no_license
|
anggadarkprince/learn-python
|
c7016886fb7ed85925bc7dc011a50cee52cf08d1
|
2f612ff0f6bd28a5ba4849d263811612c640828c
|
refs/heads/master
| 2021-01-13T13:19:27.864949
| 2016-11-03T02:41:25
| 2016-11-03T02:41:25
| 72,619,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
number = 0
total = 0
average = 0.0
count = 0
# loop until something triggered to stop
while True:
print("Enter a number: ")
    number = float(raw_input())  # Python 2's raw_input; on Python 3 use input() instead
if number == -1:
# stop and exit from the loop
break
total = total + number
count = count + 1
average = total / count
print("Average: " + str(average))
|
[
"anggadarkprince@gmail.com"
] |
anggadarkprince@gmail.com
|
05108c794f6d644ab95f556ce40ff26227122432
|
d1386ad2ced40ef89190a1ad3f3ce7969aa1318e
|
/advent of code - 2021/hubspot/LRU Cache.py
|
51a1cbe1210c37c3ca5f8671cf150a2cb40e4c60
|
[] |
no_license
|
achunero/Problems-sloved
|
442a4cff28e4bd774d69e7ad7cf8afd098aad892
|
089f78a6c6f4c309d93e0b98c14636dbd13cddeb
|
refs/heads/master
| 2022-06-17T06:03:28.397107
| 2022-05-20T13:14:26
| 2022-05-20T13:14:26
| 15,371,447
| 0
| 0
| null | 2022-05-20T13:16:04
| 2013-12-22T06:31:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
class Node:
def __init__(self, key, value):
self.key = key
self.value = value
self.prev = None
self.next = None
class LRUCache:
def __init__(self, capacity: int):
self.capacity = capacity
self.length = 0
self.hash = {}
self.head = Node(-1, -1)
self.tail = self.head
def get(self, key: int) -> int:
if key not in self.hash:
return -1
node = self.hash[key]
val = node.value
if node.next:
node.prev.next = node.next
node.next.prev = node.prev
self.tail.next = node
node.prev = self.tail
node.next = None
self.tail = node
return val
def put(self, key: int, value: int) -> None:
if key in self.hash:
node = self.hash[key]
node.value = value
if node.next:
node.prev.next = node.next
node.next.prev = node.prev
self.tail.next = node
node.prev = self.tail
node.next = None
self.tail = node
else:
node = Node(key, value)
self.hash[key] = node
self.tail.next = node
node.prev = self.tail
self.tail = node
self.length += 1
if self.length > self.capacity:
remove = self.head.next
self.head.next = self.head.next.next
self.head.next.prev = self.head
del self.hash[remove.key]
self.length -= 1
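# A short self-check of the doubly-linked-list LRU behaviour (a sketch using
# the classic capacity-2 sequence):
if __name__ == "__main__":
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1      # touching key 1 makes key 2 least-recent
    cache.put(3, 3)               # capacity exceeded: evicts key 2
    assert cache.get(2) == -1
    assert cache.get(3) == 3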
|
[
"noreply@github.com"
] |
achunero.noreply@github.com
|
5f2cc966d93877550c0877012738e9c25a98cdc8
|
9376b76d63a50f36c4e613924a94d906902ae775
|
/app/main/config.py
|
c3206662aaf8e8689e5e808f8601da5cca1a2b05
|
[] |
no_license
|
fremfi/casting-agency-api-python-flask
|
a6065905034c46581a00c60092fb4b7eeb32d557
|
74d74cef23dec296b342eb8b63471d8dc63a3ced
|
refs/heads/master
| 2022-12-11T06:36:33.560592
| 2019-10-29T15:27:39
| 2019-10-29T15:27:39
| 217,799,761
| 0
| 0
| null | 2022-12-08T06:47:01
| 2019-10-27T03:24:39
|
Python
|
UTF-8
|
Python
| false
| false
| 816
|
py
|
import os
postgres_url = os.environ['DATABASE_URL']
test_postgres_url = os.environ['TEST_DATABASE_URL']
dev_postgres_url = os.environ['DEV_DATABASE_URL']
basedir = os.path.abspath(os.path.dirname(__file__))
class DevelopmentConfig():
DEBUG = True
ENV = 'development'
SQLALCHEMY_DATABASE_URI = dev_postgres_url
SQLALCHEMY_TRACK_MODIFICATIONS = False
class TestingConfig():
DEBUG = True
TESTING = True
ENV = 'testing'
SQLALCHEMY_DATABASE_URI = test_postgres_url
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig():
DEBUG = False
ENV = 'production'
SQLALCHEMY_DATABASE_URI = postgres_url
config_by_name = dict(
development=DevelopmentConfig,
testing=TestingConfig,
production=ProductionConfig
)
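# Hedged usage sketch (assumes a standard Flask app factory elsewhere):
#   app.config.from_object(config_by_name[os.getenv('FLASK_ENV', 'development')])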
|
[
"frederick.mfinanga@gmail.com"
] |
frederick.mfinanga@gmail.com
|
b66ffec97541d2bb7e8bedd642c5e38eea68043a
|
d91232083e3bdd19a2cb3769aacae46b869898af
|
/twostones.py
|
86c665485792b6b569868ef129545038abacfab9
|
[] |
no_license
|
pawanprjl/Kattis
|
01329c10e5796415b04f868582ae69ec1e759b49
|
4b9d049dfa77650e0a09c1a431c9704283b7d004
|
refs/heads/master
| 2020-08-29T08:18:10.459768
| 2019-10-28T16:04:10
| 2019-10-28T16:04:10
| 217,979,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
N = int(input())
if(N%2==0):
print("Bob")
else:
print("Alice")
|
[
"pawanparajuli8@gmail.com"
] |
pawanparajuli8@gmail.com
|
fc6d704bfb1ac54421bcf6bb3ddb23d796895fd9
|
bec6f966238003c157027014b081521dc999e7c8
|
/WeatherApp/getHightstown.py
|
c5147308f905e4877cc847f0aed271e3d87435f6
|
[] |
no_license
|
BDMC786/WeatherApp
|
ec583e05108a019bced90285c328065c2a1058df
|
fbafac891d852aded3e96f68fad52ac4e9754c40
|
refs/heads/master
| 2022-12-16T19:59:56.829167
| 2021-07-28T20:59:45
| 2021-07-28T20:59:45
| 219,050,719
| 0
| 0
| null | 2022-12-08T07:00:17
| 2019-11-01T19:34:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,084
|
py
|
def getHightstown():
import json
import requests
from datetime import datetime as dt
import datetime, pytz
url = "https://api.darksky.net/forecast/5fc1f583fa2e15d8a27208e502ba5fb0/40.2695538,-74.5232089"
place = "Hightstown, NJ"
#Make API Call
response = requests.get(url)
response_json = response.json()
time_adjust = -18000
#Get Current Conditions from API data
current_weather = response_json["currently"]
#Pull data from current conditions
current_dict = {}
current_list_try = ["apparentTemperature", "cloudCover", "humidity", "precipIntensity", "precipAccumulation", "precipProbability", "summary", "temperature", "time", "uvIndex"]
for elements in current_list_try:
if elements in current_weather:
current_dict[elements] = current_weather[elements]
#Adjusts Time to local time
current_dict["time"] = current_dict["time"] + time_adjust
#Gets the Day of the week and the time in AM PM format
current_dict["time"] = f"{dt.utcfromtimestamp(current_dict['time']).strftime('%A')} {dt.utcfromtimestamp(current_dict['time']).strftime('%r')}"
#Correct Formats Current
current_dict["apparentTemperature"] = f'{round(current_dict["apparentTemperature"])} °F' #Round
current_dict["cloudCover"] = f'{round(current_dict["cloudCover"] * 100)}%' #Percentage
current_dict["humidity"] = f'{round(current_dict["humidity"] * 100)}%'
current_dict["precipProbability"] = f'{round(current_dict["precipProbability"] * 100)}%'
current_dict["temperature"] = f'{round(current_dict["temperature"])} °F'
if "precipAccumulation" in current_dict:
current_dict["precipAccumulation"] = f'Snowfall Accumulation: {current_dict["precipAccumulation"]} Inches'
#Hourly Conditions
hourly_weather = response_json["hourly"]["data"]
#Pull data from Hourly Conditions
hourly_list = []
hourly_elements_try = ["apparentTemperature", "cloudCover", "humidity", "precipIntensity", "precipAccumulation", "precipProbability",
"summary", "temperature", "time", "uvIndex", "windSpeed"]
hourly_elements = []
for hours in hourly_weather:
hour_dict = {}
for element in hourly_elements_try:
if element in hours:
hour_dict[element] = hours[element]
if element not in hourly_elements:
hourly_elements.append(element)
try:
hour_dict["time"] = dt.utcfromtimestamp(int(hours["time"]) + int(time_adjust)).strftime('%A %r')
except:
print('hour_dict["time"] failed')
try:
hour_dict["apparentTemperature"] = f'{round(hour_dict["apparentTemperature"])} °F' #Round
except:
print('hour_dict["apparentTemperature"] failed')
try:
hour_dict["cloudCover"] = f'{round(hour_dict["cloudCover"] * 100)}%' #Percentage
except:
print('hour_dict["cloudCover"] failed')
try:
hour_dict["humidity"] = f'{round(hour_dict["humidity"] * 100)}%'
except:
print('hour_dict["humidity"] failed')
try:
hour_dict["precipProbability"] = f'{round(hour_dict["precipProbability"] * 100)}%'
except:
print('hour_dict["precipProbability"] failed')
try:
hour_dict["temperature"] = f'{round(hour_dict["temperature"])} °F'
except:
print('hour_dict["temperature"] failed')
try:
hour_dict["windSpeed"] = round(hour_dict["windSpeed"])
except:
print('hour_dict["windSpeed"]')
hourly_list.append(hour_dict)
#Daily
daily_weather = response_json["daily"]["data"]
#Pull data from Daily Conditions
daily_elements_try = ["cloudCover", "humidity", "icon", "precipIntensity", "precipIntensityMax",
"precipIntensityMaxTime", "precipProbability", "precipType", "precipAccumulation", "summary", "sunriseTime", "sunsetTime", "temperatureHigh",
"temperatureHighTime", "temperatureLow", "temperatureLowTime", "time", "uvIndex", "uvIndexTime", "windSpeed"]
daily_elements = []
daily_list = []
for days in daily_weather:
daily_dict = {}
for elements in daily_elements_try:
# print("elements")
if elements in days:
# print(elements)
daily_dict[elements] = days[elements]
# print(daily_dict[elements])
if elements not in daily_elements:
daily_elements.append(elements)
# print(daily_elements)
try:
daily_dict["cloudCover"] = f'{round(daily_dict["cloudCover"] * 100)}%'
except:
print('daily_dict["cloudCover"] failed')
try:
daily_dict["humidity"] = f'{round(daily_dict["humidity"] * 100)}%'
except:
print('daily_dict["humidity"] failed')
try:
daily_dict["precipProbability"] = f'{round(daily_dict["precipProbability"] * 100)}%'
except:
print('daily_dict["precipProbability"] failed')
try:
daily_dict["temperatureHigh"] = f'{round(daily_dict["temperatureHigh"])} °F'
except:
print('daily_dict["temperatureHigh"] failed')
try:
daily_dict["temperatureLow"] = f'{round(daily_dict["temperatureLow"])} °F'
except:
print('daily_dict["temperatureLow"] failed')
try:
daily_dict["windSpeed"] = round(daily_dict["windSpeed"])
except:
print('daily_dict["windSpeed"] failed')
#TIMES
try:
daily_dict["precipIntensityMaxTime"] = f"{dt.utcfromtimestamp(daily_dict['precipIntensityMaxTime'] + time_adjust).strftime('%r')}"
except:
print('daily_dict["precipIntensityMaxTime"] failed')
try:
daily_dict["sunriseTime"] = f"{dt.utcfromtimestamp(daily_dict['sunriseTime'] + time_adjust).strftime('%r')}"
except:
print('daily_dict["sunriseTime"] failed')
try:
daily_dict["sunsetTime"] = f"{dt.utcfromtimestamp(daily_dict['sunsetTime'] + time_adjust).strftime('%r')}"
except:
print('daily_dict["sunsetTime"] failed')
try:
daily_dict["temperatureHighTime"] = f"{dt.utcfromtimestamp(daily_dict['temperatureHighTime'] + time_adjust).strftime('%r')}"
except:
print('daily_dict["temperatureHighTime"] failed')
try:
daily_dict["temperatureLowTime"] = f"{dt.utcfromtimestamp(daily_dict['temperatureLowTime'] + time_adjust).strftime('%r')}"
except:
print('daily_dict["temperatureLowTime"] failed')
try:
daily_dict["time"] = f"{dt.utcfromtimestamp(daily_dict['time'] + time_adjust).strftime('%A %B %d')}"
except:
print('daily_dict["time"] failed')
try:
daily_dict["uvIndexTime"] = f"{dt.utcfromtimestamp(daily_dict['uvIndexTime'] + time_adjust).strftime('%r')}"
except:
print('daily_dict["uvIndexTime"] failed')
# print(daily_dict["precipIntensityMaxTime"])
daily_list.append(daily_dict)
#Add all data sets to a list to pass to app
try:
next_hour = response_json["minutely"]["summary"]
except:
print("next hour failed")
next_hour = "Unavailable"
data = [place, current_dict, hourly_list, daily_list, next_hour]
#Add Alerts
if "alerts" in response_json:
all_alerts = response_json["alerts"]
alerts = []
for messages in all_alerts:
alerts.append(messages["description"])
else:
alerts = []
alerts.append("CLEAR")
data.append(alerts)
print("END")
return data
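# getHightstown() returns a positional list:
#   data[0] place name, data[1] current-conditions dict, data[2] hourly list,
#   data[3] daily list, data[4] next-hour summary, data[5] alerts list.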
|
[
"noreply@github.com"
] |
BDMC786.noreply@github.com
|
cc1d3257bd543bbfeaca6caaef848986db12fd41
|
1140372e0ddd597f4092150d23fb50c3f2e91d8d
|
/generate_dynamo_json.py
|
0a783ba3fc5223dbad4b743f26139716c1252558
|
[
"Apache-2.0"
] |
permissive
|
acuevas-15/dynamo-db-generate-bulk-upload-json
|
096eb0f03cfcf5b4f4434269112a09e0d3d70623
|
5d482ef2766ce674e98ee5605440e69e16baea4f
|
refs/heads/main
| 2023-02-23T18:37:52.839379
| 2021-01-30T19:10:10
| 2021-01-30T19:10:10
| 334,486,125
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
from __future__ import print_function
import csv
import json
import ntpath
import sys
def generate_dynamo_batch_json(name):
"""
using a file name pointing to a csv that has been exported from dynamo db, or conforms
to the expected format: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html
returns a json string representation.
"""
input_filename = name
name = ntpath.basename(name).split('.')[0]
with open(input_filename, 'r') as csvfile:
csvreader = csv.reader(csvfile)
header = next(csvreader, None)
fields = []
for field in header:
field_metadata = {}
parts = field.split(' ') # ex: 'CountryId (S)' yields ['CountryId', '(S)']
field_metadata["field_name"] = parts[0]
field_metadata["field_type"] = parts[1][1:-1]
fields.append(field_metadata)
output_items = []
for row in csvreader:
output_item = {}
for i, column_value in enumerate(row):
field_name = fields[i]["field_name"]
field_type = fields[i]["field_type"]
field_value = column_value
field = { field_type: field_value }
output_item[field_name] = field
output_items.append({
"PutRequest": {
"Item": output_item
}
})
return json.dumps({ name: output_items }, indent=2)
if __name__ == "__main__":
arg_len = len(sys.argv)
if arg_len > 2:
print("ERROR: missing input csv file name", file=sys.stderr)
print(" usage: python generate_dynamo_json.py [input_filename]", file=sys.stderr)
sys.exit()
elif arg_len == 1:
print("usage: python generate_dynamo_json.py [input_filename]")
sys.exit()
file_name = sys.argv[1]
print(generate_dynamo_batch_json(file_name))
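# Example: a csv with DynamoDB-typed headers
#   CountryId (S),Population (N)
#   US,331000000
# saved as countries.csv produces:
#   {"countries": [{"PutRequest": {"Item": {"CountryId": {"S": "US"},
#                                           "Population": {"N": "331000000"}}}]}
# The output can then be uploaded with:
#   aws dynamodb batch-write-item --request-items file://out.json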
|
[
"antcue@amazon.com"
] |
antcue@amazon.com
|