blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f2edfe179c6a18015940a9ce02b525818d25de88 | 7b0c90185aa3d4ae7c422ff32fcc0ebf930f1eed | /venv/bin/rst2latex.py | 002c40782a6d22022fddcd97bd7aa49b9d76b130 | [] | no_license | skilllauncher/lets-hi5 | d3c83052886027575e5e3b5d4e92cb934105fab5 | 8277d3ea641b44fc70c4bfb1f5581e6ae8e395cb | refs/heads/master | 2020-03-24T03:14:35.276636 | 2018-07-26T08:14:19 | 2018-07-26T08:14:19 | 142,410,670 | 0 | 1 | null | 2018-07-26T08:16:33 | 2018-07-26T08:16:33 | null | UTF-8 | Python | false | false | 829 | py | #!/Users/saicharanreddy/Desktop/lets-hi5/venv/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
| [
"saicharan.reddy1@gmail.com"
] | saicharan.reddy1@gmail.com |
8e0e9ff54a1b264abfcc842a94ec36f8c53d0cfa | ff29c013c24068e7409340ba1cd5b88cf3e82436 | /kobocat/onadata/libs/authentication.py | 20d192ec92a035efa730aba0877ea328cb992763 | [
"BSD-2-Clause"
] | permissive | Jubair70/Acid-Survivors-MIS | 01c64a0589e5ed275d9f1a9cf5973303ea4b937e | 89f19607d18a6d4c88717527e2e8d557aaec2fa9 | refs/heads/master | 2021-05-26T11:26:34.503102 | 2019-08-26T10:38:59 | 2019-08-26T10:38:59 | 254,112,228 | 1 | 0 | null | 2020-04-08T14:53:52 | 2020-04-08T14:33:00 | HTML | UTF-8 | Python | false | false | 906 | py | from django.utils.translation import ugettext as _
from django_digest import HttpDigestAuthenticator
from rest_framework.authentication import (
BaseAuthentication, get_authorization_header)
from rest_framework.exceptions import AuthenticationFailed
class DigestAuthentication(BaseAuthentication):
    """DRF authentication backend for HTTP Digest credentials.

    Delegates the actual digest verification to django_digest's
    HttpDigestAuthenticator.
    """
    def __init__(self):
        # Reusable django_digest verifier; construction is cheap and stateless.
        self.authenticator = HttpDigestAuthenticator()
    def authenticate(self, request):
        # Return (user, None) on success, or None when the request does not
        # carry a Digest Authorization header (lets other backends try).
        auth = get_authorization_header(request).split()
        if not auth or auth[0].lower() != b'digest':
            return None
        if self.authenticator.authenticate(request):
            # NOTE(review): assumes django_digest attaches the verified user
            # to request.user during authenticate() -- confirm against the
            # django_digest version in use.
            return request.user, None
        else:
            # Digest header present but credentials did not verify.
            raise AuthenticationFailed(
                _(u"Invalid username/password"))
    def authenticate_header(self, request):
        # Value for the WWW-Authenticate header of a 401 response
        # (the digest challenge built by django_digest).
        response = self.authenticator.build_challenge_response()
        return response['WWW-Authenticate']
| [
"jubair@mpower-social.com"
] | jubair@mpower-social.com |
8d8d28d252d52ec1db6699a9526d2c622795c93d | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/account/models/account_full_reconcile.py | f2ff0ea0895aae478432ed3fde5739b1a75f6bec | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 1,646 | py | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
class AccountFullReconcile(models.Model):
    # Odoo model grouping the partial reconciliations and journal items
    # that together form one fully reconciled set.
    _name = "account.full.reconcile"
    _description = "Full Reconcile"
    # Sequential reference drawn from the 'account.reconcile' sequence;
    # never copied on record duplication.
    name = fields.Char(string='Number', required=True, copy=False, default=lambda self: self.env['ir.sequence'].next_by_code('account.reconcile'))
    partial_reconcile_ids = fields.One2many('account.partial.reconcile', 'full_reconcile_id', string='Reconciliation Parts')
    reconciled_line_ids = fields.One2many('account.move.line', 'full_reconcile_id', string='Matched Journal Items')
    # Journal entry booking the currency exchange-rate difference, if any.
    exchange_move_id = fields.Many2one('account.move')
    def unlink(self):
        """ When removing a full reconciliation, we need to revert the eventual journal entries we created to book the
        fluctuation of the foreign currency's exchange rate.
        We need also to reconcile together the origin currency difference line and its reversal in order to completely
        cancel the currency difference entry on the partner account (otherwise it will still appear on the aged balance
        for example).
        """
        # Avoid cyclic unlink calls when removing partials.
        if not self:
            return True
        # Capture the exchange moves before the records are deleted.
        moves_to_reverse = self.exchange_move_id
        res = super().unlink()
        # Reverse all exchange moves at once.
        today = fields.Date.context_today(self)
        default_values_list = [{
            'date': today,
            'ref': _('Reversal of: %s') % move.name,
        } for move in moves_to_reverse]
        moves_to_reverse._reverse_moves(default_values_list, cancel=True)
        return res
| [
"36736117+SHIVJITH@users.noreply.github.com"
] | 36736117+SHIVJITH@users.noreply.github.com |
be55eb5dd2519645a214a1282c5a37ce629cb118 | a02a40f0df624c74820ad25c27a5e2fb683610f7 | /qc2tsv/qc2tsv.py | eb179093ed0160a01ab0fa2d4d89c1f3518cddb7 | [
"MIT"
] | permissive | ENCODE-DCC/qc2tsv | d3d688bd3f4b3afb0ec4ba7969a001bb9cb7afb2 | f6bd9959d71254a10792bc30414dddcdcdf3865c | refs/heads/master | 2020-09-01T20:49:34.641168 | 2020-08-10T19:39:36 | 2020-08-10T19:39:36 | 219,053,354 | 2 | 0 | MIT | 2020-08-10T19:39:38 | 2019-11-01T19:54:50 | Python | UTF-8 | Python | false | false | 4,980 | py | import json
import logging
import pandas
from copy import deepcopy
from autouri import AutoURI, AbsPath
from caper.dict_tool import split_dict, merge_dict
logger = logging.getLogger(__name__)
class Qc2Tsv(object):
    """Qc2Tsv converts multiple QC JSON objects into a single spreadsheet.

    The spreadsheet header can span multiple rows according to the
    hierarchy (nesting depth) of the JSON objects.
    """
    # Internal separator passed to pandas.json_normalize to join nested keys;
    # chosen so it cannot collide with real key names.
    SEP = '*_:_*'
    # Separator used instead when the header is collapsed to a single row.
    SEP_COLLAPSED_HEADER = '.'

    def __init__(self, qcs, delim='\t'):
        """
        Args:
            qcs:
                list of QC file URIs (path/URL/S3/GCS)
            delim:
                delimiter for output ([TAB] by default)
        """
        self._delim = delim
        self._jsons = []
        for qc in qcs:
            qc = AbsPath.get_abspath_if_exists(qc)
            if not AutoURI(qc).exists:
                # Missing files are skipped with an error log, not fatal.
                logger.error('File does not exists. Skipping... {uri}'.format(uri=qc))
                continue
            s = AutoURI(qc).read()
            j = json.loads(s)
            self._jsons.append(j)

    def flatten_to_tsv(self, row_split_rules=None,
                       merge_split_rows=None,
                       collapse_header=False, transpose=False):
        """Flatten JSON objects by using pandas.json_normalize.

        Header will be multi-line according to the hierarchy of the JSON
        objects.  The last entry of each column is aligned to the bottom,
        all the others are aligned to the top.

        Args:
            row_split_rules: rules forwarded to caper's split_dict to split
                one JSON into several spreadsheet rows.
            merge_split_rows: '<rule_name>:<first_key>' string; the split row
                that lacks rule_name is merged into the row whose rule_name
                value equals first_key, and the merged row is kept first.
            collapse_header: use a single-row, dot-joined header.
            transpose: transpose the final matrix.
        """
        jsons = []
        for j in self._jsons:
            # Split JSONs first according to split rules.
            splitted_jsons = split_dict(j, rules=row_split_rules)
            if merge_split_rows is not None:
                merged_jsons = []
                rule_name, first_key = merge_split_rows.split(':', 1)
                j_not_caught = None
                j_with_first_key = None
                for j_ in splitted_jsons:
                    if rule_name not in j_:
                        j_not_caught = j_
                    elif rule_name in j_ and j_[rule_name] == first_key:
                        j_with_first_key = j_
                    else:
                        merged_jsons.append(j_)
                # BUGFIX: removed a dead `j_merge = deepcopy(j_not_caught)`
                # whose result was never used (wasted a full deep copy).
                if j_not_caught is not None and j_with_first_key is not None:
                    # merge_dict mutates j_not_caught in place.
                    merge_dict(j_not_caught, j_with_first_key)
                    merged_jsons = [j_not_caught] + merged_jsons
                elif j_not_caught is not None:
                    merged_jsons = [j_not_caught] + merged_jsons
                elif j_with_first_key is not None:
                    merged_jsons = [j_with_first_key] + merged_jsons
            else:
                merged_jsons = splitted_jsons
            jsons.extend(merged_jsons)
        if collapse_header:
            sep = Qc2Tsv.SEP_COLLAPSED_HEADER
        else:
            sep = Qc2Tsv.SEP
        df = pandas.json_normalize(jsons, sep=sep)
        tsv = df.to_csv(sep=self._delim, index=False).strip('\n')
        (header, contents) = tsv.split('\n', 1)
        if collapse_header:
            # Single-row header: one row of dot-joined column names.
            header_matrix_t = [[col for col in header.split(self._delim)]]
        else:
            # Multi-row header.
            # Find the number of header lines (maximum nesting level in JSON).
            header_sparse_matrix = [col.split(sep) for col in header.split(self._delim)]
            num_header_lines = max([len(c) for c in header_sparse_matrix])
            # Align the sparse matrix to the top (empty strings fill the gaps)
            # except for the lowest level, which is aligned to the bottom.
            header_matrix = []
            for cols in header_sparse_matrix:
                m = num_header_lines * ['']
                # Align all but the lowest level to the top.
                m[:len(cols) - 1] = cols[:-1]
                # Align the lowest level to the bottom.
                m[-1] = cols[-1]
                header_matrix.append(m)
            # Transpose the temporary matrix: one list per header line.
            header_matrix_t = [[header_matrix[j][i] for j in range(len(header_matrix))]
                               for i in range(len(header_matrix[0]))]
            # Blank out horizontally-repeating entries except in the last row,
            # so each group label appears only once.
            for row in header_matrix_t[:-1]:
                for i, col in enumerate(row):
                    if not col:
                        continue
                    for j in range(i + 1, len(row)):
                        if col == row[j]:
                            row[j] = ''
                        else:
                            break
        contents_matrix = [row.split(self._delim) for row in contents.split('\n')]
        final_matrix = header_matrix_t + contents_matrix
        # Transpose the final matrix if required.
        if transpose:
            final_matrix = [[final_matrix[j][i] for j in range(len(final_matrix))]
                            for i in range(len(final_matrix[0]))]
        return '\n'.join([self._delim.join(row) for row in final_matrix])
| [
"leepc12@gmail.com"
] | leepc12@gmail.com |
227c923b155c129254ce3c05e10ed9edd285112b | 793318429f3ea697f257b1340f198e2aa9ee46d3 | /pytorch_sentiment_analysis/2lstm.py | 6808356605274436439b62b40e1c0162a1cd46b0 | [
"MIT"
] | permissive | PandoraLS/python_toys | 129eeaba03c6c81509fded2c56e3ac922d22ee41 | d3f78815ada5c20863656c643342d48bcc3aef53 | refs/heads/main | 2023-03-22T18:53:22.028849 | 2021-03-18T14:32:36 | 2021-03-18T14:32:36 | 349,050,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,487 | py | #!/usr/bin/env python
# coding: utf-8
# 同样使用RNN,本节的改进如下
#
# * 预训练的词向量
# * 不同的RNN结构(LSTM)
# * 多层RNN
# * 正则化
# * 不同的优化器
# ---
# ## 1. 数据准备
#
# 通过预训练词向量初始化每个词
# In[3]:
import torch
from torchtext import data, datasets
import random
import time
SEED = 1234
begin_t = time.time()
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
TEXT = data.Field(tokenize='spacy')
LABEL = data.LabelField(dtype=torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split(random_state=random.seed(SEED))
print('prepare data time:', time.time() - begin_t)
# In[4]:
TEXT.build_vocab(train_data, max_size=25000, vectors='glove.6B.100d')
LABEL.build_vocab(train_data)
print('build vocabulary time:', time.time() - begin_t)
# In[5]:
BATCH_SIZE = 64
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device
)
# ## 2. 构建模型
#
# * 使用复杂的RNN模型
# * 使用Dropout防止过拟合
# In[6]:
import torch.nn as nn
class RNN(nn.Module):
    """Bidirectional multi-layer LSTM classifier for sentiment analysis.

    Embeds token indices, runs the sequence through an LSTM, and maps the
    concatenated final forward/backward hidden states to a single logit.
    """
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, bidirectional, dropout):
        super().__init__()
        # Token lookup table: vocab_size rows of embedding_dim floats each.
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # The `dropout` argument here acts only between stacked LSTM layers.
        self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,
                           bidirectional=bidirectional, dropout=dropout)
        # Forward and backward final states get concatenated -> 2 * hidden_dim.
        self.fc = nn.Linear(hidden_dim * 2, output_dim)
        # Applied explicitly in forward(); a no-op in eval() mode.
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        # x: [sent len, batch size] of token indices.
        embedded = self.dropout(self.embedding(x))
        # hidden: [num layers * num directions, batch size, hid dim];
        # the sequence outputs and cell state are not needed here.
        _outputs, (hidden, _cell) = self.rnn(embedded)
        # hidden[-2] / hidden[-1] are the last layer's forward / backward states.
        summary = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        summary = self.dropout(summary)
        return self.fc(summary.squeeze(0))
# In[7]:
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5
model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT)
# In[8]:
pretrained_embedding = TEXT.vocab.vectors
print(pretrained_embedding.shape)
# In[9]:
model.embedding.weight.data.copy_(pretrained_embedding)
# ## 3. 训练(与评估)
# In[10]:
import torch.optim as optim
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
# In[11]:
def binary_accuracy(preds, y):
    """Fraction of logits whose rounded sigmoid equals the 0/1 label."""
    predicted_labels = torch.round(torch.sigmoid(preds))
    matches = (predicted_labels == y).float()
    return matches.sum() / len(matches)


def train(model, iterator, optimizer, criterion):
    """Run one optimisation epoch; return (mean loss, mean accuracy)."""
    running_loss = 0.0
    running_acc = 0.0
    model.train()  # required so dropout is active during training
    for batch in iterator:
        optimizer.zero_grad()
        logits = model(batch.text).squeeze(1)
        batch_loss = criterion(logits, batch.label)
        batch_acc = binary_accuracy(logits, batch.label)
        batch_loss.backward()   # compute parameter gradients
        optimizer.step()        # update parameter weights
        running_loss += batch_loss.item()
        running_acc += batch_acc.item()
    n_batches = len(iterator)
    return running_loss / n_batches, running_acc / n_batches


def evaluate(model, iterator, criterion):
    """Score the model without gradients; return (mean loss, mean accuracy)."""
    running_loss = 0.0
    running_acc = 0.0
    model.eval()  # required so dropout is disabled during evaluation
    with torch.no_grad():
        for batch in iterator:
            logits = model(batch.text).squeeze(1)
            batch_loss = criterion(logits, batch.label)
            batch_acc = binary_accuracy(logits, batch.label)
            running_loss += batch_loss.item()
            running_acc += batch_acc.item()
    n_batches = len(iterator)
    return running_loss / n_batches, running_acc / n_batches
# In[ ]:
N_EPOCHS = 5
for epoch in range(N_EPOCHS):
begin_t = time.time()
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
print('Epoch: %02d, Train loss: %.3f, Train acc: %.2f, Valid loss: %.3f, Valid acc: %.2f' %
(epoch, train_loss, train_acc, valid_loss, valid_acc), time.time() - begin_t)
# In[ ]:
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print('Test loss: %.3f, Test acc: %.2f' % (test_loss, test_acc))
# ## 使用
# In[ ]:
import spacy
nlp = spacy.load('en')
def predict_sentiment(sentence):
    """Score one raw sentence; returns the sigmoid probability in [0, 1]."""
    # Tokenise with spaCy, then map each token to its vocabulary index.
    tokens = [tok.text for tok in nlp.tokenizer(sentence)]
    indices = [TEXT.vocab.stoi[tok] for tok in tokens]
    # unsqueeze(1) -> shape [sent len, 1]: a batch holding the single sentence.
    batch = torch.LongTensor(indices).to(device).unsqueeze(1)
    # Squash the model logit with sigmoid and return a plain Python float.
    return torch.sigmoid(model(batch)).item()
# In[ ]:
review1 = 'This film is terrible'
print(review1, predict_sentiment(review1))
# In[ ]:
review2 = 'This film is stupid'
print(review2, predict_sentiment(review2))
# In[ ]:
review3 = 'This film is awesome'
print(review3, predict_sentiment(review3))
| [
"lisen_work@163.com"
] | lisen_work@163.com |
969aee15d51bbe3bb9dc711ec920609108fc317f | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /other_hand_or_government.py | 6422da2f135a28999761b5955c922750ace9b5de | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
#! /usr/bin/env python
def number_and_big_point(str_arg):
    """Echo the supplied argument to stdout."""
    print(str_arg)


def thing_and_part(str_arg):
    """Echo *str_arg*, then the fixed marker 'person_and_next_point'."""
    number_and_big_point(str_arg)
    print('person_and_next_point')


if __name__ == '__main__':
    thing_and_part('place_or_year')
] | jingkaitang@gmail.com |
0902603a00ec16a6c553ebb574aaaaee0f7380b5 | c521d9934cc92390a8bd51c0a5a37257bf1a29df | /mkt/feed/fakedata.py | 6c1e068efe1e236cb05442005c0048d66f5f1a96 | [] | no_license | graingert/zamboni | 1f36992205fbd1329d0d0b0b2bcdb558f363cf60 | d92a0aa4ac6d3129bf3b72b3a8a2a223ced22ef3 | refs/heads/master | 2021-01-22T12:44:36.287310 | 2015-04-17T13:05:11 | 2015-04-17T13:05:11 | 34,050,296 | 0 | 0 | null | 2015-04-16T10:49:13 | 2015-04-16T10:49:12 | null | UTF-8 | Python | false | false | 6,408 | py | import hashlib
import random
from django.core.files.storage import default_storage as storage
from mpconstants.collection_colors import COLLECTION_COLORS
import pydenticon
from mkt.constants.regions import REGIONS_DICT
from mkt.constants.carriers import CARRIER_CHOICE_DICT
from mkt.webapps.fakedata import foreground, generate_apps
from mkt.feed.models import (FeedApp, FeedBrand,
FeedBrandMembership,
FeedCollection, FeedCollectionMembership,
FeedShelf, FeedShelfMembership, FeedItem)
dummy_text = 'foo bar baz blee zip zap cvan fizz buzz something'.split()
def rand_text(n=10):
    """Build a pseudo-random string of *n* words sampled from dummy_text."""
    words = (random.choice(dummy_text) for _ in xrange(n))
    return ' '.join(words)
def shelf(apps, **kw):
    """Create a fake FeedShelf (operator shelf) containing *apps*.

    Keyword overrides: carrier, region, description, name, slug.
    Generates an identicon image for the shelf and a FeedItem row so the
    shelf appears in the feed for the given region.  Returns the shelf.
    """
    # Default to a random carrier when none is supplied.
    carrier = kw.get('carrier', random.choice(CARRIER_CHOICE_DICT.values()))
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    sh = FeedShelf.objects.create(
        carrier=carrier.id,
        description=kw.get('description', 'shelf for ' + carrier.name),
        name=kw.get('name', '%s Op Shelf' % carrier.name),
        region=region)
    # Deterministic 128x128 PNG identicon derived from the shelf name.
    gen = pydenticon.Generator(8, 8, foreground=foreground)
    img = gen.generate(unicode(sh.name).encode('utf8'), 128, 128,
                       output_format='png')
    # Same image is used for both the shelf icon and the landing image.
    with storage.open(sh.image_path(''), 'wb') as f:
        f.write(img)
    with storage.open(sh.image_path('_landing'), 'wb') as f:
        f.write(img)
    # First 8 hex chars of the image MD5 serve as a cache-busting hash.
    image_hash = hashlib.md5(img).hexdigest()[:8]
    # Slug needs the pk, which only exists after create().
    sh.update(slug=kw.get('slug', 'shelf-%d' % sh.pk),
              image_hash=image_hash,
              image_landing_hash=image_hash)
    for a in apps:
        FeedShelfMembership.objects.create(obj=sh, app=a)
    FeedItem.objects.create(item_type='shelf', shelf=sh, region=region)
    return sh
def brand(apps, type, **kw):
    """Create a fake FeedBrand of the given *type* containing *apps*.

    Keyword overrides: region, layout, slug.  A FeedItem row is created
    so the brand shows up in the feed for the region.  Returns the brand.
    """
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    br = FeedBrand.objects.create(
        layout=kw.get('layout', random.choice(['list', 'grid'])),
        slug='brand-',  # placeholder; replaced with the pk-based slug below
        type=type)
    br.update(slug=kw.get('slug', 'brand-%d' % br.pk))
    for a in apps:
        FeedBrandMembership.objects.create(obj=br, app=a)
    FeedItem.objects.create(item_type='brand', brand=br, region=region)
    return br
def collection(apps, slug, background_image=True, **kw):
    """Create a fake FeedCollection under *slug* containing *apps*.

    Keyword overrides: region, color, type, name, description.  When
    *background_image* is True an identicon is generated and its hash
    stored; otherwise image_hash is left as None.  Returns the collection.
    """
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    # Pick a random named color; its hex value becomes the background color.
    colorname = kw.get('color', random.choice(COLLECTION_COLORS.keys()))
    co = FeedCollection.objects.create(
        type=kw.get('type', 'listing'),
        color=colorname,
        background_color=COLLECTION_COLORS[colorname],
        slug=slug,
        description=kw.get('description', ''))
    # Default name needs the pk, which only exists after create().
    name = kw.get('name', 'Collection %s' % co.pk)
    if background_image:
        gen = pydenticon.Generator(8, 8, foreground=foreground)
        img = gen.generate(name, 128, 128,
                           output_format='png')
        with storage.open(co.image_path(''), 'wb') as f:
            f.write(img)
        image_hash = hashlib.md5(img).hexdigest()[:8]
    else:
        image_hash = None
    co.name = name
    co.image_hash = image_hash
    co.save()
    for a in apps:
        FeedCollectionMembership.objects.create(obj=co, app=a)
    FeedItem.objects.create(item_type='collection', collection=co,
                            region=region)
    return co
def app_item(a, type, **kw):
    """Create a fake FeedApp entry of the given *type* for webapp *a*.

    Keyword overrides: region, color, description, preview, slug and the
    pullquote_* fields.  An identicon derived from the app slug is stored
    as the feed-app image.  Returns the FeedApp.
    """
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    colorname = kw.get('color', random.choice(COLLECTION_COLORS.keys()))
    # Deterministic 128x128 PNG identicon derived from the app slug.
    gen = pydenticon.Generator(8, 8, foreground=foreground)
    img = gen.generate(a.app_slug, 128, 128,
                       output_format='png')
    ap = FeedApp.objects.create(
        app=a,
        description=kw.get('description', rand_text(12)),
        type=type,
        color=colorname,
        preview=kw.get('preview', None),
        pullquote_attribution=kw.get('pullquote_attribution', None),
        pullquote_rating=kw.get('pullquote_rating', None),
        pullquote_text=kw.get('pullquote_text', None),
        background_color=COLLECTION_COLORS[colorname],
        slug=kw.get('slug', 'feed-app-%d' % a.pk))
    with storage.open(ap.image_path(''), 'wb') as f:
        f.write(img)
    # First 8 hex chars of the image MD5 serve as a cache-busting hash.
    image_hash = hashlib.md5(img).hexdigest()[:8]
    ap.update(image_hash=image_hash)
    FeedItem.objects.create(item_type='app', app=ap, region=region)
    return ap
def generate_feed_data():
    """Populate the feed with fake data.

    Generates 24 fake apps, splits them into four groups of six, and
    creates shelves, brands, collections (grouped / promo / listing) and
    standalone feed-app entries exercising every feed item variant.
    """
    apps = generate_apps(24)
    # Four groups of six apps each.
    apps1, apps2, apps3, apps4 = apps[:6], apps[6:12], apps[12:18], apps[18:]
    # Shelves: one without and one with a description.
    shelf(apps1, slug='shelf', name='Shelf', description='')
    shelf(apps2, slug='shelf-desc', name='Shelf Description',
          description=rand_text())
    # Brands: one per layout.
    brand(apps1, 'hidden-gem', slug='brand-grid', layout='grid')
    brand(apps2, 'travel', slug='brand-list', layout='list')
    # Grouped collection: apps added pairwise into three named groups.
    co = collection([], slug='grouped')
    co.add_app_grouped(apps1[0].pk, 'group 1')
    co.add_app_grouped(apps1[1].pk, 'group 1')
    co.add_app_grouped(apps1[2].pk, 'group 2')
    co.add_app_grouped(apps1[3].pk, 'group 2')
    co.add_app_grouped(apps1[4].pk, 'group 3')
    co.add_app_grouped(apps1[5].pk, 'group 3')
    # Promo collections: with/without description and background image.
    collection(apps2, slug='coll-promo', type='promo', name='Coll Promo')
    collection(apps2, slug='coll-promo-desc', type='promo',
               name='Coll Promo Desc',
               description=rand_text(),
               background_image=False)
    collection(apps2, slug='coll-promo-bg', type='promo',
               description='', name='Coll Promo Background')
    collection(apps2, slug='coll-promo-bg-desc', type='promo',
               name='Coll Promo Background Desc',
               description=rand_text(),
               background_image=False)
    # Listing collections.
    collection(apps3, slug='coll-listing', type='listing',
               name='Coll Listing')
    collection(apps3, slug='coll-listing-desc', type='listing',
               name='Coll Listing Desc',
               description=rand_text())
    # Standalone feed apps, one per type.
    # NOTE(review): apps4[5] is never used -- presumably intentional spare.
    app_item(apps4[0], type='icon', slug='feedapp-icon')
    app_item(apps4[1], type='image', slug='feedapp-image')
    app_item(apps4[2], type='description', slug='feedapp-description')
    app_item(apps4[3], type='quote', slug='feedapp-quote',
             pullquote_text='"%s"' % rand_text(12),
             pullquote_rating=4,
             pullquote_attribution="matt basta")
    app_item(apps4[4], type='preview', slug='feedapp-preview')
| [
"ashort@mozilla.com"
] | ashort@mozilla.com |
615c56157da6e85bb253d3360f1fe42404143c73 | 382d9643dfc15aaf605112fa1624c2cca5300106 | /build/lib/BearSki/utils/hartool.py | 0d873891b1ad430ed6af29a946532e6fa886a59b | [
"MIT"
] | permissive | xglh/BearSki | 150f67c42d263d142656c4d5e254c1b0472bee12 | ba4c34de658a1b744694a9424b63bd9a5795c42f | refs/heads/master | 2021-06-10T19:10:44.559173 | 2020-03-14T14:02:44 | 2020-03-14T14:02:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | # -*- encoding: utf-8 -*-
'''
@File : hartool.py
@Time : 2020/01/29 15:29:32
@Author : chenjiusi
'''
import logging
import json,os
from har2case.core import HarParser
from BearSki.utils.arguments import runArg
from BearSki.template import ApiTest_har
logger=logging.getLogger("BearSki.HarTool")
class HarTool(HarParser):
    """Convert a HAR capture into BearSki API-test models and test cases.

    On construction, every request/response entry of the HAR file is
    parsed and indexed by request name.  Per entry the tool can emit a
    JSON request model, a JSON response sample, and a generated
    test-case module rendered from the ApiTest_har template.
    """
    def __init__(self, har_file_path, filter_str=None, exclude_str=None):
        self.har_file_path = har_file_path
        self.filter_str = filter_str
        self.exclude_str = exclude_str or ""
        self._openfiles()
        # Runtime configuration providing the model/case output paths.
        self.rArg = runArg()

    def _getOneRequest(self, entry_json):
        # Build a single-request test model; _prepare_teststep is
        # inherited from har2case's HarParser.
        return self._prepare_teststep(entry_json)

    def _openfiles(self):
        """Load the HAR JSON and index its entries by request name."""
        # BUGFIX: the file handle was previously never closed; read-only
        # access inside a context manager is sufficient.
        with open(self.har_file_path, "r", encoding='utf8') as fo:
            jstr = json.loads(fo.read())
        req_list = jstr['log']['entries']
        self.harfile = {}
        for req_index in req_list:
            request_str = self._getOneRequest(req_index)
            name = request_str['name']
            response_str = self._getResponse(req_index)
            self.harfile[name] = {}
            self.harfile[name]['request'] = request_str
            self.harfile[name]['response'] = response_str

    def _getResponse(self, req):
        # Raw response object of one HAR entry.
        return req['response']

    def createAllCase(self):
        """Generate model/response files and a test case for every entry."""
        for name in self.harfile:
            result, res = self.getMessage(name)
            self.createTestCase(result, res)

    # Helper functions.
    def getMessage(self, name):
        """Return the (request, response) pair recorded for *name*."""
        print(name)
        req = self.harfile[name]['request']
        res = self.harfile[name]['response']
        return req, res

    def createOneCase(self, name):
        """Generate model/response files and a test case for one entry."""
        req = self.harfile[name]['request']
        res = self.harfile[name]['response']
        self.createTestCase(req, res)

    def createTestCase(self, req_str, res):
        """Write the request model, response sample and test-case module."""
        newrul = req_str['name']
        modelpath = self.rArg.auto_model_path
        casepath = self.rArg.auto_case_path
        # e.g. '/api/login/' -> 'api_login': slashes become underscores and
        # the leading/trailing characters are stripped.
        modelname = newrul.replace('/', '_')[1:-1]
        # BUGFIX: ensure both output directories exist (the case directory
        # was previously assumed to exist and writes could fail).
        os.makedirs(modelpath, exist_ok=True)
        os.makedirs(casepath, exist_ok=True)
        self.writeFile(modelpath + '/' + modelname + '_model.json',
                       json.dumps(req_str, sort_keys=True, indent=4,
                                  separators=(',', ': '), ensure_ascii=False))
        self.writeFile(modelpath + '/' + modelname + '_res.json',
                       json.dumps(res, sort_keys=True, indent=4,
                                  separators=(',', ': '), ensure_ascii=False))
        # Render the test-case template with the model name and path.
        testcase = ApiTest_har.TESTCASE
        newcase = (testcase.replace("${modelname}", modelname)
                           .replace("${model_file_path}", modelpath))
        self.writeFile(casepath + '/' + "atest_" + modelname + '.py', newcase)

    def writeFile(self, filename, context):
        """Write *context* to *filename*, truncating any existing file."""
        # BUGFIX: the original called `fo.close` without parentheses, so the
        # handle was never closed; a context manager guarantees it.
        with open(filename, "w+") as fo:
            fo.write(context)
| [
"chen6_9@163.com"
] | chen6_9@163.com |
643efd214235c664eeffb24299b11c9b101390ab | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_run_20200424192105.py | 875feea371ca462d383947ebe8dcbfabd7f8fd6b | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,213 | py | from matrix_squaring import *
################################################################################################
# CONTROL PANEL
################################################################################################
# Whether to run the matrix-squaring algorithm with the Trotter approximation
run_ms_algorithm = True
# Whether to run the internal-energy computation
run_avg_energy = False
# Whether to run the optimization of dx and beta_ini
run_optimization = False
################################################################################################
# GENERAL PARAMETERS FOR THE FIGURES
################################################################################################
# Use LaTeX for figure text and enlarge the font size
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size':15,'text.latex.unicode':True})
# Path of this script's directory, used to save output files next to it
script_dir = os.path.dirname(os.path.abspath(__file__))
################################################################################################
# RUN MATRIX SQUARING ALGORITHM
################################################################################################
if run_ms_algorithm:
    # Physical parameters of the algorithm
    physical_kwargs = {
        'x_max': 5.,
        'nx': 201,
        'N_iter': 7,
        'beta_fin': 4,
        'potential': harmonic_potential,
        'potential_string': 'harmonic_potential'
    }
    # Technical parameters (whether to generate files and figures, etc.)
    technical_kwargs = {
        'print_steps': False,
        'save_data': True,
        'file_name': None,
        'relevant_info': None,
        'plot': True,
        'save_plot': True,
        'show_plot': True,
        'plot_file_name': None
    }
    kwargs = {**physical_kwargs, **technical_kwargs}
    rho, trace_rho, grid_x = run_pi_x_sq_trotter(**kwargs)
################################################################################################
# RUN INTERNAL-ENERGY COMPUTATION
################################################################################################
if run_avg_energy:
    # Technical parameters for the partition function and energy computation
    read_Z_data = False
    generate_Z_data = True
    Z_file_name = None
    plot_energy = True
    save_plot_E = True
    show_plot_E = True
    E_plot_name = None
    # Physical parameters for computing Z and <E>
    temp_min = 1./10
    temp_max = 1./2
    N_temp = 10
    potential, potential_string = harmonic_potential, 'harmonic_potential'
    # More technical parameters
    save_Z_csv = True
    relevant_info_Z = None
    print_Z_data = False
    x_max = 7.
    nx = 201
    N_iter = 7
    print_steps = False
    save_pi_x_data = False
    pi_x_file_name = None
    relevant_info_pi_x = None
    plot_pi_x = False
    save_plot_pi_x = False
    show_plot_pi_x = False
    plot_pi_x_file_name = None
    average_energy(read_Z_data, generate_Z_data, Z_file_name, plot_energy, save_plot_E,
                   show_plot_E, E_plot_name,
                   temp_min, temp_max, N_temp, save_Z_csv, relevant_info_Z, print_Z_data,
                   x_max, nx, N_iter, potential, potential_string, print_steps, save_pi_x_data,
                   pi_x_file_name, relevant_info_pi_x,plot_pi_x, save_plot_pi_x, show_plot_pi_x,
                   plot_pi_x_file_name)
################################################################################################
# RUN OPTIMIZATION OF DX AND BETA_INI
################################################################################################
if run_optimization:
    # Physical parameters
    beta_fin = 4
    x_max = 5
    potential, potential_string = harmonic_potential, 'harmonic_potential'
    nx_min = 10
    nx_max = 310
    nx_sampling = 60
    N_iter_min = 8
    N_iter_max = 20
    # Technical parameters
    generate_opt_data = True
    read_opt_data = False
    save_opt_data = True
    opt_data_file_name = None
    opt_relevant_info = None
    plot_opt = True
    show_opt_plot = True
    save_plot_opt = True
    opt_plot_file_name = None
    error, dx_grid, beta_ini_grid, comp_time = \
        optimization(generate_opt_data, read_opt_data, beta_fin, x_max, potential,
                     potential_string, nx_min, nx_max, nx_sampling, N_iter_min,
                     N_iter_max, save_opt_data, opt_data_file_name,opt_relevant_info,
                     plot_opt, show_opt_plot, save_plot_opt, opt_plot_file_name)
    print('-----------------------------------------'
          + '-----------------------------------------\n'
          + 'Optimization: beta_fin=%.3f, x_max=%.3f, potential=%s\n \
            nx_min=%d, nx_max=%d, N_iter_min=%d, N_iter_max=%d\n \
            computation time = %.3f sec.\n'%(beta_fin,x_max,potential_string,nx_min,
                                            nx_max,N_iter_min,N_iter_max,comp_time)
          + '-----------------------------------------'
          + '-----------------------------------------')
| [
"jeaz.git@gmail.com"
] | jeaz.git@gmail.com |
a9b9060400b2d7c665146225a0b6e3d64c518c3a | 11763b1150a3a05db89c13dcd6152f8fcca87eaa | /designs/linear/homomorphic/latticebased/keyagreement11.py | f9a954cf9ab102e26637538b7316f642ae13b71d | [] | no_license | acad2/crypto | 343c32fa25aaec73e169290579fc3d02c4b226f6 | cb283df4101fcd618a0478a0018273f00d0734ae | refs/heads/master | 2021-08-19T06:36:26.068033 | 2017-11-25T00:41:03 | 2017-11-25T00:41:03 | 113,048,326 | 2 | 0 | null | 2017-12-04T13:49:02 | 2017-12-04T13:49:01 | null | UTF-8 | Python | false | false | 2,049 | py | #s(a + e)
#x(a + y)
# as + se 16 32 48 32 48 80
# ax + xy
#se(ax + xy) == axse + xyse == xs(ae + ye)
#xy(as + se) == asxy + xyse == xs(ay + ye)
# 24 32 32 32 32 32 32 32 128 - 120 = 8
# 48 64 64 64 64 64 64 64 256 - 240 = 16
# 96 128 128 128 128 128 128 128 512 - 480 = 32
# 31 32 32 32 32 32 32 32 128 - 127 = 1
# 30 32 32 32 32 32 32 32 128 - 126 = 2
# 18 32 32 32 32 32 32 32 128 - 114 = 14
# 17 32 32 32 32 32 32 32 128 - 113 = 15
# 31x , 32x
# 62 64 64 64 64 64 64 64 256 - 254 = 2
# 992 1024 1024 1024 1024 1024 1024 1024 4096 - 4064 = 32
from crypto.utilities import random_integer
SECURITY_LEVEL = 32 + 1 # + 1 to make sure we have enough after we shift some bits away
A = random_integer(SECURITY_LEVEL * 3)
def generate_private_key(security_level=SECURITY_LEVEL):
    """Sample the secret triple (s, e, s*e); each factor is 4x security-level bytes."""
    secret = random_integer(security_level * 4)
    noise = random_integer(security_level * 4)
    return secret, noise, secret * noise
def generate_public_key(private_key, security_level=SECURITY_LEVEL, a=A):
    """Return the public key s * (a + e) for the private key (s, e, s*e)."""
    s, e, _se = private_key
    return (a + e) * s
def generate_keypair(security_level=SECURITY_LEVEL, a=A):
    """Generate a fresh key pair and return it as (public_key, private_key)."""
    private = generate_private_key(security_level)
    return generate_public_key(private, security_level, a), private
def key_agreement(public_key, private_key, shift=(SECURITY_LEVEL * 15 * 8) + SECURITY_LEVEL):
    """Derive the shared secret: multiply the peer's public key by our s*e
    and drop the low `shift` bits, where the two sides' products differ."""
    se = private_key[2]
    product = public_key * se
    return product >> shift
def unit_test():
    # Round-trips 10000 random keypairs through the agreement (project harness).
    from unittesting import test_key_agreement
    test_key_agreement("s(a + e) key agreement", generate_keypair, key_agreement, iterations=10000, key_size=SECURITY_LEVEL)
# Run the self-test when executed directly.
if __name__ == "__main__":
    unit_test()
| [
"python_pride@protonmail.com"
] | python_pride@protonmail.com |
0ef5ddc8e7980eeeaa8c87ee02a17c7dd1c0cae8 | 969b7b94eba36e07ce89b41dd2d3201988fda9e4 | /coverage_overlay/old-version/overlap.py | 0142937bd87011e58983fdf807f4197046f4980c | [] | no_license | shahbazymoh/proteomicstools | b1554c5c1729c04ab478d7bcc2cbaeaa8886bcf6 | 59bfcd19ed05ceb59b98bc1d37515b7ff0581da2 | refs/heads/master | 2021-05-30T11:03:10.482346 | 2016-02-03T01:36:39 | 2016-02-03T01:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | py | from __future__ import print_function
import csv
import uniprot
import re
def clean_seqid(seqid):
    """Return the accession from a pipe-delimited FASTA id,
    e.g. 'sp|P12345|ALBU_HUMAN' -> 'P12345'."""
    return seqid.split('|')[1]
def is_overlap(i1, seq1, i2, seq2):
    """True when the half-open intervals [i1, i1+len(seq1)) and
    [i2, i2+len(seq2)) intersect."""
    end1 = i1 + len(seq1)
    end2 = i2 + len(seq2)
    return i1 < end2 and i2 < end1
def gap(i1, seq1, i2, seq2):
    """Residue distance between two non-overlapping peptide intervals,
    or None when they overlap."""
    if is_overlap(i1, seq1, i2, seq2):
        return None
    end1 = i1 + len(seq1)
    end2 = i2 + len(seq2)
    return min(abs(i1 - end2), abs(i2 - end1))
# --- Load peptide rows from the CSV, grouped by their parent protein ---------
fname = 'KL-A1_pep15.csv'
peptides = []
proteins = {}
for row in csv.DictReader(open(fname)):
    seqid = row['Protein']
    if seqid not in proteins:
        proteins[seqid] = []
    proteins[seqid].append(row)
    row['matches'] = []
    peptides.append(row)
# Locate each peptide in its protein sequence and compare it against every
# other peptide of the same protein (overlap flag + residue gap).
seqids, fasta = uniprot.read_fasta('use.fasta')
for seqid, protein in proteins.items():
    bare_seqid = clean_seqid(seqid)
    full_sequence = fasta[bare_seqid]['sequence']
    for i_peptide1, peptide1 in enumerate(protein):
        seq1 = peptide1['Sequence']
        i1 = full_sequence.find(seq1)
        # 1-based inclusive coordinates for the output file.
        peptide1['Start'] = i1 + 1
        peptide1['End'] = i1 + len(seq1)
        for peptide2 in protein:
            if peptide1 == peptide2:
                continue
            seq2 = peptide2['Sequence']
            i2 = full_sequence.find(seq2)
            match = {
                'overlap': is_overlap(i1, seq1, i2, seq2),
                'gap': gap(i1, seq1, i2, seq2),
                'seq1': seq1,
                'seq2': seq2,
                'i1': i1,
                'i2': i2,
            }
            peptide1['matches'].append(match)
# --- Write one output row per peptide with overlap / gap-size flags ----------
f = open('KL-A1_pep15.out.csv', 'w')
writer = csv.writer(f)
in_keys = ['Sequence','Protein','Protein Description','Length']
headers = in_keys + ['Start', 'End', 'Overlap', '3AA', '4AA', '5AA']
writer.writerow(headers)
first_keys = in_keys + ['Start', 'End']
for peptide in peptides:
    row = [peptide[key] for key in first_keys]
    overlap = False
    aa3 = False
    aa4 = False
    aa5 = False
    for match in peptide['matches']:
        if match['overlap']:
            overlap = True
        else:
            # NOTE(review): this rebinds the module-level name `gap`, shadowing
            # the gap() function above; harmless only because gap() is never
            # called after this point.
            gap = match['gap']
            if gap >= 3:
                aa3 = True
            if gap >= 4:
                aa4 = True
            if gap >= 5:
                aa5 = True
    for is_test in [overlap, aa3, aa4, aa5]:
        row.append('X' if is_test else '')
    writer.writerow(row)
f.close()
| [
"apposite@gmail.com"
] | apposite@gmail.com |
e6e0ebace35bc62a8bc4052e358519b0eb898000 | 87cecf5db9c46b86290e468216d6d6d0bc648d29 | /nerodia/element_collection.py | ebbca08cf9b986d77f0dd89fe1a931a6894f28ca | [
"MIT"
] | permissive | ed00m/nerodia | 84efcb8a5a9b798af669c346a77fb12dfffe475f | 8a1aff52942cf5a58567fb5753cb0a1974310b5a | refs/heads/master | 2020-03-27T16:15:36.202560 | 2018-08-30T14:28:11 | 2018-08-30T14:28:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,561 | py | from importlib import import_module
from itertools import islice
import nerodia
from .locators.class_helpers import ClassHelpers
class ElementCollection(ClassHelpers):
    """Lazy collection of elements matched by `selector` inside `query_scope`.

    Elements are located on first use; iteration re-types generic
    HTMLElement / Input results to their concrete tag-specific classes.
    """
    def __init__(self, query_scope, selector):
        self.query_scope = query_scope
        self.selector = selector
        self.generator = ()
        self.locator = None    # built lazily by _elements
        self.elements = None   # raw located elements, cached by _elements
        self._els = []         # wrapped elements, cached by __len__
    def __iter__(self):
        """
        Yields each element in collection
        :rtype: iter
        :Example:
        divs = browser.divs(class='kls')
        for div in divs:
            print(div.text)
        """
        from .elements.html_elements import HTMLElement
        from .elements.input import Input
        # Per-tag counters so each re-typed element gets its index within
        # the elements of the same tag name.
        dic = {}
        for idx, e in enumerate(self._elements):
            element = self._element_class(self.query_scope, dict(self.selector, index=idx,
                                                                 element=e))
            if element.__class__ in [HTMLElement, Input]:
                tag_name = element.tag_name
                dic[tag_name] = dic.get(tag_name, 0)
                dic[tag_name] += 1
                kls = nerodia.element_class_for(tag_name)
                new_selector = dict(self.selector, element=e, tag_name=tag_name,
                                    index=dic[tag_name] - 1)
                yield kls(self.query_scope, new_selector)
            else:
                yield element
    def __len__(self):
        """
        Returns the number of elements in the collection
        :rtype: int
        """
        # Materializes and caches the wrapped elements on first call.
        self._els = self._els or [_ for _ in self]
        return len(self._els)
    def __getitem__(self, idx):
        """
        Get the element at the given index or slice
        Any call to an ElementCollection including an adjacent selector
        can not be lazy loaded because it must store correct type
        Slices can only be lazy loaded if the indices are positive
        :param idx: index of wanted element, 0-indexed
        :type idx: int
        :return: instance of Element subclass
        :rtype: nerodia.elements.element.Element
        """
        if isinstance(idx, slice):
            if idx.start and idx.start < 0 or idx.stop and idx.stop < 0:
                return list(self)[idx.start:idx.stop]
            else:
                return list(islice(self, idx.start, idx.stop, idx.step))
        elif 'adjacent' in self.selector:
            try:
                return list(islice(self, idx + 1))[idx]
            except IndexError:
                return self._element_class(self.query_scope, {'invalid_locator': True})
        # NOTE(review): `idx > 0` means index 0 is never served from the
        # cache even when _els is populated -- confirm whether intentional.
        elif self._els and idx > 0 and len(self._els) > idx:
            return self._els[idx]
        else:
            return self._element_class(self.query_scope, dict(self.selector, index=idx))
    @property
    def is_empty(self):
        """
        Returns True if no elements are found
        :Example:
        browser.select_list(name='new_user_languages').options(class_name='not_here').is_empty
        :Example:
        browser.select_list(name='new_user_languages').options(id='danish').is_empty
        :return: True if no elements are found
        :rtype: bool
        """
        return len(self) == 0
    @property
    def to_list(self):
        """
        This collection as a list
        :rtype: list[nerodia.elements.element.Element]
        """
        nerodia.logger.deprecate('ElementCollection.to_list', 'list(self)')
        return list(self)
    def locate(self):
        """
        Locate all elements and return self
        :rtype: ElementCollection
        """
        # NOTE(review): this assigns `els`, not `_els`; the cache read by
        # __len__/__getitem__ is never populated here -- looks like a typo.
        self.els = list(self)
        return self
    @property
    def browser(self):
        """
        Returns the browser of the current query_scope
        :rtype: nerodia.browser.Browser
        """
        return self.query_scope.browser
    def __eq__(self, other):
        """
        Returns true if two element collections are equal.
        :param other: other collection
        :rtype: bool
        :Example:
        browser.select_list(name='new_user_languages').options == \
        browser.select_list(id='new_user_languages').options #=> True
        browser.select_list(name=;new_user_role').options == \
        browser.select_list(id='new_user_languages').options #=> false
        """
        return list(self) == list(other)
    eql = __eq__
    # private
    @property
    def _elements(self):
        # Build the locator and locate the raw elements lazily, caching both.
        if self.locator is None:
            self.locator = self._build_locator()
        if self.elements is None:
            self.elements = self.locator.locate_all()
        return self.elements
    @property
    def _element_class(self):
        # Derive the element class from this collection's class name,
        # e.g. DivCollection -> Div, importing the matching elements module.
        from .elements.svg_elements import SVGElementCollection
        from .elements.html_elements import HTMLElementCollection
        from .module_mapping import map_module
        name = self.__class__.__name__.replace('Collection', '')
        element_module = map_module(name)
        try:
            module = import_module('nerodia.elements.{}'.format(element_module))
        except ImportError:
            if isinstance(self, HTMLElementCollection):
                module = import_module('nerodia.elements.html_elements')
            elif isinstance(self, SVGElementCollection):
                module = import_module('nerodia.elements.svg_elements')
            else:
                raise TypeError(
                    'element class for {} could not be determined'.format(name))
        return getattr(module, name)
| [
"lucast1533@gmail.com"
] | lucast1533@gmail.com |
2e143358fc3e1ac3a4de45f1c3825d405d0e713e | e953441fb81040ca1c55dc4c410fb606dda000ef | /humanoid_maze/envs/humanoid_maze.py | e537da5274eb489e281014768e92b5704974f2e5 | [] | no_license | Rowing0914/MuJoCo_Humanoid_Maze | 5dd25f408526e9666e84e7902a0054a796c496a1 | 2da3330dd58d27ab5b07507e69d0efa466a45d40 | refs/heads/master | 2020-06-21T12:29:56.540230 | 2019-07-17T19:45:39 | 2019-07-17T19:45:39 | 197,451,363 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,441 | py | # https://github.com/openai/gym/blob/master/gym/envs/mujoco/humanoid_v3.py
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
# Camera pose applied by viewer_setup(); ndarray values are copied in place.
DEFAULT_CAMERA_CONFIG = {
    'trackbodyid': 1,
    'distance': 4.0,
    'lookat': np.array((0.0, 0.0, 2.0)),
    'elevation': -20.0,
}
def mass_center(model, sim):
    """Return the (x, y) coordinates of the model's center of mass.

    Mass-weighted average of the per-body frame positions (sim.data.xipos),
    with the vertical component dropped.
    """
    body_mass = np.expand_dims(model.body_mass, axis=1)
    frame_pos = sim.data.xipos
    com = np.sum(body_mass * frame_pos, axis=0) / np.sum(body_mass)
    return com[:2].copy()
class HumanoidMazeEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo humanoid locomotion environment (maze variant of Gym's Humanoid-v3).

    Per-step reward = forward_reward_weight * x-velocity + healthy_reward
                      - control cost - contact cost.
    """
    def __init__(self,
                 xml_file='humanoid_maze.xml',
                 forward_reward_weight=1.25,
                 ctrl_cost_weight=0.1,
                 contact_cost_weight=5e-7,
                 contact_cost_range=(-np.inf, 10.0),
                 healthy_reward=5.0,
                 terminate_when_unhealthy=True,
                 healthy_z_range=(1.0, 2.0),
                 reset_noise_scale=1e-2,
                 exclude_current_positions_from_observation=True,
                 rgb_rendering_tracking=True):
        # EzPickle records all constructor args so the env can be copied/pickled.
        utils.EzPickle.__init__(**locals())
        self._forward_reward_weight = forward_reward_weight
        self._ctrl_cost_weight = ctrl_cost_weight
        self._contact_cost_weight = contact_cost_weight
        self._contact_cost_range = contact_cost_range
        self._healthy_reward = healthy_reward
        self._terminate_when_unhealthy = terminate_when_unhealthy
        self._healthy_z_range = healthy_z_range
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation)
        # frame_skip=5: each step() advances the simulation five frames.
        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
    @property
    def healthy_reward(self):
        # Paid every step while alive (or unconditionally when episodes are
        # not terminated on falling).
        return float(
            self.is_healthy
            or self._terminate_when_unhealthy
        ) * self._healthy_reward
    def control_cost(self, action):
        # Quadratic penalty on the actuator commands currently applied.
        control_cost = self._ctrl_cost_weight * np.sum(
            np.square(self.sim.data.ctrl))
        return control_cost
    @property
    def contact_cost(self):
        # Quadratic penalty on external contact forces, clipped to a range.
        contact_forces = self.sim.data.cfrc_ext
        contact_cost = self._contact_cost_weight * np.sum(
            np.square(contact_forces))
        min_cost, max_cost = self._contact_cost_range
        contact_cost = np.clip(contact_cost, min_cost, max_cost)
        return contact_cost
    @property
    def is_healthy(self):
        # Healthy while the torso height (qpos[2]) stays inside healthy_z_range.
        min_z, max_z = self._healthy_z_range
        is_healthy = min_z < self.sim.data.qpos[2] < max_z
        return is_healthy
    @property
    def done(self):
        done = ((not self.is_healthy)
                if self._terminate_when_unhealthy
                else False)
        return done
    def _get_obs(self):
        # Concatenated proprioceptive state; the global x/y position is
        # optionally dropped so the policy cannot key on absolute location.
        position = self.sim.data.qpos.flat.copy()
        velocity = self.sim.data.qvel.flat.copy()
        com_inertia = self.sim.data.cinert.flat.copy()
        com_velocity = self.sim.data.cvel.flat.copy()
        actuator_forces = self.sim.data.qfrc_actuator.flat.copy()
        external_contact_forces = self.sim.data.cfrc_ext.flat.copy()
        if self._exclude_current_positions_from_observation:
            position = position[2:]
        return np.concatenate((
            position,
            velocity,
            com_inertia,
            com_velocity,
            actuator_forces,
            external_contact_forces,
        ))
    def step(self, action):
        # Center-of-mass velocity over the simulated frame(s).
        xy_position_before = mass_center(self.model, self.sim)
        self.do_simulation(action, self.frame_skip)
        xy_position_after = mass_center(self.model, self.sim)
        xy_velocity = (xy_position_after - xy_position_before) / self.dt
        x_velocity, y_velocity = xy_velocity
        ctrl_cost = self.control_cost(action)
        contact_cost = self.contact_cost
        forward_reward = self._forward_reward_weight * x_velocity
        healthy_reward = self.healthy_reward
        rewards = forward_reward + healthy_reward
        costs = ctrl_cost + contact_cost
        observation = self._get_obs()
        reward = rewards - costs
        done = self.done
        info = {
            'reward_linvel': forward_reward,
            'reward_quadctrl': -ctrl_cost,
            'reward_alive': healthy_reward,
            'reward_impact': -contact_cost,
            'x_position': xy_position_after[0],
            'y_position': xy_position_after[1],
            'distance_from_origin': np.linalg.norm(xy_position_after, ord=2),
            'x_velocity': x_velocity,
            'y_velocity': y_velocity,
            'forward_reward': forward_reward,
        }
        return observation, reward, done, info
    def reset_model(self):
        # Re-initialize pose and velocities around the defaults with small
        # uniform noise, then return the first observation.
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nv)
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation
    def viewer_setup(self):
        # Apply DEFAULT_CAMERA_CONFIG; ndarray entries are copied element-wise.
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
| [
"kosakaboat@gmail.com"
] | kosakaboat@gmail.com |
8db1dd021ad9782e8d54300ed0170c3c75dd5f50 | be0fdb8cd97067212788a1e3d7acb3e03a7a4258 | /data_loader.py | 68ecc46c27a7e9c540f5782664b5ad174399b0c3 | [] | no_license | speedcell4/logistic | dacca0c98a1cf201c1ae47d094823dab9198917e | 0ef2564b400cc8a5bea9f81b4ec9dd8dd630cd61 | refs/heads/master | 2020-03-21T02:43:15.067612 | 2018-06-20T10:05:14 | 2018-06-20T10:05:14 | 138,014,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | from collections import Counter
from pathlib import Path
from random import shuffle
project_dir = Path(__file__).expanduser().absolute().parent
positive = project_dir / 'data' / 'data' / 'books' / 'positive.review'
negative = project_dir / 'data' / 'data' / 'books' / 'negative.review'
def sentence_stream(path: Path):
    """Yield one sentence per line of a 'token:freq' review file.

    Each line holds whitespace-separated ``token:freq`` items; a sentence
    is the list of ``(token, int(freq))`` pairs for that line.

    Raises:
        ValueError: if an item has no integer frequency after its last ':'.
    """
    with path.open('r', encoding='utf-8') as reader:
        for raw_line in reader:
            sentence = []
            for item in raw_line.strip().split():
                # Split on the *last* colon so tokens that themselves
                # contain ':' (e.g. 're:union') still parse.
                token, _, freq = item.rpartition(':')
                try:
                    sentence.append((token, int(freq)))
                except ValueError:
                    # Fail loudly with context instead of exit(1), which
                    # would kill the whole interpreter from library code.
                    raise ValueError(
                        'malformed token:freq item %r in %s' % (item, path))
            yield sentence
def update_counter(path: Path, counter: Counter):
    """Accumulate the stored frequency of every token in `path` into `counter`."""
    for parsed_line in sentence_stream(path):
        for token, freq in parsed_line:
            counter[token] += freq
def build_vocabulary(vocab_size: int):
    """Map the `vocab_size` most frequent tokens (positive + negative reviews)
    to consecutive indices starting at 0."""
    counts = Counter()
    update_counter(positive, counts)
    update_counter(negative, counts)
    return {token: index
            for index, (token, _) in enumerate(counts.most_common(vocab_size))}
def prepare(vocab_size: int, debug: bool):
    # Build a shuffled bag-of-words dataset: one binary feature vector per
    # review line, target 1.0 for positive reviews and 0.0 for negative ones.
    # With debug=True only the first sentence of each file is used.
    vocabulary = build_vocabulary(vocab_size)
    print(f'vocabulary size => {vocabulary.__len__()}')
    def handle(path: Path, target: float):
        # Yield (binary feature vector, target) for each line of `path`.
        for sentence in sentence_stream(path):
            datum = [0] * vocabulary.__len__()
            for token, freq in sentence:
                if token in vocabulary:
                    # Presence only -- the stored frequency is ignored.
                    datum[vocabulary[token]] = 1
            yield datum, target
            if debug:
                break
    pos_data, pos_targets = zip(*handle(positive, 1.0))
    neg_data, neg_targets = zip(*handle(negative, 0.0))
    dataset = list(zip(pos_data + neg_data, pos_targets + neg_targets))
    shuffle(dataset)
    data, targets = zip(*dataset)
    return data, targets
def data_iteration(data, targets, batch_size: int):
    """Yield aligned (data, targets) mini-batches of `batch_size` items;
    the final batch may be shorter."""
    total = len(data)
    for start in range(0, total, batch_size):
        end = start + batch_size
        yield data[start:end], targets[start:end]
if __name__ == '__main__':
    # Smoke run: small vocabulary, debug=True (one sentence per review file).
    data, targets = prepare(200, True)
    print('data', data.__len__())
    print('targets', targets.__len__())
| [
"speedcell4@gmail.com"
] | speedcell4@gmail.com |
5261207a2efcbd341065c3dc64ec1e5c816a1b63 | 245a3f8cea6f232bf3142706c11188b51eb21774 | /python/hetu/gpu_links/MatrixDivideConstLink.py | a613061bff3e74d4d6f4526b31cf818adffcce45 | [
"Apache-2.0"
] | permissive | initzhang/Hetu | 5bfcb07e62962fbc83def14148f8367fab02625a | 447111a358e4dc6df5db9c216bdb3590fff05f84 | refs/heads/main | 2023-06-20T18:37:21.760083 | 2021-07-27T04:37:48 | 2021-07-27T04:37:48 | 389,848,768 | 0 | 0 | Apache-2.0 | 2021-07-27T04:32:57 | 2021-07-27T04:32:57 | null | UTF-8 | Python | false | false | 395 | py | from __future__ import absolute_import
import ctypes
from .._base import _LIB
from .. import ndarray as _nd
def matrix_elementwise_divide_const(val, in_mat, out_mat, stream=None):
    # Launch the DLGpuMatrixDivConst CUDA kernel with the scalar `val` and the
    # input/output NDArray handles (optionally on a specific stream).
    # NOTE(review): the argument order suggests out_mat = val / in_mat
    # element-wise -- confirm against the C implementation of the kernel.
    assert isinstance(in_mat, _nd.NDArray)
    assert isinstance(out_mat, _nd.NDArray)
    _LIB.DLGpuMatrixDivConst(
        ctypes.c_float(val), in_mat.handle, out_mat.handle, stream.handle if stream else None)
| [
"swordonline@foxmail.com"
] | swordonline@foxmail.com |
08d3f21518ff323ed41e12bbf58539628b0d1d8b | c291ba4506a8998df8d7f384c911f6a0a1294001 | /bai__48/KhaiNiemVeHam.py | 98b0fa0105f06dadc22e7a9e273b79f3d0b51d22 | [] | no_license | thanh-falis/Python | f70804ea4a3c127dcb7738d4e7c6ddb4c5a0a9d4 | fa9f98d18e0de66caade7c355aa6084f2d61aab3 | refs/heads/main | 2023-08-18T17:34:29.851365 | 2021-09-30T12:09:30 | 2021-09-30T12:09:30 | 398,952,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | '''
- Hàm là một khối lệnh thực hiện một công việc hoàn chỉnh (mudule),
được đặt tên và gọi thực thj nhiều lần tại nhiều vị trí trong chuong trình.
- Hàm còn được gọi là chương trình con.
- Nếu không viết hàm thì
+ Rất khó để viết chính xác khi dự án lớn
+ Khó debug
+ Khó mở rộng
- có 2 loại hàm:
+ Hàm thư viện: là những hàm được xây dựng sẵn
+ Hàm do người dùng định nghĩa
''' | [
"thanhelma2020|@gmail.com"
] | thanhelma2020|@gmail.com |
311fd6ae560e67365300344f56e6f7c6b238c27a | c3a76533d1fbb53f291f49fb95b5e89ed69a75f5 | /好咧,最后还是要搞google/hard/RangeSumQuery2DMutable308.py | ac4a3007e5e59c792b05380b333bcff23f1254be | [] | no_license | jing1988a/python_fb | 5feb68efd32bd63952b4df0c0cd2e766b83451ea | fd310ec0a989e003242f1840230aaac150f006f0 | refs/heads/master | 2020-03-28T21:03:19.375549 | 2019-01-31T17:02:17 | 2019-01-31T17:02:17 | 149,125,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,380 | py | # Given a 2D matrix matrix, find the sum of the elements inside the rectangle defined by its upper left corner (row1, col1) and lower right corner (row2, col2).
#
#
# The above rectangle (with the red border) is defined by (row1, col1) = (2, 1) and (row2, col2) = (4, 3), which contains sum = 8.
#
# Example:
# Given matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]
# ]
#
# sumRegion(2, 1, 4, 3) -> 8
# update(3, 2, 2)
# sumRegion(2, 1, 4, 3) -> 10
#
#
# The matrix is only modifiable by the update function.
# You may assume the number of calls to update and sumRegion function is distributed evenly.
# You may assume that row1 ≤ row2 and col1 ≤ col2.
# 这道题让我们求二维区域和检索,而且告诉我们数组中的值可能变化,这是之前那道Range
# Sum
# Query
# 2
# D - Immutable的拓展,由于我们之前做过一维数组的可变和不可变的情况Range
# Sum
# Query - Mutable和Range
# Sum
# Query - Immutable,那么为了能够通过OJ,我们还是需要用到树状数组Binary
# Indexed
# Tree(参见Range
# Sum
# Query - Mutable),其查询和修改的复杂度均为O(logn),那么我们还是要建立树状数组,我们根据数组中的每一个位置,建立一个二维的树状数组,然后还需要一个getSum函数,以便求得从(0, 0)
# 到(i, j)
# 的区间的数字和,然后在求某一个区间和时,就利用其四个顶点的区间和关系可以快速求出,参见代码如下:
#
# http://www.cnblogs.com/grandyang/p/5300458.html
# http://www.cnblogs.com/grandyang/p/4985506.html
# 这道题是之前那道Range Sum Query - Immutable 区域和检索 - 不可变的延伸,之前那道题由于数组的内容不会改变,所以我们只需要建立一个累计数组就可以支持快速的计算区间值了,而这道题说数组的内容会改变,如果我们还是用之前的方法建立累计和数组,那么每改变一个数字,之后所有位置的数字都要改变,这样如果有很多更新操作的话,就会十分不高效。这道题我们要使用一种新的数据结构,叫做树状数组Binary Indexed Tree,又称Fenwick Tree,这是一种查询和修改复杂度均为O(logn)的数据结构。这个树状数组比较有意思,所有的奇数位置的数字和原数组对应位置的相同,偶数位置是原数组若干位置之和,假如原数组A(a1, a2, a3, a4 ...),和其对应的树状数组C(c1, c2, c3, c4 ...)有如下关系:
#
#
#
# C1 = A1
# C2 = A1 + A2
# C3 = A3
# C4 = A1 + A2 + A3 + A4
# C5 = A5
# C6 = A5 + A6
# C7 = A7
# C8 = A1 + A2 + A3 + A4 + A5 + A6 + A7 + A8
# ...
# 那么是如何确定某个位置到底是有几个数组成的呢,原来是根据坐标的最低位Low Bit来决定的,所谓的最低位,就是二进制数的最右边的一个1开始,加上后面的0(如果有的话)组成的数字,例如1到8的最低位如下面所示:
#
# 坐标 二进制 最低位
#
# 1 0001 1
#
# 2 0010 2
#
# 3 0011 1
#
# 4 0100 4
#
# 5 0101 1
#
# 6 0110 2
#
# 7 0111 1
#
# 8 1000 8
#
# ...
#
# 最低位的计算方法有两种,一种是x&(x^(x–1)),另一种是利用补码特性x&-x。
#
# 这道题我们先根据给定输入数组建立一个树状数组bit,然后更新某一位数字时,根据最低位的值来更新后面含有这一位数字的地方,一般只需要更新部分偶数位置的值即可,在计算某一位置的前缀和时,利用树状数组的性质也能高效的算出来,参见代码如下:
#
# if brutefoce, update O(1), sum O(n * m) if preSum, update O(n * m), sum O(1)
#
# if only do prefix of row, update O(m), sum O(n)
#
# if Binary Indexed Tree.which is update O(logN * logM), sum(logN * logM)
class NumMatrix:
    """Mutable 2D region-sum structure backed by per-row prefix sums.

    update() costs O(columns), sumRegion() costs O(rows) -- a balanced
    trade-off when updates and queries are evenly mixed.
    """
    def __init__(self, matrix):
        """Build preSumMatrix[i][j] = sum(matrix[i][0..j]) for every row."""
        self.matrix = matrix
        self.preSumMatrix = []
        for row_index in range(len(matrix)):
            running = 0
            prefix = []
            for col_index in range(len(matrix[0])):
                running += matrix[row_index][col_index]
                prefix.append(running)
            self.preSumMatrix.append(prefix)
    def update(self, row, col, val):
        """Set matrix[row][col] = val and patch that row's prefix sums."""
        delta = val - self.matrix[row][col]
        for j in range(col, len(self.preSumMatrix[0])):
            self.preSumMatrix[row][j] += delta
        self.matrix[row][col] = val
    def sumRegion(self, row1, col1, row2, col2):
        """Sum of the rectangle (row1, col1)..(row2, col2), inclusive."""
        total = 0
        for r in range(row1, row2 + 1):
            row_total = self.preSumMatrix[r][col2]
            if col1 != 0:
                row_total -= self.preSumMatrix[r][col1 - 1]
            total += row_total
        return total
# ["NumMatrix","update","update","update","sumRegion"]
# [[[[2,4],[-3,5]]],[0,1,3],[1,1,-3],[0,1,1],[0,0,1,1]]
# After the three updates the matrix is [[2, 1], [-3, -3]], so this prints -3.
test = NumMatrix([[2,4],[-3,5]])
test.update(0,1,3)
test.update(1,1,-3)
test.update(0,1,1)
print(test.sumRegion(0,0,1,1))
| [
"jing1988a@gmail.com"
] | jing1988a@gmail.com |
bd3d29706b19819ef147fd249b1fa1c9c7a3a5da | 9889e7fd73314382fb2f9e8f63d92cf3254b75fb | /Vaango/src/StandAlone/inputs/MPM/J2PlasticityModels/J2Test_07_HydrostaticLoadUnload.py | 964f66a2f9b7b9d044a6331b35f7a3df04c15cf0 | [] | no_license | bbanerjee/ParSim | 0b05f43cff8e878658dc179b4a604eabd873f594 | 87f87816b146f40013a5e6648dfe20f6d2d002bb | refs/heads/master | 2023-04-27T11:30:36.252023 | 2023-04-13T22:04:50 | 2023-04-13T22:04:50 | 13,608,512 | 16 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,849 | py | from J2TestSuite_PostProcUtils import *
from J2YieldSurfaceUtils import *
def hydrostaticLoadUnload(uda_path, save_path,**kwargs):
print("Post Processing Test: 07 - hydrostatic Compression With Loading and Unloading")
# Read the stress simulation data
times, sigmas, sigma_a_sim, sigma_r_sim, sigma_ar_sim, pp_sim, qq_sim = readSimStressData(uda_path, matID = 0)
# Set up time points
analytical_times = np.linspace(0.0, times[-1], 15)
# Read the interval variable simulation data
ev_e_list, ev_p_list, times_list, ev_e_sim, ev_p_sim = \
getInternalVariables(uda_path, analytical_times, matID = 0)
# Get the model parameters
material_dict = get_yield_surface_data(uda_path)
param_text = material_dict['material string']
elastic_table = getJSONTable(material_dict['elastic_filename'])
yield_table = getJSONTable(material_dict['yield_filename'])
hydrostat_table = getJSONTable('DrySand_HydrostatData.json')
# Extract the data from the hydrostat table
ev_hydrostat = hydrostat_table['TotalStrainVol']
pbar_hydrostat = hydrostat_table['Pressure']
# Get snapshots of pq data
t_sim_snap, p_sim_snap = getDataTimeSnapshots(analytical_times, times, pp_sim)
t_sim_snap, q_sim_snap = getDataTimeSnapshots(analytical_times, times, qq_sim)
# Find the plot limits
Sxx = []
Syy = []
for sigma in sigmas:
Sxx.append(sigma[0][0])
Syy.append(sigma[1][1])
# Find min/max values
Sxx_min = min(Sxx)
Syy_min = min(Syy)
Sxx_max = max(Sxx)
Syy_max = max(Syy)
print("Sxx_min = ", Sxx_min)
print("Sxx_max = ", Sxx_max)
print("Syy_min = ", Syy_min)
print("Syy_max = ", Syy_max)
###PLOTTING
formatter = ticker.FormatStrFormatter('$\mathbf{%g}$')
param_text = material_dict['material string']
#----------------------------------------------------------------
# Plot the yield surface
#----------------------------------------------------------------
# Set up figure
fig1 = plt.figure(1)
plt.clf()
#plt.subplots_adjust(right=0.75)
#plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='xx-small')
# Plot p vs. q simulation results
eqShear_vs_meanStress(pp_sim, qq_sim)
# Plot filled circles at time snapshots
for ii in range(0, len(t_sim_snap)):
# Choose the Paired colormap
plt_color = cm.Paired(float(ii)/len(t_sim_snap))
plt.plot(p_sim_snap[ii], q_sim_snap[ii], 'o', color=plt_color)
# Plot yield surfaces
pmin = min(pp_sim)
pmax = max(pp_sim)
qmax = max(map(lambda q : abs(q), qq_sim))
plotPQYieldSurfaceSim(plt, material_dict, yield_table,
ev_e_list, ev_p_list, times_list,
pmin, pmax, qmax)
savePNG(save_path+'/HydrostaticLoadUnload_yield_surface','1280x960')
#plt.show()
#---------------------------------------------------------------------------------
# Plot experimental and simulation data as a function of time
fig2 = plt.figure(2)
plt.clf()
#plt.subplots_adjust(right=0.75)
#plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='xx-small')
plotSimDataSigmaTime(fig2, analytical_times, times, sigma_a_sim, sigma_r_sim, sigma_ar_sim,
'$\sigma_{xx}$ (sim)', '$\sigma_{yy}$ (sim)',
'$\sigma_{xy}$ (sim)')
axes = plt.gca()
axes.xaxis.set_major_formatter(formatter)
axes.yaxis.set_major_formatter(formatter)
plt.xlabel(str_to_mathbf('Time (sec)'))
plt.ylabel(str_to_mathbf('Stress (Pa)'))
plt.grid(True)
plt.legend(loc='best', prop={'size':10})
savePNG(save_path+'/HydrostaticLoadUnload_sigma_time','1280x960')
fig3 = plt.figure(3)
plt.clf()
plotSimDataSigmaEps(fig3, analytical_times, times, pp_sim, ev_e_sim, ev_p_sim,
compression = 'positive')
plt_color = cm.Paired(1)
plt.plot(ev_hydrostat, pbar_hydrostat, '-', color=plt_color, label='Experimental data')
axes = plt.gca()
axes.xaxis.set_major_formatter(formatter)
axes.yaxis.set_major_formatter(formatter)
axes.set_xlim([0, 0.5])
axes.set_ylim([0, 1.2*max(pbar_hydrostat)])
plt.xlabel(str_to_mathbf('Strain '))
plt.ylabel(str_to_mathbf('Stress (Pa)'))
plt.grid(True)
plt.legend(loc='best', prop={'size':10})
savePNG(save_path+'/HydrostaticLoadUnload_pbar_evbar','1280x960')
plt.show()
#fig3 = plt.figure(3)
#plt.clf()
##plt.subplots_adjust(right=0.75)
##plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='xx-small')
#plotSimDataPQTime(fig3, analytical_times, times, pp_sim, qq_sim)
#axes = plt.gca()
#axes.xaxis.set_major_formatter(formatter)
#axes.yaxis.set_major_formatter(formatter)
#plt.xlabel(str_to_mathbf('Time (sec)'))
#plt.ylabel(str_to_mathbf('Stress (Pa)'))
#plt.grid(True)
#plt.legend(loc='best', prop={'size':8})
#savePNG(save_path+'/HydrostaticLoadUnload_pq_time','1280x960')
#plt.show()
| [
"b.banerjee.nz@gmail.com"
] | b.banerjee.nz@gmail.com |
4b17c48950b5703e184c9e5989c37bc617dbc282 | 796f96e8c01db0bb9493c4871be66d92689b73ab | /백준특강대비/7551 pqr(못품)/beakjun3.py | a68b609e1ab473b2ce655f7828e8e1e058986c16 | [] | no_license | swhan9404/swExpertAcademy | cf82b957b7ea6231d1f4b95f77f74e7717d2de0d | dea4176229121983c6daed80811de20de6da5ff6 | refs/heads/master | 2023-04-19T22:57:37.111757 | 2021-05-02T14:58:44 | 2021-05-02T14:58:44 | 337,338,058 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | import math
# Returns the factor that arr[r] must still supply so that
# arr[i] * arr[j] * arr[r] becomes a multiple of K.
# (Translated from the original Korean comment.)
def getVal(i, j):
    # Strip from K the prime factors already covered by arr[i], then by
    # arr[j]; the remainder must divide arr[r].
    # Reads the module-level globals K and arr.
    val = K // math.gcd(K, arr[i])
    val = val // math.gcd(val, arr[j])
    return val
def pqr(N, arr):
    # Count index triples p < q < r whose product arr[p]*arr[q]*arr[r] is
    # divisible by K. (Comments translated from Korean.)
    # NOTE(review): the `arr` parameter shadows the module-level `arr` that
    # getVal() reads; both refer to the same list at runtime.
    # key : the arr[r] factor still required by a (p, q) pair
    # val : how many (p, q) pairs require it
    pq_dic = {}
    pq_dic[getVal(0, 1)] = 1
    count = 0
    for r in range(2, N):
        for pq in pq_dic:
            # arr[r] must be a multiple of pq, i.e. gcd(pq, arr[r]) == pq.
            if pq == math.gcd(pq, arr[r]):
                count += pq_dic[pq]
        # Treat r as the new q and every i < r as p: record the factor each
        # new (p, q) pair still needs (increment if present, else start at 1).
        for i in range(0, r):
            if getVal(r, i) not in pq_dic:
                pq_dic[getVal(r, i)] = 1
            else:
                pq_dic[getVal(r, i)] += 1
    return count
# Input: N and K on the first line, then the N array values on the second.
N, K = map(int, input().split())
arr = list(map(int, input().split()))
print(pqr(N, arr))
| [
"swhan9404@naver.com"
] | swhan9404@naver.com |
d1f59b1f9aa84f499108cd6bef7c8587bd0cd50d | 028d788c0fa48a8cb0cc6990a471e8cd46f6ec50 | /Python-Web/pythons/pythons/settings.py | 93838833498a2b5c20877f91d18fe4b3b7bcc5c9 | [] | no_license | Sheko1/SoftUni | d6b8e79ae545116f4c0e5705ad842f12d77a9c9d | a9fbeec13a30231b6a97c2b22bb35257ac1481c0 | refs/heads/main | 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,159 | py | """
Django settings for pythons project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.urls import reverse_lazy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from an environment variable before any real deployment.
SECRET_KEY = 't+csd-v4wvmxou7t^&2kb3t_3*k+u!53ele6wb_bt#z$f3xnb+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts/domains this site may serve; must be filled in when DEBUG is False.
ALLOWED_HOSTS = []
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'pythons.pythons_app',
'pythons.pythons_auth',
'pythons.profiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pythons.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pythons.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_URL = reverse_lazy('login')
| [
"martinkypar@gmail.com"
] | martinkypar@gmail.com |
b1b5472d4feb7ec07917f68036bc5806fb608af3 | d963fb56dbb92cc7317c0a042c9059239ebaa028 | /problems/AE59.py | f68623c9ff4659b56d6c696b45c4688cf85ec956 | [] | no_license | ClaudioCarvalhoo/you-can-accomplish-anything-with-just-enough-determination-and-a-little-bit-of-luck | 20572bde5482ddef379506ce298c21dd5e002492 | df287ed92a911de49ed4bc7ca5a997d18a96c3f6 | refs/heads/master | 2023-06-02T16:27:57.994351 | 2021-06-24T23:16:47 | 2021-06-24T23:16:47 | 284,845,707 | 1 | 0 | null | 2020-10-29T21:35:30 | 2020-08-04T01:24:23 | Python | UTF-8 | Python | false | false | 554 | py | # Sum of Linked Lists
# O(max(n, m))
# n = len(linkedListOne) | m = len(linkedListTwo)
def sumOfLinkedLists(linkedListOne, linkedListTwo):
res = LinkedList(None)
curRes = res
curOne = linkedListOne
curTwo = linkedListTwo
carry = 0
while curOne or curTwo or carry > 0:
val1 = 0
val2 = 0
if curOne:
val1 = curOne.value
curOne = curOne.next
if curTwo:
val2 = curTwo.value
curTwo = curTwo.next
sumVal = val1 + val2 + carry
carry = sumVal // 10
curRes.next = LinkedList(sumVal % 10)
curRes = curRes.next
return res.next | [
"carvalhooclaudio@gmail.com"
] | carvalhooclaudio@gmail.com |
11d61fa58038c3e79014caf950f9575522104906 | f366c19ce822a3e8f3cd5f670b25c6fa54322d0b | /python_udemy/introducao-python/combinations-permutations-product.py | 8d169448eb5230b183f3e546f5c00d63b9fb39ec | [] | no_license | marcelomatz/py-studiesRepo | b83875a366010c9a60bc15d853fcf81c31cee260 | ce99014228f00d8c73cc548dd6c4d5fedc3f1b68 | refs/heads/main | 2023-09-05T00:03:47.712289 | 2021-06-15T09:43:27 | 2021-06-15T09:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """
Combinations, Permutations e Product - Itertools
Combinação - Ordem não importa
Permutação - Ordem importa
Ambos não repetem valores únicos
Produto - Ordem importa e repete valores únicos
"""
from itertools import combinations, permutations, product
# pessoas = ['Marcelo', 'André', 'Ana', 'João', 'Larissa', 'Silvia']
# for grupo in product(pessoas, repeat=2):
# print(grupo)
# Three groups of three people; print every 3-person combination drawn from
# the nine (order does not matter, no repeats).
grupo_1 = ['Pessoa 1', 'Pessoa 2', 'Pessoa 3']
grupo_2 = ['Pessoa 4', 'Pessoa 5', 'Pessoa 6']
grupo_3 = ['Pessoa 7', 'Pessoa 8', 'Pessoa 9']
todos = [*grupo_1, *grupo_2, *grupo_3]
for trio in combinations(todos, 3):
    print(trio)
| [
"agenciahipster@gmail.com"
] | agenciahipster@gmail.com |
c5da74564f339261ee7acfb82368cc4a5ef17348 | 88dfb9a6ce6a094e5b61deeed3d05ee043067a27 | /Activities/migrations/0011_auto_20160406_1100.py | 3505062bd39317eadfc61a58b2eb571977f9c4cd | [] | no_license | jsyx1994/pinu | 9aebcf4dbc454b3bf19a4b0d6fb355e02409edc3 | ac79d1fcecb13b260dfa3570240a0a86380b2161 | refs/heads/master | 2021-01-10T09:26:52.436373 | 2019-07-23T02:22:32 | 2019-07-23T02:22:32 | 55,506,885 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-06 03:00
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.5): alter Activity.advocator to a
    OneToOneField pointing at settings.AUTH_USER_MODEL with CASCADE delete
    and reverse accessor name 'advocator'."""
    dependencies = [
        ('Activities', '0010_auto_20160406_1050'),
    ]
    operations = [
        migrations.AlterField(
            model_name='activity',
            name='advocator',
            field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='advocator', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"526593251@qq.com"
] | 526593251@qq.com |
06977c84ef4943b2f2b9de193047098885436e9c | 6a0930a8762caa3aff74d376a38d581d25240189 | /backend/course/api/v1/serializers.py | da73f0615ea1e9f2628834db782765a74df1ec3a | [] | no_license | crowdbotics-apps/teset101-22014 | d26c1b64bfbe6fa6f7a55074f0bfcd0c7b603eda | dd18df5fc13b428ec6888005bc343120ac8d166b | refs/heads/master | 2023-01-06T05:45:54.757818 | 2020-10-27T18:45:06 | 2020-10-27T18:45:06 | 307,771,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | from rest_framework import serializers
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
# Thin DRF ModelSerializers: each exposes every field of its model via
# fields = "__all__". NOTE(review): "__all__" will silently serialize any
# field added to the model later; list fields explicitly if that matters.
class EventSerializer(serializers.ModelSerializer):
    class Meta:
        model = Event
        fields = "__all__"
class ModuleSerializer(serializers.ModelSerializer):
    class Meta:
        model = Module
        fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
    class Meta:
        model = PaymentMethod
        fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
    class Meta:
        model = Category
        fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
    class Meta:
        model = SubscriptionType
        fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
    class Meta:
        model = Subscription
        fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
    class Meta:
        model = Recording
        fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
    class Meta:
        model = Course
        fields = "__all__"
class LessonSerializer(serializers.ModelSerializer):
    class Meta:
        model = Lesson
        fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
    class Meta:
        model = Enrollment
        fields = "__all__"
class GroupSerializer(serializers.ModelSerializer):
    class Meta:
        model = Group
        fields = "__all__"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7bbc7e4c9690bf90ffb4bec114c9d97a27a627f5 | c8de76f87ef929e08b018bae5de20d3b3d8f9f78 | /santosCua/settings.py | 271c4d96a141bf9b4b88b7d91ad3a1025b2e06d2 | [] | no_license | hanztura/santos-cua | 205dc3052f4107cbc3e819e25b4f913c890356ec | 5be4007d0d75e5d5438b4e1f1e6793d60c4e34ec | refs/heads/master | 2021-08-24T16:06:25.085178 | 2017-11-29T16:40:52 | 2017-11-29T16:40:52 | 111,128,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,140 | py | """
Django settings for santosCua project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to version control -- rotate it
# and load it from an environment variable before any real deployment.
SECRET_KEY = '!fvp$+2*fcfihcca&2+4_u+1a076f!&a_pf%fmb83beop1k$ov'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# '*' accepts any Host header; restrict to real hostnames in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'grappelli',
'django.contrib.admindocs',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'contacts.apps.ContactsConfig',
'employees.apps.EmployeesConfig',
'bir.apps.BirConfig',
'compliance.apps.ComplianceConfig',
'public.apps.PublicConfig',
'events.apps.EventsConfig',
]
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name; 1.10+
# prefers MIDDLEWARE (new-style middleware). It still works on 1.11 but is
# deprecated there.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Project middleware: forces login except for LOGIN_EXEMPT_URLS below.
    'public.middleware.LoginRequiredMiddleware',
]
ROOT_URLCONF = 'santosCua.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS' : [
os.path.join(BASE_DIR, "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'santosCua.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Manila'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'/var/www/static/',
]
# templates
# https://stackoverflow.com/questions/22595916/how-do-you-extend-a-django-project-root-html-file-in-project-apps
#grappelli
GRAPPELLI_ADMIN_TITLE = 'SantosCua'
# https://simpleisbetterthancomplex.com/tutorial/2016/08/01/how-to-upload-files-with-django.html
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# http://onecreativeblog.com/post/59051248/django-login-required-middleware
LOGIN_URL = '/login/'
LOGIN_EXEMPT_URLS = [
r'^$',
r'^home/', # home url*
] | [
"hctura.official@gmail.com"
] | hctura.official@gmail.com |
fc59bc762240df0da1577655afc2b35089481cc0 | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/equipment/EquipmentPfcMmuCap.py | a58342d74435dcb78bd5264dc18854db4e11d29c | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,385 | py | """This module contains the general information for EquipmentPfcMmuCap ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentPfcMmuCapConsts():
    # Allowed constant values for EquipmentPfcMmuCap properties
    # (auto-generated UCS SDK code; values must match the UCSM model).
    INT_ID_NONE = "none"
    POLICY_OWNER_LOCAL = "local"
    POLICY_OWNER_PENDING_POLICY = "pending-policy"
    POLICY_OWNER_POLICY = "policy"
class EquipmentPfcMmuCap(ManagedObject):
"""This is EquipmentPfcMmuCap class."""
consts = EquipmentPfcMmuCapConsts()
naming_props = set([])
mo_meta = MoMeta("EquipmentPfcMmuCap", "equipmentPfcMmuCap", "pfc-mmu-cap", VersionMeta.Version311e, "InputOutput", 0xff, [], [""], [u'equipmentSwitchCapProvider'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version311e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"fcoe": MoPropertyMeta("fcoe", "fcoe", "byte", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
"fcoe_jumbo_nodrop": MoPropertyMeta("fcoe_jumbo_nodrop", "fcoeJumboNodrop", "byte", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
"fcoe_normal_nodrop": MoPropertyMeta("fcoe_normal_nodrop", "fcoeNormalNodrop", "byte", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version311e, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"jumbo_breakout_port": MoPropertyMeta("jumbo_breakout_port", "jumboBreakoutPort", "byte", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["local", "pending-policy", "policy"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, 0x40, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"fcoe": "fcoe",
"fcoeJumboNodrop": "fcoe_jumbo_nodrop",
"fcoeNormalNodrop": "fcoe_normal_nodrop",
"intId": "int_id",
"jumboBreakoutPort": "jumbo_breakout_port",
"name": "name",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the managed object with every tracked property cleared;
        kwargs are forwarded to ManagedObject.__init__ along with the
        class name and parent MO/DN."""
        self._dirty_mask = 0
        self.child_action = None
        self.descr = None
        self.fcoe = None
        self.fcoe_jumbo_nodrop = None
        self.fcoe_normal_nodrop = None
        self.int_id = None
        self.jumbo_breakout_port = None
        self.name = None
        self.policy_level = None
        self.policy_owner = None
        self.sacl = None
        self.status = None
        ManagedObject.__init__(self, "EquipmentPfcMmuCap", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
026b94cfbca00fbfec261053f3cf317886e7e818 | 185169048d1b0b584fdef39c9505f8ea6255b7c3 | /tests/unit/test_serializers.py | a1f5e37c3bc02ad6701b30e239c4c183b9373de3 | [
"BSD-2-Clause"
] | permissive | sdbht/requests-cache | 410baad3910348fa586fc0bc40c82a9bcf00a861 | 73cc36133b7eb3d4f714b2199042d7b6fc6bd902 | refs/heads/master | 2023-08-11T02:52:38.137694 | 2021-10-10T19:05:53 | 2021-10-10T19:05:53 | 416,177,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | # Note: Almost all serializer logic is covered by parametrized integration tests.
# Any additional serializer-specific tests can go here.
import json
import sys
from importlib import reload
from unittest.mock import patch
from uuid import uuid4
import pytest
from itsdangerous import Signer
from itsdangerous.exc import BadSignature
from requests_cache import CachedResponse, CachedSession, safe_pickle_serializer
def test_stdlib_json():
    # Masking ujson out of sys.modules forces the reload of preconf to fall
    # back to the stdlib json module.
    import requests_cache.serializers.preconf
    with patch.dict(sys.modules, {'ujson': None, 'cattr.preconf.ujson': None}):
        reload(requests_cache.serializers.preconf)
        from requests_cache.serializers.preconf import json as module_json
        assert module_json is json
    # Reload again outside the patch to restore normal state for other tests.
    reload(requests_cache.serializers.preconf)
def test_ujson():
    # With ujson installed (as in the test environment), preconf should pick
    # it over the stdlib json module.
    import ujson
    from requests_cache.serializers.preconf import json as module_json
    assert module_json is ujson
def test_optional_dependencies():
    # Hide bson/itsdangerous/yaml: the serializer objects should still import,
    # but raise ImportError only when actually used.
    import requests_cache.serializers.preconf
    with patch.dict(sys.modules, {'bson': None, 'itsdangerous': None, 'yaml': None}):
        reload(requests_cache.serializers.preconf)
        from requests_cache.serializers.preconf import (
            bson_serializer,
            safe_pickle_serializer,
            yaml_serializer,
        )
        for obj in [bson_serializer, yaml_serializer]:
            with pytest.raises(ImportError):
                obj.dumps('')
        with pytest.raises(ImportError):
            safe_pickle_serializer('')
    # Restore the module for subsequent tests.
    reload(requests_cache.serializers.preconf)
def test_cache_signing(tempfile_path):
    # A signed serializer should round-trip with the same key and refuse to
    # deserialize with a different one.
    serializer = safe_pickle_serializer(secret_key=str(uuid4()))
    session = CachedSession(tempfile_path, serializer=serializer)
    assert isinstance(session.cache.responses.serializer.steps[-1].obj, Signer)
    # Simple serialize/deserialize round trip
    response = CachedResponse()
    session.cache.responses['key'] = response
    assert session.cache.responses['key'] == response
    # Without the same signing key, the item shouldn't be considered safe to deserialize
    serializer = safe_pickle_serializer(secret_key='a different key')
    session = CachedSession(tempfile_path, serializer=serializer)
    with pytest.raises(BadSignature):
        session.cache.responses['key']
| [
"jordan.cook@pioneer.com"
] | jordan.cook@pioneer.com |
0047bc2f0d1817d7ac10b9e6da28ca7d17faecf1 | fe8694029e6a3ebd4da74ddde71c2aea8664b615 | /setup.py | d8145bec8148ea5e4d624f7f76548ae63bdcd1b2 | [
"MIT"
] | permissive | sveetch/PO-Projects-client | 5cb50456b616ad5a8ab745eb0c43322361dd608f | e553cb2a8da9c113d3f4b605f156a8d150a2dae2 | refs/heads/master | 2021-01-16T01:01:48.617778 | 2014-07-26T14:56:11 | 2014-07-26T14:56:11 | 19,954,579 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | from setuptools import setup, find_packages
setup(
name='PO-Projects-client',
version=__import__('po_projects_client').__version__,
description=__import__('po_projects_client').__doc__,
long_description=open('README.rst').read(),
author='David Thenon',
author_email='dthenon@emencia.com',
url='http://pypi.python.org/pypi/PO-Projects-client',
license='MIT',
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'nap >= 1.0.0',
'pkginfo >= 1.2b1',
'argparse==1.2.1',
'argcomplete==0.8.0',
'argh==0.24.1',
],
entry_points={
'console_scripts': [
'po_projects = po_projects_client.cli:main',
]
},
include_package_data=True,
zip_safe=False
) | [
"sveetch@gmail.com"
] | sveetch@gmail.com |
4c4f472a3c2af26ec95db14be4693f1ed730c4e2 | 8d715aba85829ec824e119c83f324336f6a11883 | /arxiv/canonical/record/tests/test_listing.py | 22c5ed71a557e350598ac2c8359be3f128a4096e | [
"MIT"
] | permissive | arXiv/arxiv-canonical | 9155955075aaee5063ac5a6ff5ee99608ee6a128 | 407cb0b2cef83c7f653dabdf998e797b18475b13 | refs/heads/develop | 2023-04-13T14:59:59.167434 | 2023-03-17T17:18:12 | 2023-03-17T17:18:12 | 160,495,479 | 5 | 4 | MIT | 2023-02-07T23:16:15 | 2018-12-05T09:43:32 | Python | UTF-8 | Python | false | false | 3,794 | py | import io
import json
import os
import tempfile
from datetime import datetime
from pytz import UTC
from typing import IO
from unittest import TestCase, mock
import jsonschema
from ..core import RecordEntry
from ..listing import RecordListing
from ..version import RecordVersion, D
def fake_dereferencer(uri: D.URI) -> IO[bytes]:
    """Fake dereferencer: wrap the URI in a recognisable bytes payload."""
    payload = b'fake content for ' + uri.encode('utf-8')
    return io.BytesIO(payload)
class TestRecordListing(TestCase):
"""RecordListing provides keys and serialization for Listings."""
SCHEMA_PATH = os.path.abspath('schema/resources/Listing.json')
def setUp(self):
"""We have a Listing..."""
with open(self.SCHEMA_PATH) as f:
self.schema = json.load(f)
self.resolver = jsonschema.RefResolver(
'file://%s/' % os.path.abspath(os.path.dirname(self.SCHEMA_PATH)),
None
)
self.identifier = D.VersionedIdentifier('2901.00345v1')
self.created = datetime(2029, 1, 29, 20, 4, 23, tzinfo=UTC)
self.listing_id = D.ListingIdentifier.from_parts(self.created.date(),
'foo')
self.version = D.Version(
identifier=self.identifier,
announced_date=self.created.date(),
announced_date_first=self.created.date(),
submitted_date=self.created,
updated_date=self.created,
is_announced=True,
events=[],
previous_versions=[],
metadata=D.Metadata(
primary_classification=D.Category('cs.DL'),
secondary_classification=[D.Category('cs.IR')],
title='Foo title',
abstract='It is abstract',
authors='Ima N. Author (FSU)',
license=D.License(href="http://some.license")
),
source=D.CanonicalFile(
filename='2901.00345v1.tar',
modified=self.created,
size_bytes=4_304,
content_type=D.ContentType.tar,
ref=D.URI('/fake/path.tar'),
is_gzipped=False,
),
render=D.CanonicalFile(
filename='2901.00345v1.pdf',
modified=self.created,
size_bytes=404,
content_type=D.ContentType.pdf,
ref=D.URI('/fake/path.pdf')
)
)
self.event = D.Event(
identifier=self.identifier,
event_date=self.created,
event_type=D.EventType.NEW,
categories=[D.Category('cs.DL')],
version=self.version
)
self.listing = D.Listing(self.listing_id, [self.event])
def test_from_domain(self):
"""Can load a RecordListing from a Listing domain object."""
record = RecordListing.from_domain(self.listing)
self.assertEqual(
record.key,
'arxiv:///announcement/2029/01/29/2029-01-29-foo.json',
'Key for listing file is generated correctly'
)
self.assertEqual(record.key, record.stream.domain.ref)
self.assertEqual(record.stream.content_type, D.ContentType.json,
'Correctly identified as a JSON resource')
def test_schema(self):
"""Serialized record is schema compliant."""
record = RecordListing.from_domain(self.listing)
raw = json.load(record.stream.content)
jsonschema.validate(raw, self.schema, resolver=self.resolver)
def test_to_domain(self):
"""Re-casting to domain should preserve state."""
record = RecordListing.from_domain(self.listing)
self.assertEqual(RecordListing.to_domain(record.stream), self.listing)
| [
"brp53@cornell.edu"
] | brp53@cornell.edu |
251b3c32c213565592f609af6280166f5120fd92 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/Rajnikanth/gapac_1.py | 528a6b43071f83547a1c014b5e3dd254e13977cf | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 482 | py | t = input()
for T in xrange(1,t+1):
print "Case #"+str(T)+":",
s = list(raw_input())+['#']
temp = 0
ans = 0
while(s[0]=='+'):
s.pop(0)
temp+=1
while(s[0]!='#'):
x = 0
while(s[0]=='-'):
s.pop(0)
x+=1
if(temp!=0):
ans+=2
else:
ans+=1
temp+=x
while(s[0]=='+'):
s.pop(0)
temp+=1
print ans
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
b57f0971021b54e3944fb18c60bd6f3b7bf72742 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03778/s021239120.py | 879487fb7a7e0b76dfc5b0b89e7933a2b9266b7d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | import sys
# Fast-input helpers for competitive programming.
input = sys.stdin.readline
def I(): return int(input())
def MI(): return map(int, input().split())
def LI(): return list(map(int, input().split()))
def main():
    """Read W, a, b and print max(0, b - (a + W), a - (b + W))."""
    mod = 10 ** 9 + 7  # kept for parity with the original template (unused)
    W, a, b = MI()
    gap_right = max(0, b - (a + W))
    gap_left = max(0, a - (b + W))
    print(max(gap_right, gap_left))
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f9a48e523e0af79139a143e1dcde6296e0a697b5 | 7283ffa8da2f64a419218af0329b43fb055c4694 | /src/classify_softmax.py | e090acdc564e8375bec795f0a94ae6723b9e50e0 | [
"MIT"
] | permissive | modanesh/facenet | 996b11df5617a849ac9dab81f782a6aa99d6b83f | 0ca5a667b7168caada340dd1c7dbe36cead9fc72 | refs/heads/master | 2021-09-19T02:29:29.399713 | 2018-07-22T08:24:19 | 2018-07-22T08:24:19 | 112,828,989 | 1 | 0 | null | 2017-12-02T09:18:35 | 2017-12-02T09:18:35 | null | UTF-8 | Python | false | false | 4,947 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import Counter
import shutil
from scipy import misc
import tensorflow as tf
import numpy as np
import os
import facenet
import sys
np.set_printoptions(threshold=np.nan)
def main(args):
    """Run the loaded FaceNet-style model over a validation set in batches,
    collect per-image (path, predicted-index, score) triples, and report
    classification accuracy via get_label()."""
    image_predictions = []
    with tf.Graph().as_default():
        sess = tf.Session()
        facenet.load_model(args.model_dir, sess)
        validation_set = facenet.get_dataset(args.data_dir)
        image_list, label_list = facenet.get_image_paths_and_labels(validation_set)
        # Run forward pass to calculate embeddings
        nrof_images = len(image_list)
        batch_size = args.batch_size
        # NOTE(review): when nrof_images is an exact multiple of batch_size
        # the final batch slice is empty -- confirm load_data tolerates it.
        nrof_batches = (nrof_images // batch_size) + 1
        for i in range(nrof_batches):
            if i == nrof_batches - 1:
                n = nrof_images
            else:
                n = i * batch_size + batch_size
            images = facenet.load_data(image_list[i * batch_size:n], False, False, args.image_size, do_prewhiten=True)
            # Get input and output tensors
            # (loop-invariant graph lookups; could be hoisted above the loop)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            logits = tf.get_default_graph().get_tensor_by_name("Logits/BiasAdd:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # NOTE(review): despite the file name, scores use sigmoid, not
            # softmax -- confirm this matches the trained head.
            scores = tf.nn.sigmoid(logits)
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            probs = sess.run(scores, feed_dict=feed_dict)
            print('Completed batch', i + 1, 'of', nrof_batches)
            name = image_list[i * batch_size:n]
            for j in range(len(probs)):
                # Keep the argmax index and its score as strings for get_label().
                indice = str(np.argmax(probs[j]))
                value = str(np.amax(probs[j]))
                image_predictions.append((str(name[j]), indice, value))
        accuracy = get_label(image_predictions, args.map_file)
        print("Classification accuracy: ", accuracy)
def to_rgb(img):
    """Replicate a 2-D grayscale image across three channels as uint8 RGB."""
    rows, cols = img.shape  # unpack enforces a 2-D input, as before
    channel = img.astype(np.uint8)
    return np.repeat(channel[:, :, np.newaxis], 3, axis=2)
def prewhiten(x):
    """Zero-mean, unit-variance normalisation of an array.

    The standard deviation is clamped from below at 1/sqrt(x.size) so a
    (near-)constant input does not produce a huge scale factor.
    """
    centered = x - np.mean(x)
    std_floor = 1.0 / np.sqrt(x.size)
    return centered / np.maximum(np.std(x), std_floor)
def get_label(preds, map_file):
    """Resolve each prediction's class index to a name via the mapping file
    and return the fraction classified correctly.

    NOTE(review): raises ZeroDivisionError when preds is empty; the elif and
    else branches below are identical and could be merged.
    """
    correct_counter, wrong_counter = 0, 0
    info = match_category_name(map_file)
    for flnm, indc, acc in preds:
        # All mapping entries matching this index, best (highest file_counter) first.
        label_indice = map_labels(indc, map_file)
        label_indice = sorted(label_indice, key=lambda x: int(x[2]), reverse=True)
        if len(label_indice) == 0:
            print("The file: " + str(flnm) + " is unknown.")
            name = "unknown"
        elif len(label_indice) == 1:
            print("The file: " + str(flnm) + " is classified as: " + str(label_indice[0][0]))
            name = str(label_indice[0][0])
        else:
            print("The file: " + str(flnm) + " is classified as: " + str(label_indice[0][0]))
            name = str(label_indice[0][0])
        # Ground-truth category = second-to-last path component of the file path.
        category = str(flnm).split("\t")[0].split("/")[-2]
        if (category, name) in info:
            correct_counter += 1
        elif category == "unknown" and name == "unknown":
            correct_counter += 1
        else:
            wrong_counter += 1
    return correct_counter / (correct_counter + wrong_counter)
def match_category_name(map_file_dir):
    """Parse the mapping file and return its (category, name) pairs.

    Only lines beginning with 'm' (category identifiers) are kept; fields
    are tab-separated with the category first and the label second.

    :param map_file_dir: path to the tab-separated mapping file
    :return: list of (category, name) string tuples
    """
    info = []
    # Context manager fixes the original resource leak (file was never closed).
    with open(map_file_dir) as map_file:
        for line in map_file:
            if line.startswith("m"):
                fields = line.split("\t")
                info.append((fields[0], fields[1]))
    return info
def map_labels(indc, map_file):
    """Return (label, indice, file_counter) triples whose index matches.

    Scans the tab-separated mapping file for lines starting with 'm' whose
    third field equals ``indc``.

    :param indc: class index (string) to match against the third column
    :param map_file: path to the tab-separated mapping file
    :return: list of (label, indice, file_counter) string tuples
    """
    labels = []
    # 'with' closes the file (originally leaked) and avoids shadowing the
    # path parameter with the open file object.
    with open(map_file) as handle:
        for line in handle.read().splitlines():
            if line.startswith("m"):
                fields = line.split("\t")
                if fields[2] == indc:
                    labels.append((fields[1], fields[2], fields[3]))
    return labels
def parse_arguments(argv):
    """Build the CLI parser and parse *argv* (excluding the program name)."""
    parser = argparse.ArgumentParser()
    # Required positional arguments.
    parser.add_argument('data_dir', type=str,
                        help='Path to the data directory containing test images.')
    parser.add_argument('model_dir', type=str,
                        help='Path to the data directory containing the meta_file and ckpt_file.')
    parser.add_argument('map_file', type=str,
                        help='Path to the mapping file.')
    # Optional tuning knobs.
    parser.add_argument('--batch_size', default=90, type=int,
                        help='Number of images to process in a batch.')
    parser.add_argument('--image_size', default=160, type=int,
                        help='Size of input images.')
    return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:])) | [
"mohamad4danesh@gmail.com"
] | mohamad4danesh@gmail.com |
0610b2da12be032cca602030b81b69f1803b8461 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/sbysiz002/question1.py | 27f32f4b6107608292622328cb7af08af88b754c | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | x=eval(input("Enter a year:\n"))
# Leap year iff divisible by 400, or by 4 but not by 100; parentheses make
# the operator precedence explicit ('and' binds tighter than 'or').
if x % 400 == 0 or (x % 4 == 0 and x % 100 != 0):
    print(x, "is a leap year.")
else:
    print(x, "is not a leap year.")
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
9411e951e27533f6611fd1c881d52275eaa24cd8 | 0c70dcec22a090e70b1f20613ea6e0a64fd9a037 | /GPS卫星位置的计算/venv/Lib/site-packages/pandas/io/sas/sasreader.py | 370a8baa5b3047371689818f36dbdc312d90aa46 | [
"MIT"
] | permissive | payiz-asj/Gis | 82c1096d830878f62c7a0d5dfb6630d4e4744764 | 3d315fed93e2ab850b836ddfd7a67f5618969d10 | refs/heads/main | 2023-06-27T15:25:17.301154 | 2021-08-03T10:02:58 | 2021-08-03T10:02:58 | 392,269,853 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,154 | py | """
Read SAS sas7bdat or xport files.
"""
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Optional, Union, overload
from pandas._typing import FilePathOrBuffer, Label
from pandas.io.common import stringify_path
if TYPE_CHECKING:
from pandas import DataFrame # noqa: F401
# TODO(PY38): replace with Protocol in Python 3.8
class ReaderBase(metaclass=ABCMeta):
    """
    Protocol for XportReader and SAS7BDATReader classes.
    """
    # Concrete readers yield DataFrame chunks; nrows=None presumably reads
    # the remainder of the file -- confirm against the subclasses.
    @abstractmethod
    def read(self, nrows=None):
        pass
    # Release the underlying file handle/resources.
    @abstractmethod
    def close(self):
        pass
@overload
def read_sas(
filepath_or_buffer: FilePathOrBuffer,
format: Optional[str] = ...,
index: Optional[Label] = ...,
encoding: Optional[str] = ...,
chunksize: int = ...,
iterator: bool = ...,
) -> ReaderBase:
...
@overload
def read_sas(
filepath_or_buffer: FilePathOrBuffer,
format: Optional[str] = ...,
index: Optional[Label] = ...,
encoding: Optional[str] = ...,
chunksize: None = ...,
iterator: bool = ...,
) -> Union["DataFrame", ReaderBase]:
...
def read_sas(
filepath_or_buffer: FilePathOrBuffer,
format: Optional[str] = None,
index: Optional[Label] = None,
encoding: Optional[str] = None,
chunksize: Optional[int] = None,
iterator: bool = False,
) -> Union["DataFrame", ReaderBase]:
"""
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.sas``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
format : str {'xport', 'sas7bdat'} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
Returns
-------
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader
"""
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
"than a string name, you must specify a format string"
)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
raise ValueError(buffer_error_msg)
fname = filepath_or_buffer.lower()
if fname.endswith(".xpt"):
format = "xport"
elif fname.endswith(".sas7bdat"):
format = "sas7bdat"
else:
raise ValueError("unable to infer format of SAS file")
reader: ReaderBase
if format.lower() == "xport":
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(
filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize
)
elif format.lower() == "sas7bdat":
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(
filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize
)
else:
raise ValueError("unknown SAS format")
if iterator or chunksize:
return reader
data = reader.read()
reader.close()
return data
| [
"1778029840@qq.com"
] | 1778029840@qq.com |
6ce2b5a34b712b1e165327443aef8be25161aa5e | 79020987de8d1ea21fc3d83d965ec594640f5e66 | /Wind/Architectures/NBeatsArchitecture.py | 7fd70b478ee991cb8d59fa606033556790730777 | [] | no_license | bejar/Wind | 63f6de218cdcace8a4e207f3f5fc4b5f66502fee | a339737ec23d6f54090396eda7d8507c313a8c79 | refs/heads/master | 2021-11-08T22:47:03.363133 | 2021-10-21T09:37:16 | 2021-10-21T09:37:16 | 93,608,891 | 7 | 2 | null | 2020-10-02T05:58:53 | 2017-06-07T08:02:38 | HTML | UTF-8 | Python | false | false | 4,458 | py | """
.. module:: NBeatsArchitecture
NBeatsArchitecture
******
:Description: NBeatsArchitecture
Implementation of the NBeats architecture:
Authors: Oreshkin, Boris N and Carpov, Dmitri and Chapados, Nicolas and Bengio, Yoshua
Pub: International Conference on Learning Representations. 2019.
Link: https://openreview.net/forum?id=r1ecqn4YwB
:Authors:
bejar
:Version:
:Date: 25/03/2021
"""
__author__ = 'bejar'
from tensorflow.keras.layers import Dense, Flatten, Dropout, Input, Add, Subtract
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l1, l2
from Wind.Architectures.NNS2SArchitecture import NNS2SArchitecture
from Wind.Train.Activations import generate_activation
from Wind.Train.Layers import generate_recurrent_layer
__author__ = 'bejar'
class NBeatsArchitecture(NNS2SArchitecture):
    """
    N-BEATS architecture for sequence to sequence prediction.

    The network stacks `neblocks` groups of `niblocks` basic blocks. Each
    basic block emits a backcast (subtracted from its input before feeding
    the next block) and a forecast (accumulated across all blocks into the
    final prediction head).
    """
    modfile = None
    modname = 'NBeats'
    data_mode = ('2D', '2D')

    def main_block(self, input, neurons_input, neurons_forecast, neurons_backcast, activation, dropout):
        """
        Basic N-BEATS block: a four-layer MLP followed by two heads.

        :param input: input tensor of the block
        :param neurons_input: width of each of the four MLP layers
        :param neurons_forecast: width of the forecast head
        :param neurons_backcast: width of the backcast head
        :param activation: activation configuration for generate_activation
        :param dropout: dropout rate applied after the inner/head layers
        :return: tuple (input - backcast, forecast)
        """
        # Four fully connected layers (the first one has no dropout).
        input_block = Dense(neurons_input)(input)
        input_block = generate_activation(activation)(input_block)
        for i in range(3):
            input_block = Dense(neurons_input)(input_block)
            input_block = generate_activation(activation)(input_block)
            input_block = Dropout(rate=dropout)(input_block)
        # Forecast head
        forecast = Dense(neurons_forecast)(input_block)
        forecast = generate_activation(activation)(forecast)
        forecast = Dropout(rate=dropout)(forecast)
        # Backcast head
        backcast = Dense(neurons_backcast)(input_block)
        backcast = generate_activation(activation)(backcast)
        backcast = Dropout(rate=dropout)(backcast)
        # The residual (input minus backcast) feeds the next block.
        return Subtract()([input, backcast]), forecast

    def group_block(self, n_blocks, input, neurons_input, neurons_forecast, neurons_backcast, activation, dropout):
        """
        Chain of `n_blocks` basic blocks sharing a forecast accumulator.

        :return: tuple (residual output of the last block, summed forecasts)
        """
        block, forecast_sum = self.main_block(input, neurons_input, neurons_forecast, neurons_backcast, activation, dropout)
        for i in range(n_blocks - 1):
            block, forecast = self.main_block(block, neurons_input, neurons_forecast, neurons_backcast, activation, dropout)
            forecast_sum = Add()([forecast_sum, forecast])
        return block, forecast_sum

    def generate_model(self):
        """
        Model for NBeats architecture

        -------------
        json config:

        "arch": {
            "ninput": 64,
            "nforecast": 64,
            "nbackcast": 65,
            "nfull": 64,
            "niblocks": 3,
            "neblocks": 1,
            "dropout": 0.3,
            "activation": ["relu"],
            "mode":"NBeats"
        }

        :return:
        """
        neurons_input = self.config['arch']['ninput']
        neurons_forecast = self.config['arch']['nforecast']
        neurons_backcast = self.config['arch']['nbackcast']
        neurons_full = self.config['arch']['nfull']
        dropout = self.config['arch']['dropout']
        niblocks = self.config['arch']['niblocks']  # number of blocks inside each group
        neblocks = self.config['arch']['neblocks']  # number of groups of blocks
        activation = self.config['arch']['activation']
        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        input = Input(shape=(idimensions))
        eblock, forecast_sum = self.group_block(niblocks, input, neurons_input, neurons_forecast, neurons_backcast, activation, dropout)
        for i in range(neblocks - 1):
            eblock, forecast = self.group_block(niblocks, eblock, neurons_input, neurons_forecast, neurons_backcast, activation, dropout)
            forecast_sum = Add()([forecast_sum, forecast])
        # Final fully connected head over the accumulated forecasts.
        eforecast = Dense(neurons_full)(forecast_sum)
        eforecast = generate_activation(activation)(eforecast)
        output = Dense(odimensions, activation='linear')(eforecast)

        self.model = Model(inputs=input, outputs=output)
| [
"bejar@cs.upc.edu"
] | bejar@cs.upc.edu |
d219f89daf35876f7eb0e0f619ed2f5ab3f262d2 | f8d3d9bd1c97e9c14ae51ce35b0a49e902dc3a72 | /pacho/day02/day06/04分词.py | 010bd1d82074537a32cd103285b0ea5aa22c118e | [] | no_license | dingmf/mygit | 226cf38bae4a9fecd051733c375fed15f65d5fd2 | bbf36d31d2edbfae663fe45762457f3b98e05161 | refs/heads/master | 2022-11-30T17:18:09.197751 | 2018-09-12T07:40:55 | 2018-09-12T07:40:55 | 139,680,867 | 0 | 1 | null | 2022-11-28T16:17:01 | 2018-07-04T06:52:19 | Python | UTF-8 | Python | false | false | 235 | py | import jieba
str1 = '你可真skr弟弟,笑skr人!!!'
# Search-engine style segmentation: long words are split further and
# punctuation/symbol tokens are kept (translated from the original comment).
mycut = jieba.cut_for_search(str1)
# cut_all=True asks jieba for every possible word segmentation.
cutStr = jieba.cut(str1, cut_all=True)
print(mycut)  # NOTE(review): likely prints the cut object itself, not the tokens
print('/'.join(mycut))
print('*'.join(cutStr)) | [
"1175674559@qq.com"
] | 1175674559@qq.com |
8e5f44a147c6f19cc4e28b11d1783cef2c418de1 | 5f4f3ab6ece4eda1066bda2f80b9cf89a898f409 | /0x06-python-classes/5-square.py | 03fb26572cf5137899c1684988e89074c8c5cc6f | [] | no_license | s0m35h1t/holbertonschool-higher_level_programming | 8af7f3bc54159efa05859f81ca3b9fb1739190e8 | f3b7ddeabf41b5cbc8460841c429b4b3bf254fea | refs/heads/master | 2020-09-28T20:32:23.955579 | 2020-05-14T20:22:50 | 2020-05-14T20:22:50 | 226,859,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | #!/usr/bin/python3
"""Defines: class Square"""
class Square:
    """Represents a square defined by the size of one side.

    Attributes:
        __size (int): size of a side of the square
    """

    def __init__(self, size=0):
        """Initializes a square.

        Args:
            size (int): size of a side of the square

        Raises:
            TypeError: if size is not an integer
            ValueError: if size is less than 0
        """
        # Assign through the property so invalid sizes are rejected at
        # construction time instead of failing later in area()/my_print().
        self.size = size

    def area(self):
        """Returns the area of the square."""
        return self.__size * self.__size

    @property
    def size(self):
        """int: size of a side of the square."""
        return self.__size

    @size.setter
    def size(self, value):
        """Sets the size after validating it.

        Args:
            value (int): the new size of a side of the square

        Raises:
            TypeError: if value is not an integer
            ValueError: if value is less than 0
        """
        # type() (not isinstance) so bool values are rejected as well.
        if type(value) is not int:
            raise TypeError("size must be an integer")
        if value < 0:
            raise ValueError("size must be >= 0")
        self.__size = value

    def my_print(self):
        """Prints the square to stdout with the character '#'.

        A size of 0 prints a single empty line.
        """
        if self.__size == 0:
            print()
            return
        for _ in range(self.__size):
            print("#" * self.__size)
| [
"adib.grouz@gmail.com"
] | adib.grouz@gmail.com |
9bf1dd53a1f021085b6ad67f4a8ee62d5ebb370c | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/storagecache/v20201001/__init__.py | 81cba06478ab89a63ffea2d18cb8fba1406ba9e4 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .cache import *
from .get_cache import *
from .get_storage_target import *
from .storage_target import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime.

    Registration maps the fully qualified type tokens to their Python
    classes so the engine can rebuild a resource object from its URN
    (see ``Module.construct``).
    """
    import pulumi
    from ... import _utilities
    class Module(pulumi.runtime.ResourceModule):
        # Version reported for this resource module, derived from the package.
        _version = _utilities.get_semver_version()
        def version(self):
            return Module._version
        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Dispatch on the type token and rehydrate the resource from its URN.
            if typ == "azure-native:storagecache/v20201001:Cache":
                return Cache(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-native:storagecache/v20201001:StorageTarget":
                return StorageTarget(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")
    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-native", "storagecache/v20201001", _module_instance)
# Module registration runs once at import time.
_register_module()
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
6ff24a8b7e3a25b2617ad1080b4a92a7dfdcf9d1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_161/ch161_2020_06_15_19_33_29_835504.py | 15945963281606445038fcdc114e6e253b984983 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py |
def PiWallis(elementos):
    """Approximate pi with the first `elementos` factors of the Wallis product.

    The Wallis product states pi/2 = (2/1) * (2/3) * (4/3) * (4/5) * ...
    With zero factors the bare initial value 2 is returned.
    """
    numerador = 2
    denominador = 1
    multiplicacao = 1
    for contador in range(elementos):
        multiplicacao *= numerador / denominador
        # Even steps advance the denominator, odd steps the numerator,
        # generating the 2/1, 2/3, 4/3, 4/5, ... sequence of factors.
        if contador % 2 == 0:
            denominador += 2
        else:
            numerador += 2
    return 2 * multiplicacao
"you@example.com"
] | you@example.com |
f00d79c165da9f5f415dcbcd61dc41d4a99a4bbe | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/3 kyu/Binomial Expansion 540d0fdd3b6532e5c3000b5b.py | d7a0668726b90a8fd963e987d8998d2735c4f972 | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # https://www.codewars.com/kata/540d0fdd3b6532e5c3000b5b
from functools import lru_cache
import re
@lru_cache(maxsize=None)
def fact(n):
    """Factorial of n, memoized across calls; any n below 1 yields 1."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def c(n, k):
    """Binomial coefficient "n choose k", built from cached factorials."""
    denominator = fact(k) * fact(n - k)
    return fact(n) // denominator
def format(a, x=True):
    """Signed rendering of coefficient `a`.

    A magnitude of 1 is omitted when `x` is true (the sign will be glued
    to a variable). NOTE: intentionally shadows the builtin ``format`` to
    keep the original public name.
    """
    sign = '+' if a > 0 else '-'
    magnitude = abs(a)
    if x and magnitude == 1:
        return sign
    return sign + str(magnitude)
def format_x(x, pow):
    """Render variable `x` raised to `pow`: '' for 0, bare name for 1,
    'x^p' otherwise. (`pow` keeps its original, builtin-shadowing name.)"""
    if pow == 0:
        return ''
    if pow == 1:
        return x
    return x + '^' + str(pow)
def expand(expr):
    """Expand a binomial of the form "(ax+b)^n" into a polynomial string.

    The coefficient `a` (possibly a bare '-' meaning -1), the single
    lowercase variable, the optional signed constant `b` and the exponent
    `n` are parsed from `expr` with a regular expression.
    """
    m = re.match(r'\(([+-]?\d*)([a-z])([+-]?\d+)?\)\^(\d+)', expr)
    a, x, b, n = m.groups()
    # An absent coefficient means 1 and a lone '-' means -1.
    a = (int(a) if a != '-' else -1) if a else 1
    b = int(b) if b else 0
    n = int(n)
    if n == 0:
        res = '1'
    elif n == 1:
        # Skip the constant term when b is 0 so "(x)^1" renders as "x",
        # not "x-0" (bug fix).
        res = format(a) + x + (format(b, x=False) if b else '')
    elif b == 0:
        # (ax)^n collapses to the single monomial a^n * x^n. (The original
        # referenced an undefined name `var` here, raising NameError.)
        res = format(pow(a, n)) + format_x(x, n)
    else:
        # Binomial theorem: sum over k of C(n,k) * a^(n-k) * x^(n-k) * b^k.
        res = ''.join(format(c(n, k) * pow(a, n - k) * pow(b, k), x=n != k) + format_x(x, n - k)
                      for k in range(n + 1))
    # Drop a leading '+' from the first term.
    return res[res[0] == '+':]
"alichek95@mail.ru"
] | alichek95@mail.ru |
90cfae906d2dbdf784cb82584a0604f5729e49dc | 7839d009f3ae0a0c1bc360b86756eba80fce284d | /devel/lib/python2.7/site-packages/gazebo_msgs/msg/_ODEPhysics.py | d26775045ca569df97509ded579dfc284ef87fac | [] | no_license | abhat91/ros_osx | b5022daea0b6fdaae3489a97fdb1793b669e64f5 | 39cd8a79788d437927a24fab05a0e8ac64b3fb33 | refs/heads/master | 2021-01-10T14:43:41.047439 | 2016-03-13T23:18:59 | 2016-03-13T23:18:59 | 53,812,264 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,359 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/ODEPhysics.msg. Do not edit."""
import sys
# True when running on Python 3 (sys.hexversion >= 0x03000000).
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ODEPhysics(genpy.Message):
  # genpy-generated container for the gazebo_msgs/ODEPhysics message type;
  # regenerate from the .msg definition instead of editing by hand.
  _md5sum = "667d56ddbd547918c32d1934503dc335"
  _type = "gazebo_msgs/ODEPhysics"
  _has_header = False #flag to mark the presence of a Header object
  # Verbatim text of the original .msg definition.
  _full_text = """bool auto_disable_bodies # enable auto disabling of bodies, default false
uint32 sor_pgs_precon_iters # preconditioning inner iterations when uisng projected Gauss Seidel
uint32 sor_pgs_iters # inner iterations when uisng projected Gauss Seidel
float64 sor_pgs_w # relaxation parameter when using projected Gauss Seidel, 1 = no relaxation
float64 sor_pgs_rms_error_tol # rms error tolerance before stopping inner iterations
float64 contact_surface_layer # contact "dead-band" width
float64 contact_max_correcting_vel # contact maximum correction velocity
float64 cfm # global constraint force mixing
float64 erp # global error reduction parameter
uint32 max_contacts # maximum contact joints between two geoms
"""
  # Field names and their ROS types, in serialization order.
  __slots__ = ['auto_disable_bodies','sor_pgs_precon_iters','sor_pgs_iters','sor_pgs_w','sor_pgs_rms_error_tol','contact_surface_layer','contact_max_correcting_vel','cfm','erp','max_contacts']
  _slot_types = ['bool','uint32','uint32','float64','float64','float64','float64','float64','float64','uint32']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       auto_disable_bodies,sor_pgs_precon_iters,sor_pgs_iters,sor_pgs_w,sor_pgs_rms_error_tol,contact_surface_layer,contact_max_correcting_vel,cfm,erp,max_contacts
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(ODEPhysics, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.auto_disable_bodies is None:
        self.auto_disable_bodies = False
      if self.sor_pgs_precon_iters is None:
        self.sor_pgs_precon_iters = 0
      if self.sor_pgs_iters is None:
        self.sor_pgs_iters = 0
      if self.sor_pgs_w is None:
        self.sor_pgs_w = 0.
      if self.sor_pgs_rms_error_tol is None:
        self.sor_pgs_rms_error_tol = 0.
      if self.contact_surface_layer is None:
        self.contact_surface_layer = 0.
      if self.contact_max_correcting_vel is None:
        self.contact_max_correcting_vel = 0.
      if self.cfm is None:
        self.cfm = 0.
      if self.erp is None:
        self.erp = 0.
      if self.max_contacts is None:
        self.max_contacts = 0
    else:
      # No arguments: initialize every field to its type's default value.
      self.auto_disable_bodies = False
      self.sor_pgs_precon_iters = 0
      self.sor_pgs_iters = 0
      self.sor_pgs_w = 0.
      self.sor_pgs_rms_error_tol = 0.
      self.contact_surface_layer = 0.
      self.contact_max_correcting_vel = 0.
      self.cfm = 0.
      self.erp = 0.
      self.max_contacts = 0
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # All ten fields are packed at once with the precompiled pattern
      # "<B2I6dI" defined at module level (fixed 61-byte record).
      buff.write(_struct_B2I6dI.pack(_x.auto_disable_bodies, _x.sor_pgs_precon_iters, _x.sor_pgs_iters, _x.sor_pgs_w, _x.sor_pgs_rms_error_tol, _x.contact_surface_layer, _x.contact_max_correcting_vel, _x.cfm, _x.erp, _x.max_contacts))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      _x = self
      start = end
      # Fixed-size message: 1 (bool) + 2*4 (uint32) + 6*8 (float64) + 4 (uint32) = 61 bytes.
      end += 61
      (_x.auto_disable_bodies, _x.sor_pgs_precon_iters, _x.sor_pgs_iters, _x.sor_pgs_w, _x.sor_pgs_rms_error_tol, _x.contact_surface_layer, _x.contact_max_correcting_vel, _x.cfm, _x.erp, _x.max_contacts,) = _struct_B2I6dI.unpack(str[start:end])
      # The bool travels on the wire as an integer byte; normalize it back.
      self.auto_disable_bodies = bool(self.auto_disable_bodies)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Identical to serialize(); the numpy argument is not used for this message.
    try:
      _x = self
      buff.write(_struct_B2I6dI.pack(_x.auto_disable_bodies, _x.sor_pgs_precon_iters, _x.sor_pgs_iters, _x.sor_pgs_w, _x.sor_pgs_rms_error_tol, _x.contact_surface_layer, _x.contact_max_correcting_vel, _x.cfm, _x.erp, _x.max_contacts))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Identical to deserialize(); the numpy argument is not used for this message.
    try:
      end = 0
      _x = self
      start = end
      end += 61
      (_x.auto_disable_bodies, _x.sor_pgs_precon_iters, _x.sor_pgs_iters, _x.sor_pgs_w, _x.sor_pgs_rms_error_tol, _x.contact_surface_layer, _x.contact_max_correcting_vel, _x.cfm, _x.erp, _x.max_contacts,) = _struct_B2I6dI.unpack(str[start:end])
      self.auto_disable_bodies = bool(self.auto_disable_bodies)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Precompiled struct patterns shared by the (de)serialization methods:
# "<B2I6dI" = little-endian bool byte, 2x uint32, 6x float64, 1x uint32 (61 bytes).
_struct_I = genpy.struct_I
_struct_B2I6dI = struct.Struct("<B2I6dI")
| [
"abhat@wpi.edu"
] | abhat@wpi.edu |
18bfeb5cc6ceb600401c040391ad37ceb6251c8a | f023f1b025a49968c1050e7726054e2d69eaddd8 | /practice/practice_4/classde5.py | c65accd7059aa708beecf8de2936e5629602e814 | [] | no_license | soulorman/Python | 225444cab3ce65511ab91db753f07ce28e5bd239 | 4febccac57bfa5f7ef46f5f57e52206c8b0a57ac | refs/heads/master | 2023-05-11T01:50:08.012001 | 2020-06-22T07:14:24 | 2020-06-22T07:14:24 | 149,143,223 | 0 | 0 | null | 2023-04-29T10:08:28 | 2018-09-17T15:03:08 | Python | UTF-8 | Python | false | false | 285 | py | class Singleton(type):
    def __call__(cls, *args,**kwargs):
        # Lazily create the one shared instance on the first instantiation;
        # every later call returns the cached object.
        if not hasattr(cls, '_instance'):
            cls._instance = super().__call__(*args,**kwargs)
        return cls._instance
# Demo class: the Singleton metaclass makes every Foo() call return one object.
class Foo(metaclass = Singleton):
    pass
f1 = Foo()
f2 = Foo()
# Both prints show the same object (same id), demonstrating the singleton.
print(f1)
print(f2)
| [
"2379797392@qq.com"
] | 2379797392@qq.com |
8f10a168a255600db610bb7ffff9dddf7935dc82 | 5ffdef59c244f719c43ee24d23de7201bf42eab5 | /Model2-EASE/src/nltk/nltk/corpus/reader/rte.py | 1a0972e56044ae9be7ef08cee4bc1a56a684f7fa | [
"AGPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-ND-3.0"
] | permissive | midas-research/calling-out-bluff | 8db408efe1c211a8685bfc1b2553117770689639 | 4de3c56b64edeeef9968288679c4e5b261e9949c | refs/heads/models_test | 2022-12-13T02:36:24.054646 | 2020-08-19T07:05:55 | 2020-08-19T07:05:55 | 280,080,456 | 10 | 9 | MIT | 2020-08-09T18:57:22 | 2020-07-16T07:07:19 | null | UTF-8 | Python | false | false | 4,586 | py | # Natural Language Toolkit: RTE Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.
The files were taken from the RTE1, RTE2 and RTE3 datasets and the files
were regularized.
Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the
gold standard annotated files.
Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following
example is taken from RTE3::
<pair id="1" entailment="YES" task="IE" length="short" >
<t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known
company Baikalfinansgroup which was later bought by the Russian
state-owned oil company Rosneft .</t>
<h>Baikalfinansgroup was sold to Rosneft.</h>
</pair>
In order to provide globally unique IDs for each pair, a new attribute
``challenge`` has been added to the root element ``entailment-corpus`` of each
file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the
challenge number and 'n' is the pair ID.
"""
from util import *
from api import *
from xmldocs import *
def norm(value_string):
    """
    Normalize the string value in an RTE pair's ``value`` or ``entailment``
    attribute as an integer (1, 0). The lookup is case-insensitive.

    :param value_string: the label used to classify a text/hypothesis pair
    :type value_string: str
    :rtype: int
    """
    labels = {
        "TRUE": 1,
        "YES": 1,
        "FALSE": 0,
        "NO": 0,
    }
    return labels[value_string.upper()]
class RTEPair:
    """
    Container for RTE text-hypothesis pairs.

    The entailment relation is signalled by the ``value`` attribute in RTE1,
    and by ``entailment`` in RTE2 and RTE3; both are normalized with
    ``norm()`` and stored on the ``value`` attribute of this class.
    """
    def __init__(self, pair, challenge=None, id=None, text=None, hyp=None,
                 value=None, task=None, length=None):
        """
        :param pair: XML ``<pair>`` element holding the text/hypothesis data
        :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
        :param value: fallback label used when the element carries neither a
            ``value`` nor an ``entailment`` attribute
        :param task: fallback NLP task attribute
        :param length: fallback length attribute
        """
        attribs = pair.attrib
        self.challenge = challenge
        self.id = attribs["id"]
        self.gid = "%s-%s" % (self.challenge, self.id)
        self.text = pair[0].text
        self.hyp = pair[1].text
        if "value" in attribs:
            self.value = norm(attribs["value"])
        elif "entailment" in attribs:
            self.value = norm(attribs["entailment"])
        else:
            self.value = value
        self.task = attribs["task"] if "task" in attribs else task
        self.length = attribs["length"] if "length" in attribs else length
    def __repr__(self):
        if self.challenge:
            return '<RTEPair: gid=%s-%s>' % (self.challenge, self.id)
        return '<RTEPair: id=%s>' % self.id
class RTECorpusReader(XMLCorpusReader):
    """
    Corpus reader for corpora in RTE challenges.

    Thin wrapper around ``XMLCorpusReader``: every ``<pair>`` element of each
    document is wrapped in an ``RTEPair``. See the module docstring for the
    expected structure of input documents.
    """
    def _read_etree(self, doc):
        """
        Map one parsed XML document onto a list of ``RTEPair`` objects.

        :param doc: a parsed XML document
        :rtype: list(RTEPair)
        """
        challenge = doc.attrib.get('challenge')
        return [RTEPair(pair, challenge=challenge)
                for pair in doc.getiterator("pair")]
    def pairs(self, fileids):
        """
        Build a list of RTEPairs from a RTE corpus.

        :param fileids: a single fileid or a list of RTE corpus fileids
        :type: list
        :rtype: list(RTEPair)
        """
        if isinstance(fileids, basestring):
            fileids = [fileids]
        return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
| [
"meharmailbox@gmail.com"
] | meharmailbox@gmail.com |
fb0e647d59ffab8ab02a0836a0b6d540142cf42d | 4e3c976773526fd610d64ffb83589bccfaee5e68 | /sponge-integration-tests/examples/core/processors_scan_java_packages.py | cbce872a1ff83358bc79c246a6ea1149cfd127c7 | [
"Apache-2.0"
] | permissive | softelnet/sponge | 2313d2328953fcff49a002e727bb803757870627 | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | refs/heads/master | 2022-10-28T16:19:55.619882 | 2021-09-16T19:50:08 | 2021-09-16T19:50:08 | 95,256,030 | 10 | 2 | Apache-2.0 | 2022-10-04T23:55:09 | 2017-06-23T20:58:49 | Java | UTF-8 | Python | false | false | 167 | py | """
Sponge Knowledge Base
Scanning Java-based processors
"""
def onLoad():
sponge.enableJavaByScan("org.openksavi.sponge.integration.tests.core.scanning")
| [
"marcin.pas@softelnet.com"
] | marcin.pas@softelnet.com |
7d25591fd9cf0ec0e907b3d119660f7d28b4951f | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayUserFamilyShareRelationsSyncModel.py | e389c563bdbcf4ee6dd5b45b0e12ee326dd2ea53 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 6,168 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserFamilyShareRelationsSyncModel(object):
    """Payload model for the family share-relations sync request.

    Each field is a plain property over a ``_``-prefixed slot;
    ``to_alipay_dict``/``from_alipay_dict`` convert between the model and
    the wire-format dict.
    """

    # All field names, in the order the serialized dict emits them.
    _FIELD_NAMES = (
        'expired_date',
        'resource_content',
        'resource_id',
        'scene_id',
        'sharing_user_ids',
        'sharing_user_type',
        'target_status',
        'update_date',
        'user_id',
        'version_no',
    )

    def __init__(self):
        self._expired_date = None
        self._resource_content = None
        self._resource_id = None
        self._scene_id = None
        self._sharing_user_ids = None
        self._sharing_user_type = None
        self._target_status = None
        self._update_date = None
        self._user_id = None
        self._version_no = None

    @property
    def expired_date(self):
        return self._expired_date

    @expired_date.setter
    def expired_date(self, value):
        self._expired_date = value

    @property
    def resource_content(self):
        return self._resource_content

    @resource_content.setter
    def resource_content(self, value):
        self._resource_content = value

    @property
    def resource_id(self):
        return self._resource_id

    @resource_id.setter
    def resource_id(self, value):
        self._resource_id = value

    @property
    def scene_id(self):
        return self._scene_id

    @scene_id.setter
    def scene_id(self, value):
        self._scene_id = value

    @property
    def sharing_user_ids(self):
        return self._sharing_user_ids

    @sharing_user_ids.setter
    def sharing_user_ids(self, value):
        # Only list values are accepted; the incoming list is shallow-copied.
        if isinstance(value, list):
            self._sharing_user_ids = list(value)

    @property
    def sharing_user_type(self):
        return self._sharing_user_type

    @sharing_user_type.setter
    def sharing_user_type(self, value):
        self._sharing_user_type = value

    @property
    def target_status(self):
        return self._target_status

    @target_status.setter
    def target_status(self, value):
        self._target_status = value

    @property
    def update_date(self):
        return self._update_date

    @update_date.setter
    def update_date(self, value):
        self._update_date = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    @property
    def version_no(self):
        return self._version_no

    @version_no.setter
    def version_no(self, value):
        self._version_no = value

    def to_alipay_dict(self):
        """Serialize every populated (truthy) field into a plain dict."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if name == 'sharing_user_ids' and isinstance(value, list):
                # Convert any nested models inside the list in place.
                for index in range(len(value)):
                    element = value[index]
                    if hasattr(element, 'to_alipay_dict'):
                        value[index] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire dict; an empty/missing dict yields None."""
        if not d:
            return None
        o = AlipayUserFamilyShareRelationsSyncModel()
        for name in AlipayUserFamilyShareRelationsSyncModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
e6b3b17394ae5c56893b76733cf80dc8bfcb603b | 93205a7cc036732b95b4002bfc766c02fc2a26e4 | /script/control_team_gold | 2b938f8ddf2faf3bd80c30719501cf4b032c2aaf | [
"Apache-2.0"
] | permissive | FrankGaLu/uctf | 2cd4e65d481e0f494ea0671ecd5c73277ddc513c | f7d597b9532995b1509cc29aed4ac7115c2b0cb8 | refs/heads/master | 2023-03-22T12:03:19.303123 | 2017-07-03T20:57:06 | 2017-07-03T20:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | #!/usr/bin/env python
import sys
from uctf.control import control_team
if __name__ == '__main__':
sys.exit(control_team('gold'))
| [
"dthomas@osrfoundation.org"
] | dthomas@osrfoundation.org | |
033973ddc81a5fdf0e40009c4f321215fe3f4217 | 0ae2bb21d7ca71a691e33cb044a0964d380adda2 | /python/P523.py | 8ee3b72bfe030c43e82624dca3aa76adc9574477 | [] | no_license | xwang322/Coding-Interview | 5d27ec92d6fcbb7b929dd98bb07c968c1e1b2a04 | ee5beb79038675ce73c6d147ba9249d9a5ca346a | refs/heads/master | 2020-03-10T08:18:34.980557 | 2018-06-24T03:37:12 | 2018-06-24T03:37:12 | 129,282,263 | 2 | 6 | null | 2018-04-19T19:31:24 | 2018-04-12T16:41:28 | Python | UTF-8 | Python | false | false | 579 | py | class Solution(object):
def checkSubarraySum(self, nums, k):
if not nums or len(nums) == 1:
return False
sum_array = [0]*(len(nums)+1)
for i, num in enumerate(nums):
sum_array[i+1] = sum_array[i]+num
if k == 0:
if sum_array[-1] == 0:
return True
else:
return False
for i in range(1, len(sum_array)):
for j in range(i-1):
if not (sum_array[i]-sum_array[j])%k:
return True
return False | [
"noreply@github.com"
] | xwang322.noreply@github.com |
7594cc59c0f7b3e0b0660a1a5bf904bcb186dc40 | 25dcac1b6b7fd82d689ef78946ae92768cf1b419 | /aries/products/views_product_list.py | dbb763356b8b7569b1fcd752f12d9159a5ef0c1b | [] | no_license | youngwanim/project_aries_primitive | c239809363802235d4c77dfeece0bb174938dc80 | bd3c155be4eb39a4b67b18ec7b0458e550c7baf3 | refs/heads/master | 2023-05-14T01:07:05.907692 | 2019-06-13T07:33:46 | 2019-06-13T07:33:46 | 262,193,936 | 0 | 0 | null | 2021-06-10T22:52:45 | 2020-05-08T01:07:20 | Python | UTF-8 | Python | false | false | 3,343 | py | import logging
from rest_framework.response import Response
from rest_framework.views import APIView
from aries.common import code
from aries.common.exceptions.exceptions import DataValidationError, BusinessLogicError
from aries.common.http_utils.header_parser import parse_language_v2
from aries.common.models import ResultResponse
from aries.common.product_util import get_date_information_v3
from aries.products.common.product_func import add_discount_information
from aries.products.manager.menu_manager_v2 import MenuManagerV2
from aries.products.manager.product_manager_v3 import ProductManagerV3
from aries.products.manager.time_bomb_manager import TimeBombManager
logger_info = logging.getLogger('products_info')
logger_error = logging.getLogger('products_error')
class ProductListV2(APIView):
"""
Product list v2 class
"""
logger_info = logging.getLogger('products_info')
logger_error = logging.getLogger('products_error')
def get(self, request, hub_id):
lang_info = parse_language_v2(request.META)
date_info = get_date_information_v3(hub_id)
target_db = lang_info.target_db
cn_header = lang_info.cn_header
os_type = lang_info.os_type
time_type = date_info.time_type
try:
# Get product list from specific hub
product_manager = ProductManagerV3(self.logger_info, self.logger_error)
product_list = product_manager.get_product_list(hub_id, date_info.current_date)
menu_manager = MenuManagerV2(self.logger_info, self.logger_error)
for product in product_list:
menu_manager.get_menu_data_for_list(product, target_db, cn_header, product['sales_time'])
# Check the current available time bomb
time_bomb_manager = TimeBombManager(self.logger_info, self.logger_error)
time_bomb_id = time_bomb_manager.get_time_bomb_now(hub_id, os_type, has_after=False)
if time_bomb_id is not None:
# Discount information add
discount_map = product_manager.get_all_discount_info(time_bomb_id, cn_header)
for product in product_list:
if product['id'] in discount_map:
discount_info = discount_map[product['id']]
add_discount_information(product, discount_info)
except DataValidationError as instance:
message, err_code = instance.args
result = ResultResponse(code.ARIES_400_BAD_REQUEST, message, err_code)
except BusinessLogicError as instance:
message, err_code, data_set = instance.args
result = ResultResponse(code.ARIES_500_INTERNAL_SERVER_ERROR, message, err_code)
result.set_map(data_set)
except Exception as e:
result = ResultResponse(code.ARIES_500_INTERNAL_SERVER_ERROR, str(e))
else:
result = ResultResponse(code.ARIES_200_SUCCESS, 'success')
result.set('hub_id', hub_id)
result.set('current_time_type', time_type)
result.set('phase_next_day', date_info.phase_next_day)
result.set('phase_date', date_info.current_date.isoformat())
result.set('products', product_list)
return Response(result.get_response(), status=result.get_code())
| [
"ywlim98@gmail.com"
] | ywlim98@gmail.com |
2e5ebde578be01caaa95fcb9a0db42b6ea91e245 | 921b3a67a24df947f085e93ba58833ec20f6b89e | /producer-tutorial/Lib/site-packages/faker/providers/phone_number/ar_AE/__init__.py | 16c9a8ff934d1e3342027fad2d1ef959a5cd28a1 | [] | no_license | jaslanm/python | e3bacd7ad0020b7e11adcb1b17dd6da3e4b2f65c | 5cfa3913b89acb0b8cf79247de1b2820a8b92f3a | refs/heads/main | 2023-08-30T01:53:13.752918 | 2021-10-23T13:24:48 | 2021-10-23T13:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,472 | py | from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
    # Source: https://en.wikipedia.org/wiki/Telephone_numbers_in_the_United_Arab_Emirates

    cellphone_formats = (
        '{{area_code}} {{cellphone_provider_code}} ### ####',
        '{{area_code}}{{cellphone_provider_code}}#######',
        '0{{cellphone_provider_code}} ### ####',
        '0{{cellphone_provider_code}}#######',
    )

    telephone_formats = (
        '{{area_code}} {{telephone_provider_code}} ### ####',
        '{{area_code}}{{telephone_provider_code}}#######',
        '0{{telephone_provider_code}} ### ####',
        '0{{telephone_provider_code}}#######',
    )

    toll_formats = (
        '200####',
        '600######',
        '800###',
        '800####',
        '800#####',
        '800######',
        '800#######',
    )

    # Short service numbers (emergency and similar hotlines).
    services_phones_formats = (
        '999',
        '901',
        '998',
        '997',
        '996',
        '991',
        '922',
    )

    # Everything phone_number() may draw from, in the original precedence order.
    formats = (cellphone_formats + telephone_formats +
               services_phones_formats + toll_formats)

    def _realize(self, patterns):
        """Pick one pattern, expand its {{token}} placeholders via the
        generator, then replace every '#' with a random digit."""
        chosen = self.random_element(patterns)
        return self.numerify(self.generator.parse(chosen))

    def cellphone_provider_code(self) -> str:
        """Two-digit mobile-carrier prefix."""
        return self.random_element([
            '50',
            '52',
            '54',
            '55',
            '56',
            '58',
        ])

    def telephone_provider_code(self) -> str:
        """Single-digit landline prefix."""
        return self.random_element([
            '1',
            '2',
            '3',
            '4',
            '6',
            '7',
            '9',
        ])

    def area_code(self) -> str:
        """International dialing prefix for the UAE."""
        return self.random_element([
            '00971',
            '+971',
        ])

    def cellphone_number(self) -> str:
        return self._realize(self.cellphone_formats)

    def telephone_number(self) -> str:
        return self._realize(self.telephone_formats)

    def service_phone_number(self) -> str:
        return self._realize(self.services_phones_formats)

    def toll_number(self) -> str:
        return self._realize(self.toll_formats)

    def phone_number(self) -> str:
        return self._realize(self.formats)
| [
"jaslanm@gmail.com"
] | jaslanm@gmail.com |
bb0b83b7e5cb59927bf67837eed003b2cb14409b | 4e78d3a0a05d4f0986c3a6204c84605097e10b5e | /setup.py | f3eb8001ffd413ef312c9596950a81848f62147f | [
"Apache-2.0"
] | permissive | Nickycc/AndroidViewClient | eb62c8e3f7092b29cb077974e8c9e2f2a1bbce2a | 20b7dd0f6d058b179cfd32ef6ed900805f746542 | refs/heads/master | 2020-04-07T10:45:11.303302 | 2015-02-06T18:28:35 | 2015-02-06T18:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='androidviewclient',
version='10.0.0',
description='''AndroidViewClient is a 100% pure python library and tools
that simplifies test script creation providing higher level
operations and the ability of obtaining the tree of Views present at
any given moment on the device or emulator screen.
''',
license='Apache',
keywords='android uiautomator viewclient monkeyrunner test automation',
author='Diego Torres Milano',
author_email='dtmilano@gmail.com',
url='https://github.com/dtmilano/AndroidViewClient/',
packages=find_packages('src'),
package_dir={'':'src'},
package_data={'':['*.png']},
include_package_data=True,
scripts=['tools/culebra', 'tools/dump'],
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License'],
install_requires=['setuptools'],
)
| [
"dtmilano@gmail.com"
] | dtmilano@gmail.com |
2c802f4ab678b0d656ee10fdd2976f9ad0d0d79c | 81cfd404276f73b7b622cf10382eaf44a0e2e15f | /site/cgi-bin/index.py | 165f39c5af34021f1d3713c134068914218fa07b | [
"MIT"
] | permissive | jsmith1024/toonsite | 55e744a63176bccbbae512e4ddc40ce02f7a00b6 | 909f57ca28c80939d32a1799e9bdd65d37ae96a8 | refs/heads/main | 2023-06-11T19:26:15.876247 | 2021-07-09T17:23:05 | 2021-07-09T17:23:05 | 379,032,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | #!/usr/bin/env python3
import cgi
import cgitb

# Show detailed tracebacks in the browser when the script fails.
cgitb.enable()

from PageManager import PageManager

# Render the page top to bottom: header, title, separators, buttons, body, footer.
Page = PageManager()
for render_step in (
    Page.printStart,
    Page.printTitle,
    Page.printLine,
    Page.printButtons,
    Page.printLine,
    Page.printContent,
    Page.printFinish,
):
    render_step()
| [
"you@example.com"
] | you@example.com |
01b3bdc93b5c3c35aac100c12641a9a360149ba0 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2X2uZysLJ3CpsxLDD_6.py | 71dcb4db3c2ca6ea3efa61b032d1f239180d3f8e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | """
Create a function that takes an angle in radians and returns the corresponding
angle in degrees rounded to one decimal place.
### Examples
radians_to_degrees(1) ➞ 57.3
radians_to_degrees(20) ➞ 1145.9
radians_to_degrees(50) ➞ 2864.8
### Notes
The number `π` can be loaded from the math module with `from math import pi`.
"""
import math
def radians_to_degrees(rad):
    """Convert an angle given in radians to degrees, rounded to one decimal."""
    degrees = rad * (180 / math.pi)
    return round(degrees, 1)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9b008072db9fffb97edae7824a538495c55b0bab | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/__future__.py | f226254bc476d2fcf9abbfe601942660f2d823b6 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,838 | py | # 2016.08.04 19:58:04 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/__future__.py
"""Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Future instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
# Names of every feature this module knows about, in historical order.
all_feature_names = ['nested_scopes',
 'generators',
 'division',
 'absolute_import',
 'with_statement',
 'print_function',
 'unicode_literals']
__all__ = ['all_feature_names'] + all_feature_names
# Compiler bitfield flags; per the module docstring these must match the
# CO_xxx #defines in CPython's Include/compile.h.
CO_NESTED = 16
CO_GENERATOR_ALLOWED = 0
CO_FUTURE_DIVISION = 8192
CO_FUTURE_ABSOLUTE_IMPORT = 16384
CO_FUTURE_WITH_STATEMENT = 32768
CO_FUTURE_PRINT_FUNCTION = 65536
CO_FUTURE_UNICODE_LITERALS = 131072
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return '_Feature' + repr((self.optional, self.mandatory, self.compiler_flag))
# One record per feature: (first optional release, mandatory release, flag).
nested_scopes = _Feature((2, 1, 0, 'beta', 1), (2, 2, 0, 'alpha', 0), CO_NESTED)
generators = _Feature((2, 2, 0, 'alpha', 1), (2, 3, 0, 'final', 0), CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, 'alpha', 1), (3, 0, 0, 'alpha', 0), CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, 'alpha', 1), (2, 6, 0, 'alpha', 0), CO_FUTURE_WITH_STATEMENT)
print_function = _Feature((2, 6, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), CO_FUTURE_PRINT_FUNCTION)
unicode_literals = _Feature((2, 6, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), CO_FUTURE_UNICODE_LITERALS)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\__future__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:58:04 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
1717f1b245587d6fa78d65d1cc9655ce7ba3846d | 0dcf78e319956f2cb2327c5cb47bd6d65e59a51b | /Python3/Array/JumpGame/DPBottomUp055.py | 6810003594fc54ebbb2716a1b36da74aafbb2846 | [] | no_license | daviddwlee84/LeetCode | 70edd09a64a6f61492aa06d927e1ec3ab6a8fbc6 | da1774fd07b7326e66d9478b3d2619e0499ac2b7 | refs/heads/master | 2023-05-11T03:16:32.568625 | 2023-05-08T05:11:57 | 2023-05-09T05:11:57 | 134,676,851 | 14 | 4 | null | 2018-05-29T14:50:22 | 2018-05-24T07:18:31 | Python | UTF-8 | Python | false | false | 1,132 | py | from typing import List
from enum import Enum
class Index(Enum):
    GOOD = 0
    BAD = 1
    UNKNOWN = 2


class Solution:
    def canJump(self, nums: List[int]) -> bool:
        """
        Bottom-up dynamic programming: the recursion of the top-down version
        is eliminated by simply visiting the positions in reverse order.

        Benefits of removing the recursion:
          * no method call-stack overhead
          * may benefit from some caching
          * opens up possibilities for further optimization

        Time Complexity: O(n^2)
        Space Complexity: O(n)
        """
        n = len(nums)
        # memo[i] answers: can position i reach the last index?
        memo = [Index.UNKNOWN] * n
        memo[-1] = Index.GOOD
        # Walk backwards, starting at the position just before the end.
        for pos in range(n - 2, -1, -1):
            reach = min(pos + nums[pos], n - 1)
            # Position pos is good if any position within jump range is good.
            if any(memo[step] == Index.GOOD for step in range(pos + 1, reach + 1)):
                memo[pos] = Index.GOOD
        # The state of the starting position is the answer.
        return memo[0] == Index.GOOD
| [
"daviddwlee84@gmail.com"
] | daviddwlee84@gmail.com |
d0e7f2c538f49d35a54659e8d3811af7d8e4ed27 | 15eea21e7fa3531d45ad0eb8bfab305fcc0fe37a | /程序员代码面试指南-左程云/链表问题/detectCycle_1.py | 3d0c71063ccd93da0fae56b88f7b302a6f63652f | [] | no_license | yearing1017/Algorithm_Note | a3fcfd852903536d02f9360d96d31fc1a7aa6f8f | 8a3f1bda222cb777ff7786170c6d2071ad951ac0 | refs/heads/master | 2023-02-20T19:19:32.641519 | 2023-02-06T15:55:44 | 2023-02-06T15:55:44 | 223,118,925 | 43 | 9 | null | 2023-02-06T15:55:45 | 2019-11-21T07:55:50 | Python | UTF-8 | Python | false | false | 345 | py | # 判断给定的链表中是否有环。如果有环则返回true,否则返回false。
def hasCycle(self, head):
    # Floyd's tortoise-and-hare: advance one pointer a single node at a time
    # and the other two nodes at a time; they meet iff the list has a cycle.
    if not head:
        return False
    slow = head
    fast = head
    while fast.next and fast.next.next:
        slow, fast = slow.next, fast.next.next
        if slow == fast:
            return True
    return False
"yearing1017@126.com"
] | yearing1017@126.com |
c9a44024e9392ef9fe0c8b75c0886a1b815546c5 | ab197194e6f4a7dae78b4cf72456aff78e71740f | /makeBoostedMassResStudies2016.py | 230895ba131caabcf0237c72dbdb7b7a46414980 | [] | no_license | JanFSchulte/MuonResolution | b75919e4dabb064476bc71c997a42b916dd039e0 | e39b4055b654f2b2e93b3b0fdac826fa630b042f | refs/heads/master | 2023-07-02T12:02:40.541267 | 2023-06-06T16:14:45 | 2023-06-06T16:14:45 | 164,695,533 | 0 | 2 | null | 2019-09-13T09:47:52 | 2019-01-08T17:01:51 | Python | UTF-8 | Python | false | false | 3,606 | py | import subprocess
tracks = ["Inner","Outer","Global","TPFMS","Picky","DYT","TunePNew"]
#~ tracks = ["TunePNew"]
for track in tracks:
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016Boosteddefault","--weight","True","-f","doubleCB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016Boostedcrystal","--weight","True","-f","CB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016Boostedcruijff","--weight","True","-f","cruijff","-t","%s"%track]
subprocess.call(command)
#~ command = ["python","makeMassRes.py","-i","2016MassBinned","-o","default","-f","doubleCB"]
#~ subprocess.call(command)
#~ command = ["python","makeMassRes.py","-i","2016MassBinned","-o","cruijff"]
#~ subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedRebin2","--weight","True","--rebin","2","-f","doubleCB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedRebin2Cruijff","--weight","True","--rebin","2","-f","cruijff","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedRebin2Crystal","--weight","True","--rebin","2","-f","CB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedRebin4","--weight","True","--rebin","4","-f","doubleCB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedRebin4Cruijff","--weight","True","--rebin","4","-f","cruijff","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedRebin4Crystal","--weight","True","--rebin","4","-f","CB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedWindowSmall","--weight","True","--xMin","80","--xMax","100","-f","doubleCB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedWindowSmallCruijff","--weight","True","--xMin","80","--xMax","100","-f","cruijff","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedWindowSmallCrystal","--weight","True","--xMin","800","--xMax","100","-f","CB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedWindowLarge","--weight","True","--xMin","60","--xMax","120","-f","doubleCB","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedWindowLargeCruijff","--weight","True","--xMin","60","--xMax","120","-f","cruijff","-t","%s"%track]
subprocess.call(command)
command = ["python","makeMassRes_atZ3.py","--iDATA","data_2016.root","--iMC","2016PtBinned","-o","2016BoostedWindowLargeCrystal","--weight","True","--xMin","60","--xMax","120","-f","CB","-t","%s"%track]
subprocess.call(command)
| [
"jschulte@cern.ch"
] | jschulte@cern.ch |
c9d4ed43154d7509f5afc42a0b0bbcf1299abe96 | 72c90301d4753c3d1534473196c6cb0b2f923bc8 | /tests/languages/helpers_test.py | 7019e260fc888bdea88a7d14e2ad147b1aca8539 | [
"MIT"
] | permissive | KevinHock/pre-commit | ec5ab3725fe6678b16abb0978a7414de9babba3f | ab47d08a38c67d6e974295fb58af753b4e8930ad | refs/heads/master | 2021-05-06T23:49:28.970479 | 2017-11-09T02:04:13 | 2017-11-09T02:04:34 | 110,043,358 | 3 | 2 | null | 2017-11-08T23:42:14 | 2017-11-08T23:42:14 | null | UTF-8 | Python | false | false | 302 | py | from __future__ import absolute_import
from __future__ import unicode_literals
from pre_commit.languages import helpers
def test_basic_get_default_version():
    """The generic default-version helper always reports 'default'."""
    version = helpers.basic_get_default_version()
    assert version == 'default'
def test_basic_healthy():
    """The generic health check reports True regardless of its arguments."""
    healthy = helpers.basic_healthy(None, None)
    assert healthy is True
| [
"asottile@umich.edu"
] | asottile@umich.edu |
c5141ba8aba72647f297f5ba22c046a10c2d145e | eb4d8be0efcdfd208d1eebe8ab063bbcbe383155 | /test.py | cf6f97b354d4c3a3e0a133e2b3edada0e9fb6c4a | [] | no_license | r2d2-lex/openni-player | 3148df774b22a0e884bffb0a20f83e3065ccb221 | d50d56fecf4f8be81358593e2ee39ef91d812253 | refs/heads/main | 2023-01-07T03:38:57.316968 | 2020-11-10T21:27:44 | 2020-11-10T21:27:44 | 306,824,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from openni import openni2
OPENNI_FOLDER_PATH = r'./OpenNI-Linux-x64-2.3/Redist'
# Load the native OpenNI2 runtime from the bundled Linux x64 redistributable.
openni2.initialize(OPENNI_FOLDER_PATH)
# Open a recorded .oni capture instead of a live sensor; the commented line
# shows the alternative Device.open_file() API for the same purpose.
# dev = openni2.Device.open_file('cap1.oni'.encode('utf-8'))
dev = openni2.Device('cap2.oni'.encode('utf-8'))
# Playback control over the recording - needed for the seek() call below.
playback = openni2.PlaybackSupport(dev)
depth_stream = dev.create_depth_stream()
depth_stream.start()
print('Prepare...')
# Seek to position 20 on the depth stream
# (NOTE(review): presumably a frame index - confirm against the OpenNI docs).
playback.seek(depth_stream, 20)
print('Ok')
# # Clear windows
# blank_image = np.zeros((depth_frame.height, depth_frame.width, 3), np.uint8)
# cv2.imshow('Depth', blank_image)
# cv2.imshow("Color", blank_image)
| [
"r2d2-lex@yandex.ru"
] | r2d2-lex@yandex.ru |
e1a45b14465bd9ffc9845cc54a5d14051862b8ea | 03dd1cbd8448a6bab045bdab7fbd94a454ab861c | /menu/migrations/0004_auto_20170927_1122.py | 13c40faf53ece1520ed40ef254faff8eba312d4f | [] | no_license | UnruledLab/ICO-Full-Stack | feb03d7eba6c0d29febdaf47a9947837d8c25f00 | 94dc4c2a7ea176a1901e0c8643795f416f112396 | refs/heads/master | 2022-12-08T11:41:09.577255 | 2017-11-16T19:23:01 | 2017-11-16T19:23:01 | 120,464,345 | 0 | 0 | null | 2022-12-08T00:39:41 | 2018-02-06T13:51:59 | JavaScript | UTF-8 | Python | false | false | 755 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-27 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add German (name_de) and Spanish (name_es) name columns to MenuItem.

    The _de/_es suffixes follow the per-language-column convention
    (presumably django-modeltranslation - TODO confirm against the app
    config). The escaped verbose_name decodes to the Russian word
    "Название" ("Name").
    """
    dependencies = [
        ('menu', '0003_auto_20170922_1022'),
    ]
    operations = [
        migrations.AddField(
            model_name='menuitem',
            name='name_de',
            field=models.CharField(max_length=50, null=True, verbose_name='\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435'),
        ),
        migrations.AddField(
            model_name='menuitem',
            name='name_es',
            field=models.CharField(max_length=50, null=True, verbose_name='\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435'),
        ),
    ]
| [
"phonxis@gmail.com"
] | phonxis@gmail.com |
def lista_primos(numero_indice):
    """Return (and print) the list of the first ``numero_indice`` primes.

    The original implementation only tested divisibility by 3, 5 and 7 and
    even listed 1 as a prime, so most of its output was not actually prime.
    This version uses real trial division. The "first N primes" reading is
    taken from the original naming (``numero_indice``,
    ``lista_primeiros_primos``).
    """
    lista_primeiros_primos = []
    candidato = 2
    while len(lista_primeiros_primos) < numero_indice:
        # Trial division up to sqrt(candidato) decides primality.
        eh_primo = True
        divisor = 2
        while divisor * divisor <= candidato:
            if candidato % divisor == 0:
                eh_primo = False
                break
            divisor += 1
        if eh_primo:
            lista_primeiros_primos.append(candidato)
        candidato += 1
    print(lista_primeiros_primos)
    return lista_primeiros_primos
"you@example.com"
] | you@example.com |
9379cda15de724ef0cc38515415428fb839fe9cd | 810bbf089f20a4e26658d8186567da9a54db9a26 | /flexx/app/__init__.py | 32dc5bdf772c85196847588fad3015e87e3a0ab2 | [
"BSD-2-Clause"
] | permissive | sanen/flexx | fedffbf18f8ccfb6f2d6d02a06258273a1678d19 | a3cd1d31223107ecdee5cd4d71ae40d831831307 | refs/heads/master | 2021-01-15T17:01:55.338519 | 2016-02-24T13:18:46 | 2016-02-24T13:18:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | """
The app module implements the connection between Python and JavaScript.
It implements a simple server based on Tornado. HTML is served to
provide the client with the JavaScript and CSS, but once connected, all
communication goes via a websocket.
A central component is the ``Model`` class, which allows definition of
objects that have both a Python and JavaScript representation, forming
a basis for model-view-like systems.
Some background info on the server process
------------------------------------------
Each server process hosts on a single URL (domain+port), but can serve
multiple applications (via different paths). Each process uses one
tornado IOLoop (the default one), and exactly one Tornado Application
object.
Applications
------------
A ``Model`` class can be made into an application by decorating it with
``app.serve``. This registers the application, so that clients can connect
to the app based on its name. One instance of this class is instantiated
per connection. Multiple apps can be hosted from the same process simply
by specifying more app classes. To connect to the application
corresponding to the `MyApp` class, one should connect to
"http://domain:port/MyApp".
An app can also be launched (via ``app.launch()``), which will invoke
a client webruntime which is connected to the returned app object. This
is the intended way to launch desktop-like apps. An app can also be
exported to HTML via ``app.export()``.
Further, there is a notion of a default app, intended for interactive use
and use inside the Jupyter notebook; any ``Model`` instance created
without a ``session`` argument will connect to this default app.
Starting the server
-------------------
Use ``start()`` to enter the mainloop for the server. Optionally, the
hostname and port can be specified. Avoid ``sys.exit(app.start())``,
because ``start()`` may return immediately in interactive environments.
For desktop applications you can use ``run()``, which does what
``start()`` does, except the main loop exits when there are no more
connections (i.e. the server stops when the window is closed).
In the notebook
---------------
In the IPython/Jupyter notebook, the user needs to run
``init_notebook()`` which will inject JS and CSS into the browser.
For each widget that gets used as a cell output, a container DOM
element is created, in which the widget is displayed.
"""
_DEV_NOTES = """
Overview of classes:
* Model: the base class for creating Python-JS objects.
* AssetStore: one instance of this class is used to provide all client
assets in this process (JS, CSS, images, etc.).
* SessionAssets: base class for Session that implements the assets part.
Assets specific to the session are name-mangled.
* Session: object that handles connection between Python and JS. Has a
websocket, and optionally a reference to the runtime.
* WebSocket: tornado WS handler.
* AppManager: keeps track of what apps are registered. Has functionality
to instantiate apps and connect the websocket to them.
* Server: handles http requests. Uses manager to create new app
instances or get the page for a pending session. Hosts assets by using
the global asset store.
* FlexxJS (in clientcore.py): more or less the JS side of a session.
"""
from .session import manager, Session # noqa
from .model import Model, get_instance_by_id, get_model_classes # noqa
from .funcs import run, start, stop, call_later # noqa
from .funcs import init_notebook, serve, launch, export # noqa
from .assetstore import assets # noqa
from .clientcore import FlexxJS # noqa
from ..pyscript.stdlib import get_full_std_lib as _get_full_std_lib
# Register this module's JavaScript asset: the full PyScript standard library,
# then the FlexxJS source, then a line instantiating the global client object.
# (NOTE(review): FlexxJS is interpolated with %s - presumably its string form
# is the compiled JS class source; confirm in clientcore.)
assets.create_module_assets('flexx.app', js='%s\n\n%s\nvar flexx = new FlexxJS();\n' %
                            (_get_full_std_lib(), FlexxJS))
| [
"almar.klein@gmail.com"
] | almar.klein@gmail.com |
d9b4124cd21066e60a8212c47f75404550ce692b | 28da2837b480e999f3af6311addf1ae7b79e2401 | /bst_14/closed_elements_in_sorted_arrays.py | c909f0f2a65402b92b7aba7e2cc15c5fed0d27bc | [] | no_license | mirzasaad/EPI_Solutions | 3f62b502dd48f3525a7e2e4591b0e9241f3b231a | 29732b367787194562a3178feaa6a0d531f4fc92 | refs/heads/master | 2020-07-26T06:37:46.649086 | 2020-05-15T09:54:51 | 2020-05-15T09:54:51 | 208,566,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | from binary_tree_prototype import BinaryTreeNode
import bintrees
# @include
def find_closet_elements_in_sorted_arrays(sorted_arrays):
    """Return the smallest spread (max - min) over tuples formed by taking the
    current head of each sorted array while sweeping the arrays in order -
    i.e. the minimum distance of the closest entries, one per array.
    """
    # BST keyed by (value, source-array index); each key maps to the iterator
    # over the remainder of its array, so the tree always holds exactly one
    # "current" element per non-exhausted array.
    iters = bintrees.RBTree()
    min_distance_so_far = float('inf')
    for idx, sorted_array in enumerate(sorted_arrays):
        it = iter(sorted_array)
        first_min = next(it, None)
        # Empty arrays are skipped entirely.
        if first_min is not None:
            iters.insert((first_min, idx), it)
    while True:
        # The candidate interval spans the smallest and largest current heads.
        min_value, min_idx = iters.min_key()
        max_value = iters.max_key()[0]
        min_distance_so_far = min(max_value - min_value, min_distance_so_far)
        # Advance the array that owns the minimum head; when it runs out,
        # the best distance seen so far is the answer.
        it = iters.pop_min()[1]
        next_min = next(it, None)
        if next_min is None:
            return min_distance_so_far
        iters.insert((next_min, min_idx), it)
    return float('-inf')  # unreachable: the loop above always returns
sorted_arrays = [[5, 10, 15], [3, 6, 9, 12, 15], [8, 16, 24]]  # smoke-test fixture: closest tuple spread is 1 (15, 15, 16)
assert 1 == find_closet_elements_in_sorted_arrays(sorted_arrays) | [
"saaad.mirxa@gmail.com"
] | saaad.mirxa@gmail.com |
726f7745745eaefaa3d27be283d6fcfc35069b2d | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/Isam/countingSheep.py | 4d64230a7000e992ee0b1a70a2f705a6763d7f34 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 849 | py | import time
digits = [i for i in range(10)]  # completion target: the ten decimal digits in ascending order
def countSheep(N):
    """Solve one GCJ 2016 'Counting Sheep' case.

    Count N, 2N, 3N, ... and return the first multiple at which every decimal
    digit 0-9 has appeared at least once across all multiples counted so far.
    Returns 'INSOMNIA' for N == 0, since counting 0, 0, ... never shows a new
    digit.

    Accepts N as the raw input string or as an int (the original crashed on
    ints because it called len() on the argument).
    """
    N = int(N)
    if N == 0:
        return 'INSOMNIA'
    # Digits seen so far, as characters. This replaces the original's mutable
    # global `digits` target, the 9999 sentinel list and the unused `check`
    # flag.
    seen = set()
    num = 0
    # For any N > 0 every digit eventually appears among the multiples, so the
    # loop terminates (the original relied on a digit-length cutoff heuristic
    # instead).
    while len(seen) < 10:
        num += N
        seen.update(str(num))
    return num
if __name__ == '__main__':
    #print(digits)
    #print(countSheep(0))
    # Read the whole GCJ input file, solve each case, and write the answers
    # in the required "Case #k: answer" format.
    start_time = time.time()
    f = open('A-large.in', 'r')  # NOTE(review): neither file handle is closed
    lineList = f.readlines()
    n = int(lineList[0])  # first line holds the number of test cases
    fOut = open('A-result-large.txt','w')
    for a in range(1,n+1):
        s = lineList[a].replace('\n','')
        dig = countSheep(s)
        fOut.write('Case #'+str(a)+': '+str(dig)+'\n')
    print("--- %s seconds ---" % (time.time() - start_time))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
9461b956bf1440b44ecbf939ddeb69a235358c5b | 9eedae344b05a4d7af73c76a10599b4a90db3c4c | /Python_Weakref_Object_ID_Retrieval.py | 7e4a9c7726374e87ebe9e22f7d28f06318e6e77c | [] | no_license | VakinduPhilliam/Python_Weakref_Caching | ab73d94d91fc43316d245e76cb35cf2c5749cac5 | 0df5a6843495fc62638a8e9f55f59e6d6ed874da | refs/heads/master | 2020-05-31T09:46:57.307715 | 2019-06-04T14:53:03 | 2019-06-04T14:53:03 | 190,221,842 | 2 | 0 | null | null | null | null | MacCentralEurope | Python | false | false | 1,322 | py | # Python Weakref
# weakref — Weak references.
# The weakref module allows the Python programmer to create weak references to objects.
#
# In the following, the term referent means the object which is referred to by a weak reference.
# A weak reference to an object is not enough to keep the object alive: when the only remaining references to a referent are weak references, garbage
# collection is free to destroy the referent and reuse its memory for something else.
# However, until the object is actually destroyed the weak reference may return the object even if there are no strong references to it.
# A primary use for weak references is to implement caches or mappings holding large objects, where it's desired that a large object not be kept alive
# solely because it appears in a cache or mapping.
#
#
# This simple example shows how an application can use object IDs to retrieve objects that it has seen before.
# The IDs of the objects can then be used in other data structures without forcing the objects to remain alive, but the objects can still be retrieved by
# ID if they do.
#
import weakref
_id2obj_dict = weakref.WeakValueDictionary()
def remember(obj):
oid = id(obj)
_id2obj_dict[oid] = obj
return oid
def id2obj(oid):
return _id2obj_dict[oid]
| [
"noreply@github.com"
] | VakinduPhilliam.noreply@github.com |
2a78db7216be49bce75c25ff134cea3662420ff1 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5753053697277952_0/Python/Grzesiu/A-source.py | 55aae270ef635068c044124ccda5ba50e8686be5 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import imp, sys
# Build an in-memory "utils" module (Python 2): registering it in sys.modules
# lets the later ``from utils import iline`` succeed without a utils.py file.
sys.modules["utils"] = __mod = imp.new_module("utils")
# The module body is kept as a string and exec'd into the module's namespace.
# It defines stdin helpers: line() splits a line and applies optional per-token
# converter types; iline()/fline() parse a whitespace-separated line of
# ints/floats.
exec """#!/usr/bin/python
from itertools import chain, repeat, izip
def line(*args):
    L = raw_input().strip().split()
    L = izip( L, chain( args, repeat(str) ) )
    return [ type(data) for data, type in L ]
def iline(): return map( int, raw_input().strip().split() )
def fline(): return map( float, raw_input().strip().split() )""" in vars(__mod)
#!/usr/bin/python
from utils import iline
def test():
    # Generator-style solver: the code up to ``yield`` reads one test case
    # from stdin; the remainder runs when the generator is drained (see the
    # __main__ block below).
    iline()  # first line of the case is read but not used
    nums = iline()  # per-letter counts: nums[i] items labeled chr(ord('A')+i)
    yield
    total = sum(nums)
    # [remaining count, letter, index] triples, so max() selects the letter
    # with the largest remaining count (ties broken by letter).
    nums = [ [count, chr(ord('A')+i), i] for i, count in enumerate(nums) ]
    answer = ''
    # Greedily emit the most frequent remaining letter; a space is inserted
    # after every second emitted letter (whenever the remaining total is
    # even), grouping the output in pairs.
    # NOTE(review): looks like GCJ 2016 "Senate Evacuation" - confirm.
    while total > 0:
        count, c, i = max(nums)
        answer += c
        nums[i][0] -= 1
        total -= 1
        if total%2 == 0:
            answer += ' '
    print answer
if __name__ == '__main__':
    T = input()  # number of test cases
    for i in xrange(1, T+1):
        print 'Case #%d:' % i,  # trailing comma keeps the answer on this line
        solver = test()
        # test() is a generator here: draining it with list() consumes the
        # input and prints the answer. The callable branch is boilerplate
        # supporting plain-function solvers as well.
        if hasattr(solver, 'next'):
            list(solver)
        elif callable(solver):
            solver()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
eb346ee316187d62bcf91198f7e119ad1fd21d29 | 2bb96128322c0307742c6809f0d8c7eddb09c523 | /try_finally_tutorial.py | e89c7aeef3b2381959a4cf1ed87404de0fa2b0b9 | [
"MIT"
] | permissive | twtrubiks/python-notes | 2390c66d25b39f5a73968829702d4fd02ccf1b28 | 042a651aaa04c07f76c52bc52bace665d0df99d7 | refs/heads/master | 2023-08-17T11:50:13.122573 | 2023-08-05T04:14:56 | 2023-08-05T04:14:56 | 90,001,387 | 122 | 49 | MIT | 2021-12-21T07:18:06 | 2017-05-02T06:55:21 | Python | UTF-8 | Python | false | false | 1,362 | py | def base_example_1():
    # Happy path: nothing raises, so the ``except`` clause is skipped.
    # Printed: run_1, other code; the function returns None.
    try:
        print("run_1")
    except Exception:
        print("Exception")
    # ``finally`` runs unconditionally once the try body (or handler) ends.
    finally:
        print("other code")
def example_1():
    # Like base_example_1 but with a ``return`` inside ``except``. No
    # exception occurs, so that return never executes: the result is None
    # and ``finally`` still prints "other code".
    try:
        print("run_1")
    except Exception:
        print("Exception")
        return "re Exception"
    finally:
        print("other code")
def example_1_except():
    # 1/0 raises ZeroDivisionError, so ``except`` runs and returns a value.
    # ``finally`` still executes ("other code") before the return completes.
    try:
        1 / 0
    except Exception:
        print("Exception")
        return "re Exception"
    finally:
        print("other code")
def example_2_diff():
    # Variant without ``finally``: the trailing print is ordinary code after
    # the try statement, so it only runs when control reaches it normally.
    # Here no exception occurs, so "other code" is printed.
    try:
        print("run_1")
    except Exception:
        print("Exception")
        return "re Exception"
    print("other code")
def example_2_diff_except():
    # The ``except`` branch returns, so - unlike a ``finally`` block - the
    # trailing print("other code") is never reached.
    try:
        1 / 0
    except Exception:
        print("Exception")
        return "re Exception"
    print("other code")
def example_file():
# better with as statement
myfile = open("test.txt", "w")
try:
# 1/0
myfile.write("data") # raises Exception
except Exception:
print("Exception")
finally:
print("close file")
myfile.close() # has run
if __name__ == "__main__":
print(base_example_1())
# print(example_1())
# print(example_1_except()) # -> has print("other code") ## important
# print(example_2_diff())
# print(example_2_diff_except()) # -> no print("other code")
# example_file()
| [
"twtrubiks@gmail.com"
] | twtrubiks@gmail.com |
d5b0e340a8e2295c9c0cfd3cd688b5bb192dcdda | 98316f1635a0092a9e92f8070dce3e8ceeeee537 | /pybids_examples.py | 7d6948bd2c69e41af00f56d9be7b6df408aaba3c | [] | no_license | oesteban/grabbit | 692bd2d3293f8ea22a767f5b08d464717f38d817 | 68e54b84aa4b93def2dfb25c0a47a41a127fc2ae | refs/heads/master | 2020-04-10T21:01:21.883610 | 2016-08-05T00:46:36 | 2016-08-05T00:46:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py |
# get func, t1w, and fmap for two particular subjects
# only take run 2 for motor task
# HOW TO DEAL WITH RUN SINCE IT COULD REFER TO FUNC OR T1W?
# ANOTHER TRICK WITH FIELDMAPS - DO WE RETURN ONLY THE FIELDMAPS
# THAT MATCH THE SPECIFIC FUNCTIONAL RUN BEING RETURNED?
pybids_instance = pybids(path='/home/chris/bids_data', config='this_config.json')
filtered_output=pybids_instance.searcher(sub=['01','02'],
image_type=['T1w','bold','fmap'],relative_path=True,
ext='nii',return_as_list=False,flatten=False,
filter={'run':['02'],'task':['motor']}})
{'basedir':'<base directory for dataset>',
'01':{
'01':{
'T1w':['anat/sub-01_run-01_T1w.nii.gz','anat/sub-01_run-02_T1w.nii.gz'],
'func':['func/sub-01_run-02_task-motor_bold.nii.gz'],
'fmap':['fmap/sub-01_magnitude.nii.gz']},
},
'02':{
'01':{
'T1w':['anat/sub-02_run-01_T1w.nii.gz'],
'func':['func/sub-02_run-01_task-motor_bold.nii.gz'],
'fmap':['fmap/sub-02_magnitude.nii.gz']}
}
}
### OLDER
pybidgetter=PyBIDS.getter(pipeline=None)
# get all T1w and T2w images from a dataset
# say there are three subjects - subj 3 is missing its T1w
output=pybidgetter(image_type=['T1w'])
{'basedir':'<base directory for dataset>',
'sub-01':{
'T1w':['anat/sub-01_run-01_T1w.nii.gz','anat/sub-01_run-02_T1w.nii.gz'],
'T2w':['anat/sub-01_run-01_T2w.nii.gz','anat/sub-01_run-02_T2w.nii.gz']},
'sub-02':{
'T1w':['anat/sub-02_run-01_T1w.nii.gz','anat/sub-02_run-02_T1w.nii.gz'],
'T2w':['anat/sub-02_run-01_T2w.nii.gz','anat/sub-02_run-02_T2w.nii.gz']},
'sub-03':{
'T2w':['anat/sub-03_run-01_T2w.nii.gz','anat/sub-03_run-02_T2w.nii.gz']}
}
| [
"poldrack@gmail.com"
] | poldrack@gmail.com |
19fc188a0c2aec52d03f8e3c1e583d1fb53a48c2 | 670f4ba8ded99b420c3454c6ae35789667880cc8 | /tobiko/tests/unit/test_conftest.py | ee6e63d49558fa535e6928f19e30857aa2ef1e5e | [
"Apache-2.0"
] | permissive | FedericoRessi/tobiko | 892db522198ab48380892138459d801c4bd00efa | ce2a8734f8b4203ec38078207297062263c49f6f | refs/heads/master | 2022-07-26T22:52:10.273883 | 2022-07-20T20:04:43 | 2022-07-20T20:04:43 | 145,856,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | # Copyright 2021 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_log import log
import mock
import tobiko
from tobiko.tests import unit
from tobiko.tests import conftest
LOG = log.getLogger(__name__)
class CaplogTest(unit.TobikoUnitTest):
def test_configure_caplog(self):
pytest_config = mock.MagicMock(inicfg={
'log_level': '<existing>',
'log_format': '<existing>',
'log_date_format': '<existing>'})
conftest.configure_caplog(pytest_config)
self.assertEqual('<existing>', pytest_config.inicfg['log_level'])
self.assertEqual('<existing>', pytest_config.inicfg['log_format'])
self.assertEqual('<existing>', pytest_config.inicfg['log_date_format'])
def test_configure_caplog_debug(self):
self.patch_caplog_config(capture_log=True, debug=True)
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_caplog(pytest_config)
self.assertEqual('DEBUG', pytest_config.inicfg['log_level'])
def test_configure_caplog_info(self):
self.patch_caplog_config(capture_log=True, debug=False)
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_caplog(pytest_config)
self.assertEqual('INFO', pytest_config.inicfg['log_level'])
def test_configure_caplog_fatal(self):
self.patch_caplog_config(capture_log=False)
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_caplog(pytest_config)
self.assertEqual('FATAL', pytest_config.inicfg['log_level'])
def test_configure_caplog_log_format(self):
self.patch_caplog_config(line_format='<some-format>')
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_caplog(pytest_config)
self.assertEqual('<some-format>', pytest_config.inicfg['log_format'])
def test_configure_caplog_date_format(self):
self.patch_caplog_config(date_format='<some-format>')
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_caplog(pytest_config)
self.assertEqual('<some-format>',
pytest_config.inicfg['log_date_format'])
def patch_caplog_config(self,
capture_log: bool = None,
debug: bool = None,
line_format: str = None,
date_format: str = None):
tobiko_config = self.patch(tobiko, 'tobiko_config').return_value
if capture_log is not None:
tobiko_config.logging.capture_log = capture_log
if debug is not None:
tobiko_config.debug = debug
if line_format is not None:
tobiko_config.logging.line_format = line_format
if date_format is not None:
tobiko_config.logging.date_format = date_format
class TimeoutTest(unit.TobikoUnitTest):
def test_configure_timeout_existing(self):
pytest_config = mock.MagicMock(inicfg={'timeout': '<existing>'})
conftest.configure_timeout(pytest_config)
self.assertEqual('<existing>', pytest_config.inicfg['timeout'])
def test_configure_timeout_none(self):
self.patch_timeout_config(timeout=None)
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_timeout(pytest_config)
self.assertNotIn('timeout', pytest_config.inicfg)
def test_configure_timeout_zero(self):
self.patch_timeout_config(timeout=0.)
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_timeout(pytest_config)
self.assertNotIn('timeout', pytest_config.inicfg)
def test_configure_timeout_negative(self):
self.patch_timeout_config(timeout=-1.)
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_timeout(pytest_config)
self.assertNotIn('timeout', pytest_config.inicfg)
def test_configure_timeout_positive(self):
self.patch_timeout_config(timeout=10.)
pytest_config = mock.MagicMock(inicfg={})
conftest.configure_timeout(pytest_config)
self.assertEqual(10., pytest_config.inicfg['timeout'])
def patch_timeout_config(self, timeout):
tobiko_config = self.patch(tobiko, 'tobiko_config').return_value
tobiko_config.testcase.timeout = timeout
| [
"fressi@redhat.com"
] | fressi@redhat.com |
2b86d2999fe4a3ef7fd96f5e3a7eed40e09f733f | e0375e71cf476cbe76729959d32dfc20ae263efe | /week3/rectangle_area.py | 46c1f4579fbb1c930f2e73205af94cea989692bf | [] | no_license | jonathanqbo/moncton-python-2020 | c920093c0199eed5b918b0aa7e3cbc2c4ea12715 | 40ee150bb80df36889169ad9d2b834939cc257bc | refs/heads/master | 2023-02-27T10:21:17.863058 | 2021-01-30T21:04:52 | 2021-01-30T21:04:52 | 291,563,403 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py |
def rect_area_v1(width, height):
area = width * height
print('Area of rectangle', [width, height], 'is', area)
rect_area_v1(10, 20)
rect_area_v1(20, 20)
rect_area_v1(30, 20)
def rect_area_v2(width, height):
return width * height
width, height = 10, 20
print('Area of rectangle', [width, height], 'is', rect_area_v2(width, height))
width, height = 20, 20
print('Area of rectangle', [width, height], 'is', rect_area_v2(width, height))
width, height = 30, 20
print('Area of rectangle', [width, height], 'is', rect_area_v2(width, height))
| [
"jonathan.q.bo@gmail.com"
] | jonathan.q.bo@gmail.com |
dedec16126478e91710dec3c1df5614fe7e4ccfd | 395e06560c7b794a965add40c586684cb0b4e59c | /docs/tutorials/output1.py | c0af15407c71da52faf54659741b79329c20f486 | [
"BSD-2-Clause",
"Python-2.0"
] | permissive | alanyee/python-terrascript | f01edef3f6e21e5b18bc3295efef1657be17e3ca | e880e7650a7c3a88603d5429dafbacd28cd26c7e | refs/heads/develop | 2023-03-09T07:33:32.560816 | 2020-09-21T07:11:09 | 2020-09-21T07:11:09 | 300,696,024 | 0 | 0 | BSD-2-Clause | 2021-02-18T00:33:30 | 2020-10-02T17:57:18 | null | UTF-8 | Python | false | false | 597 | py | import terrascript
import terrascript.provider
import terrascript.resource
config = terrascript.Terrascript()
# AWS provider
config += terrascript.provider.aws(region="us-east-1")
# Define Variable and add to config
v = terrascript.Variable("image_id", type="string")
config += v
# Define AWS EC2 instance and add to config
i = terrascript.resource.aws_instance("example", instance_type="t2.micro", ami=v)
config += i
# Output the instance's private IP
config += terrascript.Output(
"instance_ip_addr",
value=i.private_ip,
description="The private IP address of the instance.",
)
| [
"markus@juenemann.net"
] | markus@juenemann.net |
e9a1ead7be3abd84f1b7adfa8920a3cc29ab653e | 6df0d7a677129e9b325d4fdb4bbf72d512dd08b2 | /PycharmProjects/liveshow/old/while1.py | abedbba1b6a78431c887ffd8a9a97ae6f82275df | [] | no_license | yingxingtianxia/python | 01265a37136f2ad73fdd142f72d70f7c962e0241 | 3e1a7617a4b6552bce4a7e15a182f30e1bae221e | refs/heads/master | 2021-06-14T15:48:00.939472 | 2019-12-13T05:57:36 | 2019-12-13T05:57:36 | 152,200,507 | 0 | 0 | null | 2021-06-10T20:54:26 | 2018-10-09T06:40:10 | Python | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/env python3
#__*__coding: utf8__*__
import time
flag = ['-', '/', '|', '\\']
i = 0
while True:
print('\r%s' % flag[i], end = '')
time.sleep(0.5)
i += 1
if i == 4:
i = 0 | [
"903511044@qq.com"
] | 903511044@qq.com |
f47cab049ed7c2aaaa06df25cbcfc32277d38ea0 | 2875edc8b495d37d9d44e1d23d5c0c4e7fdd324b | /src/mkdocs_gallery/mkdocs_compatibility.py | 6526da3283dd325369604ebb9534587a5d98c830 | [
"BSD-3-Clause"
] | permissive | smarie/mkdocs-gallery | c8845a15a452de60ebe6f5384da21c39a9fb452e | 89fea9eacf6b9742fdbe2622f7d6a7d525db42a9 | refs/heads/main | 2023-08-09T21:10:37.119637 | 2023-05-15T16:50:17 | 2023-05-15T16:50:17 | 426,683,311 | 23 | 8 | BSD-3-Clause | 2023-07-27T06:14:53 | 2021-11-10T15:51:30 | Python | UTF-8 | Python | false | false | 936 | py | # Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/mkdocs-gallery>
#
# Original idea and code: sphinx-gallery, <https://sphinx-gallery.github.io>
# License: 3-clause BSD, <https://github.com/smarie/mkdocs-gallery/blob/master/LICENSE>
"""
Backwards-compatility shims for mkdocs. Only logger is here for now.
"""
import logging
from mkdocs.utils import warning_filter
def red(msg):
# TODO investigate how we can do this in mkdocs console
return msg
def getLogger(name="mkdocs-gallery"):
"""From https://github.com/fralau/mkdocs-mermaid2-plugin/pull/19/."""
log = logging.getLogger("mkdocs.plugins." + name)
log.addFilter(warning_filter)
# todo what about colors ? currently we remove the argument in each call
# the verbose method does not exist
log.verbose = log.debug
return log
# status_iterator = sphinx.util.status_iterator
| [
"sylvain.marie@se.com"
] | sylvain.marie@se.com |
fa36b654bb5532a8379da57ce3d509fc1a788083 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/miniclient/invitations/__init__.py | e788e2bd58f23379d23db2cc204a62c1c49e2d0f | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 511 | py | # 2017.02.03 21:48:22 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/miniclient/invitations/__init__.py
import pointcuts as _pointcuts
def configure_pointcuts():
_pointcuts.PrbDisableAcceptButton()
_pointcuts.PrbInvitationText()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\miniclient\invitations\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:48:22 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
bac62e407b444526b07735208e90a1d137933c20 | b7d0f003cfb0ec6fa25f99d9f7b544dc38ae6aa8 | /algorithmPractice/배열 돌리기4.py | 05a4b26eadcb71d6c8b6caf88bbab611f48bf7c2 | [] | no_license | Kimyechan/dataStructureAndArgorithm | 43c2cfa0d12a5c729f687d786ef6dde23bf193a7 | c9f8f614621aee9e236ffef20e5e563b37bab0b3 | refs/heads/master | 2021-07-09T03:31:31.405725 | 2021-03-09T13:18:55 | 2021-03-09T13:18:55 | 231,402,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | #틀린 문제
from copy import deepcopy
N, M, K = map(int, input().split(' '))
A = [list(map(int, input().split(' '))) for _ in range(N)]
Q = [tuple(map(int, input().split(' '))) for _ in range(K)]
dx, dy = [1, 0, -1, 0], [0, -1, 0, 1]
ans = 10000
def value(arr):
return min(sum(i) for i in arr)
def convert(arr, qry):
(r, c, s) = qry
r, c = r-1, c-1
new_array = deepcopy(arr)
for i in range(1, s+1):
rr, cc = r-i, c+i
for w in range(4):
for dd in range(i*2):
rrr, ccc = rr+dx[w], cc+dy[w]
new_array[rrr][ccc] = arr[rr][cc]
rr, cc = rrr, ccc
return new_array
def dfs(arr, qry):
global ans
if sum(qry) == K:
ans = min(ans, value(arr))
return
for i in range(K):
if qry[i]:
continue
new_array = convert(arr, Q[i])
qry[i] = 1
dfs(new_array, qry)
qry[i] = 0
dfs(A, [0 for i in range(K)])
print(ans)
| [
"vlvkcjswo7@naver.com"
] | vlvkcjswo7@naver.com |
a249c495bc8645675eb86766e110e36dde7a29fc | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/market_tools/tools/research/settings.py | 12d824f426777960367397db7cf7f7710c0ede4c | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | # -*- coding: utf-8 -*-
__author__ = 'likunlun'
TOOL_NAME = u'用户调研'
TOOL_ICON_URL = u'/static/img/markettools/vote/vote.jpg' | [
"jiangzhe@weizoom.com"
] | jiangzhe@weizoom.com |
5ea725ba1f92da5aa3dc691cdee9a3d723f8fdf9 | 8f6f27917c1d16856656c62aeed3909f17711144 | /2015/22/puzzle_2.py | 3ad0559d4e83c531d043e45031352f4a8c9650bb | [] | no_license | naydichev/advent-of-code-solutions | 1f2050e6d237bf664dd79925af8656f821080fda | d9f1bc6e95e077d48bd0b1b50e8f5b0ae137ca6a | refs/heads/master | 2022-12-26T15:00:09.429622 | 2022-12-21T08:51:42 | 2022-12-21T08:51:42 | 159,928,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,069 | py | #!/usr/bin/env python3
import sys
from collections import defaultdict
from copy import deepcopy
from recordtype import recordtype
Character = recordtype("Character", ["hp", "damage", "mana", "armor"], default=0)
Spell = recordtype("Spell", ["name", "cost", "effect", "duration"])
def magic_missle(_, boss):
boss.hp -= 4
def drain(player, boss):
boss.hp -= 2
player.hp += 2
def shield(player, _):
player.armor = 7
def poison(_, boss):
boss.hp -= 3
def recharge(player, _):
player.mana += 101
MAGIC_MISSLE = "Magic Missle"
DRAIN = "Drain"
SHIELD = "Shield"
POISON = "Poison"
RECHARGE = "Recharge"
SPELL_NAMES = [
MAGIC_MISSLE,
DRAIN,
SHIELD,
POISON,
RECHARGE
]
SPELLS = {
MAGIC_MISSLE: Spell(MAGIC_MISSLE, 53, magic_missle, 1),
DRAIN: Spell(DRAIN, 73, drain, 1),
SHIELD: Spell(SHIELD, 113, shield, 6),
POISON: Spell(POISON, 173, poison, 6),
RECHARGE: Spell(RECHARGE, 229, recharge, 5)
}
MIN_REQUIRED_MANA = 53
def main(boss):
mana = fight(
Character(50, 0, 500),
boss
)
print(f"least amount of mana required to win: {mana}")
LEAST = sys.maxsize
def fight(player, boss, effects=defaultdict(int), turn=0, mana_spent=0):
global LEAST
if mana_spent > LEAST:
return None
if turn % 2 == 0:
player.hp -= 1
if player.hp <= 0:
return None
for key in effects.keys():
if effects[key] > 0:
SPELLS[key].effect(player, boss)
effects[key] -= 1
elif key == SHIELD:
player.armor = 0
if boss.hp <= 0:
LEAST = min(LEAST, mana_spent)
return mana_spent
if turn % 2 == 1:
player.hp -= max(1, boss.damage - player.armor)
if player.hp <= 0:
return None
return fight(player, boss, effects, turn + 1, mana_spent)
else:
# pick a spell, if able (if not, return False, None)
if player.mana < MIN_REQUIRED_MANA:
return None
available_spells = [
spell for spell in SPELLS.values() \
if effects[spell.name] == 0 \
and spell.cost <= player.mana
]
options = []
for spell in available_spells:
effects_copy = deepcopy(effects)
player_copy = deepcopy(player)
boss_copy = deepcopy(boss)
effects_copy[spell.name] = spell.duration
player_copy.mana -= spell.cost
options.append(
fight(
player_copy,
boss_copy,
effects_copy,
turn + 1,
mana_spent + spell.cost
)
)
return min(filter(None, options + [sys.maxsize]))
def parse_boss(raw):
parts = []
for r in raw:
n = int(r.split()[-1])
parts.append(n)
return Character(*parts)
if __name__ == "__main__":
with open("boss.pi") as f:
boss = parse_boss(f.read().strip().split("\n"))
main(boss)
| [
"git@julian.appwarden.com"
] | git@julian.appwarden.com |
d3638f8d683f699e883d5ec2ab3d18a2de509b45 | 4dddd01ca6a60f2fa408ee55fbaebe868917184a | /venv/bin/mitmproxy | 28734f1e2cad0554ee4f2a9f66709b4f554ab352 | [] | no_license | fanpl-sourse/all_study_practice | 6f056c18f0eb7afd6af649e5b595895683bb0cbd | b02f25231855e149b95476b20dd8d53318cfe1a5 | refs/heads/master | 2023-01-22T09:13:15.107616 | 2020-11-30T05:53:43 | 2020-11-30T05:53:43 | 304,493,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | #!/Users/a/Documents/2020study/all_study_practice/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from mitmproxy.tools.main import mitmproxy
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(mitmproxy())
| [
"fanpengli@fangdd.com"
] | fanpengli@fangdd.com | |
d8376be88be729318e829b87da4ca137d0356495 | a76f49febfa90d57032068fda127534e491d70b7 | /sample-apps/trello-integration-app/src/formatters/trello/cards.py | cf37dfb5b65e05d91af243cd85d439cf23e930f1 | [] | permissive | jtruty/hubspot-api-python | dedf3b5b6838931d2b7cfbb667e80dc0cea58166 | 3f1b2d2007733a734daee2010611592249b72a0b | refs/heads/master | 2022-12-07T16:04:23.496317 | 2020-08-05T08:03:07 | 2020-08-05T08:03:07 | 288,502,982 | 0 | 0 | Apache-2.0 | 2020-08-18T16:07:45 | 2020-08-18T16:07:44 | null | UTF-8 | Python | false | false | 1,314 | py | from flask import url_for
def format_card_extension_data_response(deal_associated, card=None):
if deal_associated:
result = {
"objectId": card.short_id,
"title": card.name,
"link": card.short_url,
}
if len(card.members) > 0:
result["properties"] = [
{
"label": "Members",
"dataType": "STRING",
"value": ", ".join([member.username for member in card.members]),
}
]
results = [result]
primary_action = {
"type": "ACTION_HOOK",
"httpMethod": "DELETE",
"associatedObjectProperties": ["hs_object_id",],
"uri": url_for("trello.associations.delete_association", _external=True),
"label": "Remove the association",
}
else:
results = []
primary_action = {
"type": "IFRAME",
"width": 650,
"height": 350,
"uri": url_for("trello.associations.search_frame", _external=True),
"label": "Associate Trello card",
"associatedObjectProperties": ["hs_object_id", "dealname",],
}
return {
"results": results,
"primaryAction": primary_action,
}
| [
"atanasiuk@hubspot.com"
] | atanasiuk@hubspot.com |
da3b6043135ee77de71cc89fa3441c895b69fb87 | da3fac285886bf0f00fd9040fe20a02111722f67 | /foundation/jobs/models.py | 2226f7b8fc36e91c06f1d1332a5ab0cc3d85c6de | [
"MIT"
] | permissive | mikbuddha/foundation | 00b721e8e32de66c8dba34533dc8aea35e09e357 | ac8be263d321afe017a84aebc29ef735e65de393 | refs/heads/master | 2021-01-12T20:40:07.464776 | 2016-04-06T08:08:48 | 2016-04-06T08:09:53 | 55,972,178 | 1 | 0 | null | 2016-04-11T12:54:01 | 2016-04-11T12:54:00 | null | UTF-8 | Python | false | false | 398 | py | from django.db import models
class Job(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=150)
description = models.TextField()
submission_email = models.EmailField()
submission_closes = models.DateTimeField()
class Meta:
ordering = ('submission_closes',)
| [
"nick@whiteink.com"
] | nick@whiteink.com |
0fdf6b70ec039cb750c2d0e9f3526c5889b7cca9 | dbb0aef6d16f2946c7400a67c0a778cc7c899edb | /antspynet/architectures/create_autoencoder_model.py | 9cd55af3d27e53786eb10a7d0ea05abb2dcbfa57 | [] | no_license | mattcieslak/ANTsPyNet | 8b0e0004360aa7ca50e9501b806b8ddc67f2af3e | b98e7453dcdce13cff5d30ae9c093b2b37e67135 | refs/heads/master | 2020-12-29T08:18:31.297150 | 2020-02-02T02:15:40 | 2020-02-02T02:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py |
from keras.models import Model
from keras.layers import Input, Dense
def create_autoencoder_model(number_of_units_per_layer,
activation='relu',
initializer='glorot_uniform'
):
"""
2-D implementation of the Vgg deep learning architecture.
Builds an autoencoder based on the specified array definining the
number of units in the encoding branch. Ported to Keras R from the
Keras python implementation here:
https://github.com/XifengGuo/DEC-keras
Arguments
---------
number_of_units_per_layer : tuple
A tuple defining the number of units in the encoding branch.
activation : string
Activation type for the dense layers
initializer : string
Initializer type for the dense layers
Returns
-------
Keras model
An encoder and autoencoder Keras model.
Example
-------
>>> model = create_autoencoder_model((784, 500, 500, 2000, 10))
>>> model.summary()
"""
number_of_encoding_layers = len(number_of_units_per_layer) - 1
inputs = Input(shape=(number_of_units_per_layer[0],))
encoder = inputs
for i in range(number_of_encoding_layers - 1):
encoder = Dense(units=number_of_units_per_layer[i + 1],
activation=activation,
kernel_initializer=initializer)(encoder)
encoder = Dense(units=number_of_units_per_layer[-1])(encoder)
autoencoder = encoder
for i in range(number_of_encoding_layers-1, 0, -1):
autoencoder = Dense(units=number_of_units_per_layer[i],
activation=activation,
kernel_initializer=initializer)(autoencoder)
autoencoder = Dense(units=number_of_units_per_layer[0],
kernel_initializer=initializer)(autoencoder)
encoder_model = Model(inputs=inputs, outputs=encoder)
autoencoder_model = Model(inputs=inputs, outputs=autoencoder)
return([autoencoder_model, encoder_model])
| [
"ntustison@gmail.com"
] | ntustison@gmail.com |
303d9c00db90a3ecbcefa406c9241290e9583f2f | 11301b586c69aa6c9cbbaa79b51c72e25e9e7820 | /encoder/esim.py | 198d95ad3a4dfd57750718c7d4f930e7f8677c4a | [] | no_license | sduchh/nlp_research | 1d1a321280cab678f9d0b95b5aa86697f89d3811 | 9237d2b8ba85254016bd0386e819666f73286d58 | refs/heads/master | 2020-05-24T15:08:13.300994 | 2019-05-17T10:43:09 | 2019-05-17T10:43:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,067 | py | import keras
import tensorflow as tf
from keras.layers import *
from keras.activations import softmax
from keras.models import Model
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.utils import multi_gpu_model
#refer:https://arxiv.org/abs/1609.06038
class ESIM():
def __init__(self, **kwargs):
self.maxlen = kwargs['maxlen']
self.embedding_size = kwargs['embedding_size']
self.keep_prob = kwargs['keep_prob']
self.num_output = kwargs['num_output']
self.recurrent_units = 300
self.dense_units = 300
def feed_dict(self, **kwargs):
feed_dict = {}
return feed_dict
def pb_feed_dict(self,graph, name = 'esim', **kwargs):
feed_dict = {}
return feed_dict
def __call__(self, x_query, x_sample, reuse = tf.AUTO_REUSE):
#embedding_sequence_q1 = BatchNormalization(axis=2)(x_query)
#embedding_sequence_q2 = BatchNormalization(axis=2)(x_sample)
#final_embedding_sequence_q1 = SpatialDropout1D(0.25)(embedding_sequence_q1)
#final_embedding_sequence_q2 = SpatialDropout1D(0.25)(embedding_sequence_q2)
#################### 输入编码input encoding #######################
#分别对query和sample进行双向编码
rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_query)
rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_sample)
#rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q1)
#rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q2)
############## 局部推理local inference modeling ###################
#计算dot attention
attention = Dot(axes=-1)([rnn_layer_q1, rnn_layer_q2])
#分别计算query和sample进行attention后的结果
w_attn_1 = Lambda(lambda x: softmax(x, axis=1))(attention)
w_attn_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2))(attention))
align_layer_1 = Dot(axes=1)([w_attn_1, rnn_layer_q1])
align_layer_2 = Dot(axes=1)([w_attn_2, rnn_layer_q2])
############# 推理组合Inference Composition #######################
subtract_layer_1 = subtract([rnn_layer_q1, align_layer_1])
subtract_layer_2 = subtract([rnn_layer_q2, align_layer_2])
multiply_layer_1 = multiply([rnn_layer_q1, align_layer_1])
multiply_layer_2 = multiply([rnn_layer_q2, align_layer_2])
m_q1 = concatenate([rnn_layer_q1, align_layer_1, subtract_layer_1, multiply_layer_1])
m_q2 = concatenate([rnn_layer_q2, align_layer_2, subtract_layer_2, multiply_layer_2])
############### 编码+池化 #######################
v_q1_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q1)
v_q2_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q2)
avgpool_q1 = GlobalAveragePooling1D()(v_q1_i)
avgpool_q2 = GlobalAveragePooling1D()(v_q2_i)
maxpool_q1 = GlobalMaxPooling1D()(v_q1_i)
maxpool_q2 = GlobalMaxPooling1D()(v_q2_i)
merged_q1 = concatenate([avgpool_q1, maxpool_q1])
merged_q2 = concatenate([avgpool_q2, maxpool_q2])
final_v = BatchNormalization()(concatenate([merged_q1, merged_q2]))
#output = Dense(units=self.dense_units, activation='relu')(final_v)
output = Dense(units=self.num_output, activation=None)(final_v)
#output = BatchNormalization()(output)
#output = Dropout(self.dropout_rate)(output)
#output = tf.nn.dropout(output, self.keep_prob)
#高级api tf.layer.dropout 与 keras的Dropout都使用dropout
#tf.nn.dropout使用keep_prob
#output = Dense(units=self.num_output, activation='sigmoid')(output)
#output = Dense(units=self.num_output, activation=None)(output)
output = tf.squeeze(output, -1)
return output
| [
"zfz@zfzdeMacBook-Pro.local"
] | zfz@zfzdeMacBook-Pro.local |
4d3a66401e45ff0c99a630f8f3df823bb67c3a8a | ed97fb5c71da7ed89235432e3971bb0ef6064f8b | /algorithms/python/13.py | 272df1e8d68434f51e5fbca99ac031ed2e5ccb05 | [
"MIT"
] | permissive | viing937/leetcode | 8241be4f8bc9234a882b98ada2e5d13b0ebcca68 | b07f7ba69f3d2a7e294f915934db302f43c0848f | refs/heads/master | 2023-08-31T18:25:06.443397 | 2023-08-31T15:31:38 | 2023-08-31T15:31:38 | 37,374,931 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | class Solution(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
sym = ['IV', 'IX', 'XL', 'XC', 'CD', 'CM', 'I', 'V', 'X', 'L', 'C', 'D', 'M']
val = [4, 9, 40, 90, 400, 900, 1, 5, 10, 50, 100, 500, 1000]
rls = 0
for i in range(len(sym)):
k, v = sym[i], val[i]
rls += s.count(k) * v
s = s.replace(k, '')
return rls
| [
"viing937@gmail.com"
] | viing937@gmail.com |
9ff40c2a2acc8f4ac7abe253f44de58e75d4e87f | 45e23d1db1c1ecd19b4f4055b60b994cf866f240 | /源程序/12.1关键字竞价排名/Auction/useBidResultWin.py | 47498595780a55a6a4bea65ea4ae832f65f8df0c | [] | no_license | hushaoqi/MIS_project | ff8d60ac5461e3be6d1c599378790baa059f43c0 | d23aaf791809d912b15277dc057d84c8f2685f8e | refs/heads/master | 2022-12-23T21:50:37.024694 | 2019-07-24T13:05:28 | 2019-07-24T13:05:28 | 150,825,940 | 0 | 1 | null | 2022-12-17T11:17:42 | 2018-09-29T04:53:06 | Python | UTF-8 | Python | false | false | 442 | py | import sys
from bidResultWin import Ui_BidResultWin
from PyQt5.QtWidgets import QApplication,QMainWindow
import pymysql
#这是结果类
class useBidResultWin(QMainWindow, Ui_BidResultWin):
def __init__(self, parent = None):
super(useBidResultWin,self).__init__(parent)
self.setupUi(self)
if __name__ == "__main__":
app = QApplication(sys.argv)
ui = useBidResultWin()
ui.show()
sys.exit(app.exec_()) | [
"1941017133@qq.com"
] | 1941017133@qq.com |
97d0d0256245daf2b951bbf11057513bbe9b52ea | 19ab46afc572466465b794a5feb3257992932b85 | /setup.py | 2b358142faff664bfa7db166133a77631dbb3f78 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/MODEL1310110017 | 577d52a57f1d8b91286c03babd5e94e4514f8395 | 98237210ce09328a49e5262955de95547a28aa8a | refs/heads/master | 2020-03-28T18:49:44.580065 | 2014-10-16T05:31:02 | 2014-10-16T05:31:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from setuptools import setup, find_packages
setup(name='MODEL1310110017',
version=20140916,
description='MODEL1310110017 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1310110017',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | [
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
0c467616e06d40320fca4cca6f94d31d63d17957 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_143/ch20_2020_03_04_19_10_56_872662.py | f7d34d7f63ab7866d1479c34e2657d2d6697948a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # Pergunta quantos km
qts_km= float(input('quantos km:'))
def P(qts_km):
if qts_km <= 200:
return qts_km*0,5
else:
return qts_km*0,45
| [
"you@example.com"
] | you@example.com |
25d82ccbf3b3dfdc1a8f89dd1e6a98e1d7d97438 | 58a5bc060c870f9e0ffd824d25823e818ad45ce4 | /weather_app.py | b7db06418294b524f134c728e473466b80942f9d | [] | no_license | sincekara/SQL_Alchemy_Analysis | 73ae6e50734ca9f489020bf6c1d8a8331e0d7519 | 2e6c444751a2f324e27feb0e71fe64d45aab1760 | refs/heads/master | 2020-08-03T10:14:07.390519 | 2020-05-29T00:58:44 | 2020-05-29T00:58:44 | 211,716,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify, render_template
import datetime as dt
engine = create_engine("sqlite:///hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurements = Base.classes.measurement
Stations = Base.classes.station
weather_app = Flask(__name__)
@weather_app.route("/")
def Home_page():
"""Listing all available api routes."""
return (
f"All routes that are available:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start/end<br/>"
)
@weather_app.route("/api/v1.0/precipitation")
def precipitation():
session = Session(engine)
results = session.query(Measurements.date, Measurements.prcp).all()
session.close()
all_results = list(np.ravel(results))
return jsonify(all_results)
@weather_app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
results = session.query(Stations.station).all()
session.close()
all_stations = list(np.ravel(results))
return jsonify(all_stations)
@weather_app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
results = session.query(Measurements.date, Measurements.tobs).filter(Measurements.date>"2016-08-23").all()
session.close()
all_tobs = list(np.ravel(results))
return jsonify(all_tobs)
@weather_app.route("/api/v1.0/<start>")
def start_date(start):
session = Session(engine)
"""date = dt.datetime(int(start), 1, 1)"""
results = session.query(func.min(Measurements.tobs), func.avg(Measurements.tobs), func.max(Measurements.tobs)).\
filter(Measurements.date >= start).all()
session.close()
all_start = list(np.ravel(results))
return jsonify(all_start)
@weather_app.route("/api/v1.0/<start>/<end>")
def start_end(start, end):
session = Session(engine)
"""date = dt.datetime(int(start, end), 1, 1)"""
if end < start:
results = session.query(func.min(Measurements.tobs), func.avg(Measurements.tobs), func.max(Measurements.tobs)).\
filter(Measurements.date <= start).filter(Measurements.date >= end).all()
else:
results = session.query(func.min(Measurements.tobs), func.avg(Measurements.tobs), func.max(Measurements.tobs)).\
filter(Measurements.date >= start).filter(Measurements.date <= end).all()
session.close()
start_end = list(np.ravel(results))
return jsonify(start_end)
if __name__ == '__main__':
weather_app.run(debug=True)
| [
"test"
] | test |
ee5064935e2dcaa64edb9296955420c3590bdc50 | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/python_re_test_file/regexlib_4271.py | 679ccaecf031a6dd4a7516a068a0dd02612beca0 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # 4271
# ^[A-Za-z0-9](([_\.\-]?[a-zA-Z0-9]+)*)@([A-Za-z0-9]+)(([\.\-]?[a-zA-Z0-9]+)*)\.([A-Za-z]{2,})$
# EXPONENT
# nums:4
# EXPONENT AttackString:"A"+"a"*32+"!1 __NQ"
import re
from time import perf_counter
regex = """^[A-Za-z0-9](([_\.\-]?[a-zA-Z0-9]+)*)@([A-Za-z0-9]+)(([\.\-]?[a-zA-Z0-9]+)*)\.([A-Za-z]{2,})$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "A" + "a" * i * 1 + "!1 __NQ"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | [
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
f97d38f60f3af84331b5d2fedba787633e71e1e2 | f33b30743110532ddae286ba1b34993e61669ab7 | /1002. 查找常用字符.py | e138cc8408b653a1a12090fbe6b8044f7104e698 | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | class Solution:
def commonChars(self, A) :
from collections import Counter
if not A:
return []
lookup = Counter(A[0])
for a in A[1:]:
tmp = Counter(a)
c = Counter()
for t in (tmp & lookup):
c[t] = min(tmp[t],lookup[t])
lookup = c
return list(lookup.elements())
a = Solution()
print(a.commonChars(["bella","label","roller"]))
print(a.commonChars(["cool","lock","cook"])) | [
"762307667@qq.com"
] | 762307667@qq.com |
d210fc2188700e1f94d762d2d4eeb71ee4343895 | 59b839f796c73f19de9baee2f2e8e1c5822506ed | /pwncat/db/user.py | 57ba810ac6bc2942a77822bdd455d52610759d26 | [] | no_license | tilt41/pwncat | ac3377a99ff6a690cd38cfea794bc7041f5a208f | 30e084ab6e8c41fa2f0a43b557b308599eb0bdf3 | refs/heads/master | 2022-12-08T11:11:14.285109 | 2020-08-25T17:18:14 | 2020-08-25T17:18:14 | 290,603,252 | 1 | 0 | null | 2020-08-26T20:57:46 | 2020-08-26T20:57:45 | null | UTF-8 | Python | false | false | 1,761 | py | #!/usr/bin/env python3
from sqlalchemy import Column, Integer, String, ForeignKey, Table
from sqlalchemy.orm import relationship
from pwncat.db.base import Base
SecondaryGroupAssociation = Table(
"secondary_group_association",
Base.metadata,
Column("group_id", Integer, ForeignKey("groups.id")),
Column("user_id", ForeignKey("users.id")),
)
class Group(Base):
__tablename__ = "groups"
id = Column(Integer, primary_key=True)
host_id = Column(Integer, ForeignKey("host.id"), primary_key=True)
host = relationship("Host", back_populates="groups")
name = Column(String)
members = relationship(
"User", back_populates="groups", secondary=SecondaryGroupAssociation
)
class User(Base):
__tablename__ = "users"
# The users UID
id = Column(Integer, primary_key=True)
host_id = Column(Integer, ForeignKey("host.id"), primary_key=True)
host = relationship("Host", back_populates="users")
# The users GID
gid = Column(Integer, ForeignKey("groups.id"))
# The actual DB Group object representing that group
group = relationship("Group")
# The name of the user
name = Column(String, primary_key=True)
# The user's full name
fullname = Column(String)
# The user's home directory
homedir = Column(String)
# The user's password, if known
password = Column(String)
# The hash of the user's password, if known
hash = Column(String)
# The user's default shell
shell = Column(String)
# The user's secondary groups
groups = relationship(
"Group", back_populates="members", secondary=SecondaryGroupAssociation
)
def __repr__(self):
return f"""User(uid={self.id}, gid={self.gid}, name={repr(self.name)})"""
| [
"caleb.stewart94@gmail.com"
] | caleb.stewart94@gmail.com |
84bde73fa42eabd69ba5bea4fc838c0be58cfc32 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03007/s730382399.py | 981228d0cf9c62b57f9fcd8ada7c352af271632d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | import sys
def input(): return sys.stdin.readline().strip()
def mapint(): return map(int, input().split())
sys.setrecursionlimit(10**9)
N = int(input())
As = list(mapint())
As.sort()
mini = As.pop(0)
maxi = As.pop()
ans = []
for a in As:
if a>=0:
ans.append((mini, a))
mini -= a
else:
ans.append((maxi, a))
maxi -= a
print(maxi-mini)
for a in ans:
print(*a)
print(maxi, mini) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1441dde7e105cd3cde97729e1031e35a9f9e86e6 | 37e87b3d5e1ee9009f0ea0671bc0c6edf0e233b7 | /100.py | a0df2217a46082b4c28a98defe34177c131fb5f5 | [] | no_license | Jane11111/Leetcode2021 | d9f4987792938597bf89ff72ba6bbcb4a3f9d081 | a95b871578aae0103066962c33b8c0f4ec22d0f2 | refs/heads/master | 2023-07-14T21:29:41.196752 | 2021-08-23T03:28:02 | 2021-08-23T03:28:02 | 344,804,297 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | # -*- coding: utf-8 -*-
# @Time : 2021-04-19 10:21
# @Author : zxl
# @FileName: 100.py
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def dfs(self,p1,p2):
if p1 == None and p2 == None:
return True
if p1 == None:
return False
if p2 == None:
return False
if p1.val != p2.val:
return False
f1 = self.dfs(p1.left,p2.left)
f2 = self.dfs(p1.right,p2.right)
return f1 and f2
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
return self.dfs(p,q)
| [
"791057615@qq.com"
] | 791057615@qq.com |
b15addfc45ab37cec5e041e5a2373309e44ff52c | a6ad2fb11672d4500077e438ae17d6566a6423db | /tutorial/parse_ranking2.py | 9b697699831b12f1ed8e5864ef56b9fe399b5c0d | [
"MIT"
] | permissive | disc5/RLScore | 7d884ac575d3814a346a6d0db0d84116005bc693 | cfc34e999da02807ea1582358fe3e382fe100b70 | refs/heads/master | 2020-12-26T03:00:51.505825 | 2016-04-17T10:09:07 | 2016-04-17T10:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | import numpy as np
from rlscore.learner import QueryRankRLS
from rlscore.measure import cindex
from rlscore.reader import read_sparse
from rlscore.utilities.cross_validation import map_ids
def train_rls():
#Select regparam with k-fold cross-validation,
#where instances related to a single sentence form
#together a fold
X_train = read_sparse("train_2000_x.txt")
Y_train = np.loadtxt("train_2000_y.txt")
X_test = read_sparse("test_2000_x.txt", X_train.shape[1])
Y_test = np.loadtxt("test_2000_y.txt")
#list of sentence ids
qids_train = np.loadtxt("train_2000_qids.txt")
qids_test = np.loadtxt("test_2000_qids.txt")
learner = QueryRankRLS(X_train, Y_train, qids_train)
P_test = learner.predict(X_test)
folds = map_ids(qids_train)
perfs = []
for fold in folds:
if np.var(Y_train[fold]) != 0:
P = learner.holdout(fold)
c = cindex(Y_train[fold], P)
perfs.append(c)
perf = np.mean(perfs)
print("leave-query-out cross-validation cindex %f" %perf)
partition = map_ids(qids_test)
test_perfs = []
#compute the ranking accuracy separately for each test query
for query in partition:
#skip such queries, where all instances have the same
#score, since in this case cindex is undefined
if np.var(Y_test[query]) != 0:
perf = cindex(Y_test[query], P_test[query])
test_perfs.append(perf)
test_perf = np.mean(test_perfs)
print("test cindex %f" %test_perf)
if __name__=="__main__":
train_rls()
| [
"ajairo@utu.fi"
] | ajairo@utu.fi |
506816b1070a86d2d6f242f4ae1a7f83d40ef933 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fabric/rtpathl2outatt.py | 3e4dff57b7e1b4cd5566226d80dc40b8dbdc1972 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 4,789 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtPathL2OutAtt(Mo):
"""
A target relation to an abstraction of a path endpoint. Note that this relation is an internal object.
"""
meta = TargetRelationMeta("cobra.model.fabric.RtPathL2OutAtt", "cobra.model.l2ext.LIfP")
meta.moClassName = "fabricRtPathL2OutAtt"
meta.rnFormat = "rtl2extPathL2OutAtt-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Logical Interface Profile"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x80000800003
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.fabric.PathEp")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtl2extPathL2OutAtt-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 12082, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1753
prop.defaultValueStr = "l2extLIfP"
prop._addConstant("l2extLIfP", None, 1753)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 12081, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("FabricPathEPToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
23846ce4bec5d5384d38a3c2953d879d89b4d23d | 8e81c6c053f0a886675f47c84185ee2c749d7144 | /quiz/Problem 5-1.py | 69e13e5800c0e5a891f1bf88b5a2fd8502cf15e3 | [] | no_license | Silver-Iron/EDX-MITx--6.00.2x | 81acf404cda7fe34e0e3c0a9532faafb29048fe6 | 47740ea97d9f54ef5396fe3186e6485cbfeeba0f | refs/heads/master | 2021-01-22T22:34:16.283793 | 2017-03-20T09:34:31 | 2017-03-20T09:34:31 | 85,552,811 | 0 | 0 | null | 2017-03-20T08:33:25 | 2017-03-20T08:33:25 | null | UTF-8 | Python | false | false | 1,162 | py | """
You are taking a class that plans to assign final grades based on two midterm quizzes and a final exam. The final grade will be based on 25% for each midterm, and 50% for the final. You are told that the grades on the exams were each uniformly distributed integers:
Midterm 1: 50 <= grade <= 80
Midterm 2: 60 <= grade <= 90
Final Exam: 55 <= grade <= 95
Write a function called sampleQuizzes that implements a Monte Carlo simulation that estimates the probability of a student having a final score >= 70 and <= 75. Assume that 10,000 trials are sufficient to provide an accurate answer.
Note: Do not include any "import" statements in your code. We import the random module for you, and you should not be using any functions from the Pylab module for this problem.
"""
import random
def sampleQuizzes():
yes = 0
for e in range(10000):
mid1 = random.choice(range(50, 80))
mid2 = random.choice(range(60, 90))
finalExam = random.randrange(55, 95)
score = mid1*0.25 + mid2*0.25 + finalExam*0.5
if score >= 70 and score <= 75:
yes += 1
return yes / 10000.0
print sampleQuizzes()
| [
"dr.tallin@gmail.com"
] | dr.tallin@gmail.com |
1939dc7db68354a767eb23fbeb3da366e3043063 | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/preety_10022/Day2/Guess_Word_Game.py | 54df69da926c00a645e99a803a03b03a155f693c | [] | no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | word_input="Apple"
word_length=len(word_input)
word_input=word_input.lower()
letter_guess = ''
count = 0
limit = 10
livesleft=limit
correct_words=0
while livesleft >= 1:
letter_guess = input('Guess a letter: ')
letter_guess=letter_guess.lower()
if letter_guess in word_input:
print(' It is correct')
count += 1
correct_words +=1
if letter_guess not in word_input:
livesleft -=1
if livesleft==0:
print("Sorry .Left with No Chances!!!")
else:
print('Wrong Answer!Left with {}'.format(livesleft))
count += 1
if correct_words==word_length:
print("You have guessed all the letters correctly")
break
| [
"jsatapathy007@gmail.com"
] | jsatapathy007@gmail.com |
d0570e0c7100f66c352d8a6765156f68a49587d1 | ffc4f38fcb8fd341132152fad008d10ff5b3f4e7 | /menu-duplicatefirst.py | 467265b5b8cd581f940580f34412a8b3a9af1e87 | [] | no_license | imclab/AddLocator | 840a88371bf11b4f44818bd88e803f4bdf3c1b13 | 67b6f47a8eade3601a233dccfdeee2d6918ee20d | refs/heads/master | 2021-01-21T02:20:18.746329 | 2013-08-13T20:36:29 | 2013-08-13T20:36:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from maya.cmds import *
import maya.mel as mel
#1. make an array of all selected objects
target = ls(sl=True)
#2. if only one selection, just make a new duplicate at the same coordinates...
if(len(target)==1):
#call through mel because python has no rc option!
mel.eval("duplicate -un -ic -rc")
else:
try:
#3. check if the first selection is skinned.
select(target[0])
skinCluster(q=True)
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
print "Select the root joint for this to work properly."
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
except:
#4. ...otherwise, for each selected object...
for i in range(1,len(target)):
#5. ...get current selection's position and copy keyframes
select(target[i])
pos = xform(target[i], q=True, t=True, ws=True)
try:
copyKey()
except:
print "Couldn't copy keys."
#6. duplicate the first selection
select(target[0])
#call through mel because python has no rc option!
mel.eval("duplicate -un -ic -rc")
#7. move first selection to position and paste keyframes
move(pos[0],pos[1],pos[2])
try:
pasteKey()
except:
print "Couldn't paste keys."
#8. delete selection
delete(target[i])
| [
"nick@fox-gieg.com"
] | nick@fox-gieg.com |
3943ff161decd9e1262e58fabc959079a0dbd52b | 07aa9b5a07df2a80b7d899da1da63c84b1060fec | /src/iegen/codegen/visitor/_visitor.py | 573f2e54a91763a9bcdc2516091ce08d8217736e | [] | no_license | lamielle/iegen | f26da812a01557daca086e0a1c76a62af8fe7cd4 | 0f48edad8d14ae18c907d705751552cf6eb53c8e | refs/heads/master | 2016-09-05T12:48:23.698779 | 2010-12-14T19:17:13 | 2010-12-14T19:17:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | # _visitor.py
#
# Visitor class for traversing an AST of a program
# Alan LaMielle 10/20/2008
from iegen import IEGenObject
#---------- Depth First Visitor ----------
class DFVisitor(IEGenObject):
def __init__(self):
#---------- State members ----------
#Will be True if we are within a function, False otherwise
self.in_function=False
#-----------------------------------
#---------- Default In/Out Methods ----------
#Do nothing by default
def defaultIn(self,node): pass
def defaultOut(self,node): pass
def defaultBetween(self,node): pass
#--------------------------------------------
#---------- In/Out/Between Methods ----------
def inProgram(self,node):
self.defaultIn(node)
def outProgram(self,node):
self.defaultOut(node)
def betweenFunctions(self,node):
self.defaultBetween(node)
def inFunction(self,node):
self.defaultIn(node)
def outFunction(self,node):
self.defaultOut(node)
def betweenParamsStatements(self,node):
self.defaultBetween(node)
def inParameter(self,node):
self.defaultIn(node)
def outParameter(self,node):
self.defaultOut(node)
def betweenParameters(self,node):
self.defaultBetween(node)
def inStatement(self,node):
self.defaultIn(node)
def outStatement(self,node):
self.defaultOut(node)
def inVarDecl(self,node):
self.defaultIn(node)
def outVarDecl(self,node):
self.defaultOut(node)
def inComment(self,node):
self.defaultIn(node)
def outComment(self,node):
self.defaultOut(node)
#------------------------------------
#---------- Visit methods ----------
def visit(self,node):
node.apply_visitor(self)
return self
def visitProgram(self,node):
from iegen.util import iter_islast
self.inProgram(node)
for statement in node.preamble:
statement.apply_visitor(self)
for function,is_last in iter_islast(node.functions):
function.apply_visitor(self)
if not is_last:
self.betweenFunctions(node)
self.outProgram(node)
def visitFunction(self,node):
from iegen.util import iter_islast
self.in_function=True
self.inFunction(node)
for param,is_last in iter_islast(node.params):
param.apply_visitor(self)
if not is_last:
self.betweenParameters(param)
self.betweenParamsStatements(node)
for statement in node.body:
statement.apply_visitor(self)
self.outFunction(node)
self.in_function=False
def visitParameter(self,node):
self.inParameter(node)
self.outParameter(node)
def visitStatement(self,node):
self.inStatement(node)
self.outStatement(node)
def visitVarDecl(self,node):
self.inVarDecl(node)
self.outVarDecl(node)
def visitComment(self,node):
self.inComment(node)
self.outComment(node)
#-----------------------------------
#-----------------------------------------
| [
"lamielle@cs.colostate.edu"
] | lamielle@cs.colostate.edu |
1270547d683a15d46dcba368745d6f9d8a4a63fd | c646ad2dfab80f7183076dde82a82e6e1a6222d2 | /athenatools/migrations/0021_auto_20190310_1802.py | 80ac23bf864276666e7f2ae06dfa0036f6cc49eb | [
"MIT"
] | permissive | taojy123/AthenaTools | b7f5a799dca60237fb69f312f5a913964ae00097 | 612b113c5c9aeb0e6612242540fa05b7f0ac02c5 | refs/heads/master | 2023-07-21T06:32:15.638271 | 2023-07-19T09:42:01 | 2023-07-19T09:42:01 | 141,523,525 | 9 | 2 | MIT | 2023-06-30T22:19:13 | 2018-07-19T04:14:20 | Python | UTF-8 | Python | false | false | 2,397 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-03-10 10:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('athenatools', '0020_auto_20190310_1740'),
]
operations = [
migrations.AlterField(
model_name='product',
name='default_check_freeze',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe5\x86\xbb\xe5\x93\x81\xe6\xb8\xa9\xe5\xba\xa6\xe2\x89\xa4-12\xe2\x84\x83\xe4\xb8\x94\xe6\x97\xa0\xe8\xbd\xaf\xe5\x8c\x96'),
),
migrations.AlterField(
model_name='product',
name='default_check_label',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe6\xa0\x87\xe7\xad\xbe\xe6\xad\xa3\xe5\xb8\xb8'),
),
migrations.AlterField(
model_name='product',
name='default_check_odorless',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe6\x97\xa0\xe5\xbc\x82\xe5\x91\xb3'),
),
migrations.AlterField(
model_name='product',
name='default_check_package',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe5\x8c\x85\xe8\xa3\x85\xe5\xae\x8c\xe5\xa5\xbd'),
),
migrations.AlterField(
model_name='purchase',
name='check_freeze',
field=models.BooleanField(default=False, verbose_name=b'\xe5\x86\xbb\xe5\x93\x81\xe6\xb8\xa9\xe5\xba\xa6\xe2\x89\xa4-12\xe2\x84\x83\xe4\xb8\x94\xe6\x97\xa0\xe8\xbd\xaf\xe5\x8c\x96'),
),
migrations.AlterField(
model_name='purchase',
name='check_label',
field=models.BooleanField(default=False, verbose_name=b'\xe6\xa0\x87\xe7\xad\xbe\xe6\xad\xa3\xe5\xb8\xb8'),
),
migrations.AlterField(
model_name='purchase',
name='check_odorless',
field=models.BooleanField(default=False, verbose_name=b'\xe6\x97\xa0\xe5\xbc\x82\xe5\x91\xb3'),
),
migrations.AlterField(
model_name='purchase',
name='check_package',
field=models.BooleanField(default=False, verbose_name=b'\xe5\x8c\x85\xe8\xa3\x85\xe5\xae\x8c\xe5\xa5\xbd'),
),
]
| [
"taojy123@163.com"
] | taojy123@163.com |
ce991260a7e6c3aa07a348aed0fc5b6a06f621dd | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/T/tlevine/do_floats_break_the_datatable.py | ca732952aa668563159bca0a2b24b53642ebc4ac | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from scraperwiki.sqlite import save
save([],{"chainsaw":float(334.00023)})from scraperwiki.sqlite import save
save([],{"chainsaw":float(334.00023)}) | [
"pallih@kaninka.net"
] | pallih@kaninka.net |
0e1f8fb4c87029d81455a43272cd900f200b5f9e | 51575eeda79a6e12c8839046721168e5cc5b6774 | /gns/inference/optimization/tune.py | b369718420e2fe4eae814ede4bb9deea5eb6b764 | [] | no_license | rfeinman/GNS-Modeling | 59ad26efea4045c7dae98e98263d1193d53052b8 | 2c6b3400bfbb30f8f117042722fbcca2a8e9cb98 | refs/heads/master | 2023-06-08T21:22:27.914054 | 2021-07-08T14:17:56 | 2021-07-08T14:17:56 | 274,778,209 | 22 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | import numpy as np
import torch
def get_param_grid(pblur, peps, nbins_blur, nbins_eps):
blur_grid = np.linspace(-50, pblur, nbins_blur)
eps_grid = np.linspace(-2, 1, nbins_eps-1)
eps_grid = np.append(eps_grid, peps)
param_grid = np.meshgrid(blur_grid, eps_grid)
param_grid = np.stack(param_grid, axis=-1).reshape(-1,2)
return param_grid
@torch.no_grad()
def render_tuning_multi(parse_list, img, tune_fn, nbins_blur=20, nbins_eps=40):
K = len(parse_list)
drawing_list = [parse.drawing for parse in parse_list]
blur_params = [parse.blur_base.item() for parse in parse_list]
eps_params = [parse.epsilon_base.item() for parse in parse_list]
param_grids = [get_param_grid(blur_params[k], eps_params[k], nbins_blur, nbins_eps)
for k in range(K)]
losses = torch.zeros(nbins_blur*nbins_eps, K)
for i, param_vals in enumerate(zip(*param_grids)):
for parse, (pblur, peps) in zip(parse_list, param_vals):
parse.blur_base.data = torch.tensor(pblur, device=parse.blur_base.device)
parse.epsilon_base.data = torch.tensor(peps, device=parse.epsilon_base.device)
losses[i] = tune_fn(parse_list, drawing_list, img)
best_losses, best_idx = torch.min(losses, dim=0)
for parse, ix, grid in zip(parse_list, best_idx, param_grids):
pblur, peps = grid[ix]
parse.blur_base.data = torch.tensor(pblur, device=parse.blur_base.device)
parse.epsilon_base.data = torch.tensor(peps, device=parse.epsilon_base.device)
best_states = [parse.state for parse in parse_list]
return best_losses, best_states | [
"rfeinman16@gmail.com"
] | rfeinman16@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.