blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fbaa6d49368db0becbe90cabc3be773834120c82 | ad6eb2236acdf525c10af6c1cf62e877039301c2 | /lfs_order_numbers/models.py | 0ec1751dcead67933630dd17b9a6b1905885d42a | [] | no_license | diefenbach/lfs-order-numbers | 1dad836eded78d830cd6af79d1ce3fa2b9357640 | f9c3342dc7ebedfa286ac927ba84b433c2cbbc80 | refs/heads/master | 2021-05-25T11:14:21.547104 | 2017-02-23T10:12:19 | 2017-02-23T10:12:19 | 4,285,159 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from lfs.plugins import OrderNumberGenerator as Base
class OrderNumberGenerator(Base):
    """
    Generates sequential order numbers, persisting the last one handed out.

    **Attributes:**

    last
        The last stored/returned order number.

    format
        Optional printf-style format applied to the integer part of the
        order number (e.g. ``"%05d"``). Empty means "return the raw int".
    """
    last = models.IntegerField(_(u"Last Order Number"), default=0)
    format = models.CharField(_(u"Format"), blank=True, max_length=20)

    def get_next(self, formatted=True):
        """Increment, persist and return the next order number.

        **Parameters:**

        formatted
            If True the number will be returned within the stored format.
        """
        self.last = self.last + 1
        self.save()
        wants_format = formatted and self.format
        return self.format % self.last if wants_format else self.last
| [
"kai.diefenbach@iqpp.de"
] | kai.diefenbach@iqpp.de |
d2921fe8f0ce8ec4fb95b0e4aae8a9b2d90db54d | 19380415ccdcb0dac20f7bd67fcc8a0f631a3b90 | /models/union-find.py | c1b9a401d367ca82e8e843172825c2ccacca6e0c | [
"MIT"
] | permissive | italo-batista/problems-solving | c06c811364db7439d842db76e743dd7a1a7c8365 | f83ad34f0abebd52925c4020635556f20743ba06 | refs/heads/master | 2021-10-28T07:01:21.643218 | 2019-04-22T15:27:19 | 2019-04-22T15:27:19 | 76,066,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # n : numero de nos
# q : numero de operacoes
# Read problem size: n nodes, q operations (Python 2 script - raw_input).
n, q = map(int, raw_input().split())
# parents[i] == i initially: every node is its own set representative.
# (Python 2: range() returns a mutable list, which path compression mutates.)
parents = range(n+1)
# sizes[i] is the element count of the tree rooted at i (meaningful for roots).
sizes = [1] * (n+1)
def get_parent(x):
    """Return the representative (root) of x's set, compressing the path.

    Iterative two-pass variant: first walk up to find the root, then point
    every visited node directly at it - same final state as the recursive
    version, without recursion-depth limits.
    """
    root = x
    while parents[root] != root:
        root = parents[root]
    node = x
    while node != root:
        parents[node], node = root, parents[node]
    return root
def same_set(x, y):
    """Return True iff x and y currently belong to the same set."""
    root_x = get_parent(x)
    root_y = get_parent(y)
    return root_x == root_y
def connect(x, y):
    """Merge the sets containing x and y (union by size).

    The smaller tree is attached beneath the larger one, matching the
    original branch logic (ties attach x's root under y's root).
    """
    root_x = get_parent(x)
    root_y = get_parent(y)
    if root_x == root_y:
        return
    if sizes[root_x] > sizes[root_y]:
        # Swap so root_x is the smaller (or tied) tree, root_y the survivor.
        root_x, root_y = root_y, root_x
    parents[root_x] = root_y
    sizes[root_y] += sizes[root_x]
def get_size(x):
    """Return the number of elements in the set containing x."""
    root = get_parent(x)
    return sizes[root]
| [
"italo.batista@ccc.ufcg.edu.br"
] | italo.batista@ccc.ufcg.edu.br |
d13ae0b389a9aad24520238844270163decc9f47 | c2e06926e58e49e2659c77ec454716ccb42bd729 | /Test3/hawc2/2_postPro.py | 970866b6948734ae8d86a446cdf8f6be3f9fcab6 | [] | no_license | ptrbortolotti/BeamDyn_CpLambda | 72bfd6c831ebc5b86fdbc1f3dd10b3c05e693141 | e2f9a70044f7c0f1e720d828949faf1d392872c6 | refs/heads/main | 2023-08-28T22:34:21.546502 | 2021-09-27T17:34:01 | 2021-09-27T17:34:01 | 331,133,603 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,381 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Local
import weio
from welib.fast.fastlib import find_matching_pattern, averageDF
# --- Parameters
IPostPro=[0,1]
simDir='cases'
BD_mainfile = '../load_conv/f_1e5/Box_Beam_SCALED_1_BeamDyn.dat'
load = 500000;
vf = np.array([0.2,0.4,0.6,0.8,1.0,2.0,4.0])*load;
vf.sort()
# --- Derived params
bdLine = weio.read(BD_mainfile).toDataFrame()
kp_x = bdLine['kp_xr_[m]'].values
kp_y = bdLine['kp_yr_[m]'].values
# Hawc2 = BeamDyn
x = -kp_y
y = kp_x
z = bdLine['kp_zr_[m]'].values
nSpan=len(z)
if 0 in IPostPro:
# --- Loop on outputs and extract deflections
for isim, load in enumerate(vf):
outfilename = os.path.join(simDir,'f_{:5.1e}.dat'.format(load))
print(outfilename)
df = weio.read(outfilename).toDataFrame()
dfAvg = averageDF(df,avgMethod='constantwindow',avgParam=2.0)
colsX, sIdx = find_matching_pattern(df.columns, 'N(\d+)xb')
colsY, sIdx = find_matching_pattern(df.columns, 'N(\d+)yb')
colsZ, sIdx = find_matching_pattern(df.columns, 'N(\d+)zb')
Icol = [int(s) for s in sIdx]
if len(colsX)!=nSpan:
raise Exception('Number of columns dont match. Make this script more general or adapt')
u=np.zeros((3,nSpan))
for i,(cx,cy,cz,id) in enumerate(zip(colsX,colsY,colsZ,Icol)):
if i+1!=id:
raise Exception('Index mismatch, columns are not sorted')
u[:,i]=[dfAvg[cx]-x[i] ,dfAvg[cy]-y[i] ,dfAvg[cz]-z[i]]
fig,axes = plt.subplots(3, 1, sharex=True, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
for i,(ax,sc) in enumerate(zip(axes.ravel(),['x','y','z'])):
ax.plot(z, u[i,:]*1000) #, label=r'$u_{}$'.format(sc))
ax.set_ylabel(r'$u_{}$ [mm]'.format(sc))
ax.tick_params(direction='in')
ax.set_xlabel('Span [m]')
#plt.show()
fig.savefig(outfilename.replace('.dat','.png'))
cols=['r_[m]','u_x_[m]','u_y_[m]','u_z_[m]']
data =np.column_stack((z,u.T))
dfOut = pd.DataFrame(columns=cols, data=data)
dfOut.to_csv(outfilename.replace('.dat','.csv'), index=False, sep=',')
if 1 in IPostPro:
# --- Loop on csv and extract tip deflections
utip=np.zeros((3,len(vf)))
for isim, load in enumerate(vf):
outfilename = os.path.join(simDir,'f_{:5.1e}.csv'.format(load))
df=weio.read(outfilename).toDataFrame()
utip[:,isim] = [df['u_x_[m]'].values[-1], df['u_y_[m]'].values[-1], df['u_z_[m]'].values[-1]]
cols=['f_[N]','u_x_[m]','u_y_[m]','u_z_[m]']
data =np.column_stack((vf,utip.T))
dfOut = pd.DataFrame(columns=cols, data=data)
dfOut.to_csv('tiploads3.csv', index=False, sep='\t')
fig,axes = plt.subplots(3, 1, sharex=True, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
for i,(ax,sc) in enumerate(zip(axes.ravel(),['x','y','z'])):
ax.plot(np.arange(len(vf))+1, utip[i,:]*1000) #, label=r'$u_{}$'.format(sc))
ax.set_ylabel(r'$u_{}$ [mm]'.format(sc))
ax.tick_params(direction='in')
ax.set_xlabel('Load i')
fig.savefig('tiploads3.png')
if __name__ == '__main__':
pass
| [
"emmanuel.branlard@nrel.gov"
] | emmanuel.branlard@nrel.gov |
4978a0b12d379e159f7293cb2652dda29e4c98b6 | 3b76f9f2317e1eb2cd9553cab0b4dd01ce216ad5 | /using nested list find the second lower score using python.py | bbbf48ebc25de5392d90b5cabd6ffa32747cf7f2 | [] | no_license | KaziMotiour/Hackerrank-problem-solve-with-python | f12ea978c5274a90745545d3d2c9fb6a4f9b5230 | 798ce2a6c2b63ea24dc28a923bfee4b528fb2b5e | refs/heads/master | 2022-05-26T19:45:44.808451 | 2020-05-05T09:44:40 | 2020-05-05T09:44:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | arr=[]
# HackerRank "Nested Lists": read n (name, score) pairs and print, in
# alphabetical order, every student with the SECOND-lowest score.
# Rewrite of the original index-juggling while-loops, which shadowed the
# builtin `list` and silently broke whenever the second-lowest score
# exceeded the hard-coded sentinel 1000.0.
n = int(input())
# Each student is read as: name on one line, score on the next.
records = [(input(), float(input())) for _ in range(n)]
# Distinct scores in ascending order; index 1 is the second-lowest.
# (Assumes at least two distinct scores, per the problem statement.)
distinct_scores = sorted(set(score for _, score in records))
second_lowest = distinct_scores[1]
for name in sorted(name for name, score in records if score == second_lowest):
    print(name)
| [
"kmatiour30@gmail.com"
] | kmatiour30@gmail.com |
b29bf0c42c3bd7b3ef74fd50f2d5c415917e4666 | 6dbf099660ee82b72fb2526a3dc242d99c5fb8c8 | /tests/standalone/PmwUsing.py | b763dd902383fb55b22258463d256f19f0b49337 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Hellebore/Nuitka | 3544af691bc352769858ec1d44b6e9de46087bcf | 252d8e5d24521f8fff38142aa66c6b9063151f57 | refs/heads/develop | 2021-01-06T15:33:49.111250 | 2020-02-18T14:24:49 | 2020-02-18T14:24:49 | 241,380,473 | 0 | 0 | Apache-2.0 | 2020-07-11T17:52:04 | 2020-02-18T14:21:01 | Python | UTF-8 | Python | false | false | 1,038 | py | # Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import Pmw
# nuitka-skip-unless-expression: __import__("Tkinter" if sys.version_info[0] < 3 else "tkinter")
# nuitka-skip-unless-imports: Pmw
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
2a8156a8a96ce1b6fba8ac57fb61505dc07461e3 | c90c88f662ca3f6294ae7d5b7adb04831a2c01d9 | /WalletCenter/alembic/env.py | df09303374a28e8df4b7f0812639d3baf82e3c2b | [] | no_license | BigJeffWang/blockchain-py | a41512fbd52b182306ea00607e6b93871d5aa04d | 9d2abf10e9ff5a4ed7203564026919c1e2bd088a | refs/heads/master | 2021-07-20T22:52:13.496892 | 2020-08-05T09:54:18 | 2020-08-05T09:54:18 | 203,220,932 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from __future__ import with_statement
import sys
from pathlib import Path
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
sys.path.append(str(Path(__file__).resolve().parent.parent))
from tools.mysql_tool import MysqlTools
from models import base_model
from models import __alembic__
__alembic__.call_dynamic()
# Alembic Config object: gives access to the values in the .ini file in use.
config = context.config
# Configure Python logging from the ini file's [loggers]/[handlers] sections.
fileConfig(config.config_file_name)
# Build the SQLAlchemy URL dynamically instead of hard-coding it in the ini.
connect_string = MysqlTools().get_connect_string()
config.set_main_option('sqlalchemy.url', connect_string)
# Metadata of all models (loaded above via __alembic__.call_dynamic()),
# used by autogenerate support.
target_metadata = base_model.BaseModel.metadata
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the Alembic context with just a database URL - no Engine
    (and therefore no DBAPI) is required. Calls to context.execute() emit
    the given SQL to the script output instead of a live connection.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Builds a real Engine from the ini section and associates a live
    connection with the Alembic context (NullPool: the connection is
    opened only for the duration of the migration run).
    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    with engine.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
# Entry point: Alembic decides offline vs online mode from the CLI flags.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| [
"bigjeffwang@163.com"
] | bigjeffwang@163.com |
229d34fcec99565b3f6d19af0cbe8c1e7108dde1 | ecb22ddf7a927d320d2447feddf970c6ed81adbe | /src/plotAnswerLengthDistribution.py | f6a1c47810f11104d60e0195c7f698393074b053 | [] | no_license | shiannn/ADL2020-HW2-BertForQA | b1733339703dffb2fbdda481a5f090c26182c4a4 | 9e4f38bdeaaf61bd2c08ddd163271a699f21f16e | refs/heads/master | 2022-12-17T00:02:48.287380 | 2020-09-27T08:26:47 | 2020-09-27T08:26:47 | 257,008,637 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from transformers import BertTokenizer
if __name__ == '__main__':
    # Usage: plotAnswerLengthDistribution.py <dataName> <saveName>
    if len(sys.argv) != 3:
        print('usage: python3 plotAnswerLengthDistribution.py dataName saveName')
        exit(0)
    dataName = sys.argv[1]
    saveName = sys.argv[2]
    # Token lengths are cached to ansLength.npy so tokenization (slow)
    # only runs on the first invocation.
    ansLengthFile = Path('ansLength.npy')
    print(ansLengthFile.exists())
    if(not ansLengthFile.exists()):
        tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        distributionList = []
        # dataName is a SQuAD-style JSON: data -> paragraphs -> qas -> answers.
        with open(dataName, 'r') as f:
            A = json.load(f)
        # view all answers and collect their BERT-token lengths
        for data in A['data']:
            #print(data['paragraphs'])
            for paragraph in data['paragraphs']:
                for qas in paragraph['qas']:
                    for ans in qas['answers']:
                        #temp = ('#' in ans['text'])
                        #if temp == True:
                        #    print('a')
                        print(ans)
                        ansTokens = tokenizer.tokenize(ans['text'])
                        print(ansTokens)
                        distributionList.append(len(ansTokens))
        np.save('ansLength', np.array(distributionList))
    ansLength = np.load(ansLengthFile)
    print(len(ansLength))
    # Cumulative, normalised histogram of answer lengths in 5-token bins.
    bins = np.arange(0,120+5, step=5)
    print(bins)
    plt.hist(ansLength, bins=bins, edgecolor='black', cumulative=True, density=True)
    plt.xlabel('Length')
    plt.ylabel('Count (%)')
    plt.title('Cumulative Answer Length')
plt.savefig(saveName/Path('length.png')) | [
"b05502087@ntu.edu.tw"
] | b05502087@ntu.edu.tw |
cbfa5d9b795b084ed6548df8174d6450302ffb67 | d4bb21370ab020aa9d1dad2d812cdd0f25722ed4 | /test/support/git_fixture.py | c1102e4132706de3b9ce846406dfe120dc7f1820 | [
"MIT"
] | permissive | richo/groundstation | e6b74fb0a428b26408feae06ce16ad98997f2709 | 7ed48dd355051ee6b71164fc801e3893c09d11db | refs/heads/master | 2023-07-21T16:20:17.244184 | 2018-12-09T22:39:41 | 2018-12-09T22:39:41 | 7,293,510 | 27 | 5 | MIT | 2023-07-06T21:04:24 | 2012-12-23T09:09:49 | Python | UTF-8 | Python | false | false | 372 | py | def fake_tree():
return """100644 blob fadc864ddfed4a93fabf6d23939db4d542eb4363
.gitignore100644 blob 48e87b133a2594371acd57c49339dc8c04d55146 .gitmodules
100644 blob 725455bca81c809ad55aac363c633988f9207620 .jshintignore
100644 blob 40928639c7903f83f26e1aed78401ffde587e437 .jshintrc
100644 blob f3a9c9a807be340a7b929557aea3088540c77a6c .rbenv-version"""
| [
"richo@psych0tik.net"
] | richo@psych0tik.net |
5b7dfee3f6b4c14d728cbcd104dab4bee21ee794 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2761/60747/246217.py | 9d03172af498a3a1277332b3a310877d13fc4478 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | n=int(input())
# Answer each of the n queries: for a given num, output
# 0^2 + 1^2 + ... + num^2. Uses the closed form num(num+1)(2num+1)/6
# (exact with integer //) instead of the original O(num) loop, which also
# shadowed the builtin `sum` and reused the outer loop variable `i`.
result = []
for _ in range(n):
    num = int(input())
    result.append(num * (num + 1) * (2 * num + 1) // 6)
for value in result:
    print(value)
"1069583789@qq.com"
] | 1069583789@qq.com |
510f696ac8f8d51de2808c5a04deb4bf2d448ff3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_54/457.py | eb264093bd6ce407dfba0a75b15923f580c80baa | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from sys import stdin
def gcd(A, B):
    """Greatest common divisor of A and B (Euclid's algorithm).

    Works for B == 0 (returns A) just like the original while-loop version.
    """
    x, y = A, B
    while y:
        x, y = y, x % y
    return x
if __name__ == '__main__':
    # Python 2 script (print statement, xrange, long) - Code Jam driver:
    # C test cases, each a line "N t1 ... tN".
    C = int(stdin.readline())
    for c in xrange(1, C + 1):
        a = stdin.readline().split()
        N = int(a[0])
        t = map(long, a[1:])
        # Replace t in place by absolute cyclic differences of consecutive
        # values (last entry wraps around to the first value).
        first = t[0]
        for i in xrange(N - 1):
            t[i] = abs(t[i] - t[i + 1])
        t[-1] = abs(t[-1] - first)
        # T = gcd of all those differences.
        T = t[-1]
        for i in xrange(N - 1):
            T = gcd(T, t[i])
        # y = smallest non-negative value making (first + y) a multiple of T.
        if first % T == 0:
            y = 0
        else:
            y = T - first % T
        print "Case #%d: %d" % (c, y)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f677802888481b96a2c6b0d4537ffe39daea4b66 | a14ec6e367e6a471bfc74c066fb958ef585bc269 | /2019/08/common.py | cb39afff51d0c807e5697dc5794d241a5969ab81 | [] | no_license | jimhendy/AoC | 90641814ed431f46a8500ff0f022c6c957567563 | a1727f88bc2e6f739d65902dce188377966b3fb4 | refs/heads/master | 2023-09-02T14:48:39.860352 | 2023-08-28T08:09:19 | 2023-08-28T08:09:19 | 225,152,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import numpy as np
def in_to_array(inputs, n_rows=6, n_cols=25):
    """Split a flat digit sequence into image layers.

    `inputs` is any iterable of single digits (e.g. the puzzle input
    string); the result has shape (n_layers, n_rows, n_cols) with int dtype.
    """
    chars = np.array(list(inputs))
    layers = chars.reshape(-1, n_rows, n_cols)
    return layers.astype(int)
| [
"jimhendy88@gmail.com"
] | jimhendy88@gmail.com |
8e022b65157dfae25c3997ca0bfcf8bfd5b3af03 | 57ea6657b4deb620c4e29b606a5ec259d22fadcd | /Chatbot_Web/impl/view/kg_overview.py | 44d5dfc7bee3607865aecc31e6b77c9d710eaab7 | [
"Apache-2.0"
] | permissive | orchestor/Chatbot_CN | 021d05849257d66e8e2a65d4ead5a777e09d7d3d | 43922d7f73946d00faad3f27d86188ec18022965 | refs/heads/master | 2020-05-09T12:48:48.124981 | 2019-04-09T13:54:24 | 2019-04-09T13:54:24 | 181,124,145 | 1 | 0 | Apache-2.0 | 2019-04-13T05:11:09 | 2019-04-13T05:11:06 | null | UTF-8 | Python | false | false | 3,273 | py | #-*- coding:utf-8 _*-
"""
@author:charlesXu
@file: kg_overview.py
@desc: 知识图谱概览页面
@time: 2019/01/28
"""
import sys
from django.shortcuts import render
from pinyin import pinyin
from Chatbot_KG.toolkit.pre_load import tree
def show_overview(request):
    """Render the knowledge-graph category overview page.

    For the category in ``?node=...`` it builds: an A-Z grid of leaf
    entries (grouped by pinyin initial), parent/child category lists and
    the category tree, then renders kg_overview.html.

    NOTE(review): `node` and the entry titles are interpolated into HTML
    strings without escaping - a request parameter containing markup would
    be emitted verbatim (XSS risk). Consider django.utils.html.escape or
    building this markup in the template instead.
    """
    ctx = {}
    if 'node' in request.GET:
        node = request.GET['node']
        fatherList = tree.get_father(node)
        branchList = tree.get_branch(node)
        leafList = tree.get_leaf(node)
        ctx['node'] = "分类专题:[" + node + "]"
        rownum = 4  # number of entry columns per row
        leaf = ""
        # Bucket the leaf entries by the (upper-cased) initial letter of
        # their pinyin transcription; defaults to 'A' if no letter found.
        alpha_table = {}
        for alpha in range(ord('A'), ord('Z') + 1):
            alpha_table[chr(alpha)] = []
        for p in leafList:
            py = pinyin.get_initial(p)
            alpha = ord('A')
            for s in py:
                t = ord(s)
                if t >= ord('a') and t <= ord('z'):
                    t = t + ord('A') - ord('a')
                if t >= ord('A') and t <= ord('Z'):
                    alpha = t
                    break
            alpha_table[chr(alpha)].append(p)
        for kk in range(ord('A'), ord('Z') + 1):
            k = chr(kk)
            v = alpha_table[k]
            if len(v) == 0:
                continue
            add_num = rownum - len(v) % rownum  # number of padding cells
            add_num %= rownum
            for i in range(add_num):  # pad the last row with empty cells
                v.append('')
            leaf += '<div><span class="label label-warning"> ' + k + ' </span></div><br/>'
            for i in range(len(v)):
                if i % rownum == 0:
                    leaf += "<div class='row'>"
                leaf += '<div class="col-md-3">'
                leaf += '<p><a href="detail?title=' + v[i] + '">'
                if len(v[i]) > 10:
                    leaf += v[i][:10] + '...'
                else:
                    leaf += v[i]
                leaf += '</a></p>'
                leaf += '</div>'
                if i % rownum == rownum - 1:
                    leaf += "</div>"
            leaf += '<br/>'
        ctx['leaf'] = leaf
        # parent-category list
        father = '<ul class="nav nav-pills nav-stacked">'
        for p in fatherList:
            father += '<li role="presentation"> <a href="overview?node='
            father += p + '">'
            father += '<i class="fa fa-hand-o-right" aria-hidden="true"></i> ' + p + '</a></li>'
        father += '</ul>'
        if len(fatherList) == 0:
            father = '<p>已是最高级分类</p>'
        ctx['father'] = father
        # non-leaf (sub-category) list
        branch = '<ul class="nav nav-pills nav-stacked">'
        for p in branchList:
            branch += '<li role="presentation"> <a href="overview?node='
            branch += p + '">'
            branch += '<i class="fa fa-hand-o-right" aria-hidden="true"></i> ' + p + '</a></li>'
        branch += '</ul>'
        if len(branchList) == 0:
            branch = '<p>已是最低级分类</p>'
        ctx['branch'] = branch
        # build the category-tree widget
        level_tree = tree.create_UI(node)
        ctx['level_tree'] = level_tree
    return render(request, "knowledge_graph/kg_overview.html", ctx)
| [
"charlesxu86@163.com"
] | charlesxu86@163.com |
1f799e8e7be3bb458b963d6e3341620559bc09f8 | 0680311baa2a401f93bf4124fb6bbc229950848c | /model/one_stage_detector.py | a62d198fd9f49d880deaa56845b6398bc0a618c0 | [] | no_license | TonojiKiobya/m2det_pytorch | 8b56342862ef5dbc74dd905957cb41ab30273aff | 20a00c4ece288148e6112daa822451c6904560c6 | refs/heads/master | 2023-06-08T14:41:29.110670 | 2019-03-26T10:40:46 | 2019-03-26T10:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,483 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 16:05:16 2019
@author: ubuntu
"""
import logging
import torch.nn as nn
import numpy as np
import pycocotools.mask as maskUtils
import mmcv
from dataset.utils import tensor2imgs
from dataset.class_names import get_classes
from utils.registry_build import registered, build_module
@registered.register_module
class OneStageDetector(nn.Module):
    """One-stage detector: merges the base and single-stage detector logic
    (backbone -> optional neck -> bbox head) into one class.
    """
    def __init__(self, cfg):
        # cfg: full config; cfg.model holds backbone/neck/head sub-configs.
        super(OneStageDetector, self).__init__()
#        self.backbone = SSDVGG(**cfg.model.backbone)
#        self.bbox_head = SSDHead(**cfg.model.bbox_head)
        self.cfg = cfg
        self.backbone = build_module(cfg.model.backbone, registered)
        self.bbox_head = build_module(cfg.model.bbox_head, registered)
        # Neck (e.g. FPN) is optional; self.neck only exists when configured.
        if cfg.model.neck is not None:
            self.neck = build_module(cfg.model.neck, registered)
        self.train_cfg = cfg.train_cfg
        self.test_cfg = cfg.test_cfg
        self.init_weights(pretrained=cfg.model.pretrained)
def init_weights(self, pretrained=None):
if pretrained is not None:
logger = logging.getLogger()
logger.info('load model from: {}'.format(pretrained))
self.backbone.init_weights(pretrained=pretrained)
self.bbox_head.init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.cfg.model.neck is not None:
x = self.neck(x)
return x
def forward_train(self, img, img_metas, gt_bboxes, gt_labels):
x = self.extract_feat(img)
outs = self.bbox_head(x)
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(*loss_inputs)
return losses
    def forward_test(self, imgs, img_metas, **kwargs):
        """Test-time forward: dispatches to simple_test() for a single image
        and to aug_test() for multiple augmented images. SSD does not
        support multi-image testing yet (aug_test is unimplemented), so at
        validation time each GPU may hold only one image.
        """
        # Both inputs must be lists with one entry per augmentation.
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(
                'num of augmentations ({}) != num of image meta ({})'.format(
                    len(imgs), len(img_metas)))
        # TODO: remove the restriction of imgs_per_gpu == 1 when prepared
        imgs_per_gpu = imgs[0].size(0)
        assert imgs_per_gpu == 1
        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            return self.aug_test(imgs, img_metas, **kwargs)
def forward(self, img, img_meta, return_loss=True, **kwargs):
if return_loss:
return self.forward_train(img, img_meta, **kwargs)
else:
return self.forward_test(img, img_meta, **kwargs)
    def simple_test(self, img, img_meta, rescale=False):
        """Single-image test forward: features -> head -> decoded boxes,
        converted to per-class numpy arrays via bbox2result().
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
        bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
        bbox_results = [
            self.bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for det_bboxes, det_labels in bbox_list
        ]
        # Only one image per batch at test time (see forward_test assert).
        return bbox_results[0]
    def aug_test(self, imgs, img_metas, rescale=False):
        """Multi-image (augmented) test forward: not supported for SSD yet."""
        raise NotImplementedError
    def show_result(self, data, result, img_norm_cfg,
                    dataset='coco',
                    score_thr=0.3):
        """Visualise detections (and optional segmentation masks) on the
        de-normalised input images.

        `result` is either `bbox_result` or `(bbox_result, segm_result)`;
        `dataset` may be a dataset name, a sequence of class names or None;
        boxes below `score_thr` are not drawn.
        """
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None
        img_tensor = data['img'][0]
        img_metas = data['img_meta'][0].data[0]
        # Undo the normalisation so images are displayable.
        imgs = tensor2imgs(img_tensor, **img_norm_cfg)
        assert len(imgs) == len(img_metas)
        if isinstance(dataset, str):
            class_names = get_classes(dataset)
        elif isinstance(dataset, (list, tuple)) or dataset is None:
            class_names = dataset
        else:
            raise TypeError(
                'dataset must be a valid dataset name or a sequence'
                ' of class names, not {}'.format(type(dataset)))
        for img, img_meta in zip(imgs, img_metas):
            # Crop away the padding added by the data pipeline.
            h, w, _ = img_meta['img_shape']
            img_show = img[:h, :w, :]
            bboxes = np.vstack(bbox_result)
            # draw segmentation masks
            if segm_result is not None:
                segms = mmcv.concat_list(segm_result)
                inds = np.where(bboxes[:, -1] > score_thr)[0]
                for i in inds:
                    # Random colour per instance, alpha-blended at 50%.
                    color_mask = np.random.randint(
                        0, 256, (1, 3), dtype=np.uint8)
                    # NOTE(review): np.bool is removed in NumPy >= 1.24;
                    # this needs plain `bool` on modern NumPy.
                    mask = maskUtils.decode(segms[i]).astype(np.bool)
                    img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
            # draw bounding boxes
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)
            mmcv.imshow_det_bboxes(
                img_show,
                bboxes,
                labels,
                class_names=class_names,
                score_thr=score_thr)
    def bbox2result(self, bboxes, labels, num_classes):
        """Convert detection results to a list of numpy arrays.
        Args:
            bboxes (Tensor): shape (n, 5)
            labels (Tensor): shape (n, )
            num_classes (int): class number, including background class
        Returns:
            list(ndarray): bbox results of each class
        """
        # No detections: one empty (0, 5) array per foreground class.
        if bboxes.shape[0] == 0:
            return [
                np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)
            ]
        else:
            # Move tensors to CPU numpy before splitting per class.
            bboxes = bboxes.cpu().numpy()
            labels = labels.cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes - 1)] | [
"ximitiejiang@163.com"
] | ximitiejiang@163.com |
8f358c1bc133197673c67695d220540b3e6a5394 | c8371b410f19dc87059bbe0a28e983c3cfe0f4f8 | /src/etheroll/roll.py | 1e52d4a97491b5fb857622c89d755a84f9517b56 | [
"MIT"
] | permissive | homdx/EtherollApp | c70e37cff4fbbde8c605a8ca87776535185a7167 | 4953ce0f10ac58d43517fbc3a18bc5ed43297858 | refs/heads/master | 2020-03-28T19:05:10.591229 | 2018-09-30T21:25:32 | 2018-09-30T21:25:32 | 148,942,827 | 0 | 0 | MIT | 2018-09-15T21:52:51 | 2018-09-15T21:52:51 | null | UTF-8 | Python | false | false | 5,457 | py | from etherscan.client import ConnectionRefused
from kivy.app import App
from kivy.clock import Clock, mainthread
from kivy.properties import NumericProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import Screen
from etheroll.utils import Dialog, load_kv_from_py, run_in_thread
from pyetheroll.constants import ROUND_DIGITS
load_kv_from_py(__file__)
class RollUnderRecap(GridLayout):
    """Layout recapping the current roll: roll-under number, potential
    profit and wager. Exposed as Kivy properties - presumably bound from
    the companion .kv rules loaded via load_kv_from_py(); confirm there.
    """
    roll_under_property = NumericProperty()
    profit_property = NumericProperty()
    wager_property = NumericProperty()
class BetSize(BoxLayout):
    """Widget pairing the bet-size slider with a text input, kept in sync."""
    def __init__(self, **kwargs):
        super(BetSize, self).__init__(**kwargs)
        # Defer binding one frame so the .kv ids are populated first.
        Clock.schedule_once(self._after_init)
    def _after_init(self, dt):
        """
        Binds events.
        """
        slider = self.ids.bet_size_slider_id
        inpt = self.ids.bet_size_input_id
        cast_to = float
        # shows less digits than the constant default to keep the input tiny
        round_digits = 1
        BetSize.bind_slider_input(slider, inpt, cast_to, round_digits)
    @staticmethod
    def bind_slider_input(
            slider, inpt, cast_to=float, round_digits=ROUND_DIGITS):
        """
        Binds slider <-> input both ways.
        """
        # slider -> input
        slider.bind(
            value=lambda instance, value:
            setattr(inpt, 'text', "{0:.{1}f}".format(
                cast_to(value), round_digits)))
        # input -> slider
        inpt.bind(
            on_text_validate=lambda instance:
            setattr(slider, 'value', cast_to(inpt.text)))
        # also when unfocused
        inpt.bind(
            focus=lambda instance, focused:
            inpt.dispatch('on_text_validate')
            if not focused else False)
        # synchronises values slider <-> input once
        inpt.dispatch('on_text_validate')
    @property
    def value(self):
        """
        Returns normalized bet size value.
        """
        # An empty/partial input raises ValueError and normalises to 0.
        try:
            return round(
                float(self.ids.bet_size_input_id.text), ROUND_DIGITS)
        except ValueError:
            return 0
class ChanceOfWinning(BoxLayout):
    """Widget pairing the chance-of-winning slider with an integer input."""
    def __init__(self, **kwargs):
        super(ChanceOfWinning, self).__init__(**kwargs)
        # Defer binding one frame so the .kv ids are populated first.
        Clock.schedule_once(self._after_init)
    def _after_init(self, dt):
        """
        Binds events.
        """
        slider = self.ids.chances_slider_id
        inpt = self.ids.chances_input_id
        cast_to = self.cast_to
        round_digits = 0
        BetSize.bind_slider_input(slider, inpt, cast_to, round_digits)
    @staticmethod
    def cast_to(value):
        # Accepts strings like "50.0" and truncates to int.
        return int(float(value))
    @property
    def value(self):
        """
        Returns normalized chances value.
        """
        try:
            # `input_filter: 'int'` only verifies that we have a number
            # but doesn't convert to int
            chances = float(self.ids.chances_input_id.text)
            return int(chances)
        except ValueError:
            return 0
class RollScreen(Screen):
    """Main roll screen: shows the current account, its balance, and the
    bet-size / chance-of-winning inputs used to place a roll.
    """
    current_account_string = StringProperty()
    balance_property = NumericProperty()
    def __init__(self, **kwargs):
        super(RollScreen, self).__init__(**kwargs)
        # Defer binding one frame so the app root (Controller) exists.
        Clock.schedule_once(self._after_init)
    def _after_init(self, dt):
        """
        Binds `Controller.current_account` -> `RollScreen.current_account`.
        """
        controller = App.get_running_app().root
        controller.bind(current_account=self.on_current_account)
    def on_current_account(self, instance, account):
        """
        Sets current_account_string.
        """
        if account is None:
            return
        self.current_account_string = '0x' + account.address.hex()
    def get_roll_input(self):
        """
        Returns bet size and chance of winning user input values.
        """
        bet_size = self.ids.bet_size_id
        chance_of_winning = self.ids.chance_of_winning_id
        return {
            "bet_size": bet_size.value,
            "chances": chance_of_winning.value,
        }
    @mainthread
    def toggle_widgets(self, enabled):
        """
        Enables/disables widgets (useful during roll).
        """
        self.disabled = not enabled
    @property
    def pyetheroll(self):
        """
        We want to make sure we go through the `Controller.pyetheroll` property
        each time, because it recreates the Etheroll object on chain_id
        changes.
        """
        controller = App.get_running_app().root
        return controller.pyetheroll
    @mainthread
    def update_balance(self, balance):
        """
        Updates the property from main thread.
        """
        self.balance_property = balance
    @staticmethod
    @mainthread
    def on_connection_refused():
        """Shows a no-network dialog (on the main thread)."""
        title = 'No network'
        body = 'No network, could not retrieve account balance.'
        dialog = Dialog.create_dialog(title, body)
        dialog.open()
    @run_in_thread
    def fetch_update_balance(self):
        """
        Retrieves the balance and updates the property.
        """
        address = self.current_account_string
        if not address:
            return
        try:
            balance = self.pyetheroll.get_balance(address)
        except ConnectionRefused:
            self.on_connection_refused()
            return
        self.update_balance(balance)
| [
"andre.miras@gmail.com"
] | andre.miras@gmail.com |
a07012e4a01d74426aafbfa004b80c190341161a | 2d05050d0ada29f7680b4df20c10bb85b0530e45 | /python/tvm/exec/gpu_memory_bandwidth.py | a5f2021f733c2cd1e3ba8e6c82cce9db1dc4c994 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] | permissive | apache/tvm | 87cb617f9a131fa44e1693303aaddf70e7a4c403 | d75083cd97ede706338ab413dbc964009456d01b | refs/heads/main | 2023-09-04T11:24:26.263032 | 2023-09-04T07:26:00 | 2023-09-04T07:26:00 | 70,746,484 | 4,575 | 1,903 | Apache-2.0 | 2023-09-14T19:06:33 | 2016-10-12T22:20:28 | Python | UTF-8 | Python | false | false | 5,788 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A script to measure GPU memory bandwidth"""
import argparse
import itertools
import numpy as np
import tvm
from tvm import te, tir
from tvm.meta_schedule.runner import EvaluatorConfig
from tvm.testing import local_run
def _parse_args() -> argparse.Namespace:
    """Parse the command-line flags for the GPU bandwidth benchmark."""
    def _parse_list_int(source: str):
        # "8,16,32" -> [8, 16, 32]
        return [int(i) for i in source.split(",")]
    parser = argparse.ArgumentParser(
        prog="GPU memory bandwidth testing",
        description="""Example:
python -m tvm.exec.gpu_memory_bandwidth "nvidia/geforce-rtx-3090-ti" \
    --dtype "float32"
    --bx "8,16,32,64,128,256" \
    --tx "32,64,128,256,512,1024" \
    --vec "1,2,4"
""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "target",
        type=str,
        help="The target to be benchmarked",
    )
    parser.add_argument(
        "--xo",
        type=int,
        default=1024,
        help="The value of `XO` in [XO, K, XI] => [XO, XI] reduction",
    )
    parser.add_argument(
        "--k",
        type=int,
        default=64,
        help="The value of `K` in [XO, K, XI] => [XO, XI] reduction",
    )
    parser.add_argument(
        "--xi",
        type=int,
        default=4096,
        help="The value of `XI` in [XO, K, XI] -> [XO, XI] reduction",
    )
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="The data type to be used in the workload",
    )
    # The three lists below form the schedule search grid (see _schedule).
    parser.add_argument(
        "--bx",
        type=_parse_list_int,
        default=[8, 16, 32, 64, 128, 256],
        help="The value to be used to split `XO` into [BX, _]",
    )
    parser.add_argument(
        "--tx",
        type=_parse_list_int,
        default=[32, 64, 128, 256, 512, 1024],
        help="Number of threads to be used",
    )
    parser.add_argument(
        "--vec",
        type=_parse_list_int,
        default=[1, 2, 4],
        help="Vector length to be used in vectorized load",
    )
    return parser.parse_args()
def _workload(
    len_xo: int,
    len_k: int,
    len_xi: int,
    dtype: str,
):
    """Create the [XO, K, XI] => [XO, XI] sum-over-K reduction as a TE PrimFunc."""
    # pylint: disable=invalid-name
    A = te.placeholder((len_xo, len_k, len_xi), dtype=dtype, name="A")
    k = te.reduce_axis((0, len_k), "k")

    def _sum_over_k(i, j):
        # Reduce the middle (K) axis for output element (i, j).
        return te.sum(A[i, k, j], axis=k)

    B = te.compute((len_xo, len_xi), _sum_over_k, name="B")
    # pylint: enable=invalid-name
    return te.create_prim_func([A, B])
def _schedule(
    sch: tir.Schedule,
    len_bx: int,
    len_tx: int,
    len_vec: int,
):
    """Apply a GPU schedule: len_bx-way blocking, len_tx threads per block,
    and len_vec-wide vectorized loads of the input."""
    # pylint: disable=invalid-name
    block = sch.get_block("B")
    xo, xi, k = sch.get_loops(block)
    bx, xo = sch.split(xo, factors=[len_bx, None])
    xi, tx, vec = sch.split(xi, factors=[None, len_tx, len_vec])
    sch.reorder(bx, xi, tx, xo, k, vec)
    # Fuse the two outermost loops into the single blockIdx dimension.
    bx = sch.fuse(bx, xi)
    sch.bind(bx, "blockIdx.x")
    sch.bind(tx, "threadIdx.x")
    # Stage the input through per-thread local storage with vectorized loads.
    ldg = sch.cache_read(block, 0, "local")
    sch.compute_at(ldg, k, preserve_unit_loops=True)
    sch.vectorize(sch.get_loops(ldg)[-1])
    sch.decompose_reduction(block, k)
    # pylint: enable=invalid-name
def main():  # pylint: disable=too-many-locals
    """Entry point: sweep (bx, tx, vec) configurations and report peak bandwidth."""
    args = _parse_args()
    # pylint: disable=invalid-name
    target = tvm.target.Target(args.target)
    dtype = args.dtype
    a = np.random.uniform(-1, 1, (args.xo, args.k, args.xi)).astype(dtype)
    b = np.zeros((args.xo, args.xi), dtype=dtype)
    # Total traffic per kernel run: read all of `a` plus write all of `b`.
    num_bytes = a.size * a.itemsize + b.size * b.itemsize
    print("###### Bandwidth Test ######")
    print(
        f"Workload [XO, K, XI] => [XO, XI]. "
        f"[{args.xo}, {args.k}, {args.xi}] => [{args.xo}, {args.xi}]"
    )
    print(f"Input size: {num_bytes / 1048576} MB")
    print(f"Target: {target}")
    # pylint: enable=invalid-name
    best_bandwidth = -1
    # Grid-search every (bx, tx, vec) combination and keep the best result.
    for len_bx, len_tx, len_vec in itertools.product(
        args.bx,
        args.tx,
        args.vec,
    ):
        func = _workload(
            len_xo=args.xo,
            len_k=args.k,
            len_xi=args.xi,
            dtype=dtype,
        )
        sch = tir.Schedule(func)
        _schedule(sch, len_bx, len_tx, len_vec)
        _, profile_result = local_run(
            tvm.build(sch.mod, target=target),
            target.kind.name,
            [a, b],
            evaluator_config=EvaluatorConfig(
                number=10,
                repeat=1,
                min_repeat_ms=100,
                enable_cpu_cache_flush=False,
            ),
        )
        bandwidth = num_bytes / profile_result.mean / (1024**3)
        # Number of thread blocks after _schedule fuses the two outer loops.
        bx = len_bx * args.xi // (len_tx * len_vec)  # pylint: disable=invalid-name
        mbs = num_bytes / 1024 / 1024
        print(
            f"bandwidth = {bandwidth:.3f} GB/s, bx = {bx}, tx = {len_tx}, "
            f"len_vec = {len_vec}, bytes = {mbs} MB"
        )
        if bandwidth > best_bandwidth:
            best_bandwidth = bandwidth
    print(f"peak bandwidth: {best_bandwidth:.3f} GB/s")


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | apache.noreply@github.com |
0d9346ea6bfd98a4ea4aa39fdecc95754315d40f | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/anderspeders/sa_tester.py | 72d36cd4c6c8bfde2f227c119e5d3741ea5aa7be | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | import scraperwiki
scraperwiki.sqlite.save_var('data_columns', ['id', 'name', 'adress', 'city', 'postcode', 'country', 'Virksomhedstype'])
ftUrl = 'http://ec.europa.eu/competition/elojade/isef/index.cfm'
import lxml.html
root = lxml.html.fromstring(ftUrl)
for tr in root.cssselect("div[align='left'] tr.tcont"):
tds = tr.cssselect("td")
data = {
'Navn' : tds[0].text_content(),
'Adresse' : int(tds[4].text_content())
}
print data
data=ftUrl
import scraperwiki
scraperwiki.sqlite.save_var('data_columns', ['id', 'name', 'adress', 'city', 'postcode', 'country', 'Virksomhedstype'])
ftUrl = 'http://ec.europa.eu/competition/elojade/isef/index.cfm'
import lxml.html
root = lxml.html.fromstring(ftUrl)
for tr in root.cssselect("div[align='left'] tr.tcont"):
tds = tr.cssselect("td")
data = {
'Navn' : tds[0].text_content(),
'Adresse' : int(tds[4].text_content())
}
print data
data=ftUrl
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
e5a36deb356ed45d8a49e46daa46887d4e9d4c1e | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210714202552.py | 2ac589eab5e3a7e94d03b273ea1e766f243f7093 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,777 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from icecream import ic
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
    """Design-point selection strategy built on the constraints analyses."""

    def __init__(self, altitude, velocity, beta, method, p_w_turbofan_max=72, p_w_motorfun_max=10, n=12):
        """
        :param altitude: m x 1 matrix, one altitude per flight condition
        :param velocity: m x 1 matrix, one velocity per flight condition
        :param beta: P_motor/P_total, m x 1 matrix
        :param method: 1 = Mattingly method, 2 = Gudmundsson method
        :param p_w_turbofan_max: maximum propulsion power for turbofan (threshold value)
        :param p_w_motorfun_max: maximum propulsion power for motorfun (threshold value)
        :param n: number of motors

        The first flight condition is the stall-speed condition; it has to
        use the motor, therefore it fixes the wing loading with PD.
        :return: power load: design point p/w and w/s
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.n_motor = n
        self.p_w_turbofan_max = p_w_turbofan_max
        self.p_w_motorfun_max = p_w_motorfun_max
        # initialize the p_w, w_s, hp, n, m
        # self.n is the hp-grid resolution (distinct from self.n_motor above).
        self.n = 100
        self.m = altitude.size
        self.hp = np.linspace(0, 1+1/self.n, self.n+1)
        # Hybridisation ratio at which the motor supplies its full share of
        # the combined maximum power.
        self.hp_threshold = self.p_w_motorfun_max / (self.p_w_motorfun_max + self.p_w_turbofan_max)
        # method1 = Mattingly_Method, method2 = Gudmundsson_Method
        if method == 1:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
        else:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric
        # Wing loading from the stall-speed constraint (first flight condition).
        problem = self.method1(self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
        self.w_s = problem.allFuncs[0](problem)

    def p_w_compute(self):
        """Combined turbofan + motor power-to-weight for every condition/hp pair.

        :return: m x (n+1) matrix; splits where either engine exceeds its
            power threshold are penalised with a large value (100000).
        """
        p_w = np.zeros([self.m, len(self.hp)])  # m x (n+1) matrix
        for i in range(1, 8):
            for j in range(len(self.hp)):
                problem1 = self.method1(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                problem2 = self.method2(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                if i >= 5:
                    # Conditions 5-7 are climb constraints with decreasing
                    # rate-of-climb targets: 15, 10, 5 m/s.
                    p_w_1 = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
                    p_w_2 = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
                else:
                    p_w_1 = problem1.allFuncs[i](problem1)
                    p_w_2 = problem2.allFuncs[i](problem2)
                # Penalise infeasible splits exceeding either power threshold.
                if p_w_1 > self.p_w_turbofan_max:
                    p_w_1 = 100000
                elif p_w_2 > self.p_w_motorfun_max:
                    p_w_2 = 100000
                p_w[i, j] = p_w_1 + p_w_2
        return p_w

    def strategy(self):
        """Pick the design point.

        :return: (hp_p_w_min, design_point): the power-minimising hp per
            condition, and [w_s, p_w] for the most demanding condition.
        """
        p_w = Design_Point_Select_Strategy.p_w_compute(self)
        # find the min p_w for difference hp for each flight condition:
        p_w_min = np.amin(p_w, axis=1)
        # find the index of p_w_min which is the hp
        hp_p_w_min = np.zeros(8)
        for i in range(1, 8):
            for j in range(len(self.hp)):
                # j * 0.01 maps the grid index back to hp (valid for n=100).
                if p_w[i, j] - p_w_min[i] < 0.001:
                    hp_p_w_min[i] = j * 0.01
        hp_p_w_min[0] = self.hp_threshold
        # find the max p_w_min for each flight condition which is the design point we need:
        design_point = np.array([self.w_s, np.amax(p_w_min)])
        return hp_p_w_min, design_point
if __name__ == "__main__":
    # Flight conditions: each row is (altitude [m], velocity [m/s], beta, ...).
    # NOTE(review): the 4th column is never read below -- confirm whether it
    # was meant to seed the per-condition hybridisation ratio.
    constrains = np.array([[0, 80, 1, 0.2], [0, 68, 0.988, 0.5], [11300, 230, 0.948, 0.8],
                           [11900, 230, 0.78, 0.8], [3000, 100, 0.984, 0.8], [0, 100, 0.984, 0.5],
                           [3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.7]])
    h = constrains[:, 0]
    v = constrains[:, 1]
    beta = constrains[:, 2]
    problem = Design_Point_Select_Strategy(h, v, beta, method=2)
    hp_p_w_min, design_point = problem.strategy()
    ic(hp_p_w_min, design_point)
| [
"libao@gatech.edu"
] | libao@gatech.edu |
3c8f27457cd7e7975d7b68a101cbdd624eb043d1 | 73c2716fc72d0a389f14f21a5de73da818b54dc4 | /udemy-recipe-api/app/user/views.py | 75c415e65ee1bdfb16165a1ebbd5bc9b67e3722d | [
"MIT"
] | permissive | washimimizuku/django-tutorials | e13a429aa43cee24d84466d4cf3f22c518b17673 | 4f0e3836778dd3ea5403ef713e2f6777e44eae8d | refs/heads/main | 2023-06-04T05:47:16.863511 | 2021-06-15T13:38:37 | 2021-06-15T13:38:37 | 363,867,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
    """Create a new user in the system"""
    # POST-only endpoint; no authentication/permission classes are declared
    # here, so the project-wide DRF defaults apply.
    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for user"""
    serializer_class = AuthTokenSerializer
    # Reuse the renderer classes configured in settings so this endpoint
    # renders like the rest of the API (incl. the browsable API).
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Manage the authenticated user"""
    serializer_class = UserSerializer
    # Token-authenticated and restricted to logged-in users.
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        """Retrieve and return authenticated user"""
        # The object acted on is always the requester, so no pk is needed
        # in the URL.
        return self.request.user
| [
"nuno.barreto@inventsys.ch"
] | nuno.barreto@inventsys.ch |
570549c76e261ef39c724067588e6f90a8bd18af | 57d1580fd540b4819abb67f9db43fdfbba63725f | /lib/var.py | 37390233f5572cf38a87bfd5b95f5894ff38f029 | [] | no_license | glyfish/alpaca | 49edfcb9d80551825dfa4cf071f21aeb95a3502f | 2b5b69bcf50ed081a526742658be503706af94b4 | refs/heads/master | 2023-02-22T00:24:19.293502 | 2022-09-05T17:20:23 | 2022-09-05T17:20:23 | 186,169,438 | 1 | 3 | null | 2023-02-11T00:52:12 | 2019-05-11T18:38:58 | Python | UTF-8 | Python | false | false | 7,328 | py | import numpy
from matplotlib import pyplot
from lib import config
def multivariate_normal_sample(μ, Ω, n):
    """Draw n samples from a multivariate normal with mean μ and covariance Ω."""
    return numpy.random.multivariate_normal(μ, Ω, n)
def timeseries_plot(samples, tmax, ylabel, title, plot_name):
    """Plot the first tmax points of each row of samples as stacked subplots.

    samples: (nplot, nsample) array, one subplot per row.
    ylabel: sequence of per-subplot y-axis labels.
    The figure is saved via config.save_post_asset under "mean_reversion".
    """
    nplot, nsample = samples.shape
    # Shared y-limits so the series are visually comparable across panels.
    ymin = numpy.amin(samples)
    ymax = numpy.amax(samples)
    figure, axis = pyplot.subplots(nplot, sharex=True, figsize=(12, 9))
    axis[0].set_title(title)
    axis[nplot-1].set_xlabel(r"$t$")
    time = numpy.linspace(0, tmax-1, tmax)
    for i in range(nplot):
        # Annotate each panel with the sample mean and standard deviation.
        stats=f"μ={format(numpy.mean(samples[i]), '2.2f')}\nσ={format(numpy.std(samples[i]), '2.2f')}"
        bbox = dict(boxstyle='square,pad=1', facecolor="#FEFCEC", edgecolor="#FEFCEC", alpha=0.75)
        axis[i].text(0.05, 0.75, stats, fontsize=15, bbox=bbox, transform=axis[i].transAxes)
        axis[i].set_ylabel(ylabel[i])
        axis[i].set_ylim([ymin, ymax])
        axis[i].set_xlim([0.0, tmax])
        axis[i].plot(time, samples[i,:tmax], lw=1.0)
    config.save_post_asset(figure, "mean_reversion", plot_name)
def autocorrelation_plot(title, samples, γt, ylim, plot):
    """Plot the sample autocorrelation of `samples` against the theory curve γt.

    γt: theoretical autocovariance/autocorrelation values; its length sets
    the maximum lag shown.
    """
    max_lag = len(γt)
    figure, axis = pyplot.subplots(figsize=(10, 7))
    axis.set_title(title)
    axis.set_ylabel(r"$\gamma_{\tau}$")
    axis.set_xlabel("Time Lag (τ)")
    axis.set_xlim([-1.0, max_lag])
    axis.set_ylim(ylim)
    ac = autocorrelation(samples)
    # FFT-based autocorrelation is complex-valued; plot the real part.
    axis.plot(range(max_lag), numpy.real(ac[:max_lag]), marker='o', markersize=10.0, linestyle="None", markeredgewidth=1.0, alpha=0.75, label="Simulation", zorder=6)
    axis.plot(range(max_lag), γt, lw="2", label=r"$γ_{\tau}$", zorder=5)
    axis.legend(fontsize=16)
    config.save_post_asset(figure, "mean_reversion", plot)
def cross_correlation_plot(title, x, y, γt, ylim, plot):
    """Plot the sample cross-correlation of x and y against the theory curve γt."""
    max_lag = len(γt)
    figure, axis = pyplot.subplots(figsize=(10, 7))
    axis.set_title(title)
    axis.set_ylabel(r"$\gamma_{\tau}$")
    axis.set_xlabel("Time Lag (τ)")
    cc = cross_correlation(x, y)
    axis.set_xlim([-1.0, max_lag])
    axis.set_ylim(ylim)
    # FFT-based cross-correlation is complex-valued; plot the real part.
    axis.plot(range(max_lag), numpy.real(cc[:max_lag]), marker='o', markersize=10.0, linestyle="None", markeredgewidth=1.0, alpha=0.75, label="Simulation", zorder=6)
    axis.plot(range(max_lag), γt, lw="2", label=r"$γ_{\tau}$", zorder=5)
    axis.legend(fontsize=16)
    config.save_post_asset(figure, "mean_reversion", plot)
def plot_data_frame(df, tmax, plot_name):
    """Plot the first tmax rows of every column of df on a grid of subplots.

    More than 4 columns are laid out two per row; otherwise one per row.
    NOTE(review): with more than 4 columns the code assumes an even column
    count -- an odd count leaves an extra axis that indexes df.columns out
    of range. Confirm callers always pass an even number.
    """
    _, nplot = df.shape
    if nplot > 4:
        nrows = int(nplot / 2)
        ncols = 2
    else:
        nrows = nplot
        ncols = 1
    figure, axis = pyplot.subplots(nrows=nrows, ncols=ncols, figsize=(10, 8))
    for i, axis in enumerate(axis.flatten()):
        data = df[df.columns[i]]
        axis.plot(data[:tmax], lw=1)
        axis.set_title(df.columns[i], fontsize=12)
        axis.tick_params(axis="x", labelsize=10)
        axis.tick_params(axis="y", labelsize=10)
    pyplot.tight_layout(pad=1.0)
    config.save_post_asset(figure, "mean_reversion", plot_name)
def time_series_to_data_frame(columns, series):
    """Assemble named time series into a pandas DataFrame.

    :param columns: column names, one per series
    :param series: sequence of series, aligned index-wise with columns
    :return: DataFrame storing series[i] under columns[i]
    """
    # Imported locally: pandas is not imported at this module's top level,
    # so the original implementation raised NameError when called.
    import pandas
    return pandas.DataFrame({name: data for name, data in zip(columns, series)})
def var_simulate(x0, μ, φ, Ω, n):
    """Simulate n steps of an m-dimensional VAR(l) process.

    :param x0: (m, l) array of the l initial observations, one per column
    :param μ: innovation mean
    :param φ: (l, m, m) stack of lag-coefficient matrices
    :param Ω: innovation covariance
    :param n: total number of time steps to produce
    :return: (m, n) array whose first l columns are the initial conditions
    """
    m, l = x0.shape
    xt = numpy.zeros((m, n))
    # Draw all Gaussian innovation vectors up front.
    ε = multivariate_normal_sample(μ, Ω, n)
    for i in range(l):
        xt[:,i] = x0[:,i]
    for i in range(l, n):
        xt[:,i] = ε[i]
        for j in range(l):
            # Add φ_j · x_{t-j-1}; squeeze the (m, 1) matrix back to 1-D.
            t1 = φ[j]*numpy.matrix(xt[:,i-j-1]).T
            t2 = numpy.squeeze(numpy.array(t1), axis=1)
            xt[:,i] += t2
    return xt
def phi_companion_form(φ):
    """Build the companion matrix of the VAR coefficient stack φ.

    The top block-row is [φ_1 ... φ_l]; the rows below carry identity blocks
    so that a VAR(l) can be treated as a VAR(1) in the enlarged state space.
    NOTE(review): the outer loop runs over range(1, n) (the variable count)
    rather than the lag count l, and the n**2 sizing used elsewhere in this
    module matches only when l == n -- confirm that assumption.
    """
    l, n, _ = φ.shape
    # First block-row: the lag matrices side by side.
    p = φ[0]
    for i in range(1,l):
        p = numpy.concatenate((p, φ[i]), axis=1)
    # Remaining block-rows: identity in the sub-diagonal block position,
    # zeros everywhere else.
    for i in range(1, n):
        if i == 1:
            r = numpy.eye(n)
        else:
            r = numpy.zeros((n, n))
        for j in range(1,l):
            if j == i - 1:
                r = numpy.concatenate((r, numpy.eye(n)), axis=1)
            else:
                r = numpy.concatenate((r, numpy.zeros((n, n))), axis=1)
        p = numpy.concatenate((p, r), axis=0)
    return numpy.matrix(p)
def mean_companion_form(μ):
    """Embed the mean vector μ into its (n², 1) companion-form column vector."""
    dim = len(μ)
    padded = numpy.concatenate((numpy.asarray(μ, dtype=float),
                                numpy.zeros(dim * dim - dim)))
    return numpy.matrix(padded).T
def omega_companion_form(ω):
    """Embed the n x n noise covariance ω into its n² x n² companion form."""
    n = ω.shape[0]
    embedded = numpy.matrix(numpy.zeros((n * n, n * n)))
    embedded[:n, :n] = ω
    return embedded
def vec(m):
    """Stack the columns of the square matrix m into one (n*n, 1) column vector.

    Inverse operation: unvec.
    """
    # Column-major (Fortran-order) flattening is exactly column stacking.
    flat = numpy.asarray(m, dtype=float).flatten("F")
    return numpy.matrix(flat).T
def unvec(v):
    """Reshape an (n*n, 1) column vector back into an n x n matrix (inverse of vec)."""
    side = int(numpy.sqrt(v.shape[0]))
    blocks = [v[j * side:(j + 1) * side] for j in range(side)]
    return numpy.matrix(numpy.hstack(blocks), dtype=float)
def stationary_mean(φ, μ):
    """Stationary mean of the VAR process: (I - Φ)^-1 M in companion form."""
    Φ = phi_companion_form(φ)
    Μ = mean_companion_form(μ)
    n, _ = Φ.shape
    tmp = numpy.matrix(numpy.eye(n)) - Φ
    return numpy.linalg.inv(tmp)*Μ
def stationary_covariance_matrix(φ, ω):
    """Stationary covariance Σ of the companion-form VAR.

    Solves the discrete Lyapunov equation Σ = Φ Σ Φ' + Ω via vectorization:
    vec(Σ) = (I - Φ⊗Φ)^-1 vec(Ω).
    """
    Ω = omega_companion_form(ω)
    Φ = phi_companion_form(φ)
    n, _ = Φ.shape
    eye = numpy.matrix(numpy.eye(n**2))
    tmp = eye - numpy.kron(Φ, Φ)
    inv_tmp = numpy.linalg.inv(tmp)
    vec_var = inv_tmp * vec(Ω)
    return unvec(vec_var)
def stationary_autocovariance_matrix(φ, ω, n):
    """Return the first n stationary autocovariance matrices of the VAR.

    γ[τ] = Σ (Φ^τ)' where Φ is the companion matrix of φ and Σ is the
    stationary companion-form covariance.

    :param φ: (l, m, m) stack of lag-coefficient matrices
    :param ω: innovation covariance
    :param n: number of lags to compute
    :return: (n, L, L) array with L the companion dimension
    """
    # (An unused linspace over the lag axis was removed here.)
    Φ = phi_companion_form(φ)
    Σ = stationary_covariance_matrix(φ, ω)
    l, _ = Φ.shape
    γ = numpy.zeros((n, l, l))
    # First pass: γ[i] accumulates Φ**i.
    γ[0] = numpy.matrix(numpy.eye(l))
    for i in range(1,n):
        γ[i] = γ[i-1]*Φ
    # Second pass: γ[i] becomes Σ (Φ**i)'.
    for i in range(n):
        γ[i] = Σ*γ[i].T
    return γ
def eigen_values(φ):
    """Eigenvalues of the companion matrix of φ.

    All eigenvalues strictly inside the unit circle implies stationarity.
    """
    Φ = phi_companion_form(φ)
    λ, _ = numpy.linalg.eig(Φ)
    return λ
def autocorrelation(x):
    """Sample autocorrelation of x via FFT, normalized so lag 0 equals 1.

    Returns a complex array of length len(x); callers typically take the
    real part.
    """
    n = len(x)
    centered = x - x.mean()
    # Zero-pad to 2n-1 so the circular convolution equals the linear one.
    padded = numpy.concatenate((centered, numpy.zeros(n - 1)))
    spectrum = numpy.fft.fft(padded)
    raw = numpy.fft.ifft(spectrum * numpy.conj(spectrum))
    return raw[:n] / raw[0]
def cross_correlation(x, y):
    """Sample cross-correlation of x and y via FFT, scaled by 1/n.

    Returns a complex array of length len(x); callers typically take the
    real part.
    """
    n = len(x)
    pad = numpy.zeros(n - 1)
    # Zero-pad both centered series so the circular product is linear.
    fx = numpy.fft.fft(numpy.concatenate((x - x.mean(), pad)))
    fy = numpy.fft.fft(numpy.concatenate((y - y.mean(), pad)))
    cc = numpy.fft.ifft(numpy.conj(fx) * fy)
    return cc[:n] / float(n)
def yt_parameter_estimation_form(xt):
    """Stack the l lagged copies of xt used as regressors in VAR estimation.

    For xt of shape (l, n) returns the (l*l, n-l) matrix whose block-rows are
    xt shifted by 1, 2, ..., l steps.
    """
    l, n = xt.shape
    lagged = [xt[:, l - i:n - i] for i in range(1, l + 1)]
    return numpy.concatenate(lagged, axis=0)
def theta_parameter_estimation(xt):
    """Least-squares estimate of the stacked VAR coefficient matrix.

    Θ = (Σ x_t y_t') (Σ y_t y_t')^-1 where y_t stacks the l lags of x_t.
    NOTE(review): the row count l also serves as the lag order, consistent
    with the l == n assumption used throughout this module.
    """
    l, n = xt.shape
    yt = yt_parameter_estimation_form(xt)
    m, _ = yt.shape
    yy = numpy.matrix(numpy.zeros((m, m)))
    xy = numpy.matrix(numpy.zeros((l, m)))
    # Accumulate the moment matrices over the usable sample (t = l .. n-1).
    for i in range(l, n):
        x = numpy.matrix(xt[:,i]).T
        y = numpy.matrix(yt[:,i-l]).T
        yy += y*y.T
        xy += x*y.T
    return xy*numpy.linalg.inv(yy)
def split_theta(theta):
    """Split the stacked coefficient matrix into its per-lag blocks φ_1..φ_l."""
    n_blocks = theta.shape[0]
    return numpy.split(theta, n_blocks, axis=1)
def omega_parameter_estimation(xt, theta):
    """Estimate the innovation covariance from the VAR residuals x_t - Θ y_t."""
    l, n = xt.shape
    yt = yt_parameter_estimation_form(xt)
    omega = numpy.matrix(numpy.zeros((l, l)))
    # Sum of outer products of the one-step-ahead residuals.
    for i in range(l, n):
        x = numpy.matrix(xt[:,i]).T
        y = numpy.matrix(yt[:,i-l]).T
        term = x - theta*y
        omega += term*term.T
    return omega / float(n-l)
| [
"troy.stribling@gmail.com"
] | troy.stribling@gmail.com |
6d45a0aafcf85c8215157029bef4318a5c2d0836 | 7ab41799fd38489c93282f1beb3b20e7ef8ff165 | /python/79.py | baa33400e8230c73e0d6ae18a6d3a474fff1fc2d | [] | no_license | scturtle/leetcode-sol | 86c4095df6b31a9fcad683f2d63669ce1691633c | e1a9ce5d9b8fe4bd11e50bd1d5ba1933de845db7 | refs/heads/master | 2020-04-23T00:01:37.016267 | 2015-11-21T04:15:27 | 2015-11-21T04:15:27 | 32,385,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | class Solution(object):
@staticmethod
def search(board, word, idx, i, j, vit):
if idx == len(word):
return True
if i < 0 or i >= len(board) or\
j < 0 or j >= len(board[0]) or\
board[i][j] != word[idx]:
return False
vit.add((i, j))
for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
ni, nj = i+di, j+dj
if (ni, nj) not in vit:
if Solution.search(board, word, idx+1, ni, nj, vit):
return True
vit.remove((i, j))
return False
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
n = len(board)
if not n:
return False
m = len(board[0])
if not m:
return False
vit = set()
for i, j in itertools.product(range(n), range(m)):
if board[i][j] == word[0]:
if Solution.search(board, word, 0, i, j, vit):
return True
return False
| [
"scturtle@gmail.com"
] | scturtle@gmail.com |
0d8d655f64d764621a9762d4c449a17f82d6ac57 | b4d160ff9bc139752f04ead3c38b88cf2d91c8a2 | /Tests/DegenPrimer_Tests/Test_SecStructures.py | feda0edcb1189bb46d9320c9c0c9a697b3bbb902 | [] | no_license | allista/DegenPrimer | 2c69bf832f908601c28245c735db9b6b1efa9932 | c610551c9f6f769dcd03f945d7682471ea91bade | refs/heads/master | 2022-06-03T01:16:12.269221 | 2022-05-12T11:16:02 | 2022-05-12T11:16:02 | 45,181,326 | 7 | 4 | null | 2022-05-07T12:22:54 | 2015-10-29T12:20:21 | Python | UTF-8 | Python | false | false | 1,656 | py | # coding=utf-8
#
# Copyright (C) 2012 Allis Tauri <allista@gmail.com>
#
# degen_primer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# degen_primer is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 2016-01-14
@author: Allis Tauri <allista@gmail.com>
'''
def test():
    """Profile Duplex.print_most_stable under fixed PCR conditions.

    A profiling harness rather than an assertion-based test: cProfile stats
    are written to 'Duplex.print_stable.profile' for later inspection.
    """
    import cProfile
    import DegenPrimer.TD_Functions as tdf
    from DegenPrimer.SecStructures import Duplex, reverse_complement, Dimer
    # Reaction conditions (molar concentrations; PCR_T in deg C).
    tdf.PCR_P.Na = 50.0e-3
    tdf.PCR_P.Mg = 3.0e-3
    tdf.PCR_P.dNTP = 0.15e-6
    tdf.PCR_P.DNA = 1.0e-9
    tdf.PCR_P.DMSO = 0.0
    tdf.PCR_P.PCR_T = 60.0
    with tdf.AcquireParameters():
        du = Duplex('AGAGAACGCAAAGATCGGGAAC', 'CTTGCGTTTCTAACCCTTG'[::-1], dimer=Dimer((3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21), 3))
        print du
        cProfile.runctx('for x in xrange(100000): du.print_most_stable()',
                        globals(), locals(), 'Duplex.print_stable.profile')
#    seq = 'ATGCGTCACTACCAGT'*10000
#    cProfile.runctx('''for x in xrange(100):
#        reverse_complement(seq)''',
#                    globals(), locals(), 'reverse_complement.profile')

test()
"allista@gmail.com"
] | allista@gmail.com |
c2de7d559633cefc527fd6e213dd4284f75e0499 | 94bb879816dbdd69559ecfcc70a09f33d104af67 | /source/functions/sqlmap/plugins/dbms/oracle/fingerprint.py | 3e471ca628ae7c75cc2d193a498e8d77b0777536 | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"GPL-CC-1.0",
"LicenseRef-scancode-unknown",
"MIT"
] | permissive | 51000000/CampusCyberInspectionTool2021 | f328ad571ab88051aa6928a67209dd94ce25eb6c | 27a2de7ff3707ba6ab084acfce79a7d3f42b8f84 | refs/heads/main | 2023-03-28T01:11:22.678066 | 2021-04-01T05:23:54 | 2021-04-01T05:23:54 | 353,502,239 | 0 | 0 | MIT | 2021-03-31T22:06:49 | 2021-03-31T22:06:48 | null | UTF-8 | Python | false | false | 3,859 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import ORACLE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
    """Active and passive DBMS fingerprinting logic specific to Oracle."""

    def __init__(self):
        GenericFingerprint.__init__(self, DBMS.ORACLE)

    def getFingerprint(self):
        """Return a printable summary of all fingerprint sources collected so far."""
        value = ""
        wsOsFp = Format.getOs("web server", kb.headersFp)
        if wsOsFp:
            value += "%s\n" % wsOsFp
        if kb.data.banner:
            dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
            if dbmsOsFp:
                value += "%s\n" % dbmsOsFp
        value += "back-end DBMS: "
        # Without --fingerprint (extensiveFp) only the DBMS name is reported.
        if not conf.extensiveFp:
            value += DBMS.ORACLE
            return value
        actVer = Format.getDbms()
        blank = " " * 15
        value += "active fingerprint: %s" % actVer
        if kb.bannerFp:
            banVer = kb.bannerFp.get("dbmsVersion")
            if banVer:
                banVer = Format.getDbms([banVer])
                value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
        htmlErrorFp = Format.getErrorParsedDBMSes()
        if htmlErrorFp:
            value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
        return value

    def checkDbms(self):
        """Confirm the back-end DBMS is Oracle; also detect its version
        when extensive fingerprinting is requested. Returns True/False."""
        # Trust the user-forced DBMS unless extensive fingerprinting is on.
        if not conf.extensiveFp and Backend.isDbmsWithin(ORACLE_ALIASES):
            setDbms(DBMS.ORACLE)
            self.getBanner()
            return True
        infoMsg = "testing %s" % DBMS.ORACLE
        logger.info(infoMsg)
        # NOTE: SELECT LENGTH(SYSDATE)=LENGTH(SYSDATE) FROM DUAL does
        # not work connecting directly to the Oracle database
        if conf.direct:
            result = True
        else:
            result = inject.checkBooleanExpression("LENGTH(SYSDATE)=LENGTH(SYSDATE)")
        if result:
            infoMsg = "confirming %s" % DBMS.ORACLE
            logger.info(infoMsg)
            # NOTE: SELECT NVL(RAWTOHEX([RANDNUM1]),[RANDNUM1])=RAWTOHEX([RANDNUM1]) FROM DUAL does
            # not work connecting directly to the Oracle database
            if conf.direct:
                result = True
            else:
                result = inject.checkBooleanExpression("NVL(RAWTOHEX([RANDNUM1]),[RANDNUM1])=RAWTOHEX([RANDNUM1])")
            if not result:
                warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
                logger.warn(warnMsg)
                return False
            setDbms(DBMS.ORACLE)
            self.getBanner()
            if not conf.extensiveFp:
                return True
            infoMsg = "actively fingerprinting %s" % DBMS.ORACLE
            logger.info(infoMsg)
            # Reference: https://en.wikipedia.org/wiki/Oracle_Database
            # Compare the first one (versions < 10) or two digits of VERSION
            # against each known release, newest first.
            for version in ("19c", "18c", "12c", "11g", "10g", "9i", "8i", "7"):
                number = int(re.search(r"([\d]+)", version).group(1))
                output = inject.checkBooleanExpression("%d=(SELECT SUBSTR((VERSION),1,%d) FROM SYS.PRODUCT_COMPONENT_VERSION WHERE ROWNUM=1)" % (number, 1 if number < 10 else 2))
                if output:
                    Backend.setVersion(version)
                    break
            return True
        else:
            warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
            logger.warn(warnMsg)
            return False

    def forceDbmsEnum(self):
        """Normalise user-supplied names to upper case (unquoted Oracle
        identifiers are stored upper-case in the data dictionary)."""
        if conf.db:
            conf.db = conf.db.upper()
        if conf.tbl:
            conf.tbl = conf.tbl.upper()
| [
"55148245+51000000@users.noreply.github.com"
] | 55148245+51000000@users.noreply.github.com |
7c363abe67e32e125b760f67b97c0168b77ec74a | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /ABC/abc201-abc250/abc248/c/main.py | f8dc5e6782ba90a0e4f2cd025916315e4147dbc5 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 608 | py | # -*- coding: utf-8 -*-
def main():
    """Count length-n sequences over 1..m with total sum at most k, mod 998244353."""
    import sys
    input = sys.stdin.readline

    n, m, k = map(int, input().split())

    size = n * m + 10
    # dp[j] counts prefixes whose (sum - 1) equals j: the first element with
    # value v is seeded at index v - 1, and each transition adds the raw
    # value x, so the stored index stays at (true sum) - 1 throughout.
    dp = [0 for _ in range(size)]
    mod = 998244353

    for i in range(m):
        dp[i] = 1

    for i in range(n - 1):
        ndp = [0 for _ in range(size)]
        for j in range(k + 1):
            for x in range(1, m + 1):
                # j + x >= k means (true sum) = j + 1 + x > k: over budget.
                if j + x >= k:
                    continue
                ndp[j + x] += dp[j]
                ndp[j + x] %= mod
        dp = ndp

    print(sum(dp) % mod)


if __name__ == "__main__":
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
5ef51b0a8c32b67780cc1894aa59fcc513e7b686 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_3_1_neat/16_3_1_bsoist_a.py | 6d05be7fb34134bb8fb5c0df300c6d30f8e63afe | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,334 | py | from string import uppercase
import sys
import itertools
def no_majority(nums):
    """Return True when no single entry strictly exceeds half of the total.

    An entry of exactly half the total does NOT count as a majority, and an
    empty or all-zero tally trivially has no majority.
    """
    total = sum(nums)
    if total == 0:
        return True
    # Exact integer comparison: 2*max > total <=> max/total > 0.5, without
    # the floating-point division the original relied on.
    return 2 * max(nums) <= total
def get_indexes(indexes):
    """Yield every ordered pair of distinct positions, then each position alone."""
    for pair in itertools.permutations(indexes, r=2):
        yield pair
    for single in indexes:
        yield single
def get_step(parties):
    """Choose up to two senators to evacuate next without creating a majority.

    parties: remaining senator counts per party (index = party).
    Tries every ordered pair of non-empty parties and returns the first pair
    whose removal leaves no party with a strict majority; otherwise falls
    back to evacuating one senator from the (first) largest party.
    Returns a two-element sequence of party indexes, entries may be None.
    """
    indexes = [i for (i,n) in enumerate(parties) if n]
    for a, b in itertools.permutations(indexes,r=2):
        step = [None, None]
        remaining_senators = parties[:]
        if remaining_senators[a]:
            step[0] = a
            remaining_senators[a] -= 1
        if remaining_senators[b]:
            step[1] = b
            remaining_senators[b] -= 1
        if no_majority(remaining_senators):
            return step
    # No safe pair exists: take a single senator from the largest party.
    return None, parties.index(max(parties))
# Driver (Python 2): one test case per iteration of the outer loop.
for case_num in xrange(1,int(raw_input()) + 1):
    # The first line of each case (the party count) is redundant -- the
    # counts line carries the same information -- so it is discarded.
    raw_input()
    in_parties = map(int, raw_input().split(" "))
    plan = []
    # Evacuate one or two senators per step until the senate is empty.
    while sum(in_parties) > 0:
        a,b = get_step(in_parties)
        # Encode the chosen parties as letters (0 -> 'A', 1 -> 'B', ...).
        plan.append("".join([uppercase[n] for n in (a,b) if n is not None]))
        if a is not None:
            in_parties[a] -= 1
        if b is not None:
            in_parties[b] -= 1
    print "Case #%s: %s" % (case_num, " ".join(plan))
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
1818b2f90162f7c979b4d61341cd65944efcddbd | 7cd6a7bc72f0026056a7238c0feea081bfff13a7 | /bioprocs/chipseq.py | 17d1dc9bfa97f4b60ece6b03c19b086b816f8d32 | [
"MIT"
] | permissive | shijianasdf/biopipen | 8d963ccca38e2a9d7a46582a5eec45c38924655c | d53b78aa192fd56a5da457463b099b2aa833b284 | refs/heads/master | 2023-08-18T18:28:03.306877 | 2019-12-31T16:17:35 | 2019-12-31T16:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | """ChIP-seq data analysis"""
from pyppl import Proc
from diot import Diot
from . import params, proc_factory
# Defines the pPeakToRegPotential process; the Diot `annotate` block is the
# user-facing help text for the process and is rendered as-is.
pPeakToRegPotential = proc_factory(
    desc = 'Convert peaks to regulatory potential score for each gene.',
    config = Diot(annotate = """
    @name:
        pPeakToRegPotential
    @description:
        Convert peaks to regulatory potential score for each gene
        The formula is:
        ```
                       -(0.5 + 4*di/d0)
        PC = sum (pi * e                )
        ```
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4489297/
    @input:
        `peakfile:file`: The BED/peak file for peaks
        `genefile:file`: The BED file for gene coordinates
    @output:
        `outfile:file`: The regulatory potential file for each gene
    @args:
        `signal`: `pi` in the formula. Boolean value, whether use the peak intensity signale or not, default: `True`,
        `genefmt`: The format for `genefile`, default: `ucsc+gz`. It could be:
            - ucsc or ucsc+gz: typically, you can download from http://hgdownload.cse.ucsc.edu/goldenPath/hg38/database/refGene.txt.gz
            - bed or bed+gz: [format](https://genome.ucsc.edu/FAQ/FAQformat#format1), 4th column required as gene identity.
        `peakfmt`: The format for `peakfile`, default: `peak`. It could be:
            - peak or peak+gz: (either [narrowPeak](https://genome.ucsc.edu/FAQ/FAQformat.html#format12) or [broadPeak](https://genome.ucsc.edu/FAQ/FAQformat.html#format13), the 7th column will be used as intensity
            - bed or bed+gz: [format](https://genome.ucsc.edu/FAQ/FAQformat#format1), 5th column will be used as intensity.
        `window`: `2 * d0` in the formula. The window where the peaks fall in will be consided, default: `100000`.
            ```
            |--------- window ----------|
            |---- d0 -----|
            |--- 50K --- TSS --- 50K ---|
                        ^ (peak center)
                |-- di --|
            ```
    """))
pPeakToRegPotential.input = "peakfile:file, genefile:file"
pPeakToRegPotential.output = "outfile:file:{{peakfile | fn}}.rp.txt"
pPeakToRegPotential.args.signal = True
# Bug fix: the two assignments below previously carried stray trailing
# commas, turning the intended string defaults into one-element tuples
# ('ucsc+gz',) / ('peak',) instead of the documented 'ucsc+gz' / 'peak'.
pPeakToRegPotential.args.genefmt = 'ucsc+gz'
pPeakToRegPotential.args.peakfmt = 'peak'
pPeakToRegPotential.args.window = 100000
pPeakToRegPotential.lang = params.python.value
| [
"pwwang@pwwang.com"
] | pwwang@pwwang.com |
f34a9115279a8d7cbc59260fc5acfcab05b942c6 | 0e25dc15ae9efce8bfd716d4d2041da07767968b | /qbench/benchmarks/QLib/OPENQL_converted/benstein_vazirani_48b_secret_4.py | fc296603b534cdd78fb80f43ed23a31bafc0d90d | [] | no_license | alxhotel/crossbar-bench | f608fc0062b4f8a5162ec33d61c0204aaf27b6ff | 3bf7536e7697d29c3089b0ba564ba22d39698b88 | refs/heads/master | 2021-07-13T16:06:50.085838 | 2020-10-04T23:39:05 | 2020-10-04T23:39:05 | 213,409,122 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,382 | py | from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched= 'no', sched_commute = 'yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
    """Compile a 48-qubit Bernstein-Vazirani circuit with OpenQL.

    The oracle is a single cnot from qubit 2 onto the ancilla (qubit 48),
    i.e. the secret string has only bit 2 set — presumably the "secret_4"
    of the program name (4 == 0b100).

    :param config_file: OpenQL platform configuration file, relative to this script.
    :param measurement: when truthy, append a measure gate on every qubit.
    Remaining parameters are forwarded verbatim to ``ql.set_option``.
    """
    curdir = os.path.dirname(__file__)
    output_dir = os.path.join(curdir, output_dir_name)
    # Forward all compiler knobs to OpenQL in one place.
    options = {
        'output_dir': output_dir,
        'optimize': optimize,
        'scheduler': scheduler,
        'scheduler_uniform': uniform_sched,
        'mapper': mapper,
        'initialplace': initial_placement,
        'log_level': log_level,
        'scheduler_post179': new_scheduler,
        'scheduler_commute': sched_commute,
        'mapusemoves': moves,
        'maptiebreak': maptiebreak,
    }
    for name, value in options.items():
        ql.set_option(name, value)
    config_fn = os.path.join(curdir, config_file)
    # platform = ql.Platform('platform_none', config_fn)
    platform = ql.Platform('starmon', config_fn)
    sweep_points = [1, 2]
    num_circuits = 1
    num_qubits = 50
    p = ql.Program('benstein_vazirani_48b_secret_4', platform, num_qubits)
    p.set_sweep_points(sweep_points, num_circuits)
    k = ql.Kernel('benstein_vazirani_48b_secret_4', platform, num_qubits)
    # Prepare the ancilla (qubit 48) in |->.
    k.gate('prepz', [48])
    k.gate('x', [48])
    # Hadamard on the 48 data qubits plus the ancilla (replaces 49
    # copy-pasted k.gate('h', [q]) lines in the original).
    for q in range(49):
        k.gate('h', [q])
    # Oracle: secret bit 2 is set.
    k.gate('cnot', [2, 48])
    # Undo the Hadamards so the data register reads out the secret string.
    for q in range(49):
        k.gate('h', [q])
    if measurement:
        for q in range(num_qubits):
            k.gate('measure', [q])
    p.add_kernel(k)
    p.compile()
    ql.set_option('mapper', 'no')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
    parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
    parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
    parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform scheduler activation (yes or no)')
    parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
    parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
    parser.add_argument('--moves', nargs='?', default='no', help='Let the use of moves')
    parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
    parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
    parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
    parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits in the end of the algorithm')
    args = parser.parse_args()
    try:
        # BUG FIX: args.measurement was parsed but never forwarded, so the
        # --measurement flag had no effect.  NOTE(review): a value given on
        # the command line arrives as a *string*, and circuit() only does a
        # truthiness check, so '--measurement False' still measures — confirm
        # the intended semantics.
        circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched,
                args.sched_commute, args.mapper, args.moves, args.maptiebreak,
                args.initial_placement, args.out_dir, measurement=args.measurement)
    except TypeError:
        print('\nCompiled, but some gate is not defined in the configuration file. \nThe gate will be invoked like it is.')
        raise
"alxmorais8@msn.com"
] | alxmorais8@msn.com |
e881c76005641b8cc28159d379e1ebb69b36acda | 67a7c314fc99d9cd7a677fcb6bc2b6dfa20a9cff | /spambayes-1.0.4/windows/py2exe/setup_all.py | 60446035118e67633d6b59e7c128165b8689f6a7 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Xodarap/Eipi | 7ebbb9fd861fdb411c1e273ea5d2a088aa579930 | d30997a737912e38316c198531f7cb9c5693c313 | refs/heads/master | 2016-09-11T06:28:01.333832 | 2011-05-03T15:35:20 | 2011-05-03T15:35:20 | 1,367,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,674 | py | # setup_all.py
# A distutils setup script for SpamBayes binaries
# Make the SpamBayes source tree importable without installing it.
import sys, os, glob
# NOTE(review): join(__file__, "../../../..") followed by dirname() nets out
# to three levels above this script after abspath() — confirm that is the
# intended SpamBayes top directory.
sb_top_dir = os.path.abspath(os.path.dirname(os.path.join(__file__, "../../../..")))
sys.path.append(sb_top_dir)
sys.path.append(os.path.join(sb_top_dir, "windows"))
sys.path.append(os.path.join(sb_top_dir, "scripts"))
sys.path.append(os.path.join(sb_top_dir, "Outlook2000"))
sys.path.append(os.path.join(sb_top_dir, "Outlook2000/sandbox"))
import spambayes.resources
# Generate the dialogs.py file.
import dialogs
dialogs.LoadDialogs()
# ModuleFinder can't handle runtime changes to __path__, but win32com uses them,
# particularly for people who build from sources. Hook this in.
# Each extra win32com search path is registered with modulefinder so py2exe
# can locate the sub-packages; a source build adds paths beyond the first.
try:
    import modulefinder
    import win32com
    for p in win32com.__path__[1:]:
        modulefinder.AddPackagePath("win32com", p)
    for extra in ["win32com.shell","win32com.mapi"]:
        __import__(extra)
        m = sys.modules[extra]
        for p in m.__path__[1:]:
            modulefinder.AddPackagePath(extra, p)
except ImportError:
    # no build path setup, no worries.
    pass
from distutils.core import setup
import py2exe
# Global py2exe build options shared by every target below; 'typelibs' lists
# the COM type libraries (by GUID/lcid/major/minor) to generate wrappers for.
py2exe_options = dict(
    packages = "spambayes.resources,encodings",
    excludes = "win32ui,pywin,pywin.debugger", # pywin is a package, and still seems to be included.
    includes = "dialogs.resources.dialogs,weakref", # Outlook dynamic dialogs
    dll_excludes = "dapi.dll,mapi32.dll",
    typelibs = [
        ('{00062FFF-0000-0000-C000-000000000046}', 0, 9, 0),
        ('{2DF8D04C-5BFA-101B-BDE5-00AA0044DE52}', 0, 2, 1),
        ('{AC0714F2-3D04-11D1-AE7D-00A0C90F26F4}', 0, 1, 0),
    ]
    )
# These must be the same IDs as in the dialogs. We really should just extract
# them from our rc scripts.
# (resource_id, bitmap_path) pairs embedded into the Outlook add-in binary.
outlook_bmp_resources = [
    ( 125, os.path.join(sb_top_dir, r"Outlook2000\dialogs\resources\sbwizlogo.bmp")),
    ( 127, os.path.join(sb_top_dir, r"Outlook2000\dialogs\resources\folders.bmp")),
    (1062, os.path.join(sb_top_dir, r"Outlook2000\dialogs\resources\sblogo.bmp")),
    # and these are currently hard-coded in addin.py
    (6000, os.path.join(sb_top_dir, r"Outlook2000\images\recover_ham.bmp")),
    (6001, os.path.join(sb_top_dir, r"Outlook2000\images\delete_as_spam.bmp")),
]
# These are just objects passed to py2exe
# One dict per build target; 'dest_base' is the output name under bin/.
outlook_addin = dict(
    modules = ["addin"],
    dest_base = "bin/outlook_addin",
    bitmap_resources = outlook_bmp_resources,
    create_exe = False,
    )
#outlook_manager = Options(
#    script = os.path.join(sb_top_dir, r"Outlook2000\manager.py"),
#    bitmap_resources = outlook_bmp_resources,
#)
outlook_dump_props = dict(
    script = os.path.join(sb_top_dir, r"Outlook2000\sandbox\dump_props.py"),
    dest_base = "bin/outlook_dump_props",
    icon_resources = [(100, os.path.join(sb_top_dir,
                                         r"windows\resources\sbicon.ico")),
                      ],
    )
# A "register" utility for Outlook. This should not be necessary, as
# 'regsvr32 dllname' does exactly the same thing. However, Inno Setup
# version 4 appears to, upon uninstall, do something that prevents the
# files used by the unregister process to be deleted. Unregistering via
# this EXE solves the problem.
outlook_addin_register = dict(
    script = os.path.join(sb_top_dir, r"Outlook2000\addin.py"),
    dest_base = "bin/outlook_addin_register",
    icon_resources = [(100, os.path.join(sb_top_dir,
                                         r"windows\resources\sbicon.ico")),
                      ],
    )
service = dict(
    dest_base = "bin/sb_service",
    modules = ["pop3proxy_service"],
    icon_resources = [(100, os.path.join(sb_top_dir,
                                         r"windows\resources\sbicon.ico")),
                      ],
    )
sb_server = dict(
    dest_base = "bin/sb_server",
    script = os.path.join(sb_top_dir, "scripts", "sb_server.py")
    )
sb_pop3dnd = dict(
    dest_base = "bin/sb_pop3dnd",
    script = os.path.join(sb_top_dir, "scripts", "sb_pop3dnd.py")
    )
sb_upload = dict(
    dest_base = "bin/sb_upload",
    script = os.path.join(sb_top_dir, "scripts", "sb_upload.py")
    )
pop3proxy_tray = dict(
    dest_base = "bin/sb_tray",
    script = os.path.join(sb_top_dir, "windows", "pop3proxy_tray.py"),
    icon_resources = [(100, os.path.join(sb_top_dir, r"windows\resources\sbicon.ico")),
                      (1000, os.path.join(sb_top_dir, r"windows\resources\sb-started.ico")),
                      (1010, os.path.join(sb_top_dir, r"windows\resources\sb-stopped.ico"))],
    )
autoconfigure = dict(
    dest_base = "bin/setup_server",
    script = os.path.join(sb_top_dir, "windows", "autoconfigure.py"),
    )
# Documentation and support files copied verbatim into the distribution,
# as [destination_dir, [source_files]] pairs (distutils data_files format).
outlook_data_files = [
    ["docs/outlook", [os.path.join(sb_top_dir, r"Outlook2000\about.html")]],
    ["docs/outlook/docs", glob.glob(os.path.join(sb_top_dir, r"Outlook2000\docs\*.html"))],
    ["docs/outlook/docs/images", glob.glob(os.path.join(sb_top_dir, r"Outlook2000\docs\images\*.jpg"))],
    ["bin", [os.path.join(sb_top_dir, r"Outlook2000\default_bayes_customize.ini")]],
]
proxy_data_files = [
    ["docs/sb_server", [os.path.join(sb_top_dir, r"windows\readme_proxy.html")]],
    ["docs/sb_server", [os.path.join(sb_top_dir, r"windows\docs\troubleshooting.html")]],
    # note that this includes images that are already in the outlook/docs/images
    # directory - we need to consolidate the documentation (in terms of
    # sharing images, if nothing else)
    ["docs/sb_server/docs/images", glob.glob(os.path.join(sb_top_dir, r"windows\docs\images\*.jpg"))],
]
common_data_files = [
    ["", [os.path.join(sb_top_dir, r"windows\resources\sbicon.ico")]],
    ["", [os.path.join(sb_top_dir, r"LICENSE.txt")]],
]
# Default and only distutils command is "py2exe" - save adding it to the
# command line every single time.
if len(sys.argv)==1 or \
   (len(sys.argv)==2 and sys.argv[1] in ['-q', '-n']):
    sys.argv.append("py2exe")
# Drive the whole build: COM server, NT service, console/windows EXEs,
# plus all the documentation data files declared above.
setup(name="SpamBayes",
      packages = ["spambayes.resources"],
      package_dir = {"spambayes.resources" : spambayes.resources.__path__[0]},
      # We implement a COM object.
      com_server=[outlook_addin],
      # A service
      service=[service],
      # console exes for debugging
      console=[sb_server, sb_upload, outlook_dump_props, sb_pop3dnd],
      # The taskbar
      windows=[pop3proxy_tray, outlook_addin_register, autoconfigure],
      # and the misc data files
      data_files = outlook_data_files + proxy_data_files + common_data_files,
      options = {"py2exe" : py2exe_options},
      zipfile = "lib/spambayes.modules",
      )
| [
"eipi@mybox.(none)"
] | eipi@mybox.(none) |
5c713a2c568f78b3d8fbfcb025ede730942f035b | 1dcd3e78eca92356365faceb824addb53ff592e5 | /oracle_monitor.py | 789e9634d54f2a4b2765c646e2a8a3999331ad18 | [] | no_license | LingFangYuan/SendMailOrWX | b7201afce52d32c3d92fd087000aaa87c0f8006d | ca4beb1888823604a19283b6db5b07cd46948b6b | refs/heads/master | 2020-05-16T22:33:12.084808 | 2019-05-07T09:34:42 | 2019-05-07T09:34:42 | 183,339,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | import oracle_exec
from mysql_monitor import formattext
from get_sqltext import get_sql
def get_data(path):
    """Load the SQL text stored at ``path`` and execute it against Oracle.

    :param path: location of the SQL file understood by ``get_sql``.
    :return: whatever ``oracle_exec.exec`` yields (rows, cursor description).
    """
    return oracle_exec.exec(get_sql(path))
def set_table(subject, path):
    """Build an HTML report for the query stored at ``path``.

    ``subject`` becomes a bold heading rendered just before the table;
    header and body rows are delegated to set_heads()/set_rows().
    """
    rows, description = get_data(path)
    pieces = [
        "<strong>", subject, "</strong>",
        "<table border='1' cellpadding='5' cellspacing='0'>",
        '<caption><strong></strong></caption>',
        set_heads(description),
        set_rows(rows),
        "</table>",
    ]
    return ''.join(pieces)
def set_heads(de):
    """Render the header row from a cursor description.

    The last column is a status flag consumed by set_rows(), so it is
    deliberately excluded from the visible headers.
    """
    cells = "".join("<td>" + column[0] + "</td>" for column in de[:-1])
    return "<tr>" + cells + "</tr>"
def set_rows(re):
    """Render one <tr> per result row.

    The last column of every row is a status flag: 1 turns the whole row
    red. datetime values are shown as YYYY-MM-DD; everything else via str().

    :param re: sequence of row tuples (may be empty).
    :return: concatenated ``<tr>...</tr>`` markup, '' for an empty result.
    """
    import datetime
    if not re:
        # BUG FIX: set_table() calls this without checking for an empty
        # result set; the original re[0] raised IndexError in that case.
        return ''
    content = ''
    width = len(re[0])
    for row in re:
        content += '<tr style="color:red">' if row[width - 1] == 1 else "<tr>"
        for j in range(width - 1):
            value = row[j]
            if isinstance(value, datetime.datetime):
                text = datetime.datetime.strftime(value, '%Y-%m-%d')
            else:
                text = str(value)
            content += "<td>" + text + "</td>"
        content += "</tr>"
    return content
def get_html(subject, path):
    """Return the full HTML report for ``subject`` built from ``path``."""
    html = set_table(subject, path)
    return html
def get_text(subject, path):
    """Return a plain-text summary with a details link, or None when empty.

    When the query yields no rows the function keeps the original contract
    of returning None so callers can skip sending a message.
    """
    rows, description = get_data(path)
    if not rows:
        return None
    body = formattext(rows, description)
    return subject + "\n" + body + '<a href="https://mail.qq.com/cgi-bin/loginpage">查看详情</a>'
| [
"786173189@qq.com"
] | 786173189@qq.com |
64ef22513cede77e09605582fc911425e63ca7ac | 233d852269c62cf5792adc74e50b78161f9d29d0 | /apps/question_record/migrations/0003_auto_20181128_2351.py | 0bb0cd31ca72b9cf2e2c1746715a8f8d55eb60a2 | [] | no_license | liao-shuai/chaflow | ce83aa52383c335c2e8ad4863c40ac3f53b3f472 | f26dd0b9c74e1fb8dbb5181fdb9d0ec6cad6f981 | refs/heads/master | 2021-02-04T16:24:21.852056 | 2019-09-13T13:36:58 | 2019-09-13T13:36:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-11-28 15:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: switch ask_question.ctime to
    # auto_now_add so the submit time is stamped once, at row creation.
    dependencies = [
        ('question_record', '0002_auto_20181128_2328'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ask_question',
            name='ctime',
            field=models.DateTimeField(auto_now_add=True, verbose_name='提交时间'),
        ),
    ]
| [
"irainsun@live.cn"
] | irainsun@live.cn |
241932a8711c5e9ed45a20db44356998e7522fb1 | 162b86e030ccb6b2c3adb2e540f9892f25abccbf | /gradient_descent_new/experiments/2019-04-25/observables_vs_gamma-variations/scan_gamma.py | 94e51a71f241a80e8218a6d3e73da11d807a667a | [] | no_license | samueljmcameron/pfc_fibrils | a0d647a1a4490f25c4b7d42be2815c93967d18df | 564164b815229f3b5e863a95aa885dab4c52dfae | refs/heads/master | 2021-07-03T20:50:46.260599 | 2019-05-31T19:40:05 | 2019-05-31T19:40:05 | 144,604,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,736 | py | import numpy as np
import subprocess
import sys
import time
sys.path.append('../../scripts/')
from singlerun import SingleRun
from readparams import ReadParams
if __name__=="__main__":
    # Sweep the surface tension gamma_s for a fixed (k24, omega, Lambda)
    # triple, re-running the C solver at each point and feeding each
    # solution forward as the initial guess for the next one.
    start_time = time.time()
    FAILED_E = 1e300
    Lambdas = np.array([0.1,0.5,1.0,5.0,10.0,50.0,100.0,500.0],float)
    k24,omega,Lindex = sys.argv[1],sys.argv[2],int(sys.argv[3])
    gammas = np.linspace(0.01,0.4,num=101,endpoint=True)
    scan = {}
    scan['\\Lambda'] = str(Lambdas[Lindex])
    scan['\\omega']= omega
    scan['k_{24}'] = k24
    loadsuf=["K_{33}","k_{24}","\\Lambda","\\omega","\\gamma_s"]
    savesuf=["K_{33}","k_{24}","\\Lambda","\\omega"]
    scan_dir = "scanforward"
    i = 0
    while (i <len(gammas)):
        gamma = gammas[i]
        scan['\\gamma_s'] = str(gamma)
        # read in file name info
        rp = ReadParams(scan=scan,loadsuf=loadsuf,savesuf=savesuf)
        # create a class to do calculations with current parameters in scan.
        run = SingleRun(rp,scan_dir=scan_dir)
        # run C executable.
        run.run_exe()
        # move file written by C executable from temporary data path to true data path
        run.mv_file('observables')
        # load the final values of E, R, eta, delta, and surface twist.
        Ei,Ri,etai,deltai,surftwisti = run.get_all_observables('observables',str2float=True)
        if (Ei > 0.1*FAILED_E and gamma > 0.15):
            # if the energy calculation fails, this will be true.
            # NOTE(review): leftover debug print.
            print('hi')
            # remove current file with observables for the current gamma value that are higher than
            # the delta = 0 energy.
            print(Ei)
            run.remove_file("observables")
            for j,gamma in enumerate(gammas[i:]):
                # write the remaining values of observables as those corresponding to the delta = 0
                # case, as non-zero d-band produces a higher energy fibril.
                scan['\\gamma_s']=str(gamma)
                rp = ReadParams(scan=scan,loadsuf=loadsuf,savesuf=savesuf)
                run = SingleRun(rp,scan_dir=scan_dir)
                # NOTE(review): E0, R0, eta0, delta0 and surftwist0 are never
                # defined anywhere in this script — this branch raises
                # NameError if it ever executes. Presumably they were meant
                # to hold the delta = 0 reference observables; confirm and fix.
                run.write_observables(E0,R0,eta0,delta0,surftwist0,"\\gamma_s")
            break
        if (np.isnan(Ri) or Ri <= 0) and gamma > 0.15:
            # if Ri is infinite, then the calculation failed.
            # Retry it with a different initial guess.
            print("Ri is NAN, trying again with Rguess = 1.0")
            # remove the current observables file, so that a new one can be written.
            run.remove_file("observables")
            if abs(float(scan['Rguess'])-1.0)>1e-10:
                Ri = 1.0
            else:
                break
        else:
            # calculation ran smoothly.
            run.concatenate_observables("\\gamma_s")
            i+= 1
        # Seed the next gamma point's guesses (with bracketing bounds) from
        # the values just computed, skipping anything that came back NaN.
        Rguess,etaguess,deltaguess = str(Ri),str(etai),str(deltai)
        if not np.isnan(float(Rguess)):
            scan['Rguess'] = Rguess
            scan['Rupper'] = str(1.5*float(Rguess))
            scan['Rlower'] = str(0.75*float(Rguess))
        if not np.isnan(float(etaguess)):
            scan['etaguess'] = etaguess
            scan['etaupper'] = str(float(etaguess)+0.1)
            scan['etalower'] = str(float(etaguess)-0.02)
        if not (np.isnan(float(deltaguess))
                or abs(float(deltaguess))<1e-5):
            scan['deltaguess'] = deltaguess
            scan['deltaupper'] = '0.818'
            if float(deltaguess) < 0.81:
                scan['deltalower'] = str(0.95*float(deltaguess))
            else:
                scan['deltalower'] = '0.81'
    print(f"Took {(time.time()-start_time)/3600} hours to complete.")
| [
"samuel.j.m.cameron@gmail.com"
] | samuel.j.m.cameron@gmail.com |
7fcc04cd97bc7d308475cfdbc01f435f93e2b87d | 51dc3ab902fbbd335fde207e06967d636879c853 | /predict/urls.py | 083424ae89095eb35fe458b56e9abd529d360435 | [] | no_license | khanansha/uberprediction | 2c06cd9c8525b363e16a0ce58385b9c01344f8e2 | 8e8340f22b6b26aba0e164a295b344d34bfce19a | refs/heads/master | 2022-11-14T11:52:43.518135 | 2020-07-12T15:11:10 | 2020-07-12T15:11:10 | 279,088,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django.urls import path
from . import views
# URL routes for the prediction app: landing page, prediction endpoint,
# and a check endpoint.
urlpatterns = [
    path('', views.index, name="index"),
    path('predict', views.predict, name="predict"),
    path('ran', views.check, name="check"),  # NOTE(review): route 'ran' -> views.check; confirm the name is intentional
]
| [
"anjumkhan88987@gmail.com"
] | anjumkhan88987@gmail.com |
6588ac9881281d906936463f633ae4e4a3fa2047 | aa4024b6a846d2f6032a9b79a89d2e29b67d0e49 | /UMLRT2Kiltera_MM/graph_MT_pre__MetaModelElement_T.py | 4b496aa73b98fca4db94f79526f7d4b9f5446fc4 | [
"MIT"
] | permissive | levilucio/SyVOLT | 41311743d23fdb0b569300df464709c4954b8300 | 0f88827a653f2e9d3bb7b839a5253e74d48379dc | refs/heads/master | 2023-08-11T22:14:01.998341 | 2023-07-21T13:33:36 | 2023-07-21T13:33:36 | 36,246,850 | 3 | 2 | MIT | 2023-07-21T13:33:39 | 2015-05-25T18:15:26 | Python | UTF-8 | Python | false | false | 2,229 | py | """
__graph_MT_pre__MetaModelElement_T.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
________________________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_pre__MetaModelElement_T(graphEntity):
    """Auto-generated graphical appearance for MT_pre__MetaModelElement_T.

    Draws a cyan rectangle with a single connector point and the entity's
    MT_label__ text. Per the file header, modify directly with caution.
    """
    def __init__(self, x, y, semObject = None):
        # semObject is the semantic (model) object this shape represents.
        self.semanticObject = semObject
        self.sizeX, self.sizeY = 173, 91
        graphEntity.__init__(self, x, y)
        self.ChangesAtRunTime = 0
        self.constraintList = []
        if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
        else: atribs = None
        self.graphForms = []
        self.imageDict = self.getImageDict()
    def DrawObject(self, drawing, showGG = 0):
        # Render the shape on the given Tk canvas; showGG additionally
        # draws the graph-grammar label.
        self.dc = drawing
        if showGG and self.semanticObject: self.drawGGLabel(drawing)
        # Invisible oval used purely as a connector anchor point.
        h = drawing.create_oval(self.translate([209.0, 88.0, 209.0, 88.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
        self.connectors.append( h )
        # Main body rectangle.
        h = drawing.create_rectangle(self.translate([38.0, 38.0, 209.0, 127.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'cyan')
        self.gf5 = GraphicalForm(drawing, h, "gf5")
        self.graphForms.append(self.gf5)
        helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
        # Label showing the semantic object's MT_label__ attribute.
        h = drawing.create_text(self.translate([-3, -3]), font=helv12,
                                tags = (self.tag, self.semanticObject.getClass()),
                                fill = "black",
                                text=self.semanticObject.MT_label__.toString())
        self.attr_display["MT_label__"] = h
        self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
        self.graphForms.append(self.gf_label)
    def postCondition( self, actionID, * params):
        # No post-condition constraints for this shape.
        return None
    def preCondition( self, actionID, * params):
        # No pre-condition constraints for this shape.
        return None
    def getImageDict( self ):
        # No embedded images for this shape.
        imageDict = dict()
        return imageDict
# Alias used by the ATOM3 loader to pick up the generated class.
new_class = graph_MT_pre__MetaModelElement_T
| [
"levi"
] | levi |
b454d814b5714c7a47a34188e03b5bb70289c5f9 | c09817490b36beaea98abc8c955904528c5cd4fd | /tests/test_0058-detach-model-objects-from-files.py | 9229de78be7f626e95ef00c107e5a2b832e99960 | [
"BSD-3-Clause"
] | permissive | oshadura/uproot4 | 245b7e14a3341d87a9e655792c6ee912ad443586 | ee535f6632d371d82b5173a43d6445c854968315 | refs/heads/master | 2023-08-19T13:48:23.541016 | 2021-09-22T23:51:52 | 2021-09-22T23:51:52 | 287,539,468 | 0 | 0 | BSD-3-Clause | 2020-08-14T13:29:03 | 2020-08-14T13:29:02 | null | UTF-8 | Python | false | false | 2,524 | py | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import copy
import os
import pickle
import sys
import numpy
import pytest
import skhep_testdata
import uproot
def test_detachment():
    """Model objects read eagerly carry a detached file (no live source),
    while lazily-read objects keep a real source; streamers always come
    from a DetachedFile whose uuid matches the test file."""
    with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f:
        assert getattr(f["hpx"].file, "file_path", None) is not None
        assert getattr(f["hpx"].file, "source", None) is None
        assert getattr(f["ntuple"].file, "file_path", None) is not None
        assert getattr(f["ntuple"].file, "source", None) is not None
    with uproot.open(
        skhep_testdata.data_path("uproot-small-evnt-tree-nosplit.root")
    ) as f:
        array = f["tree/evt"].array(library="np", entry_stop=1)
        assert getattr(array[0].file, "file_path", None) is not None
        assert getattr(array[0].file, "source", None) is None
        assert isinstance(
            f.file.streamer_named("Event").file, uproot.reading.DetachedFile
        )
        assert (
            str(f.file.streamer_named("Event").file_uuid)
            == "9eebcae8-366b-11e7-ab9d-5e789e86beef"
        )
def test_copy():
    """A deep copy of a model object keeps its file path after the file closes."""
    filename = skhep_testdata.data_path("uproot-hepdata-example.root")
    with uproot.open(filename) as f:
        histogram = f["hpx"]
        expected_path = histogram.file.file_path
    # The file is closed here; copying must not need a live source.
    clone = copy.deepcopy(histogram)
    assert clone.file.file_path == expected_path
def test_pickle():
    """A pickle round-trip of a model object keeps its file path after close."""
    filename = skhep_testdata.data_path("uproot-hepdata-example.root")
    with uproot.open(filename) as f:
        histogram = f["hpx"]
        expected_path = histogram.file.file_path
    # The file is closed here; serialization must not need a live source.
    roundtripped = pickle.loads(pickle.dumps(histogram))
    assert roundtripped.file.file_path == expected_path
@pytest.mark.skipif(
    sys.version_info < (3, 0),
    reason="boost_histogram is wrapped with pybind11, which can't be pickled in Python 2.7.",
)
def test_pickle_boost():
    """Both the original and the pickle-round-tripped histogram convert to
    boost_histogram, and both boost objects survive pickling themselves."""
    boost_histogram = pytest.importorskip("boost_histogram")
    with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f:
        original = f["hpx"]
        original_boost = original.to_boost()
    # Round-trip after the file is closed, then convert again.
    reconstituted = pickle.loads(pickle.dumps(original))
    reconstituted_boost = reconstituted.to_boost()
    # Smoke-check that the boost objects themselves are picklable.
    pickle.loads(pickle.dumps(original_boost))
    pickle.loads(pickle.dumps(reconstituted_boost))
| [
"noreply@github.com"
] | oshadura.noreply@github.com |
f6dab0896b1864866a10f0a2f3fe89e1a8b2b76d | 9184e230f8b212e8f686a466c84ecc89abe375d1 | /histogrammode/docs/DeveloperGuide/txml2xml_html.py | f9b117894a239bb84a1432c1da25bbcd1186e193 | [] | no_license | danse-inelastic/DrChops | 75b793d806e6351dde847f1d92ab6eebb1ef24d2 | 7ba4ce07a5a4645942192b4b81f7afcae505db90 | refs/heads/master | 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 | Python | UTF-8 | Python | false | false | 914 | py | # The following dictionary will be used to convert docbook
# xml templates (*.txml)
# to docbook xml source. All keys are mapped to their values.
# There are two link styles: web_absolutepath and web_relativepath.
# web_absolutepath is for the PDF-format product: because a PDF cannot embed
# movies and some other media, absolute web links have to be used.
# web_relativepath is for the HTML tarball product: it can bundle all of the
# documentation in one tarball, so relative links usually suffice. There are
# still cases where an absolute link is wanted — e.g. links to download binary
# installers, which should not be included in the documentation tarball.
from webserver import webserver
urldict = {
    'xxxWEBSITExxx': "../../..",
    'xxxDOWNLOADSxxx': "%s/click_monitor" % webserver,
    'xxxARCSBOOKxxx': "../../../ARCSBook/Inelastic_Book/latex/Inelastic_Book.pdf",
    }
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
3894ab976831c651854a100add14f3fc3c94768b | a45b5742814cc51c706c707d3b86e4f0a97c864b | /lists/urls.py | 4474f773460fd2ba16f25eafd5e0007017886c98 | [] | no_license | stanislavBozhanov/superlists | 9c6f0628c0eb02f56e6d0eb1b232fac033edcbe9 | 3c4906004a878d00c2912dffed310e098c841043 | refs/heads/master | 2016-09-06T16:56:35.534154 | 2015-02-12T20:55:10 | 2015-02-12T20:55:10 | 29,201,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.conf.urls import patterns, url
from django.contrib import admin
# Old-style string-view URLconf (patterns() with dotted-path views was
# deprecated in Django 1.8 and removed in 1.10).
urlpatterns = patterns('',
    url(r'^(\d+)/$', 'lists.views.view_list', name='view_list'),
    url(r'^new$', 'lists.views.new_list', name='new_list'),)
# url(r'^admin/', include(admin.site.urls)),)
| [
"stanislav.bozhanov@gmail.com"
] | stanislav.bozhanov@gmail.com |
9c935b6339889fef28e5bb09557912ee3b39ac4f | c6bfa138886150d219b9086165a845a3542aca32 | /apps/home/serializers.py | cf7356ee02677f6bec7ff1346a70b393d2a87b84 | [] | no_license | pyeye/dub-back | 1192505eab64425d59af880703b9634b35384fd7 | 4079121cc7f825df6feedb4b8fbd842cfe4ec16d | refs/heads/master | 2023-02-28T15:40:04.784157 | 2021-02-11T12:08:59 | 2021-02-11T12:08:59 | 115,289,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from rest_framework import serializers
from .models import Banner, Advertisement
class BannerSerializer(serializers.ModelSerializer):
    # Read/write serializer exposing the Banner fields used by the home API.
    class Meta:
        model = Banner
        fields = ('pk', 'title', 'image', 'url', 'extra')
class AdvertisementSerializer(serializers.ModelSerializer):
    # Same field set as BannerSerializer, but for Advertisement rows.
    class Meta:
        model = Advertisement
        fields = ('pk', 'title', 'image', 'url', 'extra')
| [
"pyeye.91@gmail.com"
] | pyeye.91@gmail.com |
955e892dde47cf831c34694c926ee24685eeb8e0 | 501615c82801733e69c7447ab9fd68d3883ed947 | /9527/.svn/pristine/95/955e892dde47cf831c34694c926ee24685eeb8e0.svn-base | 9329b5957bacc5793ce39718051eb56e10531980 | [] | no_license | az0ne/python | b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee | aec5d23bb412f7dfca374fb5c5b9988c1b817347 | refs/heads/master | 2021-07-18T02:08:46.314972 | 2017-10-27T06:23:36 | 2017-10-27T06:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | # -*- coding: utf8 -*-
from db.api.apiutils import APIResult
from db.cores.mysqlconn import dec_make_conn_cursor
from utils import tool
from utils.tool import dec_timeit
from utils.logger import logger as log
@dec_make_conn_cursor
@dec_timeit
def get_equation_list(conn, cursor):
    """
    Fetch all equations.

    :param conn: database connection (injected by dec_make_conn_cursor)
    :param cursor: database cursor (injected by dec_make_conn_cursor)
    :return: APIResult wrapping every row of mz_operation_equation
    """
    sql = """
    SELECT id, equation, description FROM mz_operation_equation
    """
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
        log.info("query: %s" % cursor.statement)
    except Exception as e:
        # Log the failing statement, then re-raise for the caller to handle.
        log.warn(
            "execute exception: %s. "
            "statement:%s" % (e, cursor.statement))
        raise e
    return APIResult(result=result)
@dec_make_conn_cursor
@dec_timeit
def add_equation(conn, cursor, equation, description):
    """
    Insert a new equation.

    :param conn: database connection (injected by dec_make_conn_cursor)
    :param cursor: database cursor (injected by dec_make_conn_cursor)
    :param equation: the equation text
    :param description: human-readable description of the equation
    :return: APIResult wrapping the id of the newly inserted row
    """
    sql = """
    INSERT INTO mz_operation_equation (
        equation, description
    )
    VALUES (%s, %s)
    """
    try:
        cursor.execute(sql, (equation, description))
        e_id = cursor.lastrowid
        conn.commit()
        log.info("query: %s" % cursor.statement)
    except Exception as e:
        # Log the failing statement, then re-raise for the caller to handle.
        log.warn(
            "execute exception: %s. "
            "statement:%s" % (e, cursor.statement))
        raise e
    return APIResult(result=e_id)
@dec_make_conn_cursor
@dec_timeit
def del_equation(conn, cursor, e_id):
    """
    Delete an equation.

    :param conn: database connection (injected by dec_make_conn_cursor)
    :param cursor: database cursor (injected by dec_make_conn_cursor)
    :param e_id: id of the equation row to delete
    :return: APIResult wrapping True (raises on failure)
    """
    sql = """
    DELETE FROM mz_operation_equation WHERE id = %s
    """
    try:
        cursor.execute(sql, (e_id,))
        conn.commit()
        log.info("query: %s" % cursor.statement)
    except Exception as e:
        # Log the failing statement, then re-raise for the caller to handle.
        log.warn(
            "execute exception: %s. "
            "statement:%s" % (e, cursor.statement))
        raise e
    return APIResult(result=True)
| [
"1461847795@qq.com"
] | 1461847795@qq.com | |
3229389f779378c837f13abd5093dbfc188cf8fc | d9b53673b899a9b842a42060740b734bf0c63a31 | /leetcode/python/easy/p263_isUgly.py | 5fe4345ff3cf6f18d1402fe8bafc4a61b6c6929c | [
"Apache-2.0"
] | permissive | kefirzhang/algorithms | a8d656774b576295625dd663154d264cd6a6a802 | 549e68731d4c05002e35f0499d4f7744f5c63979 | refs/heads/master | 2021-06-13T13:05:40.851704 | 2021-04-02T07:37:59 | 2021-04-02T07:37:59 | 173,903,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | class Solution:
    def __init__(self):
        # Memo of previously computed results, keyed by the queried number.
        self.helper = {}
def isUgly(self, num):
if self.helper.__contains__(num):
return self.helper[num]
if num <= 0:
return False
if num == 1:
return True
base = [2, 3, 5]
if num % 2 == 0:
if num / 2 in base:
self.helper[num] = True
return True
case2 = self.isUgly(num / 2)
else:
case2 = False
if num % 3 == 0:
if num / 3 in base:
self.helper[num] = True
return True
case3 = self.isUgly(num / 3)
else:
case3 = False
if num % 5 == 0:
if num / 5 in base:
self.helper[num] = True
return True
case5 = self.isUgly(num / 5)
else:
case5 = False
if case2 or case3 or case5:
self.helper[num] = True
return True
else:
return False
# Ad-hoc smoke test executed on import/run of this module.
slu = Solution()
print(slu.isUgly(2123366400))
| [
"8390671@qq.com"
] | 8390671@qq.com |
08d15901538579db4a4ac16f55acae810550d8ff | 1113c8d5689685106fd77363e5561006d8ecef0d | /confbusterplusplus/utils.py | 06c911d7c30c7936cd8cff9584f2f9012d42e6c6 | [
"MIT"
] | permissive | dsvatunek/ConfBusterPlusPlus | 238f73ab48e6d1d1491cbf4406acf828d76a56f9 | 2de751f409ffdb791d8b04fd4b3d08645beebaa6 | refs/heads/master | 2022-11-09T18:28:26.880541 | 2020-06-24T05:50:35 | 2020-06-24T05:50:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | """
MIT License
Copyright (c) 2019 e-dang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
github - https://github.com/e-dang
"""
import json
import os
from itertools import islice
from rdkit import Chem
def window(iterable, window_size):
"""
Recipe taken from: https://docs.python.org/release/2.3.5/lib/itertools-example.html
Returns a sliding window (of width n) over data from the iterable
s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...
"""
it = iter(iterable)
result = tuple(islice(it, window_size))
if len(result) == window_size:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def write_mol(mol, filepath, conf_id=None):
    """
    Writes a RDKit Mol to an sdf file. Can specify a specific conformer on the molecule or all conformers.

    Args:
        mol (RDKit Mol): The molecule to write to file.
        filepath (str): The filepath. Must have an .sdf extension.
        conf_id (int, optional): The conformer id on the molecule to write to file. If None, then writes first
            conformer, if -1 then writes all conformers. Defaults to None.

    Returns:
        bool: True if successful, False if filepath is not an .sdf file.
    """
    if filepath.split('.')[-1] != 'sdf':
        # Bug fix: previously this only printed the error and then wrote the
        # file anyway; now it reports failure without writing.
        print('Error needs to be sdf file')
        return False
    writer = Chem.SDWriter(filepath)
    try:
        if conf_id is None:
            writer.write(mol)
        elif conf_id == -1:
            for conf in mol.GetConformers():
                writer.write(mol, confId=conf.GetId())
        else:
            writer.write(mol, confId=conf_id)
    finally:
        # Always release the writer, even if an individual write raises.
        writer.close()
    return True
def file_rotator(filepath):
    """
    Return a numbered variant of `filepath` that does not collide with an
    existing file: tries base_0, base_1, ... until a free name is found.

    Args:
        filepath (str): The desired file name/path.

    Returns:
        str: The unique file name/path.
    """
    candidate_num = 0
    while True:
        candidate = attach_file_num(filepath, candidate_num)
        # isfile() implies the path exists and is a regular file.
        if not os.path.isfile(candidate):
            return candidate
        candidate_num += 1


def attach_file_num(filepath, file_num):
    """
    Splice `file_num` into the base file name of `filepath`, keeping the
    directory and extension intact (e.g. dir/name.ext -> dir/name_3.ext).

    Args:
        filepath (str): The desired file path.
        file_num (int): The number to attach to the base file name.

    Returns:
        str: The file path with the number appended to the base name.
    """
    directory, basename = os.path.split(os.path.abspath(filepath))
    stem, ext = basename.split('.')
    return os.path.join(directory, '{}_{}.{}'.format(stem, file_num, ext))
def list_embed_params(embed_params):
    """
    Collect the non-callable public attributes of `embed_params` into a dict.

    Args:
        embed_params (RDKit EmbedParameters): The embedding parameters.

    Returns:
        dict: Attribute name -> value for every data attribute.
    """
    attributes = {}
    for attr_name in dir(embed_params):
        if '__' in attr_name:
            # skip python-internal (dunder) names
            continue
        value = getattr(embed_params, attr_name)
        if callable(value):
            continue
        attributes[attr_name] = value
    return attributes
def is_json_serializable(value):
    """Return True when `value` can be encoded by json.dumps, else False."""
    try:
        json.dumps(value)
    except TypeError:
        return False
    return True
def terminate(message, code):
    """
    Helper function that terminates the process if command line argument validation fails.

    Args:
        message (str): The error message to print to the terminal.
        code (int): The error code to exit with.
    """
    import sys  # local import: this module does not import sys at top level
    print(message)
    # sys.exit is the supported API; the bare exit() builtin is injected by the
    # site module and is not guaranteed to exist (e.g. under `python -S`).
    sys.exit(code)
| [
"edang830@gmail.com"
] | edang830@gmail.com |
b6230fc0f27c8b25a5a30e21ff1adf750f7f2d60 | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/ellecf/visualizing-titanic-data/visualizing-titanic-data.py | 2a423da53cf48d3cbf021e394da003ed61c61e90 | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[ ]:
#import data
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
# In[ ]:
#explore the data a little bit
print(train_data.columns.values)
print(train_data.describe())
train_data.head()
# In[ ]:
#find out what the null sitch is
print(train_data.isnull().sum())
# In[ ]:
#Look at the target, how many survivors?
train_data['Survived'].value_counts()
# In[ ]:
train_data['Survived'].astype(int).plot.hist();
# In[ ]:
#let's turn sex into a numerical feature instead of categorical
from sklearn.preprocessing import LabelEncoder
train_data['Sex'] = LabelEncoder().fit_transform(train_data['Sex'])
# In[ ]:
#handling missing values
#print(train_data.isnull().sum())
from sklearn.impute import SimpleImputer
# Fill missing Age values with the column mean.
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
train_data['Age'] = imp.fit_transform(train_data['Age'].values.reshape(-1,1)).reshape(-1)
print(train_data.isnull().sum())
# In[ ]:
# Find correlations with the target and sort
correlations = train_data.corr()['Survived'].sort_values()
# Display correlations
print('Correlations: \n', correlations)
# In[ ]:
#let's look at how the variables correlate with each other
allcorr = train_data.corr()
# NOTE: bare expression below only renders when run as a notebook cell.
allcorr
# In[ ]:
# Heatmap of correlations
plt.figure(figsize = (8, 6))
sns.heatmap(allcorr, cmap = plt.cm.RdYlBu_r, vmin = -0.25, annot = True, vmax = 0.6)
plt.title('Correlation Heatmap');
# In[ ]:
plt.figure(figsize = (10, 8))
# KDE plot - smoothed histograms showing distribution of a variable for survived/died outcomes
sns.kdeplot(train_data.loc[train_data['Survived'] == 0, 'Age'], label = 'Survived == 0')
sns.kdeplot(train_data.loc[train_data['Survived'] == 1, 'Age'], label = 'Survived == 1')
# Labeling of plot
plt.xlabel('Age (years)'); plt.ylabel('Density'); plt.title('Distribution of Ages');
# In[ ]:
plt.figure(figsize = (10, 8))
# KDE plot - smoothed histograms showing distribution of a variable for survived/died outcomes
sns.kdeplot(train_data.loc[train_data['Survived'] == 0, 'Fare'], label = 'Survived == 0')
sns.kdeplot(train_data.loc[train_data['Survived'] == 1, 'Fare'], label = 'Survived == 1')
# Labeling of plot
plt.xlabel('Fare'); plt.ylabel('Density'); plt.title('Distribution of Fare');
# In[ ]:
# Survival rate per passenger class, with human-readable x labels.
plt.subplots(figsize = (15,10))
sns.barplot(x = "Pclass",
            y = "Survived",
            data=train_data,
            linewidth=2)
plt.title("Passenger Class Distribution - Survived vs Non-Survived", fontsize = 25)
plt.xlabel("Socio-Economic class", fontsize = 15);
plt.ylabel("% of Passenger Survived", fontsize = 15);
labels = ['Upper', 'Middle', 'Lower']
#val = sorted(train.Pclass.unique())
val = [0,1,2] ## this is just a temporary trick to get the label right.
plt.xticks(val, labels);
# In[ ]:
| [
"bitsorific@gmail.com"
] | bitsorific@gmail.com |
fdaf162ebeaaede570a86fafefae08a63d204cad | 32868580ddb697d3a9248952d34f2090e05325b5 | /team.py | c479ddca67df9303506f6d3eb165ffe25b57a928 | [] | no_license | DFettes/basketball-sim | e0e7b99c731654c5348e25c8d17dd49f0a3812ed | 6967fc39df7b1ce05705e32fd9d610e5874a7b5c | refs/heads/master | 2021-01-01T18:55:13.894081 | 2015-02-16T20:17:57 | 2015-02-16T20:17:57 | 30,477,659 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | import player
class Team():
    """A basketball team: roster, current lineup and season bookkeeping."""

    # Positional rebounding weights, index 0 (lowest) .. 4 (highest slot).
    # Shared by team_rebound_chance and player_rebound_chance so the two
    # calculations can never drift apart again.
    REBOUND_WEIGHTS = [0.13, 0.15, 0.19, 0.24, 0.29]

    # NOTE(review): class-level mutable defaults kept for backward
    # compatibility; __init__ rebinds the list-valued ones per instance.
    name = ''
    home = None
    away = None
    players = []
    on_floor = []
    on_bench = []
    points = 0
    possesions = 0
    wins = 0
    losses = 0
    season_points = 0

    def __init__(self, name, players):
        self.name = name
        self.players = players
        # First five players start; the rest sit.
        self.starters = players[:5]
        self.bench = players[5:]
        self.on_floor = list(self.starters)
        self.on_bench = list(self.bench)

    def team_rebound_chance(self):
        """Return the team's weighted rebounding score for the current lineup.

        Bug fix: the 0.13 term previously re-used ``on_floor[1]`` instead of
        ``on_floor[0]``, so the first player never contributed.
        """
        return sum(w * p.defense['rebounding']
                   for w, p in zip(self.REBOUND_WEIGHTS, self.on_floor))

    def player_rebound_chance(self, rand_reb):
        """Pick which on-floor player grabs the rebound.

        ``rand_reb`` is a uniform random number in [0, 1); it is scaled by the
        total weighted rebounding and matched against the running totals.
        """
        totals = []
        running_total = 0
        for p, w in zip(self.on_floor, self.REBOUND_WEIGHTS):
            running_total += p.defense['rebounding'] * w
            totals.append(running_total)
        rand_reb *= running_total
        for i, total in enumerate(totals):
            if rand_reb < total:
                break
        return self.on_floor[i]
| [
"="
] | = |
8481b5c0414c9fae0193163d79b056ffc12d6171 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/132/usersdata/158/41060/submittedfiles/al14.py | 40febf8cf6f9e392b4e3564460cc1cf93bff2615 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # -*- coding: utf-8 -*-
# Read the group size, then average the ages typed in one by one.
quantidade = int(input('digite quantidade de pessoas:'))
soma_idades = 0
for _ in range(quantidade):
    soma_idades += int(input('digite idade:'))
# True division, so the mean may be fractional.
print(soma_idades / quantidade)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ea7a9906120df3389f9d770640964c92ab508a71 | 48e1ac111f48bf27b03625f81887a8eaef4d505d | /old/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/disks/delete.py | f25effc93aad64441570fff0441854a5c503fad6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | altock/dev | 74350528ea570925e8fbc584c64939cae86f6ea7 | 90d87b2adb1eab7f218b075886aa620d8d6eeedb | refs/heads/master | 2021-07-10T08:31:48.080736 | 2017-04-15T03:04:12 | 2017-04-15T03:04:12 | 23,088,790 | 0 | 1 | null | 2020-07-25T04:32:05 | 2014-08-18T22:33:25 | Python | UTF-8 | Python | false | false | 668 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for deleting disks."""
from googlecloudsdk.compute.lib import base_classes
class Delete(base_classes.ZonalDeleter):
  """Delete Google Compute Engine disks."""
  @property
  def service(self):
    # Compute API collection the shared ZonalDeleter machinery operates on.
    return self.context['compute'].disks
  @property
  def resource_type(self):
    # Resource name used by the base class for messages and URI handling.
    return 'disks'
# Extended help text rendered by the gcloud help system.
Delete.detailed_help = {
    'brief': 'Delete Google Compute Engine persistent disks',
    'DESCRIPTION': """\
        *{command}* deletes one or more Google Compute Engine
        persistent disks. Disks can be deleted only if they are not
        being used by any virtual machine instances.
        """,
}
| [
"sjs382@cornell.edu"
] | sjs382@cornell.edu |
e5731ef0207cc76612873575e5242ec5f23089fb | f5b5a6e3f844d849a05ff56c497638e607f940e0 | /capitulo 10/capitulo 10/10.19 - A Classe Telefone.py | 296d8a4c18cbc78858609e9bcba6515cf9c7a413 | [] | no_license | alexrogeriodj/Caixa-Eletronico-em-Python | 9237fa2f7f8fab5f17b7dd008af215fb0aaed29f | 96b5238437c88e89aed7a7b9c34b303e1e7d61e5 | refs/heads/master | 2020-09-06T21:47:36.169855 | 2019-11-09T00:22:14 | 2019-11-09T00:22:14 | 220,563,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Primeira reimpressão - Segunda edição - Maio/2015
# Segunda reimpressão - Segunda edição - Janeiro/2016
# Terceira reimpressão - Segunda edição - Junho/2016
# Quarta reimpressão - Segunda edição - Março/2017
#
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem\capitulo 10\10.19 - A Classe Telefone.py
##############################################################################
class Telefone:
    """A phone number with an optional type label.

    The number is guarded by a validating property: it can never be None or
    blank. Two phones compare equal when their numbers match and their types
    either match or at least one side left the type unspecified.
    """

    def __init__(self, número, tipo=None):
        self.número = número  # routed through the validating setter below
        self.tipo = tipo

    def __str__(self):
        rotulo = self.tipo if self.tipo is not None else ""
        return "{0} {1}".format(self.número, rotulo)

    def __eq__(self, outro):
        if self.número != outro.número:
            return False
        # Types are compatible when equal or when either side is unset.
        return self.tipo == outro.tipo or self.tipo is None or outro.tipo is None

    @property
    def número(self):
        return self.__número

    @número.setter
    def número(self, valor):
        if valor is None or not valor.strip():
            raise ValueError("Número não pode ser None ou em branco")
        self.__número = valor
| [
"noreply@github.com"
] | alexrogeriodj.noreply@github.com |
011202b97c3d525aba76422bc4ac0353c4ff9d47 | 8cfdc0fb2e1e34b5963badacaf4be853134abf48 | /MySQL_CRUD/createtable_users.py | 67133445c8e0f309a62b017d87e9f12773cb86a6 | [] | no_license | turamant/ToolKit | b88e36ce986cc1a25628409c317930245cc260f5 | 343daa8238cc1fd247d7c06fad8e5c4c729dd0f9 | refs/heads/main | 2023-06-30T11:54:14.130454 | 2021-08-04T22:00:37 | 2021-08-04T22:00:37 | 385,747,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | import sys
import MySQLdb
# Connection parameters are hard-coded for a local development database.
conn = MySQLdb.connect(host="localhost",
                       user="user1",
                       passwd="password1",
                       db="shop")
cursor = conn.cursor()
# Create the users table; MySQL raises OperationalError if it already exists.
try:
    cursor.execute("""
        create table users (id int primary key,
        firstName varchar(20),
        lastName varchar(30),
        password varchar(12))
    """)
except MySQLdb.OperationalError:
    print("Table 'users' already exists")
    sys.exit(1)
# Commit the DDL and release resources before exiting.
cursor.close()
conn.commit()
conn.close()
| [
"tur1amant@gmail.com"
] | tur1amant@gmail.com |
721beb1176947744136d45e3c6de8ce8515fe84c | 420d4cf595fc8f28be0415aec70a4087e157555c | /Fluent_Python/Day35/tuple_unpacking_is_faster.py | 009896d6908736fa11385ba45fdc76243da6281f | [] | no_license | davidbegin/python-in-the-morning | 8cf89e62e7e8a2df5b8f875aae3cc7815545ad61 | aa4a271d1df0ce0a82d776c0955c1f20deb50937 | refs/heads/master | 2020-09-28T09:55:50.723066 | 2020-08-06T01:17:24 | 2020-08-06T01:17:24 | 226,753,142 | 23 | 3 | null | 2020-03-04T06:36:30 | 2019-12-09T00:31:09 | Python | UTF-8 | Python | false | false | 413 | py | import timeit
import array
print('\033c')  # ANSI escape: clear the terminal
print("\n\t\t\033[36;1;6;4mPerf Tests!\033[0m\n\n")
# Number of statement executions per timing run.
TIMES = 10000000
# Setup statement run once before each timing run: the tuple under test.
SETUP = """
two_elem = ("cool_thang", "no nobody cares")
"""
def clock(label, cmd):
    # Print the repeat timings (seconds) for `cmd` under the shared SETUP/TIMES.
    res = timeit.repeat(cmd, setup=SETUP, number=TIMES)
    print(label, *('{:.4f}'.format(x) for x in res))
# Compare indexing the first element vs. unpacking the whole pair.
clock("[0] : ", "meth_name = two_elem[0]")
clock("tuple_unpack: ", "meth_name, _ = two_elem")
| [
"davidmichaelbe@gmail.com"
] | davidmichaelbe@gmail.com |
677490ba39e51883cc0eb61a5fc89eeeb212e873 | f0c402d3858f0643561886797578b1e64655b1b3 | /utils/regression/tests/smoke_check/test_critical_section.py | 9e48489ec81279eac77e4e572fb958a6b32d4f41 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Leo-Wang-JL/force-riscv | 39ad2a72abd814df4b63879ce9825b6b06a9391a | deee6acaaee092eb90ac2538de122303334e5be3 | refs/heads/master | 2023-01-28T00:06:58.135651 | 2020-11-18T02:54:10 | 2020-11-18T02:54:10 | 271,873,013 | 0 | 0 | NOASSERTION | 2020-06-28T00:51:26 | 2020-06-12T19:15:26 | C++ | UTF-8 | Python | false | false | 1,105 | py | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# test_critical_section.py
from shared.path_utils import PathUtils
from shared.sys_utils import SysUtils
from shared.msg_utils import Msg
from unit_test import UnitTest
from shared.kernel_objs import HiCriticalSection
from shared.threads import HiThread
class UnitTest_HiCriticalSection( UnitTest ):
    """Smoke-test stub for HiCriticalSection; currently only logs progress."""
    def run_test( self ):
        # Placeholder: no assertions yet, just marks the phase in the log.
        Msg.info( "HiCriticalSection: Start Unit Test ..." )
    def process_result( self ):
        # Placeholder: no result checking yet.
        Msg.info( "HiCriticalSection: Process Test Result ..." )
| [
"jwang1@futurewei.com"
] | jwang1@futurewei.com |
f02aff8cc71531fdfa7921a01824d76da76292b0 | df30f97d316e899b07b223bc86cfe53345627f06 | /problems/test2/2.py | 62ecd3c40d29a1e559f5c4cedc42424ca0435ac4 | [] | no_license | GH-Lim/AlgorithmPractice | c6a3aa99fa639aa23d685ae14c1754e0605eaa98 | e7b8de2075348fb9fcc34c1d7f211fdea3a4deb0 | refs/heads/master | 2021-06-18T17:21:10.923380 | 2021-04-18T03:43:26 | 2021-04-18T03:43:26 | 199,591,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(S):
    """Reformat phone string S into dash-separated groups of three digits.

    Whitespace and dashes are stripped first. The final group is never a
    single digit: a remainder of 1 makes the last two groups 2+2 instead.
    """
    digits = ''.join(S.split()).replace('-', '')
    n = len(digits)
    remainder = n % 3
    if remainder == 0:
        groups = [digits[i:i + 3] for i in range(0, n, 3)]
    elif remainder == 2:
        groups = [digits[i:i + 3] for i in range(0, n - 2, 3)]
        groups.append(digits[-2:])
    else:
        # Remainder 1: leave four digits for the tail and split them 2 + 2.
        groups = [digits[i:i + 3] for i in range(0, n - 4, 3)]
        groups.append(digits[-4:-2])
        groups.append(digits[-2:])
    return '-'.join(groups)
"gunhyuck11@gmail.com"
] | gunhyuck11@gmail.com |
d16f78aa40e2a936776a27b16e5f84c4959b9d0d | d7d010a85125676b82df0fb5b010fdcc0d4c48f8 | /continuum_normalise.py | 0bd29339d263559631aefb1e92c1ad2e75689f6b | [] | no_license | conradtchan/slomp | 1b9b09bccf18cfccb62cac3b8a1880ff108051f1 | c8f6b2a424bfd3913418538c016cf05649222701 | refs/heads/master | 2022-03-03T01:09:41.123493 | 2018-04-11T13:38:47 | 2018-04-11T13:38:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py |
"""
An example script to pseudo-continuum-normalise a single LAMOST spectrum and
save the result to disk.
"""
import matplotlib.pyplot as plt
import pickle
import lamost
# When loading spectra, let's resample them onto a common wavelength scale.
# This makes it easier for any data-driven model or classifier.
with open("common_vac_wavelengths.pkl", "rb") as fp:
    common_dispersion = pickle.load(fp)
# wget http://dr3.lamost.org/sas/fits/B5591606/spec-55916-B5591606_sp03-051.fits.gz
# gunzip spec-55916-B5591606_sp03-051.fits.gz
input_path = "spec-55916-B5591606_sp03-051.fits"
# Read the spectrum resampled onto the shared wavelength grid.
dispersion, flux, ivar, meta = lamost.read_dr3_spectrum(input_path,
    common_dispersion=common_dispersion)
# Pseudo-continuum-normalise the flux and its inverse variances.
norm_flux, norm_ivar = lamost.continuum_normalize(dispersion, flux, ivar)
# Top panel: raw flux; bottom panel: normalised flux.
fig, ax = plt.subplots(2)
ax[0].plot(dispersion, flux, c="k")
ax[1].plot(dispersion, norm_flux, c="k")
output_path = "{}.pkl".format(input_path[:-5])
with open(output_path, "wb") as fp:
    # We don't save the dispersion array because it is already stored in
    # common_vac_wavelengths.pkl
    pickle.dump((norm_flux, norm_ivar, meta), fp)
| [
"andycasey@gmail.com"
] | andycasey@gmail.com |
b8fe8a23762651dd6136f569c4efcf35503ee3f6 | ed269e9a4d9d6bfbb833381b7aef65a23f391fe2 | /数据结构 以及部分基础模板算法/树/树demo.py | 301e62346dba2c25ee64ffa1202ee2fc4952b4c3 | [] | no_license | Comyn-Echo/leeCode | fcff0d4c4c10209a47bd7c3204e3f64565674c91 | 67e9daecb7ffd8f7bcb2f120ad892498b1219327 | refs/heads/master | 2023-04-28T17:35:52.963069 | 2021-05-19T01:52:16 | 2021-05-19T01:52:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | class node():
def __init__(self):
self.parent =None
self.chrldren = {} #不重复的子节点, 而且索引效率高
self.val = None
def show(self, deep=0):
print(deep * '--' , end=' ')
print(self.val)
for i in self.chrldren:
child = self.chrldren[i]
child.show(deep +2)
# 前, 中, 后 序遍历树, 这里是前序
def travel(self):
print(self.val)
for i in self.chrldren:
child = self.chrldren[i]
child.travel()
#插入节点, 用层级关系, val作为纽带
def __insert(self, List, position=1):
#到了最后一个节点
if position == len(List):
return
now = List[position]
# 已经存在就继续递进
if now in self.chrldren:
self.chrldren[now].__insert(List, position +1)
# 不存在,先创建,再继续
elif now not in self.chrldren:
tmp = node()
tmp.val = now
tmp.parent = self
self.chrldren[now] = tmp
self.chrldren[now].__insert(List, position +1)
def insert(self, List):
#root 存在值
if self.val:
if self.val == List[0]:
self.__insert(List, position=1)
else:
print('根节点对不上')
else:
self.val = List[0]
self.insert(List)
def delete(self, List):
pass
if __name__ == '__main__':
    tree = node()
    import random
    # Insert 20 random 5-element paths (values 0..10) and print the tree.
    for i in range(20):
        a = [random.randint(0,10) for i in range(5)]
        tree.insert(a)
    tree.show()
"2892211452aa@gmail.com"
] | 2892211452aa@gmail.com |
f8a69439eed2eab8cca7d5ee80baa35e53e002f6 | 1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b | /2020/valid_mountain_array.py | e1546a90581fddd1841ef411d2951d5ec4afa217 | [] | no_license | eronekogin/leetcode | ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c | edb870f83f0c4568cce0cacec04ee70cf6b545bf | refs/heads/master | 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 | Python | UTF-8 | Python | false | false | 506 | py | """
https://leetcode.com/problems/valid-mountain-array/
"""
from typing import List
class Solution:
    def validMountainArray(self, arr: List[int]) -> bool:
        """True when arr strictly increases to one peak, then strictly decreases."""
        n = len(arr)
        if n < 3:
            return False
        # Climb from the left edge while values strictly rise.
        left = 0
        while left + 1 < n and arr[left] < arr[left + 1]:
            left += 1
        # Climb from the right edge while values strictly rise (leftwards).
        right = n - 1
        while right > 0 and arr[right] < arr[right - 1]:
            right -= 1
        # Both climbs must meet at a single interior peak.
        return 0 < left == right < n - 1
| [
"“mengyu.jiang@gmail.com"
] | “mengyu.jiang@gmail.com |
30ea5fff596183df2745dfe231fffcae166b7a08 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1482494_0/Python/ciccino/round1a2.py | 713bdda24c7036d8fcc7b1102ab87344b94bbb0a | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import re
def Ryan(n, stars):
    # Greedy solver (Python 2). `stars` maps level-id -> [one_star_req,
    # two_star_req, played_flag]; returns the minimum number of plays to
    # clear every level with two stars, or 0 when it is impossible.
    times = 0
    star = 0
    while len(stars) > 0:
        # Python 2: keys() returns a list copy, so deleting entries from
        # `stars` inside the loop below is safe.
        keys = stars.keys()
        onestar = 0
        find = 0
        for i in range(len(keys)):
            level = stars[keys[i]]
            #we can't do a 2-star, try to remember possible 1-star
            if level[1] > star:
                if star >= level[0] and level[2] == 0:
                    # Prefer the 1-star level with the highest 2-star requirement.
                    if onestar == 0 or level[1] > stars[onestar][1]:
                        onestar = keys[i]
            else: #do 2-star
                times = times + 1
                if level[2] == 0:
                    #print str(star) + "+ 2 @" + str(keys[i]) + "[2star]"
                    star = star + 2
                else:
                    #print str(star) + "+ 1 @" + str(keys[i]) + "[1star]"
                    star = star + 1
                del stars[keys[i]]
                find = 1
        #try 1-star
        if find == 0:
            # No 2-star play was possible; fall back to the remembered 1-star.
            if (onestar == 0):
                return 0
            level = stars[onestar]
            #print str(star) + "+ 1 @" + str(onestar) + "[1star]"
            star = star + 1
            level[2] = 1
            times = times + 1
    return times
def round1a1(filepath):
    # Reads a Code Jam input file (Python 2) and writes "Case #i: <answer>"
    # lines to a matching ".out" file next to it.
    f = open(filepath, 'r+')
    infile = re.split('in', filepath)
    outfile = infile[0] + "out"
    print outfile
    o = open(outfile, "w+")
    #number of test cases
    t = int(f.readline())
    for i in range(t):
        n = int(f.readline())
        stars = {}
        for j in range(n):
            line = f.readline()
            sl = re.split(" ", line)
            # level j+1 -> [1-star requirement, 2-star requirement, played flag]
            stars[j + 1] = [int(sl[0]), int(sl[1]), 0]
        print str(n) + str(stars)
        result = Ryan(n, stars)
        res = ""
        if result == 0:
            res = "Too Bad"
        else:
            res = str(result)
        #result = recycledNumbers(sl[0], sl[1], len(sl[0]))
        buf = "Case #" + str(i + 1) + ": " + res
        i = i + 1
        print buf
        o.write(buf + '\n')
    f.close()
    o.close()
round1a1("./B-small-attempt1.in")
#round1a1("./B-test.in")
"eewestman@gmail.com"
] | eewestman@gmail.com |
a90d3e76a9136ee5850a8e7e50dc97be3fbca97d | 9ffa2c1d9472c0d686433a353764d03da2159205 | /tests/test_utils.py | d0c47ad37e9bdf82bc5c56e1ec57a453e56ac24c | [
"MIT"
] | permissive | zagaran/instant-census | 7c1a0ab0ff282ebc56dd3a35d18a3ab444da1bfb | 62dd5bbc62939f43776a10708ef663722ead98af | refs/heads/master | 2023-05-08T00:21:24.426828 | 2021-05-31T18:19:02 | 2021-05-31T18:19:02 | 372,590,104 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | from datetime import datetime, timedelta
from tests.common import InstantCensusTestCase
from utils.time import days_in_question_period
class TestUtils(InstantCensusTestCase):
    """Unit tests for utils.time.days_in_question_period."""
    def test_days_in_question_period(self):
        # Day lists use the project's convention: 0 == Sunday ... 6 == Saturday.
        every_day = [0, 1, 2, 3, 4, 5, 6]
        sunday = [0]
        monday = [1]
        # When every day is active, the period is always 1 day long.
        for i in range(7):
            # June 7, 2015 was a Sunday
            start_time = datetime(2015, 6, 7) + timedelta(days=i)
            days = days_in_question_period(start_time, every_day)
            # isoweekday has Monday == 1 ... Sunday == 7
            self.assertEqual(days, 1, "day of week %s gave %s against every_day"
                             % (start_time.isoweekday(), days))
        # Sundays only: the period shrinks as the start moves toward Sunday.
        for i in range(7):
            # June 7, 2015 was a Sunday
            start_time = datetime(2015, 6, 7) + timedelta(days=i)
            days = days_in_question_period(start_time, sunday)
            # isoweekday has Monday == 1 ... Sunday == 7
            self.assertEqual(days, 7 - i, "day of week %s gave %s against sunday"
                             % (start_time.isoweekday(), days))
        # Mondays only: same pattern, anchored on a Monday.
        for i in range(7):
            # June 8, 2015 was a Monday
            start_time = datetime(2015, 6, 8) + timedelta(days=i)
            days = days_in_question_period(start_time, monday)
            # isoweekday has Monday == 1 ... Sunday == 7
            self.assertEqual(days, 7 - i, "day of week %s gave %s against monday"
                             % (start_time.isoweekday(), days))
| [
"iamzags@gmail.com"
] | iamzags@gmail.com |
2dd25eba1dfe1ff5bb8e185add4283af8c09f5b7 | 371235a5c6636020fd9a103c732de8294c66c5de | /case sensitive game.py | 2f2af6c999d4f3090f8cecaeee0d9729e08304a7 | [] | no_license | Akmalhakimteo/10.009-The-Digital-World | 11bf13bc07c73ad36d260656b565cc0955a9217a | c037f6656a0eeb6e50d17c90164c590107a53087 | refs/heads/master | 2020-12-11T11:49:46.205201 | 2020-01-14T13:01:15 | 2020-01-14T13:01:15 | 233,841,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 13:20:49 2019
@author: akmal
"""
# Collect every uppercase character the user types, echoing progress.
print("are there CAPS in your word?")
user_ans=input("type in your input here please\n")
list_caps=[]
for char in user_ans:
    if char.isupper()==True:
        list_caps.append(char)
        # char.islower() is always False here; prints it plus the capitals so far.
        print("{},{}".format(char.islower(),list_caps))
#print({},{}.format(user_input.islower(),list_caps)
#
#
#if user_ans.islower()==True:
#    print("all is in lowercase")
#else:
#    print("At least one letter is not in lowercase")
#
#
"akmal_hakim_teo@hotmail.com"
] | akmal_hakim_teo@hotmail.com |
0bc9a61e4524f7747997d1b0023fa09bf3c7d9f3 | 9848a719ddfdd21b5fe1fa2f55da290c0f6952dc | /unique-paths-2.py | f301c97817b27d3d04efaed880fc888e4e9a44e5 | [] | no_license | maomao905/algo | 725f7fe27bb13e08049693765e4814b98fb0065a | 84b35ec9a4e4319b29eb5f0f226543c9f3f47630 | refs/heads/master | 2023-03-13T12:38:55.401373 | 2021-03-25T01:55:48 | 2021-03-25T01:55:48 | 351,278,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | """
dynamic programming
current path = dp[i-1] + dp[j-1]
if there is a block in i-1 or j-1, i-1 or j-1 path would be zero
time: O(MN)
space: O(1)
"""
from typing import List
class Solution:
    def uniquePathsWithObstacles(self, grid: List[List[int]]) -> int:
        """Count unique right/down paths through grid, where 1 marks an obstacle.

        The grid itself is reused as the DP table, so the input is mutated
        in place (same as the original implementation).
        """
        rows, cols = len(grid), len(grid[0])
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 1:
                    # Obstacles contribute zero paths.
                    grid[r][c] = 0
                elif r == 0 and c == 0:
                    # Starting cell: exactly one way to stand here.
                    grid[r][c] = 1
                else:
                    above = grid[r - 1][c] if r > 0 else 0
                    left = grid[r][c - 1] if c > 0 else 0
                    grid[r][c] = above + left
        return grid[-1][-1]
# Quick manual checks; expected output: 2, 1, 0.
s = Solution()
print(s.uniquePathsWithObstacles([[0,0,0],[0,1,0],[0,0,0]]))
print(s.uniquePathsWithObstacles([[0,1],[0,0]]))
print(s.uniquePathsWithObstacles([[0,0],[0,1]]))
| [
"maoya.sato@gmail.com"
] | maoya.sato@gmail.com |
ebc751ef7abdd04bdbca483941cc28084b496671 | cd9cb38fdc0be20d0b02c554537048f2c71333b6 | /fuzzy_search/__init__.py | 77b6484713f45e150ddf6c458df5f6681fd2bf7b | [
"MIT"
] | permissive | marijnkoolen/fuzzy-search | 15a09cc3bf9249175af2494903c1189b0f0f6608 | 1ac61e558f16b5a35918f55ac1f65857c740601e | refs/heads/master | 2023-08-03T08:37:15.752423 | 2023-07-20T13:23:01 | 2023-07-20T13:23:01 | 218,385,563 | 18 | 1 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | __version__ = '2.0.1a'
from fuzzy_search.search.config import default_config
from fuzzy_search.search.phrase_searcher import FuzzyPhraseSearcher
from fuzzy_search.search.token_searcher import FuzzyTokenSearcher
from fuzzy_search.match.phrase_match import PhraseMatch
from fuzzy_search.phrase.phrase_model import PhraseModel
def make_searcher(phrases: any, config):
    """Convenience constructor: wrap *phrases* in a PhraseModel and return a
    FuzzyPhraseSearcher built from it with the same *config*.

    NOTE(review): the ``any`` annotation is the builtin function, not
    typing.Any — presumably typing.Any was intended; confirm before changing.
    """
    phrase_model = PhraseModel(phrases, config)
    searcher = FuzzyPhraseSearcher(phrase_model=phrase_model, config=config)
    return searcher
| [
"marijn.koolen@gmail.com"
] | marijn.koolen@gmail.com |
e0b4c42b2802188a2cdb0e5fa651d16088cf5d88 | aa13423fa47e405a4b8884fe125b99ef7f0111dc | /backend/service/common/common_service.py | 3b44ad38e701e1cf654486a9d3c9e976ec464163 | [] | no_license | imtos/loonflow | b2f04d2fa5890c0a2a09d34b8c63af2bee38230b | 5f0953c497736b2376757978782b13fb0ca76305 | refs/heads/master | 2020-03-07T19:56:37.527777 | 2018-02-08T09:53:58 | 2018-02-08T09:53:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from rest_framework.views import exception_handler
from service.base_service import BaseService
class CommonService(BaseService):
    """Service for shared/common operations; adds no state beyond BaseService."""
    def __init__(self):
        # Intentionally empty; does not call BaseService.__init__ — TODO confirm.
        pass
| [
"blackholll@163.com"
] | blackholll@163.com |
494db8147046d97eeae5ef128b6145f4abd174c5 | a9a07bd14e0568c8aa95cc43dc1961c2cceba9bf | /src/util/convert_fasta.py | e0cb667728ce13f9344c8721a40c9e7b5a3b5532 | [] | no_license | daishu-li/AMP_Benchmark | 4781e3b44f0fa16550661aeb9f8bcfc0e5f5aace | 14abf5bb715c1e0ecd8099beac2a1e92d6c72330 | refs/heads/master | 2023-01-12T10:38:10.227157 | 2020-11-15T23:57:19 | 2020-11-15T23:57:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | '''
AMP FAST Formater
Author: Yuya Jeremy Ong (yjo5006@psu.edu)
'''
from __future__ import print_function
# Application Parameters
DATA_DIR = '../../data/proc/'  # processed csv datasets live here
INPUT_DIR = DATA_DIR + 'data3.csv'  # source csv (id, label, sequence columns)
OUTPUT_DIR = '../../data/fasta/data3_merges.fasta.txt'  # generated FASTA output
def read_csv(dir, ignore_header=True):
    """Parse an AMP csv file into [id, sequence] pairs.

    Columns 0 and 2 of each row are kept. The trailing element produced by a
    final newline is dropped, matching the original ``[st:-1]`` slice.

    Args:
        dir (str): path to the csv file. (Parameter name kept for backward
            compatibility even though it shadows the builtin ``dir``.)
        ignore_header (bool): skip the first row when True.

    Returns:
        list: one [id, sequence] pair per data row.
    """
    start = 1 if ignore_header else 0
    # Bug fix: use a context manager so the file handle is closed, not leaked.
    with open(dir, 'r') as fp:
        data = fp.read().split('\n')[start:-1]
    return [[d.split(',')[0], d.split(',')[2]] for d in data]
if __name__ == '__main__':
    # Read CSV File
    data = read_csv(INPUT_DIR)
    # FASTA File Generate Output: one '>'+id header line per record, then its sequence.
    out = open(OUTPUT_DIR, 'w')
    for d in data:
        out.write('>' + d[0] + '\n')
        out.write(d[1] + '\n')
    out.close()
    print('Output File: ' + OUTPUT_DIR)
| [
"yuyajeremyong@gmail.com"
] | yuyajeremyong@gmail.com |
72c3f2d8497a0d60d2c0f14e957489a4618e4be4 | 4851d160a423b4a65e81a75d5b4de5218de958ee | /Are You Playing Banjo.py | 433730f43ecfc4a458d8b013baa223ac9e5ed536 | [] | no_license | LarisaOvchinnikova/python_codewars | 519508e5626303dcead5ecb839c6d9b53cb3c764 | 5399f4be17e4972e61be74831703a82ce9badffd | refs/heads/master | 2023-05-05T14:52:02.100435 | 2021-05-25T18:36:51 | 2021-05-25T18:36:51 | 319,399,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # https://www.codewars.com/kata/53af2b8861023f1d88000832
def areYouPlayingBanjo(name):
    """Return '<name> plays banjo' when name starts with r/R,
    otherwise '<name> does not play banjo'."""
    plays_banjo = name[0].lower() == "r"
    if plays_banjo:
        return "{} plays banjo".format(name)
    return "{} does not play banjo".format(name)
"larisaplantation@gmail.com"
] | larisaplantation@gmail.com |
0e05766e147bc75fb98146c9f60ba7e258833825 | c34759a07cee20cdfe067247753f9951d6de77ff | /course/models.py | 3f43ba4570b105d629c6736e73cfec263824bb8c | [] | no_license | Shadyaobuya/PythonWeb | 6e52be7819489de7ae508c92aea4bea4917db828 | 451e193b4000627d3fccc0966be684307d75ca18 | refs/heads/master | 2023-08-02T12:04:02.661068 | 2021-09-30T08:31:39 | 2021-09-30T08:31:39 | 380,287,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | from django.db import models
from django.db.models.deletion import CASCADE, SET_NULL
# Create your models here.
class Course(models.Model):
    """A taught course: name, code, trainer, description and target class."""
    # All fields are nullable so partially-filled courses can be saved.
    course_name=models.CharField(max_length=40,null=True)
    course_code=models.CharField(max_length=20,null=True)
    trainer=models.CharField(max_length=30,null=True)
    description=models.TextField(null=True)
    class_name=models.CharField(max_length=20,null=True)
    def __str__(self):
        # Admin/display representation.
        return self.course_name
    def check_course_name(self):
        # Plain accessor duplicating the attribute; kept for existing callers.
        return self.course_name
    def check_trainer(self):
        # Plain accessor duplicating the attribute; kept for existing callers.
        return self.trainer
class CourseSyllabus(models.Model):
course=models.OneToOneField(Course,on_delete=SET_NULL,null=True)
topic=models.TextField(null=True)
def __str__(self):
return self.topic
| [
"shadyaobuyagard@gmail.com"
] | shadyaobuyagard@gmail.com |
c10648c141dd42e67722581ebc455b6b420f711c | 93a43800b64c70ecf7069600b9d5fc83a726343e | /Examples/SecurityOperations/UpdateDocumentPassword.py | 7f56bb2b3983d043278cd546bcc5dca26fef5e7a | [
"MIT"
] | permissive | groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples | 2837afeac5cefb966cdafe4e5ec8c4ca8f8ea216 | dc4dffe01b98d68d469cbacab490894a024b79a7 | refs/heads/master | 2023-02-19T13:40:20.721383 | 2023-02-08T06:48:04 | 2023-02-08T06:48:04 | 225,944,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # Import modules
import groupdocs_merger_cloud
from Common import Common
# This example demonstrates how to update document password
class UpdateDocumentPassword:
@classmethod
def Run(cls):
securityApi = groupdocs_merger_cloud.SecurityApi.from_config(Common.GetConfig())
options = groupdocs_merger_cloud.UpdatePasswordOptions()
options.file_info = groupdocs_merger_cloud.FileInfo("WordProcessing/password-protected.docx", None, None, "password")
options.output_path = "Output/update-password.docx"
options.new_password = "NewPassword"
result = securityApi.update_password(groupdocs_merger_cloud.UpdatePasswordRequest(options))
print("Output file path = " + result.path) | [
"sergei.terentev@aspose.com"
] | sergei.terentev@aspose.com |
58b86a62d88299e881d276fb2e91397a3746ba21 | 59b3f3e3c082bf0891e8a117251607dac71c7e9c | /dockit/tests/serializers/common.py | 5d6ecc9d49b6c25711ef94d27ed3c6c6403344cd | [
"BSD-3-Clause"
] | permissive | cuker/django-dockit | 55a42af69b4dd41e941fe07ebc70a7a0826bd253 | 406734280ca6b55f66b73b3b4ec5e97ba58f045d | refs/heads/master | 2021-01-17T22:53:38.324005 | 2013-07-18T09:40:35 | 2013-07-18T09:40:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | from dockit import schema
from django.contrib.contenttypes.models import ContentType
class ChildDocument(schema.Document):
charfield = schema.CharField()
def create_natural_key(self):
return {'charfield': self.charfield}
class ChildSchema(schema.Schema):
ct = schema.ModelReferenceField(ContentType)
class ParentDocument(schema.Document):
title = schema.CharField()
subdocument = schema.ReferenceField(ChildDocument)
subschema = schema.SchemaField(ChildSchema)
| [
"jasonk@cukerinteractive.com"
] | jasonk@cukerinteractive.com |
e98a870a3708af61cf12893b76db95dce8ce711a | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_09_01_polycrystal_FIP/main_plt.py | 881fb484e611cde890483c3bf1796684fce491c5 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | # import plot_correlation as pltcorr
import plot_explained_variance as pev
import plot_pc_map_3d as pltmap3d
import plot_pc_map as pltmap
import plot_dendrogram as pd
import plot_err_v_pc as pevp
import plot_linkage_check as plc
import plot_evd as pe
import plot_evd_predicted as pep
from constants import const
import matplotlib.pyplot as plt
C = const()
names = C['names']
sid = C['sid_split']
ns = C['ns_split']
# Hvec = [6, 15, 41, 90]
Hvec = [6]
H = 6
deg = 2
# """Plot an autocorrelation"""
# sn = 0
# iA = 1
# iB = 1
# pltcorr.pltcorr(ns_cal[0], sid_cal[0], sn, iA, iB)
"""Plot the percentage explained variance"""
pev.variance([.5, 15, 40, 105], Hvec)
"""Plot the microstructures in PC space"""
pcA = 0
pcB = 1
pcC = 2
pltmap.pltmap(H, pcA, pcB)
pltmap3d.pltmap(H, pcA, pcB, pcC)
"""Plot a dendrogram"""
pd.pltdend(ns, sid, H)
"""Plot the errors versus number of PCs and polynomial order"""
emax = 100
pevp.plterr('mu', emax, deg, ['meanerr'], Hvec)
pevp.plterr('mu', emax, deg, ['LOOCV'], Hvec)
pevp.plterr('sigma', emax, deg, ['meanerr'], Hvec)
pevp.plterr('sigma', emax, deg, ['LOOCV'], Hvec)
"""Plot the predicted versus actual values of the property of interest"""
indx1 = plc.plot_check('mu', n_pc=2, n_poly=3, H=H, erv=10)
indx2 = plc.plot_check('sigma', n_pc=2, n_poly=3, H=H, erv=10)
"""Plot the FIP EVDs versus the predicted FIP EVDs"""
pe.pltevd(H)
pep.pltevd(indx1, indx2, H)
plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
abb33774e1c174d956e5753d33f6f6a895bfc3c5 | ff4d26332da8b4d31689a68c97c06eca19cc4260 | /projectEuler/webScraping/problemTemplates/135.py | 644ba12b6d7d882d9abaf7cf859a982632096169 | [] | no_license | nickfang/classes | cf1b64686fb34909f6ffface0f669fa88256d20c | 6869deaa5a24782c5a69c7aa41875faf2553e013 | refs/heads/master | 2023-01-04T00:43:31.351247 | 2019-12-30T21:04:12 | 2019-12-30T21:04:12 | 100,035,808 | 0 | 0 | null | 2023-01-03T20:59:30 | 2017-08-11T13:41:17 | HTML | UTF-8 | Python | false | false | 548 | py | # Same differences
#
#Given the positive integers, x, y, and z, are consecutive terms of an arithmetic progression, the least value of the positive integer, n, for which the equation, x^2 − y2 − z2 = n, has exactly two solutions is n = 27:
#34^2 − 272 − 202 = 122 − 92 − 62 = 27
#It turns out that n = 1155 is the least value which has exactly ten solutions.
#How many values of n less than one million have exactly ten distinct solutions?
#
import time
startTime = time.time()
print('Elapsed time: ' + str(time.time()-startTime)) | [
"fang.nicholas@gmail.com"
] | fang.nicholas@gmail.com |
525d43ca59cc5097c97503cf5b04764728628052 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Quote18/HQ_18_135.py | 7dd1a834e9b79a4fe2ce51b8c71d9f8035bfa820 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_135(xtp_test_case):
def subOrderBook(self, Api, stk_info, case_name, rs_expect):
print Api.GetApiVersion()
def on_order_book(data, error, last):
self.print_msg(case_name, rs_expect, error)
Api.setSubOrderBookHandle(on_order_book)
Api.SubscribeOrderBook(stk_info)
time.sleep(1)
def print_msg(self, case_name, rs_expect, error):
if rs_expect == error:
logger.warning('{0}测试正确!'.format(case_name))
else:
logger.error('{0}测试错误!'.format(case_name))
self.assertEqual(error, rs_expect)
def test_HQ_18_135(self):
pyname = 'HQ_18_135'
client_id = 6
Api = XTPQuoteApi(client_id)
Api.Login()
stk_info = {'ticker': '', 'exchange_id': 2}
self.subOrderBook(Api, stk_info, pyname,
{'error_id': 11200003, 'error_msg': 'unknown security'}) # 4
Api.Logout()
if __name__=='__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
d57b2dfdaf0801d20f664f6d022bce430e4b2b95 | 86813bf514f3e0257f92207f40a68443f08ee44b | /0892 三维形体的表面积/0892 三维形体的表面积.py | 28d502d4a4003699f10cb5c26e011ee620470549 | [] | no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | #label: maths difficulty: easy
class Solution:
def surfaceArea(self, grid: List[List[int]]) -> int:
n = 0
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] > 0:
n += 2 + 4 * grid[i][j]
if i > 0:
n -= 2 * min(grid[i][j], grid[i - 1][j])
if j > 0:
n -= 2 * min(grid[i][j], grid[i][j - 1])
return n
| [
"noreply@github.com"
] | Aurora-yuan.noreply@github.com |
23942b0d1b41fd4f12a183199851c68f55ddcee6 | 7e41d5ec2f8ba704c99bbb252a216566fa0e7ce3 | /Clases/Estadistica/centralizacion.py | 26fffe21b157bcf0a85b3c5175119168738d30dd | [
"MIT"
] | permissive | juanpanu-zz/PM_DataScience | 11cf1fab4939ba415bbae28b134182e7f3108e37 | 24e71616dae692e931e95cd3815ca88fa9b8a46a | refs/heads/master | 2023-01-03T01:15:50.893425 | 2020-10-24T16:32:16 | 2020-10-24T16:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | datos= [0,0,0,0,0,0,0,0,0,0,0,0,0
,1,1,1,1,1,1,1,1,1,1
,2,2,2,2,2,2,2
,3,3,3,3,3,3
,4,4]
def media(datos):
return sum(datos)/len(datos)
def mediana(datos):
if(len(datos)%2 == 0):
return (datos[int(len(datos)/2)] + datos[int((len(datos)+1)/2)]) / 2
else:
return datos[(len(datos)+1)/2]
if __name__ == '__main__':
print(media(datos))
print(mediana(datos)) | [
"juanpa.nb@gmail.com"
] | juanpa.nb@gmail.com |
02d7211f3b1a728472c1ffef0d6d0e717bc29ca3 | df982b09cb71edeb2f306d5b966c13a45b9a9e70 | /src/encode_task_trimmomatic.py | 83bd7b94706fa278c3d5f819498ba86cdab5b840 | [
"MIT"
] | permissive | Fnyasimi/chip-seq-pipeline2 | 7857c752abbb6fa9c1b3e2e19e54776cdf2583b5 | 15d87e4dfd6a4fdf1419b17a1f25fcde75252e1c | refs/heads/master | 2020-12-15T07:52:35.261071 | 2020-05-10T10:40:36 | 2020-05-10T10:40:36 | 235,036,517 | 0 | 0 | MIT | 2020-05-10T10:40:37 | 2020-01-20T06:47:59 | null | UTF-8 | Python | false | false | 6,081 | py | #!/usr/bin/env python
# ENCODE DCC Trimmomatic wrapper
# Author: Jin Lee (leepc12@gmail.com)
import sys
import os
import argparse
from encode_lib_common import (
assert_file_not_empty, log, ls_l, mkdir_p, rm_f,
run_shell_cmd, strip_ext_fastq)
from encode_lib_genomic import (
locate_trimmomatic)
def parse_arguments(debug=False):
parser = argparse.ArgumentParser(
prog='ENCODE DCC Trimmomatic wrapper.')
parser.add_argument('--fastq1',
help='FASTQ R1 to be trimmed.')
parser.add_argument('--fastq2',
help='FASTQ R2 to be trimmed.')
parser.add_argument('--paired-end', action="store_true",
help='Paired-end FASTQs.')
parser.add_argument('--crop-length', type=int, required=True,
help='Number of basepair to crop.'
'Trimmomatic\'s parameter CROP.')
parser.add_argument('--crop-length-tol', type=int, default=2,
help='Crop length tolerance to keep shorter reads '
'around the crop length. '
'Trimmomatic\'s parameter MINLEN will be --crop-length '
'- abs(--crop-length-tol).')
parser.add_argument('--out-dir-R1', default='', type=str,
help='Output directory for cropped R1 fastq.')
parser.add_argument('--out-dir-R2', default='', type=str,
help='Output directory for cropped R2 fastq.')
parser.add_argument('--trimmomatic-java-heap',
help='Trimmomatic\'s Java max. heap: java -jar Trimmomatic.jar '
'-Xmx[MAX_HEAP]')
parser.add_argument('--nth', type=int, default=1,
help='Number of threads to parallelize.')
parser.add_argument('--log-level', default='INFO',
choices=['NOTSET', 'DEBUG', 'INFO',
'WARNING', 'CRITICAL', 'ERROR',
'CRITICAL'],
help='Log level')
args = parser.parse_args()
if not args.crop_length:
raise ValueError('Crop length must be > 0.')
log.setLevel(args.log_level)
log.info(sys.argv)
return args
def trimmomatic_se(fastq1, crop_length, crop_length_tol, out_dir,
nth=1, java_heap=None):
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_fastq(fastq1)))
crop_length_tol = abs(crop_length_tol)
min_length = crop_length - crop_length_tol
cropped = '{p}.crop_{cl}-{tol}bp.fastq.gz'.format(
p=prefix, cl=crop_length, tol=crop_length_tol)
if java_heap is None:
java_heap_param = '-Xmx6G'
else:
java_heap_param = '-Xmx{}'.format(java_heap)
cmd = 'java -XX:ParallelGCThreads=1 {param} -jar {jar} SE -threads {nth} '
cmd += '{fq1} {cropped} MINLEN:{ml} CROP:{cl}'
cmd = cmd.format(
param=java_heap_param,
jar=locate_trimmomatic(),
nth=nth,
fq1=fastq1,
cropped=cropped,
ml=min_length,
cl=crop_length)
run_shell_cmd(cmd)
return cropped
def trimmomatic_pe(fastq1, fastq2, crop_length, crop_length_tol, out_dir_R1, out_dir_R2,
nth=1, java_heap=None):
prefix_R1 = os.path.join(
out_dir_R1, os.path.basename(strip_ext_fastq(fastq1)))
prefix_R2 = os.path.join(
out_dir_R2, os.path.basename(strip_ext_fastq(fastq2)))
crop_length_tol = abs(crop_length_tol)
min_length = crop_length - crop_length_tol
cropped_R1 = '{p}.crop_{cl}-{tol}bp.fastq.gz'.format(
p=prefix_R1, cl=crop_length, tol=crop_length_tol)
cropped_R2 = '{p}.crop_{cl}-{tol}bp.fastq.gz'.format(
p=prefix_R2, cl=crop_length, tol=crop_length_tol)
tmp_cropped_R1 = '{}.tmp'.format(cropped_R1)
tmp_cropped_R2 = '{}.tmp'.format(cropped_R2)
if java_heap is None:
java_heap_param = '-Xmx6G'
else:
java_heap_param = '-Xmx{}'.format(java_heap)
cmd = 'java -XX:ParallelGCThreads=1 {param} -jar {jar} PE -threads {nth} '
cmd += '{fq1} {fq2} {cropped1} {tmp_cropped1} {cropped2} {tmp_cropped2} '
cmd += 'MINLEN:{ml} CROP:{cl}'
cmd = cmd.format(
param=java_heap_param,
jar=locate_trimmomatic(),
nth=nth,
fq1=fastq1,
fq2=fastq2,
cropped1=cropped_R1,
tmp_cropped1=tmp_cropped_R1,
cropped2=cropped_R2,
tmp_cropped2=tmp_cropped_R2,
ml=min_length,
cl=crop_length)
run_shell_cmd(cmd)
rm_f([tmp_cropped_R1, tmp_cropped_R2])
return cropped_R1, cropped_R2
def main():
# read params
args = parse_arguments()
log.info('Initializing and making output directory...')
mkdir_p(args.out_dir_R1)
if args.paired_end:
mkdir_p(args.out_dir_R2)
log.info(
'Cropping fastqs with Trimmomatic... '
'crop_length={cl}, crop_length_tol={clt}'.format(
cl=args.crop_length,
clt=args.crop_length_tol))
if args.paired_end:
cropped_R1, cropped_R2 = trimmomatic_pe(
args.fastq1, args.fastq2,
args.crop_length, args.crop_length_tol,
args.out_dir_R1, args.out_dir_R2,
args.nth,
args.trimmomatic_java_heap)
else:
cropped_R1 = trimmomatic_se(
args.fastq1,
args.crop_length, args.crop_length_tol,
args.out_dir_R1,
args.nth,
args.trimmomatic_java_heap)
log.info('List all files in output directory...')
ls_l(args.out_dir_R1)
if args.paired_end:
ls_l(args.out_dir_R2)
log.info('Checking if output is empty...')
assert_file_not_empty(cropped_R1, help=
'No reads in FASTQ after cropping. crop_length might be too high? '
'While cropping, Trimmomatic (with MINLEN=crop_length-abs(crop_length_tol)) '
'excludes all reads SHORTER than crop_length.')
log.info('All done.')
if __name__ == '__main__':
main()
| [
"leepc12@gmail.com"
] | leepc12@gmail.com |
7de9384fbd2f2372f2d386386c9cd8ff2c6cbc27 | 591a05e50f2515f6bd4605de6ed9ed7d3936ad9d | /welib/wt_theory/tests/test_wakeexpansion.py | 54e4f50b1cb6405abe136bc768d734c18a01b8ff | [
"MIT"
] | permissive | ebranlard/welib | 679edeec85feb629dc27047a62422d469c6e0081 | 3486e87c6348e9580099fe5c360138e762ab3ea9 | refs/heads/main | 2023-08-09T13:31:40.253283 | 2023-06-16T18:17:09 | 2023-06-16T18:17:09 | 153,533,129 | 50 | 25 | MIT | 2023-06-16T18:17:11 | 2018-10-17T22:47:46 | Python | UTF-8 | Python | false | false | 1,027 | py | import unittest
import numpy as np
from welib.wt_theory.wakeexpansion import *
class TestExpansion(unittest.TestCase):
def test_downstreamD(self):
# Check that analytical solution and numerical solution match when getting downstream distance
CT=0.8
fraction = 0.5
rw0 = wake_expansion_momentum(CT=CT)
expansion = 1 + fraction * (rw0-1)
xa = downstreamDistanceForGivenExpansion(CT, expansion, model='cylinder', method='analytical')
xn = downstreamDistanceForGivenExpansion(CT, expansion, model='cylinder', method='interp')
np.testing.assert_almost_equal(xa, xn, 5)
def test_methods(self):
CT = 0.8
fraction = 0.5
rw0 = wake_expansion_momentum(CT=CT)
np.testing.assert_almost_equal(rw0, 1.27201965, 7)
xb = [0, 1, 20] # xb =r/R
r = wake_expansion(xb, CT=CT, model='cylinder')
np.testing.assert_almost_equal(r, [1, 1.17048, 1.27153], 5)
if __name__ == '__main__':
unittest.main()
| [
"emmanuel.branlard@nrel.gov"
] | emmanuel.branlard@nrel.gov |
515e5f55d7782b81daf3473bf6affdc0b76a7cbe | 1f5f8f95530003c6c66419519d78cb52d21f65c0 | /projects/golem_api/pages/page.py | 4181623ca86611c093bc53b9c670ae1e4e098000 | [] | no_license | golemhq/golem-tests | c5d3ab04b1ea3755d8b812229feb60f513d039ac | dff8fd3a606c3d1ef8667aece6fddef8ac441230 | refs/heads/master | 2023-08-17T23:05:26.286718 | 2021-10-04T20:34:17 | 2021-10-04T20:34:17 | 105,579,436 | 4 | 1 | null | 2018-11-19T00:14:24 | 2017-10-02T20:05:55 | Python | UTF-8 | Python | false | false | 2,597 | py | import requests
from projects.golem_api.pages.utils import url, headers
DELETE_PAGE_ENDPOINT = '/page/delete'
DUPLICATE_PAGE_ENDPOINT = '/page/duplicate'
RENAME_PAGE_ENDPOINT = '/page/rename'
PAGE_COMPONENTS_ENDPOINT = '/page/components'
SAVE_PAGE_ENDPOINT = '/page/save'
SAVE_PAGE_CODE_ENDPOINT = '/page/code/save'
RENAME_PAGE_DIRECTORY_ENDPOINT = '/page/directory/rename'
DELETE_PAGE_DIRECTORY_ENDPOINT = '/page/directory/delete'
def delete_page(project_name, page_name, user=None):
return requests.delete(url(DELETE_PAGE_ENDPOINT), headers=headers(user),
json={'project': project_name, 'fullPath': page_name})
def duplicate_page(project_name, page_name, new_page_name, user=None):
json_ = {
'project': project_name,
'fullPath': page_name,
'newFileFullPath': new_page_name
}
return requests.post(url(DUPLICATE_PAGE_ENDPOINT), headers=headers(user), json=json_)
def rename_page(project_name, page_name, new_page_name, user=None):
json_ = {
'project': project_name,
'fullFilename': page_name,
'newFullFilename': new_page_name
}
return requests.post(url(RENAME_PAGE_ENDPOINT), headers=headers(user), json=json_)
def get_page_components(project_name, page_name, user=None):
return requests.get(url(PAGE_COMPONENTS_ENDPOINT), headers=headers(user),
params={'project': project_name, 'page': page_name})
def save_page(project_name, page_name, elements, functions, import_lines, user=None):
json_ = {
'project': project_name,
'pageName': page_name,
'elements': elements,
'functions': functions,
'importLines': import_lines
}
return requests.put(url(SAVE_PAGE_ENDPOINT), headers=headers(user), json=json_)
def save_page_code(project_name, page_name, content, user=None):
json_ = {
'project': project_name,
'pageName': page_name,
'content': content
}
return requests.put(url(SAVE_PAGE_CODE_ENDPOINT), headers=headers(user), json=json_)
def rename_page_directory(project_name, dir_name, new_dir_name, user=None):
json_ = {
'project': project_name,
'fullDirname': dir_name,
'newFullDirname': new_dir_name
}
return requests.post(url(RENAME_PAGE_DIRECTORY_ENDPOINT), headers=headers(user), json=json_)
def delete_page_directory(project_name, dir_name, user=None):
return requests.delete(url(DELETE_PAGE_DIRECTORY_ENDPOINT), headers=headers(user),
json={'project': project_name, 'fullDirname': dir_name})
| [
"luciano@lucianorenzi.com"
] | luciano@lucianorenzi.com |
ecd23624ad4bf1a877b5602da7d072f654ced6f2 | 19316c08712a502b1124f2b55cb98bfcbcca7af5 | /dev/python/2018-07-25 findcrash.py | 3d319feb710cd4cc991faf1eb345cf56d8c88d28 | [
"MIT"
] | permissive | swharden/pyABF | 49a50d53015c50f1d5524242d4192718e6f7ccfa | 06247e01ca3c19f5419c3b9b2207ee544e30dbc5 | refs/heads/main | 2023-08-28T02:31:59.540224 | 2023-08-17T16:34:48 | 2023-08-17T16:34:48 | 109,707,040 | 92 | 39 | MIT | 2023-04-06T00:37:29 | 2017-11-06T14:39:21 | Jupyter Notebook | UTF-8 | Python | false | false | 179 | py | """
Boilerplate dev test
"""
from imports import *
if __name__ == "__main__":
abf = pyabf.ABF(PATH_DATA+"/180415_aaron_temp.abf")
print(abf.sweepY)
print(abf.sweepC) | [
"swharden@gmail.com"
] | swharden@gmail.com |
3a30c4238d79e5f91a766e65bc38e75f764384bd | 59129c8fee701270a7a69cc03d876834f567597a | /olvidado/.i3pystatus.caladan.py | fdb15af7e800b5bc31251bb794b506c25e0ef9e0 | [] | no_license | jmberros/dotfiles | 0d79e35fc30fe3669464bc979e64bb6a365ab3f6 | 7c12c4e70b25b4c932a160c2142a132eecca5b1d | refs/heads/master | 2023-08-31T15:33:11.801980 | 2023-08-29T00:48:24 | 2023-08-29T00:48:24 | 10,116,001 | 37 | 26 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | # -*- coding: utf-8 -*-
import subprocess
from i3pystatus import Status
status = Status(standalone=True)
status.register("clock",
color="#cccccc",
format="%H:%M, %A %-d %B",)
status.register("pulseaudio",
format="🔊 {volume}",)
status.register("load",
color="#bbbbbb",
critical_limit=8,
format="💻 {avg1} / {avg5}")
status.register("mem",
divisor=10**9,
color="#4CAF50",
format="{used_mem} / {avail_mem} Gb")
status.register("now_playing",
player="clementine",
color="#ffc080",
format='{artist}: "{title}" {song_elapsed}/{song_length}')
status.register("network",
interface="en0",
format_up="{bytes_sent} kB/s↑ {bytes_recv} kB/s↓",
format_down="Internet DOWN",
dynamic_color = True,
start_color="gray",
end_color="yellow",
color_down="#ff2222",
)
status.register("disk",
path="/home/juan",
color="#bbbbbb",
#format="{used} / {total}G [ {avail}G ]",)
format="🏠 {avail}G",)
status.register("disk",
path="/",
color="#bbbbbb",
#format="{used} / {total}G [ {avail}G ]",)
format="/ {avail}G",)
status.run()
| [
"juanmaberros@gmail.com"
] | juanmaberros@gmail.com |
803157c722e9a3da82f6f61490f4b508d74f77c0 | 22b3f1851bf4da5fc8837b31cc276e95f92c7a33 | /deeppy/expr/util.py | 62e19c43957f6237f2f401406e30ca2d0bbf294e | [
"MIT"
] | permissive | nagyistoce/deeppy | db34eda7d4d14077c577ef081ed3edf2b9d00add | f7d073aef9a7070a841d66f34046414c88b01812 | refs/heads/master | 2020-12-11T04:01:49.877773 | 2015-11-19T14:34:43 | 2015-11-19T14:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | import numpy as np
from .base import Identity
_measures = {
'mean': ('%.2e', np.mean),
'std': ('%.2e', np.std),
'shape': ('%s', lambda x: str(x.shape)),
'absnorm': ('%.2e', lambda x: np.sum(np.fabs(x))),
}
class Print(Identity):
def __init__(self, rate=1, label=None, fprop=True, bprop=False,
measures={}):
self.i = 0
self.rate = rate
self.label = label
self.print_fprop = fprop
self.print_bprop = bprop
self.measures = measures
def setup(self):
super(Print, self).setup()
if self.label is None:
self.label = self.x.__class__.__name__
def _message(self, val):
msg = self.label + ' '
for name, (s, fun) in dict(_measures, **self.measures).items():
msg += ' ' + name + ':' + (s % fun(val))
return msg
def fprop(self):
super(Print, self).fprop()
self.i += 1
if self.print_fprop and (self.i-1) % self.rate == 0:
print(self._message(np.array(self.out)))
def bprop(self):
if self.print_bprop and (self.i-1) % self.rate == 0:
print(self._message(np.array(self.out)))
super(Print, self).bprop()
| [
"anders.bll@gmail.com"
] | anders.bll@gmail.com |
6fff296d0c4099761ec2cf15b9a4f8bf629a1a65 | 947e71b34d21f3c9f5c0a197d91a880f346afa6c | /ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/service_advisor.py | bb93a78ea3459b895c9e4d4937a9cde5a51bdf2d | [
"MIT",
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | liuwenru/Apache-Ambari-ZH | 4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0 | 7879810067f1981209b658ceb675ac76e951b07b | refs/heads/master | 2023-01-14T14:43:06.639598 | 2020-07-28T12:06:25 | 2020-07-28T12:06:25 | 223,551,095 | 38 | 44 | Apache-2.0 | 2023-01-02T21:55:10 | 2019-11-23T07:43:49 | Java | UTF-8 | Python | false | false | 6,392 | py | #!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python imports
import imp
import os
import traceback
import inspect
# Local imports
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
if "BASE_SERVICE_ADVISOR" in os.environ:
PARENT_FILE = os.environ["BASE_SERVICE_ADVISOR"]
try:
with open(PARENT_FILE, 'rb') as fp:
service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
traceback.print_exc()
print "Failed to load parent"
class ZookeeperServiceAdvisor(service_advisor.ServiceAdvisor):
def __init__(self, *args, **kwargs):
self.as_super = super(ZookeeperServiceAdvisor, self)
self.as_super.__init__(*args, **kwargs)
self.initialize_logger("ZookeeperServiceAdvisor")
self.modifyMastersWithMultipleInstances()
self.modifyCardinalitiesDict()
self.modifyHeapSizeProperties()
self.modifyNotValuableComponents()
self.modifyComponentsNotPreferableOnServer()
self.modifyComponentLayoutSchemes()
def modifyMastersWithMultipleInstances(self):
"""
Modify the set of masters with multiple instances.
Must be overriden in child class.
"""
self.mastersWithMultipleInstances.add("ZOOKEEPER_SERVER")
def modifyCardinalitiesDict(self):
"""
Modify the dictionary of cardinalities.
Must be overriden in child class.
"""
self.cardinalitiesDict["ZOOKEEPER_SERVER"] = {"min": 3}
def modifyHeapSizeProperties(self):
"""
Modify the dictionary of heap size properties.
Must be overriden in child class.
"""
self.heap_size_properties = {"ZOOKEEPER_SERVER": [{"config-name": "zookeeper-env",
"property": "zk_server_heapsize",
"default": "1024m"}]}
def modifyNotValuableComponents(self):
"""
Modify the set of components whose host assignment is based on other services.
Must be overriden in child class.
"""
# Nothing to do
pass
def modifyComponentsNotPreferableOnServer(self):
"""
Modify the set of components that are not preferable on the server.
Must be overriden in child class.
"""
# Nothing to do
pass
def modifyComponentLayoutSchemes(self):
"""
Modify layout scheme dictionaries for components.
The scheme dictionary basically maps the number of hosts to
host index where component should exist.
Must be overriden in child class.
"""
# Nothing to do
pass
def getServiceComponentLayoutValidations(self, services, hosts):
"""
Get a list of errors. Zookeeper does not have any validations in this version.
"""
self.logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
(self.__class__.__name__, inspect.stack()[0][3]))
return self.getServiceComponentCardinalityValidations(services, hosts, "ZOOKEEPER")
def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
"""
Recommend configurations to set. Zookeeper does not have any recommendations in this version.
"""
self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
(self.__class__.__name__, inspect.stack()[0][3]))
self.recommendConfigurations(configurations, clusterData, services, hosts)
def recommendConfigurations(self, configurations, clusterData, services, hosts):
"""
Recommend configurations for this service.
"""
self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
(self.__class__.__name__, inspect.stack()[0][3]))
self.logger.info("Setting zoo.cfg to default dataDir to /hadoop/zookeeper on the best matching mount")
zk_mount_properties = [
("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
]
self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
"""
Validate configurations for the service. Return a list of errors.
"""
self.logger.info("Class: %s, Method: %s. Validating Configurations." %
(self.__class__.__name__, inspect.stack()[0][3]))
items = []
# Example of validating by calling helper methods
'''
configType = "zookeeper-env"
method = self.someMethodInThisClass
resultItems = self.validateConfigurationsForSite(configurations, recommendedDefaults, services, hosts, configType, method)
items.extend(resultItems)
method = self.anotherMethodInThisClass
resultItems = self.validateConfigurationsForSite(configurations, recommendedDefaults, services, hosts, configType, method)
items.extend(resultItems)
'''
return items
'''
def someMethodInThisClass(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = []
validationItems.append({"config-name": "zookeeper-env", "item": self.getErrorItem("My custom message 1")})
return self.toConfigurationValidationProblems(validationItems, "zookeeper-env")
def anotherMethodInThisClass(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = []
validationItems.append({"config-name": "zookeeper-env", "item": self.getErrorItem("My custom message 2")})
return self.toConfigurationValidationProblems(validationItems, "zookeeper-env")
''' | [
"ijarvis@sina.com"
] | ijarvis@sina.com |
7890d7748d77cc57c6b36016d9bbadd55f92a3b6 | 063fbbeb14bec58e25147484bfeae0d73124525e | /python/common.py | 0fd5366a8677754812c037d6e85ffb518fb4836e | [
"MIT"
] | permissive | mit-gfx/py_pbrt | 2bd9f60ee2fa5a35d338259747254b39f972029d | 853382447da449be6dcc38ba0f570508600f4698 | refs/heads/master | 2023-03-05T10:55:12.107096 | 2021-02-21T05:19:12 | 2021-02-21T05:19:12 | 340,816,570 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,836 | py | import numpy as np
def ndarray(val):
return np.asarray(val, dtype=np.float64)
###############################################################################
# Pretty print.
###############################################################################
def print_error(*message):
print('\033[91m', 'ERROR ', *message, '\033[0m')
def print_ok(*message):
print('\033[92m', *message, '\033[0m')
def print_warning(*message):
print('\033[93m', *message, '\033[0m')
def print_info(*message):
print('\033[96m', *message, '\033[0m')
class PrettyTabular(object):
def __init__(self, head):
self.head = head
def head_string(self):
line = ''
for key, value in self.head.items():
if 's' in value:
dummy = value.format('0')
else:
dummy = value.format(0)
span = max(len(dummy), len(key)) + 2
key_format = '{:^' + str(span) + '}'
line += key_format.format(key)
return line
def row_string(self, row_data):
line = ''
for key, value in self.head.items():
data = value.format(row_data[key])
span = max(len(key), len(data)) + 2
line += ' ' * (span - len(data) - 1) + data + ' '
return line
###############################################################################
# Folder.
###############################################################################
import shutil
import os
def create_folder(folder_name, exist_ok=False):
if not exist_ok and os.path.isdir(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name, exist_ok=exist_ok)
def delete_folder(folder_name):
shutil.rmtree(folder_name)
###############################################################################
# Rotation.
###############################################################################
# Input (rpy): a 3D vector (roll, pitch, yaw).
# Output (R): a 3 x 3 rotation matrix.
def rpy_to_rotation(rpy):
rpy = ndarray(rpy).ravel()
assert rpy.size == 3
roll, pitch, yaw = rpy
cr, sr = np.cos(roll), np.sin(roll)
R_roll = ndarray([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])
cp, sp = np.cos(pitch), np.sin(pitch)
R_pitch = ndarray([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])
cy, sy = np.cos(yaw), np.sin(yaw)
R_yaw = ndarray([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])
return R_yaw @ R_pitch @ R_roll
###############################################################################
# Export videos.
###############################################################################
import imageio
def export_gif(folder_name, gif_name, fps, name_prefix=''):
    """Assemble the .png frames in ``folder_name`` into a GIF at ``gif_name``.

    Only regular files whose name starts with ``name_prefix`` and ends with
    '.png' are used; frames are ordered by file name. ``fps <= 0`` falls back
    to imageio's default frame rate.
    """
    frames = []
    for entry in os.listdir(folder_name):
        full_path = os.path.join(folder_name, entry)
        if not os.path.isfile(full_path):
            continue
        if entry.startswith(name_prefix) and entry.endswith('.png'):
            frames.append(full_path)
    frames.sort()
    # Read images.
    images = [imageio.imread(p) for p in frames]
    if fps > 0:
        imageio.mimsave(gif_name, images, fps=fps)
    else:
        imageio.mimsave(gif_name, images)
from pathlib import Path
def export_mp4(folder_name, mp4_name, fps, name_prefix=''):
    """Assemble the .png frames in ``folder_name`` into an mpeg4 video.

    Only regular files whose name starts with ``name_prefix`` and ends with
    '.png' are used; frames are ordered by file name, staged into a temporary
    folder as a zero-padded sequence, and handed to ffmpeg (which must be on
    PATH). An existing ``mp4_name`` is overwritten (ffmpeg '-y').
    """
    import subprocess

    frame_names = [os.path.join(folder_name, f) for f in os.listdir(folder_name)
        if os.path.isfile(os.path.join(folder_name, f)) and f.startswith(name_prefix) and f.endswith('.png')]
    frame_names = sorted(frame_names)
    # ffmpeg needs a contiguous, uniformly named sequence, so stage the
    # frames in a temporary folder as 00000000.png, 00000001.png, ...
    tmp_folder = Path('_export_mp4')
    create_folder(tmp_folder, exist_ok=False)
    for i, f in enumerate(frame_names):
        shutil.copyfile(f, tmp_folder / '{:08d}.png'.format(i))
    # Fix: the previous os.system call concatenated paths into a shell
    # command string, which broke on paths containing spaces or shell
    # metacharacters. An argument list avoids the shell entirely.
    # check=False mirrors os.system, which ignored ffmpeg's exit status.
    subprocess.run(['ffmpeg', '-r', str(fps), '-i', str(tmp_folder / '%08d.png'),
                    '-vcodec', 'mpeg4', '-y', str(mp4_name)], check=False)
    # Delete temporary folder.
    delete_folder(tmp_folder)
"taodu@csail.mit.edu"
] | taodu@csail.mit.edu |
797144fdad73105f67b0ff7bb1599091430fcaac | 85235f02e9674877cfcca8976076e26e39e1ca9e | /ForMark/singleton.py | 07cc54c959af4c7a6a368a0152eb749c0a0c22f5 | [] | no_license | zhaolixiang/ForMark | dd4e4bd2effb0d5085001c8e88d4a9811c100698 | 9bb83348fbb84addca2a40d5f9edeeec4bf9e5c3 | refs/heads/master | 2022-12-23T11:17:18.260110 | 2020-04-24T08:14:57 | 2020-04-24T08:14:57 | 250,973,339 | 0 | 0 | null | 2022-12-08T04:01:47 | 2020-03-29T07:01:47 | Python | UTF-8 | Python | false | false | 226 | py | # 单例模式
class Singleton(object):
# 重写
def __call__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
cls._instance = super().__call__(*args, **kwargs)
return cls._instance
| [
"1782980833@qq.com"
] | 1782980833@qq.com |
e7379a055c87d6291e48f91d2fb474d83a71a427 | 2f44cecd8fc447c9e2f2d9f55abdea36ebb40cc5 | /剑指offer2/11.py | a2ed7b1898b111ad89eba9553fab224aca071393 | [] | no_license | yuzumei/leetcode | 751a234b429131169e3eaf4594ffeb3b94f6ab34 | b6708b03c92ec92e89fc7ecf13f1995dee346657 | refs/heads/master | 2023-07-28T05:48:53.192948 | 2021-09-11T06:16:07 | 2021-09-11T06:16:07 | 365,780,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding:utf-8 -*-
# @Author : Yuzu
# @Time : 2021/9/5 21:09
# @file : 11.py
class Solution:
def findMaxLength(self, nums) -> int:
import collections
memo = collections.defaultdict(list)
memo[0].append(-1)
for i in range(len(nums)):
if nums[i] == 0:
nums[i] = -1
if i >= 1:
nums[i] += nums[i - 1]
memo[nums[i]].append(i)
ans = 0
for item in memo:
ans = max(memo[item][-1] - memo[item][0], ans)
return ans
x =Solution()
print(x.findMaxLength([0,1,0])) | [
"973802530@qq.com"
] | 973802530@qq.com |
16c24fcee56307c226d8c936e065ef10f9edcb43 | 8f35dbebd8fe0fe7eacb2bbcffa6e8c96c9bb506 | /inc/console.py | 701b71839e4509fe01a9feb68bd8fd0ddfba0798 | [
"MIT"
] | permissive | tulibraries/combine | 57280d374a622543ef34da479c721b0b935230aa | eb100ea17193d65485aa6c4a7f05a41b4cab7515 | refs/heads/master | 2020-07-03T16:54:23.618414 | 2019-09-26T15:53:59 | 2019-09-26T15:53:59 | 201,976,306 | 1 | 0 | MIT | 2019-09-26T16:09:16 | 2019-08-12T17:18:32 | JavaScript | UTF-8 | Python | false | false | 755 | py | # convenience methods for Django's shell_plus
import os
from core.models import *
# get Record instance
def get_r(id):
return Record.objects.get(id=id)
# get Job instance
def get_j(id):
return Job.objects.get(pk=int(id))
# get CombineJob instance
def get_cj(id):
return CombineJob.get_combine_job(int(id))
# get RecordGroup instance
def get_rg(id):
return RecordGroup.objects.get(pk=int(id))
# get Organization instance
def get_o(id):
return Organization.objects.get(pk=int(id))
# tail livy
def tail_livy():
os.system('tail -f /var/log/livy/livy.stderr')
# tail django
def tail_celery():
os.system('tail -f /var/log/celery.stdout')
# get StateIO instance
def get_sio(id):
return StateIO.objects.get(id=id)
| [
"ghukill@gmail.com"
] | ghukill@gmail.com |
16f565b451e48fa90ec01e19fbee6dd257f35ac6 | f2ff79ab3d0b1328c66b834826cd311dc8eb5cc2 | /tests/test_clilib.py | 21bc633216f98ccef50a0228ce464d3a38575341 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | sambfloyd/clldutils | 2bc8251d94105f46c192de156741e623c7c33b62 | 92ab4cab4f9a39e0d6f20f09ef75e0ea4a11d025 | refs/heads/master | 2021-01-16T08:58:14.877413 | 2020-02-14T10:30:47 | 2020-02-14T10:30:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,218 | py | import pathlib
import argparse
import importlib
import pytest
from clldutils.clilib import *
from clldutils.path import sys_path
def test_get_parser_and_subparser():
assert get_parser_and_subparsers('a')
def test_register_subcommands(fixtures_dir, mocker):
with sys_path(fixtures_dir):
pkg = importlib.import_module('commands')
class EP:
name = 'abc'
def load(self):
return pkg
mocker.patch(
'clldutils.clilib.pkg_resources',
mocker.Mock(iter_entry_points=mocker.Mock(return_value=[EP()])))
parser, sp = get_parser_and_subparsers('a')
res = register_subcommands(sp, pkg, entry_point='x')
assert 'cmd' in res
assert 'abc.cmd' in res
help = None
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
if choice == 'cmd':
help = subparser.format_help()
# Make sure a RawDescription formatter is used:
assert 'Test command\n- formatted' in help
# Make sure default values are formatted:
assert 'o (default: x)' in help
res = register_subcommands(sp, pkg, formatter_class=argparse.HelpFormatter)
help = None
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
if choice == 'cmd':
help = subparser.format_help()
# Make sure a RawDescription formatter is used:
assert 'Test command\n- formatted' not in help
# Make sure default values are formatted:
assert 'o (default: x)' not in help
def test_register_subcommands_error(fixtures_dir, mocker, recwarn):
with sys_path(fixtures_dir):
pkg = importlib.import_module('commands')
class EP:
name = 'abc'
def load(self):
raise ImportError()
mocker.patch(
'clldutils.clilib.pkg_resources',
mocker.Mock(iter_entry_points=mocker.Mock(return_value=[EP()])))
_, sp = get_parser_and_subparsers('a')
res = register_subcommands(sp, pkg, entry_point='x')
assert 'abc.cmd' not in res
assert recwarn.pop(UserWarning)
def test_ArgumentParser(capsys):
def cmd(args):
"""
docstring
"""
if len(args.args) < 1:
raise ParserError('not enough arguments')
print(args.args[0])
parser = ArgumentParserWithLogging('pkg', cmd)
parser.main(args=['help', 'cmd'])
out, err = capsys.readouterr()
assert 'docstring' in out
parser.main(args=['cmd', 'arg'])
out, err = capsys.readouterr()
assert 'arg' in out
assert parser.main(args=['cmd', 'arg']) == 0
parser.main(args=['cmd'])
out, err = capsys.readouterr()
assert 'not enough arguments' in out
assert parser.main(args=['x']) != 0
out, err = capsys.readouterr()
assert out.startswith('invalid')
@command()
def ls(args):
"""
my name is ls
"""
return
@command(name='list', usage='my name is {0}'.format('list'))
def f(args):
"""
"""
return
parser = ArgumentParserWithLogging('pkg')
parser.main(args=['help', 'ls'])
out, err = capsys.readouterr()
assert 'my name is ls' in out
parser.main(args=['help', 'list'])
out, err = capsys.readouterr()
assert 'my name is list' in out
assert parser.main(args=['ls', 'arg']) == 0
assert parser.main(args=['list', 'arg']) == 0
def test_cmd_error():
from clldutils.clilib import ArgumentParser
def cmd(args):
raise ValueError
parser = ArgumentParser('pkg', cmd)
with pytest.raises(ValueError):
parser.main(args=['cmd'])
assert parser.main(args=['cmd'], catch_all=True) == 1
def test_confirm(capsys, mocker):
from clldutils.clilib import confirm
mocker.patch('clldutils.clilib.input', mocker.Mock(return_value=''))
assert confirm('a?')
assert not confirm('a?', default=False)
mocker.patch('clldutils.clilib.input', mocker.Mock(side_effect=['x', 'y']))
assert confirm('a?')
out, err = capsys.readouterr()
assert 'Please respond' in out
def test_Table(capsys):
with Table(argparse.Namespace(format='simple'), 'a') as t:
t.append(['x'])
out, _ = capsys.readouterr()
assert out == 'a\n---\nx\n'
def test_add_format():
parser, _ = get_parser_and_subparsers('c')
add_format(parser)
def test_PathType(tmpdir):
parser = argparse.ArgumentParser()
parser.add_argument('a', type=PathType(type='file'))
args = parser.parse_args([__file__])
assert isinstance(args.a, pathlib.Path)
with pytest.raises(SystemExit):
parser.parse_args(['x'])
with pytest.raises(SystemExit):
parser.parse_args([str(tmpdir)])
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
e24e1885f44d205d6a98fab50b4e827710fcbca5 | afcf8a43e0e20c8748a6fe6629235c993c563b40 | /applications/audit-trail/ow/virtualenv/bin/rst2s5.py | 16eeb27f5dba664496b497d58715c8f47286b027 | [] | no_license | jjkotni/faas-benchmarks | dafd0857809e4ff7b1701646799d03517bc7afc2 | 3a22603bc4340d39e610921514d4f75c9f95aec0 | refs/heads/master | 2022-12-12T17:07:47.183247 | 2020-05-31T04:04:53 | 2020-05-31T04:04:53 | 247,883,437 | 0 | 1 | null | 2022-12-08T05:27:06 | 2020-03-17T05:02:32 | Python | UTF-8 | Python | false | false | 705 | py | #!/home/kjj/faas-benchmarks/applications/audit-trail/batching/virtualenv/bin/python3.5
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
| [
"kjjswaroop@gmail.com"
] | kjjswaroop@gmail.com |
ffe6d854d8d1aedcc34bedb819dc158015905c8e | bb970bbe151d7ac48d090d86fe1f02c6ed546f25 | /arouse/_dj/conf/__init__.py | 3ecd0cc2268e18e4daa2d70e2fef331cc127a941 | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | thektulu/arouse | 95016b4028c2b8e9b35c5062a175ad04286703b6 | 97cadf9d17c14adf919660ab19771a17adc6bcea | refs/heads/master | 2021-01-13T12:51:15.888494 | 2017-01-09T21:43:32 | 2017-01-09T21:43:32 | 78,466,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,904 | py | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from arouse._dj.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import importlib
import os
import time
from arouse._dj.conf import global_settings
from arouse._dj.core.exceptions import ImproperlyConfigured
from arouse._dj.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return sorted(
s for s in list(self.__dict__) + dir(self.default_settings)
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return (deleted or set_locally or set_on_default)
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
| [
"michal.s.zukowski@gmail.com"
] | michal.s.zukowski@gmail.com |
2dbb8694809973b8675987ec2313935492d5ead1 | 4e1ff54c2f2a21fd6d8e34f2bc3d6dc9990ffa0e | /model/batch_norm_default.py | 3a3c2b7b624283639aa5d6ca6003cb87b3f4cde4 | [] | no_license | haoxiangsnr/A-Convolutional-Recurrent-Neural-Network-for-Real-Time-Speech-Enhancement | 2a037da46f2c89c368cd41b2cba89519cdf471cb | 31610a5b6b398b90ae6b42701ee6cf0e8dcfe871 | refs/heads/master | 2021-07-13T09:08:26.370828 | 2020-09-05T00:50:51 | 2020-09-05T00:50:51 | 201,885,023 | 259 | 55 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
class CRNN(nn.Module):
"""
Input: [batch size, channels=1, T, n_fft]
Output: [batch size, T, n_fft]
"""
def __init__(self):
super(CRNN, self).__init__()
# Encoder
self.bn0 = nn.BatchNorm2d(num_features=1)
self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(1, 3), stride=(1, 2))
self.bn1 = nn.BatchNorm2d(num_features=16)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(1, 3), stride=(1, 2))
self.bn2 = nn.BatchNorm2d(num_features=32)
self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(1, 3), stride=(1, 2))
self.bn3 = nn.BatchNorm2d(num_features=64)
self.conv4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(1, 3), stride=(1, 2))
self.bn4 = nn.BatchNorm2d(num_features=128)
self.conv5 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(1, 3), stride=(1, 2))
self.bn5 = nn.BatchNorm2d(num_features=256)
# LSTM
self.LSTM1 = nn.LSTM(input_size=1024, hidden_size=1024, num_layers=2, batch_first=True)
# Decoder
self.convT1 = nn.ConvTranspose2d(in_channels=512, out_channels=128, kernel_size=(1, 3), stride=(1, 2))
self.bnT1 = nn.BatchNorm2d(num_features=128)
self.convT2 = nn.ConvTranspose2d(in_channels=256, out_channels=64, kernel_size=(1, 3), stride=(1, 2))
self.bnT2 = nn.BatchNorm2d(num_features=64)
self.convT3 = nn.ConvTranspose2d(in_channels=128, out_channels=32, kernel_size=(1, 3), stride=(1, 2))
self.bnT3 = nn.BatchNorm2d(num_features=32)
# output_padding为1,不然算出来是79
self.convT4 = nn.ConvTranspose2d(in_channels=64, out_channels=16, kernel_size=(1, 3), stride=(1, 2), output_padding=(0, 1))
self.bnT4 = nn.BatchNorm2d(num_features=16)
self.convT5 = nn.ConvTranspose2d(in_channels=32, out_channels=1, kernel_size=(1, 3), stride=(1, 2))
self.bnT5 = nn.BatchNorm2d(num_features=1)
def forward(self, x):
# conv
# (B, in_c, T, F)
x.unsqueeze_(1)
x = self.bn0(x)
x1 = F.elu(self.bn1(self.conv1(x)))
x2 = F.elu(self.bn2(self.conv2(x1)))
x3 = F.elu(self.bn3(self.conv3(x2)))
x4 = F.elu(self.bn4(self.conv4(x3)))
x5 = F.elu(self.bn5(self.conv5(x4)))
# reshape
out5 = x5.permute(0, 2, 1, 3)
out5 = out5.reshape(out5.size()[0], out5.size()[1], -1)
# lstm
lstm, (hn, cn) = self.LSTM1(out5)
# reshape
output = lstm.reshape(lstm.size()[0], lstm.size()[1], 256, -1)
output = output.permute(0, 2, 1, 3)
# ConvTrans
res = torch.cat((output, x5), 1)
res1 = F.elu(self.bnT1(self.convT1(res)))
res1 = torch.cat((res1, x4), 1)
res2 = F.elu(self.bnT2(self.convT2(res1)))
res2 = torch.cat((res2, x3), 1)
res3 = F.elu(self.bnT3(self.convT3(res2)))
res3 = torch.cat((res3, x2), 1)
res4 = F.elu(self.bnT4(self.convT4(res3)))
res4 = torch.cat((res4, x1), 1)
# (B, o_c, T. F)
res5 = F.relu(self.bnT5(self.convT5(res4)))
return res5.squeeze()
| [
"haoxiangsnr@gmail.com"
] | haoxiangsnr@gmail.com |
824ffbe6bcb7970e95b94c794a7386665de41747 | 3996539eae965e8e3cf9bd194123989741825525 | /PhysicsTools/JetMCAlgos/matchGenHFHadron_cfi.py | dc80c83017b34f44fcee282a9cc724b02a224333 | [] | no_license | cms-sw/cmssw-cfipython | 01990ea8fcb97a57f0b0cc44a8bf5cde59af2d98 | 25ee4c810103c4a507ca1b949109399a23a524c5 | refs/heads/CMSSW_11_2_X | 2023-09-01T16:56:00.658845 | 2022-06-20T22:49:19 | 2022-06-20T22:49:19 | 136,184,115 | 1 | 0 | null | 2022-10-19T14:04:01 | 2018-06-05T13:47:28 | Python | UTF-8 | Python | false | false | 336 | py | import FWCore.ParameterSet.Config as cms
matchGenHFHadron = cms.EDProducer('GenHFHadronMatcher',
genParticles = cms.required.InputTag,
jetFlavourInfos = cms.required.InputTag,
noBBbarResonances = cms.bool(True),
onlyJetClusteredHadrons = cms.bool(False),
flavour = cms.int32(5),
mightGet = cms.optional.untracked.vstring
)
| [
"cmsbuild@cern.ch"
] | cmsbuild@cern.ch |
6cd99f4cccde01fec5a66af6c83a998cd6dcf091 | e50954bb35fbc377a1c9a6842fa4ceb6b6234b39 | /zips/plugin.video.wargames/resources/lib/scrapersource/scenedown_mv_tv.py | 9fd1378892301541c1acaec6be953b03008cc793 | [] | no_license | staycanuca/BUILDONLY | f213e242ed869475668933ac7b6ee2d4e8508bbc | f87684bf0111a1079b0e1184e1bfca3f2c5348ed | refs/heads/master | 2021-05-09T04:54:22.747154 | 2018-01-28T08:55:07 | 2018-01-28T08:55:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,537 | py | # -*- coding: utf-8 -*-
'''
Add-on
Copyright (C) 2016
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.scrapermods import cleantitle
from resources.lib.scrapermods import client
from resources.lib.scrapermods import debrid
class source:
def __init__(self):
self.domains = ['scenedown.in']
self.base_link = 'http://scenedown.in'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
u = client.parseDOM(post, 'enclosure', ret='url', attrs={'type': 'video.+?'})
if not u: raise Exception()
c = client.parseDOM(post, 'content.+?')[0]
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
s = s[0] if s else '0'
u = client.parseDOM(c, 'a', ret='href')
items += [(t, i, s) for i in u]
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Scenedown', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
| [
"biglad@mgawow.co.uk"
] | biglad@mgawow.co.uk |
dc758b6a7277e9de59e2c57cdf69fdd4bce8dd25 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/market_tools/tools/complain/mobile_views.py | 98be8a1fab60f3f9c0eaab6a009dbd7b4ff0c26a | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | # -*- coding: utf-8 -*-
__author__ = 'chuter'
import os
from datetime import datetime, timedelta
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.shortcuts import render_to_response
from core.exceptionutil import unicode_full_stack
from modules.member.util import get_member
from models import *
from webapp.modules.cms.models import SpecialArticle
from modules.member.models import *
template_path_items = os.path.dirname(__file__).split(os.sep)
TEMPLATE_DIR = '%s/templates' % template_path_items[-1]
COUNT_PER_PAGE = 15
def get_settings(request):
settings_id = int(request.GET['settings_id'])
try:
member = request.member
except:
member = None
#default_img = SpecialArticle.objects.filter(owner=request.project.owner_id, name='not_from_weixin')[0].content if SpecialArticle.objects.filter(owner=request.project.owner_id, name='not_from_weixin').count()>0 else None
hide_non_member_cover = False
try:
member_complain_settings = MemberComplainSettings.objects.get(id=settings_id)
if member_complain_settings.is_non_member:
hide_non_member_cover = True
except:
c = RequestContext(request, {
'is_deleted_data': True
})
return render_to_response('%s/complain/webapp/member_complain.html' % TEMPLATE_DIR, c)
request.should_hide_footer = True
c = RequestContext(request, {
'page_title': u'用户反馈',
'member': member,
'member_complain_settings': member_complain_settings,
'hide_non_member_cover' : hide_non_member_cover
})
return render_to_response('%s/complain/webapp/member_complain.html' % TEMPLATE_DIR, c)
| [
"jiangzhe@weizoom.com"
] | jiangzhe@weizoom.com |
4d8da9b345c887bd27fb0d8b57d52da3c1595cb0 | 9ed325dd1cf60458135796b0df32bf0877481134 | /marketplace/migrations/0001_initial.py | c93fc67edd1ee09ca220d5ba40a15a30283a48ad | [] | no_license | codingspider/konetos | ca64b25cb8fa43a44913b9e58067c271ec0d1756 | d484284287e16f807530af11ce1d2918e05d3d42 | refs/heads/master | 2023-02-10T04:48:53.764000 | 2020-12-30T12:13:30 | 2020-12-30T12:13:30 | 324,984,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | # Generated by Django 2.2 on 2020-12-24 06:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('status', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('price', models.FloatField(blank=True, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('user_id', models.IntegerField(blank=True, null=True)),
('qty', models.IntegerField(blank=True, null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('image', models.ImageField(blank=True, null=True, upload_to='products/')),
('price', models.FloatField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, null=True)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='marketplace.Category')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ProductSlider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='sliders/')),
('status', models.CharField(blank=True, max_length=255, null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, null=True)),
('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='marketplace.Product')),
],
),
]
| [
"engrokon.rok@gmail.com"
] | engrokon.rok@gmail.com |
39597b6730f6fb6296e2187603b26ceff65fd6e2 | 1944f0b25a19080832933b78d9c191ceb212a62b | /minggu-13/praktik/src/8_7.py | 237368f6f3fbc297c07bd72a1f6199bb694db043 | [] | no_license | satriang/bigdata | b0432683dde5b3cb6c3b2e22c8ce80530b32cb67 | 4075ced73978ae7d1169a42ffead640b94b0fe04 | refs/heads/master | 2020-03-28T15:34:43.885548 | 2019-01-07T22:41:32 | 2019-01-07T22:41:32 | 148,607,481 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# In[7]:
state_fruit = pd.read_csv('data/state_fruit.csv', index_col=0)
state_fruit.stack() .rename_axis(['state', 'fruit']) .reset_index(name='weight')
# In[ ]:
| [
"ngestusatria7@gmail.com"
] | ngestusatria7@gmail.com |
f202eedd79a9e367604c1e9a49f05eb86ad60345 | 5dbf8039a281c4ba13e9cb19453a4bace0f2b6bd | /billing/admin.py | 76a5c2e8c6c9d2b1f7c499cb34b4b446e54278b0 | [] | no_license | adamtlord/ease | e605d901fc944d48212c4283998e1b4995c09324 | 51e7e0e79e21dad6fa7bdd360cd0a5c0ba3c9d41 | refs/heads/master | 2021-01-18T22:24:12.146024 | 2019-07-30T17:48:14 | 2019-07-30T17:48:14 | 72,482,564 | 0 | 0 | null | 2019-01-02T17:15:51 | 2016-10-31T22:15:56 | CSS | UTF-8 | Python | false | false | 1,242 | py | from django.contrib import admin
from billing.models import Plan, StripeCustomer, Invoice, GroupMembership, Balance, Gift, Subscription
class SubscriptionAdmin(admin.ModelAdmin):
readonly_fields = ('date_created',)
class GroupMembershipAdmin(admin.ModelAdmin):
raw_id_fields = ("address", "user", "ride_account", "subscription_account")
class StripeCustomerAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "stripe_id", "last_4_digits"]
search_fields = ["first_name",
"last_name",
"subscription_customer__first_name",
"subscription_customer__last_name",
"ride_customer__first_name",
"ride_customer__last_name",
"customer__first_name",
"customer__last_name"]
raw_id_fields = ["customer"]
class BalanceAdmin(admin.ModelAdmin):
raw_id_fields = ['stripe_customer']
admin.site.register(Plan)
admin.site.register(StripeCustomer, StripeCustomerAdmin)
admin.site.register(Invoice)
admin.site.register(GroupMembership, GroupMembershipAdmin)
admin.site.register(Balance, BalanceAdmin)
admin.site.register(Gift)
admin.site.register(Subscription, SubscriptionAdmin)
| [
"adam.lord@gmail.com"
] | adam.lord@gmail.com |
ba891550744ddb67f7c86b7185ac71745e52b0c3 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/tutorial/gui/Scaleform/meta/TutorialBattleStatisticMeta.py | 15582a969c3a38f9a7ba3204478e72fb0e9e2039 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 957 | py | # 2017.05.04 15:28:00 Střední Evropa (letní čas)
# Embedded file name: scripts/client/tutorial/gui/Scaleform/meta/TutorialBattleStatisticMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class TutorialBattleStatisticMeta(AbstractWindowView):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends AbstractWindowView
"""
def restart(self):
self._printOverrideError('restart')
def showVideoDialog(self):
self._printOverrideError('showVideoDialog')
def as_setDataS(self, data):
if self._isDAAPIInited():
return self.flashObject.as_setData(data)
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\tutorial\gui\Scaleform\meta\TutorialBattleStatisticMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:28:00 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
62b8df2cdf56956f30f95202944153f596347fa4 | f98f0ae6a318c8efd8aeac770b000534d1c6ab23 | /da_rnn/metrics.py | df615fe802d6bb0b9419a30454e1d4242d5d2a0b | [
"Apache-2.0"
] | permissive | kimsse0430/dual_stage_attention_rnn | aff71111785af91c47f371b785dab5b41fc0d4e7 | 08744ee2cfa3dc71fb1c9da895e879708cea805e | refs/heads/master | 2022-04-19T01:47:21.123150 | 2020-03-09T15:27:26 | 2020-03-09T15:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | """ Metrics utilities """
import numpy as np
class Metrics:
""" Metrics to evaluate prediction performance """
def __init__(self):
pass
def get_metrics_dict(self, predictions, labels):
""" Return the metrics result in dict """
res = dict()
res['rmse'] = self.rmse(predictions, labels)
res['mae'] = self.mae(predictions, labels)
res['mape'] = self.mape(predictions, labels)
return res
@staticmethod
def rmse(predictions, labels):
""" RMSE ratio """
return np.sqrt(np.mean(np.subtract(predictions, labels) ** 2))
@staticmethod
def mae(predictions, labels):
""" MAE ratio """
return np.mean(np.abs(predictions - labels))
@staticmethod
def mape(predictions, labels):
""" MAPE ratio """
return np.mean(np.abs(np.subtract(predictions, labels) / labels))
@staticmethod
def metrics_dict_to_str(metrics_dict):
""" Convert metrics to a string to show in the console """
eval_info = ''
for key, value in metrics_dict.items():
eval_info += '{0} : {1}, '.format(key, value)
return eval_info[:-1]
| [
"siqiao_xue@163.com"
] | siqiao_xue@163.com |
3710d154cf29a95d73ae991be6631907e17e1022 | da2993b3aaa18bb35f42886b1f4d7f938d055ff5 | /backend/mysite/mysite/urls.py | c0f738c13e004c447355fc5fac05e8af36dd7f2c | [] | no_license | karthikeyansa/React_Django_AuthToken | 00838c6ef679b589ad38aba864b21a64478a33c4 | cab17eff3ef75ade389c33c5f9109fdbc366a8d3 | refs/heads/master | 2023-02-09T23:10:24.311347 | 2021-01-01T18:34:53 | 2021-01-01T18:34:53 | 304,940,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from django.contrib import admin
from django.urls import path,include
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('admin/', admin.site.urls),
path('api/',include('app.urls')),
path('auth/',obtain_auth_token)
]
| [
"karthikeyansa39@gmail.com"
] | karthikeyansa39@gmail.com |
79c81670f0d6cb73535fd86bb43d6d32d320e3b6 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_Lag1Trend/cycle_0/ar_/test_artificial_128_None_Lag1Trend_0__0.py | c224a904afad41ec2d371e5e1389e07997af4970 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 263 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
0a37a74c812523a59805fb7704eefccb904cbfb9 | 35fa43655e18d18f2de898bce3a96456b05468de | /fernet_fields/test/settings/sqlite.py | 7ded7cba6f65dacc0789763741bf0df033f41288 | [
"BSD-3-Clause"
] | permissive | singular-labs/django-fernet-fields | b077588ba5f99383b9312c1ebbcb2c104be0f1e4 | f2d57ef179409cee554a177a9f7d90974acd14ed | refs/heads/master | 2023-08-17T18:59:58.627042 | 2023-08-14T12:06:48 | 2023-08-14T12:06:48 | 253,502,252 | 0 | 0 | BSD-3-Clause | 2023-08-14T12:06:50 | 2020-04-06T13:13:54 | Python | UTF-8 | Python | false | false | 293 | py | from .base import * # noqa
import os
HERE = os.path.dirname(os.path.abspath(__file__))
DB = os.path.join(HERE, 'testdb.sqlite')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DB,
'TEST': {
'NAME': DB,
},
},
}
| [
"carl@oddbird.net"
] | carl@oddbird.net |
cb6d6b0ee4a364082c3ad8f3a2aa5e19fb59a36c | 8032f671147f62ce91d6a42be5bafebdfeb236f9 | /tests/test_01_dxf_entities/test_133_sun.py | 415a0407d23625c42466f7e16ff56c0d3ecf7231 | [
"MIT"
] | permissive | mamofejo/ezdxf | 3ebcd9afae06e53d56a8622f8406e2c9a95e4971 | bd5a08a85608360266eb8702d48638195c72c247 | refs/heads/master | 2023-02-26T22:04:48.798010 | 2021-02-05T14:06:28 | 2021-02-05T14:06:28 | 336,305,662 | 0 | 0 | MIT | 2021-02-05T15:29:09 | 2021-02-05T15:08:54 | null | UTF-8 | Python | false | false | 2,054 | py | # Copyright (c) 2019 Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
from ezdxf.entities.sun import Sun
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
SUN = """0
SUN
5
0
330
0
100
AcDbSun
90
1
290
1
63
7
421
16777215
40
1.0
291
1
91
2456922
92
43200
292
0
70
0
71
256
280
1
"""
@pytest.fixture
def entity():
return Sun.from_text(SUN)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'SUN' in ENTITY_CLASSES
def test_default_init():
entity = Sun()
assert entity.dxftype() == 'SUN'
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = Sun.new(handle='ABBA', owner='0', dxfattribs={
})
assert entity.dxf.version == 1
assert entity.dxf.status == 1
assert entity.dxf.color == 7
assert entity.dxf.true_color == 16777215
assert entity.dxf.intensity == 1
assert entity.dxf.shadows == 1
assert entity.dxf.julian_day == 2456922
assert entity.dxf.time == 43200
assert entity.dxf.daylight_savings_time == 0
assert entity.dxf.shadow_type == 0
assert entity.dxf.shadow_map_size == 256
assert entity.dxf.shadow_softness == 1
def test_load_from_text(entity):
assert entity.dxf.version == 1
assert entity.dxf.status == 1
assert entity.dxf.color == 7
assert entity.dxf.true_color == 16777215
assert entity.dxf.intensity == 1
assert entity.dxf.shadows == 1
assert entity.dxf.julian_day == 2456922
assert entity.dxf.time == 43200
assert entity.dxf.daylight_savings_time == 0
assert entity.dxf.shadow_type == 0
assert entity.dxf.shadow_map_size == 256
assert entity.dxf.shadow_softness == 1
def test_write_dxf():
entity = Sun.from_text(SUN)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(SUN)
assert result == expected
def test_sun():
doc = ezdxf.new('R2007')
sun = doc.objects.new_entity('SUN', {})
assert sun.dxftype() == 'SUN'
assert sun.dxf.version == 1
| [
"mozman@gmx.at"
] | mozman@gmx.at |
428acd38c50ae5ec8e63de2b7e9a6f24510905af | dd74129c42933062ca4a6304f9a715bd18f3806b | /setup.py | d40f460af6508730e4e85c2efdcba1be69c42c05 | [] | no_license | sensein/cmixf | 12d1873508835023a32e6898baee831ea2ef91f9 | 28bf47ee8c7d1ba4a5241bcc19563df75b578fb5 | refs/heads/master | 2022-07-31T15:59:03.878678 | 2020-05-25T20:55:12 | 2020-05-25T20:55:12 | 266,423,055 | 2 | 1 | null | 2020-05-25T20:55:13 | 2020-05-23T21:32:50 | Python | UTF-8 | Python | false | false | 715 | py | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""CMIXF parser
"""
import sys
from setuptools import setup
import versioneer
# Give setuptools a hint to complain if it's too old a version
# 30.3.0 allows us to put most metadata in setup.cfg
# Should match pyproject.toml
SETUP_REQUIRES = ["setuptools >= 30.3.0"]
# This enables setuptools to install wheel on-the-fly
SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
if __name__ == "__main__":
setup(
name="cmixf",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
setup_requires=SETUP_REQUIRES,
)
| [
"satra@mit.edu"
] | satra@mit.edu |
c80ae0ca01e06a0f390815cba89694ab583cb5b0 | 2edaaa8e2d11ac9ec02f3949e684fb5037719fbf | /Python/10 Days of Statistics/Central Limit Theorem 3.py | 9e56f0a395082cb5f24971407aca8733b87599f1 | [] | no_license | vipulsingh24/Hacker-Rank | 5419fb9b29780ad59fea96121a0d0888f1cdc152 | 789d72f5c3f6bf1536ab44c460c59733065823b7 | refs/heads/master | 2020-03-18T15:17:49.542451 | 2018-07-01T16:03:19 | 2018-07-01T16:03:19 | 134,899,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | '''
You have a sample of 100 values from a population with mean 500 and with standard deviation 80.
Compute the interval that covers the middle 95% of the distribution of the sample mean; in other words,
compute A and B such that P(A < x < B) = 0.95. Use the value of z = 1.96.
'''
import math
n = 100 # Sample Size
m = 500 # Mean
sd = 80 # Standard Deviation
z = 1.96
moe = z * (sd / math.sqrt(n)) # Margin of Error
# Lower level (A)
print(round(m - moe, 2))
print(round(m + moe, 2))
| [
"letsmailvipul@gmail.com"
] | letsmailvipul@gmail.com |
c96c4f6fc13242502ce3b163d701be75a220f796 | 97dae48fa3c613a84655c1c0b12cdc0db2c555bb | /algorithm/bitwise/add_two.py | de1cbc069f8c30b9d39c0879e14027c02d41b4b8 | [] | no_license | klknet/geeks4geeks | 6aa5841b15be41057dc987524721ea1ea37e02ea | d7d9099af7617a4000f38c75d2c7214bed570eda | refs/heads/master | 2021-07-12T06:34:30.048691 | 2020-06-22T07:51:14 | 2020-06-22T07:51:14 | 170,288,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | """
Add two numbers without using arithmetic operators.
"""
def add_two(x, y):
while y != 0:
x, y = x ^ y, (x & y) << 1
return x
def add_two_recur(x, y):
if y == 0:
return x
return add_two(x ^ y, (x & y) << 1)
def smallest(x, y, z):
return my_min(x, my_min(y, z))
def my_min(x, y):
return y + ((x - y) & ((x - y) >> 31))
print(add_two(13, 19))
print(add_two_recur(13, 19))
print(smallest(12, -19, 2))
| [
"konglk@aliyun.com"
] | konglk@aliyun.com |
9e8d1c9052e9fd9c61e954143162bfaeaf8a867a | 222c5f0e36717a053bcfd61c7fcfd1e2975d52ad | /mypackage/test/test_mymodule.py | 73a50bc5152b51abd3e56f088075401b1e80ac4b | [] | no_license | ParmEd/test-jenkins-project | ea6ce801bb03d474323294a2c9c9b005736582ed | d05e12a06ea1acc90b6ff1382da7fdb536439454 | refs/heads/master | 2021-06-11T14:39:52.533680 | 2016-12-26T21:03:44 | 2016-12-26T21:03:44 | 77,406,843 | 0 | 0 | null | 2017-02-05T22:25:06 | 2016-12-26T21:04:22 | Python | UTF-8 | Python | false | false | 199 | py | from __future__ import absolute_import
from nose.tools import assert_equal
from ..mymodule import myfunction
def test_my_function():
""" Tests my function """
assert_equal(myfunction(), 0)
| [
"jason.swails@gmail.com"
] | jason.swails@gmail.com |
0a2ba5e6111f6bab5f5212a505cdb7146d25f4f4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/62/usersdata/195/32504/submittedfiles/ex1.py | 88cadca3ab6339c897432a8160b8d83ee8e60d87 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from_future_ import division
a=float(input('Digite a:'))
b=float(input('Digite b:'))
c=float(input('Digite c:'))
delta=(b*b)-(4*a*c)
if delta>=0:
x1=(-b+delta**(1/2))/2*a
x2=(-b-delta**(1/2))/2*a
print('X1:%.2f' %x1)
print('X2:%.2f' %x2)
else:
print('SSR')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2f758c6bccce92a39ee512572b55a09aa2a0f479 | 2c7ae872d789701fafdeada20d0df8f07fd931c6 | /examples/12_write_program_scope.py | d45fb943d2329b2e76ccde63dcb059f254052435 | [
"Apache-2.0"
] | permissive | Ozsqhbj/pylogix | 55b78deb9c730e52f15bfa65d27844cd3bc3e12b | d6774690478334983d3695b367bd67233dc529d7 | refs/heads/master | 2022-11-08T11:40:58.428297 | 2022-11-01T18:37:17 | 2022-11-01T18:37:17 | 166,840,261 | 1 | 1 | Apache-2.0 | 2019-01-21T15:53:11 | 2019-01-21T15:53:11 | null | UTF-8 | Python | false | false | 626 | py | '''
the following import is only necessary because eip.py is not in this directory
'''
import sys
sys.path.append('..')
'''
Write a program scoped tag
I have a program named "MiscHMI" in my main task.
In MiscHMI, the tag I'm reading will be TimeArray[0]
You have to specify that the tag will be program scoped
by appending the tag name with "Program" and the beginning,
then add the program name, finally the tag name. So our
example will look like this:
Program:MiscHMI.TimeArray[0]
'''
from pylogix import PLC
with PLC() as comm:
comm.IPAddress = '192.168.1.9'
comm.Write('Program:MiscHMI.TimeArray[0]', 2019)
| [
"dmroeder@gmail.com"
] | dmroeder@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.