Dataset schema (one record per source file; observed value ranges from the dataset viewer):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2-616)
- content_id: string (length 40)
- detected_licenses: list (length 0-69)
- license_type: string (2 classes)
- repo_name: string (length 5-118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4-63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k-686M, nullable)
- star_events_count: int64 (0-209k)
- fork_events_count: int64 (0-110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (2-10.3M)
- extension: string (246 classes)
- content: string (length 2-10.3M)
- authors: list (length 1)
- author_id: string (length 0-212)
38c2bb45e74f0784bbc9558b5007e2f167e34598
|
6f984aeb6184828ed2dde2ad5eca838548f28fc6
|
/chapter-1/WeightedQuickUnionUF.py
|
6930a43dbc9833f08e33fdcbc29bf9149ab10c2d
|
[] |
no_license
|
shortdistance/algorithms
|
5013493f870257cb7b9b743a15160d79831b117e
|
69bae724b8eecd50f1efc019ad905e0cae496c6d
|
refs/heads/master
| 2021-01-20T01:33:30.122508
| 2016-09-29T03:18:03
| 2016-09-29T03:18:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Weighted quick-union.
Rather than arbitrarily connecting the second tree to the first for union() in
the quick-union algorithm, we keep track of the size of each tree and always
connect the smaller tree to the larger.
"""
from time import time
class UF(object):
def __init__(self, N):
self.count = N
        self.id = list(range(N))
        self.size = [1] * N
def connected(self, p, q):
return self.find(p) == self.find(q)
def find(self, p):
# find the root of the point
while p != self.id[p]:
p = self.id[p]
return p
def union(self, p, q):
pRoot = self.find(p)
qRoot = self.find(q)
if pRoot == qRoot:
return
        # weighting: always attach the root of the smaller tree to the root of the larger one
if self.size[pRoot] > self.size[qRoot]:
self.id[qRoot] = pRoot
self.size[pRoot] += self.size[qRoot]
else:
self.id[pRoot] = qRoot
self.size[qRoot] += self.size[pRoot]
self.count -= 1
if __name__ == "__main__":
with open('../test-data/largeUF.txt', 'r') as f:
# read the site counts
N = int(f.readline())
uf = UF(N)
start = time()
for line in f.readlines():
readIn = line.strip().split(' ')
p = int(readIn[0])
q = int(readIn[1])
if (uf.connected(p, q)):
continue
uf.union(p, q)
print('%d %d' %(p, q))
end = time()
print('%d components \ntotal time: %s' % (uf.count, end-start))
|
[
"stevenyuysy@gmail.com"
] |
stevenyuysy@gmail.com
|
8bb5caf667c7890e21dab3281688508aae204174
|
4992a176c6b6c8673d394b2cc4a7ff386911c3a7
|
/config.py
|
27c09f0a050614a79bd3cf80a4314c3b23af36d8
|
[
"MIT"
] |
permissive
|
leeyegy/pytorch-deeplab-xception
|
7afcc0d80b4c55ba57fb8c69eb735847b5f8832f
|
16ef5d6aee03c859fb1422e22a1237384f599a39
|
refs/heads/master
| 2022-11-17T17:17:19.258188
| 2020-07-22T11:55:27
| 2020-07-22T11:55:27
| 260,111,806
| 0
| 0
|
MIT
| 2020-04-30T04:17:32
| 2020-04-30T04:17:32
| null |
UTF-8
|
Python
| false
| false
| 4,880
|
py
|
import argparse
import torch
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
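# model and dataset params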
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--out-stride', type=int, default=16,
                    help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='pascal',
choices=['pascal', 'coco', 'cityscapes'],
help='dataset name (default: pascal)')
parser.add_argument('--use-sbd', action='store_true', default=False,
                    help='whether to use SBD dataset (default: False)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=513,
help='base image size')
parser.add_argument('--crop-size', type=int, default=513,
help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='ce',
choices=['ce', 'focal'],
help='loss func type (default: ce)')
# training hyper params
parser.add_argument('--epochs', type=int, default=None, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None,
metavar='N', help='input batch size for \
testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False,
help='whether to use balanced weights (default: False)')
# optimizer params
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly',
choices=['poly', 'step', 'cos'],
help='lr scheduler mode: (default: poly)')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=5e-4,
metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='whether use nesterov (default: False)')
# cuda, seed and logging
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None,
help='set the checkpoint name')
# finetuning pre-trained models
parser.add_argument('--ft', action='store_true', default=False,
help='finetuning on a different dataset')
# evaluation option
parser.add_argument('--eval-interval', type=int, default=1,
help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')
# backdoor attack
parser.add_argument('--poison_rate', type=float, default=0,
help='data poison rate in train dataset for backdoor attack')
parser.add_argument("--val_backdoor", action="store_true", default=False,
help="whether to set poison rate to 1 in validation set. Only valid in the case of args.resume is not None")
parser.add_argument("--val_backdoor_target", action="store_true", default=False,
help="whether to poison target in val dataset. Only valid in the case of args.resume is not None ans args.val_backdoor is True")
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
|
[
"leeyegy@gmail.com"
] |
leeyegy@gmail.com
|
5f1221ab597a6d42074e6bd8b37d2b3dd65d5255
|
e591159d284a7936c0e0a907f78bee26a738ed97
|
/app.py
|
6511601531835720146def1c5e106540a07145b7
|
[] |
no_license
|
mmagliar/RU-Bootcamp-Portfolio
|
6dc72e905b96f92426ad61107179028da116f0de
|
2d3c3ad32bd28b932eb163f54dd692321243e102
|
refs/heads/master
| 2023-05-11T19:17:14.827848
| 2019-08-06T23:49:38
| 2019-08-06T23:49:38
| 200,939,023
| 0
| 0
| null | 2023-05-01T20:35:57
| 2019-08-06T23:45:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,157
|
py
|
from flask import Flask, render_template, request
import os, pickle
app = Flask(__name__)
# Define routes
@app.route("/", methods=['GET','POST'])
def home():
if request.method == 'POST':
atmResult = {}
stmResult = {}
utmResult = {}
userHouse = request.form.to_dict(flat=False)
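        # Assemble the 15-feature vector shared by the three town-level models
        # (the county model below additionally includes the 'ext' field).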
forTowns = [[userHouse['ac'][0],
userHouse['lp'][0],
userHouse['fb'][0],
userHouse['hb'][0],
userHouse['bd'][0],
userHouse['rooms'][0],
userHouse['bmnt'][0],
userHouse['cool'][0],
userHouse['heat'][0],
userHouse['water'][0],
userHouse['sewer'][0],
userHouse['fha'][0],
userHouse['la'][0],
userHouse['ba'][0],
userHouse['ta'][0]]]
with open(os.path.join('static','models','countyModel'), 'rb') as f:
model = pickle.load(f)
cResult = sorted(zip(model.classes_, model.predict_proba([[userHouse['ac'][0],
userHouse['lp'][0],
userHouse['fb'][0],
userHouse['hb'][0],
userHouse['bd'][0],
userHouse['rooms'][0],
userHouse['bmnt'][0],
userHouse['ext'][0],
userHouse['cool'][0],
userHouse['heat'][0],
userHouse['water'][0],
userHouse['sewer'][0],
userHouse['fha'][0],
userHouse['la'][0],
userHouse['ba'][0],
userHouse['ta'][0]]])[0]), key=lambda x: x[1], reverse=True)
with open(os.path.join('static','models','allTownModel'), 'rb') as f:
model = pickle.load(f)
atmResult = sorted(zip(model.classes_, model.predict_proba(forTowns)[0]), key=lambda x: x[1], reverse=True)
with open(os.path.join('static','models','sussexTownModel'), 'rb') as f:
model = pickle.load(f)
stmResult = sorted(zip(model.classes_, model.predict_proba(forTowns)[0]), key=lambda x: x[1], reverse=True)
with open(os.path.join('static','models','unionTownModel'), 'rb') as f:
model = pickle.load(f)
utmResult = sorted(zip(model.classes_, model.predict_proba(forTowns)[0]), key=lambda x: x[1], reverse=True)
return render_template('home.html', cResult=cResult, atmResult=atmResult, stmResult=stmResult, utmResult=utmResult)
else:
return render_template("home.html")
# added this route
@app.route("/info")
def info():
return render_template("info.html")
if __name__ == '__main__':
app.run(debug=True, port=8000)
|
[
"mmagliar@its.jnj.com"
] |
mmagliar@its.jnj.com
|
3367b91254064e1d9ff55c69a353df7322f5c35b
|
c67f1d100cc7c90ace6c4198e7d396c94d7a3715
|
/poolExtractor.py
|
afae7ae1861085512a1ca152bc1f8ab8e63ef16c
|
[] |
no_license
|
PresentJay/Keycloak-Json-beautifier
|
38bd707807d8793b39b98cc3fffefbc368baa80b
|
9b62878d113f7eef16fa4e85ff09c1333b2f97a1
|
refs/heads/master
| 2023-04-18T01:56:09.433999
| 2021-05-10T09:09:37
| 2021-05-10T09:09:37
| 365,981,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,274
|
py
|
import json
def pool_extract(data, root='start', depth=0):
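    # Recursively walk the JSON object, printing a depth-indented summary of each key.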
trial = 0
if root == 'start':
print('\npool method')
else:
print(f'\t(from {root} . . .)')
for key, value in data.items():
if value is None:
continue
trial = trial + 1
for i in range(depth):
print('\t', end='')
print(f'[{depth}] {trial} : {key} => ', end='')
if isinstance(value, dict):
print(f'{len(value)} size dict')
if len(value) > 1:
pool_extract(value, key, depth = depth + 1)
continue
            elif len(value) == 0 and depth == 0:
print('\t{ <empty> }\n')
else:
print("\t{ ", end='')
print(f'{value.keys()}', end='')
print(" }")
elif isinstance(value, list):
print(f'{len(value)} size list')
for i in range(depth):
print('\t', end='')
print('\t[', end='')
if len(value) > 0:
for cnt, item in enumerate(value):
if isinstance(item, dict):
                        if cnt > 0:
print(", ", end='')
if 'name' in item:
print(f'{item.get("name")}', end='')
elif 'alias' in item:
print(f'{item.get("alias")}', end='')
elif 'id' in item:
print(f'{item.get("id")}', end='')
else:
                            if len(item) > 0:
print(f'{item}', end='')
else:
print("<empty>", end='')
else:
print('<empty>', end='')
print(']\n')
else:
if value == "":
print('<empty>')
else:
print(f'{value}\n')
def main():
print('start extracting . . .')
print('* * *')
with open('./realm.json') as f:
data = json.load(f)
pool_extract(data)
print("* * *")
print("done")
if __name__ == "__main__":
# execute only if run as a script
main()
|
[
"presentj94@gmail.com"
] |
presentj94@gmail.com
|
3b5bde336ae6ac771ed88f6b907461d6a04c985e
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_None/model_control_one_enabled_None_PolyTrend_Seasonal_Minute_SVR.py
|
b94b04b02eb82af2145a135b891dcbecd553efe3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['None'], ['PolyTrend'], ['Seasonal_Minute'], ['SVR'])
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
f9d277721b730c465688cc7ca2efe3a9e9e13b7d
|
1fec7bb530f57c2ec6da171da4cb1cbe65129c25
|
/blog/admin.py
|
f3a3951e278c8554ddd4a602643d552f051ffbab
|
[] |
no_license
|
JiphoTheJuppis/DjangoBlog
|
99664b07f44ee2be2200e31d497715d39226eb39
|
699ba45b51196f4b8723eed58a4ec81410eada85
|
refs/heads/main
| 2023-03-03T00:53:19.319459
| 2021-02-04T11:19:12
| 2021-02-04T11:19:12
| 315,292,670
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
from django.contrib import admin
from .models import Post, Comment
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'status', 'created_on')
list_filter = ('status', 'created_on')
search_fields = ['title', 'content']
prepopulated_fields = {'slug': ('title',)}
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'body', 'post', 'created_on', 'active')
list_filter = ('active', 'created_on')
search_fields = ('name', 'email', 'body')
actions = ['approve_comments']
def approve_comments(self, request, queryset):
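        """Admin bulk action: mark the selected comments as active."""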
queryset.update(active=True)
admin.site.register(Post, PostAdmin)
|
[
"jphuhti90@gmail.com"
] |
jphuhti90@gmail.com
|
1fbd226afe501f4c6f6b2bbffc7d4456866a723a
|
53f35793bf1b7145559ba6bee1a2759b9f2219d4
|
/jump7.py
|
a222246a78ffc5b73a51388509aa94ae8ea492e6
|
[] |
no_license
|
707916150/test
|
212ca871962ca78908e9503b0d0cb10a3356b23e
|
f1626f70d861fe3bcb9d899d785b780f22f87fe3
|
refs/heads/master
| 2022-06-21T02:16:41.552349
| 2020-05-14T15:59:39
| 2020-05-14T15:59:39
| 263,955,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
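# Print 1-100, skipping every number that is divisible by 7 or contains the digit 7.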
for i in range(1, 101):
    if i % 7 == 0 or i % 10 == 7 or i // 10 == 7:
        continue
    else:
        print(i)
|
[
"707916150@qq.com"
] |
707916150@qq.com
|
8a1b547562771e53e5dff0bff248f9dba5126cbb
|
9ff53d4f5290141d3b7b3a59f6c6e9e124bcb86d
|
/venv/Lib/site-packages/openpyxl/utils/datetime.py
|
1f5b9210c3e3ebb59307407c941baa33e4c7f51f
|
[
"MIT"
] |
permissive
|
CoGhent/fetchfilesizes
|
a064077f06aeae403bf3a2e5d75caa15b2432307
|
1e28ec97dd22743028dac538a7fc1e064e394f67
|
refs/heads/master
| 2023-07-10T13:37:17.152540
| 2021-08-11T13:12:22
| 2021-08-11T13:12:22
| 381,747,277
| 0
| 2
|
MIT
| 2021-08-06T14:48:39
| 2021-06-30T15:23:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,750
|
py
|
from __future__ import division
# Copyright (c) 2010-2021 openpyxl
"""Manage Excel date weirdness."""
# Python stdlib imports
import datetime
from datetime import timedelta, timezone
from math import isnan
import re
# constants
MAC_EPOCH = datetime.datetime(1904, 1, 1)
WINDOWS_EPOCH = datetime.datetime(1899, 12, 30)
CALENDAR_WINDOWS_1900 = 2415018.5 # Julian date of WINDOWS_EPOCH
CALENDAR_MAC_1904 = 2416480.5 # Julian date of MAC_EPOCH
CALENDAR_WINDOWS_1900 = WINDOWS_EPOCH
CALENDAR_MAC_1904 = MAC_EPOCH
SECS_PER_DAY = 86400
EPOCH = datetime.datetime.utcfromtimestamp(0)
ISO_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
ISO_REGEX = re.compile(r'''
(?P<date>(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}))?T?
(?P<time>(?P<hour>\d{2}):(?P<minute>\d{2})(:(?P<second>\d{2})(?P<microsecond>\.\d{1,3})?)?)?Z?''',
re.VERBOSE)
ISO_DURATION = re.compile(r'PT((?P<hours>\d+)H)?((?P<minutes>\d+)M)?((?P<seconds>\d+(\.\d{1,3})?)S)?')
def to_ISO8601(dt):
"""Convert from a datetime to a timestamp string."""
if hasattr(dt, "microsecond") and dt.microsecond:
return dt.isoformat(timespec="milliseconds")
return dt.isoformat()
def from_ISO8601(formatted_string):
"""Convert from a timestamp string to a datetime object. According to
18.17.4 in the specification the following ISO 8601 formats are
supported.
Dates B.1.1 and B.2.1
Times B.1.2 and B.2.2
Datetimes B.1.3 and B.2.3
There is no concept of timedeltas in the specification, but Excel
writes them (in strict OOXML mode), so these are also understood.
"""
if not formatted_string:
return None
match = ISO_REGEX.match(formatted_string)
if match and any(match.groups()):
parts = match.groupdict(0)
for key in ["year", "month", "day", "hour", "minute", "second"]:
if parts[key]:
parts[key] = int(parts[key])
if parts["microsecond"]:
parts["microsecond"] = int(float(parts['microsecond']) * 1_000_000)
if not parts["date"]:
dt = datetime.time(parts['hour'], parts['minute'], parts['second'], parts["microsecond"])
elif not parts["time"]:
dt = datetime.date(parts['year'], parts['month'], parts['day'])
else:
del parts["time"]
del parts["date"]
dt = datetime.datetime(**parts)
return dt
match = ISO_DURATION.match(formatted_string)
if match and any(match.groups()):
parts = match.groupdict(0)
for key, val in parts.items():
if val:
parts[key] = float(val)
return datetime.timedelta(**parts)
raise ValueError("Invalid datetime value {}".format(formatted_string))
def to_excel(dt, epoch=WINDOWS_EPOCH):
"""Convert Python datetime to Excel serial"""
if isinstance(dt, datetime.time):
return time_to_days(dt)
if isinstance(dt, datetime.timedelta):
return timedelta_to_days(dt)
if isnan(dt.year): # Pandas supports Not a Date
return
if not hasattr(dt, "date"):
dt = datetime.datetime.combine(dt, datetime.time())
# rebase on epoch and adjust for < 1900-03-01
days = (dt - epoch).days
if 0 < days <= 60 and epoch == WINDOWS_EPOCH:
days -= 1
return days + time_to_days(dt)
def from_excel(value, epoch=WINDOWS_EPOCH, timedelta=False):
"""Convert Excel serial to Python datetime"""
if value is None:
return
if timedelta:
td = datetime.timedelta(days=value)
if td.microseconds:
# round to millisecond precision
td = datetime.timedelta(seconds=td.total_seconds() // 1,
microseconds=round(td.microseconds, -3))
return td
day, fraction = divmod(value, 1)
diff = datetime.timedelta(milliseconds=round(fraction * SECS_PER_DAY * 1000))
if 0 <= value < 1 and diff.days == 0:
return days_to_time(diff)
if 0 < value < 60 and epoch == WINDOWS_EPOCH:
day += 1
return epoch + datetime.timedelta(days=day) + diff
UTC = timezone(timedelta(0))
def time_to_days(value):
"""Convert a time value to fractions of day"""
if value.tzinfo is not None:
value = value.astimezone(UTC)
return (
(value.hour * 3600)
+ (value.minute * 60)
+ value.second
+ value.microsecond / 10**6
) / SECS_PER_DAY
def timedelta_to_days(value):
"""Convert a timedelta value to fractions of a day"""
return value.total_seconds() / SECS_PER_DAY
def days_to_time(value):
mins, seconds = divmod(value.seconds, 60)
hours, mins = divmod(mins, 60)
return datetime.time(hours, mins, seconds, value.microseconds)
|
[
"Flore.Verkest@stad.gent"
] |
Flore.Verkest@stad.gent
|
7454e57e3dd163ffa813ae98831621c6c1289672
|
1ca25c63956602043241460b3b829194995e9c63
|
/app/__init__.py
|
51719d2b17ad42c91b00c14f5e64922e69999532
|
[] |
no_license
|
ziedbouf/flask-prometheus-demo
|
0599e237552eb5ee691ec1a01c35389061195581
|
e5f75d3963aaac03f5367d3641caf83ae73243b5
|
refs/heads/master
| 2022-09-23T16:23:15.013402
| 2021-07-27T10:15:56
| 2021-07-27T10:15:56
| 211,046,807
| 0
| 0
| null | 2022-09-16T18:10:05
| 2019-09-26T09:05:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
import os
from flask import Blueprint, Response, request
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from flask_restplus import Api, Namespace, Resource
from .main.controller.user_controller import api as user_ns
from .main.controller.auth_controller import api as auth_ns
from prometheus_flask_exporter import PrometheusMetrics
blueprint = Blueprint('api', __name__)
api = Api(blueprint,
title='FLASK RESTPLUS API BOILER-PLATE WITH JWT',
version='1.0',
description='a boilerplate for flask restplus web service'
)
api.add_namespace(user_ns, path='/user')
api.add_namespace(auth_ns)
metrics = PrometheusMetrics(blueprint)
@blueprint.route('/metrics')
def meter():
from prometheus_client import multiprocess, CollectorRegistry
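    # In multiprocess mode, metrics live in files under prometheus_multiproc_dir,
    # so aggregate them into a fresh registry instead of the exporter's own.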
if 'prometheus_multiproc_dir' in os.environ:
registry = CollectorRegistry()
else:
registry = metrics.registry
if 'name[]' in request.args:
registry = registry.restricted_registry(request.args.getlist('name[]'))
if 'prometheus_multiproc_dir' in os.environ:
multiprocess.MultiProcessCollector(registry)
headers = {'Content-Type': CONTENT_TYPE_LATEST}
    return generate_latest(registry), 200, headers
|
[
"zied.boufaden@gmail.com"
] |
zied.boufaden@gmail.com
|
bcea3e3314d20a833c24b4fe9a78f25f69dcf8b0
|
13625dd7375297b066ccd69d6c229e9a1535c9b2
|
/wallet/migrations/0009_wallet_wallet_id.py
|
d1274ff1a241f1b550183569dc0ab85b98cc4095
|
[] |
no_license
|
rajman01/investfy
|
9d5fa3ed7593ec13db575016fc839664630318af
|
a4c8bf16ba7a1ce38d1370e4779284a4d6426733
|
refs/heads/main
| 2023-09-01T19:10:18.411861
| 2023-08-28T02:30:23
| 2023-08-28T02:30:23
| 320,408,218
| 0
| 1
| null | 2023-08-28T02:30:24
| 2020-12-10T22:46:03
| null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
# Generated by Django 3.1.3 on 2020-12-20 22:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wallet', '0008_savingtransaction'),
]
operations = [
migrations.AddField(
model_name='wallet',
name='wallet_id',
field=models.CharField(max_length=32, null=True, unique=True),
),
]
|
[
"alameenraji31@gmail.com"
] |
alameenraji31@gmail.com
|
de8b97999e49669b086bc22d87616886b45d84b4
|
f29c56cb709fa26b38499ff85890efa3fe936d51
|
/while_odd_till_Half_n_iteration.py
|
bf18f3b3b65fd5e991894058263ca0405495a787
|
[] |
no_license
|
manishrana93/python_trg
|
ea016bb7bb80bc81b79ce158b0b9d801372fd279
|
50e9ad86e32ad2e7ffb4334eef4b4f3997f6376c
|
refs/heads/main
| 2023-04-06T03:11:26.711133
| 2021-04-15T09:55:04
| 2021-04-15T09:55:04
| 355,856,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
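# Print the odd numbers from 1 up to n//2 + 1.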
n = int(input("Enter n:"))
i = 1
while i <= n // 2 + 1:
    if i % 2 == 1:
        print(i)
    i = i + 1
|
[
"manish93.rana@gmail.com"
] |
manish93.rana@gmail.com
|
2509732464947c4b57444b200c29aff64163c048
|
16e22166b88bcc8f73b1774ad491f559c459e2df
|
/tictactoe.py
|
f44747a4ead4cf46f09e81527bb6ead23b68a486
|
[] |
no_license
|
NovusPrograms/JetBrains_Academy_Projects
|
5d1d43d913b05aff08ddcf34f4e1c5749e96613b
|
203e6d197f98dc24ec8578dfb3699498eec9230d
|
refs/heads/master
| 2022-12-03T07:07:47.977809
| 2020-08-22T18:38:28
| 2020-08-22T18:38:28
| 270,965,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
def make_matrix(game_status):
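    # Convert the 9-character game status string into a 3x3 board matrix.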
matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for i in range(3):
for j in range(3):
matrix[i][j] = game_status[j + 3 * i]
return matrix
def is_player_win(matrix, player):
if matrix[0][0] == matrix[1][1] == matrix[2][2] == player:
return True
if matrix[0][2] == matrix[1][1] == matrix[2][0] == player:
return True
for n in range(3):
if matrix[n] == [player, player, player] \
or matrix[0][n] == matrix[1][n] == matrix[2][n] == player:
return True
return False
def is_all_filled(matrix):
for i in range(3):
for j in range(3):
if matrix[i][j] == " ":
return False
return True
def is_game_run(matrix, player1, player2):
if not is_player_win(matrix, player1) and not is_player_win(matrix, player2) and not is_all_filled(matrix):
return True
return False
def is_cell_free(a, b, matrix):
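    # Coordinates are 1-based (column, row) with (1, 1) at the bottom-left;
    # convert them to matrix indices.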
if matrix[abs(b - 4) - 1][a - 1] == " ":
return True
return False
def print_table(matrix):
structure = {
"floor": "---------",
"row0": "| " + " ".join(matrix[0]) + " |",
"row1": "| " + " ".join(matrix[1]) + " |",
"row2": "| " + " ".join(matrix[2]) + " |"}
print(structure["floor"])
print(structure["row0"])
print(structure["row1"])
print(structure["row2"])
print(structure["floor"])
def make_move(matrix, player):
while True:
try:
next_move = list(map(int, input("Enter the coordinates: > ").split()))
if next_move[0] in {1, 2, 3} and next_move[1] in {1, 2, 3}:
                if is_cell_free(next_move[0], next_move[1], matrix):
matrix[abs(next_move[1] - 4) - 1][next_move[0] - 1] = player
break
else:
print("This cell is occupied! Choose another one!")
else:
print("Coordinates should be from 1 to 3!")
except ValueError:
print("You should enter numbers!")
return matrix
def play(matrix, player1, player2):
while is_game_run(matrix, player1, player2):
print_table(matrix)
make_move(matrix, player1)
if is_game_run(matrix, player1, player2):
print_table(matrix)
make_move(matrix, player2)
return matrix
def end_game_statement(matrix, player1, player2):
    if is_player_win(matrix, player1):
        print_table(matrix)
        print(f"{player1} wins")
    elif is_player_win(matrix, player2):
        print_table(matrix)
        print(f"{player2} wins")
    else:
        print_table(matrix)
        print("Draw")
mark1 = "X"
mark2 = "O"
input_status = " "
matrix_variable = make_matrix(input_status)
play(matrix_variable, mark1, mark2)
end_game_statement(matrix_variable, mark1, mark2)
|
[
"noreply@github.com"
] |
NovusPrograms.noreply@github.com
|
38a3f4b2c4b561700debcdf5586644b18dfdd979
|
36ed9da3183235d4f36aecac2482d4a8f55ca779
|
/RS07/word2vec/RS07-word-similarity.py
|
491f676fc234ad5a6f21a6896e8fbb9267737077
|
[] |
no_license
|
1011365119/RS
|
ee32b1ad6d1d861da18f173f076206a945fc376b
|
02bece1e0ba3cf6683e3c246d2b86e2710158684
|
refs/heads/master
| 2023-08-17T20:12:13.464973
| 2021-10-19T15:24:48
| 2021-10-19T15:24:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
# Content: convert words to vectors with word2vec, then compute similarities
# Author: HuiHui
# Date: 2020-03-11
# Reference:
from gensim.models import word2vec
import multiprocessing
# If the directory contains multiple files, use PathLineSentences.
segment_folder = './segment'
# Use an iterator rather than a list; a list would consume a lot of memory on large inputs.
sentences = word2vec.PathLineSentences(segment_folder)
# Set the model parameters and train; workers is the number of training threads.
model = word2vec.Word2Vec(sentences, size=128, window=5, min_count=5,
                          workers=multiprocessing.cpu_count())
# Word vector for '曹操' (Cao Cao)
print(model['曹操'])
# The 15 words most similar to '曹操'
# NOTE: the results differ from run to run.
for key in model.wv.similar_by_word('曹操', topn=15):
    print(key)
print(model.wv.most_similar('曹操'))  # print the 10 most similar words
# Compute '曹操' + '刘备' - '张飞'
print(model.wv.most_similar(positive=['曹操', '刘备'], negative=['张飞']))
# Save the model (use an absolute path, otherwise it raises an error)
model.save('/Users/wangdonghui/Desktop/ZGZ/RS/AI-Training-Course/RS07/model/word2Vec.model')
|
[
"1010959447@qq.com"
] |
1010959447@qq.com
|
c9c52670e588b6040e0ad0e2d454dca327951fff
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/third_party/catapult/dashboard/dashboard/email_summary.py
|
99e3b5f74e2c2f9789f30bf88f13b61a838e1759
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 3,883
|
py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Send alert summary emails to sheriffs on duty."""
import datetime
import sys
from google.appengine.api import mail
from dashboard import email_template
from dashboard import request_handler
from dashboard.models import anomaly
from dashboard.models import sheriff
# The string to use as the header in an alert summary email.
_EMAIL_HTML_TOTAL_ANOMALIES = """
<br><b><u>%d Total Anomalies</b></u><br>
<b>+++++++++++++++++++++++++++++++</b><br>
"""
_EMAIL_SUBJECT = '%s: %d anomalies found at %d:%d.'
class EmailSummaryHandler(request_handler.RequestHandler):
"""Summarizes alerts and sends e-mail to sheriff on duty.
Identifies sheriffs who have the "summarize" property set to True, and gets
anomalies related to that sheriff that were triggered in the past 24 hours.
"""
def get(self):
"""Emails sheriffs with anomalies identified in most-recent 24 hours."""
# Get all Sheriffs that have requested an e-mail summary.
sheriffs_to_email_query = sheriff.Sheriff.query(
sheriff.Sheriff.summarize == True)
# Start time after which to get anomalies.
start_time = datetime.datetime.now() - datetime.timedelta(hours=24)
for sheriff_entity in sheriffs_to_email_query.fetch():
_SendSummaryEmail(sheriff_entity, start_time)
def _SendSummaryEmail(sheriff_entity, start_time):
"""Sends a summary email for the given sheriff rotation.
Args:
sheriff_entity: A Sheriff entity.
start_time: A starting datetime for anomalies to fetch.
"""
receivers = email_template.GetSheriffEmails(sheriff_entity)
anomalies = _RecentUntriagedAnomalies(sheriff_entity, start_time)
if not anomalies:
return
subject = _EmailSubject(sheriff_entity, anomalies)
html, text = _EmailBody(anomalies)
mail.send_mail(
sender='gasper-alerts@google.com', to=receivers,
subject=subject, body=text, html=html)
def _RecentUntriagedAnomalies(sheriff_entity, start_time):
"""Returns untriaged anomalies for |sheriff| after |start_time|."""
recent_anomalies = anomaly.Anomaly.query(
anomaly.Anomaly.sheriff == sheriff_entity.key,
anomaly.Anomaly.timestamp > start_time).fetch()
return [a for a in recent_anomalies
if not a.is_improvement and a.bug_id is None]
def _EmailSubject(sheriff_entity, anomalies):
"""Returns the email subject string for a summary email."""
lowest_revision, highest_revision = _MaximalRevisionRange(anomalies)
return _EMAIL_SUBJECT % (sheriff_entity.key.string_id(), len(anomalies),
lowest_revision, highest_revision)
def _MaximalRevisionRange(anomalies):
"""Gets the lowest start and highest end revision for |anomalies|."""
lowest_revision = sys.maxint
highest_revision = 1
for anomaly_entity in anomalies:
if anomaly_entity.start_revision < lowest_revision:
lowest_revision = anomaly_entity.start_revision
if anomaly_entity.end_revision > highest_revision:
highest_revision = anomaly_entity.end_revision
return lowest_revision, highest_revision
def _EmailBody(anomalies):
"""Returns the html and text versions of the email body."""
assert anomalies
html_body = []
text_body = []
html_body.append(_EMAIL_HTML_TOTAL_ANOMALIES % len(anomalies))
anomaly_info = {}
for anomaly_entity in anomalies:
test = anomaly_entity.GetTestMetadataKey().get()
anomaly_info = email_template.GetAlertInfo(anomaly_entity, test)
html_body.append(anomaly_info['email_html'])
text_body.append(anomaly_info['email_text'])
assert anomaly_info
html_body.append(anomaly_info['alerts_link'])
# Join details for all anomalies to generate e-mail body.
html = ''.join(html_body)
text = ''.join(text_body)
return html, text
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
f2981de7c07d5ac5a75773d97d61156c5735f892
|
f6474a92c287861424ba6238a6198fbef809e7b7
|
/setup.py
|
ba5744ac15622402ba4e229a184b32fa4f9641c1
|
[] |
no_license
|
madumalt/ANN-Coursework-Assignments
|
8b6b2988bb6ffd7129a7e75dbc01e03e166d26b6
|
db44a741b1cff136867a1cc277e7a274cd107930
|
refs/heads/master
| 2020-04-03T01:46:01.012400
| 2018-10-27T07:46:10
| 2018-10-27T07:46:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
from setuptools import setup, find_packages
setup(
name='ANN',
version='0.0.1',
packages=find_packages(),
license='MIT',
author='thilinamad',
author_email='madumalt@gamil.com',
description='Project for ANN module',
install_requires=['pandas', 'scikit-learn', 'keras', 'numpy'],
zip_safe=True
)
|
[
"madumalt@gmail.com"
] |
madumalt@gmail.com
|
694389398a01fbe59f5d0186f59a8ad0de06dba7
|
a8c5b3de3022ea85f7ab51a4509ce04c0d261c20
|
/parte_2/Context/create_order_context.py
|
205ffadeed79349c202b6974d2befca95f58a010
|
[
"MIT"
] |
permissive
|
jonasrla/desafio_youse
|
6ef00baf67a8fa0374bcdafa0ebe25898f469a18
|
a9f858e6af4dc26e9d6068f5028bd6cecd334955
|
refs/heads/master
| 2022-12-12T13:50:14.070039
| 2020-09-09T02:32:09
| 2020-09-09T02:32:09
| 293,125,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
from pyspark.sql.functions import from_unixtime
from .base_context import BaseContext
class CreateOrderContext(BaseContext):
def __init__(self, file_path):
self.app_name = 'Process Create Order'
super().__init__(file_path)
def process(self):
df_client, df_order = self.transformation()
self.append_table(df_client, 'clients')
self.append_table(df_order, 'orders')
def transformation(self):
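        # Tag each input row with a generated client UUID, then split the
        # payload into separate client and order DataFrames.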
self.input.alias('input')
self.input.registerTempTable('input')
self.input = self.spark.sql('SELECT *, uuid() AS client_id FROM input')
df_client = self.input.selectExpr('client_id as id',
'payload.lead_person.*')
df_order = self.input.selectExpr('raw_timestamp as created_at',
'payload.order_uuid as id',
'payload.insurance_type as insurance_type',
'payload.sales_channel as sales_channel',
'client_id')
df_order = df_order.withColumn('created_at',
from_unixtime(df_order.created_at))
return df_client, df_order
|
[
"jonasrla@gmail.com"
] |
jonasrla@gmail.com
|
de43989cb4a518d5a147cb7095d5c6ecece9e584
|
5a664d498959af3eab9c1af43010d5fdf9c239da
|
/tests/distributions/test_util.py
|
4820c9da0284358c03cfd59f2a3b8da93a3b3966
|
[
"MIT"
] |
permissive
|
darthsuogles/pyro
|
4d6aaab2ad3cd7b0f7f8e0095a1b344c249543be
|
d1e6ba143314924aa949bdd6b72b3d62be0e6476
|
refs/heads/dev
| 2021-08-23T07:45:55.122199
| 2017-12-02T00:23:55
| 2017-12-02T00:23:55
| 109,544,037
| 0
| 0
| null | 2017-11-05T01:52:45
| 2017-11-05T01:52:45
| null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from pyro.distributions.util import broadcast_shape
@pytest.mark.parametrize('shapes', [
([],),
([1],),
([2],),
([], []),
([], [1]),
([], [2]),
([1], []),
([2], []),
([1], [2]),
([2], [1]),
([2], [2]),
([2], [3, 1]),
([2, 1], [3]),
([2, 1], [1, 3]),
([1, 2, 4, 1, 3], [6, 7, 1, 1, 5, 1]),
([], [3, 1], [2], [4, 3, 1], [5, 4, 1, 1]),
])
def test_broadcast_shape(shapes):
assert broadcast_shape(*shapes) == np.broadcast(*map(np.empty, shapes)).shape
@pytest.mark.parametrize('shapes', [
([3], [4]),
([2, 1], [1, 3, 1]),
])
def test_broadcast_shape_error(shapes):
with pytest.raises(ValueError):
broadcast_shape(*shapes)
@pytest.mark.parametrize('shapes', [
([],),
([1],),
([2],),
([], []),
([], [1]),
([], [2]),
([1], []),
([2], []),
([1], [1]),
([2], [2]),
([2], [2]),
([2], [3, 2]),
([2, 3], [3]),
([2, 3], [2, 3]),
([4], [1, 2, 3, 4], [2, 3, 4], [3, 4]),
])
def test_broadcast_shape_strict(shapes):
assert broadcast_shape(*shapes, strict=True) == np.broadcast(*map(np.empty, shapes)).shape
@pytest.mark.parametrize('shapes', [
([1], [2]),
([2], [1]),
([3], [4]),
([2], [3, 1]),
([2, 1], [3]),
([2, 1], [1, 3]),
([2, 1], [1, 3, 1]),
([1, 2, 4, 1, 3], [6, 7, 1, 1, 5, 1]),
([], [3, 1], [2], [4, 3, 1], [5, 4, 1, 1]),
])
def test_broadcast_shape_strict_error(shapes):
with pytest.raises(ValueError):
broadcast_shape(*shapes, strict=True)
|
[
"prad.neeraj@gmail.com"
] |
prad.neeraj@gmail.com
|
413c5bf1cb3c7eac25145c4e05e6234c9a517689
|
0fc5294a60e11c8d6a0f2d6253c7d03cec7bb338
|
/solutions/797. All Paths From Source to Target.py
|
f0a239554b3d645d53678023c5bbec9e07ca75f9
|
[] |
no_license
|
NiteshTyagi/leetcode
|
4011f37adbd9e430f23b7796ba8d8aa488153706
|
dddb90daafcd7f4623873d5d84dfd97925c0230f
|
refs/heads/main
| 2023-04-23T23:50:04.722456
| 2021-05-05T06:58:47
| 2021-05-05T06:58:47
| 328,474,411
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
from typing import List

class Solution:
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
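        # Iterative DFS from node 0: extend partial paths until they reach the
        # last node. The graph is a DAG, so no visited set is needed.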
final_result = []
size = len(graph)
for i in graph[0]:
path = [0,i]
adj_list = [[path.copy(),j] for j in graph[i]]
if not adj_list:
final_result.append(path)
while adj_list:
path,node = adj_list.pop()
if not graph[node]:
if node==size-1:
path.append(node)
final_result.append(path)
elif path[-1]==size-1:
final_result.append(path)
else:
path.append(node)
adj_list.extend([[path.copy(),j] for j in graph[node]])
return final_result
|
[
"33661780+NiteshTyagi@users.noreply.github.com"
] |
33661780+NiteshTyagi@users.noreply.github.com
|
51eef664fbd5b709cc22d3cc459e2e826fd595bf
|
d780df6e068ab8a0f8007acb68bc88554a9d5b50
|
/python/g1/files/g1/files/caches.py
|
1e1355d45030ee4adb108363b45437543610b5cb
|
[
"MIT"
] |
permissive
|
clchiou/garage
|
ed3d314ceea487b46568c14b51e96b990a50ed6f
|
1d72863d3a5f5d620b170f4dd36f605e6b72054f
|
refs/heads/master
| 2023-08-27T13:57:14.498182
| 2023-08-15T07:09:57
| 2023-08-15T19:53:52
| 32,647,497
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,817
|
py
|
__all__ = [
'Cache',
'NULL_CACHE',
]
import collections
import contextlib
import dataclasses
import hashlib
import io
import logging
import os
import random
import shutil
import tempfile
import threading
from pathlib import Path
import g1.files
from g1.bases import collections as g1_collections
from g1.bases import functionals
from g1.bases import timers
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
# By default we keep 80% of entries post eviction.
POST_EVICTION_SIZE_RATIO = 0.8
class CacheInterface:
@dataclasses.dataclass(frozen=True)
class Stats:
num_hits: int
num_misses: int
_SENTINEL = object()
def get_stats(self):
raise NotImplementedError
def estimate_size(self):
raise NotImplementedError
def evict(self):
raise NotImplementedError
def get(self, key, default=None):
raise NotImplementedError
def get_file(self, key, default=None):
raise NotImplementedError
def getting_path(self, key, default=None):
raise NotImplementedError
def set(self, key, value):
raise NotImplementedError
def setting_file(self, key):
raise NotImplementedError
def setting_path(self, key):
raise NotImplementedError
def pop(self, key, default=_SENTINEL):
raise NotImplementedError
class NullCache(CacheInterface):
def __init__(self):
self._num_misses = 0
def get_stats(self):
return self.Stats(
num_hits=0,
num_misses=self._num_misses,
)
def estimate_size(self):
return 0
def evict(self):
return 0
def get(self, key, default=None):
del key # Unused.
self._num_misses += 1
return default
def get_file(self, key, default=None):
del key # Unused.
self._num_misses += 1
return default
@contextlib.contextmanager
def getting_path(self, key, default=None):
del key # Unused.
self._num_misses += 1
yield default
def set(self, key, value):
pass
@contextlib.contextmanager
def setting_file(self, key):
yield io.BytesIO()
@contextlib.contextmanager
def setting_path(self, key):
yield Path(os.devnull)
def pop(self, key, default=CacheInterface._SENTINEL):
if default is self._SENTINEL:
raise KeyError(key)
return default
NULL_CACHE = NullCache()
class Cache(CacheInterface):
"""File-based LRU cache.
    Cache keys and values are bytes objects. A cache value is stored in
    its own file, whose path is derived from the MD5 hash of its key: the
    first two hexadecimal digits form the directory name and the rest the
    file name. This two-level structure should keep any single directory
    from growing too big.
    """
@staticmethod
def _get_relpath(key):
hasher = hashlib.md5()
hasher.update(key)
digest = hasher.hexdigest()
return Path(digest[:2]) / digest[2:]
def __init__(
self,
cache_dir_path,
capacity,
*,
post_eviction_size=None,
executor=None, # Use this to evict in the background.
):
self._lock = threading.Lock()
self._cache_dir_path = ASSERT.predicate(cache_dir_path, Path.is_dir)
self._capacity = ASSERT.greater(capacity, 0)
self._post_eviction_size = (
post_eviction_size if post_eviction_size is not None else
int(self._capacity * POST_EVICTION_SIZE_RATIO)
)
ASSERT(
0 <= self._post_eviction_size <= self._capacity,
'expect 0 <= post_eviction_size <= {}, not {}',
self._capacity,
self._post_eviction_size,
)
self._executor = executor
# By the way, if cache cold start is an issue, we could store
# and load this table from a file.
self._access_log = collections.OrderedDict()
# getting_path may "lease" paths to the user, and we should not
# evict these paths.
self._active_paths = g1_collections.Multiset()
self._num_hits = 0
self._num_misses = 0
# It's safe to call these methods after this point.
self._eviction_countdown = self._estimate_eviction_countdown()
self._maybe_evict()
def get_stats(self):
return self.Stats(
num_hits=self._num_hits,
num_misses=self._num_misses,
)
def _log_access(self, path):
# Although this is a LRU cache, let's keep access counts, which
# could be useful in understanding cache performance.
self._access_log[path] = self._access_log.get(path, 0) + 1
self._access_log.move_to_end(path, last=False)
def _make_get_recency(self):
recency_table = dict((p, r) for r, p in enumerate(self._access_log))
least_recency = len(self._access_log)
return lambda path: recency_table.get(path, least_recency)
def estimate_size(self):
dir_paths = list(_iter_dirs(self._cache_dir_path))
if not dir_paths:
return 0
# Estimate the size of the cache by multiplying the two, given
# that MD5 yields a uniform distribution.
return len(dir_paths) * _count_files(random.choice(dir_paths))
def _estimate_eviction_countdown(self):
# Just a guess of how far away we are from the next eviction.
return self._capacity - self.estimate_size()
def _should_evict(self):
return (
len(self._access_log) > self._capacity
or self._eviction_countdown < 0
)
def _maybe_evict(self):
with self._lock:
if self._should_evict():
self._evict_require_lock_by_caller()
def evict(self):
with self._lock:
return self._evict_require_lock_by_caller()
def _evict_require_lock_by_caller(self):
stopwatch = timers.Stopwatch()
stopwatch.start()
num_evicted = self._evict()
stopwatch.stop()
LOG.info(
'evict %d entries in %f seconds: %s',
num_evicted,
stopwatch.get_duration(),
self._cache_dir_path,
)
return num_evicted
def _evict(self):
# Estimate post-eviction size per directory, given that MD5
# yields a uniform distribution of sizes.
#
# NOTE: It might "over-evict" when post_eviction_size is less
# than 256, since in which case target_size_per_dir is likely 0.
target_size_per_dir = int(
self._post_eviction_size / _count_dirs(self._cache_dir_path)
)
get_recency = self._make_get_recency()
num_evicted = 0
for dir_path in _iter_dirs(self._cache_dir_path):
num_evicted += self._evict_dir(
dir_path, target_size_per_dir, get_recency
)
self._eviction_countdown = self._estimate_eviction_countdown()
return num_evicted
def _evict_dir(self, dir_path, target_size, get_recency):
num_evicted = 0
paths = list(_iter_files(dir_path))
paths.sort(key=get_recency)
for path in paths[target_size:]:
if path in self._active_paths:
continue
path.unlink()
count = self._access_log.pop(path, 0)
LOG.debug('evict: %d %s', count, path)
num_evicted += 1
g1.files.remove_empty_dir(dir_path)
return num_evicted
def _get_path(self, key):
return self._cache_dir_path / self._get_relpath(key)
def get(self, key, default=None):
with self._lock:
return self._get_require_lock_by_caller(
key, default, Path.read_bytes
)
def get_file(self, key, default=None):
"""Get cache entry as a pair of file object and it size.
The caller has to close the file object. Note that even if this
cache entry is removed or evicted, the file will only removed by
the file system when the file is closed.
"""
with self._lock:
return self._get_require_lock_by_caller(
key,
default,
lambda path: (path.open('rb'), path.stat().st_size),
)
@contextlib.contextmanager
def getting_path(self, key, default=None):
with self._lock:
path = self._get_require_lock_by_caller(
key, default, functionals.identity
)
if path is not default:
self._active_paths.add(path)
try:
yield path
finally:
with self._lock:
if path is not default:
self._active_paths.remove(path)
def _get_require_lock_by_caller(self, key, default, getter):
path = self._get_path(key)
if not path.exists():
self._num_misses += 1
return default
value = getter(path)
self._log_access(path)
self._num_hits += 1
return value
def set(self, key, value):
with self._lock:
return self._set_require_lock_by_caller(
key, lambda path: path.write_bytes(value)
)
@contextlib.contextmanager
def setting_file(self, key):
"""Set a cache entry via a file-like object."""
with self.setting_path(key) as p, p.open('wb') as f:
yield f
@contextlib.contextmanager
def setting_path(self, key):
"""Set a cache entry via a temporary file path."""
# We use mktemp (which is unsafe in general) because we want to
# rename it on success, but NamedTemporaryFile's file closer
# raises FileNotFoundError. I think in our use case here,
# mktemp is safe enough.
value_tmp_path = Path(tempfile.mktemp())
try:
yield value_tmp_path
with self._lock:
# Use shutil.move because /tmp might be in another file
# system than the cache directory. (shutil.move detects
# this and uses os.rename when they are in the same file
# system.)
self._set_require_lock_by_caller(
key,
lambda path: shutil.move(value_tmp_path, path),
)
finally:
value_tmp_path.unlink(missing_ok=True)
def _set_require_lock_by_caller(self, key, setter):
path = self._get_path(key)
if not path.exists():
path.parent.mkdir(exist_ok=True)
self._eviction_countdown -= 1
setter(path)
self._log_access(path)
if self._should_evict():
if self._executor:
self._executor.submit(self._maybe_evict)
else:
self._evict_require_lock_by_caller()
def pop(self, key, default=CacheInterface._SENTINEL):
with self._lock:
return self._pop_require_lock_by_caller(key, default)
def _pop_require_lock_by_caller(self, key, default):
path = self._get_path(key)
if not path.exists():
if default is self._SENTINEL:
raise KeyError(key)
return default
value = path.read_bytes()
path.unlink()
g1.files.remove_empty_dir(path.parent)
self._access_log.pop(path, None)
self._eviction_countdown += 1
return value
def _iter_dirs(dir_path):
return filter(Path.is_dir, dir_path.iterdir())
def _iter_files(dir_path):
return filter(Path.is_file, dir_path.iterdir())
def _count_dirs(dir_path):
return sum(1 for _ in _iter_dirs(dir_path))
def _count_files(dir_path):
return sum(1 for _ in _iter_files(dir_path))
|
[
"clchiou@gmail.com"
] |
clchiou@gmail.com
|
f47925b66babaa146db08d50993839a647c96fa6
|
24bfc47c6c76e80efe6f7c781a63e75dbb69f899
|
/ilexiusapp/models.py
|
c619771ecab23ada79d5742038b600e33c584722
|
[] |
no_license
|
kovacevicu/IlexiusCodingTask
|
4166216d71a6d961321e0d04ab39739cb2210ce6
|
a1d0cd358a8ca704fc17daf4612576c94ae3b8ce
|
refs/heads/master
| 2023-02-07T08:44:07.967850
| 2020-12-24T13:57:58
| 2020-12-24T13:57:58
| 324,168,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
employee_id = models.CharField(max_length=254, blank=True)
login_count = models.PositiveIntegerField(default=0)
|
[
"uroos97@gmail.com"
] |
uroos97@gmail.com
|
045ba6a4c5578c176807f829f312e53c55451857
|
17ad8b4629460b7bed78ff923b0c8261320c21e0
|
/simplesocial/groups/migrations/0001_initial.py
|
2b5c6d0b6f18838cde385f9ccde7bcfe591bf975
|
[] |
no_license
|
afeets/django_blog_project
|
7276f792e6f2cec20907db4a028346a00dfa02f8
|
600c1532f3966877d7769e2d645cbf545747da2a
|
refs/heads/master
| 2022-04-25T21:34:00.983933
| 2020-05-02T10:44:23
| 2020-05-02T10:44:23
| 260,115,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
# Generated by Django 3.0.3 on 2020-05-02 09:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, unique=True)),
('slug', models.SlugField(allow_unicode=True, unique=True)),
('description', models.TextField(blank=True, default='')),
('description_html', models.TextField(blank=True, default='', editable=False)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='GroupMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='groups.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_groups', to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('group', 'user')},
},
),
migrations.AddField(
model_name='group',
name='members',
field=models.ManyToManyField(through='groups.GroupMember', to=settings.AUTH_USER_MODEL),
),
]
|
[
"andyfeetenby@gmail.com"
] |
andyfeetenby@gmail.com
|
30da94580b92fbb6f0e740efe9ac24a4335a9ada
|
4a46adddbd8ff04e8f62fbaf73553e40456aa699
|
/python_src/stackframe.py
|
1bd7617ef40f73f6b1aa66b6b82c7dc1e92c72f3
|
[] |
no_license
|
kmerrill27/thesis
|
0994d2795b06576fefe1103971222c5c4e982712
|
55ef3f0dcf53a8feab32bde4068ba6f0bbac2e1a
|
refs/heads/master
| 2021-01-15T19:28:23.726591
| 2014-04-01T02:07:18
| 2014-04-01T02:07:18
| 17,692,748
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,309
|
py
|
from arch import *
from defs import *
from widgetwrapper import *
class StackFrame:
""" Representation of a stack frame """
def __init__(self, title, architecture, frame_ptr, stack_ptr, bottom, line, assembly):
self.title = title # Function name
self.architecture = architecture
self.frame_ptr = frame_ptr
self.stack_ptr = stack_ptr
self.bottom = bottom # Base address of frame
self.line = line # Current source line
self.assembly = assembly # Current assembly instructions
self.items = [] # Items are symbols, saved registers, etc.
self.selected_row = 0 # Item selected
def addItem(self, frame_item):
""" Add item to top of frame """
self.items.insert(0, frame_item)
class FrameItem:
""" Representation of an object (symbol, saved register, etc.) in a stack frame """
def __init__(self):
self.title = None
self.addr = None
self.length = None # Item length in bits
self.value = None
self.initialized = None
        self.struct = None # Symbol prefix - e.g. (struct node *) - blank if not a struct
self.zoom_val = None # Detail view - different from value only for structs
class FrameDisplay(QtGui.QTableWidget):
""" Widget for displaying current stack frame """
def __init__(self, frame, addr_box, mode, reverse):
super(FrameDisplay, self).__init__()
self.frame = frame
self.addr_box = addr_box
self.mode = mode
self.reverse = reverse
self.initUI()
def initUI(self):
self.setColumnCount(1)
self.horizontalHeader().hide()
self.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
if self.reverse:
self.showReverse()
else:
self.show()
# Set height to height of items so no whitespace
self.setMaximumHeight(self.rowCount() * self.rowHeight(0) + 2)
def show(self):
""" Display all items in stack frame from highest to lowest address """
last_addr = None
# Sort items in increasing address order
sorted_list = sorted(self.frame.items, key=lambda x: x.addr)
if self.frame.stack_ptr:
# Add empty space between top item and stack pointer (if not in main)
self.addTempStorageSpace(int(sorted_list[0].addr, 16), int(self.frame.stack_ptr, 16))
for item in sorted_list:
item_addr = int(item.addr, 16)
if last_addr and last_addr < item_addr:
# Add any empty space between items
self.addTempStorageSpace(item_addr, last_addr)
last_addr = self.displayItem(item) + 4
self.selectRow(self.frame.selected_row)
def showReverse(self):
""" Display all items in stack frame from lowest to highest address """
last_addr = None
# Sort items in decreasing address order
sorted_list = sorted(self.frame.items, key=lambda x: x.addr, reverse=True)
for item in sorted_list:
item_addr = int(item.addr, 16)
if last_addr and last_addr > item_addr + int(item.length):
# Add any empty space between items
self.addTempStorageSpace(last_addr, item_addr + int(item.length))
last_addr = self.displayItem(item)
if self.frame.stack_ptr:
# Add empty space between top item and stack pointer (if not in main)
self.addTempStorageSpace(int(sorted_list[-1].addr, 16), int(self.frame.stack_ptr, 16))
self.selectRow(self.frame.selected_row)
def displayItem(self, frame_item):
""" Display item, which is a symbol, saved register, etc. """
last_addr = -1
item_title = self.populateItem(frame_item)
row_span = int(frame_item.length)/4
if self.reverse:
offset = row_span - 1
caret = DOWN_CARET
else:
offset = 0
caret = CARET
for i in range(0, row_span):
self.insertRow(self.rowCount())
last_addr = hex(int(frame_item.addr, 16) + 4 * abs(offset - i))
header = QtGui.QTableWidgetItem(HEADER_BLANK) # Add blank space to give header width
header.setToolTip(caret + str(last_addr))
self.setVerticalHeaderItem(self.rowCount() - 1, header)
if i == offset:
self.displayPointers(frame_item.addr, header)
self.setUpSpan(row_span, item_title)
return int(last_addr, 16)
def selectionChanged(self, selected, deselected):
""" Frame item (row in table) selected """
selections = selected.indexes()
if selections:
if self.reverse:
# Lowest addressed header item corresponds to spanning item
self.frame.selected_row = selections[-1].row()
else:
self.frame.selected_row = selections[0].row()
self.setBox()
def populateItem(self, frame_item):
""" Set up name and value display for item """
item_title = QtGui.QLabel()
if not frame_item.initialized:
# Symbol uninitialized
item_title.setText(" " + frame_item.title + " =\n " + UNINITIALIZED)
item_title.setStatusTip(UNINITIALIZED)
else:
# Symbol initialized
item_title.setText(" " + frame_item.title + " =\n " + frame_item.struct + frame_item.value)
item_title.setStatusTip(frame_item.zoom_val)
return item_title
def displayPointers(self, addr, header):
""" Display frame and base pointer markers at their respective addresses """
if self.frame.frame_ptr == addr and self.frame.stack_ptr == addr:
# Frame pointer and stack pointer at address
header.setText(self.frame.architecture.base_pointer + "/" + self.frame.architecture.stack_pointer)
elif self.frame.frame_ptr == addr:
# Frame pointer only at address
header.setText(self.frame.architecture.base_pointer)
elif self.frame.stack_ptr == addr:
# Stack pointer only at address
header.setText(self.frame.architecture.stack_pointer)
def setUpSpan(self, row_span, item_title):
""" Set up item to span number of rows proptional to its length """
new_row = self.rowCount() - row_span
self.setCellWidget(new_row, 0, item_title)
if row_span > 1:
new_row = self.rowCount() - row_span
self.setSpan(new_row, 0, row_span, 1)
def addTempStorageSpace(self, high_addr, low_addr):
""" Add empty temporary storage space to reach top of frame """
# Need temp storage space if stack pointer above topmost item
temp_space = (high_addr - low_addr) / 4
for i in range(0, temp_space):
self.insertRow(self.rowCount())
header = QtGui.QTableWidgetItem("")
self.setVerticalHeaderItem(self.rowCount() - 1, header)
if self.reverse:
curr_addr = hex(low_addr + 4 * (temp_space - 1 - i))
header.setToolTip(DOWN_CARET + str(curr_addr))
else:
curr_addr = hex(low_addr + 4 * i)
header.setToolTip(CARET + str(curr_addr))
# Display stack pointer at its address
if self.frame.stack_ptr == curr_addr:
header.setText(self.frame.architecture.stack_pointer)
def setBox(self):
""" Set address box to item element according to current mode """
if self.mode == ZOOM_MODE:
for i in range(0, self.rowSpan(self.frame.selected_row, 0)):
# Find row with widget in span
item = self.cellWidget(self.frame.selected_row - i, 0)
if item:
# Display item zoom value
self.addr_box.setText(item.statusTip())
return
# No item at address
self.addr_box.clear()
else:
addr = self.verticalHeaderItem(self.frame.selected_row).toolTip()
if self.mode == DECIMAL_MODE:
if self.reverse:
addr = DOWN_CARET + str(int(str(addr.replace(DOWN_CARET, "")), 16))
else:
addr = CARET + str(int(str(addr.replace(CARET, "")), 16))
self.addr_box.setText(addr)
def setMode(self, mode):
""" Set display mode to hexadecimal, decimal, or zoom value """
self.mode = mode
self.setBox()
|
[
"kmerrill27@gmail.com"
] |
kmerrill27@gmail.com
|
5738b6662c0ed99be5f2c01311a8d7ff2c983902
|
6e19e67ef89ee76f3df848110f82e1c8d02a5569
|
/apps/receitas/migrations/0005_auto_20210103_1251.py
|
7265c45430055899ae6e2cae067f85444da82e20
|
[] |
no_license
|
Joaopedromata/django-receitas-alura
|
3dce526adafb949bedf7c837b76a5b6dc534ba4b
|
3dcb978bce5096f183adc0ece6800d16dae1f91a
|
refs/heads/master
| 2023-02-13T02:46:15.347589
| 2021-01-04T01:15:53
| 2021-01-04T01:15:53
| 326,529,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
# Generated by Django 3.1.4 on 2021-01-03 15:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('receitas', '0004_receita_foto_receita'),
]
operations = [
migrations.AlterField(
model_name='receita',
name='pessoa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"joaopmata182@gmail.com"
] |
joaopmata182@gmail.com
|
30fb735a9eb6e13b3bea34b9baf2934a4a85bd8f
|
56e33c6d5560fa6cb34133f4c1f9e6beff9b6d98
|
/tongueTwisters.py
|
4373eacd934d2c7c1abe47105d90c98e22d53a5a
|
[] |
no_license
|
arellaEG/FF_task
|
b92d81b878142affc95a0bb1603dc18cdf0fb4b2
|
a8bf8563b44425a36d038cb450ec85bfa8b44d22
|
refs/heads/master
| 2020-03-29T07:59:36.600197
| 2019-04-10T18:30:01
| 2019-04-10T18:30:01
| 149,689,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,177
|
py
|
###################################
########fill in subject ID #######
subject =''
###################################
###################################
# presents four word from TT trial, one under the other, for participants to read
# little blue dot on the left serves as pacer - marking which word should
# be said; moving at a predetermined speed "pacerTempo"
import random
import csv
import sys
import numpy as np
from psychopy import visual, core, event, gui, microphone
from pyo import *
### R = tr, L = fl, x = ee, u = oo (see transLetter below). changed for programming purposes of keeping all words same length
units=['t','R','v','L','eb','xb','ig','ug']
onsets=['t','R','v','L']
rhymes=['eb','xb','ig','ug']
pacerTempo = .45 # speed of pacer
transLetter = {'R':'tr', 'L':'fl', 'x':'ee','u': 'oo'}
win = visual.Window([800, 500], fullscr=True,
color="white", units='pix')
breakText=visual.TextStim(win=win, height=40,
text="Please take a short break. Press 'c' to continue.",
color='black')
endText=visual.TextStim(win=win, height=40,
text="All Done! Please call the experimenter.",
color='black')
pacer= visual.Circle(win=win, radius = 20, fillColor='blue') # blue dot that marks which word should be spoken
fixationView = visual.Circle(win=win, radius = 40, fillColor='red', pos=(-250,310)) # red circle for viewing time
fixationCross= visual.ShapeStim(win, vertices=((0, -80), (0, 80), (0,0),
(80,0), (-80, 0)),
lineWidth=5, closeShape=False,
lineColor='grey') # used between trials
word1 = visual.TextStim(win=win,pos=(0,300), height = 60, color='black')
word2 = visual.TextStim(win=win,pos=(0,100), height = 60, color='black')
word3 = visual.TextStim(win=win,pos=(0,-100), height = 60, color='black')
word4 = visual.TextStim(win=win,pos=(0,-300), height = 60, color='black')
sep=','
import io
def importTrials(numTrials):
bTrial = open('FF_bw/TTstim.csv', 'rb')
colNames = bTrial.next().rstrip().split(sep)
reader = csv.DictReader(bTrial) # note: created but unused; rows are parsed by hand below
global trialsList
trialsList = []
for t in range(numTrials):
trialStr=bTrial.next().rstrip().split(sep)
assert len(trialStr) == len(colNames)
trialDict = dict(zip(colNames, trialStr))
trialsList.append(trialDict)
importTrials(32)
random.shuffle(trialsList)
trialsList[0] # just looking
headers=["trialNum", "trialType", "itemID", "rep", "wordInd", "curWord"]
def write4():
word1.draw()
word2.draw()
word3.draw()
word4.draw()
# define expected keys per word
with open(subject+'_TTwb.txt','wb') as resultsFile:
Rwriter = csv.DictWriter(resultsFile, fieldnames=headers, delimiter='\t') # tab-delimited, matching the tab-joined data rows below
Rwriter.writeheader()
core.wait(2)
breakTime=core.Clock()
trialNum=0
for trial in trialsList:
trialNum+=1
fixationCross.draw()
win.flip()
core.wait(1)
t = trial['fullTrial'].split()
w1,w2,w3,w4=t
word1.setText(w1)
word2.setText(w2)
word3.setText(w3)
word4.setText(w4)
write4()
fixationView.draw() # fixation to allow brief viewing - 2 sec of big red circle
win.flip()
core.wait(2)
write4()
win.flip()
core.wait(.4) # big red circle disappears, nothing on screen for 1 sec ;
# then small blue circle appears and participants must begin
for rep in range(1,4):
wordInd=0 # index of word within trial (first word, second...)
write4()
pacer.pos = (-250,300)
pacer.draw()
win.flip()
for curWord in trial['fullTrial'].split():
wordInd += 1
write4()
pacer.draw()
win.flip()
reactionTime=core.Clock()
pacerTime=core.Clock()
core.wait(pacerTempo-(pacerTime.getTime())) # wait full time even if participant answered before time's up
string=[str(var) for var in trialNum, trial['type'], trial['ID'],
rep, wordInd, curWord]
print string
line='\t'.join(string) + '\n'
resultsFile.write(line)
resultsFile.flush()
pacer.pos -=(0,200)
fixationCross.draw()
win.flip()
core.wait(.5)
if int(breakTime.getTime())>10:
breakClick=False
while not breakClick:
breakText.draw()
win.flip()
stop = event.waitKeys(keyList=['c','q']) # keyList must be passed by name; the first positional arg is maxWait
if stop==['c']:
breakTime.reset()
breakClick=True
elif stop==['q']:
win.close()
core.quit()
endText.draw()
resultsFile.close()
win.flip()
core.wait(5)
win.close()
core.quit()
|
[
"noreply@github.com"
] |
arellaEG.noreply@github.com
|
f63ee1caa12511f2fa9f55c8264c8e31a31988b8
|
a66faa3e7b981b9126557c4b5994fbd9d76777c6
|
/Example Savings Accounts in Bank.py
|
55b9d4524dabfd9d803c35bf0d2a3970aacea3db
|
[] |
no_license
|
trongdeptrai21/b-i-t-p-chapter-9
|
bfc8b053396c09064fa87be5de9186f1df8b4304
|
40fc194bd26af3921883577776feb79ab519fa2d
|
refs/heads/main
| 2023-08-29T21:35:33.427437
| 2021-11-02T11:43:45
| 2021-11-02T11:43:45
| 423,042,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,233
|
py
|
"""
Author: Le Trong
Date: 30/10/21
File: savingsaccount.py
This module defines the SavingsAccount class.
"""
class SavingsAccount(object):
"""This class represents a savings account
with the owner's name, PIN, and balance."""
RATE = 0.02 # Single rate for all accounts
def __init__(self, name, pin, balance=0.0):
self.name = name
self.pin = pin
self.balance = balance
def __str__(self):
"""Returns the string rep."""
result = 'Name: ' + self.name + '\n'
result += 'PIN: ' + self.pin + '\n'
result += 'Balance: ' + str(self.balance)
return result
def getBalance(self):
"""Returns the current balance."""
return self.balance
def getName(self):
"""Returns the current name."""
return self.name
def getPin(self):
"""Returns the current pin."""
return self.pin
def deposit(self, amount):
"""Deposits the given amount and returns None."""
self.balance += amount
return None
def withdraw(self, amount):
"""Withdraws the given amount. Returns None if successful, or an
error message if unsuccessful."""
if amount < 0:
return "Amount must be >= 0"
elif self.balance < amount:
return "Insufficient funds"
else:
self.balance -= amount
return None
def computeInterest(self):
"""Computes, deposits, and returns the interest."""
interest = self.balance * SavingsAccount.RATE
self.deposit(interest)
return interest
class Bank(object):
def __init__(self):
self.accounts = {}
def __str__(self):
"""Return the string rep of the entire bank."""
return '\n'.join(map(str, self.accounts.values()))
def makeKey(self, name, pin):
"""Makes and returns a key from name and pin."""
return name + "/" + pin
def add(self, account):
"""Inserts an account with name and pin as a key."""
key = self.makeKey(account.getName(),
account.getPin())
self.accounts[key] = account
def remove(self, name, pin):
"""Removes an account with name and pin as a key."""
key = self.makeKey(name, pin)
return self.accounts.pop(key, None)
def get(self, name, pin):
"""Returns an account with name and pin as a key
or None if not found."""
key = self.makeKey(name, pin)
return self.accounts.get(key, None)
def computeInterest(self):
"""Computes interest for each account and
returns the total."""
total = 0.0
for account in self.accounts.values():
total += account.computeInterest()
return total
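# A minimal usage sketch (demo_account is an added helper, not part of the
# original module), assuming the 2% RATE defined above; the commented values
# follow directly from the methods.
def demo_account():
    acct = SavingsAccount("Barney", "1003", 500.00)
    acct.deposit(250.00)              # balance is now 750.00
    print(acct.withdraw(1000.00))     # -> "Insufficient funds"
    print(acct.withdraw(-5.00))       # -> "Amount must be >= 0"
    print(acct.computeInterest())     # deposits and returns 15.0 (750 * 0.02)
    print(acct.getBalance())          # -> 765.0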
def test_Bank():
bank = Bank()
bank.add(SavingsAccount("Wilma", "1001", 4000.00))
bank.add(SavingsAccount("Fred", "1002", 1000.00))
print(bank)
print("bank.computeInterest:", bank.computeInterest())
if __name__ == '__main__':
test_Bank()
|
[
"noreply@github.com"
] |
trongdeptrai21.noreply@github.com
|
4afe586e461a753dc3699f4cf8b1439afc93c45f
|
ef74152018a5d0e16c8f30c53b64ed809f4e8113
|
/pygame_ping_pong.py
|
219641a49685275002186178f4113a2bfff3706a
|
[] |
no_license
|
abelyakoff/python-ping-pong
|
43effef6fb3b91b95d991698b0e89783d0d7e7eb
|
93cb668c92ae36eca1ddd6907599bca4d1c069b5
|
refs/heads/main
| 2023-04-06T05:18:32.256730
| 2021-04-12T09:05:32
| 2021-04-12T09:05:32
| 357,127,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,426
|
py
|
# Ping-Pong
# By Alexander Belyakov, alexander.belyakov@gmail.com
import pygame, random, sys
from pygame.locals import *
FPS = 30
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
PADDLEWIDTH = 10
PADDLEHEIGHT = 60
BALLSIZE = 10
MISSILESIZE = 10
NETHEIGHT = WINDOWHEIGHT
NETWIDTH = 10
PADDLESPEED = 5
MISSILESPEED = 10
VICTORYGOALS = 9
BLACK = (0, 0, 0 )
WHITE = (255, 255, 255)
ORANGE = (255, 153, 0 )
BACKGROUNDCOLOR = BLACK
LEFTPLAYERCOLOR = WHITE
RIGHTPLAYERCOLOR = WHITE
BALLCOLOR = WHITE
NETCOLOR = WHITE
TEXTCOLOR = WHITE
MISSILECOLOR = ORANGE
def terminate():
pygame.quit()
sys.exit()
def waitForKeyPress():
while True:
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == KEYUP:
if event.key == K_ESCAPE:
terminate()
if event.key == K_RETURN:
return
def showTitleScreen():
showText = True
while True:
DISPLAYSURF.fill(BACKGROUNDCOLOR)
x0 = WINDOWWIDTH / 2 - 115 # "PING PONG" is 230 pixels wide
y0 = WINDOWHEIGHT / 2 - 100 # "PING PONG" is 150 pixels high + "PRESS ENTER TO START"
drawLetter("P", DISPLAYSURF, TEXTCOLOR, x0 + 20, y0)
drawLetter("I", DISPLAYSURF, TEXTCOLOR, x0 + 80, y0)
drawLetter("N", DISPLAYSURF, TEXTCOLOR, x0 + 100, y0)
drawLetter("G", DISPLAYSURF, TEXTCOLOR, x0 + 160, y0)
drawLetter("P", DISPLAYSURF, TEXTCOLOR, x0, y0 + 80)
drawLetter("O", DISPLAYSURF, TEXTCOLOR, x0 + 60, y0 + 80)
drawLetter("N", DISPLAYSURF, TEXTCOLOR, x0 + 120, y0 + 80)
drawLetter("G", DISPLAYSURF, TEXTCOLOR, x0 + 180, y0 + 80)
if showText == True:
drawText("PRESS ENTER TO START", font, TEXTCOLOR, DISPLAYSURF, x0 + 15, y0 + 180)
showText = not showText
drawText("Alexander Belyakov, 2014", smallFont, TEXTCOLOR, DISPLAYSURF, x0 + 40, WINDOWHEIGHT - 20)
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == KEYUP:
if event.key == K_ESCAPE:
terminate()
if event.key == K_RETURN:
return
FPSCLOCK.tick(2)
def drawLetter(letter, surface, color, x, y):
if letter == "P":
pygame.draw.rect(surface, color, (x, y, 10, 70))
pygame.draw.rect(surface, color, (x + 10, y, 40, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 30))
pygame.draw.rect(surface, color, (x + 10, y + 30, 30, 10))
elif letter == "I" or letter == "1":
pygame.draw.rect(surface, color, (x, y, 10, 70))
elif letter == "N":
pygame.draw.rect(surface, color, (x, y, 10, 70))
pygame.draw.polygon(surface, color, ((x + 10, y), (x + 10, y + 20), (x + 40, y + 69), (x + 40, y + 49)))
pygame.draw.rect(surface, color, (x + 40, y, 10, 70))
elif letter == "G":
pygame.draw.rect(surface, color, (x, y, 10, 70))
pygame.draw.rect(surface, color, (x + 10, y, 40, 10))
pygame.draw.rect(surface, color, (x + 10, y + 60, 40, 10))
pygame.draw.rect(surface, color, (x + 40, y + 30, 10, 30))
pygame.draw.rect(surface, color, (x + 20, y + 30, 20, 10))
elif letter == "O" or letter == "0":
pygame.draw.rect(surface, color, (x, y, 10, 70))
pygame.draw.rect(surface, color, (x + 10, y, 40, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 10, y + 60, 30, 10))
elif letter == "2":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 20))
pygame.draw.rect(surface, color, (x, y + 30, 50, 10))
pygame.draw.rect(surface, color, (x, y + 40, 10, 20))
pygame.draw.rect(surface, color, (x, y + 60, 50, 10))
elif letter == "3":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 10, y + 30, 40, 10))
pygame.draw.rect(surface, color, (x, y + 60, 50, 10))
elif letter == "4":
pygame.draw.rect(surface, color, (x, y, 10, 40))
pygame.draw.rect(surface, color, (x + 40, y, 10, 70))
pygame.draw.rect(surface, color, (x + 10, y + 30, 30, 10))
elif letter == "S" or letter == "5":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x, y + 10, 10, 20))
pygame.draw.rect(surface, color, (x, y + 30, 50, 10))
pygame.draw.rect(surface, color, (x + 40, y + 40, 10, 20))
pygame.draw.rect(surface, color, (x, y + 60, 50, 10))
elif letter == "6":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 10, y + 30, 40, 10))
pygame.draw.rect(surface, color, (x + 40, y + 40, 10, 30))
pygame.draw.rect(surface, color, (x + 10, y + 60, 40, 10))
elif letter == "7":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 20, y + 30, 30, 10))
elif letter == "8":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 10, y + 30, 30, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 10, y + 60, 40, 10))
elif letter == "9":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x, y + 10, 10, 30))
pygame.draw.rect(surface, color, (x + 10, y + 30, 30, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x, y + 60, 50, 10))
elif letter == "A":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 10, y + 30, 30, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 60))
elif letter == "M":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 20, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 60))
elif letter == "E":
pygame.draw.rect(surface, color, (x, y, 50, 10))
pygame.draw.rect(surface, color, (x, y + 10, 10, 60))
pygame.draw.rect(surface, color, (x + 10, y + 30, 30, 10))
pygame.draw.rect(surface, color, (x, y + 60, 50, 10))
elif letter == "V":
pygame.draw.rect(surface, color, (x, y, 10, 70))
pygame.draw.polygon(surface, color, ((x + 10, y + 50), (x + 10, y + 69), (x + 50, y), (x + 40, y)))
elif letter == "R":
pygame.draw.rect(surface, color, (x, y, 10, 70))
pygame.draw.rect(surface, color, (x + 10, y, 40, 10))
pygame.draw.rect(surface, color, (x + 40, y + 10, 10, 30))
pygame.draw.rect(surface, color, (x + 10, y + 30, 30, 10))
pygame.draw.polygon(surface, color, ((x + 30, y + 40), (x + 40, y + 70), (x + 50, y + 70), (x + 40, y + 40)))
return
def drawText(text, font, color, surface, x, y):
textobj = font.render(text, 1, color)
textrect = textobj.get_rect()
textrect.topleft = (x, y)
surface.blit(textobj, textrect)
def moveBall():
ball["rect"] = pygame.Rect (ball["rect"].left + ball["vel"][0] * ballSpeed, ball["rect"].top + ball["vel"][1] * ballSpeed, BALLSIZE, BALLSIZE)
def hasBallHitWall():
if ball["rect"].top < 0:
ball["rect"].top = 0
ball["vel"][1] = -(ball["vel"][1])
if ball["rect"].bottom > WINDOWHEIGHT:
ball["rect"].bottom = WINDOWHEIGHT
ball["vel"][1] = -(ball["vel"][1])
def showGameScreen(leftScore, rightScore):
DISPLAYSURF.fill(BACKGROUNDCOLOR)
pygame.draw.rect(DISPLAYSURF, NETCOLOR, (WINDOWWIDTH / 2 - NETWIDTH / 2, 0, NETWIDTH, NETHEIGHT)) # Net
if leftScore == 1:
drawLetter(str(leftScore), DISPLAYSURF, LEFTPLAYERCOLOR, WINDOWWIDTH / 2 - NETWIDTH / 2 - 30, 20) # Left Score
else:
drawLetter(str(leftScore), DISPLAYSURF, LEFTPLAYERCOLOR, WINDOWWIDTH / 2 - NETWIDTH / 2 - 70, 20) # Left Score
drawLetter(str(rightScore), DISPLAYSURF, RIGHTPLAYERCOLOR, WINDOWWIDTH / 2 + NETWIDTH / 2 + 20, 20) # Right Score
pygame.draw.rect(DISPLAYSURF, BALLCOLOR, (ball["rect"].left, ball["rect"].top, BALLSIZE, BALLSIZE)) # Ball
pygame.draw.rect(DISPLAYSURF, LEFTPLAYERCOLOR, (leftPlayerPaddle.left, leftPlayerPaddle.top, PADDLEWIDTH, PADDLEHEIGHT)) # Left Paddle
pygame.draw.rect(DISPLAYSURF, RIGHTPLAYERCOLOR, (rightPlayerPaddle.left, rightPlayerPaddle.top, PADDLEWIDTH, PADDLEHEIGHT)) # Right Paddle
# drawText(str(leftPlayerMissile), smallFont, TEXTCOLOR, DISPLAYSURF, 20, 20)
pygame.display.update()
def getPlayerInput():
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
terminate()
if event.type == KEYDOWN:
if event.key == ord("w"):
paddleMoves["leftUp"] = True
paddleMoves["leftDown"] = False
if event.key == ord("s"):
paddleMoves["leftDown"] = True
paddleMoves["leftUp"] = False
if event.key == K_UP:
paddleMoves["rightUp"] = True
paddleMoves["rightDown"] = False
if event.key == K_DOWN:
paddleMoves["rightDown"] = True
paddleMoves["rightUp"] = False
if event.type == KEYUP:
if event.key == ord("w"):
paddleMoves["leftUp"] = False
if event.key == ord("s"):
paddleMoves["leftDown"] = False
if event.key == K_UP:
paddleMoves["rightUp"] = False
if event.key == K_DOWN:
paddleMoves["rightDown"] = False
if event.key == ord("m"):
if musicOn[0]:
musicOn[0] = False
pygame.mixer.music.pause()
elif not musicOn[0]:
musicOn[0] = True
pygame.mixer.music.unpause()
def isMusicSwitched(musicOn): # note: unused helper; the 'm' key toggle is handled in getPlayerInput()
for event in pygame.event.get():
if event.type == KEYUP:
if event.key == ord("m"):
if musicOn:
musicOn = False
pygame.mixer.music.pause()
elif not musicOn:
musicOn = True
pygame.mixer.music.unpause()
return musicOn
def hasPaddleHitWall(paddle):
if paddle.top < 0: paddle.top = 0
if paddle.bottom > WINDOWHEIGHT: paddle.bottom = WINDOWHEIGHT
def hasBallHitPaddle(paddle):
if ball["rect"].colliderect(paddle):
ball["vel"][0] = -(ball["vel"][0])
hitSound.play()
if paddle.left == 0:
ball["rect"].left = PADDLEWIDTH + 1
if paddle.right == WINDOWWIDTH:
ball["rect"].right = WINDOWWIDTH - PADDLEWIDTH - 1
def movePaddles():
if paddleMoves["leftUp"] == True: leftPlayerPaddle.top -= PADDLESPEED
if paddleMoves["leftDown"] == True: leftPlayerPaddle.top += PADDLESPEED
if paddleMoves["rightUp"] == True: rightPlayerPaddle.top -= PADDLESPEED
if paddleMoves["rightDown"] == True: rightPlayerPaddle.top += PADDLESPEED
def hasPlayerScored():
if ball["rect"].left < 0: return "right"
elif ball["rect"].right > WINDOWWIDTH: return "left"
else: return ""
def showResults(leftScore, rightScore):
x0 = WINDOWWIDTH / 2 - 115 # "GAME OVER" is 230 pixels wide
y0 = WINDOWHEIGHT / 2 - 75 # "GAME OVER" is 150 pixels high
pygame.draw.rect(DISPLAYSURF, BACKGROUNDCOLOR, (x0 - 20, y0 - 20, 270, 190))
drawLetter("G", DISPLAYSURF, TEXTCOLOR, x0, y0)
drawLetter("A", DISPLAYSURF, TEXTCOLOR, x0 + 60, y0)
drawLetter("M", DISPLAYSURF, TEXTCOLOR, x0 + 120, y0)
drawLetter("E", DISPLAYSURF, TEXTCOLOR, x0 + 180, y0)
drawLetter("O", DISPLAYSURF, TEXTCOLOR, x0, y0 + 80)
drawLetter("V", DISPLAYSURF, TEXTCOLOR, x0 + 60, y0 + 80)
drawLetter("E", DISPLAYSURF, TEXTCOLOR, x0 + 120, y0 + 80)
drawLetter("R", DISPLAYSURF, TEXTCOLOR, x0 + 180, y0 + 80)
pygame.display.update()
# Game Starts Here
pygame.mixer.pre_init(frequency=22050, size=-16, channels=1, buffer=512)
pygame.mixer.init(frequency=22050, size=-16, channels=1, buffer=512)
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), pygame.FULLSCREEN)
pygame.display.set_caption("Ping-Pong")
pygame.mouse.set_visible(False)
font = pygame.font.SysFont(None, 24)
smallFont = pygame.font.SysFont(None, 18)
hitSound = pygame.mixer.Sound("hit.wav")
hitSound.set_volume(0.4)
missSound = pygame.mixer.Sound("miss.wav")
missSound.set_volume(0.2)
gameOverSound = pygame.mixer.Sound("gameover2.wav")
pygame.mixer.music.load("ping-pong-music.wav")
pygame.mixer.music.set_volume(0.5)
# Loop for entire program
while True:
leftScore = 0
rightScore = 0
leftPlayerPaddle = pygame.Rect(0, ((WINDOWHEIGHT / 2) - (PADDLEHEIGHT / 2)), PADDLEWIDTH, PADDLEHEIGHT)
rightPlayerPaddle = pygame.Rect((WINDOWWIDTH - PADDLEWIDTH), ((WINDOWHEIGHT / 2) - (PADDLEHEIGHT / 2)), PADDLEWIDTH, PADDLEHEIGHT)
paddleMoves = {"leftUp":False, "leftDown":False, "rightUp":False, "rightDown":False}
gameOverSound.play()
showTitleScreen()
musicOn = [True] # one-element list so getPlayerInput() can toggle the flag in place
pygame.mixer.music.play(-1, 0.0)
# Loop for one game
while True:
if leftScore == VICTORYGOALS or rightScore == VICTORYGOALS:
showGameScreen(leftScore, rightScore)
FPSCLOCK.tick(2)
break
timeCounter = 0
ballSpeed = 4
ball = {"rect":pygame.Rect(((WINDOWWIDTH / 2) - (BALLSIZE / 2)), ((WINDOWHEIGHT / 2) - (BALLSIZE / 2)), BALLSIZE, BALLSIZE), "vel":[]}
ball["vel"] = [random.choice([-1, 1]), random.choice([-1, 1])]
# Loop for one score
while True:
getPlayerInput() # Check for button presses
movePaddles() # Move paddles
hasPaddleHitWall(leftPlayerPaddle) # If paddle hits wall, don't move further
hasPaddleHitWall(rightPlayerPaddle) # If paddle hits wall, don't move further
moveBall() # Move ball
hasBallHitWall() # If ball hits wall, change vertical velocity to opposite
hasBallHitPaddle(leftPlayerPaddle) # If ball hits paddle, change horizontal velocity to opposite
hasBallHitPaddle(rightPlayerPaddle) # If ball hits paddle, change horizontal velocity to opposite
showGameScreen(leftScore, rightScore) # Draw paddles, ball, net and scores
if hasPlayerScored() == "left":
leftScore += 1
missSound.play()
break
if hasPlayerScored() == "right":
rightScore += 1
missSound.play()
break
# Ball gradually speeds up: the interval between steps grows by two seconds
# each time (2s, 4s, 6s, ... 12s) until the speed caps at 10
if ballSpeed < 10:
timeCounter += 1
if timeCounter == FPS * 2 * (ballSpeed - 3):
timeCounter = 0
ballSpeed += 1
FPSCLOCK.tick(FPS)
showResults(leftScore, rightScore)
pygame.mixer.music.stop()
gameOverSound.play()
waitForKeyPress()
|
[
"agnostyx@mail.ru"
] |
agnostyx@mail.ru
|
20f7641bd7b51f3a84fc5532be73c1e339797206
|
f39528e9bad8cfa78b38fcbb7a5b430ac0c7a942
|
/Heavy_Neutrino/test2/HeavyNeutrino_trilepton_M-1_V-0.212367605816_e_massiveAndCKM_LO.py
|
997415941cd6fce0c48dc4a8fc8f8f7aeb7fd610
|
[] |
no_license
|
isildakbora/EXO-MC-REQUESTS
|
c0e3eb3a49b516476d37aa464c47304df14bed1e
|
8771e32bbec079de787f7e5f11407e9e7ebe35d8
|
refs/heads/master
| 2021-04-12T11:11:03.982564
| 2019-04-29T15:12:34
| 2019-04-29T15:12:34
| 126,622,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
import FWCore.ParameterSet.Config as cms
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.4.2/exo_heavyNeutrino/displaced_2017/v1/HeavyNeutrino_trilepton_M-1_V-0.212367605816_e_massiveAndCKM_LO_slc6_amd64_gcc481_CMSSW_7_1_30_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
)
)
)
|
[
"bora.isildak@cern.ch"
] |
bora.isildak@cern.ch
|
5327b337520db1598892321e98f500d4cb28f8a7
|
46880f89491164035a3c236e714e0d14ab76dd10
|
/sendingemail.py
|
e71739ec5bffa15286729680a28330a01eca5da2
|
[] |
no_license
|
Namansaraswat7/Raspberry-pi-projects
|
6a16f2586b937fed3f17cb3e46d4d996132ca9a0
|
881510a72c5ae40757385be70418ed247503e33f
|
refs/heads/master
| 2020-03-20T04:26:48.778931
| 2018-06-13T11:05:42
| 2018-06-13T11:05:42
| 137,183,045
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
import smtplib
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
server.login("your email id", "your password")
msg = "Hello user, this mail was sent to you using Python!"
server.sendmail("sender email id", "receiver email id", msg)
server.quit()
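# Note: Gmail may reject a plain username/password login like the one above;
# an app-specific password (with 2-step verification enabled) is usually
# required for smtplib access.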
|
[
"noreply@github.com"
] |
Namansaraswat7.noreply@github.com
|
28aec89ef145bd2b44e42597ab1610f90746b67a
|
90cef34f7f3159fe9a547ceaac6f13a2f9442df7
|
/dockPlat/Platform/admin.py
|
dbc427f1b5d4ec4a570c382870a95ee4fb3c34df
|
[] |
no_license
|
gunkiratk/SA-Docker
|
bf76540b5dc9b9ea6bea45b49754dab5eb450706
|
650c2100dc4c5d8e015b803cece8b31c62f4a6d9
|
refs/heads/master
| 2021-06-12T10:47:33.462938
| 2016-11-28T18:58:05
| 2016-11-28T18:58:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
from django.contrib import admin
from .models import Container
admin.site.register(Container)
|
[
"kushagra15049@iiitd.ac.in"
] |
kushagra15049@iiitd.ac.in
|
c894c3fd43449cd06c5cd1e09b145d312fcf2112
|
2b030cb44b0537b1b34de6951656d612edb4a22c
|
/site_packages/BNL/c4.py
|
f1053e0f2cfac5a437cbed907d359b2c25ca3b24
|
[
"BSD-3-Clause"
] |
permissive
|
alhajri/FUDGE
|
2c389912addc28ddde51cf7ba455164e47574c89
|
9566131c37b45fc37f5f8ad07903264864575b6e
|
refs/heads/master
| 2021-08-31T20:39:25.834146
| 2017-12-22T19:50:56
| 2017-12-22T19:50:56
| 115,145,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,906
|
py
|
from collections import namedtuple
import unittest, copy
# ------------------------------------------------
# Simple C4 containers
# ------------------------------------------------
C4Point = namedtuple( 'C4Point', 'projectile target targetMetastableState MF MT productMetastableState status cmFlag energy dEnergy data dData cosMuOrLegendreOrder dCosMuOrLegendreOrder eLevelOrHalflife dELevelOrHalflife idOf78 reference exforEntry exforSubEntry multiDimFlag' )
C5Covariance = namedtuple( 'C5Covariance', 'covariance comment algorithm covarData' ) # unused
C4DataSet = namedtuple( 'C4DataSet', 'dataSet date reaction projectile target MF MT c4Begin numData data' )
C5DataSet = namedtuple( 'C4DataSet', 'dataSet date reaction projectile target MF MT product c4Begin numData data' ) # unused
C4Entry = namedtuple( 'C4Entry', 'entry author1 year institute title authors refCode reference numDataSets dataSets' )
# ------------------------------------------------
# General purpose parsers
# ------------------------------------------------
def emptyStringToNone( s ):
if s.strip() == '': return None
return s
def NoneToEmptyString( n ):
if n == None: return " "
return n
def readFunkyFloat( value ):
value = emptyStringToNone( value )
if value == None: return
try :
f = float( value )
except :
value = value.replace( ' ', '' )
i = value.find( '.' )
if( i != -1 ) :
j = value[i:].find( '+' )
if( j == -1 ) : j = value[i:].find( '-' )
if( j == -1 ) : raise ValueError( "Float value '%s' is not funky enough" % ( value, ) )
value = value[:i+j] + 'e' + value[i+j:]
try :
f = float( value )
except :
if( value == len( value ) * ' ' ) : return( 0 )
raise ValueError( 'Could not convert value "%s"' % ( value ) )
return( f )
def writeFunkyFloat( f, width=11, significant_digits=None ):
'''
Write one of Red's Funky Floats...
Anatomy of a funky float:
Consider the number 3124.5611 written to a field with width=10
' 3.12456+3'
'_X.XXXXX+E'
where '_' is either a space (' ') or a minus sign ('-'),
'X.XXXXX' is the number itself, in exponential notation, with exponent 'E'.
The floating point 'E' is dropped, saving a character.
'''
if f == None: return width*' '
# compute the number of sig figs the user "really" wants
if significant_digits == None or significant_digits > width-1: significant_digits=width-1
# compute the possible output formats
g_frmt='%'+str(width)+'.'+str(significant_digits)+'g'
e_frmt='%'+str(width)+'.'+str(significant_digits-4)+'e'
# print g_frmt, e_frmt
# try with the 'g' format, we may get lucky
s = (g_frmt) % round(f,significant_digits)
# OK, number too big, so must use explicit exponential notation, but with E-less representation
if 'e' in s or len(s) > width:
s = (e_frmt) % round(f,significant_digits)
s = s.replace( 'e', '' )
# Check to make sure the number isn't too long
if len(s) > width: print 'too big, len(%s)=%i>%i'%(s,len(s),width)
return s.strip().ljust(width)
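# A quick round-trip sanity check; _funkyFloatDemo is an added helper, not
# part of the original module. The expected strings mirror TestFunkyFloats
# below, so they are known-good for the default width of 11.
def _funkyFloatDemo():
    assert writeFunkyFloat( 3.14159e13 ) == '3.141590+13'  # the exponent's 'e' is dropped
    assert readFunkyFloat( '3.141590+13' ) == 3.14159e13   # and parsed back intact
    assert writeFunkyFloat( 51450.00 ) == '51450      '    # plain 'g' format fits, so no exponent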
# ------------------------------------------------
# C4 Parsers
# ------------------------------------------------
def readC4File( fList, asPointList = True ):
if asPointList: return map( readC4Point, filter( lambda x: not ( x.startswith( '#' ) or x.strip()=='' ), fList ) )
newList = []
for line in fList:
if line.strip() in [ '', '#' ]: pass
elif line.startswith( '#ENTRY' ): newList.append( [ line ] )
else: newList[-1].append( line )
return map( readC4Entry, newList )
def readC4Entry( fList ):
'''
An example header taken from a c4 file:
#ENTRY 40617
#AUTHOR1 M.V.Pasechnik+
#YEAR 1980
#INSTITUTE (4CCPIJI)
#TITLE TOTAL NEUTRON CROSS-SECTIONS FOR MOLYBDENUM
#+ AND ZYRCONIUM AT LOW ENERGIES
#AUTHOR(S) M.V.Pasechnik, M.B.Fedorov, V.D.Ovdienko,
#+ G.A.Smetanin, T.I.Jakovenko
#REF-CODE (C,80KIEV,1,304,8009)
#REFERENCE Conf. 5.All Union Conf.on Neutron Phys.,Kiev,15-19 Sep 1980
#+ Vol.1, p.304, 1980
#DATASETS 7
#
#DATASET 40617007
#DATE 19850305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 4
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
#/DATA 4
#/DATASET
#/ENTRY
'''
entry = ''
author1 = 'AUTHOR1'
year = 0
institute = ''
title = ''
authors = ''
refCode = ''
reference = ''
numDataSets = 0
dataSets = []
flist = copy.copy( fList )
line = flist.pop(0)
while flist:
if line.startswith("#ENTRY"): entry = line[12:].strip()
elif line.startswith("#AUTHOR1"): author1 = line[12:].strip()
elif line.startswith("#YEAR"): year = int( line[12:].strip() )
elif line.startswith("#INSTITUTE"): institute = line[12:].strip()
elif line.startswith("#TITLE"):
title = line[12:].strip()
while flist[0].startswith( '#+' ):
line = flist.pop( 0 )
title += ' ' + line[12:].strip()
elif line.startswith("#AUTHOR(S)"):
authors = line[12:].strip()
while flist[0].startswith( '#+' ):
line = flist.pop( 0 )
authors += ' ' + line[12:].strip()
elif line.startswith("#REF-CODE"): refCode = line[12:].strip()
elif line.startswith("#REFERENCE"):
reference = line[12:].strip()
while flist[0].startswith( '#+' ):
line = flist.pop( 0 )
reference += ' ' + line[12:].strip()
elif line.startswith("#DATASETS"): numDataSets = int( line[12:].strip() )
elif line.startswith("#DATASET"):
sublist = [ line ]
while not flist[0].startswith( '#/DATASET' ):
sublist.append( flist.pop( 0 ) )
sublist.append( flist.pop(0) )
dataSets.append( readC4DataSet( sublist ) )
elif line.strip() == "#": pass
elif line.startswith( "#/ENTRY" ): pass
else: pass
line = flist.pop(0)
return C4Entry(
entry = entry,
author1 = author1,
year = year,
institute = institute,
title = title,
authors = authors,
refCode = refCode,
reference = reference,
numDataSets = numDataSets,
dataSets = dataSets)
def readC4DataSet( fList ):
'''
An example dataset header taken from a c4 file:
#DATASET 40617007
#DATE 19850305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 4
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
#/DATA 4
#/DATASET
'''
dataSet = ''
date = ''
reaction = ''
projectile = 0
target = 0
MF = 0
MT = 0
c4Begin = ''
numData = 0
data = []
flist = copy.copy( fList )
line = flist.pop(0)
while flist:
if line.startswith("#DATASET"): dataSet = line[12:].strip()
elif line.startswith("#DATE"): date = line[12:].strip()
elif line.startswith("#REACTION"): reaction = line[12:].strip()
elif line.startswith("#PROJ"): projectile = int( line[12:].strip() )
elif line.startswith("#TARG"): target = int( line[12:].strip() )
elif line.startswith("#MF"): MF = int( line[12:].strip() )
elif line.startswith("#MT"): MT = int( line[12:].strip() )
elif line.startswith("#C4BEGIN"): c4Begin = line[12:].strip()
elif line.startswith("#DATA"):
numData = int( line[12:].strip() )
line = flist.pop(0)
line = flist.pop(0)
line = flist.pop(0)
while not line.startswith( "#/DATA" ):
data.append( readC4Point( line ) )
line = flist.pop(0)
elif line.strip() == "#": pass
elif line.startswith( "#/DATASET" ): pass
else: pass
line = flist.pop(0)
return C4DataSet( dataSet=dataSet, date=date, reaction=reaction, projectile=projectile, target=target, MF=MF, MT=MT, c4Begin=c4Begin, numData=numData, data=data )
def writeC4Point( pt ):
return \
str(pt.projectile).rjust(5) + \
str(pt.target).rjust(6) + \
str(NoneToEmptyString(pt.targetMetastableState))[0] + \
str(pt.MF).rjust(3) + \
str(pt.MT).rjust(4) + \
str(NoneToEmptyString(pt.productMetastableState))[0] + \
str(NoneToEmptyString(pt.status))[0] + \
str(NoneToEmptyString(pt.cmFlag))[0] + \
writeFunkyFloat(pt.energy,9) + \
writeFunkyFloat(pt.dEnergy,9) + \
writeFunkyFloat(pt.data,9) + \
writeFunkyFloat(pt.dData,9) + \
writeFunkyFloat(pt.cosMuOrLegendreOrder,9) + \
writeFunkyFloat(pt.dCosMuOrLegendreOrder,9) + \
writeFunkyFloat(pt.eLevelOrHalflife,9) + \
writeFunkyFloat(pt.dELevelOrHalflife,9) + \
str(NoneToEmptyString(pt.idOf78)).rjust(3) + \
str(pt.reference).ljust(25) + \
str(pt.exforEntry).rjust(4) + \
str(pt.exforSubEntry).rjust(3) + \
str(NoneToEmptyString(pt.multiDimFlag))
def readC4Point( fline ):
'''
Here is an example taken from a c4 file of a list of points:
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 504400.0 4961.493 8.146000 0.404200 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 506500.0 4992.510 8.027000 0.557100 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 508600.0 5023.591 7.656000 0.276500 L.GREEN,ET.AL. (73) 10225 20
.
.
.
Note, the PXC field is really 3 one character fields. From the x4toc4 manual:
Columns Description
------- -----------
1- 5 Projectile ZA (e.g. neutron =1, proton =1001)
(defined by reaction dictionary).
6- 11 Target ZA (e.g. 26-Fe-56 = 26056)
(defined by EXFOR reaction).
12 Target metastable state (e.g. 26-FE-56m = M)
(defined by EXFOR reaction).
13- 15 MF (ENDF conventions, plus additions)
(defined by reaction dictionary).
16- 19 MT (ENDF conventions, plus additions)
(defined by reaction dictionary).
20 Product metastable state (e.g. 26-FE-56M = M)
(defined by EXFOR reaction).
21 EXFOR status
(defined by EXFOR keyword status).
22 Center-of-mass flag (C=center-of-mass, blank=lab)
(defined by EXFOR title dictionary).
23- 94 8 data fields (each in E9.3 format defined below)
(defined by MF and title dictionary).
95- 97 Identification of data fields 7 and 8
(e.g., LVL=level, HL=half-life, etc.).
For a complete list of codes see title dictionary
(defined by MF and title dictionary).
98-122 Reference (first author and year)
(defined by EXFOR keywords title and reference).
123-127 EXFOR accession number
(defined by EXFOR format).
128-130 EXFOR sub-accession number
(defined by EXFOR format).
131 Multi-dimension table flag
(defined by EXFOR keyword reaction or common fields).
'''
try: multiDimFlag = emptyStringToNone( fline[130] )
except IndexError: multiDimFlag = None
return C4Point(
projectile = int(fline[0:5]),
target = int(fline[5:11]),
targetMetastableState = emptyStringToNone( fline[11] ),
MF = int(fline[12:15]), # columns 13-15 per the layout above; fline[11] is the metastable flag
MT = int(fline[15:19]),
productMetastableState = emptyStringToNone( fline[19] ),
status = emptyStringToNone( fline[20] ),
cmFlag = emptyStringToNone( fline[21] ),
energy = readFunkyFloat( fline[22:31] ),
dEnergy = readFunkyFloat( fline[31:40] ),
data = readFunkyFloat( fline[40:49] ),
dData = readFunkyFloat( fline[49:58] ),
cosMuOrLegendreOrder = readFunkyFloat( fline[58:67] ),
dCosMuOrLegendreOrder = readFunkyFloat( fline[67:76] ),
eLevelOrHalflife = readFunkyFloat( fline[76:85] ),
dELevelOrHalflife = readFunkyFloat( fline[85:94] ),
idOf78 = emptyStringToNone( fline[94:97] ),
reference = fline[97:122].strip(),
exforEntry = fline[122:127],
exforSubEntry = int(fline[127:130]),
multiDimFlag = multiDimFlag )
# ------------------------------------------------
# Unit tests
# ------------------------------------------------
class TestFunkyFloats( unittest.TestCase ):
def setUp(self):
self.nums={ 0.314159e14:"3.141590+13", -3.14159e13:"-3.141590+13", 51450.00:'51450 ', -51450.00:'-51450 ', 0.00:'0 ', 504400.:"504400 ", 4961.493:"4961.493 ", 8.146:"8.146 ", 0.4042:"0.4042 " }
def test_write_read_consistency( self ):
for x in self.nums:
a = writeFunkyFloat( x )
b = readFunkyFloat( a )
self.assertEqual( x, b )
def test_write( self ):
for k,v in self.nums.items():
self.assertEqual( v, writeFunkyFloat( k ) )
class TestReadC4Point( unittest.TestCase ):
def setUp( self ):
self.a = C4Point( projectile=1, target=40092, targetMetastableState=None, MF=3, MT=1, productMetastableState=None, status='A', cmFlag=None, energy=504400.0, dEnergy=4961.493, data=8.146000, dData=0.404200, cosMuOrLegendreOrder=None, dCosMuOrLegendreOrder=None, eLevelOrHalflife=None, dELevelOrHalflife=None, idOf78=None, reference='L.GREEN,ET.AL. (73)', exforEntry='10225', exforSubEntry=20, multiDimFlag=None )
def test_a( self ):
txt = ' 1 40092 3 1 A 504400 4961.493 8.146 0.4042 L.GREEN,ET.AL. (73) 10225 20 '
b = readC4Point( txt )
self.assertEqual( self.a, b )
c = writeC4Point( b )
self.assertEqual( txt, c )
self.assertEqual( self.a, readC4Point(c) )
class TestReadC4Entry( unittest.TestCase ):
def setUp( self ):
self.a = C4Entry(
entry='40617', author1 = 'M.V.Pasechnik+', year = 1980, institute = '(4CCPIJI)',
title = "TOTAL NEUTRON CROSS-SECTIONS FOR MOLYBDENUM AND ZYRCONIUM AT LOW ENERGIES",
authors = "M.V.Pasechnik, M.B.Fedorov, V.D.Ovdienko, G.A.Smetanin, T.I.Jakovenko",
refCode = "(C,80KIEV,1,304,8009)",
reference = "Conf. 5.All Union Conf.on Neutron Phys.,Kiev,15-19 Sep 1980 Vol.1, p.304, 1980",
numDataSets = 7,
dataSets =[
readC4DataSet( '''#DATASET 40617007
#DATE 19850305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 4
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
#/DATA 4
#/DATASET'''.split('\n') ) ] )
def test_a( self ):
b = readC4Entry( '''#ENTRY 40617
#AUTHOR1 M.V.Pasechnik+
#YEAR 1980
#INSTITUTE (4CCPIJI)
#TITLE TOTAL NEUTRON CROSS-SECTIONS FOR MOLYBDENUM
#+ AND ZYRCONIUM AT LOW ENERGIES
#AUTHOR(S) M.V.Pasechnik, M.B.Fedorov, V.D.Ovdienko,
#+ G.A.Smetanin, T.I.Jakovenko
#REF-CODE (C,80KIEV,1,304,8009)
#REFERENCE Conf. 5.All Union Conf.on Neutron Phys.,Kiev,15-19 Sep 1980
#+ Vol.1, p.304, 1980
#DATASETS 7
#
#DATASET 40617007
#DATE 19850305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 4
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
#/DATA 4
#/DATASET
#/ENTRY
'''.split('\n') )
self.assertEqual( self.a, b )
class TestReadC4DataSet( unittest.TestCase ):
def setUp( self ):
self.a = C4DataSet(
dataSet = '40617007', date = '19850305',
reaction = '40-ZR-92(N,TOT),,SIG', projectile = 1, target = 40092, MF = 3, MT = 1,
c4Begin = "[ 1 40092 3 1 A ]", numData = 4,
data = [
C4Point( projectile=1, target=40092, targetMetastableState=None, MF=3, MT=1, productMetastableState=None, status='A', cmFlag=None, energy=442000.0, dEnergy=None, data=12.74000, dData=1.700000, cosMuOrLegendreOrder=None, dCosMuOrLegendreOrder=None, eLevelOrHalflife=None, dELevelOrHalflife=None, idOf78=None, reference='M.V.PASECHNIK,ET.AL. (80)', exforEntry='40617', exforSubEntry=7, multiDimFlag=None ),
C4Point( projectile=1, target=40092, targetMetastableState=None, MF=3, MT=1, productMetastableState=None, status='A', cmFlag=None, energy=507000.0, dEnergy=None, data=8.790000, dData=0.570000, cosMuOrLegendreOrder=None, dCosMuOrLegendreOrder=None, eLevelOrHalflife=None, dELevelOrHalflife=None, idOf78=None, reference='M.V.PASECHNIK,ET.AL. (80)', exforEntry='40617', exforSubEntry=7, multiDimFlag=None ),
C4Point( projectile=1, target=40092, targetMetastableState=None, MF=3, MT=1, productMetastableState=None, status='A', cmFlag=None, energy=572000.0, dEnergy=None, data=9.520000, dData=0.200000, cosMuOrLegendreOrder=None, dCosMuOrLegendreOrder=None, eLevelOrHalflife=None, dELevelOrHalflife=None, idOf78=None, reference='M.V.PASECHNIK,ET.AL. (80)', exforEntry='40617', exforSubEntry=7, multiDimFlag=None ),
C4Point( projectile=1, target=40092, targetMetastableState=None, MF=3, MT=1, productMetastableState=None, status='A', cmFlag=None, energy=637000.0, dEnergy=None, data=8.480000, dData=0.110000, cosMuOrLegendreOrder=None, dCosMuOrLegendreOrder=None, eLevelOrHalflife=None, dELevelOrHalflife=None, idOf78=None, reference='M.V.PASECHNIK,ET.AL. (80)', exforEntry='40617', exforSubEntry=7, multiDimFlag=None ) ] )
def test_a( self ):
b = readC4DataSet( '''#DATASET 40617007
#DATE 19850305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 4
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
#/DATA 4
'''.split('\n') )
self.assertEqual( self.a, b )
class TestReadC4File( unittest.TestCase ):
def setUp( self ):
self.testData = '''
#
#ENTRY 40617
#AUTHOR1 M.V.Pasechnik+
#YEAR 1980
#INSTITUTE (4CCPIJI)
#TITLE TOTAL NEUTRON CROSS-SECTIONS FOR MOLYBDENUM
#+ AND ZYRCONIUM AT LOW ENERGIES
#AUTHOR(S) M.V.Pasechnik, M.B.Fedorov, V.D.Ovdienko,
#+ G.A.Smetanin, T.I.Jakovenko
#REF-CODE (C,80KIEV,1,304,8009)
#REFERENCE Conf. 5.All Union Conf.on Neutron Phys.,Kiev,15-19 Sep 1980
#+ Vol.1, p.304, 1980
#DATASETS 7
#
#DATASET 40617007
#DATE 19850305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 4
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
#/DATA 4
#/DATASET
#/ENTRY
#
#
#ENTRY 10225
#AUTHOR1 L.Green+
#YEAR 1973
#INSTITUTE (1USABET)
#TITLE Total cross section measurements with a 252Cf time-of-
#+ flight spectrometer.
#AUTHOR(S) L.Green, J.A.Mitchell
#REF-CODE (R,WAPD-TM-1073,197304)
#REFERENCE Rept. Westinghouse Atomic Power Div.(Bettis) Reports
#+ No.1073, 1973
#DATASETS 27
#
#DATASET 10225020
#DATE 20010305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 3
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 504400.0 4961.493 8.146000 0.404200 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 506500.0 4992.510 8.027000 0.557100 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 508600.0 5023.591 7.656000 0.276500 L.GREEN,ET.AL. (73) 10225 20
#/DATA 3
#/DATASET
#/ENTRY
#
'''
self.a = [
readC4Entry('''
#ENTRY 40617
#AUTHOR1 M.V.Pasechnik+
#YEAR 1980
#INSTITUTE (4CCPIJI)
#TITLE TOTAL NEUTRON CROSS-SECTIONS FOR MOLYBDENUM
#+ AND ZYRCONIUM AT LOW ENERGIES
#AUTHOR(S) M.V.Pasechnik, M.B.Fedorov, V.D.Ovdienko,
#+ G.A.Smetanin, T.I.Jakovenko
#REF-CODE (C,80KIEV,1,304,8009)
#REFERENCE Conf. 5.All Union Conf.on Neutron Phys.,Kiev,15-19 Sep 1980
#+ Vol.1, p.304, 1980
#DATASETS 7
#
#DATASET 40617007
#DATE 19850305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 4
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
#/DATA 4
#/DATASET
#/ENTRY
'''.split('\n') ),
readC4Entry('''
#ENTRY 10225
#AUTHOR1 L.Green+
#YEAR 1973
#INSTITUTE (1USABET)
#TITLE Total cross section measurements with a 252Cf time-of-
#+ flight spectrometer.
#AUTHOR(S) L.Green, J.A.Mitchell
#REF-CODE (R,WAPD-TM-1073,197304)
#REFERENCE Rept. Westinghouse Atomic Power Div.(Bettis) Reports
#+ No.1073, 1973
#DATASETS 27
#
#DATASET 10225020
#DATE 20010305
#REACTION 40-ZR-92(N,TOT),,SIG
#PROJ 1
#TARG 40092
#MF 3
#MT 1
#C4BEGIN [ 1 40092 3 1 A ]
#DATA 3
# Prj Targ M MF MT PXC Energy dEnergy Data dData Cos/LO dCos/LO ELV/HL dELV/HL I78
#---><---->o<-><-->ooo<-------><-------><-------><-------><-------><-------><-------><-------><->
1 40092 3 1 A 504400.0 4961.493 8.146000 0.404200 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 506500.0 4992.510 8.027000 0.557100 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 508600.0 5023.591 7.656000 0.276500 L.GREEN,ET.AL. (73) 10225 20
#/DATA 3
#/DATASET
#/ENTRY
'''.split('\n') ) ]
self.b = map( readC4Point, ''' 1 40092 3 1 A 442000.0 12.74000 1.700000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 507000.0 8.790000 0.570000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 572000.0 9.520000 0.200000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 637000.0 8.480000 0.110000 M.V.PASECHNIK,ET.AL. (80)40617 7
1 40092 3 1 A 504400.0 4961.493 8.146000 0.404200 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 506500.0 4992.510 8.027000 0.557100 L.GREEN,ET.AL. (73) 10225 20
1 40092 3 1 A 508600.0 5023.591 7.656000 0.276500 L.GREEN,ET.AL. (73) 10225 20'''.split('\n') )
def test_a( self ):
self.assertEqual( readC4File( self.testData.split( '\n' ), asPointList = False ), self.a )
def test_b( self ):
self.assertEqual( readC4File( self.testData.split( '\n' ), asPointList = True ), self.b )
# ------------------------------------------------
# Main !!
# ------------------------------------------------
if __name__ == "__main__":
try:
import xmlrunner
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-results'))
except ImportError:
unittest.main()
print
|
[
"alhajri@eofe7.cm.cluster"
] |
alhajri@eofe7.cm.cluster
|
8fa5b2def82417e05b7c1c0b0f5562e8fedaa21c
|
ba0f0c869b5d60242b07796659f355e869eb36c1
|
/makename.py
|
968ec129e183897a32b7a5e46e94c1038ba95d90
|
[] |
no_license
|
btubbs/benadryl-cuttlefish
|
2dfef3a6f207710896d3d01ca7166b7fac5e3da6
|
e5abaa38fa50e7a595d7a3bd99149bfafec93124
|
refs/heads/master
| 2020-06-23T09:42:51.902536
| 2016-11-24T16:42:08
| 2016-11-24T16:42:08
| 74,656,623
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
import random
import sys
seed = sys.argv[1] if len(sys.argv) > 1 else ''
with open('BENEDICT.txt') as f:
if seed:
lines = f.readlines()
benedict = lines[hash(seed) % len(lines)].strip()
else:
benedict = random.choice(f.readlines()).strip()
with open('CUMBERBATCH.txt') as f:
if seed:
lines = f.readlines()
cumberbatch = lines[hash(seed) % len(lines)].strip()
else:
cumberbatch = random.choice(f.readlines()).strip()
print benedict, cumberbatch
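# Usage sketch: `python makename.py` prints a random pairing, while
# `python makename.py 42` should print the same pairing on every run, since
# CPython 2 hashes strings deterministically by default.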
|
[
"brent.tubbs@gmail.com"
] |
brent.tubbs@gmail.com
|
9f613532098036136a4e75d1586b8c91b7da8229
|
c40b6d7ad46246a06d2491afdb469fbb2426dcfe
|
/read_frames_slow.py
|
a0e2a3e45c496e89367cb24bae1010f0ebc42ed7
|
[] |
no_license
|
JasperEbus/ODK_Test_omgeving
|
3885146577858b806fe7e195352cfb55151f1eb4
|
2ec35ed7a3243b27e0743b952d5bf164d00ec55c
|
refs/heads/master
| 2023-05-31T17:05:23.508656
| 2021-06-10T10:09:19
| 2021-06-10T10:09:19
| 347,057,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
###############################################################
'''test scripts for reading frames of a video at a slow rate '''
###############################################################
# USAGE
# python read_frames_slow.py --video videos/jurassic_park_intro.mp4
# import the necessary packages
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
help="path to input video file")
args = vars(ap.parse_args())
# open a pointer to the video stream and start the FPS timer
stream = cv2.VideoCapture(args["video"])
fps = FPS().start()
# loop over frames from the video file stream
while True:
# grab the frame from the threaded video file stream
(grabbed, frame) = stream.read()
# if the frame was not grabbed, then we have reached the end
# of the stream
if not grabbed:
break
# resize the frame and convert it to grayscale (while still
# retaining 3 channels)
frame = imutils.resize(frame, width=450)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = np.dstack([frame, frame, frame])
# display a piece of text to the frame (so we can benchmark
# fairly against the fast method)
cv2.putText(frame, "Slow Method", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# show the frame and update the FPS counter
cv2.imshow("Frame", frame)
cv2.waitKey(1)
fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
stream.release()
cv2.destroyAllWindows()
|
[
"j.j.h.ebus@gmail.com"
] |
j.j.h.ebus@gmail.com
|
ab20f84ddbdde43c14c5377af45410a454eee913
|
288e92cf5e69bac400e4d779d7a2a6c21c5b3453
|
/richard_blog/apps.py
|
da03d9ce02c5fcd6caa36e39ffbaebbc890d4587
|
[] |
no_license
|
richawadaskar/Django_girls_tutorial
|
72516ba6e553f4f95c57dc380566c635f6a64eee
|
d3ae3cb17739546c7e69e7eccdb9e0999f9fe671
|
refs/heads/master
| 2021-01-21T08:08:18.379400
| 2017-08-31T03:42:43
| 2017-08-31T03:42:43
| 101,953,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
from django.apps import AppConfig
class RichardBlogConfig(AppConfig):
name = 'richard_blog'
|
[
"rwadaskar@169-231-167-118.wireless.ucsb.edu"
] |
rwadaskar@169-231-167-118.wireless.ucsb.edu
|
a80455e02b47895202544e7370c65e58233944db
|
b29ab41dbe3762b45a12ac476889928c2de57add
|
/backend/testing_app_19538/settings.py
|
013f8fa61b96c558a3c80ea4d34846e2d3b22f7c
|
[] |
no_license
|
crowdbotics-apps/testing-app-19538
|
9c57756e41dbc313055ceb3b92ee2af84081055e
|
91c476cadbd5683f3fa04555fd8432f7e6ab2c13
|
refs/heads/master
| 2022-12-04T01:27:32.450055
| 2020-08-13T15:22:04
| 2020-08-13T15:22:04
| 287,311,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,907
|
py
|
"""
Django settings for testing_app_19538 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
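# Both values above come from the environment: SECRET_KEY has no default, so
# django-environ raises ImproperlyConfigured if it is unset, while HOST falls
# back to allowing any host.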
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testing_app_19538.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testing_app_19538.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2b740de3bd87db1cfd3c33d62bbe98e82f1dc858
|
9d0945e72217094b054349282f9e0a020a2213eb
|
/src/study/powerset.py
|
6fe0c936d8dc74a74674df1e484c4597a54c5aad
|
[] |
no_license
|
dineshbalachandran/mypython
|
c66e68b25f450907f78518ee9ebf2cafbeece7c4
|
b8304b2b8f7fc6644706656643233dbfed9003f5
|
refs/heads/master
| 2022-08-10T22:56:31.422465
| 2022-07-30T11:40:03
| 2022-07-30T11:40:03
| 100,846,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
'''
Created on 22 Apr. 2018
@author: DINESHKB
'''
def getBinaryRep(n, numdigits):
result = ''
while n > 0:
result = str(n%2) + result
n = n//2
if len(result) > numdigits:
raise ValueError('not enough digits')
for _ in range(numdigits - len(result)):
result = '0' + result
return result
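# For example, getBinaryRep(5, 4) returns '0101'. In genpowerset() below,
# bit j of the counter decides whether L[j] joins the subset, so counting
# from 0 to 2**len(L) - 1 enumerates every subset exactly once.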
def genpowerset(L):
powerset = []
for i in range(0, 2**len(L)):
binstr = getBinaryRep(i, len(L))
subset = []
for j in range(len(L)):
if binstr[j] == '1':
subset.append(L[j])
powerset.append(subset)
return powerset
if __name__ == '__main__':
print(genpowerset([1,2,3]))
|
[
"'dinesh_k_b@hotmail.com'"
] |
'dinesh_k_b@hotmail.com'
|
5bb89e6cf29ba3714ec5f135922c7450189edc0b
|
aea5a2b175f89ccf7b09caff723ed38d12ab1bb0
|
/apps/courses/migrations/0004_course_category.py
|
12ce432a34dd642fc0c1adf99ebb6d84fe9a31e2
|
[] |
no_license
|
ridcyq1739/mxonline
|
d02de07bfb86d3395dd544e19d9d3f92df3c477a
|
7b45d0a3eefbdcd27897cae741cef89334911857
|
refs/heads/master
| 2020-03-24T04:04:26.260838
| 2018-07-27T17:14:53
| 2018-07-27T17:14:53
| 142,443,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-07-08 20:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0003_course_course_org'),
]
operations = [
migrations.AddField(
model_name='course',
name='category',
            field=models.CharField(default='\u540e\u7aef\u5f00\u53d1', max_length=20, verbose_name='\u8bfe\u7a0b\u7c7b\u522b'),  # default: "back-end development"; verbose_name: "course category"
),
]
|
[
"ridcyq@gmail.com"
] |
ridcyq@gmail.com
|
2767712dda6453a0acfda995d59c03411567e10d
|
85c72fb410f302142b96ff741ce6d5dd8a6ed718
|
/unify_data.py
|
b1fb0b10117cd4c345f3c633fd7b13d05413e492
|
[] |
no_license
|
gangeshwark/CommonSense_QA
|
f73b1dea1ec7402c22382a5074c7f3ef2579e47d
|
17089a78bcc4ffa3f3b6a59a35eabe369dc64502
|
refs/heads/master
| 2021-03-19T15:25:05.830346
| 2017-11-20T23:55:03
| 2017-11-20T23:55:19
| 110,533,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,943
|
py
|
from pprint import pprint
import os
import xml.etree.ElementTree
import io, json
base_data_path = 'KB/'
all_scripts = {}
def get_omcs_data():
for subdir, dirs, files in os.walk(base_data_path + 'OMCS/'):
for file in sorted(files):
scripts = []
file_path = subdir + os.path.sep + file
e = xml.etree.ElementTree.parse(file_path).getroot()
for atype in e.findall('script'):
script = {'id': atype.get('id')}
items = []
for i in atype.findall('item'):
item_text = i.get('text')
                    if '.' not in item_text[-2:]:
items.append(item_text + ' .')
else:
items.append(item_text)
script['items'] = items
script['text'] = ' '.join(items)
scripts.append(script)
all_scripts[file] = scripts
def get_descript_data():
for subdir, dirs, files in os.walk(base_data_path + 'DS/'):
for file in sorted(files):
scripts = []
file_path = subdir + os.path.sep + file
print(file_path)
e = xml.etree.ElementTree.parse(file_path).getroot()
for atype in e.findall('script'):
script = {'id': atype.get('id')}
items = []
for i in atype.findall('item'):
item_text = i.get('original')
                    if '.' not in item_text[-2:]:
items.append(item_text + '.')
else:
items.append(item_text)
script['items'] = items
script['text'] = ' '.join(items)
scripts.append(script)
all_scripts[file] = scripts
def get_rkp_data():
for subdir, dirs, files in os.walk(base_data_path + 'RKP/'):
for file in sorted(files):
scripts = []
file_path = subdir + os.path.sep + file
print(file_path)
e = xml.etree.ElementTree.parse(file_path).getroot()
for atype in e.findall('script'):
script = {'id': atype.get('id')}
items = []
for i in atype.findall('item'):
item_text = i.get('text')
                    if '.' not in item_text[-2:]:
items.append(item_text + '.')
else:
items.append(item_text)
script['items'] = items
script['text'] = ' '.join(items)
scripts.append(script)
all_scripts[file] = scripts
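# The three loaders above differ only in the subfolder walked, the <item>
# attribute read, and the punctuation appended. A hedged refactoring sketch
# (the helper name and signature are hypothetical, not part of the original
# script):
def collect_scripts(subfolder, attr, suffix='.'):
    for subdir, dirs, files in os.walk(base_data_path + subfolder):
        for file in sorted(files):
            scripts = []
            file_path = subdir + os.path.sep + file
            root = xml.etree.ElementTree.parse(file_path).getroot()
            for atype in root.findall('script'):
                script = {'id': atype.get('id')}
                items = []
                for i in atype.findall('item'):
                    item_text = i.get(attr)
                    if '.' not in item_text[-2:]:
                        item_text += suffix
                    items.append(item_text)
                script['items'] = items
                script['text'] = ' '.join(items)
                scripts.append(script)
            all_scripts[file] = scripts
# e.g. get_omcs_data() would become collect_scripts('OMCS/', 'text', ' .')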
if __name__ == '__main__':
get_omcs_data()
get_descript_data()
get_rkp_data()
with io.open('all_scripts.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(all_scripts, ensure_ascii=False, indent=4))
pprint(all_scripts)
|
[
"gangeshwark@gmail.com"
] |
gangeshwark@gmail.com
|
fd58354bcbd87a70f351f1e926c41ae90a926fbc
|
99291222b1b54a17a10c1662f428648349bb15b3
|
/python_modules/libraries/dagster-pandas/dagster_pandas_tests/test_constraints.py
|
1d8fe541bab87d529d1e473fec358ecb0d4130eb
|
[
"Apache-2.0"
] |
permissive
|
rberrelleza/dagster
|
ee3c9c80f092b6d817e8bd546eb0d3c5852c0de3
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
refs/heads/master
| 2022-12-03T11:12:32.585772
| 2020-08-07T23:25:30
| 2020-08-07T23:25:30
| 286,546,665
| 1
| 1
|
Apache-2.0
| 2020-08-10T18:09:13
| 2020-08-10T18:09:12
| null |
UTF-8
|
Python
| false
| false
| 7,861
|
py
|
import pytest
from dagster_pandas.constraints import (
CategoricalColumnConstraint,
ColumnDTypeInSetConstraint,
ConstraintViolationException,
InRangeColumnConstraint,
MaxValueColumnConstraint,
MinValueColumnConstraint,
NonNullableColumnConstraint,
RowCountConstraint,
StrictColumnsConstraint,
UniqueColumnConstraint,
)
from numpy import NaN
from pandas import DataFrame
NAN_VALUES = [
NaN,
None,
]
def test_column_unique_constraint():
test_dataframe = DataFrame({'foo': ['foo', 'bar', 'baz']})
assert UniqueColumnConstraint(ignore_missing_vals=False).validate(test_dataframe, 'foo') is None
bad_test_dataframe = DataFrame({'foo': ['foo', 'foo', 'baz']})
with pytest.raises(ConstraintViolationException):
UniqueColumnConstraint(ignore_missing_vals=False).validate(bad_test_dataframe, 'foo')
def test_column_unique_constraint_ignore_nan():
for nullable_value in NAN_VALUES:
test_dataframe = DataFrame({'foo': [nullable_value, 'bar', 'baz']})
assert (
UniqueColumnConstraint(ignore_missing_vals=True).validate(test_dataframe, 'foo') is None
)
test_dataframe = DataFrame({'foo': [nullable_value, nullable_value, 'baz']})
assert (
UniqueColumnConstraint(ignore_missing_vals=True).validate(test_dataframe, 'foo') is None
)
test_dataframe = DataFrame({'foo': ['bar', 'bar', nullable_value]})
with pytest.raises(ConstraintViolationException):
UniqueColumnConstraint(ignore_missing_vals=False).validate(test_dataframe, 'foo')
test_dataframe = DataFrame({'foo': [nullable_value, nullable_value, 'baz']})
with pytest.raises(ConstraintViolationException):
UniqueColumnConstraint(ignore_missing_vals=False).validate(test_dataframe, 'foo')
def test_column_type_constraint():
test_dataframe = DataFrame({'foo': ['baz']})
assert ColumnDTypeInSetConstraint({'object'}).validate(test_dataframe, 'foo') is None
with pytest.raises(ConstraintViolationException):
ColumnDTypeInSetConstraint({'int64'}).validate(test_dataframe, 'foo')
def test_non_nullable_column_constraint():
test_dataframe = DataFrame({'foo': ['baz']})
assert NonNullableColumnConstraint().validate(test_dataframe, 'foo') is None
bad_test_dataframe = DataFrame({'foo': ['baz', None]})
with pytest.raises(ConstraintViolationException):
NonNullableColumnConstraint().validate(bad_test_dataframe, 'foo')
def test_categorical_column_constraint():
test_dataframe = DataFrame({'foo': ['bar', 'baz', 'bar', 'bar']})
assert (
CategoricalColumnConstraint({'bar', 'baz'}, ignore_missing_vals=False).validate(
test_dataframe, 'foo'
)
is None
)
bad_test_dataframe = DataFrame({'foo': ['bar', 'qux', 'bar', 'bar']})
with pytest.raises(ConstraintViolationException):
CategoricalColumnConstraint({'bar', 'baz'}, ignore_missing_vals=False).validate(
bad_test_dataframe, 'foo'
)
def test_categorical_column_constraint_ignore_nan():
for nullable in NAN_VALUES:
test_dataframe = DataFrame({'foo': ['red', 'blue', 'green', nullable]})
assert (
CategoricalColumnConstraint(
{'red', 'blue', 'green'}, ignore_missing_vals=True
).validate(test_dataframe, 'foo')
is None
)
test_dataframe = DataFrame({'foo': ['red', 'yellow', 'green', nullable, nullable]})
with pytest.raises(ConstraintViolationException):
CategoricalColumnConstraint(
{'red', 'blue', 'green'}, ignore_missing_vals=True
).validate(test_dataframe, 'foo')
def test_min_value_column_constraint():
test_dataframe = DataFrame({'foo': [1, 1, 2, 3]})
assert (
MinValueColumnConstraint(0, ignore_missing_vals=False).validate(test_dataframe, 'foo')
is None
)
with pytest.raises(ConstraintViolationException):
MinValueColumnConstraint(2, ignore_missing_vals=False).validate(test_dataframe, 'foo')
def test_min_valid_column_constraint_ignore_nan():
for nullable in NAN_VALUES:
test_dataframe = DataFrame({'foo': [1, 1, 2, 3, nullable]})
assert (
MinValueColumnConstraint(0, ignore_missing_vals=True).validate(test_dataframe, 'foo')
is None
)
with pytest.raises(ConstraintViolationException):
MinValueColumnConstraint(3, ignore_missing_vals=True).validate(test_dataframe, 'foo')
def test_max_value_column_constraint():
test_dataframe = DataFrame({'foo': [1, 1, 2, 3]})
assert (
MaxValueColumnConstraint(5, ignore_missing_vals=False).validate(test_dataframe, 'foo')
is None
)
with pytest.raises(ConstraintViolationException):
MaxValueColumnConstraint(2, ignore_missing_vals=False).validate(test_dataframe, 'foo')
def test_max_valid_column_constraint_ignore_nan():
for nullable in NAN_VALUES:
test_dataframe = DataFrame({'foo': [1, 1, 2, 3, nullable]})
assert (
MaxValueColumnConstraint(5, ignore_missing_vals=True).validate(test_dataframe, 'foo')
is None
)
with pytest.raises(ConstraintViolationException):
MaxValueColumnConstraint(2, ignore_missing_vals=True).validate(test_dataframe, 'foo')
def test_in_range_value_column_constraint():
test_dataframe = DataFrame({'foo': [1, 1, 2, 3]})
assert (
InRangeColumnConstraint(1, 4, ignore_missing_vals=False).validate(test_dataframe, 'foo')
is None
)
with pytest.raises(ConstraintViolationException):
InRangeColumnConstraint(2, 3, ignore_missing_vals=False).validate(test_dataframe, 'foo')
def test_in_range_value_column_constraint_ignore_nan():
for nullable in NAN_VALUES:
test_dataframe = DataFrame({'foo': [1, 1, 2, 3, nullable]})
assert (
InRangeColumnConstraint(1, 4, ignore_missing_vals=True).validate(test_dataframe, 'foo')
is None
)
with pytest.raises(ConstraintViolationException):
InRangeColumnConstraint(2, 3, ignore_missing_vals=True).validate(test_dataframe, 'foo')
def test_strict_columns_constraint():
assert (
StrictColumnsConstraint(['foo', 'bar']).validate(DataFrame({'foo': [1, 2], 'bar': [1, 2]}))
is None
)
with pytest.raises(ConstraintViolationException):
StrictColumnsConstraint(['foo', 'bar']).validate(
DataFrame({'foo': [1, 2], 'bar': [1, 2], 'baz': [1, 2]})
)
assert (
StrictColumnsConstraint(['foo', 'bar'], enforce_ordering=True).validate(
DataFrame([[1, 2], [1, 2]], columns=['foo', 'bar'])
)
is None
)
with pytest.raises(ConstraintViolationException):
StrictColumnsConstraint(['foo', 'bar'], enforce_ordering=True).validate(
DataFrame([[1, 2], [1, 2]], columns=['bar', 'foo'])
)
def test_row_count_constraint():
test_dataframe = DataFrame({'foo': [1, 2, 3, 4, 5, 6]})
assert RowCountConstraint(6).validate(test_dataframe) is None
with pytest.raises(ConstraintViolationException):
RowCountConstraint(5).validate(test_dataframe)
assert (
RowCountConstraint(5, error_tolerance=1).validate(DataFrame({'foo': [1, 2, 3, 4]})) is None
)
with pytest.raises(ConstraintViolationException):
assert RowCountConstraint(5, error_tolerance=1).validate(DataFrame({'foo': [1, 2]}))
assert (
RowCountConstraint(5, error_tolerance=1).validate(DataFrame({'foo': [1, 2, 3, 4, 5, 6]}))
is None
)
with pytest.raises(ConstraintViolationException):
assert RowCountConstraint(5, error_tolerance=1).validate(
DataFrame({'foo': [1, 2, 3, 4, 5, 6, 7]})
)
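# A minimal usage sketch (not part of the original test module): each
# constraint validates a single DataFrame column and raises
# ConstraintViolationException on failure, which is the contract every test
# above exercises.
def _example_usage():
    df = DataFrame({'foo': [1, 2, 3]})
    # Passes silently: every value lies within [0, 10].
    InRangeColumnConstraint(0, 10, ignore_missing_vals=False).validate(df, 'foo')
    try:
        # Raises: the value 1 violates the minimum of 2.
        MinValueColumnConstraint(2, ignore_missing_vals=False).validate(df, 'foo')
    except ConstraintViolationException:
        pass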
|
[
"abhi@elementl.com"
] |
abhi@elementl.com
|
f5f14f69f6f2cdd141085563ff8a700ff0c2c75a
|
0581673ff3fcf6f31e5939fb97eaa9055fe66678
|
/main/urls.py
|
e63885cee1f3ffa433fd8acdee23aa317a9c6eac
|
[] |
no_license
|
Arhan99/TYK_TYK
|
d17f7936509dfbe22188bd4b16173a0e17e4838d
|
724cfcca0493ebfc4ba1fbc1d300955980a950e9
|
refs/heads/master
| 2023-04-09T16:16:18.839896
| 2021-04-21T06:52:19
| 2021-04-21T06:52:19
| 360,060,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
# -*- coding: utf-8 -*-
from django.urls import path
from main.views import index, filter_search, filter_search_busines, all_busines, likes, likes_busines, all_filter_search, statistics
urlpatterns = [
path('', index, name='index'),
path('all_neighb/', all_filter_search, name='all_neighb'),
path('filter_search/', filter_search, name='filter_search'),
path('all_busines/', all_busines, name='all_busines'),
path('search_busines/', filter_search_busines, name='search_busines'),
path('likes/', likes, name='likes'),
path('likes_busines/', likes_busines, name='likes_busines'),
path('statistics/', statistics, name='statistics'),
]
|
[
"82928812+Arhan99@users.noreply.github.com"
] |
82928812+Arhan99@users.noreply.github.com
|
617614a4bc84ee6c6eff40b83f98bbafd2677a8d
|
42d5a0bb4fc9205c87b9c38db0a33eaf32de5288
|
/ballotbuilder/bin/easy_install
|
ba0c29e4bd37d2fc5b35e16de683aea035cfc649
|
[
"BSD-2-Clause"
] |
permissive
|
walke469/spartahack-17
|
d522ae3c0689a7420a7dae05e67fbefefbed2918
|
646ebfad8d4f0a8170af9a5fa5b9d3578b0f2f1c
|
refs/heads/master
| 2021-01-11T16:47:42.517999
| 2017-01-22T13:58:08
| 2017-01-22T13:58:08
| 79,672,326
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
#!/srv/ballotbuilder/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ubuntu@ip-172-26-9-129.ec2.internal"
] |
ubuntu@ip-172-26-9-129.ec2.internal
|
|
196f594cdf93c99c3e6f58b29bd47b6f682769ff
|
ec0f61837133b22aac328d5f4a7cd28063ffd997
|
/blogs/blogs/urls.py
|
8720f951230d17f1f16de4120bdb10b4fe936e35
|
[] |
no_license
|
ashishpokhrel123/TechBlogs
|
9e76e2efc6eecf825ce5e575eb3d802376f20f19
|
5fb726ac018255e4d1bba5e29ee464ae310557fe
|
refs/heads/main
| 2023-02-23T10:41:48.543708
| 2021-01-23T15:20:23
| 2021-01-23T15:20:23
| 332,236,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
"""blogs URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"aashishpokhrel146@gmail.com"
] |
aashishpokhrel146@gmail.com
|
36c02ba4c1f74ed2e06006d957ec2c4f77222217
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/devices/v20210303preview/get_certificate.py
|
11a256c611dd26a54b2a548c6357eefae477a010
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 3,803
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
@pulumi.output_type
class GetCertificateResult:
"""
The X509 Certificate.
"""
def __init__(__self__, etag=None, id=None, name=None, properties=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
The entity tag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the certificate.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.CertificatePropertiesResponse':
"""
The description of an X509 CA Certificate.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
etag=self.etag,
id=self.id,
name=self.name,
properties=self.properties,
type=self.type)
def get_certificate(certificate_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
The X509 Certificate.
:param str certificate_name: The name of the certificate
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
__args__ = dict()
__args__['certificateName'] = certificate_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devices/v20210303preview:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
b4a5eefb8d0a74ba831efebc744256bb401f047d
|
b1da40a1faf67c503b1415ff0086e302fd5212be
|
/.env/Scripts/runxlrd.py
|
160ff96f69c70a3d7f1d7478f72d75db84006768
|
[] |
no_license
|
HelderMenegatti/Blog_wagtail
|
dc611de7a0117d97f247df7a7d529fd9139f12fb
|
26842790fd1c6b2d02fc9dc87638f3dce3d98353
|
refs/heads/master
| 2023-06-05T10:00:43.724221
| 2021-06-28T20:16:10
| 2021-06-28T20:16:10
| 380,579,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,113
|
py
|
#!c:\users\ramon\desktop\blog_wagtail\.env\scripts\python.exe
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
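# Typical invocations based on the command list above (the file names here
# are placeholders):
#   python runxlrd.py 3rows pricing.xls
#   python runxlrd.py -f1 show formatted*.xls
#   python runxlrd.py version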
if __name__ == "__main__":
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
else:
main(av)
|
[
"heldermenegatti720@gmail.com"
] |
heldermenegatti720@gmail.com
|
e99cd18876c90e06aa848b966591b485b703cea3
|
88a32ab161fcd3473a87b1a921e2475620d71ee5
|
/multihashtags.py
|
2d217e17d03a232b839d1feeba8bf3c0b62dbd14
|
[] |
no_license
|
maczokni/halloweenMCR
|
b152b2e61c5c21a34214a51387526fdc5cd83b18
|
0b647d0a1e90232b6ae5547d65c594ffb1325e3e
|
refs/heads/master
| 2020-08-30T07:58:39.631008
| 2019-11-06T11:40:58
| 2019-11-06T11:40:58
| 218,311,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 26 08:45:24 2019
@author: reka
"""
from instaloader import Instaloader
def download_posts_with_multiple_hashtags(instaloader, hashtags, max_per_hashtag):
posts = []
print("Getting posts with hashtag {}".format(hashtags[0]))
for count, post in enumerate(instaloader.get_hashtag_posts(hashtags[0])):
posts.append(post)
print(count + 1, sep='', end='\r', flush=True)
if count >= max_per_hashtag - 1:
break
for idx, hashtag in enumerate(hashtags[1:]):
prev_posts = posts
posts = []
print("\nGetting posts with hashtag {} and {}".format(hashtag,
','.join(hashtags[:(idx + 1)])))
for count, post in enumerate(instaloader.get_hashtag_posts(hashtag)):
if any(p == post for p in prev_posts):
posts.append(post)
print("{}, {} matching".format(count + 1, len(posts)), sep='', end='\r',
flush=True)
if count >= max_per_hashtag - 1:
break
if posts:
print("\nDownloading posts with hashtags {}".format(','.join(hashtags)))
for count, post in enumerate(posts):
print("[{:03d}/{:03d}] ".format(count + 1, len(posts)), end='', flush=True)
instaloader.download_post(post, target=','.join(hashtags))
loader = Instaloader(sleep=True, filename_pattern='{date}')
try:
download_posts_with_multiple_hashtags(loader,
hashtags=['HalloweenMCR', 'MCRMONSTERS'],
max_per_hashtag=5000)
except KeyboardInterrupt:
pass
|
[
"maczokni@gmail.com"
] |
maczokni@gmail.com
|
dc6f95fed9fae0c16bb591d70b845ffbd27a1d28
|
bc97ac7fd050af7e29123c85ccb1d40e44ed8788
|
/venv/Scripts/pip3.7-script.py
|
191104a62fd174eb4b971b71bf9ecf599e5679aa
|
[] |
no_license
|
zhaisa/mydjangop1
|
9eb4c8185fe0499a6276d97a99843e5eca757d1c
|
d894b65129fe1336ec400dcc60b06ecaa5c24861
|
refs/heads/master
| 2020-04-15T03:24:41.152479
| 2019-01-20T16:16:06
| 2019-01-20T16:16:06
| 164,347,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
#!C:\Users\95\PycharmProjects\mydjangop1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"zhaisa21@163.com"
] |
zhaisa21@163.com
|
091bc22980d0b8341128b4835a7ee5c94c79e71d
|
e8f99a162207cba82d4e0f969d7bcdb2b9d8b522
|
/wireless_parse/udp_trans_demo/udp_receiver_v5.py
|
214e9d1353b50d8ee559e3f3305ca0306e139e1b
|
[] |
no_license
|
TesterCC/Python3Scripts
|
edb5446278ebf13edb64336001081941ca27d67d
|
58be67e1ffc74ef50289a885aa4ad05f58e2c383
|
refs/heads/master
| 2023-08-30T21:16:38.328045
| 2023-08-17T11:23:08
| 2023-08-17T11:23:08
| 93,401,996
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
# -*- coding=utf-8 -*-
import socket
import sys
import time
from Crypto.Cipher import AES
# v5: support AES Cipher
def _unpad(data):
# decrypt use
padding_len = data[-1]
return data[:-padding_len]
def aes_decrypt(key, encrypted_data):
aes = AES.new(key, AES.MODE_ECB)
decrypted_data = aes.decrypt(encrypted_data)
return _unpad(decrypted_data)
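# Sender-side counterpart (a sketch for reference; not part of this
# receiver): AES-ECB operates on 16-byte blocks, so the plaintext must carry
# PKCS#7-style padding, which _unpad() above strips after decryption.
def _pad(data, block_size=16):
    padding_len = block_size - len(data) % block_size
    return data + bytes([padding_len]) * padding_len
def aes_encrypt(key, data):
    aes = AES.new(key, AES.MODE_ECB)
    return aes.encrypt(_pad(data))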
def receiver(host, port):
count = 0
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_addr = (host, port)
s.bind(server_addr)
print(f'[D] Bind UDP at {host}:{port}')
received_size = 0
while True:
if count == 0:
data, client_addr = s.recvfrom(4096)
print('connected from %s:%s' % client_addr)
data = aes_decrypt(key, data)
# Record the start time of the receiver running
start = time.time()
f = open(data, 'wb')
data, client_addr = s.recvfrom(4096)
data = aes_decrypt(key, data)
        if data != b'end':  # the sender transmits b'end' to mark end of file
received_size += len(data)
f.write(data)
# Record the current system time
end = time.time()
# Print the current time every 1s
# while printing the cumulative amount of transmission
if end - start > 1:
print(end)
print('Accept ', received_size, ' B')
start = time.time()
else:
break
s.sendto('ok'.encode('utf-8'), client_addr)
count += 1
print('[D] total received ', received_size, ' B')
f.close()
s.close()
if __name__ == '__main__':
if len(sys.argv) == 3:
host = sys.argv[1]
port = sys.argv[2]
key = bytes("wirelesspost2023", encoding="utf-8")
else:
        print("usage: python3 %s ip port" % sys.argv[0])
sys.exit(-1)
receiver(host, int(port))
|
[
"testerlyx@foxmail.com"
] |
testerlyx@foxmail.com
|
911742be4879768d802cb3f2496a4b5561f2d7e9
|
0050a676c34974cbc90c8c2049f67918b951db98
|
/brute_MD5.py
|
005c9ae0f0d5b377fecdc0085218fb2649f2f249
|
[] |
no_license
|
Bhavya251/MD5-Brute-Force
|
c912fc2ec98bd6c660ed7e56a6d08e06ce7117b1
|
4e664c55716838fde616be342d30b270eb17d371
|
refs/heads/main
| 2023-04-15T07:54:32.601631
| 2021-04-29T23:53:36
| 2021-04-29T23:53:36
| 362,976,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
import random
import time
import MD5  # project-local helper module providing calcmd5()
charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()+_'
hash24 = '4adc03f09184857ee1370f4001219d45'
FILE = open("wordlist.txt", "w")
print ("[+] Start Time: ", time.strftime('%H:%M:%S'))
print ("\nLoading ... ")
for count in range(0, 10000000000):
    # random.sample draws without replacement, so each candidate has at most
    # six characters and never contains a repeated character; targets with
    # repeated characters are therefore out of reach of this search.
    candidate = ''.join(random.sample(charset, random.randint(1, 6)))
    FILE.write(candidate + '\n')
    hash_random = MD5.calcmd5(candidate)
    if hash_random == hash24:
        print("\nPassword found : ")
        print (candidate)
        break
FILE.close()
print ("[+] End Time: ", time.strftime('%H:%M:%S'))
print ("Done")
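# Note: MD5 above is a project-local helper module. Assuming MD5.calcmd5()
# returns a lowercase hex digest string, a standard-library equivalent would
# be:
#     import hashlib
#     hashlib.md5(candidate.encode('utf-8')).hexdigest()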
|
[
"noreply@github.com"
] |
Bhavya251.noreply@github.com
|
ecf12a040456d55ad2e860fc7d0f051ef5e3d5bc
|
d40daca7858c4ca7f30f59700c58f670c431b3ae
|
/Tasks/Day-5/task-1.py
|
fb78e48ba09779a23c17d143a4a1c250102313c1
|
[] |
no_license
|
Bibinbiju9873/Internship-AkashTechnoLabs
|
df5d7ffbf2955798712e41722658e0593d039bdf
|
30840729c00a3970da55e12fe873ebe4da8bd372
|
refs/heads/main
| 2023-05-28T17:16:34.160995
| 2021-06-12T03:53:21
| 2021-06-12T03:53:21
| 370,796,277
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
class cal1:
def setdata(self, a, b, c):
self.a = a
self.b = b
self.c = c
def display(self):
ans1 = self.a + self.b + self.c
print("{}+{}+{}={}".format(self.a, self.b, self.c, ans1))
obj1 = cal1()
obj1.setdata(10, 20, 30)
obj1.display()
|
[
"bibinbiju70000@gmail.com"
] |
bibinbiju70000@gmail.com
|
475b33491bc296d09f617226da842f375640c5b0
|
4914e1e18cabd3db104386b13a48e3371f6c4d25
|
/tov/Lambda_varying_n1_n2.py
|
d248379178f8c757acf90ea12e4d134dd80bebb9
|
[] |
no_license
|
sotzee/ns
|
592b21c013657ca202ab1138d92c32960d7e2170
|
70faa8e97560ec4072e5f0f697e3f2471f1303f7
|
refs/heads/master
| 2021-06-19T15:51:03.271980
| 2019-06-10T14:16:21
| 2019-06-10T14:16:21
| 115,557,527
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,805
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 14:34:31 2018
@author: sotzee
"""
from eos_class import EOS_BPSwithPoly,EOS_PiecewisePoly,EOS_BPS
import numpy as np
from tov_f import MassRadius
from Find_OfMass import Properity_ofmass
from FindMaxmass import Maxmass
import scipy.optimize as opt
from scipy.misc import derivative
import matplotlib.pyplot as plt
baryon_density_s= 0.16
baryon_density0 = 0.16/2.7
baryon_density3 = 7.4*0.16
pressure0=EOS_BPS.eosPressure_frombaryon(baryon_density0)
density0=EOS_BPS.eosDensity(pressure0)
Preset_Pressure_final=1e-8
Preset_rtol=1e-6
def Density_i(pressure_i,baryon_density_i,pressure_i_minus,baryon_density_i_minus,density_i_minus):
gamma_i=np.log(pressure_i/pressure_i_minus)/np.log(baryon_density_i/baryon_density_i_minus)
return gamma_i,(density_i_minus-pressure_i_minus/(gamma_i-1))*\
(pressure_i/pressure_i_minus)**(1./gamma_i)+pressure_i/(gamma_i-1)
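# Density_i encodes piecewise-polytrope segment i: gamma_i follows from
# continuity of p = K*n**gamma at the segment boundary, and integrating the
# first law gives the energy density returned above,
#   eps_i = (eps_{i-1} - p_{i-1}/(gamma_i-1))*(p_i/p_{i-1})**(1/gamma_i)
#           + p_i/(gamma_i-1).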
def causality_i(pressure_i,baryon_density_i,pressure_i_minus,baryon_density_i_minus,density_i_minus):
gamma_i,density_i=Density_i(pressure_i,baryon_density_i,pressure_i_minus,baryon_density_i_minus,density_i_minus)
return gamma_i*pressure_i/(density_i+pressure_i)-1
def causality_p2(p1):
density1=Density_i(p1,baryon_density1,pressure0,baryon_density0,density0)[1]
return opt.newton(causality_i,200.,args=(baryon_density2,p1,baryon_density1,density1))
def causality_central_pressure(pressure_center,density2,pressure2,gamma3):
#print pressure_center>0,(gamma3*pressure_center*(gamma3-1))/(((gamma3-1)*density2-pressure2)*(pressure_center/pressure2)**(1/gamma3)+gamma3*pressure_center)
#print baryon_density1,baryon_density2
# =============================================================================
# pressure3=pressure2*(baryon_density3/baryon_density2)**gamma3
# eos=EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3])
# print pressure_center,eos.eosCs2(pressure_center)
# =============================================================================
return np.where(pressure_center>0,(gamma3*pressure_center*(gamma3-1))/(((gamma3-1)*density2-pressure2)*(pressure_center/pressure2)**(1/gamma3)+gamma3*pressure_center)-1,-1+pressure_center/1000.)
def caulality_central_pressure_at_peak(pressure3,pressure1,pressure2,Preset_Pressure_final,Preset_rtol):
eos = EOS_PiecewisePoly([density0,pressure0,baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3])
pressure_center=opt.newton(causality_central_pressure,0.3*pressure3,tol=0.1,args=(eos.density2,pressure2,eos.gamma3))
derivative_center_pressure=derivative(MassRadius,pressure_center,dx=1e-2,args=(Preset_Pressure_final,Preset_rtol,'M',eos))
#print pressure3, pressure_center, derivative_center_pressure
return derivative_center_pressure
def trial_p2(p1,of_maxmass):
p2_standard=(360*(of_maxmass-2.0)**1.3+103)
gamma2_standard=np.log(p2_standard/p1)/np.log(3.7*0.16/baryon_density1)
return p1*(baryon_density2/baryon_density1)**gamma2_standard
def trial_p3(p1,p2):
gamma2_standard=np.log(p2/p1)/np.log(baryon_density2/baryon_density1)
p2_standard=p1*(3.7*0.16/baryon_density1)**gamma2_standard
return 7.1*p2_standard
def p2p3_ofmaxmass(ofmaxmass,Maxmass_function,Preset_Pressure_final,Preset_rtol,p1):
print '==================Finding p2 of maxmass%.2f at p1=%.2f'%(ofmaxmass,p1)
pressure3_result=[0]
pressure_center_maxmass=[0]
def Ofmaxmass(p2,ofmaxmass,Maxmass_function,Preset_Pressure_final,Preset_rtol,args_p1):
print '==================Finding p3 at p2=%f'%p2
pressure3=opt.newton(caulality_central_pressure_at_peak,trial_p3(args_p1,p2),tol=0.1,args=(args_p1,p2,Preset_Pressure_final,Preset_rtol))
pressure3_result[0]=pressure3
args=[baryon_density0,args_p1,baryon_density1,p2,baryon_density2,pressure3,baryon_density3]
eos=EOS_BPSwithPoly(args)
maxmass_result = Maxmass_function(Preset_Pressure_final,Preset_rtol,eos)
pressure_center_maxmass[0]=maxmass_result[1]
print 'maxmass=%f'%maxmass_result[2]
return -ofmaxmass+maxmass_result[2]
Preset_p2=trial_p2(p1,ofmaxmass)
result=opt.newton(Ofmaxmass,Preset_p2,tol=0.1,args=(ofmaxmass,Maxmass_function,Preset_Pressure_final,Preset_rtol,p1))
return result,pressure3_result[0],pressure_center_maxmass[0]
upper_bound_eos=[]
upper_bound_pc=[]
lower_bound_eos=[]
lower_bound_pc=[]
baryon_density1 = 1.85*0.16
baryon_density2 = 3.2*0.16
pressure1=30
pressure2=causality_p2(pressure1)
Preset_Pressure_final=1e-8
Preset_rtol=1e-6
pressure3=opt.newton(caulality_central_pressure_at_peak,trial_p3(pressure1,pressure2),tol=0.1,args=(pressure1,pressure2,Preset_Pressure_final,Preset_rtol))
upper_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
upper_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,upper_bound_eos[-1])[1])
print upper_bound_eos[-1].args
pressure1=8.4
pressure2,pressure3,pressure_center=p2p3_ofmaxmass(2.0,Maxmass,Preset_Pressure_final,Preset_rtol,pressure1)
lower_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
lower_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,lower_bound_eos[-1])[1])
print lower_bound_eos[-1].args
baryon_density1 = 1.85*0.16
baryon_density2 = 3.7*0.16
pressure1=30
pressure2=causality_p2(pressure1)
Preset_Pressure_final=1e-8
Preset_rtol=1e-8
pressure3=opt.newton(caulality_central_pressure_at_peak,trial_p3(pressure1,pressure2),tol=0.1,args=(pressure1,pressure2,Preset_Pressure_final,Preset_rtol))
upper_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
upper_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,upper_bound_eos[-1])[1])
print upper_bound_eos[-1].args
pressure1=8.4
pressure2,pressure3,pressure_center=p2p3_ofmaxmass(2.0,Maxmass,Preset_Pressure_final,Preset_rtol,pressure1)
lower_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
lower_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,lower_bound_eos[-1])[1])
print lower_bound_eos[-1].args
baryon_density1 = 1.85*0.16
baryon_density2 = 4.2*0.16
pressure1=30
pressure2=causality_p2(pressure1)
Preset_Pressure_final=1e-8
Preset_rtol=1e-8
pressure3=opt.newton(caulality_central_pressure_at_peak,trial_p3(pressure1,pressure2),tol=0.1,args=(pressure1,pressure2,Preset_Pressure_final,Preset_rtol))
upper_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
upper_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,upper_bound_eos[-1])[1])
print upper_bound_eos[-1].args
pressure1=8.4
pressure2,pressure3,pressure_center=p2p3_ofmaxmass(2.0,Maxmass,Preset_Pressure_final,Preset_rtol,pressure1)
lower_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
lower_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,lower_bound_eos[-1])[1])
print lower_bound_eos[-1].args
baryon_density1 = 1.7*0.16
baryon_density2 = 3.7*0.16
pressure1=23.179569511045678
pressure2=causality_p2(pressure1)
Preset_Pressure_final=1e-8
Preset_rtol=1e-8
pressure3=opt.newton(caulality_central_pressure_at_peak,trial_p3(pressure1,pressure2),tol=0.1,args=(pressure1,pressure2,Preset_Pressure_final,Preset_rtol))
upper_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
upper_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,upper_bound_eos[-1])[1])
print upper_bound_eos[-1].args
pressure1=6.9394800214143499
pressure2,pressure3,pressure_center=p2p3_ofmaxmass(2.0,Maxmass,Preset_Pressure_final,Preset_rtol,pressure1)
lower_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
lower_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,lower_bound_eos[-1])[1])
print lower_bound_eos[-1].args
baryon_density1 = 2.0*0.16
baryon_density2 = 3.7*0.16
pressure1=38.05392111496401
pressure2=causality_p2(pressure1)
Preset_Pressure_final=1e-8
Preset_rtol=1e-8
pressure3=opt.newton(caulality_central_pressure_at_peak,trial_p3(pressure1,pressure2),tol=0.1,args=(pressure1,pressure2,Preset_Pressure_final,Preset_rtol))
upper_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
upper_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,upper_bound_eos[-1])[1])
print upper_bound_eos[-1].args
pressure1=10.017537925119589
pressure2,pressure3,pressure_center=p2p3_ofmaxmass(2.0,Maxmass,Preset_Pressure_final,Preset_rtol,pressure1)
lower_bound_eos.append(EOS_BPSwithPoly([baryon_density0,pressure1,baryon_density1,pressure2,baryon_density2,pressure3,baryon_density3]))
lower_bound_pc.append(Maxmass(Preset_Pressure_final,1e-4,lower_bound_eos[-1])[1])
print lower_bound_eos[-1].args
for eos_i in lower_bound_eos:
print eos_i.args
for eos_i in upper_bound_eos:
print eos_i.args
#gamma1=2.2588784192626843
lower_bound_eos_args=[[0.059259259259259255, 8.4, 0.29600000000000004, 68.161110937142951, 0.512, 738.52598804194781, 1.1840000000000002],
[0.059259259259259255, 8.4, 0.29600000000000004, 103.56616205941582, 0.5920000000000001, 737.53456806740428, 1.1840000000000002],
[0.059259259259259255, 8.4, 0.29600000000000004, 149.49157528349323, 0.672, 737.79470441463366, 1.1840000000000002],
[0.059259259259259255, 6.93948002141435, 0.272, 103.57476787958687, 0.5920000000000001, 739.671922148676, 1.1840000000000002],
[0.059259259259259255, 10.017537925119589, 0.32, 103.60273144822639, 0.5920000000000001, 735.5549808413133, 1.1840000000000002]]
#gamma1=3.0503084533045279
upper_bound_eos_args=[[0.059259259259259255, 30, 0.29600000000000004, 218.54729186441736, 0.512, 2110.1402987649576, 1.1840000000000002],
[0.059259259259259255, 30, 0.29600000000000004, 298.98747443548757, 0.5920000000000001, 1949.035553443144, 1.1840000000000002],
[0.059259259259259255, 30, 0.29600000000000004, 386.9239253136754, 0.672, 1810.3314115401727, 1.1840000000000002],
[0.059259259259259255, 23.179569511045678, 0.272, 301.82393637730013, 0.5920000000000001, 1968.5642788279802, 1.1840000000000002],
[0.059259259259259255, 38.05392111496401, 0.32, 296.08342746016274, 0.5920000000000001, 1932.0239231893693, 1.1840000000000002]]
#lower_bound_eos=[]
#upper_bound_eos=[]
lower_bound_pc=[]
upper_bound_pc=[]
lower_cs2_pc_max=[]
upper_cs2_pc_max=[]
lower_maxmass=[]
upper_maxmass=[]
lower_cs2_p2=[]
upper_cs2_p2=[]
Preset_rtol=1e-4
for i in range(5):
#lower_bound_eos.append(EOS_BPSwithPoly(lower_bound_eos_args[i]))
#upper_bound_eos.append(EOS_BPSwithPoly(upper_bound_eos_args[i]))
maxmass_result_lower=Maxmass(Preset_Pressure_final,Preset_rtol,lower_bound_eos[i])
maxmass_result_upper=Maxmass(Preset_Pressure_final,Preset_rtol,upper_bound_eos[i])
lower_bound_pc.append(maxmass_result_lower[1])
upper_bound_pc.append(maxmass_result_upper[1])
lower_maxmass.append(maxmass_result_lower[2])
upper_maxmass.append(maxmass_result_upper[2])
lower_cs2_p2.append(lower_bound_eos[i].eosCs2(0.99*lower_bound_eos[i].args[3]))
upper_cs2_p2.append(upper_bound_eos[i].eosCs2(0.99*upper_bound_eos[i].args[3]))
lower_cs2_pc_max.append(lower_bound_eos[i].eosCs2(lower_bound_pc[i]))
upper_cs2_pc_max.append(upper_bound_eos[i].eosCs2(upper_bound_pc[i]))
from Parallel_process import main_parallel
import cPickle
pc_list=10**np.linspace(0,-1.5,20)
def Calculation_mass_beta_Lambda(args_list,i):
eos=args_list[:,0]
pc_max=args_list[:,1]
mass=[]
beta=[]
Lambda=[]
for j in range(len(pc_list)):
MR_result=MassRadius(pc_max[i]*pc_list[j],Preset_Pressure_final,Preset_rtol,'MRBIT',eos[i])
mass.append(MR_result[0])
beta.append(MR_result[2])
Lambda.append(MR_result[-1])
return [mass,beta,Lambda]
f_mass_beta_Lambda_result='./out.dat'
main_parallel(Calculation_mass_beta_Lambda,np.array([upper_bound_eos,upper_bound_pc]).transpose(),f_mass_beta_Lambda_result,0)
f=open(f_mass_beta_Lambda_result,'rb')
mass_beta_Lambda_result=np.array(cPickle.load(f))
f.close()
mass_upper=mass_beta_Lambda_result[:,0]
beta_upper=mass_beta_Lambda_result[:,1]
Lambda_upper=mass_beta_Lambda_result[:,2]
f_mass_beta_Lambda_result='./out.dat'
main_parallel(Calculation_mass_beta_Lambda,np.array([lower_bound_eos,lower_bound_pc]).transpose(),f_mass_beta_Lambda_result,0)
f=open(f_mass_beta_Lambda_result,'rb')
mass_beta_Lambda_result=np.array(cPickle.load(f))
f.close()
mass_lower=mass_beta_Lambda_result[:,0]
beta_lower=mass_beta_Lambda_result[:,1]
Lambda_lower=mass_beta_Lambda_result[:,2]
eos_label=['n1=1.85ns,n2=3.2ns', 'n1=1.85ns,n2=3.7ns', 'n1=1.85ns,n2=4.2ns', 'n1=1.7ns,n2=3.7ns', 'n1=2.0ns,n2=3.7ns']
eos_color=['r','k','c','g','b']
for i in range(5):
plt.plot(mass_upper[i],Lambda_upper[i],color=eos_color[i],label='upper bound '+eos_label[i])
plt.plot(mass_lower[i],Lambda_lower[i],'--',color=eos_color[i],label='lower bound '+eos_label[i])
plt.legend()
plt.xlim(1.0,2.0)
plt.ylim(0,3000)
plt.xlabel('$M/M_\odot$')
plt.ylabel('$\Lambda$')
for i in range(5):
plt.plot(mass_upper[i],beta_upper[i]**6*Lambda_upper[i],color=eos_color[i],label='upper bound '+eos_label[i])
plt.plot(mass_lower[i],beta_lower[i]**6*Lambda_lower[i],'--',color=eos_color[i],label='lower bound '+eos_label[i])
plt.legend()
plt.xlim(1.0,2.0)
#plt.ylim(0,3000)
plt.xlabel('$M/M_\odot$')
plt.ylabel(r'$\beta^6\Lambda$')
|
[
"tianqi.zhao@stonybrook.edu"
] |
tianqi.zhao@stonybrook.edu
|
dc2a0f1c550c97f969d894be1f332fe010d3868b
|
e0e497aed225a9a758dfff214b0b866b3c5778d5
|
/hw_colors.py
|
9a9b22b972845918f95df401a1b927059132d5b6
|
[
"Apache-2.0"
] |
permissive
|
rjalmo/cs224u
|
344cd83ec87a8d285c540142fa20b8811bf44c3f
|
2ebebe15c7656f0d2fcf988a5b8c4fef5f6bb42d
|
refs/heads/master
| 2022-09-10T10:53:02.267896
| 2020-05-23T10:45:31
| 2020-05-23T10:45:31
| 256,973,984
| 0
| 0
| null | 2020-04-19T10:43:02
| 2020-04-19T10:43:01
| null |
UTF-8
|
Python
| false
| false
| 21,879
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Homework and bake-off: pragmatic color descriptions
# In[246]:
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2020"
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [Set-up](#Set-up)
# 1. [All two-word examples as a dev corpus](#All-two-word-examples-as-a-dev-corpus)
# 1. [Dev dataset](#Dev-dataset)
# 1. [Random train–test split for development](#Random-train–test-split-for-development)
# 1. [Question 1: Improve the tokenizer [1 point]](#Question-1:-Improve-the-tokenizer-[1-point])
# 1. [Use the tokenizer](#Use-the-tokenizer)
# 1. [Question 2: Improve the color representations [1 point]](#Question-2:-Improve-the-color-representations-[1-point])
# 1. [Use the color representer](#Use-the-color-representer)
# 1. [Initial model](#Initial-model)
# 1. [Question 3: GloVe embeddings [1 points]](#Question-3:-GloVe-embeddings-[1-points])
# 1. [Try the GloVe representations](#Try-the-GloVe-representations)
# 1. [Question 4: Color context [3 points]](#Question-4:-Color-context-[3-points])
# 1. [Your original system [3 points]](#Your-original-system-[3-points])
# 1. [Bakeoff [1 point]](#Bakeoff-[1-point])
# ## Overview
#
# This homework and associated bake-off are oriented toward building an effective system for generating color descriptions that are pragmatic in the sense that they would help a reader/listener figure out which color was being referred to in a shared context consisting of a target color (whose identity is known only to the describer/speaker) and a set of distractors.
#
# The notebook [colors_overview.ipynb](colors_overview.ipynb) should be studied before work on this homework begins. That notebook provides background on the task, the dataset, and the modeling code that you will be using and adapting.
#
# The homework questions are more open-ended than previous ones have been. Rather than asking you to implement pre-defined functionality, they ask you to try to improve baseline components of the full system in ways that you find to be effective. As usual, this culminates in a prompt asking you to develop a novel system for entry into the bake-off. In this case, though, the work you do for the homework will likely be directly incorporated into that system.
# ## Set-up
# See [colors_overview.ipynb](colors_overview.ipynb) for set-up instructions and other background details.
# In[247]:
from colors import ColorsCorpusReader
import os
from sklearn.model_selection import train_test_split
from torch_color_describer import (
ContextualColorDescriber, create_example_dataset)
import utils
from utils import START_SYMBOL, END_SYMBOL, UNK_SYMBOL
# In[248]:
utils.fix_random_seeds()
# In[249]:
COLORS_SRC_FILENAME = os.path.join(
"data", "colors", "filteredCorpus.csv")
# ## All two-word examples as a dev corpus
#
# So that you don't have to sit through excessively long training runs during development, I suggest working with the two-word-only subset of the corpus until you enter into the late stages of system testing.
# In[250]:
dev_corpus = ColorsCorpusReader(
COLORS_SRC_FILENAME,
word_count=2,
normalize_colors=True)
# In[251]:
dev_examples = list(dev_corpus.read())
# This subset has about one-third the examples of the full corpus:
# In[252]:
len(dev_examples)
# We __should__ worry that it's not a fully representative sample. Most of the descriptions in the full corpus are shorter (one word), and a large proportion are longer (three or more words). So this dataset is mainly for debugging, development, and general hill-climbing. All findings should be validated on the full dataset at some point.
# ## Dev dataset
#
# The first step is to extract the raw color and raw texts from the corpus:
# In[253]:
dev_rawcols, dev_texts = zip(*[[ex.colors, ex.contents] for ex in dev_examples])
# The raw color representations are suitable inputs to a model, but the texts are just strings, so they can't really be processed as-is. Question 1 asks you to do some tokenizing!
# ## Random train–test split for development
#
# For the sake of development runs, we create a random train–test split:
# In[254]:
dev_rawcols_train, dev_rawcols_test, dev_texts_train, dev_texts_test = train_test_split(dev_rawcols, dev_texts)
# ## Question 1: Improve the tokenizer [1 point]
#
# This is the first required question – the first required modification to the default pipeline.
#
# The function `tokenize_example` simply splits its string on whitespace and adds the required start and end symbols:
# In[255]:
def tokenize_example(s):
# Improve me!
import string
def remove_suffix(w):
for suffix in ['er', 'est', 'ish']:
if w.endswith(suffix):
return w[:-len(suffix)]
return w
def remove_punctuation(s):
return s.translate(str.maketrans('', '', string.punctuation))
# lowercase, remove suffixes, and punctuation
result = [remove_suffix(w.lower()) for w in remove_punctuation(s).split()]
return [START_SYMBOL] + result + [END_SYMBOL]
# In[256]:
tokenize_example(dev_texts_train[376])
# __Your task__: Modify `tokenize_example` so that it does something more sophisticated with the input text.
#
# __Notes__:
#
# * There are useful ideas for this in [Monroe et al. 2017](https://transacl.org/ojs/index.php/tacl/article/view/1142)
# * There is no requirement that you do word-level tokenization. Sub-word and multi-word are options.
# * This question can interact with the size of your vocabulary (see just below), and in turn with decisions about how to use `UNK_SYMBOL`.
#
# __Important__: don't forget to add the start and end symbols, else the resulting models will definitely be terrible!
# ## Use the tokenizer
# Once the tokenizer is working, run the following cell to tokenize your inputs:
# In[257]:
dev_seqs_train = [tokenize_example(s) for s in dev_texts_train]
dev_seqs_test = [tokenize_example(s) for s in dev_texts_test]
# We use only the train set to derive a vocabulary for the model:
# In[258]:
dev_vocab = sorted({w for toks in dev_seqs_train for w in toks}) + [UNK_SYMBOL]
# It's important that the `UNK_SYMBOL` is included somewhere in this list. Test examples with words not seen in training will be mapped to `UNK_SYMBOL`. If your model's vocab is the same as your train vocab, then `UNK_SYMBOL` will never be encountered during training, so it will be a random vector at test time.
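# As a quick illustration (my sketch, not part of the assignment scaffolding;
# `map_to_vocab` is a hypothetical helper name), this is roughly the mapping
# the model's lookup performs for out-of-vocabulary tokens:
def map_to_vocab(tokens, vocab):
    vocab_set = set(vocab)
    return [t if t in vocab_set else UNK_SYMBOL for t in tokens]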
# In[259]:
len(dev_vocab)
# ## Question 2: Improve the color representations [1 point]
#
# This is the second required pipeline improvement for the assignment.
#
# The following functions do nothing at all to the raw input colors we get from the corpus.
# In[260]:
from scipy.fft import fft
def represent_color_context(colors):
# Improve me!
return [represent_color(color) for color in colors]
def represent_color(color):
# Improve me!
return fft(color)
# In[261]:
represent_color_context(dev_rawcols_train[0])
# __Your task__: Modify `represent_color_context` and/or `represent_color` to represent colors in a new way.
#
# __Notes__:
#
# * The Fourier-transform method of [Monroe et al. 2017](https://transacl.org/ojs/index.php/tacl/article/view/1142) is a proven choice. A small sketch follows these notes.
# * You are not required to keep `represent_color`. This might be unnatural if you want to perform an operation on each color trio all at once.
# * For that matter, if you want to process all of the color contexts in the entire data set all at once, that is fine too, as long as you can also perform the operation at test time with an unknown number of examples being tested.
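# A small sketch of one way to realize the Fourier idea (my illustration;
# `fourier_color_features` is a hypothetical name, and it reuses the `fft`
# import above). `fft` returns complex values, so the real and imaginary
# parts are split out into separate real-valued features:
import numpy as np
def fourier_color_features(color):
    transformed = fft(color)
    return np.concatenate([transformed.real, transformed.imag])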
# ## Use the color representer
# The following cell just runs your `represent_color_context` on the train and test sets:
# In[262]:
dev_cols_train = [represent_color_context(colors) for colors in dev_rawcols_train]
dev_cols_test = [represent_color_context(colors) for colors in dev_rawcols_test]
# At this point, our preprocessing steps are complete, and we can fit a first model.
# ## Initial model
#
# The first model is configured right now to be a small model run for just a few iterations. It should be enough to get traction, but it's unlikely to be a great model. You are free to modify this configuration if you wish; it is here just for demonstration and testing:
# In[263]:
dev_mod = ContextualColorDescriber(
dev_vocab,
embed_dim=10,
hidden_dim=10,
max_iter=5,
batch_size=128)
# In[264]:
_ = dev_mod.fit(dev_cols_train, dev_seqs_train)
# As discussed in [colors_overview.ipynb](colors_overview.ipynb), our primary metric is `listener_accuracy`:
# In[265]:
dev_mod.listener_accuracy(dev_cols_test, dev_seqs_test)
# We can also see the model's predicted sequences given color context inputs:
# In[266]:
dev_mod.predict(dev_cols_test[:1])
# In[267]:
dev_seqs_test[:1]
# ## Question 3: GloVe embeddings [1 points]
#
# The above model uses a random initial embedding, as configured by the decoder used by `ContextualColorDescriber`. This homework question asks you to consider using GloVe inputs.
#
# __Your task__: Complete `create_glove_embedding` so that it creates a GloVe embedding based on your model vocabulary. This isn't meant to be analytically challenging, but rather just to create a basis for you to try out other kinds of rich initialization.
# In[268]:
GLOVE_HOME = os.path.join('data', 'glove.6B')
# In[269]:
def create_glove_embedding(vocab, glove_base_filename='glove.6B.50d.txt'):
# Use `utils.glove2dict` to read in the GloVe file:
##### YOUR CODE HERE
glove_src = os.path.join(GLOVE_HOME, glove_base_filename)
lookup = utils.glove2dict(glove_src)
# Use `utils.create_pretrained_embedding` to create the embedding.
# This function will, by default, ensure that START_TOKEN,
# END_TOKEN, and UNK_TOKEN are included in the embedding.
##### YOUR CODE HERE
embedding, emb_vocab = utils.create_pretrained_embedding(lookup, vocab)
# Be sure to return the embedding you create as well as the
# vocabulary returned by `utils.create_pretrained_embedding`,
# which is likely to have been modified from the input `vocab`.
##### YOUR CODE HERE
return embedding, emb_vocab
# ## Try the GloVe representations
# Let's see if GloVe helped for our development data:
# In[270]:
dev_glove_embedding, dev_glove_vocab = create_glove_embedding(dev_vocab)
# The above might dramatically change your vocabulary, depending on how many items from your vocab are in the GloVe space:
# In[271]:
len(dev_vocab)
# In[272]:
len(dev_glove_vocab)
# In[273]:
dev_mod_glove = ContextualColorDescriber(
dev_glove_vocab,
embedding=dev_glove_embedding,
hidden_dim=10,
max_iter=5,
batch_size=128)
# In[274]:
_ = dev_mod_glove.fit(dev_cols_train, dev_seqs_train)
# In[275]:
dev_mod_glove.listener_accuracy(dev_cols_test, dev_seqs_test)
# You probably saw a small boost, assuming your tokenization scheme leads to good overlap with the GloVe vocabulary. The input representations are larger than in our previous model (at least as I configured things), so we would need to do more runs with higher `max_iter` values to see whether this is worthwhile overall.
# ## Question 4: Color context [3 points]
#
# The final required homework question is the most challenging, but it should set you up to think in much more flexible ways about the underlying model we're using.
#
# The question asks you to modify various model components in `torch_color_describer.py`. The section called [Modifying the core model](colors_overview.ipynb#Modifying-the-core-model) from the core unit notebook provides a number of examples illustrating the basic techniques, so you might review that material if you get stuck here.
#
# __Your task__: [Monroe et al. 2017](https://transacl.org/ojs/index.php/tacl/article/view/1142) append the target color (the final one in the context) to each input token that gets processed by the decoder. The question asks you to subclass the `Decoder` and `EncoderDecoder` from `torch_color_describer.py` so that you can build models that do this.
# __Step 1__: Modify the `Decoder` so that the input vector to the model at each timestep is not just a token representation `x` but the concatenation of `x` with the representation of the target color.
#
# __Notes__:
#
# * You might notice at this point that the original `Decoder.forward` method has an optional keyword argument `target_colors` that is passed to `Decoder.get_embeddings`. Because this is already in place, all you have to do is modify the `get_embeddings` method to use this argument.
#
# * The change affects the configuration of `self.rnn`, so you need to subclass the `__init__` method as well, so that its `input_size` argument accommodates the embedding as well as the color representations.
#
# * You can do the relevant operations efficiently in pure PyTorch using `repeat_interleave` and `cat`, but the important thing is to get a working implementation – you can always optimize the code later if the ideas prove useful to you. A toy demonstration follows these notes.
#
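# A toy shape check (my own illustration, independent of the skeleton below)
# showing how `repeat_interleave` tiles the target color across timesteps and
# `cat` appends it to each token embedding:
import torch
toy_emb = torch.zeros(2, 3, 4)   # (batch, seq_len, embed_dim)
toy_colors = torch.ones(2, 2)    # (batch, color_dim)
toy_tiled = toy_colors[:, None, :].repeat_interleave(toy_emb.shape[1], dim=1)
toy_combined = torch.cat((toy_emb, toy_tiled), dim=2)  # shape (2, 3, 6)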
# Here's skeleton code for you to flesh out:
# In[276]:
import torch
import torch.nn as nn
from torch_color_describer import Decoder
class ColorContextDecoder(Decoder):
def __init__(self, color_dim, *args, **kwargs):
self.color_dim = color_dim
super().__init__(*args, **kwargs)
# Fix the `self.rnn` attribute:
##### YOUR CODE HERE
self.rnn = nn.GRU(
input_size=self.embed_dim + self.color_dim,
hidden_size=self.hidden_dim,
batch_first=True)
def get_embeddings(self, word_seqs, target_colors=None):
"""You can assume that `target_colors` is a tensor of shape
(m, n), where m is the length of the batch (same as
`word_seqs.shape[0]`) and n is the dimensionality of the
color representations the model is using. The goal is
        to attach each color vector i to each of the tokens in
the ith sequence of (the embedded version of) `word_seqs`.
"""
##### YOUR CODE HERE
emb = self.embedding(word_seqs)
tar = target_colors[:, None, :]
inter = tar.repeat_interleave(emb.shape[1], dim=1)
result = torch.cat((emb, inter), dim=2)
return result
# __Step 2__: Modify the `EncoderDecoder`. For this, you just need to make a small change to the `forward` method: extract the target colors from `color_seqs` and feed them to the decoder.
# In[277]:
from torch_color_describer import EncoderDecoder
class ColorizedEncoderDecoder(EncoderDecoder):
def forward(self,
color_seqs,
word_seqs,
seq_lengths=None,
hidden=None,
targets=None):
if hidden is None:
hidden = self.encoder(color_seqs)
# Extract the target colors from `color_seqs` and
# feed them to the decoder, which already has a
# `target_colors` keyword.
##### YOUR CODE HERE
output, hidden = self.decoder(
word_seqs, seq_lengths=seq_lengths, hidden=hidden, target_colors=color_seqs[:,-1,:])
return output, hidden, targets
# __Step 3__: Finally, as in the examples in [Modifying the core model](colors_overview.ipynb#Modifying-the-core-model), you need to modify the `build_graph` method of `ContextualColorDescriber` so that it uses your new `ColorContextDecoder` and `ColorizedEncoderDecoder`. Here's starter code:
# In[298]:
from torch_color_describer import Encoder
class ColorizedInputDescriber(ContextualColorDescriber):
def __init__(self, *args, **kwargs):
super(ColorizedInputDescriber, self).__init__(*args, **kwargs)
def build_graph(self):
# We didn't modify the encoder, so this is
# just copied over from the original:
encoder = Encoder(
color_dim=self.color_dim,
hidden_dim=self.hidden_dim)
# Use your `ColorContextDecoder`, making sure
# to pass in all the keyword arguments coming
# from `ColorizedInputDescriber`:
##### YOUR CODE HERE
decoder = ColorContextDecoder(
color_dim=self.color_dim,
vocab_size=self.vocab_size,
embed_dim=self.embed_dim,
embedding=self.embedding,
hidden_dim=self.hidden_dim)
# Return a `ColorizedEncoderDecoder` that uses
# your encoder and decoder:
##### YOUR CODE HERE
return ColorizedEncoderDecoder(encoder, decoder)
# That's it! Since these modifications are pretty intricate, you might want to use [a toy dataset](colors_overview.ipynb#Toy-problems-for-development-work) to debug it:
# In[288]:
toy_color_seqs, toy_word_seqs, toy_vocab = create_example_dataset(
group_size=50, vec_dim=2)
# In[289]:
toy_color_seqs_train, toy_color_seqs_test, toy_word_seqs_train, toy_word_seqs_test = train_test_split(toy_color_seqs, toy_word_seqs)
# In[290]:
toy_mod = ColorizedInputDescriber(
toy_vocab,
embed_dim=10,
hidden_dim=10,
max_iter=100,
batch_size=128)
# In[291]:
_ = toy_mod.fit(toy_color_seqs_train, toy_word_seqs_train)
# In[292]:
toy_mod.listener_accuracy(toy_color_seqs_test, toy_word_seqs_test)
# If that worked, then you can now try this model on SCC problems!
# ## Your original system [3 points]
# There are many options for your original system, which consists of the full pipeline – all preprocessing and modeling steps. You are free to use any model you like, as long as you subclass `ContextualColorDescriber` in a way that allows its `listener_accuracy` method to behave in the expected way.
#
# So that we can evaluate models in a uniform way for the bake-off, we ask that you modify the function `my_original_system` below so that it accepts a trained instance of your model and does any preprocessing steps required by your model.
#
# If we seek to reproduce your results, we will rerun this entire notebook. Thus, it is fine if your `my_original_system` makes use of functions you wrote or modified above this cell.
# In[293]:
def my_original_system(trained_model, color_seqs_test, texts_test):
"""Feel free to modify this code to accommodate the needs of
your system. Just keep in mind that it will get raw corpus
examples as inputs for the bake-off.
"""
    # `texts_test` is a list of strings, so tokenize each of
    # its elements:
tok_seqs = [tokenize_example(s) for s in texts_test]
col_seqs = [represent_color_context(colors)
for colors in color_seqs_test]
# Return the `listener_accuracy` for your model:
return trained_model.listener_accuracy(col_seqs, tok_seqs)
# If `my_original_system` works on test sets you create from the corpus distribution, then it will work for the bake-off, so consider checking that. For example, this would check that `dev_mod` above passes muster:
# In[294]:
my_original_system(dev_mod, dev_rawcols_test, dev_texts_test)
# In the cell below, please provide a brief technical description of your original system, so that the teaching team can gain an understanding of what it does. This will help us to understand your code and analyze all the submissions to identify patterns and strategies.
# In[301]:
# Enter your system description in this cell.
# The system uses the ColorizedInputDescriber to produce a trained model.
# Each word sequence text is tokenized based on the approach described in Monroe et al. 2017,
# where each word of the sequence is lowercased, stripped of 'er', 'est', and 'ish' suffixes, and cleared of punctuation.
# Each color representation is then processed with a Fourier-transform function applied to produce color sequences.
# The decoder within the ColorizedInputDescriber concatenates the target color sequence to each word embedding dimension.
#
# My peak score was: 0.7512237258854016
if 'IS_GRADESCOPE_ENV' not in os.environ:
color_dev_mod = ColorizedInputDescriber(
dev_vocab,
embed_dim=10,
hidden_dim=10,
max_iter=100,
batch_size=128)
color_dev_mod.fit(dev_cols_train, dev_seqs_train)
result = my_original_system(color_dev_mod, dev_rawcols_test, dev_texts_test)
print(result)
# Please do not remove this comment.
# ## Bakeoff [1 point]
# For the bake-off, we will release a test set. The announcement will go out on the discussion forum. You will evaluate your custom model from the previous question on these new datasets using your `my_original_system` function. Rules:
#
# 1. Only one evaluation is permitted.
# 1. No additional system tuning is permitted once the bake-off has started.
#
# The cells below this one constitute your bake-off entry.
#
# People who enter will receive the additional homework point, and people whose systems achieve the top score will receive an additional 0.5 points. We will test the top-performing systems ourselves, and only systems for which we can reproduce the reported results will win the extra 0.5 points.
#
# Late entries will be accepted, but they cannot earn the extra 0.5 points. Similarly, you cannot win the bake-off unless your homework is submitted on time.
#
# The announcement will include the details on where to submit your entry.
# In[ ]:
# Enter your bake-off assessment code in this cell.
# Please do not remove this comment.
# In[ ]:
# On an otherwise blank line in this cell, please enter
# your listener_accuracy score as reported by the code
# above. Please enter only a number between 0 and 1 inclusive.
# Please do not remove this comment.
|
[
"rjalmo@github.com"
] |
rjalmo@github.com
|
df5f1e52dda77005e950ebab2b576331300d4ee1
|
15204477d3f85ad9177d6eecaf409b507a5fffd3
|
/Lesson_3/HW_3_2.py
|
0e98fbf4c21de2446ba45cdddb2e7a3b8e8fb024
|
[] |
no_license
|
Seila2009/Basic_Python
|
cef77c9a5db3d68edb2980fcd486bfebd407f6c8
|
a8b422fbadc209bb2114be1e1ee813d0da39158a
|
refs/heads/main
| 2023-02-12T20:53:43.677056
| 2021-01-10T16:51:33
| 2021-01-10T16:51:33
| 310,787,485
| 0
| 0
| null | 2021-01-10T16:51:35
| 2020-11-07T07:02:10
|
Python
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
def pers_data(name="", last_name="", year="", city="", email="", tel_num=""):
    return f"Hello, my name is {name} {last_name}. I was born in {year} in the city of {city}. " \
           f"My contact details: email - {email}, phone - {tel_num}."
print(pers_data(name=input('Enter your first name: '),
                last_name=input('Enter your last name: '),
                year=input('Enter your year of birth: '),
                city=input('Enter your city of birth: '),
                email=input('Enter your e-mail: '),
                tel_num=input('Enter your phone number: ')))
|
[
"Seila2009@yandex.ru"
] |
Seila2009@yandex.ru
|
849a2fdf44397c065660f4aade3876157bc151b9
|
9c9131970bcbf9b14a53c8638ade0f624d6bf687
|
/backend/games/common/models/game.py
|
4e056d394697c39b9ffe1f45aa16457c21ace926
|
[] |
no_license
|
PraderioM/GamePlatform
|
969e803f9ad264e24b3d6ebceb5f142a69479333
|
36e206c76c27d70cff14b9819d3b8a9040d21904
|
refs/heads/master
| 2023-01-11T15:48:29.944509
| 2022-12-26T14:32:10
| 2022-12-26T14:32:10
| 251,726,441
| 0
| 0
| null | 2022-12-26T14:33:54
| 2020-03-31T20:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,849
|
py
|
import abc
import asyncpg
import json
from random import shuffle
from typing import Dict, List, Optional
from .game_component import GameComponent
from .play import Play
from .player import Player
class Game(GameComponent):
def __init__(self, current_player_index: int,
play_list: List[Play], player_list: List[Player],
id_: Optional[str],
n_actions: int):
self.current_player_index = current_player_index
self.play_list = play_list[:]
self.player_list = player_list[:]
self.id = id_
self._n_actions = n_actions
def to_database(self) -> Dict:
return {
'current_player_index': self.current_player_index,
'plays': json.dumps([play.to_database() for play in self.play_list]),
'players': json.dumps([player.to_database() for player in self.player_list]),
'id': self.id,
'n_actions': self._n_actions,
}
@abc.abstractmethod
def to_display(self) -> Dict:
raise NotImplementedError('Sub-classes must implement to frontend method.')
@abc.abstractmethod
def to_frontend(self, db: Optional[asyncpg.Connection] = None, *args, **kwargs) -> Dict:
raise NotImplementedError('Sub-classes must implement to frontend method.')
@abc.abstractmethod
def get_player_score(self, player: Player) -> int:
raise NotImplementedError('Sub-classes must implement get player points method.')
def to_game_resolution(self, player: Optional[Player]) -> Dict[str, bool]:
if player is None:
return {'isObserver': True}
scores_dict = self.get_all_player_scores()
sorted_scores = sorted([score for _, score in scores_dict.items()], reverse=True)
player_score = scores_dict[player.name]
if player_score == sorted_scores[0]:
if player_score == sorted_scores[1]:
return {'isTie': True}
else:
return {'isVictorious': True}
else:
return {'isLoser': True}
def get_all_player_scores(self) -> Dict[str, int]:
return {player.name: self.get_player_score(player) for player in self.player_list}
def resolution_points(self, player: Player):
scores_dict = self.get_all_player_scores()
sorted_scores = sorted([score for _, score in scores_dict.items()], reverse=True)
player_score = scores_dict[player.name]
above_players = len([score for score in sorted_scores if score > player_score])
below_players = len([score for score in sorted_scores if score < player_score])
return below_players - above_players
def add_play(self, play: Optional[Play]):
if play is None:
return
if play.player in self.player_list:
if self.player_list.index(play.player) == self.current_player_index:
self.play_list.append(play)
self.update_player_index()
def update_player_index(self):
self.current_player_index = (self.current_player_index + 1) % len(self.player_list)
def update_n_actions(self):
self._n_actions += 1
def get_player_by_name(self, name: str) -> Optional[Player]:
for player in self.player_list:
if player.name == name:
return player
return None
def add_new_player_name(self, name: str):
# Cannot add twice the same player.
for player in self.player_list:
if player.name == name:
return
if self.n_missing > 0:
player_list = self.player_list[:]
# Make player position random.
shuffle(player_list)
for player in player_list:
if player.name is None and not player.is_bot:
player.name = name
break
def is_winner_points(self, resolution_points: int) -> bool:
return resolution_points == len(self.player_list) - 1
@property
def n_bots(self) -> int:
return len([player for player in self.player_list if player.is_bot])
@property
def n_players(self) -> int:
return len([player for player in self.player_list if not player.is_bot])
@property
def n_current(self) -> int:
return len([player for player in self.player_list if player.name is not None and not player.is_bot])
@property
def current_player(self) -> Player:
return self.player_list[self.current_player_index]
@property
def n_missing(self) -> int:
return self.n_players - self.n_current
@property
def n_actions(self):
return self._n_actions
@property
@abc.abstractmethod
def has_ended(self) -> bool:
raise NotImplementedError('Sub-classes must implement `has_ended` property')
|
[
"PraderioM@Gmail.com"
] |
PraderioM@Gmail.com
|
16ce80485888de8ab4bfab31f0e1396dae14d95a
|
ca27d0a1fbb1e5b5df7676a56e0117a19a40ac1c
|
/LargestValues/MaxPriorityQueue.py
|
5d3c8f6ea27371dbb409c8542af9290280027f7b
|
[] |
no_license
|
azh4r/online-sorting-algorithms
|
eb65d3accd8ba7c83670f9553923e0ea02b60cc6
|
94000f42a653d062cfd329075e1539a391936521
|
refs/heads/main
| 2023-03-19T07:38:27.882121
| 2021-03-09T17:23:59
| 2021-03-09T17:23:59
| 324,482,162
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
from queue import PriorityQueue
# By default, Python's PriorityQueue is a min-priority queue; negating the
# priority on the way in (and back out) turns it into a max-priority queue.
class MaxPriorityQueue(PriorityQueue):
def __init__(self):
PriorityQueue.__init__(self)
self.reverse = -1
def push(self, priority, data):
PriorityQueue.put(self,(self.reverse*priority, data))
def pop(self, *args, **kwargs):
priority, data = PriorityQueue.get(self, *args, **kwargs)
return self.reverse * priority, data
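# Example usage (illustrative): items come back out largest-priority first.
if __name__ == "__main__":
    pq = MaxPriorityQueue()
    pq.push(1, 'low')
    pq.push(5, 'high')
    print(pq.pop())  # (5, 'high')
    print(pq.pop())  # (1, 'low')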
|
[
"azhar.basit@gmail.com"
] |
azhar.basit@gmail.com
|
38427ed51e281e7e2e2de65afd012da08f83da0f
|
16c94af48a05294b99ab6496199c28d64973b4af
|
/romeo_sorter.py
|
11fb5b27afa2dbe910b5d7c6e9a2beadcca94507
|
[] |
no_license
|
anajaved/coursera_intro_to_programming
|
317c6ea4707c62871e562f9bef534f3ff9c26fb6
|
f34463040198c5ad0d055e6407cad470234a5038
|
refs/heads/master
| 2020-04-05T18:32:09.023987
| 2016-11-01T18:01:51
| 2016-11-01T18:01:51
| 66,982,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
fname = input("Enter file name: ")
fh = open(fname)
words = list()
# remove white space, split each line into words, and collect the unique ones
for line in fh:
    for word in line.rstrip().split():
        if word not in words:
            words.append(word)
words.sort()
print(words)
|
[
"noreply@github.com"
] |
anajaved.noreply@github.com
|
d805bbc6775ceadfc642833a71503d7bb0c33f4e
|
e423810d41f73681e8b5ea8926762277c8fe2d78
|
/Session01_IntroductionToPython/src/m6_your_turtles.py
|
39aee8aee5816a68d01b466f1324579711bcc553
|
[] |
no_license
|
wykkevin/CSSE120
|
5d74745d4475a532e7bee808c58198076c71d301
|
7dfeff647a130c7132af3e5ffeda67d97ee9b8a8
|
refs/heads/master
| 2021-09-15T09:35:24.642099
| 2018-05-30T01:24:12
| 2018-05-30T01:24:12
| 135,370,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,386
|
py
|
"""
Demonstrates using OBJECTS via Turtle Graphics.
It is the same m3_turtles.py except that it organizes
the code into FUNCTIONS and then defines ADDITIONAL functions.
Concepts include:
-- CONSTRUCT an INSTANCE of a CLASS (we call such instances OBJECTS).
-- Make an object ** DO ** something by using a METHOD.
-- Reference an object's ** DATA ** by using an INSTANCE VARIABLE.
Also:
-- ASSIGNING a VALUE to a NAME (VARIABLE).
-- ORGANIZING code into FUNCTIONS.
-- MAIN as the place where execution starts.
-- CALLING functions.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
and their colleagues. March 2016.
Yuankai Wang made modifications to this module.
"""
########################################################################
#
# DONE: 1.
# On Line 19 above, replace PUT_YOUR_OWN_NAME_HERE with your OWN name.
#
########################################################################
########################################################################
#
# DONE: 2.
#
# You should have RUN the PREVIOUS module and SKIMMED its code.
# (Do so now if you have not already done so.)
#
# THIS module is the SAME as the PREVIOUS module, except for its
# comments and the fact that the calls in main are "commented out".
#
# 1. Look at the definition of the main function below,
# at Lines 81 to 85. Note that the calls that it makes
# to four of the five example functions are "commented out" --
# they have a hash mark in front of them, making them comments
# (and hence they will not run).
#
# 2. RUN this program. It will run the function call in main
# that is NOT commented-out, that is, it will call the
# draw_squares_in_squares
# function.
#
# 3. Modify the definition of the draw_squares_in_squares function
# (it starts at Line 188) in ANY way you like.
# ** ANYTHING ** is OK.
#
#       CHALLENGE:  Can you make an even cooler picture than the one
# that you start with?
#
# If you get errors, NO WORRIES -- ask for help if you are in class,
# or just leave them as errors if you are doing this after class.
#
# 4. Repeat step 3 a few times, with any functions that you like.
# Don't forget to "un-comment" the function call in main to make
# them run.
#
# ****************************************************************
# ** You are NOT expected to understand much of ANYTHING
# ** in this module yet. Just enjoy playing with it
# ** as a preview of forthcoming sessions.
# ****************************************************************
#
# 5. After you have made a few changes (or more, if you wish),
# COMMIT your work (which turns it in) by selecting this file
# and doing SVN ~ Commit. If your code is broken, it
# will ask you if you really want to Commit -- respond Yes.
#
########################################################################
import rosegraphics as rg
def main():
""" Calls the other functions in this module to demo them. """
# example_from_m3()
# draw_you_guess_it()
# draw_pink_square()
draw_squares_in_squares()
# cool_turtle()
def example_from_m3():
"""
Constructs several SimpleTurtles and demonstrates their use.
This code in this example is EXACTLY the same as that from the
m3_turtles.py
module that you saw previously except that the code is now
INSIDE this FUNCTION (also this version runs the turtles faster).
"""
# ------------------------------------------------------------------
# Next two lines after this comment set up a TurtleWindow object
# for animation. The definition of a TurtleWindow is in the
# rg (shorthand for rosegraphics) module.
# ------------------------------------------------------------------
window = rg.TurtleWindow()
window.delay(1) # Bigger numbers mean slower animation.
# ------------------------------------------------------------------
# Next two lines make (construct) two SimpleTurtle objects.
# ------------------------------------------------------------------
nadia = rg.SimpleTurtle()
akil = rg.SimpleTurtle('turtle')
# ------------------------------------------------------------------
# Next lines ask the SimpleTurtle objects to do things:
# ------------------------------------------------------------------
nadia.forward(100)
nadia.left(90)
nadia.forward(200)
akil.right(45)
akil.backward(50)
akil.right(60)
nadia.forward(50)
nadia.left(135)
# ------------------------------------------------------------------
# Next lines set the pen and speed characteristics of the
# SimpleTurtle objects. The pen characteristic is itself
# an object that is constructed, of type Pen.
# ------------------------------------------------------------------
nadia.pen = rg.Pen('blue', 10) # The 10 is the Pen's thickness
nadia.speed = 10 # 1 is slowest, big is faster, maxes out about 100
akil.pen = rg.Pen('red', 30)
akil.speed = 1
akil.backward(100)
nadia.forward(100)
nadia.left(60)
nadia.forward(500)
nadia.speed = 1 # was 10, so much slower now
nadia.right(120)
nadia.forward(200)
window.close_on_mouse_click()
def draw_you_guess_it():
"""
Constructs a window and a medium-speed, blue Turtle
that draws a certain letter of the alphabet.
"""
window = rg.TurtleWindow()
tx = rg.SimpleTurtle('turtle')
tx.pen = rg.Pen('blue', 20)
tx.speed = 5 # Medium
tx.left(60)
tx.forward(200)
tx.pen_up()
tx.left(120)
tx.forward(100)
tx.left(120)
tx.pen_down()
tx.forward(200)
window.close_on_mouse_click()
def draw_pink_square():
"""
Constructs a window and a slow, pink SimpleTurtle
that draws a square.
"""
window = rg.TurtleWindow()
pink_turtle = rg.SimpleTurtle('turtle')
pink_turtle.pen = rg.Pen('DeepPink', 5)
pink_turtle.speed = 1 # Slowest
pink_turtle.draw_square(80)
window.close_on_mouse_click()
def draw_squares_in_squares():
"""
Constructs a window and a SimpleTurtle
that draws squares within squares.
"""
window = rg.TurtleWindow()
square_turtle = rg.SimpleTurtle('turtle')
square_turtle.pen = rg.Pen('yellow', 6)
square_turtle.speed = 1000 # Fast
size = 200
delta = 20
    # Do the indented code 20 times. Each time draws a square.
for _ in range(20):
square_turtle.draw_square(size)
# Move "inside" the previous square a bit.
square_turtle.pen_up()
point_inside = rg.Point(square_turtle.x_cor() + (delta // 2),
square_turtle.y_cor() - (delta // 2))
square_turtle.go_to(point_inside)
square_turtle.pen_down()
# Next square will be a bit smaller.
size = size - 20
square_turtle = rg.SimpleTurtle('turtle')
square_turtle.pen = rg.Pen('red', 10)
square_turtle.speed = 40 # Fast
square_turtle.pen_up()
square_turtle.backward(40)
square_turtle.pen_down()
for _ in range(20):
square_turtle.draw_square(size)
# Move "inside" the previous square a bit.
square_turtle.pen_up()
point_inside = rg.Point(square_turtle.x_cor() - delta,
square_turtle.y_cor() - delta)
square_turtle.go_to(point_inside)
square_turtle.pen_down()
# Next square will be a bit smaller.
size = size - 1
delta = delta - 1
window.close_on_mouse_click()
def cool_turtle():
"""
Constructs a window and a SimpleTurtle and makes her
draw a pretty shape on the window.
Uses the variables (see below):
size angle iterations
to control the nature of the shape that it draws.
Both of these settings make pretty pictures, as do other settings:
size = 100 angle = 1 iterations = 360
size = 150 angle = 20 iterations = 90
"""
# Make the TurtleWindow.
window = rg.TurtleWindow()
# Make the SimpleTurtle.
cool_turtle = rg.SimpleTurtle('turtle')
cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too
cool_turtle.speed = 1 # Slow
# Move the SimpleTurtle to her starting position.
start_at = rg.Point(100, -50)
cool_turtle.pen_up()
cool_turtle.go_to(start_at)
cool_turtle.pen_down()
# Set up some parameters that control the nature of the shape drawn.
size = 100 # Try 150 too
angle = 1 # Try 20 too
iterations = 360 # Try 90 too
# Store the animation speed (to reset it later).
tracer_n, tracer_d = window.tracer(), window.delay()
# Make the animation go much faster.
# First number: bigger means faster.
# Second number: bigger means slower.
window.tracer(5, 5)
for _ in range(iterations):
cool_turtle.right(angle)
cool_turtle.draw_square(size)
# Reset the animation to its original speed.
window.tracer(tracer_n, tracer_d)
window.close_on_mouse_click()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
[
"wykwykwykwykkevin@gmail.com"
] |
wykwykwykwykkevin@gmail.com
|
a4fba525c51a91a7c8a9596094346db99f968dd5
|
33c9916a0997ef39255240163db5ea2bc67bd2c9
|
/sellibrary/dexter/golden_spotter_exercise.py
|
d64c31ff10eef8c94a12b013d58ad6e8387139a7
|
[] |
no_license
|
dwanev/SEL
|
5f1252b564ef5c282ccac01d6b1191296b424af9
|
1d9d68d463c7d916367c9a5eb724e06eb31eafdb
|
refs/heads/master
| 2021-09-21T02:10:19.201202
| 2018-08-19T12:01:34
| 2018-08-19T12:01:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,070
|
py
|
import json
import logging
import sys
import time
from general.golden_spotter import GoldenSpotter
from sel.dexter_dataset import DatasetDexter
class DexterThroughGolden:
# set up logging
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.propagate = False
logger.setLevel(logging.INFO)
def __init__(self):
pass
@staticmethod
def extract_body(data):
body = ''
for d in data['document']:
if d['name'].startswith('body_par_'):
body = body + d['value']
return body
def main(self, from_, to_, measurement):
# load the data
dd = DatasetDexter()
document_list = dd.get_dexter_dataset()
# process the data
count = 0
spotter = GoldenSpotter(document_list)
for document in document_list:
data = json.loads(document)
docid = data['docId']
if (count in range(from_, (to_ + 1)) and measurement == 'LINE') or \
(docid in range(from_, (to_ + 1)) and measurement == 'DOCID'):
self.logger.info('_______________________________________')
self.logger.info('Starting processing of docid = %d line=%d ', docid, count)
start_time = time.time()
body = self.extract_body(data)
title = data['title']
title_entity_candidate = spotter.get_entity_candidates(title, docid)
body_entity_candidate = spotter.get_entity_candidates(body, docid)
diff = time.time() - start_time
self.logger.info('Time taken for docid=%d, time=%f', docid, diff)
count += 1
if __name__ == "__main__":
    df = DexterThroughGolden()
    measurement = sys.argv[1].upper()
    if measurement in ('DOCID', 'LINE'):
        from_ = int(sys.argv[2])
        to_ = int(sys.argv[3])
        df.main(from_, to_, measurement)
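# Illustrative invocations (assumed, not documented in the repository):
#   python golden_spotter_exercise.py DOCID 1 50
#   python golden_spotter_exercise.py LINE 0 10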
|
[
"dwanev@gmail.com"
] |
dwanev@gmail.com
|
900dac4c07936cc4d452d57b86ca471e5d448a88
|
8311a0bcf3f2126d622f928483ce2ea9d6a7cb0d
|
/Code/Daniel/django/lab01-polls/polls/migrations/0001_initial.py
|
9620f4191550d063054a416f5fcc5fe1e0b4d0be
|
[] |
no_license
|
guam68/class_iguana
|
857247dca0ff732d11f7fb0d3dc761ec83846c94
|
e4359d32dfe60423a643c21df5636669016ad2c0
|
refs/heads/master
| 2020-05-01T06:33:22.611127
| 2019-03-13T23:07:41
| 2019-03-13T23:07:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
# Generated by Django 2.1.5 on 2019-02-05 19:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"guam_68@hotmail.com"
] |
guam_68@hotmail.com
|
69c23fe32660671649ed6c602c18af63ca869641
|
2541462164bb147212aa3ff813889ab608a82323
|
/snt/blog/urls.py
|
8f41432e4b1a86ec7ca96872834532ea76809973
|
[] |
no_license
|
vaidehi29/miniproject4
|
3f564b79aedc5bce1530802de94833f93637d275
|
ab1ad2c0a4ad509732663d40eeacd8b726481211
|
refs/heads/main
| 2023-05-02T11:46:42.699329
| 2021-05-16T18:02:11
| 2021-05-16T18:02:11
| 367,949,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="ShopHome")
]
|
[
"vaidehisonavane2436@gmail.com"
] |
vaidehisonavane2436@gmail.com
|
54e633d69fc0ea052139c4ecce2b71ac5fbbb8d5
|
af4246ff49076ee85ceaac1de47d69a38f95a93a
|
/training/src/hydrophobicity.py
|
035122987cd28228a885a874302e2476f59e216e
|
[] |
no_license
|
Jong-hun-Park/deeplearning-proteomics
|
6d3a559d3a26b75da8ba26d2ed55c282c74d6bce
|
00ddf2df67b23a07d886cdf27fd638debc7af94d
|
refs/heads/master
| 2020-06-17T19:27:14.763863
| 2017-01-13T12:28:44
| 2017-01-13T12:28:44
| 74,976,904
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
'''
Created on 2016. 11. 30.
@author: JONGHUN
'''
import hydrophobicity_table
def get_sum_hydf(peptide, ion_type, ion_index):
sum_hydf = 0
if ion_type == 'b':
for i in range(ion_index):
sum_hydf += hydrophobicity_table.get_aa_hydph(peptide[i])
else:
for i in range(ion_index, len(peptide)):
sum_hydf += hydrophobicity_table.get_aa_hydph(peptide[i])
return sum_hydf
def get_hydrphobicity_features(peptide, ion_type, ion_index):
    sum_hydf = get_sum_hydf(peptide, ion_type, ion_index)
    # return the summed hydrophobicity as the feature value for this ion
    return sum_hydf
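# Illustration (hypothetical peptide): for peptide 'PEPTIDE' with ion_type 'b'
# and ion_index 3, get_sum_hydf sums the prefix residues 'P', 'E', 'P'; with
# ion_type 'y' and the same index it sums the suffix 'T', 'I', 'D', 'E'.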
|
[
"zorba.heron@gmail.com"
] |
zorba.heron@gmail.com
|
82b43643d7116b7a35974c68b9b6c72e9996791a
|
92c586573ff7db754aa390ca6c028aec0c39aa0d
|
/crm/library/helpers/permissions.py
|
698aa59b4bb73e37c273168d14d63a6ea0e14229
|
[] |
no_license
|
vovapasko/crm
|
b7784ac7f1984a26a955b178ebefea0c03c9ad6a
|
bab909324aa2e4c1c8fff72093d3fcf44aaf4963
|
refs/heads/master
| 2023-02-10T10:39:40.789246
| 2020-11-20T18:30:01
| 2020-11-20T18:30:01
| 326,683,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
from typing import Union, Tuple, Any, List
from ..constants.permissions import GROUPS_FOR_CASCADE
from django.contrib.auth.models import Group
from ...models import User
from ..constants import SUPERUSER, MANAGER, ADMIN, GUEST
def groups_cascade_down(group: Group) -> List[str]:
"""
    for a given group, returns the list of group names that cascade down from it
:param group: group name or Group from django models
:return: list of groups name
"""
def __cascade(
group_type: Union[type, Tuple[Union[type, Tuple[Any, ...]], ...]],
value: Union[str, Group]
) -> list:
if isinstance(group, group_type):
return GROUPS_FOR_CASCADE[GROUPS_FOR_CASCADE.index(value):]
return __cascade(str, group) or __cascade(Group, group.name) or list()
def is_user_allowed_cascade_down(user: User, group_name: str) -> bool:
"""
    check whether the user is allowed to make changes to other users with group name group_name;
    changes are permitted only in the cascade-down direction
:param user: user from User model
:param group_name: name of group to check
:return: True if user can do changes cascade down on users with group_name
False otherwise
"""
return group_name in groups_cascade_down(user.groups.first())
def get_name_from_permission(permission: str) -> str:
"""
codename from app_name.codename of permission
:param permission: permission from view's permission_required
:return: permission codename
"""
app_name, permission_name = permission.split('.')
return permission_name
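# Illustrative example (assumes GROUPS_FOR_CASCADE is ordered from the most
# privileged group downward, e.g. [SUPERUSER, ADMIN, MANAGER, GUEST]):
# groups_cascade_down(MANAGER) would yield [MANAGER, GUEST], so a manager may
# act on managers and guests but not on admins or superusers, and
# get_name_from_permission('crm.change_user') returns 'change_user'.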
|
[
"vovapasko1699@gmail.com"
] |
vovapasko1699@gmail.com
|
198a082b16a44e06a1f5342860a421931875bee7
|
d24f81b52917a7b0629fe615149ef4ac8a0bd049
|
/backend/feed/api/service.py
|
f71344b937b3519f87921c8cdbe194d1f6648e61
|
[] |
no_license
|
ScrollPage/Test-Chat
|
f533c8d1112a4bc639d9659a126b9a9f886f68b2
|
3911b7555ca684b3eb31e9857d007fda3b6c7cd3
|
refs/heads/master
| 2023-01-03T13:37:44.600044
| 2020-10-30T08:43:27
| 2020-10-30T08:43:27
| 288,795,592
| 0
| 0
| null | 2020-08-24T13:35:51
| 2020-08-19T17:31:59
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,369
|
py
|
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from rest_framework import mixins, serializers
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.db.models import Count, Q, Min, Subquery, OuterRef
from backend.service import (
PermissionMixin,
LowContactSerializer,
SerializerMixin,
PermissionMixin,
PermissionSerializerMixin
)
from .exceptions import BadRequestError
from feed.models import Post
from contact.models import Contact
class UsersPostsListMixin(mixins.ListModelMixin):
    '''Posts of the current user only'''
def list(self, request, *args, **kwargs):
id = request.query_params.get('id', None)
queryset = self.get_queryset().filter(owner__user__id=id)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class PermisisonSerializerPostModelViewset(PermissionSerializerMixin,
ModelViewSet,
UsersPostsListMixin):
    '''
    Modified methods for resolving access permissions and the serializer class,
    plus the modified list method
    '''
pass
class CreateViewset(mixins.CreateModelMixin,
GenericViewSet):
    '''Creation with additional classes'''
pass
class BaseFeedSerializer(serializers.Serializer):
    '''Base class for serializers'''
user = LowContactSerializer(read_only=True)
def post_annotations(user, queryset):
return queryset.annotate(
num_likes=Count('likes', distinct=True)
).annotate(
num_reposts=Count('reposts', distinct=True)
).annotate(
is_liked=Count('likes', filter=Q(likes__user=user))
).annotate(
is_watched=Count('reviews', filter=Q(reviews__user=user))
).annotate(
num_reviews=Count('reviews', distinct=True)
).annotate(
num_comments=Count('comments', distinct=True)
)
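# Usage sketch (illustrative; view wiring assumed): annotate a feed queryset
# before serializing it, e.g.
#   queryset = post_annotations(request.user, Post.objects.all())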
|
[
"54814200+reqww@users.noreply.github.com"
] |
54814200+reqww@users.noreply.github.com
|
ac17b65b72c91dfb20148b42a77c6ac661042fe7
|
cf8e2c80bf3f5d13d1cb4f567a81c8fba1e5fba9
|
/load_images.py
|
6bbd5650581091fcd233676fd209022bfe834717
|
[] |
no_license
|
poleha/travnik
|
52b34b7911794a65cb1e74762e5333073ed4e2d9
|
8a449644c5201b1c5b9dd5cf137d0fc8c616dd62
|
refs/heads/master
| 2022-12-04T03:55:13.972057
| 2017-08-05T15:21:58
| 2017-08-05T15:21:58
| 48,569,404
| 0
| 0
| null | 2022-11-22T00:59:02
| 2015-12-25T06:28:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 997
|
py
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travnik.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
from main import models
from django.conf import settings
from django.core.files.storage import FileSystemStorage
save_path = os.path.join(settings.MEDIA_ROOT, 'plant')
storage = FileSystemStorage(save_path)
plants = models.Plant.objects.all()
count = 0
for plant in plants:
try:
f = open('load_images/' + str(plant.code) + '.jpg', 'rb')
    except FileNotFoundError:
f = None
if f:
file_name = plant.alias + '.jpg'
try:
existing_f = storage.open(file_name)
print(plant.pk)
        except FileNotFoundError:
existing_f = None
if not existing_f:
storage.save(file_name, f)
plant.image = 'plant/' + file_name
plant.save()
print(plant)
count += 1
print(count)
|
[
"pass1191"
] |
pass1191
|
fbaec0902ace41c2165e09ca0cbdb3ef76f31d08
|
2ad58ae25da89057b421321c606252c716c39b5d
|
/Finding_a_Spliced_Motif/Finding_a_Spliced_Motif.py
|
07805ba7799074d2ec0ec9ffa934cb03f7fa93e3
|
[] |
no_license
|
phondanai/rosalind
|
04f0748d581037be906ebb87246d09649e338c48
|
e60e5e512196228abf6c79d7380ac0edd993bfc0
|
refs/heads/master
| 2020-03-21T07:22:58.532855
| 2018-06-29T10:10:28
| 2018-06-29T10:10:28
| 138,277,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
from typing import List
def extract_fasta(fasta_file: str) -> List[str]:
results = []
with open(fasta_file, 'r') as f:
tmp_str = ''
start = True
for line in f:
if not line.startswith('>'):
tmp_str += line.rstrip()
elif start:
start = False
continue
else:
results.append(tmp_str)
tmp_str = ''
else:
results.append(tmp_str)
return results
dna_string, sequence_string = extract_fasta('rosalind_sseq.txt')
# For each character of the motif, record every 1-based position where it
# occurs in the DNA string.
sequence_list = []
for c in sequence_string:
    sequence_list.append([idx + 1 for idx, i in enumerate(dna_string) if i == c])
# Greedily pick, for each motif character, the first occurrence that comes
# after the previously chosen position, giving strictly increasing indices.
result = [sequence_list[0][0]]
for i in range(1, len(sequence_list)):
    tmp = [j for j in sequence_list[i] if j > result[-1]][0]
    result.append(tmp)
print(' '.join(map(str,result)))
|
[
"phondanai@gmail.com"
] |
phondanai@gmail.com
|
588969f1d268d3f3f6167822cfd89430accf0539
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2139/60581/296948.py
|
1bb9649d9c7efd690ee0c582f77669d2d86f648b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
#include <iostream>
#include <cstdio>
#include <cstring>
#include <algorithm>
#include <queue>
#include <vector>
#include <map>
using namespace std;
#define reg register
inline int read() {
int res = 0;char ch=getchar();bool fu=0;
while(!isdigit(ch))fu|=(ch=='-'),ch=getchar();
while(isdigit(ch)) res=(res<<3)+(res<<1)+(ch^48),ch=getchar();
return fu?-res:res;
}
#define N 50005
int n, m;
struct edge {
int nxt, to, from, id;
}ed[N<<1];
int head[N], cnt;
inline void add(int x, int y, int id) {
ed[++cnt] = (edge){head[x], y, x, id};
head[x] = cnt;
}
struct date {
int x, y, val;
inline bool operator < (const date &a) const {
return val > a.val;
}
}da[N];
int ans[N];
int Fa[N], top[N], id[N], rnk[N], dep[N], siz[N], son[N], tot;
void dfs1(int x, int fa)
{
Fa[x] = fa, dep[x] = dep[fa] + 1, siz[x] = 1;
for (reg int i = head[x] ; i ; i = ed[i].nxt)
{
int to = ed[i].to;
if (to == fa) continue;
dfs1(to, x);
siz[x] += siz[to];
if (siz[to] > siz[son[x]]) son[x] = to;
}
}
void dfs2(int x, int tep)
{
id[x] = ++tot, rnk[tot] = x;
top[x] = tep;
if (son[x]) dfs2(son[x], tep);
for (reg int i = head[x] ; i ; i = ed[i].nxt)
{
int to = ed[i].to;
if (to == Fa[x] or to == son[x]) continue;
dfs2(to, to);
}
}
int tr[N << 2], lzy[N << 2];
#define ls o << 1
#define rs o << 1 | 1
inline void pushup(int o) {
tr[o] = tr[ls] + tr[rs];
}
inline void spread(int o, int l, int r)
{
if (!lzy[o]) return;
lzy[ls] = lzy[rs] = lzy[o];
int mid = (l + r) >> 1;
tr[ls] = lzy[o] * (mid - l + 1);
tr[rs] = lzy[o] * (r - mid);
lzy[o] = 0;
}
void change(int l, int r, int o, int ql, int qr, int c)
{
if (l >= ql and r <= qr) {
tr[o] = (r - l + 1) * c;
lzy[o] = c;
return ;
}
spread(o, l, r);
int mid = (l + r) >> 1;
if (ql <= mid) change(l, mid, ls, ql, qr, c);
if (qr > mid) change(mid + 1, r, rs, ql, qr, c);
pushup(o);
}
void changes(int x, int y, int c)
{
while(top[x] != top[y])
{
if (dep[top[x]] < dep[top[y]]) swap(x, y);
change(1, n, 1, id[top[x]], id[x], c);
x = Fa[top[x]];
}
if (id[x] > id[y]) swap(x, y);
change(1, n, 1, id[x] + 1, id[y], c);
}
int query(int l, int r, int o, int p)
{
// printf("%d %d %d\n", l, r, tr[o]);
if (l == r) return tr[o];
spread(o, l, r);
int mid = (l + r) >> 1;
if (p <= mid) return query(l, mid, ls, p);
else return query(mid + 1, r, rs, p);
}
int main()
{
n = read(), m = read();
for (reg int i = 1 ; i < n ; i ++)
{
int x = read(), y = read();
add(x, y, i), add(y, x, i);
}
for (reg int i = 1 ; i <= m ; i ++) da[i] = (date){read(), read(), read()};
sort(da + 1, da + 1 + m);
dfs1(1, 0);
dfs2(1, 1);
// for (int i=1;i<=n;i++) printf("siz[%d]=%d\n", i, siz[i]);
for (reg int i = 1 ; i <= m ; i ++)
changes(da[i].x, da[i].y, da[i].val);
for (reg int i = 1 ; i <= cnt ; i += 2)
{
int x = ed[i].from, y = ed[i].to;
if (dep[x] < dep[y]) swap(x, y);
ans[ed[i].id] = query(1, n, 1, id[x]);
// printf("%d\n",x);
}
for (reg int i = 1 ; i < n ; i ++) printf("%d\n", ans[i] > 0 ? ans[i] : -1);
return 0;
}
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
13766e68563f5b4ef2362c9b836a089ffaa0f61d
|
71b19989fc37003954a9e3dda748370a6f98aff4
|
/database_construction/insert_transcripts.py
|
d7d1cffd54053ecd31f936b3cb78e80469835bfa
|
[] |
no_license
|
philloidin/CRISPR_MultiTargeter
|
241232408e411442de1a2e59e8dad10a0cb72623
|
96115212d3f20e0e231d2739fc5ad7c95c45e300
|
refs/heads/master
| 2021-05-28T01:35:54.061336
| 2015-01-08T18:28:51
| 2015-01-08T18:28:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
# The purpose of this script is to insert the data into the tables
# of genomes database
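# The script assumes the Transcripts table already exists; a schema along
# these lines (illustrative, not taken from the repository) would fit:
#   CREATE TABLE Transcripts(
#       transcriptID TEXT PRIMARY KEY,
#       geneid       TEXT,
#       sequence     TEXT
#   );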
import os
import sqlite3 as db
# obtain all the files from a directory matching a certain naming scheme
# assign these filenames to the corresponding variables
for file in os.listdir("."):
################################
# Insertion of TRANSCRIPT data
################################
if file.endswith("transcript_table.txt"):
# open the file containing exons
tr_fh = open(file, 'r')
# also, establish a database connection
conn = db.connect('zebrafish.db')
cur = conn.cursor()
line_count = 0
insert_count = 0
# go iteratively through each line of the file
for line in tr_fh:
# check if you reached the end of the file
if line != "":
line_count = line_count + 1
# read the data items for database insertion
(transcriptID, geneid, sequence) = line.split('\t')
                sequence = sequence.rstrip()
# execute the insertion statement
cur.execute('''INSERT INTO Transcripts (transcriptID, geneid, sequence) VALUES(?,?,?)''', (transcriptID, geneid, sequence))
insert_count = insert_count + cur.rowcount
conn.commit()
conn.close()
tr_fh.close()
print(file)
print("The number of lines gone through is %d" %line_count)
print("The number of rows inserted is %d" %insert_count)
|
[
"s.prykhozhij@gmail.com"
] |
s.prykhozhij@gmail.com
|
0b62293ce1d11f795ccecf9fc2fc4b648b6ecc11
|
bf985435618bbf978e89c990c4b1464ec32fe044
|
/mastermix.py
|
b6fa2dbfba7ebc9a8c64b06cd702b59894722b5d
|
[] |
no_license
|
derrik-gratz/PCR-mastermix-calculator
|
5a293701efccd7e148e666ddaff0a33a7a37909d
|
719ae7756400b01797a0725109f5c1a14a35519a
|
refs/heads/master
| 2023-01-10T22:44:46.666103
| 2020-10-28T00:13:17
| 2020-10-28T00:13:17
| 191,266,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,340
|
py
|
#!/usr/bin/env python
import csv
import sys
import os
from openpyxl import load_workbook, Workbook
from shutil import move
from numpy import arange
import datetime
from time import sleep
assaylist = []
samples = {}
def main():
print('Make sure the target directory will not change while the program is running (e.g. no other programs will add files while this runs)')
original_directory = os.getcwd()
path = getpath()
path1 = getplatemap(path)
platemap1 = platemapsheet1(path1)
getassays(platemap1)
samplecount = checksamples(samples)
mmtemplate = 'mastermix_template.xlsx'
reagents = open_reagent_list(original_directory)
MMoutput(mmtemplate, path, reagents, samplecount)
def getpath():
yesno = "x"
while yesno.lower() not in "yesno":
print("Attempt automatic folder detection?")
yesno = input(":")
if yesno.lower() not in "yesno":
print("What?")
if yesno.lower() in "yes":
# attempts to navigate to most recent directory
today = datetime.date.today()
monday_date = today - datetime.timedelta(days=today.weekday())
week_of = "Week of {}-{}-{}".format(monday_date.strftime("%m"), monday_date.strftime("%d"), monday_date.strftime("%y"))
if today.weekday() != 0 and today.weekday() != 2:
previous_date = today - datetime.timedelta(days=1)
else:
previous_date = today
previous_folder = "{}-{}-{}".format(previous_date.strftime("%m"), previous_date.strftime("%d"), previous_date.strftime("%y"))
try:
os.chdir('../../Current Year/{}/{}'.format(week_of, previous_folder))
path = os.getcwd()
except FileNotFoundError:
print("Automatic gel map detection failed.")
path = manual_directory()
else:
path = manual_directory()
return path
def manual_directory():
# if automated detection fails or isn't wanted
while True:
path = input("Enter the path of your platemap file: ")
if not os.path.isdir(path):
print("Not a valid directory")
else:
break
return path
def getplatemap(path):
filelist= []
for file in os.listdir(path):
filelist.append(file)
for file in filelist:
if 'gel_' in file:
# platemap should include this str, other files shouldn't
print('Is this the platemap?')
print(file)
yesno = input(':')
if yesno.lower() in "yes":
path1 = path + '\\' + file
return path1
elif yesno.lower() in 'no':
# if for some reason another file has the same substr and is detected by mistake
while True:
print('Here is a list of the files in that directory.')
print(filelist)
try:
filenum = int(input('Which would you like to use? (enter a number corresponding to the order of the files):'))
platemap = filelist[filenum - 1]
print("You selected '{}'. Are you sure?".format(platemap))
yesno = input(':')
if yesno.lower() in "yes":
break
elif yesno.lower() not in 'no':
print("this is a yes or no question")
except TypeError:
print("Use a number dummy")
except ValueError:
print("Use a number dummy")
except IndexError:
print("Count much? Use a number that refers to one of the files")
path1 = path + "\\" + platemap
return path1
else:
print('You broke it. Start over and answer yes-no questions with yes-no answers')
sleep(3)
quit()
def platemapsheet1(path1):
tries = 0
while tries < 2:
try:
wb = load_workbook(filename=path1, data_only=True)
ws = wb.worksheets[0]
return ws
except PermissionError:
print('Close out of the platemap. The program will try again in 5 seconds.')
sleep(5)
tries += 1
if tries == 2:
print('Alright, clearly nothing is changing. Make sure the platemap is closed and rerun the program')
sleep(5)
quit()
return ws
def getassays(ws):
# I distinguish bottom rows from top rows because they have different rules
bottom_rows = list(arange(15,431,16))
# all the rows that will have assay name information
assay_rows = sorted(list(arange(2,418,16)) + bottom_rows)
rowcounter = 0
columntitles = []
bottomrowcounter = 0
# initialized here to avoid recalculation each loop
botrowlen = len(bottom_rows)
for row in ws.values:
        # Resets a column value for each new row. The column value allows me to iterate through each member of the list I made from the row
# I start at 4 because I know the values I care about will start in column 4
columncounter = 3
rowcounter += 1
if rowcounter in assay_rows:
columntitles.clear()
while True:
# excel cell format
cell = '{}{}'.format(chr(64 + columncounter), rowcounter)
# Will add the value in the current cell to the assay list if it is not already present in the list
if ws[cell].value not in assaylist and ws[cell].value != None:
assaylist.append(ws[cell].value)
if ws[cell].value not in samples:
samples[(ws[cell].value)] = []
columntitles.append(ws[cell].value)
columncounter += 1
if columncounter == 15:
break
if (rowcounter - 7) % 16 == 0:
for a in range(12):
# each column
temprowcounter = rowcounter
for b in range(8):
# each row
cell = '{}{}'.format(chr(64 + columncounter), temprowcounter)
coltitle = columntitles[columncounter - 3]
if coltitle != None:
if ws[cell].value != ' ' and ws[cell].value != None:
samples[coltitle].append(ws[cell].value)
if ws[cell].value == 'RNTC_NTC_A_1_1':
# signals the end of an assay
cell1 = ('{}{}'.format(chr(64 + columncounter), (bottom_rows[bottomrowcounter])))
str1 = ws[cell1].value
# checks if there are more samples after the end of this assay, suggesting another assay is tucked beneath
                            if str1 != coltitle and str1 is not None:
columntitles[columncounter - 3] = str1
if str1 not in samples:
samples[str1] = []
temprowcounter += 1
columncounter += 1
if rowcounter > bottom_rows[bottomrowcounter]:
bottomrowcounter += 1
if rowcounter > bottom_rows[botrowlen - 1]:
break
def checksamples(samples):
badassays = []
samplecount = {}
for assays in samples:
# removing 'assays' that have no samples. This might happen if a note is included in the cells where assays might be
if len(samples[assays]) == 0:
badassays.append(assays)
if len(badassays) > 0:
print("The following strings were in places where assays should be, but they're probably not assays.")
print(badassays)
print('Should they be removed?')
response = 'a'
        while response.lower() not in ("yes", "no", "y", "n"):
response = input(':')
        if response.lower() in ("y", "yes"):
for assays in badassays:
assaylist.remove(assays)
for assays in assaylist:
# adding overage to mastermix calculations
samplecount[assays] = 0
# individual overages
n = len(samples[assays])
if n < 5:
samplecount[assays] = n + 0.5
elif n / 10 < 4:
samplecount[assays] = round(n * 1.1)
else:
samplecount[assays] = n + 4
return samplecount
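# Worked example of the overage rules in checksamples (illustrative numbers,
# not taken from a real platemap): with 3 samples the count becomes 3 + 0.5 = 3.5;
# with 20 samples, round(20 * 1.1) = 22; with 50 samples, 50 + 4 = 54. Small runs
# get a small fixed overage, mid-size runs get 10%, and large runs cap out at +4.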
def open_reagent_list(original_directory):
# references and external dictionary for assay reagent pairings
os.chdir(original_directory)
reagents = {}
with open('assaydictionary.csv', 'r') as a:
reagent_list = csv.reader(a)
for rows in reagent_list:
if rows[0] in assaylist:
reagents[rows[0]] = rows[1]
return reagents
def MMoutput(temp, path, reagents, samplecount):
wb = load_workbook(temp)
ws = wb.active
rowcounter = 1
# this is used to replace outdated information output by our LIMS if we have new primers that aren't present in LIMS
noadaptorsprimers = [
'tRNA_Tyr_AA_1'
]
current_assay = 0
total_zymo_mastermix = 0
total_qiagen_mastermix = 0
for row in ws.iter_rows():
columncounter = 1
if (rowcounter-4)%6 == 0 and rowcounter != 4:
for x in range(3):
# outputting the assay
ws.cell(row=rowcounter, column=columncounter).value = assaylist[current_assay]
# outputting the right reagent
ws.cell(row=(rowcounter+1), column=columncounter).value = reagents.get(assaylist[current_assay], "Not Found")
# note for cases where we have new primers not present in LIMS
if assaylist[current_assay] in noadaptorsprimers:
ws.cell(row=rowcounter, column=columncounter+1).value = 'Use primers without adaptors'
if assaylist[current_assay] in samples:
# sample count
ws.cell(row=rowcounter, column=(columncounter + 2)).value = samplecount[assaylist[current_assay]]
else:
                    # one last safety net for an assay that wasn't removed in checksamples for some reason
print("Error: {} slipped through the cracks. Is there something weird about this assay on the platemap?".format(assaylist[current_assay]))
# reagent volumes, dependent on reagent used
if reagents.get(assaylist[current_assay]) == 'ZymoTaq':
total_zymo_mastermix += samplecount[assaylist[current_assay]]
elif reagents.get(assaylist[current_assay]) == 'Qiagen':
total_qiagen_mastermix += samplecount[assaylist[current_assay]]
# a special case
if assaylist[current_assay] == 'ATP7B_112GA_RD_2':
ws.cell(row=(rowcounter + 2), column=(columncounter + 3)).value = 'See'
ws.cell(row=(rowcounter + 3), column=(columncounter + 1)).value = 'above'
ws.cell(row=3, column=5).value = samplecount['ATP7B_112GA_RD_2']
current_assay += 1
columncounter += 3
if current_assay == len(assaylist):
break
if current_assay == len(assaylist):
break
if current_assay == len(assaylist):
break
rowcounter += 1
# 5% overage
total_zymo_mastermix *= 1.05
total_qiagen_mastermix *= 1.05
one_ml_zymo_mm_tubes = total_zymo_mastermix // 125
remaining_zymo_mm = total_zymo_mastermix % 125
one_ml_qiagen_mm_tubes = total_qiagen_mastermix // 150
remaining_qiagen_mm = total_qiagen_mastermix % 150
ws.cell(row=3, column=8).value = remaining_qiagen_mm
ws.cell(row=3, column=9).value = one_ml_qiagen_mm_tubes
ws.cell(row=3, column=2).value = remaining_zymo_mm
ws.cell(row=3, column=3).value = one_ml_zymo_mm_tubes
datestr = str(datetime.date.today()) + ' Secondary PCR Mastermixes.xlsx'
wb.save(datestr)
move(datestr, path)
enda = input("Press enter to end this program")
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
derrik-gratz.noreply@github.com
|
dccc6514a00d19f0633bc043036a303b14288fad
|
aebbf31274e4b9bc52b1f1049e177d3806229c17
|
/aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DeleteFleetRequest.py
|
6c58da55554050add0a8ba96159a2f6c858e3cc6
|
[
"Apache-2.0"
] |
permissive
|
nikita-barkovsky/aliyun-openapi-python-sdk
|
bc76310a114cadd9d72a0966d4f76491f4071f9d
|
5f4f8e164f9cd2143d3b4bb0b57bfb8a938fe094
|
refs/heads/master
| 2020-07-19T19:46:22.048026
| 2019-09-05T06:17:44
| 2019-09-05T06:17:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DeleteFleetRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DeleteFleet','ecs')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_TerminateInstances(self):
return self.get_query_params().get('TerminateInstances')
def set_TerminateInstances(self,TerminateInstances):
self.add_query_param('TerminateInstances',TerminateInstances)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_FleetId(self):
return self.get_query_params().get('FleetId')
def set_FleetId(self,FleetId):
self.add_query_param('FleetId',FleetId)
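# A minimal usage sketch (hedged: the client credentials, region and fleet ID
# below are placeholders, not values confirmed by this file):
#
#   from aliyunsdkcore.client import AcsClient
#
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DeleteFleetRequest()
#   request.set_FleetId('fleet-xxxxxxxx')    # placeholder fleet ID
#   request.set_TerminateInstances('true')   # also terminate member instances
#   response = client.do_action_with_exception(request)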
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
02e0823c0eca2063c6f3bc6be1ae785f62ad507a
|
8cd6b3a2a4b3ba25d65d803fbb702ea8dc12d458
|
/app/api/handler/home.py
|
21dc0d90eeb91b01d95702128c845972d5c51917
|
[] |
no_license
|
casimiror/cuboid-challenge-python
|
51bd5e500fc8fcd29fb2c4cdfe2778cea405232e
|
676133d997c16bad1dbac2dc50466f0a57efac49
|
refs/heads/master
| 2023-09-01T22:20:54.750701
| 2021-11-10T22:28:43
| 2021-11-10T22:28:43
| 426,737,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from http import HTTPStatus
from flask import Blueprint
home_api = Blueprint("home_api", __name__)
@home_api.route("/")
def run():
return "Cuboids", HTTPStatus.OK
|
[
"casimiro@cliengo.com"
] |
casimiro@cliengo.com
|
0e40d4109c2d4f7bf260081a9d8fba002899e8c5
|
17bf30cd975af30f7a7d4864e89e087ec3d8f701
|
/hivemind/modules/api/models/payment.py
|
276df76011cc5661e4f76f5bee808e67552d1908
|
[] |
no_license
|
jaystaks/HiveMind
|
421134e98b37c156f6e513727caaddbb28c921b4
|
8db41ad743375399d01ea4870e9f6bc2adaa54c0
|
refs/heads/master
| 2020-04-04T04:51:51.552659
| 2018-10-30T11:09:53
| 2018-10-30T11:09:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
"""
Payment Model for API Module
"""
from django.db import models
from .base_model import BaseModel
from .profile import Profile
class Payment(BaseModel):
"""
Payment Model
"""
owner = models.ForeignKey(
Profile, on_delete=models.SET_DEFAULT, default=None, null=True)
payment_ref = models.CharField(
'Payment Reference',
max_length=50,
help_text='Represents Payment Reference.')
extra_info = models.TextField(
'Extra Info',
blank=True,
default="",
        help_text='Represents extra information related to Payment.')
# pylint: disable=too-few-public-methods
    class Meta:
"""
Meta Options for Payment Model
"""
ordering = ['id', 'owner']
def __str__(self):
"""
String Representation for Payment Model
"""
return self.payment_ref
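# A minimal usage sketch (hedged: assumes a Profile row already exists -- the
# Profile model itself lives in .profile and is not shown here):
#
#   profile = Profile.objects.first()
#   payment = Payment.objects.create(
#       owner=profile,
#       payment_ref='PAY-2018-0001',    # placeholder reference
#       extra_info='settled via bank transfer')
#   str(payment)                        # -> 'PAY-2018-0001'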
|
[
"davisraymondmuro@outlook.com"
] |
davisraymondmuro@outlook.com
|
906f0580c258ee404255342093afeaeb509b5c16
|
6d411dc42fd3bbc5d0438fddde92447877449f44
|
/override.py
|
c7b3f1ae24e840ed503742d82d004ac45da2aafa
|
[] |
no_license
|
Jnosuke/pythonFile
|
d09eb5a5da7b0f924c348a2beaca22b4fb48c199
|
c228f6f6e2f7ece6bfa5e3abc53f0505d2ff822f
|
refs/heads/master
| 2022-11-16T04:53:18.117810
| 2020-07-11T05:06:34
| 2020-07-11T05:18:37
| 278,789,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
class Greet():
    def hello(self):
print("やあ!")
def bye(self):
print("さようなら")
class Greet2(Greet):
def hello(self, name = None):
if name :
print(name + "さん、こんにちは")
else:
            super().hello()
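# A short usage sketch of the override (hedged: assumes the intent is that
# Greet2.hello falls back to the parent's greeting when no name is given):
#
#   g = Greet2()
#   g.hello("Tom")   # prints "Tomさん、こんにちは"
#   g.hello()        # delegates to Greet.hello() via super()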
|
[
"saikoro.nosuke@gmail.com"
] |
saikoro.nosuke@gmail.com
|
4054def62ff0cca0a741f0a331f0c9d13e154ec8
|
1c65c0c7620c50e98f3a9b83e6ec121d2c0037f9
|
/statistics_feeder.py
|
21f28073fcb51746a60b766dad75a028bb0c6aa2
|
[] |
no_license
|
SLAC/slac_utils
|
a03bccb1c4fa221e96ec868b6af46a69538f4930
|
923d092891b067fd48d28d91db6a32f9e7757d9e
|
refs/heads/master
| 2023-04-14T03:25:14.184558
| 2023-03-31T20:53:19
| 2023-03-31T20:53:19
| 31,451,941
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,145
|
py
|
from slac_utils.time import now, datetime_to_epoch, sleep
import datetime
from slac_utils.queues import PyAmqpLibQueue
from slac_utils.hashing import ConsistentHashRing
from collections import deque
import gc
from struct import pack
from pickle import dumps
import socket
import logging
class StatisticsMixin( object ):
"""
a supervisor that relays statistics of workers back to a store
"""
stats_feeder = None
stats_feeder_obj = None
    stats_preamble = ''
def init_stats(self,*args,**kwargs):
if self.stats_feeder and 'stats_host' in kwargs and 'stats_port' in kwargs:
f = getattr( self, 'stats_feeder' )
self.stats_feeder_obj = f( host=kwargs['stats_host'], port=kwargs['stats_port' ])
def statistics_key( self, job ):
return self.stats_preamble + '.' + str(job['device'])
def process_stats(self, job, retries=1 ):
if 'stats' in job['_meta']:
t = now()
# logging.warn("sending stats: " + str(t) + ', key=' + str(self.statistics_key(job)) + ": " + str(job['_meta']['stats']))
try:
self.stats_feeder_obj.send( t, self.statistics_key(job), statistics=job['_meta']['stats'] )
except Exception,e:
logging.error("Could not send statistics: " + str(e))
class StatisticsFeeder( object ):
"""
generic agent to push statistics to something
"""
stats_messages = None
def __init__(self,*args,**kwargs):
for k,v in kwargs.iteritems():
setattr( self, k, v )
self.stats_messages = deque()
self.init(*args,**kwargs)
def init(self,*args,**kwargs):
pass
def __enter__(self):
return self
def __exit__(self,*args,**kwargs):
pass
def send( self, statistics={} ):
raise NotImplementedError, 'not implemented'
class PyAmqpLibFeeder( StatisticsFeeder ):
queue = None
def init(self,*args,**kwargs):
logging.debug("ARGS: %s KWARGS: %s"%(args,kwargs))
super( PyAmqpLibFeeder, self ).init(*args,**kwargs )
        if kwargs.has_key('key_preamble'):
            del kwargs['key_preamble']
        else:
            self.key_preamble = ''
        self.queue = PyAmqpLibQueue(**kwargs)
def __enter__(self):
self.queue.__enter__()
def __exit__(self):
self.queue.__exit__()
def send( self, time, key, statistics={}, retries=3 ):
if isinstance( time, datetime.datetime ):
time = datetime_to_epoch( time )
for k,v in statistics.iteritems():
self.queue.put( '%f %d' % (v,time), key=self.key_preamble + '.' + k )
class CarbonFeeder( StatisticsFeeder ):
"""
A generic class to send statistics to graphite/carbon
"""
pickle = True
sock = None
state = False
backing_off = False
backoff_time = 0.5
backoff_window = 0
def init(self,*args,**kwargs):
super( CarbonFeeder, self ).init(*args,**kwargs)
self.pickle = True
self.backing_off = False
self.backoff_window = 0
self.__enter__()
def __enter__(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logging.info("connecting %s with %s:%s (%r) msgs: %d" % ( self, self.host,self.port,self.sock, len(self.stats_messages), ) )
# self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect( ( self.host, int(self.port) ) )
self.sock.settimeout( 2*self.backoff_time )
# logging.info(" connect ok, timeout %s" % (self.sock.gettimeout()) )
self.state = True
except socket.error, e:
self.state = False
# logging.warn(" connect not ok: %s" % (e,))
if self.sock:
self.sock.close()
self.sock = None
logging.warn(" could not connect to %s:%s: %s" %(self.host,self.port,e) )
return self
def __exit__(self,*args,**kwargs):
if self.sock:
self.sock.close()
self.state = False
self.sock = None
def __del__(self):
        # flush the cache out to the host; if it's down, keep retrying with a longer (3x) backoff between attempts
self.backoff_time = self.backoff_time * 3
while len(self.stats_messages) > 0:
n = self._send( 100 )
logging.info("flushing... %s" % (n))
def reconnect( self ):
self.__exit__()
sleep( self.backoff_time )
self.__enter__()
def _send( self, number ):
size = len(self.stats_messages)
if number > size:
number = size
if not self.state or not self.sock:
self.reconnect()
# this = deque()
this = []
try:
            if self.sock is None:
raise Exception, 'no socket available'
ts = now()
# as we're using deque, we have to pop if (no peeking)
for x in xrange(0,number):
this.append(self.stats_messages.popleft())
# try sending the stuff
# logging.debug("sending %s" % this)
payload = dumps(this)
header = pack("!L", len(payload))
self.sock.sendall( header + payload )
# time it and report
ts = now() - ts
ts = "%.3f" % float( (float(ts.microseconds) + float(ts.seconds*1000000))/1000000 )
logging.info("%s:%s sent %s datapoints (%s left) in %ssec" % (self.host, self.port, number, size - number, ts ) )
return len(this)
except Exception,e:
self.__exit__()
            # don't lose the data! put it back into the deque
if len(this) > 0:
self.stats_messages.extendleft(this)
logging.warning("%s:%s send error: %s %s" % ( self.host, self.port, type(e), e ))
# logging.error('could not store stats %s (count %s)' % (key,len(self.stats_messages)))
return None
def load_from_disk( self ):
pass
def page_to_disk( self ):
"""
in order to prevent hogging up memory, if the len(self.stats_message) gets too large
then we page the data off to disk
"""
pass
def send( self, time, key, statistics, min_chunks=500, max_chunks=750, backoff_threshold=25000 ):
"""
try to be a little clever in not stalling the updates as we can keep a cache on this
        system, we use two variables: backing_off and backoff_window. If the graphite server is
stalled, we set backing_off to true, and we do not attempt to send anything until we
have accumulated max_chunks more items (since the last try)
"""
if isinstance( time, datetime.datetime ):
time = datetime_to_epoch( time )
gc.disable()
for k,v in statistics.iteritems():
try:
v = float(v)
this_key = "%s.%s" % (key,k)
this_key = this_key.replace( '/', '.' )
# logging.info(" %s: %s\t%s" % ( time, this_key, v ))
self.stats_messages.append( ( this_key, (time, v) ) )
self.backoff_window = self.backoff_window + 1
except Exception, e:
logging.error("Error parsing %s: (%s) %s in %s" % (v, type(e), e, key) )
gc.enable()
        # avoid hammering the failed host too much and thus slowing us down
# we use self.backoff_window as a counter for outstanding messages
# if this value goes over max_chunks, we try sending again
if self.backing_off:
if self.backoff_window > backoff_threshold:
self.backing_off = False
self.backoff_window = self.backoff_window - backoff_threshold
# okay to send
if not self.backing_off:
# send! if we succeed, then good
# if we're backing off, then try sending max_chunks, else min_chunks
# also, try to avoid bursting after backing_off?
if self.backoff_window >= min_chunks:
size = len(self.stats_messages) # send everything we have
num = size
if num > max_chunks:
num = max_chunks # limit number of items sent
# send the data
sent = self._send( num )
            if sent is not None:
self.backing_off = False
self.backoff_window = self.backoff_window - sent
if self.backoff_window < 0:
self.backoff_window = 0 # important!
# logging.info("%s:%s after %s\tsize %s/%s window %s: %s"%( self.host, self.port, sent, size, len(self.stats_messages), self.backoff_window, self.backing_off ) )
return True
# if sending fails, then we wait another width before we try again
else:
                # use the fact that we should have sent the entire size if we aren't backing off
self.backing_off = True
# logging.error("%s %s: send failed: num %s size %s window %s, backoff %s" % (self.host, self.port, num, len(self.stats_messages), self.backoff_window, self.backing_off ) )
return False
return None
class MultiCarbonFeeder( StatisticsFeeder ):
instance_ports = {}
ring = None
feeders = {}
def init(self,**kwargs):
super( MultiCarbonFeeder, self).init(**kwargs)
# expects self.instances = [ 'ip:port:instance', 'ip:port:instance', ]
if not len(self.instances) > 0:
raise Exception, 'no carbon instances defined, use CARBON_INSTANCES'
self.instance_ports = {} # { (server, instance) : port }
self.ring = ConsistentHashRing([])
self.feeders = {}
for i in self.instances:
s, p, n = i.split(':')
self.add_instance( s, p, n )
# connect to each instance
self.feeders[(s,p,n)] = CarbonFeeder( host=s, port=p )
def __exit__(self,*args,**kwargs):
for i in self.feeders:
            self.feeders[i].__exit__(*args,**kwargs)
def add_instance(self,server,port,instance):
if (server, instance) in self.instance_ports:
raise Exception("destination instance (%s, %s) already configured" % (server, instance))
self.instance_ports[ (server, instance) ] = port
self.ring.add_node( (server, instance) )
def remove_instance(self,server,port,instance):
if (server, instance) not in self.instance_ports:
raise Exception("destination instance (%s, %s) not configured" % (server, instance))
del self.instance_ports[ (server, instance) ]
self.ring.remove_node( (server, instance) )
def get_instance(self,key):
(s, i) = self.ring.get_node(key)
p = self.instance_ports[ (s, i) ]
k = (s, p, i)
if k in self.feeders:
# logging.info("%s:%s:%s" % (s, p, i))
return self.feeders[k]
raise Exception, 'could not find feeder for %s:%s:%s' % (s, p, i)
def send( self, time, key, statistics={}, min_chunks=500, max_chunks=1000, backoff_threshold=25000 ):
# logging.info("sending...")
# as statistics is a hash, we need to append concat with key to get appropriate feeder
data = {}
for k,v in statistics.iteritems():
this_key = "%s.%s" % (key,k)
this_key = this_key.replace( '/', '.' )
# for f in self.get_instances(this_key):
f = self.get_instance(this_key)
if not f in data:
data[f] = {}
data[f][k] = v
for f in data:
f.send( time, key, statistics=data[f], min_chunks=min_chunks, max_chunks=max_chunks, backoff_threshold=backoff_threshold )
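# A minimal usage sketch (hedged: the two carbon pickle listeners below are
# placeholder addresses, not values from this module):
#
#   feeder = MultiCarbonFeeder( instances=[ 'localhost:2004:a', 'localhost:2104:b' ] )
#   feeder.send( now(), 'preamble.device1', statistics={ 'cpu': 0.42, 'mem': 1337 } )
#
# Each metric key ('preamble.device1.cpu', 'preamble.device1.mem') is hashed on
# the ConsistentHashRing, so different keys may land on different carbon
# instances; the per-instance CarbonFeeders buffer the points and apply the
# backoff logic implemented above.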
|
[
"yee379@gmail.com"
] |
yee379@gmail.com
|
7da2ba571206feffcfcfe76ceadbd73c59c86999
|
92b31640f0572b3f7cdb3ae237bddb13318b300b
|
/meiduo_mall/celery_tasks/sms/yuntongxun/ccp_sms.py
|
f663d92eb9f19a2276a64aedff2c49b22f14fa0e
|
[] |
no_license
|
Hillary886/meiduo_mall-project
|
940f18b76ec0b0c03601c26e4a2c491115124b47
|
8a4019182ce8f64f3263b4baa43883c0fca60fdd
|
refs/heads/master
| 2023-07-08T01:50:14.282813
| 2021-08-06T15:10:47
| 2021-08-06T15:10:47
| 383,808,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
# -*- coding:utf-8 -*-
import ssl
ssl._create_default_https_context = ssl._create_stdlib_context  # work around network errors in the Mac development environment
from celery_tasks.sms.yuntongxun.CCPRestSDK import REST
# Note: main account. After logging in to the Yuntongxun console, the developer main account ACCOUNT SID is shown under "Console - Applications".
_accountSid = '8aaf07087955b4b50179949dde1b1764'
# Note: main account token. After logging in, the developer main account AUTH TOKEN is shown under "Console - Applications".
_accountToken = '636d2d61487d44f9b4101ba46572b87f'
# Use the APPID from the management console home page, or the APPID of an application you created yourself.
_appId = '8aaf07087955b4b50179949ddf11176b'
# Note: request address; configure it as app.cloopen.com in production.
# _serverIP = 'sandboxapp.cloopen.com'
_serverIP = 'app.cloopen.com'
# Note: request port; 8883 in production.
_serverPort = "8883"
# Note: the REST API version number stays unchanged.
_softVersion = '2013-12-26'
# Official Yuntongxun sample code for sending SMS.
# Send a template SMS.
# @param to      phone number
# @param datas   content data, as a sequence, e.g. {'12','34'}; pass '' if no substitution is needed
# @param $tempId template ID
# def sendTemplateSMS(to, datas, tempId):
# # Initialise the REST SDK
# rest = REST(_serverIP, _serverPort, _softVersion)
# rest.setAccount(_accountSid, _accountToken)
# rest.setAppId(_appId)
#
# result = rest.sendTemplateSMS(to, datas, tempId)
# print(result)
class CCP(object):
    '''Singleton class for sending SMS verification codes.'''
    def __new__(cls, *args, **kwargs):
        '''
        Initialise the singleton.
        return: the singleton instance
        '''
        # Check whether the singleton exists: the _instance attribute stores it.
        if not hasattr(cls, '_instance'):
            # If the singleton does not exist yet, create and initialise it.
            # (object.__new__ takes no extra arguments in Python 3, so *args
            # and **kwargs are not forwarded here.)
            cls._instance = super(CCP, cls).__new__(cls)
            # Initialise the REST SDK.
            cls._instance.rest = REST(_serverIP, _serverPort, _softVersion)
            cls._instance.rest.setAccount(_accountSid, _accountToken)
            cls._instance.rest.setAppId(_appId)
        # Return the singleton.
        return cls._instance
    def send_template_sms(self, to, datas, tempId):
        '''
        Send an SMS verification code.
        to: phone number
        datas: content data
        tempId: template ID
        return: 0 on success, -1 on failure
        '''
        result = self.rest.sendTemplateSMS(to, datas, tempId)
        print(result)
        if result.get('statusCode') == '000000':
            return 0
        else:
            return -1
if __name__ == '__main__':
    # Note: the test SMS template ID is 1.
    # sendTemplateSMS('15280862522', ['123456', 5], 1)
    # Send the SMS verification code through the singleton class.
CCP().send_template_sms('15280862522', ['123456', 5], 1)
|
[
"zhangxu201709@163.com"
] |
zhangxu201709@163.com
|
e03c92ed9a61266c0dcc029b0dd784a2ab585796
|
c372880122512bcc897dac4932982a8face60363
|
/farsante/dask_generators.py
|
c1d79025fddbfa8ebe8e5671d2504de3d9e0c509
|
[] |
no_license
|
bishwajitdey/farsante
|
a5023705cb227f1e0cc5b1ddc9b02143859335c4
|
e94d6b4305fb684da5eb234846888d30511603f9
|
refs/heads/master
| 2023-03-23T20:10:52.503013
| 2020-08-23T15:55:43
| 2020-08-23T15:55:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
from farsante.pandas_generators import *
import dask.dataframe as dd
def quick_dask_df(cols, num_rows, npartitions=1):
df = quick_pandas_df(cols, num_rows)
return dd.from_pandas(df, npartitions=npartitions)
def dask_df(funs, num_rows, npartitions=1):
df = pandas_df(funs, num_rows)
return dd.from_pandas(df, npartitions=npartitions)
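# A minimal usage sketch (hedged: assumes quick_pandas_df(cols, num_rows) from
# farsante.pandas_generators takes a list of column names and returns a pandas
# DataFrame of fake data -- its exact signature lives in that module):
#
#   ddf = quick_dask_df(['first_name', 'age'], num_rows=100, npartitions=4)
#   ddf.npartitions   # -> 4
#   ddf.compute()     # materialise back into a single pandas DataFrame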
|
[
"matthewkevinpowers@gmail.com"
] |
matthewkevinpowers@gmail.com
|
b4c3705c29a3cafac85fe5090c3e0a119c6fd42a
|
b8770ccd24aa8b5d53bec3a1b11aa43a2b00de0f
|
/assignment4
|
f613f706a3f3ba3bc38fcb4d9be877579f05c38e
|
[] |
no_license
|
thomas1115/computer-physics-yoonjongseok
|
7bbf78f80329a0402f26093217221a0b43c5fad4
|
f572d3ae7169e684a26ce4e2dd3bfc39e52b6089
|
refs/heads/main
| 2023-04-21T23:12:19.552672
| 2021-05-10T12:53:29
| 2021-05-10T12:53:29
| 345,236,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,142
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import math
import scipy as sp
import time
import matplotlib.pyplot as plt
from scipy.linalg import lu,lu_factor, lu_solve
def LUdecomp(a):
n = len(a)
for k in range(0,n-1):
for i in range(k+1,n):
if a[i,k] != 0.0:
lam = a[i,k]/a[k,k]
a[i,k+1:n] = a[i,k+1:n] - lam*a[k,k+1:n]
a[i,k] = lam
return a
def LUsolve(a,b):
n = len(a)
for k in range(1,n):
b[k] = b[k] - np.dot(a[k,0:k],b[0:k])
b[n-1] = b[n-1]/a[n-1,n-1]
for k in range(n-2,-1,-1):
b[k] = (b[k] - np.dot(a[k,k+1:n],b[k+1:n]))/a[k,k]
return b
def LUdecomp3(c,d,e):
n = len(d)
for k in range(1,n):
lam = c[k-1]/d[k-1]
d[k] = d[k] - lam*e[k-1]
c[k-1] = lam
return c,d,e
def LUsolve3(c,d,e,b):
n = len(d)
for k in range(1,n):
b[k] = b[k] - c[k-1]*b[k-1]
    b[n-1] = b[n-1]/d[n-1]
    for k in range(n-2,-1,-1):
        b[k] = (b[k] - e[k]*b[k+1])/d[k]
return b
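# Worked sketch of the LU routines above (an illustrative 2x2 system, not part
# of the assignment): LUdecomp overwrites `a` in place with the multipliers
# (strict lower triangle) and U (upper triangle); LUsolve then runs the
# forward and back substitution on `b`.
#
#   M = np.array([[4.0, 3.0], [6.0, 3.0]])
#   rhs = np.array([10.0, 12.0])
#   LUsolve(LUdecomp(M), rhs)   # -> array([1., 2.]), since 4*1+3*2=10 and 6*1+3*2=12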
def Ax(v):
n = len(v)
Ax = np.zeros(n)
Ax[0] = 2.0*v[0] - v[1] + v[n-1]
    Ax[1:n-1] = -v[0:n-2] + 2.0*v[1:n-1] - v[2:n]
Ax[n-1] = -v[n-2] + 2.0*v[n-1] + v[0]
return Ax
def Av(x):
return A@x
def conjGrad(Av,x,b,tol = 1.0e-9):
n = len(b)
r = b - Av(x)
s = r.copy()
for i in range(n):
u = Av(s)
alpha = np.dot(s,r)/np.dot(s,u)
x = x + alpha*s
r = b - Av(x)
if(math.sqrt(np.dot(r,r))) < tol:
break
else:
beta = -np.dot(r,u)/np.dot(s,u)
s = r + beta*s
return x,i
def gauss(a,b):
    # Gaussian elimination with back substitution; works on float copies of a
    # and b so the caller's matrix and right-hand side are left untouched.
    n = len(b)
    a = np.array(a, dtype=float)
    b = np.asarray(b, dtype=float).flatten()
    x = np.zeros(n)
    for k in range(0,n-1):
        for i in range(k+1,n):
            if a[i,k] != 0.0:
                lam = a[i,k]/a[k,k]
                a[i,k:n] = a[i,k:n] - lam*a[k,k:n]
                b[i] = b[i] - lam*b[k]
    for k in range(n-1,-1,-1):
        x[k] = (b[k] - np.dot(a[k,k+1:n],x[k+1:n]))/a[k,k]
    return x
A = np.array([[2,-1,0,0,0],[-1,4,-1,0,0],[0,-1,4,-1,-2],[0,0,-1,2,-1],[0,0,-2,-1,3]])
b = np.array([1/2.5,1/2.5,1/2.5,1/2.5,1/2.5])
x= np.zeros(5)
conjGrad(Av,x,b,tol=1.0e-9)
n = 10
C = np.zeros([n,n])
C[0,0] = 4
C[0,1] = -1
x= np.zeros(10)
for i in range(2,n):
C[i-1,i-2] = C[i-1,i-2] - 1
C[i-1,i-1] = C[i-1,i-1] + 4
C[i-1,i] = C[i-1,i] - 1
C[n-1,n-2] = C[n-1,n-2] - 1
C[n-1,n-1] = C[n-1,n-1] + 4
B = np.zeros([n,1])
B[0,0] = 9
for i in range(1,n):
B[i,0] = 5
alpha = time.time()
gauss(C,B)
beta = time.time()
t1 = beta - alpha
alpha = time.time()
conjGrad(lambda v: C @ v, x, B.flatten(), tol=1.0e-9)
beta = time.time()
t2 = beta - alpha
A = np.array([[1,0,1],[0,1,0],[0,0,1]])
b = np.array([0,0,1])
x = np.array([-1,0,0])
conjGrad(Av,x,b,tol=1.0e-9)
n = len(b)
tol = 1.0e-9
r = b - Av(x)
s = np.array([0,0,1])
for i in range(n):
u = Av(s)
alpha = np.dot(s,r)/np.dot(s,u)
x = x + alpha*s
r = b - Av(x)
if(math.sqrt(np.dot(r,r))) < tol:
break
else:
beta = -np.dot(r,u)/np.dot(s,u)
s = r + beta*s
|
[
"noreply@github.com"
] |
thomas1115.noreply@github.com
|
|
9ca07dd3eb6987b07746338f6a11f29b8695ed31
|
70164edd34216338b162616ef496d55af98a6f39
|
/nostclient/commands/configure.py
|
62034f796bc8f4ce1ab6492cca9d5eebde4496f1
|
[] |
no_license
|
Nexenta/python-nostclient
|
48f23fa33d2b91d26b635f5886d69ddd4514b905
|
d65c5e31749fd28b817aad4e6f50e5afe5e2bf92
|
refs/heads/master
| 2016-09-06T19:11:11.276520
| 2013-02-26T12:28:36
| 2013-02-26T12:28:36
| 6,267,159
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,604
|
py
|
# Copyright 2012 Nexenta Systems Inc.
from __future__ import with_statement
import os
import sys
from nostclient.common.config import Config
from nostclient.common.utils import is_true, renamer, validate_path
from nostclient.common.exceptions import ValidationError, OptionError
from nostclient.common.validators import NotEmptyValidator, NotUrlValidator
from nostclient.common.constants import DEFAULT_CONFIG_PATH, ERROR_CODE, \
SUCCESS_CODE, EMPTY_VALUES, SCRIPT_NAME
USAGE = """
%s configure [options]
Save configs to config file, default file path is %s.
""".strip('\n') % (SCRIPT_NAME, DEFAULT_CONFIG_PATH)
def action(parser, args):
parser.usage = USAGE
(opts, args) = parser.parse_args(args)
cfg_file_path = validate_path(opts.cfg_file)
config = Config(cfg_file_path)
if not os.path.exists(os.path.split(cfg_file_path)[0]):
raise OptionError('Invalid path for configuration file: %s' %
cfg_file_path)
    # TODO: if cfg_file already exists, ask whether to change it
options = [
('auth_url', 'Authorization url', 'URL for obtaining an auth token',
(NotEmptyValidator('Please define authorization url'),
NotUrlValidator('Invalid url value'))),
('auth_version', 'Authorization version',
'Specify a version for authentication (default: 1.0)',
     (NotEmptyValidator('Please define authorization version'), )),
('user', 'User', 'User name for obtaining an auth token',
(NotEmptyValidator('Please define user'), )),
('key', 'Key', 'Key for obtaining an auth token',
(NotEmptyValidator('Please define key'), ))
]
print >> sys.stdout, 'Configure nostclient.'
print >> sys.stdout, 'Enter new values or accept defaults in brackets ' \
'with Enter.'
print >> sys.stdout
for key, name, description, validators in options:
print >> sys.stdout, description
default = getattr(config, key)
        if default:
            prompt = '%s [%s]: ' % (name, default)
        else:
            prompt = '%s: ' % name
        if not isinstance(validators, (list, tuple)):
            validators = (validators,)
        while True:
            value = raw_input(prompt) or default
try:
for validator in validators:
value = validator(value)
except ValidationError, e:
print >> sys.stderr, 'ERROR: %s' % e
continue
break
setattr(config, key, value)
save = raw_input("Save configuration parameters in %s ([y]/n): " %
cfg_file_path) or 'yes'
if not is_true(save):
        print >> sys.stdout, 'Configuration parameters weren\'t saved'
return SUCCESS_CODE
while os.path.exists(cfg_file_path):
overwrite = raw_input("File %s already exists, overwrite it? "
"(y/[n]): " % cfg_file_path) or 'no'
if is_true(overwrite):
backup = raw_input("Create backup of %s? ([y]/n)" %
cfg_file_path) or 'yes'
if is_true(backup):
idx = 0
backup_file = '%s.bkp' % cfg_file_path
while os.path.exists(backup_file):
idx += 1
backup_file = '%s.bkp_%s' % (cfg_file_path, idx)
try:
renamer(cfg_file_path, backup_file)
print >> sys.stdout, ("File %s was successfully moved to "
"%s" % (cfg_file_path, backup_file))
except IOError:
print >> sys.stderr, "ERROR: Cannot move %s to %s" % \
(cfg_file_path, backup_file)
else:
while True:
cfg_file_path = raw_input("Input path to file for saving "
"configuration parameters: ")
if cfg_file_path in EMPTY_VALUES:
print >> sys.stderr, "ERROR: Please define path to " \
"configuration file"
continue
break
break
try:
config.save_config(cfg_file_path)
print >> sys.stdout, "Configuration parameters was successfully " \
"saved in %s" % cfg_file_path
except IOError:
print >> sys.stderr, "ERROR: Cannot save configuration file: %s" % \
cfg_file_path
return ERROR_CODE
return SUCCESS_CODE
|
[
"vito.ordaz@gmail.com"
] |
vito.ordaz@gmail.com
|
2ef32e227da259422bf598a45417ac09a69f99b6
|
110261034f716194118e6c1561dec424f0704ca9
|
/bin/femur
|
cbe08a567d2bb0f1de38540bf959dfa874614b3d
|
[] |
no_license
|
recordsonribs/femur
|
db452835ce82462f245dbc63762786708b0dcd3f
|
2911b884316c2caa502a38692fe41c48fe5d46aa
|
refs/heads/master
| 2021-01-01T05:33:45.710245
| 2016-01-02T23:00:16
| 2016-01-02T23:00:16
| 24,003,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
#!/usr/bin/env python
from femur.main import main
if __name__ == "__main__":
main()
|
[
"alex@recordsonribs.com"
] |
alex@recordsonribs.com
|
|
5031f715f67890e35e6ed14eec1e76f9f3209fa3
|
e98a2c73b2052e6aeafa5c1297cd5a238aa8e8ae
|
/tceh_lesson16/lesson16_app/apps.py
|
e315804c3e2b26d15de0e994d4f91e9851471cd2
|
[] |
no_license
|
dkorney/tceh_homeworks
|
2af30efc90f865cec04ea41cfb5f44be771524de
|
8358294e2672ba415de02eed9bc51ef40bfa819f
|
refs/heads/master
| 2021-06-12T20:46:07.022979
| 2017-03-30T07:41:07
| 2017-03-30T07:41:07
| 80,547,101
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
from django.apps import AppConfig
class Lesson16AppConfig(AppConfig):
name = 'lesson16_app'
|
[
"dmitry.korneychuk@gmail.com"
] |
dmitry.korneychuk@gmail.com
|
26b17c8da6ad395e7b19e0b8a32e91aff2370408
|
76c197aecb5de3314e9a90864e5f534ac1e89003
|
/examples/schedulers/asyncio_.py
|
88d9a822330f6a1cb41018065261ade8c22f516a
|
[
"MIT"
] |
permissive
|
shipmints/apscheduler
|
a46a3825025e0008c27bb138fc0336e444bc1729
|
0f096f63732a608c43df1ea05393308d04d7c455
|
refs/heads/master
| 2020-08-14T13:04:18.419142
| 2019-10-17T16:45:31
| 2019-10-17T16:45:31
| 215,172,072
| 0
| 0
|
NOASSERTION
| 2019-10-15T00:44:16
| 2019-10-15T00:44:15
| null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
"""
Demonstrates how to use the asyncio compatible scheduler to schedule a job that executes on 3
second intervals.
"""
from datetime import datetime
import os
from apscheduler.schedulers.asyncio import AsyncIOScheduler
try:
import asyncio
except ImportError:
import trollius as asyncio
def tick():
print('Tick! The time is: %s' % datetime.now())
if __name__ == '__main__':
scheduler = AsyncIOScheduler()
scheduler.add_job(tick, 'interval', seconds=3)
scheduler.start()
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
# Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
try:
asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
pass
|
[
"alex.gronholm@nextday.fi"
] |
alex.gronholm@nextday.fi
|
c83a126fcf82805e353bec8a36aaa4ac53092571
|
f078a969c6c92dbd4644a55609c3969255de1c7a
|
/qtoolkit/data_structures/quantum_circuit/quantum_circuit.py
|
8ff67be84d9fcd15146add087969fc8f612e50cc
|
[
"BSD-3-Clause",
"CECILL-B",
"MIT",
"LicenseRef-scancode-cecill-b-en"
] |
permissive
|
nelimee/qtoolkit
|
2d2919a163677128acbfbd84b36bb61367732a6e
|
1e99bd7d3a143a327c3bb92595ea88ec12dbdb89
|
refs/heads/master
| 2020-04-01T09:15:09.134161
| 2018-11-27T13:57:52
| 2018-11-27T13:57:52
| 153,067,146
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,657
|
py
|
# ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: Adrien Suau (adrien.suau@cerfacs.fr)
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Implementation of the :py:class:`~.QuantumCircuit` class.
The :py:class:`~.QuantumCircuit` class represents a general quantum circuit as a
Directed Acyclic Graph with possibly some multi-edges (2 edges can share the
same source **and** the same target).
"""
import copy
import typing
import networkx as nx
import numpy
import qtoolkit.data_structures.quantum_circuit.gate_hierarchy as qgate
import qtoolkit.data_structures.quantum_circuit.quantum_operation as qop
class QuantumCircuit:
def __init__(self, qubit_number: int, cache_matrix: bool = True) -> None:
"""Initialise the :py:class:`~.QuantumCircuit` instance.
For documentation about the :py:class:`~.QuantumCircuit` internals see
the :py:mod:`.quantum_circuit.quantum_circuit` documentation.
:param qubit_number: The number of qubits the instance will acts on.
:param cache_matrix: A boolean flag indicating if the instance should
keep in memory the current value of its representing matrix or if it
should recompute this matrix at each call to
:py:attr:`.QuantumCircuit.matrix`.
"""
assert qubit_number > 0, (
"A circuit with less than 1 qubit cannot be " "created."
)
self._qubit_number = qubit_number
self._graph = nx.MultiDiGraph()
self._node_counter = 0
for qubit_id in range(qubit_number):
self._graph.add_node(self._node_counter, type="input", key=qubit_id)
self._node_counter += 1
self._last_inserted_operations = numpy.arange(qubit_number)
self._cache_matrix = cache_matrix
self._matrix = None
if self._cache_matrix:
self._matrix = numpy.identity(2 ** self._qubit_number)
def add_operation(self, operation: qop.QuantumOperation) -> None:
"""Add an operation to the circuit.
:param operation: The operation to add to the
:py:class:`~.QuantumCircuit` instance.
"""
self._check_operation(operation)
current_node_id = self._node_counter
self._graph.add_node(self._node_counter, type="op", op=operation)
self._node_counter += 1
# Create the target wire
self._create_edge(
self._last_inserted_operations[operation.target],
current_node_id,
operation.target,
)
self._last_inserted_operations[operation.target] = current_node_id
# Create the control wires
for ctrl in operation.controls:
self._create_edge(
self._last_inserted_operations[ctrl], current_node_id, ctrl
)
self._last_inserted_operations[ctrl] = current_node_id
# Compute the new matrix if needed and possible.
if self._cache_matrix:
self._matrix = self._matrix @ operation.matrix(self._qubit_number)
def apply(
self, gate: qgate.QuantumGate, target: int, controls: typing.Sequence[int] = ()
) -> None:
"""Apply a quantum operation to the circuit.
:param gate: The quantum gate to apply.
:param target: The target qubit. The quantum gate will be applied on
this qubit.
:param controls: The control qubit(s).
"""
self.add_operation(qop.QuantumOperation(gate, target, controls))
def _check_operation(self, operation: qop.QuantumOperation) -> None:
"""Check if the operation is valid. If not, raise an exception.
:param operation: The operation to check for validity.
:raise IndexError: if the qubits of the operation (target or control(s))
are not within the range of the current instance.
:raise RuntimeError: if one of the qubits on the operation (target or
control(s)) is None or if the target qubit is also listed in the
control qubit(s).
"""
if operation.target is None or any(
(ctrl is None for ctrl in operation.controls)
):
raise RuntimeError(
"At least one of the target or control qubit is None. Generic "
"QuantumOperations are not supported in a QuantumCircuit "
"instance."
)
if operation.target in operation.controls:
raise RuntimeError(
"The target qubit cannot be in the list of control qubits."
)
if operation.target >= self._qubit_number or operation.target < 0:
raise IndexError(
f"The operation's target ({operation.target}) is not valid "
f"for the current quantum circuit with {self._qubit_number} "
f"qubits."
)
for ctrl in operation.controls:
if ctrl >= self._qubit_number or ctrl < 0:
raise IndexError(
"One of the control qubit is not valid for the current "
"quantum circuit."
)
def pop(self) -> qop.QuantumOperation:
"""Deletes the last inserted operation from the instance and returns it.
:return: The last inserted operation.
"""
if self._node_counter <= self._qubit_number:
raise RuntimeError(
"Attempting to pop a QuantumOperation from an empty " "QuantumCircuit."
)
# Recover the last operation performed.
op = self.last
# Update the last_inserted structure
for pred, _, key in self._graph.in_edges(
nbunch=self._node_counter - 1, keys=True
):
self._last_inserted_operations[key] = pred
# Remove the node (and the edges associated to it).
self._graph.remove_node(self._node_counter - 1)
self._node_counter -= 1
# Compute the new matrix if needed and possible.
if self._cache_matrix:
self._matrix = self._matrix @ op.matrix(self._qubit_number).T.conj()
return op
def _create_edge(self, from_id: int, to_id: int, qubit_id: int) -> None:
"""Create an edge between `from_id` and `to_id`.
:param from_id: Source of the edge.
:param to_id: Target of the edge.
:param qubit_id: Identifier of the qubit concerned by the target
operation.
"""
self._graph.add_edge(from_id, to_id, key=qubit_id)
def get_n_last_operations_on_qubit_reversed(
self, n: int, qubit_id: int
) -> typing.Iterable[qop.QuantumOperation]:
"""Get the `n` last inserted operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param n: Number of quantum operation to retrieve.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over the `n` last quantum operations involving
`qubit_id` in the reverse order of insertion.
:raise IndexError: if `qubit_id` is involved in less than `n`
operations.
"""
try:
all_ops_gen = self.get_operations_on_qubit_reversed(qubit_id)
for op_id in range(n):
yield next(all_ops_gen)
except StopIteration:
raise IndexError(
f"Cannot retrieve {n} operations on qubit n°{qubit_id}: only "
f"{op_id} operation are available."
)
def get_n_last_operations_on_qubit(
self, n: int, qubit_id: int
) -> typing.Iterable[qop.QuantumOperation]:
"""Get the `n` last inserted operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param n: Number of quantum operation to retrieve.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over the `n` last quantum operations involving
`qubit_id` in the order of insertion.
:raise IndexError: if `qubit_id` is involved in less than `n`
operations.
"""
return list(self.get_n_last_operations_on_qubit_reversed(n, qubit_id))[::-1]
def get_operations_on_qubit_reversed(self, qubit_id: int):
"""Get all the operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over all the quantum operations involving
`qubit_id` in the reverse order of insertion.
"""
current = self._last_inserted_operations[qubit_id]
while current >= self.qubit_number:
yield self._graph.nodes[current]["op"]
# Update the current node.
current = next(
filter(
lambda node_id: qubit_id
in self._graph.get_edge_data(node_id, current),
self._graph.predecessors(current),
)
)
def get_operations_on_qubit(self, qubit_id: int):
"""Get all the operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over all the quantum operations involving
`qubit_id` in the order of insertion.
"""
return list(self.get_operations_on_qubit_reversed(qubit_id))[::-1]
def __getitem__(self, idx: int) -> qop.QuantumOperation:
"""Method used when []-indexing is used.
:param idx: The position of the operation we want to retrieve.
:return: The idx-th inserted operation.
"""
return self._graph.nodes[idx + self._qubit_number]["op"]
@property
def last(self) -> qop.QuantumOperation:
"""Getter for the last inserted operation.
:return: the last inserted operation.
:raise IndexError: if the circuit is empty.
"""
if self._node_counter == self._qubit_number:
raise IndexError(
"Trying to recover the last operation of an " "empty QuantumCircuit."
)
return self._graph.nodes[self._node_counter - 1]["op"]
@property
def operations(self) -> typing.Iterable[qop.QuantumOperation]:
"""Getter on the operations performed in this quantum circuit.
:return: a generator that generates all the operations of the circuit.
"""
return (
self._graph.nodes[i]["op"]
for i in range(self._qubit_number, self._node_counter)
)
def gates_on_qubit(self, qubit_index: int) -> typing.Iterable[qop.QuantumOperation]:
"""Getter for the gates applied on the qubit at the given index.
:param qubit_index: the qubit we are interested in.
:return: a generator yielding all the quantum gates in the circuit
that involve the specified qubit.
"""
return (op.gate for op in self.get_operations_on_qubit(qubit_index))
@property
def matrix(self) -> numpy.ndarray:
"""Getter on the unitary matrix representing the circuit.
Depending on the value of `cache_matrix` given at initialisation, this
method will either return the cached matrix or compute it.
:return: the unitary matrix representing the current quantum circuit.
"""
if self._cache_matrix:
return self._matrix
ret = numpy.identity(2 ** self._qubit_number)
for operation in self.operations:
ret = ret @ operation.matrix(self._qubit_number)
return ret
@property
def qubit_number(self) -> int:
"""Getter on the number of qubits of the current instance."""
return self._qubit_number
@property
def size(self) -> int:
"""Getter on the number of quantum gates in the current instance."""
return self._node_counter - self._qubit_number
def __iadd__(self, other: "QuantumCircuit") -> "QuantumCircuit":
"""Add all the operations contained in `other` to the current instance.
:param other: the quantum circuit containing the operations to append
to the current instance. `other` and the instance
:py:meth:`~.__iadd__` is called on should have the same number of
qubits.
:return: The union of self and other.
:raise RuntimeError: if `self` and `other` have a different number of
qubits.
"""
# 1. Checks
if self.qubit_number != other.qubit_number:
raise RuntimeError(
f"The number of qubits of the first circuit "
f"({self.qubit_number}) does not match the "
f"number of qubits of the second circuit "
f"({other.qubit_number})."
)
# 2. Update the graph
# 2.1. First remove the "input" nodes from the other graph. We don't
# want to change or copy the other graph so we take a view of the other
# graph without the "input" nodes.
other_subgraph = other._graph.subgraph(
range(other.qubit_number, other._node_counter)
)
# 2.2. Regroup the two graphs into one graph.
self._graph = nx.disjoint_union(self._graph, other_subgraph)
# 2.3. Join the nodes if possible.
for qubit_index in range(self.qubit_number):
old_neighbor = list(other._graph.neighbors(qubit_index))
if old_neighbor:
new_neighbor = old_neighbor[0] - other.qubit_number + self._node_counter
self._graph.add_edge(
self._last_inserted_operations[qubit_index], new_neighbor
)
# Only change the last inserted index if we joined the nodes.
self._last_inserted_operations[qubit_index] = new_neighbor
# 3. Update the other attributes:
self._node_counter += other._node_counter - other.qubit_number
if self._cache_matrix and other._matrix is not None:
self._matrix = self.matrix @ other.matrix
return self
def __matmul__(self: "QuantumCircuit", other: "QuantumCircuit") -> "QuantumCircuit":
"""Wrapper around __iadd__ for the new '@' operator."""
cpy = copy.copy(self)
return cpy.__iadd__(other)
def __copy__(self) -> "QuantumCircuit":
"""Override the default copy behaviour."""
cpy = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
if self.compressed:
cpy._compressed_graph = copy.copy(self._compressed_graph)
else:
cpy._graph = self._graph.copy()
cpy._node_counter = self._node_counter
cpy._last_inserted_operations = self._last_inserted_operations.copy()
if self._cache_matrix:
cpy._matrix = self._matrix
return cpy
def compress(self) -> "QuantumCircuit":
"""Compress the instance to save some memory.
This method is useful when a large number of small circuits needs to be
stored in memory.
.. warning:: Several methods of the :py:class:`~.QuantumCircuit` class
will not work as expected (or will raise an exception) if called on
a compressed circuit.
"""
if not self.compressed:
self._compressed_graph = CompressedMultiDiGraph(self._graph)
del self._graph
return self
def uncompress(self) -> "QuantumCircuit":
"""Uncompress the instance."""
if self.compressed:
self._graph = self._compressed_graph.uncompress()
del self._compressed_graph
return self
@property
def compressed(self) -> bool:
"""Return True if the instance is compressed, else False."""
return hasattr(self, "_compressed_graph")
def inverse(self) -> "QuantumCircuit":
"""Create the inverse of the instance it is called on.
This method will create a new :py:class:`~.QuantumCircuit` and construct
in this new circuit the inverse of `self`.
"""
inv = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
for op in reversed(list(self.operations)):
inv.add_operation(op.inverse())
return inv
def __str__(self) -> str:
"""Textual representation of the circuit.
The representation used is very similar to OpenQASM.
"""
return "\n".join(
(
"{Cs}{opname} {controls}{commaornot}{target}".format(
Cs="C" * len(op.controls),
opname=op.gate.name,
controls=",".join(map(str, op.controls)),
commaornot=(", " if op.controls else ""),
target=op.target,
)
for op in self.operations
)
)
class CompressedMultiDiGraph:
def __init__(self, graph: nx.MultiDiGraph = None) -> None:
"""Initialise the :py:class:`~.CompressedMultiDiGraph` instance.
Instances of :py:class:`~.CompressedMultiDiGraph` are just storing
a :py:class:`networkx.MultiDiGraph` in a more memory efficient format.
:param graph: The graph to compress.
"""
if graph is None:
self._qubit_number = 0
return
node_number = len(graph.nodes)
edge_number = len(graph.edges)
if node_number < 2 ** 8:
data_type = numpy.uint8
elif node_number < 2 ** 16:
data_type = numpy.uint16
else:
data_type = numpy.uint32
# We keep each edge with its corresponding qubit ID.
self._from_arr = numpy.zeros((edge_number,), dtype=data_type)
self._to_arr = numpy.zeros((edge_number,), dtype=data_type)
self._data_arr = numpy.zeros((edge_number,), dtype=data_type)
for idx, (u, v, qubit_id) in enumerate(graph.edges):
self._from_arr[idx] = u
self._to_arr[idx] = v
self._data_arr[idx] = qubit_id
        # And then we keep each node
self._qubit_number = 0
        self._is_op_node = numpy.zeros((node_number,), dtype=bool)
self._operations = list()
for node_id, node_data in graph.nodes.items():
if node_data["type"] == "op":
self._is_op_node[node_id] = True
self._operations.append(node_data["op"])
else:
self._qubit_number += 1
def __copy__(self) -> "CompressedMultiDiGraph":
"""Override the default copy behaviour."""
cpy = CompressedMultiDiGraph()
cpy._qubit_number = self._qubit_number
cpy._from_arr = self._from_arr.copy()
cpy._to_arr = self._to_arr.copy()
cpy._data_arr = self._data_arr.copy()
cpy._is_op_node = self._is_op_node.copy()
cpy._operations = copy.copy(self._operations)
return cpy
def uncompress(self) -> nx.MultiDiGraph:
"""Uncompress the stored :py:class:`networkx.MultiDiGraph`.
:return: the uncompressed :py:class:`networkx.MultiDiGraph`.
"""
graph = nx.MultiDiGraph()
if self._qubit_number == 0:
return graph
# Re-create the nodes.
for i in range(self._qubit_number):
graph.add_node(i, type="input", key=i)
for node_id in range(self._qubit_number, len(self._is_op_node)):
graph.add_node(
node_id, type="op", op=self._operations[node_id - self._qubit_number]
)
# Re-create the edges
for u, v, qubit_id in zip(self._from_arr, self._to_arr, self._data_arr):
graph.add_edge(u, v, key=qubit_id)
return graph
CircuitCostFunction = typing.Callable[[QuantumCircuit], float]
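# A minimal usage sketch (hedged: the concrete gate objects live in
# gate_hierarchy, which is not shown here, so qgate.H / qgate.X below are
# assumed names rather than confirmed API):
#
#   circuit = QuantumCircuit(2)
#   circuit.apply(qgate.H, target=0)
#   circuit.apply(qgate.X, target=1, controls=[0])   # controlled-X, i.e. CNOT
#   print(circuit)        # -> "H 0" then "CX 0, 1"
#   circuit.compress()    # swap the DAG for a CompressedMultiDiGraph
#   circuit.uncompress()  # restore the networkx MultiDiGraph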
|
[
"adrien.suau@cerfacs.fr"
] |
adrien.suau@cerfacs.fr
|
5823d7d7033516b476ed84db307da0fc0abd03c5
|
0868d7c74bd4a0877bc6b862e7a159ebf0c42f1c
|
/funcao_n_quadrado.py
|
3468fb1cb20cab95675e6ee5087b9a5917a30574
|
[
"MIT"
] |
permissive
|
rafaelblira/python-progressivo
|
af7a901fc324362de1f74cd5399be5f711fb4e3f
|
fdd6ccf10d351d04158e03ff74dd99b431d94303
|
refs/heads/master
| 2022-10-14T07:40:33.900740
| 2020-06-11T14:22:54
| 2020-06-11T14:22:54
| 263,900,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
# Create a function that receives a number and prints its square.
def quadrado(n):
quad = n ** 2
print('Quadrado de {} é: {}'.format(n, quad))
numero = int(input('Digite um número para saber o seu quadrado: '))
quadrado(numero)
|
[
"rafaelblira@yahoo.com.br"
] |
rafaelblira@yahoo.com.br
|
5d3d6caff5ed5760a0ebb8e8b347d689ca73f304
|
7f3cbde8b31cc7ef064b303de54807f59ea0d3c8
|
/Algorithms/Warmup/solve_me_first.py
|
be7d9e33f573da43e5a6b9ce7256bebaf4731ddf
|
[] |
no_license
|
Marlysson/HackerRank
|
7f9ea6a04cd7b97ba5c43c5e321b219511a64106
|
98e65be30d8e6f70ca75676441dc9b1fd7fcac1b
|
refs/heads/master
| 2020-04-06T04:28:43.522255
| 2016-10-07T04:18:06
| 2016-10-07T04:18:06
| 55,646,873
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# -*- coding:utf-8 -*-
# Challenge : https://www.hackerrank.com/challenges/solve-me-first
def solveMeFirst(a,b):
tipos = (int,float)
if all( [isinstance(a,tipos) , isinstance(b,tipos)] ):
return a+b
else:
raise ValueError("Ha valores incorretos")
res = solveMeFirst(1,3)
print(res)
|
[
"marlysson5@gmail.com"
] |
marlysson5@gmail.com
|
a17c68c81482a954d2d326a14b0ca8afe7a1d3bf
|
9f525781555887c9528f83a97322bcd775162c3e
|
/Draw circle on Mouse clicks.py
|
53d96f5ca62ce7ca5cc96ebb05270b7bc2890f53
|
[] |
no_license
|
desairahulinrd000/OpenCV
|
0d0e19077aba9a2050b751ca64511efa19de2f74
|
4cf2bb820b789d314e34f5d7871a6f818a2d708f
|
refs/heads/master
| 2022-08-20T09:26:28.177278
| 2020-05-29T13:45:20
| 2020-05-29T13:45:20
| 265,911,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
import cv2
import numpy as np
# Create a black image and a window
windowName = 'Drawing'
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow(windowName)
# mouse callback function
def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img, (x, y), 40, (0, 255, 0), -1)
if event == cv2.EVENT_MBUTTONDOWN:
cv2.circle(img, (x, y), 20, (0, 0, 255), -1)
if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img, (x, y), 30, (255, 0, 0), -1)
# bind the callback function to window
cv2.setMouseCallback(windowName, draw_circle)
def main():
while(True):
cv2.imshow(windowName, img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
desairahulinrd000.noreply@github.com
|
b36c74ebc90e94b6c5b8e4696e8c3c466af4f025
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03327/s531970976.py
|
af05a6588e968b8a60610e4ea7216c99a751bad7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# 099A
# 1. Read the input properly
x=input()
i=int(x)
# 2. Output the result
if i<1000 :
print('ABC')
elif i<=1998:
print('ABD')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0b88fa7df258e0b1cb5daa7d89442a7886d490b0
|
10733187b81975377d37e3a414cd66f0be3be1ef
|
/cms/admin.py
|
f15a7b9ddcdba845bf124a6a3c605a6931b12c40
|
[] |
no_license
|
podnachitana/landing
|
58ea6aff6f4cd0b20f77659a96ee0bdca3352545
|
074d7c3f8e568f3f70e55564bda706d79837df66
|
refs/heads/master
| 2023-06-24T21:07:33.313094
| 2021-07-23T21:59:58
| 2021-07-23T21:59:58
| 387,613,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
from django.contrib import admin
from django.utils.safestring import mark_safe
from cms.models import CmsSlider
class CmsAdmin(admin.ModelAdmin):
list_display = ('cms_title', 'cms_css', 'get_img')
list_display_links = ('cms_title',)
list_editable = ('cms_css',)
fields = ('cms_title', 'cms_css', 'cms_img', 'get_img')
readonly_fields = ('get_img',)
def get_img(self, obj):
if obj.cms_img:
            return mark_safe(f'<img src="{obj.cms_img.url}" width="80px">')
else:
return 'Нет картинки'
get_img.short_description = 'Миниатюра'
admin.site.register(CmsSlider, CmsAdmin)
|
[
"zlayatanyaa@yandex.ru"
] |
zlayatanyaa@yandex.ru
|
2c24363060a7968ed3b518dcb2d567e499fe6a10
|
52307b41989b553051fe5d352998e46fca7b12a7
|
/api/serializers.py
|
d5b1291d6c4e08b5525de54c5b98b35ff99f7ea0
|
[] |
no_license
|
dangerousmonk/yamdb_final
|
93e68728db7e176e2c1873a2676fe556e5ff5721
|
acb6c26654ced865c5b058b3577cd82346c1843c
|
refs/heads/master
| 2023-07-09T18:27:47.427920
| 2021-08-22T17:03:01
| 2021-08-22T17:03:01
| 379,703,162
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,429
|
py
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Category, Comment, Genre, Review, Title
User = get_user_model()
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ['name', 'slug']
class GenreSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = ['name', 'slug']
class TitleReadSerializer(serializers.ModelSerializer):
genre = GenreSerializer(read_only=True, many=True)
category = CategorySerializer(read_only=True)
rating = serializers.FloatField()
class Meta:
model = Title
        fields = [
            'id',
            'name',
            'year',
            'description',
            'genre',
            'category',
            'rating',
        ]
class TitleWriteSerializer(serializers.ModelSerializer):
genre = serializers.SlugRelatedField(
queryset=Genre.objects.all(), slug_field='slug', many=True
)
category = serializers.SlugRelatedField(
queryset=Category.objects.all(), slug_field='slug'
)
class Meta:
model = Title
fields = [
'id',
'name',
'year',
'description',
'genre',
'category',
]
class ReviewSerializer(serializers.ModelSerializer):
text = serializers.CharField()
author = serializers.ReadOnlyField(source='author.username')
def validate(self, data):
request = self.context['request']
if request.method != 'POST':
return data
user = request.user
title_id = (
request.parser_context['kwargs']['title_id']
)
if Review.objects.filter(author=user, title__id=title_id).exists():
raise serializers.ValidationError(
'Review must be unique')
return data
class Meta:
fields = [
'id',
'text',
'author',
'score',
'pub_date'
]
model = Review
class CommentSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(
slug_field='username',
read_only=True
)
class Meta:
model = Comment
fields = [
'id',
'text',
'author',
'pub_date'
]
|
[
"dangerousmonk@yandex.ru"
] |
dangerousmonk@yandex.ru
|
e65862fecbe2af3df27ffe46a85392bb2c47e41b
|
aaa4eb09ebb66b51f471ebceb39c2a8e7a22e50a
|
/Lista 09/exercício 01.py
|
6cf8306d62a0b717e4f61f0d98c65df539a25d8b
|
[
"MIT"
] |
permissive
|
Brenda-Werneck/Listas-CCF110
|
c0a079df9c26ec8bfe194072847b86b294a19d4a
|
271b0930e6cce1aaa279f81378205c5b2d3fa0b6
|
refs/heads/main
| 2023-09-03T09:59:05.351611
| 2021-10-17T00:49:03
| 2021-10-17T00:49:03
| 411,115,920
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Create an algorithm that reads the elements of a 10 x 10 integer matrix and prints all of its elements, except those on the main diagonal.
matriz = [[0 for i in range(10)] for j in range(10)]
for i in range(10):
for j in range(10):
        matriz[i][j] = int(input(f"Enter the value for index ({i + 1}, {j + 1}): "))
for i in range(10):
for j in range(10):
if i != j:
print(matriz[i][j])
|
[
"89711195+Brenda-Werneck@users.noreply.github.com"
] |
89711195+Brenda-Werneck@users.noreply.github.com
|
5d4e823654d5532193ebea170301f710d793fe76
|
c81677a3d76953cc65e958bbb585f410cfdba36e
|
/csvs
|
ee4bc3af47669ab59488ff51342526e2ae00364d
|
[
"MIT"
] |
permissive
|
a-e/csvsee
|
4e3fd0a7fd74f4033cb974b713ff5dad9509b674
|
9960a2722be0dc961ab6d822407933e7c19ec531
|
refs/heads/master
| 2021-01-19T21:51:48.565578
| 2012-07-23T16:13:46
| 2012-07-23T16:13:46
| 2,497,642
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,328
|
#! /usr/bin/env python
__doc__ = """csvs: Frontend for CSVSee
Usage::
csvs [command] [options]
Command may be::
filter
graph
grep
grinder
info
Run ``csvs [command]`` with no further arguments to get help.
"""
usage = __doc__
"""
Ideas
-----
Manipulation of .csv files, especially large ones
- Display column names / column count / row count
- Split into manageable pieces based on column name or position
High-level analysis
- Display "interesting" columns (ones with large or frequent variation)
- Display "boring" columns (ones that are always the same or with little variation)
"""
import sys
import csv
from csvsee import utils
from csvsee.graph import Graph
from csvsee import grinder
class UsageError (Exception):
pass
def graph_command(args):
"""
    Generate a graph from a .csv data file.
Usage::
csvs graph filename.csv [-options] ["Column 1"] ["Column 2"] ...
Where filename.csv contains comma-separated values, with column names in the
first row, and all subsequent arguments are regular expressions that may match
one or more column names.
Options:
-x "<column name>"
An expression matching the column you want to use for your X-axis.
If this is omitted, the first column of the .csv file will be used
as the X-axis coordinate.
-dateformat "<format string>"
Interpret the timestamp as a date in the given format. Examples:
%m/%d/%y %I:%M:%S %p: 12/10/09 3:45:56 PM (Grinder logs)
%m/%d/%Y %H:%M:%S.%f: 12/10/2009 15:45:56.789 (Perfmon)
See http://docs.python.org/library/datetime.html for valid formats.
By default, the date format will be guessed based on the first row of
the .csv file. If the X-column is NOT a date, use -dateformat ""
-title "Title"
Set the title label for the graph. By default, the .csv filename
is used as the graph title.
-save "filename.(png|svg|pdf)"
Save the graph to a file. Default is to show the graph in a viewer.
-linestyle "<format string>"
Define the style of lines plotted on the graph. Examples are:
"-" Solid line (Default)
"." Point marker
"o" Circle marker
"o-" Circle + solid lines
See the Matplotlib Axes.plot documentation for available styles:
http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.plot
-xlabel "Label string"
Use the given string as the label of the X axis. If omitted, the
name of the X-column is used.
-ylabel "Label string" | prefix
Use the given string as the label of the Y axis. By default, the
Y axis has no label. If 'prefix', the prefix common to all the given
column names is used.
-ymax <number>
Set the maximum Y-value beyond which the graph is cropped. By default,
maximum Y-value is determined by the maximum value present in the data.
-truncate <number>
Truncate the column labels to <number> characters. By default,
no truncation is done.
-top <number>
Graph only the top <number> columns, based on the average of
all values in matching columns.
-peak <number>
Graph only the top <number> columns, based on the highest peak
value in matching columns.
-drop <number>
When used in conjunction with -top or -peak, this causes the top
<number> of columns to be omitted. For example, -top 10 -drop 5
will skip the top 5 maximum columns, and graph the next 10.
-gmtoffset [+/-]<hours>
Adjust timestamps if they are not in GMT. For example, if the
timestamps are GMT-6, use -gmtoffset +6 to make the graph display
them as GMT times.
-zerotime
Adjust all timestamps so the graph starts at 00:00.
If no column names are given, then all columns are graphed. To graph only
specific columns, provide one or more column expressions after the .csv
filename and any options. Column names are given as regular expressions,
allowing you to match multiple columns.
Examples:
csvgraph.py data.csv
Graph all columns found in data.csv, using the first column
as the X-axis.
csvgraph.py data.csv -top 5
Graph the 5 columns with the highest average value
csvgraph.py data.csv "^Response.*"
Graph all columns beginning with the word "Response"
csvgraph.py data.csv A B C
Graph columns "A", "B", and "C". Note that these are regular
expressions, and will actually match all columns containing "A", all
columns containing "B", and all columns containing "C".
If the first column is a date field, then the X axis will be displayed in HH:MM
format. Otherwise, all columns must be numeric (integer or floating-point).
"""
# CSV file is always the first argument
csv_file = args.pop(0)
if not csv_file.lower().endswith('.csv'):
raise UsageError("First argument must be a filename with .csv extension.")
# Create Graph for this csv file
graph = Graph(csv_file)
save_file = ''
# Get any -options that follow
while args and args[0].startswith('-'):
opt = args.pop(0).lstrip('-')
if opt in graph.strings:
graph[opt] = args.pop(0)
elif opt in graph.ints:
graph[opt] = int(args.pop(0))
elif opt in graph.floats:
graph[opt] = float(args.pop(0))
elif opt in graph.bools:
graph[opt] = True
elif opt == 'save':
save_file = args.pop(0)
else:
raise UsageError("Unknown option: %s" % opt)
# Get column expressions (all remaining arguments, if any)
if args:
graph['y'] = args
# Generate the graph
graph.generate()
if save_file:
graph.save(save_file)
else:
graph.show()
def grep_command(args):
"""
Create a .csv file by counting the number of occurrences of
text strings in one or more timestamped text files.
Usage::
csvs grep <file1> <file2> -match <expr1> <expr2> -out <report.csv> [-options]
Options::
-seconds <number>
Report match frequency with a granularity of <number> seconds. The
default is 60 seconds (1 minute); that is, each line of the .csv
output will include the count of all matches during each minute.
-dateformat "<format string>"
Interpret date/time using the given format. If omitted, the format
is inferred by guessing.
See http://docs.python.org/library/datetime.html for valid formats.
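    Example (hypothetical log files and match strings)::

        csvs grep app1.log app2.log -match "ERROR" "Timeout" -out errors.csv -seconds 30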
"""
# Need at least five arguments
if len(args) < 5:
raise UsageError()
infiles = []
matches = []
csvfile = ''
dateformat = ''
seconds = 60
# Get input filenames until an -option is reached
while args and not args[0].startswith('-'):
infiles.append(args.pop(0))
while args:
opt = args.pop(0)
if opt == '-match':
            while args and not args[0].startswith('-'):
matches.append(args.pop(0))
elif opt == '-out':
csvfile = args.pop(0)
elif opt == '-dateformat':
dateformat = args.pop(0)
elif opt == '-seconds':
seconds = int(args.pop(0))
else:
raise UsageError("Unknown option: '%s'" % opt)
# Search all the given files for matching text, and write the results to
# csvfile, with the first column being the timestamp, and remaining columns
# being the number of times each match was found.
outfile = open(csvfile, 'w')
heading = '"Timestamp","%s"' % '","'.join(matches)
outfile.write(heading + '\n')
for (timestamp, counts) in utils.grep_files(infiles, matches, dateformat, seconds):
line = '%s' % timestamp
for match in matches:
line += ',%s' % counts[match]
outfile.write(line + '\n')
outfile.close()
print("Wrote '%s'" % csvfile)
def grinder_command(args):
"""
Generate a .csv report of data from Grinder log files.
Usage::
csvs grinder [-options] <out_file> <data_files ...> <csv_prefix>
Options::
-seconds <number>
Summarize statistics over an interval of <number> seconds.
Default is 60-second intervals.
This will generate one .csv file for each of several important statistics.
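    Example (hypothetical filenames)::

        csvs grinder -seconds 30 out.log data_0.log data_1.log report_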
"""
# Defaults
granularity = 60
# Get any -options
while args and args[0].startswith('-'):
opt = args.pop(0)
if opt == '-seconds':
granularity = int(args.pop(0))
else:
raise UsageError("Unknown option: '%s'" % opt)
# Need at least three positional arguments
if len(args) < 3:
raise UsageError()
# Get positional arguments
out_file = args[0]
data_files = args[1:-1]
csv_prefix = args[-1]
# Generate the report
report = grinder.Report(granularity, out_file, *data_files)
report.write_all_csvs(csv_prefix)
# TODO: Refactor some of this into a submodule
def info_command(args):
"""
Display statistics and high-level analysis of a .csv file.
Usage::
csvs info <filename.csv> [-options]
Options::
-columns
Display all column names
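    Example (hypothetical filename)::

        csvs info data.csv -columns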
"""
# Need a .csv filename at least
if len(args) < 1:
raise UsageError()
csvfile = args.pop(0)
show_columns = False
while args and args[0].startswith('-'):
opt = args.pop(0)
if opt == '-columns':
show_columns = True
else:
raise UsageError("Unknown option: '%s'" % opt)
reader = csv.DictReader(open(csvfile))
num_columns = len(reader.fieldnames)
print(csvfile)
print("%d columns" % num_columns)
if show_columns:
print("Column names:")
print("-------------")
for column in reader.fieldnames:
print(column)
def filter_command(args):
"""
Filter a .csv file, keeping only matching columns.
Usage::
csvs filter <in_file.csv> -match <expr1> <expr2> ... -out <out_file.csv>
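    Example (hypothetical filename and column expressions)::

        csvs filter data.csv -match "^Response.*" "Errors" -out filtered.csv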
"""
# Need at least five arguments
if len(args) < 5:
raise UsageError()
infile = args.pop(0)
matches = []
outfile = ''
while args:
opt = args.pop(0)
if opt == '-match':
            while args and not args[0].startswith('-'):
matches.append(args.pop(0))
elif opt == '-out':
outfile = args.pop(0)
else:
raise UsageError("Unknown option: '%s'" % opt)
if not matches:
raise UsageError("Please provide one or more match expressions with -match")
if not outfile:
raise UsageError("Please provide an output file with -out")
utils.filter_csv(infile, outfile, matches)
# Commands and the function that handles them
command_functions = {
'graph': graph_command,
'grep': grep_command,
'grinder': grinder_command,
'info': info_command,
'filter': filter_command,
}
def exit_msg(usage, text=''):
"""Print usage notes along with a message, then exit the application.
"""
print(usage)
if text:
print(text)
sys.exit(1)
# Main program
if __name__ == '__main__':
if len(sys.argv) < 2:
exit_msg(usage)
args = sys.argv[1:]
command = args.pop(0)
# If command is not known, print usage and exit
if command not in command_functions:
exit_msg(usage, "Unknown command: '%s'" % command)
# Get the appropriate function
function = command_functions[command]
# If there are no arguments, display help and exit
if not args:
print(function.__doc__)
sys.exit(0)
# Run the command and catch errors
try:
command_functions[command](args)
    except UsageError as message:
exit_msg(function.__doc__, message)
except KeyboardInterrupt:
print("Aborted!")
sys.exit(0)
|
[
"wapcaplet88@gmail.com"
] |
wapcaplet88@gmail.com
|
|
55a74f5fb140c6e32234bf3cea3002577990709b
|
59f7791ea6d2cc9e200ef2addd84621c8f457cef
|
/fkumi/COSC1336 - Programming Fundamentals I/Programming Examples/Chapter 5 - Functions/Chapter 3 - Source Code/retirement.py
|
fc1105eebddeea93906b89ed5dd5ef744d47305b
|
[] |
no_license
|
suarezluis/DB_ClassRepo
|
6c1a753726f75a7b86074930897c4dca9db6ced6
|
3e680b27a0180607c3f8bcc008aa72893b608106
|
refs/heads/master
| 2020-07-28T13:24:09.531985
| 2019-12-12T01:23:11
| 2019-12-12T01:23:11
| 209,422,073
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
# The following is used as a global constant to represent
# the contribution rate.
CONTRIBUTION_RATE = 0.05
def main():
gross_pay = float(input('Enter the gross pay: '))
bonus = float(input('Enter the amount of bonuses: '))
show_pay_contrib(gross_pay)
show_bonus_contrib(bonus)
# The show_pay_contrib function accepts the gross
# pay as an argument and displays the retirement
# contribution for that amount of pay.
def show_pay_contrib(gross):
contrib = gross * CONTRIBUTION_RATE
print('Contribution for gross pay: $', \
format(contrib, ',.2f'), \
sep='')
# The show_bonus_contrib function accepts the
# bonus amount as an argument and displays the
# retirement contribution for that amount of pay.
def show_bonus_contrib(bonus):
contrib = bonus * CONTRIBUTION_RATE
    print('Contribution for bonuses: $', \
format(contrib, ',.2f'), \
sep='')
# Call the main function.
main()
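# A hypothetical example session (inputs are illustrative; 5% of 1000.00 is
# 50.00 and 5% of 200.00 is 10.00):
#   Enter the gross pay: 1000
#   Enter the amount of bonuses: 200
#   Contribution for gross pay: $50.00
#   Contribution for bonuses: $10.00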
|
[
"8792613"
] |
8792613
|
a5f6d26fc1bc6236673643badeb26a02c0d91751
|
9fa0fdd378fc2750a517a6059d903f8b7c0196c0
|
/userbot/plugins/inviteall.py
|
dfdfea5ab55b662567c96aecaaef5f5570d361ec
|
[
"MIT"
] |
permissive
|
MrActive/GujjuBot
|
e205dd8fa08c30c206c25b21024076c113b41653
|
30388aec3d2ad125cd4129b2b5f01a35a6b4d4b1
|
refs/heads/master
| 2023-04-05T08:37:30.423757
| 2021-04-23T04:09:31
| 2021-04-23T04:09:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,966
|
py
|
from telethon.errors import (
ChannelInvalidError,
ChannelPrivateError,
ChannelPublicGroupNaError,
)
from telethon.tl import functions
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon.tl.functions.messages import GetFullChatRequest
from userbot import *
from userbot.utils import admin_cmd
import asyncio
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.reply("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.reply(
"`This is a private channel/group or I am banned from there`"
)
return None
except ChannelPublicGroupNaError:
await event.reply("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError):
await event.reply("`Invalid channel/group`")
return None
return chat_info
def make_mention(user):
if user.username:
return f"@{user.username}"
else:
return inline_mention(user)
def inline_mention(user):
full_name = user_full_name(user) or "No Name"
return f"[{full_name}](tg://user?id={user.id})"
def user_full_name(user):
names = [user.first_name, user.last_name]
names = [i for i in list(names) if i]
full_name = " ".join(names)
return full_name
@borg.on(admin_cmd(pattern=r"allinvite ?(.*)"))
async def get_users(event):
sender = await event.get_sender()
me = await event.client.get_me()
if not sender.id == me.id:
rkp = await event.reply("`processing...`")
else:
rkp = await event.edit("`processing...`")
rk1 = await get_chatinfo(event)
chat = await event.get_chat()
if event.is_private:
        return await rkp.edit("`Sorry, can't add users here`")
s = 0
f = 0
error = "None"
await rkp.edit("**TerminalStatus**\n\n`Collecting Users.......`")
async for user in event.client.iter_participants(rk1.full_chat.id):
try:
if error.startswith("Too"):
await rkp.edit(
f"**Terminal Finished With Error**\n(`May Got Limit Error from telethon Please try agin Later`)\n**Error** : \n`{error}`\n\n• Invited `{s}` people \n• Failed to Invite `{f}` people")
if Config.PRIVATE_GROUP_BOT_API_ID is not None:
                await event.client.send_message(
Config.PRIVATE_GROUP_BOT_API_ID, "#ADDING\n"
f"ADDED **{s}** account(s) !!\
\nFailed **{f}** account(s) !!\
\nCHAT: {event.chat.title}(`{event.chat_id}`)")
await event.client(
functions.channels.InviteToChannelRequest(channel=chat, users=[user.id])
)
s = s + 1
await rkp.edit(
f"**Terminal Running...**\n\n• Invited `{s}` people \n• Failed to Invite `{f}` people\n\n**× LastError:** `{error}`"
)
            await asyncio.sleep(2)
except Exception as e:
error = str(e)
f = f + 1
return await rkp.edit(
f"**Terminal Finished** \n\n• Successfully Invited `{s}` people \n• failed to invite `{f}` people"
)
|
[
"noreply@github.com"
] |
MrActive.noreply@github.com
|
035608dc157e69fcf2296e173c32828af4537f8b
|
6e68584f2819351abe628b659c01184f51fec976
|
/Centre_College/CSC_117/CSC_117_Python_Files/form.py
|
a06885e220a2f72a9945f583716eb540c481c1ab
|
[] |
no_license
|
DanSGraham/code
|
0a16a2bfe51cebb62819cd510c7717ae24b12d1b
|
fc54b6d50360ae12f207385b5d25adf72bfa8121
|
refs/heads/master
| 2020-03-29T21:09:18.974467
| 2017-06-14T04:04:48
| 2017-06-14T04:04:48
| 36,774,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
#!/usr/bin/python
print "Content-type: text/html\n"
import cgi
import cgitb; cgitb.enable() #helps w/ debugging
form = cgi.FieldStorage()
print "<OL>"
for element in form.keys():
print "<LI>" + element + "=" + str(form.getlist(element)) + "</LI>"
print "</OL>"
|
[
"dan.s.graham@gmail.com"
] |
dan.s.graham@gmail.com
|
2a8140a13fc4fc9a76df6bc671252a972d6f76ee
|
07c4798e133b3190c7a4351d6b40fdd2f0a40756
|
/Challenge_019.py
|
75a010264e7dc0727e98482473e45c3c2b6ee846
|
[] |
no_license
|
hardhary/PythonByExample
|
95f05ded7ea96e824cd0ee453ead97004d069dcd
|
15c2c496060b6058fe604f3f0389976ffe9293a2
|
refs/heads/master
| 2023-03-06T19:05:21.018556
| 2021-02-06T19:55:15
| 2021-02-06T19:55:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
# 019 Ask the user to enter 1, 2 or 3. If they enter a 1, display the message “Thank you”, if they enter a 2, display “Well done”, if they enter a 3, display “Correct”.
# If they enter anything else, display “Error message”.
def enternumber():
number = int(input("Enter a number 1, 2 or 3: "))
if number == 1:
print("Thank you")
elif number == 2:
print("Well done")
elif number == 3:
print("Correct")
else:
print("Error message")
enternumber()
|
[
"brain@villorddozasmbp.attlocal.net"
] |
brain@villorddozasmbp.attlocal.net
|
03fda28d315454a90ef2015ea553d725ae633957
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_246/ch168_2020_06_21_21_55_19_103833.py
|
0fcd377deab8aba74d445f29d9bac6a57a4950c1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
def login_disponivel(nome, lista):
    if nome in lista:
        i = 1
        login = nome + str(i)
        while login in lista:
            i += 1
            login = nome + str(i)
    else:
        login = nome
    return login
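# A minimal sanity check of the reconstructed logic above (hypothetical inputs):
# 'ana' collides with 'ana' and 'ana1', so 'ana2' is returned; 'bia' is free.
if __name__ == '__main__':
    assert login_disponivel('ana', ['ana', 'ana1']) == 'ana2'
    assert login_disponivel('bia', ['ana']) == 'bia'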
|
[
"you@example.com"
] |
you@example.com
|
832673c8ebb766cb37ab04d94ec053f6ba356842
|
c0c4fe8f9aff2e7684fcaf10329f963873753b2a
|
/src/biotite/sequence/alphabet.py
|
90f246dabb05b57661b14007e7ad49eb3b56da95
|
[
"BSD-3-Clause"
] |
permissive
|
thomasnevolianis/biotite
|
85e1b9d6a1fbb5d9f81501a8ebc617bc26388ab9
|
916371eb602cfcacb2d5356659298ef38fa01fcc
|
refs/heads/master
| 2022-11-30T19:40:53.017368
| 2020-08-04T07:00:59
| 2020-08-04T07:00:59
| 285,375,415
| 0
| 0
|
BSD-3-Clause
| 2020-08-05T18:41:48
| 2020-08-05T18:41:47
| null |
UTF-8
|
Python
| false
| false
| 16,309
|
py
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence"
__author__ = "Patrick Kunzmann"
__all__ = ["Alphabet", "LetterAlphabet", "AlphabetMapper", "AlphabetError"]
import copy
from numbers import Integral
import string
import numpy as np
from .codec import encode_chars, decode_to_chars, map_sequence_code
class Alphabet(object):
"""
This class defines the allowed symbols for a :class:`Sequence` and
handles the encoding/decoding between symbols and symbol codes.
An :class:`Alphabet` is created with the list of symbols, that can
be used in this context.
In most cases a symbol will be simply a letter, hence a string of
length 1. But in principle every hashable Python object can serve
as symbol.
The encoding of a symbol into a symbol code is
done in the following way: Find the first index in the symbol list,
where the list element equals the symbol. This index is the
symbol code. If the symbol is not found in the list, an
:class:`AlphabetError` is raised.
Internally, a dictionary is used for encoding, with symbols as keys
and symbol codes as values. Therefore, every symbol must be
hashable. For decoding the symbol list is indexed with the symbol
code.
If an alphabet *1* contains the same symbols and the same
    symbol-code-mappings as another alphabet *2*, but alphabet *1*
    also introduces new symbols, then alphabet *1* *extends* alphabet
*2*.
Per definition, every alphabet also extends itself.
Objects of this class are immutable.
Parameters
----------
symbols : iterable object
The symbols, that are allowed in this alphabet. The
corresponding code for a symbol, is the index of that symbol
in this list.
Examples
--------
Create an Alphabet containing DNA letters and encode/decode a
letter/code:
>>> alph = Alphabet(["A","C","G","T"])
>>> print(alph.encode("G"))
2
>>> print(alph.decode(2))
G
>>> try:
... alph.encode("foo")
... except Exception as e:
... print(e)
Symbol 'foo' is not in the alphabet
Create an Alphabet of arbitrary objects:
>>> alph = Alphabet(["foo", 42, (1,2,3), 5, 3.141])
>>> print(alph.encode((1,2,3)))
2
>>> print(alph.decode(4))
3.141
On the subject of alphabet extension:
An alphabet always extends itself.
>>> Alphabet(["A","C","G","T"]).extends(Alphabet(["A","C","G","T"]))
True
An alphabet extends an alphabet when it contains additional symbols...
>>> Alphabet(["A","C","G","T","U"]).extends(Alphabet(["A","C","G","T"]))
True
...but not vice versa
>>> Alphabet(["A","C","G","T"]).extends(Alphabet(["A","C","G","T","U"]))
False
Two alphabets with same symbols but different symbol-code-mappings
>>> Alphabet(["A","C","G","T"]).extends(Alphabet(["A","C","T","G"]))
False
"""
def __init__(self, symbols):
if len(symbols) == 0:
raise ValueError("Symbol list is empty")
self._symbols = copy.deepcopy(list(symbols))
self._symbol_dict = {}
for i, symbol in enumerate(symbols):
self._symbol_dict[symbol] = i
def get_symbols(self):
"""
Get the symbols in the alphabet.
Returns
-------
symbols : list
Copy of the internal list of symbols.
"""
return copy.deepcopy(self._symbols)
def extends(self, alphabet):
"""
Check, if this alphabet extends another alphabet.
Parameters
----------
alphabet : Alphabet
The potential parent alphabet.
Returns
-------
result : bool
True, if this object extends `alphabet`, false otherwise.
"""
if alphabet is self:
return True
elif len(alphabet) > len(self):
return False
else:
return list(alphabet.get_symbols()) \
== list(self.get_symbols()[:len(alphabet)])
def encode(self, symbol):
"""
Use the alphabet to encode a symbol.
Parameters
----------
symbol : object
The object to encode into a symbol code.
Returns
-------
code : int
The symbol code of `symbol`.
Raises
------
AlphabetError
If `symbol` is not in the alphabet.
"""
try:
return self._symbol_dict[symbol]
except KeyError:
raise AlphabetError(
f"Symbol {repr(symbol)} is not in the alphabet"
)
def decode(self, code):
"""
Use the alphabet to decode a symbol code.
Parameters
----------
code : int
The symbol code to be decoded.
Returns
-------
symbol : object
The symbol corresponding to `code`.
Raises
------
AlphabetError
If `code` is not a valid code in the alphabet.
"""
if code < 0 or code >= len(self._symbols):
raise AlphabetError(f"'{code:d}' is not a valid code")
return self._symbols[code]
def encode_multiple(self, symbols, dtype=np.int64):
"""
Encode a list of symbols.
Parameters
----------
symbols : array-like
The symbols to encode.
dtype : dtype, optional
The dtype of the output ndarray. (Default: `int64`)
Returns
-------
code : ndarray
The sequence code.
"""
return np.array([self.encode(e) for e in symbols], dtype=dtype)
def decode_multiple(self, code):
"""
Decode a sequence code into a list of symbols.
Parameters
----------
code : ndarray
The sequence code to decode.
Returns
-------
symbols : list
The decoded list of symbols.
"""
return [self.decode(c) for c in code]
def is_letter_alphabet(self):
"""
Check whether the symbols in this alphabet are single printable
letters.
If so, the alphabet could be expressed by a `LetterAlphabet`.
Returns
-------
is_letter_alphabet : bool
True, if all symbols in the alphabet are 'str' or 'bytes',
have length 1 and are printable.
"""
for symbol in self:
if not isinstance(symbol, (str, bytes)) \
or len(symbol) > 1:
return False
if isinstance(symbol, str):
symbol = symbol.encode("ASCII")
            if symbol not in LetterAlphabet.PRINTABLES:
return False
return True
def __str__(self):
return str(self.get_symbols())
def __len__(self):
return len(self.get_symbols())
def __iter__(self):
return self.get_symbols().__iter__()
def __contains__(self, symbol):
return symbol in self.get_symbols()
def __hash__(self):
return hash(tuple(self._symbols))
def __eq__(self, item):
if item is self:
return True
if not isinstance(item, Alphabet):
return False
return self.get_symbols() == item.get_symbols()
class LetterAlphabet(Alphabet):
"""
    :class:`LetterAlphabet` is an :class:`Alphabet` subclass
specialized for letter based alphabets, like DNA or protein
sequence alphabets.
The alphabet size is limited to the 94 printable, non-whitespace
characters.
Internally the symbols are saved as `bytes` objects.
The encoding and decoding process is a lot faster than for a
normal :class:`Alphabet`.
The performance gain comes through the use of *NumPy* and *Cython*
for encoding and decoding, without the need of a dictionary.
Parameters
----------
symbols : iterable object or str or bytes
The symbols, that are allowed in this alphabet. The
corresponding code for a symbol, is the index of that symbol
in this list.
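    Examples
    --------
    A minimal usage sketch (assuming the standard DNA letters):

    >>> alph = LetterAlphabet(["A", "C", "G", "T"])
    >>> print(alph.encode_multiple("TAG"))
    [3 0 2]
    >>> print("".join(alph.decode_multiple(np.array([3, 0, 2]))))
    TAG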
"""
    PRINTABLES = (string.digits + string.ascii_letters + string.punctuation) \
                 .encode("ASCII")
def __init__(self, symbols):
if len(symbols) == 0:
raise ValueError("Symbol list is empty")
self._symbols = []
for symbol in symbols:
if not isinstance(symbol, (str, bytes)) or len(symbol) > 1:
raise ValueError(f"Symbol '{symbol}' is not a single letter")
if isinstance(symbol, str):
symbol = symbol.encode("ASCII")
            if symbol not in LetterAlphabet.PRINTABLES:
                raise ValueError(
                    f"Symbol {repr(symbol)} is not printable, or is whitespace"
)
self._symbols.append(symbol)
# Direct 'astype' conversion is not allowed by numpy
# -> frombuffer()
self._symbols = np.frombuffer(
np.array(self._symbols, dtype="|S1"),
dtype=np.ubyte
)
def get_symbols(self):
"""
Get the symbols in the alphabet.
Returns
-------
symbols : list
Copy of the internal list of symbols.
"""
return [symbol.decode("ASCII") for symbol
in self._symbols_as_bytes()]
def encode(self, symbol):
if not isinstance(symbol, (str, bytes)) or len(symbol) > 1:
raise AlphabetError(f"Symbol '{symbol}' is not a single letter")
indices = np.where(self._symbols == ord(symbol))[0]
if len(indices) == 0:
raise AlphabetError(
f"Symbol {repr(symbol)} is not in the alphabet"
)
return indices[0]
def decode(self, code, as_bytes=False):
if code < 0 or code >= len(self._symbols):
raise AlphabetError(f"'{code:d}' is not a valid code")
return chr(self._symbols[code])
def encode_multiple(self, symbols, dtype=None):
"""
Encode multiple symbols.
Parameters
----------
symbols : iterable object of str or iterable object of bytes
The symbols to encode. The method is fastest when a
:class:`ndarray`, :class:`str` or :class:`bytes` object
containing the symbols is provided, instead of e.g. a list.
dtype : dtype, optional
For compatibility with superclass. The value is ignored
Returns
-------
code : ndarray
The sequence code.
"""
if isinstance(symbols, str):
symbols = np.frombuffer(symbols.encode("ASCII"), dtype=np.ubyte)
elif isinstance(symbols, bytes):
symbols = np.frombuffer(symbols, dtype=np.ubyte)
elif isinstance(symbols, np.ndarray):
symbols = np.frombuffer(
symbols.astype(dtype="|S1"), dtype=np.ubyte
)
else:
symbols = np.frombuffer(
np.array(list(symbols), dtype="|S1"),
dtype=np.ubyte
)
return encode_chars(alphabet=self._symbols, symbols=symbols)
def decode_multiple(self, code, as_bytes=False):
"""
Decode a sequence code into a list of symbols.
Parameters
----------
code : ndarray, dtype=uint8
The sequence code to decode.
Works fastest if an :class:`ndarray` is provided.
as_bytes : bool, optional
If true, the output array will contain `bytes`
(dtype 'S1').
            Otherwise, the output array will contain `str`
(dtype 'U1').
Returns
-------
symbols : ndarray, dtype='U1' or dtype='S1'
The decoded list of symbols.
"""
if not isinstance(code, np.ndarray):
code = np.array(code, dtype=np.uint8)
code = code.astype(np.uint8, copy=False)
symbols = decode_to_chars(alphabet=self._symbols, code=code)
        # Symbols must be converted from 'np.ubyte' to '|S1'
symbols = np.frombuffer(symbols, dtype="|S1")
if not as_bytes:
symbols = symbols.astype("U1")
return symbols
def __contains__(self, symbol):
if not isinstance(symbol, (str, bytes)):
return False
return ord(symbol) in self._symbols
def __len__(self):
return len(self._symbols)
def _symbols_as_bytes(self):
"Properly convert from dtype 'np.ubyte' to '|S1'"
return np.frombuffer(self._symbols, dtype="|S1")
class AlphabetMapper(object):
"""
This class is used for symbol code conversion from a source
alphabet into a target alphabet.
This means that the symbol codes are converted from one to another
alphabet so that the symbol itself is preserved.
This class works for single symbol codes or an entire sequence code
likewise.
Parameters
----------
source_alphabet, target_alphabet : Alphabet
The codes are converted from the source alphabet into the
target alphabet.
The target alphabet must contain at least all symbols of the
source alphabet, but it is not required that the shared symbols
are in the same order.
Examples
--------
>>> source_alph = Alphabet(["A","C","G","T"])
>>> target_alph = Alphabet(["T","U","A","G","C"])
>>> mapper = AlphabetMapper(source_alph, target_alph)
>>> print(mapper[0])
2
>>> print(mapper[1])
4
>>> print(mapper[[1,1,3]])
[4 4 0]
>>> in_sequence = GeneralSequence(source_alph, "GCCTAT")
>>> print(in_sequence.code)
[2 1 1 3 0 3]
>>> print(in_sequence)
GCCTAT
>>> out_sequence = GeneralSequence(target_alph)
>>> out_sequence.code = mapper[in_sequence.code]
>>> print(out_sequence.code)
[3 4 4 0 2 0]
>>> print(out_sequence)
GCCTAT
"""
def __init__(self, source_alphabet, target_alphabet):
if target_alphabet.extends(source_alphabet):
self._necessary_mapping = False
else:
self._necessary_mapping = True
self._mapper = np.zeros(
len(source_alphabet),
dtype=AlphabetMapper._dtype(len(target_alphabet))
)
for old_code in range(len(source_alphabet)):
symbol = source_alphabet.decode(old_code)
new_code = target_alphabet.encode(symbol)
self._mapper[old_code] = new_code
def __getitem__(self, code):
if isinstance(code, Integral):
if self._necessary_mapping:
return self._mapper[code]
else:
return code
if not isinstance(code, np.ndarray) \
or code.dtype not in (np.uint8, np.uint16, np.uint32, np.uint64):
code = np.array(code, dtype=np.uint64)
if self._necessary_mapping:
mapped_code = np.empty(len(code), dtype=self._mapper.dtype)
map_sequence_code(
self._mapper,
code,
mapped_code
)
return mapped_code
else:
return code
@staticmethod
def _dtype(alphabet_size):
        _size_uint8 = np.iinfo(np.uint8).max + 1
        _size_uint16 = np.iinfo(np.uint16).max + 1
        _size_uint32 = np.iinfo(np.uint32).max + 1
if alphabet_size <= _size_uint8:
return np.uint8
elif alphabet_size <= _size_uint16:
return np.uint16
elif alphabet_size <= _size_uint32:
return np.uint32
else:
return np.uint64
class AlphabetError(Exception):
"""
This exception is raised, when a code or a symbol is not in an
:class:`Alphabet`.
"""
pass
|
[
"patrick.kunzm@gmail.com"
] |
patrick.kunzm@gmail.com
|
5a8f1beee8e1464524dd3fe03ee4b58d19ae0e88
|
f4916957a4fd3165fb91369635f437b8b542e930
|
/importer.py
|
51f370b379004032ac8a94878ef56e84cc527253
|
[] |
no_license
|
jcongithub/oldmando
|
ae0bc296db9523060c4814354898cb5245d4a1ba
|
37d109c2238f111f0a148a60be03bb12bf2e6070
|
refs/heads/master
| 2021-06-14T18:00:27.760743
| 2021-02-06T13:18:33
| 2021-02-06T13:18:33
| 56,347,831
| 0
| 0
| null | 2021-02-06T13:18:34
| 2016-04-15T20:27:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
import os
import pandas as pd
from stock import *
import sys
from os.path import isfile, join
from datetime import datetime
from datetime import date
import time
import dao
def strfftime(s, f1, f2):
return time.strftime(f2, time.strptime(s, f1))
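# For example (hypothetical values), strfftime('2021-02-06', '%Y-%m-%d', '%Y%m%d')
# re-parses the date string with the first format and returns '20210206'.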
def import_all_prices():
for stock in Stock.list():
ticker = stock['ticker']
file_name = 'data/' + ticker + '.price.csv'
if(isfile(file_name)):
print('Importing {} prices from {}'.format(ticker, file_name))
df = pd.read_csv(file_name)
df = df.rename(columns={'Date' : 'date',
'Open' : 'open',
'High' : 'high',
'Low' : 'low',
'Close' : 'close',
'Volume' : 'volume'})
df['date'] = df.apply(lambda row : strfftime(row['date'], '%Y-%m-%d', '%Y%m%d'), axis=1)
print(df)
dao.save_price_history(ticker, df.T.to_dict().values())
def import_all_earnings():
for stock in Stock.list():
ticker = stock['ticker']
file_name = 'data/' + ticker + '.earning.csv'
if(isfile(file_name)):
print('Importing {} prices from {}'.format(ticker, file_name))
df = pd.read_csv(file_name)
print(df)
dao.save_earning_history(ticker, df.T.to_dict().values())
|
[
"jc100102@gmail.com"
] |
jc100102@gmail.com
|
f299b233b68ede04a2e0da4d1289b98b42b3455d
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS_SHOffer/YCHF_KCBYCHF_OMS_SHOffer_119.py
|
58eddaab5403d714716228dbb7cb1f33cb41d752
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,567
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_SHOffer_119(xtp_test_case):
def setUp(self):
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_SHOffer_119')
#clear_data_and_restart_all()
#Api.trade.Logout()
#Api.trade.Login()
pass
#
def test_YCHF_KCBYCHF_OMS_SHOffer_119(self):
        title = 'Restart the SH offer gateway first, then restart OMS (SH-A own-side best price: tick-by-tick fills, cumulative fill amount < cumulative fill fees)'
        # Define the expected values for the current test case
        # Expected status: initial, unfilled, partially filled, fully filled, partial-cancel
        # reported, partially cancelled, reported pending cancel, cancelled, rejected,
        # cancel-rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need no changes
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': queryOrderErrorMsg(0),
'是否生成报单': '是',
'是否是撤废': '否',
# '是否是新股申购': '',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameter information ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status,
        # side (B = buy, S = sell), expected status, Api
stkparm = QueryStkPriceQty('688011', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'报单测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
print(stkparm['错误原因'])
self.assertEqual(rs['报单测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':5,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': 300,
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('Execution result: ' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            ## Restore available funds
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
#oms_restart()
self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
ad171f7120c9bab591359ee02d1c9584203a680a
|
b04318d794d1e77362b4a7b35fa2e8c7764aa60f
|
/test/core/protocol/connection/test_wss_core.py
|
bbe7244fb9de44ec3bca35f7822dab1ad4c81e36
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-iot-device-sdk-python
|
c4a5d99ecdf483d67b3704f58cb7238bc1959238
|
f78e330bfc4f007be5ecbd269b2429718e9b25e2
|
refs/heads/master
| 2023-08-01T16:42:57.576372
| 2023-07-20T21:17:22
| 2023-07-20T21:17:22
| 60,723,336
| 761
| 498
|
Apache-2.0
| 2023-07-20T21:17:24
| 2016-06-08T19:06:12
|
Python
|
UTF-8
|
Python
| false
| false
| 13,172
|
py
|
from test.sdk_mock.mockSecuredWebsocketCore import mockSecuredWebsocketCoreNoRealHandshake
from test.sdk_mock.mockSecuredWebsocketCore import MockSecuredWebSocketCoreNoSocketIO
from test.sdk_mock.mockSecuredWebsocketCore import MockSecuredWebSocketCoreWithRealHandshake
from test.sdk_mock.mockSSLSocket import mockSSLSocket
import struct
import socket
import pytest
try:
from configparser import ConfigParser # Python 3+
except ImportError:
from ConfigParser import ConfigParser
class TestWssCore:
# Websocket Constants
_OP_CONTINUATION = 0x0
_OP_TEXT = 0x1
_OP_BINARY = 0x2
_OP_CONNECTION_CLOSE = 0x8
_OP_PING = 0x9
_OP_PONG = 0xa
def _generateStringOfAs(self, length):
ret = ""
for i in range(0, length):
ret += 'a'
return ret
def _printByteArray(self, src):
for i in range(0, len(src)):
print(hex(src[i]))
print("")
def _encodeFrame(self, rawPayload, opCode, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=1):
ret = bytearray()
# FIN+RSV1+RSV2+RSV3
F = (FIN & 0x01) << 3
R1 = (RSV1 & 0x01) << 2
R2 = (RSV2 & 0x01) << 1
R3 = (RSV3 & 0x01)
FRRR = (F | R1 | R2 | R3) << 4
# Op byte
opByte = FRRR | opCode
ret.append(opByte)
# Payload Length bytes
maskBit = masked
payloadLength = len(rawPayload)
if payloadLength <= 125:
ret.append((maskBit << 7) | payloadLength)
elif payloadLength <= 0xffff: # 16-bit unsigned int
ret.append((maskBit << 7) | 126)
ret.extend(struct.pack("!H", payloadLength))
elif payloadLength <= 0x7fffffffffffffff: # 64-bit unsigned int (most significant bit must be 0)
ret.append((maskBit << 7) | 127)
ret.extend(struct.pack("!Q", payloadLength))
else: # Overflow
raise ValueError("Exceeds the maximum number of bytes for a single websocket frame.")
if maskBit == 1:
# Mask key bytes
maskKey = bytearray(b"1234")
ret.extend(maskKey)
# Mask the payload
payloadBytes = bytearray(rawPayload)
if maskBit == 1:
for i in range(0, payloadLength):
payloadBytes[i] ^= maskKey[i % 4]
ret.extend(payloadBytes)
# Return the assembled wss frame
return ret
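    # For reference, a sketch of the frame layout _encodeFrame produces for a
    # short unmasked binary payload b"ab" (FIN=1, opcode=0x2):
    #   byte 0: 0x82       -> FIN bit set | binary opcode
    #   byte 1: 0x02       -> mask bit clear | payload length 2
    #   bytes 2-3: "ab"    -> the raw payload
    # A masked frame additionally carries the 4-byte mask key before the
    # payload, and each payload byte is XOR-ed with the corresponding key byte.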
def setup_method(self, method):
self._dummySSLSocket = mockSSLSocket()
# Wss Handshake
def test_WssHandshakeTimeout(self):
self._dummySSLSocket.refreshReadBuffer(bytearray()) # Empty bytes to read from socket
with pytest.raises(socket.error):
self._dummySecuredWebsocket = \
MockSecuredWebSocketCoreNoSocketIO(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Constructor
def test_InvalidEndpointPattern(self):
with pytest.raises(ValueError):
self._dummySecuredWebsocket = MockSecuredWebSocketCoreWithRealHandshake(None, "ThisIsNotAValidIoTEndpoint!", 1234)
def test_BJSEndpointPattern(self):
bjsStyleEndpoint = "blablabla.iot.cn-north-1.amazonaws.com.cn"
unexpectedExceptionMessage = "Invalid endpoint pattern for wss: %s" % bjsStyleEndpoint
        # Garbage wss handshake response to ensure the test code gets past endpoint pattern validation
self._dummySSLSocket.refreshReadBuffer(b"GarbageWssHanshakeResponse")
try:
self._dummySecuredWebsocket = MockSecuredWebSocketCoreWithRealHandshake(self._dummySSLSocket, bjsStyleEndpoint, 1234)
except ValueError as e:
if str(e) == unexpectedExceptionMessage:
raise AssertionError("Encountered unexpected exception when initializing wss core with BJS style endpoint", e)
# Wss I/O
def test_WssReadComplete(self):
# Config mockSSLSocket to contain a Wss frame
rawPayload = b"If you can see me, this is good."
# The payload of this frame will be masked by a randomly-generated mask key
# securedWebsocketCore should be able to decode it and get the raw payload back
coolFrame = self._encodeFrame(rawPayload, self._OP_BINARY, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=0)
# self._printByteArray(coolFrame)
self._dummySSLSocket.refreshReadBuffer(coolFrame)
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Read it back:
readItBack = self._dummySecuredWebsocket.read(len(rawPayload)) # Basically read everything
assert rawPayload == readItBack
def test_WssReadFragmented(self):
rawPayloadFragmented = b"I am designed to be fragmented..."
# The payload of this frame will be masked by a randomly-generated mask key
# securedWebsocketCore should be able to decode it and get the raw payload back
stop1 = 4
stop2 = 9
coolFrame = self._encodeFrame(rawPayloadFragmented, self._OP_BINARY, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=0)
# self._printByteArray(coolFrame)
coolFramePart1 = coolFrame[0:stop1]
coolFramePart2 = coolFrame[stop1:stop2]
coolFramePart3 = coolFrame[stop2:len(coolFrame)]
# Config mockSSLSocket to contain a fragmented Wss frame
self._dummySSLSocket.setReadFragmented()
self._dummySSLSocket.addReadBufferFragment(coolFramePart1)
self._dummySSLSocket.addReadBufferFragment(coolFramePart2)
self._dummySSLSocket.addReadBufferFragment(coolFramePart3)
self._dummySSLSocket.loadFirstFragmented()
# In this way, reading from SSLSocket will result in 3 sslError, simulating the situation where data is not ready
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Read it back:
readItBack = bytearray()
while len(readItBack) != len(rawPayloadFragmented):
try:
# Will be interrupted due to faked socket I/O Error
# Should be able to read back the complete
readItBack += self._dummySecuredWebsocket.read(len(rawPayloadFragmented)) # Basically read everything
except:
pass
assert rawPayloadFragmented == readItBack
def test_WssReadlongFrame(self):
# Config mockSSLSocket to contain a Wss frame
rawPayloadLong = bytearray(self._generateStringOfAs(300), 'utf-8') # 300 bytes of raw payload, will use extended payload length bytes in encoding
# The payload of this frame will be masked by a randomly-generated mask key
# securedWebsocketCore should be able to decode it and get the raw payload back
coolFrame = self._encodeFrame(rawPayloadLong, self._OP_BINARY, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=0)
# self._printByteArray(coolFrame)
self._dummySSLSocket.refreshReadBuffer(coolFrame)
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Read it back:
readItBack = self._dummySecuredWebsocket.read(len(rawPayloadLong)) # Basically read everything
assert rawPayloadLong == readItBack
def test_WssReadReallylongFrame(self):
# Config mockSSLSocket to contain a Wss frame
# Maximum allowed length of a wss payload is greater than maximum allowed payload length of a MQTT payload
rawPayloadLong = bytearray(self._generateStringOfAs(0xffff + 3), 'utf-8') # 0xffff + 3 bytes of raw payload, will use extended payload length bytes in encoding
# The payload of this frame will be masked by a randomly-generated mask key
# securedWebsocketCore should be able to decode it and get the raw payload back
coolFrame = self._encodeFrame(rawPayloadLong, self._OP_BINARY, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=0)
# self._printByteArray(coolFrame)
self._dummySSLSocket.refreshReadBuffer(coolFrame)
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Read it back:
readItBack = self._dummySecuredWebsocket.read(len(rawPayloadLong)) # Basically read everything
assert rawPayloadLong == readItBack
def test_WssWriteComplete(self):
ToBeWritten = b"Write me to the cloud."
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Fire the write op
self._dummySecuredWebsocket.write(ToBeWritten)
ans = self._encodeFrame(ToBeWritten, self._OP_BINARY, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=1)
# self._printByteArray(ans)
assert ans == self._dummySSLSocket.getWriteBuffer()
def test_WssWriteFragmented(self):
ToBeWritten = b"Write me to the cloud again."
# Configure SSLSocket to perform interrupted write op
self._dummySSLSocket.setFlipWriteError()
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Fire the write op
with pytest.raises(socket.error) as e:
self._dummySecuredWebsocket.write(ToBeWritten)
assert "Not ready for write op" == e.value.strerror
lengthWritten = self._dummySecuredWebsocket.write(ToBeWritten)
ans = self._encodeFrame(ToBeWritten, self._OP_BINARY, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=1)
assert lengthWritten == len(ToBeWritten)
assert ans == self._dummySSLSocket.getWriteBuffer()
# Wss Client Behavior
def test_ClientClosesConnectionIfServerResponseIsMasked(self):
ToBeWritten = b"I am designed to be masked."
maskedFrame = self._encodeFrame(ToBeWritten, self._OP_BINARY, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=1)
self._dummySSLSocket.refreshReadBuffer(maskedFrame)
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Now read it back
with pytest.raises(socket.error) as e:
self._dummySecuredWebsocket.read(len(ToBeWritten))
assert "Server response masked, closing connection and try again." == e.value.strerror
# Verify that a closing frame from the client is on its way
closingFrame = self._encodeFrame(b"", self._OP_CONNECTION_CLOSE, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=1)
assert closingFrame == self._dummySSLSocket.getWriteBuffer()
def test_ClientClosesConnectionIfServerResponseHasReserveBitsSet(self):
ToBeWritten = b"I am designed to be masked."
maskedFrame = self._encodeFrame(ToBeWritten, self._OP_BINARY, FIN=1, RSV1=1, RSV2=0, RSV3=0, masked=1)
self._dummySSLSocket.refreshReadBuffer(maskedFrame)
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Now read it back
with pytest.raises(socket.error) as e:
self._dummySecuredWebsocket.read(len(ToBeWritten))
assert "RSV bits set with NO negotiated extensions." == e.value.strerror
# Verify that a closing frame from the client is on its way
closingFrame = self._encodeFrame(b"", self._OP_CONNECTION_CLOSE, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=1)
assert closingFrame == self._dummySSLSocket.getWriteBuffer()
def test_ClientSendsPONGIfReceivedPING(self):
PINGFrame = self._encodeFrame(b"", self._OP_PING, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=0)
self._dummySSLSocket.refreshReadBuffer(PINGFrame)
# Init securedWebsocket with this mockSSLSocket
self._dummySecuredWebsocket = \
mockSecuredWebsocketCoreNoRealHandshake(self._dummySSLSocket, "data.iot.region.amazonaws.com", 1234)
# Now read it back, this must be in the next round of paho MQTT packet reading
# Should fail since we only have a PING to read, it never contains a valid MQTT payload
with pytest.raises(socket.error) as e:
self._dummySecuredWebsocket.read(5)
assert "Not a complete MQTT packet payload within this wss frame." == e.value.strerror
# Verify that PONG frame from the client is on its way
PONGFrame = self._encodeFrame(b"", self._OP_PONG, FIN=1, RSV1=0, RSV2=0, RSV3=0, masked=1)
assert PONGFrame == self._dummySSLSocket.getWriteBuffer()
|
[
"bretambrose@gmail.com"
] |
bretambrose@gmail.com
|
57ccd580e954028d3885004fc6b730237e1067ec
|
969993314e660796aa8063af79d14b4600b1e20d
|
/eMail.py
|
2a471b773c5624123f890ceffc8a58d20da609f4
|
[
"MIT"
] |
permissive
|
fizxmike/qualApp
|
3be99213d05992f8db10ebb822b0c10acf30a2ac
|
5e4c2fe0afdf9c1232c9b249773ee6f8f0927d6d
|
refs/heads/master
| 2021-01-01T06:00:19.908687
| 2014-07-01T22:08:40
| 2014-07-01T22:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'qualapp@cart.ucsd.edu'
EMAIL_HOST_PASSWORD = ''
|
[
"michael@Dubuntu.(none)"
] |
michael@Dubuntu.(none)
|
fd5722882d2f08ef73a644e947fe0684bbd17301
|
6052b1d422ebcbfc0f169ec613b1f71da976bdb8
|
/Character_Builder/migrations/0002_character_race.py
|
7d69f316f7671c5f8b80396a11d79c635adb042a
|
[] |
no_license
|
NickJacksonDev/DnD-Manager
|
e777951268ca5f7a9a62dd9410446c5ee092158c
|
36f416e56770a2a8c60e93652c084e32f96f86fa
|
refs/heads/master
| 2020-04-21T04:14:34.279087
| 2019-04-08T16:45:44
| 2019-04-08T16:45:44
| 169,307,667
| 1
| 0
| null | 2019-04-08T16:45:45
| 2019-02-05T20:28:12
|
Python
|
UTF-8
|
Python
| false
| false
| 587
|
py
|
# Generated by Django 2.1.7 on 2019-04-08 02:08
import Character_Builder.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Character_Builder', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='character',
name='race',
field=models.ForeignKey(blank=True, default=Character_Builder.models.defaultRace, null=True, on_delete=django.db.models.deletion.PROTECT, to='Character_Builder.CharacterRace'),
),
]
|
[
"34148647+DelgadoJosh@users.noreply.github.com"
] |
34148647+DelgadoJosh@users.noreply.github.com
|
7b095086d4b76d1d8805eb23c5ec0a44ea477a7d
|
ea49dd7d31d2e0b65ce6aadf1274f3bb70abfaf9
|
/problems/0038_Count_and_Say/39_2nian.py
|
22f97ff32c00a6c181b30d8c2a7cc50d5bd7beb3
|
[] |
no_license
|
yychuyu/LeetCode
|
907a3d7d67ada9714e86103ac96422381e75d683
|
48384483a55e120caf5d8d353e9aa287fce3cf4a
|
refs/heads/master
| 2020-03-30T15:02:12.492378
| 2019-06-19T01:52:45
| 2019-06-19T01:52:45
| 151,345,944
| 134
| 331
| null | 2019-08-01T02:56:10
| 2018-10-03T01:26:28
|
C++
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
class Solution(object):
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
phrase = "1"
for i in range(n-1):
phrase = self.count(phrase)
return phrase
def count(self,phrase):
count=0
result=""
curr = phrase[0]
for i in phrase:
if(i!=curr):
result+=str(count)+curr
count=1
curr=i
else:
count+=1
result +=str(count)+curr
return result
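# A quick illustrative check: the first four terms of the count-and-say
# sequence are "1", "11", "21", "1211", so:
# print(Solution().countAndSay(4))  # -> "1211"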
|
[
"457649408@qq.com"
] |
457649408@qq.com
|
10ad04ad666ded682fd737b546dfa941204358d6
|
e438eedbb4aae62544f0a0de46b1bdff801147d1
|
/signature/recognition.py
|
34943680d4022a028a6939e3df8301a7770aecaa
|
[] |
no_license
|
colombmo/pattern-recognition
|
013bb35d016b3bb5c7ed41810c32c0aac9e0b4b2
|
3b40e92aa1fe6e1971e3a53bc8993bf532f67fc9
|
refs/heads/master
| 2021-01-23T06:10:23.790752
| 2017-05-29T11:17:20
| 2017-05-29T11:17:20
| 86,341,944
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,481
|
py
|
import numpy as np
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
import datetime
import os
# Read feature vectors from .txt
features = {}
enrollment = {}
previous = {}
# Get the names of the signature files
with open("users.txt", "r") as myfile:
lines = myfile.readlines()
# Load the enrollment data (genuine signatures)
for fn in lines:
filenum = fn.replace("\n", "")
enrollment[filenum]={}
for i in range(1, 6):
with open("enrollment/" + filenum + "-g-%02d.txt" % (i,), "r") as myfile:
lines = myfile.readlines()
line = 0
features = np.zeros((lines.__len__(), 5), dtype=np.float)
for l in lines:
feats = []
a = l.replace("\n", "").split(" ")
t = float(a[0])
x = float(a[1])
y = float(a[2])
pressure = float(a[3])
if line == 0:
vx = 0
vy = 0
else:
vx = float((x - previous['x']) / (t - previous['t']))
vy = float((y - previous['y']) / (t - previous['t']))
previous['t'] = t
previous['x'] = x
previous['y'] = y
feats.extend((x, y, vx, vy, pressure))
features[line] = feats
line = line + 1
enrollment[filenum][i] = features
# Loading verification data
verification = {}
for filename in os.listdir("verification/"):
with open("verification/" + filename, "r") as myfile:
lines = myfile.readlines()
linev = 0
features = np.zeros((lines.__len__(), 5), dtype=np.float)
for l in lines:
feats = []
a = l.replace("\n", "").split(" ")
t = float(a[0])
x = float(a[1])
y = float(a[2])
pressure = float(a[3])
if linev == 0:
vx = 0
vy = 0
else:
vx = float((x - previous['x']) / (t - previous['t']))
vy = float((y - previous['y']) / (t - previous['t']))
previous['t'] = t
previous['x'] = x
previous['y'] = y
feats.extend((x, y, vx, vy, pressure))
features[linev] = feats
linev = linev + 1
verification[filename.replace(".txt", "")] = features
print("Start: " + str(datetime.datetime.now().time()))
# Compute the mean distance between variations of genuine signatures for each author
mean_dist = {}
for author in enrollment:
mean_dist[author] = {}
dists = []
for var_i in enrollment[author]:
for var_j in enrollment[author]:
if int(var_i)<int(var_j):
dist, path = fastdtw(enrollment[author][var_i], enrollment[author][var_j], dist=euclidean)
dists.append(dist)
mean_dist[author] = np.mean(dists)
res = {}
predictions={}
threshold = 10000
# compute dissimilarity for each verification signature wrt the 5 genuine ones
for signature in verification:
predictions[signature]={}
dists = []
tempAuthor = signature.split("-")[0]
for genuine in enrollment[tempAuthor]:
dist, path = fastdtw(verification[signature], enrollment[tempAuthor][genuine], dist=euclidean)
dists.append(dist)
if len(dists) > 0:
res[signature] = np.mean(dists)
if abs(res[signature] - mean_dist[tempAuthor]) < threshold:
predictions[signature] = 'g'
else:
predictions[signature] = 'f'
with open("Predictions.txt", "w") as text_file:
for r in predictions:
text_file.write("%s %s\n" % (r , predictions[r]))
# Load transcriptions
transcriptions = {}
with open("gt.txt", "r") as myfile:
lines = myfile.readlines()
for l in lines:
a = l.replace("\n", "").split(" ")
transcriptions[a[0]] = a[1]
# # Compute average precision
# mean_precisions = []
# precisions = []
# tp = 0
# fp = 0
#
# for r in predictions:
# if transcriptions[r] == predictions[r]:
# tp = tp + 1
# else:
# fp = fp + 1
# precisions.append(tp / (tp + fp))
#
# if len(precisions) > 0:
# if np.mean(precisions) > 0:
# mean_precisions.append(np.mean(precisions))
#
# print("avg_precision: " + str(np.mean(precisions)))
#
# print("ratio")
# print("Average mean precision: " + str(np.mean(mean_precisions)))
# print("End: " + str(datetime.datetime.now().time()))
|
[
"egzon.syka@students.unibe.ch"
] |
egzon.syka@students.unibe.ch
|