blob_id stringlengths 40–40 | directory_id stringlengths 40–40 | path stringlengths 2–616 | content_id stringlengths 40–40 | detected_licenses listlengths 0–69 | license_type stringclasses 2 values | repo_name stringlengths 5–118 | snapshot_id stringlengths 40–40 | revision_id stringlengths 40–40 | branch_name stringlengths 4–63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k–686M ⌀ | star_events_count int64 0–209k | fork_events_count int64 0–110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2–10.3M | extension stringclasses 246 values | content stringlengths 2–10.3M | authors listlengths 1–1 | author_id stringlengths 0–212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8249cf00c2f599f554f4091d82a9228f753634ec
|
29b9c067361b5c9b9038bbec7efef31e680011c2
|
/projects/witcher.py
|
f189a57d1ae6a06dc9f922d290505cbf65b727f0
|
[
"MIT"
] |
permissive
|
KryoKorpz/Vector
|
a387b3e9e5a9762d3e81608e0857b96e9323cf22
|
e1a9dd4eec4a03027e66c5acab680dcc4937aa03
|
refs/heads/master
| 2020-11-30T07:08:35.471238
| 2020-01-01T21:21:41
| 2020-01-01T21:21:41
| 230,342,406
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
"""
Have Vector change face to witcher coin and play Valley of plenty
"""
import os
import sys
import time
try:
from PIL import Image
except ImportError:
sys.exit("Cannot import from PIL: Do `pip3 install --user Pillow` to install")
import anki_vector
from anki_vector.util import degrees
def main():
args = anki_vector.util.parse_command_args()
with anki_vector.Robot(args.serial) as robot:
robot.behavior.set_head_angle(degrees(45))
robot.behavior.set_lift_height(0.0)
current_directory = os.path.dirname(os.path.realpath(__file__))
image_path = os.path.join(current_directory, "..", "face_images", "witcher_coin.jpg")
# Load an image
image_file = Image.open(image_path)
# Convert the image to the format used by the Screen
print("Display image on Vector's face...")
screen_data = anki_vector.screen.convert_image_to_screen_data(image_file)
duration_s = 15
robot.screen.set_screen_with_image_data(screen_data, duration_s)
robot.audio.stream_wav_file("../sounds/coin.wav", 10)
robot.anim.play_animation_trigger('PRDemoGreeting')
if __name__ == "__main__":
main()
|
[
"jesse.reichel@live.com"
] |
jesse.reichel@live.com
|
21ecd7523521524e5e984511a9fc8da312c0cb2d
|
b25208ffef4dd656e781760b158ea299ea74c6a6
|
/newffunction.py
|
ecfc65a6f76a2d586aacc8c4f0b02034e80467ed
|
[] |
no_license
|
ks18212/pydatastructure
|
a168cc6cc7c14ec76d77c0fe6d0865df8748dffa
|
f094ed427af190ea096ab5796a3fe7dc3ccba75a
|
refs/heads/master
| 2020-12-30T17:51:06.695667
| 2017-05-13T02:59:28
| 2017-05-13T02:59:28
| 90,932,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
def Add(x, y):
return x+y
|
[
"ksriji@gmail.com"
] |
ksriji@gmail.com
|
0122fffd0f7a8f3db4a4e28afbd37fd70412b36f
|
cec64b38884a4ba25da7447d2290cef7b67a8467
|
/blog/migrations/0002_auto_20210330_1557.py
|
707a725d47cc175b51a5359cde426e458b779151
|
[] |
no_license
|
JannatTurdumbayeva/blog_molchanov
|
418ae666059eadd0b537c22c07a1911be1193a9c
|
e5aecd26739c3f076499a84f2adb849439e16b2c
|
refs/heads/master
| 2023-03-31T06:22:43.350725
| 2021-04-02T12:39:12
| 2021-04-02T12:39:12
| 354,013,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
# Generated by Django 3.1.7 on 2021-03-30 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(blank=True, related_name='posts', to='blog.Tag'),
),
]
|
[
"you@yourdomain.example.comjannelya@gmail.com"
] |
you@yourdomain.example.comjannelya@gmail.com
|
e468e099bcb970ff97fc6ad4c6274f326e0cc25e
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/third_party/blink/tools/try_flag.py
|
6bba6a55e3ae179c862d8ea4cf4aa7cf222d21a5
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!/usr/bin/env vpython
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Triggers and processes results from flag try jobs.
For more information, see: http://bit.ly/flag-try-jobs
"""
import sys
from blinkpy.web_tests import try_flag
if __name__ == '__main__':
sys.exit(try_flag.main())
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
9dd6da2c076305740bb57622c4a4fd9dd54adfd0
|
733d283389362b80d783f3bcdbcc6a51eec3f300
|
/src/recomb/len_llh.py
|
3c326bc97e1307e8a021df0553742010b119d6e8
|
[
"MIT"
] |
permissive
|
ngannguyen/aimseqtk
|
1abf40cc794c4c6c78b042cee18f02b430f85753
|
1ebaee3b927f7fb128de4a59b759c19fceeefb5b
|
refs/heads/master
| 2020-05-17T00:19:16.361068
| 2014-08-20T00:30:50
| 2014-08-20T00:30:50
| 23,130,513
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,890
|
py
|
'''Compute the likelihood of generating sequences of each specific length
'''
import os
import sys
from math import log10
import cPickle as pickle
import gzip
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from sonLib.bioio import system
import aimseqtk.lib.common as lcommon
import aimseqtk.src.recomb.recomb_common as rcommon
def get_union_children(indir):
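    # Collect the union of file names found under every sample subdirectory, skipping the entry named after the sample itself.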
children = []
for s in os.listdir(indir):
for file in os.listdir(os.path.join(indir, s)):
if file == s:
continue
if file not in children:
children.append(file)
return children
def get_lens(db_dir):
lens = []
for s in os.listdir(db_dir):
sdir = os.path.join(db_dir, s)
for v in os.listdir(sdir):
if v == s:
continue
vdir = os.path.join(sdir, v)
for l in os.listdir(vdir):
if l not in lens:
lens.append(l)
return lens
class GetLenLlh_VJ(Target):
def __init__(self, model, indir, j, outfile):
Target.__init__(self)
self.model = model
self.indir = indir
self.j = j
self.outfile = outfile
def run(self):
llhs = []
events = []
for sam in os.listdir(self.indir):
jfile = os.path.join(self.indir, sam, self.j)
if os.path.exists(jfile):
jclones = pickle.load(gzip.open(jfile, 'rb'))
for c in jclones:
if not rcommon.visited_event(events, c):
events.append(c)
clonellh = rcommon.ntclone_likelihood(c, self.model)
llhs.append(clonellh)
sumllh = sum([10 ** llh for llh in llhs])
pickle.dump(sumllh, gzip.open(self.outfile, 'wb'))
class GetLenLlh_V_Cleanup(Target):
def __init__(self, basename):
Target.__init__(self)
self.basename = basename
def run(self):
dbdir = "%s-db_jsplit" % self.basename
system("rm -Rf %s" % dbdir)
llhdir = "%s-llh_jsplit" % self.basename
system("rm -Rf %s" % llhdir)
class GetLenLlh_V_Agg(Target):
def __init__(self, indir, outfile):
Target.__init__(self)
self.indir = indir
self.outfile = outfile
def run(self):
sumllh = 0.0
for j in os.listdir(self.indir):
jfile = os.path.join(self.indir, j)
llh = pickle.load(gzip.open(jfile, 'rb'))
sumllh += llh
pickle.dump(sumllh, gzip.open(self.outfile, 'wb'))
self.setFollowOnTarget(GetLenLlh_V_Cleanup(self.outfile))
class GetLenLlh_V(Target):
def __init__(self, model, db_dir, v, l, outfile):
Target.__init__(self)
self.model = model
self.db_dir = db_dir
self.v = v
self.l = l
self.outfile = outfile
def run(self):
# split db clones by j
js = []
tempdir = "%s-db_jsplit" % self.outfile
system("mkdir -p %s" % tempdir)
for sam in os.listdir(self.db_dir):
infile = os.path.join(self.db_dir, sam, self.v, self.l)
if not os.path.exists(infile):
continue
clones = pickle.load(gzip.open(infile, "rb"))
j2clones = lcommon.split_clones_by_j(clones)
tempsamdir = os.path.join(tempdir, sam)
system('mkdir -p %s' % tempsamdir)
for j, jclones in j2clones.iteritems():
tempjfile = os.path.join(tempsamdir, j) # l/v-db_jsplit/sam/jfile
pickle.dump(jclones, gzip.open(tempjfile, 'wb'))
if j not in js:
js.append(j)
self.logToMaster("Done spliting clones by j for length %s V %s\n" %
(self.l, self.v))
tempoutdir = "%s-llh_jsplit" % self.outfile # l/v-llh_jsplit/jllh
system("mkdir -p %s" % tempoutdir)
for j in js:
joutfile = os.path.join(tempoutdir, j)
self.addChildTarget(GetLenLlh_VJ(self.model, tempdir, j, joutfile))
self.setFollowOnTarget(GetLenLlh_V_Agg(tempoutdir, self.outfile))
class GetLenLlhAgg(Target):
def __init__(self, indir, outfile):
Target.__init__(self)
self.indir = indir
self.outfile = outfile
def run(self):
llh = 0.0
for v in os.listdir(self.indir):
vfile = os.path.join(self.indir, v)
vllh = pickle.load(gzip.open(vfile, "rb"))
llh += vllh
if llh == 0:
log_llh = float('-inf')
else:
log_llh = log10(llh)
pickle.dump(log_llh, gzip.open(self.outfile, "wb"))
class GetLenLlh(Target):
def __init__(self, db_dir, length, model, outdir):
Target.__init__(self)
self.db_dir = db_dir
self.length = length
self.model = model
self.outdir = outdir
def run(self):
vs = get_union_children(self.db_dir)
for v in vs:
outfile = os.path.join(self.outdir, v)
self.addChildTarget(GetLenLlh_V(self.model, self.db_dir, v,
self.length, outfile))
aggfile = os.path.join(self.outdir, "%s.pickle" % self.length)
self.setFollowOnTarget(GetLenLlhAgg(self.outdir, aggfile))
class GetLenLlhsAgg(Target):
def __init__(self, indir, outfile):
Target.__init__(self)
self.indir = indir
self.outfile = outfile
def run(self):
f = open(self.outfile, 'w')
f.write("#Length\tLog_likelihood\n")
lens = sorted([int(l) for l in os.listdir(self.indir)])
for l in lens:
lfile = os.path.join(self.indir, str(l), "%s.pickle" % str(l))
llh = pickle.load(gzip.open(lfile, "rb"))
f.write("%d\t%f\n" % (l, llh))
f.close()
class GetLenLlhs(Target):
'''compute the likelihood of all clones:
global_dir/
length1/
v1
v2
...
length2/
...
...
'''
def __init__(self, db_dir, lens, model, outfile):
Target.__init__(self)
self.db_dir = db_dir
self.lens = lens
self.model = model
self.outfile = outfile
def run(self):
self.logToMaster("Starting to compute llh for each length...\n")
global_dir = self.getGlobalTempDir()
for l in self.lens:
outdir = os.path.join(global_dir, str(l))
system("mkdir -p %s" % outdir)
self.addChildTarget(GetLenLlh(self.db_dir, l, self.model, outdir))
self.setFollowOnTarget(GetLenLlhsAgg(global_dir, self.outfile))
class Setup(Target):
def __init__(self, db_dir, model_dir, outfile, options):
Target.__init__(self)
self.db_dir = db_dir
self.model_dir = model_dir
self.outfile = outfile
self.options = options
def run(self):
model = rcommon.get_median_model(self.model_dir)
self.logToMaster("Done computing median model.\n")
lens = get_lens(self.db_dir)
self.logToMaster("Done getting lengths: %s\n" %
",".join([str(l) for l in lens]))
self.addChildTarget(GetLenLlhs(self.db_dir, lens, model, self.outfile))
def main():
usage = "%prog <db_dir> <model_dir> <outfile> [options]"
parser = lcommon.init_options(usage)
Stack.addJobTreeOptions(parser)
options, args = parser.parse_args()
db_dir = args[0]
model_dir = args[1]
outfile = args[2]
i = Stack(Setup(db_dir, model_dir, outfile, options)).startJobTree(options)
if i:
raise RuntimeError("The jobtree contains %d failed jobs.\n" % i)
if __name__ == '__main__':
from aimseqtk.src.recomb.len_llh import *
main()
|
[
"nknguyen@soe.ucsc.edu"
] |
nknguyen@soe.ucsc.edu
|
aa1d41dd1307400c3f7b80cfd443c12f4c19343f
|
d49ff97be24539856f725e591bf2cff626e1a3d5
|
/electrumx/server/version.py
|
137eb2f1e8daa69b6a396da9a45f4c65059d08fa
|
[
"MIT"
] |
permissive
|
rootSig/electrumx-atom
|
3b75b11bd07bf4621a10022018cb3f3625b1eebf
|
c11b36cdf5acdb9129d7d4507a79cdd7d1408bcb
|
refs/heads/master
| 2020-03-28T13:25:35.654573
| 2018-09-04T19:47:42
| 2018-09-04T19:47:42
| 148,394,514
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
VERSION = 'ElectrumX 1.5.2'
|
[
"kyuupichan@gmail.com"
] |
kyuupichan@gmail.com
|
b5085bc81ecf945c3ae437698d35ee024d6096d1
|
76dba08689db40edf2d01a98856fa2a20d98d679
|
/Python:从入门到实践/从入门到实践代码/第15章 生成数据/15.4 使用Pygal模拟掷骰子/动动手/15-7 两个D8骰子.py
|
c407144e32e950fef7d8df457fa1c7e35db04ff1
|
[] |
no_license
|
pangfeiyo/PythonLearn
|
ce0747d75b53eb21acb6199acfe10934778420b2
|
b514b3d7baa62fa7b801d26ff49266f02cb9cbd2
|
refs/heads/master
| 2021-05-11T10:20:14.818774
| 2020-01-16T15:47:16
| 2020-01-16T15:47:16
| 118,096,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
# Simulate the result of rolling two 8-sided dice together 1000 times, then keep increasing the number of rolls until the system can no longer cope.
import pygal
# If the die module is not in the same directory, Python cannot find it, so add its folder to the module search path.
import sys
sys.path.append("..")  # parent directory
from die import Die
die_1 = Die(8)
die_2 = Die(8)
result = [die_1.roll() + die_2.roll() for roll_num in range(1000000)]
# Largest possible sum of the two dice
max_result = die_1.num_sides + die_2.num_sides
# Count how many times each sum occurs
frequencies = [result.count(value) for value in range(2, max_result+1)]
# Visualize the results
hist = pygal.Bar()
hist.title = "Results of rolling two D8 dice 1,000,000 times."
hist.x_labels = [str(x) for x in range(2, max_result+1)]
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
# Add the values to the chart
hist.add("D8 + D8", frequencies)
hist.render_to_file('15-7.svg')
|
[
"35551631+pangfeiyo@users.noreply.github.com"
] |
35551631+pangfeiyo@users.noreply.github.com
|
bdb43ab9e72cc2d066b3dee755eae5060bd6c4fe
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_267/ch76_2020_05_13_20_14_15_244931.py
|
b776d73d9f1c29cf528dbbcbba0d1d85863dcdcc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
def aniversariantes_de_setembro(dicio_nomes_birth):
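    # Return the name -> birth-date entries whose month field (characters 3-5 of the date string) is September ('09').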
new = dict()
    for n, i in dicio_nomes_birth.items():
if i[3:5] == '09':
new[n] = i
return new
|
[
"you@example.com"
] |
you@example.com
|
09ce368184cffcfcd74d912cf3a822f6f8e1d4ae
|
fd3c2071c2b74ade4d4938ee9a6e7141c9958c76
|
/faa.py
|
eae9d1eb447d649918dee9ad83cc6f9b658c4480
|
[] |
no_license
|
RaviKumar10052/Python-code-Practice
|
9f79889f1c963dd068eebfad440ddf8c5c669eef
|
406d18011a85f86154ccc49f14b3d6d1dd320aec
|
refs/heads/master
| 2020-07-19T23:32:17.178204
| 2019-09-05T10:06:10
| 2019-09-05T10:06:10
| 206,531,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
def keys(i, r):
    # combinations('ABCD', 2) --> AB AC AD BC BD CD
    # combinations(range(4), 3) --> 012 013 023 123
    p = tuple(i)
    n = len(p)
    if r == 0:
        return [()]
    result = []
    # Pair each element with every (r-1)-combination of the elements that follow it.
    for idx in range(0, n - r + 1):
        for rest in keys(p[idx + 1:], r - 1):
            result.append((p[idx],) + rest)
    return result
l = "bcdfghjklmnpqrstvwxyz"
print(keys(l, 2))
|
[
"noreply@github.com"
] |
RaviKumar10052.noreply@github.com
|
a7ee1c3be59a6b35c6bdf0c3e2c8573d743101f6
|
3bc79d5b5f16bc17b27c22246053850664870e6e
|
/class_inheritance.py
|
9cff52ed6469251ef0f1ec119a5deaf8219af943
|
[] |
no_license
|
OokGIT/pythonproj
|
379344fe332c2734d7d598eccfa9b6a88438e21f
|
cad5852527fae1347f7000e09546ebf7b3d9122f
|
refs/heads/master
| 2022-01-08T10:53:06.742045
| 2019-03-10T01:27:24
| 2019-03-10T01:27:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,726
|
py
|
class Animal(object):
def __init__(self, age):
self.age = age
self.name = None
def get_age(self):
return self.age
def get_name(self):
return self.name
def set_age(self, newage):
self.age = newage
def set_name(self, newname=""):
self.name = newname
def __str__(self):
return "animal:"+str(self.name)+":"+str(self.age)
class Cat(Animal):
def speak(self):
print(self.name, "says: ", "You are idiots!")
def __str__(self):
return "cat:"+str(self.name)+":"+str(self.age)
class Rabbit(Animal):
def speak(self):
print(self.name, "says: ", "You are doomed!")
def __str__(self):
return "rabbit:"+str(self.name)+":"+str(self.age)
class Human(Animal):
def __init__(self, name, age):
Animal.__init__(self, age)
Animal.set_name(self, name)
self.friends = []
def get_friends(self):
return self.friends
def add_friend(self, fname):
if fname not in self.friends:
self.friends.append(fname)
def speak(self):
print('Shut up and take my money')
def age_diff(self, other):
diff = self.get_age() - other.get_age()
if self.age > other.age:
print(self.name, "is", diff, "years older than", other.name)
else:
print(self.name, "is", -diff, "years younger than", other.name)
def __str__(self):
return "person:"+str(self.name)+":"+str(self.age)
hedgehog = Animal(42)
hedgehog.set_name("Bill")
print(hedgehog)
barsik = Cat(42)
barsik.set_name('Barsik')
barsik.speak()
print(barsik)
bart = Human("Bart", 55)
hanry = Human("Hanry", 42)
hanry.age_diff(bart)
hanry.speak()
print(hanry)
|
[
"slanjr@gmail.com"
] |
slanjr@gmail.com
|
5847abd56c78f21361e646d230bff446ff2e7ff2
|
be78478bc48bd836fe497f082d9dfe1675dfcc3e
|
/playbookquiz/wsgi.py
|
5c3afca36a35d82e993e2b30f156b49b56a02134
|
[] |
no_license
|
regold123/playbookquiz-django-react
|
137533341ec049802b0942e29f61ea74a5beaa6b
|
81ae64565d9c169f5e8ab10f5987d525c4f082f5
|
refs/heads/master
| 2023-01-10T00:37:02.862583
| 2020-11-15T10:29:07
| 2020-11-15T10:29:07
| 312,997,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for playbookquiz project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'playbookquiz.settings')
application = get_wsgi_application()
|
[
"ari@replyall.me"
] |
ari@replyall.me
|
5dca3ca15af2450c43355677a72edfd2e4ef807d
|
91fa98e3108e1360e63fe4d7f91f0530500f32cc
|
/src/hhash.py
|
a28ba2457c287ed97e3cd9584c723014d75b8654
|
[] |
no_license
|
ddmbr/Extendible-Hashing
|
c702d9594d7fc85cbcc25e204780485db3a08a0c
|
192f2fba8639ab7417a6e9b15c38ab6a442cfdc5
|
refs/heads/master
| 2021-01-10T20:33:07.097122
| 2012-05-23T10:41:30
| 2012-05-23T10:41:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,838
|
py
|
import re
import sys
import pygame as pg
from pygame import Rect
from time import sleep
DEBUG = 0
PCONTENT = 0
class Record:
FormatPatt = re.compile(r'(\d+)\|(\d+)\|')
def __init__(self, rawstr):
self.data = rawstr
self.orderkey, self.partkey = map(int, Record.FormatPatt.match(rawstr).groups())
def __cmp__(self, other):
return cmp(self.partkey, other.partkey)
def __repr__(self):
if PCONTENT:
return str((self.orderkey, self.partkey, '...', 'hv=%30s' % (bin(hash_func(self.orderkey, globalDepth))[2:])))
else:
return 'r'
class Bucket:
MaxSize = 8 * 1024
# MaxSize = 8 * 500
newBucketID = [2]
def __init__(self, id, depth):
if DEBUG: print 'bucket ', id, 'created'
self.depth = depth
self.id = id
self.page = []
self.sumLen = 0
def __repr__(self):
if PCONTENT:
return """%s\nBucket(id=%d, depth=%d, \n%s\n, len=%d) \n%s\n""" % ('-'*60,
self.id, self.depth, '\n'.join(map(str, self.get_records())), self.sumLen, '-'*60)
else:
return "Bucket(id=%d, depth=%d)"%(self.id, self.depth)
@staticmethod
def get_by_hvalue(hvalue):
return indexs[hvalue/Index.DictPerPage].d[hvalue%Index.DictPerPage]
def is_overflow(self, record):
return self.sumLen + len(record.data) > self.MaxSize
def write_record(self, record):
self.page.append(record)
self.sumLen += len(record.data)
def get_records(self):
return self.page
def insert(self, record):
bucket = self
while bucket.is_overflow(record):
bucket.split(hash_func(record.orderkey, globalDepth))
hvalue = hash_func(record.orderkey, globalDepth)
bucket = Bucket.get_by_hvalue(hvalue)
if DEBUG: print 'write record %s into Bucket(id=%d)' %(record, bucket.id)
bucket.write_record(record)
def split(self, hvalue):
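        # Split this bucket: double the directory first if its local depth already equals the global depth, then move records whose new hash bit is 1 into a fresh bucket.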
if DEBUG: print 'split bucket', self.id
if self.depth == globalDepth:
global PCONTENT
o = PCONTENT
PCONTENT = 1
print 'split %s cause double index' % self
double_index()
print indexs
PCONTENT = o
depth = self.depth
hvalue = hash_func(hvalue, depth) << 1
newHv = hvalue + 1
print 'depth=%d, old,new(hv) = \n %40s\n %40s'%(depth, bin(hvalue), bin(newHv))
self.depth += 1
depth = self.depth
# create new bucket
newBucket = Bucket(Bucket.newBucketID[0], depth)
Bucket.newBucketID[0] += 1
buckets.append(newBucket)
# create temp bucket
tempBucket = Bucket(-1, depth)
# relink new bucket to the indexs
for i in xrange(0, 2**globalDepth):
if Bucket.get_by_hvalue(i) == self:
if hash_func(i, depth) == hvalue:
set_dict(i, self)
else:
set_dict(i, newBucket)
# redistribute the buckets
for record in self.get_records():
if hash_func(record.orderkey, depth) == newHv:
newBucket.write_record(record)
else:
tempBucket.write_record(record)
self.page = tempBucket.page
self.sumLen = tempBucket.sumLen
GDB.update()
if DEBUG: print 'after split bucket(id=%d), n1=%d, n2=%d' %(self.id, len(self.get_records()), len(newBucket.get_records()))
if DEBUG: print 'split bucket %d end' % self.id
class Index:
DictPerPage = 2 * 1024
# DictPerPage = 100
def __init__(self, id):
self.id = id
self.d = [None for i in xrange(Index.DictPerPage)]
def __repr__(self):
if not PCONTENT:
n = 0
for i in self.d:
if i is not None:
n += 1
else:
break
return 'Index(id=%d)'%(self.id)
w = 1
s = []
s.append('-'*60)
s.append('Index(id=%d)' % self.id)
line = ''
for i, b in enumerate(self.d):
if b is None: break
line += str((i, b.id))
if (i + 1) % w == 0:
s.append(line)
line = ''
if line:
s.append(line)
s.append('-'*60)
return '\n'.join(s)
def init():
global globalDepth
globalDepth = 1
indexs.append(Index(0))
buckets.append(Bucket(0, 1))
buckets.append(Bucket(1, 1))
indexs[0].d[0] = buckets[0]
indexs[0].d[1] = buckets[1]
def parse(faddr):
global DEBUG
with open(faddr, 'r') as f:
for line in f:
record = Record(line.rstrip())
if DEBUG: print 'prepare to insert ', record
GDB.update()
hvalue = hash_func(record.orderkey, globalDepth)
bucket = Bucket.get_by_hvalue(hvalue)
bucket.insert(record)
if DEBUG: print 'after insert'
if DEBUG: print len(indexs), indexs
if DEBUG: print len(buckets), buckets
# print '%30s %30s usage: %.2f' %(len(indexs), len(buckets),
# sum(x.sumLen for x in buckets)*100./(len(buckets)*Bucket.MaxSize))
def query(faddr):
with open(faddr, 'r') as f:
n = int(f.readline())
for i in xrange(n):
key = int(f.readline())
bucket = Bucket.get_by_hvalue(hash_func(key, globalDepth))
ans = []
for record in bucket.get_records():
if record.orderkey == key:
ans.append(record)
ans.sort()
for record in ans:
print record.data
print -1
def mybin(x):
return bin(x)[2:]
def hash_func(key, depth):
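    # Build the hash from the top 'depth' bits of the key, starting at its highest set bit (within 29 bits); deepening by one bit keeps existing prefixes stable.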
j = 0
for i in xrange(29):
if key & (1 << i):
j = (1 << i)
ans = 0
for i in xrange(depth):
if (key & j) : ans = (ans << 1) + 1
else: ans = ans << 1
j >>= 1
print bin(key), bin(ans), depth
return ans
def double_index():
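    # Double the directory: each existing entry k is copied to the two new slots 2k and 2k+1, both still pointing at k's bucket.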
global globalDepth
n = 1 << globalDepth
print 'double index from %d' % n
for j in xrange(n, 2*n):
indexID2 = j / Index.DictPerPage
if indexID2 >= len(indexs):
indexs.append(Index(indexID2))
for k in xrange(n-1, -1, -1):
j = 2 * k + 1
i = 2 * k
set_dict(j, Bucket.get_by_hvalue(k))
set_dict(i, Bucket.get_by_hvalue(k))
GDB.update()
globalDepth += 1
if DEBUG: print 'after double'
if DEBUG: print 'globalDepth', globalDepth
if DEBUG: print 'indexs=', indexs
class GDB(): # graphical debugger
W, H = 1000, 500
FPS = 10
@staticmethod
def init():
GDB.screen = pg.display.set_mode((GDB.W, GDB.H), 0, 32)
GDB.timer = pg.time.Clock()
GDB.t = 0.
GDB.update()
@staticmethod
def update():
GDB.t += GDB.timer.tick()
if GDB.t >= 1000/GDB.FPS:
GDB.t = 0
else:
return
# sleep(0.02)
if DEBUG: print 'update screen'
bg = (0xff, 0xff, 0xff, 0xff)
idxColor = (0x00, 0x2f, 0x00, 0xff)
bucketColor = (0x1f, 0x1f, 0x00, 0xff)
screen = GDB.screen
screen.fill(bg)
x0, y0 = 10, 5
y1 = GDB.H - 5
Sep = 2
bucketW = min(20, max(1, GDB.W / len(buckets) - Sep))
idxW = min(20, max(1, GDB.W / len(indexs) - Sep))
# b.x = x0 + b.id * (bucketW + Sep)
# b.y = y0 - q * b.sumLen
# q = 0.02
dw = min((GDB.W + 0.) / len(buckets), (bucketW + Sep))
for b in buckets:
x = x0 + b.id * dw
h = 10 + 200./Bucket.MaxSize * b.sumLen
y = y1 - h
pg.draw.rect(screen, bucketColor, Rect((x, y), (bucketW, h)))
# idx.x = x0 + idx.id * (idxW + Sep)
# idx.y = y0 + 5
dw = min((GDB.W + 0.) / len(indexs), (idxW + Sep))
for index in indexs:
x = x0 + index.id * dw
y = y0
h = 10 + 100./Index.DictPerPage * len(index.d)
pg.draw.rect(screen, idxColor, Rect((x, y), (idxW, h)))
pg.display.flip()
def set_dict(index, bucket):
indexs[index / Index.DictPerPage].d[index % Index.DictPerPage] = bucket
globalDepth = 0
indexs = []
buckets = []
def main():
init()
GDB.init()
print indexs
print buckets
parse('lineitemcut.tbl')
query('testinput.in')
print 'finish'
print 'buckets', buckets
sys.stdout.flush()
while 1:
for e in pg.event.get():
if e.type == pg.QUIT:
exit(0)
elif e.type == pg.KEYDOWN:
GDB.update()
sleep(0.1)
if __name__ == '__main__':
main()
|
[
"ray040123@gmail.com"
] |
ray040123@gmail.com
|
a9cc6866def09742144858776e0a9934b2392345
|
147d121b736e56ca7a63e945bb8933de36203a0b
|
/logical question/error_hendaling.py
|
1ffe76e4c50b227f3eb6fba25daa36f50fcd7116
|
[] |
no_license
|
Nehajha99/Python
|
4473443038d0c07a3f59f7940c0a96fe21c2df4b
|
ae208d06b5c53cf3f3dd8df6015ead0009658c39
|
refs/heads/master
| 2023-07-13T21:21:02.174047
| 2021-08-23T19:56:00
| 2021-08-23T19:56:00
| 399,231,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
# list1=["123s","874w","675","890","789r"]
# new_list=[]
# for i in range(len(list1)):
# try:
# b=int(list1[i])
# new_list.append(b)
# except Exception:
# pass
# print(new_list)
ages = {'Jim': 30, 'Pam': 28, 'Kevin': 33}
person = input('Get age for: ')
try:
print(f'{person} is {ages[person]} years old.')
except KeyError:
print(f"{person}'s age is unknown.")
|
[
"neha20@navgurukul.org"
] |
neha20@navgurukul.org
|
8dcb7a190653d321a50aa5a3d6333a019407c168
|
a627060a0f443843f76170993018f6c2d8c42ca3
|
/util/modis_tile_cal.py
|
1004442c36e2fb121ed28a996bfcc2d60b700ae2
|
[] |
no_license
|
MarcYin/Global-analysis-ready-dataset
|
2547395581c26347278642b74c5a716403ea682e
|
e103a2d6175b44bbf813077ed3d845214a9ad2cf
|
refs/heads/master
| 2020-03-28T00:14:49.741952
| 2018-11-08T17:28:48
| 2018-11-08T17:28:48
| 147,388,318
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,580
|
py
|
#!/usr/bin/env python
import os
import osr
import ogr
import gdal
import numpy as np
'''
This is a function used for the calculation of MODIS
tile names from lat and lon coordinates. get_raster_hv
is used for the calculation of MODIS hv from a raster
file and get_vector_hv form a raster file
'''
x_step = -463.31271653
y_step = 463.31271653
m_y0, m_x0 = -20015109.354, 10007554.677
def get_raster_hv(example_file):
try:
g = gdal.Open(example_file)
except:
try:
g = example_file
g.GetGeoTransform()[0]
except:
raise IOError('aoi has to be raster file or a gdal object.')
geo_t = g.GetGeoTransform()
    x_size, y_size = g.RasterXSize, g.RasterYSize
wgs84 = osr.SpatialReference( ) # Define a SpatialReference object
wgs84.ImportFromEPSG( 4326 ) # And set it to WGS84 using the EPSG code
H_res_geo = osr.SpatialReference( )
raster_wkt = g.GetProjection()
H_res_geo.ImportFromWkt(raster_wkt)
tx = osr.CoordinateTransformation(H_res_geo, wgs84)
# so we need the four corners coordiates to check whether they are within the same modis tile
(ul_lon, ul_lat, ulz ) = tx.TransformPoint( geo_t[0], geo_t[3])
(lr_lon, lr_lat, lrz ) = tx.TransformPoint( geo_t[0] + geo_t[1]*x_size, \
geo_t[3] + geo_t[5]*y_size )
(ll_lon, ll_lat, llz ) = tx.TransformPoint( geo_t[0] , \
geo_t[3] + geo_t[5]*y_size )
(ur_lon, ur_lat, urz ) = tx.TransformPoint( geo_t[0] + geo_t[1]*x_size, \
geo_t[3] )
a0, b0 = None, None
corners = [(ul_lon, ul_lat), (lr_lon, lr_lat), (ll_lon, ll_lat), (ur_lon, ur_lat)]
tiles = []
for i,j in enumerate(corners):
h, v = mtile_cal(j[1], j[0])
tiles.append('h%02dv%02d'%(h,v))
unique_tile = np.unique(np.array(tiles))
return unique_tile
def get_vector_hv(aoi):
try:
og = ogr.Open(aoi)
except:
try:
og = aoi
l = og.GetLayer(0)
except:
raise IOError('aoi has to be vector file or a ogr object')
feature = og.GetLayer(0).GetFeature(0)
coordinates = feature.geometry().GetGeometryRef(-0).GetPoints()
tiles = []
for coordinate in coordinates:
h, v = mtile_cal(coordinate[1], coordinate[0])
tiles.append('h%02dv%02d'%(h,v))
unique_tile = np.unique(np.array(tiles))
return unique_tile
def mtile_cal(lat, lon):
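    # Project (lon, lat) from WGS84 to the MODIS sinusoidal grid and convert the metre coordinates into horizontal/vertical tile indices (2400 cells of ~463 m per tile).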
wgs84 = osr.SpatialReference( )
wgs84.ImportFromEPSG( 4326 )
modis_sinu = osr.SpatialReference()
sinu = "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs"
modis_sinu.ImportFromProj4 (sinu)
tx = osr.CoordinateTransformation( wgs84, modis_sinu)# from wgs84 to modis
ho,vo,z = tx.TransformPoint(lon, lat)# still use the function instead of using the equation....
h = int((ho-m_y0)/(2400*y_step))
v = int((vo-m_x0)/(2400*x_step))
return h,v
if __name__ == '__main__':
aoi = '/home/ucfafyi/DATA/S2_MODIS/l_data/LC08_L1TP_014034_20170831_20170915_01_T1/AOI.json'
example_file = '/home/ucfafyi/DATA/S2_MODIS/l_data/LC08_L1TP_014034_20170831_20170915_01_T1/aot.tif'
print(get_vector_hv(aoi))
print(get_vector_hv(ogr.Open(aoi)))
print(get_raster_hv(example_file))
print(get_raster_hv(gdal.Open(example_file)))
|
[
"marcollinbobo@gmail.com"
] |
marcollinbobo@gmail.com
|
e626f9bcf86cceb851f1eed94b0772987defa8ce
|
bc2cdb1e438efaf67131e975ac4db80b4dc43385
|
/src/private/activity/business/migrations/0007_accessalarmresult.py
|
229ec399c9747919cd062b9334ff2561d9c8716d
|
[] |
no_license
|
Shadow-linux/ops-for-study
|
cf4d55409ebc6f27d454bea60886cd154c994484
|
115b567948d25a64e423a6cdc89bc8337896afe2
|
refs/heads/master
| 2023-01-14T13:35:56.880896
| 2019-09-23T05:01:31
| 2019-09-23T05:01:31
| 209,781,758
| 2
| 0
| null | 2023-01-04T10:55:45
| 2019-09-20T12:08:11
|
Python
|
UTF-8
|
Python
| false
| false
| 875
|
py
|
# Generated by Django 2.0.1 on 2019-05-14 17:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('business', '0006_auto_20190514_1301'),
]
operations = [
migrations.CreateModel(
name='AccessAlarmResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('work_order', models.CharField(help_text='strategy uniq id', max_length=128, null=True, unique=True)),
('match_count', models.IntegerField(help_text='hit 的数目', null=True)),
('result_set', models.CharField(help_text='结果集', max_length=256, null=True)),
],
options={
'db_table': 'business_access_alarm_result',
},
),
]
|
[
"liangyedong@qipeipu.com"
] |
liangyedong@qipeipu.com
|
08ced66bbdf4f7ca827ce00c37edfa9b6475841f
|
5fc098414b45cf8f4cb0580e92af95aaccb675b0
|
/huffpsst/urls.py
|
f59757cf1e3d55e38d13f10cc70457a0e68eec0e
|
[] |
no_license
|
sachitad/huffingtonpsst
|
16e26b9197f6607cb7204bf3d56454688fed1f73
|
47ff9399c2142f49b07809dcdada88855d768322
|
refs/heads/master
| 2021-01-19T22:23:47.415738
| 2017-04-20T01:39:48
| 2017-04-20T01:39:48
| 88,807,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,753
|
py
|
# -*- coding: utf-8 -*-
import os.path
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from django.contrib import admin
from django.conf import settings
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailsearch.urls import frontend as wagtailsearch_frontend_urls
admin.autodiscover()
handler404 = 'huffpsst.views.errors.error404'
handler403 = 'huffpsst.views.errors.error403'
handler500 = 'huffpsst.views.errors.error500'
# Signal handlers
from wagtail.wagtailsearch import register_signal_handlers as wagtailsearch_register_signal_handlers
wagtailsearch_register_signal_handlers()
urlpatterns = patterns('',
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^search/', include(wagtailsearch_frontend_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^redactor/', include('redactor.urls')),
url('^markdown/', include('django_markdown.urls')),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}),
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
[
"sachit@qed.ai"
] |
sachit@qed.ai
|
726d5a3b31fa3abbfb051de91c55087b28816e66
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4072/codes/1578_1560.py
|
003ad9a0d0baed87e52716d0e8a1ef8139930790
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
string = ('Augusta Ada Byron, a Condessa de Lovelace, nasceu em 1815, na Inglaterra.')
print(string.lower())
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
49088c7dfe1f91e9c82dbf323b00876e28a2b585
|
b4dd9cd86fd0b802e38e8cc468752ad34cd1ee6c
|
/COEN171_Programming Languages/HW2/HW2.py
|
24427c99d51f37d58016fae3853f82a5188e065f
|
[] |
no_license
|
tchung777/School-Projects
|
c1a88c921856e016a7edb80ee526fe75087df1e6
|
c07fbab1200d37c4a45105458e8a11e19fa586b4
|
refs/heads/master
| 2021-05-16T01:01:36.543539
| 2019-03-17T05:30:20
| 2019-03-17T05:30:20
| 107,031,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,029
|
py
|
#!/usr/bin/python
from Tkinter import *
from random import randint
from time import sleep,time
from math import sqrt
H = 600
W = 800
scorePoint = 0
NUM_MINES = 10
BUB_CHANCE = 10
SHIP_RADIUS = 30;
SHIP_LIFE = 4;
GAP = 100
GAMEOVER = False
window = Tk()
window.title("Submarine Game")
c = Canvas(window, width = W, height = H, bg ="#CAE1FF")
p=PhotoImage(file="water.gif")
c.create_image(0,0, image=p)
c.pack()
parts = list()
#Draw a red submarine
ship_h = c.create_polygon(370,415,425,400,425,430,outline= "black", fill="red")
ship_c = c.create_oval(300,400,400,430,outline="black",fill="red")
ship_p = c.create_rectangle(350,387,360,400,outline= "black",fill="red")
ship_p2 = c.create_rectangle(340,387,350,390,outline= "black",fill="red")
parts.append(ship_h)
parts.append(ship_p)
parts.append(ship_p2)
#move the submarine
#move up
SHIP_SPD = 10
def move_ship_up(event):
x1,y1,x2,y2 = c.coords(ship_c)
if y1 - SHIP_SPD > 0 and not GAMEOVER:
c.move(ship_c,0,-SHIP_SPD)
for x in range(len(parts)):
c.move(parts[x],0,-SHIP_SPD)
window.bind("<Up>",move_ship_up)
#move down
SHIP_SPD1 = -10
def move_ship_down(event):
x1,y1,x2,y2 = c.coords(ship_c)
if y2 - SHIP_SPD < 640 and not GAMEOVER:
c.move(ship_c,0,-SHIP_SPD1)
for x in range(len(parts)):
c.move(parts[x],0,-SHIP_SPD1)
window.bind("<Down>",move_ship_down)
#move right
SHIP_SPD2 = 0
def move_ship_right(event):
x1,y1,x2,y2 = c.coords(ship_c)
if x1 - SHIP_SPD1 < 690 and not GAMEOVER:
c.move(ship_c,10,-SHIP_SPD2)
for x in range(len(parts)):
c.move(parts[x],10,-SHIP_SPD2)
window.bind("<Right>",move_ship_right)
#move left
SHIP_SPD3 = 0
def move_ship_left(event):
x1,y1,x2,y2 = c.coords(ship_c)
if x1 - SHIP_SPD1 > 10 and not GAMEOVER:
c.move(ship_c,-10,-SHIP_SPD3)
for x in range(len(parts)):
c.move(parts[x],-10,-SHIP_SPD3)
window.bind("<Left>",move_ship_left)
bubble_id=list()
bubble_r=list()
bubble_speed=list()
def create_mines():
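    # Spawn a new bubble/mine with a random x position, radius and speed at the bottom edge of the canvas.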
xCoord = randint(80,730)
yCoord = 640
radius = randint(10,30)
speed = randint(10,30)
bubble = c.create_oval(xCoord,yCoord,xCoord+2*radius,yCoord+2*radius, outline="blue")
bubble_r.append(radius)
bubble_speed.append(speed)
bubble_id.append(bubble)
def move_mines():
for x in range(len(bubble_id)):
c.move(bubble_id[x],0, -bubble_speed[x])
def delBubbles(index):
del bubble_r[index]
del bubble_speed[index]
c.delete(bubble_id[index])
del bubble_id[index]
def getCenter(id):
pos = c.coords(id)
centerX = (pos[0] + pos[2])/2
centerY = (pos[1] + pos[3])/2
return centerX, centerY
def distance(bubble_b):
x1,y1 = getCenter(ship_c)
x2,y2 = getCenter(bubble_b)
distance = sqrt((x2-x1)**2 + (y2-y1)**2)
return distance
def collision():
for x in range(len(bubble_id)-1,-1,-1):
d = distance(bubble_id[x]);
b = SHIP_RADIUS + bubble_r[x]
if d < b:
delBubbles(x)
global SHIP_LIFE
SHIP_LIFE-=1
updateShipLife()
def clean_up_bubbles():
for id in range(len(bubble_id)-1,-1,-1):
x,y = getCenter(bubble_id[id])
if y < -GAP:
delBubbles(id)
def updateShipLife():
if SHIP_LIFE == 0:
return
if SHIP_LIFE == 3:
c.delete(parts[2])
del parts[2]
if SHIP_LIFE == 2:
c.delete(parts[1])
del parts[1]
if SHIP_LIFE == 1:
c.delete(parts[0])
del parts[0]
c.create_text(150,30,text="Score",fill="black",font=('Comic Sans',30))
score_text = c.create_text(150,55,fill="black")
def show_score(score):
c.itemconfig(score_text,text=str(score),font=('Comic Sans',30))
def finalScore(score):
c.create_text(W/2, H/2, text='GAME OVER, YOUR SHIP EXPLODED!', fill='white',
font=('Comic Sans', 40))
c.create_text(W/2, H/2 + 30, text='Score: ' + str(score),
fill='white', font=('Comic Sans', 30))
while SHIP_LIFE > 0:
move_mines()
clean_up_bubbles()
    if scorePoint in (500, 1000, 2000):
        BUB_CHANCE /= 2
if randint(1,BUB_CHANCE) == 1:
create_mines()
#clean_up_mines()
collision()
scorePoint+=1
show_score(scorePoint)
window.update()
sleep(0.01)
GAMEOVER = True
finalScore(scorePoint)
window.mainloop()
|
[
"cchung@scu.edu"
] |
cchung@scu.edu
|
5b7eedd4d4d3e3d922da6372774b69cfacf3b6d9
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5756407898963968_0/Python/methane/magicktrick.py
|
22f320c4758561b273b14dfd3c86937f0c3fa0de
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
import sys
def read_rows():
return [[int(x) for x in sys.stdin.readline().split()] for _ in range(4)]
def solve(T):
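    # Read the volunteer's chosen row from each of the two 4x4 layouts and intersect them to find the selected card.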
n1 = int(sys.stdin.readline())
cand1 = read_rows()[n1-1]
n2 = int(sys.stdin.readline())
cand2 = read_rows()[n2-1]
cand = set(cand1) & set(cand2)
if len(cand) == 1:
print("Case #{}: {}".format(T, list(cand)[0]))
elif cand:
print("Case #{}: Bad magician!".format(T))
else:
print("Case #{}: Volunteer cheated!".format(T))
T = int(sys.stdin.readline())
for t in range(1, T+1):
solve(t)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
d9755d36d4ecea5aaaca5f900735f8fb14154b2d
|
1e36de4d741d56437dd285816f38e61878a82d09
|
/src/tests/test_quick_sort.py
|
44e57614f398f3f51e6b16f2e945ae5d2a93c103
|
[
"MIT"
] |
permissive
|
Woojgh/data-structure
|
a61fd5d1cfd5914156d3dcff8693886ec7cb35cb
|
f44bcb5950f26b5e098f1e25d11ad6e19cfb0eb1
|
refs/heads/master
| 2020-04-05T13:38:27.919769
| 2019-12-10T21:28:49
| 2019-12-10T21:28:49
| 94,933,670
| 0
| 0
|
MIT
| 2019-04-08T17:04:34
| 2017-06-20T20:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 973
|
py
|
"""Test that quick."""
import pytest
from random import randint
from quick_sort import quick_sort
to_sort = [
([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
([10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
]
def test_quick_non_list_raises_error():
"""Non list raises error."""
with pytest.raises(TypeError):
quick_sort('You didn\'t say the magic word')
def test_quick_non_int_raises_error():
"""Non-int raises error."""
with pytest.raises(ValueError):
quick_sort([1, 2, 3, 4, 'monkies'])
@pytest.mark.parametrize('input, output', to_sort)
def test_quick_sort_returns_ordered_list(input, output):
"""Quick sort returns an ordered list."""
assert quick_sort(input) == output
def test_quick_sort_sorts_random_list():
"""Quick sort returns an ordered list."""
input = [randint(0, 250) for i in range(100)]
output = sorted(input)
assert quick_sort(input) == output
|
[
"jamessalamonsen@gmail.com"
] |
jamessalamonsen@gmail.com
|
9e98e30afaa17a6351d8233793fac472df76b88c
|
ae97300ae6ee4c274ccefd86b565b876ddf558d2
|
/bs4_pyquery_lxml_exerices/xml_py2_2.py
|
fa1fecea1362ec16abe4313cd7f2110726f3e363
|
[] |
no_license
|
Mensyne/python_crawler
|
7d3eae531bcbff46fc256f342ac7819d4077558e
|
b883570fbeadaf9b07439a16229ebee61bb8e8e7
|
refs/heads/master
| 2022-12-16T10:59:37.522407
| 2020-09-08T06:45:54
| 2020-09-08T06:45:54
| 130,138,875
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
#-*- coding:utf-8 -*-
from lxml import etree
html = etree.parse('./hello.html')
print(type(html))
# Get all <li> tags
result = html.xpath("//li")
print(result)
print(type(result))
print(len(result))
print(type(result[0]))
# Get the class attribute of every <li> tag
result1 = html.xpath('//li/@class')
print(result1)
# Get the <a> tags under <li> whose href is "link1.html"
result2 = html.xpath('//li/a[@href="link1.html"]')
print(result2)
# Get all <span> tags under <li> tags.
# A single slash only selects direct children, and <span> is not a direct child of <li>, so a double slash is needed.
result3 = html.xpath('//li//span')
print(result3)
# Get all class attributes of the <a> tags under <li> tags
result4 = html.xpath('//li/a//@class')
print(result4)
# Get the href of the <a> in the last <li>
result5 = html.xpath('//li[last()]/a/@href')
print(result5)
# Get the content of the second-to-last <li> element
result6 = html.xpath('//li[last()-1]/a')
# .text returns the element's text content
print(result6[0].text)
# Get the tag whose class value is "bold"
result7 = html.xpath('//*[@class="bold"]')
# .tag gives the tag name
print(result7[0].tag)
|
[
"1379022527@qq.com"
] |
1379022527@qq.com
|
d44e7245ae198153aeebb6d5a664a5be23588dbe
|
db731f2ae2708b7f0b274d03f3a4aa3200b225bf
|
/app/controller.py
|
28a3b88a24fa4da64b52b35cef03374a779e51c3
|
[] |
no_license
|
AmyBrowneDesigns/CC_Flask_Event_Lab
|
c87fedcc1542a88dfa3f3d09beca6c4c7c33631e
|
aa2a3b405881b5bc32f57b98aaa50b74a8eb419b
|
refs/heads/master
| 2022-12-20T13:28:58.813319
| 2020-09-24T17:15:35
| 2020-09-24T17:15:35
| 298,345,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
from app import app
from flask import render_template, request, redirect
from app.models.events_list import events, add_new_event
from app.models.event import *
@app.route('/')
def index():
# return "hello"
return render_template("index.html", title= "Home", events=events)
@app.route('/add-event', methods=['POST'])
def add_event():
event_date= request.form["date"]
event_name_of_event = request.form["name_of_event"]
event_num_of_guest = request.form["num_of_guest"]
event_room_location = request.form["room_location"]
event_description = request.form["description"]
    new_event = Event(event_date, event_name_of_event, event_num_of_guest, event_room_location, event_description)
    add_new_event(new_event)
return redirect('/')
|
[
"amybrowne20@gmail.com"
] |
amybrowne20@gmail.com
|
ab48d8f590363579e1895eb26a3fea33faff1879
|
e3e150079a617b96e8ee6305adca0e862ee93db3
|
/coco-dst/classifier_filter/bert_filter.py
|
d3d500c7d822e7f13e59fac0da213deb333ee28f
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
Janeey99/coco-dst
|
1e2c84cd03ebb3a18f9541dde6f126f243c4aca1
|
f50868d0d8349b522dd070c667d58da1788e27e4
|
refs/heads/main
| 2023-08-18T06:11:19.230921
| 2021-10-12T19:05:00
| 2021-10-12T19:05:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,274
|
py
|
"""
Copyright (c) 2020, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import time
import csv
import logging
import random
import numpy as np
import torch
from classifier_filter.run_filter import *
from transformers import BertConfig, BertTokenizer
from classifier_filter.modeling import BertForMultiLabelSequenceClassification
import json
class DataProcessor():
def __init__(self,path):
self.data = self.load_data(path)
def load_data(self,path):
multi_label_data = {}
with open(path) as f:
data = json.load(f)
for dial in data:
dialog_history = ""
for idx, turn in enumerate(dial["dialogue"]):
label_list = []
turn_domain = turn["domain"]
text_a = dialog_history
text_b = turn["system_transcript"]
dialog_history = dialog_history+" "+turn["system_transcript"]+" "+ turn["transcript"]
dialog_history = dialog_history.strip()
multi_label_data[dial["dialogue_idx"]+str(idx)] = {"text_a":text_a,
"text_b":text_b,
"label_list":label_list}
return multi_label_data
def get_labels(self):
"""See base class."""
return ["attraction-area",
"attraction-name",
"attraction-type",
"hotel-area",
"hotel-book day",
"hotel-book people",
"hotel-book stay",
"hotel-internet",
"hotel-name",
"hotel-parking",
"hotel-pricerange",
"hotel-stars",
"hotel-type",
"restaurant-area",
"restaurant-book day",
"restaurant-book people",
"restaurant-book time",
"restaurant-food",
"restaurant-name",
"restaurant-pricerange",
"taxi-arriveby",
"taxi-departure",
"taxi-destination",
"taxi-leaveat",
"train-arriveby",
"train-book people",
"train-day",
"train-departure",
"train-destination",
"train-leaveat"]
def create_examples(self,dialogue_idx,turn_id,user_utters,turn_label):
examples = []
meta_info = self.data[dialogue_idx+str(turn_id)]
for user_utter in user_utters:
text_a = meta_info["text_a"]
text_b = meta_info["text_b"]+" "+user_utter
labels = []
for label in turn_label:
labels.append(label[0])
# print("text_a: ",text_a.strip())
# print("text_b: ",text_b.strip())
# print("*************************")
examples.append(InputExample(text_a=text_a.strip(),text_b = text_b.strip(),label=labels))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
tokens_c = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if example.text_c:
tokens_c = tokenizer.tokenize(example.text_c)
if tokens_c:
truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
tokens_b = tokens_b + ["[SEP]"] + tokens_c
elif tokens_b:
truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = len(label_map)*[0]
for label in example.label:
label_id[label_map[label]] = 1
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def convert_examples_to_tensor(examples,label_list,max_seq_length,tokenizer):
features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
input_ids = []
input_mask = []
segment_ids = []
label_id = []
for f in features:
input_ids.append(f.input_ids)
input_mask.append(f.input_mask)
segment_ids.append(f.segment_ids)
label_id.append([f.label_id])
all_input_ids = torch.tensor(input_ids, dtype=torch.long)
all_input_mask = torch.tensor(input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
all_label_ids = torch.tensor(label_id, dtype=torch.float32)
data = (all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return data
class BERTFilter(object):
def __init__(self,data_file):
self.processor = DataProcessor(data_file)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.label_list = self.processor.get_labels()
bert_config = BertConfig.from_pretrained("bert-base-uncased",num_labels=len(self.label_list))
self.max_seq_length = 512
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
self.model = BertForMultiLabelSequenceClassification.from_pretrained("bert-base-uncased",config = bert_config)
# import pdb;
# pdb.set_trace();
# import sys
self.model.load_state_dict(torch.load("./classifier_filter/filter/best_model.pt", map_location='cpu'))
self.model.to(self.device)
def query_filter(self,dialogue_idx,turn_id,user_utters,turn_label,thresh):
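        # Score each candidate user utterance with the slot classifier and report whether its predicted slots are a subset of the gold turn label.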
examples = self.processor.create_examples(dialogue_idx,turn_id,user_utters,turn_label)
data = convert_examples_to_tensor(examples, self.label_list, self.max_seq_length, self.tokenizer)
result = self.evaluation(data,thresh)
# print(result)
return result
def evaluation(self,data,thresh):
self.model.eval()
prediction_list = []
target_list = []
input_ids, input_mask, segment_ids, label_ids = data
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
label_ids = label_ids.to(self.device)
with torch.no_grad():
logits = self.model(input_ids = input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
probs = logits.sigmoid()
prediction_list,target_list = self.acc_pred(probs, label_ids.view(-1,len(self.label_list)),self.label_list,thresh)
result = []
for idx in range(len(prediction_list)):
prediction_set = set(prediction_list[idx])
target_set = set(target_list[idx])
# print("pred: ",prediction_set)
# print("target: ",target_set)
# print("*************************")
if(prediction_set.issubset(target_set)):
result.append(True)
else:
result.append(False)
return result
def acc_pred(self,probs,labels,label_list,thresh):
batch_size = probs.size(0)
preds = (probs>thresh)
preds = preds.cpu().numpy()
labels = labels.byte().cpu().numpy()
prediction_list = []
target_list = []
for idx in range(batch_size):
pred = preds[idx]
label = labels[idx]
prediction_list.append([])
target_list.append([])
for idx,each_pred in enumerate(pred):
if(each_pred):
prediction_list[-1].append(label_list[idx])
for idx,each_label in enumerate(label):
if(each_label):
target_list[-1].append(label_list[idx])
return prediction_list,target_list
if __name__ == "__main__":
classifier_filter = BERTFilter()
while(True):
dialogue_idx = "PMUL3688.json"
turn_id = 4
thresh=0.5
user_utters =["that will work. i will need tickets for 3 people.", "that will work. thank you."]
turn_label = [
[
"train-book people",
"3"
]
]
flag = classifier_filter.query_filter(dialogue_idx,turn_id,user_utters,turn_label,thresh)
import pdb;
pdb.set_trace()
|
[
"semihyavuz9091@gmail.com"
] |
semihyavuz9091@gmail.com
|
8faa535f77e9ee8d6cb8c6b69d92a30d23e281bd
|
41898ee4cc597a19540d64c333687610d5792168
|
/Problems/557. Reverse Words in a String III.py
|
06cbf8f6ce5f9612f7228b00c8168b7305d69334
|
[] |
no_license
|
aidardarmesh/leetcode
|
82c4e09a85dc5b6cf05bceb089b57b3a81e2406e
|
4509f4b2b83e172e6ccc21ff89fc1204e0c6b3f3
|
refs/heads/master
| 2021-07-06T15:56:04.244369
| 2020-11-15T20:47:16
| 2020-11-15T20:47:16
| 205,086,346
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
from typing import *
class Solution:
def reverseWords(self, s: str) -> str:
left = None
n = len(s)
arr = list(s)
def mirror(left, right):
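            # Reverse arr[left..right] in place by swapping characters from both ends.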
while left < right:
arr[left], arr[right] = arr[right], arr[left]
left += 1
right -= 1
for i in range(n):
if s[i] == ' ':
if left is not None:
mirror(left, i-1)
left = None
elif left is None:
left = i
elif i == n-1:
if left is not None:
mirror(left, n-1)
return ''.join(arr)
s = Solution()
s.reverseWords("Let's take LeetCode contest") == "s'teL ekat edoCteeL tsetnoc"
|
[
"darmesh.aidar@gmail.com"
] |
darmesh.aidar@gmail.com
|
be8a1bb2c9aa57a82e0da7cf0672f2b6102613b4
|
7bf9709ac227bb9ad70aa38810f9282e66948415
|
/project/domain_layer/stores_managment/DiscountPolicy.py
|
66ffa8797b17664721f7ce77d7327f1b0d39bd79
|
[] |
no_license
|
MoranNeptune/Workshop
|
ad7ad5a01be7ce36d9fe847da4a683749ed59ce8
|
925982e889be41cd7fb3eb6959e981c38ac1cd68
|
refs/heads/master
| 2022-07-16T22:55:18.218174
| 2020-05-13T15:41:56
| 2020-05-13T15:41:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
class DiscountPolicy:
def __init__(self):
pass
|
[
"levamit@post.bgu.ac.il"
] |
levamit@post.bgu.ac.il
|
d29851148971353b46c941e65a866086a96c1bb3
|
5872d377d6a733fcd558979e5a720b29c02375f7
|
/assignment13.py
|
ad55298f8362cf4e76a6bfba48722f82a07e6074
|
[] |
no_license
|
bharti-11/assignment1
|
739186572f308e62e87824245dd2ade3ab06f767
|
e7bbe59acbc57e7ac6754ea039faf5bfe6029eed
|
refs/heads/master
| 2020-03-20T04:03:33.842100
| 2018-07-13T09:03:00
| 2018-07-13T09:03:00
| 137,169,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,540
|
py
|
#Q.1- Name and handle the exception occurred in the following program:
a=3
if a<4:
a=a/(a-3)
print(a)
#sol
a=3
if a<4:
try:
a=a/(a-3)
except Exception:
print("exception occurred")
#Q.2- Name and handle the exception occurred in the following program:
l=[1,2,3]
print(l[3])
#sol
l=[1,2,3]
try:
print(l[3])
except Exception:
print("exception occurred")
#Q.3- What will be the output of the following code:
# Program to depict Raising Exception
#try:
#    raise NameError("Hi there") # Raise Error
#except NameError:
#    print "An exception"
#    raise # To determine whether the exception was raised or not
#sol
try:
raise NameError("Hi there") # Raise Error
except NameError:
print("An exception")
raise # To determine whether the exception was raised or not
#output
#An exception
#Traceback (most recent call last):
#  File "n.py2", line 3, in <module>
#    raise NameError("Hi there") # Raise Error
#NameError: Hi there
#Q.4- What will be the output of the following code:
# Function which returns a/b
#def AbyB(a , b):
#    try:
#        c = ((a+b) / (a-b))
#    except ZeroDivisionError:
#        print "a/b result in 0"
#    else:
#        print c
# Driver program to test above function
#AbyB(2.0, 3.0)
#AbyB(3.0, 3.0)
#sol
def AbyB(a , b):
try:
c = ((a+b) / (a-b))
except ZeroDivisionError:
print("a/b result in 0")
else:
print(c)
# Driver program to test above function
AbyB(2.0, 3.0)
AbyB(3.0, 3.0)
#output
#-5.0
#a/b result in 0
#Q.5- Write a program to show and handle following exceptions:
#sol
#1. Import Error
import error
print("hello world")
#after handling
try:
import error
print("hello world")
except Exception:
print("Exception occurred")
#2. Value Error
n=int(input("enter a number:"))
print("enter a number",n)
#after handling
try:
n=int(input("enter a number:"))
print("enter a number",n)
except Exception:
print("Exception occurred")
#3.index error
l=[1,2,3]
print(l[3])
#after handling
try:
l=[1,2,3]
print(l[3])
except Exception:
print("exception occurred")
#Q.6- Create a user-defined exception AgeTooSmallError() that warns the user when they have entered age less than 18.
#The code must keep taking input till the user enters an appropriate age (18 or more).
#sol
class AgeTooSmallError(Exception):
pass
a=1
while True:
print("you have to enter the age 18 or more than 18")
try:
a=int(input("enter the age:"))
if a<18:
raise AgeTooSmallError()
print("Correct")
break
except Exception:
print("Incorrect Age"
|
[
"bhartirana9050@gmail.com"
] |
bhartirana9050@gmail.com
|
83ad7c7af9dcfd89ee4d58da68d4db5bc5b0c14f
|
e2f98708ab775e2ce7d2b83325319b86ad9e4961
|
/WilliamsCleveland/Assignments/Flask/NinjaGold/server.py
|
7cfd4343fe8f75b65222e444074ee905b5054ba6
|
[
"MIT"
] |
permissive
|
brroberts/Python201608
|
fc9ac57950175a1809e0409446804fe1f1190294
|
518b7e01e5243508c987d5046ace9df5b324c8f0
|
refs/heads/master
| 2021-01-18T17:23:16.285923
| 2016-08-29T16:41:09
| 2016-08-29T16:41:09
| 64,950,868
| 1
| 0
| null | 2016-08-04T16:37:37
| 2016-08-04T16:37:37
| null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
from flask import Flask, request, render_template, session, redirect, Markup
import random
app = Flask(__name__)
app.secret_key = "secretshhh"
@app.route('/')
def main():
if "gold" not in session:
session["gold"] = 0
session['activities']=""
return render_template('index.html')
@app.route('/process_money', methods = ["post"])
def money():
if request.form['place'] == "farm":
gold = random.randint(10,20)
session['gold']+=gold
session['activities']+=Markup("<p>earned {} gold from the farm</p>".format(gold))
return redirect("/")
elif request.form['place'] == "cave":
gold = random.randint(5,10)
session['gold']+=gold
session['activities']+=Markup("<p>earned {} gold from the cave</p>".format(gold))
return redirect("/")
elif request.form['place'] == "house":
gold = random.randint(2,5)
session['gold']+=gold
session['activities']+=Markup("<p>earned {} gold from the house</p>".format(gold))
return redirect("/")
elif request.form['place'] == "casino":
gold = random.randint(-50,50)
session['gold']+=gold
if gold > 0:
session['activities']+=Markup("<p>entered a casino and made {} gold</p>".format(gold))
else:
session['activities']+=Markup("<p class= 'red'>entered a casino and lost {} gold..Ouch</p>".format(gold*-1))
return redirect("/")
app.run(debug=True)
|
[
"cameron3579@hotmail.com"
] |
cameron3579@hotmail.com
|
4d56f19824b7d8e6d79d13913b481d1890ba68cd
|
6eca5af393e8e3c83efa7b89857e6876a4da9f9c
|
/athena/modules/active/google.py
|
9c8182b1ea00359bb186faa33b8f522ace0c6531
|
[
"MIT"
] |
permissive
|
dimitar-petrov/hey-athena-client
|
ec714dbf661a2a66eaea3f3bc72f1998ae0c7d78
|
c4e3121b1df2493d0ddd097eb584ff4c69995695
|
refs/heads/master
| 2021-05-04T12:53:45.750307
| 2016-05-22T03:29:41
| 2016-05-22T03:29:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
"""
Handles most general questions (including math!)
Usage Examples:
- "How tall is Mount Everest?"
"""
from athena.classes.module import Module
from athena.classes.task import ActiveTask
from athena.apis import api_lib
class AnswerTask(ActiveTask):
def __init__(self):
p_list = [r".*\b((who|what|when|where|why|how)(\')?(s)?|" +
r"(can|are|is|will|define|show me|say))\b.*"]
super().__init__(patterns=p_list)
def action(self, text):
print('\n~ Searching Google...\n')
api_lib['voice_browse_api'].search(text)
class Google(Module):
def __init__(self):
tasks = [AnswerTask()]
super().__init__('google', tasks, priority=1)
|
[
"rcbyron@utexas.edu"
] |
rcbyron@utexas.edu
|
46443b8beed1cc102c120ce14f62826fec969d7d
|
4a59f973b28aff4db1864ca8fb19c70790b4d6f3
|
/main.py
|
51b97a69941dc145e23a5c798f870d1586314923
|
[] |
no_license
|
Zoey-little5/20754243_MSBD5001
|
faf18294b75247181396169011b9ade3f8924e28
|
937cb37120c12769647d40238ea015e338f64084
|
refs/heads/main
| 2023-01-31T21:11:54.028639
| 2020-12-05T10:17:28
| 2020-12-05T10:17:28
| 318,756,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
import numpy as np
import pandas as pd
import xgboost as xgb
from dateutil.parser import parser
def read_and_preprocess_training_data(file_path):
dataframe = pd.read_csv(file_path)
traits_data = []
label_data = dataframe.loc[:, 'speed']
p = parser()
for date in dataframe.loc[:, 'date']:
traits_data.append(extract_date_and_time(date, p))
return np.array(traits_data), np.array(label_data)
def extract_date_and_time(date_str, p: parser):
date = p.parse(date_str)
return np.array([date.year, date.month, date.day, date.time().hour])
# return date.timestamp()
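# Illustrative example (assumes dateutil is installed; the date string is arbitrary):
# extract_date_and_time("2018-01-01 08:00:00", parser()) -> array([2018, 1, 1, 8])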
def train(traits_data, label_data):
# clf = make_pipeline(StandardScaler(), SVR(C=1.0, epsilon=0.2))
# clf.fit(traits_data, label_data)
clf = xgb.XGBRegressor()
clf.fit(traits_data, label_data)
return clf
def read_test_and_predict(model, test_file_path):
test_data = pd.read_csv(test_file_path)
test_data_list = []
ids = test_data.loc[:, 'id']
dates = test_data.loc[:, 'date']
p = parser()
for i in range(len(dates)):
test_data_list.append(np.array(extract_date_and_time(dates[i], p)))
predict_result = model.predict(np.array(test_data_list))
final_ans = []
for i in range(len(predict_result)):
final_ans.append([int(ids[i]), predict_result[i]])
return final_ans
if __name__ == '__main__':
traits, label = read_and_preprocess_training_data("train.csv")
trained_model = train(traits, label)
predict = read_test_and_predict(trained_model, 'test.csv')
df = pd.DataFrame(columns=['id', 'speed'])
idx = 0
for pred in predict:
df.loc[idx] = pred
idx += 1
df['id'] = df['id'].astype(int)
df.to_csv('predicted.csv', index=False)
|
[
"noreply@github.com"
] |
Zoey-little5.noreply@github.com
|
bf9e51d5d73d63fcdabb9aa1a7e3e9c75c0dc27b
|
82f902d013a323d0cf1123b089a298fd502cd6f5
|
/liangyun_app/views.py
|
44ada5af04e28de537159336e2396a3672e79d68
|
[] |
no_license
|
SmallGaoX/liangyun
|
b976925ec57c5b08cb69c93517b60d664de7f68b
|
4231d412228b177fb6d3a40dda33c843d77e117d
|
refs/heads/master
| 2020-04-13T01:04:28.212958
| 2018-12-23T04:55:26
| 2018-12-23T04:55:26
| 162,863,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,750
|
py
|
from django.shortcuts import render
from liangyun_app import models
from django.db import connection
from collections import namedtuple
import datetime
import json
class CJsonEncoder(json.JSONEncoder):
def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
# Create your views here.
def home(request):
return render(request, "overall.html")
# Return all rows as tuples
def my_custom_sql():
with connection.cursor() as cursor:
cursor.execute(
"SELECT liangyun_app_machine.time, Sum( liangyun_app_machine.fans ), Sum( liangyun_app_machine.`show` ), Sum( liangyun_app_machine.used ) FROM liangyun_app_machine GROUP BY liangyun_app_machine.time ORDER BY liangyun_app_machine.time ASC")
row = cursor.fetchall()
return row
# Return all rows as dictionaries
def my_custom_sql1():
with connection.cursor() as cursor:
cursor.execute(
"SELECT liangyun_app_machine.time, Sum( liangyun_app_machine.fans ), Sum( liangyun_app_machine.`show` ), Sum( liangyun_app_machine.used ) FROM liangyun_app_machine GROUP BY liangyun_app_machine.time ORDER BY liangyun_app_machine.time DESC")
# row = cursor.fetchall()
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
def namedtuplefetchall():
with connection.cursor() as cursor:
cursor.execute(
"SELECT liangyun_app_machine.time, Sum( liangyun_app_machine.fans ), Sum( liangyun_app_machine.`show` ), Sum( liangyun_app_machine.used ) FROM liangyun_app_machine GROUP BY liangyun_app_machine.time ORDER BY liangyun_app_machine.time DESC")
desc = cursor.description
nt_reslt = namedtuple('Result', [col[0] for col in desc])
return [nt_reslt(*row) for row in cursor.fetchall()]
def machine(request):
all_data = my_custom_sql()
time = []
used = []
show = []
fans = []
for lin in all_data:
        # convert the datetime column to a date string
# time.append(datetime.date.isoformat(lin[0]))
time.append(lin[0].strftime('%Y-%m-%d'))
# print(datetime.date.isoformat(lin[0]))
# time.append(lin[0])
fans.append(int(lin[1]))
show.append(int(lin[2]))
used.append(int(lin[3]))
print(time)
# print(used)
# print(show)
# print(fans)
return render(request, 'overall.html',
{'datatime': json.dumps(time), 'fans_data': json.dumps(fans), 'show_data': json.dumps(show),
'used': used})
|
[
"wadxf@live.com"
] |
wadxf@live.com
|
60ee66cae16ac6e0439eb6a3fff8cc6949654f44
|
e3c8f786d09e311d6ea1cab50edde040bf1ea988
|
/Incident-Response/Tools/grr/grr/server/grr_response_server/databases/mysql_utils_test.py
|
a4b36b9292c5c412bae2c71fa29a79284ccdba3a
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
foss2cyber/Incident-Playbook
|
d1add8aec6e28a19e515754c6ce2e524d67f368e
|
a379a134c0c5af14df4ed2afa066c1626506b754
|
refs/heads/main
| 2023-06-07T09:16:27.876561
| 2021-07-07T03:48:54
| 2021-07-07T03:48:54
| 384,988,036
| 1
| 0
|
MIT
| 2021-07-11T15:45:31
| 2021-07-11T15:45:31
| null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from absl.testing import absltest
from grr_response_server.databases import mysql_utils
from grr.test_lib import test_lib
class DocTest(test_lib.DocTest):
module = mysql_utils
class PlaceholdersTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(mysql_utils.Placeholders(0), "()")
def testOne(self):
self.assertEqual(mysql_utils.Placeholders(1), "(%s)")
def testMany(self):
self.assertEqual(mysql_utils.Placeholders(4), "(%s, %s, %s, %s)")
def testZeroValues(self):
self.assertEqual(mysql_utils.Placeholders(3, 0), "")
def testManyValues(self):
self.assertEqual(
mysql_utils.Placeholders(3, 2), "(%s, %s, %s), (%s, %s, %s)")
class NamedPlaceholdersTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(mysql_utils.NamedPlaceholders([]), "()")
def testOne(self):
self.assertEqual(mysql_utils.NamedPlaceholders(["foo"]), "(%(foo)s)")
def testMany(self):
self.assertEqual(
mysql_utils.NamedPlaceholders(["bar", "baz", "foo"]),
"(%(bar)s, %(baz)s, %(foo)s)")
def testDictUsesKeys(self):
self.assertIn(
mysql_utils.NamedPlaceholders({
"bar": 42,
"baz": 42,
"foo": 42
}), ["(%(bar)s, %(baz)s, %(foo)s)"])
def testSortsNames(self):
self.assertEqual(
mysql_utils.NamedPlaceholders(["bar", "foo", "baz"]),
"(%(bar)s, %(baz)s, %(foo)s)")
class ColumnsTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(mysql_utils.Columns([]), "()")
def testOne(self):
self.assertEqual(mysql_utils.Columns(["foo"]), "(`foo`)")
def testMany(self):
self.assertEqual(
mysql_utils.Columns(["bar", "baz", "foo"]), "(`bar`, `baz`, `foo`)")
def testDictUsesKeys(self):
self.assertIn(
mysql_utils.Columns({
"bar": 42,
"baz": 42,
"foo": 42
}), ["(`bar`, `baz`, `foo`)"])
def testSortsNames(self):
self.assertEqual(
mysql_utils.Columns(["bar", "foo", "baz"]), "(`bar`, `baz`, `foo`)")
def testSortsRawNamesWithoutEscape(self):
self.assertGreater("`", "_")
self.assertEqual(mysql_utils.Columns(["a", "a_hash"]), "(`a`, `a_hash`)")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
[
"a.songer@protonmail.com"
] |
a.songer@protonmail.com
|
dd1ed8b4a163e13f54325b7f7d8cc675fbc91d57
|
ffed689b2ea1c0be9a3b750887a6f6d19cc153c2
|
/flair/models/__init__.py
|
2e3c9e2619b2a721c98ab1bdc3f0a331f3cc1f31
|
[
"MIT"
] |
permissive
|
shtechair/AIN
|
a70eec8eb1ee817c528f29898c951f0da9abdfbd
|
7b0413434550c7f5194393526b554df629029743
|
refs/heads/main
| 2023-01-23T06:31:07.087598
| 2020-11-30T07:18:09
| 2020-11-30T07:18:09
| 303,882,729
| 0
| 0
|
MIT
| 2020-11-30T07:18:10
| 2020-10-14T02:35:20
| null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
from .sequence_tagger_model import SequenceTagger, FastSequenceTagger
|
[
"wangxy1@shanghaitech.edu.cn"
] |
wangxy1@shanghaitech.edu.cn
|
c32fd622445ab01b56b3a39587039f4d82e7d06d
|
27ba6b3f865631c9c0fea429842e41a0fdc27241
|
/even_the_last.py
|
36ee64f84ed9fdf8f593eac7e2239776b7ae97e4
|
[] |
no_license
|
jigi-33/checkio
|
7c3b0b68213ef6777df3247edc888805e0c505b8
|
afbfe565618c3e81dbc8f24249bf6832239ece0a
|
refs/heads/master
| 2022-05-22T00:22:26.117962
| 2020-04-20T10:12:07
| 2020-04-20T10:12:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
def checkio(array: list) -> int:
if len(array) == 0:
return 0
else:
a = 0
for i in range(len(array)):
if i % 2 == 0:
a += array[i]
mult = a * array[-1]
return mult
if __name__ == '__main__':
print('Example:')
print(checkio([0, 1, 2, 3, 4, 5]))
assert checkio([0, 1, 2, 3, 4, 5]) == 30, "(0+2+4)*5=30"
assert checkio([1, 3, 5]) == 30, "(1+5)*5=30"
assert checkio([6]) == 36, "(6)*6=36"
assert checkio([]) == 0, "An empty array = 0"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
|
[
"pollicier@gmail.com"
] |
pollicier@gmail.com
|
64a48734fa864359da59f93107a8e4b623c3f06f
|
79cb8e02d967a5a7718f22bc75115f34126328b0
|
/python/cuml/dask/datasets/blobs.py
|
49ed85e4d168593bf9021a2744e71c8d55ecdb8b
|
[
"Apache-2.0"
] |
permissive
|
taureandyernv/cuml
|
55b289d81bc2783878b0ba5c5eb9d28ad9d4b3be
|
c92b594d3bda342c64d88a9c44b5d6e507b13f6c
|
refs/heads/branch-0.11
| 2023-08-16T18:05:24.513141
| 2019-10-22T15:10:28
| 2019-10-22T15:10:28
| 216,860,609
| 0
| 0
|
Apache-2.0
| 2023-08-03T18:53:05
| 2019-10-22T16:32:52
| null |
UTF-8
|
Python
| false
| false
| 4,272
|
py
|
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask.dataframe import from_delayed
import pandas as pd
import cudf
from dask.distributed import default_client
from sklearn.datasets import make_blobs as skl_make_blobs
import numpy as np
from uuid import uuid1
import math
def create_df(m, n, centers, cluster_std, random_state, dtype):
"""
Returns Dask Dataframes on device for X and y.
"""
X, y = skl_make_blobs(m, n, centers=centers, cluster_std=cluster_std,
random_state=random_state)
X = cudf.DataFrame.from_pandas(pd.DataFrame(X.astype(dtype)))
y = cudf.DataFrame.from_pandas(pd.DataFrame(y))
return X, y
def get_meta(df):
ret = df.iloc[:0]
return ret
def get_X(t):
return t[0]
def get_labels(t):
return t[1]
def make_blobs(nrows, ncols, centers=8, n_parts=None, cluster_std=1.0,
center_box=(-10, 10), random_state=None, verbose=False,
dtype=np.float32):
"""
Makes unlabeled dask.Dataframe and dask_cudf.Dataframes containing blobs
for a randomly generated set of centroids.
This function calls `make_blobs` from Scikitlearn on each Dask worker
and aggregates them into a single Dask Dataframe.
For more information on Scikit-learn's `make_blobs:
<https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html>`_.
:param nrows : number of rows
:param ncols : number of features
:param n_centers : number of centers to generate
:param n_parts : number of partitions to generate (this can be greater
than the number of workers)
:param cluster_std : how far can each generated point deviate from its
closest centroid?
:param center_box : the bounding box which constrains all the centroids
:param random_state : sets random seed
:param verbose : enables / disables verbose printing.
:param dtype : (default = np.float32) datatype to generate
:return: (dask.Dataframe for X, dask.Series for labels)
"""
client = default_client()
workers = list(client.has_what().keys())
n_parts = n_parts if n_parts is not None else len(workers)
parts_workers = (workers * n_parts)[:n_parts]
rows_per_part = math.ceil(nrows/n_parts)
if not isinstance(centers, np.ndarray):
centers = np.random.uniform(center_box[0], center_box[1],
size=(centers, ncols)).astype(np.float32)
if verbose:
print("Generating %d samples acgraross %d partitions on "
"%d workers (total=%d samples)" %
(math.ceil(nrows/len(workers)), n_parts, len(workers), nrows))
key = str(uuid1())
# Create dfs on each worker (gpu)
dfs = []
rows_so_far = 0
for idx, worker in enumerate(parts_workers):
if rows_so_far+rows_per_part <= nrows:
rows_so_far += rows_per_part
worker_rows = rows_per_part
else:
worker_rows = (int(nrows) - rows_so_far)
dfs.append(client.submit(create_df, worker_rows, ncols,
centers, cluster_std, random_state, dtype,
key="%s-%s" % (key, idx),
workers=[worker]))
x_key = str(uuid1())
y_key = str(uuid1())
X = [client.submit(get_X, f, key="%s-%s" % (x_key, idx))
for idx, f in enumerate(dfs)]
y = [client.submit(get_labels, f, key="%s-%s" % (y_key, idx))
for idx, f in enumerate(dfs)]
meta_X = client.submit(get_meta, X[0]).result()
X_cudf = from_delayed(X, meta=meta_X)
meta_y = client.submit(get_meta, y[0]).result()
y_cudf = from_delayed(y, meta=meta_y)
return X_cudf, y_cudf
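# A minimal usage sketch (assumptions: a dask.distributed Client is already running
# and cudf/dask_cudf are installed; the sizes below are illustrative only):
#
# from dask.distributed import Client
# client = Client()
# X_df, y_df = make_blobs(nrows=10000, ncols=20, centers=5, n_parts=4)
# print(X_df.head())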
|
[
"cjnolet@gmail.com"
] |
cjnolet@gmail.com
|
a57786a4a1571ef6d33d42dac65e879857b201a8
|
f33c3e874b1142ba2213ac0dfd9df20c698f0542
|
/mnist_cifar/models.py
|
fbf8dff953087218b81c6eb2825fb53b52aea35a
|
[] |
no_license
|
GaoYi439/Complementary-GAN
|
4a60aeac78dcf6e78b388d43b4b6555c3b915188
|
408e1c571e342fff2bdf5aa7d9afb749261baf4f
|
refs/heads/master
| 2022-12-18T10:49:03.354107
| 2020-09-28T15:30:03
| 2020-09-28T15:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,354
|
py
|
import torch.nn as nn
import torch.nn.functional as F
class G_MNIST(nn.Module):
def __init__(self, nz, ngf, nc):
super(G_MNIST, self).__init__()
self.embed = nn.Embedding(10, nz)
self.conv1 = nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0)
self.bn1 = nn.BatchNorm2d(ngf * 8)
self.conv2 = nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1)
self.bn2 = nn.BatchNorm2d(ngf * 4)
self.conv3 = nn.ConvTranspose2d(ngf * 4, ngf * 1, 4, 2, 1)
self.bn3 = nn.BatchNorm2d(ngf * 1)
self.conv5 = nn.ConvTranspose2d(ngf * 1, nc, 4, 2, 1)
self.relu = nn.ReLU(True)
self.tanh = nn.Tanh()
self.__initialize_weights()
def forward(self, z,label):
input = z.mul_(self.embed(label))
x = input.view(input.size(0), -1, 1, 1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv5(x)
output = self.tanh(x)
return output
def __initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class D_MNIST(nn.Module):
def __init__(self, ndf, nc, num_classes=10):
super(D_MNIST, self).__init__()
self.ndf = ndf
self.lrelu = nn.ReLU()
self.conv1 = nn.Conv2d(nc, ndf, 4, 2, 1)
self.conv3 = nn.Conv2d(ndf , ndf * 4, 4, 2, 1)
self.bn3 = nn.BatchNorm2d(ndf * 4)
self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1)
self.bn4 = nn.BatchNorm2d(ndf * 8)
self.conv5 = nn.Conv2d(ndf * 8, ndf * 1, 4, 1, 0)
self.gan_linear = nn.Linear(ndf * 1, 1)
self.aux_linear = nn.Linear(ndf * 1, num_classes)
self.sigmoid = nn.Sigmoid()
self.__initialize_weights()
def forward(self, input):
x = self.conv1(input)
x = self.lrelu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.lrelu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.lrelu(x)
x = self.conv5(x)
x = x.view(-1, self.ndf * 1)
c = self.aux_linear(x)
s = self.gan_linear(x)
s = self.sigmoid(s)
return s.squeeze(1), c.squeeze(1)
def __initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class G_CIFAR10(nn.Module):
def __init__(self, nz, ngf, nc):
super(G_CIFAR10, self).__init__()
self.embed = nn.Embedding(10, nz)
self.conv1 = nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0)
self.bn1 = nn.BatchNorm2d(ngf * 8)
self.conv2 = nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1)
self.bn2 = nn.BatchNorm2d(ngf * 4)
self.conv3 = nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1)
self.bn3 = nn.BatchNorm2d(ngf * 2)
self.conv4 = nn.ConvTranspose2d(ngf * 2, ngf * 1, 4, 2, 1)
self.bn4 = nn.BatchNorm2d(ngf * 1)
self.conv5 = nn.ConvTranspose2d(ngf, ngf * 1, 3, 1, 1)
self.bn5 = nn.BatchNorm2d(ngf * 1)
self.conv6 = nn.Conv2d(ngf * 1, nc, 3, 1, 1)
self.relu = nn.ReLU(True)
self.tanh = nn.Tanh()
self.__initialize_weights()
def forward(self, z,label):
input = z.mul_(self.embed(label))
x = input.view(input.size(0), -1, 1, 1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
x = self.conv6(x)
output = self.tanh(x)
return output
def __initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class D_CIFAR10(nn.Module):
def __init__(self, ndf, nc, num_classes=10):
super(D_CIFAR10, self).__init__()
self.ndf = ndf
self.lrelu = nn.ReLU()
self.conv0 = nn.Conv2d(nc, ndf, 3, 1, 1)
self.bn0 = nn.BatchNorm2d(ndf)
self.conv1 = nn.Conv2d(ndf, ndf, 3, 1, 1)
self.bn1 = nn.BatchNorm2d(ndf)
self.conv2 = nn.Conv2d(ndf, ndf * 2, 4, 2, 1)
self.bn2 = nn.BatchNorm2d(ndf * 2)
self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1)
self.bn3 = nn.BatchNorm2d(ndf * 4)
self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1)
self.bn4 = nn.BatchNorm2d(ndf * 8)
self.conv5 = nn.Conv2d(ndf * 8, ndf * 1, 4, 1, 0)
self.gan_linear = nn.Linear(ndf * 1, 1)
self.aux_linear = nn.Linear(ndf * 1, num_classes)
self.sigmoid = nn.Sigmoid()
self.__initialize_weights()
def forward(self, input):
x = self.conv0(input)
x = self.bn0(x)
x = self.lrelu(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.lrelu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.lrelu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.lrelu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.lrelu(x)
x = self.conv5(x)
x = x.view(-1, self.ndf * 1)
c = self.aux_linear(x)
s = self.gan_linear(x)
s = self.sigmoid(s)
return s.squeeze(1), c.squeeze(1)
def __initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
'''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
super(Bottleneck, self).__init__()
self.out_planes = out_planes
self.dense_depth = dense_depth
self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)
self.shortcut = nn.Sequential()
if first_layer:
self.shortcut = nn.Sequential(
nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_planes+dense_depth)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
x = self.shortcut(x)
d = self.out_planes
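        # dual path: the first d (= out_planes) channels form a residual path (element-wise sum),
        # while the remaining channels of x and out are densely concatenated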
out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1)
out = F.relu(out)
return out
class DPN(nn.Module):
def __init__(self, cfg):
super(DPN, self).__init__()
in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.last_planes = 64
self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
self.aux_linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 10)
self.gan_linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 1)
self.sigmoid = nn.Sigmoid()
def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for i,stride in enumerate(strides):
layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
self.last_planes = out_planes + (i+2) * dense_depth
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
c = self.aux_linear(out)
s = self.gan_linear(out)
s = self.sigmoid(s)
return s.squeeze(1), c.squeeze(1)
def DPN26():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (2,2,2,2),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def DPN92():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (3,4,20,3),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def test():
net = DPN92()
x = torch.randn(1,3,32,32)
y = net(x)
print(y)
# test()
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.aux_linear = nn.Linear(512*block.expansion, num_classes)
self.gan_linear = nn.Linear(512*block.expansion, 1)
self.sigmoid = nn.Sigmoid()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
c = self.aux_linear(out)
s = self.gan_linear(out)
s = self.sigmoid(s)
return s.squeeze(1), c.squeeze(1)
def ResNet18():
return ResNet(BasicBlock, [2,2,2,2])
def ResNet34():
return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
return ResNet(Bottleneck, [3,8,36,3])
def test():
net = ResNet18()
    s, c = net(torch.randn(1,3,32,32))  # forward returns (real/fake score, class logits)
    print(s.size(), c.size())
|
[
"yanwuxu@gpu050.pvt.bridges.psc.edu"
] |
yanwuxu@gpu050.pvt.bridges.psc.edu
|
32bccf79d8b966c581bc2a92d82c5997f5058fc5
|
6d359f5cac4ed59199bd1b03c5a172237b80b72a
|
/kafka-python/kafkaProducerAsync.py
|
ce02ca3529660efb8916c840044c94e3046588a7
|
[
"Apache-2.0"
] |
permissive
|
acs/kafka-samples
|
9e223d9e2e7b151a427c11064960bba3def7fd35
|
1e12ce3c0ca8e5bb6b4d6039a357087639204b35
|
refs/heads/master
| 2020-12-29T07:34:09.057543
| 2020-02-07T11:15:29
| 2020-02-07T11:15:29
| 238,516,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
import logging
import time
from aiokafka import AIOKafkaProducer
import asyncio
loop = asyncio.get_event_loop()
async def send_one():
producer = AIOKafkaProducer(
loop=loop, bootstrap_servers='localhost:9092')
await producer.start()
try:
while True:
logging.debug("Sending message ...")
            await asyncio.sleep(1)  # non-blocking pause; time.sleep() would block the event loop
# Get cluster layout and initial topic/partition leadership information
# Produce message
await producer.send_and_wait("my_topic", b"Super message")
finally:
# Wait for all pending messages to be delivered or expire.
await producer.stop()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
loop.run_until_complete(send_one())
|
[
"adelcastillo@thingso2.com"
] |
adelcastillo@thingso2.com
|
e8e33c30c7a55aeffb0206fba3fafc45a976f7b9
|
427f23bb5886425eadebb7a2db562f049b50123d
|
/metan/settings fordeploy.py
|
1b04b5e2b693bf6990ec1c582dde158a0e79f7a9
|
[] |
no_license
|
mmahdavim/corrmeta
|
fcf119d8a33590bc4891ac07268425ee65321a95
|
079814abba612f3083c66dcd78fd22afcb0fec56
|
refs/heads/master
| 2020-03-28T20:32:26.122038
| 2018-04-25T19:20:31
| 2018-04-25T19:20:31
| 94,613,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,233
|
py
|
"""
Django settings for metan project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&t#_bu7^2#f)1%^3dsr7^d2l0!0e4l9=@v&)yiqag%#1x&-!m6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'mtnlss',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'metan.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'metan.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
######By Mohsen:
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
ADMINS = ["theonlyadmin","corrmeta.noreply@gmail.com"] #This is the receiver's email address (which in this case is the same as the sender)
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'corrmeta.noreply@gmail.com'
DEFAULT_FROM_EMAIL = 'corrmeta.noreply@gmail.com'
EMAIL_HOST_PASSWORD = 'CorrMet@'
EMAIL_PORT = 587
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': [], #['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'applogfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'mtnlss.log'),
'maxBytes': 1024*1024*15, # 15MB
'backupCount': 10,
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'mtnlss': {
'handlers': ['applogfile',],
'level': 'DEBUG',
},
}
}
|
[
"mmahdavim@gmail.com"
] |
mmahdavim@gmail.com
|
913aea80a7be92a30a978815f13ada4dfbc2480d
|
feb1131c3300c1f581c96114bfa512f65f72015d
|
/prac04/quick pick.py
|
2697b49d6c315d68c6ee3b56f59aaf9fa91feccd
|
[] |
no_license
|
jot1/workshops3
|
db4b015ae243f1dd79fa8e2d5cbbfc282a0494fd
|
dd1817c19e2de70e7cea677c7a494be814847559
|
refs/heads/master
| 2021-01-13T14:59:24.404379
| 2016-12-16T11:28:11
| 2016-12-16T11:28:11
| 76,627,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
__author__ = 'jc450453'
from random import randint
quick_pick=int(input("How many quick picks? "))
pick_of_6nos=[]
num=0
for i in range(0,quick_pick):
num=randint(1,45)
for n in range(0,6):
while num in pick_of_6nos:
num=randint(1,45)
pick_of_6nos.append(num)
print(sorted(pick_of_6nos))
pick_of_6nos=[]
|
[
"prabhjotkaur7@my.jcu.edu.au"
] |
prabhjotkaur7@my.jcu.edu.au
|
59c4c00d5f432dec66af350497740d2f71a38cb3
|
d3a556e969b21e42d9a7ee5da4c66ba692029bab
|
/gmm.py
|
a0a7b3eae82359aed981c5feb639c1cd1b03b9ce
|
[] |
no_license
|
caleblu/MLSP-Project-fall17
|
7e2e6f59a0b8d52faffa33cab7253092de552f49
|
90c1b4936b1dbe8eea3fa4428363968fbd75273c
|
refs/heads/master
| 2021-08-24T14:11:08.665505
| 2017-12-10T05:22:19
| 2017-12-10T05:22:19
| 104,931,330
| 1
| 1
| null | 2017-11-19T01:21:26
| 2017-09-26T19:55:04
|
TeX
|
UTF-8
|
Python
| false
| false
| 2,749
|
py
|
import sklearn.mixture
import numpy as np
def train_GMM(jnt):
"""
train GMM with joint vectors
:param jnt: joint vector of spectral features
:return: trained GMM model
"""
GMM = sklearn.mixture.GaussianMixture(n_components=40, covariance_type='full')
GMM.fit(jnt)
return GMM
def get_density_x(src, GMM):
"""
:param src: source spectral features
:param GMM: trained GMM model
:return: get the conditional probability that src belong to a component
"""
    mid = GMM.means_.shape[1] // 2  # integer midpoint: first half is source, second half is target
x_GMM = sklearn.mixture.GaussianMixture(n_components=40, covariance_type='full')
x_GMM.covariances_ = GMM.covariances_[:, :mid, :mid]
x_GMM.means_ = GMM.means_[:, 0:mid]
x_GMM.weights_ = GMM.weights_
x_GMM.precisions_cholesky_ = sklearn.mixture.gaussian_mixture._compute_precision_cholesky(x_GMM.covariances_, 'full')
return x_GMM.predict_proba(src)
def get_mean_tgt(GMM):
"""
:param GMM: trained GMM model
:return: get the mean of the target spectral features of each component
"""
    mid = GMM.means_.shape[1] // 2
y_mean = GMM.means_[:, mid:]
return y_mean
def get_cross_cov(GMM):
    mid = GMM.means_.shape[1] // 2
return GMM.covariances_[:, mid:, :mid]
def get_xx_cov(GMM):
    mid = GMM.means_.shape[1] // 2
return GMM.covariances_[:, :mid, :mid]
def get_x_mean(GMM):
    mid = GMM.means_.shape[1] // 2
return GMM.means_[:, 0:mid]
def predict_GMM_VQ(src, GMM):
"""
predict target value given src spectral features by VQ conversion
:param src: source spectral features
:param GMM: trained GMM model
:return: predicted target spectral features
"""
density_x = get_density_x(src, GMM)
v = get_mean_tgt(GMM)
m = GMM.n_components
T, n_mcep = src.shape
y = np.zeros((T, n_mcep))
for t in range(T):
for i in range(m):
y[t] = y[t] + np.dot(density_x[t][i], v[i])
return y
def predict_GMM_FULL(src, GMM):
"""
predict target value given src spectral features by Full conversion
:param src: source spectral features
:param GMM: trained GMM model
:return: predicted target spectral features
"""
density_x = get_density_x(src, GMM)
v = get_mean_tgt(GMM)
diag = get_cross_cov(GMM)
sig = get_xx_cov(GMM)
mean_x = get_x_mean(GMM)
m = GMM.n_components
T, n_mcep = src.shape
y = np.zeros((T, n_mcep))
A = np.zeros((m,n_mcep,n_mcep))
for i in range(m):
A[i] = np.dot(diag[i], np.linalg.inv(sig[i]))
for t in range(T):
for i in range(m):
tmp = np.dot(A[i], (src[t] - mean_x[i]))
y[t] = y[t] + np.dot(density_x[t][i], v[i] + tmp)
return y
if __name__=="__main__":
pass
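    # A minimal usage sketch (assumptions: `src` and `tgt` are time-aligned spectral
    # feature matrices of shape (T, n_mcep); the names are illustrative only):
    #
    # jnt = np.hstack([src, tgt])            # joint source/target vectors
    # gmm = train_GMM(jnt)
    # converted = predict_GMM_FULL(src, gmm)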
|
[
"nanshu.wang@gmail.com"
] |
nanshu.wang@gmail.com
|
d40abeb6c070fc71ba4e1f5fa90193ab97b47fd6
|
a57541520ab737843c9ba125e5856a7563d8756d
|
/utils/old/make_mndwi_4pred.py
|
64ea3c58da4794de1caa00fd717275e00ebb9c82
|
[
"MIT"
] |
permissive
|
dbuscombe-usgs/segmentation_gym
|
5fbfb2896557657b8ef6678dadc4006e483deadd
|
1517efad76513e0decd51d8e31fca279b7bdcf1c
|
refs/heads/main
| 2023-04-17T11:42:49.141844
| 2022-03-11T20:16:16
| 2022-03-11T20:16:16
| 469,203,035
| 0
| 0
|
MIT
| 2022-03-12T21:21:33
| 2022-03-12T21:21:33
| null |
UTF-8
|
Python
| false
| false
| 11,892
|
py
|
# Written by Dr Daniel Buscombe, Marda Science LLC
# for the USGS Coastal Change Hazards Program
#
# MIT License
#
# Copyright (c) 2022, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# utility to merge multiple coincident jpeg images into nd numpy arrays
import sys,os, time, json, shutil
sys.path.insert(1, '../src')
from skimage.io import imread, imsave
import numpy as np
from tkinter import filedialog, messagebox
from tkinter import *
from glob import glob
from skimage.transform import rescale ## this is actually for resizing
from skimage.morphology import remove_small_objects, remove_small_holes
from tqdm import tqdm
from joblib import Parallel, delayed
###===========================================
#-----------------------------------
# custom 2d resizing functions for 2d discrete labels
def scale(im, nR, nC):
'''
    for resizing 2d integer arrays
'''
nR0 = len(im) # source number of rows
nC0 = len(im[0]) # source number of columns
tmp = [[ im[int(nR0 * r / nR)][int(nC0 * c / nC)]
for c in range(nC)] for r in range(nR)]
return np.array(tmp).reshape((nR,nC))
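# e.g. scale(np.array([[1, 2], [3, 4]]), 4, 4) nearest-neighbour upsamples a 2x2 label
# array to 4x4 ([[1,1,2,2],[1,1,2,2],[3,3,4,4],[3,3,4,4]]) without creating new label values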
#-----------------------------------
def scale_rgb(img, nR, nC, nD):
'''
    for resizing 3d integer arrays
'''
imgout = np.zeros((nR, nC, nD))
for k in range(3):
im = img[:,:,k]
nR0 = len(im) # source number of rows
nC0 = len(im[0]) # source number of columns
tmp = [[ im[int(nR0 * r / nR)][int(nC0 * c / nC)]
for c in range(nC)] for r in range(nR)]
imgout[:,:,k] = np.array(tmp).reshape((nR,nC))
return imgout
#-----------------------------------
def do_pad_image(f, TARGET_SIZE):
img = imread(f)
try:
old_image_height, old_image_width, channels = img.shape
except:
old_image_height, old_image_width = img.shape
channels=0
# create new image of desired size and color (black) for padding
new_image_width = TARGET_SIZE[0]
new_image_height = TARGET_SIZE[0]
if channels>0:
color = (0,0,0)
result = np.full((new_image_height,new_image_width, channels), color, dtype=np.uint8)
else:
color = (0)
result = np.full((new_image_height,new_image_width), color, dtype=np.uint8)
# compute center offset
x_center = (new_image_width - old_image_width) // 2
y_center = (new_image_height - old_image_height) // 2
try:
# copy img image into center of result image
result[y_center:y_center+old_image_height,
x_center:x_center+old_image_width] = img
except:
## AN ALTERNATIVE WAY - DO NOT REMOVE
# sf = np.minimum(new_image_width/old_image_width,new_image_height/old_image_height)
# if channels>0:
# img = rescale(img,(sf,sf,1),anti_aliasing=True, preserve_range=True, order=1)
# else:
# img = rescale(img,(sf,sf),anti_aliasing=True, preserve_range=True, order=1)
# if channels>0:
# old_image_height, old_image_width, channels = img.shape
# else:
# old_image_height, old_image_width = img.shape
#
# x_center = (new_image_width - old_image_width) // 2
# y_center = (new_image_height - old_image_height) // 2
#
# result[y_center:y_center+old_image_height,
# x_center:x_center+old_image_width] = img.astype('uint8')
if channels>0:
result = scale_rgb(img,TARGET_SIZE[0],TARGET_SIZE[1],3)
else:
result = scale(img,TARGET_SIZE[0],TARGET_SIZE[1])
wend = f.split(os.sep)[-2]
fdir = os.path.dirname(f)
fdirout = fdir.replace(wend,'padded_'+wend)
# save result
imsave(fdirout+os.sep+f.split(os.sep)[-1].replace('.jpg','.png'), result.astype('uint8'), check_contrast=False, compression=0)
#-----------------------------------
root = Tk()
root.filename = filedialog.askopenfilename(initialdir = "/segmentation_zoo",title = "Select config file",filetypes = (("config files","*.json"),("all files","*.*")))
configfile = root.filename
print(configfile)
root.withdraw()
with open(configfile) as f:
config = json.load(f)
for k in config.keys():
exec(k+'=config["'+k+'"]')
USE_GPU = True
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
if USE_GPU == True:
if 'SET_GPU' in locals():
os.environ['CUDA_VISIBLE_DEVICES'] = str(SET_GPU)
else:
#use the first available GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '0' #'1'
else:
## to use the CPU (not recommended):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
root = Tk()
root.filename = filedialog.askdirectory(initialdir = os.getcwd(),title = "Select directory for storing OUTPUT files")
output_data_path = root.filename
print(output_data_path)
root.withdraw()
root = Tk()
root.filename = filedialog.askdirectory(initialdir = os.getcwd(),title = "Select directory of RGB IMAGE files")
data_path = root.filename
print(data_path)
root.withdraw()
W=[]
W.append(data_path)
root = Tk()
root.filename = filedialog.askdirectory(initialdir = os.getcwd(),title = "Select directory of SWIR IMAGE files")
data_path = root.filename
print(data_path)
root.withdraw()
W.append(data_path)
##========================================================
## COLLATE FILES INTO LISTS
##========================================================
files = []
for data_path in W:
f = sorted(glob(data_path+os.sep+'*.jpg'))
if len(f)<1:
f = sorted(glob(data_path+os.sep+'images'+os.sep+'*.jpg'))
files.append(f)
# number of bands x number of samples
files = np.vstack(files).T
##========================================================
## MAKING PADDED (RESIZED) COPIES OF IMAGERY
##========================================================
# ## need resizing?
# szs = [imread(f).shape for f in files[:,0]]
# szs = np.vstack(szs)[:,0]
# if len(np.unique(szs))>1:
# do_resize=True
# else:
# do_resize=False
#
# from tkinter import simpledialog
# application_window = Tk()
# TARGET_X = simpledialog.askinteger("Imagery are different sizes and will be resized.",
# "What is the TARGET_SIZE (X) of the intended model?",
# parent=application_window,
# minvalue=32, maxvalue=8192)
#
# TARGET_Y = simpledialog.askinteger("Imagery are different sizes and will be resized.",
# "What is the TARGET_SIZE (Y) of the intended model?",
# parent=application_window,
# minvalue=32, maxvalue=8192)
#
# TARGET_SIZE = [TARGET_X,TARGET_Y]
## resize / pad imagery so all are a consistent size (TARGET_SIZE)
# if do_resize:
## make padded direcs
for w in W:
wend = w.split(os.sep)[-1]
print(wend)
newdirec = w.replace(wend,'padded_'+wend)
try:
os.mkdir(newdirec)
except:
pass
## cycle through, merge and padd/resize if need to
for file in files:
for f in file:
do_pad_image(f, TARGET_SIZE)
## write padded labels to file
W2 = []
for w in W:
wend = w.split(os.sep)[-1]
w = w.replace(wend,'padded_'+wend)
W2.append(w)
W = W2
del W2
files = []
for data_path in W:
f = sorted(glob(data_path+os.sep+'*.png'))
if len(f)<1:
f = sorted(glob(data_path+os.sep+'images'+os.sep+'*.png'))
files.append(f)
# number of bands x number of samples
files = np.vstack(files).T
# print("{} sets of {} image files".format(len(W),len(files)))
###================================================
##========================================================
## NON-AUGMENTED FILES
##========================================================
###================================
from imports import *
## make non-aug subset first
# cycle through pairs of files and labels
for counter,f in enumerate(files):
# try:
datadict={}
g = imread(f[0])[:,:,1].astype('float')
swir = imread(f[1]).astype('float')
g[g==0]=np.nan
swir[swir==0]=np.nan
g = np.ma.filled(g)
swir = np.ma.filled(swir)
mndwi = np.divide(swir - g, swir + g)
mndwi[np.isnan(mndwi)]=-1
mndwi = rescale_array(mndwi,0,255)
datadict['arr_0'] = mndwi.astype(np.uint8)
datadict['num_bands'] = 1
datadict['files'] = [fi.split(os.sep)[-1] for fi in f]
ROOT_STRING = f[0].split(os.sep)[-1].split('.')[0]
#print(ROOT_STRING)
segfile = output_data_path+os.sep+ROOT_STRING+'_noaug_nd_data_000000'+str(counter)+'.npz'
np.savez_compressed(segfile, **datadict)
del datadict, mndwi, g, swir
# except:
# print("incompatible dimensions:")
# print(f)
#-----------------------------------
def load_npz(example):
with np.load(example.numpy()) as data:
image = data['arr_0'].astype('uint8')
#image = standardize(image)
file = [''.join(f) for f in data['files']]
return image, file[0]
@tf.autograph.experimental.do_not_convert
#-----------------------------------
def read_seg_dataset_multiclass(example):
"""
"read_seg_dataset_multiclass(example)"
This function reads an example from a npz file into a single image and label
INPUTS:
* dataset example object (filename of npz)
OPTIONAL INPUTS: None
GLOBAL INPUTS: TARGET_SIZE
OUTPUTS:
* image [tensor array]
* class_label [tensor array]
"""
image, file = tf.py_function(func=load_npz, inp=[example], Tout=[tf.float32, tf.string])
return image, file
###================================
##========================================================
## READ, VERIFY and PLOT NON-AUGMENTED FILES
##========================================================
BATCH_SIZE = 8
filenames = tf.io.gfile.glob(output_data_path+os.sep+'*_noaug*.npz')
dataset = tf.data.Dataset.list_files(filenames, shuffle=False)
print('{} files made'.format(len(filenames)))
# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
dataset = dataset.map(read_seg_dataset_multiclass, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) # drop_remainder will be needed on TPU
dataset = dataset.prefetch(AUTO)
# dataset = dataset.shuffle(42)
try:
os.mkdir(output_data_path+os.sep+'noaug_sample')
except:
pass
print('.....................................')
print('Printing examples to file ...')
counter=0
for imgs,files in dataset.take(10):
for count,(im, file) in enumerate(zip(imgs, files)):
im = rescale_array(im.numpy(), 0, 1)
# if im.shape[-1]:
# im = im[:,:,:3]
plt.imshow(im)
file = file.numpy()
plt.axis('off')
plt.title(file)
plt.savefig(output_data_path+os.sep+'noaug_sample'+os.sep+ ROOT_STRING + 'noaug_ex'+str(counter)+'.png', dpi=200, bbox_inches='tight')
#counter +=1
plt.close('all')
counter += 1
|
[
"dbuscombe@gmail.com"
] |
dbuscombe@gmail.com
|
dd907f0a69cd9c88ddfd52f92553161b2d59a294
|
44989ba6b04f78b0a9fd7b7879b433eca1047275
|
/06-Condities/blad steen schaar.py
|
2c8cce7af147ee6f655d627f60796c50fb672406
|
[] |
no_license
|
Milan9870/5WWIPython
|
f97bcd908d0e4ec0182fe4eb497f0a0712f0b2f9
|
d9c5ef52e6fe8311caee0440f38a52da6eb1a53d
|
refs/heads/master
| 2020-07-21T04:59:43.781802
| 2020-02-21T10:53:10
| 2020-02-21T10:53:10
| 206,759,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
# input
speler1 = str(input('Blad, steen of schaar: '))
speler2 = str(input('Blad, steen of schaar: '))
# calculation
if speler1 == speler2:
winnaar = 'onbeslist'
elif speler1 == 'blad':
if speler2 == 'steen':
winnaar = 'speler 1 wint'
else:
winnaar = 'speler 2 wint'
elif speler1 == 'steen':
if speler2 == 'blad':
winnaar = 'speler 2 wint'
else:
winnaar = 'speler 1 wint'
else:
if speler2 == 'blad':
winnaar = 'speler 1 wint'
else:
winnaar = 'speler 2 wint'
# output
print(winnaar)
# the tie ('onbeslist') and 'speler 1 wint' cases are handled explicitly; 'speler 2 wint' falls through to the else
|
[
"milan.desmijter@sgsintpaulus.eu"
] |
milan.desmijter@sgsintpaulus.eu
|
ab71c566f2da89fc084c540cb552a05ad7e1ed29
|
f1390c49b3640731c7a869accf59d871785ec5e7
|
/src/users/migrations/0001_initial.py
|
57ed018dd8f6a567d387355ca1b3196ed1a80174
|
[] |
no_license
|
alirezahassanzade/Drmotor
|
de7c094ccabf6983e004aabbb16da7f7140d5f00
|
cd128f992b7f11d82abb74cc867897c9046ef6f3
|
refs/heads/master
| 2020-04-18T17:33:39.807324
| 2019-07-24T07:56:20
| 2019-07-24T07:56:20
| 167,657,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,680
|
py
|
# Generated by Django 2.2 on 2019-07-01 07:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import users.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('phone_number', models.CharField(max_length=10, unique=True, verbose_name='Phone Number')),
('type', models.IntegerField(choices=[(10, 'Admin'), (20, 'User'), (30, 'Mechanic')], default=10, verbose_name='User Type')),
('dateofbirth', models.DateField(blank=True, null=True)),
('identificationcode', models.CharField(blank=True, max_length=10, null=True)),
('telephonenumber', models.CharField(blank=True, max_length=10, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('joinedDate', models.DateTimeField(auto_now_add=True)),
('picture', models.ImageField(blank=True, null=True, upload_to='user-img/')),
('vote', models.PositiveSmallIntegerField(blank=True, null=True)),
('wallet', models.DecimalField(decimal_places=3, default=0.0, max_digits=13)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', users.models.UserManager()),
],
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('country', models.CharField(max_length=50)),
('city', models.CharField(max_length=50)),
('region', models.CharField(max_length=50)),
('details', models.CharField(max_length=50)),
('postalcode', models.CharField(max_length=10)),
('lat', models.CharField(max_length=50)),
('lng', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Motor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('licenseplate', models.CharField(max_length=8)),
('type', models.CharField(blank=True, choices=[('A', 'Type A'), ('B', 'Type B')], max_length=50, null=True)),
('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"pedram_parsian@outlook.com"
] |
pedram_parsian@outlook.com
|
412dbd51051a3e82471b4b614b8810caed980d3f
|
0146f63cbb2c5bdc321d264e24b112bcb875e8be
|
/animations/stretch1_pose.py
|
b9998595782857d883ca9e76ce9c84f952ad6588
|
[] |
no_license
|
zhexenova/NAO_Planning
|
7dcea52a6cf8305bd4bdc01b55703284e6dd950a
|
5cb395b07dc2ddc3e9113ff9f440c55d848de3f7
|
refs/heads/main
| 2023-01-28T10:48:58.901542
| 2020-12-04T14:35:07
| 2020-12-04T14:35:07
| 318,368,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,950
|
py
|
# Choregraphe bezier export in Python.
names = list()
times = list()
keys = list()
names.append("HeadPitch")
times.append([1.48, 2.24, 2.76, 3.12, 3.76, 5.12, 6.64, 7.4, 8.08])
keys.append([-0.299172, -0.372804, -0.495674, 0.400331, 0.49544, 0.492371, 0.506179, 0.307178, -0.161112])
names.append("HeadYaw")
times.append([1.48, 2.24, 3.12, 3.76, 5.12, 6.64, 8.08])
keys.append([-0.00157596, -0.00464395, -0.046062, -0.0521979, -0.036858, -0.038392, -0.0261199])
names.append("LAnklePitch")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([0.056716, 0.056716, 0.42641, 0.484702, 0.613558, 0.484702, 0.612024, 0.484702, 0.06592])
names.append("LAnkleRoll")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([-0.013764, -0.013764, -0.00302603, 0.00771196, -0.00916204, 0.00771196, -0.00916204, 0.00771196, -0.101202])
names.append("LElbowRoll")
times.append([1.4, 2.16, 2.68, 3.04, 3.68, 5.04, 6.56, 7.32, 8])
keys.append([-1.54462, -1.53549, -1.35612, -0.076658, -0.059784, -0.085862, -0.0812601, -0.600393, -0.421808])
names.append("LElbowYaw")
times.append([1.4, 2.16, 3.04, 3.68, 5.04, 6.56, 8])
keys.append([-0.544613, -0.550747, -0.306841, -0.309909, -0.311444, -0.311444, -1.17355])
names.append("LHand")
times.append([1.4, 2.16, 2.68, 3.04, 3.68, 4.48, 5.04, 5.96, 6.56, 8])
keys.append([0.7, 0.6992, 0.3, 0.9644, 0.6, 1, 0.4, 1, 0.3, 0.3028])
names.append("LHipPitch")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([0.334454, 0.335988, -0.820649, -1.05535, -1.43578, -1.05535, -1.44192, -1.05535, 0.214801])
names.append("LHipRoll")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([0.0245859, 0.0245859, -0.0260361, -0.033706, -0.0352401, -0.033706, -0.0352401, -0.033706, 0.115092])
names.append("LHipYawPitch")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([-0.095066, -0.093532, -0.093532, -0.0904641, -0.078192, -0.091998, -0.078192, -0.091998, -0.153358])
names.append("LKneePitch")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([-0.0859459, -0.0874799, -0.090548, -0.0923279, -0.0923279, -0.092082, -0.0923279, -0.090548, -0.084412])
names.append("LShoulderPitch")
times.append([1.4, 2.16, 3.04, 3.68, 4.48, 5.04, 5.96, 6.56, 8])
keys.append([0.312894, 0.262272, 0.719404, 0.782298, 0.443314, 0.776162, 0.364774, 0.783833, 1.54009])
names.append("LShoulderRoll")
times.append([1.4, 2.16, 2.68, 3.04, 3.36, 3.68, 5.04, 6.56, 8])
keys.append([0.105804, 0.0981341, 0.434587, -0.0353239, 0.0733038, -0.030722, -0.023052, -0.027654, 0.0904641])
names.append("LWristYaw")
times.append([1.4, 2.16, 2.68, 3.04, 3.68, 5.04, 6.56, 8])
keys.append([-0.25622, -0.239346, -1.37532, -0.10282, -0.124296, -0.122762, -0.121228, 0.108872])
names.append("RAnklePitch")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([0.0537319, 0.0537319, 0.421891, 0.475581, 0.610574, 0.475581, 0.60904, 0.475581, 0.066004])
names.append("RAnkleRoll")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([0.0337899, 0.0337899, -0.00149204, 0.00924597, 0.015382, 0.00924597, 0.015382, 0.00924597, 0.0614019])
names.append("RElbowRoll")
times.append([1.36, 2.12, 2.64, 3, 3.32, 3.64, 5, 6.52, 7.28, 7.96])
keys.append([1.54462, 1.50643, 1.34565, 0.038392, 0.0349066, 0.036858, 0.04913, 0.066004, 0.610865, 0.377407])
names.append("RElbowYaw")
times.append([1.36, 2.12, 3, 3.64, 5, 6.52, 7.96])
keys.append([0.529187, 0.552198, -0.00771196, -0.0261199, -0.029188, -0.0261199, 1.16426])
names.append("RHand")
times.append([1.36, 2.12, 2.64, 3, 3.64, 4.44, 5, 5.92, 6.52, 7.96])
keys.append([0.5, 0.4992, 0.2, 1, 0.7, 1, 0.4, 1, 0.4, 0.3048])
names.append("RHipPitch")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([0.331302, 0.332836, -0.814596, -1.04009, -1.44814, -1.05237, -1.45734, -1.05083, 0.196309])
names.append("RHipRoll")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([-0.05058, -0.049046, 0.039926, 0.039926, 0.0353239, 0.039926, 0.0353239, 0.039926, -0.068988])
names.append("RKneePitch")
times.append([1.44, 2.2, 3.08, 3.72, 4.52, 5.08, 6, 6.6, 8.04])
keys.append([-0.091998, -0.0923279, -0.0923279, -0.0923279, -0.0923279, -0.0923279, -0.0923279, -0.0923279, -0.0628521])
names.append("RShoulderPitch")
times.append([1.36, 2.12, 3, 3.64, 4.44, 5, 5.92, 6.52, 7.96])
keys.append([0.319114, 0.323717, 0.705682, 0.79312, 0.417134, 0.785451, 0.390954, 0.785451, 1.53404])
names.append("RShoulderRoll")
times.append([1.36, 2.12, 2.64, 3, 3.32, 3.64, 5, 6.52, 7.96])
keys.append([-0.18719, -0.159578, -0.443314, 0.022968, -0.0558505, 0.00149204, 0.00302603, -0.00310997, -0.0690719])
names.append("RWristYaw")
times.append([1.36, 2.12, 2.64, 3, 3.64, 5, 6.52, 7.96])
keys.append([0.305225, 0.31136, 1.11352, 0.263807, 0.265341, 0.265341, 0.265341, 0.18097])
|
[
"zhanel.zhexenova@studio.unibo.it"
] |
zhanel.zhexenova@studio.unibo.it
|
519c0c9c2dee80eff81906db8d7544c392de95f4
|
6eec34dc8e9085e72fdaf42a2b35214338d9fe3e
|
/shop/urls.py
|
e264ce64f4a1c4b377f89f93b6ee9eaf8a549b90
|
[] |
no_license
|
danielspring-crypto/yate1
|
013676e23e8383d9d78ac043a0398c69c008da69
|
29efc3018a281ca5c21af846a2cc4905977ca313
|
refs/heads/main
| 2023-01-15T19:05:05.857762
| 2020-11-25T08:12:46
| 2020-11-25T08:12:46
| 315,871,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
from django.urls import path
from . import views
app_name = 'shop'
urlpatterns = [
path('', views.product_list, name='product_list'),
path('<slug:category_slug>/', views.product_list, name='product_list_by_category'),
path('<int:id>/<slug:slug>/', views.product_detail, name='product_detail'),
]
|
[
"borncode3@gmail.com"
] |
borncode3@gmail.com
|
a7efb63f3dcee1313420c7f152ef22f4a7ab3dd1
|
80e564b9acc2795e5eba79ebd51ccd4ff4e64ab5
|
/Day4/set_up_display.py
|
d16d7c6a65b1c7707b5d34522c13379ebf9b4d37
|
[] |
no_license
|
Sean-McLeod/Sean-FinalProject
|
80cf76fee2904a01f88b162d36566ad99c3fe07b
|
f5b791f00769e0109003e92140892ab8697478df
|
refs/heads/main
| 2023-06-03T02:48:03.727969
| 2021-06-19T01:24:06
| 2021-06-19T01:24:06
| 375,555,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,339
|
py
|
#!/usr/bin/env python3
# Created by Sean McLeod
# Created on June 2021
# This is the display class
import constants
import pygame
from golem_class import GolemClass
from maps import Maps
from monsters import Monsters
from prisoner_class import PrisonerClass
from sprites import Sprites
class SetUpDisplay:
def __init__(self, screen):
self._screen = screen
def set_up_game_scene_one(self):
prisoner_x = 700
prisoner_y = 450
tile_x_increment = 60
tile_y_increment = 50
# create sprites
prisoner = pygame.image.load(constants.PRISONER_IMAGE)
golem = pygame.image.load(constants.GOLEM_IMAGE)
dragon = pygame.image.load(constants.DRAGON_IMAGE)
tile = pygame.image.load(constants.CELL_IMAGE)
door = pygame.image.load(constants.DOOR_IMAGE)
# objects
my_prisoner = PrisonerClass(
prisoner,
prisoner_x,
prisoner_y,
constants.PRISONER_X_SPEED,
constants.PRISONER_Y_SPEED,
self._screen,
)
my_golem = GolemClass(
golem,
constants.GOLEM_ONE_X,
constants.GOLEM_ONE_Y,
constants.GOLEM_SPEED[0],
constants.GOLEM_SPEED[1],
self._screen,
)
my_golem_two = GolemClass(
golem,
constants.GOLEM_TWO_X,
constants.GOLEM_TWO_Y,
constants.GOLEM_SPEED[0],
constants.GOLEM_SPEED[1],
self._screen,
)
my_dragon = Monsters(
dragon,
constants.DRAGON_X,
constants.DRAGON_Y,
0,
0,
self._screen,
)
my_cell_map = Maps(
tile,
tile_x_increment,
tile_y_increment,
self._screen,
1,
)
my_door = Sprites(
door,
constants.FIRST_DOOR_X,
constants.FIRST_DOOR_Y,
0,
0,
self._screen,
)
return my_prisoner, my_golem, my_golem_two, my_dragon, my_cell_map, my_door
def set_up_game_scene_two(self):
prisoner_x = 400
prisoner_y = 200
tile_x_increment = 100
tile_y_increment = 57
# create sprites
prisoner = pygame.image.load(constants.PRISONER_IMAGE)
tile = pygame.image.load(constants.WIRE_IMAGE)
door = pygame.image.load(constants.CASTLE_DOOR_IMAGE)
chest = pygame.image.load(constants.CHEST_IMAGE)
key = pygame.image.load(constants.KEY_IMAGE)
# create object
my_prisoner = PrisonerClass(
prisoner,
prisoner_x,
prisoner_y,
constants.PRISONER_X_SPEED,
constants.PRISONER_Y_SPEED,
self._screen,
)
my_door = Sprites(
door, constants.SECOND_DOOR_X, constants.SECOND_DOOR_Y, 0, 0, self._screen
)
my_cell_map = Maps(tile, tile_x_increment, tile_y_increment, self._screen, 2)
my_chest = Sprites(
chest, constants.CHEST_X, constants.CHEST_Y, 0, 0, self._screen
)
my_key = Sprites(key, constants.KEY_X, constants.KEY_Y, 0, 0, self._screen)
return my_prisoner, my_door, my_cell_map, my_chest, my_key
|
[
"noreply@github.com"
] |
Sean-McLeod.noreply@github.com
|
4380463608298f8490ce3638b50b1aec0f104568
|
63d4e6f765e612a1f5fbf73c2a5af88b74041f66
|
/curso-python/poo/to_do_v5.py
|
39549c6d12e3e8cb769c02d4fee7e7702405e7a5
|
[
"Apache-2.0"
] |
permissive
|
gui-hub/Estudos-Python
|
e9af0ab2231f9d804a95daae515c3d01752a9321
|
0219da2430526c4c3705248e86e65b8c847175b8
|
refs/heads/master
| 2020-09-05T08:59:31.133017
| 2019-12-04T00:39:57
| 2019-12-04T00:39:57
| 220,045,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
from datetime import datetime, timedelta
class Projeto:
def __init__(self, nome):
self.nome = nome
self.tarefas = []
def __iter__(self):
return self.tarefas.__iter__()
def add(self, descricao, vencimento=None):
self.tarefas.append(Tarefa(descricao, vencimento))
def pendentes(self):
return [tarefa for tarefa in self.tarefas if not tarefa.feito]
def procurar(self, descricao):
        # Possible IndexError here if no task matches
return [tarefa for tarefa in self.tarefas
if tarefa.descricao == descricao][0]
def __str__(self):
return f'{self.nome} ({len(self.pendentes())} tarefas(s) pendentes(s))'
class Tarefa:
def __init__(self, descricao, vencimento=None):
self.descricao = descricao
self.feito = False
self.criacao = datetime.now()
self.vencimento = vencimento
def concluir(self):
self.feito = True
def __str__(self):
status = []
if self.feito:
status.append(' (Concluída)')
elif self.vencimento:
if datetime.now() > self.vencimento:
status.append(' (Vencida)')
else:
dias = (self.vencimento - datetime.now()).days
status.append(f' Vence em {dias} dias')
return f'{self.descricao}' + ' '.join(status)
class TarefaRecorrente(Tarefa):
def __init__(self, descricao, vencimento, dias=7):
super().__init__(descricao, vencimento)
self.dias = dias
def concluir(self):
super().concluir()
novo_vencimento = datetime.now() + timedelta(days=self.dias)
return TarefaRecorrente(self.descricao, novo_vencimento, self.dias)
def main():
casa = Projeto('Tarefas de casa')
casa.add('Passar roupa', datetime.now())
casa.add('Lavar prato')
casa.tarefas.append(TarefaRecorrente('Trocar lençóis', datetime.now(), 7))
casa.tarefas.append(casa.procurar('Trocar lençóis').concluir())
print(casa)
casa.procurar('Lavar prato').concluir()
for tarefa in casa:
print(f'- {tarefa}')
print(casa)
mercado = Projeto('Compras no mercado')
mercado.add('Frutas secas')
mercado.add('Carne')
mercado.add('Tomate', datetime.now() + timedelta(days=3, minutes=12))
print(mercado)
mercado.procurar('Carne').concluir()
for tarefa in mercado:
print(f'- {tarefa}')
print(mercado)
if __name__ == '__main__':
main()
|
[
"guilherme.est9@gmail.com"
] |
guilherme.est9@gmail.com
|
5a3b79eed9f6b309a19ff6d523da69cae7fd420e
|
8e4b99c5b0915415e1919c25f4c310c36c2320b6
|
/jobportalproject/jobportalproject/urls.py
|
44d1ec81524ee6a91dd2d2393d3af9dcb01b8a27
|
[] |
no_license
|
rohitmanjhi/dj_jobsproject
|
1ec5b87742bd43ff8587b41a2239d75d9ba9c68e
|
8e9c7414b70a035dd2b5a5ee1b3d01cc45c16f8b
|
refs/heads/master
| 2020-04-13T11:45:39.899084
| 2018-12-27T02:34:00
| 2018-12-27T02:34:00
| 163,182,931
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
"""jobportalproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path
from testApp import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index),
path('hyd', views.hydjobs),
]
|
[
"rohitmanjhibtc@gmail.com"
] |
rohitmanjhibtc@gmail.com
|
f8912b81865a7c26fda37b5815cea9c7477b5a55
|
2b04963c3d6870d9cace072229c38a119d312e40
|
/main.py
|
07e3f93c557a5f5cfe2752476cc2dbbd4f7b3149
|
[] |
no_license
|
alishbah13/BooleanRetrievalModel
|
a27f2f731c0faeadf8acf7f5564c18fc38d604e5
|
e54fc762c82fc3f23d09c6b24c43cad2e91531fa
|
refs/heads/main
| 2023-04-13T06:56:24.373657
| 2021-04-24T19:22:10
| 2021-04-24T19:22:10
| 352,271,422
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
import tkinter as tk
from tkinter import ttk
from tkinter import *
from qp import query_processor
def show():
tempList = query_processor(raw_query.get())
result_list = []
if len(tempList) >= 1:
for i in range(1,len(tempList)+1):
result_list.append([i, str(tempList[i-1]) + ".txt"])
result_list.append(["-","-"])
for i, (num, name) in enumerate(result_list, start=1):
listBox.insert("", "end", values=( num, name))
root = tk.Tk()
root.title('Boolean Retrieval')
windowWidth = root.winfo_reqwidth()
windowHeight = root.winfo_reqheight()
positionRight = int(root.winfo_screenwidth()/2 - windowWidth)
positionDown = int(root.winfo_screenheight()/2 - windowHeight)
root.geometry("+{}+{}".format(positionRight, positionDown))
raw_query = StringVar()
label = tk.Label(root, text="Enter query ").grid(row=0,column =0, columnspan=3)
entry = tk.Entry(root, text="",textvariable = raw_query).grid(row=1, column=0, columnspan=3)
button = tk.Button(root, text="Search",command=show).grid(row=2, column=0, columnspan=3)
# create Treeview with 2 columns
cols = ('S.No', 'Document Name')
listBox = ttk.Treeview(root, columns=cols, show='headings', height = 20)
# set column headings
for col in cols:
listBox.heading(col, text=col)
listBox.grid(row=3, column=0, columnspan=2)
closeButton = tk.Button(root, text="Close", width=15, command=exit).grid(row=4, columnspan=3)
root.mainloop()
|
[
"alishbah13@gmail.com"
] |
alishbah13@gmail.com
|
336b57254b205f5b5d316d407f031a32fbdcf1ec
|
b0e64b050e43519df5e172d1b1c191e7341aa4b7
|
/theme.py
|
386b7f6beb4ee6c41a394a1250ed86e03390807d
|
[
"MIT"
] |
permissive
|
whtsky/catsup-theme-CHO
|
125fdf8b8dcac8f02fdae753f20415d7b3f5182d
|
8350f42352c1331473023d9d8da6a30a41d33ac0
|
refs/heads/master
| 2021-01-17T05:32:43.662645
| 2014-01-12T14:14:54
| 2014-01-12T14:14:54
| 15,843,962
| 0
| 0
|
MIT
| 2019-03-05T16:00:43
| 2014-01-12T14:40:45
|
CSS
|
UTF-8
|
Python
| false
| false
| 122
|
py
|
name = 'CHO'
author = 'Oran Zhang'
homepage = 'https://github.com/oranzhang/catsup-theme-CHO'
post_per_page = 20
vars = {}
|
[
"423794590@qq.com"
] |
423794590@qq.com
|
c7ab178a852f777960a56505deeb38def08faf90
|
e7ccd71a2da26d170f23194397f125457d7199ec
|
/mtensorflow/base.py
|
993f49650133acdf95d3e14604f0037c3fa8bde7
|
[] |
no_license
|
intererting/PythonAllInOne
|
0dd0287db45932f865061254f65cc899a87b7c17
|
a9d02b29ff518856faef3bfd3e3b68c680bd1bf4
|
refs/heads/master
| 2020-04-14T01:43:48.454086
| 2019-01-02T08:51:04
| 2019-01-02T08:51:04
| 163,494,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
import tensorflow as tf
import numpy as np
def testA():
a = tf.constant(3.0, dtype=tf.float32)
b = tf.constant(4.0, dtype=tf.float32)
total = a + b
sess = tf.Session()
print(sess.run(total))
print(sess.run((a, b, total)))
print(sess.run({'a': a, "b": b, 'total': total}))
def testB():
vec = tf.random_uniform(shape=(1, 3))
sess = tf.Session()
print(sess.run(vec))
def testC():
x = tf.placeholder(tf.float32)
def testD():
my_data = [
[1, 2],
[3, 4]
]
slices = tf.data.Dataset.from_tensor_slices(my_data)
nextItem = slices.make_one_shot_iterator().get_next()
sess = tf.Session()
while True:
try:
print(sess.run(nextItem))
except tf.errors.OutOfRangeError:
break
def testE():
x = tf.placeholder(dtype=tf.float32, shape=[None, 3])
linear_model = tf.layers.Dense(units=1)
y = linear_model(x)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
print(sess.run(y, {x: [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}))
if __name__ == '__main__':
testE()
|
[
"645644717@qq.com"
] |
645644717@qq.com
|
2902ccce1a81c074d5666f55c969ceb7dc41c4d1
|
0db05f7b843e8450bafd5ae23f8f70f9a9a8c151
|
/Src/StdLib/Lib/site-packages/win32com/test/testExchange.py
|
22fafccfb8ef3e4b7652839c64309313d9fa7c8f
|
[
"BSD-3-Clause",
"Python-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
IronLanguages/ironpython2
|
9c7f85bd8e6bca300e16f8c92f6384cecb979a6a
|
d00111890ce41b9791cb5bc55aedd071240252c4
|
refs/heads/master
| 2023-01-21T21:17:59.439654
| 2023-01-13T01:52:15
| 2023-01-13T01:52:15
| 91,620,472
| 1,171
| 288
|
Apache-2.0
| 2023-01-13T01:52:16
| 2017-05-17T21:11:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,269
|
py
|
# TestExchange = Exchange Server Dump
# Note that this code uses "CDO", which is unlikely to get the best choice.
# You should use the Outlook object model, or
# the win32com.mapi examples for a low-level interface.
from win32com.client import gencache, constants
import pythoncom
import os
ammodule = None # was the generated module!
def GetDefaultProfileName():
import win32api, win32con
try:
key = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows NT\\CurrentVersion\\Windows Messaging Subsystem\\Profiles")
try:
return win32api.RegQueryValueEx(key, "DefaultProfile")[0]
finally:
key.Close()
except win32api.error:
return None
#
# Recursive dump of folders.
#
def DumpFolder(folder, indent = 0):
print " " * indent, folder.Name
folders = folder.Folders
folder = folders.GetFirst()
while folder:
DumpFolder(folder, indent+1)
folder = folders.GetNext()
def DumpFolders(session):
try:
infostores = session.InfoStores
except AttributeError:
# later outlook?
store = session.DefaultStore
folder = store.GetRootFolder()
DumpFolder(folder)
return
print infostores
print "There are %d infostores" % infostores.Count
for i in range(infostores.Count):
infostore = infostores[i+1]
print "Infostore = ", infostore.Name
try:
folder = infostore.RootFolder
except pythoncom.com_error, details:
hr, msg, exc, arg = details
# -2147221219 == MAPI_E_FAILONEPROVIDER - a single provider temporarily not available.
if exc and exc[-1]==-2147221219:
print "This info store is currently not available"
continue
DumpFolder(folder)
# Build a dictionary of property tags, so I can reverse look-up
#
PropTagsById={}
if ammodule:
for name, val in ammodule.constants.__dict__.iteritems():
PropTagsById[val] = name
def TestAddress(session):
# entry = session.GetAddressEntry("Skip")
# print entry
pass
def TestUser(session):
ae = session.CurrentUser
fields = getattr(ae, "Fields", [])
print "User has %d fields" % len(fields)
for f in range(len(fields)):
field = fields[f+1]
try:
id = PropTagsById[field.ID]
except KeyError:
id = field.ID
print "%s/%s=%s" % (field.Name, id, field.Value)
def test():
import win32com.client
oldcwd = os.getcwd()
try:
session = gencache.EnsureDispatch("MAPI.Session")
try:
session.Logon(GetDefaultProfileName())
except pythoncom.com_error, details:
print "Could not log on to MAPI:", details
return
except pythoncom.error:
# no mapi.session - let's try outlook
app = gencache.EnsureDispatch("Outlook.Application")
session = app.Session
try:
TestUser(session)
TestAddress(session)
DumpFolders(session)
finally:
session.Logoff()
# It appears Exchange will change the cwd on us :(
os.chdir(oldcwd)
if __name__=='__main__':
from util import CheckClean
test()
CheckClean()
|
[
"pawel.jasinski@gmail.com"
] |
pawel.jasinski@gmail.com
|
6d673e1488323ef093aa6afb6a9b2ec12e6202cc
|
8c98a6f0108bf2aaf01f898be234ed0ecdb55759
|
/50.网络编程/粘包现象/3.sshclient.py
|
b13450d78dc8a99076ceb07a87850806f592df0e
|
[] |
no_license
|
mafei0728/python
|
5d3d6268602a24166d3a2301f2bd256713259587
|
51cd8bd843c5fcedc9fc457f927b4e48db406d7b
|
refs/heads/master
| 2020-03-28T14:17:10.123953
| 2018-10-15T11:27:48
| 2018-10-15T11:27:48
| 100,848,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import socket,struct
s1 = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s1.connect(('127.0.0.1',10010))
while True:
mes = input('ssh-->')
if not mes:
continue
if mes == 'quit':
break
s1.send(mes.encode('utf8'))
    # receive the 4-byte length header first
data_size = s1.recv(4)
data_size = struct.unpack('i',data_size)[0]
recv_size = 0
recv_data = b''
    # keep receiving until the announced number of bytes has arrived
while recv_size < data_size:
data = s1.recv(1024)
recv_size += len(data)
recv_data += data
print(recv_data.decode('gbk'))
s1.close()
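# Illustrative sketch (not part of the original script): the matching server side would
# length-prefix every reply with struct.pack so the 4-byte header unpacked above lines
# up exactly. `conn` (an accepted socket) and `result` (gbk-encoded command output) are
# hypothetical names used only for this example.
def _send_framed_reply_example(conn, result):
    """Send a length-prefixed reply: a 4-byte size header followed by the payload."""
    conn.send(struct.pack('i', len(result)))  # header announces the payload size
    conn.send(result)                         # payload; the client loops until all bytes are read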
|
[
"275060461@qq.com"
] |
275060461@qq.com
|
4fce7f850dacd24f34f079e59b0179cf87857913
|
dd1424b76ed7b9cced681ab13c87026b35124bc4
|
/feature_engine/timeseries/forecasting/base_forecast_transformers.py
|
d332036d19f3826283159d6d85860eb120108364
|
[
"BSD-3-Clause"
] |
permissive
|
solegalli/feature_engine
|
2c685b34db53681d14ae9dc3115534924596dba6
|
3cd3bbb6e6bcb0e7ce7321684ced2a3f3a33d48d
|
refs/heads/main
| 2023-07-06T21:15:56.288083
| 2022-12-07T10:32:49
| 2022-12-07T10:32:49
| 394,274,826
| 42
| 13
|
BSD-3-Clause
| 2021-09-10T22:01:57
| 2021-08-09T12:02:48
| null |
UTF-8
|
Python
| false
| false
| 6,770
|
py
|
from typing import List, Optional, Union
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from feature_engine._base_transformers.mixins import GetFeatureNamesOutMixin
from feature_engine._docstrings.fit_attributes import (
_feature_names_in_docstring,
_n_features_in_docstring,
)
from feature_engine._docstrings.init_parameters import (
_drop_original_docstring,
_missing_values_docstring,
)
from feature_engine._docstrings.methods import _fit_not_learn_docstring
from feature_engine._docstrings.substitute import Substitution
from feature_engine._variable_handling.init_parameter_checks import (
_check_init_parameter_variables,
)
from feature_engine._variable_handling.variable_type_selection import (
_find_or_check_numerical_variables,
)
from feature_engine.dataframe_checks import (
_check_contains_inf,
_check_contains_na,
_check_X_matches_training_df,
check_X,
)
from feature_engine.tags import _return_tags
@Substitution(
missing_values=_missing_values_docstring,
drop_original=_drop_original_docstring,
feature_names_in_=_feature_names_in_docstring,
fit=_fit_not_learn_docstring,
n_features_in_=_n_features_in_docstring,
)
class BaseForecastTransformer(BaseEstimator, TransformerMixin, GetFeatureNamesOutMixin):
"""
Shared methods across time-series forecasting transformers.
Parameters
----------
variables: str, int, or list of strings or integers, default=None.
The variables to use to create the new features.
{missing_values}
{drop_original}
Attributes
----------
{feature_names_in_}
{n_features_in_}
"""
def __init__(
self,
variables: Union[None, int, str, List[Union[str, int]]] = None,
missing_values: str = "raise",
drop_original: bool = False,
) -> None:
if missing_values not in ["raise", "ignore"]:
raise ValueError(
"missing_values takes only values 'raise' or 'ignore'. "
f"Got {missing_values} instead."
)
if not isinstance(drop_original, bool):
raise ValueError(
"drop_original takes only boolean values True and False. "
f"Got {drop_original} instead."
)
self.variables = _check_init_parameter_variables(variables)
self.missing_values = missing_values
self.drop_original = drop_original
def _check_index(self, X: pd.DataFrame):
"""
Check that the index does not have missing data and its values are unique.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The dataset.
"""
if X.index.isnull().any():
raise NotImplementedError(
"The dataframe's index contains NaN values or missing data. "
"Only dataframes with complete indexes are compatible with "
"this transformer."
)
if X.index.is_unique is False:
raise NotImplementedError(
"The dataframe's index does not contain unique values. "
"Only dataframes with unique values in the index are "
"compatible with this transformer."
)
return self
def _check_na_and_inf(self, X: pd.DataFrame):
"""
Checks that the dataframe does not contain NaN or Infinite values.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The dataset for training or transformation.
"""
_check_contains_na(X, self.variables_)
_check_contains_inf(X, self.variables_)
return self
def _get_feature_names_in(self, X: pd.DataFrame):
"""
Finds the number and name of the features in the training set.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The dataset for training or transformation.
"""
self.feature_names_in_ = X.columns.tolist()
self.n_features_in_ = X.shape[1]
return self
def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
"""
This transformer does not learn parameters.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The training dataset.
y: pandas Series, default=None
y is not needed in this transformer. You can pass None or y.
"""
# check input dataframe
X = check_X(X)
# We need the dataframes to have unique values in the index and no missing data.
# Otherwise, when we merge the new features we will duplicate rows.
self._check_index(X)
# find or check for numerical variables
self.variables_ = _find_or_check_numerical_variables(X, self.variables)
# check if dataset contains na
if self.missing_values == "raise":
self._check_na_and_inf(X)
self._get_feature_names_in(X)
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Common checks performed before the feature transformation.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The data to transform.
Returns
-------
X: pandas dataframe of shape = [n_samples, n_features]
The data to transform.
"""
# check method fit has been called
check_is_fitted(self)
# check if 'X' is a dataframe
X = check_X(X)
# check if input data contains the same number of columns as the fitted
# dataframe.
_check_X_matches_training_df(X, self.n_features_in_)
# Dataframes must have unique values in the index and no missing data.
# Otherwise, when we merge the created features we will duplicate rows.
self._check_index(X)
# check if dataset contains na
if self.missing_values == "raise":
self._check_na_and_inf(X)
# reorder variables to match train set
X = X[self.feature_names_in_]
if self.sort_index is True:
X.sort_index(inplace=True)
return X
def _more_tags(self):
tags_dict = _return_tags()
tags_dict["allow_nan"] = True
tags_dict["variables"] = "numerical"
# add additional test that fails
tags_dict["_xfail_checks"][
"check_methods_subset_invariance"
] = "LagFeatures is not invariant when applied to a subset. Not sure why yet"
return tags_dict
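# Illustrative sketch (not part of feature_engine): a minimal concrete transformer showing
# how the shared checks above are typically reused. The name `_ExampleLagTransformer` and
# the 1-step lag feature are hypothetical; `sort_index`, referenced in transform() above,
# appears to be expected from concrete subclasses, so it is defined here.
class _ExampleLagTransformer(BaseForecastTransformer):
    def __init__(self, variables=None, missing_values="raise", drop_original=False, sort_index=True):
        super().__init__(variables, missing_values, drop_original)
        self.sort_index = sort_index

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        X = super().transform(X)  # run the common validation and column reordering
        for var in self.variables_:
            X[f"{var}_lag_1"] = X[var].shift(1)  # add a hypothetical 1-step lag feature
        return X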
|
[
"noreply@github.com"
] |
solegalli.noreply@github.com
|
2ad83f05cdc784f41d41b2d0117f8ce45f46a603
|
625fec15a0bea4ed3d4a02eba56608e63bb18053
|
/app/models.py
|
f8ac18277b54702e5cc68297d0fb48689d4b8e09
|
[] |
no_license
|
deepaksinghcv/microblog
|
70df64dfe3d42acd7143d9e90b812636e6f61fa9
|
84c14907707e4f96d4a033cfdef4f6a42ea17de5
|
refs/heads/master
| 2022-04-06T01:45:59.602794
| 2020-03-06T13:34:40
| 2020-03-06T13:34:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120),index = True, unique= True)
password_hash = db.Column(db.String(128))
def __repr__(self):
return f"<User {self.username}"
|
[
"deepak.singh@research.iiit.ac.in"
] |
deepak.singh@research.iiit.ac.in
|
908bcd0ea9be997e48755466c5d0e2bed4bde5a8
|
1361a8d0f43b8467c9208bfff69429dd0f679450
|
/python_koans/python3/runner/sensei.py
|
df99154606117dd8de4430079d9ff02c38f62fdd
|
[
"MIT"
] |
permissive
|
jonahoffline/python-practice
|
acc6ab84ac7dd5560fff3e27a452863fbc754589
|
3740c756be766bae5a15331655e7b72b69f79cf3
|
refs/heads/master
| 2016-09-05T13:58:29.007122
| 2013-08-25T05:27:25
| 2013-08-25T05:27:25
| 12,315,982
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,551
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
from . import helper
from .mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
def __init__(self, stream):
unittest.TestResult.__init__(self)
self.stream = stream
self.prevTestClassName = None
self.tests = path_to_enlightenment.koans()
self.pass_count = 0
self.lesson_pass_count = 0
self.all_lessons = None
def startTest(self, test):
MockableTestResult.startTest(self, test)
if helper.cls_name(test) != self.prevTestClassName:
self.prevTestClassName = helper.cls_name(test)
if not self.failures:
self.stream.writeln()
self.stream.writeln("{0}{1}Thinking {2}".format(
Fore.RESET, Style.NORMAL, helper.cls_name(test)))
if helper.cls_name(test) != 'AboutAsserts':
self.lesson_pass_count += 1
def addSuccess(self, test):
if self.passesCount():
MockableTestResult.addSuccess(self, test)
self.stream.writeln( \
" {0}{1}{2} has expanded your awareness.{3}{4}" \
.format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
Fore.RESET, Style.NORMAL))
self.pass_count += 1
def addError(self, test, err):
# Having 1 list for errors and 1 list for failures would mess with
# the error sequence
self.addFailure(test, err)
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) != self.prevTestClassName)
def addFailure(self, test, err):
MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
def learn(self):
self.errorReport()
self.stream.writeln("")
self.stream.writeln("")
self.stream.writeln(self.report_progress())
self.stream.writeln("")
self.stream.writeln(self.say_something_zenlike())
if self.failures: sys.exit(-1)
self.stream.writeln(
"\n{0}**************************************************" \
.format(Fore.RESET))
self.stream.writeln("\n{0}That was the last one, well done!" \
.format(Fore.MAGENTA))
self.stream.writeln(
"\nIf you want more, take a look at about_extra_credit_task.py{0}{1}" \
.format(Fore.RESET, Style.NORMAL))
def errorReport(self):
problem = self.firstFailure()
if not problem: return
test, err = problem
self.stream.writeln(" {0}{1}{2} has damaged your "
"karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
Style.BRIGHT, self.scrapeAssertionError(err)))
self.stream.writeln("")
self.stream.writeln("{0}{1}Please meditate on the following code:" \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
if not err: return ""
error_text = ""
count = 0
for line in err.splitlines():
m = re.search("^[^^ ].*$",line)
if m and m.group(0):
count+=1
if count>1:
error_text += (" " + line.strip()).rstrip() + '\n'
return error_text.strip('\n')
def scrapeInterestingStackDump(self, err):
if not err:
return ""
lines = err.splitlines()
sep = '@@@@@SEP@@@@@'
scrape = ""
for line in lines:
m = re.search("^ File .*$",line)
if m and m.group(0):
scrape += '\n' + line
m = re.search("^ \w(\w)+.*$",line)
if m and m.group(0):
scrape += sep + line
lines = scrape.splitlines()
scrape = ""
for line in lines:
m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
if m and m.group(0):
scrape += line + '\n'
return scrape.replace(sep, '\n').strip('\n')
def report_progress(self):
koans_complete = self.pass_count
lessons_complete = self.lesson_pass_count
koans_remaining = self.total_koans() - koans_complete
lessons_remaining = self.total_lessons() - lessons_complete
sent1 = "You have completed {0} koans and " \
"{1} lessons.\n".format(koans_complete, lessons_complete)
sent2 = "You are now {0} koans and {1} lessons away from " \
"reaching enlightenment.".format(koans_remaining, lessons_remaining)
return sent1+sent2
# Hat's tip to Tim Peters for the zen statements from The 'Zen
# of Python' (http://www.python.org/dev/peps/pep-0020/)
#
# Also a hat's tip to Ara T. Howard for the zen statements from his
# metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
# Edgecase's later permutation in the Ruby Koans
def say_something_zenlike(self):
if self.failures:
turn = self.pass_count % 37
zenness = "";
if turn == 0:
zenness = "Beautiful is better than ugly."
elif turn == 1 or turn == 2:
zenness = "Explicit is better than implicit."
elif turn == 3 or turn == 4:
zenness = "Simple is better than complex."
elif turn == 5 or turn == 6:
zenness = "Complex is better than complicated."
elif turn == 7 or turn == 8:
zenness = "Flat is better than nested."
elif turn == 9 or turn == 10:
zenness = "Sparse is better than dense."
elif turn == 11 or turn == 12:
zenness = "Readability counts."
elif turn == 13 or turn == 14:
zenness = "Special cases aren't special enough to " \
"break the rules."
elif turn == 15 or turn == 16:
zenness = "Although practicality beats purity."
elif turn == 17 or turn == 18:
zenness = "Errors should never pass silently."
elif turn == 19 or turn == 20:
zenness = "Unless explicitly silenced."
elif turn == 21 or turn == 22:
zenness = "In the face of ambiguity, refuse the " \
"temptation to guess."
elif turn == 23 or turn == 24:
zenness = "There should be one-- and preferably only " \
"one --obvious way to do it."
elif turn == 25 or turn == 26:
zenness = "Although that way may not be obvious at " \
"first unless you're Dutch."
elif turn == 27 or turn == 28:
zenness = "Now is better than never."
elif turn == 29 or turn == 30:
zenness = "Although never is often better than right " \
"now."
elif turn == 31 or turn == 32:
zenness = "If the implementation is hard to explain, " \
"it's a bad idea."
elif turn == 33 or turn == 34:
zenness = "If the implementation is easy to explain, " \
"it may be a good idea."
else:
zenness = "Namespaces are one honking great idea -- " \
"let's do more of those!"
return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
else:
return "{0}Nobody ever expects the Spanish Inquisition." \
.format(Fore.CYAN)
# Hopefully this will never ever happen!
return "The temple is collapsing! Run!!!"
def total_lessons(self):
all_lessons = self.filter_all_lessons()
if all_lessons:
return len(all_lessons)
else:
return 0
def total_koans(self):
return self.tests.countTestCases()
def filter_all_lessons(self):
cur_dir = os.path.split(os.path.realpath(__file__))[0]
if not self.all_lessons:
self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
self.all_lessons = list(filter(lambda filename:
"about_extra_credit" not in filename,
self.all_lessons))
return self.all_lessons
|
[
"jonah@pixelhipsters.com"
] |
jonah@pixelhipsters.com
|
4efcc55ae6a8de5b8580d5b5b5d25bc9662516c7
|
f0a0a5feb73597e8c54db72deec35d9cd3c977a3
|
/manage.py
|
7c0f64d4a90798b573d0f02c506033595f33e383
|
[] |
no_license
|
rjslingshot/ISv1
|
7016423d4e681d75e01ea25f78caa3569dafe700
|
54e0d91f773ea94ad980785f365d895a09e3a489
|
refs/heads/master
| 2022-12-13T13:17:41.661113
| 2020-09-13T20:09:41
| 2020-09-13T20:09:41
| 295,229,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ISv1.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"61992435+rjslingshot@users.noreply.github.com"
] |
61992435+rjslingshot@users.noreply.github.com
|
f3c067729e4d7cdef9425a8ea6a7a61fcd0c13c3
|
a28841b530e90c0a55ed858abfb92b68da45857a
|
/amino/tc/traverse.py
|
4f561ff409fab0b44337386d65af0b18124f4954
|
[
"MIT"
] |
permissive
|
GitHub-Notables/amino
|
172d3657bcc6fdc888f30614b41a57ce4c7273ed
|
3ab30ee65ba4e726b4f9db5384ddbd24551c8c33
|
refs/heads/master
| 2020-03-16T16:46:49.195323
| 2018-05-07T18:03:47
| 2018-05-07T18:03:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
import abc
from typing import TypeVar, Generic, Callable, List
from amino.tc.base import TypeClass
from amino.func import I
from amino import _
from amino.tc.apply_n import ApplyN
A = TypeVar('A')
B = TypeVar('B')
class TraverseF(Generic[A]):
pass
class TraverseG(Generic[A]):
pass
F = TraverseF
G = TraverseG
F0 = TypeVar('F0', bound=TraverseF)
G0 = TypeVar('G0', bound=TraverseG)
class Traverse(Generic[F0], TypeClass[F0], ApplyN):
# FIXME lens functions return index lenses, which is not a property of Traverse
def apply_n_funcs(self) -> List[str]:
return ['traverse']
@abc.abstractmethod
def traverse(self, fa: F[G[A]], f: Callable[[A], B], tpe: type) -> G[F[B]]:
...
def flat_traverse(self, fa: F[G[A]], f: Callable[[A], F[B]], tpe: type) -> G[F[B]]:
return self.traverse(fa, f, tpe).map(_.join) # type: ignore
def sequence(self, fa: F[G[A]], tpe: type) -> G[F[A]]:
return self.traverse(fa, I, tpe)
def flat_sequence(self, fa: F[G[A]], tpe: type) -> G[F[B]]:
return self.sequence(fa, tpe).map(_.join) # type: ignore
__all__ = ('Traverse',)
|
[
"torstenschmits@gmail.com"
] |
torstenschmits@gmail.com
|
c53276a9b8a51151a001d7ca22af7cb0eee2f421
|
585dd2091e5414d15eb807163f41473e560759e6
|
/tf_retrainer/utils/download_flowers.py
|
eb49afca1647cd0ebfbc78ab15f34abcdeb53f13
|
[
"BSD-3-Clause"
] |
permissive
|
joshsungasong/tf-retrainer
|
c2a869ce59294f242dafdb6623822344e1869173
|
c8f202cfc323e66e2bb252ccee10a3ea76994cdd
|
refs/heads/master
| 2021-04-27T01:40:29.617385
| 2018-02-23T04:11:50
| 2018-02-23T04:11:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import os
import sys
import tensorflow as tf
sys.path.append('/repos/tensorflow/models/research/slim')
from datasets import dataset_utils
url = 'http://download.tensorflow.org/data/flowers.tar.gz'
flowers_data_dir = '/tmp/flowers'
def main():
if not tf.gfile.Exists(flowers_data_dir):
tf.gfile.MakeDirs(flowers_data_dir)
dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
if __name__ == "__main__":
main()
|
[
"messiercr@gmail.com"
] |
messiercr@gmail.com
|
a805fa28b435a4533264a55906b316de63d7dbf1
|
344c410e8f245ab032610bdf8fbf2ece561621b4
|
/blog/shell.py
|
2be75da0722320d5b1e17353274b1351eee78c73
|
[] |
no_license
|
yulits/Django-blog
|
a602df0aff8890f75ffe7bc098f4b7f9ec7309e3
|
66473a715839c8847466aca479ca6fbf91144e88
|
refs/heads/master
| 2021-01-11T23:22:37.119360
| 2017-01-10T21:02:15
| 2017-01-10T21:02:15
| 78,573,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
import datetime
from django.utils import timezone
from blog.models import Post
from django.contrib.auth.models import User
time = timezone.now() + datetime.timedelta(days=30)
user = User.objects.get(username = 'yuliya')
Post.objects.create(title='title %s' % time, slug='slug_%s' % time, author=user, body='body', publish=time, created=time, updated=time, status='published')
|
[
"yulits@gmail.com"
] |
yulits@gmail.com
|
21697788a792b44fc25537856e2c150aa90fdbfe
|
9f30c5ddc20d7d198f15c12ce4a6aae968764d41
|
/dataset.py
|
b9fd3a55f1d4d65fbc4d04d0bc18d31494cc5796
|
[] |
no_license
|
allenwind/word2vec-in-tensorflow2.0
|
f947f54be4cb3106b4d61b4e46b9946f957abc0b
|
1ac36c444ceec77c5c2a4845762ac67e69871ea5
|
refs/heads/master
| 2023-02-18T20:11:54.156169
| 2021-01-20T14:15:46
| 2021-01-20T14:15:46
| 316,661,222
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,177
|
py
|
import glob
import os
import itertools
import random
import collections
import numpy as np
import jieba
import tensorflow as tf
from tokenizer import Tokenizer
window = 5  # window size
minlen = 30  # minimum sentence length
mintf = 64  # minimum term frequency
processes = 7  # number of parallel tokenization processes
def preprocess(content):
    # article preprocessing; nothing is done here for now
return content
_THUContent = "/home/zhiwen/workspace/dataset/THUCTC/THUCNews/**/*.txt"
def load_sentences(file=_THUContent, shuffle=True, limit=None):
files = glob.glob(file)
if shuffle:
random.shuffle(files)
for file in files[:limit]:
with open(file, encoding="utf-8") as fd:
content = fd.read()
yield preprocess(content)
file = "word_meta.json"
tokenizer = Tokenizer(mintf, processes)
if os.path.exists(file):
tokenizer.load(file)
else:
X = load_sentences(limit=None)
print("tokenize...")
tokenizer.fit_in_parallel(X)
tokenizer.save(file)
words = tokenizer.words
word2id = tokenizer.word2id
id2word = {j:i for i,j in word2id.items()}
vocab_size = len(tokenizer)
def create_subsamples(words, subsample_eps=1e-5):
    # build the word2vec subsampling table, used to drop over-frequent target words
    # cf. tf.keras.preprocessing.sequence.make_sampling_table
total = len(words)
subsamples = {}
for i, j in words.items():
j = j / total
if j <= subsample_eps:
continue
j = subsample_eps / j + (subsample_eps / j) ** 0.5
if j >= 1.0:
continue
subsamples[word2id[i]] = j
return subsamples
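# Worked example (illustrative): with subsample_eps = 1e-5, a word with relative
# frequency 1e-3 gets keep-probability eps/f + sqrt(eps/f) = 0.01 + 0.1 = 0.11,
# so in the generators below it is skipped as a target word roughly 89% of the time,
# while rare words (frequency <= eps) never enter the table and are always kept.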
# subsampling table
subsamples = create_subsamples(words)
def DataGenerator(epochs=10):
# cbow data
for epoch in range(epochs):
sentences = load_sentences()
for sentence in sentences:
            # disable jieba's new-word discovery (HMM)
sentence = jieba.lcut(sentence, HMM=False)
sentence = [0] * window + [word2id[w] for w in sentence if w in word2id] + [0] * window
probs = np.random.random(len(sentence))
for i in range(window, len(sentence) - window):
                # subsample the centre (target) word
c = sentence[i]
if c in subsamples and probs[i] > subsamples[c]:
continue
                # word ids inside the sliding window around position i
x = np.array(sentence[i-window:i] + sentence[i+1:i+window+1])
c = np.array([c])
                # for convenience the target is always placed at position 0, so the label is 0
z = np.array([0])
yield (x, c), z
def DataGeneratorSG(epochs=10):
# skip-gram data
for epoch in range(epochs):
sentences = load_sentences(limit=1000)
for sentence in sentences:
            # disable jieba's new-word discovery (HMM)
sentence = jieba.lcut(sentence, HMM=False)
sentence = [0] * window + [word2id[w] for w in sentence if w in word2id] + [0] * window
probs = np.random.random(len(sentence))
for i in range(window, len(sentence) - window):
                # subsample the centre (target) word
c = sentence[i]
if c in subsamples and probs[i] > subsamples[c]:
continue
                # word ids inside the sliding window around position i
x = np.array(sentence[i-window:i] + sentence[i+1:i+window+1])
c = np.array([c])
yield (c, x), ()
def create_dataset(window, minlen, batch_size=32, epochs=10):
pass
dl = tf.data.Dataset.from_generator(
DataGenerator,
output_types=((tf.int32, tf.int32), tf.int32)
).shuffle(
buffer_size=1024
).padded_batch(
batch_size=320,
padded_shapes=(([None], [None]), [None]),
drop_remainder=True
).prefetch(tf.data.experimental.AUTOTUNE)
dl_sg = tf.data.Dataset.from_generator(
DataGeneratorSG,
output_types=((tf.int32, tf.int32), ())
).shuffle(
buffer_size=1024
).padded_batch(
batch_size=320,
padded_shapes=(([None], [None]), ()),
drop_remainder=True
).prefetch(tf.data.experimental.AUTOTUNE)
if __name__ == "__main__":
    # quick smoke test
for (a, b), c in iter(dl):
print(a.shape, b.shape, c.shape)
break
for (a, b), _ in iter(dl_sg):
print(a.shape, b.shape)
break
|
[
"allenwind@foxmail.com"
] |
allenwind@foxmail.com
|
a3ef57459a80b0ea6d084ea01131d839dc18bff6
|
9984fa5e343a7810ae8da2ee3933d1acce9a9657
|
/random_sampling_of_accounts/collect_basic_user_profile_info.py
|
ed05a7c1976f05e8b48c5edff47c660944cc78f0
|
[] |
no_license
|
kennyjoseph/twitter_matching
|
4757e7cff241d90167888ce24625be36015b5a93
|
9fe3a2b970e0ff2f559261f89d29b3202db25920
|
refs/heads/master
| 2022-01-05T15:45:19.643082
| 2019-07-11T20:28:54
| 2019-07-11T20:28:54
| 84,259,451
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
"""
An example of how to use a UserSimpleDataWorker to collect basic information about users,
and then to see if users have been suspended/deleted from that.
Basically, we don't get information back from the API if these users have been suspended/deleted, so
we can learn from that information
"""
__author__ = 'kjoseph'
import glob
import sys
from twitter_dm import TwitterApplicationHandler
from twitter_dm.multiprocess import multiprocess_setup
from twitter_dm.multiprocess.WorkerSimpleUserLookup import SimpleUserLookupWorker
from twitter_dm.utility import general_utils
if len(sys.argv) != 4:
print 'usage: [known_user_dir] [user_ids] [out_dir]'
sys.exit(-1)
handles =[]
for fil in glob.glob(sys.argv[1]+"/*.txt"):
print 'FIL: ' , fil
app_handler = TwitterApplicationHandler(pathToConfigFile=fil)
handles += app_handler.api_hooks
print 'n authed users: ', len(handles)
user_ids = set([line.strip().lower() for line in open(sys.argv[2]).readlines()])
out_dir = sys.argv[3]
general_utils.mkdir_no_err(out_dir)
print "N TO FIND: ", len(user_ids)
user_ids = [u for u in user_ids]
user_data_chunked = []
i=0
while i < len(user_ids):
user_data_chunked.append(user_ids[i:(i+100)])
i += 100
user_data_chunked.append(user_ids[i-100:len(user_ids)])
print 'len chunked: ', len(user_data_chunked)
multiprocess_setup.init_good_sync_manager()
##put data on the queue
request_queue = multiprocess_setup.load_request_queue([x for x in user_data_chunked], len(handles), add_nones=True)
processes = []
for i in range(len(handles)):
p = SimpleUserLookupWorker(request_queue,handles[i],i, out_dir)
p.start()
processes.append(p)
try:
for p in processes:
p.join()
except KeyboardInterrupt:
print 'keyboard interrupt'
|
[
"kennyjoseph@Kennys-MacBook-Pro.local"
] |
kennyjoseph@Kennys-MacBook-Pro.local
|
0d2f70e1924505a4a1c0b85b513286db86ca90a5
|
943080ab1f5535228bc70f9d1b4f1749712662ea
|
/zhihu/zhihu/items.py
|
1fc16a45b32c98dfa4d782205ad3a82e1abcf782
|
[] |
no_license
|
mezc/spider-crawlZhihuUseScrapy
|
48438fd08fb2bc3ed744a6fbb759d708b51fdfcb
|
6164096e858ff17cebb47291731c9b8b3cbd7843
|
refs/heads/master
| 2020-03-28T12:09:03.346589
| 2018-09-17T06:17:15
| 2018-09-17T06:17:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
# import scrapy
from scrapy import Field,Item
class ZhihuItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
# pass
allow_message = Field()
answer_count = Field()
articles_count = Field()
avatar_url = Field()
avatar_url_template = Field()
badge = Field()
employments = Field()
follower_count = Field()
gender = Field()
headline = Field()
id = Field()
is_advertiser = Field()
is_blocking = Field()
is_followed = Field()
is_following = Field()
is_org = Field()
name = Field()
type = Field()
url = Field()
url_token = Field()
user_type = Field()
|
[
"noreply@github.com"
] |
mezc.noreply@github.com
|
509f86e2cfc2944eb6f6fa78124a30ee82cef12c
|
a701d44222e81e5636ba4ccabc373fc3da5f337a
|
/scripts/venv/bin/pip
|
46f899e86b10818df5cf8e4cb742a7875cbe5a4a
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
alexanderjacks/dex7
|
b738e9961782660af1ad2b1546133379678de249
|
f322dde3f32849292d9ca23ae306597ffe545efc
|
refs/heads/master
| 2023-01-07T11:57:29.149359
| 2020-02-01T08:05:39
| 2020-02-01T08:05:39
| 205,595,877
| 0
| 0
| null | 2023-01-04T08:38:06
| 2019-08-31T21:01:22
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
#!/Users/alexanderjacks/stardewdex/scripts/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"alexanderthejacks@gmail.com"
] |
alexanderthejacks@gmail.com
|
|
b33dbf58444f4d6bfb70b411d9d46f3efc815648
|
3365e4d4fc67bbefe4e8c755af289c535437c6f4
|
/.history/src/core/dialogs/swimmer_dialog_20170810150405.py
|
6407ba898248bb4dc73215c2627a021a2a97948f
|
[] |
no_license
|
kiranhegde/OncoPlotter
|
f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1
|
b79ac6aa9c6c2ca8173bc8992ba3230aa3880636
|
refs/heads/master
| 2021-05-21T16:23:45.087035
| 2017-09-07T01:13:16
| 2017-09-07T01:13:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,179
|
py
|
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem)
from PyQt5 import QtCore, QtGui
import numpy as np
import core.gui.swimmer as swimmmer
class Swimmer(QWidget, swimmmer.Ui_Swimmer):
def __init__(self, parent):
super(Swimmer,self).__init__(parent)
self.setupUi(self)
def on_swimmer_data_signal(self,signal):
self.swimmer_data = signal['swimmer_data'] #pandas dataframe
def closeEvent(self,event):
#Override closeEvent so that we hide the window rather than exit so we don't lose data
event.ignore()
self.hide()
class SwimmerPlotter(QWidget):
def __init__(self,parent=None):
super(SwimmerPlotter,self).__init__(parent)
markersize = 5 #needs to be user variable so that as more/less bars added, it looks ok
bar_width = 0.75
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas,self)
self.btn_plot = QPushButton('Default Plot')
self.btn_plot.clicked.connect(self.default_plot)
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(self.btn_plot)
self.setLayout(layout)
def on_swimmer_data_signal(self,signal):
self.swimmer_data = signal['swimmer_data'] #pandas dataframe
self.btn_plot.setEnabled(True)
def on_general_settings_signal(self,signal):
try:
hasattr(self,'ax')
self.ax.set_title(signal[0])
self.ax.set_xlabel(signal[1])
self.ax.set_ylabel(signal[2])
self.canvas.draw()
except Exception as e:
print(e)
def default_plot(self):
'''
Plot swimmer data
'''
self.figure.clear()
bar_locations = np.arange(len(self.swimmer_data.ix[:,0]))
for i in range(1,6):
|
[
"ngoyal95@terpmail.umd.edu"
] |
ngoyal95@terpmail.umd.edu
|
4917ef790ad266ca127572ab20027fad94a4b702
|
7fb770d2e905333bcf7a7fd479adb194200a1b34
|
/vision/bin/iptest3
|
9e218e0cb97dea7ef54abbf918e449f280d628ec
|
[
"MIT"
] |
permissive
|
JiangWenPL/CS231n
|
59424681220fdb83d7c2153dae05f1393e1df7cb
|
c88c61ec78441389df1151080258d99473a1d3b3
|
refs/heads/master
| 2020-03-08T20:41:24.976987
| 2018-05-11T13:41:05
| 2018-05-11T13:41:05
| 128,388,531
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
#!/Users/jiangwen/Desktop/CS231n/vision/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from IPython.testing.iptestcontroller import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"JiangWenPL@users.noreply.github.com"
] |
JiangWenPL@users.noreply.github.com
|
|
7f01ae969cfec51582d547c6680543e7f9442fcd
|
b015f287c3d690df7242d86a77b347b5e724bfd9
|
/xnmt/rl/policy_priors.py
|
638df0f28fe96b74007b010c3edc3e0099ae898d
|
[
"Apache-2.0"
] |
permissive
|
philip30/xnmt
|
0db384d36c18826968bdef41badeef7702c61b15
|
b5e6985d3bedfac102312cab030a60594bc17baf
|
refs/heads/master
| 2020-05-16T15:12:19.995227
| 2019-05-09T08:23:05
| 2019-05-09T08:23:05
| 183,123,358
| 0
| 1
|
NOASSERTION
| 2019-05-19T16:32:40
| 2019-04-24T01:31:28
|
Python
|
UTF-8
|
Python
| false
| false
| 34
|
py
|
class PolicyPrior(object):
pass
|
[
"philip.arthur30@gmail.com"
] |
philip.arthur30@gmail.com
|
3d1b354c2a01e91e52bb9c95b7c4dcb1352f0e23
|
b7e93f7c5b2fc830cb3fc49825e25638e338e35b
|
/project/settings/development.py
|
91224dcf6afe12fbf381206b0ff286a96f2b657d
|
[
"MIT"
] |
permissive
|
thomasleese/django-template
|
7e5fc3c63e121a6587bb806887123971ec624f4b
|
9f0a5835ce65db8c19e0d04662d7ed7a1429950c
|
refs/heads/main
| 2023-02-17T00:06:21.611385
| 2021-01-21T07:46:30
| 2021-01-21T07:46:30
| 323,590,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
from .common import * # noqa: F403
SECRET_KEY = "CHANGE-ME!"
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": str(BASE_DIR / "db.sqlite3"), # noqa: F405
}
}
|
[
"thomas@leese.io"
] |
thomas@leese.io
|
43c93d0986a46aea9421cd55b036abe186385fab
|
344507fea1e483b2743be7118430dd92014f4c2f
|
/Hunter/spiders/news/japan_gongtong/gongtong_news_economy_science.py
|
7bb3fca4599a41e2762e62ebaad584df6a104674
|
[] |
no_license
|
sunheqing/Hunter
|
1f755b7d2171cb5defcece9af909422dc159fab2
|
d1b34aac6581f45d4f3d55703c80561cd6c4a526
|
refs/heads/master
| 2020-03-22T02:34:58.646830
| 2018-07-27T10:09:07
| 2018-07-27T10:09:07
| 139,379,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# shq
from scrapy import Spider, Request
from Hunter.items import items
import os
import re
from urlparse import urljoin
class GongTongEconomyScience(Spider):
name = "gong_tong_economy_science_spider"
start_urls = [
'https://china.kyodonews.net/news/economy_science'
]
def parse(self, response):
try:
every_li = response.xpath('//ul[@id="js-postListItems"]/li')
for li in every_li:
every_url = urljoin(response.url, li.xpath('.//a/@href').extract_first())
title = li.xpath('.//a/h3/text()').extract_first()
time = li.xpath('.//p[@class="time"]/text()').extract_first()
yield response.follow(every_url, callback=self.parse_every, meta={'title': title, 'time': time})
except Exception:
import traceback
traceback.print_exc()
def parse_every(self, response):
try:
            texts = ''.join(response.xpath('//div[@class="article-body"]/p/text()').extract())  # this site has a very consistent article structure
get_url = response.xpath('//div[@class="mainpic"]/img/@src').extract_first()
item = items.NewsItem()
item['title'] = response.meta.get('title')
item['time'] = response.meta.get('time').replace(' ','').replace('\n','').replace('\r','').replace('\t','').replace(u'\u3000','').replace(u'\xa0','').replace('|','')
item['source'] = response.url
item['origin'] = u"日本共同社"
item['news_type'] = u"经济 科学"
item['lable'] = u"经济 科学"
item['content'] = texts
if get_url:
photo_url = urljoin(response.url, get_url)
item['image_src'] = photo_url
else:
item['image_src'] = ''
print item
yield item
except Exception:
import traceback
traceback.print_exc()
|
[
"sunheqing@basaltmatrix.com"
] |
sunheqing@basaltmatrix.com
|
d99d6ed3ca318be0c23513944d85a8ab42e4ec0b
|
7cbff51ecdcca2c234318c5f8a4679c350ec9f1a
|
/average_perceptron.py
|
1d3ec3e88bd05ae49ac0d3030a9ca00381d0a9f5
|
[] |
no_license
|
ericlamnguyen/MNIST
|
27c94f00aa864e8f499c1bb98a176a5beb0a7f8a
|
544184335d4be36db0b89108dc8cc5ec5c0f7cd9
|
refs/heads/master
| 2022-01-27T01:35:47.124873
| 2022-01-24T04:42:04
| 2022-01-24T04:42:04
| 152,354,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,628
|
py
|
import sys
import struct as st
import numpy as np
import gzip
def parseMNIST(dataset):
"""
this function parses the binary data from MNIST files and return a numpy 2D
array of the instances
Params:
dataset (str): "training" or "testing"
Returns:
data (numpy 2D array): A numpy 2D array with 786 columns, the first 784 columns
are flattened 28x28 image matrix, the 785th column is
the additional dimension to account for the bias term,
the 786th column is the corresponding label of the instance
"""
    if dataset == "training":
img_file = 'train-images-idx3-ubyte.gz'
label_file = 'train-labels-idx1-ubyte.gz'
    elif dataset == "testing":
img_file = 't10k-images-idx3-ubyte.gz'
label_file = 't10k-labels-idx1-ubyte.gz'
with gzip.open(label_file, 'rb') as flbl:
magic, num = st.unpack(">II", flbl.read(8))
lbl = np.fromstring(flbl.read(), dtype=np.int8)
with gzip.open(img_file, 'rb') as fimg:
zero, data_type, dims = st.unpack('>HBB', fimg.read(4))
shape = tuple(st.unpack('>I', fimg.read(4))[0] for d in range(dims))
img = np.fromstring(fimg.read(), dtype=np.uint8).reshape(shape)
# an empty list
data = []
# flatten each 28x28 2D array into a 1D array, normalize the value and append to data
for i in range(len(lbl)):
row = img[i].reshape(784)
row = np.around((row * 1.0 / 255))
data.append(row)
# convert data from list to 2D array
data = np.array(data)
# adding 1 more entry to each instance to account for bias term
# bias_term is a 1D array with 1 column of 1s
bias_term = np.ones((len(lbl),1))
# append additional column of 1s to data 2D array
data = np.hstack((data, bias_term))
# append additional column of corresponding labels to data 2D array
lbl = lbl.reshape(-1, 1)
data = np.hstack((data, lbl))
return data
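# For example (illustrative), for the first parsed row:
#   data[0, :784] -> the flattened, binarised 28x28 pixels (0.0 or 1.0 after rounding)
#   data[0, 784]  -> 1.0, the constant bias input
#   data[0, 785]  -> the digit label, 0-9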
def F1_score(actual_labels, predictions):
"""
this function calculate the macro F1 score
Params:
actual_labels (array)
predictions (array)
Returns:
F1_score
"""
len_ = len(actual_labels)
F1s = []
label_isPresent = [True for i in range(0,10)]
# calculate F1 score and accuracy for each label
for label in range(0,10):
TP = 0
TN = 0
FP = 0
FN = 0
for j in range(0,len_):
if predictions[j] == label:
if actual_labels[j] == label:
TP += 1
continue
else:
FP += 1
continue
else:
if actual_labels[j] == label:
FN += 1
continue
else:
TN += 1
continue
# calculate precision, recall, F1 score and accuracy for current label
precision = TP*1.0 / (TP + FP + 0.0001)
recall = TP*1.0 / (TP + FN + 0.0001)
# calculate F1 score for each label
F1 = 2 * precision * recall / (precision + recall + 0.0001)
F1s.append(F1)
# determine if a label is present in the test set, if it is not it will
# be ignored in the calculation of the final macro F1 score
if TP == 0 and FN == 0:
label_isPresent[label] = False
# calculate the final macro F1 score
sum_F1 = 0
len_F1 = 0
for j in range(0,10):
if label_isPresent[j] == True:
sum_F1 += F1s[j]
len_F1 += 1
return (sum_F1 * 100.0 / len_F1)
def train_average_perceptron(training_data, training_size, epoch, r):
"""
this function trains the classifiers via the Averaged Perceptron Algorithm
Params:
training_data (numpy 2D array): 10,000 training instances
training_size (int): Number of training instances used
epoch (int): Number of iterations to run through the training instances
r (float): learning rate
Returns:
Ws (numpy 2D array): 10 classifiers w corresponding to 10 labels from 0 to 9
"""
# generate 2D array, each row is an array of 785 randomly generated values
Ws = np.random.rand(10, 785)
Ws_sum = np.zeros((10, 785), dtype=float)
# loop epoch iterations
for i in range(epoch):
# shuffle training data
np.random.shuffle(training_data)
# loop through the training instances
for j in range(training_size):
x = training_data[j, 0:785]
label = training_data[j, 785]
# loop through each of the classifiers
for k in range(10):
w = Ws[k]
if np.dot(x,w) >= 0:
y_prediction = 1
else:
y_prediction = 0
# if the label is the same as the index of the classifer, actual_label = 1
# else it is 0
y_actual = 0
if k == label:
y_actual = 1
# update the classifier w and Ws
w = w + (y_actual - y_prediction) * 1.0 * r * x
Ws[k] = w
Ws_sum[k] += w
# average the classifiers over all updates
count = epoch * training_size
return Ws_sum * 1.0 / count
def predict_average_perceptron(Ws, data):
"""
this function uses the learned classifiers to predict the label
Params:
Ws (numpy 2D array): Each row is a learned classifier, the index of the
classifier is the target label that it is supposed
to predict
data (numpy 2D array): The dataset to perform prediction on
Returns:
Macro F1 score for the classifiers on the dataset
"""
actual_labels = []
predictions = []
# loop through the dataset
for i in range(len(data)):
x = data[i, 0:785]
label = data[i, 785]
actual_labels.append(label)
# loop through the classifiers
rank = []
for j in range(10):
w = Ws[j]
xTw = np.dot(x,w)
rank.append((xTw, j))
rank = sorted(rank, reverse = True)
prediction = rank[0][1]
predictions.append(prediction)
return F1_score(actual_labels, predictions)
if __name__ == "__main__":
"""
main function
"""
# read arguments from console
training_size = int(sys.argv[1])
epoch = int(sys.argv[2])
r = float(sys.argv[3])
# parse MNIST file and get the train and data set
# only get the first 10,000 instances in the training set, discard the remaining 50,000
training_data = parseMNIST(dataset='training')[0:10000, :]
test_data = parseMNIST(dataset='testing')
# train the classifiers via the Averaged Perceptron Algorithm
Ws = train_average_perceptron(training_data, training_size, epoch, r)
# calculate F1 scores for training data and test data
F1_score_training_data = predict_average_perceptron(Ws, training_data)
F1_score_test_data = predict_average_perceptron(Ws, test_data)
# print out to console
print('')
print("Training F1 score: " + str(round(F1_score_training_data / 100.0, 2)))
print("Test F1 score: " + str(round(F1_score_test_data / 100.0, 2)))
print('')
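# --- illustrative sketch (not part of the original script) ---
# Once Ws has been learned, a single instance can be scored against all ten averaged
# classifiers and the arg-max taken as the predicted digit; names here are hypothetical.
def _predict_single(Ws, instance):
    x = instance[0:785]                             # 784 pixels plus the bias term
    scores = [np.dot(x, Ws[k]) for k in range(10)]  # one score per digit class
    return int(np.argmax(scores))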
|
[
"noreply@github.com"
] |
ericlamnguyen.noreply@github.com
|
f62d1f4d9d18e4a14b3fe1f9d973a55f68525fdf
|
707483bde7bf6b8b35c8ad933550a1a179dfe504
|
/正则表达式re操作/demo02.py
|
ab4d77b227f45d930de76fe2137089218244f1d7
|
[] |
no_license
|
AH-Toby/RegularExpressionBasics
|
57a625730c52691498517768fb10b795b77cd503
|
f7894f61d4e6f3f9c58e04a6c343636ec84562e5
|
refs/heads/master
| 2021-05-25T07:40:10.448334
| 2020-04-08T17:02:47
| 2020-04-08T17:02:47
| 253,720,803
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import re
# using the . metacharacter
# match a single character
char = re.match('.', 'M')
print(char.group())
# match at the start of a longer piece of text
resultString = re.match("s.ort", "short is long!")
print(resultString.group())
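# extra illustration (not in the original demo, same sample text): re.match only
# anchors at the beginning of the string, so a pattern occurring later needs re.search
anchored = re.match("long", "short is long!")   # None, "long" is not at the start
anywhere = re.search("long", "short is long!")  # finds "long" later in the string
print(anchored is None, anywhere.group())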
|
[
"1751001928@qq.com"
] |
1751001928@qq.com
|
d6f6ea6881821f5c0756efe1baf745c624c220ce
|
b13f200db3238f3ac60dd7cea240c40ed0a55da6
|
/vespa/transit_basic.py
|
3b407434668af0049821102c1247e1deda928ffa
|
[
"MIT"
] |
permissive
|
TheBatmanofButler/VESPA
|
13447f6c93b6a0afe6d3f03f3a745141c1e7f621
|
6cc42fcc995fe02e8d63f6d3d1943378e56cec04
|
refs/heads/master
| 2021-01-22T14:46:32.777689
| 2015-06-09T19:40:17
| 2015-06-09T19:40:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,410
|
py
|
from __future__ import print_function, division
import os
import logging
import pkg_resources
#test if building documentation on readthedocs.org
on_rtd = False
try:
import numpy as np
import numpy.random as rand
from scipy.optimize import leastsq
from scipy.ndimage import convolve1d
from scipy.interpolate import LinearNDInterpolator as interpnd
except ImportError:
on_rtd = True
np, rand, leastsq, convolve1d, interpnd = (None, None, None, None, None)
from .orbits.kepler import Efn
from .stars.utils import rochelobe, withinroche, semimajor
if not on_rtd:
from vespa_transitutils import find_eclipse
from vespa_transitutils import traptransit, traptransit_resid
import emcee
else:
find_eclipse, traptransit, traptransit_resid = (None, None, None)
emcee = None
if not on_rtd:
import astropy.constants as const
AU = const.au.cgs.value
RSUN = const.R_sun.cgs.value
MSUN = const.M_sun.cgs.value
REARTH = const.R_earth.cgs.value
MEARTH = const.M_earth.cgs.value
DAY = 86400
DATAFOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
LDDATA = np.recfromtxt('{}/keplerld.dat'.format(DATAFOLDER),names=True)
LDOK = ((LDDATA.teff < 10000) & (LDDATA.logg > 2.0) & (LDDATA.feh > -2))
LDPOINTS = np.array([LDDATA.teff[LDOK],LDDATA.logg[LDOK]]).T
U1FN = interpnd(LDPOINTS,LDDATA.u1[LDOK])
U2FN = interpnd(LDPOINTS,LDDATA.u2[LDOK])
else:
const, AU, RSUN, MSUN = (None, None, None, None)
REARTH, MEARTH, DAY = (None, None, None)
DATAFOLDER = None
LDDATA, LDOK, LDPOINTS, U1FN, U2FN = (None, None, None, None, None)
def ldcoeffs(teff,logg=4.5,feh=0):
"""
Returns limb-darkening coefficients in Kepler band.
"""
teffs = np.atleast_1d(teff)
loggs = np.atleast_1d(logg)
Tmin,Tmax = (LDPOINTS[:,0].min(),LDPOINTS[:,0].max())
gmin,gmax = (LDPOINTS[:,1].min(),LDPOINTS[:,1].max())
teffs[(teffs < Tmin)] = Tmin + 1
teffs[(teffs > Tmax)] = Tmax - 1
loggs[(loggs < gmin)] = gmin + 0.01
loggs[(loggs > gmax)] = gmax - 0.01
u1,u2 = (U1FN(teffs,loggs),U2FN(teffs,loggs))
return u1,u2
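# Illustrative sketch (not part of the original module): roughly solar input values
# give Kepler-band coefficients close to the u1=0.394, u2=0.261 defaults used below.
def _ldcoeffs_example():
    u1, u2 = ldcoeffs(5770, logg=4.44, feh=0.0)
    return u1, u2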
"""
def correct_fs(fs):
fflat = fs.ravel().copy()
wbad = np.where(fflat > 1)[0]
#identify lowest and highest index of valid flux
ilowest=0
while fflat[ilowest] > 1:
ilowest += 1
ihighest = len(fflat)-1
while fflat[ihighest] > 1:
ihighest -= 1
wlo = wbad - 1
whi = wbad + 1
#find places where wlo index is still in wbad
ilo = np.searchsorted(wbad,wlo)
mask = wbad[ilo]==wlo
while np.any(mask):
wlo[mask] -= 1
ilo = np.searchsorted(wbad,wlo)
mask = wbad[ilo]==wlo
ihi = np.searchsorted(wbad,whi)
ihi = np.clip(ihi,0,len(wbad)-1) #make sure no IndexError
mask = wbad[ihi]==whi
while np.any(mask):
whi[mask] += 1
ihi = np.searchsorted(wbad,whi)
ihi = np.clip(ihi,0,len(wbad)-1) #make sure no IndexError
mask = wbad[ihi]==whi
wlo = np.clip(wlo,ilowest,ihighest)
whi = np.clip(whi,ilowest,ihighest)
fflat[wbad] = (fflat[whi] + fflat[wlo])/2. #slightly kludge-y, esp. if there are consecutive bad vals
return fflat.reshape(fs.shape)
"""
class MAInterpolationFunction(object):
"""
Object enabling fast, vectorized evaluations of Mandel-Agol transit model.
Interpolates on pre-defined grid calculating Mandel & Agol (2002)
and Agol & Eastman (2008) calculations.
This object is generally used as follows::
>>> import numpy as np
>>> from vespa import MAInterpolationFunction
>>> mafn = MAInterpolationFunction() #takes a few seconds
>>> ps = 0.1 # radius ratio; can be float or array-like
>>> zs = np.abs(np.linspace(-1,1,1000)) #impact parameters
>>> fs = mafn(ps, zs) # relative flux
Even cooler, it can be called with different-sized arrays for
radius ratio and impact parameter, in which case it returns a
flux array of shape ``(nps, nzs)``. This is clearly awesome
for generating populations of eclipses::
>>> ps = np.linspace(0.01,0.1,100) # radius ratios
>>> zs = np.abs(np.linspace(-1,1,1000)) #impact parameters
>>> fs = mafn(ps, zs)
>>> fs.shape
(100, 1000)
It can also be called with different limb darkening parameters,
in which case arrays of ``u1`` and ``u2`` should be the third
and fourth argument, after ``ps`` and ``zs``, with the same shape
as ``ps`` (radius ratios).
:param u1,u2: (optional)
Default quadratic limb darkening parameters. Setting
these only enables faster evaluation; you can always call with
different values.
:param pmin,pmax,nps,nzs,zmax: (optional)
Parameters describing grid in p and z.
"""
def __init__(self,u1=0.394,u2=0.261,pmin=0.007,pmax=2,nps=200,nzs=200,zmax=None):
self.u1 = u1
self.u2 = u2
self.pmin = pmin
self.pmax = pmax
if zmax is None:
zmax = 1+pmax
self.zmax = zmax
self.nps = nps
ps = np.logspace(np.log10(pmin),np.log10(pmax),nps)
if pmax < 0.5:
zs = np.concatenate([np.array([0]),ps-1e-10,ps,np.arange(pmax,1-pmax,0.01),
np.arange(1-pmax,zmax,0.005)])
elif pmax < 1:
zs = np.concatenate([np.array([0]),ps-1e-10,ps,np.arange(1-pmax,zmax,0.005)])
else:
zs = np.concatenate([np.array([0]),ps-1e-10,ps,np.arange(pmax,zmax,0.005)])
self.nzs = np.size(zs)
#zs = linspace(0,zmax,nzs)
#zs = concatenate([zs,ps,ps+1e-10])
mu0s = np.zeros((np.size(ps),np.size(zs)))
lambdads = np.zeros((np.size(ps),np.size(zs)))
etads = np.zeros((np.size(ps),np.size(zs)))
fs = np.zeros((np.size(ps),np.size(zs)))
for i,p0 in enumerate(ps):
f,res = occultquad(zs,u1,u2,p0,return_components=True)
mu0s[i,:] = res[0]
lambdads[i,:] = res[1]
etads[i,:] = res[2]
fs[i,:] = f
P,Z = np.meshgrid(ps,zs)
points = np.array([P.ravel(),Z.ravel()]).T
self.mu0 = interpnd(points,mu0s.T.ravel())
##need to make two interpolation functions for lambdad
## b/c it's strongly discontinuous at z=p
mask = (Z<P)
pointmask = points[:,1] < points[:,0]
w1 = np.where(mask)
w2 = np.where(~mask)
wp1 = np.where(pointmask)
wp2 = np.where(~pointmask)
self.lambdad1 = interpnd(points[wp1],lambdads.T[w1].ravel())
self.lambdad2 = interpnd(points[wp2],lambdads.T[w2].ravel())
def lambdad(p,z):
#where p and z are exactly equal, this will return nan....
p = np.atleast_1d(p)
z = np.atleast_1d(z)
l1 = self.lambdad1(p,z)
l2 = self.lambdad2(p,z)
bad1 = np.isnan(l1)
l1[np.where(bad1)]=0
l2[np.where(~bad1)]=0
return l1*~bad1 + l2*bad1
self.lambdad = lambdad
#self.lambdad = interpnd(points,lambdads.T.ravel())
self.etad = interpnd(points,etads.T.ravel())
self.fn = interpnd(points,fs.T.ravel())
def __call__(self,ps,zs,u1=.394,u2=0.261,force_broadcast=False):
""" returns array of fluxes; if ps and zs aren't the same shape, then returns array of
shape (nps, nzs)
"""
#return self.fn(ps,zs)
if np.size(ps)>1 and (np.size(ps)!=np.size(zs) or force_broadcast):
P = ps[:,None]
if np.size(u1)>1 or np.size(u2)>1:
if u1.shape != ps.shape or u2.shape != ps.shape:
raise ValueError('limb darkening coefficients must be same size as ps')
U1 = u1[:,None]
U2 = u2[:,None]
else:
U1 = u1
U2 = u2
else:
P = ps
U1 = u1
U2 = u2
if np.size(u1)>1 or np.any(u1 != self.u1) or np.any(u2 != self.u2):
mu0 = self.mu0(P,zs)
lambdad = self.lambdad(P,zs)
etad = self.etad(P,zs)
fs = 1. - ((1-U1-2*U2)*(1-mu0) + (U1+2*U2)*(lambdad+2./3*(P > zs)) + U2*etad)/(1.-U1/3.-U2/6.)
#if fix:
# fs = correct_fs(fs)
else:
fs = self.fn(P,zs)
return fs
def impact_parameter(a, R, inc, ecc=0, w=0, return_occ=False):
"""a in AU, R in Rsun, inc & w in radians
"""
b_tra = a*AU*np.cos(inc)/(R*RSUN) * (1-ecc**2)/(1 + ecc*np.sin(w))
if return_occ:
b_occ = a*AU*np.cos(inc)/(R*RSUN) * (1-ecc**2)/(1 - ecc*np.sin(w))
return b_tra, b_occ
else:
return b_tra
def transit_T14(P,Rp,Rs=1,b=0,Ms=1,ecc=0,w=0):
"""P in days, Rp in Earth radii, Rs in Solar radii, b=impact parameter, Ms Solar masses. Returns T14 in hours. w in deg.
"""
a = semimajor(P,Ms)*AU
k = Rp*REARTH/(Rs*RSUN)
inc = np.pi/2 - b*RSUN/a
return P*DAY/np.pi*np.arcsin(Rs*RSUN/a * np.sqrt((1+k)**2 - b**2)/np.sin(inc)) *\
np.sqrt(1-ecc**2)/(1+ecc*np.sin(w*np.pi/180)) / 3600.
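# Illustrative sketch (assumed inputs, not from the original module): T14 for an
# Earth-size planet on a one-year orbit of a Sun-like star, central transit.
def _t14_example():
    dur_hours = transit_T14(P=365.25, Rp=1.0, Rs=1.0, b=0.0, Ms=1.0)
    return dur_hours  # roughly 13 hours for these inputs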
def transit_T23(P,Rp,Rs=1,b=0,Ms=1,ecc=0,w=0):
a = semimajor(P,Ms)*AU
k = Rp*REARTH/(Rs*RSUN)
inc = np.pi/2 - b*RSUN/a
return P*DAY/np.pi*np.arcsin(Rs*RSUN/a * np.sqrt((1-k)**2 - b**2)/np.sin(inc)) *\
np.sqrt(1-ecc**2)/(1+ecc*np.sin(w*np.pi/180)) / 3600. #*24*60
def eclipse_depth(mafn,Rp,Rs,b,u1=0.394,u2=0.261,max_only=False,npts=100,force_1d=False):
""" Calculates average (or max) eclipse depth
***why does b>1 take so freaking long?...
"""
k = Rp*REARTH/(Rs*RSUN)
if max_only:
return 1 - mafn(k,b,u1,u2)
if np.size(b) == 1:
x = np.linspace(0,np.sqrt(1-b**2),npts)
y = b
zs = np.sqrt(x**2 + y**2)
fs = mafn(k,zs,u1,u2) # returns array of shape (nks,nzs)
depth = 1-fs
else:
xmax = np.sqrt(1-b**2)
x = np.linspace(0,1,npts)*xmax[:,None]
y = b[:,None]
zs = np.sqrt(x**2 + y**2)
fs = mafn(k,zs.ravel(),u1,u2)
if not force_1d:
fs = fs.reshape(np.size(k),*zs.shape)
depth = 1-fs
meandepth = np.squeeze(depth.mean(axis=depth.ndim-1))
return meandepth #array of average depths, shape (nks,nbs)
def minimum_inclination(P,M1,M2,R1,R2):
"""
Returns the minimum inclination at which two bodies from two given sets eclipse
Only counts systems not within each other's Roche radius
:param P:
Orbital periods.
:param M1,M2,R1,R2:
Masses and radii of primary and secondary stars.
"""
P,M1,M2,R1,R2 = (np.atleast_1d(P),
np.atleast_1d(M1),
np.atleast_1d(M2),
np.atleast_1d(R1),
np.atleast_1d(R2))
semimajors = semimajor(P,M1+M2)
rads = ((R1+R2)*RSUN/(semimajors*AU))
ok = (~np.isnan(rads) & ~withinroche(semimajors,M1,R1,M2,R2))
if ok.sum() == 0:
logging.error('P: {}'.format(P))
logging.error('M1: {}'.format(M1))
logging.error('M2: {}'.format(M2))
logging.error('R1: {}'.format(R1))
logging.error('R2: {}'.format(R2))
if np.all(withinroche(semimajors,M1,R1,M2,R2)):
raise AllWithinRocheError('All simulated systems within Roche lobe')
else:
raise EmptyPopulationError('no valid systems! (see above)')
mininc = np.arccos(rads[ok].max())*180/np.pi
return mininc
def a_over_Rs(P,R2,M2,M1=1,R1=1,planet=True):
"""
Returns a/Rs for given parameters.
"""
if planet:
M2 *= REARTH/RSUN
R2 *= MEARTH/MSUN
return semimajor(P,M1+M2)*AU/(R1*RSUN)
def eclipse_tz(P,b,aR,ecc=0,w=0,npts=200,width=1.5,sec=False,dt=1,approx=False,new=True):
"""Returns ts and zs for an eclipse (npts points right around the eclipse)
:param P,b,aR:
Period, impact parameter, a/Rstar.
:param ecc,w:
Eccentricity, argument of periapse.
:param npts:
Number of points in transit to return.
:param width:
How much to go out of transit. 1.5 is a good choice.
:param sec:
Whether to return the values relevant to the secondary occultation
rather than primary eclipse.
:param dt:
Spacing of simulated data points, in minutes.
:param approx:
Whether to use the approximate expressions to find the mean
anomalies. Default is ``False`` (exact calculation).
:param new:
Meaningless.
:return ts,zs:
Times from mid-transit (in days) and impact parameters.
"""
if sec:
eccfactor = np.sqrt(1-ecc**2)/(1-ecc*np.sin(w*np.pi/180))
else:
eccfactor = np.sqrt(1-ecc**2)/(1+ecc*np.sin(w*np.pi/180))
if eccfactor < 1:
width /= eccfactor
#if width > 5:
# width = 5
if new:
#finding the mean anomaly boundaries of the eclipse
#shoot for at least 100 pts in transit
n = int(100 * (np.pi * aR))
Ms = np.linspace(-np.pi,np.pi,n)
if ecc != 0:
Es = Efn(Ms,ecc) #eccentric anomalies
else:
Es = Ms
zs,in_eclipse = find_eclipse(Es,b,aR,ecc,w,width,sec)
if in_eclipse.sum() < 2:
logging.debug('no eclipse because fewer than 2 points '+
'in eclipse. [Es,b,aR,ecc,w,width,sec] = ' +
'{}'.format([Es,b,aR,ecc,w,width,sec]))
raise NoEclipseError
wecl = np.where(in_eclipse)
subMs = Ms[wecl]
dMs = subMs[1:] - subMs[:-1]
if np.any(subMs < 0) and dMs.max()>1: #if there's a discontinuous wrap-around...
subMs[np.where(subMs < 0)] += 2*np.pi
#logging.debug('subMs: {}'.format(subMs))
minM,maxM = (subMs.min(),subMs.max())
#logging.debug('minM: {}, maxM: {}'.format(minM,maxM))
#dM = 2*np.pi*dt/(P*24*60) #the spacing in mean anomaly that corresponds to dt (minutes)
#Ms = np.arange(minM,maxM+dM,dM)
Ms = np.linspace(minM, maxM, npts) #npts desired in transit, rather than using dt
if ecc != 0:
Es = Efn(Ms,ecc) #eccentric anomalies
else:
Es = Ms
zs,in_eclipse = find_eclipse(Es,b,aR,ecc,w,width,sec)
Mcenter = Ms[zs.argmin()]
phs = (Ms - Mcenter) / (2*np.pi)
ts = phs*P
#logging.debug('{} in-transit points simulated'.format(len(ts)))
return ts,zs
if not approx:
if sec:
inc = np.arccos(b/aR*(1-ecc*np.sin(w*np.pi/180))/(1-ecc**2))
else:
inc = np.arccos(b/aR*(1+ecc*np.sin(w*np.pi/180))/(1-ecc**2))
Ms = np.linspace(-np.pi,np.pi,2000) #mean anomalies around whole orbit
if ecc != 0:
Es = Efn(Ms,ecc) #eccentric anomalies
nus = 2 * np.arctan2(np.sqrt(1+ecc)*np.sin(Es/2),
np.sqrt(1-ecc)*np.cos(Es/2)) #true anomalies
else:
nus = Ms
r = aR*(1-ecc**2)/(1+ecc*np.cos(nus)) #secondary distance from primary in units of R1
X = -r*np.cos(w*np.pi/180 + nus)
Y = -r*np.sin(w*np.pi/180 + nus)*np.cos(inc)
rsky = np.sqrt(X**2 + Y**2)
if not sec:
inds = np.where((np.sin(nus + w*np.pi/180) > 0) & (rsky < width)) #where "front half" of orbit and w/in width
if sec:
inds = np.where((np.sin(nus + w*np.pi/180) < 0) & (rsky < width)) #where "front half" of orbit and w/in width
subMs = Ms[inds].copy()
if np.any((subMs[1:]-subMs[:-1]) > np.pi):
subMs[np.where(subMs < 0)] += 2*np.pi
if np.size(subMs)<2:
logging.debug('no eclipse because fewer than 2 points '+
'in subMs. Look into this...')
logging.error(subMs)
raise NoEclipseError
minM,maxM = (subMs.min(),subMs.max())
dM = 2*np.pi*dt/(P*24*60) #the spacing in mean anomaly that corresponds to dt (minutes)
Ms = np.arange(minM,maxM+dM,dM)
if ecc != 0:
Es = Efn(Ms,ecc) #eccentric anomalies
nus = 2 * np.arctan2(np.sqrt(1+ecc)*np.sin(Es/2),
np.sqrt(1-ecc)*np.cos(Es/2)) #true anomalies
else:
nus = Ms
r = aR*(1-ecc**2)/(1+ecc*np.cos(nus))
X = -r*np.cos(w*np.pi/180 + nus)
Y = -r*np.sin(w*np.pi/180 + nus)*np.cos(inc)
zs = np.sqrt(X**2 + Y**2) #rsky
#center = absolute(X).argmin()
#c = polyfit(Ms[center-1:center+2],X[center-1:center+2],1)
#Mcenter = -c[1]/c[0]
if not sec:
Mcenter = Ms[np.absolute(X[np.where(np.sin(nus + w*np.pi/180) > 0)]).argmin()]
else:
Mcenter = Ms[np.absolute(X[np.where(np.sin(nus + w*np.pi/180) < 0)]).argmin()]
phs = (Ms - Mcenter) / (2*np.pi)
wmin = np.absolute(phs).argmin()
ts = phs*P
return ts,zs
else:
if sec:
f0 = -np.pi/2 - (w*np.pi/180)
inc = np.arccos(b/aR*(1-ecc*np.sin(w*np.pi/180))/(1-ecc**2))
else:
f0 = np.pi/2 - (w*np.pi/180)
inc = np.arccos(b/aR*(1+ecc*np.sin(w*np.pi/180))/(1-ecc**2))
fmin = -np.arcsin(1./aR*np.sqrt(width**2 - b**2)/np.sin(inc))
fmax = np.arcsin(1./aR*np.sqrt(width**2 - b**2)/np.sin(inc))
if np.isnan(fmin) or np.isnan(fmax):
logging.debug('No eclipse in approximate calculation. ' +
'P=%.2f, b=%.3f, aR=%.2f, ecc=%0.2f, w=%.1f' % (P,b,aR,ecc,w))
raise NoEclipseError
fs = np.linspace(fmin,fmax,npts)
if sec:
ts = fs*P/2./np.pi * np.sqrt(1-ecc**2)/(1 - ecc*np.sin(w)) #approximation of constant angular velocity
else:
ts = fs*P/2./np.pi * np.sqrt(1-ecc**2)/(1 + ecc*np.sin(w)) #approximation of constant ang. vel.
fs += f0
rs = aR*(1-ecc**2)/(1+ecc*np.cos(fs))
xs = -rs*np.cos(w*np.pi/180 + fs)
ys = -rs*np.sin(w*np.pi/180 + fs)*np.cos(inc)
zs = aR*(1-ecc**2)/(1+ecc*np.cos(fs))*np.sqrt(1-(np.sin(w*np.pi/180 + fs))**2 * (np.sin(inc))**2)
return ts,zs
def eclipse_pars(P,M1,M2,R1,R2,ecc=0,inc=90,w=0,sec=False):
"""retuns p,b,aR from P,M1,M2,R1,R2,ecc,inc,w"""
a = semimajor(P,M1+M2)
if sec:
b = a*AU*np.cos(inc*np.pi/180)/(R1*RSUN) * (1-ecc**2)/(1 - ecc*np.sin(w*np.pi/180))
aR = a*AU/(R2*RSUN)
p0 = R1/R2
else:
b = a*AU*np.cos(inc*np.pi/180)/(R1*RSUN) * (1-ecc**2)/(1 + ecc*np.sin(w*np.pi/180))
aR = a*AU/(R1*RSUN)
p0 = R2/R1
return p0,b,aR
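# Illustrative sketch (hypothetical system, not from the original module): converting
# physical parameters into the (p0, b, aR) triple that eclipse()/eclipse_tt() expect.
def _trapezoid_from_physical_example():
    p0, b, aR = eclipse_pars(P=10., M1=1.0, M2=0.2, R1=1.0, R2=0.2,
                             ecc=0., inc=89.9, w=0.)
    dur, dep, slope = eclipse_tt(p0, b, aR, P=10.)
    return dur, dep, slope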
def eclipse(p0,b,aR,P=1,ecc=0,w=0,npts=200,MAfn=None,u1=0.394,u2=0.261,width=3,conv=False,cadence=0.020434028,frac=1,sec=False,dt=2,approx=False,new=True):
"""Returns ts, fs of simulated eclipse.
:param p0,b,aR:
Radius ratio, impact parameter, a/Rstar.
:param P:
Orbital period
:param ecc,w: (optional)
Eccentricity, argument of periapse.
:param npts: (optional)
Number of points to simulate.
:param MAfn: (optional)
:class:`MAInterpolationFunction` object.
:param u1,u2: (optional)
Quadratic limb darkening parameters.
:param width: (optional)
Argument defining how much out-of-transit to simulate. 3 is good.
:param conv: (optional)
Whether to convolve with box-car to simulate integration time.
:param cadence: (optional)
Cadence to simulate; default is Kepler cadence.
:param frac: (optional)
Fraction of total light in eclipsed object (for dilution purposes).
:param sec: (optional)
If ``True``, then simulate secondary occultation rather than eclipse.
:param dt: (optional)
Simulated spacing of theoretical data points, in minutes.
:param approx: (optional)
Whether to approximate solution to Kepler's equation or not.
:param new: (optional)
Meaningless relic [or not??]. Apparently this is in practice
``True`` by default.
:return ts,fs:
Times from mid-transit [days] and relative fluxes of simulated eclipse.
"""
if sec:
ts,zs = eclipse_tz(P,b/p0,aR/p0,ecc,w,npts=npts,width=(1+1/p0)*width,
sec=sec,dt=dt,approx=approx,new=new)
if zs.min() > (1 + 1/p0):
logging.debug('no eclipse because min z is greater than 1 + 1/p0: ' +
'[P,b/p0,aR/p0,ecc,w] = {}'.format([P,b/p0,aR/p0,ecc,w]))
raise NoEclipseError
else:
ts,zs = eclipse_tz(P,b,aR,ecc,w,npts=npts,width=(1+p0)*width,
sec=sec,dt=dt,approx=approx,new=new)
if zs.min() > (1+p0):
logging.debug('no eclipse (primary) because min z is greater ' +
'than 1 + p0: ' +
'[P,b,aR,ecc,w] = {}'.format([P,b,aR,ecc,w]))
raise NoEclipseError
if MAfn is None:
if sec:
fs = occultquad(zs,u1,u2,1/p0)
else:
fs = occultquad(zs,u1,u2,p0)
else:
if sec:
fs = MAfn(1/p0,zs,u1,u2)
else:
fs = MAfn(p0,zs,u1,u2)
fs[np.isnan(fs)] = 1.
if conv:
dt = ts[1]-ts[0]
npts = int(np.round(cadence/dt))
if npts % 2 == 0:
npts += 1
boxcar = np.ones(npts)/npts
fs = convolve1d(fs,boxcar)
fs = 1 - frac*(1-fs)
return ts,fs #ts are in the same units P is given in.
def eclipse_tt(p0,b,aR,P=1,ecc=0,w=0,npts=200,MAfn=None,u1=0.394,u2=0.261,conv=False,cadence=0.020434028,frac=1,sec=False,new=True,pars0=None):
"""
Trapezoidal parameters for simulated orbit.
All arguments passed to :func:`eclipse` except the following:
:param pars0: (optional)
Initial guess for least-sq optimization for trapezoid parameters.
:return dur,dep,slope:
Best-fit duration, depth, and T/tau for eclipse shape.
"""
ts,fs = eclipse(p0,b,aR,P,ecc,w,npts,MAfn,u1,u2,
conv=conv,cadence=cadence,frac=frac,sec=sec,new=new)
#logging.debug('{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}'.format(p0,b,aR,P,ecc,w,xmax,npts,u1,u2,leastsq,conv,cadence,frac,sec,new))
#logging.debug('ts: {} fs: {}'.format(ts,fs))
if pars0 is None:
depth = 1 - fs.min()
duration = (fs < (1-0.01*depth)).sum()/float(len(fs)) * (ts[-1] - ts[0])
tc0 = ts[fs.argmin()]
pars0 = np.array([duration,depth,5.,tc0])
dur,dep,slope,epoch = fit_traptransit(ts,fs,pars0)
return dur,dep,slope
def occultquad(z,u1,u2,p0,return_components=False):
"""
#### Mandel-Agol code:
# Python translation of IDL code.
# This routine computes the lightcurve for occultation of a
# quadratically limb-darkened source without microlensing. Please
# cite Mandel & Agol (2002) and Eastman & Agol (2008) if you make use
# of this routine in your research. Please report errors or bugs to
# jdeast@astronomy.ohio-state.edu
.. note::
Should probably wrap the Fortran code at some point.
(This particular part of the code was put together awhile ago.)
"""
z = np.atleast_1d(z)
nz = np.size(z)
lambdad = np.zeros(nz)
etad = np.zeros(nz)
lambdae = np.zeros(nz)
omega=1.-u1/3.-u2/6.
## tolerance for double precision equalities
## special case integrations
tol = 1e-14
p = np.absolute(p0)
z = np.where(np.absolute(p-z) < tol,p,z)
z = np.where(np.absolute((p-1)-z) < tol,p-1.,z)
z = np.where(np.absolute((1-p)-z) < tol,1.-p,z)
z = np.where(z < tol,0.,z)
x1=(p-z)**2.
x2=(p+z)**2.
x3=p**2.-z**2.
def finish(p,z,u1,u2,lambdae,lambdad,etad):
omega = 1. - u1/3. - u2/6.
#avoid Lutz-Kelker bias
if p0 > 0:
#limb darkened flux
muo1 = 1 - ((1-u1-2*u2)*lambdae+(u1+2*u2)*(lambdad+2./3*(p > z)) + u2*etad)/omega
#uniform disk
mu0 = 1 - lambdae
else:
#limb darkened flux
muo1 = 1 + ((1-u1-2*u2)*lambdae+(u1+2*u2)*(lambdad+2./3*(p > z)) + u2*etad)/omega
#uniform disk
mu0 = 1 + lambdae
if return_components:
return muo1,(mu0,lambdad,etad)
else:
return muo1
## trivial case of no planet
if p <= 0.:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
## Case 1 - the star is unocculted:
## only consider points with z lt 1+p
notusedyet = np.where( z < (1. + p) )[0]
if np.size(notusedyet) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
# Case 11 - the source is completely occulted:
if p >= 1.:
cond = z[notusedyet] <= p-1.
occulted = np.where(cond)#,complement=notused2)
notused2 = np.where(~cond)
#occulted = where(z[notusedyet] <= p-1.)#,complement=notused2)
if np.size(occulted) != 0:
ndxuse = notusedyet[occulted]
etad[ndxuse] = 0.5 # corrected typo in paper
lambdae[ndxuse] = 1.
# lambdad = 0 already
#notused2 = where(z[notusedyet] > p-1)
if np.size(notused2) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused2]
# Case 2, 7, 8 - ingress/egress (uniform disk only)
inegressuni = np.where((z[notusedyet] >= np.absolute(1.-p)) & (z[notusedyet] < 1.+p))
if np.size(inegressuni) != 0:
ndxuse = notusedyet[inegressuni]
tmp = (1.-p**2.+z[ndxuse]**2.)/2./z[ndxuse]
tmp = np.where(tmp > 1.,1.,tmp)
tmp = np.where(tmp < -1.,-1.,tmp)
kap1 = np.arccos(tmp)
tmp = (p**2.+z[ndxuse]**2-1.)/2./p/z[ndxuse]
tmp = np.where(tmp > 1.,1.,tmp)
tmp = np.where(tmp < -1.,-1.,tmp)
kap0 = np.arccos(tmp)
tmp = 4.*z[ndxuse]**2-(1.+z[ndxuse]**2-p**2)**2
tmp = np.where(tmp < 0,0,tmp)
lambdae[ndxuse] = (p**2*kap0+kap1 - 0.5*np.sqrt(tmp))/np.pi
# eta_1
etad[ndxuse] = 1./2./np.pi*(kap1+p**2*(p**2+2.*z[ndxuse]**2)*kap0- \
(1.+5.*p**2+z[ndxuse]**2)/4.*np.sqrt((1.-x1[ndxuse])*(x2[ndxuse]-1.)))
# Case 5, 6, 7 - the edge of planet lies at origin of star
cond = z[notusedyet] == p
ocltor = np.where(cond)#, complement=notused3)
notused3 = np.where(~cond)
#ocltor = where(z[notusedyet] == p)#, complement=notused3)
t = np.where(z[notusedyet] == p)
if np.size(ocltor) != 0:
ndxuse = notusedyet[ocltor]
if p < 0.5:
# Case 5
q=2.*p # corrected typo in paper (2k -> 2p)
Ek,Kk = ellke(q)
# lambda_4
lambdad[ndxuse] = 1./3.+2./9./np.pi*(4.*(2.*p**2-1.)*Ek+\
(1.-4.*p**2)*Kk)
# eta_2
etad[ndxuse] = p**2/2.*(p**2+2.*z[ndxuse]**2)
lambdae[ndxuse] = p**2 # uniform disk
elif p > 0.5:
# Case 7
q=0.5/p # corrected typo in paper (1/2k -> 1/2p)
Ek,Kk = ellke(q)
# lambda_3
lambdad[ndxuse] = 1./3.+16.*p/9./np.pi*(2.*p**2-1.)*Ek-\
(32.*p**4-20.*p**2+3.)/9./np.pi/p*Kk
# etad = eta_1 already
else:
# Case 6
lambdad[ndxuse] = 1./3.-4./np.pi/9.
etad[ndxuse] = 3./32.
#notused3 = where(z[notusedyet] != p)
if np.size(notused3) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused3]
# Case 2, Case 8 - ingress/egress (with limb darkening)
cond = ((z[notusedyet] > 0.5+np.absolute(p-0.5)) & \
(z[notusedyet] < 1.+p)) | \
( (p > 0.5) & (z[notusedyet] > np.absolute(1.-p)) & \
(z[notusedyet] < p))
inegress = np.where(cond)
notused4 = np.where(~cond)
#inegress = where( ((z[notusedyet] > 0.5+abs(p-0.5)) & \
#(z[notusedyet] < 1.+p)) | \
#( (p > 0.5) & (z[notusedyet] > abs(1.-p)) & \
#(z[notusedyet] < p)) )#, complement=notused4)
if np.size(inegress) != 0:
ndxuse = notusedyet[inegress]
q=np.sqrt((1.-x1[ndxuse])/(x2[ndxuse]-x1[ndxuse]))
Ek,Kk = ellke(q)
n=1./x1[ndxuse]-1.
# lambda_1:
lambdad[ndxuse]=2./9./np.pi/np.sqrt(x2[ndxuse]-x1[ndxuse])*\
(((1.-x2[ndxuse])*(2.*x2[ndxuse]+x1[ndxuse]-3.)-\
3.*x3[ndxuse]*(x2[ndxuse]-2.))*Kk+(x2[ndxuse]-\
x1[ndxuse])*(z[ndxuse]**2+7.*p**2-4.)*Ek-\
3.*x3[ndxuse]/x1[ndxuse]*ellpic_bulirsch(n,q))
#notused4 = where( ( (z[notusedyet] <= 0.5+abs(p-0.5)) | \
# (z[notusedyet] >= 1.+p) ) & ( (p <= 0.5) | \
# (z[notusedyet] <= abs(1.-p)) | \
# (z[notusedyet] >= p) ))
if np.size(notused4) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
notusedyet = notusedyet[notused4]
# Case 3, 4, 9, 10 - planet completely inside star
if p < 1.:
cond = z[notusedyet] <= (1.-p)
inside = np.where(cond)
notused5 = np.where(~cond)
#inside = where(z[notusedyet] <= (1.-p))#, complement=notused5)
if np.size(inside) != 0:
ndxuse = notusedyet[inside]
## eta_2
etad[ndxuse] = p**2/2.*(p**2+2.*z[ndxuse]**2)
## uniform disk
lambdae[ndxuse] = p**2
## Case 4 - edge of planet hits edge of star
edge = np.where(z[ndxuse] == 1.-p)#, complement=notused6)
if np.size(edge[0]) != 0:
## lambda_5
lambdad[ndxuse[edge]] = 2./3./np.pi*np.arccos(1.-2.*p)-\
4./9./np.pi*np.sqrt(p*(1.-p))*(3.+2.*p-8.*p**2)
if p > 0.5:
lambdad[ndxuse[edge]] -= 2./3.
notused6 = np.where(z[ndxuse] != 1.-p)
if np.size(notused6) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
ndxuse = ndxuse[notused6[0]]
## Case 10 - origin of planet hits origin of star
origin = np.where(z[ndxuse] == 0)#, complement=notused7)
if np.size(origin) != 0:
## lambda_6
lambdad[ndxuse[origin]] = -2./3.*(1.-p**2)**1.5
notused7 = np.where(z[ndxuse] != 0)
if np.size(notused7) == 0:
return finish(p,z,u1,u2,lambdae,lambdad,etad)
ndxuse = ndxuse[notused7[0]]
q=np.sqrt((x2[ndxuse]-x1[ndxuse])/(1.-x1[ndxuse]))
n=x2[ndxuse]/x1[ndxuse]-1.
Ek,Kk = ellke(q)
## Case 3, Case 9 - anywhere in between
## lambda_2
lambdad[ndxuse] = 2./9./np.pi/np.sqrt(1.-x1[ndxuse])*\
((1.-5.*z[ndxuse]**2+p**2+x3[ndxuse]**2)*Kk+\
(1.-x1[ndxuse])*(z[ndxuse]**2+7.*p**2-4.)*Ek-\
3.*x3[ndxuse]/x1[ndxuse]*ellpic_bulirsch(n,q))
## if there are still unused elements, there's a bug in the code
## (please report it)
#notused5 = where(z[notusedyet] > (1.-p))
if np.size(notused5[0]) != 0:
logging.error("The following values of z didn't fit into a case:")
return finish(p,z,u1,u2,lambdae,lambdad,etad)
# Computes Hasting's polynomial approximation for the complete
# elliptic integral of the first (ek) and second (kk) kind
def ellke(k):
m1=1.-k**2
logm1 = np.log(m1)
a1=0.44325141463
a2=0.06260601220
a3=0.04757383546
a4=0.01736506451
b1=0.24998368310
b2=0.09200180037
b3=0.04069697526
b4=0.00526449639
ee1=1.+m1*(a1+m1*(a2+m1*(a3+m1*a4)))
ee2=m1*(b1+m1*(b2+m1*(b3+m1*b4)))*(-logm1)
ek = ee1+ee2
a0=1.38629436112
a1=0.09666344259
a2=0.03590092383
a3=0.03742563713
a4=0.01451196212
b0=0.5
b1=0.12498593597
b2=0.06880248576
b3=0.03328355346
b4=0.00441787012
ek1=a0+m1*(a1+m1*(a2+m1*(a3+m1*a4)))
ek2=(b0+m1*(b1+m1*(b2+m1*(b3+m1*b4))))*logm1
kk = ek1-ek2
return [ek,kk]
# Computes the complete elliptical integral of the third kind using
# the algorithm of Bulirsch (1965):
def ellpic_bulirsch(n,k):
kc=np.sqrt(1.-k**2); p=n+1.
if(p.min() < 0.):
logging.warning('Negative p')
m0=1.; c=1.; p=np.sqrt(p); d=1./p; e=kc
while 1:
f = c; c = d/p+c; g = e/p; d = 2.*(f*g+d)
p = g + p; g = m0; m0 = kc + m0
if (np.absolute(1.-kc/g)).max() > 1.e-8:
kc = 2*np.sqrt(e); e=kc*m0
else:
return 0.5*np.pi*(c*m0+d)/(m0*(m0+p))
#def traptransit(ts,p):
# return traptransit(ts,p)
def fit_traptransit(ts,fs,p0):
"""
Fits trapezoid model to provided ts,fs
"""
pfit,success = leastsq(traptransit_resid,p0,args=(ts,fs))
if success not in [1,2,3,4]:
raise NoFitError
#logging.debug('success = {}'.format(success))
return pfit
class TraptransitModel(object):
"""
Model to enable MCMC fitting of trapezoidal shape.
"""
def __init__(self,ts,fs,sigs=1e-4,maxslope=30):
self.n = np.size(ts)
if np.size(sigs)==1:
sigs = np.ones(self.n)*sigs
self.ts = ts
self.fs = fs
self.sigs = sigs
self.maxslope = maxslope
def __call__(self,pars):
pars = np.array(pars)
return traptransit_lhood(pars,self.ts,self.fs,self.sigs,maxslope=self.maxslope)
def traptransit_lhood(pars,ts,fs,sigs,maxslope=30):
if pars[0] < 0 or pars[1] < 0 or pars[2] < 2 or pars[2] > maxslope:
return -np.inf
resid = traptransit_resid(pars,ts,fs)
return (-0.5*resid**2/sigs**2).sum()
def traptransit_MCMC(ts,fs,dfs=1e-5,nwalkers=200,nburn=300,niter=1000,
threads=1,p0=[0.1,0.1,3,0],return_sampler=False,
maxslope=30):
"""
Fit trapezoidal model to provided ts, fs, [dfs] using MCMC.
Standard emcee usage.
"""
model = TraptransitModel(ts,fs,dfs,maxslope=maxslope)
sampler = emcee.EnsembleSampler(nwalkers,4,model,threads=threads)
T0 = p0[0]*(1+rand.normal(size=nwalkers)*0.1)
d0 = p0[1]*(1+rand.normal(size=nwalkers)*0.1)
slope0 = p0[2]*(1+rand.normal(size=nwalkers)*0.1)
ep0 = p0[3]+rand.normal(size=nwalkers)*0.0001
p0 = np.array([T0,d0,slope0,ep0]).T
pos, prob, state = sampler.run_mcmc(p0, nburn)
sampler.reset()
sampler.run_mcmc(pos, niter, rstate0=state)
if return_sampler:
return sampler
else:
return sampler.flatchain[:,0],sampler.flatchain[:,1],sampler.flatchain[:,2],sampler.flatchain[:,3]
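# Illustrative sketch (made-up numbers, not from the original module): fit the
# trapezoidal shape of a simulated eclipse with the MCMC helper above.
def _traptransit_mcmc_example():
    ts, fs = eclipse(p0=0.1, b=0.2, aR=15., P=3.0, conv=True)
    durs, deps, slopes, epochs = traptransit_MCMC(ts, fs, dfs=1e-4,
                                                  nwalkers=100, nburn=100, niter=200)
    return durs.mean(), deps.mean(), slopes.mean()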
##### Custom Exceptions
class NoEclipseError(Exception):
pass
class NoFitError(Exception):
pass
class EmptyPopulationError(Exception):
pass
class NotImplementedError(Exception):
pass
class AllWithinRocheError(Exception):
pass
|
[
"tim.morton@gmail.com"
] |
tim.morton@gmail.com
|
60e955d09fd9dff5c4d25bfb3c9f922d16e1ffe0
|
c2ba1de59b83d58600db335b69512319093a6a81
|
/nltk/util.py
|
5bd5b7c30efe821b79ca7ed8f62b5b4530f2dcd9
|
[] |
no_license
|
GustavoKatel/stackcontest
|
87a49be419d4bfa124fdd8bf4dc419b88f25aaf7
|
f75c1b978a8c04dc0dc909584305c0dbf4b138ee
|
refs/heads/master
| 2020-06-01T09:28:22.900817
| 2012-10-01T04:12:04
| 2012-10-01T04:12:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
#!/usr/bin/python
#coding: utf-8
import nltk
def tokenizeIt(sentence):
tokenizer = nltk.tokenize.PunktWordTokenizer()
return tokenizer.tokenize(sentence.lower())
def featureIt(tokens):
return {}.fromkeys(tokens,True)
def featuresetIt(feature,labelList):
return [ (feature,label) for label in labelList ]
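# Illustrative sketch (not part of the original helpers): chaining the three functions
# to build a labeled feature set for an nltk classifier; the text and label are made up.
def _featureset_example():
    tokens = tokenizeIt("Great answer, thanks!")
    features = featureIt(tokens)                 # {'great': True, 'answer': True, ...}
    return featuresetIt(features, ["positive"])  # [(features, 'positive')]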
|
[
"gustavobs.katel@gmail.com"
] |
gustavobs.katel@gmail.com
|
daad2b33289afc5124d355dc7371ed687541f7a5
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_205/50.py
|
c9b42cb43da39eb13706fb051b66e91cc8a899f0
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,104
|
py
|
# coding=utf-8
import sys
def simu(hd, ad, hk, ak, b, d, b_cnt, d_cnt):
hp = hd
pre = False
cnt = 0
while True:
cnt += 1
if d_cnt > 0:
if ak - d >= hp:
if pre:
return 0
pre = True
hp = hd
else:
pre = False
ak -= d
d_cnt -= 1
elif b_cnt > 0:
if ak >= hp:
if pre:
return 0
pre = True
hp = hd
else:
pre = False
ad += b
b_cnt -= 1
else:
if ad < hk and ak >= hp:
if pre:
return 0
pre = True
hp = hd
else:
pre = False
hk -= ad
if hk <= 0:
break
hp -= ak
if hp <= 0:
return 0
return cnt
def solve(hd, ad, hk, ak, b, d):
if ad >= hk:
return 1
if ak - d >= hd:
return 0
b_cnt = 0
if b > 0:
b_cnt = min(100, hk)
d_cnt = 0
if d > 0:
d_cnt = ak
res = 0
i = 0
while i <= b_cnt:
j = 0
while j <= d_cnt:
r = simu(hd, ad, hk, ak, b, d, i, j)
if res == 0 or 0 < r < res:
res = r
j += 1
i += 1
return res
def main(argv=None):
if argv is None:
argv = sys.argv
infile = argv[1]
outfile = argv[2]
pin = open(infile, "r")
pout = open(outfile, "w")
n = int(pin.readline().strip())
for i in range(n):
hd, ad, hk, ak, b, d = pin.readline().strip().split(" ")
hd = int(hd)
ad = int(ad)
hk = int(hk)
ak = int(ak)
b = int(b)
d = int(d)
print("#", str(i + 1))
res = solve(hd, ad, hk, ak, b, d)
pout.write("Case #" + str(i + 1) + ": " + ("IMPOSSIBLE" if res == 0 else str(res)) + "\n")
pin.close()
pout.close()
if __name__ == "__main__":
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
67d279cec8d429bf09392177fa8c0ee1d136d2c8
|
1621ab066a9c1c6b4a34a8d3c5ff2a117a735d5b
|
/ailog-bot/db.py
|
eeb8c917a7fe9a791727453209078c3a600bc7a5
|
[] |
no_license
|
penguinkang/logbot
|
7ee12d97d9d6c35ddbbb047db2e881fe46525c68
|
aa137ca538a471cc7b9382cff969aa133bec03cb
|
refs/heads/master
| 2023-05-11T18:28:29.044473
| 2020-07-19T08:08:36
| 2020-07-19T08:08:36
| 280,818,811
| 0
| 0
| null | 2023-05-01T21:44:31
| 2020-07-19T07:54:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
import boto3
from datetime import datetime
import pytz
import validators
from urlextract import URLExtract
def put_ailog(url, comment, dynamodb=None):
if not dynamodb:
# default AWS endpoint; for local testing one could instead pass in
# boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
dynamodb = boto3.resource('dynamodb')
localtimezone = pytz.timezone('Asia/Seoul')
today_str = datetime.now(localtimezone).strftime('%Y-%m-%d')
table = dynamodb.Table('ailogTable5')
response = table.put_item(
Item={
'comment': comment,
'url': url,
'date': today_str
}
)
return response
class MyURLExtractor(URLExtract):
cache_dir = None
def set_cache_dir(self, dir):
self.cache_dir = dir
def _get_writable_cache_dir(self):
if self.cache_dir:
return self.cache_dir
else:
return "/tmp"
def put_db(text):
extractor = MyURLExtractor(cache_dns=False, cache_dir='/tmp/')
urls = extractor.find_urls(text)
if len(urls) == 0:
return "Give me 'url and memo` to record."
# Remove URL from the text and make it to comment
comment = text
for url in urls:
comment = comment.replace(url, "")
res_text = ""
for url in urls:
valid = validators.url(url)
if not valid or not url.startswith('http'):
res_text += 'Invalid url: ' + url + '\n'
continue
res = put_ailog(url, comment)
res_text += 'Cool: ' + str(res['ResponseMetadata']['HTTPStatusCode']) + ' ' + url
return res_text
if __name__ == '__main__':
res = put_db("paperm htt?//www is very good")
print(res)
res = put_db("paperm okoko is very good")
print(res)
res = put_db("https://www.youtube.com/watch?v=1VdEw_mGjFk 이 비디오 너무 짱!")
print(res)
res = put_db("오오오 https://www.youtube.com/watch?v=1VdEw_mGjFk 이 비디오 너무 짱!")
print(res)
|
[
"Jay_Kang@intuit.com"
] |
Jay_Kang@intuit.com
|
62ac59df0d732f96e5867b9da792da1896a945df
|
1259ee2a27cbb2d7de3e034159957d6043161add
|
/tests/test_catalogs.py
|
4bfbb18a3884cb07012a351cdffe073ad7bc917b
|
[
"MIT",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
balabit-deps/balabit-os-7-sphinx
|
f7b0ad4967418f074e8876cd8c7f4a7f5cfbe5d3
|
4e18ca37f4ddddf346c0b30835a544db20887259
|
refs/heads/master
| 2020-04-07T09:14:11.757278
| 2018-04-11T21:10:19
| 2018-07-20T22:59:13
| 158,244,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
# -*- coding: utf-8 -*-
"""
test_build_base
~~~~~~~~~~~~~~~
Test the base build process.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import shutil
import pytest
from sphinx.testing.util import find_files
@pytest.fixture
def setup_test(app_params):
srcdir = app_params.kwargs['srcdir']
locale_dir = srcdir / 'locale'
# copy all catalogs into locale layout directory
for po in find_files(srcdir, '.po'):
copy_po = (locale_dir / 'en' / 'LC_MESSAGES' / po)
if not copy_po.parent.exists():
copy_po.parent.makedirs()
shutil.copy(srcdir / po, copy_po)
yield
# delete remnants left over after failed build
locale_dir.rmtree(True)
(srcdir / '_build').rmtree(True)
@pytest.mark.usefixtures('setup_test')
@pytest.mark.test_params(shared_result='test-catalogs')
@pytest.mark.sphinx(
'html', testroot='intl',
confoverrides={'language': 'en', 'locale_dirs': ['./locale']})
def test_compile_all_catalogs(app, status, warning):
app.builder.compile_all_catalogs()
locale_dir = app.srcdir / 'locale'
catalog_dir = locale_dir / app.config.language / 'LC_MESSAGES'
expect = set([
x.replace('.po', '.mo')
for x in find_files(catalog_dir, '.po')
])
actual = set(find_files(catalog_dir, '.mo'))
assert actual # not empty
assert actual == expect
@pytest.mark.usefixtures('setup_test')
@pytest.mark.test_params(shared_result='test-catalogs')
@pytest.mark.sphinx(
'html', testroot='intl',
confoverrides={'language': 'en', 'locale_dirs': ['./locale']})
def test_compile_specific_catalogs(app, status, warning):
locale_dir = app.srcdir / 'locale'
catalog_dir = locale_dir / app.config.language / 'LC_MESSAGES'
def get_actual():
return set(find_files(catalog_dir, '.mo'))
actual_on_boot = get_actual() # sphinx.mo might be included
app.builder.compile_specific_catalogs(['admonitions'])
actual = get_actual() - actual_on_boot
assert actual == set(['admonitions.mo'])
@pytest.mark.usefixtures('setup_test')
@pytest.mark.test_params(shared_result='test-catalogs')
@pytest.mark.sphinx(
'html', testroot='intl',
confoverrides={'language': 'en', 'locale_dirs': ['./locale']})
def test_compile_update_catalogs(app, status, warning):
app.builder.compile_update_catalogs()
locale_dir = app.srcdir / 'locale'
catalog_dir = locale_dir / app.config.language / 'LC_MESSAGES'
expect = set([
x.replace('.po', '.mo')
for x in find_files(catalog_dir, '.po')
])
actual = set(find_files(catalog_dir, '.mo'))
assert actual # not empty
assert actual == expect
|
[
"testbot@balabit.com"
] |
testbot@balabit.com
|
9396ffff142006a7aa4a7dc55bd6f269ec1a4f85
|
abf3e150a8e8d9db5b95c1d6a8cf2f287ce25507
|
/涨幅最高分级B.py
|
d16e6450620c54119bf65675b8591fe7178e59b3
|
[] |
no_license
|
littlestone/Hamlet
|
4e8a27bdd961867aec081004eb3b3d849a8efb3a
|
9bb57973de7a99f9d3489662f9930495fad90c4f
|
refs/heads/master
| 2023-03-15T02:31:30.485221
| 2017-10-05T14:27:42
| 2017-10-05T14:27:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,179
|
py
|
def initialize(context):
# backtest with real (unadjusted) prices
set_option('use_real_price', True)
# set slippage
set_slippage(PriceRelatedSlippage(0.01))
# total number of trades (a full exit counts as one trade)
g.trade_total_count = 0
# number of winning trades (a profitable exit counts as one win)
g.trade_success_count = 0
# statistics dict ('win' stores profitable trades, 'loss' stores losing ones)
g.statis = {'win': [], 'loss': []}
run_daily(watch,'9:34')
run_daily(change, '9:35')
log.info('initialize')
def watch(context):
log.info('watching...')
log.info(context.current_dt)
stocks = list(get_all_securities(['fjb']).index)
df1=get_price(stocks, count=4, frequency='1m',end_date=context.current_dt,fields=['close'])
#df=history(4, unit='1m', field='close', security_list=stocks)
#df=(df.iloc[0]-df.iloc[-1])/df.iloc[0]
df1=(df1['close'].iloc[0]-df1['close'].iloc[-1])/df1['close'].iloc[0]
#df= df.dropna().order(ascending=False)
df1= df1.dropna().order(ascending=False)
#df=df.head(3)
df1=df1.head(3)
print df1
holds=list(df1.index)
log.info(holds)
have_set = set(context.portfolio.positions.keys())
hold_set = set(holds)
g.to_buy = hold_set - have_set
g.to_sell = have_set - hold_set
# send position info (the WeChat notification below is left disabled)
# message1='准备卖出:%s'%(g.to_sell)
# send_message(message1, channel='weixin')
# print message1
# message2='准备买入:%s'%(g.to_buy)
# send_message(message2, channel='weixin')
# print message2
def change(context):
print '--------------------------------------------------------'
stocks = list(get_all_securities(['fjb']).index)
df=history(4, unit='1m', field='close', security_list=stocks)
df=(df.iloc[0]-df.iloc[-1])/df.iloc[0]
df= df.dropna().order(ascending=False)
df=df.head(3)
print df
log.info('changing...')
log.info(context.current_dt)
for stock in g.to_sell:
sell_amount(context,stock,0)
print 'sell:%s'%stock
if len(g.to_buy) == 0:
return
each = context.portfolio.cash/len(g.to_buy)
for stock in g.to_buy:
#price=get_price(stock, count=1, frequency='1m',end_date=context.current_dt, fields='close')['close']
price=round(float(history(1, unit='1m', field='close', security_list=stock)[stock]),3)
try:
volume = int(each/price/100*0.998) * 100
except:
volume=0
if volume > 0:
buy_amount(stock,volume)
print 'buy:%s'%stock
# called once per unit of time (once per day for daily backtests, once per minute for minute backtests)
def handle_data(context, data):
pass
# buy the specified number of shares of a stock
def buy_amount(stock, amount):
if 100 <= amount:
order(stock, +amount)
# sell the specified number of shares; amount == 0 means liquidate the entire position in that stock
def sell_amount(context, stock, amount):
if 0 == amount:
record_trade_count(context, stock)
__amount = context.portfolio.positions[stock].sellable_amount
order_target_value(stock, 0)
else:
order(stock, -amount)
# record trade counts so the win rate can be computed
def record_trade_count(context, stock):
g.trade_total_count += 1
amount = context.portfolio.positions[stock].total_amount
avg_cost = context.portfolio.positions[stock].avg_cost
price = context.portfolio.positions[stock].last_sale_price
current_value = amount * price
cost = amount * avg_cost
percent = round((current_value - cost) / cost * 100, 2)
if current_value > cost:
g.trade_success_count += 1
win = [stock, percent]
g.statis['win'].append(win)
else:
loss = [stock, percent]
g.statis['loss'].append(loss)
# print the win-rate report
def print_win_rate(current_date, print_date, context):
print current_date,print_date
if str(current_date) == str(print_date):
win_rate = 0
if 0 < g.trade_total_count and 0 < g.trade_success_count:
win_rate = round(g.trade_success_count / float(g.trade_total_count), 2)
most_win = statis_most_win_percent()
most_loss = statis_most_loss_percent()
starting_cash = context.portfolio.starting_cash
total_profit = statis_total_profit(context)
if len(most_win)!=0 and len(most_loss)!=0:
print "-"
print '------------绩效报表------------'
print '交易次数: {0}, 盈利次数: {1}, 胜率: {2}'.format(g.trade_total_count, g.trade_success_count, str(win_rate * 100) + str('%'))
print '单次盈利最高: {0}, 盈利比例: {1}%'.format(most_win['stock'], most_win['value'])
print '单次亏损最高: {0}, 亏损比例: {1}%'.format(most_loss['stock'], most_loss['value'])
print '总资产: {0}, 本金: {1}, 盈利: {2}'.format(starting_cash + total_profit, starting_cash, total_profit)
print '--------------------------------'
print "-"
else:
print len(most_win),len(most_loss)
# find the stock with the largest single-trade gain
def statis_most_win_percent():
result = {}
for statis in g.statis['win']:
if {} == result:
result['stock'] = statis[0]
result['value'] = statis[1]
else:
if statis[1] > result['value']:
result['stock'] = statis[0]
result['value'] = statis[1]
return result
# find the stock with the largest single-trade loss
def statis_most_loss_percent():
result = {}
for statis in g.statis['loss']:
if {} == result:
result['stock'] = statis[0]
result['value'] = statis[1]
else:
if statis[1] < result['value']:
result['stock'] = statis[0]
result['value'] = statis[1]
return result
# compute the total profit
def statis_total_profit(context):
return context.portfolio.portfolio_value - context.portfolio.starting_cash
|
[
"noreply@github.com"
] |
littlestone.noreply@github.com
|
2e0a163249d5fd8602d58c558cc6305977f5c5f5
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/SmartSketch_ID1046_for_PyTorch/backend/data/cityscapes_dataset.py
|
7babf09c7a27ed97b81e5970dc59ada2505f910b
|
[
"GPL-3.0-only",
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-4.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"CC-BY-4.0"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,604
|
py
|
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
"""
import os.path
from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset
class CityscapesDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='fixed')
parser.set_defaults(load_size=512)
parser.set_defaults(crop_size=512)
parser.set_defaults(display_winsize=512)
parser.set_defaults(label_nc=35)
parser.set_defaults(aspect_ratio=2.0)
parser.set_defaults(batchSize=16)
opt, _ = parser.parse_known_args()
if hasattr(opt, 'num_upsampling_layers'):
parser.set_defaults(num_upsampling_layers='more')
return parser
def get_paths(self, opt):
root = opt.dataroot
phase = 'val' if opt.phase == 'test' else 'train'
label_dir = os.path.join(root, 'gtFine', phase)
label_paths_all = make_dataset(label_dir, recursive=True)
label_paths = [p for p in label_paths_all if p.endswith('_labelIds.png')]
image_dir = os.path.join(root, 'leftImg8bit', phase)
image_paths = make_dataset(image_dir, recursive=True)
if not opt.no_instance:
instance_paths = [p for p in label_paths_all if p.endswith('_instanceIds.png')]
else:
instance_paths = []
return label_paths, image_paths, instance_paths
def paths_match(self, path1, path2):
name1 = os.path.basename(path1)
name2 = os.path.basename(path2)
# compare the first 3 components, [city]_[id1]_[id2]
return '_'.join(name1.split('_')[:3]) == \
'_'.join(name2.split('_')[:3])
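# Illustrative sketch (hypothetical file names, not part of the original class):
# paths_match keys on the first three underscore-separated components, so a label
# file and an image file from the same frame pair up despite different suffixes.
def _paths_match_example(dataset):
    label = 'aachen_000000_000019_gtFine_labelIds.png'
    image = 'aachen_000000_000019_leftImg8bit.png'
    return dataset.paths_match(label, image)  # True: [city]_[id1]_[id2] agree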
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
9f39cbc22d17ed80320c3d912128d2c052b91042
|
259cc44b3f829ccb0d534534bcf51b5b47014dbc
|
/app.py
|
0008aeabc5ba08396d9dfe1da650a78653425e53
|
[] |
no_license
|
Axorimaster/prueba_flask
|
bb60fb51ec4673eed6f5ee3895b365c794ec54e0
|
de1ab9981832e96124b738f9f8beecffbb41dd6a
|
refs/heads/main
| 2023-01-09T03:58:16.828968
| 2020-10-30T17:15:58
| 2020-10-30T17:15:58
| 308,175,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
from flask import Flask, render_template, request, redirect, url_for, flash
from flask_mysqldb import MySQL
app = Flask(__name__)
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'contactosdb'
mysql = MySQL(app)
app.secret_key = 'mysecretkey'
@app.route('/')
def index():
cur = mysql.connection.cursor()
cur.execute('select * from contactos')
data = cur.fetchall()
return render_template('index.html', contactos = data)
@app.route('/add_contact', methods = ['POST'])
def add_contact():
if request.method == 'POST':
nom = request.form['nombres']
tel = request.form['telefono']
email = request.form['email']
print('INSERT', nom, tel, email)
cur = mysql.connection.cursor()
cur.execute('insert into contactos(nombres,telefono,email) values(%s,%s,%s)', (nom,tel,email))
mysql.connection.commit()
flash('Contacto insertado correctamente')
return (redirect(url_for('index')))
@app.route('/edit/<id>')
def edit_contact(id):
cur = mysql.connection.cursor()
cur.execute('select * from contactos where id = %s', (id,))
data = cur.fetchall()
print(data[0])
return(render_template('edit.html', contacto=data[0]))
@app.route('/delete/<string:id>')
def delete_contact(id):
cur = mysql.connection.cursor()
cur.execute('delete from contactos where id = %s', (id,))
mysql.connection.commit()
flash('Contacto eliminado correctamente')
return (redirect(url_for('index')))
@app.route('/update/<id>', methods=['POST'])
def update_contact(id):
if request.method == "POST":
nom = request.form['nombres']
tel = request.form['telefono']
email = request.form['email']
print('Update', id, nom, tel, email)
cur = mysql.connection.cursor()
cur.execute("""
update contactos
set nombres = %s,
telefono = %s,
email = %s
where id = %s
""", (nom,tel,email,id))
mysql.connection.commit()
flash('Contacto actualizado correctamente')
return(redirect(url_for('index')))
if __name__ == '__main__':
app.run(port=8000, debug = True)
|
[
"rodrigo.zambrano@utec.edu.pe"
] |
rodrigo.zambrano@utec.edu.pe
|
b2f7243d37ac49ca0cdd95e3ed979d074a8dc30b
|
74460e5a294d0d5f7c1204ba7e99855cea49d762
|
/ex19.py
|
622c4b419ff682aec1341b305eb35d8a17752e4e
|
[] |
no_license
|
HungryEagle/zed_shaw_learn_python_the_hard_way
|
4809ef781728efa4265f1641d49ec39fe83eaeff
|
992e2831a4a88dd57d5391ce23eacb00d4c6af3c
|
refs/heads/master
| 2023-01-11T16:07:09.306802
| 2020-11-15T10:45:37
| 2020-11-15T10:45:37
| 307,918,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
def something(cheese_count,boxes_of_crackers):
print "You have %d cheesess!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
something(20,30)
print "OR, we can used variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
something(amount_of_cheese,amount_of_crackers)
print "We can even use variables from our script:"
something(10+20,5+6)
print "And we can combine the two, variables and math:"
something(amount_of_cheese + 100, amount_of_crackers + 1000)
|
[
"iyengarvenugopalan@gmail.com"
] |
iyengarvenugopalan@gmail.com
|
a23f829e5df7dfc851e5e4b5b0c08838b6288584
|
b27714b21d6282a397fd8a9e832f33739e349648
|
/news/views.py
|
d0b701b3c2699fc92502aadc4605a174be67fb9f
|
[] |
no_license
|
AzatZinatullin/Study-web-project
|
f1a28fc87a8baf3183e2543965ea7a972c4a0128
|
3431e3e4c79890068268552ac6f92e6b945cbba5
|
refs/heads/master
| 2020-04-10T22:45:09.658116
| 2019-06-28T18:21:18
| 2019-06-28T18:21:18
| 161,331,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.dates import ArchiveIndexView
from django.views.generic.detail import DetailView
from news.models import New
from generic.mixins import CategoryListMixin, PageNumberMixin
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from generic.controllers import PageNumberView
from django.core.mail import send_mass_mail
from contacts.models import MailList
from broomtrade import settings
class NewsListView(ArchiveIndexView, CategoryListMixin):
model = New
date_field = "posted"
template_name = "news_index.html"
paginate_by = 10
allow_empty = True
allow_future = True
fields = '__all__'
class NewDetailView(DetailView, PageNumberMixin):
model = New
template_name = "new.html"
fields = '__all__'
class NewCreate(SuccessMessageMixin, CreateView, CategoryListMixin):
model = New
template_name = "new_add.html"
success_url = reverse_lazy("news_index")
success_message = "Новость успешно создана"
fields = '__all__'
def form_valid(self, form):
output = super(NewCreate, self).form_valid(form)
if MailList.objects.exists():
s = "На сайте 'Веник-Торг' появилась новость:\n\n" +\
form.instance.title + "\n\n" + form.instance.description +\
"\n\nhttp://localhost:8000" +\
reverse("news_detail", kwargs={"pk" : form.instance.pk})
letters = []
for maillist_item in MailList.objects.all():
letters = letters + [("Уведомление с сайте 'Веник-Торг'",\
"Здравствуйте, " + maillist_item.username + "!\n\n" + s,\
settings.DEFAULT_FROM_EMAIL, [maillist_item.email])]
send_mass_mail(letters, fail_silently=True)
return output
class NewUpdate(SuccessMessageMixin, PageNumberView, UpdateView, PageNumberMixin):
model = New
template_name = "new_edit.html"
success_url = reverse_lazy("news_index")
success_message = "Новость успешно изменена"
fields = '__all__'
class NewDelete(PageNumberView, DeleteView, PageNumberMixin):
model = New
template_name = "new_delete.html"
success_url = reverse_lazy("news_index")
fields = '__all__'
def post(self, request, *args, **kwargs):
messages.add_message(request, messages.SUCCESS, "Новость успешно удалена")
return super(NewDelete, self).post(request, *args, **kwargs)
class RssNewsListFeed(Feed):
title = "Новости сайта фирмы Веник-Торг"
description = title
link = reverse_lazy("news_index")
def items(self):
return New.objects.all()[0:5]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.description
def item_pubdate(self, item):
return item.posted
def item_link(self, item):
return reverse("news_detail", kwargs = {"pk": item.pk})
class AtomNewsListFeed(RssNewsListFeed):
feed_type = Atom1Feed
subtitle = RssNewsListFeed.description
|
[
"zinatullin.azat@mail.ru"
] |
zinatullin.azat@mail.ru
|
ec70d0194966dbf2f70639e867ec153839fe63fd
|
0245aead667916a7abc63b9fc109633cdb77b891
|
/educative_grokking_coding_interview_patterns/14_dynamic_programming/05_word_break_ii.py
|
2ed25573f5a59333fed9bd8c0d72adf67be18f06
|
[] |
no_license
|
chlos/exercises_in_futility
|
b6850c456d48e363dfdd3d47ee487ca843113a7d
|
5c2473f859da5efec73120256faad06ab8e0e359
|
refs/heads/master
| 2023-08-04T09:58:09.584146
| 2023-07-31T16:44:42
| 2023-07-31T16:44:42
| 219,707,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
from typing import List
class Solution:
# dfs / top-down
# see the: https://leetcode.com/problems/word-break-ii/solutions/44311/python-easy-to-understand-solution/comments/881748
def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
# a bit faster in 'word in wordDict' checks
# wordDict = set(wordDict)
wordDict = {w: True for w in wordDict}
result = []
def dfs(start, path):
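            # start: index in s where the next word begins; path: words chosen so far on this branch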
if start >= len(s):
result.append(' '.join(path))
for end in range(start + 1, len(s) + 1):
curr_word = s[start:end]
if curr_word in wordDict:
dfs(end, path + [curr_word])
dfs(0, [])
return result
|
[
"baphonet@gmail.com"
] |
baphonet@gmail.com
|
0b77d281cdd7a2fd494ca4ed716f2a5df8dc98aa
|
775378afa2eff7b7b539aa00ad373938cbc651de
|
/train.py
|
4b25f3c35143edccff0c9dabd8b760138655a31c
|
[
"MIT"
] |
permissive
|
RuHungLee/2019_Deep_Learning_For_calligraphy
|
ab259fc46475337108f3fc26b120d12dcaba6fe9
|
820a560c63663c8280621af50baf397a5d4b501a
|
refs/heads/master
| 2020-07-29T00:10:46.983551
| 2019-10-24T06:38:13
| 2019-10-24T06:38:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,683
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
#import save_gif
from PIL import Image
from models import Model, Model_v2, Model_v3
from visdom import Visdom
from dataloader.dataloader import Loader
def train():
device = torch.device('cuda:0')
model = Model_v3().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
visdom_server = Visdom(port=3387)
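    # Visdom dashboard for the loss curves; assumes a visdom server is already listening on port 3387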
train_set = Loader(mode = 'train')
val_set = Loader(mode = 'val')
show_set = Loader(mode = 'show')
val_show_set = Loader(mode = 'val_show')
data_loader = torch.utils.data.DataLoader(train_set , batch_size = 1 , shuffle = False , num_workers = 1)
val_data_loader = torch.utils.data.DataLoader(val_set , batch_size = 1 , shuffle = False , num_workers = 1)
show_loader = torch.utils.data.DataLoader(show_set , batch_size = 1 , shuffle = False , num_workers = 1)
val_show_loader = torch.utils.data.DataLoader(val_show_set , batch_size = 1 , shuffle = False , num_workers = 1)
print('The number of train dataloader:' , len(data_loader))
print('The number of val dataloader:' , len(val_data_loader))
print('The number of gif for training every 500 epochs' , len(show_loader))
print('The number of gif for validation every 500 epochs:' , len(val_show_loader))
for epoch in range(5000):
#====================training
total_loss = 0
for i , (image , target) in enumerate(data_loader):
image = image.to(device)
target = target.to(device)
predict, loss = model(image, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_loss /= len(data_loader)
if total_loss<=5000:
visdom_server.line([total_loss], [epoch], win='loss', env='version2', update='append')
else:
visdom_server.line([5000], [epoch], win='loss', env='version2', update='append')
#=====================validation
total_loss = 0
for i , (image , target) in enumerate(val_data_loader):
image = image.to(device)
target = target.to(device)
predict , loss = model(image, target)
total_loss += loss.item()
total_loss /= len(val_data_loader)
if total_loss<=5000:
visdom_server.line([total_loss], [epoch], win='val', env='version2', update='append')
else:
visdom_server.line([5000], [epoch], win='val', env='version2', update='append')
#======================save model and gif for training and validation
'''
if epoch%500==0:
path = f'./pretrained/{epoch}_pretrained.pth'
torch.save(model.state_dict() , path)
for idx , (image , target) in enumerate(show_loader):
image = image.to(device)
target = target.to(device)
predict , _ = model(image , target)
predict = torch.squeeze(predict)
predict = predict.cpu().detach().numpy()
save_gif(predict , epoch , idx , mode = 'train')
for idx , (image , target) in enumerate(val_show_loader):
image = image.to(device)
target = target.to(device)
predict , _ = model(image , target)
predict = torch.squeeze(predict)
predict = predict.cpu().detach().numpy()
save_gif(predict , epoch , idx , mode = 'val')
'''
if __name__ == '__main__':
train()
|
[
"q1a2z3nmnm1010@gmail.com"
] |
q1a2z3nmnm1010@gmail.com
|
ad8d1374dc143b2f0d290231c845d35fe23b1d82
|
bb87579e47fc04b299694b8a8fe318f022f54ee8
|
/Practice Python/Exercise_06.py
|
07e6b0f3f57b4197d3310985c9a563d48f0a03b6
|
[] |
no_license
|
QaisZainon/Learning-Coding
|
7bbc45197085dfa8f41ac298d26cf54e99e7b877
|
a3991842e79c30f24d7bc0cca77dbd09bc03372f
|
refs/heads/master
| 2022-12-23T05:47:26.512814
| 2020-09-25T08:10:20
| 2020-09-25T08:10:20
| 297,945,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
'''
Ask the users for a string and print out whether it is a palindrome or not
'''
def palindrome_checker():
string = input('Check if a sentence is a palindrome\n')
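    # the slice below steps backwards through the whole string, i.e. it builds the reverse of string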
string2 = string[len(string) :-len(string)-1 :-1]
if string == string2:
print('This is a palindrome')
else:
print('That aint no palindrome')
palindrome_checker()
|
[
"noreply@github.com"
] |
QaisZainon.noreply@github.com
|
93f586b8b57676bdca5f28196965d63cdb3d646b
|
88f17875a2886c56ec91bc517f49087f52ebe0eb
|
/examples/butatriene/nokrr/diabatize.py
|
92080a0ae83dc3a9f676ef5b9904eba38d6abdeb
|
[
"MIT"
] |
permissive
|
addschile/propdb
|
403566cf8c9b4e66e6aa3249b63c3fe08795c7e6
|
1062c1d371b44c62d58da47cffff294f5f957860
|
refs/heads/main
| 2023-06-19T21:33:22.986904
| 2021-07-13T21:20:58
| 2021-07-13T21:20:58
| 381,525,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
import numpy as np
from propdb import diabatize
if __name__ == "__main__":
nel = 2
nmodes = 2
ngeoms = 813
# get adiabatic energies
eads = np.zeros((ngeoms,2))
f = open('adpes.dba','r')
for i in range(ngeoms):
f.readline() # record no
es = f.readline().split()
eads[i,0] = float(es[0])
eads[i,1] = float(es[1])
f.close()
# get nonadiabatic couplings
f = open('nact.dba','r')
Fs = np.zeros(ngeoms, dtype=np.ndarray)
for i in range(ngeoms):
F = np.zeros((8,3))
f.readline()
for j in range(8):
line = f.readline().split()
F[j,0] = float(line[0])
F[j,1] = float(line[1])
F[j,2] = float(line[2])
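        # scale each raw coupling vector by the inverse of the adiabatic energy gap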
Fs[i] = F/(eads[i,1]-eads[i,0])
f.close()
# get geometries
f = open('geo.dba','r')
geoms = np.zeros(ngeoms, dtype=np.ndarray)
for i in range(ngeoms):
geom = np.zeros((8,3))
f.readline()
for j in range(8):
line = f.readline().split()
geom[j,0] = float(line[0])
geom[j,1] = float(line[1])
geom[j,2] = float(line[2])
geoms[i] = geom
f.close()
diabatize(nel,ngeoms,geoms,eads,Fs)
|
[
"addison@ummac.local"
] |
addison@ummac.local
|
30faf0d47ba0f57bc9f9df27b0db6ec1633d5a42
|
9b75094dd493c362a7bf488f8b1031e5d9235a13
|
/oldProgs/Test1.2.py
|
7a5f708cab93c5d8993ecf418d6a20c3e166284f
|
[] |
no_license
|
cmibarnwell/python
|
40330ce6c0d5ae1cd3047003ae75fa238d5598e9
|
4ddbbe7677696d737436a87cbd9ba2de58d0f242
|
refs/heads/master
| 2021-01-11T17:07:37.671241
| 2017-01-22T15:55:58
| 2017-01-22T15:55:58
| 79,726,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
def volume(l, w, h):
return l*w*h
def main():
l=input("What is the length of the rectangle? ")
h=input("What is the height of the rectangle? ")
w=input("What is the width of the rectangle? ")
l_int=int(l)
h_int=int(h)
w_int=int(w)
print(volume(l_int,w_int,h_int))
main()
|
[
"calebbarnwell@gmail.com"
] |
calebbarnwell@gmail.com
|
fab9c995fa494e4fa2e1d0fa45978e30482968af
|
48da2708fb52948c21cc513bdd323511602c9cbd
|
/random_seq_engine.py
|
f9e00088775e187dfbfae21ae1ce4618658ae8a2
|
[
"MIT"
] |
permissive
|
a1exwang/qtrading-algorithms
|
d99a1a75fe07eb4f2177d3924d25f8e68a3f48dd
|
f5c9a0a2aa28fa0ec49fbbc8d1bfda13b472df97
|
refs/heads/master
| 2020-07-22T16:57:06.380011
| 2019-09-09T08:58:00
| 2019-09-09T08:58:00
| 207,266,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,398
|
py
|
import numpy as np
import math
import matplotlib.pyplot as plt
class MyOperator:
def __init__(self):
self.init_price = 0
self.expected_return_rate = 0.8
self.max_return_rate = 0.8
self.max_last_prices = 100
self.last_prices = []
self.sell_percent = 0.5
self.buy_percent = 0.2
self.min_trade_period = 10
self.last_trade_time = -self.min_trade_period
def sell(self, t, shares):
self.last_trade_time = t
return -shares * self.sell_percent
def buy_in_cash(self, t, cash, price):
self.last_trade_time = t
print(cash)
return math.floor(cash / price) * self.buy_percent
def __call__(self, t, price, shares, cash, service_charge_rate):
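        # Called once per time step by simulate(): return a positive share count to buy, negative to sell, 0 to hold.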
self.last_prices.append(price)
if len(self.last_prices) > self.max_last_prices:
self.last_prices = self.last_prices[1:]
if t - self.last_trade_time >= self.min_trade_period:
if shares > 100:
if price < sum(self.last_prices) / len(self.last_prices) * 0.95:
return self.sell(t, shares)
if cash > 100:
if price < sum(self.last_prices) / len(self.last_prices) * 1.3:
return self.buy_in_cash(t, cash, price)
return 0
def simulate(init_price, init_cash, deltas, operator):
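    # Start half in cash and half in shares, then apply the operator's trade each step while tracking assets, prices, trade values and cash.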
current_price = init_price
current_shares = (init_cash / 2) / current_price
current_cash = init_cash / 2
total_assets = []
prices = []
total_trade_values = []
total_cash = []
service_charge_rate = 0.001
for t, d in enumerate(deltas):
# > 0, buy x shares
# < 0, sell x shares
traded_shares = operator(t, current_price, current_shares, current_cash, service_charge_rate)
current_shares += traded_shares
current_cash -= traded_shares * current_price
service_charge = abs(traded_shares) * current_price * service_charge_rate
current_cash -= service_charge
total_assets.append(current_cash + current_shares * current_price)
prices.append(current_price)
total_trade_values.append(traded_shares * current_price)
total_cash.append(current_cash)
current_price = current_price * (1+d)
return np.array(total_assets), np.array(prices), total_trade_values, np.array(total_cash)
def run(your_operator, name):
deltas = np.concatenate((
np.random.uniform(-0.09, 0.11, 100),
np.random.uniform(-0.11, 0.09, 100),
np.random.uniform(-0.09, 0.10, 100),
np.random.uniform(-0.10, 0.09, 100),
np.random.uniform(-0.10, 0.10, 100),
))
init_price = 10.0
principle = 10000
total_assets, total_prices, total_trade_values, total_cash = simulate(init_price, principle, deltas, MyOperator())
total_assets2, _, total_trade_values2, total_cash2 = simulate(init_price, principle, deltas, your_operator)
    plt.subplot(211)
plt.plot(total_assets, label='Asset(%s)' % 'trend')
plt.plot(total_assets2, label='Asset(%s)' % name)
plt.plot(total_prices/init_price * principle, label='Price')
plt.legend()
    plt.subplot(212)
plt.plot(total_trade_values, label='Traded(%s)' % 'Trend')
plt.plot(total_trade_values2, label='Traded2(%s)' % name)
plt.plot(total_cash, label='Cash')
plt.legend()
plt.show()
|
[
"ice_b0und@hotmail.com"
] |
ice_b0und@hotmail.com
|
16c9c2f253d3c693699b34d7a7058b078c5fedbd
|
5e94d48dfefde297b538afba5f551a1140d06806
|
/gibbs-sampler.py
|
b7a9bf5e5f8142d32aa63aaced6b6319c7c68abd
|
[] |
no_license
|
audreyferry/gibbs
|
c3ecf3d7740170fe3d8b9ce8f7a351546b28b333
|
be0d8471a34c882a2c0af263ce604c0059a39fe8
|
refs/heads/master
| 2016-08-12T02:58:11.120508
| 2016-04-16T02:21:21
| 2016-04-16T02:21:21
| 49,163,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88,457
|
py
|
#!/usr/bin/env python3
# LoopNumberAtWhichWeStartTracking = 20 # Will need this later in development
import sys
import os
import random
import math
import json
import jsonpickle
import time
import datetime
import copy
#import numpy # TODAY ONLY
g_encoding = "asci" # "utf8"
# PARAMETERS # probably want shorter segments initially (so BREAKPROB higher than 0.1)
BitsPerLetter = 5
BREAKPROB = 0.3 #0.3 # 0.5 #0.4 #0.3 #0.2 # 0.1 # where does this probability come from? is it a statistic about languages in general/English?
DEFAULTCOUNT = 1 # 0.5 # Used in divide_charges_among_instances() and in get_plog()
PLOGCOEFF = 3 # 3 # used in get_plog_charge()
PENALTYFACTOR = 1.5 # extra factor in get_plog_charge() for "new" segment (not in dictionary) 1.0 1.5 2.0 1.25 1.3
REBASE_PERIOD = 10 # number of iterations between calls to rebase() # standard setting = 10
FLOAT_INF = float("inf")
NumberOfIterations = 15 # 160 # 200 # 400
ResumeLoopno = 0 # Note - may want to (set a flag and) give a file to load, then get the ResumeLoop from the file
print("\nNumber of iterations =", NumberOfIterations)
if ResumeLoopno > 0:
print("Resume processing starting at loopno =", ResumeLoopno)
SaveState = False # True
## ---------------------------------------------------------------------------------------##
class Segment: # think <morpheme> for morphology, <word-type> or dictionary entry for wordbreaking
## ---------------------------------------------------------------------------------------##
def __init__(self, segment_text):
self.segment_text = segment_text
self.count = 0
self.phonocost = len(segment_text) * float(BitsPerLetter)
self.ordercost = math.log (math.factorial(len(segment_text)), 2)
#<self.ordercost = 0.0> # <produces interesting results>
self.inclusioncost = 1.0
self.phonocost_portion = 0.0 # phonocost / count
self.ordercost_portion = 0.0 # etc.
self.inclusioncost_portion = 0.0
self.sum_dictcosts_portion = 0.0 # (phonocost + ordercost + inclusioncost) / count
self.plog = 0.0 # CAUTION: plog depends on total_pieces_count.
# self.plog is not updated except when this segment is involved in a parsing decision.
# Use self.get_plog(totalsegmentcount) for correct value.
# SEE ALSO document.fetch_plogged_segment_from_dictionary
def divide_charges_among_instances(self):
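        # Spread this segment's dictionary costs evenly over its instances; DEFAULTCOUNT guards against dividing by zero for unseen segments.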
if self.count != 0:
divisor = self.count
else:
divisor = DEFAULTCOUNT
self.phonocost_portion = self.phonocost/divisor # Note that phonocost is float; also '/' is true division in python3
self.ordercost_portion = self.ordercost/divisor
self.inclusioncost_portion = self.inclusioncost/divisor
self.sum_dictcosts_portion = self.phonocost_portion + self.ordercost_portion + self.inclusioncost_portion
def get_plog(self, totalsegmentcount):
if self.count >= 1:
return math.log( (totalsegmentcount / float(self.count)), 2 )
else:
return math.log( (totalsegmentcount / float(DEFAULTCOUNT)), 2 )
# def get_plog_charge(self, totalsegmentcount):
# return PLOGCOEFF * self.get_plog(totalsegmentcount)
def get_plog_charge(self, totalsegmentcount):
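        # Segments not yet in the dictionary (count == 0) pay an extra PENALTYFACTOR on top of the weighted plog.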
if self.count != 0:
return PLOGCOEFF * self.get_plog(totalsegmentcount)
else:
return PENALTYFACTOR * PLOGCOEFF * self.get_plog(totalsegmentcount)
def get_instance_cost(self, totalsegmentcount):
return self.get_plog_charge(totalsegmentcount) + self.sum_dictcosts_portion
## ---------------------------------------------------------------------------------------##
class Line: # a bounded expression <word in dx1 file> <line in corpus>
## ---------------------------------------------------------------------------------------##
def __init__(self, unbroken_text):
self.unbroken_text = unbroken_text # (former self.word)
self.breaks = []
self.pieces = [] # list of strings <morphs> <words> NOT segment objects
self.piecesorder_cost = 0.0
self.total_cost = 0.0 # Since only local information is needed for parsing decisions,
# total_cost for the line and the lists below are not maintained at intermediate stages.
# Use the document function compute_brokenline_cost() (former EvaluateWordParse)
# to obtain line cost information.
# Use the document function populate_line_displaylists() to fill the lists below
# in order to display cost details by segment and cost component.
self.count_list = [] # List of segment counts, in proper order.
self.phonocost_portion_list = [] # List per segment of phonocost_portion, in proper order. Similarly for other list variables.
self.ordercost_portion_list = [] # The lists are used to arrange segment information attractively for display.
self.inclusioncost_portion_list = [] # Are they useful to retain? Should they be in a separate Display class?
self.plog_list = []
self.subtotal_list = [] # list per segment of following quantity:
# ordercost_portion + phonocost_portion + inclusioncost_portion + plog
self.true_text = []
self.true_breaks = []
def getpiece(self, pieceno):
return self.unbroken_text[self.breaks[pieceno-1]:self.breaks[pieceno]] # note that getpiece(k) returns pieces[k-1]
# for example, getpiece(1) returns pieces[0]
# EXAMPLE FOR NEXT TWO FUNCTIONS
# line.unbroken_text = abcdefghij
# line.breaks = [0, 2, 5, 7, 10]
# line.pieces = [ab, cde, fg, hij]
def populate_pieces_from_breaks(self):
#self.pieces = []
#for n in range(len(self.breaks)-1):
# self.pieces.append(self.unbroken_text[self.breaks[n]:self.breaks[n+1]])
self.pieces = []
start = 0
for brk in self.breaks[1:]:
self.pieces.append(self.unbroken_text[start:brk])
start = brk
def populate_breaks_from_pieces(self):
self.breaks = [0]
for piece in self.pieces:
self.breaks.append(self.breaks[-1] + len(piece))
def displaytextonly(self, outfile):
print(self.unbroken_text, file=outfile)
print(" breaks:", self.breaks, file=outfile)
print(" pieces:", end=' ', file=outfile) # FIX SPACING?
#for n in range(1,len(self.breaks)):
# print(self.getpiece(n), "", end=' ', file=outfile)
for piece in self.pieces:
print(piece, "", end=' ', file=outfile)
print(file=outfile)
def display_detail(self, outfile):
FormatString1 = "%20s"
FormatString2 = "%8.1f"
FormatString3 = "%8s"
FormatString4 = "%8d"
print("\n", self.unbroken_text, file=outfile)
print("breaks:", self.breaks, file=outfile)
print(FormatString1 %("pieces:"), end=' ', file=outfile) # FIX SPACING?
#for n in range(1,len(self.breaks)):
# print(FormatString3 %(self.getpiece(n)), end=' ', file=outfile)
for piece in self.pieces:
print(FormatString3 % piece, end=' ', file=outfile)
print(file=outfile)
print(FormatString1 %("count:"), end=' ', file=outfile)
for item in self.count_list:
print(FormatString4 %(item), end=' ', file=outfile)
print(file=outfile)
print(FormatString1 %("plog:"), end=' ', file=outfile)
for item in self.plog_list:
print(FormatString2 %(item), end=' ', file=outfile)
print(file=outfile)
print(FormatString1 %("log |piece|!:"), end=' ', file=outfile)
for item in self.ordercost_portion_list:
print(FormatString2 %(item), end=' ', file=outfile)
print(file=outfile)
print(FormatString1 %("phono info:"), end=' ', file=outfile)
for item in self.phonocost_portion_list:
print(FormatString2 %(item), end=' ', file=outfile)
print(file=outfile)
print(FormatString1 %("inclusion list cost:"), end=' ', file=outfile)
for item in self.inclusioncost_portion_list:
print(FormatString2 %(item), end=' ', file=outfile)
print(file=outfile)
print(FormatString1 %("subtotal:"), end=' ', file=outfile)
for item in self.subtotal_list:
print(FormatString2 %(item), end=' ', file=outfile)
#Total += item # self.total_cost is computed in EvaluateWordParse (including also logfacword)
print(file=outfile)
logfacword = self.piecesorder_cost
print(FormatString1 %("log (num_pieces!):"), end=' ', file=outfile)
print(FormatString2 %( logfacword ), end=' ', file=outfile)
print(file=outfile)
print(FormatString1 %("Total:"), end=' ', file=outfile)
print(FormatString2 %( self.total_cost ), file=outfile)
def displaytoscreen_textonly(self):
print(self.unbroken_text)
print(" breaks:", self.breaks)
print(" pieces:", end=' ') # FIX SPACING?
#for n in range(1,len(self.breaks)):
# print(self.getpiece(n), "", end=' ', file=outfile)
for piece in self.pieces:
print(piece, "", end=' ')
print()
def displaytoscreen_detail(self):
FormatString1 = "%20s"
FormatString2 = "%8.1f"
FormatString3 = "%8s"
FormatString4 = "%8d"
print(self.unbroken_text)
print("breaks", self.breaks)
print(FormatString1 %("pieces:"), end=' ')
#for n in range(1,len(self.breaks)):
# print(FormatString3 %(self.getpiece(n)), end=' ')
for piece in self.pieces:
print(FormatString3 % piece, end=' ')
print()
print(FormatString1 %("count:"), end=' ')
for item in self.count_list:
print(FormatString4 %(item), end=' ')
print()
print(FormatString1 %("plog:"), end=' ')
for item in self.plog_list:
print(FormatString2 %(item), end=' ')
print()
print(FormatString1 %("log |piece|!:"), end=' ')
for item in self.ordercost_portion_list:
print(FormatString2 %(item), end=' ')
print()
print(FormatString1 %("phono info:"), end=' ')
for item in self.phonocost_portion_list:
print(FormatString2 %(item), end=' ')
print()
print(FormatString1 %("inclusion list cost:"), end=' ')
for item in self.inclusioncost_portion_list:
print(FormatString2 %(item), end=' ')
print()
print(FormatString1 %("subtotal:"), end=' ')
for item in self.subtotal_list:
print(FormatString2 %(item), end=' ')
#Total += item
print()
logfacword = self.piecesorder_cost
print(FormatString1 %("log (num_pieces!):"), end=' ')
print(FormatString2 %( logfacword ), end=' ')
print()
print(FormatString1 %("Total:"), end=' ')
print(FormatString2 %( self.total_cost ))
def break_cover(self, point):
# for the first breakpoint that is greater than or equal to the point, return the breakpoint and its index
if point not in range(1, len(self.unbroken_text)):
print("Error in break_cover(): point (=", point, ") must satisfy 0 < point < ", len(self.unbroken_text), "for line = '", self.unbroken_text, "'.")
sys.exit()
for n in range(1, len(self.breaks)): # Note self.breaks[0] = 0.
if point <= self.breaks[n]:
return (self.breaks[n], n)
return (-1, -1) #should never happen!
## ---------------------------------------------------------------------------------------##
class Document: # <dx1 file> <corpus>
## ---------------------------------------------------------------------------------------##
def __init__(self): # this_item ???
self.line_object_list = [] # list of Line objects (former WordObjectList)
self.segment_object_dictionary = {} # dictionary key: piece value: segment object
self.totalsegmentcount = 0
self.merge_count = 0
self.split_count = 0
self.merge_newsegment_count = 0 # these 3 added on Feb. 2, 2016
self.split_1newsegment_count = 0
self.split_2newsegments_count = 0
self.split_merge_history = []
self.break_precision = 0.0 # these 6 added on Feb. 21, 2016
self.break_recall = 0.0
self.token_precision = 0.0
self.token_recall = 0.0
self.dictionary_precision = 0.0
self.dictionary_recall = 0.0
self.addedandtrue_devcount = 0.0 # these 4 are for diagnosing DR (DictionaryRecall); added on Feb. 25, 2016
self.deletedandtrue_devcount = 0.0
        self.addedandtrue_dictionary = {} # key is piece; value is the count in the true_segment_dictionary
self.deletedandtrue_dictionary = {}
self.overall_cost = 0.0
self.other_statistics = 0.0 # What should be measured?
self.random_state = None # save state of random number generator in this spot
# so that it will be preserved by pickling
self.true_segment_dictionary = {}
self.true_totalsegmentcount = 0
def output_corpuslines_detail(self, outfile, loopno):
print("----------------------------------------\nLoop number:", loopno, file=outfile)
print("----------------------------------------", file=outfile)
for line in self.line_object_list:
self.populate_line_displaylists(line)
line.display_detail(outfile) # displays text followed by line cost, detailed by segment and component
def output_corpuslines_textonly(self, outfile, loopno):
print("----------------------------------------\nLoop number:", loopno, file=outfile)
print("----------------------------------------", file=outfile)
for line in self.line_object_list:
line.displaytextonly(outfile) # displays only unbroken line and its parse
print(" cost: %7.3f\n" % line.total_cost, end=' ', file=outfile)
def output_gibbspieces(self, outfile, loopno):
print("----------------------------------------\nLoop number:", loopno, file=outfile)
print("----------------------------------------", file=outfile)
# Additional information is stored in the segment_object_dictionary,
# but only count will be displayed on the outfile.
reduced_dictionary = {}
for this_piece, this_segment in self.segment_object_dictionary.items():
reduced_dictionary[this_piece] = this_segment.count
#countslist = sorted(reduced_dictionary.items(), key = lambda x:(x[1],x[0]), reverse=True) #primary sort key is count, secondary is alphabetical
countslist = sorted(reduced_dictionary.items(), key = lambda x:x[0]) #secondary sort is alphabetical (ascending)
countslist = sorted(countslist, key = lambda x:x[1], reverse=True) #primary sort is by count (descending)
print("\ntotalsegmentcount =", self.totalsegmentcount, file=outfile)
print("\n=== Dictionary ===", file=outfile)
for n in range(len(countslist)):
print("%6d" % n, "\t%5d" % countslist[n][1], "\t", countslist[n][0], file=outfile)
def output_addedandtrue(self, outfile, loopno):
print("----------------------------------------\nLoop number:", loopno, file=outfile)
print("----------------------------------------", file=outfile)
#countslist = sorted(reduced_dictionary.items(), key = lambda x:(x[1],x[0]), reverse=True) #primary sort key is count, secondary is alphabetical
countslist = sorted(self.addedandtrue_dictionary.items(), key = lambda x:x[0]) #secondary sort is alphabetical (ascending)
countslist = sorted(countslist, key = lambda x:x[1], reverse=True) #primary sort is by count (descending)
print("\n=== addedandtrue_dictionary ===", file=outfile)
for n in range(len(countslist)):
print("%6d" % n, "\t%5d" % countslist[n][1], "\t", countslist[n][0], file=outfile)
def output_deletedandtrue(self, outfile, loopno):
print("----------------------------------------\nLoop number:", loopno, file=outfile)
print("----------------------------------------", file=outfile)
#countslist = sorted(reduced_dictionary.items(), key = lambda x:(x[1],x[0]), reverse=True) #primary sort key is count, secondary is alphabetical
countslist = sorted(self.deletedandtrue_dictionary.items(), key = lambda x:x[0]) #secondary sort is alphabetical (ascending)
countslist = sorted(countslist, key = lambda x:x[1], reverse=True) #primary sort is by count (descending)
print("\n=== deletedandtrue_dictionary ===", file=outfile)
for n in range(len(countslist)):
print("%6d" % n, "\t%5d" % countslist[n][1], "\t", countslist[n][0], file=outfile)
def fetch_plogged_segment_from_dictionary(self, piece): # BETTER: return (this_segment, plog)
this_segment = self.segment_object_dictionary[piece]
if this_segment.count == 0:
print("Error in fetch_plogged_segment_from_dictionary for piece ='", piece, "': if segment is in the dictionary, its count should not be 0")
sys.exit()
this_segment.plog = math.log( self.totalsegmentcount / float(this_segment.count), 2 )
return this_segment
def new_segment_object(self, piece, count):
this_segment = Segment(piece)
this_segment.count = count
this_segment.divide_charges_among_instances() # replaces 0 by DEFAULTCOUNT
this_segment.plog = this_segment.get_plog(self.totalsegmentcount) # replaces 0 by DEFAULTCOUNT
return this_segment
def initial_segmentation(self):
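        # Seed the parse: insert a break at each interior position with probability BREAKPROB and enter every resulting piece into the segment dictionary.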
dictionary = self.segment_object_dictionary
for ln in self.line_object_list:
start = 0
ln.breaks.append(0) # always put a break at the beginning
for n in range(1, len(ln.unbroken_text)): # won't randomly put a break at the beginning or end
                if random.random() < BREAKPROB: # on average, one break per 1/BREAKPROB letters
piece = ln.unbroken_text[start:n]
ln.pieces.append(piece)
ln.breaks.append( n )
start = n
self.totalsegmentcount += 1 # ALERT - for any item in or about to go into the dictionary,
if not piece in dictionary: # increment totalsegmentcount BEFORE populating its plog variable
dictionary[piece] = self.new_segment_object(piece, 1)
else:
dictionary[piece].count += 1
if start < len(ln.unbroken_text): # should always be true...
piece = ln.unbroken_text[start:]
ln.pieces.append(piece)
ln.breaks.append( len(ln.unbroken_text) ) # always put a break at the end
self.totalsegmentcount += 1
if not piece in dictionary:
dictionary[piece] = self.new_segment_object(piece, 1)
else:
dictionary[piece].count += 1
# Now that forming of segments is complete,
# fill in the information that depends on their count.
for sgmt in self.segment_object_dictionary.values():
sgmt.divide_charges_among_instances()
sgmt.get_plog(self.totalsegmentcount)
def compare_alt_parse(self, line):
# EXPLANATORY NOTE
### point = 1 + int(random.random() * (len(line.unbroken_text)-1))
# Before python3, this line and the first line of code below were equivalent.
# randrange changed in python3, so now program output doesn't match pre-python3 runs.
# Using random.random() as shown above DOES exactly reproduce pre-python3 results,
# except for spacing and ordering.
attentionpoint = random.randrange( 1, len(line.unbroken_text)) # selects a possible spot for a change, not before all text or after.
# attentionpoint k refers to a current or potential break between text points k-1 and k.
# Suppose len(line.unbroken_text) = 5
# Text index runs from 0 through 4. Don't pick 0. Don't pick 5.
# But OK to pick 4. That splits off the last character of the line.
coverbrkpt, coverbrkindex = line.break_cover(attentionpoint)
# SPLITTING:
if attentionpoint < coverbrkpt: # attentionpoint may be any character within its piece except the first
leftbreak = line.breaks[coverbrkindex-1]
rightbreak = line.breaks[coverbrkindex] # Note rightbreak == coverbrkpt
# Consider a modification of current parse at the selected location
# current configuration
singlepiece = line.unbroken_text[leftbreak:rightbreak] # Note singlepiece == line.pieces[coverbrkindex-1]
assert(singlepiece in self.segment_object_dictionary)
#if singlepiece not in self.segment_object_dictionary:
#print("Error in CompareAltParse: singlepiece (=", singlepiece, ") not found in dictionary at line ='", line.unbroken_text, "'.")
#sys.exit()
single_segment = self.fetch_plogged_segment_from_dictionary(singlepiece)
# alternate configuration
leftpiece = line.unbroken_text[leftbreak:attentionpoint]
rightpiece = line.unbroken_text[attentionpoint:rightbreak]
if leftpiece in self.segment_object_dictionary:
left_segment = self.fetch_plogged_segment_from_dictionary(leftpiece)
else:
left_segment = self.new_segment_object(leftpiece, 0)
if rightpiece in self.segment_object_dictionary:
right_segment = self.fetch_plogged_segment_from_dictionary(rightpiece)
else:
right_segment = self.new_segment_object(rightpiece, 0)
# In the standard case we consider singlepiece vs. (leftpiece and rightpiece).
# When possible, we consider in addition whether to merge a separated single character with the preceding or following segment, as appropriate.
# For each case:
# - calculate alternative costs
# - select among alternatives by sampling
# - update information
leftsingleton_case = (len(leftpiece) == 1) and (leftbreak != 0)
rightsingleton_case = (len(rightpiece) == 1) and (rightbreak != len(line.unbroken_text))
if (not leftsingleton_case and not rightsingleton_case):
decision = self.compare_simple_split(line, single_segment, left_segment, right_segment)
if decision == 'alt':
self.update_for_simple_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
# NOTE: if decision == 'current', make no changes
else: # special treatment for single characters
if leftsingleton_case:
precedingpiece = line.pieces[coverbrkindex-2]
preceding_segment = self.fetch_plogged_segment_from_dictionary(precedingpiece)
if rightsingleton_case:
followingpiece = line.pieces[coverbrkindex]
following_segment = self.fetch_plogged_segment_from_dictionary(followingpiece)
if (leftsingleton_case and not rightsingleton_case):
leftmergedpiece = precedingpiece + leftpiece
if leftmergedpiece in self.segment_object_dictionary:
leftmerged_segment = self.fetch_plogged_segment_from_dictionary(leftmergedpiece)
else:
leftmerged_segment = self.new_segment_object(leftmergedpiece, 0)
decision = self.compare_leftsingleton_split(line, single_segment, left_segment, right_segment, preceding_segment, leftmerged_segment)
if decision == 'alt1':
self.update_for_simple_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
elif decision == 'alt2':
self.update_for_leftsingleton_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, preceding_segment, leftmerged_segment)
# NOTE: if decision == 'current', make no changes
elif (rightsingleton_case and not leftsingleton_case):
rightmergedpiece = rightpiece + followingpiece
if rightmergedpiece in self.segment_object_dictionary:
rightmerged_segment = self.fetch_plogged_segment_from_dictionary(rightmergedpiece)
else:
rightmerged_segment = self.new_segment_object(rightmergedpiece, 0)
decision = self.compare_rightsingleton_split(line, single_segment, left_segment, right_segment, following_segment, rightmerged_segment)
if decision == 'alt1':
self.update_for_simple_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
elif decision == 'alt2':
self.update_for_rightsingleton_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, following_segment, rightmerged_segment)
# NOTE: if decision == 'current', make no changes
elif (rightsingleton_case and leftsingleton_case): # This case should really be "else:"
leftmergedpiece = precedingpiece + leftpiece
if leftmergedpiece in self.segment_object_dictionary:
leftmerged_segment = self.fetch_plogged_segment_from_dictionary(leftmergedpiece)
else:
leftmerged_segment = self.new_segment_object(leftmergedpiece, 0)
rightmergedpiece = rightpiece + followingpiece
if rightmergedpiece in self.segment_object_dictionary:
rightmerged_segment = self.fetch_plogged_segment_from_dictionary(rightmergedpiece)
else:
rightmerged_segment = self.new_segment_object(rightmergedpiece, 0)
decision = self.compare_bothsingletons_split(line, single_segment, left_segment, right_segment, preceding_segment, following_segment, leftmerged_segment, rightmerged_segment)
if decision == 'alt1':
self.update_for_simple_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
elif decision == 'alt2':
self.update_for_leftsingleton_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, preceding_segment, leftmerged_segment)
elif decision == 'alt3':
self.update_for_rightsingleton_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, following_segment, rightmerged_segment)
elif decision == 'alt4':
self.update_for_bothsingletons_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, preceding_segment, following_segment, leftmerged_segment, rightmerged_segment)
# NOTE: if decision == 'current', make no changes
else: # used when developing and testing individual parts of preceding code; should not be reached in regular operation.
decision = self.compare_simple_split(line, single_segment, left_segment, right_segment)
if decision == 'alt':
self.update_for_simple_split(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
# MERGING:
elif attentionpoint == line.breaks[coverbrkindex]: # here attentionpoint is the first character within its piece
leftbreak = line.breaks[coverbrkindex-1]
rightbreak = line.breaks[coverbrkindex+1]
# Consider a modification of current parse at the selected location
# current configuration
leftpiece = line.unbroken_text[leftbreak:attentionpoint] # leftpiece == line.pieces[coverbrkindex-1]
rightpiece = line.unbroken_text[attentionpoint:rightbreak] # rightpiece == line.pieces[coverbrkindex]
#if leftpiece not in self.segment_object_dictionary:
#print("Error in CompareAltParse: leftpiece (= ", leftpiece, ") not found in dictionary at line = '", line.unbroken_text, "'.")
#sys.exit()
#if rightpiece not in self.segment_object_dictionary:
#print("Error in CompareAltParse: rightpiece (= ", rightpiece, ") not found in dictionary at line = '", line.unbroken_text, "'.")
#sys.exit()
assert(leftpiece in self.segment_object_dictionary)
assert(rightpiece in self.segment_object_dictionary)
left_segment = self.fetch_plogged_segment_from_dictionary(leftpiece)
right_segment = self.fetch_plogged_segment_from_dictionary(rightpiece)
# alternate configuration
singlepiece = line.unbroken_text[leftbreak:rightbreak]
if singlepiece in self.segment_object_dictionary:
single_segment = self.fetch_plogged_segment_from_dictionary(singlepiece)
else:
single_segment = self.new_segment_object(singlepiece, 0)
# In the standard case we consider (leftpiece and rightpiece) vs.singlepiece (that is, the merger of leftpiece and rightpiece).
# If either (or both) of the original pieces is a single character, we consider as an additional alternative whether to merge the single-character segment instead with the preceding or following segment, as appropriate.
# For each case:
# - calculate alternative costs
# - select among alternatives by sampling
# - update information
leftsingleton_case = (len(leftpiece) == 1) and (leftbreak != 0)
rightsingleton_case = (len(rightpiece) == 1) and (rightbreak != len(line.unbroken_text))
if (not leftsingleton_case and not rightsingleton_case):
decision = self.compare_simple_merge(line, single_segment, left_segment, right_segment)
if decision == 'alt':
self.update_for_simple_merge(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
# NOTE: if decision == 'current', make no changes
else: # special treatment for single characters
if leftsingleton_case:
precedingpiece = line.pieces[coverbrkindex-2]
preceding_segment = self.fetch_plogged_segment_from_dictionary(precedingpiece)
if rightsingleton_case:
followingpiece = line.pieces[coverbrkindex+1]
following_segment = self.fetch_plogged_segment_from_dictionary(followingpiece)
if (leftsingleton_case and not rightsingleton_case):
leftmergedpiece = precedingpiece + leftpiece
if leftmergedpiece in self.segment_object_dictionary:
leftmerged_segment = self.fetch_plogged_segment_from_dictionary(leftmergedpiece)
else:
leftmerged_segment = self.new_segment_object(leftmergedpiece, 0)
decision = self.compare_leftsingleton_merge(line, single_segment, left_segment, right_segment, preceding_segment, leftmerged_segment)
if decision == 'alt1':
self.update_for_simple_merge(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
elif decision == 'alt2':
self.update_for_leftsingleton_merge(line, attentionpoint, coverbrkindex, left_segment, preceding_segment, leftmerged_segment)
# NOTE: if decision == 'current', make no changes
elif (rightsingleton_case and not leftsingleton_case):
rightmergedpiece = rightpiece + followingpiece
if rightmergedpiece in self.segment_object_dictionary:
rightmerged_segment = self.fetch_plogged_segment_from_dictionary(rightmergedpiece)
else:
rightmerged_segment = self.new_segment_object(rightmergedpiece, 0)
decision = self.compare_rightsingleton_merge(line, single_segment, left_segment, right_segment, following_segment, rightmerged_segment)
if decision == 'alt1':
self.update_for_simple_merge(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
elif decision == 'alt2':
self.update_for_rightsingleton_merge(line, attentionpoint, coverbrkindex, right_segment, following_segment, rightmerged_segment)
# NOTE: if decision == 'current', make no changes
elif (rightsingleton_case and leftsingleton_case): # This case should really be "else:"
leftmergedpiece = precedingpiece + leftpiece
if leftmergedpiece in self.segment_object_dictionary:
leftmerged_segment = self.fetch_plogged_segment_from_dictionary(leftmergedpiece)
else:
leftmerged_segment = self.new_segment_object(leftmergedpiece, 0)
rightmergedpiece = rightpiece + followingpiece
if rightmergedpiece in self.segment_object_dictionary:
rightmerged_segment = self.fetch_plogged_segment_from_dictionary(rightmergedpiece)
else:
rightmerged_segment = self.new_segment_object(rightmergedpiece, 0)
decision = self.compare_bothsingletons_merge(line, single_segment, left_segment, right_segment, preceding_segment, following_segment, leftmerged_segment, rightmerged_segment)
if decision == 'alt1':
self.update_for_simple_merge(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
elif decision == 'alt2':
self.update_for_leftsingleton_merge(line, attentionpoint, coverbrkindex, left_segment, preceding_segment, leftmerged_segment)
elif decision == 'alt3':
self.update_for_rightsingleton_merge(line, attentionpoint, coverbrkindex, right_segment, following_segment, rightmerged_segment)
elif decision == 'alt4':
self.update_for_bothsingletons_merge(line, attentionpoint, coverbrkindex, left_segment, right_segment, preceding_segment, following_segment, leftmerged_segment, rightmerged_segment)
# NOTE: if decision == 'current', make no changes
else: # used when developing and testing individual parts of preceding code; should not be reached in regular operation.
decision = self.compare_simple_merge(line, single_segment, left_segment, right_segment)
if decision == 'alt':
self.update_for_simple_merge(line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment)
# N.B. Except for the adjustments to line's piecesorder_cost [log(factorial( len(self.pieces) ))],
# these 'compare_' functions could be made to work for both Splitting and Merging operations.
# Rename the alternatives with descriptive names; then the calling function could distinguish
# which is the current and which the alternate configurations.
# SPLITTING #
# ----------------------------------------------------------------------------- #
# FUNCTIONS FOR SAMPLING AMONG LOCAL CONFIGURATIONS WEIGHTED ACCORDING TO COST. #
# THESE FUNCTIONS APPLY TO DIFFERENT CASES. ALL BEGIN WITH THE WORD 'compare_'. #
# ----------------------------------------------------------------------------- #
def compare_simple_split(self, line, single_segment, left_segment, right_segment):
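        # Gibbs-style choice between keeping the single piece and splitting it in two: each option is weighted by the cost of the other, so the cheaper configuration is more likely to be sampled.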
# local contribution to line cost as currently configured
current_contribution = single_segment.get_instance_cost(self.totalsegmentcount)
# alternate configuration
alt_contribution = left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
math.log(1 + len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
# # FOR DETERMINISTIC SELECTION, USE THESE LINES
# #if alt_contribution < current_contribution:
# return 'alt'
# else
# return 'current'
# FOR SAMPLING, USE THESE LINES
normalizing_factor = 1.0 / (current_contribution + alt_contribution)
norm_compl_current = alt_contribution * normalizing_factor
norm_compl_alt = current_contribution * normalizing_factor
hypothesis_list = [('current', norm_compl_current), ('alt', norm_compl_alt)]
selection = weighted_choice(hypothesis_list)
#print(selection)
return selection
def compare_leftsingleton_split(self, line, single_segment, left_segment, right_segment, preceding_segment, leftmerged_segment):
# local contribution to line cost as currently configured
current_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
single_segment.get_instance_cost(self.totalsegmentcount)
# alternate configuration
alt1_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
math.log(1 + len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
# another alternate configuration
alt2_contribution = leftmerged_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount)
method = 'sampling'
# FOR DETERMINISTIC SELECTION, USE THESE LINES
if method == 'determinate':
min_contribution = min(current_contribution, alt1_contribution, alt2_contribution)
if min_contribution == alt1_contribution:
return 'alt1'
elif min_contribution == alt2_contribution:
return 'alt2'
else:
return 'current'
# FOR SAMPLING, USE THESE LINES
elif method == 'sampling':
normalizing_factor = 1.0 / (2 * (current_contribution + alt1_contribution + alt2_contribution))
norm_compl_current = (alt1_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt1 = (current_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt2 = (current_contribution + alt1_contribution) * normalizing_factor
hypothesis_list = [('current',norm_compl_current), ('alt1',norm_compl_alt1), ('alt2',norm_compl_alt2)]
selection = weighted_choice(hypothesis_list)
#print()
#print("cost_current =", current_contribution, " cost_alt1 =", alt1_contribution, " cost_alt2 =", alt2_contribution)
#print("weight_current =", norm_compl_current, " weight_alt1 =", norm_compl_alt1, " weight_alt2 =", norm_compl_alt2)
#print()
return selection
def compare_rightsingleton_split(self, line, single_segment, left_segment, right_segment, following_segment, rightmerged_segment):
# local contribution to line cost as currently configured
current_contribution = single_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount)
# alternate configuration
alt1_contribution = left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount) + \
math.log(1 + len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
# another alternate configuration
alt2_contribution = left_segment.get_instance_cost(self.totalsegmentcount) + \
rightmerged_segment.get_instance_cost(self.totalsegmentcount)
method = 'sampling'
# FOR DETERMINISTIC SELECTION, USE THESE LINES
if method == 'determinate':
min_contribution = min(current_contribution, alt1_contribution, alt2_contribution)
if min_contribution == alt1_contribution:
return 'alt1'
elif min_contribution == alt2_contribution:
return 'alt2'
else:
return 'current'
# FOR SAMPLING, USE THESE LINES
elif method == 'sampling':
normalizing_factor = 1.0 / (2 * (current_contribution + alt1_contribution + alt2_contribution))
norm_compl_current = (alt1_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt1 = (current_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt2 = (current_contribution + alt1_contribution) * normalizing_factor
hypothesis_list = [('current',norm_compl_current), ('alt1',norm_compl_alt1), ('alt2',norm_compl_alt2)]
selection = weighted_choice(hypothesis_list)
#print()
#print("cost_current =", current_contribution, " cost_alt1 =", alt1_contribution, " cost_alt2 =", alt2_contribution)
#print("weight_current =", norm_compl_current, " weight_alt1 =", norm_compl_alt1, " weight_alt2 =", norm_compl_alt2)
#print()
return selection
def compare_bothsingletons_split(self, line, single_segment, left_segment, right_segment, preceding_segment, following_segment, leftmerged_segment, rightmerged_segment):
# local contribution to line cost as currently configured
current_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
single_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount)
# four alternate configurations
alt1_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount) + \
math.log(1 + len(line.pieces), 2)
# last addend is adjustment to the current value
# of log(factorial( len(self.pieces) ))
alt2_contribution = leftmerged_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount)
alt3_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
left_segment.get_instance_cost(self.totalsegmentcount) + \
rightmerged_segment.get_instance_cost(self.totalsegmentcount)
alt4_contribution = leftmerged_segment.get_instance_cost(self.totalsegmentcount) + \
rightmerged_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to the current value
# of log(factorial( len(self.pieces) ))
method = 'sampling'
# # FOR DETERMINISTIC SELECTION, USE THESE LINES
if method == 'determinate':
min_contribution = min(current_contribution, alt1_contribution, alt2_contribution, alt3_contribution, alt4_contribution)
if min_contribution == alt1_contribution:
return 'alt1'
elif min_contribution == alt2_contribution:
return 'alt2'
elif min_contribution == alt3_contribution:
return 'alt3'
elif min_contribution == alt4_contribution:
return 'alt4'
else:
return 'current'
# FOR SAMPLING, USE THESE LINES
elif method == 'sampling':
sum = current_contribution + alt1_contribution + alt2_contribution + alt3_contribution + alt4_contribution
normalizing_factor = 1.0 / (4 * sum)
norm_compl_current = (sum - current_contribution) * normalizing_factor
norm_compl_alt1 = (sum - alt1_contribution) * normalizing_factor
norm_compl_alt2 = (sum - alt2_contribution) * normalizing_factor
norm_compl_alt3 = (sum - alt3_contribution) * normalizing_factor
norm_compl_alt4 = (sum - alt4_contribution) * normalizing_factor
hypothesis_list = [ ('current',norm_compl_current),
('alt1',norm_compl_alt1),
('alt2',norm_compl_alt2),
('alt3',norm_compl_alt3),
('alt4',norm_compl_alt4) ]
selection = weighted_choice(hypothesis_list)
return selection
# SPLITTING #
# ---------------------------------------------------------------------------- #
# FUNCTIONS FOR UPDATING RECORDS ACCORDING TO SELECTED PARSING MODIFICATIONS. #
# THESE FUNCTIONS APPLY TO DIFFERENT CASES. ALL BEGIN WITH THE WORD 'update_'. #
# ---------------------------------------------------------------------------- #
def update_for_simple_split(self, line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment):
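        # A simple split was chosen: rewrite the line's pieces and breaks, bump the global split counts, decrement the merged segment's count and increment the counts of the two new pieces.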
singlepiece = single_segment.segment_text
leftpiece = left_segment.segment_text
rightpiece = right_segment.segment_text
# UPDATE THE PARSE
line.piecesorder_cost += math.log(1 + len(line.pieces), 2)
line.pieces[coverbrkindex-1] = leftpiece # i.e., replace singlepiece by leftpiece
line.breaks.insert(coverbrkindex, attentionpoint) # or use addcut
line.pieces.insert(coverbrkindex, rightpiece)
# UPDATE GLOBAL COUNTS
self.totalsegmentcount += 1
self.split_count += 1
if left_segment.count == 0 and right_segment.count == 0:
self.split_2newsegments_count += 1
elif left_segment.count == 0 or right_segment.count == 0:
self.split_1newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.decrement_records(single_segment)
self.increment_records(left_segment)
self.increment_records(right_segment)
def update_for_leftsingleton_split(self, line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, preceding_segment, leftmerged_segment):
singlepiece = single_segment.segment_text
leftpiece = left_segment.segment_text
rightpiece = right_segment.segment_text
precedingpiece = preceding_segment.segment_text
leftmergedpiece = leftmerged_segment.segment_text
# UPDATE THE PARSE
line.pieces[coverbrkindex-2] = leftmergedpiece # i.e., replace precedingpiece by leftmergedpiece
line.pieces[coverbrkindex-1] = rightpiece
line.breaks[coverbrkindex-1] += len(leftpiece) # moves break from beginning of singlepiece over to beginning of rightpiece [note len(leftpiece) == 1]
# [note: this break should now be attentionpoint]
# UPDATE GLOBAL COUNTS
# Figure this situation as a split plus a merge.
# self.totalsegmentcount is unchanged
self.split_count += 1
self.merge_count += 1
if left_segment.count == 0 and right_segment.count == 0:
self.split_2newsegments_count += 1
elif left_segment.count == 0 or right_segment.count == 0:
self.split_1newsegment_count += 1
if leftmerged_segment.count == 0:
self.merge_newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.decrement_records(single_segment)
self.decrement_records(preceding_segment)
self.increment_records(right_segment)
self.increment_records(leftmerged_segment)
def update_for_rightsingleton_split(self, line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, following_segment, rightmerged_segment):
singlepiece = single_segment.segment_text
leftpiece = left_segment.segment_text
rightpiece = right_segment.segment_text
followingpiece = following_segment.segment_text
rightmergedpiece = rightmerged_segment.segment_text
# UPDATE THE PARSE
line.pieces[coverbrkindex-1] = leftpiece # i.e., replace singlepiece by leftpiece
line.pieces[coverbrkindex] = rightmergedpiece
line.breaks[coverbrkindex] -= len(rightpiece) # moves break from beginning of followingpiece over to beginning of rightmergedpiece [note len(rightpiece) == 1]
# [note: this break should now be attentionpoint]
# UPDATE GLOBAL COUNTS
# Figure this situation as a split plus a merge.
# self.totalsegmentcount is unchanged
self.split_count += 1
self.merge_count += 1
if left_segment.count == 0 and right_segment.count == 0:
self.split_2newsegments_count += 1
elif left_segment.count == 0 or right_segment.count == 0:
self.split_1newsegment_count += 1
if rightmerged_segment.count == 0:
self.merge_newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.decrement_records(single_segment)
self.decrement_records(following_segment)
self.increment_records(left_segment)
self.increment_records(rightmerged_segment)
def update_for_bothsingletons_split(self, line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment, \
preceding_segment, following_segment, leftmerged_segment, rightmerged_segment):
singlepiece = single_segment.segment_text
leftpiece = left_segment.segment_text
rightpiece = right_segment.segment_text
precedingpiece = preceding_segment.segment_text
followingpiece = following_segment.segment_text
leftmergedpiece = leftmerged_segment.segment_text
rightmergedpiece = rightmerged_segment.segment_text
# UPDATE THE PARSE
line.piecesorder_cost -= math.log(len(line.pieces), 2)
line.pieces.pop(coverbrkindex-1) # removes singlepiece
line.pieces[coverbrkindex-2] = leftmergedpiece # i.e., replace precedingpiece by leftmergedpiece
line.pieces[coverbrkindex-1] = rightmergedpiece
#the_break_to_remove = line.breaks[coverbrkindex]
#line.breaks.remove(the_break_to_remove)
line.breaks.pop(coverbrkindex)
line.breaks[coverbrkindex-1] += len(leftpiece) # moves break from beginning of (former) singlepiece over to beginning of (former) rightpiece
# [note: this break should now be attentionpoint]
# UPDATE GLOBAL COUNTS
# Figure this situation as one split and two merges
self.totalsegmentcount -= 1
self.split_count += 1
self.merge_count += 2
if left_segment.count == 0 and right_segment.count == 0: # since leftpiece and rightpiece are both single characters,
self.split_2newsegments_count += 1 # it's highly unlikely that either would have count == 0
elif left_segment.count == 0 or right_segment.count == 0:
self.split_1newsegment_count += 1
if leftmerged_segment.count == 0:
self.merge_newsegment_count += 1
if rightmerged_segment.count == 0:
self.merge_newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.decrement_records(single_segment)
self.decrement_records(preceding_segment)
self.decrement_records(following_segment)
self.increment_records(leftmerged_segment)
self.increment_records(rightmerged_segment)
# See previous note about similarity, hence possible reuse, of functions
# for Split and Merge operations.
# MERGING #
# ----------------------------------------------------------------------------- #
# FUNCTIONS FOR SAMPLING AMONG LOCAL CONFIGURATIONS WEIGHTED ACCORDING TO COST. #
# THESE FUNCTIONS APPLY TO DIFFERENT CASES. ALL BEGIN WITH THE WORD 'compare_'. #
# ----------------------------------------------------------------------------- #
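    # Note on the sampling scheme used by the compare_ functions below (the numbers in
    # this comment are made-up values for illustration, not taken from any corpus):
    # each candidate configuration is weighted by the *complement* of its normalized
    # cost, so cheaper configurations are sampled more often. With two options costing
    # 10 and 30 bits, the weights are 30/40 = 0.75 and 10/40 = 0.25 respectively; with
    # n options the weight of option i is (total - cost_i) / ((n-1) * total).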
def compare_simple_merge(self, line, single_segment, left_segment, right_segment):
# local contribution to line cost as currently configured
current_contribution = left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount)
# alternate configuration
alt_contribution = single_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
        # FOR DETERMINISTIC SELECTION, USE THESE LINES
        #if alt_contribution < current_contribution:
        #    return 'alt'
        #else:
        #    return 'current'
# FOR SAMPLING, USE THESE LINES
normalizing_factor = 1.0 / (current_contribution + alt_contribution)
norm_compl_current = alt_contribution * normalizing_factor
norm_compl_alt = current_contribution * normalizing_factor
hypothesis_list = [('current', norm_compl_current), ('alt', norm_compl_alt)]
selection = weighted_choice(hypothesis_list)
#print(selection)
return selection
def compare_leftsingleton_merge(self, line, single_segment, left_segment, right_segment, preceding_segment, leftmerged_segment):
# local contribution to line cost as currently configured
current_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount)
# alternate configuration
alt1_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
single_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
# another alternate configuration
alt2_contribution = leftmerged_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
method = 'sampling'
# FOR DETERMINISTIC SELECTION, USE THESE LINES
if method == 'determinate':
min_contribution = min(current_contribution, alt1_contribution, alt2_contribution)
if min_contribution == alt1_contribution:
return 'alt1'
elif min_contribution == alt2_contribution:
return 'alt2'
else:
return 'current'
# FOR SAMPLING, USE THESE LINES
elif method == 'sampling':
normalizing_factor = 1.0 / (2 * (current_contribution + alt1_contribution + alt2_contribution))
norm_compl_current = (alt1_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt1 = (current_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt2 = (current_contribution + alt1_contribution) * normalizing_factor
hypothesis_list = [('current',norm_compl_current), ('alt1',norm_compl_alt1), ('alt2',norm_compl_alt2)]
selection = weighted_choice(hypothesis_list)
#print()
#print("cost_current =", current_contribution, " cost_alt1 =", alt1_contribution, " cost_alt2 =", alt2_contribution)
#print("weight_current =", norm_compl_current, " weight_alt1 =", norm_compl_alt1, " weight_alt2 =", norm_compl_alt2)
#print()
return selection
def compare_rightsingleton_merge(self, line, single_segment, left_segment, right_segment, following_segment, rightmerged_segment):
# local contribution to line cost as currently configured
current_contribution = left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount)
# alternate configuration
alt1_contribution = single_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
# another alternate configuration
alt2_contribution = left_segment.get_instance_cost(self.totalsegmentcount) + \
rightmerged_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to present value of log(factorial( len(self.pieces) ))
method = 'sampling'
# FOR DETERMINISTIC SELECTION, USE THESE LINES
if method == 'determinate':
min_contribution = min(current_contribution, alt1_contribution, alt2_contribution)
if min_contribution == alt1_contribution:
return 'alt1'
elif min_contribution == alt2_contribution:
return 'alt2'
else:
return 'current'
# FOR SAMPLING, USE THESE LINES
elif method == 'sampling':
normalizing_factor = 1.0 / (2 * (current_contribution + alt1_contribution + alt2_contribution))
norm_compl_current = (alt1_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt1 = (current_contribution + alt2_contribution) * normalizing_factor
norm_compl_alt2 = (current_contribution + alt1_contribution) * normalizing_factor
hypothesis_list = [('current',norm_compl_current), ('alt1',norm_compl_alt1), ('alt2',norm_compl_alt2)]
selection = weighted_choice(hypothesis_list)
#print()
#print("cost_current =", current_contribution, " cost_alt1 =", alt1_contribution, " cost_alt2 =", alt2_contribution)
#print("weight_current =", norm_compl_current, " weight_alt1 =", norm_compl_alt1, " weight_alt2 =", norm_compl_alt2)
#print()
return selection
def compare_bothsingletons_merge(self, line, single_segment, left_segment, right_segment, preceding_segment, following_segment, leftmerged_segment, rightmerged_segment):
# local contribution to line cost as currently configured
current_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
left_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount)
# four alternate configurations
alt1_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
single_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to the current value
# of log(factorial( len(self.pieces) ))
alt2_contribution = leftmerged_segment.get_instance_cost(self.totalsegmentcount) + \
right_segment.get_instance_cost(self.totalsegmentcount) + \
following_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to the current value
# of log(factorial( len(self.pieces) ))
alt3_contribution = preceding_segment.get_instance_cost(self.totalsegmentcount) + \
left_segment.get_instance_cost(self.totalsegmentcount) + \
rightmerged_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2)
# last addend is adjustment to the current value
# of log(factorial( len(self.pieces) ))
alt4_contribution = leftmerged_segment.get_instance_cost(self.totalsegmentcount) + \
rightmerged_segment.get_instance_cost(self.totalsegmentcount) - \
math.log(len(line.pieces), 2) - \
math.log(len(line.pieces)-1, 2)
# last two addends are adjustment to the current value
# of log(factorial( len(self.pieces) ))
method = 'sampling'
# # FOR DETERMINISTIC SELECTION, USE THESE LINES
if method == 'determinate':
min_contribution = min(current_contribution, alt1_contribution, alt2_contribution, alt3_contribution, alt4_contribution)
if min_contribution == alt1_contribution:
return 'alt1'
elif min_contribution == alt2_contribution:
return 'alt2'
elif min_contribution == alt3_contribution:
return 'alt3'
elif min_contribution == alt4_contribution:
return 'alt4'
else:
return 'current'
# FOR SAMPLING, USE THESE LINES
elif method == 'sampling':
sum = current_contribution + alt1_contribution + alt2_contribution + alt3_contribution + alt4_contribution
normalizing_factor = 1.0 / (4 * sum)
norm_compl_current = (sum - current_contribution) * normalizing_factor
norm_compl_alt1 = (sum - alt1_contribution) * normalizing_factor
norm_compl_alt2 = (sum - alt2_contribution) * normalizing_factor
norm_compl_alt3 = (sum - alt3_contribution) * normalizing_factor
norm_compl_alt4 = (sum - alt4_contribution) * normalizing_factor
hypothesis_list = [ ('current',norm_compl_current),
('alt1',norm_compl_alt1),
('alt2',norm_compl_alt2),
('alt3',norm_compl_alt3),
('alt4',norm_compl_alt4) ]
selection = weighted_choice(hypothesis_list)
return selection
# MERGING #
# ---------------------------------------------------------------------------- #
# FUNCTIONS FOR UPDATING RECORDS ACCORDING TO SELECTED PARSING MODIFICATIONS. #
# THESE FUNCTIONS APPLY TO DIFFERENT CASES. ALL BEGIN WITH THE WORD 'update_'. #
# ---------------------------------------------------------------------------- #
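    # Each update_ function below keeps three kinds of state consistent after the chosen
    # split/merge is applied: (1) the Line's pieces/breaks lists (and its piecesorder_cost
    # when the number of pieces changes), (2) the Document-level counters used for the
    # per-iteration statistics, and (3) the per-segment counts and plogs, via
    # increment_records / decrement_records.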
def update_for_simple_merge(self, line, attentionpoint, coverbrkindex, single_segment, left_segment, right_segment):
singlepiece = single_segment.segment_text
leftpiece = left_segment.segment_text
rightpiece = right_segment.segment_text
# UPDATE THE PARSE
line.piecesorder_cost -= math.log(len(line.pieces), 2)
line.pieces[coverbrkindex-1] = singlepiece # i.e., replace leftpiece by singlepiece
line.breaks.pop(coverbrkindex)
line.pieces.pop(coverbrkindex)
# UPDATE GLOBAL COUNTS
self.totalsegmentcount -= 1
self.merge_count += 1
if single_segment.count == 0:
self.merge_newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.increment_records(single_segment)
self.decrement_records(left_segment)
self.decrement_records(right_segment)
def update_for_leftsingleton_merge(self, line, attentionpoint, coverbrkindex, left_segment, preceding_segment, leftmerged_segment):
leftpiece = left_segment.segment_text
precedingpiece = preceding_segment.segment_text
leftmergedpiece = leftmerged_segment.segment_text
# UPDATE THE PARSE
line.piecesorder_cost -= math.log(len(line.pieces), 2)
line.pieces[coverbrkindex-2] = leftmergedpiece # i.e., replace precedingpiece by leftmergedpiece
line.pieces.pop(coverbrkindex-1)
line.breaks.pop(coverbrkindex-1)
# UPDATE GLOBAL COUNTS
self.totalsegmentcount -= 1
self.merge_count += 1
if leftmerged_segment.count == 0:
self.merge_newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.increment_records(leftmerged_segment)
self.decrement_records(preceding_segment)
self.decrement_records(left_segment)
def update_for_rightsingleton_merge(self, line, attentionpoint, coverbrkindex, right_segment, following_segment, rightmerged_segment):
rightpiece = right_segment.segment_text
followingpiece = following_segment.segment_text
rightmergedpiece = rightmerged_segment.segment_text
# UPDATE THE PARSE
line.piecesorder_cost -= math.log(len(line.pieces), 2)
line.pieces[coverbrkindex] = rightmergedpiece # i.e., replace rightpiece by rightmergedpiece
line.pieces.pop(coverbrkindex+1)
line.breaks.pop(coverbrkindex+1)
# UPDATE GLOBAL COUNTS
self.totalsegmentcount -= 1
self.merge_count += 1
if rightmerged_segment.count == 0:
self.merge_newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.increment_records(rightmerged_segment)
self.decrement_records(right_segment)
self.decrement_records(following_segment)
def update_for_bothsingletons_merge(self, line, attentionpoint, coverbrkindex, left_segment, right_segment, \
preceding_segment, following_segment, leftmerged_segment, rightmerged_segment):
leftpiece = left_segment.segment_text
rightpiece = right_segment.segment_text
precedingpiece = preceding_segment.segment_text
followingpiece = following_segment.segment_text
leftmergedpiece = leftmerged_segment.segment_text
rightmergedpiece = rightmerged_segment.segment_text
# UPDATE THE PARSE
line.piecesorder_cost -= ( math.log(len(line.pieces), 2) + math.log(len(line.pieces)-1, 2) )
line.pieces.pop(coverbrkindex+1) # removes followingpiece
line.pieces.pop(coverbrkindex) # removes rightpiece
line.pieces[coverbrkindex-1] = rightmergedpiece # i.e., replace leftpiece by rightmergedpiece
line.pieces[coverbrkindex-2] = leftmergedpiece # i.e., replace precedingpiece by leftmergedpiece
line.breaks.pop(coverbrkindex+1)
line.breaks.pop(coverbrkindex-1)
# UPDATE GLOBAL COUNTS
# Figure this situation as two merges
self.totalsegmentcount -= 2
self.merge_count += 2
if leftmerged_segment.count == 0:
self.merge_newsegment_count += 1
if rightmerged_segment.count == 0:
self.merge_newsegment_count += 1
# UPDATE DICTIONARY ENTRIES
self.increment_records(leftmerged_segment)
self.increment_records(rightmerged_segment)
self.decrement_records(preceding_segment)
self.decrement_records(following_segment)
self.decrement_records(left_segment)
self.decrement_records(right_segment)
##########################
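    # decrement_records / increment_records keep the segment dictionary in step with the
    # current parse: a segment whose count reaches zero is deleted outright, a previously
    # unseen segment is inserted before its count is incremented, and surviving entries
    # have their per-instance charges and plog recomputed against totalsegmentcount.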
def decrement_records(self, this_segment):
segtext = this_segment.segment_text
this_segment.count -= 1
if this_segment.count == 0:
del self.segment_object_dictionary[segtext]
if segtext in self.true_segment_dictionary: # additional info; no contribution to processing
self.deletedandtrue_devcount += 1
self.deletedandtrue_dictionary[segtext] = self.true_segment_dictionary[segtext]
else:
this_segment.divide_charges_among_instances()
this_segment.plog = this_segment.get_plog(self.totalsegmentcount)
def increment_records(self, this_segment):
segtext = this_segment.segment_text
if segtext not in self.segment_object_dictionary:
self.segment_object_dictionary[segtext] = this_segment
if segtext in self.true_segment_dictionary: # additional info; no contribution to processing
self.addedandtrue_devcount += 1
self.addedandtrue_dictionary[segtext] = self.true_segment_dictionary[segtext]
self.segment_object_dictionary[segtext].count += 1
self.segment_object_dictionary[segtext].divide_charges_among_instances()
self.segment_object_dictionary[segtext].plog = self.segment_object_dictionary[segtext].get_plog(self.totalsegmentcount)
##########################
def lrparse_line(self, line, longest_dictionary_entry_length, outfile ): # from wordbreaker.py: ParseWord() Needs different name. outfile is for verbose (mostly --last part always prints).
# <---- outerscan range----------------------------------------------------> #
# starting point----^ ^---outerscan
# <--------chunkstart range-->
# chunkstart---^
# <------chunk--------->
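        # Worked illustration (hypothetical values, not from the data): for
        # line.unbroken_text == "thecat" and longest_dictionary_entry_length == 3,
        # the pass with outerscan == 5 tries chunkstart in range(2, 5), i.e. the
        # candidate chunks "eca", "ca" and "a", each appended to the cheapest parse
        # already found for the prefix ending at that chunkstart.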
verboseflag = False # False # True
if verboseflag: print("\n", file=outfile)
if verboseflag: print(line.unbroken_text, file=outfile)
if verboseflag: print("Outer\tInner", file=outfile)
if verboseflag: print("scan:\tscan:\tChunk\tFound?", file=outfile) # column headers
linelength = len(line.unbroken_text)
parse2here=dict() # key is an int < linelength, value is a list of pieces
parse2here[0] = [] # empty list
bestcost2here = dict() # key is an int < linelength, value is a sum of segment costs + ordercost for that many segments
bestcost2here[0] = 0
for outerscan in range(1,linelength+1):
# Note: at this point in the computation,
# the values of parse2here[x] and bestcost2here[x] are known for all x < outerscan.
# The purpose of this pass is to calculate these values for x = outerscan.
parse2here[outerscan] = list()
# CONSIDER CHUNK TO EXTEND A SHORTER PREVIOUSLY-OBTAINED PARSE UP TO CURRENT VALUE OF outerscan.
# CHECK ALL POSSIBLE CHUNK START POINTS. KEEP TRACK TO FIND THE PARSE WITH LOWEST COST.
startingpoint = max(0, outerscan - longest_dictionary_entry_length)
howmanyspaces = -1 # This variable is for formatting.
            chosen_cost = FLOAT_INF     # start at infinity so the first dictionary chunk found always improves on it
chosen_chunk = line.unbroken_text
chosen_chunkstart = startingpoint # MIGHT BE MORE CONSISTENT TO STEP BACKWARDS set to outerscan-1 ??
# ALL CHUNKS HAVE SAME RIGHT ENDPOINT (outerscan-1)
# START WITH FIRST POSSIBLE CHUNK (the chunk with left endpoint at startingpoint)
# LOOP THROUGH SUCCEEDING CHUNK START POINTS
# WOULD BACKWARDS BE CLEARER? INSTEAD OF startingpoint, CALL IT limitpoint?
for chunkstart in range(startingpoint, outerscan):
chunk = line.unbroken_text[chunkstart: outerscan]
if verboseflag: print("\n %3s\t%3s " % (outerscan, chunkstart), end=" ", file=outfile)
if chunk not in self.segment_object_dictionary:
continue
else:
howmanyspaces +=1
if verboseflag:
for x in range(howmanyspaces):
print(" ", end="", file=outfile)
if verboseflag: print(" %s"% chunk, end=" ", file=outfile)
if verboseflag: print(" %5s" % "Yes.", end=" ", file=outfile)
chunk_segment = self.fetch_plogged_segment_from_dictionary(chunk)
chunk_cost = chunk_segment.get_instance_cost(self.totalsegmentcount)
testcost = bestcost2here[chunkstart] + chunk_cost + \
math.log( 1 + len(parse2here[chunkstart]), 2 )
#math.log( math.factorial( 1 + len(parse2here[chunkstart]) ), 2)
#print(" %7.3f bits" % (testcost), "= %7.3f" % (bestcost2here[chunkstart]), "+ %7.3f" % (chunk_cost), "+ %7.3f" % (math.log( math.factorial( 1 + len(parse2here[chunkstart]) ), 2)) )
if verboseflag: print(" %7.3f bits" % (testcost), end=" ", file=outfile)
if verboseflag: print(" %s" % parse2here[chunkstart], end=" ", file=outfile) # put this at end of line due to spacing
if testcost < chosen_cost:
chosen_cost = testcost
chosen_chunk = chunk
chosen_chunkstart = chunkstart
bestcost2here[outerscan] = chosen_cost
parse2here[outerscan] = list(parse2here[chosen_chunkstart]) # makes a copy
parse2here[outerscan].append(chosen_chunk)
#if verboseflag: print("\n\t\t\t\t\t\t\t\t\tchosen:", chosen_chunk, end=" ", file=outfile)
if verboseflag: print("\nchosen:", chosen_chunk, end=" ", file=outfile)
if verboseflag: print(" parse [0, %d)" % outerscan, "= %s " % parse2here[outerscan], end=" ", file=outfile)
if verboseflag: print("\n", file=outfile)
parsed_line = parse2here[linelength] # IS IT linelength-1 OR linelength? ANSWER: linelength
bitcost = bestcost2here[linelength]
print("\n%7.3f\t" % line.total_cost, end="", file=outfile) # How to get this right-aligned?
for piece in line.pieces:
print(" %s" % piece, end="", file=outfile)
print(file=outfile)
print("%7.3f\t" % bitcost, end="", file=outfile) # Here also.
for chunk in parsed_line:
print(" %s" % chunk, end="", file=outfile)
print("\n", file=outfile)
return (parsed_line, bitcost)
def compute_brokenline_cost(self, line):
line.total_cost = 0.0 # should already be set by __init__
for piece in line.pieces:
if piece in self.segment_object_dictionary:
this_segment = self.fetch_plogged_segment_from_dictionary(piece)
else:
this_segment = self.new_segment_object(piece, 0)
piece_cost = this_segment.get_instance_cost(self.totalsegmentcount)
line.total_cost += piece_cost
line.piecesorder_cost = math.log (math.factorial(len(line.pieces)), 2)
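        # log2(len(pieces)!) is the piece-order cost; the split/merge update_ functions
        # maintain this term incrementally, adding or subtracting log2(len(pieces)) per
        # piece gained or lost, rather than recomputing the factorial each time.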
line.total_cost += line.piecesorder_cost
def populate_line_displaylists(self, line):
self.count_list = [] # List of segment counts, in proper order.
self.phonocost_portion_list = [] # List per segment of phonocost_portion, in proper order. Similarly for other list variables.
self.ordercost_portion_list = [] # The lists are used to arrange segment information attractively for display.
self.inclusioncost_portion_list = [] # Are they useful to retain? Should they be in a separate Display class?
self.plog_list = []
self.subtotal_list = [] # list per segment of following quantity:
for piece in line.pieces:
if piece in self.segment_object_dictionary:
this_segment = self.fetch_plogged_segment_from_dictionary(piece)
else:
this_segment = self.new_segment_object(piece, 0)
piece_cost = this_segment.get_instance_cost(self.totalsegmentcount)
# THESE LIST VARIABLES EXIST FOR DISPLAY ONLY [expect changes if class structure is reworked]
line.count_list.append(this_segment.count)
line.plog_list.append(this_segment.get_plog_charge(self.totalsegmentcount)) #(PLOGCOEFF * this_instance.plog)
line.phonocost_portion_list.append(this_segment.phonocost_portion)
line.ordercost_portion_list.append(this_segment.ordercost_portion)
line.inclusioncost_portion_list.append(this_segment.inclusioncost_portion)
line.subtotal_list.append(piece_cost)
def rebase(self, verbose_outfile):
# REPARSE
longest = 0
for piece in self.segment_object_dictionary:
if len(piece) > longest:
longest = len(piece)
print("longest_entry_length =", longest)
print("longest_entry_length =", longest, file=verbose_outfile)
print("parsing...")
for ln in self.line_object_list:
(parsed_line, bitcost) = self.lrparse_line(ln, longest, verbose_outfile)
ln.pieces = list(parsed_line) # copy
ln.populate_breaks_from_pieces()
#ln.total_cost = bitcost [stored for comparison in RECOMPUTE section]
# RECOUNT SEGMENTS
# rebuild the dictionary IS THERE ANYTHING ELSE THAT NEEDS TO BE REINITED ????
print("updating segment counts in the dictionary...")
newdictionary = {}
self.totalsegmentcount = 0
for ln in self.line_object_list:
for piece in ln.pieces:
self.totalsegmentcount += 1 # ALERT - for any item in or about to go into the dictionary,
if not piece in newdictionary: # increment totalsegmentcount BEFORE populating its plog variable
newdictionary[piece] = self.new_segment_object(piece, 1)
else:
newdictionary[piece].count += 1
# fill in the information that depends on the count
for sgmt in newdictionary.values():
sgmt.divide_charges_among_instances()
sgmt.get_plog(self.totalsegmentcount)
self.segment_object_dictionary = copy.deepcopy(newdictionary)
# RECOMPUTE
print("computing line costs...")
self.overall_cost = 0.0
for ln in self.line_object_list:
for piece in ln.pieces:
assert(piece in self.segment_object_dictionary) # there should be no "new" pieces
self.compute_brokenline_cost(ln)
self.overall_cost += ln.total_cost
def load_truth_and_data(self, true_line, line_object): # from wordbreaker code
unbroken_text_construction = ""
true_breaks_construction = list()
true_breaks_construction.append(0) # always put a break at the beginning
# Clean up data as desired
true_line = true_line.casefold()
true_line = true_line.replace(".", " . ") # these characters will go into TrueDictionary as separate words
true_line = true_line.replace(",", " , ")
true_line = true_line.replace(";", " ; ")
true_line = true_line.replace("!", " ! ")
true_line = true_line.replace("?", " ? ")
true_line = true_line.replace(":", " : ")
true_line = true_line.replace(")", " ) ")
true_line = true_line.replace("(", " ( ")
pieces_list = true_line.split() # split true_line into pieces
if len(pieces_list) <= 1: # punctuation only. 10 such lines in Brown corpus.
return
#pieces_list.append("\n") # added only to match previous runs; may prefer without. ATTN: outfile_corpuslines, outfile_lrparse
for piece in pieces_list:
self.true_totalsegmentcount += 1 # Record in TrueDictionary
if piece not in self.true_segment_dictionary:
self.true_segment_dictionary[piece] = 1
else:
self.true_segment_dictionary[piece] += 1
unbroken_text_construction += piece # Build up unbroken line
true_breaks_construction.append(len(unbroken_text_construction))
line_object.unbroken_text = unbroken_text_construction
line_object.true_text = true_line
line_object.true_breaks = true_breaks_construction
self.line_object_list.append(line_object)
def precision_recall(self): # from wordbreaker
# the following calculations are precision and recall *for breaks* (not for morphemes)
true_positives = 0
for line in self.line_object_list:
line_true_positives = len(set(line.breaks).intersection(set(line.true_breaks))) - 1 # IMPORTANT - This removes the zero breakpoint
true_positives += line_true_positives
self.break_precision = float(true_positives) / self.totalsegmentcount
self.break_recall = float(true_positives) / self.true_totalsegmentcount
#formatstring = "%16s %12s %6.4f %9s %6.4f"
#print()
#print(formatstring %( "Break based word", "precision", self.break_precision, "recall", self.break_recall))
#print(formatstring %( "Break based word", "precision", break_precision, "recall", break_recall), file=outfile)
# Token_based precision for word discovery:
if True:
true_positives = 0
for piece in self.segment_object_dictionary:
if piece in self.true_segment_dictionary:
these_true_positives = min(self.true_segment_dictionary[piece], self.segment_object_dictionary[piece].count)
else:
these_true_positives = 0
true_positives += these_true_positives
self.token_precision = float(true_positives) / self.totalsegmentcount
self.token_recall = float(true_positives) / self.true_totalsegmentcount
#print(formatstring %( "Token_based word", "precision", word_precision, "recall", word_recall), file=outfile)
#print(formatstring %( "Token_based word", "precision", word_precision, "recall", word_recall))
# Type_based precision for word discovery:
if True:
true_positives = 0
for piece in self.segment_object_dictionary:
if piece in self.true_segment_dictionary:
true_positives +=1
self.dictionary_precision = float(true_positives) / len(self.segment_object_dictionary)
self.dictionary_recall = float(true_positives) / len(self.true_segment_dictionary)
#print >>outfile, "\n\n***\n"
#print "Type_based Word Precision %6.4f; Word Recall %6.4f" %(word_precision ,word_recall)
#print(formatstring %( " Type_based word", "precision", word_precision, "recall", word_recall), file=outfile)
#print(formatstring %( " Type_based word", "precision", word_precision, "recall", word_recall))
def output_stats(self, outfile, loopno, show_cost):
if (loopno % REBASE_PERIOD == 0):
print()
print(file=outfile)
formatstring = "%4d S:%4d M:%4d new:%2d %2d %3d At:%4d Dt:%4d BP: %6.4f BR: %6.4f TP: %6.4f TR: %6.4f DP: %6.4f DR: %6.4f"
filled_string = formatstring % (loopno,
self.split_count,
self.merge_count,
self.split_1newsegment_count,
self.split_2newsegments_count,
self.merge_newsegment_count,
self.addedandtrue_devcount,
self.deletedandtrue_devcount,
self.break_precision,
self.break_recall,
self.token_precision,
self.token_recall,
self.dictionary_precision,
self.dictionary_recall)
cost_string = ""
if show_cost == True:
cost_string = " COST = %.4f" % self.overall_cost
#number_with_commas = "{0:,.4f}".format(self.overall_cost)
#cost_string = " COST = %s" % number_with_commas
print( filled_string + cost_string)
print( filled_string + cost_string, file=outfile)
def test_unbroken_text(self, text):
print("\npoint = 0 (i.e., unbroken text)")
test_parse = Line(text)
test_parse.breaks = [0, len(text)]
test_parse.pieces.append(text)
self.compute_brokenline_cost(test_parse)
self.populate_line_displaylists(test_parse)
bestscore = test_parse.total_cost
bestlocation = 0
test_parse.displaytoscreen_detail()
for point in range(1, len(text)):
print("\npoint =", point)
test_parse = Line(text)
test_parse.breaks = [0, point, len(text)]
test_parse.pieces.append(text[0:point])
test_parse.pieces.append(text[point:])
self.compute_brokenline_cost(test_parse)
self.populate_line_displaylists(test_parse)
if test_parse.total_cost < bestscore:
bestscore = test_parse.total_cost
bestlocation = point
test_parse.displaytoscreen_detail()
print("\nBest score = ", bestscore, "at point = ", bestlocation, "\n") # FORMAT bestscore AS %8.1f
## ---------------------------------------------------------------------------------------##
## End of class Document:
## ---------------------------------------------------------------------------------------#
def weighted_choice(hypothesis_list):
samplepoint = random.random()
#print("In weighted_choice function, samplepoint =", samplepoint)
cum = 0.0
for (hyp, weight) in hypothesis_list:
cum += weight
#print("cum =", cum)
if samplepoint < cum:
#print("returning hyp:", hyp)
return hyp
return hyp
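# Illustrative call (made-up weights): weighted_choice([('current', 0.75), ('alt', 0.25)])
# returns 'current' about three times out of four. The trailing `return hyp` above is a
# safeguard: if floating-point rounding makes the weights sum to slightly less than 1,
# the last hypothesis is returned instead of falling off the end of the loop.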
def save_state_to_file(loopno, pkl_outfile_name, document_object):
if g_encoding == "utf8":
pkl_outfile = codecs.open(pkl_outfile_name, encoding = "utf-8", mode = 'w',)
else:
pkl_outfile = open(pkl_outfile_name, mode='w')
# Header for jsonpickle outfile
i = datetime.datetime.now()
print("# Date = " + i.strftime("%Y_%m_%d"), file=pkl_outfile)
print("# Time = " + i.strftime("%H_%M"), file=pkl_outfile)
print(file=pkl_outfile)
print("#----------------------------------------\n# Loop number:", loopno, file=pkl_outfile)
print("#----------------------------------------", file=pkl_outfile)
print("serializing...")
serialstr = jsonpickle.encode(document_object, keys=True)
print("printing serialization to file...")
print(serialstr, file=pkl_outfile)
pkl_outfile.close()
def load_state_from_file(pkl_infile_name):
if g_encoding == "utf8":
pkl_infile = codecs.open(pkl_infile_name, encoding = 'utf-8')
else:
pkl_infile = open(pkl_infile_name)
print("Loading saved state...")
filelines = pkl_infile.readlines()
serialstr = filelines[-1]
#print(serialstr[0:40])
document = jsonpickle.decode(serialstr, keys=True)
pkl_infile.close()
return document
#----------------------------------------------------------------------------------#
# this is not currently being used:
# #def ShiftBreak (word, thiswordbreaks, shiftamount, outfile, totalmorphemecount):
#    Code for this function is available in earlier versions in the git repository.
#----------------------------------------------------------------------------------#
#--------------------------------------------------------------------##
# Main program
# (This will be revised to conform to lxa2015 approach.)
#--------------------------------------------------------------------##
#---------------------------------------------------------#
# 0. Set up files for input and output
#---------------------------------------------------------#
# organize files like this or change the paths here for input
language = "english"
infolder = '../data/' + language + '/'
size = 50 #french 153 10 english 14 46
infilename = infolder + "english-brown.txt" # corpus, instead of .dx1 file
# if an argument is specified, uses that instead of the above path for input
if len(sys.argv) > 1:
print(sys.argv[1])
infilename = sys.argv[1]
if not os.path.isfile(infilename):
print("Warning: ", infilename, " does not exist.")
if g_encoding == "utf8":
infile = codecs.open(infilename, encoding = 'utf-8')
else:
infile = open(infilename)
print("\nData file: ", infilename)
# organize files like this or change the paths here for output
outfolder = '../data/'+ language + '/gibbs_wordbreaking/'
outfilename_gibbspieces = outfolder + "gibbs_pieces.txt"
outfilename_corpuslines = outfolder + "corpus_lines.txt"
outfilename_stats = outfolder + "stats.txt"
outfilename_lrparse = outfolder + "left_right_parse.txt"
if g_encoding == "utf8":
outfile_gibbspieces = codecs.open(outfilename_gibbspieces, encoding = "utf-8", mode = 'w',)
outfile_corpuslines = codecs.open(outfilename_corpuslines, encoding = "utf-8", mode = 'w',)
outfile_stats = codecs.open(outfilename_stats, encoding = "utf-8", mode = 'w',)
outfile_lrparse = codecs.open(outfilename_lrparse, encoding = "utf-8", mode = 'w',)
print("yes utf8")
else:
outfile_gibbspieces = open(outfilename_gibbspieces, mode='w')
outfile_corpuslines = open(outfilename_corpuslines, mode='w')
outfile_stats = open(outfilename_stats, mode='w')
outfile_lrparse = open(outfilename_lrparse, mode='w')
# 2016_02_25
outfilename_addedandtrue = outfolder + "addedandtrue.txt"
outfilename_deletedandtrue = outfolder + "deletedandtrue.txt"
outfile_addedandtrue = open(outfilename_addedandtrue, mode='w')
outfile_deletedandtrue = open(outfilename_deletedandtrue, mode='w')
if ResumeLoopno > 0:
#---------------------------------------------------------#
# Load state to resume processing
#---------------------------------------------------------#
print()
print("State will be loaded from the following file:")
os.system("ls -l jsonpickle_infile.txt")
print()
this_document = load_state_from_file("jsonpickle_infile.txt") # ln -s <relative_or_absolute_filename> jsonpickle_infile.txt
random.setstate(this_document.random_state) # restores state of random number generator
else:
#---------------------------------------------------------#
# 1. Input
#---------------------------------------------------------#
# Once jsonpickle is set up,
# loading from a saved state (to resume processing)
# will be an alternative to sections 1 and 2.
this_document = Document()
random.seed(a=5) # audrey 2015_12_09 #Note that integer seed is not affected by seed change in python3
# THIS PART IS FOR CORPUS INPUT
truelines_list = infile.readlines()
infile.close()
for trueline in truelines_list:
line_object = Line("dummy")
this_document.load_truth_and_data(trueline, line_object)
print("Data file has", len(this_document.line_object_list), "lines,", \
len(this_document.true_segment_dictionary), "distinct words,", \
this_document.true_totalsegmentcount, "word occurrences.")
print()
# THIS PART IS FOR READING FROM dx1 FILE [not yet reworked for new class structure 2016_01_21]
#filelines= infile.readlines()
#WordCounts={}
## add counts for all words in dictionary
#for line in filelines:
# pieces = line.split(' ')
# word=pieces[0] # the first column in the dx1 file is the actual word
# word = ''.join([c.lower() for c in word if c not in "()1234567890"])
# if word in WordCounts:
# WordCounts[word] += 1
# else:
# WordCounts[word]= 1
#print "We read", len(WordCounts), "words."
# #saves words also in list format, then sorts alphabetically (in case they're not already?)
#wordlist = WordCounts.keys()
#wordlist.sort()
#---------------------------------------------------------#
# 2. Random splitting of words
#---------------------------------------------------------#
this_document.initial_segmentation()
print("Initial randomization completed.")
loopno = -1
this_document.precision_recall()
this_document.output_stats(outfile_stats, loopno, show_cost = False)
# THIS PART IS PROBABLY TEMPORARY OR IF NOT MAY BE REORGANIZED
#-----------------------------#
# output results #
#-----------------------------#
#if loopno == 0 or loopno == 10 or loopno == 20 or loopno == 100 or loopno == NumberOfIterations -1:
if False:
if ((loopno+1) % REBASE_PERIOD == 0) or (loopno == NumberOfIterations -1):
for line in this_document.line_object_list:
# computes cost for entire line using information recorded in line and segment objects; does not change parse.
for piece in line.pieces:
assert(piece in this_document.segment_object_dictionary) # there should be no "new" pieces
this_document.compute_brokenline_cost(line) # needed only for display on lrparse.txt, not for processing
if ((loopno+1) % REBASE_PERIOD == 0):
this_document.rebase(outfile_lrparse) # reparse, recount, recompute
this_document.precision_recall()
this_document.output_stats(outfile_stats, loopno, show_cost = True)
if loopno == NumberOfIterations -1:
#this_document.output_corpuslines_detail(outfile1, loopno) # displays text and also total line cost, detailed by segment and cost component
this_document.output_corpuslines_textonly(outfile_corpuslines, loopno) # "textonly" makes it easier to see diffs during development
this_document.output_gibbspieces(outfile_gibbspieces, loopno)
if SaveState == True:
this_document.random_state = random.getstate() # saves state of random number generator
save_state_to_file(loopno, outfolder + "jsonpickle_" + str(loopno) + ".txt", this_document)
#----------------------------------------------------------#
# 3. Main loop
#----------------------------------------------------------#
# Markov chain based on sampling individual components (i.e., distribution of individual segment conditioned on the other segments)
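# In Gibbs-sampling terms: each pass below proposes local changes to one line's
# segmentation at a time, holding the segment counts contributed by the rest of the
# corpus fixed, so one pass over line_object_list is one sweep of the sampler.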
for loopno in range (ResumeLoopno, NumberOfIterations):
this_document.split_count = 0
this_document.merge_count = 0
this_document.split_1newsegment_count = 0
this_document.split_2newsegments_count = 0
this_document.merge_newsegment_count = 0
# 2016_02_25
this_document.addedandtrue_devcount = 0
this_document.deletedandtrue_devcount = 0
for line in this_document.line_object_list:
this_document.compare_alt_parse(line)
this_document.precision_recall()
this_document.output_stats(outfile_stats, loopno, show_cost = False)
#-----------------------------#
# output results #
#-----------------------------#
#if loopno == 0 or loopno == 10 or loopno == 20 or loopno == 100 or loopno == NumberOfIterations -1:
if ((loopno+1) % REBASE_PERIOD == 0) or (loopno == NumberOfIterations -1):
for line in this_document.line_object_list:
# computes cost for entire line using information recorded in line and segment objects; does not change parse.
for piece in line.pieces:
assert(piece in this_document.segment_object_dictionary) # there should be no "new" pieces
this_document.compute_brokenline_cost(line) # at this loopno, needed only for display on lrparse.txt, not for processing
if ((loopno+1) % REBASE_PERIOD == 0):
this_document.rebase(outfile_lrparse) # reparse, recount, recompute
this_document.precision_recall()
this_document.output_stats(outfile_stats, loopno, show_cost = True)
if loopno == NumberOfIterations -1:
#this_document.output_corpuslines_detail(outfile1, loopno) # displays text and also total line cost, detailed by segment and cost component
this_document.output_corpuslines_textonly(outfile_corpuslines, loopno) # "textonly" makes it easier to see diffs during development
this_document.output_gibbspieces(outfile_gibbspieces, loopno)
# 2016_02_25
this_document.output_addedandtrue(outfile_addedandtrue, loopno)
this_document.output_deletedandtrue(outfile_deletedandtrue, loopno)
if SaveState == True:
this_document.random_state = random.getstate() # saves state of random number generator
save_state_to_file(loopno, outfolder + "jsonpickle_" + str(loopno) + ".txt", this_document)
# CLOSE OUTPUT FILES SO THAT INFORMATION DERIVED BY PROGRAM CAN BE VIEWED DURING INTERACTIVE QUERIES
outfile_addedandtrue.close()
outfile_deletedandtrue.close()
outfile_lrparse.close()
outfile_stats.close()
outfile_corpuslines.close()
outfile_gibbspieces.close()
while (True):
command = input("Enter word:")
if len(command)==0:
print("enter a word.")
continue
if command =="exit" :
break
this_document.test_unbroken_text(command)
|
[
"audrey@audreyxvi.local"
] |
audrey@audreyxvi.local
|
060db81fe3de88de7ae6b3057678182060f034f7
|
487487954ce7b34b97a904be4082e5da5cfacec2
|
/038 - [O.O] Funcionário 2.py
|
6b7deeeef87897702790cef86d3e6fec09165895
|
[] |
no_license
|
rifatmondol/Python-Exercises
|
62eae905793e4f747a51653fd823fe7aba49a3c3
|
5b5f3fa6bf34408ca9afa035604a79cf19559304
|
refs/heads/master
| 2022-01-19T02:07:10.940300
| 2018-12-26T18:07:17
| 2018-12-26T18:07:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
#038 - Improve the class from the previous exercise by adding a method aumentarSalario(porcentualDeAumento) that raises the employee's salary by a given percentage.
# Example usage:
# harry=funcionário("Harry",25000)
# harry.aumentarSalario(10)
class Funcionario:
nome = None
salario = None
tax = None
def __init__(self, nome, salario, tax):
self.nome = nome
self.salario = salario
self.tax = tax
def nomeFunc(self):
return self.nome
def Salario(self):
return self.salario
def IncSalario(self):
self.perc = (self.tax/100)
incSal = self.perc*self.salario
novoSalario = self.salario + incSal
self.novoSalario = novoSalario
return self.novoSalario
nome = input('Nome: ').upper()
salario = float(input('Salário: '))
tax = float(input('Taxa de aumento de salário: '))
Func = Funcionario(nome, salario, tax)
print('O(a) funcionário(a) {} recebe o salário de R${:.2f}'.format(Func.nomeFunc(), Func.Salario()))
print('Após 1 ano, ele(a) receberá um salário de R${:.2f}'.format(Func.IncSalario()))
|
[
"astrodelta14@gmail.com"
] |
astrodelta14@gmail.com
|
46e1b390c4e8f62a880af59b8c029db26f3a94d9
|
e0b5a869c687fea3c9dda138734d25b3c5e68b88
|
/5. Inheritance/5.2 Exercises/Problem 2- Zoo/project/animal.py
|
261c20dcaf8fa94f6ac665dceac99a882268cbda
|
[] |
no_license
|
asen-krasimirov/Python-OOP-Course
|
b74de5f83fb3e287cb206d48c3db79d15657c902
|
c6df3830168d8b8d780d4fb4ccfe67d1bb350f7e
|
refs/heads/main
| 2023-02-01T04:09:33.796334
| 2020-12-15T14:56:59
| 2020-12-15T14:56:59
| 309,389,119
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
class Animal:
def __init__(self, name: str):
self.__name = name
@property
def name(self):
return self.__name
|
[
"68907559+asen-krasimirov@users.noreply.github.com"
] |
68907559+asen-krasimirov@users.noreply.github.com
|
9d9e3c36b558a527c58f312429f564e2c2aacc91
|
c152ee1ffd2e4c4f3a2ff085517f0149cff87b23
|
/eventex/subscriptions/views.py
|
347a539a324a7294ca2ef3e76cb526f52e70c2de
|
[] |
no_license
|
veridianamt/eventex
|
3319dd1e5f58943ea0aca75f40933f8e5ee48527
|
dca5e043459cc24dff21baaa73887e43a0f6ceec
|
refs/heads/master
| 2021-01-21T18:34:05.146864
| 2018-01-25T18:18:56
| 2018-01-25T18:18:56
| 92,058,359
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,650
|
py
|
from django.conf import settings
from django.core import mail
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.shortcuts import resolve_url as r
from django.template.loader import render_to_string
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
def new(request):
if request.method == 'POST':
return create(request)
return empty_form(request)
def empty_form(request):
return render(request,'subscriptions/subscription_form.html', {'form': SubscriptionForm()})
def create(request):
form = SubscriptionForm(request.POST)
if not form.is_valid():
return render(request, 'subscriptions/subscription_form.html',
{'form': form})
subscription = form.save()
#subscription = Subscription.objects.create(**form.cleaned_data)
_send_mail('Confirmação de inscrição',
settings.DEFAULT_FROM_EMAIL,
subscription.email,
'subscriptions/subscription_email.txt',
{'subscription': subscription})
return HttpResponseRedirect(r('subscriptions:detail', subscription.pk))
def detail(request, pk):
try:
subscription = Subscription.objects.get(pk=pk)
except Subscription.DoesNotExist:
raise Http404
return render(request, 'subscriptions/subscription_detail.html',
{'subscription': subscription})
def _send_mail(subject, from_, to, template_name, context):
body = render_to_string(template_name, context)
mail.send_mail(subject, body, from_,[from_, to])
|
[
"vmantecom@options.qss.com.br"
] |
vmantecom@options.qss.com.br
|
4258078d9866fd3462f783966f5dca8fe44371be
|
6afe7ba26ed50d158110874cf3f9767f6a108d18
|
/task_1_4.py
|
4735518a6351831af7cad2435f40514696b1843e
|
[] |
no_license
|
hejaziak/Regular-expression-to-NFA
|
9603ded3b1459b6b17de838c410c2d101fc3d779
|
f959fb0da21d2896ebee857cd201bac48765f3c6
|
refs/heads/master
| 2020-05-02T19:17:46.777728
| 2019-03-28T09:53:12
| 2019-03-28T09:53:12
| 178,155,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
import argparse
import re
def task_1_4(args):
regex = re.compile("[123\+\-\*\/\.=]{2,4}")
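    # The pattern matches runs of 2 to 4 characters drawn from {1, 2, 3, +, -, *, /, ., =},
    # e.g. "1+2", "3*3=" or "1.2" (illustrative strings, not taken from the input file).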
    output_file = open("task_1_4_result.txt", "w+")
    with open(args.file + "/task1_4.txt", "r") as file:
        for line in file:
            # finditer always returns an iterator, so no truthiness check is needed
            for match in regex.finditer(line):
                output_file.write(match.group() + "\n")
    output_file.close()   # close the results file explicitly; it is not managed by the with-block
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=True, description='Sample Commandline')
parser.add_argument('--file', action="store", help="path of file to take as input", nargs="?", metavar="file")
args = parser.parse_args()
print(args.file)
task_1_4(args)
|
[
"atherkhalid158@gmail.com"
] |
atherkhalid158@gmail.com
|
1ed42624220a828e400ca519d611390d05b01c6f
|
47c4e8c83d8c1e5f1093488b22ed1ecf27b29454
|
/schoolmanager/enroll/admin.py
|
641eb44c83656709f03774d021288c747b61f8bc
|
[] |
no_license
|
sjbitcode/schoolmanager
|
077a3295069e1b9e3494fbd7bfcae34d28d6a690
|
934d3cb1da99c746f15481fde2be25f78402c901
|
refs/heads/master
| 2021-01-11T18:10:17.990357
| 2017-01-20T00:53:23
| 2017-01-20T00:53:23
| 79,509,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from django.contrib import admin
from .models import Student, Classroom, School
admin.site.register(Student)
admin.site.register(Classroom)
admin.site.register(School)
|
[
"sjbitcode@gmail.com"
] |
sjbitcode@gmail.com
|
592bf6d633bee2449ba9302f858c095db9aac103
|
85153198367fc7df788d8732627041f43edad400
|
/whoosh/example.py
|
d8a9b03ab86b28737484b67564f3b29159352bdd
|
[] |
no_license
|
An0nYm0u5101/python_discover_search_engine
|
823596089fc49cf7696e9ebcd3a0108f6715b603
|
1dbaca7792720d5e721155ac52f83b22013db4df
|
refs/heads/master
| 2022-04-19T12:46:07.687022
| 2017-09-23T18:47:46
| 2017-09-23T18:47:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
from whoosh import fields, index
from datetime import datetime
'''class whooshSCHEMA(fields.SchemaClass):
title = fields.TEXT(stored=True,sortable=True)
content = fields.TEXT(spelling=True)
date = fields.DATETIME(stored=True)
summary = fields.STORED
url=fields.ID(stored=True, unique=True))'''
WHOOSH_SCHEMA = fields.Schema(
title=fields.TEXT(stored=True,sortable=True),
content=fields.TEXT(spelling=True),
date = fields.DATETIME(stored=True),
summary = fields.STORED,
url=fields.ID(stored=True, unique=True))
# To build the index, create it on disk and then use a writer object to add documents
ix = index.create_in("index",schema=WHOOSH_SCHEMA)
writer = ix.writer()
writer.add_document(title="pycones 2017",content="python conference",
date = datetime(2017,9,22),
summary = "discovering python search engine",
url="http://pycones.es")
writer.add_document(title="python 2017",content="pycones2017",
date = datetime(2017,9,22),
summary = "discovering python search engine",
url="http://pycones.es")
writer.commit()
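# commit() flushes the added documents to the index on disk; the writer is closed by the
# commit, so a fresh ix.writer() would be needed to add further documents.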
#searching in the index by a single field
from whoosh import qparser
queryParser = qparser.QueryParser("content",schema = ix.schema)
query = queryParser.parse("python")
with ix.searcher() as searcher:
results = searcher.search(query)
print(results)
for result in results:
print(result)
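# Note: the results are consumed inside the `with ix.searcher()` block above because the
# searcher (and the index reader behind it) is closed when the block exits.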
#searching in the index by multiple fields
from whoosh.qparser import MultifieldParser, OrGroup
queryParser = MultifieldParser(["title",
"content"],
schema = ix.schema,
group = OrGroup)
query = queryParser.parse("python")
with ix.searcher() as searcher:
results = searcher.search(query)
print(results)
for result in results:
print(result)
|
[
"jmoc25@gmail.com"
] |
jmoc25@gmail.com
|
7645dc17368bfb6ea9d70b6b6796e5ba81bed16f
|
7ec3d5ec9ca964c72479a8f9295bfddd82dc7ed8
|
/chat.py
|
517f15fe335d759b614401bf448f76d65a84f895
|
[] |
no_license
|
lucaskc/t2networks
|
9a3946148b12e1ccf52fd4e14edf693833982256
|
8396a0bda6d16cfce2a50dba934e4ebf7d0cb6ce
|
refs/heads/master
| 2021-01-10T18:50:06.542891
| 2016-04-16T01:10:00
| 2016-04-16T01:10:00
| 56,322,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,581
|
py
|
# -*- coding: utf-8 -*-
"""
Chat Server
===========
This simple application uses WebSockets to run a primitive chat server.
"""
import os
import logging
import redis
import gevent
from flask import Flask, render_template
from flask_sockets import Sockets
REDIS_URL = os.environ['REDIS_URL']
REDIS_CHAN = 'chat'
app = Flask(__name__)
@app.route("/")
def index():
return "/templates/index.html"
app.debug = 'DEBUG' in os.environ
sockets = Sockets(app)
redis = redis.from_url(REDIS_URL)
class ChatBackend(object):
"""Interface for registering and updating WebSocket clients."""
def __init__(self):
self.clients = list()
self.pubsub = redis.pubsub()
self.pubsub.subscribe(REDIS_CHAN)
def __iter_data(self):
for message in self.pubsub.listen():
data = message.get('data')
if message['type'] == 'message':
app.logger.info(u'Sending message: {}'.format(data))
yield data
def register(self, client):
"""Register a WebSocket connection for Redis updates."""
self.clients.append(client)
def send(self, client, data):
"""Send given data to the registered client.
Automatically discards invalid connections."""
try:
client.send(data)
except Exception:
self.clients.remove(client)
def run(self):
"""Listens for new messages in Redis, and sends them to clients."""
for data in self.__iter_data():
for client in self.clients:
gevent.spawn(self.send, client, data)
def start(self):
"""Maintains Redis subscription in the background."""
gevent.spawn(self.run)
chats = ChatBackend()
chats.start()
@app.route('/')
def hello():
return render_template('index.html')
@sockets.route('/submit')
def inbox(ws):
"""Receives incoming chat messages, inserts them into Redis."""
while not ws.closed:
# Sleep to prevent *constant* context-switches.
gevent.sleep(0.1)
message = ws.receive()
if message:
app.logger.info(u'Inserting message: {}'.format(message))
redis.publish(REDIS_CHAN, message)
@sockets.route('/receive')
def outbox(ws):
"""Sends outgoing chat messages, via `ChatBackend`."""
chats.register(ws)
while not ws.closed:
# Context switch while `ChatBackend.start` is running in the background.
gevent.sleep(0.1)
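

if __name__ == "__main__":
    # Start the development server only when this module is executed directly,
    # after all routes and the chat backend have been registered above.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)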
|
[
"lucas.crocomo@gmail.com"
] |
lucas.crocomo@gmail.com
|
cded55579c0e4ece740a6d67c6ee740de3a1317a
|
e8cc093ce857f65882e25f4bfae94a395ffc2fe5
|
/PESA-BACK/docs/urls.py
|
5393afc48426f59a0119887f30be96b8a18658f7
|
[] |
no_license
|
mandelashaban593/Chatting-App
|
f6d0f7ac3785da690f52a7c1427353956699af4c
|
0e6e5d9edb0a4f0c91a40391ae5916549e87ec7b
|
refs/heads/master
| 2021-01-22T06:01:42.649403
| 2017-03-20T08:08:55
| 2017-03-20T08:08:55
| 92,512,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'docs.views.home', name='docs_home'),
url(r'^api$', 'docs.views.apidocs', name='apidocs'),
)
|
[
"mandelashaban593@gmail.com"
] |
mandelashaban593@gmail.com
|
6ce2bc017407a86ee6b88e1ba18c45d91acd5dc8
|
195577cb8b2a0ecaf77f5bdaa318e3b57d20e090
|
/torch/_C/_distributed_c10d.pyi
|
708586a7bd1c5c09113ad95d4b02948b19072c73
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
Hemantr05/pytorch
|
747e0fd17dee1f8aa2a18707b1027786e1702f74
|
cc9cb8286a0d7c7fae1097bf8273cc8c297cd01a
|
refs/heads/master
| 2021-03-30T11:14:52.285048
| 2021-03-22T23:41:18
| 2021-03-22T23:41:18
| 248,046,669
| 0
| 0
|
NOASSERTION
| 2020-03-17T18:40:03
| 2020-03-17T18:40:03
| null |
UTF-8
|
Python
| false
| false
| 9,014
|
pyi
|
from torch import Tensor
from enum import Enum
from typing import Optional, List, Any, overload
from datetime import timedelta
# This module is defined in torch/csrc/distributed/c10d/init.cpp
_DEFAULT_FIRST_BUCKET_BYTES: int
_DEFAULT_NO_TIMEOUT: timedelta
class BuiltinCommHookType(Enum):
ALLREDUCE = ...
FP16_COMPRESS = ...
def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
def _register_builtin_comm_hook(reducer: Reducer, comm_hook_type: BuiltinCommHookType): ...
class GradBucket:
def __init__(self, tensors: List[Tensor]): ...
def get_index(self) -> int: ...
def get_tensors(self) -> List[Tensor]: ...
def get_per_parameter_tensors(self) -> List[Tensor]: ...
def is_the_last_bucket_to_allreduce(self) -> bool: ...
def set_tensor(self, tensor: Tensor, i: int) -> None: ...
class Reducer:
def __init__(
self,
replicas: List[List[Tensor]],
bucket_indices: List[List[int]],
process_group: ProcessGroup,
expect_sparse_gradients: List[List[bool]],
bucket_bytes_cap: int,
find_unused_parameters: bool,
gradient_as_bucket_view: bool,
): ...
def initialize_buckets(self, bucket_indices: List[List[int]]): ...
...
class Logger:
def __init__(
self,
reducer: Reducer
): ...
def set_construction_data_and_log(
self,
module_name: str,
device_ids: List[int],
output_device: int,
broadcast_buffers: bool,
): ...
...
def _get_debug_mode(): ...
class _DistributedDebugLevel(Enum):
OFF = ...
INFO = ...
DETAIL = ...
class ReduceOp(Enum):
SUM = ...
PRODUCT = ...
MIN = ...
MAX = ...
BAND = ...
BOR = ...
BXOR = ...
UNUSED = ...
class BroadcastOptions:
rootRank: int
rootTensor: int
timeout: timedelta
class AllreduceOptions:
reduceOp: ReduceOp
timeout: timedelta
class AllreduceCoalescedOptions(AllreduceOptions):
...
class ReduceOptions:
reduceOp: ReduceOp
rootRank: int
rootTensor: int
timeout: timedelta
class AllGatherOptions:
timeout: timedelta
class GatherOptions:
rootRank: int
timeout: timedelta
class ScatterOptions:
rootRank: int
timeout: timedelta
class ReduceScatterOptions:
reduceOp: ReduceOp
timeout: timedelta
class BarrierOptions:
device_ids: List[int]
timeout: timedelta
class AllToAllOptions:
timeout: timedelta
class Store:
def set(self, key: str, value: str): ...
def get(self, key: str) -> bytes: ...
def add(self, key: str, value: int) -> int: ...
def delete_key(self, key: str) -> bool: ...
def num_keys(self) -> int: ...
def set_timeout(self, timeout: timedelta): ...
@overload
def wait(self, keys: List[str]): ...
@overload
def wait(self, keys: List[str], timeout: timedelta): ...
class FileStore(Store):
def __init__(
self,
path: str,
numWorkers: int
): ...
class HashStore(Store):
def __init__(self): ...
class TCPStore(Store):
def __init__(
self,
host_name: str,
port: int,
world_size: int,
is_master: bool,
timeout: timedelta,
): ...
class PrefixStore(Store):
def __init__(
self,
prefix: str,
store: Store
): ...
class Work:
def is_completed(self) -> bool: ...
def is_success(self) -> bool: ...
def exception(self) -> Any: ...
def wait(self, timeout: timedelta = _DEFAULT_NO_TIMEOUT) -> bool: ...
def source_rank(self) -> int: ...
def _source_rank(self) -> int: ...
def result(self) -> List[Tensor]: ...
def synchronize(self): ...
...
class ProcessGroup:
def __init__(self): ...
def rank(self) -> int: ...
def size(self) -> int: ...
@overload
def broadcast(
self,
tensors: List[Tensor],
opts = BroadcastOptions(),
) -> Work: ...
@overload
def broadcast(
self,
tensor: Tensor,
root: int,
) -> Work: ...
@overload
def allreduce(
self,
tensors: List[Tensor],
opts: AllreduceOptions = AllreduceOptions(),
) -> Work: ...
@overload
def allreduce(
self,
tensors: List[Tensor],
op = ReduceOp.SUM,
) -> Work: ...
@overload
def allreduce(
self,
tensor: Tensor,
op = ReduceOp.SUM,
) -> Work: ...
def allreduce_coalesced(
self,
tensors: List[Tensor],
opts = AllreduceCoalescedOptions(),
) -> Work: ...
@overload
def reduce(
self,
tensors: List[Tensor],
opts = ReduceOptions(),
) -> Work: ...
@overload
def reduce(
self,
tensor: Tensor,
root: int,
op = ReduceOp.SUM,
) -> Work: ...
@overload
def allgather(
self,
output_tensors: List[List[Tensor]],
input_tensors: List[Tensor],
opts = AllGatherOptions(),
) -> Work: ...
@overload
def allgather(
self,
output_tensors: List[Tensor],
input_tensor: Tensor,
) -> Work: ...
def allgather_coalesced(
self,
output_lists: List[List[Tensor]],
input_list: List[Tensor],
opts = AllGatherOptions(),
) -> Work: ...
@overload
def gather(
self,
output_tensors: List[List[Tensor]],
input_tensors: List[Tensor],
opts = GatherOptions(),
) -> Work: ...
@overload
def gather(
self,
output_tensors: List[Tensor],
input_tensor: Tensor,
root: int,
) -> Work: ...
@overload
def scatter(
self,
output_tensors: List[Tensor],
input_tensors: List[List[Tensor]],
opts = ScatterOptions(),
) -> Work: ...
@overload
def scatter(
self,
output_tensor: Tensor,
input_tensors: List[Tensor],
root: int,
) -> Work: ...
@overload
def reduce_scatter(
self,
output_tensors: List[Tensor],
input_tensors: List[List[Tensor]],
opts = ReduceScatterOptions(),
) -> Work: ...
@overload
def reduce_scatter(
self,
output_tensors: Tensor,
input_tensor: List[Tensor],
) -> Work: ...
@overload
def alltoall_base(
self,
output_tensor: Tensor,
input_tensor: Tensor,
output_split_sizes: List[int],
input_split_sizes: List[int],
opts = AllToAllOptions(),
) -> Work: ...
@overload
def alltoall_base(
self,
output: Tensor,
input: Tensor,
output_split_sizes: List[int],
input_split_sizes: List[int],
) -> Work: ...
@overload
def alltoall(
self,
output_tensor: List[Tensor],
input_tensor: List[Tensor],
opts = AllToAllOptions(),
) -> Work: ...
@overload
def alltoall(
self,
output: List[Tensor],
input: List[Tensor],
) -> Work: ...
def send(
self,
tensors: List[Tensor],
dstRank: int,
tag: int,
) -> Work: ...
def recv(
self,
tensors: List[Tensor],
srcRank: int,
tag: int,
) -> Work: ...
def recv_anysource(
self,
tensors: List[Tensor],
tag: int
) -> Work: ...
def barrier(
self,
opts = BarrierOptions()
) -> Work: ...
class ProcessGroupRoundRobin(ProcessGroup): ...
def _round_robin_process_groups(
process_groups: List[ProcessGroup],
) -> ProcessGroupRoundRobin: ...
class ProcessGroupGloo(ProcessGroup):
class Device: ...
def __init__(
self,
store: Store,
rank: int,
size: int,
timeout: timedelta,
): ...
@staticmethod
def create_device(hostname = str(), interface = str()) -> Device: ...
...
class ProcessGroupNCCL(ProcessGroup):
def __init__(
self,
store: Store,
rank: int,
size: int,
timeout: timedelta,
): ...
@staticmethod
def _group_start() -> None: ...
@staticmethod
def _group_end() -> None: ...
...
class ProcessGroupMPI(ProcessGroup):
def __init__(
self,
rank: int,
size: int,
pgComm: int,
): ...
@staticmethod
def create(ranks: List[int]) -> ProcessGroupMPI: ...
def _compute_bucket_assignment_by_size(
tensors: List[Tensor],
bucket_size: int,
expect_sparse_gradient: List[bool],
tensor_indices: List[int]) -> List[List[int]]: ...
def _broadcast_coalesced(
process_group: ProcessGroup,
tensors: List[Tensor],
buffer_size: int,
src: int,
): ...
def _test_python_store(store: Store): ...
def _verify_replicas_within_process(
replicas: List[List[Tensor]],
expect_sparse_gradient: List[List[bool]]
): ...
def _verify_model_across_ranks(
process_group: ProcessGroup,
replicas: List[List[Tensor]]
): ...
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
6000c38166aa34b4a4f88f0e2514bac752047095
|
a508dec8ee78466f5a4598150c5bd2e960c67245
|
/banking/migrations/0001_initial.py
|
b34c29ed3d2a802b6f2f0458ac9d8a16c2e1ee34
|
[] |
no_license
|
koffskeyj/Iron_Bank
|
0c19ec54423fc53999bf304d705fdac0134de484
|
18afbd9323f8df16f57a2af8f90788ef2af52d2d
|
refs/heads/master
| 2021-01-17T19:24:07.244331
| 2016-07-05T02:57:11
| 2016-07-05T02:57:11
| 61,582,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 20:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('business', models.CharField(max_length=30)),
('amount', models.FloatField()),
('transaction_method', models.CharField(choices=[('Debit', 'Debit'), ('Credit', 'Credit')], default='Debit', max_length=15)),
('transaction_type', models.CharField(choices=[('Deposit', 'Deposit'), ('Withdrawal', 'Withdrawal')], default='Deposit', max_length=15)),
('created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"koffskeyj@gmail.com"
] |
koffskeyj@gmail.com
|
cc63467008ab0a8e13868fe769018dc8cba1852c
|
c1be2084053fdbe5804a12751b5a1fba8f86b30a
|
/contrib/seeds/generate-seeds.py
|
e09f271ba25bed2517e08bbec1cc3e7ba01564fb
|
[
"MIT"
] |
permissive
|
LordSoylent/ZinniCoin
|
76186400cc69b90cde61d5a574face34c3a43912
|
697eedf7def8423bb23ad6a5b16b44296e186ca2
|
refs/heads/master
| 2020-03-28T19:16:52.975219
| 2018-08-22T17:25:28
| 2018-08-22T17:25:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,327
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
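# Worked example (added for illustration, not part of the upstream script):
# a nodes_main.txt line such as "1.2.3.4:15877" (or just "1.2.3.4", since
# 15877 is the main-net default port used in main() below) is converted to an
# IPv4-mapped IPv6 address and emitted roughly as
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 15877}
# inside the pnSeed6_main array.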
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
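# Example results (a sketch added for clarity; not in the original source):
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 prefix followed by bytes 0x01,0x02,0x03,0x04
#   name_to_ipv6('::1')        -> fifteen zero bytes followed by 0x01
#   name_to_ipv6('0x04030201') -> same 16 bytes as '1.2.3.4' (little-endian IPv4 form)
# Every return value is a 16-byte bytearray, which parse_spec() below pairs with a port.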
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 15877)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 2254)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
[
"root"
] |
root
|
d29985f01b3be9780b7cb8589a4cd0a905200a28
|
a8904a94eaf1402fa2cd4dcc88ad2c87f3ce77de
|
/hfcs
|
3f46f78622ad93806879663da66fe3a4fa3e11f3
|
[
"Apache-2.0"
] |
permissive
|
dbgdd/hifi-tutorial
|
cfaf3189b845f8c8bab9fcfaf4271fdf12a51478
|
4cb56bf5b83d1799dfad5ef75fe9ab9fd0453ebd
|
refs/heads/master
| 2020-04-29T07:03:23.295097
| 2018-10-10T18:55:59
| 2018-10-10T18:55:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,741
|
#!/usr/bin/env python
"""
This tool provides functionality to:
* Generate a content set directory that the High Fidelity domain server
and assignment clients can process.
* Copy the models.json.gz that your entity server is using back into the src
directory.
* Package the generated content set into a releasable archive that the High
Fidelity Sandbox can download and use.
The tool expects the following directory structure in the input/src directory:
src/
assets/ # ATP server assets
...
entities/ # Entity server assets, models.json in unzipped form
models.json
domain-server/ # Domain server assets
config.json
Building the content set will process the above directory structure and produce
a directory with a High Fidelity server compatible structure, which includes a
gzipped models file and a map.json for the assets server.
Build directory structure:
build/
assignment-client/
assets/
map.json
files/
...
entities/
models.json.gz
domain-server/
config.json
Packaging the build will generate a gzipped tarball that can be downloaded and
extracted by the Sandbox.
Example usage:
Generate build into default ac/ds directory. This is useful when working with the Sandbox.
./hfcs build -o ~/AppData/Roaming/High\ Fidelity
After making modifications to entities on your local sandbox in interface,
copy the models file back:
./hfcs pull -o ~/AppData/Roaming/High\ Fidelity
Create a release:
# Assuming build does not exist
./hfcs build --bake -o ./build
./hfcs package ./build
"""
from __future__ import print_function
import argparse
import datetime
import gzip
import hashlib
import json
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import time
import errno
TEXTURE_EXTENSIONS = ('jpg', 'jpeg', 'png', 'tga')
verbose_enabled = False
def print_verbose(*args, **kwargs):
if verbose_enabled:
print(*args, **kwargs)
def is_texture_extension(extension):
return extension in TEXTURE_EXTENSIONS
def create_assets_map(file_path_pairs):
assets_map = {}
for filename, path, filehash in file_path_pairs:
path_parts = split(path)
assets_path = '/' + '/'.join(path_parts)
if assets_path in assets_map:
if assets_map[assets_path] == filehash:
print(" Found duplicate: {}".format(assets_path))
else:
print(" !!!! Overwriting asset: {}".format(assets_path))
assets_map[assets_path] = filehash
return assets_map
def split(path):
"""
Return a list containing the individual directories and filename (if
included) in `path`. This is in contract to os.path.split, which will only
split the path into 2 parts - the beginning and the last component.
"""
head, tail = os.path.split(path)
if tail == '':
if head == '':
return []
else:
return [head]
return split(head) + [tail]
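# Illustrative example (added for clarity, not in the original source):
#   split('assets/models/chair.fbx') -> ['assets', 'models', 'chair.fbx']
# whereas os.path.split() would return only ('assets/models', 'chair.fbx').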
def makedirs(path):
"""
Create directory `path`, including its parent directories if they do
not already exist. Return True if the directory did not exist and was
created, or False if it already existed.
"""
try:
os.makedirs(path)
return True
except OSError as e:
if e.errno == errno.EEXIST:
return False
raise
except FileExistsError:
return False
def basename_without_extension(filepath):
"""
Given a filepath, return the basename without the extension.
Exampe: /home/ryan/file.json.gz => file
"""
basename = os.path.basename(filepath)
dotpos = basename.find('.')
if dotpos > -1:
return basename[:dotpos]
return basename
def get_extension(filepath):
"""
Return the extension for a file, without the dot.
Example: /home/ryan/sphere.fbx => fbx
"""
dotpos = filepath.rfind('.')
if dotpos == -1:
extension = ''
else:
extension = filepath[dotpos + 1:]
return extension
def bake_file(input_filepath, output_directory):
"""
Bake a file and return a list of info about each generated files. If the input file
can't be baked or the bake fails, None will be returned.
The file info dict will contain:
relative_path - path to the file, relative to the baked folder for this
asset. This will generally have a depth of 0 (example:
'sphere.fbx')
absolute_path - absolute path to the file
For an fbx, the returned list will usually contain a set of relative paths
like:
chair.fbx
textures/wood_diffuse.ktx
textures/wood_normal.ktx
"""
extension = get_extension(input_filepath)
is_texture = is_texture_extension(extension)
if extension == 'fbx' or is_texture:
FNULL = open(os.devnull, 'w')
if is_texture:
output_directory = os.path.join(output_directory, basename_without_extension(input_filepath))
makedirs(output_directory)
res = subprocess.call(
['oven', '-i', input_filepath, '-o', output_directory],
stdout=FNULL,
stderr=subprocess.STDOUT)
if res == 0:
input_filename = os.path.basename(input_filepath)
pos = input_filename.rfind('.')
if pos > -1:
input_filename_no_ext = input_filename[:pos]
else:
input_filename_no_ext = input_filename
baked_file_info = []
# For models, if input_filepath is something.fbx, output folder
# will be:
#
# output_filepath/something/baked/
#
# For textures, the directory is just the output_directory
#
if is_texture:
baked_directory = output_directory
else:
baked_directory = os.path.join(output_directory, input_filename_no_ext, 'baked')
for dirpath, _dirs, baked_files in os.walk(baked_directory):
relpath = os.path.relpath(dirpath, baked_directory)
for baked_file in baked_files:
rel_baked_file = os.path.normpath(os.path.join(relpath, baked_file))
baked_file_info.append({
'relative_path': rel_baked_file,
'absolute_path': os.path.join(os.getcwd(), dirpath, baked_file)
})
return baked_file_info
return None
def ask_yes_no(prompt):
"""
Prompt the user to answer yes or no to the question in `prompt`. The
default response is no if nothing has been entered.
"""
    # raw_input() only exists on Python 2; fall back to input() on Python 3
    # so the prompt works under either interpreter.
    try:
        read_input = raw_input
    except NameError:
        read_input = input
    while True:
        resp = read_input(prompt + " (y/N) ")
if resp == 'y' or resp == 'Y':
return True
elif resp == '' or resp == 'n' or resp == 'N':
return False
def remove_baked_extension(filepath):
"""
Remove the ".baked." portion of an extension from the path `filepath`.
If the filepath does not contain ".baked." in the extension, the original
path will be returned.
"""
pos = filepath.rfind('.baked.')
if pos > -1:
return filepath[:pos] + filepath[pos + len('.baked'):]
return filepath
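# Illustrative examples (added for clarity, not in the original source):
#   remove_baked_extension('chair.baked.fbx')         -> 'chair.fbx'
#   remove_baked_extension('textures/wood.baked.ktx') -> 'textures/wood.ktx'
#   remove_baked_extension('chair.fbx')                -> 'chair.fbx' (unchanged)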
def generate_build(source_dir, output_dir, bake=False, skip_baking_skyboxes=False,
version=None):
"""
Generate a build by processing the directories and files in source_dir
and outputting the build to build_dir. if a version is specified, it will
be written to a file in the assignment-client directory.
"""
src_assets_dir = os.path.join(source_dir, 'assets')
src_entities_dir = os.path.join(source_dir, 'entities')
src_ds_dir = os.path.join(source_dir, 'domain-server')
output_ac_dir = os.path.join(output_dir, 'assignment-client')
output_assets_dir = os.path.join(output_ac_dir, 'assets')
output_assets_files_dir = os.path.join(output_assets_dir, 'files')
output_entities_dir = os.path.join(output_ac_dir, 'entities')
output_ds_dir = os.path.join(output_dir, 'domain-server')
timestr = datetime.datetime.fromtimestamp(time.time())\
.strftime('%Y-%m-%d-%H_%M_%S')
base_temp_dir = tempfile.gettempdir()
temp_dir = os.path.join(base_temp_dir, 'tut-' + timestr)
print_verbose("Temp path for baked files is ", temp_dir)
if bake:
makedirs(temp_dir)
makedirs(output_assets_dir)
makedirs(output_assets_files_dir)
makedirs(output_entities_dir)
makedirs(output_ds_dir)
# Generate models.json.gz if it doesn't exist
print(" Writing entities")
models_filepath = os.path.join(src_entities_dir, 'models.json')
output_models_filepath = os.path.join(output_entities_dir, 'models.json.gz')
should_copy_entities_to_build = False
if os.path.exists(output_models_filepath):
print(" models.json.gz in build directory already exists.")
should_copy_entities_to_build = ask_yes_no(" Do you want to replace it?")
else:
        should_copy_entities_to_build = True
# Find zone entities to determine which files are used as a skybox
skybox_asset_files = []
with open(models_filepath, 'r') as models_file:
try:
entities = json.load(models_file)
for entity in entities['Entities']:
if entity['type'] == 'Zone':
url = entity.get('skybox', {}).get('url', None)
if url is not None and url.startswith('atp:/'):
skybox_asset_files.append(url[len('atp:/'):])
except:
print("ERROR: Failed to load models file")
raise
print_verbose("Found skyboxes: ", ', '.join(skybox_asset_files))
# Build asset server files
print(" Writing assets")
print(" Source assets directory is: " + src_assets_dir)
print(" Copying assets to output directory")
assets_files = []
skyboxes_to_update = {}
for dirpath, _dirs, files in os.walk(os.path.join(src_assets_dir)):
for filename in files:
abs_filepath = os.path.abspath(os.path.join(dirpath, filename))
asset_dir = os.path.relpath(os.path.abspath(dirpath), src_assets_dir)
asset_filepath = os.path.normpath(os.path.join(asset_dir, filename)).replace('\\', '/')
asset_files_to_copy = []
needs_copy = True
if bake:
extension = get_extension(filename)
is_texture = is_texture_extension(extension)
is_skybox_texture = (is_texture and asset_filepath in skybox_asset_files)
if extension == 'fbx' or (not skip_baking_skyboxes and is_skybox_texture):
print(" Baking ", abs_filepath)
baked_files = bake_file(abs_filepath, temp_dir)
if baked_files is None:
print(" Failed to bake:", abs_filepath)
else:
for baked_file_info in baked_files:
needs_copy = False
rel_path = baked_file_info['relative_path']
abs_path = baked_file_info['absolute_path']
print_verbose('Got baked file: ', rel_path, abs_path)
asset_filepath = remove_baked_extension(rel_path)
with open(abs_path, 'rb') as f:
sha256 = hashlib.sha256()
for chunk in iter(lambda: f.read(4096), b''):
sha256.update(chunk)
filehash = sha256.hexdigest()
asset_filepath = os.path.normpath(os.path.join(asset_dir, asset_filepath))
asset_files_to_copy.append( (filehash, abs_path, asset_filepath) )
if is_skybox_texture:
rel_asset_filepath = asset_dir.replace('\\', '/')
pos = rel_path.rfind('.')
original_path = 'atp:/' + '/'.join((rel_asset_filepath, filename))
baked_path = 'atp:/' + '/'.join((rel_asset_filepath, rel_path[:pos] + '.ktx'))
print("Mapping {} to {}".format(original_path, baked_path))
skyboxes_to_update[original_path] = baked_path
if needs_copy:
asset_filepath = os.path.normpath(os.path.join(asset_dir, filename))
with open(abs_filepath, 'rb') as f:
filehash = hashlib.sha256(f.read()).hexdigest()
asset_files_to_copy.append( (filehash, abs_filepath, asset_filepath) )
for filehash, source_filepath, asset_filepath in asset_files_to_copy:
assets_files.append((source_filepath, asset_filepath, filehash))
output_filepath = os.path.join(output_assets_files_dir, filehash)
print_verbose(" Copying {} to {}".format(source_filepath, output_filepath))
shutil.copy(source_filepath, output_filepath)
print(" Copied {} assets".format(len(assets_files)))
assets_map = create_assets_map(assets_files)
output_assets_map_file = os.path.join(output_assets_dir, 'map.json')
with open(output_assets_map_file, 'w') as map_file:
json.dump(assets_map, map_file, indent=4)
def replace_with_baked_skybox(models_data, mapping):
for entity in models_data['Entities']:
if entity['type'] == 'Zone':
url = entity.get('skybox', {}).get('url', None)
if url is not None and url in skyboxes_to_update:
entity['skybox']['url'] = skyboxes_to_update[url]
if should_copy_entities_to_build:
print(" Creating models.json.gz")
with open(models_filepath, 'r') as orig_file, \
gzip.open(output_models_filepath, 'w') as gz_file:
models_data = json.load(orig_file)
for entity in models_data['Entities']:
if entity['type'] == 'Zone':
url = entity.get('skybox', {}).get('url', None)
if url is not None and url in skyboxes_to_update:
print('Updating models file', url, 'to', skyboxes_to_update[url])
entity['skybox']['url'] = skyboxes_to_update[url]
data = json.dumps(models_data)
gz_file.write(data.encode())
# Copy domain-server config
print(" Writing domain-server config")
src_ds_config_filepath= os.path.join(src_ds_dir, 'config.json')
output_ds_config_filepath= os.path.join(output_ds_dir, 'config.json')
shutil.copy(src_ds_config_filepath, output_ds_config_filepath)
# Write content version
print(" Copying content version")
src_content_version_filepath = os.path.join(source_dir, 'content-version.txt')
output_content_version_filepath= os.path.join(output_ac_dir, 'content-version.txt')
shutil.copy(src_content_version_filepath, output_content_version_filepath)
print("Complete")
# These are the paths in an output/build directory to include in a content set
# package (.tar.gz).
PATHS_TO_INCLUDE_IN_ARCHIVE = (
'assignment-client/assets/files',
'assignment-client/assets/map.json',
'assignment-client/entities/models.json.gz',
'assignment-client/content-version.txt',
'domain-server/config.json',
)
def generate_package(input_dir, output_filepath):
print("Generating release")
if not output_filepath.endswith('.tar.gz'):
print(' Skipping, output must end in "tar.gz": {}'.format(output_filepath))
else:
def tarfilter(tarinfo):
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = 'hifi'
return tarinfo
print(" Writing archive to {}".format(output_filepath))
with tarfile.open(output_filepath, 'w:gz') as f:
for path in PATHS_TO_INCLUDE_IN_ARCHIVE:
full_path = os.path.join(input_dir, path)
print(" Adding to archive: {}".format(full_path))
f.add(full_path, path, filter=tarfilter)
print(" Complete")
def handle_generate_build(args):
source_dir = args.input_directory
output_dir = args.output_directory
print("Generating build in `{}` from `{}`".format(output_dir, source_dir))
generate_build(source_dir, output_dir, args.bake, args.skip_baking_skyboxes, 35)
def handle_generate_package(args):
archive_path = os.path.join(os.getcwd(), args.output_filename)
generate_package(args.input_directory, archive_path)
def handle_pull_entities(args):
input_dir = args.input_directory
output_dir = args.output_directory
input_models_filepath = os.path.join(
input_dir,
'entities',
'models.json')
output_models_filepath = os.path.join(
output_dir,
'assignment-client',
'entities',
'models.json.gz')
if os.path.exists(output_models_filepath):
print("Copying {} to {}".format(output_models_filepath, input_models_filepath))
with open(input_models_filepath, 'wb') as orig_file, \
gzip.open(output_models_filepath, 'rb') as gz_file:
shutil.copyfileobj(gz_file, orig_file)
else:
print("Error: A models file could not be found at {}".format(output_models_filepath))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=\
"High Fidelity Content Set generator and packager.")
DEFAULT_OUTPUT_DIRECTORY = 'build'
parser.add_argument('--verbose', action='store_true')
subparsers = parser.add_subparsers()
parser_gen_build = subparsers.add_parser('build',
help='Build input directory into output directory')
parser_gen_build.set_defaults(func=handle_generate_build)
parser_gen_build.add_argument('-i', '--input_directory', default='src',
help='Directory to pull data from')
parser_gen_build.add_argument('-o', '--output_directory',
default=DEFAULT_OUTPUT_DIRECTORY)
parser_gen_build.add_argument('--bake', action='store_true',
help='Bake models and textures')
parser_gen_build.add_argument('--skip-baking-skyboxes', action='store_true',
help='If baking, do not bake skybox textures')
parser_pull_entities = subparsers.add_parser('pull',
help='Pull the models.json.gz file from an output directory back into '\
+ 'an input directory')
parser_pull_entities.set_defaults(func=handle_pull_entities)
parser_pull_entities.add_argument('-i', '--input_directory', default='src',
help='Directory to pull data from')
parser_pull_entities.add_argument('-o', '--output_directory',
default=DEFAULT_OUTPUT_DIRECTORY)
parser_package = subparsers.add_parser('package', help='Generate a release\
from a build generated using the `build` command')
parser_package.set_defaults(func=handle_generate_package)
parser_package.add_argument('input_directory')
parser_package.add_argument('output_filename')
args = parser.parse_args(sys.argv[1:])
verbose_enabled = args.verbose
if 'func' in args:
args.func(args)
else:
parser.print_help()
|
[
"ryanhuffman@gmail.com"
] |
ryanhuffman@gmail.com
|
|
a72386a31efdb6eab615e6b723aa396531289c9b
|
c1e1e1e510a9ef249857325295dae86c028e4185
|
/Modules/resize_by_pad_or_crop.py
|
67287bda35eddeb774e4c85b6d4a548e26bab773
|
[] |
no_license
|
templeblock/JPEG-reconstruction
|
bc6f67c01738895235ae135dca578e3c828fcbed
|
4c59fb9745579056b63cada51b2df17ead002bb7
|
refs/heads/master
| 2020-07-03T11:53:07.181546
| 2019-06-14T22:21:06
| 2019-06-14T22:21:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
import numpy as np
from math import floor, ceil


def resize_by_pad_or_crop(img, sz=[256, 256], padVal=0):
    # Pad (with padVal) or center-crop a 2D image so it becomes sz[0] x sz[1].
    X, Y = img.shape          # shape is an attribute, not a method
    offx = abs(sz[0] - X) / 2.0
    offy = abs(sz[1] - Y) / 2.0
    print(X, Y)
    print(offx, offy)
    # Fix the first (row) dimension.
    tmpx = np.zeros((sz[0], Y), dtype=np.uint8) + padVal
    if X < sz[0]:
        tmpx[floor(offx):-ceil(offx), :] = img[:, :]
    elif X > sz[0]:
        tmpx[:, :] = img[floor(offx):-ceil(offx), :]
    else:
        tmpx[:, :] = img[:, :]
    # Fix the second (column) dimension.
    tmpy = np.zeros((sz[0], sz[1]), dtype=np.uint8) + padVal
    if Y < sz[1]:
        tmpy[:, floor(offy):-ceil(offy)] = tmpx[:, :]
    elif Y > sz[1]:
        tmpy[:, :] = tmpx[:, floor(offy):-ceil(offy)]
    else:
        tmpy[:, :] = tmpx[:, :]
    return tmpy
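# Minimal usage sketch (added for illustration; the input path is hypothetical):
#   from PIL import Image
#   img = np.array(Image.open('input.jpg').convert('L'))  # 2D uint8 array
#   out = resize_by_pad_or_crop(img, sz=[256, 256], padVal=0)
#   assert out.shape == (256, 256)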
|
[
"ubuntu@ip-172-31-15-244.us-east-2.compute.internal"
] |
ubuntu@ip-172-31-15-244.us-east-2.compute.internal
|