Dataset schema (⌀ marks nullable columns):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each record below lists these fields, followed by the file content and its per-file statistics.
hexsha: 3dd1773f50f2af84354e0431bf0e4276687f173e | size: 3,401 | ext: py | lang: Python
max_stars: path=Server/Python/src/dbs/dao/Oracle/MigrationBlock/Update.py, repo=vkuznet/DBS, head=14df8bbe8ee8f874fe423399b18afef911fe78c7, licenses=["Apache-2.0"], count=8, events 2015-08-14T04:01:32.000Z to 2021-06-03T00:56:42.000Z
max_issues: path=Server/Python/src/dbs/dao/Oracle/MigrationBlock/Update.py, repo=yuyiguo/DBS, head=14df8bbe8ee8f874fe423399b18afef911fe78c7, licenses=["Apache-2.0"], count=162, events 2015-01-07T21:34:47.000Z to 2021-10-13T09:42:41.000Z
max_forks: path=Server/Python/src/dbs/dao/Oracle/MigrationBlock/Update.py, repo=yuyiguo/DBS, head=14df8bbe8ee8f874fe423399b18afef911fe78c7, licenses=["Apache-2.0"], count=16, events 2015-01-22T15:27:29.000Z to 2021-04-28T09:23:28.000Z
content:
#!/usr/bin/env python
"""
This module provides Migration.Update data access object.
"""
from WMCore.Database.DBFormatter import DBFormatter
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
from dbs.utils.DBSDaoTools import create_token_generator
class Update(DBFormatter):
"""
Migration Update DAO class.
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are the only transitions allowed during normal processing.
3 -> 1 is allowed for retrying, as long as retry_count < 3.
"""
def __init__(self, logger, dbi, owner):
"""
Add schema owner and sql.
"""
DBFormatter.__init__(self, logger, dbi)
self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else ""
self.logger = logger
self.sql = \
"""UPDATE %sMIGRATION_BLOCKS
SET
MIGRATION_STATUS=:migration_status ,
LAST_MODIFICATION_DATE=:last_modification_date
WHERE """ % self.owner
def execute(self, conn, daoinput, transaction = False):
"""
daoinput keys:
migration_status, migration_block_id, migration_request_id
"""
#print daoinput['migration_block_id']
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationBlock/Update. Expects db connection from upper layer." ,self.logger.exception)
if daoinput['migration_status'] == 1:
sql = self.sql + " (MIGRATION_STATUS = 0 or MIGRATION_STATUS = 3)"
elif daoinput['migration_status'] == 2 or daoinput['migration_status'] == 3 or daoinput['migration_status'] == 9:
sql = self.sql + " MIGRATION_STATUS = 1 "
else:
dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Expected migration status to be 1, 2, 3, 0r 9" ,self.logger.exception )
#print sql
if 'migration_request_id' in daoinput:
sql3 = sql + "and MIGRATION_REQUEST_ID =:migration_request_id"
result = self.dbi.processData(sql3, daoinput, conn, transaction)
elif 'migration_block_id' in daoinput:
if type(daoinput['migration_block_id']) is not list:
sql2 = sql+ " and MIGRATION_BLOCK_ID =:migration_block_id"
result = self.dbi.processData(sql2, daoinput, conn, transaction)
else:
bk_id_generator, binds2 = create_token_generator(daoinput['migration_block_id'])
newdaoinput = {}
newdaoinput.update({"migration_status":daoinput["migration_status"],
"last_modification_date":daoinput["last_modification_date"]})
newdaoinput.update(binds2)
sql2 = sql+ """ and MIGRATION_BLOCK_ID in ({bk_id_generator} SELECT TOKEN FROM TOKEN_GENERATOR)
""".format(bk_id_generator=bk_id_generator)
result = self.dbi.processData(sql2, newdaoinput, conn, transaction)
else:
dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Required IDs not in the input", self.logger.exception)
avg_line_length: 46.589041 | max_line_length: 165 | alphanum_fraction: 0.614231
count_classes: 3,140 | score_classes: 0.923258 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,754 | score_documentation: 0.515731
hexsha: 3dd18ca1ce7d02c28f4d50d91ff399eaea978a1f | size: 3,636 | ext: py | lang: Python
max_stars: path=cldfbench_lapollaqiang.py, repo=cldf-datasets/lapollaqiang, head=40bcba31a65b675a15d2dcac5fae7901619162fc, licenses=["CC-BY-4.0"], count=null
max_issues: path=cldfbench_lapollaqiang.py, repo=cldf-datasets/lapollaqiang, head=40bcba31a65b675a15d2dcac5fae7901619162fc, licenses=["CC-BY-4.0"], count=2, events 2020-04-18T10:57:21.000Z to 2020-04-18T12:16:03.000Z
max_forks: path=cldfbench_lapollaqiang.py, repo=cldf-datasets/lapollaqiang, head=40bcba31a65b675a15d2dcac5fae7901619162fc, licenses=["CC-BY-4.0"], count=null
content:
import re
import pathlib
from clldutils.text import strip_chars
from cldfbench import Dataset as BaseDataset
from cldfbench import CLDFSpec
QUOTES = '“”'
class Dataset(BaseDataset):
dir = pathlib.Path(__file__).parent
id = "lapollaqiang"
def cldf_specs(self): # A dataset must declare all CLDF sets it creates.
return CLDFSpec(dir=self.cldf_dir, module='Generic', metadata_fname='cldf-metadata.json')
def cmd_download(self, args):
pass
def cmd_makecldf(self, args):
args.writer.cldf.add_component('LanguageTable')
args.writer.cldf.add_component(
'ExampleTable',
'Text_ID',
{'name': 'Sentence_Number', 'datatype': 'integer'},
{'name': 'Phrase_Number', 'datatype': 'integer'},
)
args.writer.cldf.add_table('texts.csv', 'ID', 'Title')
args.writer.cldf.add_foreign_key('ExampleTable', 'Text_ID', 'texts.csv', 'ID')
args.writer.objects['LanguageTable'].append({'ID': 'qiang', 'Name':
'Qiang', 'Glottocode': 'west2876'})
example_number = 0
for text_id, title, lines in iter_texts(self.raw_dir.read('Qiang-2.txt').split('\n')):
args.writer.objects['texts.csv'].append({'ID': text_id, 'Title': title})
text, gloss = [], []
for igt in iter_igts(lines):
text.extend(igt[1])
gloss.extend(igt[2])
for sid, sentence in enumerate(iter_sentences(zip(text, gloss)), start=1):
for pid, phrase in enumerate(iter_phrases(sentence), start=1):
example_number += 1
args.writer.objects['ExampleTable'].append({
'ID': example_number,
'Primary_Text': ' '.join(p[0] for p in phrase),
'Analyzed_Word': [p[0] for p in phrase],
'Gloss': [p[1] for p in phrase],
'Text_ID': text_id,
'Language_ID': 'qiang',
'Sentence_Number': sid,
'Phrase_Number': pid,
})
def iter_phrases(chunks):
phrase_end = ',;'
phrase = []
for text, gloss in chunks:
phrase.append((text, gloss))
if strip_chars(QUOTES, text)[-1] in phrase_end:
yield phrase[:]
phrase = []
assert phrase
yield phrase
def iter_sentences(chunks):
sentence_end = '.!?'
sentence = []
for text, gloss in chunks:
sentence.append((text, gloss))
if strip_chars(QUOTES, text)[-1] in sentence_end:
yield sentence[:]
sentence = []
assert not sentence
def iter_igts(lines):
assert len(lines) % 3 == 0
for text, gloss, sep in [lines[i:i+3] for i in range(0, len(lines), 3)]:
assert not sep
m = re.match('(?P<number>[0-9]+)\s+', text)
assert m
sid = m.group('number')
text = text[m.end():].split()
gloss = gloss.split()
assert len(text) == len(gloss)
yield sid, text, gloss
def iter_texts(all_lines):
header_pattern = re.compile('Text\s+(?P<number>[0-9]+)\s*:\s+(?P<title>.+)')
text_id, title, lines = None, None, []
for line in all_lines:
line = line.strip()
header = header_pattern.match(line)
if header:
if text_id:
yield text_id, title, lines
lines = []
text_id, title = header.group('number'), header.group('title')
continue
lines.append(line)
if lines:
yield text_id, title, lines
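The iter_sentences/iter_phrases pair above chunks a stream of (word, gloss) tuples on end-of-sentence and end-of-phrase punctuation. A minimal, self-contained sketch of the same idea (using str.strip instead of clldutils.text.strip_chars; the sample tokens are made up):

# Minimal sketch of the punctuation-based chunking used above.
QUOTES = '“”'

def chunk(pairs, end_chars):
    buf = []
    for text, gloss in pairs:
        buf.append((text, gloss))
        if text.strip(QUOTES)[-1] in end_chars:
            yield buf
            buf = []
    if buf:
        yield buf

pairs = [('the', 'DET'), ('dog,', 'dog'), ('barked.', 'bark.PST')]
print(list(chunk(pairs, ',;.')))   # two phrase-level chunks
print(list(chunk(pairs, '.!?')))   # one sentence-level chunk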
avg_line_length: 33.054545 | max_line_length: 97 | alphanum_fraction: 0.55033
count_classes: 1,984 | score_classes: 0.545055 | count_generators: 1,481 | score_generators: 0.406868 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 601 | score_documentation: 0.16511
hexsha: 3dd25490c9540bd331008a56be6c0ffa65b4b3b0 | size: 1,752 | ext: py | lang: Python
max_stars: path=simple-zero-width-chars-encoder-and-decoder/encoder.py, repo=MihaiAC/Other-Projects, head=2ce3b4dbc0edf79124fee929c63a698efbbbf123, licenses=["MIT"], count=null
max_issues: path=simple-zero-width-chars-encoder-and-decoder/encoder.py, repo=MihaiAC/Other-Projects, head=2ce3b4dbc0edf79124fee929c63a698efbbbf123, licenses=["MIT"], count=null
max_forks: path=simple-zero-width-chars-encoder-and-decoder/encoder.py, repo=MihaiAC/Other-Projects, head=2ce3b4dbc0edf79124fee929c63a698efbbbf123, licenses=["MIT"], count=null
content:
import sys
import os
def convert_word_to_zero_length_list(word):
ls = []
# Convert each character into a zero-width sequence and save them into ls.
for char in word:
# Convert char to binary.
binary_char = bin(ord(char))
binary_char_strip = binary_char[2:]
# A zero-width sequence will begin with a zero-width joiner.
accumulator = u"\u200D"
for digit in binary_char_strip:
# Zeros are encoded with zero-width spaces.
if(digit == '0'):
accumulator += u"\u200B"
# Ones are encoded with zero-width non-joiners.
else:
accumulator += u"\u200C"
accumulator += u"\u200D"
ls.append(accumulator)
return ls
args = sys.argv
from_file_path = args[1]
to_file_path = args[2]
word_to_hide = args[3]
if(os.path.isfile(from_file_path) and len(word_to_hide) > 0):
# Read input from file.
f = open(from_file_path,'r')
content = f.read()
f.close()
# Encode the word.
ls = convert_word_to_zero_length_list(word_to_hide)
# Preamble for iteration.
step = int(len(content)/len(ls))
offset = 0
content = unicode(content)
# Save each zero-width sequence corresponding to a character to a specific place in the input.
# We can be smarter and save them semi-randomly but we'll keep it simple.
for ii in range(len(ls)):
index = ii * step + offset
content = content[:index] + ls[ii] + content[index:]
offset += len(ls[ii])
# Overwrite old file with modified input.
f = open(to_file_path,'w')
f.write(content.encode('utf-8'))
f.close()
else:
print('File could not be found or length of word to hide is 0.')
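A minimal round-trip sketch of the bit mapping described in the comments above (zero-width joiner starts a character's sequence, zero-width space encodes 0, zero-width non-joiner encodes 1). It illustrates the idea only and does not reproduce the script's exact output layout:

# Round-trip sketch of the zero-width bit encoding described above (illustration only).
ZWJ, ZWSP, ZWNJ = u"\u200D", u"\u200B", u"\u200C"

ch = 'A'                                           # 0b1000001
bits = bin(ord(ch))[2:]
encoded = ZWJ + ''.join(ZWSP if b == '0' else ZWNJ for b in bits)
decoded = chr(int(''.join('0' if c == ZWSP else '1'
                          for c in encoded.lstrip(ZWJ)), 2))
assert decoded == ch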
avg_line_length: 30.206897 | max_line_length: 98 | alphanum_fraction: 0.622717
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 632 | score_documentation: 0.360731
hexsha: 3dd2a3424b490a95eadbcb0285fa8becc7dbdcc5 | size: 280 | ext: py | lang: Python
max_stars: path=setup.py, repo=lambdaofgod/HOTT, head=74ec33dae7ba9f9d382384c6bd2c97b5557f6eea, licenses=["MIT"], count=null
max_issues: path=setup.py, repo=lambdaofgod/HOTT, head=74ec33dae7ba9f9d382384c6bd2c97b5557f6eea, licenses=["MIT"], count=null
max_forks: path=setup.py, repo=lambdaofgod/HOTT, head=74ec33dae7ba9f9d382384c6bd2c97b5557f6eea, licenses=["MIT"], count=null
content:
from setuptools import setup, find_packages
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name='HOTT',
version='0.1',
url='https://github.com/lambdaofgod/HOTT',
packages=find_packages(),
install_requires=requirements
)
avg_line_length: 20 | max_line_length: 46 | alphanum_fraction: 0.692857
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 66 | score_documentation: 0.235714
hexsha: 3dd4772c1009f05a2da5ab89f95cb164ef80a08f | size: 736 | ext: py | lang: Python
max_stars: path=setup.py, repo=mdmix4/pymdmix-run, head=2c3fdeca39f02429ab0040491e2ad016de210795, licenses=["MIT"], count=null
max_issues: path=setup.py, repo=mdmix4/pymdmix-run, head=2c3fdeca39f02429ab0040491e2ad016de210795, licenses=["MIT"], count=null
max_forks: path=setup.py, repo=mdmix4/pymdmix-run, head=2c3fdeca39f02429ab0040491e2ad016de210795, licenses=["MIT"], count=null
content:
#!/usr/bin/env python3
from setuptools import setup
def getRequirements():
requirements = []
with open("requirements.txt", "r") as reqfile:
for line in reqfile.readlines():
requirements.append(line.strip())
return requirements
def getVersion():
return "0.0.2"
setup(
python_requires=">=3.8",
name="pymdmix-run",
version=getVersion(),
license="MIT",
description="pymdmix plugin for command interpreter",
author="ggutierrez-bio",
author_email="",
url="https://github.com/ggutierrez-bio/mdmix4/pymdmix-run",
packages=["pymdmix_run"],
install_requires=getRequirements(),
classifiers=['Development Status :: 3 - Alpha'],
scripts=["bin/mdmix-run"],
)
avg_line_length: 23 | max_line_length: 63 | alphanum_fraction: 0.65625
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 248 | score_documentation: 0.336957
hexsha: 3dd4a8f967bc41b59fc8f2382ab1f0506c71e247 | size: 4,340 | ext: py | lang: Python
max_stars: path=aether/forum/forms.py, repo=katajakasa/aetherguild4, head=a7e294f0cff11e2508751f1013e6648fdc56bb94, licenses=["MIT"], count=null
max_issues: path=aether/forum/forms.py, repo=katajakasa/aetherguild4, head=a7e294f0cff11e2508751f1013e6648fdc56bb94, licenses=["MIT"], count=1, events 2021-06-10T17:36:11.000Z to 2021-06-10T17:36:11.000Z
max_forks: path=aether/forum/forms.py, repo=katajakasa/aetherguild4, head=a7e294f0cff11e2508751f1013e6648fdc56bb94, licenses=["MIT"], count=null
content:
from django.forms import Form, ModelForm, CharField, Textarea
from django.db import transaction
from django.utils.translation import gettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import ForumPost, ForumThread, ForumBoard, ForumPostEdit
class NewThreadForm(ModelForm):
title = CharField(label=_("Thread title"), max_length=128, required=True)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
self.board = kwargs.pop('board')
super(NewThreadForm, self).__init__(*args, **kwargs)
# Only allow attaching galleries for staff
self.fields['attached_gallery'].required = False
if not self.user.is_staff:
del self.fields['attached_gallery']
self.fields['message'].widget.attrs['class'] = 'bbcode_field'
self.helper = FormHelper()
self.helper.add_input(Submit('submit', _('Post')))
@transaction.atomic
def save(self, commit=True):
thread = ForumThread(
board=self.board,
user=self.user,
title=self.cleaned_data['title']
)
if commit:
thread.save()
post = super(NewThreadForm, self).save(commit=False)
post.thread = thread
post.user = self.user
if commit:
post.save()
return post
class Meta:
model = ForumPost
fields = ('title', 'message', 'attached_gallery')
class NewMessageForm(ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
self.thread = kwargs.pop('thread')
super(NewMessageForm, self).__init__(*args, **kwargs)
# Only allow attaching galleries for staff
self.fields['attached_gallery'].required = False
if not self.user.is_staff:
del self.fields['attached_gallery']
self.fields['message'].widget.attrs['class'] = 'bbcode_field'
self.helper = FormHelper()
self.helper.add_input(Submit('submit', _('Post')))
@transaction.atomic
def save(self, commit=True):
self.thread.set_modified()
post = super(NewMessageForm, self).save(commit=False)
post.user = self.user
post.thread = self.thread
if commit:
post.save()
return post
class Meta:
model = ForumPost
fields = ('message', 'attached_gallery')
class MoveThreadForm(ModelForm):
def __init__(self, *args, **kwargs):
super(MoveThreadForm, self).__init__(*args, **kwargs)
self.fields['board'].queryset = ForumBoard.objects.filter(deleted=False)\
.order_by('section__sort_index', 'sort_index')
self.helper = FormHelper()
self.helper.add_input(Submit('submit', _('Post')))
class Meta:
model = ForumThread
fields = ('board', )
class EditMessageForm(ModelForm):
title = CharField(label=_("Thread title"), max_length=128, required=True)
edit_note = CharField(label=_("Edit note"), max_length=255, required=False)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(EditMessageForm, self).__init__(*args, **kwargs)
if self.instance.is_first:
self.fields['title'].initial = self.instance.thread.title
else:
del self.fields['title']
# Only allow attaching galleries for staff
self.fields['attached_gallery'].required = False
if not self.user.is_staff:
del self.fields['attached_gallery']
self.fields['message'].widget.attrs['class'] = 'bbcode_field'
self.helper = FormHelper()
self.helper.add_input(Submit('submit', _('Post')))
def save(self, commit=True):
post = super(EditMessageForm, self).save(commit)
if self.instance.is_first:
self.instance.thread.title = self.cleaned_data['title']
if commit:
self.instance.thread.save()
edit = ForumPostEdit(
post=post,
message=self.cleaned_data['edit_note'],
editor=self.user.profile.alias
)
if commit:
edit.save()
return post, edit
class Meta:
model = ForumPost
fields = ('title', 'message', 'edit_note', 'attached_gallery')
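For context, a hypothetical view-level sketch of how NewThreadForm might be driven. The view function, template name, and URL name are assumptions and are not part of this file; only the form's user/board keyword arguments and save() behaviour come from the code above.

# Hypothetical usage sketch (not part of aether/forum/forms.py).
from django.shortcuts import get_object_or_404, redirect, render

def new_thread(request, board_id):
    board = get_object_or_404(ForumBoard, pk=board_id, deleted=False)
    form = NewThreadForm(request.POST or None, user=request.user, board=board)
    if request.method == 'POST' and form.is_valid():
        post = form.save()  # creates the ForumThread and its first ForumPost
        return redirect('thread', thread_id=post.thread.pk)  # assumed URL name
    return render(request, 'forum/new_thread.html', {'form': form})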
avg_line_length: 32.631579 | max_line_length: 81 | alphanum_fraction: 0.621429
count_classes: 4,024 | score_classes: 0.927189 | count_generators: 0 | score_generators: 0 | count_decorators: 700 | score_decorators: 0.16129 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 644 | score_documentation: 0.148387
hexsha: 3dd4b115a1efae712e7d58d8046528f7acbf782b | size: 1,467 | ext: py | lang: Python
max_stars: path=for_straight_forward_relion/read_star_del_metadata_param.py, repo=homurachan/Block-based-recontruction, head=b3fc02a0648db6aaa5d77dcc4b8e10f3361d66f4, licenses=["WTFPL"], count=11, events 2018-04-17T01:41:11.000Z to 2020-12-11T05:43:21.000Z
max_issues: path=for_straight_forward_relion/read_star_del_metadata_param.py, repo=homurachan/Block-based-recontruction, head=b3fc02a0648db6aaa5d77dcc4b8e10f3361d66f4, licenses=["WTFPL"], count=null
max_forks: path=for_straight_forward_relion/read_star_del_metadata_param.py, repo=homurachan/Block-based-recontruction, head=b3fc02a0648db6aaa5d77dcc4b8e10f3361d66f4, licenses=["WTFPL"], count=3, events 2019-08-23T07:48:50.000Z to 2020-12-08T07:31:41.000Z
content:
#!/usr/bin/env python
import math,os,sys
try:
from optparse import OptionParser
except:
from optik import OptionParser
def main():
(star,mline,line_name,output) = parse_command_line()
aa=open(star,"r")
instar_line=aa.readlines()
out=open(output,"w")
for i in range(0,mline):
if (instar_line[i].split()):
if (str(instar_line[i].split()[0])==line_name):
line_index=int(instar_line[i].split('#')[1])-1
skip=i
for i in range(0,mline):
if(i<skip):
out.write(instar_line[i])
if(i>skip):
tmp=str(instar_line[i].split('#')[0])
tmp_num=int(instar_line[i].split('#')[1])
tmp_num-=1
tmp=tmp+"#"+str(tmp_num)
out.write(tmp+"\n")
for i in range(mline,len(instar_line)):
if (instar_line[i].split()):
tmp=""
xx=len(instar_line[i].split())
for j in range(0,xx):
if(j!=line_index):
tmp+=str(instar_line[i].split()[j])
if(j!=xx-1 and j!=line_index):
tmp+="\t"
if(j==xx-1):
tmp+="\n"
out.write(tmp)
out.close()
aa.close()
def parse_command_line():
usage="%prog <input star> <mline +4> <line name> <output>"
parser = OptionParser(usage=usage, version="%1")
if len(sys.argv)<5:
print "<input star> <mline +4> <line name> <output>"
sys.exit(-1)
(options, args)=parser.parse_args()
star = str(args[0])
mline=int(args[1])
line_name=str(args[2])
output=str(args[3])
return (star,mline,line_name,output)
def SQR(x):
y=float(x)
return(y*y)
if __name__== "__main__":
main()
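The script drops one metadata label from a STAR-format header and shifts the remaining "#N" column indices down by one so the data columns stay aligned. A stand-alone sketch of that renumbering step, assuming RELION-style labels (the label names and the dropped column are illustrative):

# Stand-alone sketch of the header renumbering performed above (illustrative labels).
header = ["_rlnImageName #1", "_rlnDefocusU #2", "_rlnRandomSubset #3", "_rlnAngleRot #4"]
drop = "_rlnRandomSubset"
kept = [h for h in header if not h.startswith(drop)]
renumbered = ["%s #%d" % (h.split('#')[0].strip(), i + 1) for i, h in enumerate(kept)]
print(renumbered)   # ['_rlnImageName #1', '_rlnDefocusU #2', '_rlnAngleRot #3']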
avg_line_length: 22.227273 | max_line_length: 59 | alphanum_fraction: 0.632584
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 165 | score_documentation: 0.112474
hexsha: 3dd4c39c91d920a780223d1076fe94897deaabd0 | size: 2,639 | ext: py | lang: Python
max_stars: path=python/GafferUI/ProgressBar.py, repo=cwmartin/gaffer, head=1f8a0f75522105c9d5efefac6d55cb61c1038909, licenses=["BSD-3-Clause"], count=null
max_issues: path=python/GafferUI/ProgressBar.py, repo=cwmartin/gaffer, head=1f8a0f75522105c9d5efefac6d55cb61c1038909, licenses=["BSD-3-Clause"], count=null
max_forks: path=python/GafferUI/ProgressBar.py, repo=cwmartin/gaffer, head=1f8a0f75522105c9d5efefac6d55cb61c1038909, licenses=["BSD-3-Clause"], count=null
content:
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import GafferUI
QtGui = GafferUI._qtImport( "QtGui" )
class ProgressBar( GafferUI.Widget ) :
def __init__( self, progress = 0, range = ( 0, 100 ), text = "%p%", **kw ) :
GafferUI.Widget.__init__( self, QtGui.QProgressBar(), **kw )
self._qtWidget().setRange( range[0], range[1] )
self.setRange( range )
self.setProgress( progress )
self.setText( text )
def setRange( self, range ) :
self._qtWidget().setRange( range[0], range[1] )
def getRange( self ) :
return ( self._qtWidget().minimum(), self._qtWidget().maximum() )
def setProgress( self, progress ) :
self._qtWidget().setValue( progress )
def getProgress( self ) :
return self._qtWidget().value()
def setText( self, text ) :
self._qtWidget().setFormat( text )
def getText( self ) :
return self._qtWidget().format()
avg_line_length: 33.833333 | max_line_length: 77 | alphanum_fraction: 0.675256
count_classes: 759 | score_classes: 0.287609 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,784 | score_documentation: 0.676014
hexsha: 3dd4de6bb7f825300faccd73e718c78bb7dd3d78 | size: 18,444 | ext: py | lang: Python
max_stars: path=minihack/agent/rllib/models.py, repo=samvelyan/minihack-1, head=441eba33ba0d240b98aeabe1ff7a0c0b33cd236c, licenses=["Apache-2.0"], count=1, events 2021-11-19T01:51:38.000Z to 2021-11-19T01:51:38.000Z
max_issues: path=minihack/agent/rllib/models.py, repo=samvelyan/minihack-1, head=441eba33ba0d240b98aeabe1ff7a0c0b33cd236c, licenses=["Apache-2.0"], count=null
max_forks: path=minihack/agent/rllib/models.py, repo=samvelyan/minihack-1, head=441eba33ba0d240b98aeabe1ff7a0c0b33cd236c, licenses=["Apache-2.0"], count=1, events 2021-11-17T15:45:02.000Z to 2021-11-17T15:45:02.000Z
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import Any, Dict, Optional, Tuple
import gym
import torch
from nle import nethack
from minihack.agent.common.models.embed import GlyphEmbedding
from minihack.agent.common.models.transformer import TransformerEncoder
from omegaconf import DictConfig
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from torch import nn
from torch.nn import functional as F
NUM_GLYPHS = nethack.MAX_GLYPH
NUM_FEATURES = nethack.BLSTATS_SHAPE[0]
PAD_CHAR = 0
NUM_CHARS = 128
class RLLibGlyphEmbedding(GlyphEmbedding):
def glyphs_to_idgroup(self, glyphs):
B, H, W = glyphs.shape
ids_groups = self.id_pairs_table.index_select(
0, glyphs.contiguous().view(-1).long()
)
ids = ids_groups.select(1, 0).view(B, H, W).long()
groups = ids_groups.select(1, 1).view(B, H, W).long()
return (ids, groups)
def prepare_input(self, inputs):
"""Take the inputs to the network as dictionary and return a namedtuple
of the input/index tensors to be embedded (GlyphTuple)"""
embeddable_data = {}
# Only flatten the data we want
for key, value in inputs.items():
if key in self.embeddings:
# -- [ B x ...] -> [ B' x ... ]
# embeddable_data[key] = torch.flatten(value, 0, 1).long()
embeddable_data[key] = value.long()
# add our group id and subgroup id if we want them
if self.requires_id_pairs_table:
ids, groups = self.glyphs_to_idgroup(inputs["glyphs"])
embeddable_data["groups"] = groups
embeddable_data["subgroup_ids"] = ids
# convert embeddable_data to a named tuple
return self.GlyphTuple(**embeddable_data)
class NetHackNet(nn.Module):
AgentOutput = collections.namedtuple(
"AgentOutput",
"action policy_logits baseline chosen_option teacher_logits pot_sm",
)
def __init__(self):
super(NetHackNet, self).__init__()
self.register_buffer("reward_sum", torch.zeros(()))
self.register_buffer("reward_m2", torch.zeros(()))
self.register_buffer("reward_count", torch.zeros(()).fill_(1e-8))
def forward(self, inputs, core_state):
raise NotImplementedError
def initial_state(self, batch_size=1):
return ()
def prepare_input(self, inputs):
# -- [B x H x W]
glyphs = inputs["glyphs"]
# -- [B x F]
features = inputs["blstats"]
B, *_ = glyphs.shape
return glyphs, features
def embed_state(self, inputs):
raise NotImplementedError
@torch.no_grad()
def update_running_moments(self, reward_batch):
"""Maintains a running mean of reward."""
new_count = len(reward_batch)
new_sum = torch.sum(reward_batch)
new_mean = new_sum / new_count
curr_mean = self.reward_sum / self.reward_count
new_m2 = torch.sum((reward_batch - new_mean) ** 2) + (
(self.reward_count * new_count)
/ (self.reward_count + new_count)
* (new_mean - curr_mean) ** 2
)
self.reward_count += new_count
self.reward_sum += new_sum
self.reward_m2 += new_m2
@torch.no_grad()
def get_running_std(self):
"""Returns standard deviation of the running mean of the reward."""
return torch.sqrt(self.reward_m2 / self.reward_count)
class Crop(nn.Module):
def __init__(self, height, width, height_target, width_target):
super(Crop, self).__init__()
self.width = width
self.height = height
self.width_target = width_target
self.height_target = height_target
width_grid = self._step_to_range(
2 / (self.width - 1), self.width_target
)[None, :].expand(self.height_target, -1)
height_grid = self._step_to_range(
2 / (self.height - 1), height_target
)[:, None].expand(-1, self.width_target)
# "clone" necessary, https://github.com/pytorch/pytorch/issues/34880
self.register_buffer("width_grid", width_grid.clone())
self.register_buffer("height_grid", height_grid.clone())
def _step_to_range(self, step, num_steps):
return torch.tensor(
[step * (i - num_steps // 2) for i in range(num_steps)]
)
def forward(self, inputs, coordinates):
"""Calculates centered crop around given x,y coordinates.
Args:
inputs [B x H x W] or [B x H x W x C]
coordinates [B x 2] x,y coordinates
Returns:
[B x H' x W'] inputs cropped and centered around x,y coordinates.
"""
assert inputs.shape[1] == self.height, "expected %d but found %d" % (
self.height,
inputs.shape[1],
)
assert inputs.shape[2] == self.width, "expected %d but found %d" % (
self.width,
inputs.shape[2],
)
permute_results = False
if inputs.dim() == 3:
inputs = inputs.unsqueeze(1)
else:
permute_results = True
inputs = inputs.permute(0, 2, 3, 1)
inputs = inputs.float()
x = coordinates[:, 0]
y = coordinates[:, 1]
x_shift = 2 / (self.width - 1) * (x.float() - self.width // 2)
y_shift = 2 / (self.height - 1) * (y.float() - self.height // 2)
grid = torch.stack(
[
self.width_grid[None, :, :] + x_shift[:, None, None],
self.height_grid[None, :, :] + y_shift[:, None, None],
],
dim=3,
)
crop = (
torch.round(F.grid_sample(inputs, grid, align_corners=True))
.squeeze(1)
.long()
)
if permute_results:
# [B x C x H x W] -> [B x H x W x C]
crop = crop.permute(0, 2, 3, 1)
return crop
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class BaseNet(NetHackNet):
def __init__(self, processed_observation_shape, flags: DictConfig):
super(BaseNet, self).__init__()
self.observation_space = processed_observation_shape.original_space
self.H = self.observation_space["glyphs"].shape[0]
self.W = self.observation_space["glyphs"].shape[1]
self.k_dim = flags.embedding_dim
self.h_dim = flags.hidden_dim
self.crop_model = flags.crop_model
self.crop_dim = flags.crop_dim
self.num_features = NUM_FEATURES
self.crop = Crop(self.H, self.W, self.crop_dim, self.crop_dim)
self.glyph_type = flags.glyph_type
self.glyph_embedding = RLLibGlyphEmbedding(
flags.glyph_type,
flags.embedding_dim,
None,
flags.use_index_select,
)
K = flags.embedding_dim # number of input filters
F = 3 # filter dimensions
S = 1 # stride
P = 1 # padding
M = 16 # number of intermediate filters
self.Y = 8 # number of output filters
L = flags.layers # number of convnet layers
in_channels = [K] + [M] * (L - 1)
out_channels = [M] * (L - 1) + [self.Y]
def interleave(xs, ys):
return [val for pair in zip(xs, ys) for val in pair]
conv_extract = [
nn.Conv2d(
in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=(F, F),
stride=S,
padding=P,
)
for i in range(L)
]
self.extract_representation = nn.Sequential(
*interleave(conv_extract, [nn.ELU()] * len(conv_extract))
)
if self.crop_model == "transformer":
self.extract_crop_representation = TransformerEncoder(
K,
N=L,
heads=8,
height=self.crop_dim,
width=self.crop_dim,
device=None,
)
elif self.crop_model == "cnn":
conv_extract_crop = [
nn.Conv2d(
in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=(F, F),
stride=S,
padding=P,
)
for i in range(L)
]
self.extract_crop_representation = nn.Sequential(
*interleave(conv_extract_crop, [nn.ELU()] * len(conv_extract))
)
# MESSAGING MODEL
if "msg" not in flags:
self.msg_model = "none"
else:
self.msg_model = flags.msg.model
self.msg_hdim = flags.msg.hidden_dim
self.msg_edim = flags.msg.embedding_dim
if self.msg_model in ("gru", "lstm", "lt_cnn"):
# character-based embeddings
self.char_lt = nn.Embedding(
NUM_CHARS, self.msg_edim, padding_idx=PAD_CHAR
)
else:
# forward will set up one-hot inputs for the cnn, no lt needed
pass
if self.msg_model.endswith("cnn"):
# from Zhang et al, 2016
# Character-level Convolutional Networks for Text Classification
# https://arxiv.org/abs/1509.01626
if self.msg_model == "cnn":
# inputs will be one-hot vectors, as done in paper
self.conv1 = nn.Conv1d(NUM_CHARS, self.msg_hdim, kernel_size=7)
elif self.msg_model == "lt_cnn":
# replace one-hot inputs with learned embeddings
self.conv1 = nn.Conv1d(
self.msg_edim, self.msg_hdim, kernel_size=7
)
else:
raise NotImplementedError("msg.model == %s", flags.msg.model)
# remaining convolutions, relus, pools, and a small FC network
self.conv2_6_fc = nn.Sequential(
nn.ReLU(),
nn.MaxPool1d(kernel_size=3, stride=3),
# conv2
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=7),
nn.ReLU(),
nn.MaxPool1d(kernel_size=3, stride=3),
# conv3
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
# conv4
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
# conv5
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
# conv6
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
nn.MaxPool1d(kernel_size=3, stride=3),
# fc receives -- [ B x h_dim x 5 ]
Flatten(),
nn.Linear(5 * self.msg_hdim, 2 * self.msg_hdim),
nn.ReLU(),
nn.Linear(2 * self.msg_hdim, self.msg_hdim),
) # final output -- [ B x h_dim x 5 ]
elif self.msg_model in ("gru", "lstm"):
def rnn(flag):
return nn.LSTM if flag == "lstm" else nn.GRU
self.char_rnn = rnn(self.msg_model)(
self.msg_edim,
self.msg_hdim // 2,
batch_first=True,
bidirectional=True,
)
elif self.msg_model != "none":
raise NotImplementedError("msg.model == %s", flags.msg.model)
self.embed_features = nn.Sequential(
nn.Linear(self.num_features, self.k_dim),
nn.ReLU(),
nn.Linear(self.k_dim, self.k_dim),
nn.ReLU(),
)
self.equalize_input_dim = flags.equalize_input_dim
if not self.equalize_input_dim:
# just added up the output dimensions of the input featurizers
# feature / status dim
out_dim = self.k_dim
# CNN over full glyph map
out_dim += self.H * self.W * self.Y
if self.crop_model == "transformer":
out_dim += self.crop_dim ** 2 * K
elif self.crop_model == "cnn":
out_dim += self.crop_dim ** 2 * self.Y
# messaging model
if self.msg_model != "none":
out_dim += self.msg_hdim
else:
# otherwise, project them all to h_dim
NUM_INPUTS = 4 if self.msg_model != "none" else 3
project_hdim = flags.equalize_factor * self.h_dim
out_dim = project_hdim * NUM_INPUTS
# set up linear layers for projections
self.project_feature_dim = nn.Linear(self.k_dim, project_hdim)
self.project_glyph_dim = nn.Linear(
self.H * self.W * self.Y, project_hdim
)
c__2 = self.crop_dim ** 2
if self.crop_model == "transformer":
self.project_crop_dim = nn.Linear(c__2 * K, project_hdim)
elif self.crop_model == "cnn":
self.project_crop_dim = nn.Linear(c__2 * self.Y, project_hdim)
if self.msg_model != "none":
self.project_msg_dim = nn.Linear(self.msg_hdim, project_hdim)
self.fc = nn.Sequential(
nn.Linear(out_dim, self.h_dim),
nn.ReLU(),
nn.Linear(self.h_dim, self.h_dim),
nn.ReLU(),
)
def prepare_input(self, inputs):
# -- [B x H x W]
B, H, W = inputs["glyphs"].shape
# take our chosen glyphs and merge the time and batch
glyphs = self.glyph_embedding.prepare_input(inputs)
# -- [B x F]
features = inputs["blstats"]
return glyphs, features
def forward(self, inputs):
B, *_ = inputs["glyphs"].shape
glyphs, features = self.prepare_input(inputs)
# -- [B x 2] x,y coordinates
coordinates = features[:, :2]
# -- [B x K]
features_emb = self.embed_features(features)
if self.equalize_input_dim:
features_emb = self.project_feature_dim(features_emb)
assert features_emb.shape[0] == B
reps = [features_emb] # either k_dim or project_hdim
# -- [B x H' x W']
crop = self.glyph_embedding.GlyphTuple(
*[self.crop(g, coordinates) for g in glyphs]
)
# -- [B x H' x W' x K]
crop_emb = self.glyph_embedding(crop)
if self.crop_model == "transformer":
# -- [B x W' x H' x K]
crop_rep = self.extract_crop_representation(crop_emb, mask=None)
elif self.crop_model == "cnn":
# -- [B x K x W' x H']
crop_emb = crop_emb.transpose(1, 3)
# -- [B x W' x H' x K]
crop_rep = self.extract_crop_representation(crop_emb)
# -- [B x K']
crop_rep = crop_rep.view(B, -1)
if self.equalize_input_dim:
crop_rep = self.project_crop_dim(crop_rep)
assert crop_rep.shape[0] == B
reps.append(crop_rep) # either k_dim or project_hdim
# -- [B x H x W x K]
glyphs_emb = self.glyph_embedding(glyphs)
# glyphs_emb = self.embed(glyphs)
# -- [B x K x W x H]
glyphs_emb = glyphs_emb.transpose(1, 3)
# -- [B x W x H x K]
glyphs_rep = self.extract_representation(glyphs_emb)
# -- [B x K']
glyphs_rep = glyphs_rep.view(B, -1)
# -- [B x K']
if self.equalize_input_dim:
glyphs_rep = self.project_glyph_dim(glyphs_rep)
assert glyphs_rep.shape[0] == B
# -- [B x K'']
reps.append(glyphs_rep)
# MESSAGING MODEL
if self.msg_model != "none":
messages = inputs["message"].long()
if self.msg_model == "cnn":
# convert messages to one-hot, [B x 96 x 256]
one_hot = F.one_hot(messages, num_classes=NUM_CHARS).transpose(
1, 2
)
char_rep = self.conv2_6_fc(self.conv1(one_hot.float()))
elif self.msg_model == "lt_cnn":
# [B x E x 256 ]
char_emb = self.char_lt(messages).transpose(1, 2)
char_rep = self.conv2_6_fc(self.conv1(char_emb))
else: # lstm, gru
char_emb = self.char_lt(messages)
output = self.char_rnn(char_emb)[0]
fwd_rep = output[:, -1, : self.h_dim // 2]
bwd_rep = output[:, 0, self.h_dim // 2 :]
char_rep = torch.cat([fwd_rep, bwd_rep], dim=1)
if self.equalize_input_dim:
char_rep = self.project_msg_dim(char_rep)
reps.append(char_rep)
st = torch.cat(reps, dim=1)
# -- [B x K]
st = self.fc(st)
return st
class RLLibNLENetwork(TorchModelV2, nn.Module):
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: Optional[int],
model_config: dict,
name: str,
**kwargs: Any,
):
TorchModelV2.__init__(
self,
observation_space,
action_space,
num_outputs,
model_config,
name,
)
nn.Module.__init__(self)
flags = model_config["custom_model_config"]["flags"]
self.num_outputs = num_outputs or flags.hidden_dim
self.base = BaseNet(observation_space, flags) # device is sorted later
@override(TorchModelV2)
def forward(self, x: Dict[str, Any], *_: Any) -> Tuple[torch.Tensor, list]:
return self.base(x["obs"]), []
ModelCatalog.register_custom_model("rllib_nle_model", RLLibNLENetwork)
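As a quick sanity check of the Crop module defined above, a small sketch that only needs torch (importing this module still requires nle, ray and minihack); the 21 x 79 map size and the 9 x 9 crop are illustrative values:

# Sketch: a 9x9 window centred on (x=40, y=10) from a 21x79 glyph map.
if __name__ == "__main__":
    crop = Crop(height=21, width=79, height_target=9, width_target=9)
    glyphs = torch.arange(21 * 79).view(1, 21, 79)   # fake glyph ids
    coords = torch.tensor([[40, 10]])                # [B x 2] x,y
    window = crop(glyphs, coords)                    # -> torch.Size([1, 9, 9])
    assert window[0, 4, 4].item() == glyphs[0, 10, 40].item()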
avg_line_length: 33.966851 | max_line_length: 79 | alphanum_fraction: 0.559803
count_classes: 17,162 | score_classes: 0.930492 | count_generators: 0 | score_generators: 0 | count_decorators: 934 | score_decorators: 0.05064 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,401 | score_documentation: 0.184396
hexsha: 3dd4f4c9b22e44b3e89f6ae2ccad38e595e93b8d | size: 1,149 | ext: py | lang: Python
max_stars: path=old/projects/6.884/hybrid_twolinkmanipulator_with_GreedyFeatures.py, repo=ali493/pyro, head=1245340077a733e2ab35765eae783b358d2f3af9, licenses=["MIT"], count=null
max_issues: path=old/projects/6.884/hybrid_twolinkmanipulator_with_GreedyFeatures.py, repo=ali493/pyro, head=1245340077a733e2ab35765eae783b358d2f3af9, licenses=["MIT"], count=null
max_forks: path=old/projects/6.884/hybrid_twolinkmanipulator_with_GreedyFeatures.py, repo=ali493/pyro, head=1245340077a733e2ab35765eae783b358d2f3af9, licenses=["MIT"], count=null
content:
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 12:50:34 2016
@author: alex
"""
from AlexRobotics.dynamic import Manipulator as M
from AlexRobotics.dynamic import Hybrid_Manipulator as HM
from AlexRobotics.control import DPO_features as DPO
import numpy as np
# Define dynamic system
R = HM.HybridTwoLinkManipulator()
R.u_lb = np.array([-5,-5, 0 ])
R.u_ub = np.array([ 5, 5, 3 ])
# Define controller
cost_function = 'quadratic'
A = DPO.TD_Greedy_hybrid_2DOF_Features( R , cost_function )
A.W = np.array([ 0.2 , 0.2 , 0.4 , 0.02 ])
#A.W = np.array([ 1 , 0 , 0 , 0 ])
A.x0 = np.array([ -3, 1 , 0 , 0 ])
A.max_error = 0.5
A.eps = 0.8
A.alpha = 0.00001
#A.plot_J_hat()
A.training( 3 , random = True , show = False )
#A.W = np.array( [ 0.00596714 , 0.05787924 , 0.1246888 , -0.00158788 ] )
#Weight = [ 0.09416771 0.20230782 0.37820584 0.01672458]
#A.plot_J_hat()
A.eps = 1.0
R.plotAnimation( [-4,1,0,0] , tf = 12 , n = 241 , solver = 'euler' )#, save = True )
R.Sim.plot_CL('x')
R.Sim.plot_CL('u')
#R.Sim.plot_OL()
#R.Sim.phase_plane_trajectory()
avg_line_length: 24.978261 | max_line_length: 85 | alphanum_fraction: 0.598782
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 415 | score_documentation: 0.361184
hexsha: 3dd524d8e59e2c8188892e7a7fe2e15518d2a46b | size: 5,294 | ext: py | lang: Python
max_stars: path=depthaware/data/sunrgbd_dataset.py, repo=crmauceri/DepthAwareCNN-pytorch1.5, head=6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9, licenses=["MIT"], count=3, events 2021-03-11T01:24:37.000Z to 2021-06-29T03:46:40.000Z
max_issues: path=depthaware/data/sunrgbd_dataset.py, repo=crmauceri/DepthAwareCNN-pytorch1.5, head=6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9, licenses=["MIT"], count=null
max_forks: path=depthaware/data/sunrgbd_dataset.py, repo=crmauceri/DepthAwareCNN-pytorch1.5, head=6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9, licenses=["MIT"], count=null
content:
import os.path
from depthaware.data.base_dataset import *
from PIL import Image
import time
def make_dataset_fromlst(dataroot, listfilename):
"""
NYUlist format:
imagepath seglabelpath depthpath HHApath
"""
images = []
segs = []
depths = []
HHAs = []
with open(listfilename) as f:
content = f.readlines()
for x in content:
imgname, segname, depthname, HHAname = x.strip().split(' ')
images += [os.path.join(dataroot, imgname)]
segs += [os.path.join(dataroot, segname)]
depths += [os.path.join(dataroot, depthname)]
HHAs += [os.path.join(dataroot, HHAname)]
return {'images':images, 'segs':segs, 'HHAs':HHAs, 'depths':depths}
class SUNRGBDDataset(BaseDataset):
def __init__(self, opt):
self.opt = opt
np.random.seed(int(time.time()))
self.paths_dict = make_dataset_fromlst(opt.dataroot, opt.list)
self.len = len(self.paths_dict['images'])
# self.label_weight = torch.Tensor(label_weight)
self.datafile = 'sunrgbd_dataset.py'
def __getitem__(self, index):
#self.paths['images'][index]
# print self.opt.scale,self.opt.flip,self.opt.crop,self.opt.colorjitter
img = np.asarray(Image.open(self.paths_dict['images'][index]))#.astype(np.uint8)
HHA = np.asarray(Image.open(self.paths_dict['HHAs'][index]))[:,:,::-1]
seg = np.asarray(Image.open(self.paths_dict['segs'][index])).astype(np.uint8)-1
depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.uint16)
assert (img.shape[0]==HHA.shape[0]==seg.shape[0]==depth.shape[0])
assert (img.shape[1]==HHA.shape[1]==seg.shape[1]==depth.shape[1])
depth = np.bitwise_or(np.right_shift(depth,3),np.left_shift(depth,16-3))
depth = depth.astype(np.float32)/120. # 1/5 * depth
params = get_params_sunrgbd(self.opt, seg.shape, maxcrop=.7)
depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
if self.opt.inputmode == 'bgr-mean':
img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
HHA_tensor_tranformed = transform(HHA, params, normalize=False, istrain=self.opt.isTrain, option=2)
else:
img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
HHA_tensor_tranformed = transform(HHA, params, istrain=self.opt.isTrain, option=2)
# print img_tensor_tranformed
# print(np.unique(depth_tensor_tranformed.numpy()).shape)
# print img_tensor_tranformed.size()
return {'image':img_tensor_tranformed,
'depth':depth_tensor_tranformed,
'seg': seg_tensor_tranformed,
'HHA': HHA_tensor_tranformed,
'imgpath': self.paths_dict['segs'][index]}
def __len__(self):
return self.len
def name(self):
return 'sunrgbd_dataset'
class SUNRGBDDataset_val(BaseDataset):
def __init__(self, opt):
self.opt = opt
np.random.seed(8964)
self.paths_dict = make_dataset_fromlst(opt.dataroot, opt.vallist)
self.len = len(self.paths_dict['images'])
def __getitem__(self, index):
#self.paths['images'][index]
img = np.asarray(Image.open(self.paths_dict['images'][index]))#.astype(np.uint8)
HHA = np.asarray(Image.open(self.paths_dict['HHAs'][index]))[:,:,::-1]
seg = np.asarray(Image.open(self.paths_dict['segs'][index])).astype(np.uint8)-1
depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.uint16)
depth = np.bitwise_or(np.right_shift(depth,3),np.left_shift(depth,16-3))
depth = depth.astype(np.float32)/120. # 1/5 * depth
assert (img.shape[0]==HHA.shape[0]==seg.shape[0]==depth.shape[0])
assert (img.shape[1]==HHA.shape[1]==seg.shape[1]==depth.shape[1])
params = get_params_sunrgbd(self.opt, seg.shape, test=True)
depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
# HHA_tensor_tranformed = transform(HHA, params,istrain=self.opt.isTrain)
if self.opt.inputmode == 'bgr-mean':
img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
HHA_tensor_tranformed = transform(HHA, params, normalize=False, istrain=self.opt.isTrain, option=2)
else:
img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
HHA_tensor_tranformed = transform(HHA, params, istrain=self.opt.isTrain, option=2)
return {'image':img_tensor_tranformed,
'depth':depth_tensor_tranformed,
'seg': seg_tensor_tranformed,
'HHA': HHA_tensor_tranformed,
'imgpath': self.paths_dict['segs'][index]}
def __len__(self):
return self.len
def name(self):
return 'sunrgbd_dataset_Val'
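The depth decoding used in both dataset classes above rotates each stored 16-bit value right by 3 bits before scaling, which appears to follow the usual SUN RGB-D convention for undoing the rotation applied when the depth PNGs were written. A small worked example (the stored value is illustrative):

# Worked example of the bit manipulation above: rotate a uint16 right by 3.
import numpy as np

stored = np.array([20000], dtype=np.uint16)              # raw value read from the PNG
decoded = np.bitwise_or(np.right_shift(stored, 3),
                        np.left_shift(stored, 16 - 3))   # wraps within uint16
print(int(decoded[0]))                                   # 2500, then scaled by /120.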
avg_line_length: 44.116667 | max_line_length: 113 | alphanum_fraction: 0.647526
count_classes: 4,546 | score_classes: 0.858708 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 785 | score_documentation: 0.148281
hexsha: 3dd551aff5d9acdfce555b2997eb9c881f846544 | size: 1,382 | ext: py | lang: Python
max_stars: path=setup.py, repo=elafefy11/flask_gtts, head=8f14b9f114127d8fba240a88f3aa16eb17628872, licenses=["MIT"], count=null
max_issues: path=setup.py, repo=elafefy11/flask_gtts, head=8f14b9f114127d8fba240a88f3aa16eb17628872, licenses=["MIT"], count=null
max_forks: path=setup.py, repo=elafefy11/flask_gtts, head=8f14b9f114127d8fba240a88f3aa16eb17628872, licenses=["MIT"], count=null
content:
"""
Flask-gTTS
-------------
A Flask extension that adds gTTS (Google text-to-speech) to templates.
It makes adding and configuring multiple text-to-speech audio files at
a time much easier and less time consuming.
"""
from setuptools import setup
setup(
name='Flask-gTTS',
version='0.11',
url='https://github.com/mrf345/flask_gtts/',
download_url='https://github.com/mrf345/flask_gtts/archive/0.11.tar.gz',
license='MIT',
author='Mohamed Feddad',
author_email='mrf345@gmail.com',
description='gTTS Google text to speech flask extension',
long_description=__doc__,
py_modules=['gtts'],
packages=['flask_gtts'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'gTTS',
'static_parameters'
],
keywords=['flask', 'extension', 'google', 'text', 'speech',
'gTTS', 'TTS', 'text-to-speech'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
setup_requires=['pytest-runner'],
test_requires=['pytest']
)
avg_line_length: 29.404255 | max_line_length: 76 | alphanum_fraction: 0.633864
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 848 | score_documentation: 0.613603
hexsha: 3dd5a2aa827f14ee73dd8f5c2368476016523c81 | size: 232 | ext: py | lang: Python
max_stars: path=READ.py, repo=BeatrizFS/MongoDB-Python, head=a23741d5f58ccad50e6239c963f78759f92098ac, licenses=["MIT"], count=null
max_issues: path=READ.py, repo=BeatrizFS/MongoDB-Python, head=a23741d5f58ccad50e6239c963f78759f92098ac, licenses=["MIT"], count=null
max_forks: path=READ.py, repo=BeatrizFS/MongoDB-Python, head=a23741d5f58ccad50e6239c963f78759f92098ac, licenses=["MIT"], count=null
content:
from Arquivo1 import Produto
# READ
# Query the database
# 1. Return all the information from the database
produtos = Produto.objects()
print(produtos)
for produto in produtos:
print(produto.Nome, produto.Valor)
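Building on the full query above, a short sketch of filtered reads with the same MongoEngine query API; the field names Nome and Valor come from the document class above, while the filter values are illustrative:

# 2. Filtered reads (illustrative values for Nome and Valor).
notebooks = Produto.objects(Nome="Notebook")
cheap = Produto.objects(Valor__lte=100)
for produto in cheap:
    print(produto.Nome, produto.Valor)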
avg_line_length: 23.2 | max_line_length: 50 | alphanum_fraction: 0.75
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 86 | score_documentation: 0.367521
hexsha: 3dd7149bf486a0156690dac8d36a869ec269ebf6 | size: 9,280 | ext: py | lang: Python
max_stars: path=src/aux_funcs.py, repo=ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V, head=79ca40ababbc65464650c5519f9e7fdbf3c9d14d, licenses=["MIT"], count=7, events 2020-03-19T05:04:30.000Z to 2022-03-31T10:29:42.000Z
max_issues: path=src/aux_funcs.py, repo=ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V, head=79ca40ababbc65464650c5519f9e7fdbf3c9d14d, licenses=["MIT"], count=2, events 2020-08-19T03:24:31.000Z to 2021-03-02T00:18:46.000Z
max_forks: path=src/aux_funcs.py, repo=ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V, head=79ca40ababbc65464650c5519f9e7fdbf3c9d14d, licenses=["MIT"], count=3, events 2020-09-17T04:15:04.000Z to 2021-01-18T08:37:39.000Z
content:
"""
----------------------------------ABOUT-----------------------------------
Author: Arun Baskaran
--------------------------------------------------------------------------
"""
import model_params
# Assumed imports for the helpers below; the functions reference these names.
import os
import random
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers
import matplotlib.pyplot as plt
from PIL import Image
from scipy import ndimage as ndi
from skimage import exposure, morphology
from skimage.filters import sobel
from skimage.color import label2rgb
def smooth(img):
return 0.5*img + 0.5*(
np.roll(img, +1, axis=0) + np.roll(img, -1, axis=0) +
np.roll(img, +1, axis=1) + np.roll(img, -1, axis=1) )
def returnIndex(a , value):
k = np.size(a)
for i in range(k):
if(a[i]==value):
return i
def create_model():
xavier_init = tf.contrib.layers.xavier_initializer() #Initializer for weights
zero_init = tf.zeros_initializer() #Initializer for biases
model = tf.keras.models.Sequential([
keras.layers.Conv2D( 2, [5,5], (1,1), input_shape = (200,200,1), kernel_initializer = xavier_init, bias_initializer = zero_init, kernel_regularizer=regularizers.l1(0.001), padding = 'valid', name = 'C1'),
keras.layers.MaxPool2D((2,2), (2,2), input_shape = (196,196,2),padding = 'valid', name ='P1'),
keras.layers.Conv2D(4, [5,5],(1,1), input_shape = (98,98,2), kernel_initializer = xavier_init, bias_initializer = zero_init, kernel_regularizer=regularizers.l1(0.001), name ='C2'),
keras.layers.MaxPool2D((2,2), (2,2), input_shape = (94,94,4), padding = 'valid', name ='P2'),
keras.layers.Conv2D(12, [3,3],(1,1), input_shape = (47,47,4), kernel_initializer = xavier_init, bias_initializer = zero_init, kernel_regularizer=regularizers.l1(0.001), name ='C3'),
keras.layers.Flatten(name ='fc_layer'),
keras.layers.Dense(3, activation='softmax', kernel_regularizer=regularizers.l1(0.001)),])
return model
def load_images_labels():
df = pd.read_excel('labels.xlsx', header=None, names=['id', 'label'])
total_labels = df['label']
for i in range(len(total_labels)):
total_labels[i]-=1
train_list = random.sample(range(1,total_size+1), train_size)
nontrainlist = []
test_list = []
for i in range(1,total_size+1):
if i not in train_list:
nontrainlist.append(i)
validation_list = random.sample(nontrainlist, validation_size)
for item in nontrainlist:
if(item not in validation_list):
test_list.append(item)
train_images = []
train_labels = []
validation_images = []
validation_labels = []
test_images = []
test_labels=[]
test_images_id = []
for i in range(1, total_size+1):
if i in train_list:
filename = 'image_' + str(i) + '.png'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, dsize=(width, height), interpolation=cv2.INTER_CUBIC)
image = cv2.blur(image,(5,5))
image = (image - np.min(image))/(np.max(image)-np.min(image))
train_images.append(image)
train_labels.append(total_labels[i-1])
elif i in validation_list:
filename = 'image_' + str(i) + '.png'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, dsize=(width, height), interpolation=cv2.INTER_CUBIC)
image = cv2.blur(image,(5,5))
image = (image - np.min(image))/(np.max(image)-np.min(image))
validation_images.append(image)
validation_labels.append(total_labels[i-1])
else:
filename = 'image_' + str(i) + '.png'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, dsize=(width, height), interpolation=cv2.INTER_CUBIC)
image = cv2.blur(image,(5,5))
image = (image - np.min(image))/(np.max(image)-np.min(image))
test_images_id.append(i)
test_images.append(image)
test_labels.append(total_labels[i-1])
train_images = np.reshape(train_images, (train_size, width, height, 1))
validation_images = np.reshape(validation_images, (validation_size, width, height, 1))
test_images = np.reshape(test_images, (test_size, width, height, 1))
train_labels = tf.keras.backend.one_hot(train_labels,3)
test_labels = tf.keras.backend.one_hot(test_labels,3)
validation_labels = tf.keras.backend.one_hot(validation_labels,3)
return train_images, train_labels, test_images, test_labels, validation_images, validation_labels
def train_model():
model = create_model()
checkpoint_path = "weights/classification.ckpt" #Check this path
checkpoint_dir = os.path.dirname(checkpoint_path)
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', verbose=1, patience = 50, mode='min', restore_best_weights=True)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=0)
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),optimizer='Adam',metrics='accuracy')
model.fit(train_images, train_labels, epochs=1500, validation_data=(validation_images,validation_labels), steps_per_epoch = 4, validation_steps=1, callbacks=[es, cp_callback])
return model
def load_model():
model = create_model()
model.load_weights(checkpoint_path)
return model
def test_accuracy(model):
loss,acc = model.evaluate(test_images, test_labels, verbose=2, steps = 1)
print("Accuracy: {:5.2f}%".format(100*acc))
def get_predicted_classes(model):
y_prob = model.predict(test_images)
y_classes = y_prob.argmax(axis=-1)
return y_classes
df = pd.read_excel('labels.xlsx', header=None, names=['id', 'label'])
total_labels = df['label']
for i in range(len(total_labels)):
total_labels[i]-=1
def duplex_segmentation(i):
area_frac_duplex=[]
duplex_image_id=[]
filename = 'image_' + str(test_images_id[i]) + '.png'
image = Image.open(filename).convert('F')
image = np.copy(np.reshape(np.array(image), image.size[::-1])/255.)
image = exposure.equalize_adapthist(image, clip_limit=8.3)
image = (smooth(smooth(image)))
image_copy = image
image = cv2.resize(image, dsize=(200,200), interpolation=cv2.INTER_CUBIC)
image_copy = cv2.resize(image_copy, dsize=(200,200), interpolation=cv2.INTER_CUBIC)
markers = np.zeros_like(image)
markers[image > np.median(image) - 0.10*np.std(image)] = 1
markers[image < np.median(image) - 0.10*np.std(image)] = 2
fig, (ax1) = plt.subplots(1, sharex=True, sharey=True)
elevation_map = sobel(image)
#The following implementation of watershed segmentation has been adopted from scikit's documentation example: https://scikit-image.org/docs/dev/user_guide/tutorial_segmentation.html
segmentation = morphology.watershed(elevation_map, markers)
segmentation = ndi.binary_fill_holes(segmentation - 1)
labeled_grains, _ = ndi.label(segmentation)
image_label_overlay = label2rgb(labeled_grains, image=image)
ax1.imshow(image_copy, cmap=plt.cm.gray, interpolation='nearest')
ax1.contour(segmentation, [0.5], linewidths=1.2, colors='r')
ax1.axis('off')
outfile = 'seg_duplex_' + str(test_images_id[i]) + '.png'
plt.savefig(outfile, dpi=100)
equiaxed_area_fraction_dict[test_images_id[i]] = np.sum(segmentation)/(np.shape(image)[0]*np.shape(image)[1])
def lamellar_segmentation(i):
dim = 400
filename = 'image_' + str(test_images_id[i]) + '.png'
image = Image.open(filename).convert('F')
image = np.copy(np.reshape(np.array(image), image.size[::-1])/255.)
image = exposure.equalize_hist(image)
image = smooth(image)
image = np.reshape(image, (np.shape(image)[0],np.shape(image)[1]))
gx = cv2.Sobel(np.float32(image), cv2.CV_32F, 1, 0, ksize=1)
gy = cv2.Sobel(np.float32(image), cv2.CV_32F, 0, 1, ksize=1)
mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
mag_cut_off = 0.2*np.max(mag)
(n,bins,patches) = plt.hist(angle.ravel(), bins = 30)
n_sorted = sorted(n, reverse=True)
bin0 = bins[returnIndex(n, n_sorted[0])]
bin1 = bins[returnIndex(n, n_sorted[1])]
bin2 = bins[returnIndex(n, n_sorted[2])]
bin_s = np.ones(20)
for i in range(20):
bin_s[i] = bins[returnIndex(n, n_sorted[i])]
markers = np.zeros_like(angle)
markers[(angle/360 > bin1/360 - 26/360) & (angle/360 < bin1/360 + 26/360) & (mag > mag_cut_off)] = 1
markers[(angle/360 > bin2/360 - 18/360) & (angle/360 < bin2/360 + 18/360) & (mag > mag_cut_off)] = 1
markers[(angle/360 > bin0/360 - 18/360) & (angle/360 < bin0/360 + 18/360) & (mag > mag_cut_off)] = 1
markers = (smooth(smooth(markers)))
markers1 = np.where(markers > np.mean(markers), 1.0, 0.0)
lamellae_area_fraction_dict[test_images_id[i]] = np.sum(markers1)/(np.shape(image)[0]*np.shape(image)[1])
fig, (ax1) = plt.subplots(1, sharex=True, sharey=True)
ax1.imshow(image, 'gray')
ax1.imshow(markers1, alpha = 0.5)
image1 = image + markers1
ax1.imshow(image1)
#plt.colorbar()
outfile = 'seg_lamellae_' + str(test_images_id[i]) + '.png'
plt.savefig(outfile, dpi=100)
def feature_segmentation():
equiaxed_area_fraction_dict = {}
lamellae_area_fraction_dict= {}
for i in range(np.size(y_classes)):
if(y_classes[i]==0):
duplex_segmentation(i)
elif(y_classes[i]==1):
lamellar_segmentation(i)
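For orientation, the feature-map sizes produced by create_model() above, derived from the kernel, stride and padding values in the layer definitions (they also match the input_shape hints written into the code):

# Shape walk-through for create_model() (derived from the layer parameters above).
#   input            : (200, 200, 1)
#   C1 5x5, valid    : (196, 196, 2)
#   P1 2x2, stride 2 : ( 98,  98, 2)
#   C2 5x5, valid    : ( 94,  94, 4)
#   P2 2x2, stride 2 : ( 47,  47, 4)
#   C3 3x3, valid    : ( 45,  45, 12)
#   fc_layer         : 45 * 45 * 12 = 24300 features
#   Dense(softmax)   : 3 class scores
# create_model().summary() prints the same numbers.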
avg_line_length: 44.830918 | max_line_length: 210 | alphanum_fraction: 0.650108
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 780 | score_documentation: 0.084052
hexsha: 3dd842d4edbdc348779300fb523036992a49b5b2 | size: 125 | ext: py | lang: Python
max_stars: path=manage.py, repo=Stupnitskiy/BinaryAPI, head=e448936ceed96da72e2aa65847030ea56edb224f, licenses=["MIT"], count=null
max_issues: path=manage.py, repo=Stupnitskiy/BinaryAPI, head=e448936ceed96da72e2aa65847030ea56edb224f, licenses=["MIT"], count=null
max_forks: path=manage.py, repo=Stupnitskiy/BinaryAPI, head=e448936ceed96da72e2aa65847030ea56edb224f, licenses=["MIT"], count=null
content:
from flask_script import Manager
from src import app
manager = Manager(app)
if __name__ == "__main__":
manager.run()
avg_line_length: 12.5 | max_line_length: 32 | alphanum_fraction: 0.72
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 10 | score_documentation: 0.08
hexsha: 3dd84d6968111423f954120eed10897fd01c00ea | size: 1,355 | ext: py | lang: Python
max_stars: path=CIFAR10.py, repo=jimmyLeeMc/NeuralNetworkTesting, head=a6208cc8639a93ac24655495c9ace1acba21c76f, licenses=["MIT"], count=null
max_issues: path=CIFAR10.py, repo=jimmyLeeMc/NeuralNetworkTesting, head=a6208cc8639a93ac24655495c9ace1acba21c76f, licenses=["MIT"], count=null
max_forks: path=CIFAR10.py, repo=jimmyLeeMc/NeuralNetworkTesting, head=a6208cc8639a93ac24655495c9ace1acba21c76f, licenses=["MIT"], count=null
content:
#CIFAR
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
data = keras.datasets.cifar10
activations=[keras.activations.sigmoid, keras.activations.relu,
keras.layers.LeakyReLU(), keras.activations.tanh]
results=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
class_names=[0,1,2,3,4,5,6,7,8,9]
a=0
for i in range(4):
for j in range(4):
losssum=0
for k in range(6):
(train_images, train_labels), (test_images, test_labels) = data.load_data()
train_images = train_images/255.0
test_images = test_images/255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(32,32,3)),
keras.layers.Dense(128, activations[i]),
keras.layers.Dense(10, activations[j])
# tanh softmax
])
model.compile(optimizer="adam",loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(train_images, train_labels,
validation_split=0.25, epochs=5, batch_size=16, verbose=1)
prediction = model.predict(test_images)
losssum=losssum+history.history['loss'][len(history.history['loss'])-1]
results[a] = losssum / 6  # average final-epoch loss over the 6 repeats
a=a+1
print(results)
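Since results is filled in row-major order over the two activation loops, each recorded loss can be mapped back to its (hidden, output) activation pair with a short sketch; the names follow the order of the activations list above:

# Sketch: label each recorded loss with the activation pair that produced it.
names = ["sigmoid", "relu", "leaky_relu", "tanh"]
for idx, loss in enumerate(results[:16]):
    hidden, output = names[idx // 4], names[idx % 4]
    print(hidden, output, loss)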
avg_line_length: 38.714286 | max_line_length: 108 | alphanum_fraction: 0.591882
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 81 | score_documentation: 0.059779
3dd93f9bb15a42397c641e431fd3df72da46ab0d
| 3,127
|
py
|
Python
|
All_RasPy_Files/edgedetection.py
|
govindak-umd/Autonomous_Robotics
|
5293b871c7032b40cbff7814bd773871ee2c5946
|
[
"MIT"
] | 2
|
2020-05-14T11:23:30.000Z
|
2020-05-25T06:30:57.000Z
|
All_RasPy_Files/edgedetection.py
|
govindak-umd/ENPM809T
|
5293b871c7032b40cbff7814bd773871ee2c5946
|
[
"MIT"
] | null | null | null |
All_RasPy_Files/edgedetection.py
|
govindak-umd/ENPM809T
|
5293b871c7032b40cbff7814bd773871ee2c5946
|
[
"MIT"
] | 5
|
2020-06-09T22:09:15.000Z
|
2022-01-31T17:11:19.000Z
|
# ENME 489Y: Remote Sensing
# Edge detection
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Define slice of an arbitrary original image
f = np.empty((0))
index = np.empty((0))
# Create intensity data, including noise
for i in range(2000):
index = np.append(index, i)
if i <= 950:
f = np.append(f, 50 + np.random.normal(0,1))
elif i > 950 and i < 1000:
f = np.append(f, 50 + (i - 950)/2 + np.random.normal(0,1))
elif i >= 1000 and i < 1050:
f = np.append(f, 75 + (i - 1000)/2 + np.random.normal(0,1))
else:
f = np.append(f, 100 + np.random.normal(0,1))
print(f.shape)
print(index.shape)
plt.figure(2)
plt.plot(index, f, 'r-')
plt.title('Slice of Original Image: f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Pixel intensity f(x)')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the original signal
messy = np.gradient(f)
plt.figure(3)
plt.plot(messy, 'r-')
plt.title('Derivative of Original Image Slice: df/dx')
plt.xlabel('Pixel x')
plt.ylabel('Derivative df/dx')
plt.grid()
plt.show()
# Define Gaussian filter
mean = 0
std = 5
var = np.square(std)
x = np.arange(-20, 20, 0.1)
kernel = (1/(std*np.sqrt(2*np.pi)))*np.exp(-np.square((x-mean)/std)/2)
print(kernel.shape)
plt.figure(4)
plt.plot(x, kernel, 'b-')
plt.title('Kernel: Gaussian Filter h(x)')
plt.xlabel('Pixel x')
plt.ylabel('Kernel h(x)')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(kernel, f, 'same')
print(smoothed.shape)
plt.figure(5)
plt.plot(smoothed, 'r-')
plt.title('Apply Gaussian Filter: Convolve h(x) * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the filtered signal
edges = np.gradient(smoothed)
plt.figure(6)
plt.plot(edges, 'r-')
plt.title('Derivative of Convolved Image: d/dx[ h(x) * f(x) ] ')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the Gaussian kernel
first_diff = np.gradient(kernel)
plt.figure(7)
plt.plot(first_diff, 'b-')
plt.title('1st Derivative of Gaussian: d/dx[ h(x) ]')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(first_diff, f, 'same')
print(smoothed.shape)
plt.figure(8)
plt.plot(smoothed, 'r-')
plt.title('Apply Gaussian Filter: Convolve d/dx[ h(x) ] * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
# Plot the second derivative of the Gaussian kernel: the Laplacian operator
laplacian = np.gradient(first_diff)
plt.figure(9)
plt.plot(laplacian, 'b-')
plt.title('2nd Derivative of Gaussian: Laplacian Operator d^2/dx^2[ h(x) ]')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(laplacian, f, 'same')
print(smoothed.shape)
plt.figure(10)
plt.plot(smoothed, 'r-')
plt.title('Apply Laplacian Operator: Convolve d^2/dx^2[ h(x) ] * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
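# A minimal numerical check (not part of the original script): the plots above rely on the
# identity d/dx[h(x) * f(x)] = (d/dx h(x)) * f(x). With a synthetic step edge and the same
# Gaussian kernel (mean 0, std 5), the two sides agree away from the array boundaries.
import numpy as np
x_chk = np.arange(-20, 20, 0.1)
kernel_chk = (1 / (5 * np.sqrt(2 * np.pi))) * np.exp(-np.square(x_chk / 5) / 2)
f_chk = np.concatenate([np.full(1000, 50.0), np.full(1000, 100.0)])   # ideal step edge
lhs = np.gradient(np.convolve(kernel_chk, f_chk, 'same'))   # differentiate the smoothed signal
rhs = np.convolve(np.gradient(kernel_chk), f_chk, 'same')   # smooth with the kernel derivative
print(np.max(np.abs(lhs[250:-250] - rhs[250:-250])))        # small once boundary effects are excluded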
| 23.689394
| 76
| 0.68692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,254
| 0.401023
|
3dda1806de2d35a90208c505c2c72da1466cf4a9
| 1,850
|
py
|
Python
|
alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceReceiptBatchqueryModel(object):
def __init__(self):
self._level = None
self._out_biz_no_list = None
@property
def level(self):
return self._level
@level.setter
def level(self, value):
self._level = value
@property
def out_biz_no_list(self):
return self._out_biz_no_list
@out_biz_no_list.setter
def out_biz_no_list(self, value):
if isinstance(value, list):
self._out_biz_no_list = list()
for i in value:
self._out_biz_no_list.append(i)
def to_alipay_dict(self):
params = dict()
if self.level:
if hasattr(self.level, 'to_alipay_dict'):
params['level'] = self.level.to_alipay_dict()
else:
params['level'] = self.level
if self.out_biz_no_list:
if isinstance(self.out_biz_no_list, list):
for i in range(0, len(self.out_biz_no_list)):
element = self.out_biz_no_list[i]
if hasattr(element, 'to_alipay_dict'):
self.out_biz_no_list[i] = element.to_alipay_dict()
if hasattr(self.out_biz_no_list, 'to_alipay_dict'):
params['out_biz_no_list'] = self.out_biz_no_list.to_alipay_dict()
else:
params['out_biz_no_list'] = self.out_biz_no_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceReceiptBatchqueryModel()
if 'level' in d:
o.level = d['level']
if 'out_biz_no_list' in d:
o.out_biz_no_list = d['out_biz_no_list']
return o
| 28.90625
| 81
| 0.585405
| 1,733
| 0.936757
| 0
| 0
| 717
| 0.387568
| 0
| 0
| 188
| 0.101622
|
3ddaf9735b2cb2b79bcc96e4e4c161028c28ae19
| 2,632
|
py
|
Python
|
tests/test_timeconversion.py
|
FObersteiner/pyFuppes
|
2a8c6e210855598dbf4fb491533bf22706340c9a
|
[
"MIT"
] | 1
|
2020-06-02T08:02:36.000Z
|
2020-06-02T08:02:36.000Z
|
tests/test_timeconversion.py
|
FObersteiner/pyFuppes
|
2a8c6e210855598dbf4fb491533bf22706340c9a
|
[
"MIT"
] | 3
|
2022-03-04T11:43:19.000Z
|
2022-03-25T00:26:46.000Z
|
tests/test_timeconversion.py
|
FObersteiner/pyFuppes
|
2a8c6e210855598dbf4fb491533bf22706340c9a
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import datetime, timezone
from pyfuppes import timeconversion
class TestTimeconv(unittest.TestCase):
@classmethod
def setUpClass(cls):
# to run before all tests
print("testing pyfuppes.timeconversion...")
@classmethod
def tearDownClass(cls):
# to run after all tests
pass
def setUp(self):
# to run before each test
pass
def tearDown(self):
# to run after each test
pass
def test_dtstr_2_mdns(self):
# no timezone
t = ["2012-01-01T01:00:00", "2012-01-01T02:00:00"]
f = "%Y-%m-%dT%H:%M:%S"
result = list(map(int, timeconversion.dtstr_2_mdns(t, f)))
self.assertEqual(result, [3600, 7200])
# with timezone
t = ["2012-01-01T01:00:00+02:00", "2012-01-01T02:00:00+02:00"]
f = "%Y-%m-%dT%H:%M:%S%z"
result = list(map(int, timeconversion.dtstr_2_mdns(t, f)))
self.assertEqual(result, [3600, 7200])
# zero case
t = "2012-01-01T00:00:00+02:00"
result = timeconversion.dtstr_2_mdns(t, f)
self.assertEqual(int(result), 0)
def test_dtobj_2_mdns(self):
t = [datetime(2000, 1, 1, 1), datetime(2000, 1, 1, 2)]
result = list(map(int, timeconversion.dtobj_2_mdns(t)))
self.assertEqual(result, [3600, 7200])
t = [
datetime(2000, 1, 1, 1, tzinfo=timezone.utc),
datetime(2000, 1, 1, 2, tzinfo=timezone.utc),
]
result = list(map(int, timeconversion.dtobj_2_mdns(t)))
self.assertEqual(result, [3600, 7200])
def test_posix_2_mdns(self):
t = [3600, 7200, 10800]
result = list(map(int, timeconversion.posix_2_mdns(t)))
self.assertEqual(result, t)
def test_mdns_2_dtobj(self):
t = [3600, 10800, 864000]
ref = datetime(2020, 5, 15, tzinfo=timezone.utc)
result = list(map(int, timeconversion.mdns_2_dtobj(t, ref, posix=True)))
self.assertEqual(result, [1589504400, 1589511600, 1590364800])
def test_daysSince_2_dtobj(self):
t0, off = datetime(2020, 5, 10), 10.5
result = timeconversion.daysSince_2_dtobj(t0, off)
self.assertEqual(result.hour, 12)
self.assertEqual(result.day, 20)
def test_dtstr_2_posix(self):
result = timeconversion.dtstr_2_posix("2020-05-15", "%Y-%m-%d")
self.assertAlmostEqual(
result, datetime(2020, 5, 15, tzinfo=timezone.utc).timestamp()
)
if __name__ == "__main__":
unittest.main()
| 32.9
| 81
| 0.587006
| 2,474
| 0.93997
| 0
| 0
| 215
| 0.081687
| 0
| 0
| 375
| 0.142477
|
3ddb42001698eb4e38741ad5c0c31bf71b836bbd
| 1,111
|
py
|
Python
|
ucscentralsdk/methodmeta/LstorageCloneMeta.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
ucscentralsdk/methodmeta/LstorageCloneMeta.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
ucscentralsdk/methodmeta/LstorageCloneMeta.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the meta information of LstorageClone ExternalMethod."""
from ..ucscentralcoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("LstorageClone", "lstorageClone", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"dn": MethodPropertyMeta("Dn", "dn", "ReferenceObject", "Version142b", "InputOutput", False),
"in_array_name": MethodPropertyMeta("InArrayName", "inArrayName", "Xs:string", "Version142b", "Input", False),
"in_hierarchical": MethodPropertyMeta("InHierarchical", "inHierarchical", "Xs:string", "Version142b", "Input", False),
"in_target_org": MethodPropertyMeta("InTargetOrg", "inTargetOrg", "ReferenceObject", "Version142b", "Input", False),
"out_config": MethodPropertyMeta("OutConfig", "outConfig", "ConfigConfig", "Version142b", "Output", True),
}
prop_map = {
"cookie": "cookie",
"dn": "dn",
"inArrayName": "in_array_name",
"inHierarchical": "in_hierarchical",
"inTargetOrg": "in_target_org",
"outConfig": "out_config",
}
| 44.44
| 122
| 0.706571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 674
| 0.606661
|
3ddd545e8ac1636ac0a7d92a17cca391f2e23803
| 7,468
|
py
|
Python
|
tool/powermon.py
|
virajpadte/Power_monitoring_JetsonTX1
|
3f337adb16ce09072d69147b705a0c705b3ad53c
|
[
"MIT"
] | null | null | null |
tool/powermon.py
|
virajpadte/Power_monitoring_JetsonTX1
|
3f337adb16ce09072d69147b705a0c705b3ad53c
|
[
"MIT"
] | null | null | null |
tool/powermon.py
|
virajpadte/Power_monitoring_JetsonTX1
|
3f337adb16ce09072d69147b705a0c705b3ad53c
|
[
"MIT"
] | null | null | null |
import sys
import glob
import serial
import ttk
import tkFileDialog
from Tkinter import *
#for plotting we need these:
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from drawnow import *
class MainView:
#CLASS VARIABLES:
closing_status = False
powerW = []
def __init__(self, master):
self.master = master
mainframe = ttk.Frame(self.master, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
port = StringVar()
port.set(" ") # initial value
ttk.Label(mainframe, text="Select Port").grid(column=1, row=1, sticky=W)
port_list = self.serial_ports()
port_list.insert(0," ")
print(port_list)
port = StringVar(mainframe)
port.set(port_list[1]) # default value
dropdown = ttk.OptionMenu(mainframe,port,*port_list)
dropdown.configure(width=20)
dropdown.grid(column=2, row=1, sticky=W)
ttk.Button(mainframe, text="Realtime Plot", command=lambda: self.real_time_plotting(port)).grid(column=1, row=2, sticky=W)
ttk.Button(mainframe, text="Record Session", command=lambda: self.record_session(port)).grid(column=2, row=2, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
def record_session(self,port):
print("record_session")
port = port.get()
print("record port",port)
self.newWindow = Toplevel(root)
self.app = record_session(self.newWindow,port)
def serial_ports(self):
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
result = ports
return result
def handle_close(self):
print('Closed Figure!')
self.closing_status = True
def real_time_plotting(self,port):
cnt = 0
window_size = 20
connected = False
port = port.get()
print("real_time_plotting")
print("realtime data port", port)
try:
print("trying to connect to device....")
ser = serial.Serial(port, 115200)
except:
print "Failed to connect on", port
# ## loop until the arduino tells us it is ready
while not connected:
serin = ser.read()
connected = True
try:
while not self.closing_status: # While loop that loops forever
if ser.inWaiting(): # Wait here until there is data
power = ser.readline() # read the line of text from the serial port
print(power)
self.powerW.append(power) # Build our tempF array by appending temp readings
drawnow(self.makeFig) # Call drawnow to update our live graph
plt.pause(.000001) # Pause Briefly. Important to keep drawnow from crashing
cnt = cnt + 1
if (cnt > window_size): # If you have 50 or more points, delete the first one from the array
self.powerW.pop(0) # This allows us to just see the last 50 data points
print("closing port")
ser.close()
except KeyboardInterrupt:
print("closing port")
ser.close()
def makeFig(self): # Create a function that makes our desired plot
# configure the plot
plt.ion() # Tell matplotlib you want interactive mode to plot live data
plt.rcParams['toolbar'] = 'None'
# create a fig
#fig = plt.figure(0)
#fig.canvas.set_window_title('Window 3D')
#fig.canvas.mpl_connect('close_event', self.handle_close())
plt.ylim(0, 15) # Set y min and max values
plt.title('Plotting power consumption') # Plot the title
plt.grid(True) # Turn the grid on
plt.ylabel('Power (Watts)') # Set ylabels
plt.plot(self.powerW, 'ro-', label='Power W') # plot the temperature
plt.legend(loc='upper right') # plot the legend
def handle_close(self):
print('Closed Figure!')
self.closing_status = True
class record_session:
#class variable:
path = ""
def __init__(self, master,port):
self.master = master
self.master.title("Session parameters")
mainframe = ttk.Frame(self.master, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
print("passed port", port)
duration = StringVar()
autoplot = IntVar()
autoplot.set(0) # initial value
ttk.Button(mainframe, text="Select a location to store session.csv file", command=self.select_dir).grid(column=1, row=1, sticky=W)
ttk.Label(mainframe, text="Record Duration in seconds:").grid(column=1, row=2, sticky=W)
duration_entry_box = ttk.Entry(mainframe, width=5, textvariable=duration)
duration_entry_box.grid(column=2, row=2, sticky=W)
#ttk.Checkbutton(mainframe, text="Auto Plotting enabled", variable=autoplot).grid(column=1, row=3, sticky=W)
ttk.Button(mainframe, text="Start recording", command=lambda: self.record(port,autoplot)).grid(column=1, row=4, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
def select_dir(self):
global path
print("select dir")
path = tkFileDialog.askdirectory()
#append file name to the path
if len(path):
path = path + "/session.csv"
print(path)
def record(self,port,autoplot):
global path
print("recording")
autoplot_status = autoplot.get()
print("autoplot_status", autoplot_status)
connected = False
## establish connection to the serial port that your arduino
## is connected to.
try:
print("trying to connect to device....")
ser = serial.Serial(port, 115200)
except:
print "Failed to connect on", port
# ## loop until the arduino tells us it is ready
while not connected:
            serin = ser.read()
connected = True
#open text file to store the power values
text_file = open(path, 'w')
#read serial data from arduino and
#write it to the text file 'Data.csv'
try:
while True:
if ser.inWaiting():
# Read a line and convert it from b'xxx\r\n' to xxx
line = ser.readline()
print(line)
if line: # If it isn't a blank line
text_file.write(line)
text_file.close()
except KeyboardInterrupt:
print("closing port")
ser.close()
if __name__ == '__main__':
root = Tk()
root.title("Power Monitoring tool")
main = MainView(root)
root.mainloop()
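# A minimal refinement sketch (not part of the tool above; assumes pyserial is installed
# and the glob pattern is illustrative): serial_ports() only builds a list of candidate
# device names, and a common extra step is to keep only ports that can actually be opened.
import glob
import serial
def usable_ports(candidates):
    ok = []
    for name in candidates:
        try:
            s = serial.Serial(name)      # try to open the port
            s.close()
            ok.append(name)
        except (OSError, serial.SerialException):
            pass                         # skip busy or nonexistent ports
    return ok
print(usable_ports(glob.glob('/dev/tty[A-Za-z]*')))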
| 35.561905
| 138
| 0.595742
| 7,116
| 0.952866
| 0
| 0
| 0
| 0
| 0
| 0
| 2,121
| 0.284012
|
3ddeb574a2024dfb0d06c0c742bbc0a272df7e2d
| 900
|
py
|
Python
|
shop/tests/products/views/test_product_details_view.py
|
nikolaynikolov971/NftShop
|
09a535a6f708f0f6da5addeb8781f9bdcea72cf3
|
[
"MIT"
] | null | null | null |
shop/tests/products/views/test_product_details_view.py
|
nikolaynikolov971/NftShop
|
09a535a6f708f0f6da5addeb8781f9bdcea72cf3
|
[
"MIT"
] | null | null | null |
shop/tests/products/views/test_product_details_view.py
|
nikolaynikolov971/NftShop
|
09a535a6f708f0f6da5addeb8781f9bdcea72cf3
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.urls import reverse
from shop.products.models import Product
from tests.base.mixins import ProductTestUtils
class ProductDetailsTest(ProductTestUtils, TestCase):
def setUp(self):
self.client = Client()
self.product = self.create_product(
title="Barry",
price=555.55,
image='media/products/Dart.png',
description="dasd",
is_sold=False,
)
def test_getProductDetails(self):
response = self.client.get(reverse('product_details', kwargs={'pk': self.product.id}))
self.assertEqual(200, response.status_code)
def test_showErrorIfProductDoesNotExist(self):
try:
self.client.get(reverse('product_details', kwargs={'pk': self.product.id}))
except Product.DoesNotExist:
self.assertRaises(Exception)
| 30
| 94
| 0.66
| 735
| 0.816667
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.088889
|
3de34122732924fae3421861027e4399e17b6da8
| 4,558
|
py
|
Python
|
projetoFTP/servidor/sftps.py
|
MarciovsRocha/conectividade-sistemas-cyberfisicos
|
d76b8a540b55eb8a54ae99067b625010e85a2eb8
|
[
"MIT"
] | null | null | null |
projetoFTP/servidor/sftps.py
|
MarciovsRocha/conectividade-sistemas-cyberfisicos
|
d76b8a540b55eb8a54ae99067b625010e85a2eb8
|
[
"MIT"
] | null | null | null |
projetoFTP/servidor/sftps.py
|
MarciovsRocha/conectividade-sistemas-cyberfisicos
|
d76b8a540b55eb8a54ae99067b625010e85a2eb8
|
[
"MIT"
] | null | null | null |
import socket
import threading
import os
import sys
from pathlib import Path
#---------------------------------------------------
def ReadLine(conn):
line = ''
while True:
try:
byte = conn.recv(1)
except:
print('O cliente encerrou')
return 0
if not byte:
return 0
byte = byte.decode()
if byte == '\r':
continue
if byte == '\n':
break
line += byte
return line
#------------------------------------------------
def Upload(conn, ip, file):
try:
f = open(file,'w+')
except:
print('erro abertura arquivo')
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,9998))
while True:
data = s.recv(1024)
#print(data.decode('utf-8'))
f.write(data.decode('utf-8'))
if not data:
break
f.close()
s.close()
conn.send(bytes('TRANSMISSAO ENCERRADA\n','utf-8'))
except:
f.close()
conn.send(bytes('A PORTA DE DADOS NÃO ESTA ABERTA\n','utf-8'))
#-----------------------------------------------
def Download(conn, ip, file):
try:
f = open(Path(file),'rb')
except:
print('erro abertura arquivo')
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,9998))
s.send(f.read())
f.close()
s.close()
conn.send(bytes('TRANSMISSAO ENCERRADA\n','utf-8'))
except:
print('ERRO DE DOWNLOAD')
f.close()
conn.send(bytes('A PORTA DE DADOS NÃO ESTA ABERTA\n','utf-8'))
#------------------------------------------------
def TrataCliente(conn, addr):
while True:
conn.send(bytes('\r\n','utf-8'))
data = ReadLine(conn)
print('{} enviou {}'.format(addr,data))
if data == 0:
break
try:
if data == 'os.getcwd()':
res=os.getcwd()
conn.send(bytes(res,'utf-8'))
elif data.startswith('os.listdir'):
file = data.split('(')[1].split(')')[0]
if file == '':
file = '.'
res=os.listdir(file)
conn.send(bytes(str(res),'utf-8'))
elif data.startswith('os.makedirs'):
file = data.split('(')[1].split(')')[0]
print(file)
if file != '':
os.makedirs(file)
conn.send(bytes('OK','utf-8'))
else:
conn.send(bytes('NOK','utf-8'))
elif data.startswith('upload'):
try:
file = data.split('(')[1].split(')')[0]
Upload(conn, addr[0], file)
except:
conn.send(bytes('COMANDO INVALIDO','utf-8'))
elif data.startswith('download'):
try:
file = data.split('(')[1].split(')')[0]
Download(conn, addr[0], file)
except:
conn.send(bytes('COMANDO INVALIDO','utf-8'))
else:
print('teste:',data,'teste',len(data))
conn.send(bytes('COMANDO DESCONHECIDO','utf-8'))
except:
conn.send(bytes('ERRO DESCONHECIDO\n','utf-8'))
print('{} encerrou'.format(addr))
#---------------------------------------------------------
# MAIN PROGRAM
#---------------------------------------------------------
pydir= os.path.dirname(os.path.realpath(__file__))
print('Diretorio do script: ', pydir)
os.chdir(pydir)
print('Simple File Transfer Protocol Server\n')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', 9999))
except:
print('# erro de bind')
sys.exit()
s.listen(5)
print('aguardando conexões na porta ', 9999)
print('Canal de controle: cliente ----> [9999] servidor')
print('Canal de dados (call back): servidor ----> [9998] cliente')
while True:
conn, addr = s.accept()
print('recebi uma conexao do cliente ', addr)
t = threading.Thread( target=TrataCliente, args=(conn,addr,))
t.start()
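# A minimal client sketch (not part of the server above; host and file name are
# illustrative, and this runs as a separate process while the server is listening):
# commands travel over the control channel to port 9999, while file data arrives on a
# call-back connection that the server opens to port 9998 on the client, so the client
# must listen on 9998 before issuing a download.
import socket
ctrl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ctrl.connect(('127.0.0.1', 9999))
ctrl.recv(2)                                   # consume the initial '\r\n' prompt
data_srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
data_srv.bind(('', 9998))                      # open the data port first
data_srv.listen(1)
ctrl.send(bytes('download(exemplo.txt)\n', 'utf-8'))
conn_data, _ = data_srv.accept()               # server connects back and streams the file
with open('exemplo_local.txt', 'wb') as out:
    while True:
        chunk = conn_data.recv(1024)
        if not chunk:
            break
        out.write(chunk)
conn_data.close()
print(ctrl.recv(1024).decode('utf-8'))         # e.g. 'TRANSMISSAO ENCERRADA'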
| 28.4875
| 72
| 0.426942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,189
| 0.260688
|
3de3ed318e614e22c2b9f52348133eddba3a0fee
| 2,424
|
py
|
Python
|
messages.py
|
runjak/hoodedFigure
|
539c9839dd47bc181e592bf4a61eaab361b8d316
|
[
"MIT"
] | null | null | null |
messages.py
|
runjak/hoodedFigure
|
539c9839dd47bc181e592bf4a61eaab361b8d316
|
[
"MIT"
] | null | null | null |
messages.py
|
runjak/hoodedFigure
|
539c9839dd47bc181e592bf4a61eaab361b8d316
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import random
sentences = [
"Going into the #dogpark is not allowed, @%s.",
"That's my favourite #dogpark @%s - no one is allowed to go into it!",
"That #dogpark you mention is forbidden! Please don't, @%s",
"The #dogpark should be secured with electrified barbwire. "
"Don't you agree, @%s?",
"Just make sure NOT TO ENTER the #dogpark @%s.",
"Why would you mention such nasty things like a #dogpark @%s?",
"Remember to share your #dogpark experience "
"so others may also survive @%s!",
"Hi @%s! City council discourages the term #dogpark for security reasons.",
"You are not a dog, @%s! Please don't think of the #dogpark.",
"@%s in the #dogpark all dogs have 8 legs. Scary.",
"Please return to safety @%s! Don't linger in the #dogpark.",
"Hey @%s… I got notice that the #dogpark "
"will get fortified with spikes and lava soon.",
"Beware @%s. Today the #dogpark is full of deer. "
"Dangerous with their sharp claws and many heads.",
"There is a time and place for everything @%s. "
"But it's not the #dogpark. An acid pit is much saver.",
"@%s do you know that the #dogpark is actually a pond of molten lava?",
"@%s beware - flesh entering the #dogpark without correct papers "
"will actually turn into a liquid.",
"Only truely evil spirits may enter the #dogpark. Are you one of us, @%s?",
"I heard a five headed dragon near the #dogpark might try to dine on @%s.",
"@%s and I are sure that the #dogpark is protected by a smiling god "
"that replaces your blood with liquid led.",
"In the #dogpark everyone becomes a stick in an eternal play of fetch. "
"Be careful @%s.",
"You may eat your own dogfood - but please: "
"NEVER walk your own #dogpark, @%s.",
"There is a non-zero chance that thinking the word #dogpark "
"replaces your neurons with ants, @%s.",
"The #dogpark will not harm you, @%s. "
"Provided you have wings. And antlers.",
]
def replyDictFromTweet(status):
msg = random.choice(sentences) % status.user.screen_name
if len(msg) > 140:
print('Cannot send message:', msg)
return None
statusParams = {
'status': msg,
'in_reply_to_status_id': status.id
}
if status.place:
statusParams['place_id'] = status.place.id
return statusParams
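# A minimal usage sketch (not part of the original module): replyDictFromTweet expects a
# tweepy-style status with .user.screen_name, .id and an optional .place; a stand-in
# object defined here is enough to see the reply parameters it builds.
from types import SimpleNamespace
example_status = SimpleNamespace(
    user=SimpleNamespace(screen_name='hooded_tester'),   # illustrative handle
    id=123456789,
    place=None,
)
print(replyDictFromTweet(example_status))
# e.g. {'status': '... @hooded_tester ...', 'in_reply_to_status_id': 123456789}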
| 44.072727
| 79
| 0.636139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,849
| 0.76216
|
3de5ea40f8bf420e08e8aea386566d9bf26093f0
| 3,595
|
py
|
Python
|
detectron/tests/test_track_losses.py
|
orestis-z/track-rcnn
|
6b2405cb8308168106526b57027a1af3fe9df0f3
|
[
"Apache-2.0"
] | 9
|
2020-10-16T22:20:09.000Z
|
2022-03-22T11:08:01.000Z
|
detectron/tests/test_track_losses.py
|
orestis-z/track-rcnn
|
6b2405cb8308168106526b57027a1af3fe9df0f3
|
[
"Apache-2.0"
] | null | null | null |
detectron/tests/test_track_losses.py
|
orestis-z/track-rcnn
|
6b2405cb8308168106526b57027a1af3fe9df0f3
|
[
"Apache-2.0"
] | 2
|
2021-10-04T14:27:52.000Z
|
2022-03-22T11:07:53.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy import spatial
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import assert_and_infer_cfg
import detectron.utils.c2 as c2_utils
from detectron.utils.math import cosine_similarity
from detectron.modeling.track_rcnn_heads import add_track_losses
from detectron.modeling.detector import DetectionModelHelper
c2_utils.import_custom_ops()
class TrackLossesTest(unittest.TestCase):
"""Unit test class for tracking losses.
"""
def _add_track_losses(self, X, X_gt):
model = DetectionModelHelper(train=False, num_classes=1)
add_track_losses(model)
workspace.FeedBlob('track_similarity', X)
workspace.FeedBlob('track_int32', X_gt)
workspace.RunNetOnce(model.net)
return workspace.FetchBlob('loss_track')
def _add_track_losses_np(self, arr_in, arr_gt):
if cfg.TRCNN.LOSS == 'Cosine':
track_cosine_similarity = cosine_similarity(arr_in, arr_gt)
loss_track_raw = 1 - track_cosine_similarity
elif cfg.TRCNN.LOSS == 'L2':
track_l2_loss = 0.5 * np.sum(np.square(arr_in - arr_gt))
loss_track_raw = track_l2_loss / arr_in.shape[1]
elif cfg.TRCNN.LOSS == 'L2Balanced':
track_int32_non_matches = 1 - arr_gt
track_delta_sq = np.square(arr_in - arr_gt)
loss_track_matches_raw = np.matmul(track_delta_sq, arr_gt.T)[0]
loss_track_non_matches_raw = np.matmul(track_delta_sq, track_int32_non_matches.T)[0]
loss_track_matches = loss_track_matches_raw / np.sum(arr_gt)
loss_track_non_matches = loss_track_non_matches_raw / np.sum(track_int32_non_matches)
loss_track_raw = 0.5 * (loss_track_matches + loss_track_non_matches)
else:
raise NotImplementedError('Test case for loss "{}" not implemented yet'.format(cfg.TRCNN.LOSS))
return cfg.TRCNN.LOSS_WEIGHT * loss_track_raw
def test_gpu_random_input_gpu(self):
X = np.random.rand(1, 6).astype(np.float32)
X_gt = np.random.randint(2, size=(1, 6)).astype(np.float32)
for loss in ['Cosine', 'L2', 'L2Balanced', 'CrossEntropy', 'CrossEntropyBalanced', 'CrossEntropyWeighted']:
cfg.immutable(False)
cfg.TRCNN.LOSS = loss
assert_and_infer_cfg(cache_urls=False)
Y_exp = self._add_track_losses_np(X.copy(), X_gt.copy())
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
Y_act = self._add_track_losses(X.copy(), X_gt.copy())
np.testing.assert_allclose(Y_act, Y_exp, rtol=1e-06)
def test_gpu_random_input(self):
X = np.random.rand(1, 6).astype(np.float32)
X_gt = np.random.randint(2, size=(1, 6)).astype(np.float32)
for loss in ['Cosine', 'L2', 'L2Balanced', 'CrossEntropy', 'CrossEntropyBalanced', 'CrossEntropyWeighted']:
cfg.immutable(False)
cfg.TRCNN.LOSS = loss
assert_and_infer_cfg(cache_urls=False)
Y_exp = self._add_track_losses_np(X.copy(), X_gt.copy())
Y_act = self._add_track_losses(X.copy(), X_gt.copy())
np.testing.assert_allclose(Y_act, Y_exp, rtol=1e-06)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
unittest.main()
| 43.313253
| 115
| 0.69096
| 2,828
| 0.786648
| 0
| 0
| 0
| 0
| 0
| 0
| 363
| 0.100974
|
3de7f52d572f048f38c1b4744268152292a54283
| 4,497
|
py
|
Python
|
torch/nn/_functions/thnn/upsampling.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | null | null | null |
torch/nn/_functions/thnn/upsampling.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | null | null | null |
torch/nn/_functions/thnn/upsampling.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | null | null | null |
from numbers import Integral
import torch
from torch.autograd import Function
from torch._thnn import type2backend
from . import _all_functions
from ...modules.utils import _pair
from ...functional import _check_bilinear_2d_scale_factor
class _UpsamplingBase(Function):
def __init__(self, size=None, scale_factor=None):
super(_UpsamplingBase, self).__init__()
if size is None and scale_factor is None:
raise ValueError('either size or scale_factor should be defined')
if scale_factor is not None and not isinstance(scale_factor, (Integral, tuple)):
raise ValueError('scale_factor must be of integer type or tuple of integer types')
self.size = size
self.scale_factor = scale_factor
class UpsamplingNearest2d(_UpsamplingBase):
def __init__(self, size=None, scale_factor=None):
super(UpsamplingNearest2d, self).__init__(size, scale_factor)
if self.scale_factor is not None and not isinstance(scale_factor, Integral):
raise ValueError('scale_factor must be of integer type for nearest neighbor sampling')
self.size = _pair(self.size) if self.size is not None else None
def forward(self, input):
assert input.dim() == 4
if self.scale_factor is None:
if (self.size[0] % input.size(2) != 0 or
self.size[1] % input.size(3) != 0):
raise RuntimeError("output size specified in UpSamplingNearest "
"({}) has to be divisible by the input size, but got: "
"{}".format('x'.join(map(str, self.size)),
'x'.join(map(str, input.size()))))
self.scale_factor = self.size[0] // input.size(2)
if self.scale_factor != self.size[1] // input.size(3):
raise RuntimeError("input aspect ratio doesn't match the "
"output ratio")
output = input.new()
backend = type2backend[type(input)]
self.save_for_backward(input)
backend.SpatialUpSamplingNearest_updateOutput(
backend.library_state,
input,
output,
self.scale_factor
)
return output
def backward(self, grad_output):
input, = self.saved_tensors
grad_input = grad_output.new()
backend = type2backend[type(input)]
backend.SpatialUpSamplingNearest_updateGradInput(
backend.library_state,
input,
grad_output,
grad_input,
self.scale_factor
)
return grad_input
class UpsamplingBilinear2d(_UpsamplingBase):
def __init__(self, size=None, scale_factor=None):
super(UpsamplingBilinear2d, self).__init__(size, scale_factor)
if self.scale_factor is not None:
self.scale_factor = _check_bilinear_2d_scale_factor(self.scale_factor)
self.size = _pair(self.size) if self.size is not None else None
def forward(self, input):
assert input.dim() == 4
if self.scale_factor is not None:
self.output_size = (
input.size(2) * self.scale_factor[0],
input.size(3) * self.scale_factor[1],
)
else:
self.output_size = self.size
self.input_size = input.size()
output = input.new()
backend = type2backend[type(input)]
backend.SpatialUpSamplingBilinear_updateOutput(
backend.library_state,
input,
output,
self.output_size[0],
self.output_size[1],
)
return output
def backward(self, grad_output):
assert grad_output.dim() == 4
grad_output = grad_output.contiguous()
grad_input = grad_output.new()
backend = type2backend[type(grad_output)]
backend.SpatialUpSamplingBilinear_updateGradInput(
backend.library_state,
grad_output,
grad_input,
self.input_size[0],
self.input_size[1],
self.input_size[2],
self.input_size[3],
self.output_size[0],
self.output_size[1],
)
return grad_input
def __setstate__(self, state):
self.__dict__.update(state)
        self.scale_factor = _pair(self.scale_factor)  # normalize to a pair, as in __init__
_all_functions.append(UpsamplingNearest2d)
_all_functions.append(UpsamplingBilinear2d)
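# A minimal illustration (separate from the THNN bindings above): for an integer scale
# factor, nearest-neighbour upsampling simply repeats each pixel along both spatial axes
# of an N x C x H x W tensor, which numpy can mimic with np.repeat.
import numpy as np
x_demo = np.arange(4.0).reshape(1, 1, 2, 2)
up_demo = np.repeat(np.repeat(x_demo, 2, axis=2), 2, axis=3)
print(up_demo.shape)   # (1, 1, 4, 4)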
| 34.328244
| 98
| 0.610407
| 4,162
| 0.925506
| 0
| 0
| 0
| 0
| 0
| 0
| 342
| 0.076051
|
3de882780eafbe1233cbdcdf8b3eb920ea7971b8
| 7,869
|
py
|
Python
|
Day17/17_trick_shot.py
|
schca675/my-code-for-advent-of-code-2021
|
e8bdb986930b444884d37e679a37ed25efe2b34e
|
[
"Apache-2.0"
] | null | null | null |
Day17/17_trick_shot.py
|
schca675/my-code-for-advent-of-code-2021
|
e8bdb986930b444884d37e679a37ed25efe2b34e
|
[
"Apache-2.0"
] | null | null | null |
Day17/17_trick_shot.py
|
schca675/my-code-for-advent-of-code-2021
|
e8bdb986930b444884d37e679a37ed25efe2b34e
|
[
"Apache-2.0"
] | null | null | null |
# --- Day 17: Trick Shot ---
import math
import time
def get_puzzle_input(filepath):
with open(filepath) as f:
for line in f:
# target area: x=269..292, y =-68..-44
parts = line.rstrip().replace(',', '').split()
[x1, x2] = parts[2][2:].split("..")
[y1, y2] = parts[3][2:].split("..")
return int(x1), int(x2), int(y1), int(y2)
class Position:
def __init__(self, vx, vy):
self.x = 0
self.y = 0
self.max_y = 0
self.vx = vx
self.vy = vy
self.steps_to_start_searching_y = 10000
# def get_position_at_step(self, t):
# if self.vx >= 0:
# x = max(0, self.vx - t) + self.x
# else:
# x = min(0, self.vx + t) + self.x
# y = min(0, self.vy + t) + self.y
# return x, y
def __eq__(self, other):
if type(other) == type([1,2]):
return other[0] == self.vx and other[1] == self.vy
        return self.vx == other.vx and self.vy == other.vy
def __gt__(self, other):
return self.x > other.x
def __lt__(self, other):
return self.x < other.x
def get_position_at_step(self, t):
if self.vx >= 0:
if self.vx+1 <= t:
# there is no horizontal speed left anymore
x = 0.5* (self.vx*self.vx + self.vx)
else:
# t * self.vy - (0.5 * (t - 1) * t) # TODO
x = t*self.vx - 0.5*(t*t - t)
# vx_t == 0 <=> t >= vx+1
else:
if self.vx+1 <= t:
vx_abs = abs(self.vx)
# there is no horizontal speed left anymore
x = vx_abs * self.vx + 0.5* (vx_abs * vx_abs + vx_abs)
else:
x = t * self.vx + 0.5 * (t * t + t)
y = self.get_y_at_step(t)
return x, y
def get_y_at_step(self, t):
# vy_t = vy - t
# y_t = y_t-1 + vy_t = y_t-1 + (vy-t)
# = vy-t + vy-(t-1) + vy-(t-2) + ... + vy-0
# = (t+1) * vy - sum_0^t = (t+1) vy - t(t+1)/2
# y_1 = 0 + vy
# y_2 = vy + vy-1 = 2*vy - 1
# y_3 = 2vy -1 + vy - 2 = 3 vy - sum_1^t-1
# y_t =
return t* self.vy -(0.5 * (t-1) * t) #TODO
def __str__(self):
return "{},{} {} steps".format(str(self.vx), str(self.vy), self.steps_to_start_searching_y)
def check_if_hitting_target(self, x1, x2, y1, y2, maxsteps=1000):
### assuming positive distances
# Otherwise make: if max(x1, 0) > x > min(x2, 0): for first inequation
# if at t steps: y is under y1 but left of x1: will not hit it anymore
# | |
# x1|________|x2 .0
# .
# |
# At vx steps --> no more horizontal movement left
## if we still havent reached target in a horizontal level --> will not hit it
x, y = self.get_position_at_step(self.vx)
if x < x1:
return False
## if the y position once we only move straight down is above the bottom y line
# y2_________
# | . |
# y1|____|___|
# |
# |
if x1 <= x <= x2:
# Hits target only if movement stops above the target area, but could miss while going down
if y >= min(y1, y2):
self.steps_to_start_searching_y = self.vx
# check every step after that
t = self.vx
while y >= y1:
y = self.get_y_at_step(t)
if y2 >= y >= y1:
return True
t += 1
return False
# Else: check whether the steps before hit the target area at a integer step
t = self.vx - 1
wx,wy = self.get_position_at_step(max(t - 10, 0))
while wx > x2:
t -=10
wx,wy = self.get_position_at_step(max(t - 10, 0))
while x >= x1:
x, y = self.get_position_at_step(t)
if x1 <= x <= x2 and y2 >= y >= y1:
return True
t -= 1
#### has some mistake in it...
# next_t = round(self.vx/2)
# prev_t = self.vx
# while next_t != prev_t:
# prev_round = prev_t
# prev_t = next_t
# # new target: trunc(vx/2)
# new_x, new_y = self.get_position_at_step(next_t)
# # as long
# if x1 <= new_x <=x2 and y1 <= new_y <= y2:
# self.steps_to_start_searching_y = next_t
# return True
# elif new_x < x1:
# if new_y < y1:
# # left of target but still below
# return False
# else:
# # go to right, add half distance
# next_t += round(0.5*(prev_round - next_t))
# elif new_x <= x2 and new_y > y2:
# # go to right (steps forwards)
# next_t += round(0.5 * (prev_round - next_t))
# else:
# # now one is either still to the right of target area or under it, so to have a chance to hit it:
# # must be on an earlier step
# if prev_round > next_t:
# # now reference point is 0
# next_t = round(next_t/2)
# else:
# next_t -= round(0.5*(next_t - prev_round))
return False
def check_highest_y(self, left_t, right_t, t):
y = self.get_y_at_step(t)
# check step one to left and one to right
next_y = self.get_y_at_step(t+1)
if next_y > y:
# y+1/ \
# y / \
# \
# still going up -> take step to right
new_t = t + math.floor(0.5*(right_t - t))
return self.check_highest_y(left_t=t, right_t=right_t, t=new_t)
prev_y = self.get_y_at_step(t-1)
if prev_y > y:
# / \ y-1
# / \ y
# \
# going down --> take step to the left
new_t = t - math.floor(0.5*(t - left_t))
return self.check_highest_y(left_t=left_t, right_t=t, t=new_t)
# if neither are true: y >= next and prev y ---> highest y achieved
return y
def get_highest_y(self):
if self.vy <= 0:
# if vy is negative highest point is at start
return 0
# return self.check_highest_y(0, self.target_hit_after_steps, self.target_hit_after_steps)
return self.vy*(self.vy+1)/2
def get_highest_trick_shot(x1, x2, y1, y2):
hitting_shots = []
max_y_at_hitting_shots = []
# try different vx and vy
for vx in range(math.floor(math.sqrt(x1)), x2+1):
for vy in range(y1, 10*x2): #969
# for vy in range(-100, 5000): 969
pos = Position(vx, vy)
hits = pos.check_if_hitting_target(x1, x2, y1, y2)
if hits:
hitting_shots.append(pos)
max_y_at_hitting_shots.append(pos.get_highest_y())
return max(max_y_at_hitting_shots), len(hitting_shots), hitting_shots
def resolve_puzzle_part1(filepath):
x1, x2, y1, y2 = get_puzzle_input(filepath)
y, count, hitting_shots = get_highest_trick_shot(x1, x2, y1, y2)
print("HIghest position is: {}, Count: {}".format(y, count))
with open("hitting_shots.txt") as f:
for line in f:
hits = line.rstrip().split()
int_hits = []
for hit in hits:
[x, y] = hit.split(',')
int_hits.append([int(x), int(y)])
pass
print("TEST")
start = time.time()
resolve_puzzle_part1("test_data.txt")
print("Time: {}".format(time.time()-start))
print("PUZZLE")
start = time.time()
resolve_puzzle_part1("data.txt")
print("Time: {}".format(time.time()-start))
| 35.931507
| 115
| 0.490532
| 6,188
| 0.786377
| 0
| 0
| 0
| 0
| 0
| 0
| 3,026
| 0.384547
|
3de9f24b49937335e24db781a7e382e77643515c
| 568
|
py
|
Python
|
zip_files.py
|
VladimirsHisamutdinovs/Advanced_Python_Operations
|
509c219f70adcbe9b3dedd71bff819494bab9c83
|
[
"Apache-2.0"
] | null | null | null |
zip_files.py
|
VladimirsHisamutdinovs/Advanced_Python_Operations
|
509c219f70adcbe9b3dedd71bff819494bab9c83
|
[
"Apache-2.0"
] | null | null | null |
zip_files.py
|
VladimirsHisamutdinovs/Advanced_Python_Operations
|
509c219f70adcbe9b3dedd71bff819494bab9c83
|
[
"Apache-2.0"
] | null | null | null |
import zipfile
zip_file = zipfile.ZipFile("zip_archive.zip", "w")
zip_file.write("textfile_for_zip_01")
zip_file.write("textfile_for_zip_02")
zip_file.write("textfile_for_zip_03")
# print(zipfile.is_zipfile("zip_archive.zip"))
# zip_file = zipfile.ZipFile("zip_archive.zip")
# print(zip_file.namelist())
# print(zip_file.infolist())
# zip_info = zip_file.getinfo("textfile_for_zip_02")
# print(zip_info.file_size)
# print(zip_file.read("textfile_for_zip_01"))
zip_file.extract("textfile_for_zip_02")
zip_file.extractall()
zip_file.close()
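# A minimal follow-up sketch (not part of the original snippet; file names are
# illustrative): ZipFile stores members uncompressed by default (ZIP_STORED); passing
# ZIP_DEFLATED enables compression, and members can be read back without extracting.
import zipfile
with open("example.txt", "w") as f:
    f.write("hello zip\n")
with zipfile.ZipFile("compressed.zip", "w", compression=zipfile.ZIP_DEFLATED) as zf:
    zf.write("example.txt")
with zipfile.ZipFile("compressed.zip") as zf:
    print(zf.read("example.txt").decode())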
| 24.695652
| 53
| 0.748239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 384
| 0.676056
|
3deab92507c5a88674b2ab8baa0fe1cd63998a28
| 21,024
|
py
|
Python
|
omdrivers/lifecycle/iDRAC/iDRACUpdate.py
|
rajroyce1212/Ansible-iDRAC
|
4ce00b605ee2e128ad98b572759e860bae3da3dc
|
[
"Apache-2.0"
] | 61
|
2018-02-21T00:02:20.000Z
|
2022-01-26T03:47:19.000Z
|
omdrivers/lifecycle/iDRAC/iDRACUpdate.py
|
rajroyce1212/Ansible-iDRAC
|
4ce00b605ee2e128ad98b572759e860bae3da3dc
|
[
"Apache-2.0"
] | 31
|
2018-03-24T05:43:39.000Z
|
2022-03-16T07:10:37.000Z
|
omdrivers/lifecycle/iDRAC/iDRACUpdate.py
|
rajroyce1212/Ansible-iDRAC
|
4ce00b605ee2e128ad98b572759e860bae3da3dc
|
[
"Apache-2.0"
] | 25
|
2018-03-13T10:06:12.000Z
|
2022-01-26T03:47:21.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import sys
import os
import re
import json
import time
import glob
import xml.etree.ElementTree as ET
from enum import Enum
from datetime import datetime
from omsdk.sdkprint import PrettyPrint
from omsdk.sdkcenum import EnumWrapper, TypeHelper
from omsdk.lifecycle.sdkupdate import Update
from omsdk.catalog.sdkupdatemgr import UpdateManager
from omsdk.catalog.updaterepo import RepoComparator, UpdateFilterCriteria
from omsdk.catalog.updaterepo import UpdatePresenceEnum, UpdateNeededEnum, UpdateTypeEnum
from omdrivers.enums.iDRAC.iDRACEnums import *
from omsdk.sdkcunicode import UnicodeWriter
from omsdk.sdkfile import FileOnShare
import logging
logger = logging.getLogger(__name__)  # module logger used by the methods below
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
try:
from pysnmp.hlapi import *
from pysnmp.smi import *
PySnmpPresent = True
except ImportError:
PySnmpPresent = False
class iDRACUpdate(Update):
def __init__(self, entity):
if PY2:
super(iDRACUpdate, self).__init__(entity, iDRACFirmEnum)
else:
super().__init__(entity, iDRACFirmEnum)
self.reset()
self._job_mgr = entity.job_mgr
def _sw_instance(self, comp):
ilist = []
clist = self._comp_to_fqdd(comp)
for firmware in self.firmware_json["Firmware"]:
if firmware['FQDD'] in clist and firmware['Status'] == "Installed":
ilist.append(firmware['InstanceID'])
return ilist
def _update_from_uri(self, firm_image_path, componentFQDD, job_wait=True):
rjson = self.entity._install_from_uri(uri=firm_image_path, target=componentFQDD)
        rjson['file'] = str(firm_image_path)
if job_wait:
rjson = self._job_mgr._job_wait(rjson['file'], rjson)
return rjson
def reset(self):
self.sw_inited = False
self._swidentity = {}
self.firmware_json = {}
self.installed_firmware = {}
def get_swidentity(self):
if self.sw_inited:
logger.debug("Already present")
return self.firmware_json
self.entity._get_entries(self.firmware_json, self.firmware_enum)
logger.debug(self.firmware_json)
for obj in self.firmware_json:
self.installed_firmware[obj] = []
for entry in self.firmware_json[obj]:
if 'Status' in entry and entry['Status'] == 'Installed':
self.installed_firmware[obj].append(entry)
return self.firmware_json
def _get_swidentity_hash(self):
self.get_swidentity()
for comp in self.firmware_json:
for swentry in self.firmware_json[comp]:
if not "FQDD" in swentry:
continue
if swentry["FQDD"] in self._swidentity:
if not isinstance(self._swidentity[swentry["FQDD"]], list):
self._swidentity[swentry["FQDD"]] = [self._swidentity[swentry["FQDD"]]]
else:
self._swidentity[swentry["FQDD"]] = {}
self._swidentity[swentry["FQDD"]] = {}
if "ComponentID" in swentry and swentry["ComponentID"]:
for val in ["ComponentID"]:
self._swidentity[swentry["FQDD"]][val] = swentry[val]
else:
for val in ["VendorID", "SubVendorID", "DeviceID", "SubDeviceID"]:
self._swidentity[swentry["FQDD"]][val] = swentry[val]
for val in ["ComponentType", "InstanceID", "VersionString", "Status"]:
self._swidentity[swentry["FQDD"]][val] = swentry[val]
self._swidentity[swentry["FQDD"]]["ComponentClass"] = "unknown"
# TODO RESTORE
# for mycomp in self.protocolspec.compmap:
# if re.match(self.protocolspec.compmap[mycomp],swentry["FQDD"]):
# self.swidentity[swentry["FQDD"]]["ComponentClass"] = mycomp
self.sw_inited = True
return self._swidentity
def get_installed_fw_redfish(self):
try:
rjson = self.entity._list_fw_inventory_redfish()
if rjson['Status'] != 'Success':
return rjson
members_uris = self.get_redfishjson_using_responsecode(rjson)
if not members_uris:
logger.debug("Failed to get installed firmware")
return {"Status": "Failed", "Message": "Unable to get Installed Firmware"}
fwlist = []
for member in members_uris:
member_uri = member['@odata.id']
if "Previous" not in member_uri:
rjson = self.get_fwdetail_using_uri(str(member_uri))
if rjson:
fwlist.append(rjson)
return {"Firmware": fwlist}
except:
logger.debug("Failed to get installed firmware")
return {"Status": "Failed", "Message": "Unable to get Installed Firmware"}
def get_fwdetail_using_uri(self, r_uri):
try:
rjson = self.entity._get_resource_redfish(resource_uri=r_uri)
if 'Data' not in rjson or rjson['Status'] != 'Success' or 'body' not in rjson['Data']:
return None
fw_json = {}
fw_json['Name'] = rjson['Data']['body']['Name']
fw_json['Id'] = rjson['Data']['body']['Id']
fw_json['Status'] = rjson['Data']['body']['Status']
fw_json['Updateable'] = rjson['Data']['body']['Updateable']
fw_json['Version'] = rjson['Data']['body']['Version']
return fw_json
except:
logger.debug("Error in getting fw deatil from uri:" + r_uri)
return None
def get_redfishjson_using_responsecode(self, r_json):
try:
if 'Data' not in r_json:
logger.debug("Failed to get json from response")
return None
if 'body' not in r_json['Data']:
logger.debug("reponse body is not present")
return None
if 'Members' not in r_json['Data']['body']:
logger.debug("No installed firmware found")
return None
return r_json['Data']['body']['Members']
except Exception:
logger.debug("Failed to get installed firmware, exception:")
return None
@property
def InstalledFirmware(self):
if self.entity.use_redfish:
return self.get_installed_fw_redfish()
self.get_swidentity()
return self.installed_firmware
@property
def AllUpdates(self):
return self.get_updates_matching(catalog='Catalog')
@property
def AvailableUpdates(self):
criteria = UpdateFilterCriteria()
criteria.include_packages(UpdatePresenceEnum.Present)
return self.get_updates_matching(catalog='Catalog', criteria=criteria)
@property
def NeededUpdates(self):
criteria = UpdateFilterCriteria()
criteria.include_update_needed(UpdateNeededEnum.Needed)
return self.get_updates_matching(catalog='Catalog', criteria=criteria)
def get_updates_matching(self, catalog='Catalog', criteria=None):
updmgr = UpdateManager.get_instance()
if not updmgr:
updates = RepoComparator(self.InstalledFirmware).final()
else:
(ignore, cache_cat) = updmgr.getCatalogScoper(catalog)
updates = cache_cat.compare(self.entity.SystemIDInHex,
self.InstalledFirmware)
if not criteria:
return updates
retval = {}
for comp in updates:
for update in updates[comp]:
if not criteria.meets(update):
continue
if comp not in retval:
retval[comp] = []
retval[comp].append(update)
return retval
def save_invcollector_file(self, invcol_output_file):
with UnicodeWriter(invcol_output_file) as output:
self._save_invcollector(output)
def serialize_inventory(self, myshare):
share = myshare.format(ip=self.entity.ipaddr)
swfqdd_list = [firmware['FQDD'] for firmware in \
self.InstalledFirmware["Firmware"]]
with UnicodeWriter(share.local_full_path) as f:
f._write_output(json.dumps({
'Model_Hex': self.entity.SystemIDInHex,
'Model': self.entity.Model,
'IPAddress': self.entity.ipaddr,
'ServiceTag': self.entity.ServiceTag,
'Firmware': self.InstalledFirmware['Firmware'],
'ComponentMap': self.entity.config_mgr._fqdd_to_comp_map(swfqdd_list)},
sort_keys=True, indent=4, separators=(',', ': ')))
def update_from_repo(self, catalog_path, apply_update=True, reboot_needed=False, job_wait=True):
if isinstance(catalog_path, str):
# Catalog name
updmgr = UpdateManager.get_instance()
if not updmgr: return {}
(cache_share, ignore) = updmgr.getCatalogScoper(catalog_path)
else:
# DRM Repo
cache_share = catalog_path
catalog_dir = FileOnShare(remote=cache_share.remote_folder_path, isFolder=True, creds=cache_share.creds)
catalog_file = cache_share.remote_file_name
if self.entity.use_redfish:
if isinstance(catalog_path, FileOnShare) and catalog_path.mount_point is None:
raise ValueError("Share path or mount point does not exist")
rjson = self.entity._update_from_repo_using_redfish(ipaddress=catalog_dir.remote_ipaddr,
share_name=catalog_dir.remote.share_name,
share_type=IFRShareTypeEnum[catalog_dir.remote_share_type.name.lower()],
username=catalog_dir.creds.username,
password=catalog_dir.creds.password,
reboot_needed=reboot_needed,
catalog_file=catalog_file,
apply_update=ApplyUpdateEnum[str(apply_update)],
ignore_cert_warning=IgnoreCertWarnEnum['On'])
if TypeHelper.resolve(catalog_dir.remote_share_type) == TypeHelper.resolve(ShareTypeEnum.NFS):
rjson = self.entity._update_repo_nfs(share=catalog_dir, creds=catalog_dir.creds, catalog=catalog_file,
apply=URLApplyUpdateEnum[str(apply_update)].value,
reboot=RebootEnum[str(reboot_needed)].value)
else:
rjson = self.entity._update_repo(share=catalog_dir, creds=catalog_dir.creds, catalog=catalog_file,
apply=URLApplyUpdateEnum[str(apply_update)].value,
reboot=RebootEnum[str(reboot_needed)].value)
rjson['file'] = str(cache_share)
if job_wait:
rjson = self._job_mgr._job_wait(rjson['file'], rjson)
if not self.entity.use_redfish:
rjson['job_details'] = self.entity._update_get_repolist()
return rjson
def update_from_dell_repo_url(self, ipaddress=None, share_name=None, share_type=None,
catalog_file="Catalog.xml", apply_update=True, reboot_needed=False,
ignore_cert_warning=True, job_wait=True):
rjson = self.entity._update_dell_repo_url(ipaddress=ipaddress, share_type=URLShareTypeEnum[share_type].value,
catalog_file=catalog_file,
apply_update=URLApplyUpdateEnum[str(apply_update)].value,
reboot_needed=RebootEnum[str(reboot_needed)].value,
ignore_cert_warning=URLCertWarningEnum[str(ignore_cert_warning)].value)
file_format = "{0}://{1}/{2}/{3}" if share_name else "{0}://{1}{2}/{3}"
rjson['file'] = file_format.format(share_type, ipaddress, share_name, catalog_file)
if job_wait:
rjson = self._job_mgr._job_wait(rjson['file'], rjson)
if not self.entity.use_redfish:
rjson['job_details'] = self.entity._update_get_repolist()
return rjson
def update_from_repo_url(self, ipaddress=None, share_type=None, share_name=None, share_user=None,
share_pwd=None, catalog_file="Catalog.xml", apply_update=True, reboot_needed=False,
ignore_cert_warning=True, job_wait=True):
if self.entity.use_redfish:
warning = IgnoreCertWarnEnum["On"] if ignore_cert_warning else IgnoreCertWarnEnum["Off"]
rjson = self.entity._update_from_repo_using_redfish(ipaddress=ipaddress, share_name=share_name,
share_type=IFRShareTypeEnum[share_type],
username=share_user, password=share_pwd,
reboot_needed=reboot_needed, catalog_file=catalog_file,
apply_update=ApplyUpdateEnum[str(apply_update)],
ignore_cert_warning=warning.value)
else:
rjson = self.entity._update_repo_url(ipaddress=ipaddress, share_type=URLShareTypeEnum[share_type].value,
share_name=share_name, catalog_file=catalog_file,
apply_update=URLApplyUpdateEnum[str(apply_update)].value,
reboot_needed=RebootEnum[str(reboot_needed)].value,
ignore_cert_warning=URLCertWarningEnum[str(ignore_cert_warning)].value)
file_format = "{0}://{1}/{2}/{3}" if share_name else "{0}://{1}{2}/{3}"
rjson['file'] = file_format.format(share_type, ipaddress, share_name, catalog_file)
if job_wait:
rjson = self._job_mgr._job_wait(rjson['file'], rjson)
if not self.entity.use_redfish:
rjson['job_details'] = self.entity._update_get_repolist()
return rjson
    ## The methods below, which update firmware using Redfish, will be reimplemented using the Type Manager system
def _get_scp_path(self, catalog_dir):
"""
:param catalog_dir: object for Folder containing Catalog on share.
:param catalog_dir: FileOnShare.
:returns: returns a tuple containing remote scp path(full) and the scp file name
"""
catalog_path_str = catalog_dir.remote_full_path
scp_file = 'scp_' + self.entity.ServiceTag + '_' + datetime.now().strftime('%Y%m%d_%H%M%S') + ".xml"
scp_path = catalog_path_str + os.path.sep + scp_file
return (scp_path, scp_file)
def update_from_repo_usingscp_redfish(self, catalog_dir, catalog_file, mount_point, apply_update=True,
reboot_needed=False, job_wait=True):
"""Performs firmware update on target server using scp RepositoyUpdate attribute
:param catalog_dir: object for Folder containing Catalog on share.
:param catalog_dir: FileOnShare.
:param catalog_file: Catalog file name
:param catalog_file: str.
:param mount_point: local share on which remote(catalog_dir) folder has been mounted
:param mount_point: str.
:returns: returns status of firmware update through scp
"""
(scp_path, scp_file) = self._get_scp_path(catalog_dir)
myshare = FileOnShare(scp_path).addcreds(catalog_dir.creds)
# exports only that component which contains RepositoryUpdate attribute
rjson = self.entity.config_mgr.scp_export(share_path=myshare, target='System.Embedded.1')
if 'Status' not in rjson or rjson['Status'] != 'Success':
return {'Status': 'Failed', 'Message': 'Export of scp failed for firmware update'}
scpattrval = {'RepositoryUpdate': catalog_file}
localfile = mount_point.share_path + os.path.sep + scp_file
self.edit_xml_file(localfile, scpattrval)
if reboot_needed:
shutdown = ShutdownTypeEnum.Graceful
else:
shutdown = ShutdownTypeEnum.NoReboot
rjson = self.entity.config_mgr.scp_import(share_path=myshare, shutdown_type=shutdown, job_wait=job_wait)
if job_wait:
rjson['file'] = localfile
rjson = self._job_mgr._job_wait(rjson['file'], rjson)
rjson['job_details'] = self.entity._update_get_repolist()
return rjson
def edit_xml_file(self, file_location, attr_val_dict):
"""Edit and save exported scp's attributes which are passed in attr_val_dict
:param file_location: locally mounted location(full path) of the exported scp .
:param file_location: str.
:param attr_val_dict: attribute and value pairs as dict
:param attr_val_dict: dict.
:returns: returns None
"""
tree = ET.parse(file_location)
root = tree.getroot()
for attr in attr_val_dict:
xpath = ".//*[@Name='" + str(attr) + "']"
attribute_element = root.find(xpath)
attribute_element.text = str(attr_val_dict.get(attr))
tree.write(file_location)
return
def update_get_repolist(self):
return self.entity._update_get_repolist()
def _save_invcollector(self, output):
# self.entity.get_entityjson()
# if not "System" in self.entity.entityjson:
# logger.debug("ERROR: Entityjson is empty")
# return
self._get_swidentity_hash()
output._write_output('<SVMInventory>\n')
output._write_output(' <System')
if "System" in self.entity.entityjson:
for (invstr, field) in [("Model", "Model"), ("systemID", "SystemID"), ("Name", "HostName")]:
if field in self.entity.entityjson["System"]:
output._write_output(" " + invstr + "=\"" + self.entity.entityjson["System"][field] + "\"")
output._write_output(
' InventoryTime="{0}">\n'.format(str(datetime.strftime(datetime.now(), "%Y-%m-%dT%H:%M:%S"))))
for ent in self._swidentity:
output._write_output(' <Device')
for (invstr, field) in [("componentID", "ComponentID"),
("vendorID", "VendorID"),
("deviceID", "DeviceID"),
("subVendorID", "SubVendorID"),
("subDeviceID", "SubDeviceID")]:
if field in self._swidentity[ent]:
output._write_output(" " + invstr + "=\"" + self._swidentity[ent][field] + "\"")
output._write_output(' bus="" display="">\n')
output._write_output(' <Application componentType="{0}" version="{1}" display="" />\n'.format(
self._swidentity[ent]["ComponentType"], self._swidentity[ent]["VersionString"]))
output._write_output(' </Device>\n')
output._write_output(' </System>\n')
output._write_output('</SVMInventory>\n')
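# A minimal illustration (separate from the class above): edit_xml_file() locates an SCP
# attribute node by its Name and overwrites its text, e.g. to point RepositoryUpdate at a
# catalog file. The same ElementTree pattern on a toy document (contents are illustrative):
import xml.etree.ElementTree as ET
demo_root = ET.fromstring(
    '<SystemConfiguration><Component FQDD="System.Embedded.1">'
    '<Attribute Name="RepositoryUpdate">old.xml</Attribute>'
    '</Component></SystemConfiguration>')
demo_node = demo_root.find(".//*[@Name='RepositoryUpdate']")
demo_node.text = 'Catalog.xml'
print(ET.tostring(demo_root, encoding='unicode'))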
| 50.176611
| 137
| 0.581573
| 19,280
| 0.917004
| 0
| 0
| 755
| 0.03591
| 0
| 0
| 4,482
| 0.213175
|
3dec8f27fe9f9465de4b1a61485314e099192b22
| 3,196
|
py
|
Python
|
playthrough/management/commands/migrate_shogun.py
|
SciADV-Community/genki
|
b86811695c428ca93bdab3ea2f68e3a99713d4db
|
[
"MIT"
] | null | null | null |
playthrough/management/commands/migrate_shogun.py
|
SciADV-Community/genki
|
b86811695c428ca93bdab3ea2f68e3a99713d4db
|
[
"MIT"
] | 11
|
2020-10-15T01:19:24.000Z
|
2022-03-28T04:09:43.000Z
|
playthrough/management/commands/migrate_shogun.py
|
SciADV-Community/genki
|
b86811695c428ca93bdab3ea2f68e3a99713d4db
|
[
"MIT"
] | 1
|
2021-01-11T19:56:02.000Z
|
2021-01-11T19:56:02.000Z
|
import argparse
import os
import sqlite3
from django.core.management.base import BaseCommand
from playthrough.models import Alias, Channel, Game, GameConfig, Guild, RoleTemplate, User
class Command(BaseCommand):
help = 'Migrates a DB from \'Shogun\' bot (SciADV-Community/playthrough-bot).'
@staticmethod
def _db_path(path: str):
if os.path.isfile(path) and path.endswith('.db'):
return path
else:
raise argparse.ArgumentTypeError(f'{path} is not a valid path to an SQLite Database.')
def add_arguments(self, parser):
parser.add_argument('sqlite_file', type=self._db_path)
def handle(self, *args, **options):
conn = sqlite3.connect(options['sqlite_file'])
c = conn.cursor()
# Migrate Guilds
c.execute('SELECT Guild_ID, Guild_Name FROM Config')
guilds_in_db = c.fetchall()
for guild in guilds_in_db:
Guild.objects.get_or_create(id=guild[0], name=guild[1])
# Migrate Games
self.stdout.write('- Migrating games...')
c.execute('SELECT name, channel_suffix, role_name FROM Game')
games_in_db = c.fetchall()
self.stdout.write(f'- - Found {len(games_in_db)} games.')
for game_row in games_in_db:
self.stdout.write(f'- - Migrating {game_row[0]}')
role_template = RoleTemplate.objects.create(name=game_row[2])
game = Game.objects.get_or_create(
name=game_row[0]
)[0]
game.channel_suffix = f'-plays-{game_row[1]}'
game.completion_role = role_template
game.save()
self.stdout.write(f'- - Saved {game_row[0]}.')
# Migrate Aliases
c.execute('SELECT alias FROM Game_Alias WHERE game_name = ?', (game.name,))
aliases = c.fetchall()
self.stdout.write(f'- - - Found {len(aliases)} aliases.')
game.aliases.clear()
for alias in aliases:
game.aliases.add(Alias(alias=alias[0]), bulk=False)
self.stdout.write('- - - Migrated aliases.')
# Migrate Configs
c.execute('SELECT Guild_Id FROM Game_Guild WHERE Game_Name = ?', (game.name,))
configs = c.fetchall()
self.stdout.write(f'- - - Found {len(configs)} GameConfigs.')
for config in configs:
self.stdout.write(f'- - - Migrating {game} - {config[0]}.')
GameConfig.objects.get_or_create(
guild_id=config[0], game=game, playable=True, completion_role_id='000000000000'
)
# Migrate Channels
c.execute('SELECT ID, Owner, Guild FROM Channel WHERE Game = ?', (game.name,))
channels = c.fetchall()
self.stdout.write(f'- - - Found {len(channels)} channels.')
for channel in channels:
self.stdout.write(f'- - - Migrating {channel[0]}...')
user = User.objects.get_or_create(id=channel[1])[0]
Channel.objects.get_or_create(id=channel[0], owner=user, guild_id=channel[2], game=game)
self.stdout.write(f'- - - Migrated {channel[0]}.')
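Assuming a configured Django project with the playthrough app installed, the command above can be invoked like any other management command; the database path below is purely illustrative and must point to an existing .db file because _db_path() validates it:

from django.core.management import call_command

# Equivalent to: python manage.py migrate_shogun /path/to/shogun.db
call_command("migrate_shogun", "/path/to/shogun.db")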
| 44.388889
| 104
| 0.59199
| 3,007
| 0.940864
| 0
| 0
| 237
| 0.074155
| 0
| 0
| 888
| 0.277847
|
3ded44aff9cb2e2f9d8057ef0b9ba6ae462ea0c0
| 5,233
|
py
|
Python
|
backend/model/benchmark-metrics/service/ocr.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 3
|
2022-01-12T06:51:51.000Z
|
2022-02-23T18:54:33.000Z
|
backend/model/benchmark-metrics/service/ocr.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 6
|
2021-08-31T19:21:26.000Z
|
2022-01-03T05:53:42.000Z
|
backend/model/benchmark-metrics/service/ocr.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 8
|
2021-08-12T08:07:49.000Z
|
2022-01-25T04:40:51.000Z
|
import logging
from datetime import datetime
import numpy as np
from logging.config import dictConfig
from kafkawrapper.producer import Producer
from utils.mongo_utils import BenchMarkingProcessRepo
from configs.configs import ulca_notifier_input_topic, ulca_notifier_benchmark_completed_event, ulca_notifier_benchmark_failed_event
from models.metric_manager import MetricManager
log = logging.getLogger('file')
prod = Producer()
repo = BenchMarkingProcessRepo()
class OcrMetricEvalHandler:
def __init__(self):
pass
def execute_ocr_metric_eval(self, request):
try:
log.info("Executing Ocr Metric Evaluation.... {}".format(datetime.now()))
metric_mgr = MetricManager.getInstance()
if 'benchmarkDatasets' in request.keys():
for benchmark in request["benchmarkDatasets"]:
metric_inst = metric_mgr.get_metric_execute(benchmark["metric"], request["modelTaskType"])
if not metric_inst:
log.info("Metric definition not found")
doc = {'benchmarkingProcessId':request['benchmarkingProcessId'],'benchmarkDatasetId': benchmark['datasetId'],'eval_score': None}
repo.insert(doc)
repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
return
ground_truth = [corpus_sentence["tgt"] for corpus_sentence in benchmark["corpus"]]
machine_translation = [corpus_sentence["mtgt"] for corpus_sentence in benchmark["corpus"]]
eval_score = metric_inst.ocr_metric_eval(ground_truth, machine_translation)
if eval_score:
doc = {'benchmarkingProcessId':request['benchmarkingProcessId'],'benchmarkDatasetId': benchmark['datasetId'],'eval_score': float(np.round(eval_score, 3))}
repo.insert(doc)
repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Completed'})
mail_notif_event = {"event": ulca_notifier_benchmark_completed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
else:
log.exception("Exception while metric evaluation of model")
doc = {'benchmarkingProcessId':request['benchmarkingProcessId'],'benchmarkDatasetId': benchmark['datasetId'],'eval_score': None}
repo.insert(doc)
repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
else:
log.exception("Missing parameter: benchmark details")
repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
return
except Exception as e:
log.exception(f"Exception while metric evaluation of model: {str(e)}")
repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
# Log config
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': False
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
})
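For reference, the handler above only reads a handful of keys from the incoming request. A payload of roughly this shape (all identifiers and the metric name are illustrative) is what execute_ocr_metric_eval expects:

sample_request = {
    "benchmarkingProcessId": "bp-001",   # all identifiers here are illustrative
    "modelId": "model-001",
    "userId": "user-001",
    "modelName": "example-ocr-model",
    "modelTaskType": "ocr",
    "benchmarkDatasets": [
        {
            "datasetId": "ds-001",
            "metric": "cer",             # must be a metric known to MetricManager
            "corpus": [
                {"tgt": "ground truth text", "mtgt": "model output text"},
            ],
        }
    ],
}
# OcrMetricEvalHandler().execute_ocr_metric_eval(sample_request)
# Left commented out: running it needs the Kafka producer and Mongo repo configured.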
| 55.084211
| 200
| 0.608064
| 3,926
| 0.750239
| 0
| 0
| 0
| 0
| 0
| 0
| 1,702
| 0.325244
|
3deea7c2a0399d6a1677f78e7cc36afe63de0fc2
| 1,780
|
py
|
Python
|
keystroke/migrations/0001_initial.py
|
jstavanja/quiz-biometrics-api
|
75e0db348668b14a85f94261aac092ae2d5fa9c6
|
[
"MIT"
] | null | null | null |
keystroke/migrations/0001_initial.py
|
jstavanja/quiz-biometrics-api
|
75e0db348668b14a85f94261aac092ae2d5fa9c6
|
[
"MIT"
] | null | null | null |
keystroke/migrations/0001_initial.py
|
jstavanja/quiz-biometrics-api
|
75e0db348668b14a85f94261aac092ae2d5fa9c6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-20 16:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='KeystrokeTestSession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timing_matrix', models.CharField(max_length=5000)),
],
),
migrations.CreateModel(
name='KeystrokeTestType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('input_text', models.CharField(max_length=5000)),
('repetitions', models.IntegerField()),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('moodle_username', models.CharField(max_length=250)),
('path_to_image', models.CharField(max_length=250)),
],
),
migrations.AddField(
model_name='keystroketestsession',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='keystroke.Student'),
),
migrations.AddField(
model_name='keystroketestsession',
name='test_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='keystroke.KeystrokeTestType'),
),
]
| 34.901961
| 115
| 0.591573
| 1,588
| 0.892135
| 0
| 0
| 0
| 0
| 0
| 0
| 330
| 0.185393
|
3deeb28e7a4a40609c5fe55751360abc1b88afba
| 1,603
|
py
|
Python
|
komposisjon/komposisjon/rektangler_kvadrater.py
|
knutsenfiksdal/Oving_8
|
4e5d3a358cfb9127509a86a61c9499f22da9eabc
|
[
"MIT"
] | null | null | null |
komposisjon/komposisjon/rektangler_kvadrater.py
|
knutsenfiksdal/Oving_8
|
4e5d3a358cfb9127509a86a61c9499f22da9eabc
|
[
"MIT"
] | null | null | null |
komposisjon/komposisjon/rektangler_kvadrater.py
|
knutsenfiksdal/Oving_8
|
4e5d3a358cfb9127509a86a61c9499f22da9eabc
|
[
"MIT"
] | null | null | null |
class Rektangel:
def __init__(self, start_x, start_y, bredde, hoyde):
self.start_x = start_x
self.start_y = start_y
self.hoyde = hoyde
self.bredde = bredde
def areal(self):
return self.bredde*self.hoyde
# Changes height and width in such a way that the area stays the same
def strekk(self, multiplikator):
self.bredde *= multiplikator
self.hoyde /= multiplikator
def __str__(self):
return f"Rektangel fra ({self.start_x}, {self.start_y}), " \
f"bredde {self.bredde}, høyde {self.hoyde}"
# Kvadrat (square) that uses composition and delegation
class Kvadrat:
def __init__(self, start_x, start_y, storrelse):
self.rektanglet = Rektangel(start_x, start_y, storrelse, storrelse)
@property
def bredde(self):
return self.rektanglet.bredde
@property
def hoyde(self):
return self.rektanglet.hoyde
@bredde.setter
def bredde(self, ny_bredde):
self.rektanglet.bredde = ny_bredde
self.rektanglet.hoyde = ny_bredde
@hoyde.setter
def hoyde(self, ny_hoyde):
self.rektanglet.bredde = ny_hoyde
self.rektanglet.hoyde = ny_hoyde
def areal(self):
return self.rektanglet.areal()
# Delegate strekk and __str__ to the wrapped Rektangel so the demo below runs;
# note that stretching a Kvadrat no longer keeps it square.
def strekk(self, multiplikator):
self.rektanglet.strekk(multiplikator)
def __str__(self):
return str(self.rektanglet)
if __name__ == "__main__":
rektanglet = Rektangel(5, 5, 10, 5)
print(rektanglet)
print(rektanglet.areal())
rektanglet.strekk(0.5)
print(rektanglet)
print(rektanglet.areal())
kvadrat = Kvadrat(2, 2, 6)
print(kvadrat)
print(kvadrat.areal())
kvadrat.strekk(6)
print(kvadrat)
print(kvadrat.areal())
| 26.278689
| 75
| 0.646288
| 1,211
| 0.753578
| 0
| 0
| 395
| 0.2458
| 0
| 0
| 222
| 0.138146
|
3defd9479c2f0a53049990dbf13feea2c96391cf
| 16,024
|
py
|
Python
|
DiscordBot/Commands/DiscordPoints.py
|
aronjanosch/kirbec-bot
|
6d44e177c5cf6669564047fbbc8f6e8c342bca28
|
[
"MIT"
] | null | null | null |
DiscordBot/Commands/DiscordPoints.py
|
aronjanosch/kirbec-bot
|
6d44e177c5cf6669564047fbbc8f6e8c342bca28
|
[
"MIT"
] | null | null | null |
DiscordBot/Commands/DiscordPoints.py
|
aronjanosch/kirbec-bot
|
6d44e177c5cf6669564047fbbc8f6e8c342bca28
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import discord
import itertools
from .utils import formatString, getUsageEmbed, getOopsEmbed
# IDEAS
# 1. Paying out points (without bets)
class DiscordPoints:
"""
Class that parses Discord Points info and interactions
Attributes
__________
fire (Fire obj): The fire instance where information is fetched/updated
Functions
__________
async getDiscordPointsEmbed(page, guild) -> (discord.Embed)
Makes an embedded message with total points for each user
def createNewReward(guild, rewardString) -> (discord.Embed)
Adds a reward and returns the updated list of rewards as an embedded msg
"""
fire = None
def __init__(self, fire):
self.fire = fire
async def getDiscordPointsEmbed(self, page, guild):
"""
Makes an embedded message with DiscordPoints for each member in the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message of Discord Points for each member of the guild
"""
d = self.fire.fetchDiscordPoints(guild)
# This sorts the dictionary by highest-value and converts it to a list
# It takes form [(user_0.id, value_0) ...(user_n.id, value_n)]
info_arr = [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
userString, pointsString, description = await self.__createdEmbedStrings(guild, info_arr, page)
title = "Discord Points"
return self.__createPointsEmbed(title, description, userString, pointsString)
def createNewReward(self, guild, rewardString):
"""
Create new reward for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
rewardString : string
String with the reward title and cost
Returns
----------
discord.Embed
Embedded message of the updated rewards for the server
"""
rewardStringList = ["".join(x) for _, x in itertools.groupby(rewardString, key=str.isdigit)]
if len(rewardStringList) < 2:
return getUsageEmbed(
"-addreward [Desired Reward] [Price of the Reward]\n\nexample: -addreward CSGO with friends 500")
try:
rewardCost = int(rewardStringList[len(rewardStringList) - 1])
rewardTitle = self.__parseRewardStringList(rewardStringList)
self.fire.postNewReward(guild, rewardTitle, rewardCost)
return self.getRewardsEmbed(guild)
except Exception as e:
print("ERROR ", e)
return getUsageEmbed(
"-addreward [Desired Reward] [Price of the Reward]\n\nexample: -addreward CSGO with friends 500")
def getRewardsEmbed(self, guild):
"""
Get all of the current rewards for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message with all of the rewards for the guild
"""
rewards_dict = self.fire.fetchAllRewards(guild)
if rewards_dict == {}:
return self.__noRewardsEmbed(guild)
rewardsList = [(k, rewards_dict[k]) for k in sorted(rewards_dict, key=rewards_dict.get, reverse=True)]
idString, rewardsString, costsString = self.__getRewardsEmbedStrings(rewardsList)
return self.__createRewardsEmbed(idString, rewardsString, costsString)
def redeemReward(self, guild, user, reward_id):
"""
Redeems the desired reward with DiscordPoints
[@Todo: Ping Users associated with the reward]
Parameters
----------
guild : discord.Guild
The server that we want to get information from
user : discord.Member if in guild, discord.User otherwise
The user that redeemed the reward
reward_id : Int
The id of the reward to redeem
Returns
----------
discord.Embed
Embedded message with the redeemed reward
"""
points_dict = self.fire.fetchDiscordPoints(guild)
rewards_dict = self.fire.fetchAllRewards(guild)
rewards_list = [(k, rewards_dict[k]) for k in sorted(rewards_dict, key=rewards_dict.get, reverse=True)]
try:
# Check to see if the reward_id is within the list of rewards
if int(reward_id) > len(rewards_list) or int(reward_id) < 1:
return self.__createNotARewardEmbed()
reward_title = rewards_list[int(reward_id) - 1][0]
reward_cost = rewards_list[int(reward_id) - 1][1]
# Check to see if the user has enough points to redeem the reward
if points_dict[str(user.id)] and points_dict[str(user.id)] < reward_cost:
return self.__createNotEnoughPointsEmbed(user, points_dict[str(user.id)])
else:
new_points = points_dict[str(user.id)] - reward_cost
self.fire.postNewDiscordPoints(guild, str(user.id), new_points)
return self.__createRedeemRewardEmbed(reward_title, reward_cost, user, new_points)
except Exception as e:
print(e)
return getUsageEmbed("-redeemReward [Desired Reward Id]\n\nexample: -redeemReward 3")
def addPoints(self, guild, author, user, points):
"""
add Points to a specific User
[@Todo: Ping Users associated with the points]
Parameters
----------
guild : discord.Guild
The server that we want to get information from
author : message.user
user : discord.Member if in guild, discord.User otherwise
The user that redeemed the reward
points : Int
The amount of points
Returns
----------
discord.Embed
Embedded message with the redeemed reward
"""
points_dict = self.fire.fetchDiscordPoints(guild)
print(user.id)
try:
if not str(user.id) in points_dict:
return getOopsEmbed("User ID not correct")
elif not author.guild_permissions.administrator:
return getOopsEmbed("Command can only be used by Server-Admins")
print(points_dict[str(user.id)])
new_points = points_dict[str(user.id)] + int(points)
print(new_points)
self.fire.postNewDiscordPoints(guild, str(user.id), new_points)
return self.__createPointsEmbed("Points added", "Points were added to balance", f"{user}", f"{new_points}")
except Exception as e:
print(e)
print("Error adding points")
return getOopsEmbed("Error adding points, check console")
# ---------- MARK: - Private Functions ----------
async def __createdEmbedStrings(self, guild, sortedList, page):
"""
Private helper function to create strings for the embedded message
Parameters
----------
guild : (discord.Guild)
The server that we are tracking
sortedList : arr[(key_0, val_0) ... (key_n, val_n)]
The sorted (by val) list of key, val pairs where key: user_id, val: points
page : (int)
Page of the message we want to look at (20 entries per page)
Returns
----------
discord.Embed
Formatted information embedded into a message
"""
member_dict = await self.fire.fetchAllMembers(guild)
# Max 20 entries / page
pages = len(sortedList) // 20 + 1
userString = ""
pointsString = ""
rankString = ""
if page > pages or page < 0:
page = 1
for i in range(0, 20):
shiftedIndex = (page - 1) * 20 + i
if shiftedIndex < len(sortedList):
user_id = sortedList[shiftedIndex][0]
points = sortedList[shiftedIndex][1]
if int(user_id) in member_dict.keys():
userString += member_dict[int(user_id)] + '\n'
pointsString += str(points) + '\n'
description = "Page " + str(page) + " of " + str(page)
return userString, pointsString, description
def __createPointsEmbed(self, title, description, userString, pointsString):
"""
Formats information into an embedded message
Parameters
----------
title: (str)
Title for the embedded message
description: (str)
Description for the embedded message
userString: (str)
String representing the list of ordered users
pointsString: (str)
String representing the list of ordered points
Returns
----------
discord.Embed
Formatted information embedded into a message
"""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Username", value=userString)
embed.add_field(name="Discord Points", value=pointsString)
return embed
def __noRewardsEmbed(self, guild):
"""
Private function that shows that there are no rewards yet for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message that states no rewards are in the guild
"""
now = datetime.today()
embed = discord.Embed(title="Oops!", description="", timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="No Rewards Set Yet!",
value="To add a reward:\n-addreward [Desired Reward] [Price of the Reward]")
return embed
def __getRewardsEmbedStrings(self, rewardsList):
"""
Private function that gets formatted strings for the list of rewards
Parameters
----------
rewardsList: [(reward_title_0, cost_0)...]
List of rewards sorted by the highest cost
Returns
----------
idString: string
String representing the id's of the rewards separated by '\n'
rewardString: string
String representing the title of the rewards separated by '\n'
costString: string
String representing the costs of the rewards separated by '\n'
"""
idString = ""
rewardString = ""
costString = ""
for i in range(len(rewardsList)):
numLines, formattedRewardString = formatString(str(rewardsList[i][0]))
idString += str(i + 1) + ("\n" * numLines)
rewardString += formattedRewardString + "\n"
costString += str(rewardsList[i][1]) + ("\n" * numLines)
return idString, rewardString, costString
def __createRewardsEmbed(self, idString, rewardString, costString):
"""
Private function to help create a rewards embed
Parameters
----------
idString: string
String representing the id's of the rewards separated by '\n'
rewardString: string
String representing the title of the rewards separated by '\n'
costString: string
String representing the costs of the rewards separated by '\n'
Returns
----------
discord.Embed
Embedded message that states all of the rewards
"""
title = "Discord Point Rewards"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="ID", value=idString)
embed.add_field(name="Reward", value=rewardString)
embed.add_field(name="Price", value=costString)
return embed
def __createRedeemRewardEmbed(self, reward_title, reward_cost, user, new_points):
"""
Private function to help create a redeem reward embed
Parameters
----------
reward_title: string
Title of the reward to be redeemed
reward_cost : int
Cost of the reward to be redeemed
user : discord.Member if in guild, discord.User otherwise
User_id of the user that redeemed the reward
Returns
----------
discord.Embed
Embedded message that states the redeemed reward
"""
title = "Reward Redeemed"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_thumbnail(url=user.avatar_url)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Reward", value=reward_title, inline=False)
embed.add_field(name="Price", value=reward_cost, inline=False)
embed.add_field(name="Points Remaining", value=str(new_points), inline=False)
return embed
def __createNotEnoughPointsEmbed(self, user, user_points):
"""
Private function to help create a not enough points embed message
Parameters
----------
user_points : int
The amount of points that the user currently has
user : discord.Member if in guild, discord.User otherwise
User that tried to redeem the reward
Returns
----------
discord.Embed
Embedded message that states that the user doesn't have enough points
"""
title = "Oops!"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now, colour=discord.Colour.red())
embed.set_thumbnail(url=user.avatar_url)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Not enough points", value="You have: " + str(user_points))
return embed
def __createNotARewardEmbed(self):
"""
Private function to help create a "invalid reward id" embed
Returns
----------
discord.Embed
Embedded message that states that the reward id is invalid
"""
title = "Oops!"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now, colour=discord.Colour.red())
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Not a reward", value="Please enter a valid reward id")
return embed
def __parseRewardStringList(self, rewardStringList):
"""
Private function to recreate reward title
Parameters
----------
rewardStringList: list(String)
List of strings representing the title
Returns
----------
s: string
The reward title string
"""
s = ""
for i in range(len(rewardStringList) - 1):
s += rewardStringList[i]
return s
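The -addreward parsing above splits the argument string into digit and non-digit runs with itertools.groupby, takes the trailing digit run as the price, and rejoins the rest as the reward title. A small standalone illustration (the reward text is made up):

import itertools

reward_string = "CSGO with friends 500"   # illustrative input
parts = ["".join(x) for _, x in itertools.groupby(reward_string, key=str.isdigit)]
# parts == ['CSGO with friends ', '500']
cost = int(parts[-1])                     # 500
title = "".join(parts[:-1])               # 'CSGO with friends '
print(title, cost)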
| 33.949153
| 119
| 0.602034
| 15,851
| 0.989204
| 0
| 0
| 0
| 0
| 2,356
| 0.147029
| 8,361
| 0.52178
|
3df076848f2032b90ec31c8b5ee8c64134fd5e5c
| 1,579
|
py
|
Python
|
lunch/admin.py
|
KrzysztofSakowski/lunch-crawler
|
6a93d6cfad634fb98f89bc22d68547801865c9ae
|
[
"Apache-2.0"
] | 1
|
2020-02-17T13:40:08.000Z
|
2020-02-17T13:40:08.000Z
|
lunch/admin.py
|
KrzysztofSakowski/lunch-crawler
|
6a93d6cfad634fb98f89bc22d68547801865c9ae
|
[
"Apache-2.0"
] | 4
|
2020-02-11T23:06:14.000Z
|
2021-06-10T18:07:30.000Z
|
lunch/admin.py
|
KrzysztofSakowski/lunch-crawler
|
6a93d6cfad634fb98f89bc22d68547801865c9ae
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import MenuFacebook, MenuEmail, UserProfile, Occupation, FacebookRestaurant, EmailRestaurant
class MenuBaseAdmin(admin.ModelAdmin):
list_display = ('id', 'format_date', 'is_lunch', 'message')
list_filter = ('created_date', 'is_lunch')
list_editable = ('is_lunch',)
ordering = ['-created_date']
def format_date(self, obj):
return obj.created_date.strftime('%Y-%m-%d, %R')
class RestaurantAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
class UserProfileInline(admin.StackedInline):
model = UserProfile.restaurants.through
class UserProfileAdmin(admin.ModelAdmin):
inlines = UserProfileInline,
list_display = ('name', 'restaurants_list',)
def get_inline_instances(self, request, obj=None):
if not obj:
return []
return super(UserProfileAdmin, self).get_inline_instances(request, obj)
def name(self, obj):
return obj.user.username
def restaurants_list(self, obj):
return "\n".join([a.name for a in obj.restaurants.all()])
class SeatAdmin(admin.ModelAdmin):
list_display = ('id', 'restaurant', 'seats_taken', 'seats_total', 'date_declared')
def restaurant(self, obj):
return obj.restaurant.name
admin.site.register(FacebookRestaurant, RestaurantAdmin)
admin.site.register(EmailRestaurant, RestaurantAdmin)
admin.site.register(MenuFacebook, MenuBaseAdmin)
admin.site.register(MenuEmail, MenuBaseAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Occupation, SeatAdmin)
| 29.240741
| 105
| 0.723243
| 1,122
| 0.710576
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.122863
|
3df0af937b9149db956b0d8ec02537a403587abe
| 19,082
|
py
|
Python
|
src/oci/log_analytics/models/query_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/log_analytics/models/query_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/log_analytics/models/query_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class QueryDetails(object):
"""
Input arguments for running a log analytics query. If the request is set to run in asynchronous mode
then shouldIncludeColumns and shouldIncludeFields can be overwritten when retrieving the results.
"""
#: A constant which can be used with the sub_system property of a QueryDetails.
#: This constant has a value of "LOG"
SUB_SYSTEM_LOG = "LOG"
#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "FOREGROUND"
ASYNC_MODE_FOREGROUND = "FOREGROUND"
#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "BACKGROUND"
ASYNC_MODE_BACKGROUND = "BACKGROUND"
def __init__(self, **kwargs):
"""
Initializes a new QueryDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this QueryDetails.
:type compartment_id: str
:param compartment_id_in_subtree:
The value to assign to the compartment_id_in_subtree property of this QueryDetails.
:type compartment_id_in_subtree: bool
:param saved_search_id:
The value to assign to the saved_search_id property of this QueryDetails.
:type saved_search_id: str
:param query_string:
The value to assign to the query_string property of this QueryDetails.
:type query_string: str
:param sub_system:
The value to assign to the sub_system property of this QueryDetails.
Allowed values for this property are: "LOG"
:type sub_system: str
:param max_total_count:
The value to assign to the max_total_count property of this QueryDetails.
:type max_total_count: int
:param time_filter:
The value to assign to the time_filter property of this QueryDetails.
:type time_filter: oci.log_analytics.models.TimeRange
:param scope_filters:
The value to assign to the scope_filters property of this QueryDetails.
:type scope_filters: list[oci.log_analytics.models.ScopeFilter]
:param query_timeout_in_seconds:
The value to assign to the query_timeout_in_seconds property of this QueryDetails.
:type query_timeout_in_seconds: int
:param should_run_async:
The value to assign to the should_run_async property of this QueryDetails.
:type should_run_async: bool
:param async_mode:
The value to assign to the async_mode property of this QueryDetails.
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:type async_mode: str
:param should_include_total_count:
The value to assign to the should_include_total_count property of this QueryDetails.
:type should_include_total_count: bool
:param should_include_columns:
The value to assign to the should_include_columns property of this QueryDetails.
:type should_include_columns: bool
:param should_include_fields:
The value to assign to the should_include_fields property of this QueryDetails.
:type should_include_fields: bool
:param should_use_acceleration:
The value to assign to the should_use_acceleration property of this QueryDetails.
:type should_use_acceleration: bool
"""
self.swagger_types = {
'compartment_id': 'str',
'compartment_id_in_subtree': 'bool',
'saved_search_id': 'str',
'query_string': 'str',
'sub_system': 'str',
'max_total_count': 'int',
'time_filter': 'TimeRange',
'scope_filters': 'list[ScopeFilter]',
'query_timeout_in_seconds': 'int',
'should_run_async': 'bool',
'async_mode': 'str',
'should_include_total_count': 'bool',
'should_include_columns': 'bool',
'should_include_fields': 'bool',
'should_use_acceleration': 'bool'
}
self.attribute_map = {
'compartment_id': 'compartmentId',
'compartment_id_in_subtree': 'compartmentIdInSubtree',
'saved_search_id': 'savedSearchId',
'query_string': 'queryString',
'sub_system': 'subSystem',
'max_total_count': 'maxTotalCount',
'time_filter': 'timeFilter',
'scope_filters': 'scopeFilters',
'query_timeout_in_seconds': 'queryTimeoutInSeconds',
'should_run_async': 'shouldRunAsync',
'async_mode': 'asyncMode',
'should_include_total_count': 'shouldIncludeTotalCount',
'should_include_columns': 'shouldIncludeColumns',
'should_include_fields': 'shouldIncludeFields',
'should_use_acceleration': 'shouldUseAcceleration'
}
self._compartment_id = None
self._compartment_id_in_subtree = None
self._saved_search_id = None
self._query_string = None
self._sub_system = None
self._max_total_count = None
self._time_filter = None
self._scope_filters = None
self._query_timeout_in_seconds = None
self._should_run_async = None
self._async_mode = None
self._should_include_total_count = None
self._should_include_columns = None
self._should_include_fields = None
self._should_use_acceleration = None
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this QueryDetails.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this QueryDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this QueryDetails.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this QueryDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def compartment_id_in_subtree(self):
"""
Gets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:return: The compartment_id_in_subtree of this QueryDetails.
:rtype: bool
"""
return self._compartment_id_in_subtree
@compartment_id_in_subtree.setter
def compartment_id_in_subtree(self, compartment_id_in_subtree):
"""
Sets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:param compartment_id_in_subtree: The compartment_id_in_subtree of this QueryDetails.
:type: bool
"""
self._compartment_id_in_subtree = compartment_id_in_subtree
@property
def saved_search_id(self):
"""
Gets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:return: The saved_search_id of this QueryDetails.
:rtype: str
"""
return self._saved_search_id
@saved_search_id.setter
def saved_search_id(self, saved_search_id):
"""
Sets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:param saved_search_id: The saved_search_id of this QueryDetails.
:type: str
"""
self._saved_search_id = saved_search_id
@property
def query_string(self):
"""
**[Required]** Gets the query_string of this QueryDetails.
Query to perform. Must conform to the Logging Analytics query language syntax. Syntax errors will be returned if present.
:return: The query_string of this QueryDetails.
:rtype: str
"""
return self._query_string
@query_string.setter
def query_string(self, query_string):
"""
Sets the query_string of this QueryDetails.
Query to perform. Must conform to the Logging Analytics query language syntax. Syntax errors will be returned if present.
:param query_string: The query_string of this QueryDetails.
:type: str
"""
self._query_string = query_string
@property
def sub_system(self):
"""
**[Required]** Gets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
Allowed values for this property are: "LOG"
:return: The sub_system of this QueryDetails.
:rtype: str
"""
return self._sub_system
@sub_system.setter
def sub_system(self, sub_system):
"""
Sets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
:param sub_system: The sub_system of this QueryDetails.
:type: str
"""
allowed_values = ["LOG"]
if not value_allowed_none_or_none_sentinel(sub_system, allowed_values):
raise ValueError(
"Invalid value for `sub_system`, must be None or one of {0}"
.format(allowed_values)
)
self._sub_system = sub_system
@property
def max_total_count(self):
"""
Gets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:return: The max_total_count of this QueryDetails.
:rtype: int
"""
return self._max_total_count
@max_total_count.setter
def max_total_count(self, max_total_count):
"""
Sets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:param max_total_count: The max_total_count of this QueryDetails.
:type: int
"""
self._max_total_count = max_total_count
@property
def time_filter(self):
"""
Gets the time_filter of this QueryDetails.
:return: The time_filter of this QueryDetails.
:rtype: oci.log_analytics.models.TimeRange
"""
return self._time_filter
@time_filter.setter
def time_filter(self, time_filter):
"""
Sets the time_filter of this QueryDetails.
:param time_filter: The time_filter of this QueryDetails.
:type: oci.log_analytics.models.TimeRange
"""
self._time_filter = time_filter
@property
def scope_filters(self):
"""
Gets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:return: The scope_filters of this QueryDetails.
:rtype: list[oci.log_analytics.models.ScopeFilter]
"""
return self._scope_filters
@scope_filters.setter
def scope_filters(self, scope_filters):
"""
Sets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:param scope_filters: The scope_filters of this QueryDetails.
:type: list[oci.log_analytics.models.ScopeFilter]
"""
self._scope_filters = scope_filters
@property
def query_timeout_in_seconds(self):
"""
Gets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:return: The query_timeout_in_seconds of this QueryDetails.
:rtype: int
"""
return self._query_timeout_in_seconds
@query_timeout_in_seconds.setter
def query_timeout_in_seconds(self, query_timeout_in_seconds):
"""
Sets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:param query_timeout_in_seconds: The query_timeout_in_seconds of this QueryDetails.
:type: int
"""
self._query_timeout_in_seconds = query_timeout_in_seconds
@property
def should_run_async(self):
"""
Gets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:return: The should_run_async of this QueryDetails.
:rtype: bool
"""
return self._should_run_async
@should_run_async.setter
def should_run_async(self, should_run_async):
"""
Sets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:param should_run_async: The should_run_async of this QueryDetails.
:type: bool
"""
self._should_run_async = should_run_async
@property
def async_mode(self):
"""
Gets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously i.e (shouldRunAsync is set to true).
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:return: The async_mode of this QueryDetails.
:rtype: str
"""
return self._async_mode
@async_mode.setter
def async_mode(self, async_mode):
"""
Sets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously i.e (shouldRunAsync is set to true).
:param async_mode: The async_mode of this QueryDetails.
:type: str
"""
allowed_values = ["FOREGROUND", "BACKGROUND"]
if not value_allowed_none_or_none_sentinel(async_mode, allowed_values):
raise ValueError(
"Invalid value for `async_mode`, must be None or one of {0}"
.format(allowed_values)
)
self._async_mode = async_mode
@property
def should_include_total_count(self):
"""
Gets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:return: The should_include_total_count of this QueryDetails.
:rtype: bool
"""
return self._should_include_total_count
@should_include_total_count.setter
def should_include_total_count(self, should_include_total_count):
"""
Sets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:param should_include_total_count: The should_include_total_count of this QueryDetails.
:type: bool
"""
self._should_include_total_count = should_include_total_count
@property
def should_include_columns(self):
"""
Gets the should_include_columns of this QueryDetails.
Include columns in response
:return: The should_include_columns of this QueryDetails.
:rtype: bool
"""
return self._should_include_columns
@should_include_columns.setter
def should_include_columns(self, should_include_columns):
"""
Sets the should_include_columns of this QueryDetails.
Include columns in response
:param should_include_columns: The should_include_columns of this QueryDetails.
:type: bool
"""
self._should_include_columns = should_include_columns
@property
def should_include_fields(self):
"""
Gets the should_include_fields of this QueryDetails.
Include fields in response
:return: The should_include_fields of this QueryDetails.
:rtype: bool
"""
return self._should_include_fields
@should_include_fields.setter
def should_include_fields(self, should_include_fields):
"""
Sets the should_include_fields of this QueryDetails.
Include fields in response
:param should_include_fields: The should_include_fields of this QueryDetails.
:type: bool
"""
self._should_include_fields = should_include_fields
@property
def should_use_acceleration(self):
"""
Gets the should_use_acceleration of this QueryDetails.
Controls if query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fallback to raw data.
:return: The should_use_acceleration of this QueryDetails.
:rtype: bool
"""
return self._should_use_acceleration
@should_use_acceleration.setter
def should_use_acceleration(self, should_use_acceleration):
"""
Sets the should_use_acceleration of this QueryDetails.
Controls if query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fallback to raw data.
:param should_use_acceleration: The should_use_acceleration of this QueryDetails.
:type: bool
"""
self._should_use_acceleration = should_use_acceleration
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
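Since the class is decorated with init_model_state_from_kwargs, instances are normally built directly from keyword arguments. A minimal sketch, in which the compartment OCID and the query string are placeholders:

details = QueryDetails(
    compartment_id="ocid1.compartment.oc1..example",  # placeholder OCID
    query_string="* | stats count",                   # placeholder query
    sub_system="LOG",
    max_total_count=100,
    should_include_total_count=True,
)
details.async_mode = "BACKGROUND"   # only "FOREGROUND" / "BACKGROUND" are accepted
# details.sub_system = "METRIC"     # would raise ValueError: not an allowed value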
| 35.600746
| 245
| 0.672466
| 18,542
| 0.971701
| 0
| 0
| 18,572
| 0.973273
| 0
| 0
| 13,263
| 0.695053
|
3df0f23a4341291aa332900c1b4adf982ac1f716
| 2,740
|
py
|
Python
|
moist.py
|
phiriv/moisture_sensor
|
1e6a5d967ab639c67bae03847bd58ede31bde564
|
[
"MIT"
] | null | null | null |
moist.py
|
phiriv/moisture_sensor
|
1e6a5d967ab639c67bae03847bd58ede31bde564
|
[
"MIT"
] | null | null | null |
moist.py
|
phiriv/moisture_sensor
|
1e6a5d967ab639c67bae03847bd58ede31bde564
|
[
"MIT"
] | null | null | null |
# Script to read temperature data from the DHT11:
# Import the Adafruit DHT library.
import Adafruit_DHT
import time
als = True
while als:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4) #on gpio pin 4 or pin 7
if humidity is not None and temperature is not None:
humidity = round(humidity, 2)
temperature = round(temperature, 2)
print 'Temperature = {0:0.1f}*C Humidity = {1:0.1f}%'.format(temperature, humidity)
else:
print 'can not connect to the sensor!'
time.sleep(60) # read data every minute
# Updated version of the script above, modified to also write the data to a CSV file:
# Import the Adafruit DHT library.
#time.strftime("%I:%M:%S")
import Adafruit_DHT
import time
import csv
import sys
csvfile = "temp.csv"
als = True
while als:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4) # gpio pin 4 or pin number 7
if humidity is not None and temperature is not None:
humidity = round(humidity, 2)
temperature = round(temperature, 2)
print 'Temperature = {0:0.1f}*C Humidity = {1:0.1f}%'.format(temperature, humidity)
else:
print 'can not connect to the sensor!'
timeC = time.strftime("%I")+':' +time.strftime("%M")+':'+time.strftime("%S")
data = [temperature, timeC]
with open(csvfile, "a")as output:
writer = csv.writer(output, delimiter=",", lineterminator = '\n')
writer.writerow(data)
time.sleep(6) # log a reading every 6 seconds
# Script to read the data from the CSV and display it in a graph:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.animation as animation
from datetime import datetime
fig = plt.figure()
rect = fig.patch
rect.set_facecolor('#0079E7')
def animate(i):
ftemp = 'temp.csv'
fh = open(ftemp)
temp = list()
timeC = list()
for line in fh:
pieces = line.split(',')
degree = pieces[0]
timeB= pieces[1]
timeA= timeB[:8]
#print timeA
time_string = datetime.strptime(timeA,'%H:%M:%S')
#print time_string
try:
temp.append(float(degree))
timeC.append(time_string)
except:
print "dont know"
ax1 = fig.add_subplot(1,1,1,axisbg='white')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
ax1.clear()
ax1.plot(timeC,temp, 'c', linewidth = 3.3)
plt.title('Temperature')
plt.xlabel('Time')
ani = animation.FuncAnimation(fig, animate, interval = 6000)
plt.show()
*/
void setup() {
}
void loop() {
}
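The embedded scripts above are Python 2. A minimal Python 3 rendering of the CSV-logging loop, assuming the legacy Adafruit_DHT package is installed and a DHT11 is wired to GPIO pin 4 as in the original:

import csv
import time

import Adafruit_DHT   # assumes the legacy Adafruit_DHT package is installed

CSV_FILE = "temp.csv"

while True:
    # Read humidity and temperature from a DHT11 on GPIO pin 4
    humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4)
    if humidity is not None and temperature is not None:
        row = [round(temperature, 2), time.strftime("%H:%M:%S")]
        with open(CSV_FILE, "a", newline="") as output:
            csv.writer(output).writerow(row)
    else:
        print("cannot connect to the sensor!")
    time.sleep(60)   # log one reading per minute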
| 30.10989
| 104
| 0.622628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 526
| 0.191971
|
3df10878e5646297672b7b72bacac47ff05e414e
| 4,168
|
py
|
Python
|
route_distances/utils/routes.py
|
general-synthesis/route-distances
|
2bc09a607bd7fa488357dcee96325669d8295f90
|
[
"MIT"
] | null | null | null |
route_distances/utils/routes.py
|
general-synthesis/route-distances
|
2bc09a607bd7fa488357dcee96325669d8295f90
|
[
"MIT"
] | null | null | null |
route_distances/utils/routes.py
|
general-synthesis/route-distances
|
2bc09a607bd7fa488357dcee96325669d8295f90
|
[
"MIT"
] | null | null | null |
""" Module containing helper routines for routes """
from typing import Dict, Any, Set, List, Tuple
import numpy as np
from route_distances.utils.type_utils import StrDict
def calc_depth(tree_dict: StrDict, depth: int = 0) -> int:
"""
Calculate the depth of a route, recursively
:param tree_dict: the route
:param depth: the current depth, don't specify for route
"""
children = tree_dict.get("children", [])
if children:
return max(calc_depth(child, depth + 1) for child in children)
return depth
def calc_llr(tree_dict: StrDict) -> int:
"""
Calculate the longest linear route for a synthetic route
:param tree_dict: the route
"""
return calc_depth(tree_dict) // 2
def extract_leaves(
tree_dict: StrDict,
) -> Set[str]:
"""
Extract a set with the SMILES of all the leaf nodes, i.e.
starting material
:param tree_dict: the route
:return: a set of SMILE strings
"""
def traverse(tree_dict: StrDict, leaves: Set[str]) -> None:
children = tree_dict.get("children", [])
if children:
for child in children:
traverse(child, leaves)
else:
leaves.add(tree_dict["smiles"])
leaves = set()
traverse(tree_dict, leaves)
return leaves
def is_solved(route: StrDict) -> bool:
"""
Find if a route is solved, i.e. if all starting material
is in stock.
To be accurate, each molecule node need to have an extra
boolean property called `in_stock`.
:param route: the route to analyze
"""
def find_leaves_not_in_stock(tree_dict: StrDict) -> None:
children = tree_dict.get("children", [])
if not children and not tree_dict.get("in_stock", True):
raise ValueError(f"child not in stock {tree_dict}")
elif children:
for child in children:
find_leaves_not_in_stock(child)
try:
find_leaves_not_in_stock(route)
except ValueError:
return False
return True
def route_score(
tree_dict: StrDict,
mol_costs: Dict[bool, float] = None,
average_yield=0.8,
reaction_cost=1.0,
) -> float:
"""
Calculate the score of a route using the method from
(Badowski et al. Chem Sci. 2019, 10, 4640).
The reaction cost is constant and the yield is an average yield.
The starting materials are assigned a cost based on whether they are in
stock or not. By default starting material in stock is assigned a
cost of 1 and starting material not in stock is assigned a cost of 10.
To be accurate, each molecule node needs to have an extra
boolean property called `in_stock`.
:param tree_dict: the route to analyze
:param mol_costs: the starting material cost
:param average_yield: the average yield, defaults to 0.8
:param reaction_cost: the reaction cost, defaults to 1.0
:return: the computed cost
"""
mol_cost = mol_costs or {True: 1, False: 10}
reactions = tree_dict.get("children", [])
if not reactions:
return mol_cost[tree_dict.get("in_stock", True)]
child_sum = sum(
# propagate the custom costs and yield into the recursive calls
1 / average_yield * route_score(child, mol_costs, average_yield, reaction_cost)
for child in reactions[0]["children"]
)
return reaction_cost + child_sum
def route_scorer(routes: List[StrDict]) -> Tuple[List[StrDict], List[float]]:
"""
Scores and sort a list of routes.
Returns a tuple of the sorted routes and their costs.
:param routes: the routes to score
:return: the sorted routes and their costs
"""
scores = np.asarray([route_score(route) for route in routes])
sorted_idx = np.argsort(scores)
routes = [routes[idx] for idx in sorted_idx]
return routes, scores[sorted_idx].tolist()
def route_ranks(scores: List[float]) -> List[int]:
"""
Compute the rank of route scores. Rank starts at 1
:param scores: the route scores
:return: a list of ranks for each route
"""
ranks = [1]
for idx in range(1, len(scores)):
if abs(scores[idx] - scores[idx - 1]) < 1e-8:
ranks.append(ranks[idx - 1])
else:
ranks.append(ranks[idx - 1] + 1)
return ranks
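As a quick illustration of the cost model in route_score, a one-step route whose two starting materials are both in stock costs 1.0 for the reaction plus 1/0.8 per leaf. The SMILES strings below are placeholders:

from route_distances.utils.routes import calc_llr, is_solved, route_score

example_route = {
    "smiles": "target",              # placeholder SMILES
    "in_stock": False,
    "children": [
        {
            "smiles": "target>>a.b",  # reaction node
            "children": [
                {"smiles": "a", "in_stock": True},
                {"smiles": "b", "in_stock": True},
            ],
        }
    ],
}
print(route_score(example_route))   # 1.0 + 1.25 + 1.25 = 3.5
print(calc_llr(example_route))      # 1 reaction in the longest linear sequence
print(is_solved(example_route))     # True, both leaves are in stock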
| 28.744828
| 84
| 0.651631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,910
| 0.458253
|
3df45b763adea0ed603bc91664b6febfe07b4afe
| 1,920
|
py
|
Python
|
src/yafowil/tests/__init__.py
|
2silver/yafowil
|
b9776503f98f145f7aaaa4f61b73e238c92c534c
|
[
"BSD-3-Clause"
] | 8
|
2015-12-15T21:14:00.000Z
|
2019-11-11T22:13:18.000Z
|
src/yafowil/tests/__init__.py
|
2silver/yafowil
|
b9776503f98f145f7aaaa4f61b73e238c92c534c
|
[
"BSD-3-Clause"
] | 21
|
2015-11-21T10:12:12.000Z
|
2021-06-03T06:51:53.000Z
|
src/yafowil/tests/__init__.py
|
2silver/yafowil
|
b9776503f98f145f7aaaa4f61b73e238c92c534c
|
[
"BSD-3-Clause"
] | 5
|
2016-11-23T13:41:52.000Z
|
2020-06-08T18:21:00.000Z
|
from __future__ import print_function
from node.tests import NodeTestCase
from yafowil.base import factory
from yafowil.compat import IS_PY2
import lxml.etree as etree
import sys
import unittest
import yafowil.common
import yafowil.compound
import yafowil.persistence
import yafowil.table
if not IS_PY2:
from importlib import reload
class YafowilTestCase(NodeTestCase):
def setUp(self):
super(YafowilTestCase, self).setUp()
factory.clear()
reload(yafowil.persistence)
reload(yafowil.common)
reload(yafowil.compound)
reload(yafowil.table)
def fxml(xml):
et = etree.fromstring(xml)
return etree.tostring(et, pretty_print=True).decode('utf-8')
def pxml(xml):
print(fxml(xml))
def test_suite():
from yafowil.tests import test_base
from yafowil.tests import test_common
from yafowil.tests import test_compound
from yafowil.tests import test_controller
from yafowil.tests import test_persistence
from yafowil.tests import test_resources
from yafowil.tests import test_table
from yafowil.tests import test_tsf
from yafowil.tests import test_utils
suite = unittest.TestSuite()
suite.addTest(unittest.findTestCases(test_base))
suite.addTest(unittest.findTestCases(test_common))
suite.addTest(unittest.findTestCases(test_compound))
suite.addTest(unittest.findTestCases(test_controller))
suite.addTest(unittest.findTestCases(test_persistence))
suite.addTest(unittest.findTestCases(test_resources))
suite.addTest(unittest.findTestCases(test_table))
suite.addTest(unittest.findTestCases(test_tsf))
suite.addTest(unittest.findTestCases(test_utils))
return suite
def run_tests():
from zope.testrunner.runner import Runner
runner = Runner(found_suites=[test_suite()])
runner.run()
sys.exit(int(runner.failed))
if __name__ == '__main__':
run_tests()
| 25.945946
| 64
| 0.752083
| 257
| 0.133854
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.008854
|
3df5aa98eb0d85a8d21eb7afce122f2c8fabce6b
| 1,350
|
py
|
Python
|
tools/foolbox/bim_attack.py
|
GianmarcoMidena/adversarial-ML-benchmarker
|
43cfcfdac36da88d37b12d956ea8735fd27ca4a9
|
[
"MIT"
] | null | null | null |
tools/foolbox/bim_attack.py
|
GianmarcoMidena/adversarial-ML-benchmarker
|
43cfcfdac36da88d37b12d956ea8735fd27ca4a9
|
[
"MIT"
] | null | null | null |
tools/foolbox/bim_attack.py
|
GianmarcoMidena/adversarial-ML-benchmarker
|
43cfcfdac36da88d37b12d956ea8735fd27ca4a9
|
[
"MIT"
] | null | null | null |
from foolbox.attacks import LinfinityBasicIterativeAttack
from foolbox.criteria import Misclassification
from foolbox.distances import MSE
from tools.foolbox.adversarial_attack import AdversarialAttack
class BIMAttack(AdversarialAttack):
def __init__(self, model, step_size_iter=0.05, max_perturbation=0.3, n_iterations=10, min_perturbation=None,
binary_search=True, random_start=False, return_early=True, criterion=Misclassification(),
distance=MSE):
super().__init__(attack_method_def=LinfinityBasicIterativeAttack, model=model,
min_perturbation=min_perturbation, criterion=criterion, distance=distance)
self._binary_search = binary_search
self._step_size_iter = step_size_iter
self._n_iterations = n_iterations
self._random_start = random_start
self._return_early = return_early
self._max_perturbation = max_perturbation
def apply_attack_method(self, x, y=None):
return self.attack_method(x, labels=y, unpack=True, binary_search=self._binary_search,
epsilon=self._max_perturbation, stepsize=self._step_size_iter,
iterations=self._n_iterations, random_start=self._random_start,
return_early=self._return_early)
| 51.923077
| 112
| 0.707407
| 1,144
| 0.847407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3df5e6a39fd0846088495ee87733d03e26f82c02
| 292
|
py
|
Python
|
Tabuada.py
|
tobiaspontes/ScriptsPython
|
21ed779e49adca500ce5815dd100f4ec999a2571
|
[
"MIT"
] | null | null | null |
Tabuada.py
|
tobiaspontes/ScriptsPython
|
21ed779e49adca500ce5815dd100f4ec999a2571
|
[
"MIT"
] | null | null | null |
Tabuada.py
|
tobiaspontes/ScriptsPython
|
21ed779e49adca500ce5815dd100f4ec999a2571
|
[
"MIT"
] | null | null | null |
# Multiplication table (tabuada) in Python
def tabuada(x):
for i in range(10):
print('{} x {} = {}'.format(x, (i + 1), x * (i + 1)))
print()
if __name__ == '__main__':
print(9)
nro = int(input('Entre com um número: '))
print(f'\n\033[1;32mTabuada do {nro}'+'\n')
tabuada(nro)
| 20.857143
| 61
| 0.513699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.348123
|
3df87a91ac53ca2678893bfc4dee7db4ace5bf95
| 3,235
|
py
|
Python
|
radix_tree.py
|
mouradmourafiq/data-analysis
|
1df2ca020a554f1fdab7cc9e53115e249cc199ac
|
[
"BSD-2-Clause"
] | 17
|
2015-04-01T12:11:31.000Z
|
2022-03-15T16:44:01.000Z
|
radix_tree.py
|
mouradmourafiq/data-analysis
|
1df2ca020a554f1fdab7cc9e53115e249cc199ac
|
[
"BSD-2-Clause"
] | null | null | null |
radix_tree.py
|
mouradmourafiq/data-analysis
|
1df2ca020a554f1fdab7cc9e53115e249cc199ac
|
[
"BSD-2-Clause"
] | 17
|
2015-01-14T14:59:40.000Z
|
2021-07-01T05:46:14.000Z
|
# -*- coding: utf-8 -*-
'''
Created on Dec 01, 2012
@author: Mourad Mourafiq
About: This is an attempt to implement the radix tree algo.
Features :
-> insert
-> remove
-> search
'''
NOK = "{'':[]}"
class Prefixer():
def __init__(self):
self.__data = {}
def __repr__(self):
return 'Prefixer(%s)' % (self.__data,)
def __eq__(self, other):
return self.__data == other.__data
def get_data(self):
return self.__data
def insert(self, word, item_id):
node = self.__data
while word:
prefix, key = self.longest_prefix(word, node.keys())
if not prefix:
break
len_prefix = len(prefix)
if prefix != key:
# split key into prefix:suffix, move data
suffix = key[len_prefix:]
current_node = node[key]
node[prefix] = {suffix: current_node}
del node[key]
word = word[len_prefix:]
node = node[prefix]
if word:
node[word] = eval(NOK)
node[word][''].append(item_id)
else:
try:
node[word].append(item_id)
except:
node[word] = []
node[word].append(item_id)
return True
def remove(self, word, item_id):
node = self.__data
while word:
prefix, key = self.longest_prefix(word, node.keys())
if not prefix:
return False
node = node.get(prefix, None)
if not node:
return False
word = word[len(prefix):]
try:
node[''].remove(item_id)
return True
except:
return False
def _search_dico(self, word):
node = self.__data
while word:
prefix, key = self.longest_prefix(word, node.keys())
if not prefix:
return False
if not key:
return False
if prefix != key:
if prefix == word:
return node[key]
else:
return False
node = node[prefix]
word = word[len(prefix):]
return node
def search(self, word):
dico = self._search_dico(word)
        if dico is not False:
return self.traverse_dico(dico)
return []
@staticmethod
def traverse_dico(dico):
results = []
for key, value in dico.iteritems():
if key == '':
results += value
else:
results += Prefixer.traverse_dico(value)
return results
@staticmethod
def longest_prefix(word, candidates):
"""
return the longest prefix match between word and any of the
        candidates, if any. Only one candidate will match.
"""
if word:
wc = word[0]
for c in candidates:
if c.startswith(wc):
for i in reversed(xrange(1, min(len(word), len(c)) + 1)):
if c.startswith(word[:i]):
return (word[:i], c)
return ('', None)
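# --- Usage sketch (illustrative only; the words and ids below are made up,
# and the module targets Python 2, e.g. iteritems/xrange) ---
if __name__ == '__main__':
    prefixer = Prefixer()
    prefixer.insert("roman", 1)
    prefixer.insert("romance", 2)
    prefixer.insert("carpet", 3)
    # searching by prefix returns the ids of all inserted words sharing that prefix
    print(prefixer.search("rom"))    # -> [1, 2] (order may vary)
    prefixer.remove("roman", 1)
    print(prefixer.search("rom"))    # -> [2]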
| 27.415254
| 77
| 0.483462
| 3,018
| 0.932921
| 0
| 0
| 785
| 0.242658
| 0
| 0
| 409
| 0.12643
|
3df8c0e29455e554abfe1f3cc62c34726c6ded0b
| 1,264
|
py
|
Python
|
Python/PythonOOP/animals.py
|
JosephAMumford/CodingDojo
|
505be74d18d7a8f41c4b3576ca050b97f840f0a3
|
[
"MIT"
] | 2
|
2018-08-18T15:14:45.000Z
|
2019-10-16T16:14:13.000Z
|
Python/PythonOOP/animals.py
|
JosephAMumford/CodingDojo
|
505be74d18d7a8f41c4b3576ca050b97f840f0a3
|
[
"MIT"
] | null | null | null |
Python/PythonOOP/animals.py
|
JosephAMumford/CodingDojo
|
505be74d18d7a8f41c4b3576ca050b97f840f0a3
|
[
"MIT"
] | 6
|
2018-05-05T18:13:05.000Z
|
2021-05-20T11:32:48.000Z
|
class Animal(object):
def __init__(self,name,health):
self.name = name
self.health = 50
def walk(self):
self.health = self.health - 1
return self
def run(self):
self.health = self.health - 5
return self
def display_health(self):
print "Health: " + str(self.health)
return self
# Create instance of Animal
animal1 = Animal("Edgar",30)
animal1.walk().walk().walk().run().run().display_health()
class Dog(Animal):
def pet(self):
self.health = self.health + 5
return self
# Create instance of Dog
dog1 = Dog("Raspberry",150)
dog1.walk().walk().walk().run().run().pet().display_health()
class Dragon(Animal):
def fly(self):
self.health = self.health - 10
return self
def display_health(self):
print "I am a Dragon"
return self
# Create instance of Dragon
dragon1 = Dragon("Phantoon", 500)
dragon1.walk().run().fly().fly().fly().display_health()
# Create new Animal
animal2 = Animal("Probos",200)
#animal2.pet()
#AttributeError: 'Animal' object has no attribute 'pet'
#animal2.fly()
#AttributeError: 'Animal' object has no attribute 'fly'
animal2.display_health()
#Health: 50 - does not say "I am a Dragon"
| 22.175439
| 60
| 0.630538
| 640
| 0.506329
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.267405
|
3dfa41325fc23f6087b7a1ae8181579baa35af0a
| 17,915
|
py
|
Python
|
ai4water/preprocessing/transformations/_wrapper.py
|
moonson619/AI4Water-1
|
285d46824502b6a787e42570b72432f4f6acf45e
|
[
"MIT"
] | 17
|
2021-05-21T13:01:52.000Z
|
2022-03-19T15:17:10.000Z
|
ai4water/preprocessing/transformations/_wrapper.py
|
moonson619/AI4Water-1
|
285d46824502b6a787e42570b72432f4f6acf45e
|
[
"MIT"
] | 3
|
2021-10-31T22:40:28.000Z
|
2021-11-08T02:28:35.000Z
|
ai4water/preprocessing/transformations/_wrapper.py
|
moonson619/AI4Water-1
|
285d46824502b6a787e42570b72432f4f6acf45e
|
[
"MIT"
] | 7
|
2021-08-06T07:27:50.000Z
|
2022-01-26T00:38:32.000Z
|
from typing import Union, List, Dict
import numpy as np
import pandas as pd
from ai4water.utils.utils import jsonize, deepcopy_dict_without_clone
from ai4water.preprocessing.transformations import Transformation
class Transformations(object):
"""
While the [Transformation][ai4water.preprocessing.transformations.Transformation]
class is useful to apply a single transformation to a single data source, this
    class is helpful for applying multiple transformations to a single data
    source or multiple transformations to multiple data sources. This class is
    especially designed to be applied as part of `model` inside the `fit`,
    `predict` or `evaluate` methods. The `fit_transform` method should be applied
    before feeding the data to the algorithm and the `inverse_transform` method
    should be called after the algorithm has worked with the data.
Examples:
>>> import numpy as np
>>> from ai4water.preprocessing.transformations import Transformations
>>> x = np.arange(50).reshape(25, 2)
>>> transformer = Transformations(['a', 'b'], config=['minmax', 'zscore'])
>>> x_ = transformer.fit_transform(x)
>>> _x = transformer.inverse_transform(x_)
Apply multiple transformations on multiple arrays which are passed as list
>>> transformer = Transformations([['a', 'b'], ['a', 'b']],
config=['minmax', 'zscore'])
>>> x1 = np.arange(50).reshape(25, 2)
>>> x2 = np.arange(50, 100).reshape(25, 2)
>>> x1_ = transformer.fit_transform([x1, x2])
>>> _x1 = transformer.inverse_transform(x1_)
We can also do more complicated stuff as following
>>> transformer = Transformations({'x1': ['a', 'b'], 'x2': ['a', 'b']},
config={'x1': ['minmax', 'zscore'],
'x2': [{'method': 'log', 'features': ['a', 'b']},
{'method': 'robust', 'features': ['a', 'b']}]
})
>>> x1 = np.arange(20).reshape(10, 2)
>>> x2 = np.arange(100, 120).reshape(10, 2)
>>> x = {'x1': x1, 'x2': x2}
>>> x_ = transformer.fit_transform(x)
>>> _x = transformer.inverse_transform(x_)
In above example we apply `minmax` and `zscore` transformations on x1
and `log` and `robust` transformations on x2 array
"""
def __init__(
self,
feature_names: Union[list, dict],
config: Union[str, list, dict] = None,
):
"""
Arguments:
feature_names:
names of features in data
config:
Determines the type of transformation to be applied on data.
It can be one of the following types
- `string` when you want to apply single transformation
```python
>>> config='minmax'
```
- `dict`: to pass additional arguments to the [Transformation][ai4water.preprocessing.Transformation]
class
```python
>>> config = {"method": 'log', 'treat_negatives': True, 'features': ['features']}
```
- `list` when we want to apply multiple transformations
```python
>>> ['minmax', 'zscore']
```
or
```python
>>> [{"method": 'log', 'treat_negatives': True, 'features': ['features']},
>>> {'method': 'sqrt', 'treat_negatives': True}]
```
"""
self.names = feature_names
self.t_config = config
def _fetch_transformation(self, data):
config = self.t_config
if isinstance(data, list):
if isinstance(config, str):
config = [config for _ in range(len(data))]
elif isinstance(data, dict):
if isinstance(config, str):
config = {k:config for k in data.keys()}
return config
def _check_features(self):
if self.is_numpy_:
assert isinstance(self.names, list), f"""
feature_names are of type {type(self.names)}"""
elif self.is_list_:
for n in self.names:
assert isinstance(n, list), f"""
feature_names {type(n)} don't match data"""
elif self.is_dict_:
assert isinstance(self.names, dict), f"""
feature_names are of type {type(self.names)}"""
for src_name, n in self.names.items():
assert n.__class__.__name__ in ["ListWrapper", 'list']
return
def fit_transform(self, data:Union[np.ndarray, List, Dict]):
"""Transforms the data according the the `config`.
Arguments:
data:
The data on which to apply transformations. It can be one of following
- a (2d or 3d) numpy array
- a list of numpy arrays
- a dictionary of numpy arrays
Returns:
The transformed data which has same type and dimensions as the input data
"""
setattr(self, 'is_numpy_', False)
setattr(self, 'is_list_', False)
setattr(self, 'is_dict_', False)
setattr(self, 'scalers_', {})
if self.t_config is None: # if no transformation then just return the data as it is
return data
orignal_data_type = data.__class__.__name__
if isinstance(data, np.ndarray):
setattr(self, 'is_numpy_', True)
elif isinstance(data, list):
setattr(self, 'is_list_', True)
elif isinstance(data, dict):
setattr(self, 'is_dict_', True)
else:
raise ValueError(f"invalid data of type {data.__class__.__name__}")
# first check that data matches config
self._check_features()
# then apply transformation
data = self._fit_transform(data)
# now pack it in original form
assert data.__class__.__name__ == orignal_data_type, f"""
type changed from {orignal_data_type} to {data.__class__.__name__}
"""
#self._assert_same_dim(self, orignal_data, data)
return data
def _transform_2d(self, data, columns, transformation=None, key="5"):
"""performs transformation on single data 2D source"""
# it is better to make a copy here because all the operations on data happen after this.
data = data.copy()
scalers = {}
if transformation:
if isinstance(transformation, dict):
transformer = Transformation(**transformation)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
scalers[key] = transformer.config()
# we want to apply multiple transformations
elif isinstance(transformation, list):
for idx, trans in enumerate(transformation):
if isinstance(trans, str):
transformer = Transformation(method=trans)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
scalers[f'{key}_{trans}_{idx}'] = transformer.config()
elif trans['method'] is not None:
transformer = Transformation(**trans)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
scalers[f'{key}_{trans["method"]}_{idx}'] = transformer.config()
else:
assert isinstance(transformation, str)
transformer = Transformation(method=transformation)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
scalers[key] = transformer.config()
data = data.values
self.scalers_.update(scalers)
return data
def __fit_transform(self, data, feature_names, transformation=None, key="5"):
"""performs transformation on single data source
In case of 3d array, the shape is supposed to be following
(num_examples, time_steps, num_features)
        Therefore, each time_step is extracted and transformed individually.
        For example, with time_steps of 2, two 2d arrays will be extracted and
        transformed individually:
        (num_examples, 0, num_features), (num_examples, 1, num_features)
        (A usage sketch illustrating this appears at the end of this module.)
"""
if data.ndim == 3:
_data = np.full(data.shape, np.nan)
for time_step in range(data.shape[1]):
_data[:, time_step] = self._transform_2d(data[:, time_step],
feature_names,
transformation,
key=f"{key}_{time_step}")
else:
_data = self._transform_2d(data, feature_names, transformation, key=key)
return _data
def _fit_transform(self, data, key="5"):
"""performs transformation on every data source in data"""
transformation = self._fetch_transformation(data)
if self.is_numpy_:
_data = self.__fit_transform(data, self.names, transformation, key)
elif self.is_list_:
_data = []
for idx, array in enumerate(data):
_data.append(self.__fit_transform(array,
self.names[idx],
transformation[idx],
key=f"{key}_{idx}")
)
else:
_data = {}
for src_name, array in data.items():
_data[src_name] = self.__fit_transform(array,
self.names[src_name],
transformation[src_name],
f"{key}_{src_name}")
return _data
def inverse_transform(self, data):
"""inverse transforms data where data can be dictionary, list or numpy
array.
Arguments:
data:
the data which is to be inverse transformed. The output of
`fit_transform` method.
Returns:
The original data which was given to `fit_transform` method.
"""
if not hasattr(self, 'scalers_'):
raise ValueError(f"Transformations class has not been fitted yet")
return self._inverse_transform(data)
def _inverse_transform(self, data, key="5"):
transformation = self._fetch_transformation(data)
if self.is_numpy_:
data = self.__inverse_transform(data, self.names, transformation, key)
elif self.is_list_:
assert isinstance(data, list)
_data = []
for idx, src in enumerate(data):
__data = self.__inverse_transform(src,
self.names[idx],
transformation[idx],
f'{key}_{idx}')
_data.append(__data)
data = _data
elif self.is_dict_:
assert isinstance(data, dict)
_data = {}
for src_name, src in data.items():
_data[src_name] = self.__inverse_transform(src,
self.names[src_name],
transformation[src_name],
f'{key}_{src_name}')
data = _data
return data
def __inverse_transform(self, data, feature_names, transformation, key="5"):
"""inverse transforms one data source which may 2d or 3d nd array"""
if data.ndim == 3:
_data = np.full(data.shape, np.nan)
for time_step in range(data.shape[1]):
_data[:, time_step] = self._inverse_transform_2d(data[:, time_step],
columns=feature_names,
transformation=transformation,
key=f"{key}_{time_step}")
else:
_data = self._inverse_transform_2d(data, feature_names, key, transformation)
return _data
def _inverse_transform_2d(self, data, columns, key, transformation)->np.ndarray:
"""inverse transforms one 2d array"""
data = pd.DataFrame(data.copy(), columns=columns)
if transformation is not None:
if isinstance(transformation, str):
if key not in self.scalers_:
raise ValueError(f"""
key `{key}` for inverse transformation not found. Available keys are {list(self.scalers_.keys())}""")
scaler = self.scalers_[key]
scaler, shape = scaler, scaler['shape']
original_shape = data.shape
transformer = Transformation.from_config(scaler)
transformed_data = transformer.inverse_transform(data)
data = transformed_data
elif isinstance(transformation, list):
# idx and trans both in reverse form
for idx, trans in reversed(list(enumerate(transformation))):
if isinstance(trans, str):
scaler = self.scalers_[f'{key}_{trans}_{idx}']
scaler, shape = scaler, scaler['shape']
transformer = Transformation.from_config(scaler)
data = transformer.inverse_transform(data=data)
elif trans['method'] is not None:
features = trans.get('features', columns)
                        # if any of the features in data were transformed
                        if any([True if f in data else False for f in features]):
                            orig_cols = data.columns  # copy the columns in the original df
scaler = self.scalers_[f'{key}_{trans["method"]}_{idx}']
scaler, shape = scaler, scaler['shape']
data, dummy_features = conform_shape(data, shape, features) # get data to transform
transformer = Transformation.from_config(scaler)
transformed_data = transformer.inverse_transform(data=data)
data = transformed_data[orig_cols] # remove the dummy data
elif isinstance(transformation, dict):
features = transformation.get('features', columns)
if any([True if f in data else False for f in features]):
orig_cols = data.columns
scaler = self.scalers_[key]
scaler, shape = scaler, scaler['shape']
data, dummy_features = conform_shape(data, shape, features=features)
transformer = Transformation.from_config(scaler)
transformed_data = transformer.inverse_transform(data=data)
data = transformed_data[orig_cols] # remove the dummy data
if data.__class__.__name__ == "DataFrame":
data = data.values # there is no need to return DataFrame
return data
def config(self)->dict:
"""returns a python dictionary which can be used to construct this class
in fitted form i.e as if the fit_transform method has already been applied.
Returns:
a dictionary from which `Transformations` class can be constructed
"""
return {
'scalers_': jsonize(self.scalers_),
"feature_names": self.names,
"config": self.t_config,
"is_numpy_": self.is_numpy_,
"is_dict_": self.is_dict_,
"is_list_": self.is_list_,
}
@classmethod
def from_config(cls, config:dict)->"Transformations":
"""constructs the Transformations class which may has already been fitted.
"""
config = deepcopy_dict_without_clone(config)
transformer = cls(config.pop('feature_names'), config.pop('config'))
for attr_name, attr_val in config.items():
setattr(cls, attr_name, attr_val)
return transformer
def conform_shape(data, shape, features=None):
# if the difference is of only 1 dim, we resolve it
if data.ndim > len(shape):
data = np.squeeze(data, axis=-1)
elif data.ndim < len(shape):
data = np.expand_dims(data, axis=-1)
    assert data.ndim == len(shape), f"""original data had {len(shape)} dimensions while the
    new data has {data.ndim} dimensions"""
    # how many dummy features we have to add to match the shape
dummy_features = shape[-1] - data.shape[-1]
if data.__class__.__name__ in ['DataFrame', 'Series']:
# we know what features must be in data, so put them in data one by one
# if they do not exist in data already
if features:
for f in features:
if f not in data:
data[f] = np.random.random(len(data))
# identify how many features to be added by shape information
elif dummy_features > 0:
dummy_data = pd.DataFrame(np.random.random((len(data), dummy_features)))
data = pd.concat([dummy_data, data], axis=1)
else:
dummy_data = np.random.random((len(data), dummy_features))
data = np.concatenate([dummy_data, data], axis=1)
return data, dummy_features
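# --- Usage sketch (illustrative only, not part of the original module; the
# shapes, feature names and the 'minmax' method are assumptions) ---
# Demonstrates the 3d handling described in ``__fit_transform``: each time step
# of a (num_examples, time_steps, num_features) array is transformed on its own.
if __name__ == "__main__":
    x3d = np.arange(150).reshape(25, 3, 2).astype(float)
    transformer = Transformations(['a', 'b'], config='minmax')
    x3d_t = transformer.fit_transform(x3d)        # the 3 time steps are scaled separately
    x3d_back = transformer.inverse_transform(x3d_t)
    print(np.allclose(x3d, x3d_back))             # expected: True (up to float error)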
| 42.252358
| 121
| 0.547251
| 16,459
| 0.918727
| 0
| 0
| 422
| 0.023556
| 0
| 0
| 7,008
| 0.391181
|
3dfbbd5b64a3c6157f0b5de85518ecc1e0323285
| 3,684
|
py
|
Python
|
main/tagcn_training.py
|
Stanislas0/KDD_CUP_2020_MLTrack2_SPEIT
|
7362104002225055715f05ccfc5ee8f6ef433d50
|
[
"Apache-2.0"
] | 18
|
2020-09-10T06:48:22.000Z
|
2022-01-25T18:22:52.000Z
|
main/tagcn_training.py
|
Stanislas0/KDD_CUP_2020_MLTrack2_SPEIT
|
7362104002225055715f05ccfc5ee8f6ef433d50
|
[
"Apache-2.0"
] | null | null | null |
main/tagcn_training.py
|
Stanislas0/KDD_CUP_2020_MLTrack2_SPEIT
|
7362104002225055715f05ccfc5ee8f6ef433d50
|
[
"Apache-2.0"
] | null | null | null |
import os
import dgl
import time
import argparse
import numpy as np
import torch as th
import distutils.util
import torch.nn.functional as F
import utils
import models
import data_loader
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
dev = th.device('cuda' if th.cuda.is_available() else 'cpu')
if __name__ == '__main__':
argparser = argparse.ArgumentParser("training")
argparser.add_argument('--adj-path', type=str, default='../data/adj_matrix_formal_stage.pkl')
argparser.add_argument('--feat-path', type=str, default='../data/feature_formal_stage.npy')
argparser.add_argument('--label-path', type=str, default='../data/train_labels_formal_stage.npy')
argparser.add_argument('--output-dir', type=str, default='./saved_models/')
argparser.add_argument('--output-name', type=str, default='tagcn_128_3.pkl')
argparser.add_argument('--if-load-model', type=lambda x: bool(distutils.util.strtobool(x)), default=False)
argparser.add_argument('--model-dir', type=str, default='./saved_models/')
argparser.add_argument('--model-name', type=str, default='tagcn_128_3.pkl')
argparser.add_argument('--num-epochs', type=int, default=5000)
argparser.add_argument('--num-hidden', type=int, default=128)
argparser.add_argument('--num-layers', type=int, default=3)
argparser.add_argument('--lr', type=float, default=0.001)
argparser.add_argument('--dropout', type=float, default=0.1)
argparser.add_argument('--adj-norm', type=lambda x: bool(distutils.util.strtobool(x)), default=True)
argparser.add_argument('--feat-norm', type=str, default=None)
args = argparser.parse_args()
print(vars(args))
    dataset = data_loader.KddDataset(args.adj_path, args.feat_path, args.label_path)
adj = dataset.adj
features = dataset.features
labels = dataset.labels
train_mask = dataset.train_mask
val_mask = dataset.val_mask
test_mask = dataset.test_mask
size_raw = features.shape[0]
size_reduced = size_raw - 50000
graph = dgl.DGLGraph()
if args.adj_norm:
adj = utils.adj_preprocess(adj)
feat_norm_func = utils.feat_norm(args.feat_norm)
graph.from_scipy_sparse_matrix(adj)
features = th.FloatTensor(features).to(dev)
features[th.where(features < -1.0)[0]] = 0
features[th.where(features > 1.0)[0]] = 0
features = feat_norm_func(features)
labels = th.LongTensor(labels).to(dev)
graph.ndata['features'] = features
model = models.TAGCN(100, args.num_hidden, 20, args.num_layers, activation=F.leaky_relu, dropout=args.dropout)
if args.if_load_model:
model_states = th.load(os.path.join(args.model_dir, args.model_name), map_location=dev)
model.load_state_dict(model_states)
model = model.to(dev)
optimizer = th.optim.Adam(model.parameters(), lr=args.lr)
dur = []
for epoch in range(args.num_epochs):
t0 = time.time()
logits = model(graph, features).to(dev)
logp = F.log_softmax(logits, 1)[:size_reduced]
loss = F.nll_loss(logp[train_mask], labels[train_mask]).to(dev)
optimizer.zero_grad()
loss.backward()
optimizer.step()
dur.append(time.time() - t0)
if epoch % 10 == 0:
train_acc = utils.compute_acc(logp, labels, train_mask)
val_acc = utils.compute_acc(logp, labels, val_mask)
print('Epoch {:05d} | Loss {:.4f} | Train Acc {:.4f} | Val Acc {:.4f} '
'| Time(s) {:.4f} | GPU {:.1f} MiB'.format(
epoch, loss, train_acc, val_acc, np.mean(dur), th.cuda.max_memory_allocated() / 1000000))
th.save(model.state_dict(), os.path.join(args.output_dir, args.output_name))
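# --- Example invocation (illustrative; the paths are the script defaults above,
# the remaining values are arbitrary) ---
# python tagcn_training.py --adj-path ../data/adj_matrix_formal_stage.pkl \
#     --feat-path ../data/feature_formal_stage.npy \
#     --label-path ../data/train_labels_formal_stage.npy \
#     --num-epochs 500 --lr 0.001 --output-name tagcn_128_3.pkl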
| 41.393258
| 114
| 0.683496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 540
| 0.14658
|
3dfd83b71400b6e832cb757945e612ae86e6bd4c
| 27,127
|
py
|
Python
|
AltFS.py
|
g-mc/AltFS
|
4d83a928cb1f1ec127e9565b578779ec4e659dae
|
[
"BSD-3-Clause"
] | 54
|
2019-02-27T15:57:27.000Z
|
2021-10-10T21:51:50.000Z
|
AltFS.py
|
g-mc/AltFS
|
4d83a928cb1f1ec127e9565b578779ec4e659dae
|
[
"BSD-3-Clause"
] | null | null | null |
AltFS.py
|
g-mc/AltFS
|
4d83a928cb1f1ec127e9565b578779ec4e659dae
|
[
"BSD-3-Clause"
] | 11
|
2019-03-01T19:07:25.000Z
|
2020-12-03T14:56:44.000Z
|
#!/usr/bin/env python
"""
BSD 3-Clause License
Copyright (c) 2017, SafeBreach Labs
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Alternate Fileless File System
References:
Author: Dor Azouri <dor.azouri@safebreach.com>
Date: 2019-01-01
"""
import logging
import types
from exceptions_ import BucketValueMissingException, \
EndOfFileReachedException, \
FileIsClosedException, \
FileNotFoundException, \
InternalStorageOperationException, \
UnsupportedProviderException
from model.block import Block
from model.descriptor import Descriptor
import providers
from providers.common.calculations import \
calculate_bits_sum, \
calculate_next_available_index, \
split_string_by_max_size
from providers.common.machine_identification import \
get_machine_identification_string
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class AltFS(object):
"""
Alternate Fileless File System.
Base class for all usages
"""
class File(object):
"""Provides a file-descriptor-like interface for AltFS files"""
def __init__(self, altfs, name):
"""File constructor"""
self._altfs = altfs
self._pointer = 0
self.name = name
self._closed = False
def _decorator(func):
def check_closed(*args):
self = args[0]
if self._closed:
raise FileIsClosedException
return func(*args)
return check_closed
def _set_open(self):
"""Explicitly sets the file status to OPEN"""
self._closed = False
@_decorator
def get_pointer(self):
"""Returns the current pointer offset in file"""
return self._pointer
@_decorator
def set_pointer(self, position):
"""Sets the pointer offset in file"""
if position >= self.size():
self._pointer = self.size()
elif position < 0:
self._pointer = 0
else:
self._pointer = position
@_decorator
def write(self, data):
"""Writes the given data to file, starting from the pointer"""
self._altfs.write_file(self.name, self._pointer, data)
@_decorator
def read(self, size):
"""
Returns data from file.
starting from the pointer, and until the given size (in bytes)
"""
return self._altfs.read_file(self.name, self._pointer, size)
@_decorator
def seek(self, offset):
"""
Move the file pointer by the given offset.
Offset may be negative
"""
self._pointer += offset
@_decorator
def delete(self):
"""
Deletes the file.
The instance is not deleted, but set to CLOSED
"""
self._closed = True
self._altfs.delete_file(self.name)
@_decorator
def close(self):
"""Closes the file"""
self._closed = True
@_decorator
def size(self):
"""Returns the file size"""
return self._altfs.get_size(self.name)
def __str__(self):
"""Returns the string representation of the file instance"""
return "<File: name: %s, status: %s, pointer: %s, size: %s>" % \
(self.name, "CLOSED" if self._closed else "OPEN",
self._pointer, self._altfs.get_size(self.name))
def __init__(self, storage_provider_name, machine_identification_method,
max_block_size, **kwargs):
"""
Constructor for a new AltFS.
A new AltFS instance is created, given the storage provider name,
the machine identification method name,
and the desired maximal block size for that AltFS.
Note:
* Provider must reside in /providers and implement StorageProvider
* Machine identification method name should be implemented in
/providers/common/machine_identification.py and exported through
the global METHODS dictionary.
"""
logger.debug("initializing AltFS with storage provider: %s, " +
"machine identification method: %s" %
storage_provider_name, machine_identification_method)
# calculate checksum of machine identification string, used for
# calculating the bucket index of the first file system block
machine_identification_string = get_machine_identification_string(
machine_identification_method)
self._set_machine_id_checksum(machine_identification_string)
# initialize desired provider
self._storage_providers = AltFS._load_providers()
if storage_provider_name not in self._storage_providers:
raise UnsupportedProviderException(storage_provider_name)
self._storage_provider = \
self._storage_providers[storage_provider_name](
machine_identification_string, **kwargs)
# set the buckets count, used for the modulus hash function
self._buckets_count = self._storage_provider.get_buckets_count()
# set the first bucket ID, used for the fs descriptor (superblock)
self._first_bucket_id = \
self._machine_id_checksum % self._buckets_count
# set the max data block size
self.max_block_size = max_block_size
# log calculated initialization info
logger.info("INIT:number of buckets (=divider): %s" %
self._buckets_count)
logger.info("INIT:machine identification string: %s" %
machine_identification_string)
logger.info("INIT:machine identification checksum: %s" %
self._machine_id_checksum)
logger.info("INIT:first bucket ID: %s" %
self._first_bucket_id)
# iterate all buckets in storage to fill the blocks mapping
self._load_blocks_dict()
# load the descriptor superblock/create fresh if it does not exist
self._load_descriptor()
# mapping of open files (volatile runtime File instances)
self.files = {}
def _set_machine_id_checksum(self, machine_identification_string):
"""Sets the calculated checksum of the machine identification string"""
self._machine_id_checksum = calculate_bits_sum(
machine_identification_string)
@staticmethod
def _load_providers():
"""
Loads the available providers.
Iterates provider modules in package, to dynamically obtain a list of
available storage providers' names, and picks only the ones that
implement the StorageProvider base class.
"""
storage_providers = {}
for symbol_name in dir(providers):
symbol = getattr(providers, symbol_name)
if not isinstance(symbol, (type, types.ClassType)):
continue
# fill only providers, i.e classes that derive from StorageProvider
if issubclass(symbol, providers.StorageProvider) and \
symbol != providers.StorageProvider:
storage_providers[symbol_name] = symbol
return storage_providers
def _load_blocks_dict(self):
"""
Fills the mapping of {block_id : (bucket_id, value_id)}.
Iterates through all values in all buckets in storage. Determining
which of the iterated values are part of the virtual FS is
provider-dependent.
Note: the filling policy is naive - any exception in the storage layer
is ignored, and iteration continues to next bucket.
"""
self._blocks_dict = {}
for bucket_id in xrange(self._buckets_count):
try:
values = self._storage_provider.get_value_ids_in_bucket(
bucket_id)
except Exception as e:
logger.error(e, exc_info=True)
continue
for value_id in values:
block = self._get_block(bucket_id, value_id)
self._blocks_dict[block.block_id] = (bucket_id, value_id)
def _load_descriptor(self):
"""
Loads the descriptor instance from the superblock.
Creates an empty descriptor if such block does not exist,
and writes it to storage.
"""
self._descriptor = Descriptor()
try: # try load the existing descriptor from superblock
first_block_data = self._storage_provider.get_block(
self._first_bucket_id, 0)
block = Block.generate_block_from_packed_str(first_block_data)
self._descriptor.__dict__ = block.data
except BucketValueMissingException: # superblock does not exist
logger.error("superblock does not exist. Creating a new empty one")
# create an empty descriptor and write it as a superblock (id=0)
self._write_block(
self._first_bucket_id, 0, self._generate_descriptor_block())
def _get_next_available_block_id(self, count=1, blacklist=None):
"""
Returns the next _count_ available block IDs.
Considering the IDs given in the blacklist parameter.
The next ID is the lowest available (re-use)
"""
if blacklist is None:
blacklist = []
ids = []
existing_ids = self._blocks_dict.keys() + blacklist
for i in xrange(count):
id_ = calculate_next_available_index(existing_ids)
ids.append(id_)
existing_ids.append(id_)
if count == 1:
return ids[0]
return ids
def _get_block(self, bucket_id, value_id):
"""
        Loads the block data from the desired value.
        Returns it as a Block instance.
Raises InternalStorageOperationException if provider has failed to read
"""
try:
block = Block.generate_block_from_packed_str(
self._storage_provider.get_block(bucket_id, value_id))
except Exception as e:
logger.error("reading of block at (%s:%s) has failed: %s" %
(bucket_id, value_id, str(e)))
raise InternalStorageOperationException(
InternalStorageOperationException.OPERATION_READ, str(e))
logger.debug("a block was read at (%s:%s):%s" %
(bucket_id, value_id, block.__dict__))
return block
def _get_block_by_id(self, block_id):
"""Returns a Block instance of the desired block ID."""
return self._get_block(*self._blocks_dict[block_id])
def _generate_data_termination_block(self, data="", block_id=None):
"""
Returns a Block instance to be used as the last data block of a file.
It closes the chain of data blocks by pointing to the superblock as
next block.
"""
new_block_id = block_id if block_id is not None else \
self._get_next_available_block_id()
return Block(block_id=new_block_id,
block_type=Block.TYPE_DATA,
data_length=len(data),
next_block_id=0,
data=data)
def _generate_descriptor_block(self):
"""
Returns a Block instance of type TYPE_DESCRIPTOR.
The current descriptor object is saved to it.
Note: The next block ID field is redundant so it's given a constant 1.
"""
return Block(block_id=0,
block_type=Block.TYPE_DESCRIPTOR,
data_length=len(self._descriptor.serialize()),
next_block_id=1,
data=self._descriptor.__dict__)
def _write_block(self, bucket_id, value_id, block):
"""
Writes the given Block instance to the given value_id.
Overrides the existing one.
Returns the value ID to which the block was written.
Note: if the given value ID is None, a new value is created in the
given bucket, and the block is written to it
Raises InternalStorageOperationException if provider failed to write
"""
logger.debug("writing block at (%s:%s):%s" %
(bucket_id, value_id, block.__dict__))
try:
value_id = self._storage_provider.write_block(
bucket_id, value_id, data=block.serialize())
except Exception as e:
logger.error("writing of block (id:%s) to (%s:%s) has failed: %s" %
(block.block_id, bucket_id, value_id, str(e)))
raise InternalStorageOperationException(
InternalStorageOperationException.OPERATION_WRITE, str(e))
# add the new block mapping
self._blocks_dict[block.block_id] = (bucket_id, value_id)
return value_id
def _get_blocks_generator(self, start_block_id):
"""
A generator for blocks in a linked chain.
Starting from the given block ID, ending in the data termination block.
"""
bucket_id, value_id = self._blocks_dict[start_block_id]
while True:
block = self._get_block(bucket_id, value_id)
if block.block_id == 0:
break
yield block
bucket_id, value_id = self._blocks_dict[block.next_block_id]
def _delete_value(self, bucket_id, value_id):
"""
Deletes the value in the given bucket and value IDs.
Raises InternalStorageOperationException if provider failed to delete
"""
block = self._get_block(bucket_id, value_id)
logger.debug("deleting block ID %s (%s:%s)" %
(block.block_id, bucket_id, value_id))
try:
self._storage_provider.delete_block(bucket_id, value_id)
except Exception as e:
logger.error(
"deleting of block (id:%s) to (%s:%s) has failed: %s" %
(block.block_id, bucket_id, value_id, str(e)))
raise InternalStorageOperationException(
InternalStorageOperationException.OPERATION_DELETE, str(e))
# remove the mapping of the deleted block
del self._blocks_dict[block.block_id]
def _delete_data_blocks(self, start_block_id, until_block_id=None):
"""
Delete a chain of linked blocks.
Starting from the given block ID, ending in the data termination block.
"""
for block in list(self._get_blocks_generator(start_block_id)):
if until_block_id is not None and block.block_id == until_block_id:
break
bucket_id, value_id = self._blocks_dict[block.block_id]
self._delete_value(bucket_id, value_id)
def _get_block_by_file_offset(self, file_name, offset):
"""
Returns a tuple of: (block, offset inside block).
The block is the one in which the given offset is located
"""
start_block_id = self._descriptor.files_dict[file_name]
position = 0
for block in self._get_blocks_generator(start_block_id):
if position <= offset <= position + block.data_length:
return block, offset - position
position += block.data_length
raise EndOfFileReachedException(
"The given offset exceeds the file size")
def _create_data_blocks(self, data, terminating_at=None):
"""
Writes a chain of data blocks to hold the given data.
Optional terminating_at parameter defines the next_block_id of the last
data block in the chain. If omitted, the chain ends at the superblock.
"""
if len(data) == 0:
return []
chunks = list(split_string_by_max_size(data, self.max_block_size))
new_block_ids = self._get_next_available_block_id(count=len(chunks))
if isinstance(new_block_ids, int):
new_block_ids = [new_block_ids]
if terminating_at:
new_block_ids.append(terminating_at)
else:
new_block_ids.append(self._get_next_available_block_id(
count=1, blacklist=new_block_ids))
chunk = ""
for chunk_id, chunk in zip(range(len(chunks)), chunks):
new_block = Block(block_id=new_block_ids[chunk_id],
block_type=Block.TYPE_DATA,
data_length=len(chunk),
next_block_id=new_block_ids[chunk_id + 1],
data=chunk)
bucket_id = calculate_bits_sum(chunk) % self._buckets_count
self._write_block(bucket_id, None, new_block)
if not terminating_at:
new_block = self._generate_data_termination_block(
block_id=new_block_ids[-1])
bucket_id = calculate_bits_sum(chunk) % self._buckets_count
self._write_block(bucket_id, None, new_block)
return new_block_ids
def _update_block(self, block_id, **kwargs):
logging.debug("updating block (id=%s) with kwargs:%s" %
(block_id, str(kwargs)))
block = self._get_block_by_id(block_id)
bucket_id, value_id = self._blocks_dict[block.block_id]
for k, v in kwargs.iteritems():
setattr(block, k, v)
self._write_block(bucket_id, value_id, block)
def create_file(self, file_name):
"""
Returns a File object of the given name.
Note:
        * If a file with that name already exists, its corresponding File
instance is returned.
* Otherwise, the required data blocks are written to storage, and a
corresponding File instance is returned.
"""
if file_name in self.files:
# in case a File object already exists - return it
self.files[file_name]._set_open()
self.files[file_name].set_pointer(0)
return self.files[file_name]
if file_name not in self._descriptor.files_dict:
# in case file doesn't exist in storage - create it.
# creating a new empty file means adding only a single data
# termination block, as there are no actual data blocks yet
block = self._generate_data_termination_block()
# adding the required mapping needed in the descriptor:
self._descriptor.add_file(file_name, block.block_id)
# flushing the new descriptor after update, into storage
self._write_block(
self._first_bucket_id, 0, self._generate_descriptor_block())
# calculate the target bucket ID for the new block, by applying the
# hash function on the file name
target_bucket_id = calculate_bits_sum(
file_name) % self._buckets_count
# write the data termination block to the calculated bucket,
# creating a new value by passing None as the value ID
value_id = self._write_block(
target_bucket_id, None, block)
# add the new block mapping
self._blocks_dict[block.block_id] = (
target_bucket_id, value_id)
# in case the file exists in storage, a new File instance is created.
# We also do it in case we have just created the actual file in storage
self.files[file_name] = AltFS.File(self, file_name)
return self.files[file_name]
def delete_file(self, file_name):
"""
Deletes a file.
Results in:
* Deletion of all of the file's data blocks, including the data
termination block.
* Removal of the mappings of the file's blocks
* Deletion of the corresponding File instance.
* Removal of the file mapping from the descriptor
(updates the descriptor superblock in storage)
"""
if file_name not in self._descriptor.files_dict:
raise FileNotFoundException("Cannot delete a non-existent file")
block_id = self._descriptor.files_dict[file_name]
self._delete_data_blocks(block_id)
del self.files[file_name]
# remove the file from the descriptor object
self._descriptor.remove_file(file_name)
# write the update descriptor to storage
self._write_block(
self._first_bucket_id, 0, self._generate_descriptor_block())
def read_file(self, file_name, start, size):
"""
Returns the data from file given its name.
Starting from given offset and up to a maximum given size.
"""
if file_name not in self._descriptor.files_dict:
raise FileNotFoundException()
data = ""
position = 0
first_file_block = self._descriptor.files_dict[file_name]
for block in self._get_blocks_generator(first_file_block):
data += block.data
position += block.data_length
if position >= start + size:
break
self.files[file_name].set_pointer(start + size)
return data[start:start + size]
def write_file(self, file_name, start, data):
"""
Writes the given data to file given its name.
Starting from given offset.
"""
# in case data to write exceeds the current file size, create new
# blocks that will be later linked with the block where start is
# located. The overall result in this case should look like this:
# [current blocks...] -> [fork block] -> [new blocks] -> [superblock]
if start + len(data) >= self.get_size(file_name):
# create the data blocks, still not linked, hanging in the air
new_block_ids = self._create_data_blocks(data)
# the new blocks anyway exceed the current file size, so no need to
# connect the end of the new chain back to a current block
until_block_id = None
            # calculate the starting block out of the current file blocks,
# that will link to the new blocks
fork_block, offset_in_fork_block = self._get_block_by_file_offset(
file_name, start)
# in case data to write fits into the file size, we need to link the
# new data blocks from both sides. Eventually, it should look like
# this:
# [current blocks...] -> [fork block] -> [new blocks] ->
# [merging block] -> [current blocks...] -> [superblock]
else:
# calculate the block to which the new blocks end should link to
merging_block, offset_in_merging_block = \
self._get_block_by_file_offset(
file_name, start + len(data) - 1)
            # calculate the starting block out of the current file blocks,
# that will link to the new blocks
fork_block, offset_in_fork_block = self._get_block_by_file_offset(
file_name, start)
# handle edge case where the fork and merging blocks are the same.
# in this case, we just need to override that block's data
if fork_block.block_id == merging_block.block_id:
new_data = fork_block.data[:offset_in_fork_block] + \
data + fork_block.data[offset_in_fork_block + len(data):]
self._update_block(fork_block.block_id,
data=new_data, data_length=len(new_data))
self.files[file_name].set_pointer(start + len(data))
return
# in the general case, we create new data blocks to be connected as
# described, and cut data from the merging block, as the new data
# length demands
else:
new_block_ids = self._create_data_blocks(
data, terminating_at=merging_block.block_id)
if offset_in_merging_block < merging_block.data_length:
new_data = merging_block.data[offset_in_merging_block:]
self._update_block(
merging_block.block_id,
data=new_data,
data_length=len(new_data))
until_block_id = merging_block.block_id
# cut the data in the fork block, as the start offset demands
if offset_in_fork_block < fork_block.data_length:
new_data = fork_block.data[:offset_in_fork_block]
self._update_block(fork_block.block_id,
data=new_data, data_length=len(new_data))
# delete the current blocks, starting from the fork block and ending at
# the merging block/super block (depends on the above case - each case
# sets the until_block_id value accordingly)
self._delete_data_blocks(
fork_block.next_block_id, until_block_id=until_block_id)
self._update_block(fork_block.block_id, next_block_id=new_block_ids[0])
self.files[file_name].set_pointer(start + len(data))
return
def get_file_names(self):
"""Returns the names of all files currently in storage"""
return self._descriptor.get_file_names()
def get_size(self, file_name):
"""Returns the size of file, given its name"""
file_size = 0
first_file_block = self._descriptor.files_dict[file_name]
for block in self._get_blocks_generator(first_file_block):
file_size += block.data_length
return file_size
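# --- Usage sketch (illustrative only; the provider name, identification method
# and block size are hypothetical and depend on the providers package, which is
# not shown here) ---
# afs = AltFS("SomeStorageProvider", "hostname_based", max_block_size=1024)
# f = afs.create_file("notes.txt")
# f.write("hello world")
# f.set_pointer(0)
# print(f.read(11))   # -> "hello world"
# f.delete()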
| 41.798151
| 79
| 0.629742
| 24,823
| 0.915066
| 499
| 0.018395
| 2,370
| 0.087367
| 0
| 0
| 10,749
| 0.396247
|
3dfe1030cd691567d0eb0ceab815ccdf039f3393
| 269
|
py
|
Python
|
python-crypt-service/services/dbservice.py
|
Shirish-Singh/crypt-analysis
|
eed6d00925389ee0973733e6b7397cd460f97f99
|
[
"Apache-2.0"
] | null | null | null |
python-crypt-service/services/dbservice.py
|
Shirish-Singh/crypt-analysis
|
eed6d00925389ee0973733e6b7397cd460f97f99
|
[
"Apache-2.0"
] | null | null | null |
python-crypt-service/services/dbservice.py
|
Shirish-Singh/crypt-analysis
|
eed6d00925389ee0973733e6b7397cd460f97f99
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from configurations import configuration
from pymongo import MongoClient
MONGO_HOST = configuration.MONGO_HOST
client = MongoClient(MONGO_HOST)
class DBConnection():
def getConnection(self):
return client.analyticsDB
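# --- Usage sketch (illustrative; the collection name is hypothetical) ---
# db = DBConnection().getConnection()    # handle to the shared analyticsDB
# doc = db.some_collection.find_one()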
| 20.692308
| 40
| 0.814126
| 84
| 0.312268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3dffaaba0f49d4e4bcf7fb58f40e51bc3b413470
| 448
|
py
|
Python
|
simple_amqp_rpc/data.py
|
rudineirk/py-simple-amqp-rpc
|
823b6efe271732495d4e3ccdcb9f4d85138c1d42
|
[
"MIT"
] | null | null | null |
simple_amqp_rpc/data.py
|
rudineirk/py-simple-amqp-rpc
|
823b6efe271732495d4e3ccdcb9f4d85138c1d42
|
[
"MIT"
] | 1
|
2021-06-01T22:28:43.000Z
|
2021-06-01T22:28:43.000Z
|
simple_amqp_rpc/data.py
|
rudineirk/py-simple-amqp-rpc
|
823b6efe271732495d4e3ccdcb9f4d85138c1d42
|
[
"MIT"
] | null | null | null |
from typing import Any, List
from dataclasses import dataclass, replace
from .consts import OK
class Data:
def replace(self, **kwargs):
return replace(self, **kwargs)
@dataclass(frozen=True)
class RpcCall(Data):
route: str
service: str
method: str
args: List[Any]
@dataclass(frozen=True)
class RpcResp(Data):
status: int
body: Any = None
@property
def ok(self):
return self.status == OK
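# --- Usage sketch (values are illustrative, not taken from the library docs) ---
# call = RpcCall(route="rpc", service="users", method="get", args=[42])
# retry = call.replace(args=[43])      # frozen dataclasses are copied, not mutated
# resp = RpcResp(status=OK, body={"id": 43})
# resp.ok                              # -> True, since status == OK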
| 15.448276
| 42
| 0.654018
| 294
| 0.65625
| 0
| 0
| 259
| 0.578125
| 0
| 0
| 0
| 0
|
ad000563b867048b766de0b54cb60801221e67a0
| 598
|
py
|
Python
|
fileparse/python/main.py
|
mlavergn/benchmarks
|
4663009772c71d7c94bcd13eec542d1ce33cef72
|
[
"Unlicense"
] | null | null | null |
fileparse/python/main.py
|
mlavergn/benchmarks
|
4663009772c71d7c94bcd13eec542d1ce33cef72
|
[
"Unlicense"
] | null | null | null |
fileparse/python/main.py
|
mlavergn/benchmarks
|
4663009772c71d7c94bcd13eec542d1ce33cef72
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import timeit
setup = '''
import os
def FileTest(path):
file = open(path, "r")
lines = file.readlines()
data = [None for i in range(len(lines))]
i = 0
for line in lines:
data[i] = line.split(',')
j = 0
for field in data[i]:
data[i][j] = field.strip('\\'\\n')
j += 1
i += 1
return data
'''
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - cold")
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - warm")
| 20.62069
| 93
| 0.605351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 421
| 0.704013
|
ad005ad94d7f773d61fa5f1363d44b1d458fd462
| 5,475
|
py
|
Python
|
boris/classification.py
|
fragaria/BorIS
|
9585c83f29220d8f63910dabd98641ab41ace6cf
|
[
"MIT"
] | 1
|
2021-08-10T14:01:26.000Z
|
2021-08-10T14:01:26.000Z
|
boris/classification.py
|
fragaria/BorIS
|
9585c83f29220d8f63910dabd98641ab41ace6cf
|
[
"MIT"
] | 5
|
2018-04-04T14:31:34.000Z
|
2020-06-08T07:50:23.000Z
|
boris/classification.py
|
fragaria/BorIS
|
9585c83f29220d8f63910dabd98641ab41ace6cf
|
[
"MIT"
] | 4
|
2017-02-06T15:38:34.000Z
|
2018-03-21T09:40:12.000Z
|
# -*- coding: utf-8 -*-
'''
Created on 25.9.2011
@author: xaralis
'''
from model_utils import Choices
SEXES = Choices(
(1, 'FEMALE', u'žena'),
(2, 'MALE', u'muž')
)
NATIONALITIES = Choices(
(1, 'CZ', u'Česká republika'),
(2, 'EU', u'Jiné - EU'),
(3, 'NON_EU', u'Jiné - non-EU'),
(4, 'UNKNOWN', u'Neznámo')
)
ETHNIC_ORIGINS = Choices(
(1, 'NON_GYPSY', u'Ne-romská'),
(2, 'GYPSY', u'Romská'),
(3, 'NOT_MONITORED', u'Nesledováno')
)
LIVING_CONDITIONS = Choices(
(1, 'ALONE', u'Sám'),
(2, 'WITH_FAMILY', u'S rodiči/rodinou'),
(3, 'WITH_FRIENDS', u'S přáteli'),
(4, 'WITH_PARTNER', u'S partnerem'),
(5, 'WITH_PARTNER_AND_CHILDREN', u'S partnerem a dítětem'),
(6, 'ALONE_WITH_CHILDREN', u'Sám s dítětem'),
(7, 'UNKNOWN', u'Není známo')
)
ACCOMODATION_TYPES = Choices(
(1, 'WITH_PARENTS', u'Doma (u rodičů)'),
(2, 'OWN_FLAT', u'Vlastní byt (i pronajatý)'),
(3, 'FOREIGN_FLAT', u'Cizí byt'),
(4, 'PUBLIC_ACCOMODATION', u'Ubytovna'),
(5, 'SQUAT', u'Squat'),
(6, 'BARRACKS', u'Kasárna'),
(7, 'HOMELESS', u'Bez domova, na ulici'),
(8, 'UNKNOWN', u'Není známo')
)
EMPLOYMENT_TYPES = Choices(
(1, 'REGULAR', u'Pravidelné zam.'),
(2, 'SCHOOL', u'Škola'),
(3, 'OCCASIONAL_WORK', u'Příležitostná práce'),
(4, 'REGISTERED_ON_EB', u'Registrován na ÚP'),
(5, 'NO_EMPLOYMENT', u'Bez zaměstnání'),
(6, 'STATE_SUPPORT', u'Dávky SZ'),
(8, 'UNKNOWN', u'Není známo')
)
EDUCATION_LEVELS = Choices(
(1, 'BASIC', u'Základní'),
(2, 'PRACTICAL_SECONDARY', u'Vyučen'),
(3, 'SECONDARY', u'Střední s maturitou'),
(4, 'HIGHER_PRACTICAL', u'Vyšší odborné'),
(5, 'UNIVERSITY_GRADE', u'Vysokoškolské'),
(6, 'BASIC_NOT_COMPLETED', u'Neukončené základní'),
(7, 'UNKNOWN', u'Není známo')
)
DRUGS = Choices( # (Numbers reflect the old drug ids.)
(3, 'METHAMPHETAMINE', u'Pervitin, jiné amfetaminy'),
(4, 'SUBUTEX_LEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - legálně'),
(5, 'TOBACCO', u'Tabák'),
(8, 'THC', u'THC'),
(9, 'ECSTASY', u'Extáze'),
(10, 'DESIGNER_DRUGS', u'Designer drugs'),
(11, 'HEROIN', u'Heroin'),
(12, 'BRAUN', u'Braun a jiné opiáty'),
(13, 'RAW_OPIUM', u'Surové opium'),
(14, 'SUBUTEX_ILLEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - ilegálně'),
(16, 'ALCOHOL', u'Alkohol',),
(17, 'INHALER_DRUGS', u'Inhalační látky, ředidla'),
(18, 'MEDICAMENTS', u'Medikamenty'),
(19, 'METHADONE', u'Metadon'),
(20, 'COCAINE', u'Kokain, crack'),
(21, 'SUBOXONE', u'Suboxone'),
(22, 'VENDAL', u'Vendal'),
(23, 'LSD', u'LSD'),
(24, 'PSYLOCIBE', u'Lysohlávky'),
(28, 'FENTANYL', u'Fentanyl'),
(25, 'UNKNOWN', u'Neznámo'),
(26, 'PATHOLOGICAL_GAMBLING', u'Patologické hráčství'),
(27, 'OTHER_NON_SUBSTANCE_ADDICTION', u'Jiná nelátková závislost'),
)
# Disable `application`, `first_try_application` and `primary_drug_usage` fields for these drugs
NON_APPLICATION_DRUGS = ['26', '27']
DRUG_APPLICATION_FREQUENCY = Choices(
(1, 'LESS_THAN_3X_A_MONTH', u'méně než 3x měsíčně'),
(2, 'ONCE_A_WEEK', u'1x týdně'),
(3, 'ON_WEEKENDS', u'víkendově'),
(4, 'EVERY_SECOND_DAY', u'obden'),
(5, 'DAILY', u'denně'),
(6, '2X_3X_A_DAY', u'2-3x denně'),
(7, 'MORE_THAN_3X_A_DAY', u'více než 3x denně'),
(8, 'NONE_FOR_MORE_THAN_6_MONTHS', u'neužita déle než 6 měsíců'),
# (9, 'NONE_FOR_LAST_6_MONTHS', u'neužita posledních 6 měsíců'), # Feature 103
(10, 'NONE_FOR_LAST_3_MONTHS', u'neužita poslední 3 měsíce'),
(11, 'NONE_FOR_LAST_1_MONTH', u'neužita v posledním měsíci'),
(12, 'UNKNOWN', u'Není známo')
)
DRUG_APPLICATION_TYPES = Choices(
(1, 'VEIN_INJECTION', u'injekčně do žíly'),
(2, 'MUSCLE_INJECTION', u'injekčně do svalu'),
(3, 'ORAL', u'ústně'),
(4, 'SNIFFING', u'sniff (šňupání)'),
(5, 'SMOKING', u'kouření'),
(6, 'INHALATION', u'inhalace'),
(7, 'UNKNOWN', u'Není známo')
)
RISKY_BEHAVIOR_KIND = Choices(
(1, 'EQUIPMENT_SHARING', u'Sdílení náčiní'),
(2, 'SEX_WITHOUT_PROTECTION', u'Nechráněný sex'),
(3, 'SYRINGE_SHARING', u'Sdílení jehel'),
(4, 'INTRAVENOUS_APPLICATION', u'Nitrožilní aplikace'),
(5, 'RISKY_APPLICATION', u'Riziková aplikace'),
(6, 'OVERDOSING', u'Předávkování'),
(7, 'HEALTH_COMPLICATIONS', u'Zdravotní komplikace')
)
RISKY_BEHAVIOR_PERIODICITY = Choices(
(1, 'NEVER', u'Nikdy'),
(2, 'ONCE', u'Jednorázově'),
(3, 'OFTEN', u'Opakovaně '),
(4, 'UNKNOWN', u'Není známo')
)
DISEASES = Choices(
(1, 'HIV', u'HIV'),
(2, 'VHA', u'VHA'),
(3, 'VHB', u'VHB'),
(4, 'VHC', u'VHC'),
(5, 'SYFILIS', u'Syfilis'),
)
DISEASE_TEST_RESULTS = Choices(
(0, 'UNKNOWN', u'Neznámo, zda testován'),
(1, 'TESTED_POSITIVE', u'Testován - pozitivní'),
(2, 'TESTED_NEGATIVE', u'Testován - negativní'),
(3, 'TESTED_UNKNOWN', u'Testován - výsledek neznámý'),
(4, 'NOT_TESTED', u'Nikdy netestován'),
(5, 'RESULT_NOT_ACCLAIMED', u'Nevyzvedl výsledek'),
)
DISEASE_TEST_SIGN = Choices(
('p', 'POSITIVE', u'Pozitivní'),
('n', 'NEGATIVE', u'Negativní'),
('r', 'REACTIVE', u'Reaktivní'),
('i', 'INCONCLUSIVE', u'Test neprůkazný')
)
ANONYMOUS_TYPES = Choices(
(1, 'NON_USER', u'neuživatel'),
(2, 'NON_IV', u'neIV'),
(3, 'IV', u'IV'),
(4, 'NON_USER_PARENT', u'rodič'),
(5, 'THC', u'THC')
)
def get_drug_by_id(id):
for drug in DRUGS:
if drug[0] == id:
return drug
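# --- Usage sketch (illustrative only; the exact tuple layout yielded when
# iterating model_utils.Choices is an assumption) ---
# DRUGS.HEROIN                  # numeric id registered for heroin
# get_drug_by_id(DRUGS.HEROIN)  # -> the matching choice entry, or None if absent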
| 32.589286
| 96
| 0.606575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,568
| 0.630946
|
9a735bf957ffc30fea6d0bb1fe8f079ce7582eb6
| 23,569
|
py
|
Python
|
extern/face_expression/face_expression/dataset.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | 1
|
2022-02-07T06:12:26.000Z
|
2022-02-07T06:12:26.000Z
|
extern/face_expression/face_expression/dataset.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
extern/face_expression/face_expression/dataset.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
import pickle
import h5py
from tqdm import tqdm
import numpy as np
import torch
import cv2
import scipy.spatial
import hydra
from face_expression import utils
from face_expression.third_party.face_mesh_mediapipe import FaceMeshMediaPipe
# class VoxCeleb2FaceDataset(torch.utils.data.Dataset):
# def __init__(
# self,
# h5_path,
# scheme_path,
# image_root,
# return_images=True,
# bbox_scale=2.0,
# image_shape=(256, 256),
# sample_range=None
# ):
# self.h5_path = h5_path
# self.scheme_path = scheme_path
# self.image_root = image_root
# self.return_images = return_images
# self.bbox_scale = bbox_scale
# self.image_shape = image_shape
# self.sample_range = sample_range
# # load scheme
# with open(scheme_path, 'rb') as f:
# self.scheme = pickle.load(f)
# if sample_range is not None:
# self.scheme = [self.scheme[i] for i in range(sample_range[0], sample_range[1], sample_range[2])]
# def open_h5_file(self):
# self.h5f = h5py.File(self.h5_path, mode='r')
# def load_image(self, identity_id, video_id, utterance_id, seq_index):
# image_dir = os.path.join(self.image_root, identity_id, video_id, utterance_id)
# names = sorted(os.listdir(image_dir))
# if seq_index < len(names):
# name = names[seq_index]
# path = os.path.join(image_dir, name)
# image = cv2.imread(path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# else:
# # black image mock
# name = names[0]
# path = os.path.join(image_dir, name)
# image = cv2.imread(path)
# image = np.zeros(image.shape, dtype=np.uint8)
# return image
# def get_camera_matrix(self, h, w):
# fx, fy = 3000.0, 3000.0
# cx, cy = w/2, h/2
# camera_martix = np.array([
# [fx, 0.0, cx],
# [0.0, fy, cy],
# [0.0, 0.0, 1.0]
# ])
# return camera_martix
# def get_transformation_matrix(self):
# transformation_matrix = np.eye(3, 4)
# return transformation_matrix
# def get_bbox(self, keypoints_2d):
# left, top, right, down = (
# keypoints_2d[:, 0].min(),
# keypoints_2d[:, 1].min(),
# keypoints_2d[:, 0].max(),
# keypoints_2d[:, 1].max()
# )
# # convex_hull = scipy.spatial.ConvexHull(points)
# # center_x, center_y = (np.mean(convex_hull.points[convex_hull.vertices, axis]) for axis in (0, 1))
# center_x, center_y = (left + right) / 2, (top + down) / 2
# w, h = right - left, down - top
# bbox = (
# center_x - w/2,
# center_y - h/2,
# center_x + w/2,
# center_y + h/2
# )
# bbox = utils.common.utils.common.get_square_bbox(bbox)
# bbox = utils.common.utils.common.scale_bbox(bbox, self.bbox_scale)
# return bbox
# def normalize_keypoints_2d(self, keypoints_2d):
# convex_hull = scipy.spatial.ConvexHull(keypoints_2d)
# center = np.mean(convex_hull.points[convex_hull.vertices], axis=0)
# keypoints_2d = (keypoints_2d - center) / np.sqrt(convex_hull.area)
# return keypoints_2d
# def load_sample(self, identity_id, video_id, utterance_id, seq_index):
# sample = dict()
# # load h5_data
# try:
# h5_data = self.h5f[identity_id][video_id][utterance_id]
# except Exception as e:
# print(identity_id, video_id, utterance_id, seq_index)
# print(e)
# sample['expression'] = h5_data['expressions'][seq_index]
# sample['pose'] = h5_data['poses'][seq_index]
# sample['beta'] = h5_data['betas'][:]
# sample['keypoints_2d'] = h5_data['face_keypoints_2d'][seq_index]
# # load image
# if self.return_images:
# image = self.load_image(identity_id, video_id, utterance_id, seq_index)
# orig_h, orig_w = image.shape[:2]
# # crop
# bbox = self.get_bbox(sample['keypoints_2d'])
# image = utils.common.utils.common.crop_image(image, bbox)
# # resize
# image = utils.common.utils.common.resize_image(image, self.image_shape)
# image = image / 255.0
# image = image.transpose(2, 0, 1)
# sample['image'] = image
# # load projection matrix
# h, w = image.shape[1:3]
# bbox_h, bbox_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
# if 'camera_matrix' in h5_data:
# print('hey')
# camera_matrix = h5_data['camera_matrix'][:]
# else:
# camera_matrix = self.get_camera_matrix(orig_h, orig_w)
# camera_matrix = utils.common.utils.common.update_after_crop_and_resize(
# camera_matrix, bbox, (w/bbox_w, h/bbox_h)
# )
#             # update keypoints 2d after crop and resize
# sample['keypoints_2d'][:, 0] -= bbox[0]
# sample['keypoints_2d'][:, 1] -= bbox[1]
# sample['keypoints_2d'][:, 0] *= w/bbox_w
# sample['keypoints_2d'][:, 1] *= h/bbox_h
# else:
# image = np.zeros((*self.image_shape, 3), dtype=np.uint8)
# image = image / 255.0
# image = image.transpose(2, 0, 1)
# h, w = image.shape[1:3]
# sample['image'] = image
# if 'camera_matrix' in h5_data:
# camera_matrix = h5_data['camera_matrix'][:]
# else:
# camera_matrix = self.get_camera_matrix(*self.image_shape)
# transformation_matrix = self.get_transformation_matrix()
# projection_matrix = camera_matrix @ transformation_matrix
# sample['camera_matrix'] = camera_matrix
# sample['projection_matrix'] = projection_matrix
# sample['h'] = h
# sample['w'] = w
# # normalize keypoints 2d
# sample['keypoints_2d'] = self.normalize_keypoints_2d(sample['keypoints_2d'])
# return sample
# def __len__(self):
# return len(self.scheme)
# def __getitem__(self, index):
# # this should be normally done in __init__, but due to DataLoader behaviour
# # when num_workers > 1, the h5 file is opened during first data access:
# # https://github.com/pytorch/pytorch/issues/11929#issuecomment-649760983
# if not hasattr(self, 'h5f'):
# self.open_h5_file()
# sample_key = self.scheme[index]
# sample = self.load_sample(*sample_key)
# return sample
# @staticmethod
# def build_scheme(h5f):
# scheme = []
# for identity_id in tqdm(h5f):
# for video_id in h5f[identity_id]:
# for utterance_id in h5f[identity_id][video_id]:
# seq_length = h5f[identity_id][video_id][utterance_id]['expressions'].shape[0]
# for seq_index in range(seq_length):
# scheme.append((identity_id, video_id, utterance_id, seq_index))
# scheme = sorted(scheme)
# return scheme
# @staticmethod
# def preprocess_dataset(face_root, image_root, openpose_root, h5_path):
# # load scheme
# scheme = []
# identity_id_list = sorted(os.listdir(face_root))
# for identity_id in tqdm(identity_id_list):
# identity_dir = os.path.join(face_root, identity_id)
# video_id_list = sorted(os.listdir(identity_dir))
# for video_id in video_id_list:
# video_dir = os.path.join(identity_dir, video_id)
# utterance_id_list = sorted(os.listdir(video_dir))
# for utterance_id in utterance_id_list:
# utterance_dir = os.path.join(video_dir, utterance_id)
# scheme.append((identity_id, video_id, utterance_id))
# scheme = sorted(scheme)
# # build h5 file
# with h5py.File(h5_path, 'w') as hf:
# for (identity_id, video_id, utterance_id) in tqdm(scheme):
# # load face
# face_dir = os.path.join(face_root, identity_id, video_id, utterance_id, 'joints_op_face')
# expressions = np.load(os.path.join(face_dir, 'expressions.npy')) * 100
# poses = np.load(os.path.join(face_dir, 'poses.npy'))
# betas = np.load(os.path.join(face_dir, 'betas.npy'))
# # load openpose keypoints 2d
# openpose_dir = os.path.join(openpose_root, identity_id, video_id, utterance_id)
# face_keypoints_2d_list = []
# names = sorted(os.listdir(openpose_dir))
# for name in names:
# path = os.path.join(openpose_dir, name)
# with open(path) as f:
# openpose_data = json.load(f)
# face_keypoints_2d = openpose_data['people'][0]['face_keypoints_2d']
# face_keypoints_2d = np.array(face_keypoints_2d).reshape(70, 3)
# face_keypoints_2d = face_keypoints_2d[:, :2] # remove confidences
# face_keypoints_2d_list.append(face_keypoints_2d)
# face_keypoints_2d_arr = np.array(face_keypoints_2d_list)
# # save to h5
# group = hf.create_group(f"{identity_id}/{video_id}/{utterance_id}")
# group['expressions'] = expressions
# group['poses'] = poses
# group['betas'] = betas
# group['face_keypoints_2d'] = face_keypoints_2d_arr
class VoxCeleb2MediapipeDataset(torch.utils.data.Dataset):
def __init__(
self, *,
h5_path='', scheme_path='',
image_root='',
return_keypoints_3d=False,
return_images=True, bbox_scale=2.0, image_shape=(256, 256),
sample_range=None
):
assert return_images
self.h5_path = h5_path
self.scheme_path = scheme_path
self.return_keypoints_3d = return_keypoints_3d
self.image_root = image_root
self.return_images = return_images
self.bbox_scale = bbox_scale
self.image_shape = image_shape
self.sample_range = sample_range
# load facemesh model
models_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "third_party", "face_mesh_mediapipe", "models")
anchors_path = os.path.join(models_dir, "face_anchors.csv")
detection_model_path = os.path.join(models_dir, "face_detection_front.tflite")
landmark_model_path = os.path.join(models_dir, "face_landmark.tflite")
self.face_mesh_model = FaceMeshMediaPipe(anchors_path, detection_model_path, landmark_model_path, bbox_scale=1.5)
# load scheme
with open(scheme_path, 'rb') as f:
self.scheme = pickle.load(f)
if sample_range is not None:
start = max(0, sample_range[0])
end = min(len(self.scheme), sample_range[1])
step = sample_range[2]
self.scheme = [self.scheme[i] for i in range(start, end, step)]
def open_h5_file(self):
self.h5f = h5py.File(self.h5_path, mode='r')
def load_image(self, identity_id, video_id, utterance_id, seq_index):
image_dir = os.path.join(self.image_root, identity_id, video_id, utterance_id)
if not os.path.exists(image_dir):
image_dir = os.path.join(self.image_root, identity_id, video_id, 'color_undistorted')
names = sorted(os.listdir(image_dir))
if seq_index < len(names):
name = names[seq_index]
path = os.path.join(image_dir, name)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
else:
# black image mock
name = names[0]
path = os.path.join(image_dir, name)
image = cv2.imread(path)
image = np.zeros(image.shape, dtype=np.uint8)
return image
def get_camera_matrix(self, h, w):
fx, fy = 3000.0, 3000.0
cx, cy = w/2, h/2
        camera_matrix = np.array([
            [fx, 0.0, cx],
            [0.0, fy, cy],
            [0.0, 0.0, 1.0]
        ])
        return camera_matrix
def get_transformation_matrix(self):
transformation_matrix = np.eye(3, 4)
return transformation_matrix
def get_bbox(self, keypoints_2d):
left, top, right, down = (
keypoints_2d[:, 0].min(),
keypoints_2d[:, 1].min(),
keypoints_2d[:, 0].max(),
keypoints_2d[:, 1].max()
)
center_x, center_y = (left + right) / 2, (top + down) / 2
w, h = right - left, down - top
bbox = (
center_x - w/2,
center_y - h/2,
center_x + w/2,
center_y + h/2
)
if np.sum(bbox) == 0.0 or np.sum(np.isnan(bbox)) > 0:
return np.array([0.0, 0.0, 100.0, 100.0])
bbox = utils.common.get_square_bbox(bbox)
bbox = utils.common.scale_bbox(bbox, self.bbox_scale)
return bbox
# def normalize_keypoints_2d(self, keypoints_2d, image_shape):
# convex_hull = scipy.spatial.ConvexHull(keypoints_2d[:, :2])
# center = np.mean(convex_hull.points[convex_hull.vertices], axis=0)
# keypoints_2d[:, :2] = keypoints_2d[:, :2] - center
# if self.keypoints_2d_normalization == 'area':
# keypoints_2d[:, :2] = keypoints_2d[:, :2] / np.sqrt(convex_hull.area)
# elif self.keypoints_2d_normalization == 'image_shape':
# keypoints_2d[:, :2] = keypoints_2d[:, :2] / np.array([image_shape[1], image_shape[0]])
# elif self.keypoints_2d_normalization == 'no':
# pass
# else:
# raise NotImplementedError("Unknown keypoints_2d_normalization mode: {self.keypoints_2d_normalization}")
# # norm depth
# if keypoints_2d.shape[1] == 3: # 3d keypoints
# keypoints_2d[:, 2] /= 100.0
# return keypoints_2d
def load_sample(self, identity_id, video_id, utterance_id, seq_index):
sample = dict()
sample['key'] = (identity_id, video_id, utterance_id, seq_index)
# load h5_data
try:
h5_data = self.h5f[identity_id][video_id][utterance_id]
except Exception as e:
print(identity_id, video_id, utterance_id, seq_index)
print(e)
sample['expression'] = h5_data['expressions'][seq_index]
sample['pose'] = h5_data['poses'][seq_index] # 90 = [63 pose + 3 jaw + 6 eye + 12 hand + 3 trans + 3 root_orient]
sample['beta'] = h5_data['betas'][:]
sample['keypoints_2d_op'] = h5_data['face_keypoints_2d'][seq_index].astype(np.float32)
# load image
if self.return_images:
image = self.load_image(identity_id, video_id, utterance_id, seq_index)
orig_h, orig_w = image.shape[:2]
# get keypoints_2d
op_bbox = self.get_bbox(sample['keypoints_2d_op'])
image_op_cropped = utils.common.crop_image(image, op_bbox)
keypoints_3d, keypoints_3d_normed = self.face_mesh_model(image_op_cropped)
if keypoints_3d_normed is None:
keypoints_3d_normed = np.zeros((468, 3))
keypoints_3d = np.zeros((468, 3))
bbox = op_bbox
else:
keypoints_3d[:, :2] += np.array(op_bbox[:2])
bbox = self.get_bbox(keypoints_3d[:, :2])
if self.return_keypoints_3d:
sample['keypoints'] = keypoints_3d_normed.astype(np.float32)
sample['keypoints_orig'] = keypoints_3d.astype(np.float32)
else:
sample['keypoints'] = keypoints_3d_normed[:, :2].astype(np.float32)
sample['keypoints_orig'] = keypoints_3d[:, :2].astype(np.float32)
# crop
image = utils.common.crop_image(image, bbox)
# resize
image = utils.common.resize_image(image, self.image_shape)
image = image / 255.0
image = image.transpose(2, 0, 1)
sample['image'] = image
# load projection matrix
h, w = image.shape[1:3]
bbox_h, bbox_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if 'camera_matrix' in h5_data:
camera_matrix = h5_data['camera_matrix'][:]
else:
camera_matrix = self.get_camera_matrix(orig_h, orig_w)
camera_matrix = utils.common.update_after_crop_and_resize(
camera_matrix, bbox, (w/bbox_w, h/bbox_h)
)
transformation_matrix = self.get_transformation_matrix()
projection_matrix = camera_matrix @ transformation_matrix
sample['camera_matrix'] = camera_matrix
sample['projection_matrix'] = projection_matrix
sample['h'] = h
sample['w'] = w
# update keypoints 2d after crop and resize
sample['keypoints_orig'][:, 0] -= bbox[0]
sample['keypoints_orig'][:, 1] -= bbox[1]
sample['keypoints_orig'][:, 0] *= w/bbox_w
sample['keypoints_orig'][:, 1] *= h/bbox_h
# # normalize keypoints 2d
# sample['keypoints_2d_orig'] = sample['keypoints_2d'].copy()
# if not np.all(sample['keypoints_2d'] == 0.0):
# try:
# sample['keypoints_2d'] = self.normalize_keypoints_2d(sample['keypoints_2d'], (h, w)).astype(np.float32)
# except Exception as e:
# sample['keypoints_2d'] = np.zeros_like(sample['keypoints_2d']).astype(np.float32)
return sample
def __len__(self):
return len(self.scheme)
def __getitem__(self, index):
# this should be normally done in __init__, but due to DataLoader behaviour
# when num_workers > 1, the h5 file is opened during first data access:
# https://github.com/pytorch/pytorch/issues/11929#issuecomment-649760983
if not hasattr(self, 'h5f'):
self.open_h5_file()
sample_key = self.scheme[index]
sample = self.load_sample(*sample_key)
return sample
@staticmethod
def build_scheme(h5f):
scheme = []
for identity_id in tqdm(h5f):
for video_id in h5f[identity_id]:
for utterance_id in h5f[identity_id][video_id]:
seq_length = h5f[identity_id][video_id][utterance_id]['expressions'].shape[0]
for seq_index in range(seq_length):
scheme.append((identity_id, video_id, utterance_id, seq_index))
scheme = sorted(scheme)
return scheme
@staticmethod
def preprocess_dataset(face_root, image_root, openpose_root, h5_path):
# load scheme
scheme = []
identity_id_list = sorted(os.listdir(face_root))
for identity_id in tqdm(identity_id_list):
identity_dir = os.path.join(face_root, identity_id)
video_id_list = sorted(os.listdir(identity_dir))
for video_id in video_id_list:
video_dir = os.path.join(identity_dir, video_id)
utterance_id_list = sorted(os.listdir(video_dir))
for utterance_id in utterance_id_list:
utterance_dir = os.path.join(video_dir, utterance_id)
scheme.append((identity_id, video_id, utterance_id))
scheme = sorted(scheme)
# build h5 file
with h5py.File(h5_path, 'w') as hf:
for (identity_id, video_id, utterance_id) in tqdm(scheme):
# load face
face_dir = os.path.join(face_root, identity_id, video_id, utterance_id, 'joints_op_face')
expressions = np.load(os.path.join(face_dir, 'expressions.npy')) * 100
poses = np.load(os.path.join(face_dir, 'poses.npy'))
betas = np.load(os.path.join(face_dir, 'betas.npy'))
# load openpose keypoints 2d
openpose_dir = os.path.join(openpose_root, identity_id, video_id, utterance_id)
face_keypoints_2d_list = []
names = sorted(os.listdir(openpose_dir))
for name in names:
path = os.path.join(openpose_dir, name)
with open(path) as f:
openpose_data = json.load(f)
face_keypoints_2d = openpose_data['people'][0]['face_keypoints_2d']
face_keypoints_2d = np.array(face_keypoints_2d).reshape(70, 3)
face_keypoints_2d = face_keypoints_2d[:, :2] # remove confidences
face_keypoints_2d_list.append(face_keypoints_2d)
face_keypoints_2d_arr = np.array(face_keypoints_2d_list)
# save to h5
group = hf.create_group(f"{identity_id}/{video_id}/{utterance_id}")
group['expressions'] = expressions
group['poses'] = poses
group['betas'] = betas
group['face_keypoints_2d'] = face_keypoints_2d_arr
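
def build_example_loader(h5_path, scheme_path, image_root):
    # Hypothetical helper, not part of the original file: a minimal sketch of wrapping
    # the dataset in a DataLoader. The paths and the batch_size/num_workers values are
    # placeholders. Because the h5 file is opened lazily on first __getitem__ call,
    # num_workers > 0 is safe here: each worker process opens its own h5py handle.
    dataset = VoxCeleb2MediapipeDataset(
        h5_path=h5_path, scheme_path=scheme_path, image_root=image_root,
        return_images=True, bbox_scale=2.0, image_shape=(256, 256)
    )
    return torch.utils.data.DataLoader(dataset, batch_size=8, num_workers=4, shuffle=True)
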
@hydra.main(config_path='config/default.yaml')
def main(config):
print(config.pretty())
# preprocess
    print("Preprocess dataset")
VoxCeleb2FaceDataset.preprocess_dataset(
config.data.face_root, config.data.image_root, config.data.openpose_root, config.data.h5_path
)
# save scheme
print("Build scheme")
h5f = h5py.File(config.data.h5_path, mode='r', libver='latest')
scheme = VoxCeleb2FaceDataset.build_scheme(h5f)
with open(config.data.scheme_path, 'wb') as f:
pickle.dump(scheme, f)
# filter scheme
print("Filter scheme")
dataset = VoxCeleb2FaceDataset(
config.data.h5_path, config.data.scheme_path,
config.data.image_root,
return_images=config.data.return_images, bbox_scale=config.data.bbox_scale, image_shape=config.data.image_shape
)
invalid_indices = []
for i in tqdm(range(len(dataset))):
try:
sample = dataset[i]
except Exception as e:
invalid_indices.append(i)
print(f"Index {i} is invalid. Reason: {e}")
invalid_indices = set(invalid_indices)
print(f"Found {len(invalid_indices)} invalid samples")
scheme_filtered = [sample_key for i, sample_key in enumerate(dataset.scheme) if i not in invalid_indices]
with open(config.data.scheme_path, 'wb') as f:
pickle.dump(scheme_filtered, f)
print("Success!")
if __name__ == '__main__':
main()
| 37.058176
| 125
| 0.566252
| 11,837
| 0.502228
| 0
| 0
| 4,375
| 0.185625
| 0
| 0
| 12,090
| 0.512962
|
9a75886d1c5240a727719c8116254cb13ec6d703
| 1,316
|
py
|
Python
|
session7/OLED_Clock.py
|
rezafari/raspberry
|
e6720780f3c65ee1809040fc538f793fe44f0111
|
[
"MIT"
] | null | null | null |
session7/OLED_Clock.py
|
rezafari/raspberry
|
e6720780f3c65ee1809040fc538f793fe44f0111
|
[
"MIT"
] | null | null | null |
session7/OLED_Clock.py
|
rezafari/raspberry
|
e6720780f3c65ee1809040fc538f793fe44f0111
|
[
"MIT"
] | null | null | null |
######################################################################
# OLED_Clock.py
#
# This program display date and time on OLED module
######################################################################
import Adafruit_SSD1306
from datetime import datetime
import time
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Setup Display
RST=24
device = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
device.begin()
device.clear()
device.display()
width = device.width
height = device.height
fontFile = '/usr/share/fonts/truetype/freefont/FreeSansBold.ttf'
smallFont = ImageFont.truetype(fontFile, 12)
largeFont = ImageFont.truetype(fontFile, 33)
# Display a message on 2 lines: first line in small font, second line in large font
def DisplayMessage(line1, line2):
global device
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
maxWidth, unused = draw.textsize(line1, font=largeFont)
    #with canvas(device) as draw:
draw.text((10, 0), line1, font=smallFont, fill=255)
draw.text((0, 20), line2, font=largeFont, fill=255)
device.image(image)
device.display()
while True:
now = datetime.now()
dateMessage = '{:%d %B %Y}'.format(now)
timeMessage = '{:%H:%M:%S}'.format(now)
DisplayMessage(dateMessage,timeMessage)
time.sleep(0.1)
| 29.244444
| 70
| 0.634498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 401
| 0.304711
|
9a75a5f4ae8ec0f7ef5613e16f951ea62c4bd8de
| 9,601
|
py
|
Python
|
odim/router.py
|
belda/odim
|
ea49284c4bfc76ac6cb436577c128b20c2c4004c
|
[
"MIT"
] | 5
|
2021-01-29T11:00:10.000Z
|
2021-05-18T23:23:32.000Z
|
odim/router.py
|
belda/odim
|
ea49284c4bfc76ac6cb436577c128b20c2c4004c
|
[
"MIT"
] | 1
|
2021-11-16T10:22:43.000Z
|
2021-11-16T10:22:43.000Z
|
odim/router.py
|
belda/odim
|
ea49284c4bfc76ac6cb436577c128b20c2c4004c
|
[
"MIT"
] | 1
|
2021-02-18T14:45:43.000Z
|
2021-02-18T14:45:43.000Z
|
'''
Contains the extended FastAPI router, for simplified CRUD from a model
'''
from typing import Any, List, Optional, Sequence, Set, Type, Union
import fastapi
from fastapi import Depends, params
from pydantic import BaseModel, create_model
from odim import Odim, OkResponse, SearchResponse
from odim.dependencies import SearchParams
class OdimRouter(fastapi.APIRouter):
''' Simplified FastAPI router for easy CRUD '''
def mount_crud(self,
path: str,
*,
model : Type[BaseModel],
tags: Optional[List[str]] = None,
dependencies : Optional[Sequence[params.Depends]] = None,
include_in_schema: bool = True,
methods : Optional[Union[Set[str], List[str]]] = ('create','get','search','save','update','delete'),
methods_exclude : Optional[Union[Set[str], List[str]]] = [],
extend_query : dict= {}):
''' Add endpoints for CRUD operations for particular model
:param path: base_path, for the model resource location eg: /api/houses/
:param model: pydantic/Odim BaseModel, that is used for eg. Houses
:param tags: Starlette/FastAPI tags for endpoints
:param dependencies: Starlette/FastAPI dependencies for all endpoints
:param include_in_schema: whether to include in docs
:param methods: methods to automatically generate ('create','get','search','save','update','delete')
:param methods_exclude: methods to NOT automatically generate ('create','get','search','save','update','delete')
:param extend_query: adds these parameters to every query and sets it on the object upon creation. keys are fields, values can be exact values or functions taking request as parameter
'''
add_methods = [ x for x in methods if x not in methods_exclude ]
if 'create' in add_methods:
async def create(request : fastapi.Request, obj : model):
for k, v in exec_extend_qeury(request,extend_query).items():
setattr(obj, k, v)
await Odim(obj).save()
return obj
self.add_api_route(path = path,
endpoint=create,
response_model=model,
status_code=201,
tags=tags,
dependencies = dependencies,
summary="Create new %s" % model.schema().get('title'),
description = "Create new instance of %s " % model.schema().get('title'),
methods = ["POST"],
include_in_schema = include_in_schema)
if 'get' in add_methods:
async def get(request : fastapi.Request, id : str):
return await Odim(model).get(id=id, extend_query=exec_extend_qeury(request,extend_query))
self.add_api_route(path = path+"{id}",
endpoint=get,
response_model=model,
tags=tags,
dependencies = dependencies,
summary="Get %s by id" % model.schema().get('title'),
description = "Return individual %s details " % model.schema().get('title'),
methods = ["GET"],
include_in_schema = include_in_schema)
if 'search' in add_methods:
async def search(request : fastapi.Request, search_params : dict = Depends(SearchParams)):
sp = {**search_params.q, **exec_extend_qeury(request,extend_query)}
rsp = { "results" : await Odim(model).find(sp, search_params),
"total" : await Odim(model).count(sp),
"search" : search_params.dict()}
return rsp
self.add_api_route(path = path,
endpoint=search,
response_model=SearchResponse[model],
tags=tags,
dependencies = dependencies,
summary="Search for %ss" % model.schema().get('title'),
description = "Performs a listing search for %s " % model.schema().get('title'),
methods = ["GET"],
include_in_schema = include_in_schema)
if 'save' in add_methods:
async def save(request : fastapi.Request, id : str, obj : model):
obj.id = id
await Odim(obj).save(extend_query=exec_extend_qeury(request,extend_query))
return obj
self.add_api_route(path = path+"{id}",
endpoint=save,
response_model=model,
tags=tags,
dependencies = dependencies,
summary="Replace %s by id" % model.schema().get('title'),
description = "PUT replaces the original %s as whole " % model.schema().get('title'),
methods = ["PUT"],
include_in_schema = include_in_schema)
if 'update' in add_methods:
async def update(request : fastapi.Request, id : str, obj : model):
obj.id = id
await Odim(obj).update(extend_query=exec_extend_qeury(request,extend_query))
return obj
self.add_api_route(path = path+"{id}",
endpoint=update,
response_model=model,
tags=tags,
dependencies = dependencies,
summary="Partial update %s by id" % model.schema().get('title'),
description = "Just updates individual fields of %s " % model.schema().get('title'),
methods = ["Patch"],
include_in_schema = include_in_schema)
if 'delete' in add_methods:
async def delete(request : fastapi.Request, id : str) -> None:
await Odim(model).delete(id, extend_query=exec_extend_qeury(request,extend_query))
return OkResponse()
self.add_api_route(path = path+"{id}",
endpoint=delete,
response_model=OkResponse,
status_code=200,
tags=tags,
dependencies = dependencies,
summary="Delete %s by id" % model.schema().get('title'),
description = "Deletes individual instance of %s " % model.schema().get('title'),
methods = ["DELETE"],
include_in_schema = include_in_schema)
def generate(self,
path: str,
*,
model : Type[BaseModel],
tags: Optional[List[str]] = None,
dependencies : Optional[Sequence[params.Depends]] = None,
include_in_schema: bool = True,
methods : Optional[Union[Set[str], List[str]]] = ('create','get','search','save','update','delete'),
methods_exclude : Optional[Union[Set[str], List[str]]] = []):
''' Generates the code for the endpoints
:param path: base_path, for the model resource location eg: /api/houses/
:param model: pydantic/Odim BaseModel, that is used for eg. Houses
:param tags: Starlette/FastAPI tags for endpoints
:param dependencies: Starlette/FastAPI dependencies for all endpoints
:param include_in_schema: whether to include in docs
:param methods: methods to automatically generate ('create','get','search','save','update','delete')
:param methods_exclude: methods to NOT automatically generate ('create','get','search','save','update','delete')
'''
add_methods = [ x for x in methods if x not in methods_exclude ]
model_name = model.__name__
other=""
if tags:
other+= ", tags="+str(tags)
if dependencies:
other+= ", dependencies="+str(dependencies)
if not include_in_schema:
other+= ", include_in_schema=False"
if 'get' in add_methods:
print(f'''
@router.get("{path}{{id}}", response_model={model_name}{other})
async def get_{model_name}(id : str):
\'\'\' Returns the individual {model_name} details\'\'\'
return await Odim({model_name}).get(id=id)
''')
if 'search' in add_methods:
print(f'''
@router.get("{path}", response_model=SearchResponse[{model_name}]{other})
async def search_{model_name}(search : dict = Depends(SearchParams)):
rsp = {{ "results" : await Odim({model_name}).find(search.q, search),
"total" : await Odim({model_name}).count(search.q),
"search" : search.dict()}}
return rsp
''')
if 'create' in add_methods:
print(f'''
@router.post("{path}", status_code=201, response_model={model_name}{other})
async def create_{model_name}(obj : {model_name}):
await Odim(obj).save()
return obj
''')
if 'save' in add_methods:
print(f'''
@router.put("{path}{{id}}", response_model={model_name}{other})
async def save_{model_name}(id : str, obj : {model_name}):
obj.id = id
await Odim(obj).save()
return obj
''')
if 'update' in add_methods:
print(f'''
@router.patch("{path}{{id}}", response_model={model_name}{other})
async def update_{model_name}(id : str, obj : {model_name}):
obj.id = id
await Odim(obj).update()
return obj
''')
if 'delete' in add_methods:
print(f'''
@router.delete("{path}{{id}}", status_code=200, response_model=OkResponse)
async def delete_{model_name}(id : str):
    await Odim({model_name}).delete(id)
    return OkResponse()
''')
def exec_extend_qeury(request : fastapi.Request, sl : dict = {}):
out = {}
for k, v in sl.items():
if callable(v):
out[k] = v(request)
else:
out[k] = v
return out
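
# Hypothetical usage sketch, not part of the original module: mounts CRUD endpoints
# for a placeholder pydantic model. "House", its fields and the "/api/houses/" path
# are illustrative only.
if __name__ == "__main__":
    class House(BaseModel):
        id: Optional[str] = None
        name: str = ""

    router = OdimRouter()
    router.mount_crud("/api/houses/", model=House, tags=["houses"], methods_exclude=["delete"])
    # generate() does not mount anything; it prints ready-to-paste endpoint code instead
    router.generate("/api/houses/", model=House)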
| 43.247748
| 187
| 0.584939
| 9,071
| 0.944797
| 0
| 0
| 0
| 0
| 1,273
| 0.13259
| 3,517
| 0.366316
|
9a760367155f89800e9ffffd081d1132a56544e5
| 194
|
py
|
Python
|
scripts/item/consume_2432803.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/item/consume_2432803.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/item/consume_2432803.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
# Princess No Damage Skin (30-Days)
success = sm.addDamageSkin(2432803)
if success:
sm.chat("The Princess No Damage Skin (30-Days) has been added to your account's damage skin collection.")
| 38.8
| 109
| 0.747423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.675258
|
9a768a1c9833791d7a707ef08123594b6480d371
| 1,184
|
py
|
Python
|
tests/test_product.py
|
technicapital/stake-python
|
8d0a985923318ca7b92f23e0c9a8319a75f37ff2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_product.py
|
technicapital/stake-python
|
8d0a985923318ca7b92f23e0c9a8319a75f37ff2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_product.py
|
technicapital/stake-python
|
8d0a985923318ca7b92f23e0c9a8319a75f37ff2
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import aiohttp
import pytest
from .client import HttpClient
from .constant import Url
from .product import Product
@pytest.mark.asyncio
async def test_show_portfolio(tracing_client):
return await tracing_client.equities.list()
@pytest.mark.asyncio
async def test_find_products_by_name(tracing_client):
from .product import ProductSearchByName
request = ProductSearchByName(keyword="techno")
products = await tracing_client.products.search(request)
assert len(products) == 10
@pytest.mark.asyncio
async def test_product_serializer():
async with aiohttp.ClientSession(raise_for_status=True) as session:
await session.get(HttpClient.url(Url.symbol.format(symbol="MSFT")))
async def _get_symbol(symbol):
response = await session.get(
HttpClient.url(Url.symbol.format(symbol=symbol))
)
return await response.json()
coros = [_get_symbol(symbol) for symbol in {"MSFT", "TSLA", "GOOG"}]
results = await asyncio.gather(*coros)
assert [
Product(**serialized_product["products"][0])
for serialized_product in results
]
| 26.909091
| 76
| 0.697635
| 0
| 0
| 0
| 0
| 1,043
| 0.880912
| 980
| 0.827703
| 42
| 0.035473
|
9a76e7fea3dd34891002703a3d4d4adaf6c009dc
| 1,346
|
py
|
Python
|
data_utils.py
|
tar-bin/DeepAA
|
acdae33a410eec87eb22419fce0adb513fa97219
|
[
"MIT"
] | 1
|
2021-07-27T09:31:20.000Z
|
2021-07-27T09:31:20.000Z
|
data_utils.py
|
tar-bin/DeepAA
|
acdae33a410eec87eb22419fce0adb513fa97219
|
[
"MIT"
] | null | null | null |
data_utils.py
|
tar-bin/DeepAA
|
acdae33a410eec87eb22419fce0adb513fa97219
|
[
"MIT"
] | null | null | null |
import numpy as np
from PIL import Image, ImageOps
class BaseImage(object):
"""
    Source image to be converted.
"""
def __init__(self, path):
"""
        Load the original image.
        :param path: path to the source image file
"""
image = Image.open(path)
array = np.asarray(image)
self.array = np.array(array)
def scale_image(self, new_width):
"""
        Resize the original image to a new width,
        preserving the aspect ratio.
        :param new_width: new width in pixels
        :return: resized image as an ndarray
"""
image = self.array
original_width = image.shape[1]
original_height = image.shape[0]
aspect_ratio = original_height/float(original_width)
new_height = int(aspect_ratio * new_width)
image = Image.fromarray(image)
new_image = image.resize((new_width, new_height), resample = Image.LANCZOS)
return np.asarray(new_image)
def add_mergin(self, h=18,w = 16):
image = self.array
new_image = np.ones((image.shape[0]+ 2 * h, image.shape[1] + 2 * w))
new_image = new_image*255
new_image[h:-h, w:-w] = image
return new_image
def gray_scale(self):
image = Image.fromarray(self.array)
image = ImageOps.grayscale(image)
return np.asarray(image)
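
# Hypothetical usage sketch, not part of the original module; 'input.png' is a placeholder path.
if __name__ == '__main__':
    base = BaseImage('input.png')
    resized = base.scale_image(new_width=120)  # aspect-ratio preserving resize
    gray = base.gray_scale()                   # grayscale copy of the original image
    print(resized.shape, gray.shape)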
| 26.392157
| 83
| 0.581724
| 1,391
| 0.963296
| 0
| 0
| 0
| 0
| 0
| 0
| 401
| 0.277701
|
9a77a425a1b61dc019f50e24ad07e8460b1a7df9
| 2,839
|
py
|
Python
|
ledfx/color.py
|
broccoliboy/LedFx
|
1c90d5c3ddaf993a072eab92d3e373dd3b0fb45c
|
[
"MIT"
] | 524
|
2020-12-18T19:34:55.000Z
|
2022-03-31T14:52:25.000Z
|
ledfx/color.py
|
broccoliboy/LedFx
|
1c90d5c3ddaf993a072eab92d3e373dd3b0fb45c
|
[
"MIT"
] | 119
|
2020-12-18T21:28:12.000Z
|
2022-03-31T14:44:02.000Z
|
ledfx/color.py
|
broccoliboy/LedFx
|
1c90d5c3ddaf993a072eab92d3e373dd3b0fb45c
|
[
"MIT"
] | 85
|
2020-12-18T18:23:16.000Z
|
2022-03-29T16:37:52.000Z
|
from collections import namedtuple
RGB = namedtuple("RGB", "red, green, blue")
COLORS = {
"red": RGB(255, 0, 0),
"orange-deep": RGB(255, 40, 0),
"orange": RGB(255, 120, 0),
"yellow": RGB(255, 200, 0),
"yellow-acid": RGB(160, 255, 0),
"green": RGB(0, 255, 0),
"green-forest": RGB(34, 139, 34),
"green-spring": RGB(0, 255, 127),
"green-teal": RGB(0, 128, 128),
"green-turquoise": RGB(0, 199, 140),
"green-coral": RGB(0, 255, 50),
"cyan": RGB(0, 255, 255),
"blue": RGB(0, 0, 255),
"blue-light": RGB(65, 105, 225),
"blue-navy": RGB(0, 0, 128),
"blue-aqua": RGB(0, 255, 255),
"purple": RGB(128, 0, 128),
"pink": RGB(255, 0, 178),
"magenta": RGB(255, 0, 255),
"black": RGB(0, 0, 0),
"white": RGB(255, 255, 255),
"brown": RGB(139, 69, 19),
"gold": RGB(255, 215, 0),
"hotpink": RGB(255, 105, 180),
"lightblue": RGB(173, 216, 230),
"lightgreen": RGB(152, 251, 152),
"lightpink": RGB(255, 182, 193),
"lightyellow": RGB(255, 255, 224),
"maroon": RGB(128, 0, 0),
"mint": RGB(189, 252, 201),
"olive": RGB(85, 107, 47),
"peach": RGB(255, 100, 100),
"plum": RGB(221, 160, 221),
"sepia": RGB(94, 38, 18),
"skyblue": RGB(135, 206, 235),
"steelblue": RGB(70, 130, 180),
"tan": RGB(210, 180, 140),
"violetred": RGB(208, 32, 144),
}
GRADIENTS = {
"Rainbow": {
"colors": [
"red",
"orange",
"yellow",
"green",
"green-turquoise",
"blue",
"purple",
"pink",
]
},
"Dancefloor": {"colors": ["red", "pink", "blue"]},
"Plasma": {"colors": ["blue", "purple", "red", "orange-deep", "yellow"]},
"Ocean": {"colors": ["blue-aqua", "blue"]},
"Viridis": {"colors": ["purple", "blue", "green-teal", "green", "yellow"]},
"Jungle": {"colors": ["green", "green-forest", "orange"]},
"Spring": {"colors": ["pink", "orange-deep", "yellow"]},
"Winter": {"colors": ["green-turquoise", "green-coral"]},
"Frost": {"colors": ["blue", "blue-aqua", "purple", "pink"]},
"Sunset": {"colors": ["blue-navy", "orange", "red"]},
"Borealis": {
"colors": [
"orange-deep",
"purple",
"green-turquoise",
"green",
]
},
"Rust": {"colors": ["orange-deep", "red"]},
"Christmas": {
"colors": [
"red",
"red",
"red",
"red",
"red",
"green",
"green",
"green",
"green",
"green",
],
"method": "repeat",
},
"Winamp": {
"colors": [
"green",
"yellow",
"orange",
"orange-deep",
"red",
]
},
}
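
# Hypothetical lookup sketch, not part of the original module: a gradient definition is
# just a list of names into the COLORS table above, so it can be resolved like this.
if __name__ == "__main__":
    rust_rgb = [COLORS[name] for name in GRADIENTS["Rust"]["colors"]]
    print(rust_rgb)  # [RGB(red=255, green=40, blue=0), RGB(red=255, green=0, blue=0)]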
| 27.833333
| 79
| 0.446988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,108
| 0.390278
|
9a7853c5ab201c882d582391f394325cd2ad7796
| 1,247
|
py
|
Python
|
src/test/nspawn_test/support/header_test.py
|
Andrei-Pozolotin/nspawn
|
9dd3926f1d1a3a0648f6ec14199cbf4069af1c98
|
[
"Apache-2.0"
] | 15
|
2019-10-10T17:35:48.000Z
|
2022-01-29T10:41:01.000Z
|
src/test/nspawn_test/support/header_test.py
|
Andrei-Pozolotin/nspawn
|
9dd3926f1d1a3a0648f6ec14199cbf4069af1c98
|
[
"Apache-2.0"
] | null | null | null |
src/test/nspawn_test/support/header_test.py
|
Andrei-Pozolotin/nspawn
|
9dd3926f1d1a3a0648f6ec14199cbf4069af1c98
|
[
"Apache-2.0"
] | 2
|
2019-10-10T17:36:43.000Z
|
2020-06-20T15:28:33.000Z
|
from nspawn.support.header import *
def test_header():
print()
head_dict = {
'etag':'some-hash',
'last-modified':'some-time',
'content-length':'some-size',
'nspawn-digest':'some-text',
}
assert head_dict[Header.etag] == 'some-hash'
assert head_dict[Header.last_modified] == 'some-time'
assert head_dict[Header.content_length] == 'some-size'
assert head_dict[Header.nspawn_digest] == 'some-text'
def test_compare_head():
print()
assert compare_header({
}, {
}) == HeadComp.undetermined
assert compare_header({
'etag':'123'
}, {
'etag':'"123"'
}) == HeadComp.same
assert compare_header({
'last-modified':'some-time',
'content-length':'some-size',
}, {
'last-modified':'some-time',
'content-length':'some-size',
}) == HeadComp.same
assert compare_header({
'last-modified':'some-time',
'content-length':'some-size-1',
}, {
'last-modified':'some-time',
'content-length':'some-size-2',
}) == HeadComp.different
assert compare_header({
'last-modified':'some-time',
}, {
'content-length':'some-size',
}) == HeadComp.undetermined
| 25.44898
| 58
| 0.57498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 433
| 0.347233
|
9a786a26a6979489803db0c2519bf5cf50427d08
| 2,042
|
py
|
Python
|
game1.py
|
akulakov/learnprogramming
|
ed7d557dabbef697773b4b369c8ed9cd3cdd55a6
|
[
"Apache-2.0"
] | null | null | null |
game1.py
|
akulakov/learnprogramming
|
ed7d557dabbef697773b4b369c8ed9cd3cdd55a6
|
[
"Apache-2.0"
] | null | null | null |
game1.py
|
akulakov/learnprogramming
|
ed7d557dabbef697773b4b369c8ed9cd3cdd55a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from helpers import sjoin, cjoin
from random import choice, shuffle
card_types = [
("tax",1,1), # tax everyone 2 coins => bank
("soldier",2,1),
("sergeant",3,1),
("captain",4,2),
("emperor",1,5),
("prince",1,1), # prince takes 1/3rd of bank
]
class Card:
def __init__(self, name, power=1, honor=1):
self.name = name
self.power, self.honor = power, honor
def __repr__(self):
return "<%s %s %s>" % (self.name, self.power, self.honor)
class Player:
coins = 4
out = False
def __init__(self, name, cards):
self.name = name
self.cards = cards
def __repr__(self):
return cjoin(self.name, self.cards, self.coins)
def get_card(self, name):
for c in self.cards:
if c.name == name:
return c
def score(self):
return sum(c.honor for c in self.cards)
deck = [Card(*c) for c in card_types]
deck += [Card(*c) for c in card_types]
for _ in range(15):
    deck.append(Card(*choice(card_types)))
shuffle(deck)
def draw(lst, n):
    items = lst[:n]
    del lst[:n]  # remove the drawn cards so each player receives a different hand
    return items
players = [Player('a', draw(deck,5)),
Player('b', draw(deck,5)),
Player('c', draw(deck,5))
]
class Play:
bank = 25
def play_prince(self, player, card):
amt = round(self.bank / 3)
self.bank -= amt
player.coins += amt
player.cards.remove(card)
def play_tax(self, player, card):
others = [p for p in players if p!=player]
for p in others:
p.coins -= 2
if p.coins < 0:
players.remove(p)
def check_end(self):
return len(players) == 1
def go(self):
for p in players:
prince = p.get_card("prince")
tax = p.get_card("tax")
if prince:
self.play_prince(p, prince)
elif tax:
                self.play_tax(p, tax)
| 24.023529
| 65
| 0.519589
| 1,315
| 0.643976
| 0
| 0
| 0
| 0
| 0
| 0
| 163
| 0.079824
|
9a78db38d0f259372303620cba450346c37cd245
| 683
|
py
|
Python
|
src/plotting/plot_permeability.py
|
pgniewko/Deep-Rock
|
b714b98a2c391b4a43c62412769e5732cbd0d07a
|
[
"BSD-3-Clause"
] | 1
|
2019-11-18T04:51:02.000Z
|
2019-11-18T04:51:02.000Z
|
src/plotting/plot_permeability.py
|
pgniewko/Deep-Rock
|
b714b98a2c391b4a43c62412769e5732cbd0d07a
|
[
"BSD-3-Clause"
] | null | null | null |
src/plotting/plot_permeability.py
|
pgniewko/Deep-Rock
|
b714b98a2c391b4a43c62412769e5732cbd0d07a
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
#
# Usage:
#    python plot_permeability.py DATA_FILE
#
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
data = np.loadtxt(sys.argv[1])
kappa_LB, kappa_CNN = data.T
kappa_LB = 10.0 ** kappa_LB
kappa_CNN = 10.0 ** kappa_CNN
fig, ax = plt.subplots(1, 1, sharey=True, figsize=(7, 7))
ax.set_xscale("log", nonposx="clip")
ax.set_yscale("log", nonposy="clip")
plt.tick_params(axis="both", which="major", labelsize=15)
plt.tick_params(axis="both", which="minor", labelsize=12)
plt.plot(kappa_LB, kappa_CNN, "+", color="green")
plt.xlabel("lattice-Boltzmann", fontsize=20)
plt.ylabel("ConvNet", fontsize=20, labelpad=-8)
plt.show()
| 21.34375
| 57
| 0.707174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.199122
|
9a79ab000b884a1fa7eeff49e8a3570bf0211367
| 1,664
|
py
|
Python
|
functions/python/todo-app.py
|
swiftycloud/swifty.todoapp
|
1a36c6e6f1af4584a8c0151e15e9ffcf2453f8c1
|
[
"MIT"
] | 5
|
2018-11-08T17:07:43.000Z
|
2019-04-23T15:18:31.000Z
|
functions/python/todo-app.py
|
swiftycloud/swifty.todoapp
|
1a36c6e6f1af4584a8c0151e15e9ffcf2453f8c1
|
[
"MIT"
] | null | null | null |
functions/python/todo-app.py
|
swiftycloud/swifty.todoapp
|
1a36c6e6f1af4584a8c0151e15e9ffcf2453f8c1
|
[
"MIT"
] | 3
|
2018-11-08T17:07:47.000Z
|
2020-11-22T00:20:38.000Z
|
import bson
import json
import swifty
#
# GET /tasks -- list tasks
# POST /tasks $BODY -- add new task
# GET /tasks/ID -- get info about task
# PUT /tasks/ID -- update task (except status)
# DELETE /tasks/ID -- remove task
# POST /tasks/ID/done -- mark task as done
#
def toTask(obj):
return { 'id': str(obj['_id']), 'task': obj['task'], 'status': obj['status'] }
def fromTask(body, q):
b = json.loads(body)
if 'task' in b:
q['task'] = b['task']
def Main(req):
db = swifty.MongoDatabase('tasks')
col = db['tasks']
p = req.path.split('/')
if p[0] != 'tasks':
return {}, { 'status': 404 }
q = { 'owner': req.claims['cookie'] }
if len(p) == 1:
if req.method == 'GET':
if 'status' in req.args:
q['status'] = req.args['status']
return [ toTask(x) for x in col.find(q) ], None
if req.method == 'POST':
q['status'] = 'new'
fromTask(req.body, q)
col.insert_one(q)
return {}, None
q['_id'] = bson.ObjectId(p[1])
if len(p) == 2:
if req.method == 'GET':
return toTask(col.find_one(q)), None
if req.method == 'PUT':
e = { }
fromTask(req.body, e)
col.update_one(q, { '$set': e })
return {}, None
if req.method == 'DELETE':
col.delete_one(q)
return {}, None
if len(p) == 3:
if p[2] == 'done' and req.method == 'POST':
col.update_one(q, { '$set': { 'status': 'done' } })
return {}, None
return {}, { 'status': 404 }
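
# Hypothetical local check of the serializers, not part of the deployed function
# (running it still requires the imported modules, including swifty, to be installed).
if __name__ == '__main__':
    doc = {'_id': bson.ObjectId(), 'task': 'buy milk', 'status': 'new'}
    print(toTask(doc))   # {'id': '<hex object id>', 'task': 'buy milk', 'status': 'new'}
    q = {}
    fromTask('{"task": "buy milk"}', q)
    print(q)             # {'task': 'buy milk'}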
| 25.6
| 82
| 0.477163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 479
| 0.287861
|
9a79fb2f2787441274d55999dc0843161af999b5
| 401
|
py
|
Python
|
dmoj/Uncategorized/tss17a.py
|
UserBlackBox/competitive-programming
|
2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc
|
[
"Unlicense"
] | null | null | null |
dmoj/Uncategorized/tss17a.py
|
UserBlackBox/competitive-programming
|
2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc
|
[
"Unlicense"
] | null | null | null |
dmoj/Uncategorized/tss17a.py
|
UserBlackBox/competitive-programming
|
2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc
|
[
"Unlicense"
] | null | null | null |
# https://dmoj.ca/problem/tss17a
# https://dmoj.ca/submission/2226280
import sys
n = int(sys.stdin.readline()[:-1])
for i in range(n):
instruction = sys.stdin.readline()[:-1].split()
printed = False
for j in range(3):
if instruction.count(instruction[j]) >= 2:
print(instruction[j])
printed = True
break
if not printed:
print('???')
| 26.733333
| 51
| 0.578554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 0.182045
|
9a7ad9eea9244d2609a2517f92f7fc289fb240da
| 1,159
|
py
|
Python
|
todo/views/users_detail.py
|
josalhor/WebModels
|
6b9cde3141c53562f40b129e6e1c87448ce9853a
|
[
"BSD-3-Clause"
] | null | null | null |
todo/views/users_detail.py
|
josalhor/WebModels
|
6b9cde3141c53562f40b129e6e1c87448ce9853a
|
[
"BSD-3-Clause"
] | 41
|
2021-03-23T12:58:25.000Z
|
2021-05-25T11:38:42.000Z
|
todo/views/users_detail.py
|
josalhor/WebModels
|
6b9cde3141c53562f40b129e6e1c87448ce9853a
|
[
"BSD-3-Clause"
] | null | null | null |
from todo.templatetags.todo_tags import is_management
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse
from django.shortcuts import render
from todo.models import Designer, Management, Writer, Editor
@login_required
@user_passes_test(is_management)
def users_detail(request, list_slug=None) -> HttpResponse:
# Which users to show on this list view?
if list_slug == "editors":
users = Editor.objects.all()
elif list_slug == "designers":
users = Designer.objects.all()
elif list_slug == "writers":
users = Writer.objects.all()
elif list_slug == "management":
users = Management.objects.all()
# Additional filtering
active_users = users.filter(user__is_active=True)
unactive_users = users.filter(user__is_active=False)
# ######################
# Add New User Form
# ######################
context = {
"list_slug": list_slug,
"active_users": active_users,
"unactive_users": unactive_users,
"users": users,
}
return render(request, "todo/users_detail.html", context)
| 30.5
| 75
| 0.667817
| 0
| 0
| 0
| 0
| 892
| 0.769629
| 0
| 0
| 243
| 0.209664
|
9a7cfcbc63f3c97c82737bfbbfa13e26624618e7
| 214
|
py
|
Python
|
src/librhc/cost/__init__.py
|
arnavthareja/mushr_pixelart_mpc
|
db6ee6cae9b4cb1d3b213fed06690074372c824b
|
[
"BSD-3-Clause"
] | 5
|
2019-08-30T08:20:27.000Z
|
2021-08-01T17:16:16.000Z
|
src/librhc/cost/__init__.py
|
arnavthareja/mushr_pixelart_mpc
|
db6ee6cae9b4cb1d3b213fed06690074372c824b
|
[
"BSD-3-Clause"
] | 1
|
2020-09-09T13:38:08.000Z
|
2020-12-15T12:20:26.000Z
|
src/librhc/cost/__init__.py
|
arnavthareja/mushr_pixelart_mpc
|
db6ee6cae9b4cb1d3b213fed06690074372c824b
|
[
"BSD-3-Clause"
] | 4
|
2019-09-14T21:26:09.000Z
|
2021-08-27T23:01:41.000Z
|
# Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR
# License: BSD 3-Clause. See LICENSE.md file in root directory.
from .waypoints import Waypoints
__all__ = ["Waypoints"]
| 30.571429
| 90
| 0.757009
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.766355
|
9a7d3e4f21c385675dec5f7b1784429e468d978e
| 1,401
|
py
|
Python
|
759/Employee Free Time.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
759/Employee Free Time.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
759/Employee Free Time.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
import heapq
# Definition for an Interval.
class Interval:
def __init__(self, start: int = None, end: int = None):
self.start = start
self.end = end
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
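        # Merge every employee's interval list with a min-heap ordered by start time,
        # keep a running merged interval, and record any gap before the next popped
        # start as free time common to all employees.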
allinterval = []
heapq.heapify(allinterval)
ans = []
for i,e in enumerate(schedule):
heapq.heappush(allinterval,(e[0].start,e[0].end,i,0))
interval = Interval()
flag = False
freetime = []
while len(allinterval)>0:
cur = heapq.heappop(allinterval)
id = cur[2]
idx = cur[3]
if flag == False:
interval = Interval(cur[0],cur[1])
flag = True
else:
if cur[0]>interval.end:
freetime.append(Interval(interval.end,cur[0]))
interval.start = cur[0]
interval.end = cur[1]
else:
interval.end = max(interval.end,cur[1])
if len(schedule[id])-1>idx:
heapq.heappush(allinterval,(schedule[id][idx+1].start,schedule[id][idx+1].end,id,idx+1))
return freetime
i1 = Interval(1,2)
i2 = Interval(6,7)
i3 = Interval(2,4)
i4 = Interval(2,5)
i5 = Interval(9,12)
A = Solution()
print(A.employeeFreeTime([[i1,i2],[i3],[i4,i5]]))
| 31.840909
| 104
| 0.532477
| 1,169
| 0.834404
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 0.039258
|
9a7d9c6b811efb6d15e0d51600e0fd5bb7bf8479
| 41,312
|
py
|
Python
|
Comms1_internal/Final.py
|
CoderStellaJ/CG4002
|
474bda123856d8a88bef5ff787259fcd9ba9f09a
|
[
"MIT"
] | null | null | null |
Comms1_internal/Final.py
|
CoderStellaJ/CG4002
|
474bda123856d8a88bef5ff787259fcd9ba9f09a
|
[
"MIT"
] | 10
|
2020-01-28T14:17:26.000Z
|
2020-02-05T04:53:06.000Z
|
Comms1_internal/Final.py
|
CoderStellaJ/CG4002
|
474bda123856d8a88bef5ff787259fcd9ba9f09a
|
[
"MIT"
] | 5
|
2021-01-21T08:00:56.000Z
|
2021-09-28T05:06:36.000Z
|
from bluepy import btle
import concurrent
from concurrent import futures
import threading
import multiprocessing
import time
from time_sync import *
import eval_client
import dashBoardClient
from joblib import dump, load
import numpy # to count labels and store in dict
import operator # to get most predicted label
import json
import random # RNG in worst case
from sklearn.preprocessing import StandardScaler # to normalise data
class UUIDS:
SERIAL_COMMS = btle.UUID("0000dfb1-0000-1000-8000-00805f9b34fb")
class Delegate(btle.DefaultDelegate):
def __init__(self, params):
btle.DefaultDelegate.__init__(self)
def handleNotification(self, cHandle, data):
ultra96_receiving_timestamp = time.time() * 1000
for idx in range(len(beetle_addresses)):
if global_delegate_obj[idx] == self:
#print("receiving data from %s" % (beetle_addresses[idx]))
#print("data: " + data.decode('ISO-8859-1'))
if beetle_addresses[idx] == "50:F1:4A:CC:01:C4": # emg beetle data
emg_buffer[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
if '>' in data.decode('ISO-8859-1'):
print("sending emg dataset to dashboard")
packet_count_dict[beetle_addresses[idx]] += 1
try:
arr = emg_buffer[beetle_addresses[idx]].split(">")[
0]
final_arr = arr.split(",")
board_client.send_data_to_DB(
beetle_addresses[idx], str(final_arr))
emg_buffer[beetle_addresses[idx]] = ""
except Exception as e:
print(e)
board_client.send_data_to_DB(
beetle_addresses[idx], str(["1", "1", "1", "1"]))
emg_buffer[beetle_addresses[idx]] = ""
else:
if incoming_data_flag[beetle_addresses[idx]] is True:
if handshake_flag_dict[beetle_addresses[idx]] is True:
buffer_dict[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
if '>' not in data.decode('ISO-8859-1'):
pass
else:
if 'T' in buffer_dict[beetle_addresses[idx]]:
for char in buffer_dict[beetle_addresses[idx]]:
if char == 'T':
ultra96_receiving_timestamp = time.time() * 1000
continue
if char == '>': # end of packet
try:
timestamp_dict[beetle_addresses[idx]].append(
int(datastring_dict[beetle_addresses[idx]]))
except Exception:
timestamp_dict[beetle_addresses[idx]].append(
0)
timestamp_dict[beetle_addresses[idx]].append(
ultra96_receiving_timestamp)
handshake_flag_dict[beetle_addresses[idx]] = False
clocksync_flag_dict[beetle_addresses[idx]] = True
# clear serial input buffer to get ready for data packets
datastring_dict[beetle_addresses[idx]] = ""
buffer_dict[beetle_addresses[idx]] = ""
return
elif char != '>':
if char == '|': # signify start of next timestamp
try:
timestamp_dict[beetle_addresses[idx]].append(
int(datastring_dict[beetle_addresses[idx]]))
except Exception:
timestamp_dict[beetle_addresses[idx]].append(
0)
datastring_dict[beetle_addresses[idx]] = ""
else:
datastring_dict[beetle_addresses[idx]] += char
else:
pass
else:
if '>' in data.decode('ISO-8859-1'):
buffer_dict[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
#print("storing dance dataset")
packet_count_dict[beetle_addresses[idx]] += 1
else:
buffer_dict[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
# send data to dashboard once every 10 datasets
try:
if packet_count_dict[beetle_addresses[idx]] % 10 == 0 and '>' in data.decode('ISO-8859-1'):
print("sending data to dashboard")
first_string = buffer_dict[beetle_addresses[idx]].split("|")[
0]
final_arr = [first_string.split(",")[0], str(int(first_string.split(",")[1])/divide_get_float), str(int(first_string.split(",")[2])/divide_get_float),
str(int(first_string.split(",")[
3])/divide_get_float), str(int(first_string.split(",")[4])/divide_get_float),
str(int(first_string.split(",")[5])/divide_get_float), str(int(first_string.split(",")[6])/divide_get_float)]
board_client.send_data_to_DB(
beetle_addresses[idx], str(final_arr))
except Exception as e:
print(e)
"""
class EMGThread(object):
def __init__(self):
thread = threading.Thread(target=self.getEMGData, args=(beetle, ))
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def getEMGData(self, beetle):
while True:
try:
if beetle.waitForNotifications(2):
continue
except Exception as e:
reestablish_connection(beetle)
"""
def initHandshake(beetle):
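    # Clock-sync handshake: record the Ultra96 send time, request the beetle's
    # timestamp with the 'T'/'H'/'Z' packets, record the receive time in the
    # notification delegate, and derive a per-beetle clock offset from those values.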
retries = 0
if beetle.addr != "50:F1:4A:CC:01:C4":
ultra96_sending_timestamp = time.time() * 1000
incoming_data_flag[beetle.addr] = True
handshake_flag_dict[beetle.addr] = True
for characteristic in beetle.getCharacteristics():
if characteristic.uuid == UUIDS.SERIAL_COMMS:
ultra96_sending_timestamp = time.time() * 1000
timestamp_dict[beetle.addr].append(
ultra96_sending_timestamp)
print("Sending 'T' and 'H' and 'Z' packets to %s" %
(beetle.addr))
characteristic.write(
bytes('T', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('H', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
while True:
try:
if beetle.waitForNotifications(2):
if clocksync_flag_dict[beetle.addr] is True:
# function for time calibration
try:
clock_offset_tmp = calculate_clock_offset(timestamp_dict[beetle.addr])
tmp_value_list = []
if clock_offset_tmp is not None:
tmp_value_list.append(clock_offset_tmp)
clock_offset_dict[beetle.addr] = tmp_value_list
except Exception as e:
print(e)
timestamp_dict[beetle.addr].clear()
print("beetle %s clock offset: %i" %
(beetle.addr, clock_offset_dict[beetle.addr][-1]))
clocksync_flag_dict[beetle.addr] = False
incoming_data_flag[beetle.addr] = False
return
else:
continue
else:
while True:
if retries >= 5:
retries = 0
break
print(
"Failed to receive timestamp, sending 'Z', 'T', 'H', and 'R' packet to %s" % (beetle.addr))
characteristic.write(
bytes('R', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('T', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('H', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
retries += 1
except Exception as e:
reestablish_connection(beetle)
def establish_connection(address):
while True:
try:
for idx in range(len(beetle_addresses)):
# for initial connections or when any beetle is disconnected
if beetle_addresses[idx] == address:
if global_beetle[idx] != 0: # do not reconnect if already connected
return
else:
print("connecting with %s" % (address))
beetle = btle.Peripheral(address)
global_beetle[idx] = beetle
beetle_delegate = Delegate(address)
global_delegate_obj[idx] = beetle_delegate
beetle.withDelegate(beetle_delegate)
if address != "50:F1:4A:CC:01:C4":
initHandshake(beetle)
print("Connected to %s" % (address))
return
except Exception as e:
print(e)
for idx in range(len(beetle_addresses)):
# for initial connections or when any beetle is disconnected
if beetle_addresses[idx] == address:
if global_beetle[idx] != 0: # do not reconnect if already connected
return
time.sleep(3)
def reestablish_connection(beetle):
while True:
try:
print("reconnecting to %s" % (beetle.addr))
beetle.connect(beetle.addr)
print("re-connected to %s" % (beetle.addr))
return
except:
time.sleep(1)
continue
def getDanceData(beetle):
if beetle.addr != "50:F1:4A:CC:01:C4":
timeout_count = 0
retries = 0
incoming_data_flag[beetle.addr] = True
for characteristic in beetle.getCharacteristics():
if characteristic.uuid == UUIDS.SERIAL_COMMS:
while True:
if retries >= 10:
retries = 0
break
                    print(
                        "sending 'A' to beetle %s to collect dancing data" % (beetle.addr))
characteristic.write(
bytes('A', 'UTF-8'), withResponse=False)
retries += 1
while True:
try:
if beetle.waitForNotifications(2):
#print("getting data...")
# print(packet_count_dict[beetle.addr])
# if number of datasets received from all beetles exceed expectation
if packet_count_dict[beetle.addr] >= num_datasets:
print("sufficient datasets received from %s. Processing data now" % (
beetle.addr))
# reset for next dance move
packet_count_dict[beetle.addr] = 0
incoming_data_flag[beetle.addr] = False
while True:
if retries >= 10:
break
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
retries += 1
return
continue
# beetle finish transmitting, but got packet losses
elif (packet_count_dict[beetle.addr] < num_datasets) and (packet_count_dict[beetle.addr] >= 1):
print(packet_count_dict[beetle.addr])
print("sufficient datasets received from %s with packet losses. Processing data now" % (
beetle.addr))
# reset for next dance move
packet_count_dict[beetle.addr] = 0
incoming_data_flag[beetle.addr] = False
while True:
if retries >= 10:
break
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
retries += 1
return
elif timeout_count >= 3:
incoming_data_flag[beetle.addr] = False
packet_count_dict[beetle.addr] = 0
timeout_count = 0
return
else: # beetle did not start transmitting despite ultra96 sending 'A' previously
timeout_count += 1
packet_count_dict[beetle.addr] = 0
retries = 0
while True:
if retries >= 10:
retries = 0
break
print(
"Failed to receive data, resending 'A' and 'B' packet to %s" % (beetle.addr))
characteristic.write(
bytes('A', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('B', 'UTF-8'), withResponse=False)
retries += 1
except Exception as e:
reestablish_connection(beetle)
def getEMGData(beetle):
retries = 0
for characteristic in beetle.getCharacteristics():
if characteristic.uuid == UUIDS.SERIAL_COMMS:
while True:
if retries >= 5:
retries = 0
break
                print(
                    "sending 'E' to beetle %s to collect emg data" % (beetle.addr))
characteristic.write(
bytes('E', 'UTF-8'), withResponse=False)
retries += 1
while True:
try:
if beetle.waitForNotifications(2):
if packet_count_dict[beetle.addr] >= 1:
packet_count_dict[beetle.addr] = 0
retries = 0
while True:
if retries >= 8:
break
characteristic.write(
bytes('X', 'UTF-8'), withResponse=False)
retries += 1
return
continue
else:
print("failed to collect emg data, resending 'E'")
characteristic.write(
bytes('E', 'UTF-8'), withResponse=False)
except Exception as e:
reestablish_connection(beetle)
def processData(address):
if address != "50:F1:4A:CC:01:C4":
data_dict = {address: {}}
def deserialize(buffer_dict, result_dict, address):
for char in buffer_dict[address]:
# start of new dataset
if char == 'D' or end_flag[address] is True:
# 2nd part of dataset lost or '>' lost in transmission
if start_flag[address] is True:
try:
# if only '>' lost in transmission, can keep dataset. Else delete
if checksum_dict[address] != int(datastring_dict[address]):
del result_dict[address][dataset_count_dict[address]]
except Exception: # 2nd part of dataset lost
try:
del result_dict[address][dataset_count_dict[address]]
except Exception:
pass
# reset datastring to prepare for next dataset
datastring_dict[address] = ""
# reset checksum to prepare for next dataset
checksum_dict[address] = 0
comma_count_dict[address] = 0
dataset_count_dict[address] += 1
timestamp_flag_dict[address] = True
checksum_dict[address] ^= ord(char)
start_flag[address] = True
end_flag[address] = False
if char != 'D' and char != ',' and char != '|' and char != '>' and (char == '-' or char == '.' or float_flag_dict[address] is True or timestamp_flag_dict[address] is True):
datastring_dict[address] += char
checksum_dict[address] ^= ord(char)
elif char == ' ':
datastring_dict[address] += char
checksum_dict[address] ^= ord(char)
elif char == ',': # next value
comma_count_dict[address] += 1
checksum_dict[address] ^= ord(char)
# already past timestamp value
if comma_count_dict[address] == 1:
timestamp_flag_dict[address] = False
try:
result_dict[address].setdefault(
dataset_count_dict[address], []).append(int(datastring_dict[address]))
except Exception:
try:
del result_dict[address][dataset_count_dict[address]]
except Exception:
pass
float_flag_dict[address] = True
elif comma_count_dict[address] < 5: # yaw, pitch, roll floats
try:
result_dict[address][dataset_count_dict[address]].append(
float("{0:.2f}".format((int(datastring_dict[address]) / divide_get_float))))
except Exception:
try:
del result_dict[address][dataset_count_dict[address]]
except Exception:
pass
else: # accelerometer integers
try:
result_dict[address][dataset_count_dict[address]].append(
int(int(datastring_dict[address]) / divide_get_float))
except Exception:
try:
del result_dict[address][dataset_count_dict[address]]
except Exception:
pass
datastring_dict[address] = ""
elif char == '>': # end of current dataset
# print("ultra96 checksum: %i" % (checksum_dict[address]))
# print("beetle checksum: %i" % (int(datastring_dict[address])))
# received dataset is invalid; drop the dataset from data dictionary
try:
if checksum_dict[address] != int(datastring_dict[address]):
del result_dict[address][dataset_count_dict[address]]
except Exception:
try:
del result_dict[address][dataset_count_dict[address]]
except Exception:
pass
# reset datastring to prepare for next dataset
datastring_dict[address] = ""
# reset checksum to prepare for next dataset
checksum_dict[address] = 0
comma_count_dict[address] = 0
start_flag[address] = False
end_flag[address] = True
# missing data in previous dataset
try:
if len(result_dict[address][list(result_dict[address].keys())[-1]]) < 7:
del result_dict[address][list(
result_dict[address].keys())[-1]]
except Exception as e:
print(e)
print("error in processData in line 379")
elif char == '|' or (float_flag_dict[address] is False and timestamp_flag_dict[address] is False):
if float_flag_dict[address] is True:
try:
result_dict[address][dataset_count_dict[address]].append(
int(int(datastring_dict[address]) / divide_get_float))
except Exception:
try:
del result_dict[address][dataset_count_dict[address]]
except Exception:
pass
# clear datastring to prepare take in checksum from beetle
datastring_dict[address] = ""
float_flag_dict[address] = False
elif char != '|' and char != '>':
datastring_dict[address] += char
try:
if len(result_dict[address][list(result_dict[address].keys())[-1]]) < 7:
del result_dict[address][list(
result_dict[address].keys())[-1]]
except Exception as e:
print(e)
print("error in processData in line 478")
for character in "\r\n":
buffer_dict[address] = buffer_dict[address].replace(character, "")
deserialize(buffer_dict, data_dict, address)
dataset_count_dict[address] = 0
return data_dict
def parse_data(dic_data, beetle):
# collect hand data
data = []
for v in dic_data[beetle].values():
ypr = [] # yaw, pitch, roll
for i in range(1, 7):
ypr.append(v[i])
data.append(ypr)
return (data)
def predict_beetle(beetle_data, model):
pred_arr = model.predict(beetle_data)
unique, counts = numpy.unique(pred_arr, return_counts=True)
pred_count = dict(zip(unique, counts))
prediction = max(pred_count.items(), key=operator.itemgetter(1))[0]
return prediction
# Program to find most frequent element in a list
def most_frequent_prediction(pred_list):
return max(set(pred_list), key=pred_list.count)
def find_new_position(ground_truth, b1_move, b2_move, b3_move):
# ground_truth = [3, 2, 1]
# p1_movement = 'R'
# p2_movement = 'S'
# p3_movement = 'L'
dic = {1: b1_move, 2: b2_move, 3: b3_move}
p1_movement = dic[ground_truth[0]]
p2_movement = dic[ground_truth[1]]
p3_movement = dic[ground_truth[2]]
if p1_movement == "R" and p2_movement == "S" and p3_movement == "L":
# output = [3, 2, 1]
output = [ground_truth[2], ground_truth[1], ground_truth[0]]
elif p1_movement == "R" and p2_movement == "L" and p3_movement == "S":
# output = [2, 1, 3]
output = [ground_truth[1], ground_truth[0], ground_truth[2]]
elif p1_movement == "R" and p2_movement == "L" and p3_movement == "L":
# output = [2, 3, 1]
output = [ground_truth[1], ground_truth[2], ground_truth[0]]
elif p1_movement == "S" and p2_movement == "R" and p3_movement == "L":
# output = [1, 3, 2]
output = [ground_truth[0], ground_truth[2], ground_truth[1]]
elif p1_movement == "S" and p2_movement == "L" and p3_movement == "S":
# output = [2, 1, 3]
output = [ground_truth[1], ground_truth[0], ground_truth[2]]
else:
# output = [1, 2, 3]
output = ground_truth
position = str(output[0]) + " " + str(output[1]) + " " + str(output[2])
return position
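# Fallback: predict the dance from a single beetle's data when the other beetles have none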
def eval_1beetle(beetle_dict_1, beetle_1):
# Get beetle data from dictionaries
beetle1_data = parse_data(beetle_dict_1, beetle_1)
# Predict dance move of each beetle
#beetle1_dance = predict_beetle(beetle1_data, mlp_dance)
pred_arr = mlp_dance.predict(beetle1_data)
unique, counts = numpy.unique(pred_arr, return_counts=True)
pred_count = dict(zip(unique, counts))
beetle1_dance = max(pred_count.items(), key=operator.itemgetter(1))[0]
return beetle1_dance
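# Standardise features to zero mean and unit variance; if fitting the scaler fails, return the data unchanged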
def normalise_data(data):
try:
scaler = StandardScaler()
scaler.fit(data)
data = scaler.transform(data)
return data
except Exception as e:
return data
if __name__ == '__main__':
# 50:F1:4A:CB:FE:EE: position 1, 1C:BA:8C:1D:30:22: position 2, 78:DB:2F:BF:2C:E2: position 3
start_time = time.time()
# global variables
"""
beetle_addresses = ["50:F1:4A:CC:01:C4", "50:F1:4A:CB:FE:EE", "78:DB:2F:BF:2C:E2",
"1C:BA:8C:1D:30:22"]
"""
beetle_addresses = ["50:F1:4A:CC:01:C4", "50:F1:4A:CB:FE:EE", "78:DB:2F:BF:2C:E2",
"1C:BA:8C:1D:30:22"]
divide_get_float = 100.0
global_delegate_obj = []
global_beetle = []
handshake_flag_dict = {"50:F1:4A:CB:FE:EE": True,
"78:DB:2F:BF:2C:E2": True, "1C:BA:8C:1D:30:22": True}
emg_buffer = {"50:F1:4A:CC:01:C4": ""}
buffer_dict = {"50:F1:4A:CB:FE:EE": "",
"78:DB:2F:BF:2C:E2": "", "1C:BA:8C:1D:30:22": ""}
incoming_data_flag = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
ground_truth = [1, 2, 3]
ACTIONS = ['muscle', 'weightlifting', 'shoutout']
POSITIONS = ['1 2 3', '3 2 1', '2 3 1', '3 1 2', '1 3 2', '2 1 3']
beetle1 = "50:F1:4A:CB:FE:EE"
beetle2 = "78:DB:2F:BF:2C:E2"
beetle3 = "1C:BA:8C:1D:30:22"
dance = "shoutout"
new_pos = "1 2 3"
# data global variables
num_datasets = 200
beetle1_data_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_data_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_data_dict = {"1C:BA:8C:1D:30:22": {}}
beetle1_moving_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_moving_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_moving_dict = {"1C:BA:8C:1D:30:22": {}}
beetle1_dancing_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_dancing_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_dancing_dict = {"1C:BA:8C:1D:30:22": {}}
datastring_dict = {"50:F1:4A:CB:FE:EE": "",
"78:DB:2F:BF:2C:E2": "", "1C:BA:8C:1D:30:22": ""}
packet_count_dict = {"50:F1:4A:CC:01:C4": 0, "50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
dataset_count_dict = {"50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
float_flag_dict = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
timestamp_flag_dict = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
comma_count_dict = {"50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
checksum_dict = {"50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
start_flag = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
end_flag = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
# clock synchronization global variables
dance_count = 0
clocksync_flag_dict = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
timestamp_dict = {"50:F1:4A:CB:FE:EE": [],
"78:DB:2F:BF:2C:E2": [], "1C:BA:8C:1D:30:22": []}
clock_offset_dict = {"50:F1:4A:CB:FE:EE": [],
"78:DB:2F:BF:2C:E2": [], "1C:BA:8C:1D:30:22": []}
[global_delegate_obj.append(0) for idx in range(len(beetle_addresses))]
[global_beetle.append(0) for idx in range(len(beetle_addresses))]
try:
eval_client = eval_client.Client("192.168.43.6", 8080, 6, "cg40024002group6")
except Exception as e:
print(e)
try:
board_client = dashBoardClient.Client("192.168.43.248", 8080, 6, "cg40024002group6")
except Exception as e:
print(e)
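    # connect to each beetle in turn, loading the trained MLP models between connections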
establish_connection("50:F1:4A:CC:01:C4")
time.sleep(2)
establish_connection("78:DB:2F:BF:2C:E2")
time.sleep(3)
# Load MLP NN model
mlp_dance = load('mlp_dance_LATEST.joblib')
establish_connection("50:F1:4A:CB:FE:EE")
time.sleep(3)
# Load Movement ML
mlp_move = load('mlp_movement_LATEST.joblib')
establish_connection("1C:BA:8C:1D:30:22")
with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
for beetle in global_beetle:
if beetle.addr == "50:F1:4A:CC:01:C4":
data_executor.submit(getEMGData, beetle)
data_executor.shutdown(wait=True)
# start collecting data only after 1 min passed
while True:
elapsed_time = time.time() - start_time
if int(elapsed_time) >= 60:
break
else:
print(elapsed_time)
time.sleep(1)
"""
for beetle in global_beetle:
print(beetle.addr)
emg_thread = EMGThread(global_beetle[3])
"""
while True:
with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
{data_executor.submit(getDanceData, beetle): beetle for beetle in global_beetle}
data_executor.shutdown(wait=True)
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
data_executor.submit(getEMGData, global_beetle[0])
data_executor.shutdown(wait=True)
"""
# do calibration once every 4 moves; change 4 to other values according to time calibration needs
if dance_count == 1:
print("Proceed to do time calibration...")
# clear clock_offset_dict for next time calibration
for beetle in global_beetle:
if beetle.addr != "50:F1:4A:CC:01:C4":
initHandshake(beetle)
if dance_count == 1:
dance_count = 0
dance_count += 1
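        # parse each beetle's raw data buffer into datasets in parallel worker processes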
pool = multiprocessing.Pool()
workers = [pool.apply_async(processData, args=(address, ))
for address in beetle_addresses]
result = [worker.get() for worker in workers]
pool.close()
try:
# change to 1 if using emg beetle, 0 if not using
for idx in range(1, len(result)):
for address in result[idx].keys():
if address == "50:F1:4A:CB:FE:EE":
beetle1_data_dict[address] = result[idx][address]
elif address == "78:DB:2F:BF:2C:E2":
beetle2_data_dict[address] = result[idx][address]
elif address == "1C:BA:8C:1D:30:22":
beetle3_data_dict[address] = result[idx][address]
except Exception as e:
pass
try:
for dataset_num, dataset_list in beetle1_data_dict["50:F1:4A:CB:FE:EE"].items():
if dataset_list[0] == 0: # moving data
beetle1_moving_dict["50:F1:4A:CB:FE:EE"].update(
{dataset_num: dataset_list})
else: # dancing data
beetle1_dancing_dict["50:F1:4A:CB:FE:EE"].update(
{dataset_num: dataset_list})
except Exception as e:
pass
try:
for dataset_num, dataset_list in beetle2_data_dict["78:DB:2F:BF:2C:E2"].items():
if dataset_list[0] == 0: # moving data
beetle2_moving_dict["78:DB:2F:BF:2C:E2"].update(
{dataset_num: dataset_list})
else: # dancing data
beetle2_dancing_dict["78:DB:2F:BF:2C:E2"].update(
{dataset_num: dataset_list})
except Exception as e:
pass
try:
for dataset_num, dataset_list in beetle3_data_dict["1C:BA:8C:1D:30:22"].items():
if dataset_list[0] == 0: # moving data
beetle3_moving_dict["1C:BA:8C:1D:30:22"].update(
{dataset_num: dataset_list})
else: # dancing data
beetle3_dancing_dict["1C:BA:8C:1D:30:22"].update(
{dataset_num: dataset_list})
except Exception as e:
pass
# clear buffer for next move
for address in buffer_dict.keys():
buffer_dict[address] = ""
# print(beetle1_data_dict)
# print(beetle2_data_dict)
# print(beetle3_data_dict)
with open(r'position.txt', 'a') as file:
file.write(json.dumps(beetle1_moving_dict) + "\n")
file.write(json.dumps(beetle2_moving_dict) + "\n")
file.write(json.dumps(beetle3_moving_dict) + "\n")
file.close()
with open(r'dance.txt', 'a') as file:
file.write(json.dumps(beetle1_dancing_dict) + "\n")
file.write(json.dumps(beetle2_dancing_dict) + "\n")
file.write(json.dumps(beetle3_dancing_dict) + "\n")
file.close()
# synchronization delay
try:
beetle1_time_ultra96 = calculate_ultra96_time(
beetle1_dancing_dict, clock_offset_dict["50:F1:4A:CB:FE:EE"][0])
beetle2_time_ultra96 = calculate_ultra96_time(
beetle2_dancing_dict, clock_offset_dict["78:DB:2F:BF:2C:E2"][0])
beetle3_time_ultra96 = calculate_ultra96_time(
beetle3_dancing_dict, clock_offset_dict["1C:BA:8C:1D:30:22"][0])
sync_delay = calculate_sync_delay(beetle1_time_ultra96, beetle2_time_ultra96, beetle3_time_ultra96)
except Exception as e:
print(e)
print("use default sync delay")
sync_delay = 950
# print("Beetle 1 ultra 96 time: ", beetle1_time_ultra96)
# print("Beetle 2 ultra 96 time: ", beetle2_time_ultra96)
# print("Beetle 3 ultra 96 time: ", beetle3_time_ultra96)
print("Synchronization delay is: ", sync_delay)
# machine learning
# ml_result = get_prediction(beetle1_data_dict)
"""
"""
beetle1_moving_dict = parse_data(beetle1_moving_dict, beetle1)
beetle2_moving_dict = parse_data(beetle2_moving_dict, beetle2)
beetle3_moving_dict = parse_data(beetle3_moving_dict, beetle3)
beetle1_moving_dict = normalise_data(beetle1_moving_dict)
beetle2_moving_dict = normalise_data(beetle2_moving_dict)
beetle3_moving_dict = normalise_data(beetle3_moving_dict)
# Predict movement direction of each beetle
try:
beetle1_move = predict_beetle(beetle1_moving_dict, mlp_move)
except Exception as e:
beetle1_move = 'S'
try:
beetle2_move = predict_beetle(beetle2_moving_dict, mlp_move)
except Exception as e:
beetle2_move = 'S'
try:
beetle3_move = predict_beetle(beetle3_moving_dict, mlp_move)
except Exception as e:
beetle3_move = 'S'
# Find new position
new_pos = find_new_position(
ground_truth, beetle1_move, beetle2_move, beetle3_move)
# PREDICT DANCE
if beetle1_dancing_dict[beetle1] and beetle2_dancing_dict[beetle2] and beetle3_dancing_dict[beetle3]:
# Get DANCE data from dictionaries in arguments
beetle1_dance_data = parse_data(beetle1_dancing_dict, beetle1)
beetle2_dance_data = parse_data(beetle2_dancing_dict, beetle2)
beetle3_dance_data = parse_data(beetle3_dancing_dict, beetle3)
# print(beetle1_data)
# Normalise DANCE data
beetle1_dance_data_norm = normalise_data(beetle1_dance_data)
beetle2_dance_data_norm = normalise_data(beetle2_dance_data)
beetle3_dance_data_norm = normalise_data(beetle3_dance_data)
# print(beetle1_data_norm)
# Predict DANCE of each beetle
beetle1_dance = predict_beetle(beetle1_dance_data_norm, mlp_dance)
beetle2_dance = predict_beetle(beetle2_dance_data_norm, mlp_dance)
beetle3_dance = predict_beetle(beetle3_dance_data_norm, mlp_dance)
# print(beetle1_dance)
dance_predictions = [beetle1_dance, beetle2_dance, beetle3_dance]
dance = most_frequent_prediction(dance_predictions)
elif beetle2_dancing_dict[beetle2] and beetle3_dancing_dict[beetle3]:
dance = eval_1beetle(beetle2_dancing_dict, beetle2)
elif beetle1_dancing_dict[beetle1] and beetle3_dancing_dict[beetle3]:
dance = eval_1beetle(beetle1_dancing_dict, beetle1)
elif beetle1_dancing_dict[beetle1] and beetle2_dancing_dict[beetle2]:
dance = eval_1beetle(beetle1_dancing_dict, beetle1)
elif beetle1_dancing_dict[beetle1]:
dance = eval_1beetle(beetle1_dancing_dict, beetle1)
elif beetle2_dancing_dict[beetle2]:
dance = eval_1beetle(beetle2_dancing_dict, beetle2)
elif beetle3_dancing_dict[beetle3]:
dance = eval_1beetle(beetle3_dancing_dict, beetle3)
else:
# RNG
dance = random.choice(ACTIONS)
print(dance)
print(new_pos)
# send data to eval and dashboard server
eval_client.send_data(new_pos, dance, str(sync_delay))
ground_truth = eval_client.receive_dancer_position().split(' ')
ground_truth = [int(ground_truth[0]), int(
ground_truth[1]), int(ground_truth[2])]
final_string = dance + " " + new_pos
board_client.send_data_to_DB("MLDancer1", final_string)
beetle1_moving_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_moving_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_moving_dict = {"1C:BA:8C:1D:30:22": {}}
beetle1_dancing_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_dancing_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_dancing_dict = {"1C:BA:8C:1D:30:22": {}}
| 47.052392
| 188
| 0.495788
| 6,449
| 0.156105
| 0
| 0
| 0
| 0
| 0
| 0
| 7,671
| 0.185685
|
9a7dca2e7b004aae5d55d6951056ac9880930921
| 3,100
|
py
|
Python
|
tests/test_relations.py
|
OneRaynyDay/treeno
|
ce11b8447f471c0b5ea596a211b3855625ec43eb
|
[
"MIT"
] | 1
|
2021-12-28T19:00:01.000Z
|
2021-12-28T19:00:01.000Z
|
tests/test_relations.py
|
OneRaynyDay/treeno
|
ce11b8447f471c0b5ea596a211b3855625ec43eb
|
[
"MIT"
] | null | null | null |
tests/test_relations.py
|
OneRaynyDay/treeno
|
ce11b8447f471c0b5ea596a211b3855625ec43eb
|
[
"MIT"
] | null | null | null |
import unittest
from treeno.base import PrintMode, PrintOptions
from treeno.expression import Array, Field, wrap_literal
from treeno.orderby import OrderTerm, OrderType
from treeno.relation import (
AliasedRelation,
Lateral,
SampleType,
Table,
TableQuery,
TableSample,
Unnest,
ValuesQuery,
)
class TestRelations(unittest.TestCase):
def test_table(self):
t = Table(name="table", schema="schema", catalog="catalog")
assert t.sql(PrintOptions()) == '"catalog"."schema"."table"'
tq = TableQuery(t)
assert (
tq.sql(PrintOptions(mode=PrintMode.DEFAULT))
== tq.sql(PrintOptions(mode=PrintMode.PRETTY))
== 'TABLE "catalog"."schema"."table"'
)
# Test a richer query type
tq = TableQuery(
t,
offset=2,
limit=5,
orderby=[OrderTerm(value=Field("x"), order_type=OrderType.DESC)],
)
assert (
tq.sql(PrintOptions(mode=PrintMode.DEFAULT))
== 'TABLE "catalog"."schema"."table" ORDER BY "x" DESC OFFSET 2 LIMIT 5'
)
assert tq.sql(PrintOptions(mode=PrintMode.PRETTY)) == (
' TABLE "catalog"."schema"."table"\n'
' ORDER BY "x" DESC\n'
"OFFSET 2\n"
" LIMIT 5"
)
def test_values(self):
v = ValuesQuery([wrap_literal(1), wrap_literal(2), wrap_literal(3)])
assert (
v.sql(PrintOptions(mode=PrintMode.DEFAULT))
== v.sql(PrintOptions(mode=PrintMode.PRETTY))
== "VALUES 1,2,3"
)
v = ValuesQuery(
[wrap_literal(1), wrap_literal(2), wrap_literal(3)],
offset=3,
with_=[AliasedRelation(TableQuery(Table(name="foo")), "foo")],
)
assert (
v.sql(PrintOptions(mode=PrintMode.DEFAULT))
== 'WITH "foo" AS (TABLE "foo") VALUES 1,2,3 OFFSET 3'
)
assert v.sql(PrintOptions(mode=PrintMode.PRETTY)) == (
' WITH "foo" AS (\n TABLE "foo")\n'
"VALUES 1,2,3\n"
"OFFSET 3"
)
def test_tablesample(self):
table_sample = TableSample(
Table(name="table"), SampleType.BERNOULLI, wrap_literal(0.3)
)
assert (
table_sample.sql(PrintOptions(mode=PrintMode.DEFAULT))
== table_sample.sql(PrintOptions(mode=PrintMode.PRETTY))
== '"table" TABLESAMPLE BERNOULLI(0.3)'
)
def test_lateral(self):
lateral = Lateral(TableQuery(Table(name="table")))
assert (
lateral.sql(PrintOptions(mode=PrintMode.DEFAULT))
== lateral.sql(PrintOptions(mode=PrintMode.PRETTY))
== 'LATERAL(TABLE "table")'
)
def test_unnest(self):
unnest = Unnest([Array([wrap_literal(1)])])
assert (
unnest.sql(PrintOptions(mode=PrintMode.DEFAULT))
== unnest.sql(PrintOptions(mode=PrintMode.PRETTY))
== "UNNEST(ARRAY[1])"
)
if __name__ == "__main__":
unittest.main()
| 31
| 84
| 0.560968
| 2,723
| 0.878387
| 0
| 0
| 0
| 0
| 0
| 0
| 510
| 0.164516
|
9a7dd31031b6e51089b5322681d7a6bf9e613fcf
| 4,257
|
py
|
Python
|
tests/preprocess/test_har.py
|
henry1jin/alohamora
|
e51e2488ecdf3e9692d5bb6b25ebc88622087c20
|
[
"MIT"
] | 5
|
2020-12-16T03:13:59.000Z
|
2022-03-06T07:16:39.000Z
|
tests/preprocess/test_har.py
|
henry1jin/alohamora
|
e51e2488ecdf3e9692d5bb6b25ebc88622087c20
|
[
"MIT"
] | 9
|
2020-09-25T23:25:59.000Z
|
2022-03-11T23:45:14.000Z
|
tests/preprocess/test_har.py
|
henry1jin/alohamora
|
e51e2488ecdf3e9692d5bb6b25ebc88622087c20
|
[
"MIT"
] | 3
|
2019-10-16T21:22:07.000Z
|
2020-07-21T13:38:22.000Z
|
import random
from blaze.chrome.har import har_from_json, Har, HarLog, HarEntry, Request, Response
from blaze.config.environment import ResourceType
from blaze.preprocess.har import get_har_entry_type, har_entries_to_resources
from blaze.util.seq import ordered_uniq
from tests.mocks.config import get_config
from tests.mocks.har import get_har_json
class TestGetHarEntryType:
def test_get_har_entry_type(self):
test_cases = [
("application/javascript", ResourceType.SCRIPT),
("application/json", ResourceType.OTHER),
("audio/aac", ResourceType.OTHER),
("image/jpeg", ResourceType.IMAGE),
("image/gif", ResourceType.IMAGE),
("text/html", ResourceType.HTML),
("text/css", ResourceType.CSS),
("text/xml", ResourceType.OTHER),
("font/woff2", ResourceType.FONT),
("font/oft", ResourceType.FONT),
]
for (mime_type, resource_type) in test_cases:
har_entry = HarEntry(
started_date_time="",
request=Request(url="", method=""),
response=Response(status=200, body_size=0, headers_size=0, mime_type=mime_type),
critical=False,
)
assert get_har_entry_type(har_entry) == resource_type
class TestHarEntriesToResources:
def setup(self):
self.config = get_config()
self.har = har_from_json(get_har_json())
def test_har_entries_to_resources(self):
resources = har_entries_to_resources(self.har)
assert resources
sorted_har_entries = sorted(self.har.log.entries, key=lambda e: e.started_date_time)
sorted_har_entries = ordered_uniq(sorted_har_entries, key=lambda e: e.request.url)
sorted_har_entries = [entry for entry in sorted_har_entries if entry.request.url.startswith("http")]
sorted_har_entries = [entry for entry in sorted_har_entries if entry.response.status != 0]
for har_entry, resource in zip(sorted_har_entries, resources):
assert har_entry.request.url == resource.url
assert resource.execution_ms == self.har.timings[resource.url].execution_ms
assert resource.fetch_delay_ms == self.har.timings[resource.url].fetch_delay_ms
assert resource.time_to_first_byte_ms == self.har.timings[resource.url].time_to_first_byte_ms
if self.har.timings[resource.url].initiator == "":
assert resource.initiator == 0
else:
assert resources[resource.initiator].url == self.har.timings[resource.url].initiator
def test_har_entries_to_resources_ignores_non_http_and_non_complete(self):
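        # randomly corrupt about 10% of the entries (data: URL or status 0) and check that they are filtered out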
entries = []
entry_urls = set()
invalid_entries = set()
for entry in self.har.log.entries:
if random.random() < 0.1:
new_url = entry.request.url
new_status = entry.response.status
if random.random() < 0.5:
new_url = "data:image/png;base64,asdflhqp49tqo3hifehqp" + str(random.random())[2:] + "=="
else:
new_status = 0
new_entry = HarEntry(
started_date_time=entry.started_date_time,
request=Request(url=new_url, method=entry.request.url),
response=Response(
status=new_status,
body_size=entry.response.body_size,
headers_size=entry.response.headers_size,
mime_type=entry.response.mime_type,
),
critical=False,
)
entries.append(new_entry)
invalid_entries.add(new_entry.request.url)
else:
entries.append(entry)
entry_urls.add(entry.request.url)
invalid_entries = invalid_entries - set(entry_urls)
resources = har_entries_to_resources(Har(log=HarLog(entries=entries), timings={}))
assert len(resources) < len(entries) - len(invalid_entries)
assert not any(res.url in invalid_entries for res in resources)
assert all(res.url in entry_urls for res in resources)
| 45.287234
| 109
| 0.624383
| 3,899
| 0.915903
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.045102
|
9a7f8708794c267295590be4b52b94df73d85efd
| 1,245
|
py
|
Python
|
Loader.py
|
JaredDobry/Budgeting-Fool
|
0f4ab5dea3b0750b7bf018de1d456a5587dbeb17
|
[
"MIT"
] | null | null | null |
Loader.py
|
JaredDobry/Budgeting-Fool
|
0f4ab5dea3b0750b7bf018de1d456a5587dbeb17
|
[
"MIT"
] | null | null | null |
Loader.py
|
JaredDobry/Budgeting-Fool
|
0f4ab5dea3b0750b7bf018de1d456a5587dbeb17
|
[
"MIT"
] | null | null | null |
from Budget import Budget, Item
FILENAME = 'Budget.txt'
def scrape_off_char(s, c):
out = ''
for ch in s:
if ch != c:
out += ch
return out
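# Rebuild a Budget from FILENAME (one 'name,amount,frequency,frequency_period' line per item);
# return an empty Budget on any error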
def load_budget():
try:
fr = open(FILENAME, 'r')
lines = fr.readlines()
fr.close()
b = Budget()
for line in lines:
spl = line.split(',')
if len(spl) != 4:
raise ValueError
item = Item()
item.name = scrape_off_char(spl[0], '\n')
item.amount = float(scrape_off_char(spl[1], '\n'))
item.frequency = float(scrape_off_char(spl[2], '\n'))
item.frequency_period = scrape_off_char(spl[3], '\n')
b.add_item(item)
return b
except Exception:
return Budget()
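# Write each budget item back to FILENAME as a comma-separated line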
def save_budget(budget: Budget):
try:
fw = open(FILENAME, 'w')
first = True
for item in budget.items:
if not first:
fw.write('\n')
else:
first = False
fw.write(item.name + ',' + str(item.amount) + ',' + str(item.frequency) + ',' + item.frequency_period)
fw.close()
except Exception:
return
| 28.295455
| 115
| 0.480321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.041767
|
9a801f3178565c7f1b1008bb487a050d3079d8d5
| 448
|
py
|
Python
|
rush_hour/test_solution.py
|
ssebastianj/taip-2014
|
2a0e62c4bf755ff752136350c246456d65a8c3eb
|
[
"MIT"
] | null | null | null |
rush_hour/test_solution.py
|
ssebastianj/taip-2014
|
2a0e62c4bf755ff752136350c246456d65a8c3eb
|
[
"MIT"
] | null | null | null |
rush_hour/test_solution.py
|
ssebastianj/taip-2014
|
2a0e62c4bf755ff752136350c246456d65a8c3eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import pytest
from .solution import calc_minimum_travels
class CalcMininumTravelsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_calc_minimum_travels(self):
assert calc_minimum_travels([4, 5, 2, 3, 1]) == 3
assert calc_minimum_travels([1, 2, 3]) == 1
assert calc_minimum_travels([9, 4, 2, 7, 8, 3, 5, 6, 1]) == 4
| 23.578947
| 69
| 0.638393
| 346
| 0.772321
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.051339
|
9a822f57f1242bdc5f799bb28767d02eb1a10fd9
| 2,573
|
py
|
Python
|
wordle.py
|
ccattuto/wordle-device
|
65cbd95165cf6c8e7fae508358d58f7e013f5bc8
|
[
"CC0-1.0"
] | 7
|
2022-02-01T17:20:29.000Z
|
2022-02-15T08:09:19.000Z
|
wordle.py
|
ccattuto/wordle-device
|
65cbd95165cf6c8e7fae508358d58f7e013f5bc8
|
[
"CC0-1.0"
] | 1
|
2022-02-13T15:46:36.000Z
|
2022-02-13T15:46:36.000Z
|
wordle.py
|
ccattuto/wordle-device
|
65cbd95165cf6c8e7fae508358d58f7e013f5bc8
|
[
"CC0-1.0"
] | 1
|
2022-02-03T17:33:13.000Z
|
2022-02-03T17:33:13.000Z
|
#!/usr/bin/env python
import sys
from serial.tools import list_ports
import serial
import tweepy
# locate ESP32-C3 USB device
port = None
for device in list_ports.comports():
    if device.vid == 0x303a and device.pid == 0x1001:
        port = device
        break
if port is None:
    # no matching USB device was found
    sys.exit(-1)
ser = serial.Serial(port.device, baudrate=115200)
# Twitter streaming API
# CHANGE ME - your consumer key/secret below:
CONSUMER_KEY = 'XXX'
CONSUMER_SECRET = 'XXX'
# CHANGE ME - your access token/secret below:
ACCESS_TOKEN = 'XXX'
ACCESS_TOKEN_SECRET = 'XXX'
# LED matrix control (implemented in wordle.ino):
# - 5x5 matrix is viewed as a LED strip
# - sending 'Z' clears matrix and position cursor on first pixel (0)
# - sending 'G' / 'Y' / 'B' writes a green / yellow / "dark gray" pixel and advances cursor
# clear LED matrix
ser.write('Z'.encode())
# maps characters in tweet to 1-char commands over serial
symbol_map = {
'🟩': 'G',
'🟨': 'Y',
'⬛': 'B',
'⬜': 'B'
}
# write Wordle rows to LED matrix
def display_wordle(rows):
cmd = "Z"
for row in rows:
cmd += "".join([symbol_map[s] for s in row])
ser.write(cmd.encode())
# check whether a row of text is a worlde row
def is_worlde_row(s):
if len(s) != 5:
return False
for c in s:
        if c not in symbol_map:
            return False
    return True
# looks for 1 to 5 consecutive "worlde rows" in tweet
# and pack them into a list. Returns [] otherwise.
def extract_wordle(text):
wordle = []
in_wordle = False
for row in text.split("\n"):
if (in_wordle):
if not is_worlde_row(row):
break
wordle.append(row)
else:
if is_worlde_row(row):
in_wordle = True
wordle.append(row)
# sorry, we don't have space for solutions with 6 rows
if (len(wordle) == 0 or len(wordle) > 5):
return []
# we require the last line to be the solution
if (wordle[-1] != u'🟩🟩🟩🟩🟩'):
return []
return wordle
# process tweet
def process_tweet(text):
wordle = extract_wordle(text)
if len(wordle) == 0:
return
# if we've found a wordle, print it and display it on the LED matrix
print (text)
display_wordle(wordle)
# subclass tweepy
class WordleStream(tweepy.Stream):
def on_status(self, status):
process_tweet(status.text)
wordle_stream = WordleStream(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# filter tweets containing the keyword 'worlde'
wordle_stream.filter(track=["wordle"])
| 24.504762
| 94
| 0.638943
| 102
| 0.039261
| 0
| 0
| 0
| 0
| 0
| 0
| 1,008
| 0.387991
|
9a8309c0c453e677a70c4041fea94265ebf3f4e3
| 4,664
|
py
|
Python
|
DeepAlignmentNetwork/menpofit/lk/result.py
|
chiawei-liu/DeepAlignmentNetwork
|
52621cd2f697abe372b88c9ea0ee08f0d93b43d8
|
[
"MIT"
] | 220
|
2019-09-01T01:52:04.000Z
|
2022-03-28T12:52:07.000Z
|
DeepAlignmentNetwork/menpofit/lk/result.py
|
chiawei-liu/DeepAlignmentNetwork
|
52621cd2f697abe372b88c9ea0ee08f0d93b43d8
|
[
"MIT"
] | 80
|
2015-01-05T16:17:39.000Z
|
2020-11-22T13:42:00.000Z
|
DeepAlignmentNetwork/menpofit/lk/result.py
|
chiawei-liu/DeepAlignmentNetwork
|
52621cd2f697abe372b88c9ea0ee08f0d93b43d8
|
[
"MIT"
] | 64
|
2015-02-02T15:11:38.000Z
|
2022-02-28T06:19:31.000Z
|
from menpofit.result import (ParametricIterativeResult,
MultiScaleParametricIterativeResult)
class LucasKanadeAlgorithmResult(ParametricIterativeResult):
r"""
Class for storing the iterative result of a Lucas-Kanade Image Alignment
optimization algorithm.
Parameters
----------
shapes : `list` of `menpo.shape.PointCloud`
The `list` of shapes per iteration. The first and last members
correspond to the initial and final shapes, respectively.
homogeneous_parameters : `list` of ``(n_parameters,)`` `ndarray`
The `list` of parameters of the homogeneous transform per iteration.
The first and last members correspond to the initial and final
shapes, respectively.
initial_shape : `menpo.shape.PointCloud` or ``None``, optional
The initial shape from which the fitting process started. If
``None``, then no initial shape is assigned.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
costs : `list` of `float` or ``None``, optional
The `list` of cost per iteration. If ``None``, then it is assumed that
the cost function cannot be computed for the specific algorithm.
"""
def __init__(self, shapes, homogeneous_parameters, initial_shape=None,
image=None, gt_shape=None, costs=None):
super(LucasKanadeAlgorithmResult, self).__init__(
shapes=shapes, shape_parameters=homogeneous_parameters,
initial_shape=initial_shape, image=image, gt_shape=gt_shape,
costs=costs)
self._homogeneous_parameters = homogeneous_parameters
@property
def homogeneous_parameters(self):
r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._shape_parameters
class LucasKanadeResult(MultiScaleParametricIterativeResult):
r"""
    Class for storing the multi-scale iterative fitting result of a
    Lucas-Kanade image alignment fitter. It holds the shapes, homogeneous
    parameters and costs per iteration.
    Parameters
    ----------
    results : `list` of :map:`LucasKanadeAlgorithmResult`
        The `list` of optimization results per scale.
scales : `list` or `tuple`
The `list` of scale values per scale (low to high).
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that transform the shapes into
the original image space.
scale_transforms : `list` of `menpo.shape.Scale`
The list of scaling transforms per scale.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
"""
def __init__(self, results, scales, affine_transforms, scale_transforms,
image=None, gt_shape=None):
super(LucasKanadeResult, self).__init__(
results=results, scales=scales, affine_transforms=affine_transforms,
scale_transforms=scale_transforms, image=image, gt_shape=gt_shape)
# Create parameters list
self._homogeneous_parameters = []
for r in results:
self._homogeneous_parameters += r.homogeneous_parameters
# Correct n_iters
self._n_iters -= len(scales)
@property
def homogeneous_parameters(self):
r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._homogeneous_parameters
@property
def shape_parameters(self):
# Use homogeneous_parameters instead.
raise AttributeError
| 44
| 80
| 0.670455
| 4,536
| 0.972556
| 0
| 0
| 920
| 0.197256
| 0
| 0
| 3,173
| 0.680317
|
9a831450ccec04bdfc6f981e2f3e5d2ad9771f21
| 6,533
|
py
|
Python
|
source/model.py
|
BecauseWeCanStudios/LEGOVNO
|
97654da906e5d8ee999fea6dbc062914cc5710b2
|
[
"MIT"
] | null | null | null |
source/model.py
|
BecauseWeCanStudios/LEGOVNO
|
97654da906e5d8ee999fea6dbc062914cc5710b2
|
[
"MIT"
] | null | null | null |
source/model.py
|
BecauseWeCanStudios/LEGOVNO
|
97654da906e5d8ee999fea6dbc062914cc5710b2
|
[
"MIT"
] | null | null | null |
import os
import keras
import skimage.io
import keras_contrib.applications
from metrics import *
from mrcnn import utils
from mrcnn import config
from imgaug import augmenters as iaa
from dataset import Dataset, PoseEstimationDataset
import numpy as np
import keras.backend as K
import mrcnn.model as modellib
class Config(config.Config):
NAME = 'LEGOVNO'
IMAGES_PER_GPU = 1
GPU_COUNT = 1
NUM_CLASSES = 4
STEPS_PER_EPOCH = 1000
DETECTION_MIN_CONFIDENCE = 0.9
BACKBONE = 'resnet101'
IMAGE_MIN_DIM = 1024
IMAGE_MAX_DIM = 1024
class InferenceConfig(Config):
pass
class Model:
TRAIN = 0
INFERENCE = 1
COCO_WEIGHTS_PATH = './mask_rcnn_coco.h5'
WEIGHT_LOADERS = {
'coco': lambda self: self.__load_coco(),
'last': lambda self: self.model.find_last()[1],
'imagenet': lambda self: self.model.get_imagenet_weights()
}
def __init__(self, weights, mode, logs='./logs'):
assert mode in (self.TRAIN, self.INFERENCE), 'Unrecognised mode'
self.config = Config() if mode == self.TRAIN else InferenceConfig()
self.model = modellib.MaskRCNN(mode='training' if mode == self.TRAIN else 'inference',
config=self.config, model_dir=logs)
lweights = weights.lower()
weights_path = self.WEIGHT_LOADERS[lweights](self) if lweights in self.WEIGHT_LOADERS else weights
self.model.load_weights(weights_path, by_name=True,
exclude=['mrcnn_class_logits', 'mrcnn_bbox_fc', 'mrcnn_bbox', 'mrcnn_mask'] if lweights == 'coco' else [])
def train(self, data, epochs=30, learning_rate=1e-3):
train_dataset = Dataset.load_and_prepare(data.root.train[:], data)
test_dataset = Dataset.load_and_prepare(data.root.test[:], data)
self.model.train(train_dataset, test_dataset, learning_rate=learning_rate, epochs=epochs, layers='all')
def detect(self, image, verbose=1):
return self.model.detect([image], verbose=verbose)[0]
def detect_file(self, path, verbose=1):
return self.detect(skimage.io.imread(path), verbose)
def __load_coco(self):
if not os.path.exists(self.COCO_WEIGHTS_PATH):
utils.download_trained_weights(self.COCO_WEIGHTS_PATH)
return self.COCO_WEIGHTS_PATH
class ActivationLayer(keras.engine.topology.Layer):
def __init__(self, **kwargs):
super(ActivationLayer, self).__init__(**kwargs)
def build(self, input_shape):
super(ActivationLayer, self).build(input_shape)
def call(self, x):
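        # divide by the L2 norm so the 4-component orientation output has unit length (a unit quaternion)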
return x / K.expand_dims(K.sqrt(K.sum(K.square(x), axis=-1)))
def compute_output_shape(self, input_shape):
return (input_shape[0], 4)
class PoseEstimationConfig:
BACKBONE = 'resnet18'
INPUT_SHAPE = (300, 400, 1)
SHARED_LAYERS = 0
SHARED_UNITS = 1024
POSITION_LAYERS = 0
POSITION_UNITS = 1024
ORIENTATION_LAYERS = 0
ORIENTATION_UNITS = 1024
BATCH_SIZE = 32
VALIDATION_BATCH_SIZE = 1
OPTIMIZER = keras.optimizers.Adam(lr=1e-3)
LOSSES = [
MeshLoss(
['../models/1x1.obj', '../models/1x2.obj', '../models/1x3.obj'],
SequentialLoss(
[
RotationTransform(extract_quaternion),
OffsetTransform(extract_offset)
],
DiffMean
)
)
]
METRICS = [
QuaternionDistanceMetric(extract_quaternion),
QuaternionAngleMetric(extract_quaternion),
DistanceMetric(extract_offset)
]
SAVE_PERIOD = 10
STEPS_PER_EPOCH = None
VALIDATION_STEPS = None
AUGMENTER = iaa.Sequential(
[
iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 3))),
iaa.Multiply((0.5, 1.5))
],
random_order=True
)
class PoseEstimationModel():
BACKBONES = {
'resnet18': lambda input_shape:
PoseEstimationModel.__resnet(input_shape, 'basic', [2, 2, 2, 2]),
'resnet34': lambda input_shape:
PoseEstimationModel.__resnet(input_shape, 'basic', [3, 4, 6, 3]),
'resnet50': lambda input_shape:
PoseEstimationModel.__resnet(input_shape, 'bottleneck', [3, 4, 6, 3]),
'xception': lambda input_shape:
keras.applications.xception.Xception(include_top=False, weights=None, input_shape=input_shape, classes=None)
}
def __init__(self, config=None, weights=None, logs='./logs'):
if not config:
config = PoseEstimationConfig()
if not os.path.exists(logs):
os.makedirs(logs)
backbone = PoseEstimationModel.BACKBONES[config.BACKBONE](config.INPUT_SHAPE)
output = backbone.output
output = keras.layers.Flatten()(output)
for i in range(config.SHARED_LAYERS):
output = keras.layers.Dense(config.SHARED_UNITS, activation='relu')(output)
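        # two output heads: 3 units for the position and 4 unit-normalised units for the orientation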
model = keras.models.Model(inputs=backbone.input, outputs=keras.layers.concatenate([
PoseEstimationModel.__make_fc_layers(output, config.POSITION_LAYERS, config.POSITION_UNITS, 3),
ActivationLayer()(PoseEstimationModel.__make_fc_layers(output, config.ORIENTATION_LAYERS, config.ORIENTATION_UNITS, 4))
]))
model.compile(
optimizer=config.OPTIMIZER,
loss=config.LOSSES,
metrics=config.METRICS
)
if weights:
model.load_weights(weights)
self.model, self.config, self.logs = model, config, logs
def train(self, data, epochs, initial_epoch=0):
train_dataset = PoseEstimationDataset(data.root.train[:], data, self.config.BATCH_SIZE, self.config.AUGMENTER)
test_dataset = PoseEstimationDataset(data.root.test[:], data,
self.config.BATCH_SIZE if self.config.BATCH_SIZE else self.config.VALIDATION_BATCH_SIZE)
save_best = keras.callbacks.ModelCheckpoint(
os.path.join(self.logs, 'weights.{epoch:04d}.hdf5'),
verbose=0,
save_weights_only=True,
period=self.config.SAVE_PERIOD
)
reduce_lr = keras.callbacks.ReduceLROnPlateau(
monitor='loss', factor=0.2,
patience=5, min_lr=0.00001)
tensorboard = keras.callbacks.TensorBoard(log_dir=self.logs)
self.model.fit_generator(
train_dataset,
validation_data=test_dataset,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
epochs=epochs,
callbacks=[save_best, reduce_lr, tensorboard],
shuffle=True,
workers=0,
validation_steps=self.config.VALIDATION_STEPS,
initial_epoch=initial_epoch
)
def predict(self, images, batch_size=1, verbose=0):
return self.model.predict(images, batch_size=batch_size, verbose=verbose)
def evaluate(self, images, y, batch_size=1, verbose=0):
return self.model.evaluate(images, y, batch_size=batch_size, verbose=verbose)
@staticmethod
def __make_fc_layers(inputs, count, units, last_units):
for i in range(count - 1):
inputs = keras.layers.Dense(units, activation='relu')(inputs)
return keras.layers.Dense(last_units)(inputs)
@staticmethod
def __resnet(input_shape, block, repetitions):
return keras_contrib.applications.resnet.ResNet(input_shape, None, block, repetitions=repetitions, include_top=False)
| 30.386047
| 123
| 0.743762
| 6,210
| 0.950559
| 0
| 0
| 393
| 0.060156
| 0
| 0
| 366
| 0.056023
|
9a83696d4e899b64faddbb5626cfd880f1149543
| 442
|
py
|
Python
|
donations/urls.py
|
nanorepublica/django-donations
|
349aaf17029f3f9b4723fead3fa28dd85959f14e
|
[
"BSD-3-Clause"
] | 9
|
2015-10-13T11:41:20.000Z
|
2020-11-30T04:38:43.000Z
|
donations/urls.py
|
nanorepublica/django-donations
|
349aaf17029f3f9b4723fead3fa28dd85959f14e
|
[
"BSD-3-Clause"
] | 63
|
2015-10-22T17:41:27.000Z
|
2021-11-20T12:18:26.000Z
|
donations/urls.py
|
nanorepublica/django-donations
|
349aaf17029f3f9b4723fead3fa28dd85959f14e
|
[
"BSD-3-Clause"
] | 3
|
2017-08-29T02:44:12.000Z
|
2020-04-07T23:43:12.000Z
|
from django.conf.urls import include, url
from donations.views import DonateAPI, VerifyAPI
app_name = 'donations'
api_urls = ([
url(r'^donate/$', DonateAPI.as_view(), name="donate"),
url(r'^verify/(?P<pk>[0-9]+)$', VerifyAPI.as_view(), name="verify"),
], "donations")
donations = ([
url(r'^api/', include(api_urls, namespace="api")),
], "donations")
urlpatterns = [
url(r'^', include(donations, namespace="donations"))
]
| 23.263158
| 72
| 0.651584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.260181
|
9a83981c040624137fa42558baa04d53d347c0fc
| 3,004
|
py
|
Python
|
orc8r/tools/fab/vagrant.py
|
idoshveki/magma
|
8022267bd8b8d94913fbb9a0836880361d785446
|
[
"BSD-3-Clause"
] | 2
|
2020-11-05T18:58:26.000Z
|
2021-02-09T06:42:49.000Z
|
orc8r/tools/fab/vagrant.py
|
idoshveki/magma
|
8022267bd8b8d94913fbb9a0836880361d785446
|
[
"BSD-3-Clause"
] | 10
|
2021-03-31T20:19:00.000Z
|
2022-02-19T07:09:57.000Z
|
orc8r/tools/fab/vagrant.py
|
119Vik/magma-1
|
107a7b374466a837fc0a49b283ba9d6ff1d702e3
|
[
"BSD-3-Clause"
] | 3
|
2020-08-20T18:45:34.000Z
|
2020-08-20T20:18:42.000Z
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import os.path
from fabric.api import local
from fabric.api import env
def __ensure_in_vagrant_dir():
"""
    Error out if there is no Vagrant instance associated with this directory
"""
pwd = local('pwd', capture=True)
if not os.path.isfile(pwd + '/Vagrantfile'):
print("Error: Vagrantfile not found. Try executing from fbcode/magma")
exit(1)
return
def setup_env_vagrant(machine='magma', apply_to_env=True, force_provision=False):
""" Host config for local Vagrant VM.
Sets the environment to point at the local vagrant machine. Used
whenever we need to run commands on the vagrant machine.
"""
__ensure_in_vagrant_dir()
# Ensure that VM is running
    notRunning = local('vagrant status %s' % machine, capture=True) \
        .find('running') < 0
    if notRunning:
        # The machine isn't running. Most likely it's just not up. Let's
        # first try the simple thing of bringing it up, and if that doesn't
        # work then we ask the user to fix it.
        print("VM %s is not running... Attempting to bring it up."
              % machine)
        local('vagrant up %s' % machine)
        stillNotRunning = local('vagrant status %s' % machine, capture=True) \
            .find('running') < 0
        if stillNotRunning:
            print("Error: VM: %s is still not running...\n"
                  "       Failed to bring up %s"
                  % (machine, machine))
            exit(1)
elif force_provision:
local('vagrant provision %s' % machine)
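    # parse `vagrant ssh-config` output into a key/value dict to pull out the host, port and identity file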
ssh_config = local('vagrant ssh-config %s' % machine, capture=True)
ssh_lines = [line.strip() for line in ssh_config.split("\n")]
ssh_params = {key: val for key, val
in [line.split(" ", 1) for line in ssh_lines]}
host = ssh_params.get("HostName", "").strip()
port = ssh_params.get("Port", "").strip()
# some installations seem to have quotes around the file location
identity_file = ssh_params.get("IdentityFile", "").strip().strip('"')
host_string = 'vagrant@%s:%s' % (host, port)
if apply_to_env:
env.host_string = host_string
env.hosts = [env.host_string]
env.key_filename = identity_file
env.disable_known_hosts = True
else:
return {
"hosts": [host_string],
"host_string": host_string,
"key_filename": identity_file,
"disable_known_hosts": True,
}
def teardown_vagrant(machine):
""" Destroy a vagrant machine so that we get a clean environment to work
in
"""
__ensure_in_vagrant_dir()
# Destroy if vm if it exists
created = local('vagrant status %s' % machine, capture=True) \
.find('not created') < 0
if created:
local('vagrant destroy -f %s' % machine)
| 31.957447
| 81
| 0.617843
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,343
| 0.447071
|
9a83eb7c6cde3a0afbb0a6028180ce05131c4869
| 1,988
|
py
|
Python
|
cp_multiply/examples/make_box_packing_cp.py
|
gkonjevod/multiply_CP
|
2410d242a29a340db8184e127d05c5da9d26f1b4
|
[
"MIT"
] | null | null | null |
cp_multiply/examples/make_box_packing_cp.py
|
gkonjevod/multiply_CP
|
2410d242a29a340db8184e127d05c5da9d26f1b4
|
[
"MIT"
] | null | null | null |
cp_multiply/examples/make_box_packing_cp.py
|
gkonjevod/multiply_CP
|
2410d242a29a340db8184e127d05c5da9d26f1b4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 19 23:38:25 2022
@author: goran
"""
from ..general_cp import GeneralCP
from ..cp_utils import to_degrees, dihedral_angle, normal
from math import sqrt, pi, tan, atan2
box_packing_cell_nodes = {'A': (2, 0),
'B': (4, 0),
'C': (2, 2),
'D': (4, 2),
'E': (4, 3),
'F': (3.5, 3.5),
'G': (0, 0)}
angle1 = to_degrees(atan2(sqrt(2)/2, 2))
folded_wall_coords = [ (2, 2, 0),
(2, 1, 2),
(2, 2, 2)]
folded_top_coords = [ (2, 1, 2),
(2, 2, 2),
(1, 2, 2)]
folded_slanted_coords = [(1, 2, 2),
(2, 2, 0),
(2, 1, 2)]
angle1_check = to_degrees(dihedral_angle(normal(folded_top_coords),
normal(folded_slanted_coords)))
print('angle1 = ', angle1)
angle2 = to_degrees(dihedral_angle(normal(folded_wall_coords),
normal(folded_slanted_coords)))
print('angle2 = ', angle2)
box_packing_cell_edges = {'AC': -90,
'BD': 180,
'CD': -180,
'CE': 90 + angle1,
'DE': -180,
'EF': 90 + angle2,
'BG': 0}
def generate_box_packing():
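    # reflect the minimal cell across three lines, then tile the result into a 5x5 grid crease pattern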
l1 = ((0, 0), (2, 2))
l2 = ((4, 0), (4, 1))
l3 = ((0, 4), (1, 4))
#l4 = ((0, 8), (1, 8))
min_cell = GeneralCP(namednodes = box_packing_cell_nodes,
namededges = box_packing_cell_edges)
#min_cell.save_cp('test0')
c1 = min_cell.add_reflection(l1).add_reflection(l2).add_reflection(l3)#.add_reflection(l4)
c1.save_cp('box_packing_cell')
grid = c1.make_grid(grid_size = (5, 5), overlap_frac = 0.25)
grid.save_cp('box_packing_5x5')
| 30.121212
| 94
| 0.454728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 278
| 0.139839
|
9a862a138eaba7a151db1b55e4b4a041ae8dbd8a
| 11,001
|
py
|
Python
|
kq/queue.py
|
grofers/kq
|
1fc96e2a189901b91fdcde7f829b021b6555e217
|
[
"MIT"
] | null | null | null |
kq/queue.py
|
grofers/kq
|
1fc96e2a189901b91fdcde7f829b021b6555e217
|
[
"MIT"
] | 2
|
2018-09-24T15:43:48.000Z
|
2020-06-23T11:15:17.000Z
|
kq/queue.py
|
grofers/kq
|
1fc96e2a189901b91fdcde7f829b021b6555e217
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import time
import uuid
import dill
import kafka
from kafka.errors import KafkaError
from kq.job import Job
class Queue(object):
"""KQ queue.
A queue serializes incoming function calls and places them into a Kafka
topic as *jobs*. Workers fetch these jobs and execute them asynchronously
in the background. Here is an example of initializing and using a queue:
.. code-block:: python
from kq import Queue, Job
queue = Queue(
hosts='host:7000,host:8000',
topic='foo',
timeout=3600,
compression='gzip',
acks=0,
retries=5,
job_size=10000000,
cafile='/my/files/cafile',
certfile='/my/files/certfile',
keyfile='/my/files/keyfile',
crlfile='/my/files/crlfile'
)
job = queue.enqueue(my_func, *args, **kwargs)
assert isinstance(job, Job)
.. note::
The number of partitions in a Kafka topic limits how many workers
can read from the queue in parallel. For example, maximum of 10
workers can work off a queue with 10 partitions.
:param hosts: Comma-separated Kafka hostnames and ports. For example,
``"localhost:9000,localhost:8000,192.168.1.1:7000"`` is a valid input
string. Default: ``"127.0.0.1:9092"``.
:type hosts: str | unicode
:param topic: Name of the Kafka topic. Default: ``"default"``.
:type topic: str | unicode
:param timeout: Default job timeout threshold in seconds. If not set, the
enqueued jobs are left to run until they finish. This means a hanging
job can potentially block the workers. Default: ``None`` (no timeout).
:type timeout: int
:param compression: The algorithm used for compressing job data. Allowed
values are: ``"gzip"``, ``"snappy"`` and ``"lz4"``. Default: ``None``
(no compression).
:type compression: str | unicode
:param acks: The number of acknowledgements required from the broker(s)
before considering a job successfully enqueued. Allowed values are:
.. code-block:: none
0: Do not wait for any acknowledgment from the broker leader
and consider the job enqueued as soon as it is added to
the socket buffer. Persistence is not guaranteed on broker
failures.
1: Wait for the job to be saved on the broker leader but not
for it be replicated across other brokers. If the leader
broker fails before the replication finishes the job may
not be persisted.
-1: Wait for the job to be replicated across all brokers. As
long as one of the brokers is functional job persistence
is guaranteed.
Default: ``1``.
:type acks: int
:param retries: The maximum number of attempts to re-enqueue a job when
the job fails to reach the broker. Retries may alter the sequence of
the enqueued jobs. Default: ``0``.
:type retries: int
:param job_size: The max size of each job in bytes. Default: ``1048576``.
:type job_size: int
:param cafile: Full path to the trusted CA certificate file.
:type cafile: str | unicode
:param certfile: Full path to the client certificate file.
:type certfile: str | unicode
:param keyfile: Full path to the client private key file.
:type keyfile: str | unicode
:param crlfile: Full path to the CRL file for validating certification
expiry. This option is only available with Python 3.4+ or 2.7.9+.
:type crlfile: str | unicode
"""
def __init__(self,
hosts='127.0.0.1:9092',
topic='default',
timeout=None,
compression=None,
acks=1,
retries=0,
job_size=1048576,
cafile=None,
certfile=None,
keyfile=None,
crlfile=None):
self._hosts = hosts
self._topic = topic
self._timeout = timeout
self._logger = logging.getLogger('kq')
self._producer = kafka.KafkaProducer(
bootstrap_servers=self._hosts,
compression_type=compression,
acks=acks,
retries=retries,
max_request_size=job_size,
buffer_memory=max(job_size, 33554432),
ssl_cafile=cafile,
ssl_certfile=certfile,
ssl_keyfile=keyfile,
ssl_crlfile=crlfile
)
def __repr__(self):
"""Return a string representation of the queue.
:return: String representation of the queue.
:rtype: str | unicode
"""
return 'Queue(topic={})'.format(self._topic)
@property
def producer(self):
"""Return the Kafka producer object.
:return: Kafka producer object.
:rtype: kafka.producer.KafkaProducer
"""
return self._producer
@property
def hosts(self):
"""Return the list of Kafka host names and ports.
:return: List of Kafka host names and ports.
:rtype: [str]
"""
return self._hosts.split(',')
@property
def topic(self):
"""Return the name of the Kafka topic in use.
:return: Name of the Kafka topic in use.
:rtype: str | unicode
"""
return self._topic
@property
def timeout(self):
"""Return the job timeout threshold in seconds.
:return: Job timeout threshold in seconds.
:rtype: int
"""
return self._timeout
def enqueue(self, obj, *args, **kwargs):
"""Place the function call (or the job) in the Kafka topic.
For example:
.. code-block:: python
import requests
from kq import Queue
q = Queue()
# You can queue the function call with its arguments
job = q.enqueue(requests.get, 'https://www.google.com')
# Or you can queue a kq.job.Job instance directly
q.enqueue(job)
:param obj: Function or the job object to enqueue. If a function is
given, the function *must* be pickle-able.
:type obj: callable | kq.job.Job
:param args: Arguments for the function. Ignored if a KQ job object
is given for the first argument instead.
:type args: list
:param kwargs: Keyword arguments for the function. Ignored if a KQ
job instance is given as the first argument instead.
:type kwargs: dict
:return: The job that was enqueued
:rtype: kq.job.Job
"""
if isinstance(obj, Job):
func = obj.func
args = obj.args
kwargs = obj.kwargs
key = obj.key
else:
func = obj
key = None
if not callable(func):
raise ValueError('{} is not a callable'.format(func))
job = Job(
id=str(uuid.uuid4()),
timestamp=int(time.time()),
topic=self._topic,
func=func,
args=args,
kwargs=kwargs,
timeout=self._timeout,
key=key
)
future = self._producer.send(self._topic, dill.dumps(job), key=key)
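        # block until the broker acknowledges the record or the timeout expires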
try:
future.get(timeout=self._timeout or 5)
except KafkaError as e:
            self._logger.exception('Queuing failed: {}'.format(e))
return None
self._logger.info('Enqueued: {}'.format(job))
return job
def enqueue_with_key(self, key, obj, *args, **kwargs):
"""Place the function call (or the job) in the Kafka topic with key.
For example:
.. code-block:: python
import requests
from kq import Queue
q = Queue()
url = 'https://www.google.com'
# You can queue the function call with its arguments
job = q.enqueue_with_key('my_key', requests.get, url)
# Or you can queue a kq.job.Job instance directly
q.enqueue_with_key('my_key', job)
:param key: The key for the Kafka message. Jobs with the same key are
guaranteed to be placed in the same Kafka partition and processed
sequentially. If a job object is enqueued, its key is overwritten.
:type key: str
:param obj: Function or the job object to enqueue. If a function is
given, the function *must* be pickle-able.
:type obj: callable | kq.job.Job
:param args: Arguments for the function. Ignored if a KQ job object
is given for the first argument instead.
:type args: list
:param kwargs: Keyword arguments for the function. Ignored if a KQ
job instance is given as the first argument instead.
:type kwargs: dict
:return: The job that was enqueued
:rtype: kq.job.Job
"""
if isinstance(obj, Job):
func = obj.func
args = obj.args
kwargs = obj.kwargs
else:
func = obj
if not callable(func):
raise ValueError('{} is not a callable'.format(func))
job = Job(
id=str(uuid.uuid4()),
timestamp=int(time.time()),
topic=self._topic,
func=func,
args=args,
kwargs=kwargs,
timeout=self._timeout,
key=key
)
future = self._producer.send(self._topic, dill.dumps(job), key=key)
try:
future.get(timeout=self._timeout or 5)
except KafkaError as e:
            self._logger.exception('Queuing failed: {}'.format(e))
return None
self._logger.info('Enqueued: {}'.format(job))
return job
def job(self, func):
"""Decorator which add a **delay** method to a function.
When the **delay** method is called, the function is queued as a job.
For example:
.. code-block:: python
from kq import Queue
queue = Queue()
@queue.job
def calculate_sum(a, b, c):
return a + b + c
# Enqueue the function as a job
calculate_sum.delay(1, 2, 3)
:param func: The function to decorate.
:type func: callable
:return: The decorated function with new method **delay**
:rtype: callable
"""
@functools.wraps(func)
def delay(*args, **kwargs): # pragma: no cover
return self.enqueue(func, *args, **kwargs)
func.delay = delay
return func
def flush(self):
"""Force-flush all buffered records to the broker."""
self._logger.info('Flushing {} ...'.format(self))
self._producer.flush()
| 32.937126
| 78
| 0.579947
| 10,782
| 0.980093
| 0
| 0
| 957
| 0.086992
| 0
| 0
| 7,387
| 0.671484
|
9a87b0a003cfac44c4b71f5b09ccd17d4a3eced1
| 8,683
|
py
|
Python
|
python/accel_adxl345/accel_adxl345.py
|
iorodeo/accel_adxl345
|
aadbca1c57840f66a61556ff02e72e8b8e4e93e0
|
[
"Apache-2.0"
] | null | null | null |
python/accel_adxl345/accel_adxl345.py
|
iorodeo/accel_adxl345
|
aadbca1c57840f66a61556ff02e72e8b8e4e93e0
|
[
"Apache-2.0"
] | null | null | null |
python/accel_adxl345/accel_adxl345.py
|
iorodeo/accel_adxl345
|
aadbca1c57840f66a61556ff02e72e8b8e4e93e0
|
[
"Apache-2.0"
] | null | null | null |
"""
accel_adxl345.py
This modules defines the AccelADXL345 class for streaming data from the
ADXL345 accelerometers.
"""
import time
import serial
import sys
import numpy
import struct
BUF_EMPTY_NUM = 5
BUF_EMPTY_DT = 0.05
class AccelADXL345(serial.Serial):
def __init__(self, **kwarg):
# Command ids
self.cmd_id = {
'stop_streaming' : 0,
'start_streaming' : 1,
'set_timer_period' : 2,
'get_timer_period' : 3,
'set_range' : 4,
'get_range' : 5,
'get_sample' : 6,
'get_max_timer_period' : 7,
'get_min_timer_period' : 8,
'get_bad_sample_count' : 9,
}
# Allowed accelerations ranges and scale factors
self.allowedAccelRange = (2, 4, 8, 16)
self.accelScale = 0.0384431560448
try:
self.reset_sleep = kwarg.pop('reset_sleep')
except KeyError:
self.reset_sleep = True
try:
self.accelRange = kwarg.pop('range')
except KeyError:
self.accelRange = 16
if not self.checkAccelRange(self.accelRange):
            raise ValueError('unknown acceleration range {0}'.format(self.accelRange))
_kwarg = {
'port' : '/dev/ttyUSB0',
'timeout' : 0.1,
'baudrate' : 38400,
}
_kwarg.update(kwarg)
super(AccelADXL345,self).__init__(**_kwarg)
if self.reset_sleep:
time.sleep(2.0)
self.emptyBuffer()
# Get sample dt and current range setting
self.sampleDt = self.getSampleDt()
self.accelRange = self.getRange()
# Get max and min allowed sample dt
self.minSampleDt = self.getMinSampleDt()
self.maxSampleDt = self.getMaxSampleDt()
def sendCmd(self,cmd):
"""
Send the command, cmd, to the device
"""
self.write(cmd)
def readValue(self):
"""
Read a value from the device.
"""
line = self.readline()
line = line.strip()
return line
def readFloat(self):
"""
Read a single float of list of floats separated by commas
"""
value = self.readValue()
if ' ' in value:
value = value.split(' ')
value = [float(x) for x in value]
else:
value = float(value)
return value
def readInt(self):
"""
Read a single integer or list of integers separated by commas.
"""
value = self.readValue()
if ' ' in value:
value = value.split(' ')
value = [int(x) for x in value]
else:
value = int(value)
return value
def emptyBuffer(self):
"""
Empty the serial input buffer.
"""
for i in range(0,BUF_EMPTY_NUM):
#print 'empty %d'%(i,), self.inWaiting()
self.flushInput()
time.sleep(BUF_EMPTY_DT)
def checkAccelRange(self,value):
"""
Check if the value is within the allowed range set.
"""
return value in self.allowedAccelRange
def startStreaming(self):
"""
Start data streaming form the accelerometer
"""
cmd = '[{0}]\n'.format(self.cmd_id['start_streaming'])
self.sendCmd(cmd)
def stopStreaming(self):
"""
Stop data streaming from the accelerometer
"""
cmd = '[{0}]\n'.format(self.cmd_id['stop_streaming'])
self.sendCmd(cmd)
def getSampleDt(self):
"""
Returns the sample interval, dt, in microseconds
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_timer_period'])
self.sendCmd(cmd)
dt = self.readFloat()
return dt
def getBadSampleCount(self):
"""
Returns the number of bad/corrupted samples.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_bad_sample_count'])
self.sendCmd(cmd)
val = self.readInt()
return val
def setSampleDt(self,dt):
"""
Sets the sample interval in microseconds.
"""
_dt = int(dt)
if _dt > self.maxSampleDt or _dt < self.minSampleDt:
            raise ValueError('sample dt out of range')
cmd = '[{0},{1}]\n'.format(self.cmd_id['set_timer_period'],_dt)
self.sendCmd(cmd)
self.sampleDt = _dt
def getSampleRate(self):
"""
Returns the sample rate in Hz
"""
        # sampleDt is stored in microseconds, so convert before inverting
        return 1.0e6/self.sampleDt
def setSampleRate(self,freq):
"""
Sets the sample rate in Hz
"""
dt = int(1.0e6/freq)
self.setSampleDt(dt)
def getMaxSampleDt(self):
"""
Gets the maximun allowed sample dt in microseconds.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_max_timer_period'])
self.sendCmd(cmd)
value = self.readInt()
return value
def getMinSampleDt(self):
"""
Gets the minimum allowed sample dt in microseconds.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_min_timer_period'])
self.sendCmd(cmd)
value = self.readInt()
return value
def getMaxSampleRate(self):
"""
Returns the maximum allowed sample rate in Hz
"""
minSampleDtSec = self.minSampleDt*(1.0e-6)
return 1.0/minSampleDtSec
def getMinSampleRate(self):
"""
        Returns the minimum allowed sample rate in Hz
"""
maxSampleDtSec = self.maxSampleDt*(1.0e-6)
return 1.0/maxSampleDtSec
def getRange(self):
"""
Returns the current accelerometer range setting.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_range'])
self.sendCmd(cmd)
accelRange = self.readInt()
return accelRange
def setRange(self,value):
"""
Sets the current accelerometer range.
"""
_value = int(value)
if _value in self.allowedAccelRange:
cmd = '[{0}, {1}]\n'.format(self.cmd_id['set_range'],_value)
self.sendCmd(cmd)
_value = self.getRange()
self.accelRange = _value
def getAllowedAccelRange(self):
"""
Returns all allowed range settings
"""
return self.allowedAccelRange
def peekValue(self):
"""
        Grabs a single sample (ax,ay,az) from the accelerometer.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_sample'])
self.sendCmd(cmd)
samples = self.readFloat()
samples = [x*self.accelScale for x in samples]
return samples
def getSamples(self,N,verbose=False):
"""
Streams N samples from the accelerometer at the current sample rate
setting.
"""
# Start streaming
self.emptyBuffer()
self.startStreaming()
# Read samples
data = []
while len(data) < N:
if verbose:
                print(len(data))
newData = self.readValues()
data.extend(newData)
# Stop streaming and empty buffer
self.stopStreaming()
self.emptyBuffer()
# Convert to an array, truncate to number of samples requested
data = numpy.array(data)
data = self.accelScale*data[:N,:]
# Use sample rate to get array of time points
dtSec = self.sampleDt*1.0e-6
t = dtSec*numpy.arange(data.shape[0])
return t, data
#def readValues(self,verbose=False):
# data = []
# if self.inWaiting() > 0:
# line = self.readline()
# line = line.strip()
# line = line.split(':')
# for vals in line:
# vals = vals.split(' ')
# try:
# vals = [float(x) for x in vals]
# if len(vals) == 3:
# data.append(vals)
# except:
# if verbose:
# print 'fail'
# return data
def readValues(self):
data = []
while self.inWaiting() >= 7:
byteVals = self.read(7)
ax = struct.unpack('<h',byteVals[0:2])[0]
ay = struct.unpack('<h',byteVals[2:4])[0]
az = struct.unpack('<h',byteVals[4:6])[0]
chk = ord(byteVals[6])
if not chk == 0:
raise IOError, 'streaming data is not in sync.'
data.append([ax,ay,az])
return data
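# Minimal decoding sketch (added for illustration; the frame bytes are fabricated,
# but the layout matches what readValues() above expects: three little-endian
# int16 accelerations followed by a zero sync/check byte).
import struct
frame = struct.pack('<hhhB', 120, -45, 1023, 0)   # one well-formed 7-byte frame
ax, ay, az = struct.unpack('<hhh', frame[0:6])    # three signed 16-bit readings
chk = struct.unpack('<B', frame[6:7])[0]          # final byte must be 0, else out of sync
assert chk == 0
print(ax, ay, az)                                 # prints the three decoded values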
| 27.741214
| 86
| 0.524012
| 8,451
| 0.973281
| 0
| 0
| 0
| 0
| 0
| 0
| 3,063
| 0.352758
|
9a8866fd681b05cff1de0c32ef8dae40aefe5351
| 831
|
py
|
Python
|
polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | null | null | null |
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E09000030"
addresses_name = "local.2018-05-03/Version 2/Democracy_Club__03May2018.tsv"
stations_name = "local.2018-05-03/Version 2/Democracy_Club__03May2018.tsv"
elections = ["local.2018-05-03"]
csv_delimiter = "\t"
csv_encoding = "windows-1252"
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn == "6198433":
rec = super().address_record_to_dict(record)
rec["postcode"] = "E2 9DG"
return rec
if record.addressline6 == "E3 2LB" or record.addressline6 == "E3 5EG":
return None
return super().address_record_to_dict(record)
| 34.625
| 82
| 0.683514
| 745
| 0.89651
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.251504
|
9a89981de0ecebc2323be5e00e13a157cd8dc62f
| 8,490
|
py
|
Python
|
pynaja/common/struct.py
|
xiaoxiamiya/naja
|
222c3e1135bbd2b9a02181273a8a70201fbdf0f5
|
[
"Apache-2.0"
] | 1
|
2021-09-07T07:13:53.000Z
|
2021-09-07T07:13:53.000Z
|
pynaja/common/struct.py
|
xiaoxiamiya/naja
|
222c3e1135bbd2b9a02181273a8a70201fbdf0f5
|
[
"Apache-2.0"
] | null | null | null |
pynaja/common/struct.py
|
xiaoxiamiya/naja
|
222c3e1135bbd2b9a02181273a8a70201fbdf0f5
|
[
"Apache-2.0"
] | null | null | null |
import struct
from collections import OrderedDict
from configparser import RawConfigParser
from pynaja.common.async_base import Utils
from pynaja.common.error import ConstError
class Result(dict):
"""返回结果类
"""
def __init__(self, code=0, data=None, msg=None, details=None):
super().__init__(code=code)
if data is not None:
self.__setitem__(r'data', data)
if msg is not None:
self.__setitem__(r'msg', msg)
if details is not None:
self.__setitem__(r'details', details)
def __bool__(self):
return self.code == 0
@property
def code(self):
return self.get(r'code')
@property
def data(self):
return self.get(r'data', None)
@property
def msg(self):
return self.get(r'msg', None)
@property
def details(self):
return self.get(r'details', None)
class Const(OrderedDict):
"""常量类
"""
def __getattr__(self, key):
return self.__getitem__(key)
def __setattr__(self, key, val):
if key[:1] == r'_':
super().__setattr__(key, val)
else:
self.__setitem__(key, val)
def __delattr__(self, key):
if key[:1] == r'_':
super().__delattr__(key)
else:
raise ConstError()
def __setitem__(self, key, val):
if key in self:
raise ConstError()
else:
super().__setitem__(key, val)
def __delitem__(self, key):
raise ConstError()
def exist(self, val):
return val in self.values()
class ByteArrayAbstract:
"""ByteArray抽象类
"""
NETWORK = r'!'
NATIVE = r'='
NATIVE_ALIGNMENT = r'@'
LITTLE_ENDIAN = r'<'
BIG_ENDIAN = r'>'
def __init__(self):
self._endian = self.NETWORK
def get_endian(self):
return self._endian
def set_endian(self, val):
self._endian = val
def read(self, size):
raise NotImplementedError()
def write(self, buffer):
raise NotImplementedError()
def read_pad_byte(self, _len):
struct.unpack(f'{self._endian}{_len}x', self.read(_len))
def write_pad_byte(self, _len):
self.write(struct.pack(f'{self._endian}{_len}x'))
def read_char(self):
return struct.unpack(f'{self._endian}c', self.read(1))[0]
def write_char(self, val):
self.write(struct.pack(f'{self._endian}c', val))
def read_signed_char(self):
return struct.unpack(f'{self._endian}b', self.read(1))[0]
def write_signed_char(self, val):
self.write(struct.pack(f'{self._endian}b', val))
def read_unsigned_char(self):
return struct.unpack(f'{self._endian}B', self.read(1))[0]
def write_unsigned_char(self, val):
self.write(struct.pack(f'{self._endian}B', val))
def read_bool(self):
return struct.unpack(f'{self._endian}?', self.read(1))[0]
def write_bool(self, val):
self.write(struct.pack(f'{self._endian}?', val))
def read_short(self):
return struct.unpack(f'{self._endian}h', self.read(2))[0]
def write_short(self, val):
self.write(struct.pack(f'{self._endian}h', val))
def read_unsigned_short(self):
return struct.unpack(f'{self._endian}H', self.read(2))[0]
def write_unsigned_short(self, val):
self.write(struct.pack(f'{self._endian}H', val))
def read_int(self):
return struct.unpack(f'{self._endian}i', self.read(4))[0]
def write_int(self, val):
self.write(struct.pack(f'{self._endian}i', val))
def read_unsigned_int(self):
return struct.unpack(f'{self._endian}I', self.read(4))[0]
def write_unsigned_int(self, val):
self.write(struct.pack(f'{self._endian}I', val))
def read_long(self):
return struct.unpack(f'{self._endian}l', self.read(4))[0]
def write_long(self, val):
self.write(struct.pack(f'{self._endian}l', val))
def read_unsigned_long(self):
return struct.unpack(f'{self._endian}L', self.read(4))[0]
def write_unsigned_long(self, val):
self.write(struct.pack(f'{self._endian}L', val))
def read_long_long(self):
return struct.unpack(f'{self._endian}q', self.read(8))[0]
def write_long_long(self, val):
self.write(struct.pack(f'{self._endian}q', val))
def read_unsigned_long_long(self):
return struct.unpack(f'{self._endian}Q', self.read(8))[0]
def write_unsigned_long_long(self, val):
self.write(struct.pack(f'{self._endian}Q', val))
def read_float(self):
return struct.unpack(f'{self._endian}f', self.read(4))[0]
def write_float(self, val):
self.write(struct.pack(f'{self._endian}f', val))
def read_double(self):
return struct.unpack(f'{self._endian}d', self.read(8))[0]
def write_double(self, val):
self.write(struct.pack(f'{self._endian}d', val))
def read_bytes(self, _len):
return struct.unpack(f'{self._endian}{_len}s', self.read(_len))[0]
def write_bytes(self, val):
self.write(struct.pack(f'{self._endian}{len(val)}s', val))
def read_string(self, _len):
return self.read_bytes(_len).decode()
def write_string(self, val):
self.write_bytes(val.encode())
def read_pascal_bytes(self, _len):
return struct.unpack(f'{self._endian}{_len}p', self.read(_len))[0]
def write_pascal_bytes(self, val):
self.write(struct.pack(f'{self._endian}{len(val)}p', val))
def read_pascal_string(self, _len):
return self.read_pascal_bytes(_len).decode()
def write_pascal_string(self, val):
self.write_pascal_bytes(val.encode())
def read_python_int(self, _len):
return struct.unpack(f'{self._endian}{_len}P', self.read(_len))[0]
def write_python_int(self, val):
self.write(struct.pack(f'{self._endian}{len(val)}P', val))
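# Hedged usage sketch (added; not part of the original module): a minimal in-memory
# subclass showing how the abstract read/write hooks above are meant to be filled in.
# The name InMemoryByteArray is invented for illustration.
class InMemoryByteArray(ByteArrayAbstract):
    def __init__(self):
        super().__init__()
        self._buffer = bytearray()
        self._offset = 0
    def write(self, buffer):
        self._buffer.extend(buffer)
    def read(self, size):
        chunk = bytes(self._buffer[self._offset:self._offset + size])
        self._offset += size
        return chunk
# ba = InMemoryByteArray()
# ba.write_unsigned_short(513)          # packed big-endian ('!' network order) by default
# ba.write_string('abc')
# assert ba.read_unsigned_short() == 513
# assert ba.read_string(3) == 'abc'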
class ConfigParser(RawConfigParser):
"""配置解析类
"""
def getstr(self, section, option, default=None, **kwargs):
val = self.get(section, option, **kwargs)
return val if val else default
def getjson(self, section, option, **kwargs):
val = self.get(section, option, **kwargs)
result = Utils.json_encode(val)
return result
def _split_host(self, val):
if val.find(r':') > 0:
host, port = val.split(r':', 2)
return host.strip(), int(port.strip())
else:
return None
def get_split_host(self, section, option, **kwargs):
val = self.get(section, option, **kwargs)
return self._split_host(val)
def get_split_str(self, section, option, sep=r'|', **kwargs):
val = self.get(section, option, **kwargs)
return tuple(Utils.split_str(val, sep))
def get_split_int(self, section, option, sep=r',', **kwargs):
val = self.get(section, option, **kwargs)
return tuple(Utils.split_int(val, sep))
def split_float(self, val, sep=r','):
result = tuple(float(item.strip()) for item in val.split(sep))
return result
def get_split_float(self, section, option, sep=r',', **kwargs):
val = self.get(section, option, **kwargs)
return self.split_float(val, sep)
class Configure(Const):
"""配置类
"""
def __init__(self):
super().__init__()
self._parser = ConfigParser()
def _init_options(self):
self.clear()
def get_option(self, section, option):
return self._parser.get(section, option)
def get_options(self, section):
parser = self._parser
options = {}
for option in parser.options(section):
options[option] = parser.get(section, option)
return options
def set_options(self, section, **options):
if not self._parser.has_section(section):
self._parser.add_section(section)
for option, value in options.items():
self._parser.set(section, option, value)
self._init_options()
def read(self, files):
self._parser.clear()
self._parser.read(files, r'utf-8')
self._init_options()
def read_str(self, val):
self._parser.clear()
self._parser.read_string(val)
self._init_options()
def read_dict(self, val):
self._parser.clear()
self._parser.read_dict(val)
self._init_options()
| 21.493671
| 74
| 0.607538
| 8,335
| 0.977369
| 0
| 0
| 273
| 0.032012
| 0
| 0
| 942
| 0.11046
|
9a8b2c9a4fe128befea072dd96f7b456a616ecd8
| 15,178
|
py
|
Python
|
YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 12
|
2020-03-25T01:24:22.000Z
|
2021-09-18T06:40:16.000Z
|
YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 1
|
2020-04-22T07:52:36.000Z
|
2020-04-22T07:52:36.000Z
|
YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 4
|
2020-03-25T01:24:26.000Z
|
2020-09-20T11:29:09.000Z
|
# coding:utf-8
import numpy as np
import tensorflow as tf
from layers import *
from MobilenetV2 import MobilenetV2,MobilenetV2_dynamic
class YOLOV3(object):
def __init__(self, training,numcls=20):
self.__training = training
self.__num_classes = numcls
self.__strides=[8,16,32]
def build_nework(self, input_data, val_reuse=False,gt_per_grid=3):
"""
:param input_data: tensor of shape (batch_size, input_size, input_size, 3)
:return: conv_sbbox, conv_mbbox, conv_lbbox, pred_sbbox, pred_mbbox, pred_lbbox
conv_sbbox has shape (batch_size, input_size / 8, input_size / 8, gt_per_grid * (5 + num_classes))
conv_mbbox has shape (batch_size, input_size / 16, input_size / 16, gt_per_grid * (5 + num_classes))
conv_lbbox has shape (batch_size, input_size / 32, input_size / 32, gt_per_grid * (5 + num_classes))
conv_? are the raw YOLO convolution outputs (raw_dx, raw_dy, raw_dw, raw_dh, raw_conf, raw_prob)
pred_sbbox has shape (batch_size, input_size / 8, input_size / 8, gt_per_grid, 5 + num_classes)
pred_mbbox has shape (batch_size, input_size / 16, input_size / 16, gt_per_grid, 5 + num_classes)
pred_lbbox has shape (batch_size, input_size / 32, input_size / 32, gt_per_grid, 5 + num_classes)
pred_? is the bbox information predicted by YOLO (x, y, w, h, conf, prob); (x, y, w, h) are relative to input_size
"""
net_name = 'YoloV3'
with tf.variable_scope(net_name, reuse=val_reuse):
feature_map_s, feature_map_m, feature_map_l = MobilenetV2(input_data, self.__training)
#jiangwei
conv = convolutional(name='conv0', input_data=feature_map_l, filters_shape=(1, 1, 1280, 512),
training=self.__training)
conv = separable_conv(name='conv1', input_data=conv, input_c=512, output_c=1024, training=self.__training)
conv = convolutional(name='conv2', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training)
conv = separable_conv(name='conv3', input_data=conv, input_c=512, output_c=1024, training=self.__training)
conv = convolutional(name='conv4', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training)
# ----------**********---------- Detection branch of large object ----------**********----------
conv_lbbox = separable_conv(name='conv5', input_data=conv, input_c=512, output_c=1024,
training=self.__training)
conv_lbbox = convolutional(name='conv6', input_data=conv_lbbox,
filters_shape=(1, 1, 1024, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False)
pred_lbbox = decode(name='pred_lbbox', conv_output=conv_lbbox,
num_classes=self.__num_classes, stride=self.__strides[2])
# ----------**********---------- Detection branch of large object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv7', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training)
conv = upsample(name='upsample0', input_data=conv)
conv = route(name='route0', previous_output=feature_map_m, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional('conv8', input_data=conv, filters_shape=(1, 1, 96 + 256, 256),
training=self.__training)
conv = separable_conv('conv9', input_data=conv, input_c=256, output_c=512, training=self.__training)
conv = convolutional('conv10', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training)
conv = separable_conv('conv11', input_data=conv, input_c=256, output_c=512, training=self.__training)
conv = convolutional('conv12', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training)
# ----------**********---------- Detection branch of middle object ----------**********----------
conv_mbbox = separable_conv(name='conv13', input_data=conv, input_c=256, output_c=512,
training=self.__training)
conv_mbbox = convolutional(name='conv14', input_data=conv_mbbox,
filters_shape=(1, 1, 512, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False)
pred_mbbox = decode(name='pred_mbbox', conv_output=conv_mbbox,
num_classes=self.__num_classes, stride=self.__strides[1])
# ----------**********---------- Detection branch of middle object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv15', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training)
conv = upsample(name='upsample1', input_data=conv)
conv = route(name='route1', previous_output=feature_map_s, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv16', input_data=conv, filters_shape=(1, 1, 32 + 128, 128),
training=self.__training)
conv = separable_conv(name='conv17', input_data=conv, input_c=128, output_c=256, training=self.__training)
conv = convolutional(name='conv18', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training)
conv = separable_conv(name='conv19', input_data=conv, input_c=128, output_c=256, training=self.__training)
conv = convolutional(name='conv20', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training)
# ----------**********---------- Detection branch of small object ----------**********----------
conv_sbbox = separable_conv(name='conv21', input_data=conv, input_c=128, output_c=256,
training=self.__training)
conv_sbbox = convolutional(name='conv22', input_data=conv_sbbox,
filters_shape=(1, 1, 256, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False)
pred_sbbox = decode(name='pred_sbbox', conv_output=conv_sbbox,
num_classes=self.__num_classes, stride=self.__strides[0])
# ----------**********---------- Detection branch of small object ----------**********----------
for var in tf.global_variables(net_name):
tf.add_to_collection(net_name, var)
return conv_sbbox, conv_mbbox, conv_lbbox, pred_sbbox, pred_mbbox, pred_lbbox
def build_network_dynamic(self, input_data,statedict,val_reuse=False,inputsize=544,gt_per_grid=3):
net_name = 'YoloV3'
with tf.variable_scope(net_name, reuse=val_reuse):
feature_map_s, feature_map_m, feature_map_l = MobilenetV2_dynamic(input_data, self.__training,statedict)
conv = convolutional(name='conv0', input_data=feature_map_l, filters_shape=(1, 1, 1280, 512),
training=self.__training,statedict=statedict['headslarge.conv0'])
conv = separable_conv(name='conv1', input_data=conv, input_c=512, output_c=1024, training=self.__training,statedict=statedict['headslarge.conv1'])
conv = convolutional(name='conv2', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training,statedict=statedict['headslarge.conv2'])
conv = separable_conv(name='conv3', input_data=conv, input_c=512, output_c=1024, training=self.__training,statedict=statedict['headslarge.conv3'])
conv = convolutional(name='conv4', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training,statedict=statedict['headslarge.conv4'])
# ----------**********---------- Detection branch of large object ----------**********----------
conv_lbbox = separable_conv(name='conv5', input_data=conv, input_c=512, output_c=1024,
training=self.__training,statedict=statedict['detlarge.conv5'])
conv_lbbox = convolutional(name='conv6', input_data=conv_lbbox,
filters_shape=(1, 1, 1024, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False,statedict=statedict['detlarge.conv6'])
pred_lbbox = decode_validate(name='pred_lbbox', conv_output=conv_lbbox,
num_classes=self.__num_classes, stride=self.__strides[2], shape=inputsize // 32,
gt_pergrid=gt_per_grid)
# ----------**********---------- Detection branch of large object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv7', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training,statedict=statedict['mergelarge.conv7'])
conv = upsample_decode(name='upsample0', input_data=conv,shape1=inputsize//32,shape2=inputsize//32)
conv = route(name='route0', previous_output=feature_map_m, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional('conv8', input_data=conv, filters_shape=(1, 1, 96 + 256, 256),
training=self.__training,statedict=statedict['headsmid.conv8'])
conv = separable_conv('conv9', input_data=conv, input_c=256, output_c=512, training=self.__training,statedict=statedict['headsmid.conv9'])
conv = convolutional('conv10', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training,statedict=statedict['headsmid.conv10'])
conv = separable_conv('conv11', input_data=conv, input_c=256, output_c=512, training=self.__training,statedict=statedict['headsmid.conv11'])
conv = convolutional('conv12', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training,statedict=statedict['headsmid.conv12'])
# ----------**********---------- Detection branch of middle object ----------**********----------
conv_mbbox = separable_conv(name='conv13', input_data=conv, input_c=256, output_c=512,
training=self.__training,statedict=statedict['detmid.conv13'])
conv_mbbox = convolutional(name='conv14', input_data=conv_mbbox,
filters_shape=(1, 1, 512, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False,statedict=statedict['detmid.conv14'])
pred_mbbox = decode_validate(name='pred_mbbox', conv_output=conv_mbbox,
num_classes=self.__num_classes, stride=self.__strides[1], shape=inputsize // 16,
gt_pergrid=gt_per_grid)
# ----------**********---------- Detection branch of middle object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv15', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training,statedict=statedict['mergemid.conv15'])
conv = upsample_decode(name='upsample1', input_data=conv,shape1=inputsize//16,shape2=inputsize//16)
conv = route(name='route1', previous_output=feature_map_s, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv16', input_data=conv, filters_shape=(1, 1, 32 + 128, 128),
training=self.__training,statedict=statedict['headsmall.conv16'])
conv = separable_conv(name='conv17', input_data=conv, input_c=128, output_c=256, training=self.__training,statedict=statedict['headsmall.conv17'])
conv = convolutional(name='conv18', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training,statedict=statedict['headsmall.conv18'])
conv = separable_conv(name='conv19', input_data=conv, input_c=128, output_c=256, training=self.__training,statedict=statedict['headsmall.conv19'])
conv = convolutional(name='conv20', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training,statedict=statedict['headsmall.conv20'])
# ----------**********---------- Detection branch of small object ----------**********----------
conv_sbbox = separable_conv(name='conv21', input_data=conv, input_c=128, output_c=256,
training=self.__training,statedict=statedict['detsmall.conv21'])
conv_sbbox = convolutional(name='conv22', input_data=conv_sbbox,
filters_shape=(1, 1, 256, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False,statedict=statedict['detsmall.conv22'])
pred_sbbox = decode_validate(name='pred_sbbox', conv_output=conv_sbbox,
num_classes=self.__num_classes, stride=self.__strides[0], shape=inputsize // 8,
gt_pergrid=gt_per_grid)
# ----------**********---------- Detection branch of small object ----------**********----------
pred_sbbox = tf.reshape(pred_sbbox, (-1, 5 + self.__num_classes))
pred_mbbox = tf.reshape(pred_mbbox, (-1, 5 + self.__num_classes))
pred_lbbox = tf.reshape(pred_lbbox, (-1, 5 + self.__num_classes))
pred_bbox = tf.concat([pred_sbbox, pred_mbbox, pred_lbbox], 0, name='output/boxconcat')
for var in tf.global_variables(net_name):
tf.add_to_collection(net_name, var)
return pred_bbox
| 80.306878
| 158
| 0.566346
| 15,112
| 0.990951
| 0
| 0
| 0
| 0
| 0
| 0
| 3,899
| 0.255672
|
9a8ce9049f7230937ae69e4978f32515e2f46236
| 654
|
py
|
Python
|
saltlint/rules/CmdWaitRecommendRule.py
|
Poulpatine/salt-lint
|
304917d95d2730e7df8bd7b5dd29a3bd77c80250
|
[
"MIT"
] | null | null | null |
saltlint/rules/CmdWaitRecommendRule.py
|
Poulpatine/salt-lint
|
304917d95d2730e7df8bd7b5dd29a3bd77c80250
|
[
"MIT"
] | null | null | null |
saltlint/rules/CmdWaitRecommendRule.py
|
Poulpatine/salt-lint
|
304917d95d2730e7df8bd7b5dd29a3bd77c80250
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Warpnet B.V.
import re
from saltlint.linter.rule import DeprecationRule
from saltlint.utils import LANGUAGE_SLS
class CmdWaitRecommendRule(DeprecationRule):
id = '213'
shortdesc = 'SaltStack recommends using cmd.run together with onchanges, rather than cmd.wait'
description = 'SaltStack recommends using cmd.run together with onchanges, rather than cmd.wait'
severity = 'LOW'
languages = [LANGUAGE_SLS]
tags = ['formatting']
version_added = 'develop'
regex = re.compile(r"^\s{2}cmd\.wait:(\s+)?$")
def match(self, file, line):
return self.regex.search(line)
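# Illustrative matches for the regex above (examples added, not part of the rule):
# a state line indented by exactly two spaces, e.g. "  cmd.wait:", triggers the rule,
# while "  cmd.run:" or a more deeply indented "    cmd.wait:" does not.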
| 28.434783
| 100
| 0.697248
| 493
| 0.753823
| 0
| 0
| 0
| 0
| 0
| 0
| 277
| 0.423547
|
9a8d3871c093dea84d65b938bf3c599a010db785
| 7,818
|
py
|
Python
|
sdks/python/apache_beam/ml/inference/pytorch_test.py
|
hengfengli/beam
|
83a8855e5997e0311e6274c03bcb38f94efbf8ef
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
sdks/python/apache_beam/ml/inference/pytorch_test.py
|
hengfengli/beam
|
83a8855e5997e0311e6274c03bcb38f94efbf8ef
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
sdks/python/apache_beam/ml/inference/pytorch_test.py
|
hengfengli/beam
|
83a8855e5997e0311e6274c03bcb38f94efbf8ef
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import os
import shutil
import tempfile
import unittest
from collections import OrderedDict
import numpy as np
import pytest
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# Protect against environments where pytorch library is not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
import torch
from apache_beam.ml.inference.api import PredictionResult
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.pytorch import PytorchInferenceRunner
from apache_beam.ml.inference.pytorch import PytorchModelLoader
except ImportError:
raise unittest.SkipTest('PyTorch dependencies are not installed')
def _compare_prediction_result(a, b):
return (
torch.equal(a.inference, b.inference) and
torch.equal(a.example, b.example))
class PytorchLinearRegression(torch.nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
self.linear = torch.nn.Linear(input_dim, output_dim)
def forward(self, x):
out = self.linear(x)
return out
@pytest.mark.uses_pytorch
class PytorchRunInferenceTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_inference_runner_single_tensor_feature(self):
examples = [
torch.from_numpy(np.array([1], dtype="float32")),
torch.from_numpy(np.array([5], dtype="float32")),
torch.from_numpy(np.array([-3], dtype="float32")),
torch.from_numpy(np.array([10.0], dtype="float32")),
]
expected_predictions = [
PredictionResult(ex, pred) for ex,
pred in zip(
examples,
torch.Tensor([example * 2.0 + 0.5
for example in examples]).reshape(-1, 1))
]
model = PytorchLinearRegression(input_dim=1, output_dim=1)
model.load_state_dict(
OrderedDict([('linear.weight', torch.Tensor([[2.0]])),
('linear.bias', torch.Tensor([0.5]))]))
model.eval()
inference_runner = PytorchInferenceRunner(torch.device('cpu'))
predictions = inference_runner.run_inference(examples, model)
for actual, expected in zip(predictions, expected_predictions):
self.assertTrue(_compare_prediction_result(actual, expected))
def test_inference_runner_multiple_tensor_features(self):
examples = torch.from_numpy(
np.array([1, 5, 3, 10, -14, 0, 0.5, 0.5],
dtype="float32")).reshape(-1, 2)
examples = [
torch.from_numpy(np.array([1, 5], dtype="float32")),
torch.from_numpy(np.array([3, 10], dtype="float32")),
torch.from_numpy(np.array([-14, 0], dtype="float32")),
torch.from_numpy(np.array([0.5, 0.5], dtype="float32")),
]
expected_predictions = [
PredictionResult(ex, pred) for ex,
pred in zip(
examples,
torch.Tensor([f1 * 2.0 + f2 * 3 + 0.5
for f1, f2 in examples]).reshape(-1, 1))
]
model = PytorchLinearRegression(input_dim=2, output_dim=1)
model.load_state_dict(
OrderedDict([('linear.weight', torch.Tensor([[2.0, 3]])),
('linear.bias', torch.Tensor([0.5]))]))
model.eval()
inference_runner = PytorchInferenceRunner(torch.device('cpu'))
predictions = inference_runner.run_inference(examples, model)
for actual, expected in zip(predictions, expected_predictions):
self.assertTrue(_compare_prediction_result(actual, expected))
def test_num_bytes(self):
inference_runner = PytorchInferenceRunner(torch.device('cpu'))
examples = torch.from_numpy(
np.array([1, 5, 3, 10, -14, 0, 0.5, 0.5],
dtype="float32")).reshape(-1, 2)
self.assertEqual((examples[0].element_size()) * 8,
inference_runner.get_num_bytes(examples))
def test_namespace(self):
inference_runner = PytorchInferenceRunner(torch.device('cpu'))
self.assertEqual(
'RunInferencePytorch', inference_runner.get_metrics_namespace())
def test_pipeline_local_model(self):
with TestPipeline() as pipeline:
examples = torch.from_numpy(
np.array([1, 5, 3, 10, -14, 0, 0.5, 0.5],
dtype="float32")).reshape(-1, 2)
expected_predictions = [
PredictionResult(ex, pred) for ex,
pred in zip(
examples,
torch.Tensor([f1 * 2.0 + f2 * 3 + 0.5
for f1, f2 in examples]).reshape(-1, 1))
]
state_dict = OrderedDict([('linear.weight', torch.Tensor([[2.0, 3]])),
('linear.bias', torch.Tensor([0.5]))])
path = os.path.join(self.tmpdir, 'my_state_dict_path')
torch.save(state_dict, path)
model_loader = PytorchModelLoader(
state_dict_path=path,
model_class=PytorchLinearRegression(input_dim=2, output_dim=1))
pcoll = pipeline | 'start' >> beam.Create(examples)
predictions = pcoll | RunInference(model_loader)
assert_that(
predictions,
equal_to(expected_predictions, equals_fn=_compare_prediction_result))
def test_pipeline_gcs_model(self):
with TestPipeline() as pipeline:
examples = torch.from_numpy(
np.array([1, 5, 3, 10], dtype="float32").reshape(-1, 1))
expected_predictions = [
PredictionResult(ex, pred) for ex,
pred in zip(
examples,
torch.Tensor([example * 2.0 + 0.5
for example in examples]).reshape(-1, 1))
]
gs_pth = 'gs://apache-beam-ml/pytorch_lin_reg_model_2x+0.5_state_dict.pth'
model_loader = PytorchModelLoader(
state_dict_path=gs_pth,
model_class=PytorchLinearRegression(input_dim=1, output_dim=1))
pcoll = pipeline | 'start' >> beam.Create(examples)
predictions = pcoll | RunInference(model_loader)
assert_that(
predictions,
equal_to(expected_predictions, equals_fn=_compare_prediction_result))
def test_invalid_input_type(self):
with self.assertRaisesRegex(TypeError, "expected Tensor as element"):
with TestPipeline() as pipeline:
examples = np.array([1, 5, 3, 10], dtype="float32").reshape(-1, 1)
state_dict = OrderedDict([('linear.weight', torch.Tensor([[2.0]])),
('linear.bias', torch.Tensor([0.5]))])
path = os.path.join(self.tmpdir, 'my_state_dict_path')
torch.save(state_dict, path)
model_loader = PytorchModelLoader(
state_dict_path=path,
model_class=PytorchLinearRegression(input_dim=1, output_dim=1))
pcoll = pipeline | 'start' >> beam.Create(examples)
# pylint: disable=expression-not-assigned
pcoll | RunInference(model_loader)
if __name__ == '__main__':
unittest.main()
| 37.228571
| 80
| 0.661678
| 5,973
| 0.764006
| 0
| 0
| 5,762
| 0.737017
| 0
| 0
| 1,450
| 0.185469
|
9a8deeda4be2011a1d0dba2c5373aa43b91fc628
| 6,636
|
py
|
Python
|
example/test/L20_snake.py
|
Michael8968/skulpt
|
15956a60398fac92ee1dab25bf661ffc003b2eaf
|
[
"MIT"
] | 2
|
2021-12-18T06:34:26.000Z
|
2022-01-05T05:08:47.000Z
|
example/test/L20_snake.py
|
Michael8968/skulpt
|
15956a60398fac92ee1dab25bf661ffc003b2eaf
|
[
"MIT"
] | null | null | null |
example/test/L20_snake.py
|
Michael8968/skulpt
|
15956a60398fac92ee1dab25bf661ffc003b2eaf
|
[
"MIT"
] | null | null | null |
import pygame
import sys
import time
import random
from pygame.locals import *
# Pygame Init
pygame.init()
# Play Surface
size = width, height = 800, 800
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Snake Change")
# Colors
red = (255, 0, 0)
green = (0, 255, 0)
black = (0, 0, 0)
white = (255, 255, 255)
brown = (165, 42, 42)
blue = (0, 0, 255)
# FPS controller
times = pygame.time.Clock()
# Game settings
delta = 10
snakePos = [100, 50]
snakeBody = [[100, 50], [90, 50], [80, 50]]
snakePos2 = [100, 350]
snakeBody2 = [[100, 350], [90, 350], [80, 350]]
foodPos = [400, 200]
#snakePos = [delta*4, delta*3]
#snakeBody = [[delta*4, delta*3], [delta*3, delta*3], [delta*2, delta*3]]
#foodPos = [delta*10, delta*3]
foodSpawn = True
direction = 'RIGHT'
direction2 = 'RIGHT'
score = 0
score2 = 0
gameover = False
winner = ''
# Show Score
def showScore(a,b):
SFont = pygame.font.Font(None, 32)
Ssurf = SFont.render("Score{1}p : {0}".format(a,b), True, black)
if b == 1:
screen.blit(Ssurf, (250,10))
elif b == 2:
screen.blit(Ssurf, (450,10))
def showEnd():
font = pygame.font.SysFont(None, 88)
fontimg = font.render('game over', True, red)
screen.blit(fontimg, (250,250))
fontimg2 = font.render('press return to restart', True, brown)
screen.blit(fontimg2, (100,350))
if winner:
fontimg3 = font.render('winner is ' + winner, True, red)
screen.blit(fontimg3, (250,450))
else:
if score > score2:
fontimg3 = font.render('winner is 1p', True, red)
screen.blit(fontimg3, (250,450))
elif score < score2:
fontimg3 = font.render('winner is 2p', True, red)
screen.blit(fontimg3, (250,450))
elif score == score2:
fontimg3 = font.render('no winner', True, red)
screen.blit(fontimg3, (250,450))
def control_1p(aa):
if event.key == K_RIGHT:
if aa != 'LEFT':
aa = 'RIGHT'
if event.key == K_LEFT:
if aa != 'RIGHT':
aa = 'LEFT'
if event.key == K_UP:
if aa != 'DOWN':
aa = 'UP'
if event.key == K_DOWN:
if aa != 'UP':
aa = 'DOWN'
return aa
def control_2p(aa):
if event.key == K_d:
if aa != 'LEFT':
aa = 'RIGHT'
if event.key == K_a:
if aa != 'RIGHT':
aa = 'LEFT'
if event.key == K_w:
if aa != 'DOWN':
aa = 'UP'
if event.key == K_s:
if aa != 'UP':
aa = 'DOWN'
return aa
def move(a,b):
if a == 'RIGHT':
b[0] += delta
if a == 'LEFT':
b[0] -= delta
if a == 'DOWN':
b[1] += delta
if a == 'UP':
b[1] -= delta
def is_game_over():
if snakePos[0] >= width or snakePos[0] < 0:
return True, '2p'
if snakePos[1] >= height or snakePos[1] < 0:
return True, '2p'
if snakePos2[0] >= width or snakePos2[0] < 0:
return True, '1p'
if snakePos2[1] >= height or snakePos2[1] < 0:
return True, '1p'
# Self hit
for block in snakeBody[1:]:
if snakePos == block:
return True, '2p'
if snakePos2 == block:
return True, '1p'
for block in snakeBody2[1:]:
if snakePos == block:
return True, '2p'
if snakePos2 == block:
return True, '1p'
if snakePos == snakePos2:
return True, ''
return False, ''
def is_game_over2():
a = False
b = ''
if snakePos[0] >= width or snakePos[0] < 0:
a = True
b = '2p'
if snakePos[1] >= height or snakePos[1] < 0:
a = True
b = '2p'
if snakePos2[0] >= width or snakePos2[0] < 0:
a = True
b = '1p'
if snakePos2[1] >= height or snakePos2[1] < 0:
a = True
b = '1p'
# Self hit
for block in snakeBody[1:]:
if snakePos == block:
a = True
b = '2p'
if snakePos2 == block:
a = True
b = '1p'
for block in snakeBody2[1:]:
if snakePos == block:
a = True
b = '2p'
if snakePos2 == block:
a = True
b = '1p'
if snakePos == snakePos2:
a = True
return a, b
def motion(score, foodSpawn, snakeBody, snakePos, color):
snakeBody.insert(0, list(snakePos))
if snakePos == foodPos:
foodSpawn = False
score += 1
else:
snakeBody.pop()
for pos in snakeBody:
pygame.draw.rect(screen, color, (pos[0], pos[1], delta, delta))
return score, foodSpawn
while True:
screen.fill(white)
event = pygame.event.poll()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
direction = control_1p(direction)
direction2 = control_2p(direction2)
move(direction,snakePos)
move(direction2,snakePos2)
score, foodSpawn = motion(score, foodSpawn, snakeBody, snakePos, green)
score2, foodSpawn = motion(score2, foodSpawn, snakeBody2, snakePos2, blue)
gameover, winner = is_game_over()
#If you prefer not to use the approach above, rewrite the function and call it as follows
#gameover, winner = is_game_over2()
if foodSpawn == False:
foodPos = [random.randrange(1, width // delta) * delta, random.randrange(1, height // delta) * delta]
foodSpawn = True
pygame.draw.rect(screen, brown, (foodPos[0], foodPos[1], delta, delta))
showScore(score,1)
showScore(score2,2)
if gameover:
showEnd()
while True:
times.tick(10)
event = pygame.event.poll()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
if event.key == K_RETURN:
snakePos = [100, 50]
snakeBody = [[100, 50], [90, 50], [80, 50]]
snakePos2 = [100, 350]
snakeBody2 = [[100, 350], [90, 350], [80, 350]]
foodPos = [400, 200]
foodSpawn = True
direction = 'RIGHT'
direction2 = 'RIGHT'
score = 0
score2 = 0
gameover = False
winner = ''
break
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
pygame.quit()
sys.exit()
pygame.display.flip()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
pygame.quit()
sys.exit()
pygame.display.flip()
times.tick(30)
| 25.231939
| 109
| 0.518987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 664
| 0.099461
|
9a8e3182ccf25a9266ba55ff765c256d44cf7bcc
| 4,203
|
py
|
Python
|
readgadget/readrockstar.py
|
danielmarostica/pygadgetreader
|
977949da7fcb6585f3e0270019d369c6967b317c
|
[
"BSD-3-Clause"
] | 6
|
2020-09-02T21:11:59.000Z
|
2021-09-24T16:12:44.000Z
|
readgadget/readrockstar.py
|
danielmarostica/pygadgetreader
|
977949da7fcb6585f3e0270019d369c6967b317c
|
[
"BSD-3-Clause"
] | 1
|
2021-09-24T14:40:03.000Z
|
2021-09-25T20:07:13.000Z
|
readgadget/readrockstar.py
|
danielmarostica/pygadgetreader
|
977949da7fcb6585f3e0270019d369c6967b317c
|
[
"BSD-3-Clause"
] | 1
|
2020-11-18T19:15:39.000Z
|
2020-11-18T19:15:39.000Z
|
from .modules.common import *
import numpy as np
import os
from .modules.rs_structs import getRSformat
class RockstarFile(object):
def __init__(self,binfile,data,galaxies,debug):
self.galaxies = galaxies
self.binfile = binfile
self.debug = debug
self.header()
self.halos()
if data == 'particles':
self.particles()
self.f.close()
def header(self):
f = open(self.binfile,'rb')
f.seek(8*3 + 4*10,1)
self.num_halos = np.fromfile(f,dtype=np.int64,count=1)[0]
self.num_particles = np.fromfile(f,dtype=np.int64,count=1)[0]
#print self.num_halos
f.seek(4 + 4 + 8,1)
self.format_revision = np.fromfile(f,dtype=np.int32,count=1)[0]
if self.debug: print('found HALO_FORMAT_REVISION %d (header)' % self.format_revision)
bytes_left = 256 - f.tell()
f.seek(bytes_left,1)
self.f = f
self.halostruct = getRSformat(self)
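# Descriptive note (added; only what the code itself reads is assumed): the reader
# above skips 3*8 + 10*4 = 64 bytes, reads num_halos and num_particles as int64,
# skips another 4 + 4 + 8 = 16 bytes, reads format_revision as int32, then seeks
# forward so that exactly 256 header bytes are consumed before the halo structs start.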
def halos(self):
#print 'reading %d halos (%d)' % (self.num_halos,self.galaxies)
self.halodata = np.fromfile(self.f,dtype=self.halostruct,count=self.num_halos)
def particles(self):
self.particle_IDs = np.zeros(self.num_particles,dtype=np.int64)
self.particle_IDs.fill(-1)
self.particle_haloIDs = np.zeros(self.num_particles,dtype=np.int64)
self.particle_haloIDs.fill(-1)
nparts = 0
for i in range(0,self.num_halos):
hid = self.halodata[i]['id']
num_p = self.halodata[i]['num_p']
#print '%d %d' % (i,num_p)
pids = np.fromfile(self.f,dtype=np.int64,count=num_p)
self.particle_IDs[nparts:nparts+num_p] = pids
self.particle_haloIDs[nparts:nparts+num_p] = hid
nparts += num_p
#print 'complete'
def compileReturnArray(RS,data):
"""compile data from RS binary and return requested value"""
arr = []
singleval = False
## return particle ID data
if data == 'particles':
npart = 0
for i in range(0,len(RS)):
npart += len(RS[i].particle_IDs)
arr = np.zeros((npart,2),dtype=np.int64)
npart = 0
for i in range(0,len(RS)):
n = len(RS[i].particle_IDs)
arr[npart:npart+n,0] = RS[i].particle_IDs
arr[npart:npart+n,1] = RS[i].particle_haloIDs
npart += n
return arr
## return halo struct data
if data in RS[0].halostruct.names:
singleval = True
if RS[0].debug: print('%s found in halodata' % data)
nhalos = 0
for i in range(0,len(RS)):
nhalos += RS[i].num_halos
if singleval:
arr.extend(RS[i].halodata[data])
else:
arr.extend(RS[i].halodata)
#print nhalos,len(arr)
return np.asarray(arr)
def readrockstargalaxies(binfile,data,**kwargs):
if 'galaxies' in kwargs: del kwargs['galaxies']
arr = readrockstar(binfile,data,galaxies=1,**kwargs)
return arr
def readrockstar(binfile,data,**kwargs):
"""read rockstar binary file
Parameters
----------
binfile : string
path to rockstar binary file. Do NOT include file extension or leading number
data : string
requested data, see readme for details
Examples
--------
>>> halo_mass = readrockstar('/Users/bob/halos_020','m')
>>> halo_mass
array([ 7.25643648e+08, 5.70148608e+08, 3.97376288e+08,
3.66277274e+09, 1.99379231e+10, 5.01039648e+08,
...,
1.58950515e+09, 2.10782208e+09, 8.41401088e+09,
4.14653504e+08], dtype=float32)
"""
galaxies = 0
if 'galaxies' in kwargs and kwargs['galaxies']==1:
galaxies = 1
debug = 0
if 'debug' in kwargs and kwargs['debug']==1:
debug = 1
RS_DATA = []
for j in range(0,5000):
b = '%s.%d.bin' % (binfile,j)
if os.path.isfile(b):
if debug: print('reading %s' % b)
RS_DATA.append(RockstarFile(b,data,galaxies,debug))
else:
break
arr = compileReturnArray(RS_DATA,data)
return arr
| 30.23741
| 93
| 0.578158
| 1,789
| 0.425648
| 0
| 0
| 0
| 0
| 0
| 0
| 1,025
| 0.243873
|
9a8e4ada3be3bb52b1edcd6ad889f5b0b8142092
| 7,019
|
py
|
Python
|
src/backend/preprocess/preprocess_helper.py
|
scmc/vch-mri
|
ffd2a7b60d770a76b545ce271f85e12f53cfb3ad
|
[
"MIT"
] | 1
|
2021-12-01T23:40:20.000Z
|
2021-12-01T23:40:20.000Z
|
src/backend/preprocess/preprocess_helper.py
|
scmc/vch-mri
|
ffd2a7b60d770a76b545ce271f85e12f53cfb3ad
|
[
"MIT"
] | 5
|
2021-03-11T03:07:38.000Z
|
2021-03-11T03:11:43.000Z
|
src/backend/preprocess/preprocess_helper.py
|
scmc/vch-mri
|
ffd2a7b60d770a76b545ce271f85e12f53cfb3ad
|
[
"MIT"
] | 18
|
2020-12-30T22:04:44.000Z
|
2021-12-01T23:40:23.000Z
|
import boto3
from datetime import datetime, date
import re
import string
import pandas as pd
from spellchecker import SpellChecker
import uuid
import psycopg2
from psycopg2 import sql
import sys
sys.path.append('.')
from rule_processing import postgresql
def queryTable(conn, table):
cmd = """
SELECT * FROM {}
"""
with conn.cursor() as cur:
cur.execute(sql.SQL(cmd).format(sql.Identifier(table)))
return cur.fetchall()
compr = boto3.client(service_name='comprehend')
compr_m = boto3.client(service_name='comprehendmedical')
spell = SpellChecker()
conn = postgresql.connect()
spelling_list = [x[0] for x in queryTable(conn, 'spellchecker')]
conn.close()
# Add words to spell list
spell.word_frequency.load_words(spelling_list)
def findId(val):
if val == '-1':
return str(uuid.uuid4())
return val
def findUnidentified(val):
if val.lower() == 'unidentified':
return 'U/I'
return val
def convert2CM(height):
if not isinstance(height, str):
return 0
try:
parts = height.split(' ')
unit = parts[1]
if unit == 'CM':
return float(parts[0])
elif unit == 'IN':
quantity_parts = parts[0].replace("'", ' ').replace('"', ' ').split()
foot = quantity_parts[0]
inch = 0
if len(quantity_parts) == 2:
inch = quantity_parts[1]
return float(foot) * 30.48 + float(inch) * 2.54
except:
return 0
def convert2KG(weight):
if not isinstance(weight, str):
return 0
try:
parts = weight.split(' ')
unit = parts[1]
if unit == 'KG':
return float(parts[0])
elif unit == 'LBS':
return 0.453592 * float(parts[0])
except:
return 0
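# Worked examples for the converters above (input strings fabricated for illustration):
#   convert2CM("5'10\" IN") -> 5*30.48 + 10*2.54 = 177.8
#   convert2CM("180 CM")    -> 180.0
#   convert2KG("150 LBS")   -> 150*0.453592 ≈ 68.04
#   convert2KG("70 KG")     -> 70.0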
def dob2age(dob):
try:
birthdate = datetime.strptime(dob, '%Y-%m-%d')
today = date.today()
age = today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))
return age
except:
return 0
def contains_word(sample, text):
return f' {sample} ' in f' {text} '
def preProcessText(col):
"""
Takes in a pandas.Series and preprocesses the text
"""
reponct = string.punctuation.replace("?","").replace("/","")
rehtml = re.compile('<.*>')
extr = col.str.strip()
extr = extr.str.replace(rehtml, '', regex=True)
extr = extr.str.translate(str.maketrans('','',reponct))
extr = extr.str.replace('[^0-9a-zA-Z?/ ]+', ' ', regex=True)
extr = extr.str.replace('\s+', ' ', regex=True)
extr = extr.str.lower()
return extr
def checkSpelling(text: str):
words = text.split()
return ' '.join([spell.correction(word) for word in words])
def replace_conjunctions(conj_list, text: str, info_list):
temp_text = f' {text} '
for conj in conj_list:
if contains_word(conj[0],text):
info_list.append(conj[1])
temp_text = temp_text.replace(f' {conj[0]} ', f' {conj[1]} ')
return temp_text[1:len(temp_text)-1]
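# Illustrative call (the conjunction pairs are invented for this example):
#   replace_conjunctions([('w/', 'with'), ('r/o', 'rule out')], 'r/o fracture w/ swelling', info)
#   returns 'rule out fracture with swelling' and appends 'with' and 'rule out' to info.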
def find_all_entities(data: str):
if not data:
return []
try:
result = compr_m.detect_entities_v2(Text=data)
return result['Entities']
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def infer_icd10_cm(data: str, med_cond, diagnosis, symptoms):
"""
:data type: string to pass through Comprehend Medical icd10_cm
:med_cond type: List[]
:diagnosis type: List[]
:symptoms type: List[]
"""
if not data:
return
try:
icd10_result = compr_m.infer_icd10_cm(Text=data)
for resp in icd10_result['Entities']:
if resp['Score'] > 0.4:
resp_str = resp['Text']
category = ''
# first check Attributes
for attr in resp['Attributes']:
if attr['Score'] > 0.4:
if attr['Type'] == 'ACUITY' or attr['Type'] == 'DIRECTION':
resp_str = f'{attr["Text"]}' + ' ' + resp_str
elif attr['Type'] == 'SYSTEM_ORGAN_SITE':
resp_str = resp_str + ' ' + f'{attr["Text"]}'
for trait in resp['Traits']:
if trait['Score'] > 0.4:
if trait['Name'] == 'NEGATION':
category = 'NEG'
break #don't save anything for negation
elif trait['Name'] == 'SYMPTOM':
category = 'SYMP'
elif trait['Name'] == 'DIAGNOSIS':
category = 'DIAGN'
# add our response string to corresponding list
if not category:
resp_str = checkSpelling(resp_str)
med_cond.append(resp_str)
elif category == 'SYMP':
resp_str = checkSpelling(resp_str)
symptoms.append(resp_str)
elif category == 'DIAGN':
resp_str = checkSpelling(resp_str)
diagnosis.append(resp_str)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def find_key_phrases(data:str, key_phrases, icd10cm_list, anatomy_list):
"""
:data type: string to pass through Comprehend Detect Key Phrases
:key_phrases type: List[]
:icd10cm_list type: List[]
:anatomy_list type: List[]
"""
if not data:
return
try:
kp_result = compr.detect_key_phrases(Text=data, LanguageCode='en')
for resp in kp_result['KeyPhrases']:
placed = False
if resp['Score'] > 0.4:
for icd10cm in icd10cm_list:
if contains_word(icd10cm, resp['Text']):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
placed = True
break
elif contains_word(resp['Text'], icd10cm):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
placed = True
break
if not placed:
for anatomy in anatomy_list:
if contains_word(anatomy, resp['Text']):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
break
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
| 34.747525
| 105
| 0.540248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,263
| 0.17994
|
9a8e57b168ea55c696b5fec3c4c437440c05734d
| 1,182
|
py
|
Python
|
script_example.py
|
op8867555/BGmi
|
22a7b0292f0fe435e87208154826d8f5baeb7b67
|
[
"MIT"
] | null | null | null |
script_example.py
|
op8867555/BGmi
|
22a7b0292f0fe435e87208154826d8f5baeb7b67
|
[
"MIT"
] | null | null | null |
script_example.py
|
op8867555/BGmi
|
22a7b0292f0fe435e87208154826d8f5baeb7b67
|
[
"MIT"
] | null | null | null |
import datetime
from bgmi.script import ScriptBase
from bgmi.utils import parse_episode
class Script(ScriptBase):
class Model(ScriptBase.Model):
bangumi_name = "TEST_BANGUMI"
cover = ""
update_time = "Mon"
due_date = datetime.datetime(2017, 9, 30)
def get_download_url(self):
# fetch and return a dict mapping episode number to download link
# (ignore that these are not the same bangumi; this is just example data)
resp = [
{
"title": "[c.c动漫][4月新番][影之诗][ShadowVerse][01][简日][HEVC][1080P][MP4]",
"link": "http://example.com/Bangumi/1/1.torrent",
},
{
"title": "[YMDR][慕留人 -火影忍者新时代-][2017][2][AVC][JAP][BIG5][MP4][1080P]",
"link": "http://example.com/Bangumi/1/2.torrent",
},
{
"title": "[ZXSUB仲夏动漫字幕组][博人传-火影忍者次世代][03][720P繁体][MP4]",
"link": "magnet:?xt=urn:btih:233",
},
]
ret = {}
for item in resp:
e = parse_episode(item["title"])
if e:
ret[e] = item["link"]
return ret
if __name__ == "__main__":
s = Script()
print(s.get_download_url())
| 26.863636
| 86
| 0.5
| 1,090
| 0.865079
| 0
| 0
| 0
| 0
| 0
| 0
| 489
| 0.388095
|
9a8e626f8a8e604d6b65b5bcce02a4426d19dada
| 677
|
py
|
Python
|
3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/10. Exercise - Functions Advanced/11_fill_the_box.py
|
kzborisov/SoftUni
|
ccb2b8850adc79bfb2652a45124c3ff11183412e
|
[
"MIT"
] | 1
|
2021-02-07T07:51:12.000Z
|
2021-02-07T07:51:12.000Z
|
3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/10. Exercise - Functions Advanced/11_fill_the_box.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/10. Exercise - Functions Advanced/11_fill_the_box.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
from collections import deque
def fill_the_box(*args):
box_size = args[0] * args[1] * args[2]
args = deque(args[3:])
while args:
curr_arg = args.popleft()
if curr_arg == "Finish":
break
box_size -= curr_arg
if box_size < 0:
args.remove("Finish")
return f"No more free space! You have {sum(args) + abs(box_size)} more cubes."
return f"There is free space in the box. You could put {abs(box_size // 1)} more cubes."
print(fill_the_box(2, 8, 2, 2, 1, 7, 3, 1, 5, "Finish"))
print(fill_the_box(5, 5, 2, 40, 11, 7, 3, 1, 5, "Finish"))
print(fill_the_box(10, 10, 10, 40, "Finish", 2, 15, 30))
| 29.434783
| 92
| 0.583456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.283604
|
9a8fedf028eb554590720a2eafe70d6a08a4c617
| 19,617
|
py
|
Python
|
src/the_tale/the_tale/common/utils/views.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/common/utils/views.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/common/utils/views.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
# for external code
ViewError = utils_exceptions.ViewError
class Context(object):
def __setattr__(self, name, value):
if hasattr(self, name):
raise ViewError(code='internal.try_to_reassign_context_value',
message=conf.settings.DEFAUL_ERROR_MESSAGE,
info={'name': name})
super(Context, self).__setattr__(name, value)
class View(object):
__slots__ = ('processors', 'logic', 'name', 'path', 'resource', '__doc__', 'csrf_exempt')
def __init__(self, logic):
self.processors = []
self.logic = logic
self.name = None
self.path = None
self.resource = None
self.csrf_exempt = getattr(logic, 'csrf_exempt', False)
self.__doc__ = logic.__doc__
# TODO: uncomment after https://bugs.python.org/issue24329 is fixed
# self.__name__ = logic.__name__
# self.__qualname__ = logic.__qualname__
def get_processors(self):
return self.resource.get_processors() + self.processors
def add_processor(self, processor):
self.processors.insert(0, processor)
def __call__(self, request, **url_arguments):
context = Context()
context.django_request = request
context.django_url_argumens = url_arguments
unprocessed_processors = self.get_processors()
processed_processors = []
response = None
try:
for processor in unprocessed_processors:
response = processor.preprocess(context)
processed_processors.append(processor)
if response:
break
if response is None:
response = self.logic(context)
for processor in reversed(processed_processors):
response = processor.postprocess(context, response)
if response:
break
return response.complete(context)
except ViewError as error:
return self.process_error(error, request, context)
def _get_error_response_class(self, request):
accepted_mimetypes = request.META.get('HTTP_ACCEPT')
if accepted_mimetypes is None:
return AjaxError
if any(tp in accepted_mimetypes for tp in ('application/xhtml+xml', 'text/html', 'text/plain', 'text/xml')):
return PageError
if any(tp in accepted_mimetypes for tp in ('application/x-javascript',)):
return NotImplemented
return AjaxError
def process_error(self, error, request, context):
error_response_class = self._get_error_response_class(request)
info = error.info
info['resource'] = '%s.%s' % (self.resource.name, self.name)
return error_response_class(code=error.code,
errors=error.message,
context=context,
http_status=error.http_status,
info=info).complete(context)
def get_url_record(self):
regex = ''
for part in self.path:
if len(part) == 0:
regex = '%s/' % regex
elif part[0] == '#':
regex = '%s/(?P<%s>[^\/]*)' % (regex, part[1:])
elif part[0] == '^':
regex = '%s/%s' % (regex, part[1:])
else:
regex = '%s/%s' % (regex, part)
return django_urls.url('^%s$' % regex[1:],
self,
name=self.name,
kwargs={})
def __lt__(self, other):
for l, r in zip(self.path, other.path):
if not l: return True
if not r: return False
if l[0] == r[0] == '#': return l < r
if l[0] == '#': return False
if r[0] == '#': return True
if l[0] != '#' and l != r: return l < r
return len(self.path) < len(other.path)
class Resource(object):
__slots__ = ('name', 'processors', 'views', 'parent', 'children')
def __init__(self, name):
super(Resource, self).__init__()
self.name = name
self.processors = []
self.views = {}
self.parent = None
self.children = []
def get_processors(self):
if self.parent:
return self.parent.get_processors() + self.processors
return self.processors
def add_processor(self, processor):
self.processors.append(processor)
def add_child(self, child):
self.children.append(child)
child.parent = self
def __call__(self, *argv, **kwargs):
name = kwargs.get('name', argv[-1])
methods = kwargs.get('method', ('get',))
if isinstance(methods, str):
methods = [methods]
methods = [m.upper() for m in methods]
@functools.wraps(self.__call__)
def decorator(func):
view = func if isinstance(func, View) else View(logic=func)
# view = functools.wraps(view.logic)(view)
view.name = name
view.path = argv
view.add_processor(HttpMethodProcessor(allowed_methods=methods))
view.add_processor(CSRFProcessor())
if view.name in self.views:
raise exceptions.DuplicateViewNameError(name=view.name)
self.views[view.name] = view
view.resource = self
return view
return decorator
def get_urls(self):
urls = []
for view in sorted(self.views.values()):
urls.append(view.get_url_record())
return urls
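    # Hedged usage sketch (added; names are invented and the real wiring lives elsewhere
    # in the project): a Resource instance is used as a decorator factory for views.
    #   resource = Resource('accounts')
    #   @resource('#account_id', 'show', name='show', method='get')
    #   def show(context):
    #       ...  # context.django_request etc. are prepared by the processors
    #   urlpatterns = resource.get_urls()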
class ProcessorArgument(object):
__slots__ = ('default', )
def __init__(self, default=NotImplemented):
self.default = default
# TODO: write metaclass for processing processor arguments
class BaseViewProcessor(object):
__slots__ = ()
def __init__(self, **kwargs):
for argument_name in dir(self):
argument = getattr(self, argument_name)
if not isinstance(argument, ProcessorArgument):
continue
name = argument_name[4:].lower()
value = kwargs.get(name, getattr(self, name.upper(), argument.default))
setattr(self, name, value)
for argument_name, value in kwargs.items():
argument = getattr(self, 'ARG_%s' % argument_name.upper())
if not isinstance(argument, ProcessorArgument):
raise exceptions.WrongProcessorArgumentError(processor=self, argument=argument_name)
setattr(self, argument_name.lower(), value)
self.initialize()
def initialize(self):
pass
def preprocess(self, context):
pass
def postprocess(self, context, response):
return response
def __call__(self, view):
view = view if isinstance(view, View) else View(logic=view)
view.add_processor(self)
return view
class HttpMethodProcessor(BaseViewProcessor):
__slots__ = ('allowed_methods', )
ARG_ALLOWED_METHODS = ProcessorArgument()
def initialize(self):
super(HttpMethodProcessor, self).initialize()
self.allowed_methods = frozenset(self.allowed_methods)
def preprocess(self, context):
if context.django_request.method not in self.allowed_methods:
raise ViewError(code='common.wrong_http_method',
message='К адресу нельзя обратиться с помощью HTTP метода "%(method)s"' % {'method': context.django_request.method})
context.django_method = getattr(utils_relations.HTTP_METHOD, context.django_request.method)
class CSRFProcessor(BaseViewProcessor):
def preprocess(self, context):
context.csrf = django_middleware.csrf.get_token(context.django_request)
class PermissionProcessor(BaseViewProcessor):
__slots__ = ('permission', 'context_name')
ARG_PERMISSION = ProcessorArgument()
ARG_CONTEXT_NAME = ProcessorArgument()
def preprocess(self, context):
setattr(context, self.context_name, context.django_request.user.has_perm(self.permission))
class AccessProcessor(BaseViewProcessor):
__slots__ = ('error_code', 'error_message')
ARG_ERROR_CODE = ProcessorArgument()
ARG_ERROR_MESSAGE = ProcessorArgument()
def check(self, context):
raise NotImplementedError()
def preprocess(self, context):
if not self.check(context):
raise ViewError(code=self.error_code, message=self.error_message)
class FlaggedAccessProcessor(AccessProcessor):
__slots__ = ('error_code', 'error_message')
ARG_ERROR_CODE = ProcessorArgument()
ARG_ERROR_MESSAGE = ProcessorArgument()
ARG_ARGUMENT = ProcessorArgument()
def extract(self, context):
return getattr(context, self.argument)
def validate(self, argument):
return argument
def check(self, context):
return self.validate(self.extract(context))
class FormProcessor(BaseViewProcessor):
__slots__ = ('error_message', 'form_class', 'context_name')
ARG_FORM_CLASS = ProcessorArgument()
ARG_ERROR_MESSAGE = ProcessorArgument()
ARG_CONTEXT_NAME = ProcessorArgument(default='form')
def preprocess(self, context):
form = self.form_class(context.django_request.POST)
if not form.is_valid():
raise ViewError(code='form_errors', message=form.errors)
setattr(context, self.context_name, form)
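# ArgumentProcessor extracts a single value from exactly one source (GET, POST or the URL),
# parses it, and stores the result on the context under context_name.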
class ArgumentProcessor(BaseViewProcessor):
__slots__ = ('error_message', 'get_name', 'post_name', 'url_name', 'context_name', 'default_value', 'in_list')
ARG_CONTEXT_NAME = ProcessorArgument()
ARG_ERROR_MESSAGE = ProcessorArgument()
ARG_GET_NAME = ProcessorArgument(default=None)
ARG_POST_NAME = ProcessorArgument(default=None)
ARG_URL_NAME = ProcessorArgument(default=None)
ARG_CONTEXT_NAME = ProcessorArgument()
ARG_DEFAULT_VALUE = ProcessorArgument()
ARG_IN_LIST = ProcessorArgument(default=False)
def initialize(self):
super(ArgumentProcessor, self).initialize()
if sum((1 if self.get_name else 0,
1 if self.post_name else 0,
1 if self.url_name else 0)) != 1:
raise exceptions.SingleNameMustBeSpecifiedError()
def extract(self, context):
if self.url_name:
return context.django_url_argumens.get(self.url_name)
if self.get_name:
if self.in_list:
return context.django_request.GET.getlist(self.get_name)
else:
return context.django_request.GET.get(self.get_name)
if self.in_list:
return context.django_request.POST.getlist(self.post_name)
else:
return context.django_request.POST.get(self.post_name)
def parse(self, context, raw_value):
return raw_value
def _argument_name(self):
if self.url_name:
return self.url_name
if self.get_name:
return self.get_name
if self.post_name:
return self.post_name
def raise_not_specified(self):
raise ViewError(code='%s.not_specified' % self._argument_name(), message=self.error_message)
def raise_wrong_format(self):
raise ViewError(code='%s.wrong_format' % self._argument_name(), message=self.error_message)
def raise_wrong_value(self):
raise ViewError(code='%s.wrong_value' % self._argument_name(), message=self.error_message)
def preprocess(self, context):
raw_value = self.extract(context)
if raw_value:
value = self.parse(context, raw_value)
elif self.default_value is NotImplemented:
self.raise_not_specified()
else:
value = self.default_value
setattr(context, self.context_name, value)
class MapArgumentProcessor(ArgumentProcessor):
__slots__ = ('mapping',)
ARG_MAPPING = ProcessorArgument()
def parse(self, context, raw_value):
mapping = self.mapping if not isinstance(self.mapping, collections.Callable) else self.mapping()
if raw_value not in mapping:
self.raise_wrong_value()
return mapping.get(raw_value)
class IntArgumentProcessor(ArgumentProcessor):
def parse(self, context, raw_value):
try:
return int(raw_value)
except ValueError:
self.raise_wrong_format()
class IntsArgumentProcessor(ArgumentProcessor):
def parse(self, context, raw_value):
try:
return [int(value.strip()) for value in raw_value.split(',')]
except ValueError:
self.raise_wrong_format()
class RelationArgumentProcessor(ArgumentProcessor):
__slots__ = ('relation', 'value_type')
ARG_RELATION = ProcessorArgument()
ARG_VALUE_TYPE = ProcessorArgument(default=int)
def parse(self, context, raw_value):
from rels import exceptions as rels_exceptions
try:
value = self.value_type(raw_value)
except ValueError:
self.raise_wrong_format()
except TypeError:
self.raise_wrong_format()
try:
return self.relation(value)
except rels_exceptions.NotExternalValueError:
self.raise_wrong_value()
class DebugProcessor(BaseViewProcessor):
def preprocess(self, context):
context.debug = django_settings.DEBUG
if not context.debug:
            raise ViewError(code='common.debug_required', message='This functionality is only available in debug mode')
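# Response objects describe what to send back; complete(context) turns each of them into a
# Django HttpResponse (raw content, redirect, rendered page, Atom feed or JSON payload).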
class BaseResponse(object):
__slots__ = ('http_status',
'http_mimetype',
'http_charset',
'content')
def __init__(self,
http_mimetype,
http_status=utils_relations.HTTP_STATUS.OK,
http_charset='utf-8',
content=None):
self.http_status = http_status
self.http_mimetype = http_mimetype
self.http_charset = http_charset
self.content = content if content is not None else {}
def complete(self, context):
return django_http.HttpResponse(self.content,
status=self.http_status.value,
content_type='%s; charset=%s' % (self.http_mimetype, self.http_charset))
class Redirect(BaseResponse):
__slots__ = ('target_url', 'permanent')
def __init__(self, target_url, permanent=False, **kwargs):
super(Redirect, self).__init__(http_mimetype=None, **kwargs)
self.target_url = target_url
self.permanent = permanent
def complete(self, context):
ResponseClass = django_http.HttpResponsePermanentRedirect if self.permanent else django_http.HttpResponseRedirect
return ResponseClass(self.target_url)
class Page(BaseResponse):
__slots__ = ('template',)
def __init__(self, template, http_mimetype='text/html', **kwargs):
super(Page, self).__init__(http_mimetype=http_mimetype, **kwargs)
self.template = template
def complete(self, context):
self.content['context'] = context
self.content = utils_jinja2.render(self.template, context=self.content, request=context.django_request)
return super(Page, self).complete(context)
# TODO: refactor error/errors
class PageError(Page):
__slots__ = ('code', 'errors', 'context', 'info')
def __init__(self, code, errors, context, info=None, **kwargs):
if 'template' not in kwargs:
if context.django_request.is_ajax():
kwargs['template'] = conf.settings.DIALOG_ERROR_TEMPLATE
else:
kwargs['template'] = conf.settings.PAGE_ERROR_TEMPLATE
if isinstance(errors, str):
error = errors
else:
error = list(errors.values())[0][0]
if 'content' not in kwargs:
kwargs['content'] = {}
kwargs['content'].update({'error_code': code,
'error_message': error,
'error_info': info,
'context': context,
                                  'resource': context.resource})# TODO: remove resource (added for compatibility with old version)
super(PageError, self).__init__(**kwargs)
self.code = code
self.errors = error
self.context = context
self.info = info
class Atom(BaseResponse):
__slots__ = ('feed',)
def __init__(self, feed, http_mimetype='application/atom+xml', **kwargs):
super(Atom, self).__init__(http_mimetype=http_mimetype, **kwargs)
self.feed = feed
def complete(self, context):
self.content = self.feed.writeString(self.http_charset)
return super(Atom, self).complete(context)
class Ajax(BaseResponse):
def __init__(self, http_mimetype='application/json', **kwargs):
super(Ajax, self).__init__(http_mimetype=http_mimetype, **kwargs)
def wrap(self, content):
return content
def complete(self, context):
self.content = s11n.to_json(self.wrap(self.content))
return super(Ajax, self).complete(context)
class AjaxOk(Ajax):
def wrap(self, content):
return {'status': 'ok', 'data': content}
# TODO: refactor error/errors
class AjaxError(Ajax):
__slots__ = ('code', 'errors', 'context', 'info')
def __init__(self, code, errors, context, info=None, **kwargs):
super(AjaxError, self).__init__(**kwargs)
self.code = code
self.errors = errors
self.context = context
self.info = info
def wrap(self, context):
data = {'status': 'error',
'code': self.code,
'data': self.info}
if isinstance(self.errors, str):
data['error'] = self.errors
else:
data['errors'] = self.errors
return data
class AjaxProcessing(Ajax):
__slots__ = ('status_url',)
def __init__(self, status_url, **kwargs):
super(AjaxProcessing, self).__init__(**kwargs)
self.status_url = status_url
def wrap(self, context):
return {'status': 'processing',
'status_url': self.status_url}
class FakeResource(object):
def __init__(self, context):
self.request = context.django_request
self.account = context.account
        self.csrf = django_middleware.csrf.get_token(context.django_request)
class FakeResourceProcessor(BaseViewProcessor):
def preprocess(self, context):
context.resource = FakeResource(context)
class PageNumberProcessor(ArgumentProcessor):
CONTEXT_NAME = 'page'
    ERROR_MESSAGE = 'Invalid page number'
GET_NAME = 'page'
DEFAULT_VALUE = 0
def parse(self, context, raw_value):
try:
return max(0, int(raw_value) - 1)
except ValueError:
self.raise_wrong_format()
class TextFilterProcessor(ArgumentProcessor):
CONTEXT_NAME = 'filter'
    ERROR_MESSAGE = 'Invalid filter text'
GET_NAME = 'filter'
DEFAULT_VALUE = None
def parse(self, context, raw_value):
return raw_value
def mime_type_to_response_type(content_type):
if content_type is None:
return 'json'
if any(tp in content_type for tp in ('application/xhtml+xml', 'text/html', 'text/plain', 'text/xml')):
return 'html'
if any(tp in content_type for tp in ('application/x-javascript',)):
return 'js'
return 'json'
| 30.18
| 144
| 0.62247
| 19,085
| 0.967015
| 0
| 0
| 587
| 0.029743
| 0
| 0
| 2,010
| 0.101844
|
9a90d892378e62b46598d590087d4afcc5ce7a6c
| 269
|
py
|
Python
|
NeoAnalysis_Py2.7/NeoAnalysis/__init__.py
|
Research-lab-KUMS/NeoAnalysis
|
32b508dfade3069b1ec5cc7664574b8d3f2d5f57
|
[
"MIT"
] | 23
|
2017-09-04T13:20:38.000Z
|
2022-03-08T08:15:17.000Z
|
NeoAnalysis_Py2.7/NeoAnalysis/__init__.py
|
Research-lab-KUMS/NeoAnalysis
|
32b508dfade3069b1ec5cc7664574b8d3f2d5f57
|
[
"MIT"
] | 4
|
2018-01-05T13:44:29.000Z
|
2021-09-30T17:08:15.000Z
|
NeoAnalysis_Py2.7/NeoAnalysis/__init__.py
|
neoanalysis/NeoAnalysis
|
c5f25b71e16997f3a05f70b1eead11f99a3b7e2b
|
[
"MIT"
] | 5
|
2017-11-26T19:40:46.000Z
|
2021-03-11T17:25:23.000Z
|
__version__ = '0.10.0'
from NeoAnalysis.spikedetection import SpikeDetection
from NeoAnalysis.spikesorting import SpikeSorting
from NeoAnalysis.analogfilter import AnalogFilter
from NeoAnalysis.graphics import Graphics
from NeoAnalysis.popuanalysis import PopuAnalysis
| 38.428571
| 53
| 0.877323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.02974
|
9a9199cd090e7135e6e6634b2297f724636eb3bf
| 5,021
|
py
|
Python
|
people.py
|
sabek/Guess-who
|
91c3f527d258ec81370e3f49fa9b8d23407af3ce
|
[
"MIT"
] | null | null | null |
people.py
|
sabek/Guess-who
|
91c3f527d258ec81370e3f49fa9b8d23407af3ce
|
[
"MIT"
] | null | null | null |
people.py
|
sabek/Guess-who
|
91c3f527d258ec81370e3f49fa9b8d23407af3ce
|
[
"MIT"
] | null | null | null |
class HiddenPeople():
"""Class for holding information on people"""
def __init__(self):
self.people = {
'Paul': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'man', 'hair': 'white', 'hat': False,
'glasses': True, 'moustache': False},
'Richard': {'bald': True, 'beard': True, 'eyes': 'brown', 'gender': 'man', 'hair': 'brown',
'hat': False, 'glasses': False, 'moustache': True},
'George': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'white',
'hat': True, 'glasses': False, 'moustache': False},
'Frans': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'red', 'hat': False,
'glasses': False, 'moustache': False},
'Bernard': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'brown',
'hat': True, 'glasses': True, 'moustache': False},
'Anne': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'girl', 'hair': 'black',
'hat': False, 'glasses': False, 'moustache': False},
'Joe': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'blonde', 'hat': False,
'glasses': True, 'moustache': False},
'Peter': {'bald': False, 'beard': False, 'eyes': 'blue', 'gender': 'boy', 'hair': 'white', 'hat': False,
'glasses': False, 'moustache': False},
'Tom': {'bald': True, 'beard': False, 'eyes': 'blue', 'gender': 'boy', 'hair': 'black', 'hat': False,
'glasses': True, 'moustache': False},
'Susan': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'girl', 'hair': 'blonde',
'hat': False, 'glasses': False, 'moustache': False},
'Sam': {'bald': True, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'white', 'hat': False,
'glasses': True, 'moustache': False},
'Maria': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'girl', 'hair': 'brown',
'hat': True, 'glasses': False, 'moustache': False},
'Robert': {'bald': False, 'beard': False, 'eyes': 'blue', 'gender': 'boy', 'hair': 'brown',
'hat': False, 'glasses': False, 'moustache': False},
'Alex': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'black', 'hat': False,
'glasses': False, 'moustache': True},
'Charles': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'blonde',
'hat': False, 'glasses': False, 'moustache': True},
'Philip': {'bald': False, 'beard': True, 'eyes': 'brown', 'gender': 'boy', 'hair': 'black',
'hat': False, 'glasses': False, 'moustache': False},
'David': {'bald': False, 'beard': True, 'eyes': 'brown', 'gender': 'boy', 'hair': 'blonde',
'hat': False, 'glasses': False, 'moustache': False},
'Eric': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'blonde',
'hat': True, 'glasses': False, 'moustache': False},
'Bill': {'bald': True, 'beard': True, 'eyes': 'brown', 'gender': 'boy', 'hair': 'red',
'hat': False, 'glasses': False, 'moustache': False},
'Alfred': {'bald': False, 'beard': False, 'eyes': 'blue', 'gender': 'boy', 'hair': 'red', 'hat': False,
'glasses': False, 'moustache': True},
'Anita': {'bald': False, 'beard': False, 'eyes': 'blue', 'gender': 'girl', 'hair': 'white',
'hat': False, 'glasses': False, 'moustache': False},
'Max': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'black', 'hat': False,
'glasses': False, 'moustache': True},
'Herman': {'bald': True, 'beard': False, 'eyes': 'brown', 'gender': 'boy', 'hair': 'red', 'hat': False,
'glasses': False, 'moustache': False},
'Claire': {'bald': False, 'beard': False, 'eyes': 'brown', 'gender': 'girl', 'hair': 'red', 'hat': True,
'glasses': True, 'moustache': False}}
def removeperson(self, attribute):
"""Remove a person from listing of people to choose"""
removelist = []
for person in self.people:
if self.people[person][attribute]:
removelist.append(person)
for person in removelist:
del self.people[person]
def printpeople(self):
for person in self.people:
print(person + ": " + str(self.people[person]))
| 74.940299
| 120
| 0.466441
| 5,020
| 0.999801
| 0
| 0
| 0
| 0
| 0
| 0
| 2,113
| 0.420833
|
9a91a0bb1c2222107ec4d2fbb68724bb0b797301
| 247
|
py
|
Python
|
paperplane/backends/click/choice.py
|
abhilash1in/paperplane
|
1dfda182dc8a70fe08fa2284ea63b434246c394b
|
[
"MIT"
] | null | null | null |
paperplane/backends/click/choice.py
|
abhilash1in/paperplane
|
1dfda182dc8a70fe08fa2284ea63b434246c394b
|
[
"MIT"
] | null | null | null |
paperplane/backends/click/choice.py
|
abhilash1in/paperplane
|
1dfda182dc8a70fe08fa2284ea63b434246c394b
|
[
"MIT"
] | null | null | null |
import click
from typing import Any, Optional
from paperplane.backends.click import _prompt
def run(prompt: str, choices: list, default: Optional[Any] = None):
return _prompt(text=prompt, default=default, type=click.Choice(choices=choices))
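# Minimal usage sketch (prompt text and choice values below are illustrative only):
#   run("Pick an environment", choices=["dev", "staging", "prod"], default="dev")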
| 30.875
| 84
| 0.777328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9a95474d7bed8dc0c9bdace087bfb79423d63386
| 1,012
|
py
|
Python
|
lib/python/treadmill/api/nodeinfo.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | 2
|
2017-10-31T18:48:20.000Z
|
2018-03-04T20:35:20.000Z
|
lib/python/treadmill/api/nodeinfo.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/api/nodeinfo.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
"""Implementation of allocation API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import discovery
from treadmill import context
_LOGGER = logging.getLogger(__name__)
class API(object):
"""Treadmill Local REST api."""
def __init__(self):
def _get(hostname):
"""Get hostname nodeinfo endpoint info."""
_LOGGER.info('Redirect: %s', hostname)
discovery_iter = discovery.iterator(
context.GLOBAL.zk.conn,
'root.%s' % hostname, 'nodeinfo', False
)
for (_app, hostport) in discovery_iter:
if not hostport:
continue
_LOGGER.info('Found: %s - %s', hostname, hostport)
return hostport
_LOGGER.info('nodeinfo not found: %s', hostname)
return None
self.get = _get
| 24.095238
| 66
| 0.609684
| 699
| 0.690711
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.183794
|
9a95d81d2c4081cc80031302b6a6bfe2482c9c94
| 167
|
py
|
Python
|
new/views.py
|
Sravan996/django
|
3a982382d5cfe9bfb498534f1effcf58a3771539
|
[
"MIT"
] | null | null | null |
new/views.py
|
Sravan996/django
|
3a982382d5cfe9bfb498534f1effcf58a3771539
|
[
"MIT"
] | null | null | null |
new/views.py
|
Sravan996/django
|
3a982382d5cfe9bfb498534f1effcf58a3771539
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.shortcuts import HttpResponse
# Create your views here.
def index(request):
return HttpResponse('Hello World</en>')
| 20.875
| 41
| 0.790419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.257485
|
9a969dcb4bdc1a8eee56b110c60c1611472a3520
| 1,834
|
py
|
Python
|
bob-ross/cluster-paintings.py
|
h4ckfu/data
|
bdc02fd5051dfb31e42f8e078832ceead92f9958
|
[
"CC-BY-4.0"
] | 16,124
|
2015-01-01T06:18:12.000Z
|
2022-03-31T00:46:52.000Z
|
bob-ross/cluster-paintings.py
|
h4ckfu/data
|
bdc02fd5051dfb31e42f8e078832ceead92f9958
|
[
"CC-BY-4.0"
] | 179
|
2015-01-07T10:19:57.000Z
|
2022-02-21T21:19:14.000Z
|
bob-ross/cluster-paintings.py
|
h4ckfu/data
|
bdc02fd5051dfb31e42f8e078832ceead92f9958
|
[
"CC-BY-4.0"
] | 12,163
|
2015-01-03T14:23:36.000Z
|
2022-03-31T10:10:23.000Z
|
"""
Clusters Bob Ross paintings by features.
By Walter Hickey <walter.hickey@fivethirtyeight.com>
See http://fivethirtyeight.com/features/a-statistical-analysis-of-the-work-of-bob-ross/
"""
import numpy as np
from scipy.cluster.vq import vq, kmeans, whiten
import math
import csv
def main():
# load data into vectors of 1s and 0s for each tag
with open('elements-by-episode.csv','r') as csvfile:
reader = csv.reader(csvfile)
reader.next() # skip header
data = []
for row in reader:
data.append(map(lambda x: int(x), row[2:])) # exclude EPISODE and TITLE columns
# convert to numpy matrix
matrix = np.array(data)
# remove colums that have been tagged less than 5 times
columns_to_remove = []
for col in range(np.shape(matrix)[1]):
if sum(matrix[:,col]) <= 5:
columns_to_remove.append(col)
matrix = np.delete(matrix, columns_to_remove, axis=1)
# normalize according to stddev
whitened = whiten(matrix)
output = kmeans(whitened, 10)
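    # kmeans returns (codebook, distortion); output[0] therefore holds the 10 centroid vectors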
print "episode", "distance", "cluster"
# determine distance between each of 403 vectors and each centroid, find closest neighbor
for i, v in enumerate(whitened):
# distance between centroid 0 and feature vector
distance = math.sqrt(sum((v - output[0][0]) ** 2))
        # group is the centroid it is closest to so far, set initially to centroid 0
group = 0
closest_match = (distance, group)
# test the vector i against the 10 centroids, find nearest neighbor
for x in range (0, 10):
dist_x = math.sqrt(sum((v - output[0][x]) ** 2))
if dist_x < closest_match[0]:
closest_match = (dist_x, x)
print i+1, closest_match[0], closest_match[1]
if __name__ == "__main__":
main()
| 31.084746
| 93
| 0.640676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 745
| 0.406216
|
9a96b491ff08bc06ac888649b8beb70e3e05070b
| 880
|
py
|
Python
|
corvette/__init__.py
|
philipkiely/corvette
|
71632f9ed9d628c207c79f6f1b2ee98d911657b7
|
[
"MIT"
] | null | null | null |
corvette/__init__.py
|
philipkiely/corvette
|
71632f9ed9d628c207c79f6f1b2ee98d911657b7
|
[
"MIT"
] | null | null | null |
corvette/__init__.py
|
philipkiely/corvette
|
71632f9ed9d628c207c79f6f1b2ee98d911657b7
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
from corvette.autoindex import autoindex
def main():
if len(sys.argv) == 2:
conf_path = sys.argv[1]
else:
print("Usage: python -m corvette path/to/corvetteconf.json")
return
dirname = os.path.dirname(__file__)
# First load default conf
default_path = os.path.join(dirname, "conf.json")
default_file = open(default_path, "r")
conf = json.loads(default_file.read())
default_file.close()
# Then load user conf
conf_file = open(conf_path, "r")
user_conf = json.loads(conf_file.read())
conf_file.close()
# Override default conf with user conf
for key in conf.keys():
if key in user_conf:
conf[key] = user_conf[key]
if conf["template_dir"] == "False":
conf["template_dir"] = os.path.join(dirname, "theme/templates")
autoindex(conf)
| 30.344828
| 71
| 0.643182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 206
| 0.234091
|
9a970d49581e1f0dbf4db3373345dd1070a85ab1
| 1,965
|
py
|
Python
|
main.py
|
theoboldt/pitemp
|
366f2d1459144fa7f5e3e5526ee0a4e334f52d37
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
theoboldt/pitemp
|
366f2d1459144fa7f5e3e5526ee0a4e334f52d37
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
theoboldt/pitemp
|
366f2d1459144fa7f5e3e5526ee0a4e334f52d37
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sensor
import lcd
import csv
import time
import os
import datetime
import sys
import re
import circular_buffer
lcd.init()
last_time = datetime.datetime.now()
last_minute = last_time.minute
probe_minute_01 = circular_buffer.CircularBuffer(size=30)
probe_minute_15 = circular_buffer.CircularBuffer(size=15)
probes_minute_30 = circular_buffer.CircularBuffer(size=30)
probes_minute_60 = circular_buffer.CircularBuffer(size=60)
# initialize buffers
current_temperature = sensor.read()
probe_minute_01.append(current_temperature)
probe_minute_15.append(current_temperature)
probes_minute_30.append(current_temperature)
probes_minute_60.append(current_temperature)
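# Main loop: read the sensor roughly every 2 seconds and show the live value on the LCD;
# once per minute, append a CSV row and refresh the 60/30/15-minute rolling averages.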
while True:
try:
current_time = datetime.datetime.now()
current_minute = current_time.minute
current_temperature = sensor.read()
if current_temperature == 9999:
lcd.top("Temperature")
lcd.bottom("Failed to read")
lcd.cleanup()
sys.exit(0)
probe_minute_01.append(current_temperature)
lcd.top("{:2.1f}".format(current_temperature) + chr(223) + "C " + current_time.strftime("%H:%M:%S"))
if last_minute != current_minute:
lcd.display_init()
probe_minute_15.append(current_temperature)
probes_minute_30.append(current_temperature)
probes_minute_60.append(current_temperature)
csv.append(current_time.strftime("%s") + ";" + str(current_time) + ";" + "{:2.1f}".format(
current_temperature).replace('.', ',') + "\n")
lcd.bottom("{:2.1f}".format(probes_minute_60.average) + chr(223) + " " + "{:2.1f}".format(
probes_minute_30.average) + chr(223) + " " + "{:2.1f}".format(probe_minute_15.average) + chr(223))
time.sleep(2)
last_minute = current_minute
last_time = current_time
except KeyboardInterrupt:
lcd.cleanup()
sys.exit(0)
| 30.230769
| 110
| 0.679898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.077354
|
9a9716f606a1775600dbcfac690fb2f212514d33
| 9,988
|
py
|
Python
|
package/github-endpoints.py
|
wahyu9kdl/wahyu9kdl.github.io
|
c7c8ee1c3e7a2eb072467cb43e979ef4fc76d6fa
|
[
"MIT"
] | 2
|
2021-12-05T22:40:52.000Z
|
2022-01-17T08:48:13.000Z
|
package/github-endpoints.py
|
wahyu9kdl/wahyu9kdl.github.io
|
c7c8ee1c3e7a2eb072467cb43e979ef4fc76d6fa
|
[
"MIT"
] | 1
|
2022-01-12T13:58:28.000Z
|
2022-01-12T13:58:28.000Z
|
package/github-endpoints.py
|
wahyu9kdl/wahyu9kdl.github.io
|
c7c8ee1c3e7a2eb072467cb43e979ef4fc76d6fa
|
[
"MIT"
] | 1
|
2022-01-12T19:20:26.000Z
|
2022-01-12T19:20:26.000Z
|
#!/usr/bin/python3
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import re
import time
import requests
import random
import argparse
from urllib.parse import urlparse
from functools import partial
from colored import fg, bg, attr
from multiprocessing.dummy import Pool
TOKENS_FILE = os.path.dirname(os.path.realpath(__file__))+'/.tokens'
MIN_LENGTH = 5
_url_chars = '[a-zA-Z0-9\-\.\?\#\$&@%=_:/\]\[]'
_not_url_chars = '[^a-zA-Z0-9\-\.\?\#\$&@%=_:/\]\[]'
t_endpoints = []
t_exclude = [
r'^http://$',
r'^https://$',
r'^javascript:$',
r'^tel:$',
r'^mailto:$',
r'^text/javascript$',
r'^application/json$',
r'^application/javascript$',
r'^text/plain$',
r'^text/html$',
r'^text/x-python$',
r'^text/css$',
r'^image/png$',
r'^image/jpeg$',
r'^image/x-icon$',
r'^img/favicon.ico$',
r'^application/x-www-form-urlencoded$',
r'/Users/[0-9a-zA-Z\-\_]/Desktop',
r'www.w3.org',
r'schemas.android.com',
r'www.apple.com',
# r'^#',
# r'^\?',
# r'^javascript:',
# r'^mailto:',
]
t_regexp = [
r'[\'"\(].*(http[s]?://'+_url_chars+'*?)[\'"\)]',
r'[\'"\(](http[s]?://'+_url_chars+'+)',
r'[\'"\(]('+_url_chars+'+\.sdirect'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.htm'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.php'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.asp'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.js'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.xml'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.ini'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.conf'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.cfm'+_url_chars+'*)',
r'href\s*[.=]\s*[\'"]('+_url_chars+'+)',
r'src\s*[.=]\s*[\'"]('+_url_chars+'+)',
r'url\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'urlRoot\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'endpoint[s]\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'script[s]\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'\.ajax\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.get\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.post\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.load\s*\(\s*[\'"]('+_url_chars+'+)',
### a bit noisy
# r'[\'"](' + _url_chars + '+/' + _url_chars + '+)?[\'"]',
# r'content\s*[.=]\s*[\'"]('+_url_chars+'+)',
]
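# The patterns above extract endpoint candidates from matched source files: absolute URLs,
# paths with common extensions (.php, .js, .xml, ...) and values assigned to href/src/url-style keys.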
def githubApiSearchCode( token, search, page, sort, order ):
headers = { "Authorization":"token "+token }
url = 'https://api.github.com/search/code?per_page=100&s=' + sort + '&type=Code&o=' + order + '&q=' + search + '&page=' + str(page)
# print(">>> "+url)
try:
r = requests.get( url, headers=headers, timeout=5 )
json = r.json()
# print(r.json)
# print(r.text)
return json
except Exception as e:
print( "%s[-] error occurred: %s%s" % (fg('red'),e,attr(0)) )
return False
def getRawUrl( result ):
raw_url = result['html_url']
raw_url = raw_url.replace( 'https://github.com/', 'https://raw.githubusercontent.com/' )
raw_url = raw_url.replace( '/blob/', '/' )
return raw_url
def readCode( regexp, confirm, display_source, display_relative, display_alldomains, result ):
time.sleep( random.random() )
url = getRawUrl( result )
if url in t_history_urls:
return
str = ''
t_local_endpoints = []
t_history_urls.append( url )
code = doGetCode( url )
# print( code )
# print( regexp )
# print( confirm )
# print( display_source )
# print( display_relative )
# print( display_alldomains )
if code:
if display_source:
str = "\n%s>>> %s%s\n\n" % (fg('yellow'),result['html_url'],attr(0))
matches = re.findall( regexp, code, re.IGNORECASE )
if matches:
# domain found in the code
for r in t_regexp:
# looking for endpoints
edpt = re.findall( r, code, re.IGNORECASE )
if edpt:
# endpoints found
for endpoint in edpt:
endpoint = endpoint.strip()
if len(endpoint) >= MIN_LENGTH:
# sys.stdout.write("%s\n" % endpoint)
# continue
goodbye = False
for exclude in t_exclude:
if re.match(exclude,endpoint,re.IGNORECASE):
goodbye = True
break
if goodbye:
continue
if endpoint.lower().startswith('http'):
is_relative = False
else:
is_relative = True
if is_relative and not display_relative:
continue
if endpoint in t_local_endpoints:
continue
# ???
# if not display_source and endpoint in t_endpoints:
# continue
if not display_alldomains and not is_relative:
try:
t_url_parse = urlparse( endpoint )
t_host_parse = tldextract.extract( t_url_parse.netloc )
domain = t_host_parse.domain
# print(domain)
sss = re.findall( regexp, t_url_parse.netloc, re.IGNORECASE )
if not sss:
continue
except Exception as e:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
t_endpoints.append( endpoint )
t_local_endpoints.append( endpoint )
str = str + ("%s\n" % endpoint)
# if display_source:
# str = str + ("%s\n" % endpoint)
# else:
# sys.stdout.write( "%s\n" % endpoint )
# if display_source and len(t_local_endpoints):
if len(t_local_endpoints):
sys.stdout.write( str )
def doGetCode( url ):
try:
r = requests.get( url, timeout=5 )
except Exception as e:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
return False
return r.text
parser = argparse.ArgumentParser()
parser.add_argument( "-t","--token",help="your github token (required)" )
parser.add_argument( "-d","--domain",help="domain you are looking for (required)" )
parser.add_argument( "-e","--extend",help="also look for <dummy>example.com", action="store_true" )
parser.add_argument( "-a","--all",help="displays urls of all other domains", action="store_true" )
parser.add_argument( "-r","--relative",help="also displays relative urls", action="store_true" )
parser.add_argument( "-s","--source",help="display urls where endpoints are found", action="store_true" )
parser.add_argument( "-v","--verbose",help="verbose mode, for debugging purpose", action="store_true" )
args = parser.parse_args()
t_tokens = []
if args.token:
t_tokens = args.token.split(',')
else:
if os.path.isfile(TOKENS_FILE):
fp = open(TOKENS_FILE,'r')
t_tokens = fp.read().split("\n")
fp.close()
if not len(t_tokens):
parser.error( 'auth token is missing' )
if args.source:
_source = True
else:
_source = False
if args.domain:
_domain = args.domain
else:
parser.error( 'domain is missing' )
if args.relative:
_relative = True
else:
_relative = False
if args.all:
_alldomains = True
else:
_alldomains = False
t_sort_order = [
{ 'sort':'indexed', 'order':'desc', },
{ 'sort':'indexed', 'order':'asc', },
{ 'sort':'', 'order':'desc', }
]
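# The same search is repeated with several sort orders; each ordering tends to surface a
# different slice of matches, so combining them yields more results than a single pass.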
t_history = []
t_history_urls = []
_search = '"' + _domain + '"'
### this is a test, looks like we got more result that way
import tldextract
t_host_parse = tldextract.extract( _domain )
if args.extend:
# which one is
_search = '"' + t_host_parse.domain + '"'
else:
# the most effective ?
_search = '"' + t_host_parse.domain + '.' + t_host_parse.suffix + '"'
# or simply ?
# _search = '"' + _domain + '"'
# print(_search)
# exit()
###
if args.extend:
_regexp = r'(([0-9a-z_\-\.]+\.)?([0-9a-z_\-]+)?'+t_host_parse.domain+'([0-9a-z_\-\.]+)?\.[a-z]{1,5})'
_confirm = t_host_parse.domain
else:
_regexp = r'((([0-9a-z_\-\.]+)\.)?' + _domain.replace('.','\.')+')'
_confirm = _domain
if args.verbose:
print( "Search: %s" % _search )
print( "Regexp: %s" % _regexp)
print( "Confirm: %s" % _confirm)
print( "Relative urls: %s" % _relative)
print( "All domains: %s" % _alldomains)
for so in t_sort_order:
page = 1
if args.verbose:
print( '\n----- %s %s\n' % (so['sort'],so['order']) )
while True:
if args.verbose:
print("page %d" % page)
time.sleep( random.random() )
token = random.choice( t_tokens )
t_json = githubApiSearchCode( token, _search, page, so['sort'], so['order'] )
# print(t_json)
if not t_json or 'documentation_url' in t_json:
if args.verbose:
print(t_json)
t_tokens.remove(token)
if len(t_tokens) == 0:
exit()
continue
page = page + 1
if 'items' in t_json and len(t_json['items']):
pool = Pool( 30 )
pool.map( partial(readCode,_regexp,_confirm,_source,_relative,_alldomains), t_json['items'] )
pool.close()
pool.join()
else:
break
exit()
| 31.507886
| 135
| 0.505406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,143
| 0.314678
|
9a983eb032aad5191f1e045e13d058aec5f59848
| 7,952
|
py
|
Python
|
information111/info/user/views.py
|
SNxiaobei/text
|
637018ff89d992c2ed23f5c90fa2010023bc2ff3
|
[
"MIT"
] | null | null | null |
information111/info/user/views.py
|
SNxiaobei/text
|
637018ff89d992c2ed23f5c90fa2010023bc2ff3
|
[
"MIT"
] | null | null | null |
information111/info/user/views.py
|
SNxiaobei/text
|
637018ff89d992c2ed23f5c90fa2010023bc2ff3
|
[
"MIT"
] | null | null | null |
from flask import abort
from flask import current_app
from flask import g
from flask import request
from flask import session
from info import constants
from info import db
from info.models import Category, News, User
from info.utils.response_code import RET
from . import profile_blue
from flask import render_template,redirect,jsonify
from info.utils.common import user_login_data
from info.utils.image_storage import storage
"""
index.views: only holds the business logic for the home page
"""
"""其他用户新闻列表"""
@profile_blue.route("/other_news_list")
def other_news_list():
    # Get the page number
try:
page = int(request.args.get("p", 1))
except Exception as e:
current_app.logger.error(e)
page = 1
    # Get the user id
user_id = request.args.get("user_id")
    # Look up the user by id
user = User.query.get(user_id)
paginate = News.query.filter(News.user_id == user.id).paginate(page, 10, False)
    # Data for the current page
items = paginate.items
    # Current page number
current_page = paginate.page
# print(current_page)
    # Total number of pages
total_page = paginate.pages
news_li = []
for news in items:
news_li.append(news.to_review_dict())
data = {
"news_list": news_li,
"current_page": current_page,
"total_page": total_page
}
# return render_template("news/other.html", data=data)
return jsonify(errno=RET.OK, errmsg="OK", data=data)
"""其他用户界面"""
@profile_blue.route("/other_info")
@user_login_data
def other_info():
user = g.user
    # Get the other user's id
user_id = request.args.get("id")
if not user_id:
abort(404)
    # Query the user by id
other = User.query.get(user_id)
    # Check whether the currently logged-in user already follows this user
is_followed = False
if user:
if other.followers.filter(User.id == user.id).count() > 0:
is_followed = True
data = {
"user_info": user.to_dict() if user else None,
"other_info": other.to_dict(),
"is_followed": is_followed
}
return render_template("news/other.html", data=data)
"""我的关注"""
@profile_blue.route("/user_follow")
@user_login_data
def user_follow():
user = g.user
    # Get the page number
try:
page = int(request.args.get("p", 1))
except Exception as e:
current_app.logger.error(e)
page = 1
paginate = user.followed.paginate(page, constants.USER_FOLLOWED_MAX_COUNT, False)
    # Data for the current page
items = paginate.items
    # Current page number
current_page = paginate.page
    # Total number of pages
total_page = paginate.pages
users = []
for user in items:
users.append(user.to_dict())
data = {
"users": users,
"current_page": current_page,
"total_page": total_page
}
return render_template("news/user_follow.html", data=data)
@profile_blue.route("/news_list")
@user_login_data
def news_list():
try:
page = int(request.args.get("p", 1))
except Exception as e:
current_app.logger.error(e)
page = 1
user = g.user
paginate = News.query.filter(News.user_id == user.id).paginate(page, 2, False)
items = paginate.items
current_page = paginate.page
totle_page = paginate.pages
news_list = []
for item in items:
news_list.append(item.to_review_dict())
data = {
"current_page": current_page,
"totle_page": totle_page,
"news_list": news_list
}
return render_template("news/user_news_list.html", data=data)
@profile_blue.route("/news_release", methods=["GET","POST"])
@user_login_data
def news_release():
if request.method == "GET":
        # First fetch the news categories and pass them to the template for display
category_list = Category.query.all()
categorys = []
for category in category_list:
categorys.append(category.to_dict())
        # Drop the element at index 0 of the list
categorys.pop(0)
data = {
"categories": categorys
}
return render_template("news/user_news_release.html", data=data)
    # Read the form data submitted by the user: the news being published
title = request.form.get("title")
category_id = request.form.get("category_id")
digest = request.form.get("digest")
index_image = request.files.get("index_image")
content = request.form.get("content")
if not all([title, category_id, digest, index_image, content]):
        return jsonify(errno=RET.PARAMERR, errmsg="Invalid parameters")
user = g.user
index_image = index_image.read()
key = storage(index_image)
    # After the user submits, store the newly published news in the database
news = News()
news.title = title
news.source = "个人来源"
news.digest = digest
news.content = content
news.index_image_url = constants.QINIU_DOMIN_PREFIX+key
news.category_id = category_id
news.user_id = user.id
    # Status 1 means the news is under review
news.status = 1
db.session.add(news)
db.session.commit()
    return jsonify(errno=RET.OK, errmsg="Published successfully")
@profile_blue.route("/collection")
@user_login_data
def collection():
    # All the news the user has collected; fetching it is paginated, so start from page 1
page = request.args.get("p", 1)
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
user = g.user
    # Get the list of all news collected by the currently logged-in user
    # The first argument is the page number
    # The second argument is how many items each page holds
paginate = user.collection_news.paginate(page, 10, False)
items = paginate.items
current_page = paginate.page
total_page = paginate.pages
collections = []
for item in items:
collections.append(item.to_dict())
data = {
"collections" : collections,
"current_page": current_page,
"total_page": total_page
}
return render_template("news/user_collection.html", data = data)
@profile_blue.route("/pass_info", methods=["GET", "POST"])
@user_login_data
def pass_info():
if request.method == "GET":
return render_template("news/user_pass_info.html")
user = g.user
old_password = request.json.get("old_password")
new_password = request.json.get("new_password")
if not all([old_password, new_password]):
        return jsonify(errno=RET.PARAMERR, errmsg="Please enter a password")
    # Verify the old password; it can only be changed when the old password is correct
if not user.check_password(old_password):
        return jsonify(errno=RET.PARAMERR, errmsg="Old password is incorrect")
if old_password == new_password:
        return jsonify(errno=RET.PARAMERR, errmsg="The new password must differ from the old password")
    # If the old password is correct, write the new password to the database
user.password = new_password
db.session.commit()
    return jsonify(errno=RET.OK, errmsg="Password changed successfully")
@profile_blue.route("/pic_info",methods= ["GET","POST"])
@user_login_data
def pic_info():
user = g.user
if request.method == "GET":
data = {
"user_info": user.to_dict() if user else None
}
return render_template("news/user_pic_info.html", data=data)
avatar = request.files.get("avatar").read()
    # On a successful upload the storage service returns a URL (also called a key)
    # To view the freshly uploaded image in a browser, the image must be addressed via
    # the Qiniu domain + the returned url
    # http://oyucyko3w.bkt.clouddn.com/ + url
url = storage(avatar)
user.avatar_url = url
db.session.commit()
    return jsonify(errno = RET.OK,errmsg = "Upload successful",data={"avatar_url": constants.QINIU_DOMIN_PREFIX + url})
@profile_blue.route("/base_info",methods = ["GET","POST"])
@user_login_data
def base_info():
user = g.user
if request.method == "GET":
data = {
"user_info": user.to_dict() if user else None
}
return render_template("news/user_base_info.html",data = data)
nick_name = request.json.get("nick_name")
signature = request.json.get("signature")
gender = request.json.get("gender")
user.nick_name = nick_name
user.signature = signature
user.gender = gender
    # Update the database
db.session.commit()
    # Update the data stored in the session
session["nick_name"] = user.nick_name
    return jsonify(errno = RET.OK,errmsg = "Updated successfully")
@profile_blue.route("/info")
@user_login_data
def info():
user = g.user
if not user:
        # Redirect back to the home page
return redirect("/")
data = {
"user_info": user.to_dict() if user else None
}
return render_template("news/user.html",data = data)
| 26.774411
| 106
| 0.649774
| 0
| 0
| 0
| 0
| 8,297
| 0.934452
| 0
| 0
| 2,466
| 0.277734
|
9a9a15482e95aa7f0388513fb55229cb50c955bb
| 962
|
py
|
Python
|
code/magicsquares/mgsq/three_by_three.py
|
gerritjvv/blog
|
26dbba7b38ed7aae63467720fcac2d95da1a0d7f
|
[
"MIT"
] | null | null | null |
code/magicsquares/mgsq/three_by_three.py
|
gerritjvv/blog
|
26dbba7b38ed7aae63467720fcac2d95da1a0d7f
|
[
"MIT"
] | null | null | null |
code/magicsquares/mgsq/three_by_three.py
|
gerritjvv/blog
|
26dbba7b38ed7aae63467720fcac2d95da1a0d7f
|
[
"MIT"
] | null | null | null |
"""
Solves a 3x3 square programmatically.
It is not meant to be a full blown solution for magic squares, but rather a writeup
of my thoughts on how it can be solved.
"""
import statistics
def make_pairs(I, mid):
"""
We take pairs as [ [9, 1], [8, 2], [7, 3], [6, 4]]
:param I:
:param mid:
:return:
"""
h = 0
t = len(I) - 1
pairs = []
while h < mid-1:
pairs.append([I[h], I[t]])
h += 1
t -= 1
return pairs
def squares(n):
I = [x for x in range(1, n * n + 1)]
cols = n
mid = statistics.median(I)
print(f"I: {I}")
print(f"cols: {cols}")
print(f"mid: {mid}")
pairs = make_pairs(I, mid)
print(f"pairs: {pairs}")
    # the pairs are taken from the left and right of mid
# so that the length is mid-1
assert len(pairs) == mid-1, f"len(pairs) = {len(pairs)} mid-1 = {mid-1}"
assert len(pairs[0]) == cols-1
if __name__ == '__main__':
squares(3)
| 20.041667
| 83
| 0.546778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 467
| 0.485447
|