blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f8cac380c1cfc48a1844eb1fb9a553698a2efd91 | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/system/memory/state/__init__.py | 33757f6fb7f431bfe1878dd805c8e1ec4bdea2dc | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 7,666 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-system - based on the path /system/memory/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: Operational state data for system memory
  """
  # NOTE: auto-generated by pyangbind -- regenerate from the YANG model
  # instead of editing this file by hand.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__physical','__reserved',)
  _yang_name = 'state'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Both leaves are operational-state (is_config=False) uint64 values.
    self.__reserved = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="reserved", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)
    self.__physical = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="physical", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)
    load = kwargs.pop("load", None)
    # A single positional argument may be another object exposing the same
    # elements; any of its changed values are copied into this instance.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Path of this container in the YANG tree; when detached from a parent,
    # fall back to the absolute path.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'system', u'memory', u'state']
  def _get_physical(self):
    """
    Getter method for physical, mapped from YANG variable /system/memory/state/physical (uint64)
    YANG Description: Reports the total physical memory available on the system.
    """
    return self.__physical
  def _set_physical(self, v, load=False):
    """
    Setter method for physical, mapped from YANG variable /system/memory/state/physical (uint64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_physical is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_physical() directly.
    YANG Description: Reports the total physical memory available on the system.
    """
    # Unwrap previously-wrapped YANG values before re-validating them.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="physical", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """physical must be of a type compatible with uint64""",
          'defined-type': "uint64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="physical", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)""",
        })
    self.__physical = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_physical(self):
    # Reset the leaf back to its default (unset) wrapped value.
    self.__physical = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="physical", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)
  def _get_reserved(self):
    """
    Getter method for reserved, mapped from YANG variable /system/memory/state/reserved (uint64)
    YANG Description: Memory reserved for system use
    """
    return self.__reserved
  def _set_reserved(self, v, load=False):
    """
    Setter method for reserved, mapped from YANG variable /system/memory/state/reserved (uint64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_reserved is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_reserved() directly.
    YANG Description: Memory reserved for system use
    """
    # Unwrap previously-wrapped YANG values before re-validating them.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="reserved", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """reserved must be of a type compatible with uint64""",
          'defined-type': "uint64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="reserved", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)""",
        })
    self.__reserved = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_reserved(self):
    # Reset the leaf back to its default (unset) wrapped value.
    self.__reserved = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="reserved", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint64', is_config=False)
  # Read-only properties; mutation goes through the private _set_* methods.
  physical = __builtin__.property(_get_physical)
  reserved = __builtin__.property(_get_reserved)
  _pyangbind_elements = {'physical': physical, 'reserved': reserved, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
d72ebe9d812f208cbd0675bb8f49ca3b5b0d445a | 7473435d8af734dd38add08ea33b0b6402b35163 | /ContactList/Contacts/admin.py | aafaf0105bd064629fbcd6863a2908df50f6ae1b | [] | no_license | artj15/ContactList | 8531fd54e0811bcf3d33206c576c4a81ed160010 | 3f6699db39090fc9ec8abae88ad5817ecd813870 | refs/heads/master | 2023-05-03T11:48:06.806891 | 2021-05-28T11:46:37 | 2021-05-28T11:46:37 | 370,724,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from django.contrib import admin
from .models import ContactModel
# Register your models here.
# Exposes ContactModel in the Django admin using the default ModelAdmin options.
admin.site.register(ContactModel) | [
"artj15@gmail.com"
] | artj15@gmail.com |
4f361713286000ae69db6c734f3beb6edcf76de8 | 870254de7c4127f0e8d8f8a3bf805b8b9becece6 | /button.py | 8fdab42e1e4051188cdbb96cd392d697737fd067 | [] | no_license | jeffreymutethia/Criminal-Case | 88a8c831a6eb2644a5e9d27e5eb2cedec1a044f2 | 51724f001fb526632a413fd07f5a76cad60b3afa | refs/heads/master | 2022-11-26T07:51:16.233302 | 2020-07-30T03:52:41 | 2020-07-30T03:52:41 | 283,662,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | from graphics import *
class Button:
    """A clickable rectangular button with a centered text label."""

    def __init__(self, rect, label):
        """Initialize a new Button object.

        Parameters: rect, a Rectangle object defining the extent and
                          location of the button
                    label, a label to appear on top of the rectangle
        Preconditions:
            The rectangle is already positioned correctly relative to the
            window, and the label is centered on (0,0).
        """
        self.rect = rect
        self.label = label

    def getCenter(self):
        """Return a Point at the center of this Button's rectangle."""
        return self.rect.getCenter()

    def draw(self, win):
        """Render the button rectangle and its label in the given GraphWin."""
        self.rect.draw(win)
        # Shift the label from (0,0) onto the rectangle's center, then draw it.
        middle = self.getCenter()
        self.label.move(middle.getX(), middle.getY())
        self.label.draw(win)

    def wasClicked(self, click):
        """Return True if the given click Point lies inside this Button."""
        corner1 = self.rect.getP1()
        corner2 = self.rect.getP2()
        inside_x = corner1.getX() <= click.getX() <= corner2.getX()
        inside_y = corner1.getY() <= click.getY() <= corner2.getY()
        return inside_x and inside_y
"noreply@github.com"
] | jeffreymutethia.noreply@github.com |
e66a9808beb7dc3a08d7fb7d73b8b5e0a5b67b46 | 024452a051f15b79292f609833ea4d9ad72e34ff | /services/model.py | 90afa0070071c1096163d1b1229fefd585ab1c6d | [] | no_license | toubar/flask-app | 98b18d237311a66a9b93fad8da884b2768d51d69 | 0487b7edf94d48c09c284393e76802a95a6f96a0 | refs/heads/master | 2022-06-29T05:06:56.001254 | 2020-01-31T13:14:36 | 2020-01-31T17:03:28 | 237,433,495 | 0 | 0 | null | 2022-05-25T03:05:22 | 2020-01-31T13:15:57 | Python | UTF-8 | Python | false | false | 1,916 | py | import json
import os
from services import app
class ServicesModel():
    """In-memory model of the services list, backed by static/db.json."""

    def __init__(self):
        # Cached copy of the JSON document ({'services': [...]}).
        self.data = {}
        # Set to True after mutations; cleared once data is re-synced.
        self.is_data_updated = False

    def sync_data_with_db(self):
        """Reload the cache from disk when it is empty or marked dirty."""
        if self.data == {} or self.is_data_updated:
            self.data = self.load_data_from_db()
            self.is_data_updated = False

    def load_data_from_db(self):
        """Read and return the JSON document from static/db.json."""
        try:
            with open(os.path.join('static', 'db.json')) as file:
                self.data = json.load(file)
                return self.data
        except OSError as e:  # FileNotFoundError is already a subclass of OSError
            # BUG FIX: log the exception itself -- OSError has no `.code`
            # attribute, so the old `e.code` raised AttributeError here.
            app.logger.error(e)

    def update_service(self, service):
        """Replace the stored entry whose 'service' name matches `service`.

        Raises ValueError when no entry with that name exists (previously
        this crashed with an obscure TypeError from `del list[None]`).
        """
        service_name = service['service']
        index = self.get_service_index(service_name)
        if index is None:
            raise ValueError("unknown service: %s" % service_name)
        # Drop the old entry, then re-append the new payload.
        del self.data['services'][index]
        self.append_service(service)

    def create_service(self, service):
        """Add a brand-new service entry to the cache."""
        self.append_service(service)

    def append_service(self, service):
        """Append `service` to the cached list and mark the cache dirty."""
        self.data['services'].append(service)
        self.is_data_updated = True

    def get_service_index(self, service_name):
        """Return the list index of the named service, or None when absent."""
        for i, service in enumerate(self.data['services']):
            if service_name == service['service']:
                return i
        return None

    def persist_data_in_db(self):
        """Write the cached document back to static/db.json if modified."""
        if self.is_data_updated:
            try:
                with open(os.path.join('static', 'db.json'), 'w') as file:
                    json.dump(self.data, file, indent=2)
            except OSError as e:
                # BUG FIX: same `.code` AttributeError as above.
                app.logger.error(e)
| [
"aatoubar@gmail.com"
] | aatoubar@gmail.com |
8d09f8280f182ca1dac71680159ec9ef9b1c42b3 | 170e499229560aef5a809e6805d4153cafc78e49 | /api/urls.py | caf8366deb7207fb510d49977e375bb346dc55fe | [] | permissive | anjanadevi411/django_rest | 298176762db8057e850fae1bc454e0737ec343bc | 9f977457b49f5bf345cea20a250647e85a7c23c8 | refs/heads/master | 2023-02-21T05:30:42.033121 | 2021-01-12T11:43:03 | 2021-01-12T11:43:03 | 328,790,502 | 0 | 0 | Apache-2.0 | 2021-01-11T21:19:36 | 2021-01-11T21:01:13 | null | UTF-8 | Python | false | false | 507 | py | from django.urls import path
from .views import apioverview,apilistview,apidetailview,apicreateview,apiupdateview,apideleteview
# Route table for the task API: an overview endpoint plus list/detail and
# CRUD views. Detail/update/delete take the task primary key as a string
# path segment (<str:pk>).
urlpatterns = [
    path('',apioverview,name='api_overview'),
    path('task-list/',apilistview,name='task-list'),
    path('task-detail/<str:pk>/',apidetailview,name='task-detail'),
    path('task-create/',apicreateview,name='task-create'),
    path('task-update/<str:pk>/',apiupdateview,name='task-update'),
    path('task-delete/<str:pk>/',apideleteview,name='task-delete'),
] | [
"anjanadevi411@gmail.com"
] | anjanadevi411@gmail.com |
70f62aff2c03b86f65c3e2a846b0420550261d24 | a6944ab1c0085a377ad6690d458058d16f620cd6 | /morpheme_analysis.py | d5a1e1efb0f1243ff23456622c70bc76859eea96 | [] | no_license | Mayberry2021/cursed_comment_filter | 1d609c0aa8fe8375d464c95b7ce30f2cd0ed6584 | ea33821fa869985c52a865a3d63c5bf7cb00a932 | refs/heads/master | 2023-07-14T04:30:28.651410 | 2021-09-01T07:36:00 | 2021-09-01T07:36:00 | 401,971,235 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,637 | py | from konlpy.tag import *
class Cursed_Analysis(object):  # detects slang/curse words in comment data
    """Flags comments that contain slang words from an external word list."""

    def __init__(self, section, content_list):
        self.section = section
        self.out_list = []      # comments left over after removing cursed ones
        self.cursed_list = []   # comments flagged as containing slang
        self.malword_set = []   # loaded slang vocabulary
        self.okt = Okt()
        self.content_list = content_list

    def get_cursed_list(self):
        """Return the comments flagged as cursed."""
        return self.cursed_list

    def get_out_list(self):
        """Return the comments that were not flagged."""
        return self.out_list

    def get_malword_set(self):
        """Return the loaded slang word list."""
        print('slang_malword 데이터 반환!')
        return self.malword_set

    def malword_loader(self):
        """Load the slang vocabulary from malword.txt (one word per line)."""
        with open("malword.txt", "r", encoding="UTF-8") as file:
            for mal in file:
                self.malword_set.append(mal.rstrip('\n'))

    def _is_cursed(self, comment):
        """Return True when any morpheme of `comment` is in the slang set.

        Short-circuits on the first hit (the old code scanned and counted
        every morpheme even after a match was found).
        """
        return any(morph[0] in self.malword_set for morph in self.okt.pos(comment))

    def check_cursed(self):
        """Return the number of comments that contain at least one slang word."""
        return sum(1 for comment in self.content_list if self._is_cursed(comment))

    def make_cursed_list(self):
        """Collect every cursed comment into self.cursed_list."""
        # Shares _is_cursed with check_cursed; the old code duplicated the scan.
        for comment in self.content_list:
            if self._is_cursed(comment):
                self.cursed_list.append(comment)

    def make_out_list(self):
        """Collect every comment that is not in the cursed list."""
        for comment in self.content_list:
            if comment not in self.cursed_list:
                self.out_list.append(comment)

    def cursed_text_out(self):
        """Write the cursed comments to a per-section text file."""
        with open(f'./cursed_of_{self.section}.txt','w', encoding='utf-8') as file:
            for comment in self.cursed_list:
                file.write(str(comment) + '\n')

    def out_text_out(self):
        """Write the non-cursed comments to a per-section text file."""
        with open(f'./outlist_of_{self.section}.txt','w', encoding='utf-8') as file:
            for comment in self.out_list:
                file.write(str(comment) + '\n')

    def __del__(self):
        pass
class Polite_Analysis(object):  # honorific (polite-form) comment analysis
    """Flags comments written in Korean honorific (polite) style."""

    def __init__(self, section, content_list):
        self.section = section
        self.content_list = content_list
        self.out_list = []      # comments left over after removing polite ones
        self.polite_list = []   # comments flagged as honorific
        # Polite sentence endings, matched against ECD/EFN/EFQ/EFI tags.
        self.rpct_xr = ["습니", "ㅂ니", "요", "ㅂ시"]
        # Honorific nouns.
        self.rpct_nng_list = ["진지", "성함", "연세", "댁", "부인", "생신", "따님", "말씀"]
        # Honorific verbs.
        self.rpct_vv_list = ["계시", "드", "모시", "뵙", "뵈", "여쭈"]
        self.kkma = Kkma()

    def get_polite_list(self):
        """Return the comments flagged as honorific."""
        return self.polite_list

    def get_out_list(self):
        """Return the comments that were not flagged."""
        return self.out_list

    def rmEmoji(self, comment):
        """Strip characters (e.g. emoji) that cannot be encoded as euc-kr."""
        return comment.encode('euc-kr', 'ignore').decode('euc-kr')

    def _is_polite(self, comment):
        """Return True when `comment` contains at least one honorific morpheme.

        Centralizes the classification rules that check_polite and
        make_polite_list previously duplicated, and short-circuits on
        the first match.
        """
        cleaned = self.rmEmoji(comment)
        for word, tag in self.kkma.pos(cleaned):
            if tag in ["EPH", "EPP", "EFR"]:
                return True
            if tag in ["ECD", "EFN", "EFQ", "EFI"] and word in self.rpct_xr:
                return True
            if tag in ["JKS", "JKM"] and word in ["께", "께서"]:
                return True
            if tag in ["NP"] and word in ["저", "저희"]:
                return True
            if tag in ["VV"] and word in self.rpct_vv_list:
                return True
            if tag in ["NNG", "XR"] and word in self.rpct_nng_list:
                return True
        return False

    def check_polite(self):
        """Return the number of honorific comments in the data."""
        return sum(1 for comment in self.content_list if self._is_polite(comment))

    def make_polite_list(self):
        """Collect every honorific comment into self.polite_list."""
        for comment in self.content_list:
            if self._is_polite(comment):
                self.polite_list.append(comment)

    def make_out_list(self):
        """Collect every comment that is not in the polite list."""
        for comment in self.content_list:
            if comment not in self.polite_list:
                self.out_list.append(comment)

    def text_out(self):
        """Write the honorific comments to a per-section text file."""
        with open(f'./respect_of_{self.section}.txt','w', encoding='utf-8') as file:
            for comment in self.polite_list:
                file.write(str(comment) + '\n')

    def __del__(self):
        pass
class Ordinary_Analysis(object):  # ordinary (non-cursed, non-polite) comments
    """Holds the comments classified as ordinary and writes them to disk."""

    def __init__(self, section, content_list):
        self.section = section
        self.content_list = content_list

    def get_ordinary_list(self):
        """Return the list of ordinary comments."""
        return self.content_list

    def check_ordinary(self):
        """Return how many ordinary comments are held."""
        return len(self.content_list)

    def text_out(self):
        """Write the ordinary comments, one per line, to a per-section file."""
        lines = (str(comment) + '\n' for comment in self.content_list)
        with open(f'./ordinary/ordinary_of_{self.section}.txt','w', encoding='utf-8') as file:
            file.writelines(lines)

    def __del__(self):
        pass
class Before_Soundness(object):
    """Computes per-comment morpheme statistics: total valued morphemes,
    honorific morphemes, and slang morphemes."""

    def __init__(self):
        self.kkma = Kkma()
        self.okt = Okt()
        # POS tags counted as semantically "valued" morphemes.
        self.valued_pos = ["NNB","NNG","NP","VV","VA","MAG","IC","JKS","JKM","ECD","EPH","EPT","EPP","EFN","EFQ","EFO","EFA","EFI","EFR","XSN","XR"]
        # Polite sentence endings.
        self.rpct_xr = ["습니","ㅂ니","요","ㅂ시"]
        # Honorific nouns.
        self.rpct_nng_list = ["진지","성함","연세","댁","부인","생신","따님","말씀"]
        # Honorific verbs.
        self.rpct_vv_list = ["계시","드","모시","뵙","뵈","여쭈"]
        self.malword_set = []

    def malword_loader(self):
        """Load the slang vocabulary from malword.txt (one word per line)."""
        with open("malword.txt", "r", encoding="UTF-8") as file:
            for mal in file:
                self.malword_set.append(mal.rstrip('\n'))
        # BUG FIX: the original `while True: ... break` / `else:` put the
        # completion message in the loop's else clause, which never runs when
        # the loop exits via break -- so the message was never printed.
        print('malword load 완료!')

    def rmEmoji(self, comment):
        """Strip characters (e.g. emoji) that cannot be encoded as euc-kr."""
        return comment.encode('euc-kr', 'ignore').decode('euc-kr')

    def t_analysis(self, comment):
        """Return the number of valued morphemes in `comment` (Kkma tags)."""
        tpos = 0
        for corpus in self.kkma.pos(comment):
            if corpus[1] in self.valued_pos:
                tpos += 1
        return tpos

    def p_analysis(self, comment):
        """Return the number of honorific morphemes in `comment`."""
        ppos = 0
        for corpus in self.kkma.pos(comment):
            if corpus[1] in ["EPH", "EPP", "EFR"]:
                ppos += 1
            elif (corpus[1] in ["ECD", "EFN", "EFQ", "EFI"]) and (corpus[0] in self.rpct_xr):
                ppos += 1
            elif (corpus[1] in ["JKS", "JKM"]) and (corpus[0] in ["께"]):
                ppos += 1
            elif (corpus[1] in ["NP"]) and (corpus[0] in ["저", "저희"]):
                ppos += 1
            elif (corpus[1] in ["VV"]) and (corpus[0] in self.rpct_vv_list):
                ppos += 1
            elif (corpus[1] in ["NNG", "XR"]) and (corpus[0] in self.rpct_nng_list):
                ppos += 1
        return ppos

    def c_analysis(self, comment):
        """Return the number of slang morphemes in `comment` (Okt tags)."""
        cpos = 0
        for morphs in self.okt.pos(comment):
            if morphs[0] in self.malword_set:
                cpos += 1
        return cpos

    def text_out(self, morpheme_data):
        """Write tab-separated (total, polite, cursed) counts, one row per comment."""
        with open('morpheme_data.txt', 'w', encoding='utf-8') as file:
            for tag_count_list in morpheme_data:
                text = str(tag_count_list[0]) + '\t' + str(tag_count_list[1]) + '\t' + str(tag_count_list[2]) + '\n'
                file.write(text)

    def m_analysis(self, content_list):
        """Analyze every comment and write the per-comment counts to disk."""
        total = []
        for comment in content_list:
            r_comment = self.rmEmoji(comment)
            tag_count_list = [self.t_analysis(r_comment),
                              self.p_analysis(r_comment),
                              self.c_analysis(r_comment)]
            total.append(tag_count_list)
        self.text_out(total)

    def __del__(self):
        pass
"ground444@naver.com"
] | ground444@naver.com |
c51cc1139ba4508f4cd6d178342e6a1412e60f61 | 5b4ede4ce177e88010008015684c1ac0469ae6ec | /secondgui.py | bd334235d4f424c2b896f7bc6fd2fc130f9aed0d | [] | no_license | gusuly0rum/Cell-Detector | cf1eb0ed7f11a7484ca39268d55cac793ab2af78 | 0e4c4f5176ea503424927c2c1b302e0e893038f7 | refs/heads/master | 2020-03-30T11:13:21.563730 | 2018-10-05T17:29:04 | 2018-10-05T17:29:04 | 151,160,267 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,676 | py | # -*- coding: utf-8 -*-
import os
import sys
import cv2
import numpy
import scipy
import skimage
import scipy.misc
import skimage.morphology
from PyQt4 import QtGui,QtCore
from firstgui import Ui_MainWindow
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# cd C:\Users\sjdml\Documents\Seoul National University\User Interface Project\Python Scripts
# C:\Python27\Lib\site-packages\PyQt4\pyuic4 firstgui.ui >> firstgui.py
class Login(QtGui.QDialog):
    """Simple credential dialog shown before the main window is created."""
    def __init__(self):
        # create login window
        super(Login,self).__init__()
        self.loginWindow = QtGui.QVBoxLayout(self)
        # set login window properties
        self.setWindowTitle(' Login')
        self.setGeometry(800,300,270,170)
        self.setWindowIcon(QtGui.QIcon('C:\Users\sjdml\Documents\Seoul National University\User Interface Project\Python Scripts\snulogo.png'))
        # create widgets
        self.texteditUsername = QtGui.QLineEdit(self)
        self.texteditPassword = QtGui.QLineEdit(self)
        self.pushbuttonLogin = QtGui.QPushButton('Login',self)
        self.pushbuttonRegister = QtGui.QPushButton('Register',self)
        # set widgets on login window
        self.loginWindow.addWidget(self.texteditUsername)
        self.loginWindow.addWidget(self.texteditPassword)
        self.loginWindow.addWidget(self.pushbuttonLogin)
        self.loginWindow.addWidget(self.pushbuttonRegister)
        # set default user credentials
        self.texteditUsername.setText('melab321')
        self.texteditPassword.setText('melabmelab')
        # specify push button behavior
        self.pushbuttonLogin.clicked.connect(self.checkCredentials)
        self.pushbuttonRegister.clicked.connect(self.createAccount)
    def checkCredentials(self):
        # NOTE(review): credentials are hard-coded in source and duplicated
        # with the defaults above -- confirm this is demo-only.
        # accept() closes the dialog with QDialog.Accepted.
        if self.texteditUsername.text() == 'melab321' and self.texteditPassword.text() == 'melabmelab':
            self.accept()
    def createAccount(self):
        # placeholder -- registration is not implemented
        pass
class Main(QtGui.QMainWindow):
    def __init__(self):
        """Build the main window from the Qt Designer UI and wire it up."""
        QtGui.QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.home()
    def home(self):
        """Wire all menubar/button/combo-box signals and build the data panels.

        NOTE(review): displayIndices, displayBoundingBoxes, cellDisplay and
        menubarRunClassification are connected here but not defined in this
        chunk -- confirm they exist elsewhere in the class.
        """
        self.displayMessages(1,0,2,3,0,7)
        self.setWindowIcon(QtGui.QIcon('C:\Users\sjdml\Documents\Seoul National University\User Interface Project\Python Scripts\snulogo.png'))
        # set menubar triggers
        self.ui.menubar_about.triggered.connect(self.menubarAbout)
        self.ui.menubar_openFile.triggered.connect(self.menubarOpenFile)
        self.ui.menubar_runModule.triggered.connect(self.menubarRunModule)
        self.ui.menubar_runClassification.triggered.connect(self.menubarRunClassification)
        self.ui.menubar_restartApplication.triggered.connect(self.menubarRestartApplication)
        self.ui.menubar_saveImage.triggered.connect(self.menubarSaveImage)
        # set push button triggers
        self.ui.pushButton_loadImage.clicked.connect(self.menubarOpenFile)
        self.ui.pushButton_runDetection.clicked.connect(self.menubarRunModule)
        self.ui.pushButton_runClassification.clicked.connect(self.menubarRunClassification)
        # set comboBox triggers
        self.ui.comboBox_dataOptions.activated.connect(self.setPlotWidgetData)
        self.ui.comboBox_dataOptions.activated.connect(self.setTreeWidgetData)
        # set image display triggers
        # self.ui.label_imageDisplay.mousePressEvent = self.drawBoundingBox
        # set display indices trigger
        self.ui.checkBox_displayIndices.stateChanged.connect(self.displayIndices)
        # set display bounding boxes triggers
        self.ui.checkBox_displayBoundingBoxes.stateChanged.connect(self.displayBoundingBoxes)
        # set cell display trigger
        self.ui.spinBox_cellDisplay.valueChanged.connect(self.cellDisplay)
        # add matplotlib widget
        self.setMenuWidgetProperties()
        self.setPlotWidgetProperties()
        self.setTreeWidgetProperties()
    def setMenuWidgetProperties(self):
        """Populate the data-selection combo box: 'Summary' plus Class1..Class10."""
        self.ui.comboBox_dataOptions.addItem("Summary")
        for index in range(1,11):
            comboBoxOption = "Class" + str(index)
            self.ui.comboBox_dataOptions.addItem(comboBoxOption)
    def setPlotWidgetProperties(self):
        """Embed a matplotlib canvas in the layout and style the histogram axes."""
        # create axes instance
        self.figureInstance = Figure()
        self.canvasInstance = FigureCanvas(self.figureInstance)
        self.ui.verticalLayout.addWidget(self.canvasInstance)
        self.axesInstance = self.figureInstance.add_subplot(111)
        # set general histogram properties: classes 1..10 on the x axis,
        # minimalist spines/ticks
        self.axesInstance.set_xlim([0,11])
        self.axesInstance.set_xticks(scipy.arange(1,11,1))
        self.axesInstance.spines['top'].set_visible(False)
        self.axesInstance.spines['left'].set_visible(False)
        self.axesInstance.spines['right'].set_visible(False)
        # self.axesInstance.spines['bottom'].set_visible(False)
        self.axesInstance.tick_params(axis='both',which='both',top='off',bottom='off',labelbottom='on',left='off',right='off',labelleft='off')
        # self.setPlotWidgetData()
    def setTreeWidgetProperties(self):
        """Make the cell-data tree widget read-only."""
        self.ui.treeWidget_cellData.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        # self.setTreeWidgetData()
    def setPlotWidgetData(self):
        """Redraw the histogram for the currently selected combo-box option.

        'Summary' plots per-class cell counts; any 'ClassN' option plots
        that class's softmax distribution.
        NOTE(review): the data below is hard-coded demo data -- presumably
        to be replaced by real classifier output; confirm.
        """
        self.axesInstance.cla()
        # obtain histogram data
        self.classArray = [1,2,3,4,5,6,7,8,9,10]
        self.summaryData = [10,15,5,0,20,30,17,2,3,21]
        self.softmaxData = []
        self.softmaxData.append([0,0,0,50,100,0,0,0,0,0])
        self.softmaxData.append([0,0,70,0,0,0,90,0,0,0])
        self.softmaxData.append([0,0,100,0,0,0,0,0,0,0])
        self.softmaxData.append([0,0,100,0,0,0,0,0,0,0])
        self.softmaxData.append([0,0,0,100,0,0,0,0,0,0])
        self.softmaxData.append([0,0,0,0,0,100,0,0,0,0])
        self.softmaxData.append([0,0,0,0,0,0,0,100,0,0])
        self.softmaxData.append([0,0,0,0,0,0,0,0,100,0])
        self.softmaxData.append([0,0,0,0,0,0,0,0,0,100])
        self.softmaxData.append([0,0,0,0,0,0,0,0,0,100])
        self.softmaxData.append([0,0,0,0,0,0,0,0,0,100])
        # set option-dependent histogram properties
        if self.ui.comboBox_dataOptions.currentText() == self.ui.comboBox_dataOptions.itemText(0):
            self.axesInstance.set_ylim([0,numpy.max(self.summaryData) + 3])
            self.ui.label_xAxisTitle.setText('Frequency [n] vs Class Label [k]')
            self.ui.label_xAxisTitle.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
            self.setPlotData(self.summaryData)
        else:
            self.axesInstance.set_ylim([0,100 + 3])
            self.ui.label_xAxisTitle.setText('Softmax [%] vs Class Label [k]')
            self.ui.label_xAxisTitle.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
            menuIndex = self.ui.comboBox_dataOptions.currentIndex()
            self.setPlotData(self.softmaxData[menuIndex])
    def setPlotData(self,histogramData):
        """Draw the bar chart for `histogramData` and label each bar with its value."""
        # set histogram data
        self.axesInstance.set_xlim([0,11])
        self.axesInstance.set_xticks(scipy.arange(1,11,1))
        self.axesInstance.bar(self.classArray,histogramData,align='center')
        binPatches = self.axesInstance.patches
        # annotate each bar just above its top
        for binPatch,frequencyValue in zip(binPatches,histogramData):
            height = binPatch.get_height()
            self.axesInstance.text(binPatch.get_x() + binPatch.get_width()/2 , height + 1 , frequencyValue , ha='center' , va='bottom' , fontSize=10)
        self.canvasInstance.draw()
    def setTreeWidgetData(self):
        """Rebuild the tree's headers and rows for the current combo-box option."""
        self.ui.treeWidget_cellData.clear()
        for element in self.classArray:
            QtGui.QTreeWidgetItem(self.ui.treeWidget_cellData).setText(0,str(element))
        # set option-dependent tree properties
        if self.ui.comboBox_dataOptions.currentText() == self.ui.comboBox_dataOptions.itemText(0):
            headerArray = ['Class [k]','Cell Count [n]','Proportion [%]']
            self.ui.treeWidget_cellData.setHeaderLabels(headerArray)
            self.ui.treeWidget_cellData.setColumnCount(len(headerArray))
            self.ui.treeWidget_cellData.setColumnWidth(0,160)
            self.ui.treeWidget_cellData.setColumnWidth(1,160)
            self.ui.treeWidget_cellData.setColumnWidth(2,150)
            self.setTreeData(self.summaryData)
        else:
            headerArray = ['Class [k]','Softmax Probability [%]']
            self.ui.treeWidget_cellData.setHeaderLabels(headerArray)
            self.ui.treeWidget_cellData.setColumnCount(len(headerArray))
            self.ui.treeWidget_cellData.setColumnWidth(0,200)
            self.ui.treeWidget_cellData.setColumnWidth(1,200)
            menuIndex = self.ui.comboBox_dataOptions.currentIndex()
            self.setTreeData(self.softmaxData[menuIndex])
    def setTreeData(self,histogramData):
        """Fill the tree rows: counts + percentages for Summary, raw values otherwise."""
        self.ui.treeWidget_cellData.clear()
        # set option-dependent tree properties
        if self.ui.comboBox_dataOptions.currentText() == self.ui.comboBox_dataOptions.itemText(0):
            # float() so the proportion below uses true division
            totalCellCount = float(numpy.sum(histogramData))
            for index,element in enumerate(histogramData):
                treeItemInstance = QtGui.QTreeWidgetItem(self.ui.treeWidget_cellData)
                treeItemInstance.setText(0,str(index + 1))
                treeItemInstance.setText(1,str(element))
                treeItemInstance.setText(2,str(numpy.round(element/totalCellCount*100,1)))
        else:
            for index,element in enumerate(histogramData):
                treeItemInstance = QtGui.QTreeWidgetItem(self.ui.treeWidget_cellData)
                treeItemInstance.setText(0,str(index + 1))
                treeItemInstance.setText(1,str(element))
    def menubarAbout(self):
        """Open the lab homepage in the default web browser."""
        import webbrowser
        webbrowser.open("http://melab.snu.ac.kr/melab/doku.php")
    def menubarOpenFile(self):
        """Prompt for an image file, display it with a red border, set up pens.

        Stores self.filename / self.pixmap_image for later detection steps
        and derives the sub-image save directory.
        """
        from PyQt4.QtGui import QFileDialog
        os.chdir("C:\Users\sjdml\Documents\Seoul National University\User Interface Project\Data\WBC135")
        self.filename = QFileDialog.getOpenFileName(self,'Open File')
        if self.filename:
            # convert file to pixmap
            self.displayMessages(0,6)
            self.pixmap_image = QtGui.QPixmap(self.filename)
            self.imageWidth = self.pixmap_image.size().width()
            self.imageHeight = self.pixmap_image.size().height()
            # inset (px) of the red border from the image edges
            changer = 70
            # set pen properties (border, centroid markers, boxes, indices)
            self.penRedBorder = QtGui.QPen(QtCore.Qt.red)
            self.penRedBorder.setWidth(3)
            self.penCentroid = QtGui.QPen(QtCore.Qt.yellow)
            self.penCentroid.setCapStyle(QtCore.Qt.RoundCap)
            self.penCentroid.setWidth(15)
            self.penBoundingBox = QtGui.QPen(QtCore.Qt.yellow)
            self.penBoundingBox.setWidth(2)
            self.penIndex = QtGui.QPen(QtCore.Qt.yellow)
            self.penIndex.setWidth(2)
            # set red border
            self.painterInstance = QtGui.QPainter(self.pixmap_image)
            self.painterInstance.setPen(self.penRedBorder)
            self.painterInstance.drawRect(changer , changer , self.imageWidth - 2*changer , self.imageHeight - 2*changer)
            # set label properties
            self.ui.label_imageDisplay.setPixmap(self.pixmap_image)
            self.ui.label_imageDisplay.setAlignment(QtCore.Qt.AlignCenter)
            self.ui.label_imageDisplay.setScaledContents(True)
            self.ui.label_imageDisplay.setMinimumSize(1,1)
            self.ui.label_imageDisplay.show()
            # set current path label
            self.ui.textBrowser_currentPath.setText(self.filename)
            self.imagesaveDirectory()
        else:
            return
def menubarSaveImage(self):
from PyQt4.QtGui import QFileDialog
os.chdir("C:\Users\sjdml\Documents\Seoul National University\User Interface Project\Data\WBC135")
# self.filename = QFileDialog.getSaveFolderName(self,'Save File')
self.foldername = str(QFileDialog.getExistingDirectory(self,'Image Save Path')) + "\\"
if self.foldername:
self.ui.textBrowser_imagePath.setText(self.foldername)
self.displayMessages(0,9)
else:
return
def imagesaveDirectory(self):
self.foldername = os.path.abspath(os.path.join(str(self.filename),os.pardir))
os.chdir(self.foldername)
if not os.path.exists(str(self.foldername) + '\subimages'):
self.foldername += '\subimages\\'
os.makedirs(self.foldername)
else:
self.foldername += '\subimages\\'
os.chdir(self.foldername)
self.ui.textBrowser_imagePath.setText(self.foldername)
self.displayMessages(9)
    def menubarRestartApplication(self):
        # TODO: not implemented -- placeholder menu handler.
        pass
def displayMessages(self,*index):
status_messages = ["",
"Welcome to CellPy!",
"Seoul National University,",
"Medical Electronics Laboratory",
">> Attribute Error: Image file was not assigned to instance variable",
">> Running cell detection...",
">> Image uploaded",
"-"*170,
"Cell detection complete",
"Image save directory specified",
">> Attribute Error: Image save directory was not assigned to instance variable",
"Cell images saved"]
for index,element in enumerate(index):
self.ui.textBrowser_status.append(status_messages[element])
def menubarRunModule(self):
if not hasattr(self,'filename'):
self.displayMessages(0,4)
elif not hasattr(self,'foldername'):
self.displayMessages(0,10)
else:
self.displayMessages(0,5)
self.detectCells()
def detectCells(self):
## Function for Binary Filling
def fillHoles(input_image):
output_image = input_image.copy()
output_image = output_image.astype(numpy.uint8)
output_image,contourArray,hierarchy = cv2.findContours(output_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for contour in contourArray:
cv2.drawContours(output_image,[contour],0,255,-1)
return output_image
## Function for Image Saving
def shapeConvert(input_image,desired_size,minRow,minCol,maxRow,maxCol,minRowNorm=0,minColNorm=0):
# current sizes
sizeXCurrent = (minRow + changer) + ((maxRow + changer) - (minRow + changer)) - (minRow + changer)
sizeYCurrent = (minCol + changer) + ((maxCol + changer) - (minCol + changer)) - (minCol + changer)
sizeXChange = (desired_size - sizeXCurrent)/2.0
sizeYChange = (desired_size - sizeYCurrent)/2.0
# new sizes
sizeXFrom = (minRow + minRowNorm + changer) - int(numpy.ceil(sizeXChange))
sizeXTrom = (minRow + minRowNorm + changer) + ((maxRow + changer) - (minRow + changer)) + int(numpy.floor(sizeXChange))
sizeYFrom = (minCol + minColNorm + changer) - int(numpy.ceil(sizeYChange))
sizeYTrom = (minCol + minColNorm + changer) + ((maxCol + changer) - (minCol + changer)) + int(numpy.floor(sizeYChange))
# new image
outputImage = input_image.copy()
outputImage = outputImage[sizeXFrom:sizeXTrom, sizeYFrom:sizeYTrom]
return outputImage
## Color Space Conversion
# BGR to RGB
imageBGR = cv2.imread(str(self.filename)).astype(numpy.uint8)
imageRGB = cv2.cvtColor(imageBGR,cv2.COLOR_BGR2RGB)
# Crop RGB
changer = 70
imagesizeX,imagesizeY,imagesizeZ = imageRGB.shape
redborder = [1 + changer , 1 + changer , imagesizeX - 2*changer , imagesizeY - 2*changer]
imageRGBCrop = imageRGB[redborder[1]:redborder[1] + redborder[2] + 1 , redborder[0]:redborder[0] + redborder[3] + 1]
# Cropped RGB to HSV
imageHSV = cv2.cvtColor(imageRGBCrop,cv2.COLOR_RGB2HSV)
imageHSV = imageHSV[:,:,2]
# Cropped RGB to Lab (CIE)
imageLAB = cv2.cvtColor(imageRGBCrop,cv2.COLOR_RGB2Lab)
imageLAB = imageLAB[:,:,2]
# Cropped HSV/Lab to Binary
thresholdValue,imageBinary = cv2.threshold(imageLAB,0,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
imageLabel = skimage.measure.label(imageBinary)
propsArray = skimage.measure.regionprops(imageLabel)
self.indicesArray = []
self.boundingBoxArray = []
self.imageSaveArray = []
## Detection
numCells = 0
## Loop Over Each Object
for index in range(len(propsArray)):
# Crop Individual Objects
imageIndivRGB = imageRGBCrop.copy()
imageIndivBinary = imageBinary.copy()
imageIndivBinary[imageLabel != index + 1] = 0
minRow,minCol,maxRow,maxCol = propsArray[index].bbox
imageIndivRGB = imageIndivRGB[minRow:minRow + (maxRow - minRow) , minCol:minCol + (maxCol - minCol)]
imageIndivBinary = imageIndivBinary[minRow:minRow + (maxRow - minRow) , minCol:minCol + (maxCol - minCol)]
## Ignore Small Objects and Keep Normal Objects
if propsArray[index].area < 5000 and propsArray[index].area > 1500:
# Count Normal Cells
numCells += 1
# Binary Operations
imageFillHole = fillHoles(imageIndivBinary)
squareKernel = numpy.ones([3,3],numpy.uint8)
imageOpen = cv2.morphologyEx(imageFillHole , cv2.MORPH_OPEN , squareKernel , iterations=5)
imageDilate = cv2.dilate(imageOpen , squareKernel , iterations=1)
# Plot Centroids
propsArrayNot = skimage.measure.regionprops(imageDilate)
xPos,yPos = propsArrayNot[0].centroid
self.painterInstance.setPen(self.penCentroid)
self.painterInstance.drawPoint(yPos + minCol + changer , xPos + minRow + changer)
self.painterInstance.setPen(self.penIndex)
self.painterInstance.setFont(QtGui.QFont('Decorative',20))
# self.painterInstance.drawText(yPos + minCol + changer , xPos + minRow + changer , str(numCells))
self.ui.label_imageDisplay.setPixmap(self.pixmap_image)
# Resize Individual Cells for Saving
imageSave = shapeConvert(imageRGB,148,minRow,minCol,maxRow,maxCol)
self.imageSaveArray[numCells] = imageSave
scipy.misc.imsave(self.foldername + str(numCells) + '.jpg',imageSave)
# append bounding box dimensions to instance variable array
self.boundingBoxArray.append([minRow,minCol,maxRow,maxCol])
self.indicesArray.append([yPos + minCol + changer , xPos + minRow + changer])
## Split Large Objects
elif propsArray[index].area > 5000:
# Obtain Boundaries
imageBinaryBoundaries = imageIndivBinary - cv2.erode(imageIndivBinary , None , iterations=1)
# Distance Transform
imageDistancemap = cv2.distanceTransform(imageIndivBinary,cv2.DIST_L2,5)
# Obtain Markers
thresholdValue, imageBinaryMarkers = cv2.threshold(imageDistancemap , 0.5*imageDistancemap.max() , 255 , 0)
imageLabelMarkers = skimage.measure.label(imageBinaryMarkers)
imageLabelMarkers = imageLabelMarkers + 1
imageLabelMarkers[imageBinaryBoundaries == 255] = 0
# Marker-Based Watershed Transform
imageWatershed = cv2.watershed(imageIndivRGB,numpy.int32(imageLabelMarkers))
imageWatershed[imageWatershed == 1] = 0
# Plot Centroids
propsArrayWater = skimage.measure.regionprops(imageWatershed)
for subIndex in range(0,len(propsArrayWater)):
if propsArrayWater[subIndex].area > 100:
numCells += 1
xPos,yPos = propsArrayWater[subIndex].centroid
self.painterInstance.setPen(self.penCentroid)
self.painterInstance.drawPoint(yPos + minCol + changer , xPos + minRow + changer)
self.painterInstance.setPen(self.penIndex)
self.painterInstance.setFont(QtGui.QFont('Decorative',20))
# self.painterInstance.drawText(yPos + minCol + changer , xPos + minRow + changer , str(numCells))
self.ui.label_imageDisplay.setPixmap(self.pixmap_image)
# Resize Individual Cells for Saving
minRowWater,minColWater,maxRowWater,maxColWater = propsArrayWater[subIndex].bbox
imageSave = shapeConvert(imageRGB,148,minRowWater,minColWater,maxRowWater,maxColWater,minRow,minCol)
self.imageSaveArray[numCells] = imageSave
scipy.misc.imsave(self.foldername + str(numCells) + '.jpg',imageSave)
# append bounding box dimensions to instance variable array
self.boundingBoxArray.append([minRowWater + minRow , minColWater + minCol , maxRowWater + minRow , maxColWater + minCol])
self.indicesArray.append([yPos + minCol + changer , xPos + minRow + changer])
# Display message
self.displayMessages(8,11)
self.setPlotWidgetData()
self.setTreeWidgetData()
def displayIndices(self):
if self.ui.checkBox_displayIndices.isChecked():
for index in range(len(self.indicesArray)):
indicesList = self.indicesArray[index]
minRow = indicesList[1]
minCol = indicesList[0]
self.painterInstance.setPen(self.penIndex)
self.painterInstance.drawText(minCol + 5 , minRow - 5 , str(index))
self.ui.label_imageDisplay.setPixmap(self.pixmap_image)
else:
pass
def displayBoundingBoxes(self):
if self.ui.checkBox_displayBoundingBoxes.isChecked():
changer = 70
for index in range(len(self.boundingBoxArray)):
boundingBoxList = self.boundingBoxArray[index]
minRow = boundingBoxList[0]
minCol = boundingBoxList[1]
maxRow = boundingBoxList[2]
maxCol = boundingBoxList[3]
self.painterInstance.setPen(self.penBoundingBox)
self.painterInstance.drawRect(minCol + changer , minRow + changer , maxRow - minRow , maxCol - minCol)
self.ui.label_imageDisplay.setPixmap(self.pixmap_image)
else:
pass
# self.painterInstance.end()
def cellDisplay(self):
cellImage = self.imageSaveArray[0]
self.pixmap_imageSave = QtGui.QImage(cellImage,148,148,QtGui.QImage.Format_RGB32)
self.painterInstance = QtGui.QPainter(self.pixmap_imageSave)
# set label properties
self.ui.label_cellDisplay.setPixmap()
self.ui.label_cellDisplay.setAlignment(QtCore.Qt.AlignCenter)
self.ui.label_cellDisplay.setScaledContents(True)
self.ui.label_cellDisplay.setMinimumSize(1,1)
self.ui.label_cellDisplay.show()
# def mousePressEvent(self,QMouseEvent):
# return QMouseEvent.pos()
#
# def mouseReleaseEvent(self,QMouseEvent):
# cursor = QtGui.QCursor()
# return cursor.pos()
#
# def drawBoundingBox(self,event):
# print self.ui.mousePressEvent(event)
# print self.ui.mouseReleaseEvent(event)
    def menubarRunClassification(self):
        # TODO: not implemented -- the name suggests it will classify the
        # saved cell crops, but no behavior exists yet.
        pass
if __name__ == '__main__':
    # Show the login dialog first; only launch the main window (and enter the
    # Qt event loop) when the login is accepted.
    mainApplication = QtGui.QApplication(sys.argv)
    loginApplication = Login()
    if loginApplication.exec_() == QtGui.QDialog.Accepted:
        windowFigure = Main()
        windowFigure.move(200,30)
        windowFigure.show()
        sys.exit(mainApplication.exec_())
# if __name__ == '__main__':
# application = QtGui.QApplication(sys.argv)
# windowFigure = Main()
# windowFigure.move(200,30)
# windowFigure.show()
# sys.exit(application.exec_())
# cd C:\Users\sjdml\Documents\Seoul National University\User Interface Project\Python Scripts
# C:\Python27\Lib\site-packages\PyQt4\pyuic4 firstgui.ui >> firstgui.py
| [
"donguk.kim1112@gmail.com"
] | donguk.kim1112@gmail.com |
5d92f4a9b72dd9145bcf5251d83af919743f4e0d | 4109b21fbda46fe9f8346bc9596500ec1e3e7ca6 | /danceschool/payments/payatdoor/management/commands/setup_payatdoor.py | 94d457e48cc94cabc3a31ea4e88ee25a3c0f5fbe | [
"BSD-3-Clause"
] | permissive | django-danceschool/django-danceschool | fff6aa740c5e4d832198ff7cdd0fe548380892cb | 19db3e83e76ea2002ee841989410d12d1e601023 | refs/heads/master | 2023-09-04T13:02:54.207590 | 2023-07-10T12:28:13 | 2023-07-10T12:28:13 | 89,738,184 | 40 | 21 | BSD-3-Clause | 2023-07-10T12:28:15 | 2017-04-28T19:26:33 | JavaScript | UTF-8 | Python | false | false | 4,390 | py | from django.core.management.base import BaseCommand
from django.apps import apps
from django.conf import settings
from six.moves import input
try:
import readline
except ImportError:
pass
class Command(BaseCommand):
    """Interactive setup for the at-the-door payments CMS placeholders.

    Walks the operator through adding (a) the staff-facing door-payment form
    and (b) the student-facing "will pay at the door" checkbox, each as a CMS
    plugin on a static placeholder.
    """
    help = 'Create necessary placeholders for customers to elect to pay at the door.'
    def boolean_input(self, question, default=None):
        '''
        Prompt for a yes/no answer; empty input returns ``default`` (when one
        is given), otherwise re-prompt until the reply starts with y/n.
        '''
        result = input("%s: " % question)
        if not result and default is not None:
            return default
        while len(result) < 1 or result[0].lower() not in "yn":
            result = input("Please answer yes or no: ")
        return result[0].lower() == "y"
    def handle(self, *args, **options):
        from cms.api import add_plugin
        from cms.models import Page, StaticPlaceholder
        # Language for the plugins we create: first configured language,
        # falling back to LANGUAGE_CODE, then 'en'.
        try:
            initial_language = settings.LANGUAGES[0][0]
        except IndexError:
            initial_language = getattr(settings, 'LANGUAGE_CODE', 'en')
        # Do some sanity checks to ensure that necessary apps are listed in INSTALLED_APPS
        # before proceeding
        required_apps = [
            ('cms', 'Django CMS'),
            ('danceschool.core', 'Core danceschool app'),
            ('danceschool.payments.payatdoor', 'At-the-door payments app'),
        ]
        for this_app in required_apps:
            if not apps.is_installed(this_app[0]):
                self.stdout.write(
                    self.style.ERROR(
                        ('ERROR: %s is not installed or listed ' % this_app[1]) +
                        'in INSTALLED_APPS. Please install before proceeding.'
                    )
                )
                return None
        self.stdout.write(
            """
CHECKING AT-THE-DOOR PAYMENTS INTEGRATION
-----------------------------------------
            """
        )
        add_payatdoor = self.boolean_input('Add form for staff members to record payments at the door [Y/n]', True)
        if add_payatdoor:
            # get_or_create returns (object, created); add the plugin to both
            # the draft and the public side of the static placeholder.
            payatdoor_sp = StaticPlaceholder.objects.get_or_create(code='registration_payatdoor_placeholder')
            payatdoor_p_draft = payatdoor_sp[0].draft
            payatdoor_p_public = payatdoor_sp[0].public
            if payatdoor_p_public.get_plugins().filter(plugin_type='PayAtDoorFormPlugin').exists():
                self.stdout.write('At-the-door payment processing form already present.')
            else:
                add_plugin(
                    payatdoor_p_draft, 'PayAtDoorFormPlugin', initial_language,
                )
                add_plugin(
                    payatdoor_p_public, 'PayAtDoorFormPlugin', initial_language,
                )
                self.stdout.write('At-the-door payment processing form added.')
        add_willpayatdoor = self.boolean_input(
            'Add At-the-door payments checkbox to the registration ' +
            'summary view to allow students to elect to pay at the door [Y/n]',
            True
        )
        if add_willpayatdoor:
            # The checkbox plugin needs a published home page to redirect to.
            home_page = Page.objects.filter(is_home=True, publisher_is_draft=False).first()
            if not home_page:
                self.stdout.write(self.style.ERROR(
                    'Cannot add at-the-door payments checkbox because a ' +
                    'home page has not yet been set.'
                ))
            else:
                payment_sp = StaticPlaceholder.objects.get_or_create(code='registration_payment_placeholder')
                payment_p_draft = payment_sp[0].draft
                payment_p_public = payment_sp[0].public
                if payment_p_public.get_plugins().filter(plugin_type='WillPayAtDoorFormPlugin').exists():
                    self.stdout.write('At-the-door payments checkbox already present.')
                else:
                    add_plugin(
                        payment_p_draft, 'WillPayAtDoorFormPlugin', initial_language,
                        successPage=home_page,
                    )
                    add_plugin(
                        payment_p_public, 'WillPayAtDoorFormPlugin', initial_language,
                        successPage=home_page,
                    )
                    self.stdout.write('At-the-door payments checkbox added.')
| [
"lee.c.tucker@gmail.com"
] | lee.c.tucker@gmail.com |
505b4486cd8ddd1afeaa6d0a4c2f3fc85d666f87 | 654fc9ea8681801f758ea0ba319fba8592b46b14 | /Intro_to_computer_science/MITx6.00.1x/test.py | b5819bcddcaeb2e6bc60eedc94b56fd05510d3d9 | [] | no_license | nnard1616/OSSU | 1184b159e18b7b54c3d9fd779af27f2bc4fef41b | 9fa18f61f3bf00acf9f12bdca1a2a05525516513 | refs/heads/master | 2018-10-09T23:40:28.088909 | 2018-09-21T23:07:38 | 2018-09-21T23:07:38 | 107,591,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | import time
def modSwapSort(L):
    """ Attempted in-place sort of a list of integers, printing every swap.

    Unlike swapSort, the inner index j ranges over the WHOLE list, so swaps
    are also performed for j < i, which can move small elements backwards.
    NOTE(review): as a result the final list is not guaranteed to be sorted
    (e.g. [2, 1] ends up [2, 1]); this appears to be the pedagogical point
    of comparing it with swapSort below.
    """
    print("Original L: ", L)
    for i in range(len(L)):
        for j in range(len(L)):
            if L[j] < L[i]:
                # the next line is a short
                # form for swap L[i] and L[j]
                L[j], L[i] = L[i], L[j]
                print(L)
    print("Final L: ", L)
def swapSort(L):
    """ Sort a list of integers in place, ascending, printing every swap.

    Selection-sort variant: for each i, compare with every later j and swap
    whenever L[j] < L[i], so L[i] ends as the minimum of the suffix.
    """
    print("Original L: ", L)
    for i in range(len(L)):
        for j in range(i+1, len(L)):
            if L[j] < L[i]:
                # the next line is a short
                # form for swap L[i] and L[j]
                L[j], L[i] = L[i], L[j]
                print(L)
    print("Final L: ", L)
l = [9,8,7,88,7,56,43,243,53,1,3,54,65]

start_time1 = time.time()
modSwapSort(l)
start_time2 = time.time()
swapSort(l)
end_time = time.time()
# BUGFIX: the first figure previously subtracted start_time1 from a timestamp
# taken AFTER swapSort finished, so it reported the combined runtime of both
# sorts instead of modSwapSort's alone.
# NOTE(review): swapSort runs on the list as modSwapSort left it, so the two
# timings are not measured over identical input -- confirm that is intended.
print(start_time2 - start_time1, end_time - start_time2)
| [
"nnard1616@gmail.com"
] | nnard1616@gmail.com |
cde19223c0bff8023180e808099d3685ebfb65e4 | 75d464dffa00457ff13f4c964a923b20b5e2dcf8 | /recursion/test_factorial.py | 3ffa3ee5c35cb6a754547a23b25157d6ba6ceb12 | [] | no_license | davidkarban/engeto_hackaton_11_2016 | dfe8016a0200b4448982627ee42e483beb91d213 | ec640730f522403c6510894698a2255b455ba1e5 | refs/heads/master | 2020-07-03T05:10:44.908514 | 2016-11-19T14:24:02 | 2016-11-19T14:24:02 | 74,197,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from factorial import *
import pytest
def test_factorial():
    """Spot-check factorial against two known values."""
    assert factorial(5) == 120
    assert factorial(1) == 1
| [
"david@karban.eu"
] | david@karban.eu |
30c8e4ced36e1d733bd208e93c210ffcb695e9df | d93c68f84f5818d93514999edc4be143339fd075 | /src/sea_ice/model_5.py | 73e3560c009b7c378800485b119ec15122ebee0a | [] | no_license | chengzheng2013/PolSAR_ML | 74df1d2e95c4c2a1e94883ecf3711eda14bcf09d | 839120b21547797894d1cd90567a4b347b0aaef4 | refs/heads/master | 2022-01-13T09:41:16.550009 | 2019-06-11T02:47:34 | 2019-06-11T02:47:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
from keras.models import Model, Sequential, load_model
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Layer, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Activation, Reshape, Permute
# Number of segmentation classes (the decoder's final layer has 2 filters).
n_labels = 2
# Conv kernel sizes and the zero-padding amounts that keep the matching
# 'valid' convolutions size-neutral: (2,2) pads for 5x5, (1,1) pads for 3x3.
kernel = [(5,5), (3,3)]
zero_padding = [(2,2), (1,1)]
def get_encoder(img_h, img_w, dim):
    """Return the encoder layer list for an (img_h, img_w, dim) input:
    two conv blocks (8 then 16 filters, each conv batch-normalized), each
    block ending in 2x2 max-pooling."""
    return [
        Conv2D(8, kernel[1], activation='relu', padding='same',
            input_shape=(img_h,img_w,dim)),
        BatchNormalization(),
        ZeroPadding2D(padding=zero_padding[1]),
        Conv2D(8, kernel[1], activation='relu', padding='valid'), #"valid" means "no padding"
        BatchNormalization(),
        MaxPooling2D((2, 2), padding='same'),
        ZeroPadding2D(padding=zero_padding[0]),
        Conv2D(16, kernel[0], activation='relu', padding='valid'),
        BatchNormalization(),
        ZeroPadding2D(padding=zero_padding[0]),
        Conv2D(16, kernel[0], activation='relu', padding='valid'),
        BatchNormalization(),
        MaxPooling2D((2, 2), padding='same'),
        ]
def get_decoder():
    """Return the decoder layer list: mirrors the encoder (16- then 8-filter
    conv blocks, each preceded by 2x2 upsampling) and ends in a 1x1 conv with
    2 softmax filters producing the per-pixel class scores."""
    return [
        UpSampling2D((2, 2)),
        ZeroPadding2D(padding=zero_padding[0]),
        Conv2D(16, kernel[0], activation='relu', padding='valid'),
        BatchNormalization(),
        ZeroPadding2D(padding=zero_padding[0]),
        Conv2D(16, kernel[0], activation='relu', padding='valid'),
        BatchNormalization(),
        UpSampling2D((2, 2)),
        ZeroPadding2D(padding=zero_padding[1]),
        Conv2D(8, kernel[1], activation='relu', padding='valid'),
        BatchNormalization(),
        ZeroPadding2D(padding=zero_padding[1]),
        Conv2D(8, kernel[1], activation='relu', padding='valid'),
        BatchNormalization(),
        # connect to label
        #Conv2D(2, (1, 1), activation='sigmoid', padding='valid'),
        Conv2D(2, (1, 1), activation='softmax', padding='valid'),
        ]
def create_model(img_h, img_w, dim):
    """Assemble the full encoder-decoder network as a Sequential model."""
    model = Sequential()
    for layer in get_encoder(img_h, img_w, dim) + get_decoder():
        model.add(layer)
    return model
"r05942013@ntu.edu.tw"
] | r05942013@ntu.edu.tw |
2abff508d2c2f78ca14131d7ba737658b772373a | b9209cd80c523344c84cd80013ec96c435457570 | /spyder/plugins/pylint/tests/test_pylint_config_dialog.py | 7260af04b3734e6307224074a4b2ca49d6eb3d89 | [
"LGPL-3.0-or-later",
"OFL-1.1",
"LGPL-2.1-or-later",
"CC-BY-2.5",
"CC-BY-3.0",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"CC-BY-4.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"Python-2.0",
"MIT"
] | permissive | hellcolik/spyder | ee84469411f32bb6490fae8c069af3cf14b6a0d9 | 9ceede4587996732c19271e4e2f170b820d258a3 | refs/heads/master | 2020-09-17T04:16:53.449452 | 2019-11-24T16:14:20 | 2019-11-24T16:14:20 | 223,984,690 | 1 | 0 | MIT | 2019-11-25T15:46:06 | 2019-11-25T15:46:06 | null | UTF-8 | Python | false | false | 1,162 | py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# ----------------------------------------------------------------------------
"""Tests for plugin config dialog."""
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
from qtpy.QtCore import Signal, QObject
import pytest
# Local imports
from spyder.plugins.pylint.plugin import Pylint
from spyder.preferences.tests.conftest import config_dialog
class MainWindowMock(QObject):
    """Minimal stand-in for Spyder's main window used by the config_dialog fixture."""
    sig_editor_focus_changed = Signal(str)
    def __init__(self):
        super(MainWindowMock, self).__init__(None)
        # Mocked editor whose focus-changed signal is backed by the real Qt
        # signal declared above, so signal connections still work.
        self.editor = Mock()
        self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed
@pytest.mark.parametrize(
    'config_dialog',
    # [[MainWindowMock, [ConfigPlugins], [Plugins]]]
    [[MainWindowMock, [], [Pylint]]],
    indirect=True)
def test_config_dialog(config_dialog):
    """Smoke test: the Pylint preferences page builds and saves without error."""
    configpage = config_dialog.get_page()
    configpage.save_to_conf()
    assert configpage
| [
"goanpeca@gmail.com"
] | goanpeca@gmail.com |
663d74de8570ccaa26a25075686c049f6a724e4d | 58678a87164d01f8555118874d86eab2d9cdef68 | /ptutils/net/regularizer.py | 71f6d10a8e96de400d1da2726ce2ca8d0129b6b3 | [
"MIT"
] | permissive | alexandonian/ptutils | f9b7d20972d1f2d4b2bd5b983ccc214ad78b8115 | bd86d3bae34e32213a97c77fe6e8adfb937bee50 | refs/heads/master | 2021-01-01T06:56:17.543300 | 2017-08-16T04:00:25 | 2017-08-16T04:00:25 | 97,552,227 | 1 | 0 | null | 2017-07-18T04:21:04 | 2017-07-18T04:21:04 | null | UTF-8 | Python | false | false | 880 | py |
from ptutils.base import Module
class Regularizer(Module):
    """Base class for parameter regularizers.

    The base implementation applies no penalty; subclasses override
    ``__call__`` to return an actual regularization term.
    """
    def __call__(self, x):
        # No penalty by default.
        return 0.
    @classmethod
    def from_config(cls, config):
        """Instantiate a regularizer from a dict of constructor kwargs."""
        return cls(**config)
class L1L2(Regularizer):
    """Regularizer for L1 and L2 regularization.

    # Arguments
        l1: Float; L1 regularization factor.
        l2: Float; L2 regularization factor.

    NOTE(review): this class uses a Keras-style backend alias ``K``
    (``K.cast_to_floatx``, ``K.sum``, ...) that is never imported in this
    module, so instantiating it raises NameError as-is -- confirm the
    intended backend import.
    """
    def __init__(self, l1=0., l2=0.):
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
    def __call__(self, x):
        # Sum of the enabled penalties over all elements of x.
        regularization = 0.
        if self.l1:
            regularization += K.sum(self.l1 * K.abs(x))
        if self.l2:
            regularization += K.sum(self.l2 * K.square(x))
        return regularization
    def get_config(self):
        """Return the serializable configuration (round-trips via from_config)."""
        return {'l1': float(self.l1),
                'l2': float(self.l2)}
"alexandonian@gmail.com"
] | alexandonian@gmail.com |
c9c0950a5ee41f1d0b22364a3b67feb62e6eb27b | 61324a3e229ba61efcd790248ec8d38e02bb97d2 | /contest_abc/154/c.py | 669c74fb5c55010173fecd45be290b20a5036c3c | [] | no_license | komtaki/atCoder | 3efd3e71955865fd083f0b9757e561df2ec2eb87 | ad20c5242aa7453a217ddba09981823e02865f45 | refs/heads/master | 2020-08-06T19:37:25.894580 | 2020-08-02T12:23:05 | 2020-08-02T12:23:05 | 213,126,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def resolve():
N = int(input())
A = list(map(int, input().split()))
unique = len(list(set(A)))
if len(A) == unique:
print('YES')
return
print('NO')
resolve()
| [
"takoreset@gmail.com"
] | takoreset@gmail.com |
8e6d4e640243b5c5a7d11ef408914e9fd078b4fb | 1c4e6b34386e9917efdce2d404e70f3f2ba7d25a | /Django_intro/courses/courses/urls.py | 7943b64e605b59157e79e7ac2f8a556b09538271 | [] | no_license | viperman88/Dojo_Flask_Work | 7bd001fe2cd0c48a52b31705161c46d38185d214 | 9ed4f5d47c0c26e06f58abd60e310d6d6161a2c9 | refs/heads/master | 2021-01-23T21:13:51.237860 | 2017-10-21T18:05:55 | 2017-10-21T18:05:55 | 102,888,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | """courses URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Everything else is delegated to the classes app's URLconf.
    url(r'^', include('apps.classes.urls'))
]
| [
"robertamato@Roberts-MBP.fios-router.home"
] | robertamato@Roberts-MBP.fios-router.home |
7662de8695c94bd1028a05392d8c787e84ab8f5d | 7721dd535cbc5870350c51b5f3e54acd4621c358 | /manage.py | 044669b675c05b412533ddd377ff825b370c48aa | [] | no_license | simofane4/rtmplive3 | e5bbdc3b0b0fab26586ee0aeb7b877649e185276 | ed871a1a376f590e30ccd28adc0653f628037c2a | refs/heads/master | 2023-08-15T17:10:25.093080 | 2021-09-13T22:57:06 | 2021-09-13T22:57:06 | 405,119,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the settings module and hand the CLI arguments to Django."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rtmplive.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"simofane4@gmail.com"
] | simofane4@gmail.com |
9e39711c2660c86d1b3c5e2d66704bad8dcce7d8 | 143304bc4931317792a6a07127d4947bcfa73f3a | /whatmobile_scrapper.py | afce287b42f43773d4f8791836973ca776c234ed | [] | no_license | Mobinfo-devs/mobinfo-webscrapper | 0767922554a908bf107b36ffda9cb5d634407743 | f50361106ae42cf15af5037c3bb6bd6d29daf21f | refs/heads/main | 2023-08-12T07:52:46.050306 | 2021-09-15T11:51:29 | 2021-09-15T11:51:29 | 380,277,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py |
def whatmobile_scraper(url):
    """Scrape one WhatMobile phone page and append its specs to MobileDB.csv.

    NOTE(review): depends on the page's current HTML layout
    ('specs_box_inner' cells, a 'Price' token in the <title>) and assumes
    MobileDB.csv already has a header row -- fragile if either changes.
    """
    # HTTP client
    import requests
    # HTML parsing
    from bs4 import BeautifulSoup
    #URL where all data is stored
    url_mobile = url
    #for getting request for page
    page=requests.get(url_mobile)
    #soup will contain whole html file
    soup=BeautifulSoup(page.content,'html.parser')
    #for writing file
    import csv
    #print(soup) # its for printing whole xml file
    # Spec table cells alternate label, value, label, value, ...
    specs=soup.find_all('td',class_='specs_box_inner')
    # Phone name: the <title> words before the 'Price' token.
    name=soup.title.text.strip().split(' ')
    index=name.index('Price')
    name=name[0:index]
    name=' '.join(name)
    #for adding specifications
    l=[]
    for i in specs:
        l.append(i.text.strip())
    head=['Name']+l[0::2]
    # NOTE(review): head.pop() moves the LAST spec label into the value row
    # and replaces it with a 'Ratings' header -- confirm that label really
    # holds the ratings text on the live site.
    ans=[name]+l[1::2] +[head.pop()]
    head.append('Ratings')
    # print(head,ans)
    # Keep only the columns listed in fixed_coulmns, via a dict keyed by header.
    dic={}
    fixed_coulmns=['Name', 'OS','UI', 'Weight', 'Colors', 'CPU', 'Chipset', 'Technology', 'Size', 'Resolution', 'Extra Features', 'Built-in', 'Main', 'Features', 'Front', 'Sensors', 'Capacity', 'Price in Rs.', 'Ratings']
    for i in range(len(head)):
        if head[i] in fixed_coulmns:
            dic[head[i]] = ans[i]
    #again make it list
    head=list(dic.keys())
    ans=list(dic.values())
    #
    # #first time for saving (writes the header row)
    # with open('MobileDB.csv','w',encoding='UTF8') as f:
    #     writer=csv.writer(f)
    #     writer.writerow(head)
    #     writer.writerow(ans)
    # other times: append the data row only
    with open('MobileDB.csv','a',encoding='UTF8') as f:
        writer=csv.writer(f)
        writer.writerow(ans)
#main
# url='https://m.whatmobile.com.pk/Samsung_Galaxy-Z-Fold-2'
# whatmobile_scraper(url)
# Drive the scraper over every URL listed (one per line) in links.txt.
with open('links.txt', 'r') as f:
    for link in f:
        whatmobile_scraper(link.strip())
| [
"noreply@github.com"
] | Mobinfo-devs.noreply@github.com |
0d17274f896b12341c4462bfda812bcedfdbf963 | 4f5c637442fbcaa7c8e722373a2dea0141cd25b9 | /classwork_8/classwork_8 lambdaCapitalize.py | 5780da34ba3d259ae70c39a2cb0c2a2787ad2df2 | [] | no_license | rishit03/PathaPadha-Python-DS-p-1- | 96b2bc7f81383c56eda73c7c37208e321dfd2667 | 3c59594b66ff132096db4efb0bf80a15c0a1d52b | refs/heads/main | 2023-07-30T12:55:27.847910 | 2021-09-21T18:34:05 | 2021-09-21T18:34:05 | 401,348,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | lst = ["apple","mango","cherry"]
capital = map(lambda x:x.capitalize(),lst)
print(list(capital))
| [
"noreply@github.com"
] | rishit03.noreply@github.com |
f145a32b7f6ae055db544945f1c5f9aec83646c1 | 97a2a4bd42ede2333553a61e79b59f4dd543635e | /pybmp/bmp.py | 40258ed84c02fe8e5d9d847ff23bb3a182de54df | [
"MIT"
] | permissive | dethrophes/pybmp | cf5ceade6259a633c3063781d41782e07387277c | 97366f153075eb55c44fdafae717871ba2b95a03 | refs/heads/master | 2023-03-24T02:10:02.415604 | 2016-11-05T12:59:12 | 2016-11-05T12:59:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''bmp
BITMAPFILEHEADER
BITMAPINFOHEADER
'''
import sys
def BMP(*args, **kwargs):
    """Stub BMP factory: accepts any arguments and currently yields no object."""
    return None
| [
"sanjo_nemu@yahoo.co.jp"
] | sanjo_nemu@yahoo.co.jp |
abf6293f86b63517dc34ea287a731b7b884c2e03 | e30615966198a49af29d55342e9f6a9a49783549 | /clientArea/config.py | 6af16742f11d65de88d32cc35680c50b9e27cbe8 | [] | no_license | mmasterenko/lancer | 05f59de1fe4887acebbe39aac417c396a75b1f81 | fec0966e9f56d62a5a9e110e82ebb414fec5442d | refs/heads/master | 2020-12-24T18:42:40.796549 | 2016-04-16T08:21:09 | 2016-04-16T08:21:09 | 57,143,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class ClientAreaConfig(AppConfig):
    """App configuration for the client-area (personal account) Django app."""
    # Python path of the application this config describes.
    name = 'clientArea'
    # Human-readable name shown in the admin (Russian for "Personal account").
    verbose_name = u'Личный кабинет'
| [
"mmasterenko@gmail.com"
] | mmasterenko@gmail.com |
bd23fb0675631543a7d42fa543cfd004ac0c5ea3 | fedb95b78f004d89faa563e58d63fc569c616ae9 | /code/utils/ur_send_string_command.py | 1749b97a3583e439309f1a6668f38c857a1842a9 | [
"BSD-3-Clause"
] | permissive | dti-research/SenseActExperiments | 988d74a80d036e9c738db1b1cd421c23b3ccaa1b | 319171293f6d8270ec00b9fc05e4b9ef43da9675 | refs/heads/master | 2020-05-07T20:19:59.237513 | 2019-11-06T02:19:01 | 2019-11-06T02:19:01 | 180,852,269 | 1 | 1 | BSD-3-Clause | 2019-07-04T16:20:33 | 2019-04-11T18:16:32 | Python | UTF-8 | Python | false | false | 1,131 | py | import time
import socket
import argparse
# Setup argparser: all three arguments are mandatory.
parser = argparse.ArgumentParser(description='Sends a string (URScript) command to the UR',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-ip", "--robot-ip",
                    dest="robot_ip",
                    help="robot ip",
                    metavar="192.168.1.100",
                    required=True)
# Port is received as a string and converted with int() at connect time.
parser.add_argument("-p", "--robot-port",
                    dest="robot_port",
                    help="robot port",
                    metavar="29999",
                    required=True)
# The raw command string to send; a newline terminator is appended later.
parser.add_argument("-c", "--command",
                    dest="command",
                    type=str,
                    required=True)
args = parser.parse_args()
if __name__ == "__main__":
    # Connect to the robot's dashboard interface and save the log
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((args.robot_ip, int(args.robot_port)))
    # Read (up to 100 bytes of) the welcome banner the interface sends first.
    print(s.recv(100))
    # Commands must be newline-terminated ASCII.
    s.send((args.command+"\n").encode('ascii'))
    # Brief pause to give the controller time to produce its response.
    time.sleep(0.1)
    print(s.recv(100))
    s.close()
"nily@teknologisk.dk"
] | nily@teknologisk.dk |
eafbe8558b51fd8cf71c834a0c170edcba263c15 | 39ac10a3c5bf702280bffef2c4164bf207fe9405 | /hangman.py | 3f9dd23d2caac25d9260ea70f8bfd6d01f0d982f | [] | no_license | Sweept/p_game | 008aa135dbd04d3b2361995c005a6447a08aa6d4 | 38cbbbdbd00f74cb6d08d9bfa030ad0341a3f84a | refs/heads/master | 2022-11-29T22:44:41.611285 | 2020-08-10T21:10:15 | 2020-08-10T21:10:15 | 274,228,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,597 | py | import random
HANGMAN_PICS = ['''
+---+
|
|
|
===''', '''
+---+
O |
|
|
===''', '''
+---+
O |
| |
|
===''', '''
+---+
O |
/| |
|
===''', '''
+---+
O |
/|\ |
|
===''', '''
+---+
O |
/|\ |
/ |
===''', '''
+---+
O |
/|\ |
/ \ |
===''']
words = 'ant baboon badger bat bear beaver camel cat clam cobra cougar coyote crow deer dog donkey duck eagle ferret fox frog goat goose hawk lion lizard llama mole monkey moose mouse mule newt otter owl panda parrot pigeon python rabbit ram rat raven rhino salmon seal shark sheep skunk sloth snake spider stork swan tiger toad trout turkey turtle weasel whale wolf wombat zebra'
def getRandomWord(wordList):
    """Return one word chosen uniformly at random from a string of
    space-separated words."""
    # random.choice replaces the manual randint/index pattern.
    return random.choice(wordList.split())
def displayBoard(missedLetters, correctLetters, secretWord):
    """Print the gallows stage, the missed letters, and the secret word with
    unguessed letters shown as underscores."""
    print(HANGMAN_PICS[len(missedLetters)], "\n")
    print('Missed letters:', end=' ')
    for letter in missedLetters:
        print(letter, end=' ')
    print()
    # Reveal only the letters that have been guessed correctly.
    revealed = [ch if ch in correctLetters else '_' for ch in secretWord]
    for letter in revealed:
        print(letter, end=' ')
    print()
def getGuess(alreadyGuessed):
    """Prompt until the player types one new letter; return it lowercased."""
    while True:
        print("Guess a letter")
        letter = input().lower()
        if len(letter) != 1:
            print("Please enter a single letter.")
        elif letter in alreadyGuessed:
            print("You have already guessed that letter")
        elif letter not in 'abcdefghijklmnopqrstuvwxyz':
            print("Please enter a letter")
        else:
            return letter
def playAgain():
    """Ask whether to start another round; True on any answer starting with 'y'."""
    print("Do you want to play again? Y or N")
    answer = input().lower()
    return answer.startswith("y")
print("H A N G M A N")
missedLetters = ''   # wrong guesses made so far
correctLetters = ''  # right guesses made so far
secretWord = getRandomWord(words)
gameIsDone = False

# Main game loop: one iteration per guess; restarts state on replay.
while True:
    displayBoard(missedLetters, correctLetters, secretWord)
    # Let the player enter a letter
    guess = getGuess(missedLetters + correctLetters)
    if guess in secretWord:
        correctLetters = correctLetters + guess
        # Check if the player has won
        foundAllLetters = True
        for i in range(len(secretWord)):
            if secretWord[i] not in correctLetters:
                foundAllLetters = False
                break
        if foundAllLetters:
            print("Correct, the word is {}. You've won!".format(secretWord))
            gameIsDone = True
    else:
        missedLetters = missedLetters + guess
        # Check if player has guessed too many times and lost
        # (len(HANGMAN_PICS) - 1 wrong guesses draws the full hangman).
        if len(missedLetters) == len(HANGMAN_PICS) - 1:
            displayBoard(missedLetters, correctLetters, secretWord)
            print("You have run out of guesses!\nAfter {} missed guesses and {} correct guesses, the word was {}".format(
                str(len(missedLetters)), str(len(correctLetters)), secretWord))
            gameIsDone = True
    # Ask the player if they want to play again if the game is ended
    if gameIsDone:
        if playAgain():
            missedLetters = ''
            correctLetters = ''
            secretWord = getRandomWord(words)
            gameIsDone = False
        else:
            print("Thanks for playing Hangman")
| [
"emersonsridhar@gmail.com"
] | emersonsridhar@gmail.com |
d39c5d234d64ed189ca92ba9ef324647ce15eca4 | 36c31f8e7fe8b8bc942613cd6b35b6b88b40a5f0 | /app.py | 36770b68fe0b5dab71893a3fba8235853a838eaf | [] | no_license | meceledon/API_rest_t2 | 9ec8226ad19b4a71692f1ec064160a228bdac3a8 | 49c17d5040b9aafd1e43cc489898afa89dd198ef | refs/heads/main | 2023-04-16T20:57:58.909829 | 2021-05-03T16:55:40 | 2021-05-03T16:55:40 | 362,610,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,640 | py | from flask import Flask
from flask_restful import Api, Resource, reqparse, abort, fields, marshal_with
from flask_sqlalchemy import SQLAlchemy
from base64 import b64encode
# Flask application and REST API setup; storage is a local SQLite file.
app = Flask(__name__)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
db = SQLAlchemy(app)

# Base URL used to build the hypermedia links stored on each record.
BASE = "https://api-rest-t2.herokuapp.com/"
class ArtistModel(db.Model):
    # Primary key: base64 of the artist name, truncated to 22 chars (see Artists.post).
    id = db.Column(db.String, primary_key=True)
    name = db.Column(db.String, nullable=False)
    age = db.Column(db.Integer, nullable=False)
    # Hypermedia links: albums/tracks collection URLs and the record's own URL.
    albums = db.Column(db.String)
    tracks = db.Column(db.String)
    self_a = db.Column(db.String)
class AlbumModel(db.Model):
    # Primary key: base64("<name>:<artist_id>") truncated to 22 chars (see ArtistAlbums.post).
    id = db.Column(db.String, primary_key=True)
    artist_id = db.Column(db.String, nullable=False)
    name = db.Column(db.String, nullable=False)
    genre = db.Column(db.String, nullable=False)
    # Hypermedia links: owning artist URL, tracks collection URL, own URL.
    artist = db.Column(db.String)
    tracks = db.Column(db.String)
    self_a = db.Column(db.String)
class TrackModel(db.Model):
    # Primary key: base64("<name>:<album_id>") truncated to 22 chars (see AlbumTracks.post).
    id = db.Column(db.String, primary_key=True)
    album_id = db.Column(db.String, nullable=False)
    name = db.Column(db.String, nullable=False)
    duration = db.Column(db.Float, nullable=False)
    times_played = db.Column(db.Integer, default=0)
    # Hypermedia links: owning artist URL, owning album URL, own URL.
    artist = db.Column(db.String)
    album = db.Column(db.String)
    self_a = db.Column(db.String)
artists_post_args = reqparse.RequestParser()
artists_post_args.add_argument("name", type=str, help="Falta el nombre del artista", required=True)
artists_post_args.add_argument("age", type=int, help="Falta la edad del artista", required=True)
albums_post_args = reqparse.RequestParser()
albums_post_args.add_argument("name", type=str, help="Falta el nombre del album", required=True)
albums_post_args.add_argument("genre", type=str, help="Falta el genero del album", required=True)
tracks_post_args = reqparse.RequestParser()
tracks_post_args.add_argument("name", type=str, help="Falta el nombre de la canción", required=True)
tracks_post_args.add_argument("duration", type=float, help="Falta la duracion de la canción", required=True)
'''
def abort_if_artist_doesnt_exist(artist_id):
if artist_id not in artists:
abort(404, message="El artista no existe")
'''
resource_fields_artists = {
'id': fields.String,
'name': fields.String,
'age': fields.Integer,
'albums': fields.String,
'tracks': fields.String,
'self_a': fields.String
}
resource_fields_albums = {
'id': fields.String,
'artist_id': fields.String,
'name': fields.String,
'genre': fields.String,
'artist': fields.String,
'tracks': fields.String,
'self_a': fields.String
}
resource_fields_track = {
'id': fields.String,
'album_id': fields.String,
'name': fields.String,
'duration': fields.Float,
'times_played': fields.Integer,
'artist': fields.String,
'album': fields.String,
'self_a': fields.String
}
class Artist(Resource):
    """Single-artist endpoints: GET and DELETE on /artists/<artist_id>."""

    @marshal_with(resource_fields_artists)
    def get(self, artist_id):
        """Return the artist with the given id, or 404 if unknown."""
        result = ArtistModel.query.filter_by(id=artist_id).first()
        if not result:
            abort(404, message="No existe un Artista con ese ID")
        return result, 200

    def delete(self, artist_id):
        """Delete the artist together with all of its albums and tracks."""
        result = ArtistModel.query.filter_by(id=artist_id).first()
        if not result:
            abort(404, message="No existe un Artista con ese ID")
        # Manual cascade: remove the artist's albums.
        for album in AlbumModel.query.filter_by(artist_id=artist_id).all():
            db.session.delete(album)
        # Tracks store the FULL artist URL (BASE + "artists/<id>", as set in
        # AlbumTracks.post and queried in ArtistTracks.get). The original
        # filter omitted BASE and therefore never matched, leaving orphan
        # tracks behind after an artist delete.
        artist_url = BASE + f"artists/{artist_id}"
        for track in TrackModel.query.filter_by(artist=artist_url).all():
            db.session.delete(track)
        db.session.delete(result)
        db.session.commit()
        return '', 204
class Artists(Resource):
    """Artist collection: GET lists every artist, POST creates one."""

    @marshal_with(resource_fields_artists)
    def get(self):
        """Return all artists; 404 if the table is empty."""
        result = ArtistModel.query.all()
        if not result:
            abort(404, message="No hay artistas en la base de datos")
        return result, 200

    @marshal_with(resource_fields_artists)
    def post(self):
        """Create an artist; 409 with the existing record if the id collides."""
        args = artists_post_args.parse_args()
        artist_name = args['name']
        # Artist id: base64 of the name, truncated to 22 characters.
        # (Debug prints that dumped the raw id to stdout on every request
        # have been removed.)
        encoded = b64encode(artist_name.encode()).decode('utf-8')[:22]
        result = ArtistModel.query.filter_by(id=encoded).first()
        if result:
            return result, 409
        artist_albums = BASE + f"artists/{encoded}/albums"
        artist_tracks = BASE + f"artists/{encoded}/tracks"
        self_artist = BASE + f"artists/{encoded}"
        artist = ArtistModel(id=encoded, name=artist_name, age=args['age'], albums=artist_albums, tracks=artist_tracks,
                             self_a=self_artist)
        db.session.add(artist)
        db.session.commit()
        return artist, 201
class ArtistAlbums(Resource):
    """Album collection of one artist: GET (list) and POST (create)."""

    @marshal_with(resource_fields_albums)
    def get(self, artist_id):
        # 404 if the artist itself is unknown; an empty album list is valid.
        result1 = ArtistModel.query.filter_by(id=artist_id).first()
        if not result1:
            abort(404, message="No existe un Artista con ese ID")
        result = AlbumModel.query.filter_by(artist_id=artist_id).all()
        return result, 200

    @marshal_with(resource_fields_albums)
    def post(self, artist_id):
        result1 = ArtistModel.query.filter_by(id=artist_id).first()
        if not result1:
            abort(422, message="No existe un Artista con ese ID")
        args = albums_post_args.parse_args()
        # Album id: base64("<name>:<artist_id>") truncated to 22 characters.
        pre_codificado = args['name'] + ":" + artist_id
        encoded = b64encode(pre_codificado.encode()).decode('utf-8')[:22]
        result2 = AlbumModel.query.filter_by(id=encoded).first()
        if result2:
            # Duplicate: return the existing record with 409 Conflict.
            return result2, 409
        album_artist = BASE + f"artists/{artist_id}"
        album_tracks = BASE + f"albums/{encoded}/tracks"
        self_album = BASE + f"albums/{encoded}"
        album = AlbumModel(id=encoded, artist_id=artist_id, name=args['name'], genre=args['genre'],
                           artist=album_artist, tracks=album_tracks, self_a=self_album)
        db.session.add(album)
        db.session.commit()
        return album, 201
class ArtistAlbumsPlay(Resource):
    """PUT /artists/<id>/albums/play — bump play counts of all the artist's tracks."""

    @marshal_with(resource_fields_track)
    def put(self, artist_id):
        result = ArtistModel.query.filter_by(id=artist_id).first()
        if not result:
            abort(404, message="No existe un Artista con ese ID")
        # Tracks reference their artist through the full hypermedia URL.
        artist = BASE + f"artists/{artist_id}"
        result1 = TrackModel.query.filter_by(artist=artist).all()
        for t in result1:
            t.times_played += 1
        db.session.commit()
        # NOTE(review): marshal_with over a plain string yields an all-None
        # track object, so this confirmation message is effectively discarded.
        return 'Se reproducieron todas las canciones del artista', 200
class ArtistTracks(Resource):
    """GET /artists/<id>/tracks — every track belonging to the artist."""

    @marshal_with(resource_fields_track)
    def get(self, artist_id):
        result1 = ArtistModel.query.filter_by(id=artist_id).first()
        if not result1:
            abort(404, message="No existe un Artista con ese ID")
        # Tracks reference their artist through the full hypermedia URL.
        artist = BASE + f"artists/{artist_id}"
        result = TrackModel.query.filter_by(artist=artist).all()
        return result, 200
class Albums(Resource):
    """GET /albums — every album in the database (possibly an empty list)."""

    @marshal_with(resource_fields_albums)
    def get(self):
        all_albums = AlbumModel.query.all()
        return all_albums, 200
class Album(Resource):
    """Single-album endpoints: GET and DELETE on /albums/<album_id>."""

    @marshal_with(resource_fields_albums)
    def get(self, album_id):
        result = AlbumModel.query.filter_by(id=album_id).first()
        if not result:
            abort(404, message="No existe un album con ese ID")
        return result, 200

    def delete(self, album_id):
        """Delete the album and, in cascade, all of its tracks."""
        result = AlbumModel.query.filter_by(id=album_id).first()
        if not result:
            abort(404, message="No existe un album con ese ID")
        result2 = TrackModel.query.filter_by(album_id=album_id).all()
        for t in result2:
            db.session.delete(t)
        db.session.delete(result)
        db.session.commit()
        return '', 204
class AlbumTracks(Resource):
    """Track collection of one album: GET (list) and POST (create)."""

    @marshal_with(resource_fields_track)
    def get(self, album_id):
        result1 = AlbumModel.query.filter_by(id=album_id).first()
        if not result1:
            abort(404, message="No existe un album con ese ID")
        result = TrackModel.query.filter_by(album_id=album_id).all()
        return result, 200

    @marshal_with(resource_fields_track)
    def post(self, album_id):
        result1 = AlbumModel.query.filter_by(id=album_id).first()
        if not result1:
            abort(422, message="No existe un album con ese ID")
        args = tracks_post_args.parse_args()
        # Track id: base64("<name>:<album_id>") truncated to 22 characters.
        pre_codificado = args['name'] + ":" + album_id
        encoded = b64encode(pre_codificado.encode()).decode('utf-8')[:22]
        result2 = TrackModel.query.filter_by(id=encoded).first()
        if result2:
            # Duplicate: return the existing record with 409 Conflict.
            return result2, 409
        track_artist = BASE + f"artists/{result1.artist_id}"
        track_album = BASE + f"albums/{album_id}"
        self_track = BASE + f"tracks/{encoded}"
        track = TrackModel(id=encoded, album_id=album_id, name=args['name'], duration=args['duration'],
                           times_played=0, artist=track_artist, album=track_album, self_a=self_track)
        db.session.add(track)
        db.session.commit()
        return track, 201
class AlbumTracksPlay(Resource):
    """PUT /albums/<id>/tracks/play — bump play counts of all the album's tracks."""

    @marshal_with(resource_fields_track)
    def put(self, album_id):
        result1 = AlbumModel.query.filter_by(id=album_id).first()
        if not result1:
            abort(404, message="No existe un album con ese ID")
        result = TrackModel.query.filter_by(album_id=album_id).all()
        for t in result:
            t.times_played += 1
        db.session.commit()
        # NOTE(review): marshal_with over a plain string yields an all-None
        # track object, so this confirmation message is effectively discarded.
        return 'Se reproducieron todas las canciones del album', 200
class Tracks(Resource):
    """GET /tracks — every track in the database (possibly an empty list)."""

    @marshal_with(resource_fields_track)
    def get(self):
        all_tracks = TrackModel.query.all()
        return all_tracks, 200
class Track(Resource):
    """Single-track endpoints: GET and DELETE on /tracks/<track_id>."""

    @marshal_with(resource_fields_track)
    def get(self, track_id):
        result = TrackModel.query.filter_by(id=track_id).first()
        if not result:
            abort(404, message="No existe una canción con ese ID")
        return result, 200

    def delete(self, track_id):
        result = TrackModel.query.filter_by(id=track_id).first()
        if not result:
            abort(404, message="No existe una canción con ese ID")
        db.session.delete(result)
        db.session.commit()
        return '', 204
class TrackPlay(Resource):
    """PUT /tracks/<id>/play — increment the track's play counter."""

    @marshal_with(resource_fields_track)
    def put(self, track_id):
        result = TrackModel.query.filter_by(id=track_id).first()
        if not result:
            abort(404, message="No existe una canción con ese ID")
        result.times_played += 1
        db.session.commit()
        # NOTE(review): marshal_with over a plain string yields an all-None
        # track object, so this confirmation message is effectively discarded.
        return 'se reprodujo la cancion', 200
# URL routing table: one Resource class per endpoint pattern.
api.add_resource(Artists, "/artists")
api.add_resource(Artist, "/artists/<string:artist_id>")
api.add_resource(ArtistAlbums, "/artists/<string:artist_id>/albums")
api.add_resource(ArtistAlbumsPlay, "/artists/<string:artist_id>/albums/play")
api.add_resource(ArtistTracks, "/artists/<string:artist_id>/tracks")
api.add_resource(Albums, "/albums")
api.add_resource(Album, "/albums/<string:album_id>")
api.add_resource(AlbumTracks, "/albums/<string:album_id>/tracks")
api.add_resource(AlbumTracksPlay, "/albums/<string:album_id>/tracks/play")
api.add_resource(Tracks, "/tracks")
api.add_resource(Track, "/tracks/<string:track_id>")
api.add_resource(TrackPlay, "/tracks/<string:track_id>/play")

if __name__ == "__main__":  # True only when this module is executed directly (not imported); the file's name is irrelevant.
    app.run(debug=True)
| [
"meco@MacBook-Pro-de-MECO.local"
] | meco@MacBook-Pro-de-MECO.local |
ea87fcbba598eb2a4c9be4edb95f316047f4e2dd | ff90da29d1ec16cd2c6a4b45411b3577c505029a | /test_vocabulary.py | 61953d07e3c3d81ce1699a76a58bec78a083de19 | [] | no_license | myrywy/rnn_language_model | db7551dba79057b7cb90c844217b3f5e44228bdf | cf7013f704d33530d641eeb72d3870663512b5ef | refs/heads/master | 2021-05-13T22:09:26.311463 | 2018-02-02T22:05:25 | 2018-02-02T22:05:25 | 116,481,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | from unittest import TestCase, main, skip
import numpy as np
import tensorflow as tf
from vocabulary import Vocabulary
class TestVocabulary(TestCase):
    """Unit tests for the Vocabulary word/id/vector mapping."""

    def setUp(self):
        # Six known words with ids 1..6; id 0 / the all-zero vector row is
        # reserved for out-of-vocabulary words.
        self.words = ["a", "b", "c", "d", "e", "f"]
        self.ids = [i+1 for i in range(len(self.words))]
        self.vectors = np.array([
            [0,0,0,0,0,0],
            [1,2,3,4,5,6],
            [0,1,2,3,4,5],
            [0,0,1,2,3,4],
            [0,0,0,1,2,3],
            [0,0,0,0,1,2],
            [0,0,0,0,0,1],
        ])
        self.voc1 = Vocabulary(self.words, self.ids, self.vectors)

    def test_word2id(self):
        self.assertEqual(self.ids, [self.voc1.word2id(w) for w in self.words])
        # Unknown words map to the reserved id 0.
        self.assertEqual(0, self.voc1.word2id("newword"))

    def test_id2word(self):
        self.assertEqual(self.words, [self.voc1.id2word(i) for i in self.ids])

    def test_word2vec(self):
        self.assertTrue((self.voc1.word2vec("a") == np.array([1,2,3,4,5,6])).all())
        self.assertTrue((self.voc1.word2vec("b") == np.array([0,1,2,3,4,5])).all())
        self.assertTrue((self.voc1.word2vec("f") == np.array([0,0,0,0,0,1])).all())
        # Unknown words map to the zero vector.
        self.assertTrue((self.voc1.word2vec("noword") == np.array([0,0,0,0,0,0])).all())

    def test_id2vec(self):
        self.assertTrue((self.voc1.id2vec(1) == np.array([1,2,3,4,5,6])).all())
        self.assertTrue((self.voc1.id2vec(2) == np.array([0,1,2,3,4,5])).all())
        self.assertTrue((self.voc1.id2vec(6) == np.array([0,0,0,0,0,1])).all())
        self.assertTrue((self.voc1.id2vec(0) == np.array([0,0,0,0,0,0])).all())

    def test_get_lookup_tensor(self):
        # NOTE(review): uses TF1-style sessions (tf.Session was removed in TF2).
        with tf.Session() as sess:
            self.assertTrue((sess.run(self.voc1.get_lookup_tensor()) == self.vectors).all())

    def test_one_hot_to_id(self):
        # Argmax semantics: the index of the largest entry wins.
        self.assertEqual(self.voc1.one_hot_to_id(np.array([0,0,1,0])), 2)
        self.assertEqual(self.voc1.one_hot_to_id(np.array([0,0.75,0.25,0])), 1)
        self.assertEqual(self.voc1.one_hot_to_id(np.array([0.51,0.0,0.25,0.24])), 0)

    def test_create_vocabulary(self):
        # Builds a vocabulary from raw tokens with one-hot embeddings.
        voc2 = Vocabulary.create_vocabulary(["a", "b", "a", "c"], 0, create_one_hot_embeddings=True)
        vectors2 = np.array([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
        with tf.Session() as sess:
            self.assertTrue((sess.run(voc2.get_lookup_tensor()) == vectors2).all())
        self.assertTrue((voc2.word2vec("newword") == np.array([1, 0, 0, 0])).all())
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    main()
| [
"marcinlewy22@gmail.com"
] | marcinlewy22@gmail.com |
8b1265f82c50b7dcf292dc5c3ec742fc5f3a9313 | 5e4aa85bdc3c3fb1c943a5c3276c1dd8bff98da8 | /test.py | 718740506127556657130823471698c8bc6427ab | [] | no_license | 571031767/py | ad18daff8ffc1d3ec33e5e075562b1419bc5bad3 | 4ef34f83b1f3d61e5064695df63bfc3369d73a25 | refs/heads/master | 2022-11-24T07:58:14.175070 | 2019-07-19T08:47:32 | 2019-07-19T08:47:32 | 197,515,458 | 0 | 0 | null | 2022-11-22T01:16:01 | 2019-07-18T05:10:08 | Python | UTF-8 | Python | false | false | 220 | py | import sys
from PyQt5 import QtWidgets, QtCore
app = QtWidgets.QApplication(sys.argv)
widget = QtWidgets.QWidget()
widget.resize(360, 360)
widget.setWindowTitle("Hello, PyQt5!")
widget.show()
sys.exit(app.exec_()) | [
"571031767@qq.com"
] | 571031767@qq.com |
273674ee6cc0670136332b6933999900edc476be | 21403ecf699d5feef431d3b17181285ee0c8e0f5 | /dash/main.py | dcdae92e91ebd87663bf07cd9b2479ddfcbafb75 | [] | no_license | LorenzoMauri/LorenzoMauri.github.io | 630c29d8e7f2366b23651f63fdc83e71650ca876 | 5877cab2b18c4f721f28e5047427b438e89fa69d | refs/heads/main | 2022-01-26T00:49:35.053933 | 2022-01-18T10:30:49 | 2022-01-18T10:30:49 | 248,873,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | import pandas as pd
import numpy as np
import plotly.express as px
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from MySQL_getData import Aggregator
# UTILS
def aggregate(df):
    """Per-day sentiment balance for each aspect: (# of 1s) - (# of 0s).

    Labels are assumed binary (1 positive, 0 negative); the named
    aggregation keeps the ('<aspect>', 'diff') MultiIndex column layout.
    """
    aspects = ['service', 'location', 'products']

    def _diff(col):
        # positives minus negatives within one date group
        return (col == 1).sum() - (col == 0).sum()

    grouped = df.groupby(['date'])[aspects]
    return grouped.agg([('diff', _diff)]).fillna(0.0)
def getData():
    """Fetch the review dataset from MySQL through the Aggregator helper."""
    return Aggregator().get()
def filterData(clientDropdown):
    """Rows of the module-level dataframe belonging to the selected client."""
    client_mask = df.client == clientDropdown
    return df[client_mask]
def formatDates(df):
    """Reindex *df* onto a continuous daily range, padding missing days with 0.

    The 'date' column is parsed to datetime (in place), a row is guaranteed
    for every calendar day between the observed min and max dates, and any
    cell left empty by the reindexing is filled with 0.0.
    """
    df['date'] = pd.to_datetime(df.date)
    full_range = pd.DataFrame({
        'date': pd.date_range(df.date.min(), df.date.max())
    })
    return full_range.merge(right=df, how='left', on='date').fillna(0.0)
def getAggregation(df):
    """Monthly mean of the daily per-aspect sentiment balance.

    Aggregates per day with aggregate(), pads missing days via formatDates(),
    resamples to month starts ('MS'), and flattens the MultiIndex column
    names to '<aspect>_diff'.
    """
    daily = aggregate(df).reset_index()
    daily = formatDates(daily)
    monthly = daily.groupby(pd.Grouper(key='date', axis=0,
                                       freq='MS', sort=True)).agg('mean')
    monthly.columns = ['_'.join(col) for col in monthly.columns]
    return monthly
# Load the full dataset once at startup and pre-compute the default view.
df = getData()
dff = getAggregation(df)

app = dash.Dash(__name__)

# Distinct clients shown in the client-selection dropdown.
clients_list = df.client.drop_duplicates().to_list()

app.layout = html.Div([
    html.H1("Web Application for Aspect-Based Sentiment Analysis",
            style = {'text-align':'center'}),
    html.H4("Select an aspect :"),
    # Aspect dropdown: options come from the aggregated columns ('<aspect>_diff').
    dcc.Dropdown(
        id="ticker",
        options=[{"label": x, "value": x}
                 for x in dff.columns],
        value=dff.columns[1],
        clearable=False,
    ),
    html.Br(),
    html.H4("Select a client :"),
    dcc.Dropdown(
        id="clientDropdown",
        options=[{"label": x, "value": x}
                 for x in clients_list],
        value=clients_list[1],
        clearable=False,
    ),
    dcc.Graph(id="time-series-chart"),
])
@app.callback(
    Output("time-series-chart", "figure"),
    [Input("ticker", "value"),
     Input("clientDropdown", "value")])
def display_time_series(ticker, clientDropdown):
    """Rebuild the time-series figure for the selected client and aspect."""
    dff = filterData(clientDropdown)
    dff = getAggregation(dff)
    fig = px.line(dff, x=dff.index, y=ticker)
    return fig
# In the callback decorator, Output declares where the return value of
# display_time_series is injected; the Inputs must be listed in the same
# order as the function's parameters.
if __name__ == '__main__' :
    app.run_server(debug=False)
"lorenzo.mauri@ipratico.it"
] | lorenzo.mauri@ipratico.it |
b9dcab0e27ecbf4bb3c3edbfd8aa26d2deee51b4 | 5ff8c6de40df5094c1885743261564902498f2a0 | /sk-master/cle/models/nips2015/timit/test_m3.py | 2fb20e2557f781a7fc379b6a710a388f599b7da0 | [] | no_license | anirudh9119/SpeechSyn | 25393c6525e8a78179dc7c4d48f9d22395ec06a4 | a45177b019ded27d3935aa5766c716b373649c3a | refs/heads/master | 2020-04-06T04:37:09.019957 | 2016-01-26T02:21:13 | 2016-01-26T02:21:13 | 49,082,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,715 | py | import ipdb
import numpy as np
import theano
import theano.tensor as T
from cle.cle.cost import Gaussian, GMM
from cle.cle.data import Iterator
from cle.cle.utils import unpickle, tolist, OrderedDict
from cle.cle.utils.op import logsumexp
from sk.datasets.timit import TIMIT
# Dataset location and pickled-experiment directory
# (paths for alternate machines kept commented for reference).
#data_path = '/raid/chungjun/data/timit/readable/'
#exp_path = '/raid/chungjun/repos/sk/cle/models/nips2015/timit/pkl/'
#data_path = '/home/junyoung/data/timit/readable/'
#exp_path = '/home/junyoung/repos/sk/cle/models/nips2015/timit/pkl/'
data_path = '/data/lisa/data/timit/readable/'
exp_path = '/data/lisatmp/chungjun/nips2015/timit/pkl/'

frame_size = 200  # waveform samples per frame
# How many examples you want to proceed at a time
batch_size = 80
# How many samples to generate
num_sample = 40  # importance samples per sequence for the marginal NLL
debug = 0        # 1: attach theano test values for eager shape checking
exp_name = 'm3_2_best'  # name of the pickled experiment to evaluate
# The training set supplies the normalization statistics reused for validation.
train_data = TIMIT(name='train',
                   path=data_path,
                   frame_size=frame_size,
                   shuffle=0,
                   use_n_gram=1)
X_mean = train_data.X_mean
X_std = train_data.X_std

valid_data = TIMIT(name='valid',
                   path=data_path,
                   frame_size=frame_size,
                   shuffle=0,
                   use_n_gram=1,
                   X_mean=X_mean,
                   X_std=X_std)

x, x_mask = train_data.theano_vars()

if debug:
    # Test values let theano validate shapes while the graph is being built.
    x.tag.test_value = np.zeros((15, batch_size, frame_size), dtype=np.float32)
    temp = np.ones((15, batch_size), dtype=np.float32)
    temp[:, -2:] = 0.
    x_mask.tag.test_value = temp

# Restore the trained model and unpack its layers positionally
# (the order below must match how the model was assembled at training time).
exp = unpickle(exp_path + exp_name + '.pkl')
nodes = exp.model.nodes
names = [node.name for node in nodes]
main_lstm, prior, kl,\
    x_1, x_2, x_3, x_4,\
    z_1, z_2, z_3, z_4,\
    phi_1, phi_2, phi_3, phi_4, phi_mu, phi_sig,\
    prior_1, prior_2, prior_3, prior_4, prior_mu, prior_sig,\
    theta_1, theta_2, theta_3, theta_4, theta_mu, theta_sig, coeff = nodes
def inner_fn(x_t, s_tm1, s_tm1_is):
    # One VRNN timestep, run twice: once on the plain batch (for the ELBO
    # terms) and once on a num_sample-times repeated batch (for the
    # importance-sampled marginal log-likelihood).

    # --- batch pass: feature extraction from the input frame ---
    x_1_t = x_1.fprop([x_t])
    x_2_t = x_2.fprop([x_1_t])
    x_3_t = x_3.fprop([x_2_t])
    x_4_t = x_4.fprop([x_3_t])

    # Inference network q(z|x, s): produces the posterior Gaussian params.
    phi_1_t = phi_1.fprop([x_4_t, s_tm1])
    phi_2_t = phi_2.fprop([phi_1_t])
    phi_3_t = phi_3.fprop([phi_2_t])
    phi_4_t = phi_4.fprop([phi_3_t])
    phi_mu_t = phi_mu.fprop([phi_4_t])
    phi_sig_t = phi_sig.fprop([phi_4_t])

    # Prior network p(z|s): conditioned only on the recurrent state.
    prior_1_t = prior_1.fprop([s_tm1])
    prior_2_t = prior_2.fprop([prior_1_t])
    prior_3_t = prior_3.fprop([prior_2_t])
    prior_4_t = prior_4.fprop([prior_3_t])
    prior_mu_t = prior_mu.fprop([prior_4_t])
    prior_sig_t = prior_sig.fprop([prior_4_t])

    # Sample z from the posterior and compute KL(q || p) for this step.
    z_t = prior.fprop([phi_mu_t, phi_sig_t])
    kl_t = kl.fprop([phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t])

    z_1_t = z_1.fprop([z_t])
    z_2_t = z_2.fprop([z_1_t])
    z_3_t = z_3.fprop([z_2_t])
    z_4_t = z_4.fprop([z_3_t])

    # Decoder p(x|z, s): GMM output parameters (means, sigmas, mixture weights).
    theta_1_t = theta_1.fprop([z_4_t, s_tm1])
    theta_2_t = theta_2.fprop([theta_1_t])
    theta_3_t = theta_3.fprop([theta_2_t])
    theta_4_t = theta_4.fprop([theta_3_t])
    theta_mu_t = theta_mu.fprop([theta_4_t])
    theta_sig_t = theta_sig.fprop([theta_4_t])
    coeff_t = coeff.fprop([theta_4_t])

    s_t = main_lstm.fprop([[x_4_t, z_4_t], [s_tm1]])

    # --- importance-sampling pass: same pipeline on a repeated batch ---
    x_t_is = T.repeat(x_t, num_sample, axis=0)
    x_1_t_is = x_1.fprop([x_t_is])
    x_2_t_is = x_2.fprop([x_1_t_is])
    x_3_t_is = x_3.fprop([x_2_t_is])
    x_4_t_is = x_4.fprop([x_3_t_is])

    phi_1_t_is = phi_1.fprop([x_4_t_is, s_tm1_is])
    phi_2_t_is = phi_2.fprop([phi_1_t_is])
    phi_3_t_is = phi_3.fprop([phi_2_t_is])
    phi_4_t_is = phi_4.fprop([phi_3_t_is])
    phi_mu_t_is = phi_mu.fprop([phi_4_t_is])
    phi_sig_t_is = phi_sig.fprop([phi_4_t_is])

    prior_1_t_is = prior_1.fprop([s_tm1_is])
    prior_2_t_is = prior_2.fprop([prior_1_t_is])
    prior_3_t_is = prior_3.fprop([prior_2_t_is])
    prior_4_t_is = prior_4.fprop([prior_3_t_is])
    prior_mu_t_is = prior_mu.fprop([prior_4_t_is])
    prior_sig_t_is = prior_sig.fprop([prior_4_t_is])

    z_t_is = prior.sample([phi_mu_t_is, phi_sig_t_is])
    z_1_t_is = z_1.fprop([z_t_is])
    z_2_t_is = z_2.fprop([z_1_t_is])
    z_3_t_is = z_3.fprop([z_2_t_is])
    z_4_t_is = z_4.fprop([z_3_t_is])

    theta_1_t_is = theta_1.fprop([z_4_t_is, s_tm1_is])
    theta_2_t_is = theta_2.fprop([theta_1_t_is])
    theta_3_t_is = theta_3.fprop([theta_2_t_is])
    theta_4_t_is = theta_4.fprop([theta_3_t_is])
    theta_mu_t_is = theta_mu.fprop([theta_4_t_is])
    theta_sig_t_is = theta_sig.fprop([theta_4_t_is])
    coeff_t_is = coeff.fprop([theta_4_t_is])

    # Per-sample importance weight: log p(x|z) + log p(z) - log q(z|x),
    # then log-mean-exp over the num_sample draws per example.
    mll = GMM(x_t_is, theta_mu_t_is, theta_sig_t_is, coeff_t_is) +\
        Gaussian(z_t_is, prior_mu_t_is, prior_sig_t_is) -\
        Gaussian(z_t_is, phi_mu_t_is, phi_sig_t_is)
    mll = mll.reshape((batch_size, num_sample))
    mll = logsumexp(-mll, axis=1) - T.log(num_sample)

    s_t_is = main_lstm.fprop([[x_4_t_is, z_4_t_is], [s_tm1_is]])

    return s_t, s_t_is, kl_t, theta_mu_t, theta_sig_t, coeff_t, mll
# Unroll inner_fn over time; both recurrent states (plain and repeated
# batch) are carried by scan, the rest are per-step outputs.
((s_t, s_t_is, kl_t, theta_mu_t, theta_sig_t, coeff_t, mll), updates) =\
    theano.scan(fn=inner_fn,
                sequences=[x],
                outputs_info=[main_lstm.get_init_state(batch_size),
                              main_lstm.get_init_state(batch_size*num_sample),
                              None, None, None, None, None])

for k, v in updates.iteritems():
    k.default_update = v

# Flatten (time, batch, dim) -> (time*batch, dim) for the GMM likelihood.
reshaped_x = x.reshape((x.shape[0]*x.shape[1], -1))
reshaped_theta_mu = theta_mu_t.reshape((theta_mu_t.shape[0]*theta_mu_t.shape[1], -1))
reshaped_theta_sig = theta_sig_t.reshape((theta_sig_t.shape[0]*theta_sig_t.shape[1], -1))
reshaped_coeff = coeff_t.reshape((coeff_t.shape[0]*coeff_t.shape[1], -1))

recon = GMM(reshaped_x, reshaped_theta_mu, reshaped_theta_sig, reshaped_coeff)
recon = recon.reshape((theta_mu_t.shape[0], theta_mu_t.shape[1]))
recon = recon * x_mask  # zero out padded timesteps
kl_t = kl_t * x_mask

# ELBO terms: sum over time, mean over the batch.
recon_term = recon.sum(axis=0).mean()
kl_term = kl_t.sum(axis=0).mean()
nll_lower_bound = recon_term + kl_term
nll_lower_bound.name = 'nll_lower_bound'

mll = mll * x_mask
mll = -mll.sum(axis=0).mean()
mll.name = 'marginal_nll'

outputs = [mll, nll_lower_bound]
monitor_fn = theano.function(inputs=[x, x_mask],
                             outputs=outputs,
                             on_unused_input='ignore',
                             allow_input_downcast=True)

DataProvider = [Iterator(valid_data, batch_size)]

# Evaluate the monitored quantities over the whole validation set.
data_record = []
for data in DataProvider:
    batch_record = []
    for batch in data:
        this_out = monitor_fn(*batch)
        batch_record.append(this_out)
    data_record.append(np.asarray(batch_record))

for record, data in zip(data_record, DataProvider):
    for i, ch in enumerate(outputs):
        this_mean = record[:, i].mean()
        # NOTE(review): `is np.nan` never matches a freshly computed nan;
        # np.isnan(this_mean) was probably intended here.
        if this_mean is np.nan:
            raise ValueError("NaN occured in output.")
        print("%s_%s: %f" % (data.name, ch.name, this_mean))
| [
"anirudhgoyal9119@gmail.com"
] | anirudhgoyal9119@gmail.com |
31fee6c0fcfa31b6a771053c2cd6f6048a6ca539 | 6ff5ce9bd60f389c7ad067ddd54e909c1b440003 | /venv/bin/easy_install-3.6 | d3b12b65c045489c2d0a41bb379b6062b0061947 | [] | no_license | shabin75/django_blog | eba2265363bfb943c69fb07e03ca414854eb9762 | 15452fc6029cbb651895a04ff1b55434774a6206 | refs/heads/master | 2020-08-29T05:35:37.690476 | 2019-10-28T01:53:20 | 2019-10-28T01:53:20 | 217,944,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | 6 | #!/Users/shabin75/Desktop/blog/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
# Auto-generated setuptools console-script wrapper; do not edit by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a '-script.pyw'/'.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
    )
| [
"shabin75@MacBook-Pro.local"
] | shabin75@MacBook-Pro.local |
310f95195368520a2c727aee49ce131935340493 | ff93bf0b5e9f3fd14f3cd316b34cdcd28b10f179 | /corr_spike_trains.py | ef656f06c81c78981b626eba4e30a1dcd4d4b65e | [] | no_license | willr292/Neuro-Project | 724ec7c4cf729ee83d4f5acef2a750dcb7fb58ad | b0674090b16f1bf779dd734b8352a9d25c96d9bd | refs/heads/master | 2022-04-07T02:38:52.192488 | 2020-01-30T20:43:03 | 2020-01-30T20:43:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,703 | py | # corr_spike_trains - This Python script implements algorithms based on [1]
# for generating correlated spike trains.
#
# [1] : Romain Brette, "Generation of Correlated Spike Trains", Neural
# Computation 21, 188-215, 2009.
#
# Copyright (C) 2016 Georgios Is. Detorakis (gdetor@protonmail.com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import numpy as np
import matplotlib.pylab as plt
class correlated_spikes(object):
""" correlated_spikes Is the main class implemented [1]. Two basic methods
are implemented in this script: Cox processes and the Mixture method.
"""
def __init__(self, C, rates, n_proc):
""" Constructor of correlated_spikes class.
Args:
C (mxm array) : Is the correlation matrix (positive definite)
Its diagonal contains the firing rates for
the spike trains
rates (m array) : Firing rates for spike trains (only for
Mixture method)
n_proc : Number of processes (dimensions of C matrix)
Returns:
"""
self.n_proc = n_proc
self.C = C
self.r = rates
self.spikes = 0
def rectify_and_decomp_corr(self):
""" rectify_and_decomp_corr - It rectifies and decomposes matrix C
using Cholesky's decomposition.
Args:
Returns:
L (mxm array) : Lower triangular matrix (after Cholesky's
decomposition) with diagona filled with r^2 *
alpha (see [1])
"""
# Change diagonal with r^2
d = np.diag(self.C)**2
np.fill_diagonal(self.C, d)
# Cholesky decomposition
L = np.linalg.cholesky(self.C)
# Compute eigenvalues of L
w, v = np.linalg.eig(L)
# Compute alpha as the minimum eigenvalue with negative sign
alpha = -w.real.min()
# Fill the diagonal of L with r^2 * alpha
np.fill_diagonal(L, d * alpha)
return L
def cox_process(self, tau_c=10, time=100, dt=1):
""" Cox process (doubly stochastic process). It generates n_proc
number of correlated spike trains based on C matrix.
Args:
tau_c (float) : Time constant (lambda, see [1])
time (int) : Duration of spike trains (ms)
dt (float) : Time step (discretization)
"""
ticks = int(time / dt) # Simulation ticks
Lambda = np.exp(-dt / tau_c)
Sigma = np.sqrt(1 - np.exp(-2 * dt / tau_c))
# Set up rates vector
R = np.diag(self.C)
Y = np.random.normal(0, 1, (self.n_proc,))
S = np.zeros((ticks, self.n_proc))
# Rectify C
L = self.rectify_and_decomp_corr()
for t in range(ticks):
# Compute N independent Ornstein-Uhlberg processes
Y = Y * Lambda + np.random.normal(0, Sigma, (self.n_proc,))
# Compute instantaneous rates
X = R + np.dot(L, Y)
# Create spikes list
prob = np.random.uniform(0, 1, (self.n_proc,))
idx = (X * 0.001 * dt) > prob
S[t, idx] = 1
self.spikes = S.copy()
return S
def random_latency(self, size):
""" random_latency - Returns a random number based on the exponential
distribution.
Args:
size (int) : Size of random numbers sample
Returns:
"""
return np.random.exponential(1, size=size)
def optimization_mixture(self, nu, P):
""" optimization_mixture - This function computes the best mixture
matrix P and the corresponding vector nu (firing rates) by applying
a gradient descent.
Args:
P (mx2n) : Mixture matrix
nu (2n) : Firing rates of independent spike trains (sources)
Returns:
"""
n = self.n_proc
P_ = P[:, :n].copy()
nu_ = nu[:n].copy()
A = np.zeros((n, n))
# Initialization of nu and P
nu_ = self.r.copy()
np.fill_diagonal(P_, 1)
# Steps
b = 0.01 / n
a = (1. / n) * b
# Iterations
U = np.zeros((n, ))
for ns in range(20000):
for i in range(n):
for j in range(n):
if i != j:
x = 0.0
for k in range(n):
x += P_[i, k] * P_[j, k] * nu_[k]
A[i, j] = x - self.C[i, j]
else:
A[i, j] = 0
tmp = np.dot(nu_.T, P_)
U[tmp >= self.r] = 1
U[tmp < self.r] = 0
tmp = np.dot(A, P_)
tmp_y = np.dot(tmp, nu_)
tmp_u = np.dot(U, nu_)
Y = P_ - 4 * a * tmp_y - b * tmp_u
Y[Y < 0] = 0
Y[Y > 1] = 1
P_ = Y
for i in range(n):
X = 0
for k in range(n):
for l in range(n):
X += P_[k, i] * P_[l, i] * A[k, l]
Y = np.dot(U, P_)
nu_[i] -= a * X + b * Y[i]
nu_[nu_ < 0] = 0
X = np.dot(P_, nu_)
nu[:n] = nu_.copy()
nu[n:] = (self.r - X)
P[:, :n] = P_
P[:, n:] = np.zeros((n, n))
np.fill_diagonal(P[:, n:], 1)
if any(nu[nu < 0]):
raise ValueError("nu contains illegal values!")
if any(P[P < 0]) or any(P[P > 1]):
raise ValueError("P contains illegal values!")
def offline_mixture(self, P, nu, n_src=1, n_trg=1, tau_c=10, time=1000):
""" offline_mixture - It's the Mixture methods implemented in [1].
It returns a spike list containing correlated spike trains.
Args:
P (mx2n array) : Mixture matrix (mixture probability)
nu (2x array) : Firing rates of independent spike trains
(sources)
n_src (int) : Number of sources spike trains
n_trg (int) : Number of targets (correlated) spike trains
tau_c (float) : Time constant
time : Duration of correlated spike trains
Returns:
spks (array) : Numpy structured array containing events time
and ids (id is the number of target spike
train)
"""
# Average target rate
r_mean = np.mean(np.dot(P, nu))
# Optimal window size
w_size = n_src * 1.0 / r_mean
# Window
w_size = int(time * 0.001)
# Number of spikes in trains
num_sources = np.random.poisson(nu * w_size).astype('int')
# Generate Poisson spike trains
source_train = []
for i in range(num_sources.shape[0]):
source_train.append(np.random.uniform(
0, w_size, num_sources[i]) * 1000)
spk, tm = [], []
for i in range(n_src):
for j in range(n_trg):
num_targets = np.random.binomial(num_sources[i], P[j, i])
target_train = np.random.choice(source_train[i],
size=num_targets,
replace=False)
spk.extend(np.ones((num_targets,)) * j)
tm.extend(target_train +
self.random_latency(num_targets) * tau_c)
spk = np.array(spk, dtype='int')
tm = np.array(tm)
spks = np.recarray(spk.shape[0], dtype=[('t', float), ('id', int)])
spks['t'] = tm
spks['id'] = spk
spks.sort()
return spks
def extract_pyNCS_list(self, id_init=0):
""" extract_pyNCS_list - Extracts a spike list compatible with pyNCS
package.
Args:
id_init (int) : Initial id for spike trains
Returns:
tmp (array) : A spike list that is compatible to pyNCS AER.
"""
time, id_end = self.spikes.shape
id_end += id_init
ids = range(id_init, id_end)
tmp = []
for t in range(time):
for i, j in enumerate(ids):
if self.spikes[t, i] != 0:
tmp.append((j, t))
return np.array(tmp, dtype='int')
    def raster_plot(self):
        """ raster_plot - Draws the raster plot of already generated spike
        trains.

        NOTE(review): this mutates ``self.spikes`` in place (all zeros are
        replaced by NaN so matplotlib leaves gaps), which destroys the
        original zero entries -- confirm callers do not reuse the matrix
        afterwards.  Also requires a float dtype for the NaN assignment.
        The figure is neither shown nor returned here; the caller is
        expected to call ``plt.show()``.
        Args:
        Returns:
        """
        self.spikes[self.spikes == 0] = np.nan
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # Offset each train by its index so rows stack vertically;
        # '|' markers render each spike as a tick.
        for i in range(self.n_proc):
            ax.plot(self.spikes[:, i] + i, '|k', ms=2, mew=1)
        ax.set_ylim([-.5, self.n_proc + 1])
        ax.set_yticks([])
| [
"willr292@gmail.com"
] | willr292@gmail.com |
ff80203835e8716b7b05805dd7ab23d557f4eee1 | e8b1ea989fe0d08449fbe6713ad13a671390fed7 | /main/migrations/0022_auto_20170203_0510.py | 4b0986db0cc6cddf74710d362114b5a312365cbc | [] | no_license | alexbobrow/mydict | 1a0625cab9e5f7db440c8583e63981f8dbb5497c | edaa16866364d8d776c788da4e3029d8ce25b47d | refs/heads/master | 2023-08-06T13:55:42.137986 | 2021-10-09T15:13:52 | 2021-10-09T15:13:52 | 398,196,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-03 05:10
from __future__ import unicode_literals
from django.db import migrations
# Renames the ``Word`` model to ``WordOs`` and ``WordSecond`` to ``WordWf``.
# Auto-generated Django migration; avoid editing the operations by hand.
class Migration(migrations.Migration):

    dependencies = [
        ('main', '0021_auto_20170201_0556'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Word',
            new_name='WordOs',
        ),
        migrations.RenameModel(
            old_name='WordSecond',
            new_name='WordWf',
        ),
    ]
| [
"alexbobrow@mail.ru"
] | alexbobrow@mail.ru |
c8e4254718c9e65240da0fcc672cd0564564c4a3 | 775da6a60f5b22578d6b05c1b087558d7a26f274 | /3_Bigdata/03_Statistics_basic/wine_quality.py | 299c863d35bb37713159c77f7cb135294d600c12 | [] | no_license | koobeomjin/iot_python2019 | 4242744a6c4452674b2e2686492c8634b92dbe7e | 6558f500e9d1c0b9c5db23498ad02c31e6212229 | refs/heads/master | 2020-06-14T22:17:35.575323 | 2019-10-21T06:23:59 | 2019-10-21T06:23:59 | 195,142,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | import pandas as pd
# Load the combined red/white wine dataset and print basic summary statistics.
wine = pd.read_csv('winequality-both.csv', sep=',', header=0)
# Normalise column names: spaces -> underscores, so columns are
# attribute-accessible (e.g. wine.fixed_acidity).
wine.columns = wine.columns.str.replace(' ', '_')
print(wine.head(10))

print('변수별 요약통계')
print(wine.describe())

print('\n특정 열의 유일값 찾기')
# Fixed: ``wine.quality.unique`` is a bound method; sorting it raises
# TypeError.  It must be *called* to obtain the array of distinct values.
print(sorted(wine.quality.unique()))

print('\n빈도 찾기')
print(wine.quality.value_counts())
"abc12@naver.com"
] | abc12@naver.com |
2e2b0256d971a113ac7e6952f98b8b8a9ff42755 | fbe2bce9e3d810c423979bf1f739b2775b8c31a3 | /url_shortener/migrations/0001_initial.py | d601dd7529a2573ece26e98cec4f507f9f3039dd | [
"MIT"
] | permissive | alena-kono/simple-shortener | 005ef1e03bf1ac82b777d8f692a8259880e7aaef | d1549b342e190ff70509ce5b442cb31376f2a07a | refs/heads/main | 2023-07-25T21:04:05.614094 | 2021-09-05T20:33:13 | 2021-09-05T20:33:13 | 403,371,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # Generated by Django 3.1.13 on 2021-09-05 18:30
from django.db import migrations, models
# Initial migration: creates the ``Url`` table mapping an original URL to its
# unique shortened form.  Auto-generated by Django; do not edit by hand.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Url',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('original_url', models.URLField(db_index=True, unique=True, verbose_name='original full url')),
                ('short_url', models.URLField(null=True, unique=True, verbose_name='shortened relative url')),
            ],
        ),
    ]
| [
"konovalova.alena@gmail.com"
] | konovalova.alena@gmail.com |
b70a883907ad7073bcf8f114e0cd5097988c808a | 0675dbf9edc1564ca2a0be8635dd704afb35631b | /main.py | eaa6078ad68129f0052c6224632c8c3120e0638f | [] | no_license | vasude1/local_damage_visco | 9c630fb3e7ecbbc3de0c3884061f16f7d406cdcf | 399c6b5a63db5f70b1f7fd081a870d3586ec6093 | refs/heads/main | 2023-01-02T07:22:18.102456 | 2020-10-28T22:25:02 | 2020-10-28T22:25:02 | 308,155,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | ## Imports from Python
import numpy as np
import scipy.sparse as sps
## From own mods
from classes import *
from compute_Matrices import *
# (Fixed: removed a stray dead expression statement ``0.0761`` that had no
# effect and appeared to be leftover scratch work.)

## Material Parameters
# Prony-series pairs for the viscoelastic material: each row of ``prop`` is
# (relative modulus g_i, relaxation time tau_i) for one dashpot branch.
dashpots = 8
prop = np.array([[0.9,1E-7],[0.01,1e-06],[0.00100388,1e-05],
                [0.00109131,0.0001],[0.00115114,0.001],[0.00121,0.01],
                [0.0061,0.1],[0.07,1]])
# prop=np.zeros((dashpots,2))
material = ViscousMaterial(dashpots)
for i in range(dashpots):
    material.g[i] = prop[i][0]
    material.tau[i] = prop[i][1]
# Instantaneous modulus from the long-term modulus and the Prony weights.
# NOTE(review): ``material.g[:][0]`` is simply ``material.g[0]`` (the first
# branch), not the column of all g_i -- confirm whether ``sum`` over all
# branches (e.g. ``sum(prop[:, 0])``) was intended here.
material.E_o = material.E_inf/(1-sum(material.g[:][0]))

## Parameters for temporal loop
end_time = 1000000.0
delta_t = 0.5

## Parameters for geometry and mesh
length_of_bar = 1.0
number_elements = 25 # element has nodes (element,element+1)
element_length = length_of_bar/number_elements
nodes = element_length*np.arange(number_elements+1)

## Time integration (Newmark-beta parameters)
beta = 0.2
gamma = 0.6

## FE quantities
Mass = compute_Mass(number_elements,element_length,material.density)
# Mass = sps.coo_matrix(Mass)
Stiff = np.zeros((number_elements+1,number_elements+1))
f = np.zeros((number_elements+1,1))
bf = np.zeros((number_elements+1,1))
# One internal-variable container per element (viscous history state).
IV = [InternalVariables(dashpots,element_length) for _ in range(number_elements)]
u = np.zeros((number_elements+1,1))
delta_u = np.zeros((number_elements+1,1))
v = np.zeros((number_elements+1,1))
a = np.zeros((number_elements+1,1))
ele_array = 1.0/element_length*np.array(([1.0,-1.0],[-1.0,1.0]))
forc_array = element_length*np.array(([[0.5],[0.5]]))

conv_count = 0
time = 0.0
# Adaptive time stepping: halve delta_t on Newton failure, grow it by 1.5x
# after three consecutive converged steps.
while(time<end_time):
    print(time,delta_t)
    time_old = time
    converge = bool(0)
    # NOTE(review): for numpy arrays ``u[:]`` is a *view*, not a copy; this
    # is safe only because u_tmp/a_tmp are rebound (not mutated in place)
    # inside the Newton loop -- confirm if that invariant is relied upon.
    u_tmp = u[:]
    a_tmp = a[:]
    iter = 0
    # [1:] [1:,1:]
    # Newton iterations (max 60); node 0 is constrained, hence the [1:] slices.
    while((not converge) and (iter<60)):
        material.E = compute_effectivestiff(material,delta_t)
        compute_Matrices(time_old+delta_t,number_elements,IV,material,delta_t,Stiff,bf,f,u_tmp)
        LHS = 1.0*(1.0/beta/delta_t**2 * Mass + Stiff)
        # print(np.dot(Mass,a_tmp),bf)
        # exit(0)
        RHS = -np.dot(Mass,a_tmp) - f + bf
        delta_u[1:] = np.linalg.solve(LHS[1:,1:],RHS[1:])
        u_tmp = u_tmp+delta_u
        # Newmark acceleration update from the trial displacement.
        a_tmp = 1.0/beta*((u_tmp-u)/delta_t**2 - v/delta_t - (0.5-beta)*a)
        if(np.linalg.norm(delta_u) < 1E-8):
            converge = bool(1)
        iter += 1
    if(not converge):
        # Failed to converge: retry the step with half the time increment.
        delta_t = 0.5*delta_t
        conv_count = 0
    elif(converge):
        time = time_old + delta_t
        # Newmark velocity update.
        v = v+delta_t*((1-gamma)*a+gamma/beta*((u_tmp-u)/delta_t**2)-v/delta_t-(0.5-beta)*a)
        u[:] = u_tmp[:]
        a[:] = a_tmp[:]
        IV = update_internalvariables(time,number_elements,IV,material,delta_t,u)
        write_solution(u,a,IV,time_old)
        conv_count += 1
        if(conv_count > 2):
            delta_t = 1.5*delta_t
            conv_count = 0
| [
"noreply@github.com"
] | vasude1.noreply@github.com |
0981677d88f617ba17bf5cbcb8bc5e03108f0536 | 2467d3cdd5a82b07f5f225c794fff11fa35cafe1 | /settings.py | 90cc72605744d206e48eb470b3124301f4391014 | [] | no_license | aroscoe/reel-time | 8c723af72e16110a61ea7e16b64ba12891ac093d | f29922b69804ac4dc434b335cad66a85466f2ace | refs/heads/master | 2020-12-24T13:44:49.081863 | 2010-06-27T04:24:35 | 2010-06-27T04:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,527 | py | # Django settings for reel-time project.
import os.path

# Absolute path of the directory containing this settings file; used to
# anchor MEDIA_ROOT and TEMPLATE_DIRS below.
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))

# NOTE(review): DEBUG must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# SQLite development database in the working directory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'dev.db',                       # Or path to database file if using sqlite3.
        'USER': '',                             # Not used with sqlite3.
        'PASSWORD': '',                         # Not used with sqlite3.
        'HOST': '',                             # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                             # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Eastern'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "assets")

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/assets/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/assets/admin/'

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (e.g. os.environ) for any non-dev deployment.
SECRET_KEY = '&_%$i1b&+hg1d^eq^(iwf3uv+_^%nv-$hu+3xqd&-0o&^@7uy+'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'reel-time.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_ROOT, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'timeline',
)

# Local Settings
# Optional per-machine overrides; silently skipped when the module is absent.
try:
    from local_settings import *
except ImportError:
    pass
"me@anthonyroscoe.com"
] | me@anthonyroscoe.com |
ab2095ba16deb13d694b50d1dd2b8edea9ce54a4 | 14477033743ef2888878c96334013f41e2d1f274 | /simpleCli/base.py | 6f9d05de5984d8354a40ee87739c77fcf8b5a4e4 | [
"MIT"
] | permissive | enigma0Z/python-simple-cli | c28382efe24593d31bc4db63cb9b962be6f1546c | 098e2ac7429742e86d68974f2c015610726ee5aa | refs/heads/master | 2020-04-16T00:49:34.336773 | 2019-01-12T04:21:25 | 2019-01-12T04:21:25 | 165,152,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | #!/usr/bin/env python3
"""
simpleCli.base
"""
from argparse import ArgumentParser
from collections import deque
from .exceptions import NotImplementedException
class BaseCommand:
    """
    Base class for CLI commands.

    Holds the command metadata (name, command text, description), maintains
    the command *stack* used to build the argparse program name, and defines
    the interface that concrete commands implement.

    :param str name: The name of the command (required).
    :param str cmd: The command text. Defaults to ``name.lower()``; an
        explicit ``None`` means the command contributes nothing to the stack.
    :param str stack: Optional initial stack entry, placed before ``cmd``.
    :param str description: Optional description passed to the parser.
    """

    def __init__(self, *args, **kwargs):
        #pylint: disable=unused-argument
        self.name = kwargs['name']

        # Build the command stack: optional parent entry first, then this
        # command's own text (unless it was explicitly set to None).
        self.stack = deque()
        if 'stack' in kwargs:
            self.stack.append(kwargs['stack'])

        self.cmd = kwargs.get('cmd', kwargs['name'].lower())
        if self.cmd is not None:
            self.stack.append(self.cmd)

        self.description = kwargs.get('description')
        self.initParser()

    def initParser(self):
        """
        (Re)build the argument parser.  Subclasses should declare their
        arguments here, so that a stack change -- which rebuilds the parser
        -- keeps every argument definition intact.
        """
        self._parser = ArgumentParser(
            prog=' '.join(self.stack),
            description=self.description)

    def stackAppendleft(self, value):
        """
        Prepend *value* to the command stack and rebuild the parser so its
        program name reflects the new stack.  Useful for containers (like an
        interpreter) that nest other commands.

        :param str value: The item to prepend (deque.appendleft()) to the stack
        """
        self.stack.appendleft(value)
        self.initParser()

    def parse(self, *args):
        """
        Run the given sequential arguments through this command's parser.

        :param args: Sequential arguments to pass through the parser
        """
        return self._parser.parse_args(args)

    def do(self, *args):
        #pylint: disable=unused-argument,invalid-name
        """
        Execute the command.  Not implemented on the base class; concrete
        subclasses must override.

        :param args: Sequential arguments passed from the CLI
        """
        raise NotImplementedException(self)
| [
"enigma.0ZA@gmail.com"
] | enigma.0ZA@gmail.com |
b7ad0c4b75e72ab71fd785503bb1ed1b91d76003 | db5f730b1311210130ad900444a8765fceebda36 | /Data_base/weight_categories.py | d0fe8ec9d6ba815b692198da9c201c22310fdc11 | [] | no_license | sumo-slonik/sumo_match | 8e9e842730af3b96b18dea622a1661bb58455933 | 9ca52fcb474c554e4f9239a8ad9a08d79f80668a | refs/heads/master | 2023-08-12T23:03:28.095141 | 2021-09-10T16:41:10 | 2021-09-10T16:41:10 | 349,742,489 | 1 | 0 | null | 2021-09-10T16:41:11 | 2021-03-20T14:09:08 | Python | UTF-8 | Python | false | false | 3,455 | py | import hashlib
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from Data_base.tables import WeightCategory
def generate_id(weight_category, age_category, gender):
    """Derive a stable 8-digit id from a (weight, age, gender) triple.

    The three strings are concatenated, SHA-1 hashed, and the hex digest is
    reduced modulo 10**8 so the id always fits in eight decimal digits.
    """
    key = weight_category + age_category + gender
    digest = hashlib.sha1(key.encode("utf-8")).hexdigest()
    return int(digest, 16) % (10 ** 8)
class WeightCategoriesAdder:
    """Populates the WeightCategory table with every (weight, age, gender)
    combination used by the federation.

    The category tables below intentionally contain several spellings of the
    same open-ended class (e.g. '+60' and '+ 60') -- presumably to match
    inconsistent source data; confirm before deduplicating.
    """

    # Age brackets (Polish labels); note the trailing space in "Senior ".
    age_categories = ("Dziecko (u12)",
                      "Młodzik (u14)",
                      "Kadet (u16)",
                      "Junior (u18)",
                      "Młodzierzowiec (u21)",
                      "Młodzierzowiec (u23)",
                      "Senior ")

    # Women's weight classes (kg) per age bracket.
    woman_weight_categories = {
        age_categories[0]: [str(x) for x in range(30, 61, 5)] + ['+60','+ 60'],
        age_categories[1]: [str(x) for x in range(35, 66, 5)] + ['+65', '+ 65','+60','+ 60'],
        age_categories[2]: [str(x) for x in range(45, 71, 5)] + ['+65', '+ 65','+70', '+ 70', 'open'],
        age_categories[3]: [str(x) for x in range(50, 76, 5)] + ['+70', '+ 70', '+75', '+ 75', 'open'],
        age_categories[4]: [str(x) for x in range(50, 66, 5)] + ['73', '80', '+80', '+ 80', '95', '+95', '+ 95',
                                                                'open'],
        age_categories[5]: [str(x) for x in range(50, 66, 5)] + ['73', '80', '+80', '+ 80', '95', '+95', '+ 95',
                                                                'open'],
        age_categories[6]: [str(x) for x in range(50, 66, 5)] + ['73', '80', '+80', '+ 80', '95', '+95', '+ 95', 'open']
    }

    # Men's weight classes (kg) per age bracket.
    man_weight_categories = {
        age_categories[0]: [str(x) for x in range(35, 61, 5)] + ['+60', '+ 60'],
        age_categories[1]: [str(x) for x in range(40, 71, 5)] + ['+70', '+ 70','35'],
        age_categories[2]: [str(x) for x in range(55, 96, 10)] + ['50', '+95', '+ 95', 'open'],
        age_categories[3]: [str(x) for x in range(60, 101, 10)] + ['55', '+100', '+ 100', 'open'],
        age_categories[4]: ['70', '77', '85', '92', '100', '115', '+115', '+ 115', 'open'],
        age_categories[5]: ['70', '77', '85', '92', '100', '115', '+115', '+ 115', 'open'],
        age_categories[6]: ['70', '77', '85', '92', '100', '115', '+115', '+ 115', 'open']
    }

    # Index 0 -> women, index 1 -> men; parallels the gender list in
    # add_categories().
    weight_categories = [woman_weight_categories, man_weight_categories]

    def __init__(self, DATABSE_URI):
        """Create the SQLAlchemy engine for the given database URI."""
        self.db = create_engine(DATABSE_URI)
        # NOTE(review): cat_id is never read or updated anywhere -- likely
        # leftover state that could be removed.
        self.cat_id = 0

    def add_categories(self):
        """Upsert every weight category row into the database.

        Uses ``session.merge`` so re-running is idempotent for rows whose
        generated id already exists.
        """
        session_maker = sessionmaker(self.db)
        session = session_maker()
        # session.query(WeightCategory).delete()
        for count, gender in enumerate(['kobiety', 'mężczyźni']):
            for age_category in self.weight_categories[count]:
                for category in self.weight_categories[count][age_category]:
                    to_add = WeightCategory(category, age_category, generate_id(category, age_category, gender), gender)
                    session.merge(to_add)
        session.commit()
if __name__ == '__main__':
    # NOTE(review): database credentials are hard-coded and committed here;
    # consider loading them from the environment instead.  The variable name
    # "DATABSE_URI" (sic) matches the constructor parameter's spelling.
    DATABSE_URI = 'mysql+mysqlconnector://{user}:{password}@{server}/{database}'.format(user='root', password='admin',
                                                                                       server='localhost',
                                                                                       database='sumo_match_maker')
    weight_category_adder = WeightCategoriesAdder(DATABSE_URI)
    weight_category_adder.add_categories()
| [
"jakobn@o2.pl"
] | jakobn@o2.pl |
cdc6f4ba077e242a33d6ee03cb2e97631421d423 | 9d5c1340397440996fd8f16d262df2045746ed6d | /anima/extension.py | 327fa5a7cbeb76c0c2ab23d5468e4c00c1877a3d | [
"BSD-2-Clause"
] | permissive | jonntd/anima | f44c4168052d85e6e1203e5ce6217d61d6602232 | dbd487f11f31e9351f49126cbef8273577023518 | refs/heads/master | 2021-01-17T01:10:39.920972 | 2015-03-12T20:55:25 | 2015-03-12T20:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2014, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
def extends(cls):
    """Class-extension decorator: attach the decorated callable to *cls*.

    ``cls`` may be a single class or a list of classes; the decorated
    function (or property, via its getter's name) is installed on each
    target under its own name.  Anything else is silently ignored.  The
    decorator returns a thin pass-through wrapper, so the module-level name
    still invokes the original callable.

    :param cls: The class object (or list of class objects) to extend.
    """
    def wrapper(f):
        # Properties carry their name on the getter, not the descriptor.
        name = f.fget.__name__ if isinstance(f, property) else f.__name__

        if isinstance(cls, type):
            targets = [cls]
        elif isinstance(cls, list):
            targets = cls
        else:
            targets = []
        for target in targets:
            setattr(target, name, f)

        def wrapped_f(*args, **kwargs):
            return f(*args, **kwargs)
        return wrapped_f
    return wrapper
| [
"eoyilmaz@gmail.com"
] | eoyilmaz@gmail.com |
cf16676a020bfc2a8c7972d0091f467f31209631 | 704aed383987bea4696cb2036ab4a7953e2c948f | /yomdemo/models.py | 386ceb31f6f824bfbb9c52d8836b389edf4621cd | [] | no_license | abhishek-akbari01/blog-project | 8e74616ddbeb3fa4bb50c05a84a9cc11988e93ad | 408b8617ffd517b8cc6350fb83c77b82638e123f | refs/heads/master | 2022-12-25T22:26:06.191367 | 2020-10-11T22:26:41 | 2020-10-11T22:26:41 | 303,350,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | from django.db import models
from django import forms
class Slider(models.Model):
    """Home-page slider entry: a title, body text and slide image."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    slides = models.FileField(upload_to='slider/')
    # Fixed: the default manager must be a Manager *instance*; assigning the
    # class itself breaks model setup / queryset access.
    objects = models.Manager()


class Category(models.Model):
    """Blog post category."""
    title = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = models.Manager()

    def __str__(self):
        return self.title


class WorkCategory(models.Model):
    """Category for portfolio ``Work`` items."""
    title = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = models.Manager()

    def __str__(self):
        return self.title


class Work(models.Model):
    """Portfolio item belonging to a WorkCategory."""
    image = models.FileField(upload_to='images/')
    category = models.ForeignKey(WorkCategory, on_delete=models.CASCADE)
    title = models.CharField(max_length=100)
    content = models.TextField()
    objects = models.Manager()

    def __str__(self):
        # Fixed: __str__ must return a string; ``self.category`` is a model
        # instance, so coerce it explicitly.
        return str(self.category)


class Contact(models.Model):
    """Message submitted through the contact form."""
    name = models.CharField(max_length=100)
    email = models.CharField(max_length=100)
    subject = models.CharField(max_length=100)
    message = models.TextField()
    objects = models.Manager()


class User(models.Model):
    """Minimal site user record.

    NOTE(review): stores the password as plain text in a CharField --
    consider django.contrib.auth (or at least hashing) instead.
    """
    name = models.CharField(max_length=100)
    email = models.CharField(max_length=100)
    password = models.CharField(max_length=100)
    objects = models.Manager()

    def __str__(self):
        return self.name


class Blogs(models.Model):
    """Blog post, owned by a User and filed under a Category."""
    image = models.FileField(upload_to='blog-image/')
    name = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # New posts start as "block" (hidden) until approved.
    status = models.CharField(max_length=50, default="block")
    objects = models.Manager()


class AddBlogs(forms.ModelForm):
    """ModelForm used to create ``Blogs`` entries."""
    class Meta:
        model = Blogs
        fields = ["image", "name", "category", 'user']

    # def __init__(self, *args, **kwargs):
    #     super().__init__(*args, **kwargs)
    #     self.fields['user'].widget.attrs.update({'style': 'display:none'})


class Comment(models.Model):
    """Comment left on a blog post."""
    name = models.CharField(max_length=100)
    email = models.CharField(max_length=100)
    image = models.FileField(upload_to='comment-image/')
    created_at = models.DateTimeField(auto_now_add=True)
    content = models.TextField()
    objects = models.Manager()
| [
"abhishekakbari9668@gmail.com"
] | abhishekakbari9668@gmail.com |
a6e8833b4c75fb3dd7c8daa4641b95a272aaef9c | c25080ecd125becfe8f64164201ddbc272604171 | /app.py | 4d00b96ee9a38d679255a19dbb3eef1ad0bcec97 | [
"MIT"
] | permissive | HubbeKing/Hubbot_Twisted | 861f02771925f3b6fc72bc2503a768dd278934fc | e237afc9802064d00c8a76f958f27bad2c371f6d | refs/heads/master | 2023-04-27T17:45:00.105852 | 2023-04-21T19:46:40 | 2023-04-21T19:46:40 | 14,868,288 | 2 | 0 | null | 2016-12-12T21:18:58 | 2013-12-02T16:41:58 | Python | UTF-8 | Python | false | false | 1,395 | py | from hubbot.config import Config, ConfigError
from hubbot.factory import HubbotFactory
import argparse
import logging
import sys
if __name__ == "__main__":
    # Parse the CLI options for config file and log file locations.
    # NOTE(review): the help text "configuration file to user" is presumably
    # a typo for "to use"; it is a runtime string, so left unchanged here.
    parser = argparse.ArgumentParser(description="A derpy Twisted IRC bot.")
    parser.add_argument("-c", "--config", help="The configuration file to user", type=str, default="hubbot.toml")
    parser.add_argument("-l", "--logfile", help="The file used for global error logging", type=str, default="logs/hubbot.log")
    options = parser.parse_args()

    # INFO-level console logging on stdout with short timestamps.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    streamHandler = logging.StreamHandler(stream=sys.stdout)
    streamHandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%H:%M:%S'))
    streamHandler.setLevel(logging.INFO)
    root_logger.addHandler(streamHandler)

    # set up file for error logging (ERROR and above only, full date format)
    fileHandler = logging.FileHandler(filename=options.logfile)
    fileHandler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y/%m/%d-%H:%M:%S'))
    fileHandler.setLevel(logging.ERROR)
    root_logger.addHandler(fileHandler)

    # Load the bot config; only start the factory when it parses cleanly.
    config = Config(options.config)
    try:
        config.read_config()
    except ConfigError:
        root_logger.exception("Failed to load config {!r}".format(options.config))
    else:
        factory = HubbotFactory(config)
| [
"hubbe128@gmail.com"
] | hubbe128@gmail.com |
1b1ceec87372d57456338b62c945931f222ce59c | e4539b17568dd3ef7a414da3948835767760017d | /cursosPython/CursoBásico/ciclos.py | eb47931ecd4a732c8e45800ad4c4a01c236dfdbd | [] | no_license | jpmontenegro/test | e6594bb49c7b913255c95eecb9df563d20d80ba2 | eaf33bca7effdf64a533a1d7e0b32c64bbffd091 | refs/heads/master | 2023-08-06T05:32:44.057080 | 2021-10-11T03:11:50 | 2021-10-11T03:11:50 | 404,925,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | def run():
    # Prompt (in Spanish) for which loop demo to run.  input() already
    # returns a str, so the str() wrappers here are no-ops.
    ejemplo = input(str("""
    Ejemplos de ciclos:
    1 - Continue
    2 - Break en números
    3 - Break en texto
    4 - Juego
    Selecciona el ejemplo a probar: """))
    if ejemplo == str(1):
        # Example 1 (continue): print the even numbers 0..98.
        for contador in range(100):
            if contador % 2 != 0:
                continue
            print(contador)
    elif ejemplo == str(2):
        # Example 2 (break on number): print 0..50, stopping after 50.
        for i in range(100):
            print(i)
            if i == 50:
                break
    elif ejemplo == str(3):
        # Example 3 (break on text): echo letters until the first 'o'.
        texto = input('Escribe un texto: ')
        texto = texto.lower()
        for letra in texto:
            if letra == 'o':
                break
            print(letra)
    else:
        # Game: print every multiple of the chosen number in 1..100.
        numero = int(input(str("""
    Vamo' a juga:
    Elige un número entre el 1 y el 100 y te mostraré toda la secuencia
    de número que son multiplos del número que elegiste: """)))
        contador = 1
        # The loop increments first, so (contador-1) covers 1..100.
        while contador <= 100:
            contador += 1
            if (contador-1) % numero != 0:
                continue
            print(contador-1)
if __name__ == '__main__':
    run()
"jpmcaro1997@gmail.com"
] | jpmcaro1997@gmail.com |
80da82accdf31a5158077c9753abc419d97a906e | 3b80cc625b048ecfd21808db947935447f30d667 | /test/overhead/calc_time.py | dd29b8b22035b56527e6f94453e99ee17531e34d | [] | no_license | Rets66/dns-td | 2af76223bbc594be0b568cf777f2844971cb5174 | cd6a322591d0956844891e4b48413137560eb6c9 | refs/heads/master | 2020-09-30T21:23:57.745418 | 2020-01-25T10:47:32 | 2020-01-25T10:47:32 | 227,377,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | #!/usr/bin/env python3
#coding: utf-8
import matplotlib.pyplot as plt
import statistics
#import matplotlib.ticker as ticker
# Load the recorded timing samples (a Python list literal on one line) and
# plot them as a scatter with mean/median/mode reference lines.
with open('./calc_time5000.json') as f:
    data = f.readline()
# NOTE(review): eval() executes arbitrary code from the file; the file is
# trusted local test output here, but json.loads / ast.literal_eval would be
# safer if the format allows it.
time = eval(data)
# Scale samples by 1000 (presumably seconds -> milliseconds; the axis label
# below says ms -- TODO confirm units).
time = [i * 1000 for i in time]
# X axis: sample index 1..5000 (assumes exactly 5000 samples in the file).
num = list(range(1,5001))
mean = statistics.mean(time)
median = statistics.median(time)
# NOTE(review): statistics.mode() raises StatisticsError on Python < 3.8 when
# there is no unique mode -- confirm the data always has one.
mode = statistics.mode(time)
#ave = sum(time) / 5000
plt.grid()
plt.scatter(num, time, marker=".", color='darkorange', s=1)
plt.ylim([0.0025, 0.004])
plt.xlabel('n-th(times)')
plt.ylabel('Calculation Time(ms)')
plt.axhline(y=mean, color='royalblue', linestyle=':', label='Mean')
# NOTE(review): legend label 'Media' looks like a typo for 'Median'; it is a
# runtime string, so left unchanged here.
plt.axhline(y=median, color='royalblue', linestyle='--', label='Media')
plt.axhline(y=mode, color='royalblue', linestyle='-.', label='Mode')
#plt.axhline(y=ave, xmin=0, xmax=5000, color='royalblue', label='Average')
plt.legend(bbox_to_anchor=(0.2, 0.95), loc='upper right', borderaxespad=0, fontsize=10)
#plt.legend(bbox_to_anchor=(1, 1.15), loc='upper right', borderaxespad=0, fontsize=10)
plt.show()
| [
"asce_salut666chere@icloud.com"
] | asce_salut666chere@icloud.com |
0fcec6e5041d5d33b2ff2d514fc4d29219789859 | 60e6ec8ac9c2efef5a2421dc743fb180ebe2983a | /2016/07/challenge.py | 75e16efe77c5524e3ac4b97cd3eef4f58690fdb9 | [] | no_license | cnorthwood/adventofcode | 8a4c515c2f8ad894b80f46b2f770fd608bf88923 | f3504cfbff379a0816a354ba8caea4d0a5836a8c | refs/heads/master | 2022-12-12T20:04:56.752641 | 2022-12-08T23:19:09 | 2022-12-08T23:19:09 | 75,319,235 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | from itertools import chain
import re
# ABBA: a four-character palindromic pair with two *distinct* characters
# (e.g. "xyyx" matches, "aaaa" does not).
PAIR_CHECK = re.compile(r'(?P<char1>.)(?P<char2>(?!(?P=char1)).)(?P=char2)(?P=char1)')
# ABA: wrapped in a lookahead so overlapping three-character "aba" triples
# are all captured (group 1 holds the triple).
TRIP_CHECK = re.compile(r'(?=((?P<char1>.)(?P<char2>(?!(?P=char1)).)(?P=char1)))')
def parse_line(line):
    """Split an IPv7 address into (supernet, hypernet) segment lists.

    Splitting on both bracket characters alternates between outside and
    inside text, so even-indexed pieces are supernet segments and
    odd-indexed pieces are the bracketed hypernet segments.
    """
    segments = re.split(r'[\[\]]', line)
    return segments[0::2], segments[1::2]
# NOTE: Python 2 only -- uses tuple parameter unpacking (removed in Py3).
# TLS support requires an ABBA in at least one supernet (outer) segment and
# no ABBA in any hypernet (inner) segment.
def supports_tls((outer, inner)):
    return True in map(lambda p: bool(PAIR_CHECK.search(p)), outer) and \
        True not in map(lambda p: bool(PAIR_CHECK.search(p)), inner)
# NOTE: Python 2 only -- uses tuple parameter unpacking (removed in Py3).
# SSL support requires an "aba" in some supernet segment and the matching
# inverted "bab" in some hypernet segment.
def supports_ssl((outer, inner)):
    # Collect every distinct "aba" triple found in the supernet parts.
    triples = set(chain(*(map(lambda r: r[0], TRIP_CHECK.findall(part)) for part in outer)))
    if len(triples) == 0:
        return False
    for triple in triples:
        # Invert aba -> bab and look for it inside any hypernet segment.
        check_triple = triple[1] + triple[0] + triple[1]
        for inner_part in inner:
            if check_triple in inner_part:
                return True
    # Implicit None (falsy) when no bab matches -- callers only use this in
    # a boolean context (assert / filter), so that is sufficient here.
# Sanity checks taken from the puzzle's worked examples.
assert supports_tls(parse_line('abba[mnop]qrst'))
assert not supports_tls(parse_line('abcd[bddb]xyyx'))
assert not supports_tls(parse_line('aaaa[qwer]tyui'))
assert supports_tls(parse_line('ioxxoj[asdfgh]zxcvbn'))

assert supports_ssl(parse_line('aba[bab]xyz'))
assert not supports_ssl(parse_line('xyx[xyx]xyx'))
assert supports_ssl(parse_line('aaa[kek]eke'))
assert supports_ssl(parse_line('zazbz[bzb]cdb'))

# Python 2 script: print is a statement and map/filter return lists, so
# reusing `lines` for both parts is safe here (under Python 3 the second
# filter would see an exhausted iterator).  `input` shadows the builtin,
# which is harmless in this short script.
with open('input.txt') as input:
    lines = map(parse_line, input)
    print "Part 1:", len(filter(supports_tls, lines))
    print "Part 2:", len(filter(supports_ssl, lines))
| [
"chris@pling.org.uk"
] | chris@pling.org.uk |
9419f84c744e53f3a8947c64d4f2b6cd11e78526 | c120540b1326cbd060394f370f5ca4d4f33b4b24 | /mango/wsgi.py | 6189499a2259395a13814ccb484bf30e7e042282 | [] | no_license | ashutosh-roy/Mango-Backend | f82a20ed4af861d064441276176be2af6b26a26e | 033b6a0730a5b4f4407ed3a254c556bda9f19196 | refs/heads/master | 2022-04-25T13:29:30.036540 | 2020-04-24T03:40:01 | 2020-04-24T03:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for mango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the handler
# (setdefault keeps any value already set in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mango.settings')

# WSGI servers (gunicorn, uWSGI, mod_wsgi) look up this module-level callable.
application = get_wsgi_application()
| [
"dopecoder007@gmail.com"
] | dopecoder007@gmail.com |
4f20e95b25cbfabfb9c8b9d5e1528a90cf03007e | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/1196-How-Many-Apples-Can-You-Put-Into-the-Basket/soln.py | 9360a70d049dfe0b5dbc07ebde42134ca66b794f | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 250 | py | class Solution:
def maxNumberOfApples(self, arr: List[int]) -> int:
basket = 0
cnt = 0
for num in sorted(arr):
if basket + num <= 5000:
basket += num
cnt += 1
return cnt
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
ed4e215c8260f6205f24fc9630e63c7000cf1cd8 | 29c6662d028e30e3d918aafc161a2973d50c9c3b | /context_processors.py | ec3a0d4c578a43575a6c11600147b8db69c55c3c | [
"WTFPL",
"Beerware"
] | permissive | brunobord/beeroverip | d0dacad018e0553cdbfaa4c578372a3ae7aeded2 | 8dfc43a5ba26e25281aa31883dfa71cbe075c739 | refs/heads/master | 2021-01-23T06:49:48.873101 | 2016-12-01T22:35:51 | 2016-12-01T22:39:52 | 53,926 | 5 | 1 | null | 2016-12-01T22:11:29 | 2008-09-18T13:47:20 | Python | UTF-8 | Python | false | false | 305 | py | from django.conf import settings
def analytics_id(request):
    """Context processor exposing ``settings.ANALYTICS_ID`` to templates.

    Returns ``{'ANALYTICS_ID': ...}`` when the setting is available;
    otherwise an empty dictionary, so templates can simply test for the key.
    """
    try:
        return {'ANALYTICS_ID': settings.ANALYTICS_ID}
    # Fixed: narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; any settings-related failure (AttributeError,
    # ImproperlyConfigured, ...) still yields the empty dict.
    except Exception:
        return {}
| [
"bruno@lachose"
] | bruno@lachose |
48fdfc9be378d4f91876aa5a652d34ada26e3a80 | a3fb8d6e9c31212b39171b90c5e09fccba022167 | /addons/io_scene_gltf2/blender/imp/material/map/emissivemap.py | cda6b93b021a1781f28269ad51fe76f20ff0f229 | [
"Apache-2.0"
] | permissive | dtysky/glTF-Blender-IO | 9d44d3f76923a5f6760bfa662006383604cc745b | 5488adce3d496c6db7b2fff121d4fb46962a02b2 | refs/heads/master | 2020-03-26T21:14:16.951297 | 2018-08-20T08:27:30 | 2018-08-20T08:27:30 | 145,376,303 | 0 | 0 | Apache-2.0 | 2018-08-20T06:36:42 | 2018-08-20T06:36:41 | null | UTF-8 | Python | false | false | 4,314 | py | """
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Julien Duroure.
*
* ***** END GPL LICENSE BLOCK *****
* This development is done in strong collaboration with Airbus Defence & Space
"""
from .map import *
class EmissiveMap(Map):
    def __init__(self, json, factor, gltf):
        # Thin constructor: all state handling lives in the base Map class.
        super(EmissiveMap, self).__init__(json, factor, gltf)
    def create_blender(self, mat_name):
        """Build the emissive texture node setup on the named Blender material.

        Only the Cycles render engine is handled; any other engine is a
        no-op for now (see TODO below).
        """
        engine = bpy.context.scene.render.engine
        if engine == 'CYCLES':
            self.create_blender_cycles(mat_name)
        else:
            pass #TODO for internal / Eevee in future 2.8
def create_blender_cycles(self, mat_name):
material = bpy.data.materials[mat_name]
node_tree = material.node_tree
self.texture.blender_create()
# retrieve principled node and output node
if len([node for node in node_tree.nodes if node.type == "BSDF_PRINCIPLED"]) != 0:
fix = [node for node in node_tree.nodes if node.type == "BSDF_PRINCIPLED"][0]
else:
# No principled, we are coming from an extenstion, probably
fix = [node for node in node_tree.nodes if node.type == "MIX_SHADER"][0]
output = [node for node in node_tree.nodes if node.type == 'OUTPUT_MATERIAL'][0]
# add nodes
emit = node_tree.nodes.new('ShaderNodeEmission')
emit.location = 0,1000
separate = node_tree.nodes.new('ShaderNodeSeparateRGB')
separate.location = -750, 1000
combine = node_tree.nodes.new('ShaderNodeCombineRGB')
combine.location = -250, 1000
mapping = node_tree.nodes.new('ShaderNodeMapping')
mapping.location = -1500, 1000
uvmap = node_tree.nodes.new('ShaderNodeUVMap')
uvmap.location = -2000,1000
uvmap["gltf2_texcoord"] = self.texCoord # Set custom flag to retrieve TexCoord
text = node_tree.nodes.new('ShaderNodeTexImage')
text.image = bpy.data.images[self.texture.image.blender_image_name]
text.location = -1000,1000
add = node_tree.nodes.new('ShaderNodeAddShader')
add.location = 500,500
math_R = node_tree.nodes.new('ShaderNodeMath')
math_R.location = -500, 1500
math_R.operation = 'MULTIPLY'
math_R.inputs[1].default_value = self.factor[0]
math_G = node_tree.nodes.new('ShaderNodeMath')
math_G.location = -500, 1250
math_G.operation = 'MULTIPLY'
math_G.inputs[1].default_value = self.factor[1]
math_B = node_tree.nodes.new('ShaderNodeMath')
math_B.location = -500, 1000
math_B.operation = 'MULTIPLY'
math_B.inputs[1].default_value = self.factor[2]
# create links
node_tree.links.new(mapping.inputs[0], uvmap.outputs[0])
node_tree.links.new(text.inputs[0], mapping.outputs[0])
node_tree.links.new(separate.inputs[0], text.outputs[0])
node_tree.links.new(math_R.inputs[0], separate.outputs[0])
node_tree.links.new(math_G.inputs[0], separate.outputs[1])
node_tree.links.new(math_B.inputs[0], separate.outputs[2])
node_tree.links.new(combine.inputs[0], math_R.outputs[0])
node_tree.links.new(combine.inputs[1], math_G.outputs[0])
node_tree.links.new(combine.inputs[2], math_B.outputs[0])
node_tree.links.new(emit.inputs[0], combine.outputs[0])
# following links will modify PBR node tree
node_tree.links.new(add.inputs[0], emit.outputs[0])
node_tree.links.new(add.inputs[1], fix.outputs[0])
node_tree.links.new(output.inputs[0], add.outputs[0])
| [
"nopper@ux3d.io"
] | nopper@ux3d.io |
ff9299ad1816849e18e03e4b2368053082d109cf | 2c7cafcb634501494abe745a361dd31b78502cce | /scripts/create_interface_timeseries.py | a0b9187d6ca6f5b90056317e6d7f2a42aace7cc9 | [] | no_license | pism/gris-analysis | daf868a9fce8e604cf6bdf87a94e28ab0de74579 | 3b67dcb888f08e451d36582f02fb759e14bd95eb | refs/heads/master | 2022-11-30T06:56:47.118396 | 2022-11-18T23:55:53 | 2022-11-18T23:55:53 | 48,613,554 | 0 | 3 | null | 2022-11-18T23:55:54 | 2015-12-26T14:38:22 | Python | UTF-8 | Python | false | false | 1,903 | py | #!/usr/bin/env python
import numpy as np
from argparse import ArgumentParser
from netCDF4 import Dataset as NC
from netcdftime import utime
import gdal
import ogr
import osr
import os
from pyproj import Proj
import logging
import logging.handlers
try:
import pypismtools.pypismtools as ppt
except:
import pypismtools as ppt
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.handlers.RotatingFileHandler('extract.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s')
# add formatter to ch and fh
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.addHandler(fh)
parser = ArgumentParser(
description='''A script to extract interfaces (calving front, ice-ocean, or groundling line) from a PISM netCDF file, and save it as a shapefile (polygon).''')
parser.add_argument("FILE", nargs=1)
parser.add_argument("-o", "--output_filename", dest="out_file",
help="Name of the output file", default='plot.pdf')
options = parser.parse_args()
filename = options.FILE[0]
ofile = options.out_file
driver = ogr.GetDriverByName('ESRI Shapefile')
ds = driver.Open(filename)
layer = ds.GetLayer()
cnt = layer.GetFeatureCount()
dates = []
t = []
lengths = []
for feature in layer:
dates.append(feature.GetField('timestamp'))
t.append(feature.GetField('timestep'))
geom = feature.GetGeometryRef()
length = geom.GetArea() / 2.
lengths.append(length)
del ds
dates = np.array(dates)
t = np.array(t)
lengths = np.array(lengths)
import pylab as plt
plt.plot(t, lengths/1e3, 'o')
plt.savefig(ofile)
| [
"aaschwanden@alaska.edu"
] | aaschwanden@alaska.edu |
1a578abefeb2ed72ce8ae2e7c04415c91c7889f2 | 07f5251f6a237f643da841a2ba04e721b30cc2a3 | /mac/shop/admin.py | a3e69c9fe618153fcd87ea300daf892c18e558c4 | [] | no_license | vibhore3999/Ecommerce | ee4cdc094ab60097968e5bbae1615dd1ef8c0374 | a9050e780bfeefc1e31fb1a3150b2245d0338cfd | refs/heads/master | 2022-11-11T09:04:01.281353 | 2020-06-29T17:01:27 | 2020-06-29T17:01:27 | 275,876,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
# Register your models here.
from .models import product
admin.site.register(product)
| [
"vibhor3999joshi@gmail.com"
] | vibhor3999joshi@gmail.com |
f37ff415b8017dd882a9e147fa178c187e063ea5 | 737c0971004b5bff58c51c26aa56f5126e4b056f | /setup.py | bf3642a2ad967196eda47766dd71292fb937d568 | [
"MIT"
] | permissive | nuki111/env_explore | 4b639c7bcc46110e5c19877282d0c95b7bb288cb | b5dfa05fbcfb0126e246e4ef4eb5a392a8615cf0 | refs/heads/master | 2022-06-06T16:47:45.744973 | 2020-05-04T21:31:44 | 2020-05-04T21:31:44 | 259,754,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="example-pkg-nuki111", # Replace with your own username
version="0.0.1",
author="Oscar Nuki",
author_email="oscar.nuki@gmail.com",
description='env_explore: Quick and easy exploration of python objects.',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
) | [
"32140106+nuki111@users.noreply.github.com"
] | 32140106+nuki111@users.noreply.github.com |
b8af51b692db93b260f28e7099526ad1a23e3231 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/pygments/lexers/bibtex.py | 7244ef2f7e8253dd9340e7807bfef42760ebeccb | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 4,727 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.bibtex
~~~~~~~~~~~~~~~~~~~~~~
Lexers for BibTeX bibliography data and styles
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, default, \
words
from pygments.token import Name, Comment, String, Error, Number, Text, \
Keyword, Punctuation
__all__ = ['BibTeXLexer', 'BSTLexer']
class BibTeXLexer(ExtendedRegexLexer):
"""
A lexer for BibTeX bibliography data format.
.. versionadded:: 2.2
"""
name = 'BibTeX'
aliases = ['bib', 'bibtex']
filenames = ['*.bib']
mimetypes = ["text/x-bibtex"]
flags = re.IGNORECASE
ALLOWED_CHARS = r'@!$&*+\-./:;<>?\[\\\]^`|~'
IDENTIFIER = '[{0}][{1}]*'.format('a-z_' + ALLOWED_CHARS, r'\w' + ALLOWED_CHARS)
def open_brace_callback(self, match, ctx):
opening_brace = match.group()
ctx.opening_brace = opening_brace
yield match.start(), Punctuation, opening_brace
ctx.pos = match.end()
def close_brace_callback(self, match, ctx):
closing_brace = match.group()
if (
ctx.opening_brace == '{' and closing_brace != '}' or
ctx.opening_brace == '(' and closing_brace != ')'
):
yield match.start(), Error, closing_brace
else:
yield match.start(), Punctuation, closing_brace
del ctx.opening_brace
ctx.pos = match.end()
tokens = {
'root': [
include('whitespace'),
('@comment', Comment),
('@preamble', Name.Class, ('closing-brace', 'value', 'opening-brace')),
('@string', Name.Class, ('closing-brace', 'field', 'opening-brace')),
('@' + IDENTIFIER, Name.Class,
('closing-brace', 'command-body', 'opening-brace')),
('.+', Comment),
],
'opening-brace': [
include('whitespace'),
(r'[{(]', open_brace_callback, '#pop'),
],
'closing-brace': [
include('whitespace'),
(r'[})]', close_brace_callback, '#pop'),
],
'command-body': [
include('whitespace'),
(r'[^\s\,\}]+', Name.Label, ('#pop', 'fields')),
],
'fields': [
include('whitespace'),
(',', Punctuation, 'field'),
default('#pop'),
],
'field': [
include('whitespace'),
(IDENTIFIER, Name.Attribute, ('value', '=')),
default('#pop'),
],
'=': [
include('whitespace'),
('=', Punctuation, '#pop'),
],
'value': [
include('whitespace'),
(IDENTIFIER, Name.Variable),
('"', String, 'quoted-string'),
(r'\{', String, 'braced-string'),
(r'[\d]+', Number),
('#', Punctuation),
default('#pop'),
],
'quoted-string': [
(r'\{', String, 'braced-string'),
('"', String, '#pop'),
(r'[^\{\"]+', String),
],
'braced-string': [
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
(r'[^\{\}]+', String),
],
'whitespace': [
(r'\s+', Text),
],
}
class BSTLexer(RegexLexer):
"""
A lexer for BibTeX bibliography styles.
.. versionadded:: 2.2
"""
name = 'BST'
aliases = ['bst', 'bst-pybtex']
filenames = ['*.bst']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
(words(['read', 'sort']), Keyword),
(words(['execute', 'integers', 'iterate', 'reverse', 'strings']),
Keyword, ('group')),
(words(['function', 'macro']), Keyword, ('group', 'group')),
(words(['entry']), Keyword, ('group', 'group', 'group')),
],
'group': [
include('whitespace'),
(r'\{', Punctuation, ('#pop', 'group-end', 'body')),
],
'group-end': [
include('whitespace'),
(r'\}', Punctuation, '#pop'),
],
'body': [
include('whitespace'),
(r"\'[^#\"\{\}\s]+", Name.Function),
(r'[^#\"\{\}\s]+\$', Name.Builtin),
(r'[^#\"\{\}\s]+', Name.Variable),
(r'"[^\"]*"', String),
(r'#-?\d+', Number),
(r'\{', Punctuation, ('group-end', 'body')),
default('#pop'),
],
'whitespace': [
(r'\s+', Text),
('%.*?$', Comment.SingleLine),
],
}
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
38fa79f99aea3bcb55ec77e25b11b3af7398cb1b | 5a40bf2c1e21be61b925011a5729471bb08b0ade | /Picture.py | f117d2df8ae031be473e361172d9e3f1845bd458 | [] | no_license | yezhanglang/PythonExample | 4f50dea3fef04e45eafa6bc67d48038dd7698e7a | bc589ac31cdfd8a3c25bd583ac0a9da82bed67d7 | refs/heads/master | 2021-09-08T10:57:51.468318 | 2018-03-09T11:05:05 | 2018-03-09T11:05:05 | 124,529,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | #coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# evenly sampled time at 200ms intervals
t = np.arange(0., 5., 0.2)
print(t)
# red dashes, blue squares and green triangles
plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
plt.show()
| [
"1129583207@qq.com"
] | 1129583207@qq.com |
a20633d723be19da647daace0f688649a009a633 | 872417cb0dd86747e45dd278a82fd58ec05a4781 | /graph.py | 345ea44d8e1f6a7d8681eef7954995f0b805b9d9 | [] | no_license | hebaflemban/datastructure- | fe8c046fb83860c0679c7e0ca19fdf18f35a8cd4 | abbb6fe7968a3a86d2ebf5f797ffac369839b9d4 | refs/heads/master | 2022-12-08T15:23:14.137083 | 2020-08-26T22:34:49 | 2020-08-26T22:34:49 | 289,006,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,347 | py | class Vertex:
def __init__(self, data):
self.data = data
self.edges = {}
def add_edge(self, to_data, time, cost):
self.edges[to_data] = {"time": time, "cost": cost}
def get_edges(self):
return self.edges.keys()
def get_weight(self, vertex_b ):
return self.edges[vertex_b]["time"], self.edges[vertex_b]["cost"]
class Graph:
def __init__(self, dircted = False):
self.vertices = {} #empty Graph
self.dircted = dircted
def add_vertex(self, vertex):
self.vertices[vertex.data] = vertex
def add_edge(self, vertex_a, vertex_b, time, cost ):
self.vertices[vertex_a.data].add_edge(vertex_b.data, time, cost)
if not self.dircted:
self.vertices[vertex_b.data].add_edge(vertex_a.data, time, cost)
def get_edges(self, vertex):
return self.vertices[vertex.data].get_edges()
def get_weight(self, vertex_a, vertex_b):
return self.vertices[vertex_a].get_weight(vertex_b)
def path_exists(self, vertex_a, vertex_b):
to_check = [vertex_a]
checked = []
while to_check:
current = to_check.pop(0)
checked.append(current)
if current == vertex_b:
return True
else:
options = self.vertices[current].get_edges()
to_check += [vertex for vertex in options if vertex not in checked]
return False
def get_path(flights_graph, vertex_a, vertex_b, path=[]):
current = vertex_a
path = path + [current]
if current == vertex_b:
return path
for vertex in flights_graph.vertices[current].get_edges():
if vertex not in path:
new_path = get_path(flights_graph, vertex, vertex_b, path)
if new_path:
return new_path
cities = ['Kuwait', "Dubai", "Colombo", "Male", "Doha", "Tokyo", "Oslo"]
# [ 0, 1 , 2 , 3, 4 , 5, 6 ]
cities = [ Vertex(city) for city in cities ]
flights_graph = Graph()
for city in cities:
flights_graph.add_vertex(city)
flights_graph.add_edge( cities[0], cities[1], 2, 120)
flights_graph.add_edge( cities[0], cities[2], 4, 200)
flights_graph.add_edge( cities[2], cities[3], 1, 60)
flights_graph.add_edge( cities[1], cities[4], 1.5, 100)
flights_graph.add_edge( cities[4], cities[5], 11, 500)
flights_graph.add_edge( cities[1], cities[6], 6, 300)
print("--"*50 + "\n******** Wlcome to Coded Airlines ********\n where flights are imaginary \n" + "--"*50+"\n\n")
print("Our airline has multiple flights. \n ")
for i,c in enumerate(flights_graph.vertices.keys()):
print(f"{i}. {c}")
from_c = int(input("\nPlease write down the (number) of the city you wanna travel from: "))
choice = input("\nDo you want direct flights? (1) or indirect flights? (2) ")
if choice == "1":
print(f"\nDirect flights from {cities[from_c].data} can go to: ")
for i,c in enumerate(flights_graph.get_edges(cities[from_c])):
print(f"{i}. {c}")
to_options = [city for city in flights_graph.get_edges(cities[from_c])]
to_c = int(input("\nWhere do you wanna go? (the number) "))
time, cost = flights_graph.get_weight(cities[from_c].data, to_options[to_c])
print("\n"+ "--"*50)
print(f"Trip from: {cities[from_c].data} to: {to_options[to_c]} \nTotal: {cost}$ || Duration: {time} hours")
print("--"*50+"\n")
else:
to_cities = [ city.data for city in cities if flights_graph.path_exists(cities[from_c].data, city.data) and cities[from_c].data != city.data]
print(f"\nFrom {cities[from_c].data} you can go to: ")
for i,c in enumerate(to_cities):
print(f"{i}. {c}")
to_c = int(input("\nWhere do you wanna go? (the number) "))
path = get_path(flights_graph, cities[from_c].data, to_cities[to_c])
total_cost = 0
total_time =0
for i in range(0,len(path)-1,1):
time, cost = flights_graph.get_weight(path[i], path[i+1])
print(f"\n\nFlight{i+1} -> From: {path[i]} To: {path[i+1]} || Cost: {cost}$ & Duration: {time} hours" )
total_cost += cost
total_time += time
print("\nSummary:\n" + "--"*50 + f"\nA flight From: {cities[from_c].data} To: {to_cities[to_c]} would have a total Cost: {total_cost}$ & Duration: {total_time} hours\n" + "--"*50 )
| [
"36724067+heba@users.noreply.github.com"
] | 36724067+heba@users.noreply.github.com |
748c9781d7160fac7b7e3a1001afc1bb780a25ec | ded80657d1714996f01b450645fe2f4e1971cef2 | /WebScraper/spiders/to_db_spider.py | 9286d10be5103c0de0970e8d268e02f7aa4630f7 | [] | no_license | KarenW-DSBA/TopicModels | 614d4895e4bba836b2297bf424f279bdb365e888 | 27613085869fa0b40575e518788bb5156016bc22 | refs/heads/master | 2020-03-25T12:13:21.817517 | 2018-08-09T20:57:07 | 2018-08-09T20:57:07 | 143,760,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,254 | py | from __future__ import unicode_literals
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
import PyPDF2
from PyPDF2 import PdfFileReader
from PyPDF2 import utils
import requests
from requests.auth import HTTPBasicAuth
import datetime
import scrapy
import sys
import urllib
import pdfquery
import pandas
import sqlite3
from sqlite3 import Error
PATH = ''
LOGIN_URL = ''
USER = ''
PASSWORD = ''
DOMAIN = ''
sys.path.insert(0, PATH)
from scrapy import FormRequest
from loginform import fill_login_form
class MySpider(scrapy.Spider):
def __init__(self, *args, **kwargs):
dispatcher.connect(self.spider_closed, signals.spider_closed)
super(MySpider, self).__init__(*args, **kwargs)
name = 'to_db_spider'
custom_settings = {
'FEED_URI': 'OUTPUT.csv',
'LOG_ENABLED': True,
}
login_url = LOGIN_URL # authentication for some websites
userid = USER
password = PASSWORD
def create_connection(self, db_file):
try:
conn = sqlite3.connect(db_file)
conn.text_factory = str # necessary to prevent UTF-8 encoding errors
print(sqlite3.version)
except Error as e:
print(e)
return conn
def get_unscraped_urls(self, c):
c.execute("SELECT * FROM urls where scraped = 0;")
return c.fetchall()
def connect_to_db(self):
conn = self.create_connection("C:/sqlite/db/pythonsqlite.db")
self.conn = conn
return conn.cursor()
def set_db_entry(self, c, item, row):
print("Setting database entry for this scrape...")
try:
c.execute(""" UPDATE urls
SET scraped = 1,
scraped_title = ?,
scraped_content = ?,
scrape_date = ?
WHERE row_id = ?; """, [item['scraped_title'], item['scraped_content'], item['scrape_date'], row[19]])
self.conn.commit()
except Error as e:
print(e)
def start_requests(self):
'''call parse_login function to authenticate & call parse function to start scraping'''
yield scrapy.Request(self.login_url, self.parse_login)
def parse_login(self, response):
'''perform authentication for urls that require it'''
data, url, method = fill_login_form(response.url, response.body, self.userid, self.password)
return FormRequest(url, formdata = dict(data), method = method, callback = self.start_crawl)
def start_crawl(self, response):
c = self.connect_to_db()
to_scrape = self.get_unscraped_urls(c)
for row in to_scrape:
url = row[0]
yield scrapy.Request(url, callback = self.parse, meta = {'current_scrape':row, 'c':c})
def spider_closed(self):
'''
Whence the spider has finished all of its scrapes, we:
1: Commit our changes to the database
2: Close the connection
'''
self.conn.close()
print("All scrapes finished, commit changes to database")
def download_pdf(self, path, content):
f = open(path, 'wb')
f.write(content)
f.close()
def parse(self, response):
date = (datetime.datetime.today()).strftime('%m/%d/%Y')
if '.pptx' in response.url:
pass
if '.pdf' in response.url:
path = (filepath).encode('utf-8')
self.download_pdf(path, response.body)
try:
pdf = PdfFileReader(path)
except utils.PdfReadError("EOF marker not found"):
raise
text = ''
for i in range(pdf.numPages):
text += pdf.getPage(i).extractText() + " "
elif DOMAIN in response.url:
title = response.xpath("//h1/text()|//h2/text()|//h3/text()|//h4/text()|//h5/text()|//h6/text()").extract()
text = response.xpath('//p/text()|//p[@class]/text()').extract()
else:
title = response.xpath("//h1/text()|//h2/text()|//h3/text()|//h4/text()|//h5/text()|//h6/text()").extract()
text = response.xpath('//div[@class]/text()').extract()
try:
title
except NameError:
print("No title for this request, creating empty...")
title = []
try:
text
except NameError:
print("No text for this request, creating empty...")
text = []
try:
date
except NameError:
print('No date for this request, creating empty...')
date = (datetime.datetime.today()).strftime('%m/%d/%Y')
# check for redirect in url
if 'redirect_urls' in response.meta:
url = response.meta['redirect_urls'][-1]
else:
url = response.request.url
current_scrape = response.meta['current_scrape']
item = {
'scraped_web_page_url': url,
'scraped_title': ''.join(title).encode('utf-8'),
'scraped_content': ''.join(text).encode('utf-8'),
'scrape_date': date,
}
yield self.set_db_entry(response.meta['c'], item, current_scrape) | [
"karen.woo@essec.edu"
] | karen.woo@essec.edu |
ec6208492c21ec47df7f55580df8205872adb590 | 30225cc8958e698c95c08dc710d1d2d87347ad0d | /server/accounts/serializers.py | 699b09c6065d0decbd21737dea96219719ac1cc2 | [] | no_license | HYUNJUN-KANG/1117 | 9e42fc504bbd944fce39ea38dcfd2bd08a362114 | 75d17028ea7c0454c5c14108cdf532b87213f939 | refs/heads/master | 2023-01-19T04:22:30.342711 | 2020-11-17T08:49:51 | 2020-11-17T08:49:51 | 313,558,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from rest_framework import serializers
from django.contrib.auth import get_user_model
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
class Meta:
model = get_user_model()
fields = ['username', 'password',] | [
"rkdguswns94@naver.com"
] | rkdguswns94@naver.com |
e4d607873985483e8219f5bfdac8e164bd7c6482 | b994bae7345b15b8bb045dec636b3db4b7be3e50 | /deepstar.py | 6b6457e2bc0ea259fda80e213765ac6301dcf8ed | [] | no_license | MicronOxford/cockpit-lasers | d80e0aba066541e6c422ecbfd4e9a9c68e1b9df8 | 6e071eff4b0e27cf64b64cd36ccd1ea3b6019253 | refs/heads/master | 2020-12-30T09:57:38.129262 | 2019-07-22T11:27:58 | 2019-07-22T11:27:58 | 27,595,972 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,342 | py | import Pyro4
import serial
import socket
import threading
import time
CONFIG_NAME = 'deepstar'
CLASS_NAME = 'DeepstarLaser'
class DeepstarLaser:
def __init__(self, serialPort, baudRate, timeout):
print "Connecting to laser on port",serialPort,"with rate",baudRate,"and timeout",timeout
self.connection = serial.Serial(port = serialPort,
baudrate = baudRate, timeout = timeout,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE)
# If the laser is currently on, then we need to use 7-byte mode; otherwise we need to
# use 16-byte mode.
self.write('S?')
response = self.readline()
print "Current laser state: [%s]" % response
## Simple passthrough.
def read(self, numChars):
return self.connection.read(numChars)
## Simple passthrough.
def readline(self):
return self.connection.readline().strip()
## Send a command.
def write(self, command):
# We'll need to pad the command out to 16 bytes. There's also a 7-byte mode but
# we never need to use it.
commandLength = 16
# CR/LF count towards the byte limit, hence the -2.
command = command + (' ' * (commandLength - 2 - len(command)))
response = self.connection.write(command + '\r\n')
return response
## Get the status of the laser, by sending the
# STAT0, STAT1, STAT2, and STAT3 commands.
def getStatus(self):
result = []
for i in xrange(4):
self.write('STAT%d' % i)
result.append(self.readline())
return result
## Turn the laser ON. Return True if we succeeded, False otherwise.
def enable(self):
print "Turning laser ON at %s" % time.strftime('%Y-%m-%d %H:%M:%S')
self.write('LON')
response = self.readline()
#Set power to something small
self.setPower(0.01)
#Turn on deepstar mode with internal voltage ref
print "Enable response: [%s]" % response
self.write('L2')
response = self.readline()
print "L2 response: [%s]" % response
#Enable internal peak power
self.write('IPO')
response = self.readline()
print "Enable-internal peak power response [%s]" % response
#Set MF turns off internal digital and bias modulation
self.write('MF')
response = self.readline()
print "MF response [%s]" % response
if not self.getIsOn():
# Something went wrong.
self.write('S?')
print "Failed to turn on. Current status:", self.readline()
return False
return True
## Turn the laser OFF.
def disable(self):
print "Turning laser OFF at %s" % time.strftime('%Y-%m-%d %H:%M:%S')
self.write('LF')
return self.readline()
## Return True if the laser is currently able to produce light. We assume this is equivalent
# to the laser being in S2 mode.
def getIsOn(self):
self.write('S?')
response = self.readline()
print "Are we on? [%s]" % response
return response == 'S2'
def setPower(self, level):
if (level > 1.0) :
return
print "level=",level
power=int (level*0xFFF)
print "power=",power
strPower = "PP%03X" % power
print "power level = ",strPower
self.write(strPower)
response = self.readline()
print "Power response [%s]" % response
return response
def getMaxPower_mW(self):
# Max power in mW is third token of STAT0.
self.write('STAT0')
response = self.readline()
return int(response.split()[2])
def getPower(self):
self.write('PP?')
response = self.readline()
return int('0x' + response.strip('PP'), 16)
def getPower_mW(self):
maxPower = self.getMaxPower_mW()
power = self.getPower()
return maxPower * float(power) / float(0xFFF)
def setPower_mW(self, mW):
maxPower = self.getMaxPower_mW()
level = float(mW) / maxPower
self.setPower(level)
if __name__ == "__main__":
## Only run when called as a script --- do not run on include.
# This way, we can use an interactive shell to test out the class.
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--port", type="int", dest="net_port", default=7776, help="TCP port to listen on for service", metavar="PORT_NUMBER")
parser.add_option("-n", "--name", dest="service_name", default='pyro488DeepstarLaser', help="name of service", metavar="NAME")
parser.add_option("-s", "--serial", type="int", dest="serial_port", default=1, help="serial port number", metavar="PORT_NUMBER")
parser.add_option("-b", "--baud", type="int", dest="baud_rate", default=9600, help="serial port baud rate in bits/sec", metavar="RATE")
(options, args) = parser.parse_args()
laser = DeepstarLaser(options.serial_port, options.baud_rate, 2)
daemon = Pyro4.Daemon(port = options.net_port,
host = socket.gethostbyname(socket.gethostname()))
Pyro4.Daemon.serveSimple(
{laser: options.service_name},
daemon = daemon, ns = False, verbose = True)
| [
"mick.phillips@gmail.com"
] | mick.phillips@gmail.com |
87dc76e9a1040c5ecdda9dc379dae79df0104602 | e05e6c0276a60c0f82927240b5ce36ce1f6460ec | /big/debug/rxtx_ofdm.py | 48041879c9bcc464adfcfdd56e09ea94f02e71f9 | [] | no_license | yoshiV3/ofdm_transceiver | ee296993621021239a1c95c73f737a29ca319a9b | f09ed1daeba7bd0e3cfe86d09fa42180ff106870 | refs/heads/master | 2022-05-16T19:28:01.807028 | 2020-04-19T09:32:11 | 2020-04-19T09:32:11 | 254,823,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,423 | py | import numpy
from gnuradio import gr, blocks, fft, analog
from gnuradio import digital
import settings
import helpers
import parameters_ofdm as para
import tagger
import framer
import ownHeader
symbol_settings = para.ofdm_symbol() #object with settings concerning OFDM symbols
SYNC_ONE = symbol_settings._generate_sync_word_one() #generating the first sync word
SYNC_TWO = symbol_settings._generate_sync_word_two() #generating the second sync word
SYM_PILOT = symbol_settings._generate_pilot_symbols() #generating the thrid sync word
"""
hierarchical block that generates the OFDM frames and symbols
takes the data in bytes and outputs the complex signal that can be transmitted (by the Pluto sdr)
The incoming stream is split into two streams: a stream for the headers and a stream for the data
"""
class transmitter(gr.hier_block2):
def __init__(self):
gr.hier_block2.__init__(self, "ofdm_tx",
gr.io_signature(1, 1, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
self.constellationP = helpers.get_constellation(settings.PAYLOAD_BPS)
self.constellationH = helpers.get_constellation(settings.HEADER_BPS)
self.sync_words = [SYNC_ONE,SYNC_TWO]
#tag the received bytes
tagger_0 = tagger.blk(settings.PAYLOAD_BPS, settings.LENGTH_TAG_KEY,1,0) #divides the incoming byte stream into packets and append the meta data
"""
this part will handle the header stream
from the tagged data stream the header is derived, scrambled and modulated
"""
header_gen = ownHeader.generate_header_bb(settings.LENGTH_TAG_KEY) #geneating the header
scramblerH = digital.digital.additive_scrambler_bb( #srambling the header bytes
0x8a,
settings.SCRAMBLED_SEED,
7,
0,
bits_per_byte = 8,
reset_tag_key = settings.LENGTH_TAG_KEY
)
"""
Modulating the data: first split the bytes into the size of the symbols (unpackerH) then transform the smaller groups of bits into the complex symbols
"""
unpackerH = blocks.repack_bits_bb(
8,
settings.HEADER_BPS,
settings.LENGTH_TAG_KEY
)
modulatorH = digital.chunks_to_symbols_bc(self.constellationH.points())
"""
This part wil handle the data stream
first a error detection code is appended to stream, then the data is scrambled, and modulated to complex symbols
"""
crc = digital.crc32_bb(False, settings.LENGTH_TAG_KEY) #error detection
scrambler = digital.digital.additive_scrambler_bb( #scrambling
0x8a,
settings.SCRAMBLED_SEED,
7,
0,
bits_per_byte = 8,
reset_tag_key = settings.LENGTH_TAG_KEY
)
"""
modulation: split into a group of bits with convenient size, transform the smaller groups into the complex symbols
"""
unpacker = blocks.repack_bits_bb(
8,
settings.PAYLOAD_BPS,
settings.LENGTH_TAG_KEY
)
modulator = digital.chunks_to_symbols_bc(self.constellationP.points())
"""
This part will handle the full ofdm frame, both data, header and preamble
first the payload and the header stream is combined, then the complex symbols are allocated the carrier tones and the pilot symbols are append and the preambles is inserted
Thereafter, the FFT is calculated, at last the cyclic prefixes are inserted
"""
header_payload_mux = blocks.tagged_stream_mux(
itemsize=gr.sizeof_gr_complex*1,
lengthtagname = settings.LENGTH_TAG_KEY,
tag_preserve_head_pos=1
)
#self.connect(modulator, blocks.tag_debug(gr.sizeof_gr_complex, "tagsmod"))
#gerating ofdm signals
allocator = digital.ofdm_carrier_allocator_cvc(
symbol_settings.get_fft_length(),
occupied_carriers=symbol_settings.get_carrier_tones(),
pilot_carriers =symbol_settings.get_pilot_tones(),
pilot_symbols = SYM_PILOT,
sync_words = self.sync_words,
len_tag_key =settings.LENGTH_TAG_KEY
)
#self.connect(allocator, blocks.tag_debug(gr.sizeof_gr_complex*symbol_settings.get_fft_length(), "tagsalocator"))
fft_ex = fft.fft_vcc(
symbol_settings.get_fft_length(),
False,
(),
True
)
prefixer = digital.ofdm_cyclic_prefixer(
symbol_settings.get_fft_length(),
symbol_settings.get_cp_length() + symbol_settings.get_fft_length(),
0,
settings.LENGTH_TAG_KEY
)
print("All blocks initialized correctly")
self.connect(self, tagger_0)
self.connect(
tagger_0,
header_gen,
scramblerH,
unpackerH,
modulatorH,
(header_payload_mux,0)
)
self.connect(
tagger_0,
crc,
scrambler,
unpacker,
modulator,
(header_payload_mux,1)
)
self.connect(
header_payload_mux,
allocator,
fft_ex,
prefixer,
self
)
#self.connect(prefixer, blocks.file_sink(gr.sizeof_gr_complex,"tx.dat"))
print("All blocks properly connected")
#self.connect(header_gen, blocks.file_sink(1,'header.txt'))
"""
Hierarchical block to parse the received data stream
first we detect the incoming stream, then we correct the frequency offset
thirdly, we retrieve the header from the incoming stream, parse the header for the meta data
lastly, we retrieve the actual data
"""
class receiver(gr.hier_block2):
    """OFDM receive chain: complex baseband in, decoded payload bytes out.

    Stages: preamble detection (Schmidl & Cox) -> coarse frequency-offset
    correction -> header/payload demux -> header parsing (frame length is
    fed back to the demux) -> payload equalization, demodulation,
    descrambling and CRC check.
    """
    def __init__(self):
        # NOTE(review): the block label reads "ofdm_tx" although this is the
        # receiver; kept as-is because the label is runtime-visible.
        gr.hier_block2.__init__(self, "ofdm_tx",
                    gr.io_signature(1, 1, gr.sizeof_gr_complex),
                    gr.io_signature(1, 1, gr.sizeof_char))
        self.constellationP = helpers.get_constellation(settings.PAYLOAD_BPS)
        self.constellationH = helpers.get_constellation(settings.HEADER_BPS)
        # Schmidl & Cox sync: port 0 = fractional frequency offset, port 1 = trigger.
        detector = digital.ofdm_sync_sc_cfb(symbol_settings.get_fft_length(), symbol_settings.get_cp_length(), True)
        self.connect((detector,0), blocks.file_sink(gr.sizeof_float, "data/offset.dat"))
        # Delay the raw signal so it lines up with the detector's outputs.
        delayer = blocks.delay(gr.sizeof_gr_complex, symbol_settings.get_time_length_of_symbol()+5)
        # Oscillator driven by the detected offset; mixing removes the carrier offset.
        oscillator = analog.frequency_modulator_fc(-2.0 / symbol_settings.get_fft_length())
        splitter = digital.header_payload_demux(
            3,
            symbol_settings.get_fft_length(), symbol_settings.get_cp_length(),
            settings.LENGTH_TAG_KEY,
            "",
            True,
        )
        mixer = blocks.multiply_cc()
        self.connect(mixer, blocks.file_sink(gr.sizeof_gr_complex,"data/mixer_output.dat"))
        # ---- header branch ----
        header_fft = fft.fft_vcc(symbol_settings.get_fft_length(), True, (), True)
        chanest = digital.ofdm_chanest_vcvc(SYNC_ONE,SYNC_TWO, 1)
        #self.connect((chanest, 1),blocks.file_sink(gr.sizeof_gr_complex * symbol_settings.get_fft_length(), 'channel-estimate.dat'))
        header_equalizer = digital.ofdm_equalizer_simpledfe(
            symbol_settings.get_fft_length(),
            self.constellationH.base(),
            symbol_settings.get_carrier_tones(),
            symbol_settings.get_pilot_tones(),
            SYM_PILOT,
            symbols_skipped=0,
        )
        header_eq = digital.ofdm_frame_equalizer_vcvc(
            header_equalizer.base(),
            symbol_settings.get_cp_length(),
            settings.LENGTH_TAG_KEY,
            True,
            1 # Header is 1 symbol long
        )
        header_serializer = digital.ofdm_serializer_vcc(
            symbol_settings.get_fft_length(), symbol_settings.get_carrier_tones(),
            settings.LENGTH_TAG_KEY
        )
        header_demod = digital.constellation_decoder_cb(self.constellationH.base())
        header_repack = blocks.repack_bits_bb(settings.HEADER_BPS, 8, settings.LENGTH_TAG_KEY, True)
        # BUGFIX: was digital.digital.additive_scrambler_bb (stray extra module
        # qualifier); the payload path below uses digital.additive_scrambler_bb.
        scramblerH = digital.additive_scrambler_bb(
            0x8a,
            settings.SCRAMBLED_SEED,
            7,
            0,
            bits_per_byte = 8,
            reset_tag_key = settings.LENGTH_HEADER_KEY
        )
        self.connect(scramblerH, blocks.file_sink(gr.sizeof_char, "data/header.dat"))
        parser = ownHeader.parse_header_bb(settings.LENGTH_HEADER_KEY, settings.LENGTH_TAG_KEY,3,1,0)
        framer_0 = framer.blk(6,settings.LENGTH_HEADER_KEY)
        sender = ownHeader.send_to_multiplexer_b(settings.LENGTH_HEADER_KEY)
        # ---- payload branch ----
        payload_fft = fft.fft_vcc(symbol_settings.get_fft_length(), True, (), True)
        payload_equalizer = digital.ofdm_equalizer_simpledfe(
            symbol_settings.get_fft_length(),
            self.constellationP.base(),
            symbol_settings.get_carrier_tones(),
            symbol_settings.get_pilot_tones(),
            SYM_PILOT,
            symbols_skipped=1, # (that was already in the header)
            alpha=0.1
        )
        #self.connect(mixer, blocks.tag_debug(gr.sizeof_gr_complex, "header"))
        #self.connect(payload_fft, blocks.tag_debug(gr.sizeof_gr_complex*64, "payload"))
        payload_eq = digital.ofdm_frame_equalizer_vcvc(
            payload_equalizer.base(),
            symbol_settings.get_cp_length(),
            settings.LENGTH_TAG_KEY
        )
        payload_serializer = digital.ofdm_serializer_vcc(
            symbol_settings.get_fft_length(), symbol_settings.get_carrier_tones(),
            settings.LENGTH_TAG_KEY,
            settings.LENGTH_PACKET_KEY,
            1 # Skip 1 symbol (that was already in the header)
        )
        payload_demod = digital.constellation_decoder_cb(self.constellationP.base())
        payload_descrambler = digital.additive_scrambler_bb(
            0x8a,
            settings.SCRAMBLED_SEED,
            7,
            0, # Don't reset after fixed length
            bits_per_byte=8, # This is after packing
            reset_tag_key=settings.LENGTH_PACKET_KEY
        )
        payload_pack = blocks.repack_bits_bb(settings.PAYLOAD_BPS, 8, settings.LENGTH_PACKET_KEY, True)
        crc = digital.crc32_bb(True, settings.LENGTH_PACKET_KEY)
        gate = blocks.tag_gate(gr.sizeof_gr_complex,False)
        # Detect the preamble and align the delayed signal with the trigger.
        self.connect(self,detector)
        self.connect(self,delayer, (mixer,0))
        self.connect(gate,(splitter,0))
        self.connect(mixer, gate)
        #self.connect(delayer, (splitter,0))
        self.connect((detector,0), oscillator, (mixer,1))
        self.connect((detector,1),(splitter,1))
        # Header handling stream: parse the header for the frame meta data.
        self.connect((splitter,0),
            header_fft,
            chanest,
            header_eq,
            header_serializer,
            header_demod,
            header_repack,
            framer_0,
            scramblerH,
            parser,
            sender)
        self.msg_connect(sender, "header", splitter, "header_data") #feedback to the demux
        # Data handling stream: retrieve and check the actual payload.
        self.connect((splitter,1),
            payload_fft,
            payload_eq,
            payload_serializer,
            payload_demod,
            payload_pack,
            payload_descrambler,
            crc,
            self)
        #self.connect(scramblerH, blocks.file_sink(1,'post-payload-pack.txt'))
        #self.msg_connect(sender, "header", blocks.message_debug(), "print")
| [
"yoshi@LAPTOP-99C76T74.localdomain"
] | yoshi@LAPTOP-99C76T74.localdomain |
6eab8d6aaac5b2452833f9f78d5bac38cc1126d3 | 44b915463d2b3afbf101177e7e8d39df8688d55e | /codegame-/public/gameNew/gameNew/blockly/i18n/xliff_to_json.py | 8ab18dc4249a4247ecccefe743b9c44708a1bc76 | [] | no_license | den19980107/nkust-online-study | d843d56c5184bc38c1ae9eb9e31aff93527c118e | 0640d20101ef24f0d4b600f22c08606814510b1b | refs/heads/master | 2022-12-12T02:36:40.858311 | 2021-09-27T05:31:52 | 2021-09-27T05:31:52 | 170,542,266 | 7 | 1 | null | 2022-12-10T20:06:53 | 2019-02-13T16:40:29 | JavaScript | UTF-8 | Python | false | false | 8,560 | py | #!/usr/bin/python
# Converts .xlf files into .json files for use at http://translatewiki.net.
#
# Copyright 2013 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import subprocess
import sys
from xml.dom import minidom
from common import InputError
from common import write_files
# Global variables
args = None # Parsed command-line arguments.
def _parse_trans_unit(trans_unit):
    """Convert a <trans-unit> XML node into a plain dictionary.

    The result always contains 'key' and may contain 'source', 'target',
    plus one entry per <note> child, keyed by the note's 'from' attribute
    (typically 'description' and 'meaning').

    Args:
        trans_unit: An XML representation of a .xlf translation unit.

    Returns:
        A dictionary with useful information about the translation unit.

    Raises:
        InputError: A required field was not present.
    """
    def text_of(tag_name):
        # Return the concatenated XML of the single <tag_name> child,
        # None if absent, and complain if the tag appears more than once.
        matches = trans_unit.getElementsByTagName(tag_name)
        if not matches:
            return None
        if len(matches) == 1:
            return ''.join(node.toxml() for node in matches[0].childNodes)
        raise InputError('', 'Unable to extract ' + tag_name)

    unit_id = trans_unit.getAttribute('id')
    if not unit_id:
        raise InputError('', 'id attribute not found')
    unit = {'key': unit_id}
    # Source and target text, if present.
    try:
        unit['source'] = text_of('source')
        unit['target'] = text_of('target')
    except InputError as e:
        raise InputError(unit_id, e.msg)
    # Notes: the 'from' attribute names the entry, the text is its value.
    for note in trans_unit.getElementsByTagName('note'):
        origin = note.getAttribute('from')
        if origin and len(note.childNodes) == 1:
            unit[origin] = note.childNodes[0].data
        else:
            raise InputError(unit_id, 'Unable to extract ' + origin)
    return unit
def _process_file(filename):
    """Builds list of translation units from input file.

    Each translation unit in the input file includes:
    - an id (opaquely generated by Soy)
    - the Blockly name for the message
    - the text in the source language (generally English)
    - a description for the translator

    The Soy and Blockly ids are joined with a hyphen and serve as the
    keys in both output files.  The value is the corresponding text (in the
    <lang>.json file) or the description (in the qqq.json file).

    Args:
        filename: The name of an .xlf file produced by Closure.

    Raises:
        IOError: An I/O error occurred with an input or output file.
        InputError: The input file could not be parsed or lacked required
            fields.

    Returns:
        A list of dictionaries produced by parse_trans_unit().  Units whose
        description is 'ibid' (a legitimate repeat) are NOT included.
    """
    try:
        results = [] # list of dictionaries (return value)
        names = [] # list of names of encountered keys (local variable)
        try:
            parsed_xml = minidom.parse(filename)
        except IOError:
            # Don't get caught by below handler
            raise
        except Exception as e:
            # Blank line separates the parse error from earlier output.
            print()
            raise InputError(filename, str(e))
        # Make sure needed fields are present and non-empty.
        for trans_unit in parsed_xml.getElementsByTagName('trans-unit'):
            unit = _parse_trans_unit(trans_unit)
            for key in ['description', 'meaning', 'source']:
                if not key in unit or not unit[key]:
                    raise InputError(filename + ':' + unit['key'],
                                     key + ' not found')
            if unit['description'].lower() == 'ibid':
                if unit['meaning'] not in names:
                    # If the term has not already been described, the use of 'ibid'
                    # is an error.
                    raise InputError(
                        filename,
                        'First encountered definition of: ' + unit['meaning']
                        + ' has definition: ' + unit['description']
                        + '. This error can occur if the definition was not'
                        + ' provided on the first appearance of the message'
                        + ' or if the source (English-language) messages differ.')
                else:
                    # If term has already been described, 'ibid' was used correctly,
                    # and we output nothing.
                    pass
            else:
                if unit['meaning'] in names:
                    raise InputError(filename,
                                     'Second definition of: ' + unit['meaning'])
                names.append(unit['meaning'])
                results.append(unit)
        return results
    except IOError as e:
        # Fatal: report and exit instead of propagating to the caller.
        print('Error with file {0}: {1}'.format(filename, e.strerror))
        sys.exit(1)
def sort_units(units, templates):
    """Sorts the translation units by their definition order in the template.

    Args:
        units: A list of dictionaries produced by parse_trans_unit()
            that have a non-empty value for the key 'meaning'.
        templates: A string containing the Soy templates in which each of
            the units' meanings is defined.

    Returns:
        A new list of translation units, sorted by the order in which
        their meaning is defined in the templates.

    Raises:
        InputError: If a meaning definition cannot be found in the
            templates.
    """
    def definition_position(unit):
        # Position of `meaning = "<name>"` in the template text decides order.
        pattern = '\\smeaning\\s*=\\s*"{0}"\\s'.format(unit['meaning'])
        found = re.search(pattern, templates)
        if found is None:
            raise InputError(args.templates,
                             'msg definition for meaning not found: ' +
                             unit['meaning'])
        return found.start()

    return sorted(units, key=definition_position)
def main():
    """Parses arguments and processes the specified file.

    Converts the .xlf file named by --xlf into .json translation files in
    --output_dir, ordering messages by their definition order in the Soy
    templates, then deletes the input file.

    Raises:
        IOError: An I/O error occurred with an input or output file.
        InputError: Input files lacked required fields.
    """
    # Set up argument parser.
    parser = argparse.ArgumentParser(description='Create translation files.')
    parser.add_argument(
        '--author',
        default='Ellen Spertus <ellen.spertus@gmail.com>',
        help='name and email address of contact for translators')
    parser.add_argument('--lang', default='en',
                        help='ISO 639-1 source language code')
    parser.add_argument('--output_dir', default='json',
                        help='relative directory for output files')
    parser.add_argument('--xlf', help='file containing xlf definitions')
    parser.add_argument('--templates', default=['template.soy'], nargs='+',
                        help='relative path to Soy templates, comma or space '
                        'separated (used for ordering messages)')
    global args
    args = parser.parse_args()
    # Make sure output_dir ends with slash.
    if (not args.output_dir.endswith(os.path.sep)):
        args.output_dir += os.path.sep
    # Process the input file, and sort the entries.
    units = _process_file(args.xlf)
    # Concatenate all template files into one whitespace-joined string so
    # sort_units() can search for each message's meaning definition.
    files = []
    for arg in args.templates:
        for filename in arg.split(','):
            filename = filename.strip();
            if filename:
                with open(filename) as myfile:
                    files.append(' '.join(line.strip() for line in myfile))
    sorted_units = sort_units(units, ' '.join(files))
    # Write the output files.
    write_files(args.author, args.lang, args.output_dir, sorted_units, True)
    # Delete the input .xlf file.
    os.remove(args.xlf)
    print('Removed ' + args.xlf)
if __name__ == '__main__':  # script entry point: convert .xlf to .json files
    main()
| [
"den19980017"
] | den19980017 |
35a132de82de422a317f4dd99796aded0aa6cd35 | 0478abafc05f1dd55ddf6054d95fef73e9fa03e9 | /quati/dataset/corpora/corpus.py | e6bd4991ceec77b5855a2c9e56a7b22dab9eb589 | [
"MIT"
] | permissive | deep-spin/quati | 89bce0868b36b0d7902659507b72acfbd01ada98 | 62a6769475090182fe2990b2864d66f8e2081a32 | refs/heads/master | 2023-03-12T09:22:31.520259 | 2021-03-02T15:13:22 | 2021-03-02T15:13:22 | 330,678,540 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | class Corpus:
task = None
def __init__(self, fields_tuples, lazy=False):
"""
Base class for a Corpus.
Args:
fields_tuples (list): a list of tuples where the first element is an
attr name (str) and the second is a torchtext's Field object.
lazy (bool): whether to read this dataset lazily or not.
Default: False
"""
# list of name of attrs and their corresponding torchtext fields
fields_dict = dict(fields_tuples)
# hack for torchtext.Example.fromdict()
self.fields_dict = dict(zip(fields_dict.keys(), fields_dict.items()))
# lazy loading properties
self.lazy = lazy
self.corpus_path = None
# stored variables
self._current_line = 0
self._nb_examples = 0
self.closed = False
self.read_once = False
self.file = None
def _read(self, file):
"""
The method used to read the dataset.
Args:
file (io.Object): the instance of the corpus file
"""
raise NotImplementedError
def make_torchtext_example(self, *args, **kwargs):
"""Create a new torch.data.Example from args and kwargs."""
raise NotImplementedError
@staticmethod
def create_fields_tuples():
"""Create torchtext.data.Fields for this specific corpus"""
raise NotImplementedError
def read(self, corpus_path):
"""
Iteratively reads the corpus using __iter__() and self._read(), yielding
a generator in case self.lazy is True, or returning a list otherwise.
If you want a different logic for a specific corpus, you can subclass
this method. See imdb.py for an example.
Args:
corpus_path (str): path to a corpus (file).
Returns:
A generator of torchtext.data.Example if `self.lazy` is true, or a
list of torchtext.data.Example otherwise.
"""
self.corpus_path = corpus_path
self.open(self.corpus_path)
if self.lazy is True:
return self
else:
return list(self)
def __iter__(self):
for ex in self._read(self.file):
self._current_line += 1
yield ex
self.start_over()
self.read_once = True
def open(self, corpus_path):
self._current_line = 0
self.closed = False
self.file = open(corpus_path, 'r', encoding='utf8')
return self.file
def start_over(self):
self._nb_examples = self._current_line
self._current_line = 0
self.file.seek(0)
def close(self):
self.file.close()
self.file = None
self.closed = True
def __del__(self):
if self.file is not None:
self.close()
@property
def nb_examples(self):
if self.lazy is True and self.read_once is False:
raise ValueError('You should read the entire file at least once to '
'know the number of examples in it.')
return self._nb_examples
def __len__(self):
return self.nb_examples
| [
"marcosvtreviso@gmail.com"
] | marcosvtreviso@gmail.com |
9d15540f6211efb27cbab8ecc1d8a54e4d1f42ca | 6ba163412b7868bbb5c872caf6172b44d1acf24e | /withPython_ch3.2_Matplotlib/3_3_plt_plot.py | 98fb4ff1bd8297cb900e2e5a657dce05c587b470 | [] | no_license | sejhig2/openCV_practice_StepByStep | 6d6a238dcb389a89f68daa9b4f545f7021cb8170 | 5ad64a18cdf3202a2e4e060d5158dbef41927b64 | refs/heads/main | 2023-03-16T12:15:59.352848 | 2021-03-04T06:27:57 | 2021-03-04T06:27:57 | 343,457,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # 간단한 꺽은 선 그래프를 그려보자
import matplotlib.pyplot as plt
import numpy as np
# Draw a simple line graph: index on the x-axis, sample value on the y-axis.
a = np.array([2,6,7,3,12,8,4,5,])
plt.plot(a)
plt.show()
"sejhig2@gmail.com"
] | sejhig2@gmail.com |
fa75ce1da65d8ed9d84e3960332c9f29f8de9a6c | e0417265aed5e429fb3f0cd50211837f4a81f1d7 | /lpthw/part2/ex30.py | a2b739b22bb63e385fe5a3f1a0bd7a96aff2cfee | [] | no_license | zhikun-wang/python | a1e33ac3bc66f6d614e1585b351dc525b981b300 | 7bc378735a3df266f1f19f8bc133c962895fdc5d | refs/heads/master | 2021-09-16T02:57:53.019655 | 2018-06-15T06:20:25 | 2018-06-15T06:20:25 | 137,447,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | people = 30
cars = 40
buses = 15  # defined but not used by the comparison below
# Pick the announcement by comparing seats (cars) to people.
if cars > people:
    verdict = "We should take the cars"
elif cars < people:
    verdict = "We should not take the cars"
else:
    verdict = "We can't decide"
print(verdict)
"13840198958@163.com"
] | 13840198958@163.com |
702f3f07c0a94f208c0d253f043bdf66a04bb29c | 4a02e0a33c13467886eef2f28de57fe6a2049035 | /build/map_publisher/catkin_generated/pkg.installspace.context.pc.py | d40b252cc442a8ee9439e16bed23c7f632ce765f | [] | no_license | owalk/robotics | 97d13d2c6e050a0efddbe64797f52983d9b4f230 | d5a548602fa9d899259df096b7f08b1a9aa86cc6 | refs/heads/master | 2020-04-03T06:50:45.012191 | 2019-05-02T05:10:48 | 2019-05-02T05:10:48 | 155,085,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;nav_msgs;roscpp;rospy;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "map_publisher"
PROJECT_SPACE_DIR = "/home/osboxes/Desktop/robotics_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"oliverw56@gmail.com"
] | oliverw56@gmail.com |
6caea2f51b42877a3115bdd21a4c55e74a5c4d26 | a4a3e688d201fc22825d2de0a0db8af9f69b10f9 | /SalesPredictions.py | 6de2245c4884624e8694c3ed6d425c679ae90911 | [] | no_license | calabaza91/python | a37c30838a9374f41481b840dade8853c6abe4aa | 46c0607b66449956fb8cfda9632aa731b5fe040e | refs/heads/master | 2020-03-24T16:02:53.247519 | 2019-01-30T20:24:07 | 2019-01-30T20:24:07 | 142,810,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | #Sales Predictions
sales = float(input('Enter total amount of annual sales: '))

# Profit is projected at a flat 23% of annual sales.
profit = sales * 0.23

print(f'The expected profit for ${sales:,.2f} sales is ${profit:,.2f}.')
| [
"noreply@github.com"
] | calabaza91.noreply@github.com |
69281bb1113a44fd7fe27e18f925b6f3a563de18 | 5ca635087e3375cfc4c4feb1b734f653c6af57a2 | /post/api/serializers.py | 82a48c4026cf9676d77bef198f838b45c687acef | [] | no_license | danindraihya/blog-app-django | ac56c28c591fb2125c67f7b8c61c1aa549a22025 | 7316f69b2b399467a6a5753cc3dac646c4d71ebe | refs/heads/master | 2022-04-08T22:35:30.238054 | 2020-03-01T01:41:57 | 2020-03-01T01:41:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from rest_framework import serializers
from post.models import Posts
class BlogPostSerializer(serializers.ModelSerializer):
    """Serializer for Posts exposing the public blog fields.

    `slug` and `author` are generated server-side, so they are read-only.
    """

    class Meta:
        model = Posts
        fields = [
            'pk',
            'title',
            'content',
            'slug',
            'author',
            'created_at'
        ]
        read_only_fields = [
            'slug',
            'author'
        ]

    def validate_title(self, value):
        """Reject titles that already exist (case-insensitive).

        On update the instance being edited is excluded from the lookup,
        so re-saving a post with its own title still validates.
        """
        qs = Posts.objects.filter(title__iexact=value)
        if self.instance:
            # BUGFIX: was qs.exclude(pk=self.instance) -- the pk lookup must
            # be given the primary-key value, not the model instance itself.
            qs = qs.exclude(pk=self.instance.pk)
        if qs.exists():
            raise serializers.ValidationError("The title must be unique")
        return value
"chan.tsubasa@gmail.com"
] | chan.tsubasa@gmail.com |
df2f2fdbe213e6f993fee6bf44f55bc368791041 | 0b86530260d5b1d662706c3c7926bb639a54a551 | /reload2.py | fe4a9cf19de465c5dbdc7d4d8fabdfd93a7f1a29 | [] | no_license | super-rain/j2men-Calligraphy_recognition | 94ea912c2bea3b82bb40a7c8d3204490f3ece90e | 4b7316a83efc8e6d15012be533480eec3e847920 | refs/heads/master | 2023-03-19T11:32:32.959943 | 2019-04-15T08:15:50 | 2019-04-15T08:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import loadImage3 as li
import math
import configparser
tf.set_random_seed(1)  # make TensorFlow ops deterministic across runs
np.random.seed(1)  # make NumPy randomness deterministic across runs
#new_data = pd.read_csv("j2mentest.csv")
#new_data = new_data.values.astype(np.float32)
#np.random.shuffle(new_data)
#test_x = new_data
def reload():
    """Rebuild the CNN graph, restore trained weights from 'cnnnet/j2men',
    classify every image under ./test1 and append one prediction row per
    image to result.csv (five characters around the predicted index,
    looked up in j2men.cfg).
    """
    print('This is reload')
    # Build the entire net again so the checkpoint variables can be restored.
    tf_x = tf.placeholder(tf.float32, [None, 128*128]) / 255.
    image = tf.reshape(tf_x, [-1, 128, 128, 1])  # (batch, height, width, channel)
    # CNN: conv(16) -> pool -> conv(32) -> pool -> dense(1) regression output.
    conv1 = tf.layers.conv2d(  # shape (128, 128, 1)
        inputs=image,
        filters=16,
        kernel_size=5,
        strides=1,
        padding='same',
        activation=tf.nn.relu
    )  # -> (128, 128, 16)
    pool1 = tf.layers.max_pooling2d(
        conv1,
        pool_size=2,
        strides=2,
    )  # -> (64, 64, 16)
    conv2 = tf.layers.conv2d(pool1, 32, 5, 1, 'same', activation=tf.nn.relu)  # -> (64, 64, 32)
    pool2 = tf.layers.max_pooling2d(conv2, 2, 2)  # -> (32, 32, 32)
    flat = tf.reshape(pool2, [-1, 32*32*32])
    output = tf.layers.dense(flat, 1)  # single-value regression output
    cf = configparser.ConfigParser()
    cf.read("j2men.cfg")
    sess = tf.Session()
    # No variable initialization needed: we restore trained variables.
    saver = tf.train.Saver()  # saver for saving and restoring
    saver.restore(sess, 'cnnnet/j2men')
    c = li.get_imlist(r"test1")  # raw string: backslashes are not escape sequences
    d = len(c)  # number of images found
    data = np.empty((d, 128*128))  # one flattened image per row
    f = open('result.csv', 'ab')
    try:
        while d > 0:
            img = li.convertjpg(c[d-1])  # open and convert the image
            img_ndarray = np.asarray(img, dtype='float64')/256  # to array, pixels scaled into [0, 1)
            data[d-1] = np.ndarray.flatten(img_ndarray)  # flatten image matrix into a row of data
            A = np.array(data[d-1]).reshape(1, 128*128)  # reshape into a (1, 128*128) batch
            test_output = sess.run(output, {tf_x: A})
            pred_y = int(round(test_output[0, 0]))
            if pred_y < 112:
                pred_y = 112  # clamp to the smallest valid label index
            print(c[d-1], 'prediction number', pred_y, cf.get("j2men", str(pred_y)))
            # NOTE(review): pred_y-2 can reach 110 after clamping to 112 --
            # confirm keys 110/111 exist in j2men.cfg.
            f.write(bytes(str(c[d-1]).replace("test1\\","")+","+cf.get("j2men", str(pred_y-2))+cf.get("j2men", str(pred_y-1))+cf.get("j2men", str(pred_y))+cf.get("j2men", str(pred_y+1))+cf.get("j2men", str(pred_y+2))+"\r\n", encoding = "utf8"))  # append result row
            d = d-1
    finally:
        # BUGFIX: the result file and the session were never closed.
        f.close()
        sess.close()
    # destroy previous net
    tf.reset_default_graph()
reload()
| [
"noreply@github.com"
] | super-rain.noreply@github.com |
ceedfaf4246aef210a56011cac0306d565c3261e | 3ed75498e41a16bde6481bde207c2bfd0f9bdb6c | /Screens/options.py | c7d446a26434301cb64e8959feb5acc2161b6c3a | [] | no_license | psheppard16/red-ball | 09a59eb03f90f15b17030818ed5f611fe1631c5e | e9e01f464bacd8334dc79005b373065284a2bf29 | refs/heads/master | 2020-03-24T05:10:27.362476 | 2018-07-26T18:21:07 | 2018-07-26T18:21:07 | 142,477,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,682 | py | from tkinter import *
from PIL import ImageTk
class Options:
    """Options menu screen: a Tkinter frame of buttons that toggle the
    persisted settings on self.window.save and update their labels in place.
    """

    def __init__(self, window):
        self.window = window
        self.parent = window
        self.root = window.root
        # NOTE(review): height is set from window.width here and in update();
        # looks like a copy-paste slip, but both call sites agree so it is kept.
        self.f = Frame(self.root, bg="blue", width=self.window.width, height=self.window.width)
        self.f.pack_propagate(0)
        self.backgroundImage = ImageTk.PhotoImage(file="Backgrounds/options.png")
        self.backgroundLabel = Label(self.root, image=self.backgroundImage)
        # One StringVar (button text) plus one Button (toggler) per option.
        self.showBackgroundF = StringVar()
        if self.window.save.showBackground:
            self.showBackgroundF.set("Show backgrounds")
        else:
            self.showBackgroundF.set("Don't show backgrounds")
        self.showBackgroundB = Button(self.root, textvariable=self.showBackgroundF, command=self.showBackground, bg="#%02x%02x%02x" % (255, 165, 0), font="Helvetica 15 bold", padx=10, pady=10)
        self.showBackgroundB.pack(in_=self.f, pady=15)
        self.resolutionF = StringVar()
        self.resolutionF.set("Resolution: " + self.window.save.resolution)
        self.resolutionB = Button(self.root, textvariable=self.resolutionF, command=self.resolution, bg="#%02x%02x%02x" % (255, 165, 0), font="Helvetica 15 bold", padx=10, pady=10)
        self.resolutionB.pack(in_=self.f, pady=15)
        self.frameF = StringVar()
        if self.window.save.smoothFrames == True:
            self.frameF.set("Smooth framerate transitions on")
        else:
            self.frameF.set("Smooth framerate transitions off")
        self.frameB = Button(self.root, textvariable=self.frameF, command=self.frame, bg="#%02x%02x%02x" % (255, 165, 0), font="Helvetica 15 bold", padx=10, pady=10)
        self.frameB.pack(in_=self.f, pady=15)
        self.scaleF = StringVar()
        if self.window.save.smoothScale == True:
            self.scaleF.set("Smooth scaling on")
        else:
            self.scaleF.set("Smooth scaling off")
        self.scaleB = Button(self.root, textvariable=self.scaleF, command=self.scale, bg="#%02x%02x%02x" % (255, 165, 0), font="Helvetica 15 bold", padx=10, pady=10)
        self.scaleB.pack(in_=self.f, pady=15)
        self.invertedF = StringVar()
        if self.window.save.invertedControls == True:
            self.invertedF.set("Inverted controls on")
        else:
            self.invertedF.set("Inverted controls off")
        self.invertedB = Button(self.root, textvariable=self.invertedF, command=self.inverted, bg="#%02x%02x%02x" % (255, 165, 0), font="Helvetica 15 bold", padx=10, pady=10)
        self.invertedB.pack(in_=self.f, pady=15)
        self.showOutlineF = StringVar()
        if self.window.save.outlineMode == "simple":
            self.showOutlineF.set("Fancy outline: simple")
        elif self.window.save.outlineMode == "off":
            self.showOutlineF.set("Fancy outline: off")
        elif self.window.save.outlineMode == "fancy":
            self.showOutlineF.set("Fancy outline: fancy")
        self.showOutlineB = Button(self.root, textvariable=self.showOutlineF, command=self.showOutline, bg="#%02x%02x%02x" % (255, 165, 0), font="Helvetica 15 bold", padx=10, pady=10)
        self.showOutlineB.pack(in_=self.f, pady=15)
        # BUGFIX: was `self.accept = Button(...)`, which replaced the bound
        # accept() method with the widget after construction. Renamed to
        # acceptB to match the other *B button attributes; the command still
        # calls the accept() method (bound before the assignment either way).
        self.acceptB = Button(self.root, text="Accept", command=self.accept, bg="#%02x%02x%02x" % (255, 165, 0), font="Helvetica 15 bold", padx=10, pady=10)
        self.acceptB.pack(in_=self.f, pady=15)

    def update(self):
        # Keep the frame sized to the (possibly resized) window.
        self.f.config(width=self.window.width, height=self.window.width)

    def setUp(self):
        """Show the screen and refresh every button label from the save data."""
        self.backgroundLabel.place(x=0, y=0, relwidth=1, relheight=1)
        if self.window.save.outlineMode == "simple":
            self.showOutlineF.set("Fancy outline: simple")
        elif self.window.save.outlineMode == "off":
            self.showOutlineF.set("Fancy outline: off")
        elif self.window.save.outlineMode == "fancy":
            self.showOutlineF.set("Fancy outline: fancy")
        if self.window.save.invertedControls == True:
            self.invertedF.set("Inverted controls on")
        else:
            self.invertedF.set("Inverted controls off")
        if self.window.save.smoothScale == True:
            self.scaleF.set("Smooth scaling on")
        else:
            self.scaleF.set("Smooth scaling off")
        if self.window.save.smoothFrames == True:
            self.frameF.set("Smooth framerate transitions on")
        else:
            self.frameF.set("Smooth framerate transitions off")
        self.resolutionF.set("Resolution: " + self.window.save.resolution)
        if self.window.save.showBackground:
            self.showBackgroundF.set("Show backgrounds")
        else:
            self.showBackgroundF.set("Don't show backgrounds")
        self.f.pack(side=LEFT)

    def accept(self):
        # Leave the options screen; the main loop watches rMenu.
        self.window.rMenu = "charScreen"

    def hide(self):
        # Park the background far off-screen and unpack the frame.
        self.backgroundLabel.place(x=10000, y=10000, relwidth=1, relheight=1)
        self.f.pack_forget()

    def showOutline(self):
        # Cycle: off -> simple -> fancy -> off.
        if self.window.save.outlineMode == "off":
            self.window.save.outlineMode = "simple"
            self.showOutlineF.set("Fancy outline: simple")
        elif self.window.save.outlineMode == "simple":
            self.window.save.outlineMode = "fancy"
            self.showOutlineF.set("Fancy outline: fancy")
        elif self.window.save.outlineMode == "fancy":
            self.window.save.outlineMode = "off"
            self.showOutlineF.set("Fancy outline: off")

    def resolution(self):
        # Cycle through the supported resolutions, wrapping at the end.
        if self.window.save.resolution == "1280x720":
            self.window.save.resolution = "1366x768"
            self.resolutionF.set("Resolution: " + "1366x768")
        elif self.window.save.resolution == "1366x768":
            self.window.save.resolution = "1600x900"
            self.resolutionF.set("Resolution: " + "1600x900")
        elif self.window.save.resolution == "1600x900":
            self.window.save.resolution = "1920x1080"
            self.resolutionF.set("Resolution: " + "1920x1080")
        elif self.window.save.resolution == "1920x1080":
            self.window.save.resolution = "2048x1152"
            self.resolutionF.set("Resolution: " + "2048x1152")
        elif self.window.save.resolution == "2048x1152":
            self.window.save.resolution = "2560x1440"
            self.resolutionF.set("Resolution: " + "2560x1440")
        elif self.window.save.resolution == "2560x1440":
            self.window.save.resolution = "1280x720"
            self.resolutionF.set("Resolution: " + "1280x720")

    def showBackground(self):
        if self.window.save.showBackground:
            self.window.save.showBackground = False
            self.showBackgroundF.set("Don't show background")
        else:
            self.window.save.showBackground = True
            self.showBackgroundF.set("Show background")

    def frame(self):
        if self.window.save.smoothFrames:
            self.window.save.smoothFrames = False
            self.frameF.set("Smooth framerate transitions off")
        else:
            self.window.save.smoothFrames = True
            self.frameF.set("Smooth framerate transitions on")

    def scale(self):
        if self.window.save.smoothScale:
            self.window.save.smoothScale = False
            self.scaleF.set("Smooth scaling off")
        else:
            self.window.save.smoothScale = True
            self.scaleF.set("Smooth scaling on")

    def inverted(self):
        if self.window.save.invertedControls:
            self.window.save.invertedControls = False
            self.invertedF.set("Inverted controls off")
        else:
            self.window.save.invertedControls = True
            self.invertedF.set("Inverted controls on")
"noreply@github.com"
] | psheppard16.noreply@github.com |
651ee9bca309558d6e2740ed558f20893319c3eb | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/datadog/list_monitor_monitored_resources.py | 5a4cc26145dc22e9d977a6f38d8ce27006f8f3f4 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,496 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListMonitorMonitoredResourcesResult',
'AwaitableListMonitorMonitoredResourcesResult',
'list_monitor_monitored_resources',
'list_monitor_monitored_resources_output',
]
@pulumi.output_type
class ListMonitorMonitoredResourcesResult:
    """
    Response of a list operation.
    """
    def __init__(__self__, next_link=None, value=None):
        # Generated guards: values are stored through pulumi.set so the
        # @pulumi.output_type machinery can retrieve them in the getters below.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        Link to the next set of results, if any.
        """
        return pulumi.get(self, "next_link")
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.MonitoredResourceResponse']]:
        """
        Results of a list operation.
        """
        return pulumi.get(self, "value")
class AwaitableListMonitorMonitoredResourcesResult(ListMonitorMonitoredResourcesResult):
    # Awaitable wrapper: `await` immediately returns an equivalent
    # already-resolved result (the generator body never yields).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListMonitorMonitoredResourcesResult(
            next_link=self.next_link,
            value=self.value)
def list_monitor_monitored_resources(monitor_name: Optional[str] = None,
                                     resource_group_name: Optional[str] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListMonitorMonitoredResourcesResult:
    """
    Response of a list operation.
    API Version: 2021-03-01.


    :param str monitor_name: Monitor resource name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['monitorName'] = monitor_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:datadog:listMonitorMonitoredResources', __args__, opts=opts, typ=ListMonitorMonitoredResourcesResult).value
    return AwaitableListMonitorMonitoredResourcesResult(
        next_link=__ret__.next_link,
        value=__ret__.value)
@_utilities.lift_output_func(list_monitor_monitored_resources)
def list_monitor_monitored_resources_output(monitor_name: Optional[pulumi.Input[str]] = None,
                                            resource_group_name: Optional[pulumi.Input[str]] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListMonitorMonitoredResourcesResult]:
    """
    Response of a list operation.
    API Version: 2021-03-01.


    :param str monitor_name: Monitor resource name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body is supplied by the lift_output_func decorator.
    ...
"noreply@github.com"
] | bpkgoud.noreply@github.com |
2b59d6771572cef86860be337e1b85cf152e707e | be0ffd9331435a106abfe2f4d9d984c56ad17896 | /scratchpad/testrandom.py | a7aa20236de49cb8eb73cecd6e747fb86606750a | [] | no_license | peacej/random | fdc09cb880814826bb2d8362b9cb934923e5597b | 0919dc00b5ba53dda4a71a6584f1df6b1c2594f2 | refs/heads/master | 2022-06-28T13:40:35.317421 | 2022-06-27T11:30:17 | 2022-06-27T11:30:17 | 71,959,030 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | def HeapSort(A):
def heapify(A):
start = (len(A) - 2) // 2
while start >= 0:
siftDown(A, start, len(A) - 1)
start -= 1
def siftDown(A, start, end):
root = start
while root * 2 + 1 <= end:
child = root * 2 + 1
if child + 1 <= end and A[child] < A[child + 1]:
child += 1
if child <= end and A[root] < A[child]:
A[root], A[child] = A[child], A[root]
root = child
else:
return
heapify(A)
end = len(A) - 1
while end > 0:
A[end], A[0] = A[0], A[end]
siftDown(A, 0, end - 1)
end -= 1
def solution(A):
heapsort(A)
return max(A[0]*A[1]*A[-1],A[-1]*A[-2]*A[-3])
A=[-4, -6, 3, 4, 5]
HeapSort(A)
print(A)
#print(solution([-4, -6, 3, 4, 5] )) | [
"jerry.chi@supercell.com"
] | jerry.chi@supercell.com |
4ffe1b31fbdc21fc2dec484ff36470b9ac842e0f | 0d7bc965ec049e30d93d7f535fddcbcff8a85b04 | /Python Programs/WhileLoop.py | c4d32e7f37d61dc4a7ed3f5024e97b0114f60b27 | [] | no_license | NisAdkar/Python_progs | af3c044eb14512dec571a46b671db3299cd33769 | a4e6f79989b8d126ac658bda3881e89be608743b | refs/heads/master | 2020-05-21T09:25:15.000641 | 2019-05-14T17:39:53 | 2019-05-14T17:39:53 | 185,997,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | """
1.While Loop Basics
a.create a while loop that prints out a string 5 times (should not use a break statement)
b.create a while loop that appends 1, 2, and 3 to an empty string and prints that string
c.print the list you created in step 1.b.
2.while/else and break statements
a.create a while loop that does the same thing as the the while loop you created in step 1.a. but uses a break
statement
to end the loop instead of what was used in step 1.a.
b.use the input function to make the user of the program guess your favorite fruit
c.create a while/else loop that continues to prompt the user to guess what your favorite fruit is until they guess
correctly (use the input function for this.) The else should be triggered when the user correctly guesses your
favorite fruit. When the else is triggered, it should output a message saying that the user has correctly guessed
your favorite fruit.
"""
"""
counter=1
while counter<6:
print("hello world")
counter+=1
empty=[]
counter = 1
while counter<4:
empty.append(counter) # awesome you are ! keep pressing! go hard !!!<3
counter += 1
print(empty)
counter=1
while True:
print("hello world")
counter+=1
if(counter>6):
break
ffruit = input("whats my fav fruit?")
myfruit="orange"
while myfruit!=ffruit:
print("wrong guess , guess again :P")
ffruit = input("whats my fav fruit?")
else:
print("you have guessed it right !")
print("*************************************************************")
"""
from random import randint
counter = 5
while counter<10:
print(counter)
if counter ==7:
print("counter is equal to 7")
break
counter = randint(5,10)
else:
print("counter is equal to 10") | [
"nihe@kamui.com"
] | nihe@kamui.com |
00b26a6e42625d2851b3a7fad0f97cf39f157e47 | f9e33e870b1ac79fa8dcbe54ddcba4dbb4271e68 | /ex16.py | fc767bf879db091f617b502eb99380a83d46a3b2 | [] | no_license | mtaggart/LPTHW | 26fce5ebd32c8a82a0f5f4a41dab2acf51ca6e0f | 2fe99ee54fa3441095bc634aaf949d56e9d0de1e | refs/heads/master | 2020-05-27T17:43:03.403652 | 2014-03-31T03:42:51 | 2014-03-31T03:42:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | from sys import argv
script, filename = argv
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
target = open(filename, 'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
print "I'm going to write these to the file."
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
text = "%s\n%s\n%s\n" % (line1, line2, line3)
target.write(text)
print "And finally, we close it."
target.close() | [
"taggart.meredith@gmail.com"
] | taggart.meredith@gmail.com |
d73265057596d25370ac4d7caa628013c175132b | 6ee67adc87ac911af70a811be8517a4c80c86a1f | /Fizz_Buzz.py | e1bceaad2acce83e5bc65faa96a706e120376f49 | [] | no_license | BingyuSong/my_practice | f085d3ef68ef393f619723add44870e84167754a | 09dab5063f4fd078e1da95e67c2f29202d06aa87 | refs/heads/master | 2021-04-30T16:31:00.527753 | 2017-06-26T10:19:54 | 2017-06-26T10:19:54 | 80,067,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | class Solution(object):
def fizzBuzz(self, n):
ans = [str(i) for i in range(1,n+1)]
three=[int(x) for x in ans if int(x)%3==0 and int(x)%15 != 0]
five = [int(x) for x in ans if int(x)%5==0 and int(x)%15 != 0]
fif = [int(x) for x in ans if int(x)%15==0]
for i in three : ans[i-1]='Fizz'
for i in five : ans[i-1]='Buzz'
for i in fif : ans[i-1]="FizzBuzz"
return ans
if __name__ == "__main__":
ans= Solution()
print([i for i in ans.fizzBuzz(15)]) | [
"bsong11@hawk.iit.edu"
] | bsong11@hawk.iit.edu |
7c8e78fcddeb16b75401b31f1b4ace369e75b5f7 | 2d8972c7746e4b3896b4d8981e7f4bd86f50b6d0 | /music.py | db8b0f94971eac953f50ee5eaeacca087f69bd7f | [] | no_license | Jaideep25-tech/music | 2703c1fc031e281dde00cc24310b69e1eacf44d8 | f7b10ac8724d121db1678b6997d7c44ee6b4c6d2 | refs/heads/main | 2023-05-01T04:26:54.508360 | 2021-05-18T16:49:44 | 2021-05-18T16:49:44 | 368,603,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | import pygame
import tkinter as tkr
from tkinter.filedialog import askdirectory
import os
musicplayer = tkr.Tk()
musicplayer.title("Jaideep's Music Player")
musicplayer.geometry("450x350")
directory = askdirectory()
os.chdir(directory)
songlist = os.listdir()
playlist = tkr.Listbox(musicplayer, font ="Helvetica 12 bold", bg="yellow",selectmode= tkr.SINGLE)
for item in songlist:
pos = 0
playlist.insert(pos, item)
pos = pos + 1
pygame.init()
pygame.mixer.init()
def play():
pygame.mixer.music.load(playlist.get(tkr.ACTIVE))
var.set(playlist.get(tkr.ACTIVE))
pygame.mixer.music.play()
def ExitMusicPlayer():
pygame.mixer.music.stop()
def pause():
pygame.mixer.music.pause()
def unpause():
pygame.mixer.music.unpause()
Button1 = tkr.Button(musicplayer,width=5,height=3, font="Helvetica 12 bold",text="PLAY",command=play,bg="red",fg="white")
Button2 = tkr.Button(musicplayer,width=5,height=3, font="Helvetica 12 bold",text="STOP",command=ExitMusicPlayer,bg="purple",fg="white")
Button3 = tkr.Button(musicplayer,width=5,height=3, font="Helvetica 12 bold",text="PAUSE",command=pause,bg="green",fg="white")
Button4 = tkr.Button(musicplayer,width=5,height=3, font="Helvetica 12 bold",text="UNPAUSE",command=unpause,bg="blue",fg="white")
var = tkr.StringVar()
songtitle = tkr.Label(musicplayer, font="Helvetica 12 bold", textvariable=var)
songtitle.pack()
Button1.pack(fill="x")
Button2.pack(fill="x")
Button3.pack(fill="x")
Button4.pack(fill="x")
playlist.pack(fill="both",expand="yes")
musicplayer.mainloop() | [
"noreply@github.com"
] | Jaideep25-tech.noreply@github.com |
098398521b5a9a4384b96e07234e1092befff50a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2485/60781/283277.py | 68623b6d1bce42badcf3c4185559f4ad8bbfa8ad | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | n=input()
n1=input()
str1=input()
n2=input()
str2=input()
pan=0
if(str1=='act cat tac god dog' and str2=='acd cad dac'):
print('2 3')
print('3')
pan=1
if(str1=='act cat tac god dog' and str2=='act cad dac'):
print('2 3')
print('1 2')
pan=1
if(str1=='act cat tac god dog' and str2=='act cat dac'):
print('2 3')
print('1 2')
pan=1
if(str1=='act cat tac god dog' and str2=='act cat tac'):
print('2 3')
print(3)
pan=1
if(pan==0):
print(str1)
print(str2) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
075cf29c77fe892b1bb0e3f59a1474fc9c91a441 | fbb33853cd9a42585d5c18042f23b5b3ec198f55 | /NewsPaper/newsportal/newsportal/urls.py | ae0099fc35d59d71391f8271ff93580df0469fc3 | [] | no_license | p-kharitonov/skillfactory | a2e04f5b459f245a18d23f8dfd8638d4b2bbbb41 | 19dbb46885407d728ee986b38a8c0378c6575986 | refs/heads/main | 2023-06-02T13:17:21.249102 | 2021-06-16T19:40:29 | 2021-06-16T19:40:29 | 341,186,884 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | """newsportal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from news.views import HomeView
urlpatterns = [
path('', HomeView.as_view(), name='home'),
path('admin/', admin.site.urls),
path('news/', include('news.urls')),
path('accounts/', include('allauth.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"workingpavel@mail.ru"
] | workingpavel@mail.ru |
3a31ea34fc8ef7a5d162381f94bf8aceadcbb19d | 9994859240f3fac317104a77d36ba721c73285d2 | /obeliks/__init__.py | 2c66596a696c3a0b2d6debc3666fef34e398481d | [
"MIT"
] | permissive | IgorTavcar/obeliks | cbb4e272a55da591dd4ebc6365232889bba29514 | fc4de4c8d0a3f3d0419bfa5a4b066ae1ede19fca | refs/heads/master | 2023-04-05T16:16:05.178822 | 2021-04-28T06:47:43 | 2021-04-28T06:47:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from . tokenizer import run
| [
"mihael@sinkec.xyz"
] | mihael@sinkec.xyz |
c54adcffed48b349ba34f9ec3a79c52ff80082c6 | 500d09656eb9e76a035c552604b0dfcd062232b7 | /api/urls.py | 3bd779e80e302bd9838842f57885ee8850883b59 | [] | no_license | jen615/MovieRaterApi | ce5696ae4814e2b0cd1f26712088a1e41595e8c2 | c715bd17f7b99bcd2ba1817ae54f7e5fac09fdd1 | refs/heads/master | 2023-02-17T22:40:13.757132 | 2021-01-21T17:02:04 | 2021-01-21T17:02:04 | 290,200,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from api.views import MovieViewSet, RatingViewSet, UserViewSet
router = routers.DefaultRouter()
router.register('movies', MovieViewSet)
router.register('ratings', RatingViewSet)
router.register('users', UserViewSet)
urlpatterns = [
path('', include(router.urls)),
]
| [
"xjenghisx@gmail.com"
] | xjenghisx@gmail.com |
038d7b7b71e7e098de1a80f0ed5ba0f8260eefed | d5b8b5b5e5affb6fa210c3f377831c919f91a068 | /eagles_app/models.py | a0d1c17ba2e3cacaf343107ae94dd28db9fb9c77 | [] | no_license | Wealthysdot/eagles_diary | 9051c071874e37ebdce5f24eee039ea50c77efaf | c1342eb47ab6cdd25dbb214d6e077c4eafc98534 | refs/heads/main | 2023-04-12T16:44:13.012296 | 2021-05-19T10:50:02 | 2021-05-19T10:50:02 | 368,613,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from django.db import models
# Create your models here.
class Entry(models.Model):
entry_title = models.CharField(max_length=100)
entry_body = models.TextField()
entry_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.entry_title
| [
"sobamboadedotun@gmail.com"
] | sobamboadedotun@gmail.com |
4815537fa4f61378f31cfc81f04d8651a877ba0e | 0b551b78c7a62e42d10e850151f507d754402ba4 | /myproject/procedural/points_list.py | 3f5b84a90f874ff374838d6ad8d92393f766ba24 | [] | no_license | houahidi/python20191213 | a92c9cf295d694d982015c8a2946c3580b183555 | b474a92649782cf2698a22c52c3ef1315443d37a | refs/heads/master | 2020-11-23T22:53:27.258488 | 2019-12-13T14:15:01 | 2019-12-13T14:15:01 | 227,853,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | """
gestion de points 2D avec les liste
"""
import math
from commons import saisir_chiffre
def init(abcisse=None, ordonnee= None):
"""
Initialisation d'un self
PT1 = init(1,1)
PT1 = init()
"""
if not abcisse:
abcisse = saisir_chiffre("Saisir l'abcisse :")
if not ordonnee:
ordonnee = saisir_chiffre("Saisir l'ordonnee :")
return [abcisse, ordonnee]
def afficher(point):
"""afficher les coordonnes d'un self"""
print("self x: {}, y : {}".format(point[0], point[1]))
def deplacer(point, diffx, diffy):
"""deplacer un self """
point[0] += diffx
point[1] += diffy
def distance(point1, point2):
""" distance entre 2 points"""
diffx = point1[0] - point2[0]
diffy = point1[1] - point2[1]
return math.sqrt(diffx ** 2 + diffy **2 )
if __name__ == "__main__":
print("init PT1")
PT1 = init()
print("init PT2")
PT2 = init(3, 5)
print("afficher PT1")
afficher(PT1)
print("afficher PT2")
afficher(PT2)
print("deplacer PT2 de 3, 2")
deplacer(PT2, 3, 2)
afficher(PT2)
print("distance entre PT1,PT2 :", distance(PT1, PT2)) | [
"houahidi@uni-consulting.fr"
] | houahidi@uni-consulting.fr |
ef96da5a275d11b8020f4dc685601d17b6e82455 | bf9ddb7b476d0e885b5b9f7dca3c28ec50b20634 | /user_api/admin.py | 601d2a956aad64b8172b78ac6828b7120cf82cf8 | [
"MIT"
] | permissive | hiroshi-higashiyama/simple_django_backend | 7cd5ff1c82807a11f8a9ac0f8850f251eba73ec1 | fda91fb1eef3c33ed9278d363643a3c4031533c7 | refs/heads/main | 2023-08-23T10:58:37.808450 | 2021-10-24T01:11:29 | 2021-10-24T01:11:29 | 417,671,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from django.contrib import admin
from user_api import models
admin.site.register(models.CustomUser)
| [
"s20840011@gmail.com"
] | s20840011@gmail.com |
6c5de9f5592062a0168e4ee9336b0a50daf29447 | be70e130f53c7703f942057923577adf607687a6 | /doc/tutorial_src/structure.py | 7d4e22d6065ef0d0cdba907aa008094fa894a407 | [
"BSD-3-Clause"
] | permissive | Dr-Moreb/biotite | 4043eadb607e9ede13ce049ade554546ce58afe0 | c34ccb7a7a7de923bf8a238944dfb7e1e635bb28 | refs/heads/master | 2020-04-01T19:02:08.086093 | 2018-10-10T16:01:45 | 2018-10-10T16:01:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,794 | py | """
Going 3D - The Structure subpackage
===================================
.. currentmodule:: biotite.structure
:mod:`biotite.structure` is a *Biotite* subpackage for handling
molecular structures.
This subpackage enables efficient and easy handling of protein structure
data by representing atom attributes in `NumPy` `ndarrays`.
These atom attributes include so called *annotations*
(polypetide chain id, residue id, residue name, hetero residue
information, atom name, element, etc.)
and the atom coordinates.
The package contains three central types: :class:`Atom`,
:class:`AtomArray` and :class:`AtomArrayStack`.
An :class:`Atom` contains data for a single atom, an :class:`AtomArray`
stores data for an entire model and :class:`AtomArrayStack` stores data
for multiple models, where each model contains the same atoms but
differs in the atom coordinates.
Both, :class:`AtomArrray` and :class:`AtomArrayStack`, store the
attributes in `NumPy` arrays. This approach has multiple advantages:
- Convenient selection of atoms in a structure
by using *NumPy* style indexing
- Fast calculations on structures using C-accelerated
:class:`ndarray` operations
- Simple implementation of custom calculations
Based on the implementation using :class:`ndarray` objects, this package
also contains functions for structure analysis and manipulation.
Creating structures
-------------------
Let's begin by constructing some atoms:
"""
import biotite.structure as struc
atom1 = struc.Atom([0,0,0], chain_id="A", res_id=1, res_name="GLY",
hetero=False, atom_name="N", element="N")
atom2 = struc.Atom([0,1,1], chain_id="A", res_id=1, res_name="GLY",
hetero=False, atom_name="CA", element="C")
atom3 = struc.Atom([0,0,2], chain_id="A", res_id=1, res_name="GLY",
hetero=False, atom_name="C", element="C")
########################################################################
# The first parameter are the coordinates (internally converted into an
# :class:`ndarray`), the other parameters are annotations.
# The annotations shown in this example are mandatory:
# If you miss one of these, *Python* will not diretcly complain,
# but some operations might not work properly
# (especially true, when we go to atom arrays and stacks).
# The mandatory annotation categories are originated in *ATOM* records
# in the PDB format.
# Additionally, you can specify an arbitrary amount of custom
# annotations, like B-factors, charge, etc.
# In most cases you won't work with :class:`Atom` instances and in even
# fewer cases :class:`Atom` instances are created as it is done in the
# above example.
#
# If you want to work with an entire molecular structure, containing an
# arbitrary amount of atoms, you have to use so called atom arrays.
# An atom array can be seen as an array of atom instances
# (hence the name).
# But instead of storing :class:`Atom` instances in a list, an
# :class:`AtomArray` instance contains one :class:`ndarray` for each
# annotation and the coordinates.
# In order to see this in action, we first have to create an array from
# the atoms we constructed before.
# Then we can access the annotations and coordinates of the atom array
# simply by specifying the attribute.
array = struc.array([atom1, atom2, atom3])
print("Chain ID:", array.chain_id)
print("Residue ID:", array.res_id)
print("Atom name:", array.atom_name)
print("Coordinates:", array.coord)
print()
print(array)
########################################################################
# The :func:`array()` builder function takes any iterable object
# containing :class:`Atom` instances.
# If you wanted to, you could even use another :class:`AtomArray`, which
# functions also as an iterable object of :class:`Atom` objects.
# An alternative way of constructing an array would be creating an
# :class:`AtomArray` by using its constructor, which fills the
# annotation arrays and coordinates with the type respective *zero*
# value.
# In our example all annotation arrays have a length of 3, since we used
# 3 atoms to create it. A structure containing *n* atoms,
# is represented by annotation arrays of length *n* and coordinates of
# shape *(n,3)*.
#
# If you want to add further annotation categories to an array, you have
# to call the :func:`add_annotation()` or :func:`set_annotation()`
# method at first. After that you can access the new annotation array
# like any other annotation array.
#
# In some cases, you might need to handle structures, where each atom is
# present in multiple locations
# (multiple models in NMR structures, MD trajectories).
# For the cases :class:`AtomArrayStack` objects are used, which
# represent a list of atom arrays.
# Since the atoms are the same for each frame, but only the coordinates
# change, the annotation arrays in stacks are still the same length *n*
# :class:`ndarray` objects as in atom arrays.
# However, a stack stores the coordinates in a *(m,n,3)*-shaped
# :class:`ndarray`, where *m* is the number of frames.
# A stack is constructed with :func:`stack()` analogous to the code
# snipped above.
# It is crucial that all arrays that should be stacked
# have the same annotation arrays, otherwise an exception is raised.
# For simplicity reasons, we create a stack containing two identical
# models, derived from the previous example.
stack = struc.stack([array, array.copy()])
print(stack)
########################################################################
# Loading structures from file
# ----------------------------
#
# Usually structures are not built from scratch in *Biotite*,
# but they are read from a file.
# Probably the most popular strcuture file format is the *PDB* format.
# For our purpose, we will work on a protein structure as small as
# possible, namely the miniprotein *TC5b* (PDB: ``1L2Y```).
# The structure of this 20-residue protein (304 atoms) has been
# elucidated via NMR.
# Thus, the corresponding PDB file consists of multiple (namely 38)
# models, each showing another conformation.
#
# .. currentmodule:: biotite.structure.io.pdb
#
# At first we load the structure from a PDB file via the class
# :class:`PDBFile` in the subpackage :mod:`biotite.structure.io.pdb`.
import biotite
import biotite.structure.io.pdb as pdb
import biotite.database.rcsb as rcsb
pdb_file_path = rcsb.fetch("1l2y", "pdb", biotite.temp_dir())
file = pdb.PDBFile()
file.read(pdb_file_path)
tc5b = file.get_structure()
print(type(tc5b).__name__)
print(tc5b.stack_depth())
print(tc5b.array_length())
########################################################################
# The method :func:`PDBFile.get_structure()` returns an atom array stack
# unless the :obj:`model` parameter is specified,
# even if the file contains only one model.
# The following example
# shows how to write an array or stack back into a PDB file:
file = pdb.PDBFile()
file.set_structure(tc5b)
file.write(biotite.temp_file("pdb"))
########################################################################
# Other information (authors, secondary structure, etc.) cannot be
# extracted from PDB files, yet.
# This is a good place to mention, that it is recommended to use the
# modern PDBx/mmCIF format in favor of the PDB format.
# It solves limitations of the PDB format, that arise from the column
# restrictions.
# Furthermore, much more additional information is stored in these
# files.
#
# .. currentmodule:: biotite.structure.io.pdbx
#
# In contrast to PDB files, *Biotite* can read the entire content of
# PDBx/mmCIF files, which can be accessed in a dictionary like manner.
# At first, we read the file similarily to before, but this time we
# use the :class:`PDBxFile` class.
import biotite.structure.io.pdbx as pdbx
cif_file_path = rcsb.fetch("1l2y", "cif", biotite.temp_dir())
file = pdbx.PDBxFile()
file.read(cif_file_path)
########################################################################
# Now we can access the data like a dictionary of dictionaries.
print(file["1L2Y", "audit_author"]["name"])
########################################################################
# The first index contains the data block and the category name.
# The data block could be omitted, since there is only one block in the
# file.
# This returns a dictionary.
# If the category is in a *loop*, the dictionary contains `ndarrays`
# of strings as values, otherwise the dictionary contains strings
# directly.
# The second index specifies the name of the subcategory, which is used
# as key in this dictionary and returns the corresponding
# :class:`ndarray`.
# Setting/adding a category in the file is done in a similar way:
file["audit_author"] = {"name" : ["Doe, Jane", "Doe, John"],
"pdbx_ordinal" : ["1","2"]}
########################################################################
# In most applications only the structure itself
# (stored in the *atom_site* category) is relevant.
# :func:`get_structure()` and :func:`set_structure()` are convenience
# functions that are used to convert the
# *atom_site* category into an atom array (stack) and vice versa.
tc5b = pdbx.get_structure(file)
# Do some fancy stuff
pdbx.set_structure(file, tc5b)
########################################################################
# :func:`get_structure()` creates automatically an
# :class:`AtomArrayStack`, even if the file actually contains only a
# single model.
# If you would like to have an :class:`AtomArray` instead, you have to
# specifiy the :obj:`model` parameter.
#
# .. currentmodule:: biotite.structure.io.mmtf
#
# If you want to parse a large batch of structure files or you have to
# load very large structure files, the usage of PDB or mmCIF files might
# be too slow for your requirements. In this case you probably might
# want to use MMTF files.
# MMTF files describe structures just like PDB and mmCIF files,
# but they are binary!
# This circumstance increases the downloading and parsing speed by
# several multiples.
# The usage is similar to :class:`PDBxFile`: The :class:`MMTFFile` class
# decodes the file and makes it raw information accessible.
# Via :func:`get_structure()` the data can be loaded into an atom array
# (stack) and :func:`set_structure()` is used to save it back into a
# MMTF file.
import numpy as np
import biotite.structure.io.mmtf as mmtf
mmtf_file_path = rcsb.fetch("1l2y", "mmtf", biotite.temp_dir())
file = mmtf.MMTFFile()
file.read(mmtf_file_path)
stack = mmtf.get_structure(file)
array = mmtf.get_structure(file, model=1)
# Do some fancy stuff
mmtf.set_structure(file, array)
########################################################################
# A more low level access to MMTF files is also possible:
# An MMTF file is structured as dictionary, with each key being a
# strutural feature like the coordinates, the residue ID or the
# secondary structure. If a field is encoded the decoded
# :class:`ndarray` is returned, otherwise the dictionary value is
# directly returned.
# A list of all MMTF fields (keys) can be found in the
# `specification <https://github.com/rcsb/mmtf/blob/master/spec.md#fields>`_.
# The implementation of :class:`MMTFFile` decodes the encoded fields
# only when you need them, so no computation time is wasted on fields
# you are not interested in.
# Field is not encoded
print(file["title"])
# Field is encoded and is automatically decoded
print(file["groupIdList"])
########################################################################
# Setting fields of an MMTF file works in an analogous way for values,
# that should not be encoded.
# The situation is a little more complex for arrays, that should be
# encoded:
# Since arbitrarily named fields can be set in the file,
# :class:`MMTFFile` does not know which codec to use for encoding
# your array.
# Hence, you need to use the :func:`MMTFFile.set_array()` function.
file["title"] = "Some other title"
print(file["title"])
# Determine appropriate codec from the codec used originally
file.set_array(
"groupIdList",
np.arange(20,40),
codec=file.get_codec("groupIdList"))
print(file["groupIdList"])
########################################################################
# .. currentmodule:: biotite.structure.io.npz
#
# For *Biotite* internal storage of structures *npz* files are
# recommended.
# These are simply binary files, that are used by *NumPy*.
# In case of atom arrays and stacks, the annotation arrays and
# coordinates are written/read to/from *npz* files via the
# :class:`NpzFile` class.
# Since no expensive data conversion has o be performed,
# this format is the fastest way to save and load atom arrays and
# stacks.
#
# .. currentmodule:: biotite.structure.io
#
# Since programmers are usually lazy and do not want to write more code than
# necessary, there are two convenient function for loading and saving
# atom arrays or stacks, unifying the forementioned file formats:
# :func:`load_structure()` takes a file path and outputs an array
# (or stack, if the files contains multiple models).
# Internally, this function uses the appropriate `File` class,
# depending on the file format.
# The analogous `save_structure()` function provides a shortcut for
# writing to structure files.
# The desired file format is inferred from the provided file name.
import biotite.structure.io as strucio
stack_from_pdb = strucio.load_structure(pdb_file_path)
stack_from_cif = strucio.load_structure(cif_file_path)
print("Are both stacks equal?", stack_from_pdb == stack_from_cif)
strucio.save_structure(biotite.temp_file("cif"), stack_from_pdb)
########################################################################
# Reading trajectory files
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# If the package *MDtraj* is installed *Biotite* provides a read/write
# interface for different trajectory file formats.
# More information can be found in the API reference.
#
# Array indexing and filtering
# ----------------------------
#
# .. currentmodule:: biotite.structure
#
# Atom arrays and stacks can be indexed in a similar way a
# :class:`ndarray` is indexed.
# In fact, the index is propagated to the coordinates and the annotation
# arrays.
# Therefore, all *NumPy* compatible types of indices can be used,
# like boolean arrays, index arrays/lists, slices and, of course,
# integer values.
# Integer indices have a special role here, as they reduce the
# dimensionality of the data type:
# Indexing an :class:`AtomArrayStack` with an integer results in an
# `AtomArray` at the specified frame, indexing an :class:`AtomArray`
# with an integer yields the specified :class:`Atom`.
# Iterating over arrays and stacks reduces the dimensionality in an
# analogous way.
# Let's demonstrate indexing with the help of the structure of *TC5b*.
import biotite.structure as struc
import biotite.database.rcsb as rcsb
import biotite.structure.io as strucio
file_path = rcsb.fetch("1l2y", "mmtf", biotite.temp_dir())
stack = strucio.load_structure(file_path)
print(type(stack).__name__)
print(stack.stack_depth())
array = stack[2]
print(type(array).__name__)
print(array.array_length())
########################################################################
# :func:`load_structure()` gives us an :class:`AtomArrayStack`
# Via the integer index, we get the :class:`AtomArray` representing the
# third model.
# The :func:`AtomArray.array_length()`
# (or :func:`AtomArrayStack.array_length()`)
# method gives us the number of atoms in arrays and stacks and is
# equivalent to the length of an atom array.
# The amount of models is obtained with
# :func:`AtomArrayStack.stack_depth()`.
# The following code section shows some examples for how an atom array
# can be indexed.
# Get the first atom
atom = array[0]
# Get a subarray containing the first and third atom
subarray = array[[0,2]]
# Get a subarray containing a range of atoms using slices
subarray = array[100:200]
# Filter all carbon atoms in residue 1
subarray = array[(array.element == "C") & (array.res_id == 1)]
# Filter all atoms where the X-coordinate is smaller than 2
subarray = array[array.coord[:,0] < 2]
########################################################################
# An atom array stack can be indexed in a similar way, with the
# difference, that the index specifies the frame(s).
# Get an atom array from the first model
subarray = stack[0]
# Get a substack containing the first 10 models
substack = stack[:10]
########################################################################
# Stacks also have the speciality, that they can handle 2-dimensional
# indices, where the first dimension specifies the frame and the second
# dimension specifies the atom.
# Get the first 100 atoms from the third model
subarray = stack[2, :100]
# Get the first 100 atoms from the models 3, 4 and 5
substack = stack[2:5, :100]
# Get the first atom in the second model
atom = stack[1,0]
# Get a stack containing arrays containing only the first atom
substack = stack[:, 0]
########################################################################
# Furthermore, :mod:`biotite.structure` contains advanced filters,
# that create boolean masks from an array using specific criteria.
# Here is a small example.
backbone = array[struc.filter_backbone(array)]
print(backbone.atom_name)
########################################################################
# If you would like to know which atoms are in proximity to specific
# coordinates, have a look at the :class:`CellList` class.
#
# .. warning:: Creating a subarray or substack by indexing does not
# necessarily copy the coordinates and annotation arrays.
# If possible, only *array views* are created.
# Look into the `NumPy` documentation for furher details.
# If you want to ensure, that you are working with a copy,
# use the :func:`copy()` method after indexing.
#
# Representing bonds
# ------------------
#
# Up to now we only looked into atom arrays whose atoms are merely
# described by its coordinates and annotations
# But there is more: Chemcial bonds can be described, too, using a
# :class:`BondList`!
#
# Consider the following case: Your atom array contains four atoms:
# *N*, *CA*, *C* and *CB*. *CA* is a central atom that is connected to
# *N*, *C* and *CB*.
# A :class:`BondList` is created by passing a :class:`ndarray`
# containing pairs of integers, where each integer represents an index
# in a corresponding atom array and the pairs indicate which atoms share
# a bond.
# Addtionally, it is required to specifiy the number of atoms in the
# atom array.
import biotite.structure as struc
array = struc.array([
struc.Atom([0,0,0], atom_name="N"),
struc.Atom([0,0,0], atom_name="CA"),
struc.Atom([0,0,0], atom_name="C"),
struc.Atom([0,0,0], atom_name="CB")
])
print("Atoms:", array.atom_name)
bond_list = struc.BondList(len(array), np.array([[1,0], [1,2], [1,3]]))
print("Bonds (indices):")
print(bond_list.as_array())
print("Bonds (atoms names):")
print(array.atom_name[bond_list.as_array()[:, :2]])
ca_bonds, ca_bond_types = bond_list.get_bonds(1)
print("Bonds of CA:", array.atom_name[ca_bonds])
########################################################################
# When you look at the internal :class:`ndarray`
# (as given by :func:`BondList.as_array()`), you see a third column
# containing zeros.
# This column describes each bond with values from the :class:`BondType`
# enum: *0* corresponds to ``BondType.ANY``, which means that the type of
# the bond is undefined.
# This makes sense, since we did not define the bond types, when we
# created the bond list.
# The other thing that has changed is the index order:
# Each bond is sorted so that the index with the lower index is the
# first element.
#
# Although a :class:`BondList` uses an :class:`ndarray` under the hood,
# indexing works a little bit different:
# The indexing operation is not applied on the internal
# :class:`ndarray`, instead it behaves like the same indexing operation
# was applied to a corresponding atom array:
# The bond list adjusts its indices so that they still point on the same
# atoms as before.
# Bonds that involve at least one atom, that has been removed, are
# deleted as well.
# We will try that by deleting the *C* atom.
mask = (array.atom_name != "C")
sub_array = array[mask]
sub_bond_list = bond_list[mask]
print("Atoms:", sub_array.atom_name)
print("Bonds (indices):")
print(sub_bond_list.as_array())
print("Bonds (atoms names):")
print(sub_array.atom_name[sub_bond_list.as_array()[:, :2]])
########################################################################
# As you see, the bond involving the *C* atom (only a single one) is
# removed and the remaining indices are shifted.
#
# We do not have to index the atom array and the bond list
# separately, for convenience reasons you can associate a bond list to
# an atom array. Every time the atom array is indexed, the index is also
# applied to the associated bond list.
# The same behavior applies to concatenations, by the way.
array.bonds = bond_list
sub_array = array[array.atom_name != "C"]
print("Bonds (atoms names):")
print(sub_array.atom_name[sub_array.bonds.as_array()[:, :2]])
########################################################################
# Let's scale things up a bit: Bond information can be loaded from and
# saved to MMTF files.
# We'll try that on the structure of *TC5b* and look at the bond
# information of the third residue, a tyrosine.
import biotite.database.rcsb as rcsb
import biotite.structure.io as strucio
file_path = rcsb.fetch("1l2y", "mmtf", biotite.temp_dir())
mmtf_file = mmtf.MMTFFile()
mmtf_file.read(file_path)
# Essential: set the 'include_bonds' parameter to true
stack = mmtf.get_structure(mmtf_file, include_bonds=True)
tyrosine = stack[:, (stack.res_id == 3)]
print("Bonds (indices):")
print(tyrosine.bonds)
print("Bonds (atoms names):")
print(tyrosine.atom_name[tyrosine.bonds.as_array()[:, :2]])
########################################################################
# Since we loaded the bond information from a MMTF file, the bond types
# are also defined:
# Here we have both, ``BondType.SINGLE`` and ``BondType.DOUBLE``
# bonds (*1* and *2*, respectively).
#
# Structure analysis
# ------------------
#
# This package would be almost useless, if there wasn't some means to
# analyze your structures.
# Therefore, *Biotite* offers a bunch of functions for this purpose,
# reaching from simple bond angle and length measurements to more
# complex characteristics, like accessible surface area and
# secondary structure.
# The following section will introduce you to some of these functions,
# which should be applied to that good old structure of *TC5b*.
#
# The examples shown in this section do not represent the full spectrum
# of analysis tools in this package.
# Look into the API reference for more information.
#
# Geometry measures
# ^^^^^^^^^^^^^^^^^
#
# Let's start with measuring some simple geometric characteristics,
# for example atom distances of CA atoms.
import biotite.structure as struc
import biotite.structure.io.pdbx as pdbx
import biotite.database.rcsb as rcsb
file_path = rcsb.fetch("1l2y", "cif", biotite.temp_dir())
file = pdbx.PDBxFile()
file.read(file_path)
stack = pdbx.get_structure(file)
# Filter only CA atoms
stack = stack[:, stack.atom_name == "CA"]
# Calculate distance between first and second CA in first frame
array = stack[0]
print("Atom to atom:", struc.distance(array[0], array[1]))
# Calculate distance between the first atom
# and all other CA atoms in the array
print("Array to atom:")
array = stack[0]
print(struc.distance(array[0], array))
# Calculate pairwise distances between the CA atoms in the first frame
# and the CA atoms in the second frame
print("Array to array:")
print(struc.distance(stack[0], stack[1]))
# Calculate the distances between all CA atoms in the stack
# and the first CA atom in the first frame
# The resulting array is too large, therefore only the shape is printed
print("Stack to atom:")
print(struc.distance(stack, stack[0,0]).shape)
# And finally distances between two adjacent CA in the first frame
array = stack[0]
print("Adjacent CA distances")
print(struc.distance(array[:-1], array[1:]))
########################################################################
# Like some other functions in :mod:`biotite.structure`, we are able to
# pick any combination of an atom, atom array or stack. Alternatively
# :class:`ndarray` objects containing the coordinates can be provided.
#
# Furthermore, we can measure bond angles and dihedral angles.
# Calculate angle between first 3 CA atoms in first frame
# (in radians)
print("Angle:", struc.angle(array[0],array[1],array[2]))
# Calculate dihedral angle between first 4 CA atoms in first frame
# (in radians)
print("Dihedral angle:", struc.dihedral(array[0],array[1],array[2],array[4]))
########################################################################
# In some cases one is interested in the dihedral angles of the peptide
# backbone, :math:`\phi`, :math:`\psi` and :math:`\omega`.
# In the following code snippet we measure these angles and create a
# simple Ramachandran plot for the first frame of *TC5b*.
import matplotlib.pyplot as plt
import numpy as np
array = pdbx.get_structure(file, model=1)
phi, psi, omega = struc.dihedral_backbone(array, chain_id="A")
plt.plot(phi * 360/(2*np.pi), psi * 360/(2*np.pi),
marker="o", linestyle="None")
plt.xlim(-180,180)
plt.ylim(-180,180)
plt.xlabel("phi")
plt.ylabel("psi")
plt.show()
########################################################################
# Comparing structures
# ^^^^^^^^^^^^^^^^^^^^
#
# Now we want to calculate a measure of flexibility for each residue in
# *TC5b*. The *root mean square fluctuation* (RMSF) is a good value for
# that.
# It represents the deviation for each atom in all models relative
# to a reference model, which is usually the averaged structure.
# Since we are only interested in the backbone flexibility, we consider
# only CA atoms.
# Before we can calculate a reasonable RMSF, we have to superimpose each
# model on a reference model (we choose the first model),
# which minimizes the *root mean square deviation* (RMSD).
stack = pdbx.get_structure(file)
# We consider only CA atoms
stack = stack[:, stack.atom_name == "CA"]
# Superimposing all models of the structure onto the first model
stack, transformation_tuple = struc.superimpose(stack[0], stack)
print("RMSD for each model to first model:")
print(struc.rmsd(stack[0], stack))
# Calculate the RMSF relative to average of all models
rmsf = struc.rmsf(struc.average(stack), stack)
# Plotting stuff
plt.plot(np.arange(1,21), rmsf)
plt.xlim(0,20)
plt.xticks(np.arange(1,21))
plt.xlabel("Residue")
plt.ylabel("RMSF")
plt.show()
########################################################################
# As you can see, both terminal residues are most flexible.
#
# Calculating accessible surface area
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Another interesting value for a protein structure is the
# *solvent accessible surface area* (SASA) that indicates whether an
# atom or residue is on the protein surface or buried inside the
# protein.
# The function :func:`sasa()` approximates the SASA for each
# atom.
# Then we sum up the values for each residue, to get the
# residue-wise SASA.
#
# Besides other parameters, you can choose between different
# Van-der-Waals radii sets:
# *ProtOr*, the default set, is a set that defines radii for
# non-hydrogen atoms, but determines the radius of an atom based on the
# assumed amount of hydrogen atoms connected to it.
# Therefore, *ProtOr* is suitable for structures with missing hydrogen
# atoms, like crystal structures.
# Since the structure of *TC5b* was elucidated via NMR, we can assign a
# radius to every single atom (including hydrogens), hence we use the
# *Single* set.
array = pdbx.get_structure(file, model=1)
# The following line calculates the atom-wise SASA of the atom array
atom_sasa = struc.sasa(array, vdw_radii="Single")
# Sum up SASA for each residue in atom array
res_sasa = struc.apply_residue_wise(array, atom_sasa, np.sum)
# Again plotting stuff
plt.plot(np.arange(1,21), res_sasa)
plt.xlim(0,20)
plt.xticks(np.arange(1,21))
plt.xlabel("Residue")
plt.ylabel("SASA")
plt.show()
########################################################################
# Secondary structure determination
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# *Biotite* can also be used to assign
# *secondary structure elements* (SSE) to a structure with the
# :func:`annotate_sse()` function.
# An ``'a'`` means alpha-helix, ``'b'`` beta-sheet, and ``'c'`` means
# coil.
array = pdbx.get_structure(file, model=1)
# Estimate secondary structure
sse = struc.annotate_sse(array, chain_id="A")
# Pretty print
print("".join(sse)) | [
"patrick.kunzm@gmail.com"
] | patrick.kunzm@gmail.com |
a94ba54bc954a10da1756816c996baa83cb69da6 | 0beabc95fb8d4d7dcaefd92d06d9947772baa7d1 | /hello.py | ef29ac03ef0c9cb6dcb9548f19dbfbb1de79d917 | [] | no_license | te25son/tim_teaches_git | 6ae37728d3d3c300ecf38964228f76933925b866 | f72bb412624ee7a03c844ff839e7080e62e02b37 | refs/heads/master | 2020-12-21T08:01:48.347439 | 2020-02-03T15:48:22 | 2020-02-03T15:48:22 | 236,367,091 | 0 | 0 | null | 2020-02-03T15:48:23 | 2020-01-26T19:50:57 | Python | UTF-8 | Python | false | false | 195 | py |
def hello_world():
    """Print a generic greeting to standard output."""
    print("Hello world!")
def hello_name(name):
    """Print a personalised greeting for *name*."""
    greeting = f"Hello, {name}!"
    print(greeting)
def say_goodbye():
    """Print a farewell message to standard output."""
    print("Goodbye.")
# Entry point: greet a hard-coded name when run as a script
if __name__ == "__main__":
    hello_name("Timmeh")
"timothy.eason@mewssystems.com"
] | timothy.eason@mewssystems.com |
d0b1ebf2de17990600dada870e21dd121a1d11ee | eee9c21aca103bcc6df0f22464f952d7848a379c | /LinearSearch.py | f2a87671ae4cfaac0ca6d89351ab6adf99ca81ee | [] | no_license | RutuparnaJ/dv.pset | 4d2d9f56919e98eb0c4f5ddab9ebcd539a8a5771 | 96c330ae0faf1e9c51c8ae7019485f40dde60912 | refs/heads/master | 2020-03-25T22:28:30.217221 | 2018-10-05T18:18:14 | 2018-10-05T18:18:14 | 144,225,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | #Linear Search
# Linear search: read a number from the user and report every index of
# the list where it occurs.
arr = [1, 2, 5, 7, 10, 21, 27, 30, 41, 53, 61]
found = False
search = int(input("Enter the number to be searched: "))
# Scan the whole list; the original loop used range(0, 10) and therefore
# never checked the last element (index 10, value 61).
for i, value in enumerate(arr):
    if value == search:
        found = True
        print("Number found! Index = ", i)
if not found:
    print("Sorry, number not found")
| [
"RutuparnaJ@users.noreply.github.com"
] | RutuparnaJ@users.noreply.github.com |
ccb5b21ef9a1b87405656d3a5064c61f775ca7ce | 45df8ceac3f4c6bafb31ecb036fce2eec84b079e | /tests/test_articulation.py | c95d7eb7a8e766b86e3d41390578f43eb7376566 | [
"MIT"
] | permissive | jcvasquezc/DisVoice | edd6413e58c1e4e68069944b64340ef70aff0fcf | 3d7a73633165e08bbb8c2e010f32fb2f3e08ce6d | refs/heads/master | 2023-07-20T14:32:58.173752 | 2023-07-11T10:54:42 | 2023-07-11T10:54:42 | 99,439,013 | 294 | 76 | MIT | 2023-07-20T04:01:01 | 2017-08-05T17:53:13 | Jupyter Notebook | UTF-8 | Python | false | false | 1,143 | py | import os, sys
PATH=os.path.dirname(os.path.realpath(__file__))
PATH_DISVOICE=os.path.dirname(os.path.realpath(__file__))+"/disvoice/"
sys.path.append(PATH_DISVOICE)
import disvoice.articulation.articulation as articulation
def test_extract_articulation1():
    """Static articulation features from a single file, npy output."""
    extractor = articulation.Articulation()
    audio_file = PATH + "/../audios/001_ddk1_PCGITA.wav"
    features = extractor.extract_features_file(
        audio_file, static=True, plots=True, fmt="npy")
    print(features.shape)
def test_extract_articulation2():
    """Static articulation features for a whole folder, csv output."""
    extractor = articulation.Articulation()
    audio_dir = PATH + "/../audios/"
    features = extractor.extract_features_path(
        audio_dir, static=True, plots=False, fmt="csv")
    print(features.head())
def test_extract_articulation3():
    """Dynamic (frame-wise) articulation features, torch output."""
    extractor = articulation.Articulation()
    audio_file = PATH + "/../audios/001_ddk1_PCGITA.wav"
    features = extractor.extract_features_file(
        audio_file, static=False, plots=True, fmt="torch")
    print(features.size())
# Run the three extraction scenarios when executed as a script
if __name__ == "__main__":
    test_extract_articulation1()
    test_extract_articulation2()
    test_extract_articulation3()
"jcvasquez@pratechgroup.com"
] | jcvasquez@pratechgroup.com |
94b9c3aa89fb64df2c4d73294f73e881c80bb475 | 75ae022c12279b3381a2cb864398d69cf4cd88a7 | /config/dispatch/admin.py | a067d1254fcb3401cbe863b6de3adf9fcbb9ef0c | [] | no_license | mrkp305/zisco | e29599da706c4d387a7acc5b599422369e7d81a1 | 455db93187f5ddcb6f69e16b2816421483c51940 | refs/heads/master | 2023-05-11T21:08:33.621224 | 2020-02-26T12:09:56 | 2020-02-26T12:09:56 | 243,253,656 | 0 | 0 | null | 2023-05-08T03:34:52 | 2020-02-26T12:08:57 | JavaScript | UTF-8 | Python | false | false | 464 | py | from django.urls import path, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
# Route the project root to the Django admin and serve uploaded media
# files (settings.MEDIA_URL) straight from settings.MEDIA_ROOT.
urlpatterns = [
    path("", admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Mount django-debug-toolbar under /__debug__/, but only in DEBUG mode
# and only when the app is actually installed.
if settings.DEBUG and "debug_toolbar" in settings.INSTALLED_APPS:
    import debug_toolbar
    urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| [
"percival.rapha@gmail.com"
] | percival.rapha@gmail.com |
70432231e305666a21ca2c22db7f1668a36892ee | 36e71375312d9a903c5a7815f036b7b49c1f7657 | /CursoemVideo/Desafio012.py | 90402a4e9fd8df66e8fe0e9f7e49c49a3c7aa151 | [
"MIT"
] | permissive | iSouma/Curso-python | 821d32e13745517582ed515b109dfa1f8c55fba5 | 47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0 | refs/heads/master | 2022-12-24T13:32:37.581319 | 2020-09-28T03:55:00 | 2020-09-28T03:55:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # Faça um algorítimo que leia o preço de um produto, calcule e mostre seu novo preço com 5% de desconto.
# Read a product price, apply a 5% discount and print the final price.
price = float(input('Preço do produto: '))
discount = price*0.05
final_price = price-discount
print(f'O preço com desconto de 5%: {final_price:.2f} ')
"davii.silva64@gmail.com"
] | davii.silva64@gmail.com |
4e5f578c248af8e339be1f41101fa501f339ff6b | 8f5d74d0eccf95691bb30d13786334a2a4861b06 | /fastai/mult_train2.py | 59f6b25e09abdbba549c307f2d8b38b0cce61cf8 | [
"MIT"
] | permissive | alex-treebeard/DeepLearning-AndroidMalware | 36d3aae78b3bc00f42377e74c8bd12655b58e562 | f63638d31f1c14abeec1e98ce4cd6c29cd7c8ea2 | refs/heads/master | 2022-10-13T16:35:32.782443 | 2020-06-11T11:48:45 | 2020-06-11T11:48:45 | 265,676,146 | 0 | 0 | MIT | 2020-05-20T20:12:14 | 2020-05-20T20:12:13 | null | UTF-8 | Python | false | false | 3,701 | py | #!/bin/python3
from fastai.tabular import *
from fastai.metrics import Recall, Precision
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, train_test_split, StratifiedShuffleSplit
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
print('Imports complete')
# These are the malware types that we are concerned about
types = ['Adware', 'Ransomware', 'Scareware', 'SMSmalware']
# We want to train on each type of malware separately, trying to categorize between the species within each
# malware type. Ex. categorize between Shuanet or Gooligan species of Adware, but not Shuanet-Adware and
# Jisut-Ransomware.
for typ in types:
    print('Training for {}'.format(typ))
    # Import the data from the given csv
    path='../../malware_dataset/{}_species.csv'.format(typ)
    df = pd.read_csv(path, index_col=0)
    # Outputs the top 5 samples from the datafold
    # NOTE(review): the return value is discarded, so this line has no
    # effect outside an interactive session.
    df.head()
    # Processes for fastai to do in the background
    procs = [FillMissing, Categorify, Normalize]
    # Set up some variables for easy use later
    dep_var = 'Label_categorized_2' # This is the dependent variable, the target classification variable
    cont_names = list(set(df.columns) - set([dep_var])) # List of all the column names (not the target class)
    # Copy the dataframe for a hacky solution to provide the TabularList.from_df() method with a full dataset
    # (the copy still contains the target column; df itself loses it below)
    data = df.copy()
    # Rip out the target classification column and save it
    df_y = df[dep_var]
    del df[dep_var]
    # Encode and transform the data
    # Since the target classification is categorical, we need to have a way to interpret the neural network's output.
    # In this case, we are assigning each possible output with an integer 0..n-1, another option would be to use one-hot
    # encoding here. This may be explored further.
    encoder = LabelEncoder()
    encoder.fit(df_y)
    data_y = encoder.transform(df_y)
    # Normalize the x data and rename for consistency
    # NOTE(review): normalization statistics are computed on the full
    # dataset before the CV split, which leaks test information into the
    # scaling of each training fold — consider per-fold normalization.
    data_x = (df - df.mean()) / (df.max() - df.min())
    data_x = data_x.values
    # Set up the metrics we want to collect. I wanted TP,TN,FP,FN but that wasn't available. Recall and precision are still
    # extremely helpful for evaluating the model
    metrics = [accuracy, Recall(), Precision()]
    # Keep track of the folds
    fold_num = 1
    total_folds = 10
    # Get the indices for the fold and train on that fold
    # Our goal here is to implement statified 10-fold cross validation
    for train_idx, test_idx in StratifiedKFold(n_splits=total_folds, shuffle=True, random_state=1).split(data_x, data_y):
        # This will create the datafold the way we need to hand it to the tabular learner class
        data_fold = (TabularList.from_df(data, path=path, cont_names=cont_names, procs=procs)
                        .split_by_idxs(train_idx, test_idx)
                        .label_from_df(cols=dep_var)
                        .databunch())
        print('Fold {}/{}'.format(fold_num, total_folds))
        fold_num+=1
        # Create the learner
        model = tabular_learner(data_fold, layers=[128, 64, 32], metrics=metrics, callback_fns=ShowGraph, ps=0.5)
        # Increase the number of instances we are working with on a batch
        model.data.batch_size = 512
        # Fit the model for one cycle, this will output the statistics/metrics we wanted before for the provided out of sample data.
        # I'm kind of hoping that's true, anyways. fastai doesn't appear to evaluate the metrics (besides loss) before completing
        # the epoch.
        model.fit_one_cycle(cyc_len=1)
| [
"cpjohnson@mavs.coloradomesa.edu"
] | cpjohnson@mavs.coloradomesa.edu |
dd95c9ed7664562737dcb221b7b445925e8e1f67 | a98a1d4d66c8125384d4e2792d6df0b05d070f30 | /semana3/ventor.py | 6b03f0db2b241fc3c04b888a5e07424b21956992 | [] | no_license | osvaldoc/misiontic | 2ea1962f7f4d1fb196ef9f5d1f1899ee0b43516e | 81d5e26ff24e76e19720af46640df81398549fb5 | refs/heads/main | 2023-06-07T04:15:25.064617 | 2021-07-03T16:14:37 | 2021-07-03T16:14:37 | 376,181,032 | 30 | 15 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 5 10:59:53 2021
@author: Osvaldo
"""
class Vector:
    """
    Helper routines for a 'vector' stored in a plain Python list where
    slot 0 holds the logical size and slots 1..n hold the data.

    The methods are written without 'self' and are meant to be called
    on the class itself, e.g. ``Vector.esVacio(V)``.
    """

    def construyeVector(V, n):
        """
        Fill V with n random values in [1, 99]; V[0] stores the size n.

        @param V (list) Destination list (must have at least n+1 slots)
        @param n (int) Number of random values to generate
        """
        # Bug fix: the original module never imported 'random', so this
        # method raised NameError when called. Import locally to keep
        # the module interface unchanged.
        import random
        V[0] = n
        for i in range(1, n + 1):
            V[i] = random.randint(1, 99)

    def esVacio(V):
        """Return True when the vector is empty (size slot V[0] is 0)."""
        return V[0] == 0

    def intercambiar(V, i, j):
        """Swap in place the elements at positions i and j of V."""
        V[i], V[j] = V[j], V[i]
"osvaldoc@gmail.com"
] | osvaldoc@gmail.com |
6929edf559949c2d2a29bb046da7aecd0fd68ad2 | 9a76fe4ea554cc153e9a520d92cb673544f03a08 | /scripts/python3/telapy/api/hermes.py | 08bbe895209c8bfd29710e97830d7c631c93575b | [] | no_license | msecher/scripts_python_3_opentelemac_r14499 | 2262c92ac64c0cdf668b5a511eb26943362c300e | 738e8e491e10bbbc3c21afe01221ed4661ce8a87 | refs/heads/master | 2021-04-07T00:16:48.090702 | 2020-03-19T23:40:03 | 2020-03-19T23:40:03 | 248,626,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,054 | py | # -*- coding: utf-8 -*-
#TODO: Add more logger info
"""
Python wrapper to the Fortran APIs of module hermes of Telemac-Mascaret
Author(s): Fabrice Zaoui, Yoann Audouin, Cedric Goeury, Renaud Barate
Copyright EDF 2016
"""
import sys
import logging
from os import path
from utils.exceptions import TelemacException
import numpy as np
#
# Element-type codes understood by the Hermes Fortran API
# (see elem2str for the human-readable names)
PRISM = 40        # 3d prism element
TETRAHEDRON = 30  # tetrahedron element
QUADRANGLE = 20   # 2d quadrangle element
TRIANGLE = 10     # 2d triangle element
# Boundary element codes (MED files use segments, Serafin uses points)
BND_SEGMENT = 55  # boundary segment (2 points)
BND_POINT = 1     # boundary point
def elem2str(elem):
    """
    Return the human-readable name of a Hermes element-type code.

    @param elem (int) Type of element (PRISM, TETRAHEDRON, QUADRANGLE,
                      TRIANGLE, BND_SEGMENT, BND_POINT)
    @returns (str) Its name, or 'unknown' for any unrecognised code
    """
    # Dict dispatch instead of an if/elif chain
    names = {
        PRISM: 'prism',
        TETRAHEDRON: 'tetrahedron',
        QUADRANGLE: 'quadrangle',
        TRIANGLE: 'triangle',
        BND_SEGMENT: 'bnd segment',
        BND_POINT: 'bnd point',
    }
    return names.get(elem, 'unknown')
class HermesFile():
    """
    Generic Python wrapper around the Hermes Fortran API used to read
    and write TELEMAC-MASCARET mesh/result files.
    """
    # Handle on the _hermes f2py extension module (set in __init__)
    _hermes = None
    logger = logging.getLogger(__name__)
    # Last error code returned by the API (see the 'error' property)
    _error = 0
def __init__(self, file_name, fformat,
access='r', boundary_file=None,
log_lvl='INFO'):
"""
Constructor for HermesFile
@param file_name Name of the file
@param fformat File format
@param access Access to the file ('r' for read 'w' for write)
@param boundary_file Name of the boundary file
"""
if log_lvl == 'INFO':
i_log = logging.INFO
elif log_lvl == 'DEBUG':
i_log = logging.DEBUG
else:
i_log = logging.CRITICAL
logging.basicConfig(level=i_log)
self.fformat = fformat.encode('utf-8') + b' '*(8 - len(fformat))
self.file_name = file_name
self.boundary_file = boundary_file
try:
self.logger.debug("Loading hermes f2py module")
import _hermes
except Exception as execpt:
if sys.platform.startswith('linux'):
ext = 'so'
elif sys.platform.startswith('win'):
ext = 'dll'
else:
raise TelemacException('Error: unsupported Operating System!')
raise TelemacException(\
'Error: unable to load the dynamic library '
+ '_hermes.' + ext
+ '\nYou can check the environment variable:'
+ ' PYTHONPATH'
+ '\n'+str(execpt))
HermesFile._hermes = sys.modules['_hermes']
if 'r' in access:
if 'w' in access:
self.openmode = b'READWRITE'
else:
self.openmode = b'READ '
if not path.exists(self.file_name):
raise TelemacException(\
"Could not find {}".format(self.file_name))
elif 'w' in access:
self.openmode = b'WRITE '
else:
raise TelemacException(\
"Error in access string '%s' \
should contain only r and/or w " % access)
self.logger.debug("Opening mesh %s in format %s in mode %s",
self.file_name,
self.fformat,
self.openmode)
self.my_id, self.error = \
HermesFile._hermes.open_mesh(self.fformat,
self.file_name,
self.openmode)
if self.boundary_file is not None:
self.logger.debug("Opening bnd %s in format %s in mode %s",
self.fformat,
self.file_name,
self.openmode)
self._errror = \
HermesFile._hermes.open_bnd(self.fformat,
self.boundary_file,
self.my_id,
self.openmode)
if b'READ' in self.openmode:
# Identifying elements type in files
ndim = self.get_mesh_dimension()
if ndim == 2:
self.typ_elem = TRIANGLE
# If no triangle it must be quadrangle
nelem = self.get_mesh_nelem()
if nelem == 0:
self.typ_elem = QUADRANGLE
else:
self.typ_elem = PRISM
if 'med' in fformat.lower():
self.typ_bnd_elem = BND_SEGMENT
else:
self.typ_bnd_elem = BND_POINT
else:
self.typ_elem = None
self.typ_bnd_elem = None
self.logger.debug("typ_elem: %s and typ_bnd_elem: %s",
elem2str(self.typ_elem),
elem2str(self.typ_bnd_elem))
    @property
    def error(self):
        """Last Hermes API error code.

        Always 0 in practice: a non-zero value raises inside the setter
        instead of being stored (see the setter below).
        """
        return self._error
@error.setter
def error(self, value):
"""Detect errors
Overwright attribute setter to detect API errors.
If :attr:`error` is not set null, an error is raised and the programme
is terminated.
:param int value: value to assign
"""
if value != 0:
self.logger.error("Hermes API error:\n%s", \
HermesFile._hermes.get_error_message())
raise TelemacException("Hermes API error:\n%s", \
HermesFile._hermes.get_error_message())
self._error = 0
def close(self):
"""
Closing file
"""
if self.boundary_file is not None:
self.logger.debug("Closing bnd file %s", self.boundary_file)
if HermesFile._hermes is not None:
self.error = HermesFile._hermes.close_bnd(self.fformat,
self.my_id)
self.logger.debug("Closing mesh file %s", self.file_name)
if HermesFile._hermes is not None:
self.error = HermesFile._hermes.close_mesh(self.fformat, self.my_id)
def get_mesh_title(self):
"""
Retuns the title of the file
@returns The title
"""
self.logger.debug("Getting title")
title, self.error = HermesFile._hermes.get_mesh_title(self.fformat,
self.my_id)
return title.decode('utf-8')
def get_mesh_date(self):
"""
Retuns the date of the file
@returns The date (6-integer-array)
"""
self.logger.debug("Getting date")
date = np.zeros((6), dtype=np.int32)
self.error = HermesFile._hermes.get_mesh_date(self.fformat,
self.my_id, date)
return date
def get_mesh_nelem(self):
"""
Retuns the number of element in the file
@returns The number of elements
"""
self.logger.debug("Getting number of elements")
nelem, self.error = \
HermesFile._hermes.get_mesh_nelem(self.fformat,
self.my_id,
self.typ_elem)
return nelem
def get_mesh_npoin_per_element(self):
"""
Retuns the number of points per element in the file
@returns The number of points per element
"""
self.logger.debug("Getting number of points per element")
ndp, self.error = HermesFile._hermes.get_mesh_npoin_per_element(\
self.fformat,
self.my_id, self.typ_elem)
return ndp
def get_mesh_connectivity(self):
"""
Retuns the connectivity for the given type
@returns An 2d array of shape (nelem, ndp)
"""
self.logger.debug("Getting connectivity")
nelem = self.get_mesh_nelem()
self.logger.debug("Number of element: %d", nelem)
ndp = self.get_mesh_npoin_per_element()
self.logger.debug("Number of points per element: %d", ndp)
tmp_ikle = np.zeros((nelem*ndp), dtype=np.int32)
self.error = HermesFile._hermes.get_mesh_connectivity(\
self.fformat, self.my_id, self.typ_elem,
tmp_ikle, nelem, ndp)
ikle = tmp_ikle.reshape((nelem, ndp)) -1
return ikle
def get_mesh_npoin(self):
"""
Retuns the number of points
@returns The number of points
"""
self.logger.debug("Getting number of points %d %d",
self.my_id, self.typ_elem)
npoin, self.error = HermesFile._hermes.get_mesh_npoin(\
self.fformat, self.my_id, self.typ_elem)
return npoin
def get_mesh_nplan(self):
"""
Retuns the number of planes
@returns The number of planes
"""
self.logger.debug("Getting number of planes")
nplan, self.error = HermesFile._hermes.get_mesh_nplan(\
self.fformat, self.my_id)
return nplan
def get_mesh_dimension(self):
"""
Retuns the number of dimensions
@returns The number of dimensions
"""
self.logger.debug("Getting number of dimension")
ndim, self.error = HermesFile._hermes.get_mesh_dimension(\
self.fformat, self.my_id)
return ndim
def get_mesh_coord(self, jdim):
"""
Retuns the coordinates of each points for a given dimension
@param jdim Index of dimension [1-ndim]
@returns A numpy array of size npoin
"""
self.logger.debug("Getting coordinates for dimension %d", jdim)
ndim = 0
npoin = self.get_mesh_npoin()
ndim = self.get_mesh_dimension()
coord = np.zeros((npoin))
# If in serafin and dimension 3 z coordinates is the
# first variable at the first time step
if jdim == 3 and b'SERAFIN' in self.fformat:
var_names, _ = self.get_data_var_list()
coord = self.get_data_value(var_names[0], 0)
else:
self.error = HermesFile._hermes.get_mesh_coord(\
self.fformat,
self.my_id,
jdim,
ndim,
coord,
npoin)
return coord
def get_mesh_l2g_numbering(self):
"""
Retuns the local to global numbering
@returns The local to global numbering
"""
self.logger.debug("Getting local to gloval numbering")
npoin = self.get_mesh_npoin()
knolg = np.zeros((npoin), dtype=np.int32)
self.error = HermesFile._hermes.get_mesh_l2g_numbering(\
self.fformat,
self.my_id,
knolg,
npoin)
return knolg
def get_mesh_nptir(self):
"""
Retuns the number of interface points
@returns The number of interface points
"""
self.logger.debug("Getting number of interface points")
nptir, self.error = HermesFile._hermes.get_mesh_nptir(\
self.fformat, self.my_id)
return nptir
def get_bnd_ipobo(self):
"""
Retuns the ipobo array
@returns The ipobo array
"""
self.logger.debug("Getting boundary ipobo")
npoin = self.get_mesh_npoin()
nelebd = self.get_bnd_nelem()
ipobo = np.zeros((npoin), dtype=np.int32)
self.error = HermesFile._hermes.get_bnd_ipobo(\
self.fformat,
self.my_id,
nelebd,
self.typ_bnd_elem,
ipobo,
npoin)
return ipobo
def get_bnd_numbering(self):
"""
Retuns the boundary to general numbering
@returns The boundary to general numbering
"""
self.logger.debug("Getting boundary numbering")
nptfr = self.get_bnd_npoin()
nbor = np.zeros((nptfr), dtype=np.int32)
self.error = HermesFile._hermes.get_bnd_numbering(\
self.fformat,
self.my_id,
self.typ_bnd_elem,
nbor,
nptfr)
# Switching to Python Numbering
nbor -= 1
return nbor
    def get_bnd_connectivity(self):
        """
        Return the connectivity array for the boundary elements.

        @returns (np.ndarray) Array of shape (nelebd, ndp) where ndp is
                 2 for boundary segments and 1 for boundary points
        """
        self.logger.debug("Getting boundary connectivity")
        nelebd = self.get_bnd_nelem()
        # Segments carry 2 points per element, boundary points just 1
        if self.typ_bnd_elem == BND_SEGMENT:
            ndp = 2
        else:
            ndp = 1
        tmp_ikle_bnd = np.zeros((nelebd*ndp), dtype=np.int32)
        self.error = HermesFile._hermes.get_bnd_connectivity(\
                self.fformat,
                self.my_id,
                self.typ_bnd_elem,
                nelebd, ndp,
                tmp_ikle_bnd)
        ikle_bnd = np.zeros((nelebd, ndp), dtype=np.int32)
        # The flat array comes back node-major: the value for element i,
        # node j sits at position j*nelebd + i, so this loop transposes
        # it into (element, node) layout.
        # NOTE(review): unlike get_mesh_connectivity, the indices are
        # not shifted to 0-based here — confirm this is intended.
        for i in range(nelebd):
            for j in range(ndp):
                ikle_bnd[i, j] = tmp_ikle_bnd[j*nelebd+i]
        del tmp_ikle_bnd
        return ikle_bnd
def get_bnd_npoin(self):
"""
Retuns the number of boundary points
@returns The number of boundary points
"""
self.logger.debug("Getting number of boundary points")
nptfr, self.error = HermesFile._hermes.get_bnd_npoin(\
self.fformat,
self.my_id,
self.typ_bnd_elem)
return nptfr
def get_bnd_nelem(self):
"""
Retuns the number of boundary elements
@returns The number of boundary elements
"""
self.logger.debug("Getting number of boundary elements")
nelebd, self.error = HermesFile._hermes.get_bnd_nelem(\
self.fformat,
self.my_id,
self.typ_bnd_elem)
return nelebd
    def get_bnd_value(self):
        """
        Return the information on the boundary values.

        All arrays are of size nptfr (number of boundary points) and are
        filled in place by the Fortran call.

        @returns lihbor, liubor, livbor, hbor, ubor, vbor, chbord,
                 litbor, tbor, atbor, btbor, color
        """
        self.logger.debug("Getting boundary values")
        nptfr = self.get_bnd_npoin()
        nelebd = self.get_bnd_nelem()
        # Boundary condition type codes (integers) ...
        lihbor = np.zeros((nptfr), dtype=np.int32)
        liubor = np.zeros((nptfr), dtype=np.int32)
        livbor = np.zeros((nptfr), dtype=np.int32)
        # ... and the prescribed values (floats)
        hbor = np.zeros((nptfr))
        ubor = np.zeros((nptfr))
        vbor = np.zeros((nptfr))
        chbord = np.zeros((nptfr))
        # trac=True: presumably requests the tracer-related arrays
        # (litbor, tbor, atbor, btbor) as well — TODO confirm
        trac = True
        litbor = np.zeros((nptfr), dtype=np.int32)
        tbor = np.zeros((nptfr))
        atbor = np.zeros((nptfr))
        btbor = np.zeros((nptfr))
        # Positional call: argument order must match the Fortran API
        self.error = HermesFile._hermes.get_bnd_value(\
                self.fformat,
                self.my_id,
                self.typ_bnd_elem,
                nelebd,
                lihbor,
                liubor,
                livbor,
                hbor,
                ubor,
                vbor,
                chbord,
                trac,
                litbor,
                tbor,
                atbor,
                btbor,
                nptfr)
        # Boundary colour codes are fetched with a separate call
        color = np.zeros((nptfr), dtype=np.int32)
        self.error = HermesFile._hermes.get_bnd_color(\
                self.fformat, self.my_id,
                self.typ_bnd_elem,
                color)
        return lihbor, liubor, livbor, hbor, ubor, vbor, chbord, \
               litbor, tbor, atbor, btbor, color
def get_data_nvar(self):
"""
Returns the number of variables
@returns The number of variables
"""
self.logger.debug("Get number of variables")
nvar, self.error = HermesFile._hermes.get_data_nvar(\
self.fformat,
self.my_id)
return nvar
    def get_data_var_list(self):
        """
        Return the list of the variables name and units.

        @returns Two lists of size nvar: first the names, second the
                 units (both stripped of trailing blanks)
        """
        self.logger.debug("Get list of variable")
        nvar = self.get_data_nvar()
        res = HermesFile._hermes.get_data_var_list2(self.fformat,
                                                    self.my_id,
                                                    nvar)
        # res holds two flat byte arrays (names and units) plus an
        # error code; each entry is a fixed-width 16-character field
        tmp_var_name, tmp_var_unit, self.error = res
        vnames = []
        vunit = []
        # Reordering string array for variable names
        # Extracting name and info into a list
        for i in range(nvar):
            # Join the 16 single-byte items of field i, decode and strip
            var_name = b''.join(tmp_var_name[i*16:(i+1)*16]).decode('utf-8')
            var_unit = b''.join(tmp_var_unit[i*16:(i+1)*16]).decode('utf-8')
            vnames.append(var_name.strip())
            vunit.append(var_unit.strip())
        return vnames, vunit
def get_data_ntimestep(self):
"""
Retuns the number of time steps
@returns The number of time steps
"""
self.logger.debug("Get data ntimestep")
ntimestep, self.error = HermesFile._hermes.get_data_ntimestep(\
self.fformat, self.my_id)
return ntimestep
def get_data_time(self, record):
"""
Retuns the time of a given record
@param record Number of the record (starts from 0)
@returns The time
"""
if record == -1:
ntimestep = self.get_data_ntimestep()
rrecord = ntimestep -1
else:
rrecord = record
self.logger.debug("Get data time at %d", record)
time, self.error = HermesFile._hermes.get_data_time(\
self.fformat,
self.my_id,
rrecord)
return time
def get_data_value(self, var_name, record):
"""
Retuns the value for each point for a given variable and a given record
@param var_name Name of the variable
@param record Number of the record (starts from 0 if a -1 is given will
give the last time step)
@returns A numpy array of size npoin
"""
npoin = self.get_mesh_npoin()
values = np.zeros((npoin))
var_name2 = var_name + ' '*(16 - len(var_name))
if record == -1:
ntimestep = self.get_data_ntimestep()
rrecord = ntimestep -1
else:
rrecord = record
self.logger.debug("Getting data for %s at %d",
var_name2, record)
self.error = HermesFile._hermes.get_data_value(\
self.fformat,
self.my_id,
rrecord,
var_name2,
values,
npoin)
return values
    def set_header(self, title, nvar, var_name, var_unit):
        """
        Write the header of the file.

        @param title Title of the file (at most 80 characters)
        @param nvar Number of variables
        @param var_name Name for each variable (at most 16 characters each)
        @param var_unit Unit for each variable (at most 16 characters each)
        """
        # The title is stored as an 80-character blank-padded byte string.
        tmp_title = title.encode('utf-8') + b' '*(80-len(title))
        # Each variable occupies a 32-byte slot: 16 bytes of name followed
        # by 16 bytes of unit, blank-padded byte by byte.
        tmp_var_name = [b' ']*32*nvar
        for i, (var, unit) in enumerate(zip(var_name, var_unit)):
            for j, varj in enumerate(var):
                tmp_var_name[i*32+j] = varj.encode('utf-8')
            for j, unitj in enumerate(unit):
                tmp_var_name[i*32+16+j] = unitj.encode('utf-8')
        self.logger.debug("Writing header information")
        self.error = HermesFile._hermes.set_header(\
                        self.fformat, self.my_id,
                        tmp_title, tmp_var_name, nvar)
    def set_mesh(self, mesh_dim, typ_elem, ndp, nptfr, nptir, nelem, npoin,
                 ikles, ipobo, knolg, coordx, coordy, nplan, date,
                 time, coordz=None):
        """
        Write the mesh information into the file.

        @param mesh_dim Dimension of the mesh
        @param typ_elem Type of the mesh elements
        @param ndp Number of points per element
        @param nptfr Number of boundary points
        @param nptir Number of interface points
        @param nelem Number of elements in the mesh
        @param npoin Number of points in the mesh
        @param ikles Connectivity array for the main element (0-based)
        @param ipobo Is-a-boundary-point? array
        @param knolg Local to global numbering array
        @param coordx X coordinates of the mesh points
        @param coordy Y coordinates of the mesh points
        @param nplan Number of planes
        @param date Date of the creation of the mesh
        @param time Time of the creation of the mesh
        @param coordz Z coordinates of the mesh points (defaults to zeros)
        """
        if coordz is None:
            tmp_z = np.zeros((npoin))
        else:
            tmp_z = coordz
        self.typ_elem = typ_elem
        # Transposed, flattened copy of the connectivity; the +1 shifts
        # the 0-based indices to the 1-based numbering stored in the file.
        tmp_ikle = ikles.T.reshape((nelem*ndp)) + 1
        self.logger.debug("Writing mesh information")
        self.error = HermesFile._hermes.set_mesh(\
                        self.fformat, self.my_id,
                        mesh_dim, typ_elem, ndp, nptfr,
                        nptir, nelem, tmp_ikle,
                        ipobo, knolg, coordx, coordy,
                        nplan, date, time, npoin, tmp_z)
        del tmp_ikle
        if coordz is None:
            del tmp_z
    def add_data(self, var_name, var_unit, time, record, first_var, values):
        """
        Write information for a given variable and a given timestep.

        @param var_name Name of the variable (at most 16 characters)
        @param var_unit Unit of the variable (at most 16 characters)
        @param time Time of the data
        @param record Time step of the data (starts from 0)
        @param first_var True if it is the first variable of the dataset
        @param values The value for each point of the mesh
        """
        nval = len(values)
        # 32-byte variable identifier: 16 blank-padded bytes of name then
        # 16 blank-padded bytes of unit (same layout as in set_header).
        tmp_var_name = var_name.encode('utf-8') + b' '*(16-len(var_name)) +\
                       var_unit.encode('utf-8') + b' '*(16-len(var_unit))
        self.logger.debug("Writing data for %s at record %d",
                          tmp_var_name, record)
        self.error = HermesFile._hermes.add_data(\
                        self.fformat, self.my_id,
                        tmp_var_name, time, record,
                        first_var, values, nval)
    def set_bnd(self, typ_bnd_elem, nelebd, ikle, lihbor, liubor,
                livbor, hbor, ubor, vbor, chbord, litbor, tbor, atbor,
                btbor, color):
        """
        Write the boundary information.

        @param typ_bnd_elem Type of the boundary elements
        @param nelebd Number of boundary elements
        @param ikle Connectivity array for the boundary elements
        @param lihbor Type of boundary conditions on depth
        @param liubor Type of boundary conditions on u
        @param livbor Type of boundary conditions on v
        @param hbor Prescribed boundary condition on depth
        @param ubor Prescribed boundary condition on velocity u
        @param vbor Prescribed boundary condition on velocity v
        @param chbord Friction coefficient at boundary
        @param litbor Physical boundary conditions for tracers
        @param tbor Prescribed boundary condition on tracer
        @param atbor Thermal exchange coefficients
        @param btbor Thermal exchange coefficients
        @param color Boundary color of the boundary element
        """
        self.typ_bnd_elem = typ_bnd_elem
        # Segments carry two points per boundary element, points one.
        if self.typ_bnd_elem == BND_SEGMENT:
            ndp = 2
        else:
            ndp = 1
        # Switching to the transposed, flattened layout used by the wrapper.
        tmp_ikle = ikle.T.reshape((nelebd*ndp))
        self.logger.debug("Writing boundary file")
        self.error = HermesFile._hermes.set_bnd(\
                        self.fformat, self.my_id,
                        typ_bnd_elem, nelebd, ndp, tmp_ikle,
                        lihbor,
                        liubor, livbor, hbor, ubor, vbor,
                        chbord, litbor, tbor, atbor, btbor,
                        color)
        del tmp_ikle
    def show_mesh(self, show=True, visu2d=True):
        """
        Show the 2D mesh coloured with the 'WATER DEPTH' variable of the
        first record.

        @param show Display the graph (Default True)
        @param visu2d 2d display (Default True; otherwise a 3D trisurf)
        @returns the figure object
        """
        # Imported lazily so matplotlib is only needed when plotting.
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        from mpl_toolkits.mplot3d import Axes3D
        coordx = self.get_mesh_coord(1)
        coordy = self.get_mesh_coord(2)
        tri = self.get_mesh_connectivity()
        # NOTE(review): reads 'WATER DEPTH' at record 0 although the plot
        # title speaks of the bottom elevation — confirm which is intended.
        bottom = self.get_data_value('WATER DEPTH', 0)
        fig = plt.figure()
        if visu2d:
            plt.tripcolor(coordx, coordy, tri, bottom,
                          shading='flat', edgecolor='w', cmap=cm.terrain)
            plt.colorbar()
        else:
            axe = Axes3D(fig)
            axe.plot_trisurf(coordx, coordy, tri, bottom,
                             cmap=cm.terrain, linewidth=0.1)
        plt.title('2D mesh (%d triangles, %d nodes) \
with the bottom elevation (m)' % (self.get_mesh_nelem(),
                                  self.get_mesh_npoin()))
        plt.xlabel('X-coordinate (m)')
        plt.ylabel('Y-coordinate (m)')
        if show:
            plt.show()
        return fig
    def import_group_info(self, src):
        """
        Import group information from a given HermesFile.

        @param src HermesFile from which groups will be imported
        """
        self.typ_bnd_elem = src.typ_bnd_elem
        # NOTE(review): the wrapper needs an integer; 0 presumably encodes
        # "no boundary element type" — confirm against the hermes API.
        if self.typ_bnd_elem is None:
            tmp_typ_bnd_elem = 0
        else:
            tmp_typ_bnd_elem = self.typ_bnd_elem
        ikle_bnd = src.get_bnd_connectivity()
        nelebd, ndp = ikle_bnd.shape
        # Same transposed, flattened connectivity layout as in set_bnd.
        tmp_ikle_bnd = ikle_bnd.T.reshape((nelebd*ndp))
        self.logger.debug("Transfering group information")
        self.error = HermesFile._hermes.transfer_group_info(\
                        self.fformat, src.my_id,
                        self.my_id, self.typ_elem,
                        tmp_typ_bnd_elem, tmp_ikle_bnd, nelebd, ndp,
                        False, False)
    def import_from(self, src):
        """
        Rewrite the whole content of *src* into this file: mesh, boundary
        information (when available) and every variable of every record.

        @param src HermesFile from which the data is imported
        """
        self.logger.debug("Getting information from file %s", src.file_name)
        title = src.get_mesh_title()
        date = src.get_mesh_date()
        ndim = src.get_mesh_dimension()
        typ_elem = src.typ_elem
        typ_bnd_elem = src.typ_bnd_elem
        npoin = src.get_mesh_npoin()
        nelem = src.get_mesh_nelem()
        ndp = src.get_mesh_npoin_per_element()
        nplan = src.get_mesh_nplan()
        coordx = src.get_mesh_coord(1)
        coordy = src.get_mesh_coord(2)
        coordz = None
        if ndim == 3:
            coordz = src.get_mesh_coord(3)
        ikle = src.get_mesh_connectivity()
        nptir = src.get_mesh_nptir()
        if nptir > 0:
            knolg = src.get_mesh_l2g_numbering()
        else:
            # No interface points: no local-to-global numbering available.
            knolg = np.zeros((npoin), dtype=np.int32)
        if src.boundary_file is not None:
            typ_bnd_elem = src.typ_bnd_elem
            nptfr = src.get_bnd_npoin()
            nelebd = src.get_bnd_nelem()
            ipobo = src.get_bnd_ipobo()
            ikle_bnd = src.get_bnd_connectivity()
            lihbor, liubor, livbor, hbor, ubor, vbor, chbord, \
                litbor, tbor, atbor, btbor, color = src.get_bnd_value()
        else:
            # Without a boundary file ikle_bnd and the boundary arrays stay
            # undefined; this is safe because the set_bnd call below is
            # guarded by the same src.boundary_file test.
            nptfr = 0
            nelebd = 0
            ipobo = np.zeros((npoin), dtype=np.int32)
        ntimestep = src.get_data_ntimestep()
        nvar = src.get_data_nvar()
        var_name, var_unit = src.get_data_var_list()
        self.set_header(title, nvar, var_name, var_unit)
        # get_mesh_date() returns six values; set_mesh expects two triplets
        # (date and time of day).
        date2 = np.zeros((3), dtype=np.int32)
        time2 = np.zeros((3), dtype=np.int32)
        date2[0] = date[0]
        date2[1] = date[1]
        date2[2] = date[2]
        time2[0] = date[3]
        time2[1] = date[4]
        time2[2] = date[5]
        self.set_mesh(ndim, typ_elem, ndp, nptfr, nptir, nelem, npoin,
                      ikle, ipobo, knolg, coordx, coordy, nplan, date2,
                      time2, coordz)
        if self.fformat == src.fformat and b'SERAFIN' not in self.fformat:
            # Same non-SERAFIN format on both sides: groups can be
            # transferred directly.
            self.import_group_info(src)
        else:
            if src.boundary_file is not None:
                self.set_bnd(typ_bnd_elem, nelebd, ikle_bnd, lihbor, liubor,
                             livbor, hbor, ubor, vbor, chbord, litbor, tbor,
                             atbor, btbor, color)
        # Copy every variable of every time step.
        for i in range(ntimestep):
            time = src.get_data_time(i)
            for j in range(nvar):
                values = src.get_data_value(var_name[j], i)
                self.add_data(var_name[j], var_unit[j],
                              time, i, j == 0, values)
                del values
def __repr__(self):
"""
representation of the object
"""
string = '*'*32 + '\n'
string += 'Generic info' + '\n'
string += '*'*32 + '\n'
string += "Title: %s\n" % self.get_mesh_title()[1:72]
date = self.get_mesh_date()
string += "Date: %d/%d/%d %dH%dM%dS\n" % (date[2], date[1], date[0],
date[3], date[4], date[5])
string += '*'*32 + '\n'
string += 'Mesh info\n'
string += '*'*32 + '\n'
ndim = self.get_mesh_dimension()
string += "Ndim: %d\n" % ndim
if self.typ_elem == TRIANGLE:
string += "Element type: TRIANGLE\n"
elif self.typ_elem == PRISM:
string += "Element type: PRISM\n"
else:
string += "Element type: UNKNOWN\n"
string += "Npoin: %d\n" % self.get_mesh_npoin()
string += "Nelem: %d\n" % self.get_mesh_nelem()
string += "Ndp: %d\n" % self.get_mesh_npoin_per_element()
string += "nplan: %d\n" % self.get_mesh_nplan()
string += "coordinates:\n"
string += " - On x :%s\n" % str(self.get_mesh_coord(1))
string += " - On y :%s\n" % str(self.get_mesh_coord(2))
if ndim == 3:
string += " - On z :%s\n" % str(self.get_mesh_coord(3))
string += "ikle: %s\n" % str(self.get_mesh_connectivity())
string += '*'*32 + '\n'
string += 'Parallel info\n'
string += '*'*32 + '\n'
nptir = self.get_mesh_nptir()
if nptir != 0:
string += "Nptir: %d\n" % nptir
string += "knolg: %s\n" % str(self.get_mesh_l2g_numbering())
else:
string += 'No parallel information\n'
string += '*'*32 + '\n'
string += 'Bnd info\n'
string += '*'*32 + '\n'
if self.boundary_file is not None:
if self.typ_bnd_elem == BND_POINT:
string += "Bnd element: BND_POINT\n"
elif self.typ_bnd_elem == BND_SEGMENT:
string += "Bnd element: BND_SEGMENT\n"
else:
string += "Bnd element: UNKNOWN\n"
string += "Nptfr: %d\n" % self.get_bnd_npoin()
string += "Nelebd: %d\n" % self.get_bnd_nelem()
string += "ipobo: %s\n" % self.get_bnd_ipobo()
string += "nbor: %s\n" % self.get_bnd_numbering()
string += "ikle_bnd: %s\n" % self.get_bnd_connectivity()
lihbor, liubor, livbor, _, _, _, _, \
litbor, _, _, _, _ = self.get_bnd_value()
string += "bnd_values: \n"
string += str(zip(lihbor, liubor, livbor, litbor)) + '\n'
else:
string += "No boundary information"
string += '*'*32 + '\n'
string += 'Data info\n'
string += '*'*32 + '\n'
ntimestep = self.get_data_ntimestep()
string += "ntimestep: %d\n" % ntimestep
nvar = self.get_data_nvar()
string += "nvar: %d\n" % nvar
var_name, var_unit = self.get_data_var_list()
string += "var info:\n"
for var in zip(var_name, var_unit):
string += str(var) + '\n'
for i in range(ntimestep):
string += "Time: %fs\n" % self.get_data_time(i)
for j in range(nvar):
string += " - for %s:\n" % var_name[j]
string += " " + str(self.get_data_value(var_name[j], i))\
+ '\n'
return string
| [
"matthieu.secher@edf.fr"
] | matthieu.secher@edf.fr |
43e87339944816164097fc0322bc688a97288b83 | 06cab719f4feb2143e8b1bd88b8c6bc9a3946dd4 | /.local/share/Trash/files/locallibrary.7/locallibrary/catalog/urls.py | 604e03dd862da2cdeb4a2bd953bfb8dcd0b3e355 | [] | no_license | 12172001/project | 6f141e33987c91772aa469b49ca2d3037de09ed2 | f6b775bba0cb7048b1bfe6bc8cefab363a1f0b84 | refs/heads/master | 2023-01-20T19:35:54.741635 | 2020-12-02T16:06:23 | 2020-12-02T16:06:23 | 304,893,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | from django.urls import path
from . import views
# All URLConf entries.  Bug fix: the file originally ended with a second
# `urlpatterns = [path('', views.index, name='index')]` assignment that
# silently discarded every route registered above it (and duplicated the
# 'index' name); that reassignment has been removed.
urlpatterns = [
    path('', views.index, name='index'),
    path('books/', views.BookListView.as_view(), name='books'),
    path('book/<int:pk>', views.BookDetailView.as_view(), name='book-detail'),
    path('authors/', views.AuthorListView.as_view(), name='authors'),
    path('author/<int:pk>',
         views.AuthorDetailView.as_view(), name='author-detail'),
]
# Borrowed-book views (per-user and librarian-wide).
urlpatterns += [
    path('mybooks/', views.LoanedBooksByUserListView.as_view(), name='my-borrowed'),
    path(r'borrowed/', views.LoanedBooksAllListView.as_view(), name='all-borrowed'),  # Added for challenge
]
# Add URLConf for librarian to renew a book.
urlpatterns += [
    path('book/<uuid:pk>/renew/', views.renew_book_librarian, name='renew-book-librarian'),
]
# Add URLConf to create, update, and delete authors
urlpatterns += [
    path('author/create/', views.AuthorCreate.as_view(), name='author_create'),
    path('author/<int:pk>/update/', views.AuthorUpdate.as_view(), name='author_update'),
    path('author/<int:pk>/delete/', views.AuthorDelete.as_view(), name='author_delete'),
]
# Add URLConf to create, update, and delete books
urlpatterns += [
    path('book/create/', views.BookCreate.as_view(), name='book_create'),
    path('book/<int:pk>/update/', views.BookUpdate.as_view(), name='book_update'),
    path('book/<int:pk>/delete/', views.BookDelete.as_view(), name='book_delete'),
]
] | [
"aabdykhalykovaa17@gmail.com"
] | aabdykhalykovaa17@gmail.com |
8209ccfb574e410bffde9d84ecf55530a08cb9d7 | ad34fbdbbfcbfd369114bbc0e6c22e5b22d4daaf | /api/app.py | 102896ac9f6610c65f30867a53c0e125d34540cf | [
"MIT"
] | permissive | SmartDuck9000/studnetwork-api | 3dc5d74cff2232818f78fd87a0e1d0c431f217ac | 2010e691ad189019451f3854c65f78a49d198383 | refs/heads/main | 2023-03-23T06:53:44.436664 | 2021-03-18T18:05:41 | 2021-03-18T18:05:41 | 349,163,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | from flask import Flask, request, jsonify, render_template
from flask_cors import CORS, cross_origin
import config
from Controller import controller
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'


@app.route('/<path:path>', methods=['GET'])
@cross_origin()
def index(path):
    # Serve the front-end assets from the static folder.
    return app.send_static_file(path)


@app.route('/api/login/<string:code>', methods=['GET'])
@cross_origin()
def login(code):
    # Exchange an auth code for a session token.
    return jsonify({"token": controller.login(code)})


@app.route('/api/users/<string:token>', methods=['GET', 'POST'])
@cross_origin()
def user_request(token):
    # POST updates the user from the request body; GET returns the user.
    if request.method == 'POST':
        controller.post_user(token, request.get_json(force=True))
        return jsonify({})
    return jsonify(controller.get_user(token))


@app.route('/api/users/exit/<string:token>', methods=['POST'])
@cross_origin()
def exit_user(token):
    # The controller decides the HTTP status code of the empty response.
    status_code = controller.exit_user(token)
    return jsonify({}), status_code


@app.route('/api/graph/<int:depth>/<string:token>', methods=['POST'])
@cross_origin()
def get_graph(depth, token):
    # Filters come in the request body; depth limits graph traversal.
    graph_filters = request.get_json(force=True)
    return jsonify(controller.get_graph(token, graph_filters, depth))


if __name__ == "__main__":
    app.run(host=config.host, port=config.port)
| [
"kulikovgeorgij000@gmail.com"
] | kulikovgeorgij000@gmail.com |
55cfe9b2e4b5a093f061ee4a41bf46e27acbf4fe | 72a67d78ebf3dfd6a87e62a57276af14fd8cefe5 | /Python100/69.py | 687537d57895200daa5258236a42d8f21018c05d | [] | no_license | yanghaotai/github | 4af02a68ffa990fd0734ef49f747e9482a549337 | 8f915d0bead9a8f7cac460d88439bfbdc9c0bd44 | refs/heads/master | 2021-01-15T18:40:53.340017 | 2017-08-10T03:26:28 | 2017-08-10T03:26:28 | 99,797,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | #encoding:utf-8
'''
【程序69】
题目:有n个人围成一圈,顺序排号。从第一个人开始报数(从1到3报数),凡报到3的人退出
圈子,问最后留下的是原来第几号的那位。
1. 程序分析:
2.程序源代码:
'''
# # def num(n):
# # for i range(n):
#
# a = []
# for i in range(1,101):
# a.append(i)
# print a
#
# for i in range(len(a)-1,-1,-1):
# # print a[i],
# if a[i] % 3 == 0:
# a.pop(i)
# print a
# # print a
# print len(a)
#
# for i in range(len(a)-1,-1,-1):
# # print a[i],
# if (i+1) % 3 == 0:
# a.pop(i)
# print a
# # print a
# Josephus-style elimination: n people in a circle count 1..3; every
# person who says "3" leaves.  Prints the original number of the last
# person remaining.  NOTE: Python 2 code (raw_input, print statement).
if __name__ == '__main__':
    nmax = 50  # leftover upper bound from the exercise, never used
    n = int(raw_input('请输入总人数:'))
    num = []
    for i in range(n):
        num.append(i + 1)   # people are numbered 1..n; 0 marks "out"
    i = 0   # current position in the circle
    k = 0   # count since the last elimination (resets at 3)
    m = 0   # how many people have been eliminated so far
    while m < n - 1:
        if num[i] != 0 : k += 1
        if k == 3:
            num[i] = 0     # person i leaves the circle
            k = 0
            m += 1
        i += 1
        if i == n : i = 0  # wrap around the circle
    i = 0
    while num[i] == 0: i += 1
    print num[i]
"yanghaotai@163.com"
] | yanghaotai@163.com |
b0e9d4de32bf2c66d29501f41268cb68e06fb35f | baf9bbfdaa1bb3800d3e11a24c8660fe535b047c | /yaml_files_for_testing/clean.py | a923e02322af33b99169445e196be77217551d8a | [] | no_license | shivramsrivastava/kubernetes-1.10.4-kubemark | 7d332a73ce7a4ec5d9aa611e109290ffeb1b3449 | cfd2cf11e05a91f09c8eeb5649e826121f2fb891 | refs/heads/master | 2020-03-21T13:19:51.621074 | 2018-08-31T12:22:30 | 2018-08-31T12:22:30 | 138,600,188 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | #! /usr/bin/python
import os
import commands
print commands.getoutput("rm -rf ./normal_pods_rs")
op = commands.getoutput("kubectl get rs")
split = op.split("\n")
split = split[1:]
for line in split:
cmd = "kubectl delete rs " + line.split(" ")[0] + " &"
os.system( cmd )
| [
"pratik.meher@huawei.com"
] | pratik.meher@huawei.com |
ece457ce53b9f953aa5cf6acdcb37b7092256ded | 17e5a0336a3e0e1c26be7cad8d53d461a1a214cf | /Individual/src/L2 Generalized Linear Regression Fitting.py | 09b650115d7d519c7b92f38ced612993bc174ff8 | [] | no_license | procrasprincess/Data-Mining-Projects | 7e1e77e9b912a97bd111fe61dad1a4139cb24850 | b49d9962b68da893e1999c8fa8a1b31cbacbac46 | refs/heads/master | 2022-12-16T22:25:27.824504 | 2020-09-21T16:27:58 | 2020-09-21T16:27:58 | 297,395,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,259 | py | #!/usr/bin/env python
# coding: utf-8
# # L2 Generalized Linear Regression
# In[137]:
import pandas as pd
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import KFold # import KFold
from sklearn.model_selection import cross_val_score
# In[138]:
def ridge_regression(data, predictors, alpha):
#Fit the model
ridgereg = Ridge(alpha=alpha,normalize=True)
ridgereg.fit(data[predictors],data['y'])
y_pred = ridgereg.predict(data[predictors])
#Check if a plot is to be made for the entered alpha
#if alpha in models_to_plot:
# plt.subplot(models_to_plot[alpha])
# plt.tight_layout()
# plt.plot(data['x'],y_pred)
# plt.plot(data['x'],data['y'],'.')
# plt.title('Plot for alpha: %.3g'%alpha)
#Return the result in pre-defined format
rss = sum((y_pred-data['y'])**2)
mse = rss/len(data)
ret = mse
#ret.extend([ridgereg.intercept_])
#ret.extend(ridgereg.coef_)
return ret
# In[22]:
initial = 0
alpha = []
while (initial <= 150):
alpha.append(initial)
initial = initial + 1
predictors=[]
predictors.extend(['x%d'%i for i in range(1,101)])
predictors1 = []
predictors1.extend(['x%d'%i for i in range(1,11)])
# In[15]:
data100 = pd.read_csv('train-100-100.csv')
ridge_regression(data100, predictors, 2)
# In[36]:
def ridge_regression_joint(data, alpha):
MSE = []
for i in alpha:
MSE.append(ridge_regression(data, predictors, i))
return MSE
def ridge_regression_joint1(data, alpha):
MSE = []
for i in alpha:
MSE.append(ridge_regression(data, predictors1, i))
return MSE
# In[51]:
#Plot 1 MSE - train-100-100.csv
train_100_100 = pd.read_csv('train-100-100.csv')
plot1_MSE = ridge_regression_joint(train_100_100, alpha)
#Plot 2 MSE - train-1000-100.csv
train_1000_100 = pd.read_csv('train-1000-100.csv')
plot2_MSE = ridge_regression_joint(train_1000_100, alpha)
#Plot 3 MSE - train-50(1000)-100.csv
#train_50_1000_100 = pd.read_csv('train-50(1000)-100.csv')
train_50_1000_100 = train_1000_100.head(50)
plot3_MSE = ridge_regression_joint(train_50_1000_100, alpha)
#Plot 4 MSE - train-100(1000)-100.csv
#train_100_1000_100 = pd.read_csv('train-100(1000)-100.csv')
train_100_1000_100 = train_1000_100.head(100)
plot4_MSE = ridge_regression_joint(train_100_1000_100, alpha)
#Plot 5 MSE - train-150(1000)-100.csv
#train_150_1000_100 = pd.read_csv('train-150(1000)-100.csv')
train_150_1000_100 = train_1000_100.head(150)
plot5_MSE = ridge_regression_joint(train_150_1000_100, alpha)
#Plot 6 MSE - test-100-10.csv
train_100_10 = pd.read_csv('train-100-10.csv')
plot6_MSE = ridge_regression_joint1(train_100_10, alpha)
#Plot 7 MSE - test-100-100.csv
test_100_100 = pd.read_csv('test-100-100.csv')
plot7_MSE = ridge_regression_joint(test_100_100, alpha)
#Plot 8 MSE - test-1000-100.csv
test_1000_100 = pd.read_csv('test-1000-100.csv')
plot8_MSE = ridge_regression_joint(test_1000_100, alpha)
#Plot 9 MSE - test-50(1000)-100.csv
#train_50_1000_100 = pd.read_csv('train-50(1000)-100.csv')
test_50_1000_100 = test_1000_100.head(50)
plot9_MSE = ridge_regression_joint(test_50_1000_100, alpha)
#Plot 10 MSE - test-100(1000)-100.csv
#train_100_1000_100 = pd.read_csv('train-100(1000)-100.csv')
test_100_1000_100 = test_1000_100.head(100)
plot10_MSE = ridge_regression_joint(test_100_1000_100, alpha)
#Plot 11 MSE - test-150(1000)-100.csv
#train_150_1000_100 = pd.read_csv('train-150(1000)-100.csv')
test_150_1000_100 = test_1000_100.head(150)
plot11_MSE = ridge_regression_joint(test_150_1000_100, alpha)
#Plot 12 MSE - test-100-10.csv
test_100_10 = pd.read_csv('test-100-10.csv')
plot12_MSE = ridge_regression_joint1(test_100_10, alpha)
# In[40]:
df = pd.DataFrame({'x':alpha, 'p1': plot1_MSE, 'p2': plot2_MSE, 'p3': plot3_MSE, 'p4': plot4_MSE, 'p5': plot5_MSE, 'p6': plot6_MSE, 'p7': plot7_MSE, 'p8': plot8_MSE, 'p9': plot9_MSE, 'p10': plot10_MSE, 'p11': plot11_MSE, 'p12': plot12_MSE })
# In[47]:
# multiple line plot
plt.plot( 'x', 'p1', data=df, marker='o', markerfacecolor='blue', markersize=12, color='skyblue', linewidth=2, label = "train-100-100")
plt.plot( 'x', 'p2', data=df, marker='*', color='olive', linewidth=2, label = "train-1000-100")
plt.plot( 'x', 'p3', data=df, marker='', color='olive', linewidth=2, linestyle='dashed', label="train-50(1000)-100")
plt.plot( 'x', 'p4', data=df, marker='o', markerfacecolor='olive', markersize=12, color='skyblue', linewidth=2, label = "train-100(1000)-100")
plt.plot( 'x', 'p5', data=df, marker='*', color='red', linewidth=2, label = "train-150(1000)-100")
plt.plot( 'x', 'p6', data=df, marker='', color='red', linewidth=2, linestyle='dashed', label="train-100-10")
plt.plot( 'x', 'p7', data=df, marker='o', markerfacecolor='yellow', markersize=12, color='skyblue', linewidth=2, label = "test-100-100")
plt.plot( 'x', 'p8', data=df, marker='*', color='yellow', linewidth=2, label = "test-1000-100")
plt.plot( 'x', 'p9', data=df, marker='', color='yellow', linewidth=2, linestyle='dashed', label="test-50(1000)-100")
plt.plot( 'x', 'p10', data=df, marker='o', markerfacecolor='green', markersize=12, color='skyblue', linewidth=2, label = "test-100(1000)-100")
plt.plot( 'x', 'p11', data=df, marker='*', color='green', linewidth=2, label = "test-150(1000)-100")
plt.plot( 'x', 'p12', data=df, marker='', color='green', linewidth=2, linestyle='dashed', label="test-100-10")
plt.legend()
# In[48]:
df
# In[50]:
# Regularisation strengths 1..150 — alpha = 0 is excluded this time.
# Replaces a C-style while/append loop; its leftover counter `initial`
# was never read again and is dropped.
alpha1 = list(range(1, 151))
# In[64]:
# Same MSE-vs-alpha computation, restricted to alpha = 1..150 and to a
# subset of the datasets.
#Plot 1 MSE - train-100-100.csv
train_100_100 = pd.read_csv('train-100-100.csv')
plot1_MSE = ridge_regression_joint(train_100_100, alpha1)
#Plot 2 MSE - train-50(1000)-100.csv
#train_50_1000_100 = pd.read_csv('train-50(1000)-100.csv')
train_50_1000_100 = train_1000_100.head(50)
plot2_MSE = ridge_regression_joint(train_50_1000_100, alpha1)
#Plot 3 MSE - train-100(1000)-100.csv
#train_100_1000_100 = pd.read_csv('train-100(1000)-100.csv')
train_100_1000_100 = train_1000_100.head(100)
plot3_MSE = ridge_regression_joint(train_100_1000_100, alpha1)
#Plot 4 MSE - test-100-100.csv
test_100_100 = pd.read_csv('test-100-100.csv')
plot4_MSE = ridge_regression_joint(test_100_100, alpha1)
#Plot 5 MSE - test-50(1000)-100.csv
test_50_1000_100 = test_1000_100.head(50)
plot5_MSE = ridge_regression_joint(test_50_1000_100, alpha1)
#Plot 6 MSE - test-100(1000)-100.csv
test_100_1000_100 = test_1000_100.head(100)
plot6_MSE = ridge_regression_joint(test_100_1000_100, alpha1)
# In[65]:
df1 = pd.DataFrame({'x':alpha1, 'p1': plot1_MSE, 'p2': plot2_MSE, 'p3': plot3_MSE, 'p4': plot4_MSE, 'p5': plot5_MSE, 'p6': plot6_MSE })
# multiple line plot
plt.plot( 'x', 'p1', data=df1, marker='o', markerfacecolor='blue', markersize=12, color='skyblue', linewidth=2, label = "train-100-100")
plt.plot( 'x', 'p2', data=df1, marker='*', color='olive', linewidth=2, label = "train-50(1000)-100")
plt.plot( 'x', 'p3', data=df1, marker='', color='olive', linewidth=2, linestyle='dashed', label="train-100(1000)-100")
plt.plot( 'x', 'p4', data=df1, marker='o', markerfacecolor='olive', markersize=12, color='skyblue', linewidth=2, label = "test-100-100")
plt.plot( 'x', 'p5', data=df1, marker='*', color='red', linewidth=2, label = "test-50(1000)-100")
plt.plot( 'x', 'p6', data=df1, marker='', color='red', linewidth=2, linestyle='dashed', label="train-1000(1000)-100")
plt.legend()
# In[83]:
# One-off 10-fold CV experiment on the 1000x100 training set (alpha = 1);
# generalised by cross_val() below.
#X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) # create an array
#y = np.array([1, 2, 3, 4]) # Create another array
X = train_1000_100[["x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20","x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30","x31", "x32", "x33", "x34", "x35", "x36", "x37", "x38", "x39", "x40","x41", "x42", "x43", "x44", "x45", "x46", "x47", "x48", "x49", "x50","x51", "x52", "x53", "x54", "x55", "x56", "x57", "x58", "x59", "x60","x61", "x62", "x63", "x64", "x65", "x66", "x67", "x68", "x69", "x70","x71", "x72", "x73", "x74", "x75", "x76", "x77", "x78", "x79", "x80","x81", "x82", "x83", "x84", "x85", "x86", "x87", "x88", "x89", "x90","x91", "x92", "x93", "x94", "x95", "x96", "x97", "x98", "x99","x100"]].to_numpy()
y = train_1000_100[["y"]]
kf = KFold(n_splits=10) # Define the split - into 2 folds
kf.get_n_splits(X) # returns the number of splitting iterations in the cross-validator
# NOTE(review): kf is never handed to cross_val_score below (which builds
# its own 10-fold split via cv=10) — the KFold object is effectively unused.
ridgereg = Ridge(alpha=1,normalize=True)
#ridgereg.fit(data[predictors],data['y'])
scores = cross_val_score(ridgereg, X, y,scoring='neg_mean_squared_error', cv=10)
#clf = svm.SVC(kernel='linear', C=1)
#scores = cross_val_score(clf, x, y, cv=5)
# cross_val_score returns negated MSEs; flip the sign before averaging.
MSE = scores*(-1)
np.mean(MSE)
# In[128]:
def cross_val(data, alpha):
    """Mean 10-fold cross-validated MSE of Ridge(alpha) on predictors x1..x100.

    @param data DataFrame with columns x1..x100 and y
    @param alpha regularisation strength
    @returns average mean-squared error over the 10 folds
    """
    # Build the x1..x100 column list programmatically instead of the
    # original 100-entry hard-coded literal.
    cols = ['x%d' % i for i in range(1, 101)]
    X = data[cols].to_numpy()
    y = data[["y"]]
    ridgereg = Ridge(alpha=alpha, normalize=True)
    scores = cross_val_score(ridgereg, X, y,
                             scoring='neg_mean_squared_error', cv=10)
    # cross_val_score returns negated MSEs; flip the sign and average.
    return np.mean(scores * (-1))
# In[129]:
#cv_result_100_10 = []
#for i in alpha:
# cv_result_100_10.append(cross_val(train_1000_100, i))
def Find_Least_MSE_Lambda(data):
    """Cross-validate every lambda in the global ``alpha`` list and return
    the lambda with the smallest CV MSE together with all MSE values.

    Bug fix: the original tracked the best lambda with a misspelled
    variable (``min_lamda``) and a self-assignment (``min_lambda =
    min_lambda``), so it always returned 0 regardless of the data.

    @param data DataFrame with columns x1..x100 and y
    @returns (best_lambda, list_of_MSEs) — one MSE per value in ``alpha``
    """
    result = [cross_val(data, lam) for lam in alpha]
    # argmin over the MSE curve, mapped back to the lambda value.
    min_lambda = alpha[result.index(min(result))]
    return min_lambda, result
# In[133]:
def cross_val1(data, alpha):
    """Mean 10-fold cross-validated MSE of Ridge(alpha) on predictors x1..x10.

    @param data DataFrame with columns x1..x10 and y
    @param alpha regularisation strength
    @returns average mean-squared error over the 10 folds
    """
    # Generated column list instead of a hard-coded literal.
    cols = ['x%d' % i for i in range(1, 11)]
    X = data[cols]
    y = data[["y"]]
    ridgereg = Ridge(alpha=alpha, normalize=True)
    scores = cross_val_score(ridgereg, X, y,
                             scoring='neg_mean_squared_error', cv=10)
    # cross_val_score returns negated MSEs; flip the sign and average.
    return np.mean(scores * (-1))
def Find_Least_MSE_Lambda1(data):
    """Like Find_Least_MSE_Lambda, but for the 10-predictor datasets
    (uses cross_val1 on columns x1..x10).

    Bug fix: same as Find_Least_MSE_Lambda — the original's misspelled
    ``min_lamda`` meant the returned lambda was always 0.

    @param data DataFrame with columns x1..x10 and y
    @returns (best_lambda, list_of_MSEs) — one MSE per value in ``alpha``
    """
    result = [cross_val1(data, lam) for lam in alpha]
    min_lambda = alpha[result.index(min(result))]
    return min_lambda, result
# In[135]:
# Best lambda (by 10-fold CV MSE) for every training dataset.
lambda_optimum_100_100, result_100_100 = Find_Least_MSE_Lambda(train_100_100)
lambda_optimum_1000_100, result_1000_100 = Find_Least_MSE_Lambda(train_1000_100)
lambda_optimum_50_1000_100, result_50_1000_100 = Find_Least_MSE_Lambda(train_50_1000_100)
lambda_optimum_100_1000_100, result_100_1000_100 = Find_Least_MSE_Lambda(train_100_1000_100)
lambda_optimum_150_1000_100, result_150_1000_100 = Find_Least_MSE_Lambda(train_150_1000_100)
lambda_optimum_100_10, result_100_10 = Find_Least_MSE_Lambda1(train_100_10)
# In[136]:
print('Set_100_100 Optimum Lambda - ', lambda_optimum_100_100)
print('Set_1000_100 Optimum Lambda - ', lambda_optimum_1000_100)
print('Set_50(1000)_100 Optimum Lambda - ', lambda_optimum_50_1000_100)
print('Set_100(1000)_100 Optimum Lambda - ', lambda_optimum_100_1000_100)
print('Set_150(1000)_100 Optimum Lambda - ', lambda_optimum_150_1000_100)
# NOTE(review): the label below reads "Set_100_100" but prints the 100x10
# result — looks like a copy-paste slip in the label text.
print('Set_100_100 Optimum Lambda - ', lambda_optimum_100_10)
# In[134]:
# Recomputes the 100x10 result (duplicate of the assignment above).
lambda_optimum_100_10, result_100_10 = Find_Least_MSE_Lambda1(train_100_10)
| [
"xiaowei.lin@outlook.com"
] | xiaowei.lin@outlook.com |
934972f5760beb0edd9e95d67bf3a2b7d72ffd87 | 00541cccb98b8b075614daec61299f940c8bf4eb | /Array/min_steps.py | af75aff296188e080fff0193b9acf255444d907e | [] | no_license | prem0862/DSA-Problems | a6f2e29226be7e5c2492daec8b994f7f323ad346 | 7799a0e781307f819ce358a2382d0cfc454b0470 | refs/heads/master | 2020-06-05T03:31:03.134595 | 2019-09-12T07:01:42 | 2019-09-12T07:01:42 | 192,298,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | """
You are in an infinite 2D grid where you can move in any of the 8 directions :
(x,y) to
(x+1, y),
(x - 1, y),
(x, y+1),
(x, y-1),
(x-1, y-1),
(x+1,y+1),
(x-1,y+1),
(x+1,y-1)
You are given a sequence of points and the order in which you need to cover the points. Give the minimum number of steps in which you can achieve it. You start from the first point.
Input :
Given two integer arrays A and B, where A[i] is x coordinate and B[i] is y coordinate of ith point respectively.
Output :
Return an Integer, i.e minimum number of steps.
Example :
Input : [(0, 0), (1, 1), (1, 2)]
Output : 2
It takes 1 step to move from (0, 0) to (1, 1). It takes one more step to move from (1, 1) to (1, 2).
"""
class Solution:
    # @param A : list of integers (x coordinates, in visiting order)
    # @param B : list of integers (y coordinates, in visiting order)
    # @return an integer
    def coverPoints(self, A, B):
        """Minimum number of steps to visit the points (A[i], B[i]) in order.

        With all 8 king-like moves available, travelling between two
        consecutive points costs their Chebyshev distance
        max(|dx|, |dy|); the answer is the sum over consecutive pairs.
        """
        total_steps = 0
        for (x1, y1), (x2, y2) in zip(zip(A, B), zip(A[1:], B[1:])):
            total_steps += max(abs(x1 - x2), abs(y1 - y2))
        return total_steps
"premprakash0796@gmail.com"
] | premprakash0796@gmail.com |
06041fdeaa8117e519bdcffd4405d5644d289d6b | 0827acee84dc8266d4912f00d627a150ec91767e | /tonic/markups/w3cutil_test.py | 911bec64cfba211e8452cf676449ddae60aeef79 | [] | no_license | bgnori/tonic | 6f6ec9a35df9b263051b1fc115957cce6eed2fb6 | 943d04ec5b18d5ac563ec4074eabb45a738b84af | refs/heads/master | 2021-01-04T02:37:45.275063 | 2012-01-25T05:27:18 | 2012-01-25T05:27:18 | 218,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | #!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim: syntax=python
#
# Copyright 2009 Noriyuki Hosaka bgnori@gmail.com
#
import StringIO
import unittest
from tonic.rlimit import Lock
from tonic.markups.w3cutil import *
MINIMAL = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head><title>unittest</title></head>
<body>empty</body></html>
'''
class w3cvalidateTest(unittest.TestCase):
def setUp(self):
self.markups = StringIO.StringIO(MINIMAL)
def tearDown(self):
pass
def test(self):
lock = Lock(15)
lock.aquire()
r = validate(self.markups)
c = 0
for line in r:
if '''class="msg_err"''' in line:
c = 10
if c > 0:
print line,
c -= 1
print r.info()
self.assertEqual(r.info()['X-W3C-Validator-Status'], 'Valid')
lock.release()
| [
"bgnori@gmail.com"
] | bgnori@gmail.com |
632a4da0b2257a8bc822123735759f9a8d0d93e7 | c510f87006a2b095da5d4c78e5810f80a663d2f5 | /src/sanajeh.py | cfb0471f241ca9f3275ba9effae1b55952d30168 | [] | no_license | mfkiwl/Sanajeh | 3c605d0acb9a723a02660152c1f7ce88b3150cf4 | 5eec8d7e1956fc37776d518e2b130b896a371b0b | refs/heads/master | 2023-08-23T18:44:02.395620 | 2021-09-28T12:45:49 | 2021-09-28T12:45:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,043 | py | import os, sys
import py2cpp
import cffi
import random
from typing import Callable
from expander import RuntimeExpander
# Shared runtime state for the Sanajeh host/device bridge.
ffi = cffi.FFI()
cpu_flag = False  # True -> emulate device calls sequentially on the host
objects = None  # host-side object list used in CPU-emulation mode (set by parallel_new)
class PyCompiler:
file_path: str = ""
file_name: str = ""
dir_path: str = ""
#cpp_code: str = ""
#hpp_code: str = ""
#cdef_code: str = ""
def __init__(self, pth: str, nme: str):
self.file_path = pth
self.file_name = nme
self.dir_path = "device_code/{}".format(self.file_name)
def compile(self):
source = open(self.file_path, encoding="utf-8").read()
codes = py2cpp.compile(source, self.dir_path, self.file_name)
#self.cpp_code = codes[0]
#self.hpp_code = codes[1]
#self.cdef_code = codes[2]
def build(self):
"""
Compile cpp source file to .so file
"""
so_path: str = "{}/{}.so".format(self.dir_path, self.file_name)
if os.system("src/build.sh " + "{}/{}.cu".format(self.dir_path, self.file_name) + " -o " + so_path) != 0:
print("Build failed!", file=sys.stderr)
sys.exit(1)
def printCppAndHpp(self):
print(self.cpp_code)
print("--------------------------------")
print(self.hpp_code)
def printCdef(self):
print(self.cdef_code)
# Device side allocator
class DeviceAllocator:
    """API stubs for device-side allocator calls.

    In GPU mode these calls are placeholders: the py2cpp compiler
    translates them into CUDA constructs, so the python bodies are no-ops.
    When the module-level ``cpu_flag`` is set, some of them emulate the
    behaviour sequentially on the host using the module-level ``objects``
    list (filled by PyAllocator.parallel_new).
    """

    @staticmethod
    def device_do(cls, func, *args):
        """Call ``func`` on every tracked object (CPU emulation only)."""
        if cpu_flag:
            for obj in objects:
                getattr(obj, func.__name__)(*args)
        else:
            pass  # translated to device code by the compiler

    @staticmethod
    def device_class(*cls):
        """Marker: declares ``cls`` as device classes (compile-time only)."""
        pass

    @staticmethod
    def parallel_do(cls, func, *args):
        """Marker for a device-side parallel_do (compile-time only)."""
        pass

    @staticmethod
    def rand_init(seed, sequence, offset):
        """Seed the RNG; on CPU only ``sequence`` is used as the seed."""
        if cpu_flag:
            random.seed(sequence)
        else:
            pass

    # (0,1] on the device (curand_uniform semantics); the CPU fallback uses
    # random.uniform(0, 1), which returns values in [0, 1].
    @staticmethod
    def rand_uniform():
        if cpu_flag:
            return random.uniform(0,1)
        else:
            pass

    @staticmethod
    def array_size(array, size):
        """Marker: declares a fixed device array size (compile-time only)."""
        pass

    @staticmethod
    def new(cls, *args):
        """Marker for device-side object construction (compile-time only)."""
        pass

    @staticmethod
    def destroy(obj):
        """Release ``obj``; on CPU just drops the local reference."""
        if cpu_flag:
            del obj
        else:
            pass
class RuntimeDoAll:
    """Generates, as python source strings, ``rebuild_<Class>`` functions
    that reassemble host objects from the flattened primitive fields the
    device-side allocator passes to do_all callbacks.
    """

    # Maps class name -> generated function source (acts as a cache and a
    # recursion guard for nested field types).
    built = {}

    def __init__(self):
        pass

    def rebuild_function(self, cls):
        """Generate the rebuild-function source for ``cls`` (and, first,
        for every nested field type not yet generated) into ``self.built``.
        """
        module = cls.__dict__["__module__"]
        flattened = self.flatten(cls)
        # Recurse into nested (non-primitive, non-reference) field types.
        for field, ftype in cls.__dict__["__annotations__"].items():
            if field.split("_")[-1] != "ref" and ftype not in ["int", "float", "bool"] and ftype not in self.built.keys():
                self.rebuild_function(getattr(__import__(module), ftype))
        # The generated function takes every flattened primitive field as a
        # parameter and bypasses __init__ via __new__.
        new_function = "def rebuild_{}({}):\n".format(cls.__name__, ", ".join(flattened[0]))
        new_function += "\t" + "new_object: {} = {}.__new__({})\n".format(cls.__name__, cls.__name__, cls.__name__)
        for field, ftype in cls.__dict__["__annotations__"].items():
            if field in flattened[1].keys():
                # Nested object: rebuilt by calling its type with the
                # flattened sub-fields (prefixed with the field name).
                nested_build = ", ".join([nested_field for nested_field in flattened[1][field]])
                new_function += "\t" + "new_object.{} = {}({})\n".format(field, ftype, nested_build)
            else:
                new_function += "\t" + "new_object.{} = {}\n".format(field, field)
        new_function += "\t" + "return new_object"
        self.built[cls.__name__] = new_function

    def flatten(self, cls):
        """Return ``[field_map, nested_map]`` for ``cls``.

        field_map: flattened field name -> primitive type name; reference
        fields (``*_ref``) become ints, nested object fields are expanded
        recursively with a ``<field>_`` prefix.
        nested_map: original field name -> its prefixed sub-field map.
        """
        field_map = {}
        nested_map = {}
        module = cls.__dict__["__module__"]
        if "__annotations__" in cls.__dict__.keys():
            for field, ftype in cls.__dict__["__annotations__"].items():
                if field.split("_")[-1] == "ref":
                    # Object references are carried as integer ids.
                    field_map[field] = "int"
                elif ftype in ["int", "float", "bool"]:
                    field_map[field] = ftype
                else:
                    nested_map[field] = {}
                    nested_result = self.flatten(getattr(__import__(module), ftype))
                    for nested_field, nested_ftype in nested_result[0].items():
                        field_map[field + "_" + nested_field] = nested_ftype
                        nested_map[field][field + "_" + nested_field] = nested_ftype
        return [field_map, nested_map]
class PyAllocator:
    """Host-side facade over the Sanajeh device allocator.

    GPU mode: compiles the user's source to CUDA (PyCompiler), loads the
    resulting shared library through cffi, and forwards parallel_new /
    parallel_do / do_all calls to it.  CPU mode (``flag=True``): emulates
    the same calls sequentially on the host via the module-level
    ``objects`` list.
    """

    file_path: str = ""
    file_name: str = ""
    cpp_code: str = ""
    hpp_code: str = ""
    cdef_code: str = ""
    lib = None  # cffi library handle, populated by initialize() in GPU mode

    def __init__(self, path: str, name: str, flag: bool):
        self.file_name = name
        self.file_path = path
        # The CPU/GPU switch is shared with DeviceAllocator via a module global.
        global cpu_flag
        cpu_flag = flag

    # load the shared library and initialize the allocator on GPU
    def initialize(self):
        if cpu_flag:
            pass
        else:
            # Compilation before initializing ffi
            compiler: PyCompiler = PyCompiler(self.file_path, self.file_name)
            compiler.compile()
            compiler.build()
            # Initialize ffi module from the generated artifacts
            self.cpp_code = open("device_code/{}/{}.cu".format(self.file_name, self.file_name), mode="r").read()
            self.hpp_code = open("device_code/{}/{}.h".format(self.file_name, self.file_name), mode="r").read()
            self.cdef_code = open("device_code/{}/{}.cdef".format(self.file_name, self.file_name), mode="r").read()
            ffi.cdef(self.cdef_code)
            self.lib = ffi.dlopen("device_code/{}/{}.so".format(self.file_name, self.file_name))
            if self.lib.AllocatorInitialize() == 0:
                pass
                # print("Successfully initialized the allocator through FFI.")
            else:
                print("Initialization failed!", file=sys.stderr)
                sys.exit(1)

    # Free all of the memory on GPU
    def uninitialize(self):
        # FIX: ``self`` was missing from the signature, so calling this as
        # an instance method raised TypeError and the body's ``self`` was
        # an unresolved name.
        if cpu_flag:
            pass
        else:
            if self.lib.AllocatorUninitialize() == 0:
                pass
                # print("Successfully uninitialized the allocator through FFI.")
            else:
                print("Initialization failed!", file=sys.stderr)
                sys.exit(1)

    def parallel_do(self, cls, func, *args):
        """Run ``func`` on every existing object of ``cls``.

        GPU mode resolves the exported symbol ``<Class>_<FuncClass>_<func>``
        and invokes it; CPU mode loops over the host object list.
        """
        if cpu_flag:
            for obj in objects:
                getattr(obj, func.__name__)(*args)
        else:
            object_class_name = cls.__name__
            func_str = func.__qualname__.split(".")
            # todo nested class exception
            func_class_name = func_str[0]
            func_name = func_str[1]
            # todo args
            if eval("self.lib.{}_{}_{}".format(object_class_name, func_class_name, func_name))() == 0:
                pass
                # print("Successfully called parallel_do {} {} {}".format(object_class_name, func_class_name, func_name))
            else:
                print("Parallel_do expression failed!", file=sys.stderr)
                sys.exit(1)

    def parallel_new(self, cls, object_num):
        """Create ``object_num`` objects of ``cls`` (on device, or emulated).

        CPU mode bypasses __init__ via __new__ and then calls the method
        named after the class (Sanajeh's constructor convention) with the
        object's index.
        """
        if cpu_flag:
            global objects
            objects = [cls.__new__(cls) for _ in range(object_num)]
            for i in range(object_num):
                getattr(objects[i], cls.__name__)(i)
        else:
            object_class_name = cls.__name__
            if eval("self.lib.parallel_new_{}".format(object_class_name))(object_num) == 0:
                pass
                # print("Successfully called parallel_new {} {}".format(object_class_name, object_num))
            else:
                print("Parallel_new expression failed!", file=sys.stderr)
                sys.exit(1)

    # Rebuilds host-side objects from the flattened fields the device
    # passes into do_all callbacks.
    expander: RuntimeExpander = RuntimeExpander()

    def do_all(self, cls, func):
        """Invoke host function ``func`` on (a host copy of) every object
        of ``cls``.

        GPU mode registers a cffi callback that reassembles each object
        from its flattened primitive fields before handing it to ``func``.
        """
        if cpu_flag:
            for obj in objects:
                func(obj)
        else:
            name = cls.__name__
            if name not in self.expander.built.keys():
                self.expander.build_function(cls)
            callback_types = "void({})".format(", ".join(self.expander.flattened[name].values()))
            fields = ", ".join(self.expander.flattened[name])
            lambda_for_create_host_objects = eval("lambda {}: func(cls.__rebuild_{}({}))".format(fields, name, fields), locals())
            lambda_for_callback = ffi.callback(callback_types, lambda_for_create_host_objects)
            if eval("self.lib.{}_do_all".format(name))(lambda_for_callback) == 0:
                pass
                # print("Successfully called parallel_new {} {}".format(object_class_name, object_num))
            else:
                print("Do_all expression failed!", file=sys.stderr)
                sys.exit(1)
"luthfanlubis@gmail.com"
] | luthfanlubis@gmail.com |
1ff366321883f01604c9c3806e365b8ca4dba437 | f40f3bad1c3fd12d5cd9f35f31ed13744f53f558 | /completed_exercises/07_files/task_7_2b.py | 492c47843710a04a22b55272408bb973c3aaf751 | [] | no_license | Ordauq/Learning_PyNEng | d8396897a31bd3bac22ab0b39a7ec4748db93219 | 743c789e1a483843e312c2df24bcbb3b20a202b9 | refs/heads/master | 2020-06-25T22:31:19.900664 | 2019-11-05T12:37:25 | 2019-11-05T12:37:25 | 199,440,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | # -*- coding: utf-8 -*-
'''
Задание 7.2b
Дополнить скрипт из задания 7.2a:
* вместо вывода на стандартный поток вывода,
скрипт должен записать полученные строки в файл config_sw1_cleared.txt
При этом, должны быть отфильтрованы строки, которые содержатся в списке ignore.
Строки, которые начинаются на '!' отфильтровывать не нужно.
Ограничение: Все задания надо выполнять используя только пройденные темы.
'''
ignore = ['duplex', 'alias', 'Current configuration']

# Solution

import sys

# Copy the config file given as the first CLI argument into
# config_sw1_cleared.txt, dropping every line that contains one of the
# ``ignore`` substrings.  Per the task statement, lines starting with '!'
# must NOT be filtered out.
with open(sys.argv[1], 'r') as config, open('config_sw1_cleared.txt', 'w') as dst:
    for line in config:
        if line.startswith('!'):
            # FIX: keep '!' separator lines — the previous version silently
            # dropped them, contradicting the task requirements.
            dst.write(line)
        else:
            matched = False
            for word in ignore:
                if word in line:
                    matched = True
                    break
            if not matched:
                dst.write(line)
"quadro.public@gmail.com"
] | quadro.public@gmail.com |
c90d442129aeee3420c883009e9e8c28d99e270f | 01552dc88e7c170de857f5ff0b52178326d5f003 | /guild/source_cmd_support.py | bc43a57aba385e229a9c363801257dabca29c1cf | [
"Apache-2.0"
] | permissive | guildai/_guild-python-legacy | b8516f38b3dd4f27859850ec07fe9c4747f4fd8b | e552eff820d8edcfeb10b26bd5c8651548507b4a | refs/heads/master | 2021-01-01T15:52:35.875726 | 2017-09-27T18:58:59 | 2017-09-27T18:58:59 | 97,719,256 | 0 | 0 | Apache-2.0 | 2018-10-20T23:44:54 | 2017-07-19T13:28:21 | HTML | UTF-8 | Python | false | false | 959 | py | import guild.cli
import guild.source
def resolve_one_package(spec):
    """Resolve ``spec`` to exactly one package.

    Exits via a CLI error when the spec matches zero or multiple packages.
    """
    try:
        resolved = guild.source.resolve_one_package(spec)
    except guild.source.MultiplePackagesError as err:
        _multiple_matches_error(err.spec, err.pkgs)
    except guild.source.NoSuchPackageError as err:
        _no_such_package_error(err.spec)
    else:
        return resolved
def _no_such_package_error(spec):
    """Exit via a CLI error reporting that ``spec`` matched no package."""
    guild.cli.error("no packages match '%s'" % spec)
def _multiple_matches_error(spec, pkgs):
    """Exit via a CLI error listing every package that matched ``spec``."""
    choices = _multiple_matches_list(pkgs)
    message = "multiple packages match '%s'\nSpecify one of: %s" % (spec, choices)
    guild.cli.error(message)
def _multiple_matches_list(pkgs):
return ", ".join([pkg.key for pkg in pkgs])
def resolve_all_packages(specs):
    """Resolve every spec in ``specs``.

    Exits via a CLI error when any spec matches zero or multiple packages.
    """
    try:
        resolved = guild.source.resolve_all_packages(specs)
    except guild.source.MultiplePackagesError as err:
        _multiple_matches_error(err.spec, err.pkgs)
    except guild.source.NoSuchPackageError as err:
        _no_such_package_error(err.spec)
    else:
        return resolved
| [
"g@rre.tt"
] | g@rre.tt |
6b09fcdf4b449d1fe4a6fa22f5b675e5eecbad56 | b15c47a45207e854fb002d69f7e33f8943a5e2b3 | /master/automl/automl.py | a4b95f1e457acd30b9cb54f532ae96d541855309 | [
"Apache-2.0"
] | permissive | yurimkoo/tensormsa | e1af71c00a6b2ec3b3ed35d5adad7bafc34c6fbe | 6ad2fbc7384e4dbe7e3e63bdb44c8ce0387f4b7f | refs/heads/master | 2021-07-22T13:41:45.110348 | 2017-11-02T07:13:31 | 2017-11-02T07:13:31 | 109,469,204 | 1 | 0 | null | 2017-11-04T05:19:51 | 2017-11-04T05:19:50 | null | UTF-8 | Python | false | false | 4,224 | py | from common.utils import *
from master import models
class AutoMlCommon:
    """
    Auto ML related conf get/set common methods.

    Thin accessor layer over the ``NN_DEF_LIST_INFO`` Django model: every
    method loads the row for a network id (``nn_id``) and reads or updates
    one of its JSON columns (``automl_parms``, ``automl_stat``,
    ``automl_runtime``) or the ``dir`` (network type) column.

    NOTE(review): the ``except Exception as e: raise Exception(e)`` pattern
    below discards the original exception type and traceback; a bare
    ``raise`` (or ``raise ... from e``) would preserve them.
    """

    def __init__(self, key = None):
        """
        Cache the per-network info for ``key`` (a nn_id), when given.
        :param key: network definition id, or None for a bare instance
        :return:
        """
        if (key is not None):
            self.key = key
            self.parm_info = self.get_parm_obj(key)
            self.conf_info = self.get_conf_obj(key)
            self.stat_info = self.get_stat_obj(key)
            self.net_type = self.get_net_type(key)

    def get_conf_obj(self, nn_id):
        """
        Return the ``automl_parms`` JSON field for ``nn_id``
        (presumably a dict — TODO confirm against the model definition).
        :return: value of ``automl_parms``
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            return getattr(obj, "automl_parms")
        except Exception as e:
            raise Exception(e)

    def update_conf_obj(self, nn_id, input_data):
        """
        Merge ``input_data`` into the ``automl_parms`` JSON field and save.
        :return: the ``input_data`` that was merged
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            data_set = getattr(obj, "automl_parms")
            data_set.update(input_data)
            setattr(obj, "automl_parms", data_set)
            obj.save()
            return input_data
        except Exception as e:
            raise Exception(e)

    def get_stat_obj(self, nn_id):
        """
        Return the ``automl_stat`` JSON field for ``nn_id``.
        :return: value of ``automl_stat``
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            return getattr(obj, "automl_stat")
        except Exception as e:
            raise Exception(e)

    def update_stat_obj(self, nn_id, input_data):
        """
        Overwrite only the 'bygen' and 'best' entries of ``automl_stat``
        with the corresponding entries of ``input_data`` and save.
        :return: the updated ``automl_stat`` dict
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            data_set = getattr(obj, "automl_stat")
            data_set['bygen'] = input_data['bygen']
            data_set['best'] = input_data['best']
            setattr(obj, "automl_stat", data_set)
            obj.save()
            return data_set
        except Exception as e:
            raise Exception(e)

    def reset_stat_obj(self, nn_id):
        """
        Replace ``automl_stat`` with a fresh dict containing empty 'bygen'
        and 'best' lists (any other keys in the old value are dropped).
        :return: the new ``automl_stat`` dict
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            data_set = {}
            data_set['bygen'] = []
            data_set['best'] = []
            setattr(obj, "automl_stat", data_set)
            obj.save()
            return data_set
        except Exception as e:
            raise Exception(e)

    def get_parm_obj(self, nn_id):
        """
        Return the ``automl_runtime`` JSON field for ``nn_id``.
        :return: value of ``automl_runtime``
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            return getattr(obj, "automl_runtime")
        except Exception as e:
            raise Exception(e)

    def update_parm_obj(self, nn_id, input_data):
        """
        Merge ``input_data`` into the ``automl_runtime`` JSON field and save.
        :return: the ``input_data`` that was merged
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            data_set = getattr(obj, "automl_runtime")
            data_set.update(input_data)
            setattr(obj, "automl_runtime", data_set)
            obj.save()
            return input_data
        except Exception as e:
            raise Exception(e)

    def get_net_type(self, nn_id):
        """
        Return the ``dir`` (network type) field for ``nn_id``.
        :return: value of ``dir``
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            return getattr(obj, "dir")
        except Exception as e:
            raise Exception(e)

    def update_net_type(self, nn_id, input_data):
        """
        Merge ``input_data`` into the ``dir`` field and save.
        NOTE(review): this assumes ``dir`` is dict-like (it calls .update);
        get_net_type suggests it may hold the network type directly — TODO
        confirm against the model definition.
        :return: the ``input_data`` that was merged
        """
        try:
            obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)
            data_set = getattr(obj, "dir")
            data_set.update(input_data)
            setattr(obj, "dir", data_set)
            obj.save()
            return input_data
        except Exception as e:
            raise Exception(e)
"tmddno1@naver.com"
] | tmddno1@naver.com |
7d6bedf9372f04540ea6cb5a54655c16a25cbdc5 | 461d7bf019b9c7a90d15b3de05891291539933c9 | /tests/test_base58_xmr.py | d7eb1b6a45c135a9ab693dd8cc59cba27f5bc7ec | [
"MIT"
] | permissive | renauddahou/bip_utils | 5c21503c82644b57ddf56735841a21b6306a95fc | b04f9ef493a5b57983412c0ce460a9ca05ee1f50 | refs/heads/master | 2023-07-16T05:08:45.042084 | 2021-08-19T09:33:03 | 2021-08-19T09:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,044 | py | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import binascii
import unittest
from bip_utils import Base58XmrDecoder, Base58XmrEncoder
from .test_base58 import TEST_VECT_DEC_INVALID
# Test vector
TEST_VECT = [
{
"raw": b"",
"encode": "",
},
{
"raw": b"61",
"encode": "2g",
},
{
"raw": b"626262",
"encode": "1a3gV",
},
{
"raw": b"636363",
"encode": "1aPEr",
},
{
"raw": b"73696d706c792061206c6f6e6720737472696e67",
"encode": "LJe5Z59G5Zz6RYrqDjxxeX3vd16N",
},
{
"raw": b"00eb15231dfceb60925886b67d065299925915aeb172c06647",
"encode": "19uhT2BqLZuRUjnQGCByg4RUm1bZ2jT3j2E",
},
{
"raw": b"516b6fcd0f",
"encode": "ABnLTmg",
},
{
"raw": b"bf4f89001e670274dd",
"encode": "YzxHqptA9nj4p",
},
{
"raw": b"572e4794",
"encode": "3EFU7m",
},
{
"raw": b"ecac89cad93923c02321",
"encode": "gb2yYxwXgzj3g4",
},
{
"raw": b"10c8511e",
"encode": "1Rt5zm",
},
{
"raw": b"00000000000000000000",
"encode": "11111111111111",
},
{
"raw": b"000111d38e5fc9071ffcd20b4a763cc9ae4f252bb4e48fd66a835e252ada93ff480d6dd43dc62a641155a5",
"encode": "113MMjnNqJN6MKY7uez1h2WA1ztnozdosJpK3JiBzMSWD3zwwYxy5pX16phr",
},
{
"raw": b"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
"encode": "113DUyZY2dc2LxFSMtsQ5k3gsHPkECmXt52nKM8ZY8z26NhMJWtsWSA7icPFuECstJ94XRDHZYFLSAQSTAftscnaBkMV84ECzEiD6GX5SZYMgrESBZ2ptsj8zFn6azDED6b8H81cwbZYU3GJTvetytsqVQKoqgrNEDCwYM9kiokZYaPgNVfkm8tswqpPqaniXEDKHxRBVpfuZYgk6SXQrdHtt4CETsKtagEDReNVDEvY4ZYo6WWZ9xVSttAYeXu4zSqEDXznZEz2QDZYuSvaau4MbttGu4bvp6JzEDeMCdGj8GNZZ1oLeceADkttPFUfxZCB9EDkhchJUE8XZZ89kiePG5uttVbt",
}
]
#
# Tests
#
class Base58XmrTests(unittest.TestCase):
    """Unit tests for the Monero-flavoured Base58 encoder/decoder."""

    def test_decoder_btc(self):
        """Decoding each encoded vector yields the original raw bytes."""
        for vector in TEST_VECT:
            decoded = Base58XmrDecoder.Decode(vector["encode"])
            self.assertEqual(vector["raw"], binascii.hexlify(decoded))

    def test_encoder(self):
        """Encoding each raw vector yields the expected Base58 string."""
        for vector in TEST_VECT:
            raw_bytes = binascii.unhexlify(vector["raw"])
            self.assertEqual(vector["encode"], Base58XmrEncoder.Encode(raw_bytes))

    def test_invalid_decode(self):
        """Invalid inputs make the decoder raise ValueError."""
        for bad_input in TEST_VECT_DEC_INVALID:
            self.assertRaises(ValueError, Base58XmrDecoder.Decode, bad_input)
| [
"54482000+ebellocchia@users.noreply.github.com"
] | 54482000+ebellocchia@users.noreply.github.com |
6827efccb4c9dab41417fc8752d7d589681e8dde | c5656cd9a5e572d308ffc168d7162312b83c0998 | /task/views.py | a0edcac2591b326a17ab24ef1e0d9fac9c771027 | [] | no_license | rockdong/stock_task | fb3cc7b77d52e3a29d31843e7ff4dfb4511951d1 | 19219b32e9c40c8aba2e10cec9c21f73764382fd | refs/heads/master | 2021-01-19T19:12:51.192613 | 2017-04-16T10:10:55 | 2017-04-16T10:10:55 | 88,405,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | #_*_coding:utf-8_*_
from django.shortcuts import HttpResponse
from django.views.generic import View
import json
import tushare as ts
from stock.models import Stock
# Create your views here.
class UpdateStockView(View):
    '''
    Sync the Stock table with the latest stock list: fetch the full stock
    basics from tushare and create a Stock row for every code that does
    not exist in the database yet.
    '''
    def get(self, request):
        stocks = ts.get_stock_basics()
        for code in stocks.index:
            # FIX: removed leftover Python-2 debug `print` statements (a
            # SyntaxError under Python 3) that also ran the same existence
            # query three times per code; query once instead.
            if not Stock.objects.filter(code=code).exists():
                stock = Stock()
                stock.code = code
                stock.save()
        return HttpResponse(json.dumps({"status": "success"}), content_type="application/json")
class UpdateDateInfoView(View):
    '''
    Update the per-day stock data; for a stock with no data yet, backfill
    from its listing date through today. (Stub: not implemented.)
    '''
    pass
class CalculateView(View):
    '''
    Compute, for each stock, whether to buy or sell the next day.
    (Stub: not implemented.)
    '''
    pass
class NofifyView(View):
    '''
    Send notification emails. (Stub: not implemented.
    NOTE(review): class name is misspelled — presumably "NotifyView" —
    but renaming would break any URLconf referencing it.)
    '''
    pass
| [
"rockdong@1234567.local"
] | rockdong@1234567.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.