| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
708a1c43e8e73d7b97aaaf5accb60204fcf46553
|
31fb20acd57af064190f69a1d33ee9ac1ed6ab4e
|
/wxagent/txbase.py
|
f1ed8d86f6b97e6a6ac9f54f657669062e899094
|
[] |
no_license
|
kakliu/wxagent
|
2ba27b531d4a07b4e059a37f4de03a663173ae1b
|
b7c5ea47d6616556b4d43eb81c61cf7ac3031c18
|
refs/heads/master
| 2020-05-27T02:29:27.621687
| 2016-11-13T13:19:04
| 2016-11-13T13:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,674
|
py
|
from PyQt5.QtCore import *
from PyQt5.QtNetwork import *
from .agentstats import AgentStats
# Custom cookie jar extended with the ability to fetch all stored cookies
class AgentCookieJar(QNetworkCookieJar):
def __init__(self, parent=None):
super(AgentCookieJar, self).__init__(parent)
def xallCookies(self):
return self.allCookies()
# Base class for the XXAgent family, implementing the shared abstract functionality
class TXBase(QObject):
def __init__(self, parent=None):
super(TXBase, self).__init__(parent)
self.acj = AgentCookieJar()
self.nam = QNetworkAccessManager()
# disregard the network configuration so that no QNetworkSession gets involved
self.nam.setConfiguration(QNetworkConfiguration())
# reconnect state
self.reconnect_total_times = 0
self.reconnect_start_time = QDateTime()
self.reconnect_last_time = QDateTime()
self.reconnect_retry_times = 0
# self.reconnect_slot = None
self.RECONN_WAIT_TIMEOUT = 4567
self.RECONN_MAX_RETRY_TIMES = 8
self.queue_shot_timers = {} # QTimer => [slot, extra]
self.asts = AgentStats()
# test some
# self.testNcm()
return
# whether we are still within what the reconnect policy allows
def canReconnect(self):
if self.reconnect_retry_times <= self.RECONN_MAX_RETRY_TIMES:
return True
return False
def inReconnect(self):
if self.reconnect_retry_times > 0:
return True
return False
def tryReconnect(self, slot):
self.queueShot(self.RECONN_WAIT_TIMEOUT, self._tryReconnectImpl, slot)
return
def _tryReconnectImpl(self, slot):
if not self.canReconnect():
qDebug('wtf???')
return False
# update the accumulated reconnect state
if self.reconnect_retry_times == 0:
self.reconnect_start_time = QDateTime.currentDateTime()
self.reconnect_last_time = QDateTime.currentDateTime()
self.reconnect_total_times += 1
self.reconnect_retry_times += 1
oldname = self.nam
self.nam = None
oldname.finished.disconnect()
qDebug('see this reconnect...')
# self.acj = AgentCookieJar()
self.nam = QNetworkAccessManager()
self.nam.finished.connect(self.onReply, Qt.QueuedConnection)
self.nam.setCookieJar(self.acj)
# self.queueShot(1234, slot)
QTimer.singleShot(1234, slot)
# QTimer.singleShot(1234, self.eventPoll)
return
def finishReconnect(self):
if not self.inReconnect():
qDebug('wtf???')
return
qDebug('reconn state: retry:%s, time=%s' %
(self.reconnect_retry_times,
self.reconnect_start_time.msecsTo(self.reconnect_last_time)))
self.reconnect_retry_times = 0
self.reconnect_start_time = QDateTime()
self.reconnect_last_time = QDateTime()
return
def queueShot(self, msec, slot, extra=None):
tmer = QTimer()
tmer.setInterval(msec)
tmer.setSingleShot(True)
tmer.timeout.connect(self.onQueueShotTimeout, Qt.QueuedConnection)
self.queue_shot_timers[tmer] = [slot, extra]
tmer.start()
return
def onQueueShotTimeout(self):
tmer = self.sender()
slot, extra = self.queue_shot_timers.pop(tmer)
if extra is None: slot()
else: slot(extra)
return
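# Illustrative usage of the queueShot mechanism above (added comment, not part of
# the original file): each QTimer is kept in self.queue_shot_timers so it is not
# garbage-collected before firing; onQueueShotTimeout() pops it and invokes the
# stored slot, with or without the extra payload:
#   self.queueShot(1000, self.eventPoll)        # later calls slot()
#   self.queueShot(1000, some_slot, 'extra')    # later calls slot('extra'); some_slot is hypothetical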
def testNcm(self):
def onAdded(cfg):
qDebug('ncm added:' + cfg.name())
return
def onChanged(cfg):
qDebug('ncm changed:' + cfg.name())
return
def onRemoved(cfg):
qDebug('ncm removed:' + cfg.name())
return
def onOnlineStateChanged(online):
qDebug('ncm online:' + str(online))
return
def onUpdateCompleted():
qDebug('ncm update completed')
return
# QNetworkConfigurationManager detects a lot of network information,
# e.g. which wireless networks are available and which are not; it can show all of that, but it also consumes more resources.
self.ncm = QNetworkConfigurationManager()
self.ncm.configurationAdded.connect(onAdded)
self.ncm.configurationChanged.connect(onChanged)
self.ncm.configurationRemoved.connect(onRemoved)
# This triggered a bug: https://bugreports.qt.io/browse/QTBUG-49048
# It should be fixed by now; the fix added an if (session) { the warning } guard, and the fix link is in the bug report above.
self.ncm.onlineStateChanged.connect(onOnlineStateChanged)
self.ncm.updateCompleted.connect(onUpdateCompleted)
return
|
[
"drswinghead@163.com"
] |
drswinghead@163.com
|
d71f616745cc995556bc100726f428cb131bcfd1
|
32efa132bd56d5a3161f0053e682f35d478ac9eb
|
/老男孩python全栈开发第14期/python基础知识(day1-day40)/configparser模块.py
|
601eccaf656d54b800f085339158746063cdf457
|
[] |
no_license
|
dengyungao/python
|
6de287aeb26861813724459f4fa37fa82813f9bb
|
d4b83fe55c6afd84ec009db235ae83c8224e7351
|
refs/heads/master
| 2020-08-05T04:25:23.843730
| 2020-02-18T16:30:21
| 2020-02-18T16:30:21
| 187,057,495
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
import configparser
import os
config = configparser.ConfigParser()
config["DEFAULT"] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9',
'ForwardX11': 'yes'
}
config['bitbucket.org'] = {'User': 'hg'}
config['topsecret.server.com'] = {'Host Port': '50022', 'ForwardX11': 'no'}
with open(os.path.dirname(__file__) + '/config/settings.ini', 'w',encoding="utf-8") as configfile:
config.write(configfile)
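# A minimal round-trip sketch (added for illustration): reading the file just
# written back with ConfigParser.read(); the section and option names are the
# ones defined above, and the reader variable is introduced here.
reader = configparser.ConfigParser()
reader.read(os.path.dirname(__file__) + '/config/settings.ini', encoding="utf-8")
print(reader['bitbucket.org']['User'])               # -> hg
print(reader['topsecret.server.com']['Host Port'])   # -> 50022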
|
[
"18140172792@163.com"
] |
18140172792@163.com
|
fe0a2f7e3dd768ecb94a1f4db777f8f8e4963f8c
|
5fd5bf0639db4e93bac2a9122cdee9395908247c
|
/gis_4ban_1/settings/local.py
|
285faf8b46e6d38a43837bc3ed4be570b331d1ee
|
[] |
no_license
|
haki-land/gis_4ban_1
|
23637c205673e08afed8099bf494312d0415236a
|
80f6d6536740dfe36b0ca76be9bc7648545d8b55
|
refs/heads/master
| 2023-08-11T04:47:38.451633
| 2021-10-05T03:17:43
| 2021-10-05T03:17:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
from .base import *
env_list = dict()
local_env = open(os.path.join(BASE_DIR, '.env'), encoding='utf-8') # OS-level path handling: os.path.join combines BASE_DIR and '.env'
while True:
line = local_env.readline() ## read one line at a time; break when no line is left
if not line:
break
line = line.replace('\n', '')
start = line.find('=') ## e.g. SECRET_KEY=django-insecu...: left of '=' is the key, right is the value
key = line[:start]
value = line[start+1:]
env_list[key] = value ## add the split key/value pair to the dictionary
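# Illustrative example (added, not in the original file): a line such as
#   SECRET_KEY=django-insecure-abc123
# is split at the first '=', giving env_list['SECRET_KEY'] == 'django-insecure-abc123'.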
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env_list['SECRET_KEY'] ## requires the env_list dictionary built above
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"] # "*" allows all hosts
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
|
[
"zoneofgod@gmail.com"
] |
zoneofgod@gmail.com
|
556e6ee4972beb6872c63bf608a925dd397c0d33
|
ab0cc0cb2bf34b24d3aa43a7eacd217a7e33f322
|
/profiles_project/settings.py
|
7863c452915caf4882a272afff391369c5c552be
|
[
"MIT"
] |
permissive
|
MarcelIrawan/Profiles-REST-API
|
f4e81cb81ea28e7869cd77787ec868bf18aec2e7
|
1d5c22cad1789459567a9fd3234c48a8552fe816
|
refs/heads/master
| 2022-12-12T02:54:45.306573
| 2020-09-02T03:06:45
| 2020-09-02T03:06:45
| 290,787,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,365
|
py
|
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vr@q&e(dn$l0q#^g%im&an*9$=8&7ms8xrjc4khezbm-&msbns'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
ALLOWED_HOSTS = [
'ec2-3-129-9-187.us-east-2.compute.amazonaws.com',
'127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
|
[
"50488473+MarcelIrawan@users.noreply.github.com"
] |
50488473+MarcelIrawan@users.noreply.github.com
|
91e65577a10c06e5542cd09c5fb38bde70ed2a09
|
24d2032596bcb4dcebeec4a448d5f8d0fa343f1a
|
/Photo_detection.py
|
bdfd82f566e2a250c6d4609e34fa5ecda64d51c0
|
[] |
no_license
|
sanath-kumar364/Object-Detection
|
fe392d5593c11047b006aaab80d03e4c4c407430
|
74e03bec9419060fcd10dc2b36ba4f28d9363d8a
|
refs/heads/main
| 2023-07-16T07:15:00.105713
| 2021-08-15T06:57:37
| 2021-08-15T06:57:37
| 396,250,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import cv2
thres=0.5
img=cv2.imread('car.png')
classNames=[]
classFile= 'coco_names'
with open(classFile, 'rt') as f:
classNames=f.read().rstrip('\n').split('\n')
configPath='ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath= 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightsPath,configPath)
net.setInputSize(320,320)
net.setInputScale(1.0/127.5)
net.setInputMean((127.5,127.5,127.5))
net.setInputSwapRB(True)
classIds,confs, bbox= net.detect(img, confThreshold=thres)
print(classIds, bbox)
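# Note (added): with OpenCV 4.x, detect() returns per-detection class ids,
# confidence scores, and boxes as [x, y, w, h]; the loop below flattens the
# id/score arrays and zips them with the boxes.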
if len(classIds) !=0 :
for classId, confidence, box in zip(classIds.flatten(),confs.flatten(),bbox):
cv2.rectangle(img, box, color=(0,255,0), thickness=2)
cv2.putText(img, classNames[classId-1].upper(), (box[0]+10,box[1]+30), cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)
cv2.putText(img, str(round(confidence*100,2)), (box[0]+200,box[1]+30), cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)
cv2.imshow("Output",img)
cv2.waitKey(0)
|
[
"noreply@github.com"
] |
sanath-kumar364.noreply@github.com
|
c2a7c6d6fb4d162c42fa162175195ee56f5a70cf
|
cedfdd1398b947b15eccf4473e9bbaddccb878d7
|
/SDK/openstack/tests/functional/compute/v2/test_server.py
|
6712375a7f77b2868cffb1ade69917ad0ebc78a9
|
[] |
no_license
|
Doctor-DC/CMP-Recycle
|
36fb1fdcf7c3a396bfef89d03948bd0ce626b053
|
e3e6421f0b5dc28a075bc5bf91be9a45bcbe97c6
|
refs/heads/dev
| 2022-12-15T06:28:12.695868
| 2019-02-26T06:22:21
| 2019-02-26T06:22:21
| 142,127,512
| 0
| 0
| null | 2022-12-08T02:29:44
| 2018-07-24T08:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,295
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from SDK.openstack.compute.v2 import server
from SDK.openstack.tests.functional import base
from SDK.openstack.tests.functional.compute import base as ft_base
from SDK.openstack.tests.functional.network.v2 import test_network
class TestServer(ft_base.BaseComputeTest):
def setUp(self):
super(TestServer, self).setUp()
self.NAME = self.getUniqueString()
self.server = None
self.network = None
self.subnet = None
self.cidr = '10.99.99.0/16'
flavor = self.conn.compute.find_flavor(base.FLAVOR_NAME,
ignore_missing=False)
image = self.conn.compute.find_image(base.IMAGE_NAME,
ignore_missing=False)
self.network, self.subnet = test_network.create_network(
self.conn,
self.NAME,
self.cidr)
self.assertIsNotNone(self.network)
sot = self.conn.compute.create_server(
name=self.NAME, flavor_id=flavor.id, image_id=image.id,
networks=[{"uuid": self.network.id}])
self.conn.compute.wait_for_server(sot, wait=self._wait_for_timeout)
assert isinstance(sot, server.Server)
self.assertEqual(self.NAME, sot.name)
self.server = sot
def tearDown(self):
sot = self.conn.compute.delete_server(self.server.id)
self.assertIsNone(sot)
# Need to wait for the stack to go away before network delete
self.conn.compute.wait_for_delete(self.server,
wait=self._wait_for_timeout)
test_network.delete_network(self.conn, self.network, self.subnet)
super(TestServer, self).tearDown()
def test_find(self):
sot = self.conn.compute.find_server(self.NAME)
self.assertEqual(self.server.id, sot.id)
def test_get(self):
sot = self.conn.compute.get_server(self.server.id)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.server.id, sot.id)
def test_list(self):
names = [o.name for o in self.conn.compute.servers()]
self.assertIn(self.NAME, names)
def test_server_metadata(self):
test_server = self.conn.compute.get_server(self.server.id)
# get metadata
test_server = self.conn.compute.get_server_metadata(test_server)
self.assertFalse(test_server.metadata)
# set no metadata
self.conn.compute.set_server_metadata(test_server)
test_server = self.conn.compute.get_server_metadata(test_server)
self.assertFalse(test_server.metadata)
# set empty metadata
self.conn.compute.set_server_metadata(test_server, k0='')
server = self.conn.compute.get_server_metadata(test_server)
self.assertTrue(server.metadata)
# set metadata
self.conn.compute.set_server_metadata(test_server, k1='v1')
test_server = self.conn.compute.get_server_metadata(test_server)
self.assertTrue(test_server.metadata)
self.assertEqual(2, len(test_server.metadata))
self.assertIn('k0', test_server.metadata)
self.assertEqual('', test_server.metadata['k0'])
self.assertIn('k1', test_server.metadata)
self.assertEqual('v1', test_server.metadata['k1'])
# set more metadata
self.conn.compute.set_server_metadata(test_server, k2='v2')
test_server = self.conn.compute.get_server_metadata(test_server)
self.assertTrue(test_server.metadata)
self.assertEqual(3, len(test_server.metadata))
self.assertIn('k0', test_server.metadata)
self.assertEqual('', test_server.metadata['k0'])
self.assertIn('k1', test_server.metadata)
self.assertEqual('v1', test_server.metadata['k1'])
self.assertIn('k2', test_server.metadata)
self.assertEqual('v2', test_server.metadata['k2'])
# update metadata
self.conn.compute.set_server_metadata(test_server, k1='v1.1')
test_server = self.conn.compute.get_server_metadata(test_server)
self.assertTrue(test_server.metadata)
self.assertEqual(3, len(test_server.metadata))
self.assertIn('k0', test_server.metadata)
self.assertEqual('', test_server.metadata['k0'])
self.assertIn('k1', test_server.metadata)
self.assertEqual('v1.1', test_server.metadata['k1'])
self.assertIn('k2', test_server.metadata)
self.assertEqual('v2', test_server.metadata['k2'])
# delete metadata
self.conn.compute.delete_server_metadata(
test_server, test_server.metadata.keys())
test_server = self.conn.compute.get_server_metadata(test_server)
self.assertFalse(test_server.metadata)
|
[
"8920622362@qq.com"
] |
8920622362@qq.com
|
e6bb2639249198f8f174f101075567a90774bae1
|
b414757d40c62f50febfd721af01dd4316484a53
|
/m-reftemplate.py
|
357416336e712ba6f0963965c5a451ebde69fcc7
|
[] |
no_license
|
masti01/pcms
|
60e21b7bd469e4beef7f9ecfb9198b224399e240
|
43506a9cee9fa49e4119238b2f433a3c9279e277
|
refs/heads/master
| 2021-09-25T16:38:45.683536
| 2021-09-25T09:54:01
| 2021-09-25T09:54:01
| 73,956,313
| 4
| 1
| null | 2019-03-25T16:25:27
| 2016-11-16T20:02:25
|
Python
|
UTF-8
|
Python
| false
| false
| 10,079
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is a bot to remove the template {{Przypisy}} if no reference is present in the article
Call:
python pwb.py masti/m-reftemplate.py -transcludes:Przypisy -outpage:"Wikipedysta:mastiBot/refTemplate" -maxlines:10000 -summary:"Bot usuwa zbędny szablon {{s|Przypisy}}"
Use global -simulate option for test purposes. No changes to live wiki
will be done.
The following parameters are supported:
¶ms;
-always If used, the bot won't ask if it should file the message
onto user talk page.
-text: Use this text to be added; otherwise 'Test' is used
-replace: Don't add text but replace it
-top Place additional text on top of the page
-summary: Set the action summary message for the edit.
-outpage Results page; otherwise "Wikipedysta:mastiBot/test" is used
-maxlines Max number of entries before new subpage is created; default 1000
"""
#
# (C) Pywikibot team, 2006-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: c1795dd2fb2de670c0b4bddb289ea9d13b1e9b3f $'
#
import pywikibot
from pywikibot import pagegenerators
from pywikibot.bot import (
SingleSiteBot, ExistingPageBot, NoRedirectPageBot, AutomaticTWSummaryBot)
from pywikibot.tools import issue_deprecation_warning
import re
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
refTemplates = [
u'<ref',
u'{{r',
u'{{odn',
u'{{Odn',
u'{{uwaga',
u'{{okres geologiczny infobox',
u'{{zwierzę infobox',
u'{{hetmani wielcy litewscy',
u'{{przesilenia',
u'{{równonoce',
u'{{wartość odżywcza',
u'{{ziemia-śnieżka',
u'{{związki cywilne osób tej samej płci',
u'{{rynek alternatywnych przeglądarek internetowych',
u'{{linia czasu modeli iphone',
u'{{ostatnie stabilne wydanie/gnome',
u'{{ostatnie stabilne wydanie/kde',
u'{{ostatnie testowe wydanie/kde',
u'{{ostatnie stabilne wydanie/konqueror',
u'{{ostatnie stabilne wydanie/mirc',
u'{{pubchem',
]
referencesT = [
u'<references/>',
u'{{przypisy',
u'{{przypisy-lista',
u'{{mini przypisy',
u'{{uwagi',
u'{{uwagi-lista',
]
class BasicBot(
# Refer to pywikibot.bot for generic bot classes
SingleSiteBot, # A bot only working on one site
# CurrentPageBot, # Sets 'current_page'. Process it in treat_page method.
# # Not needed here because we have subclasses
ExistingPageBot, # CurrentPageBot which only treats existing pages
NoRedirectPageBot, # CurrentPageBot which only treats non-redirects
AutomaticTWSummaryBot, # Automatically defines summary; needs summary_key
):
"""
An incomplete sample bot.
@ivar summary_key: Edit summary message key. The message that should be used
is placed on /i18n subdirectory. The file containing these messages
should have the same name as the caller script (i.e. basic.py in this
case). Use summary_key to set a default edit summary message.
@type summary_key: str
"""
summary_key = 'basic-changing'
def __init__(self, generator, **kwargs):
"""
Constructor.
@param generator: the page generator that determines on which pages
to work
@type generator: generator
"""
# Add your own options to the bot and set their defaults
# -always option is predefined by BaseBot class
self.availableOptions.update({
'replace': False, # delete old text and write the new text
'summary': None, # your own bot summary
'text': 'Test', # add this text from option. 'Test' is default
'top': False, # append text on top of the page
'test': False, #switch on test functionality
'outpage': u'User:mastiBot/test', #default output page
'maxlines': 1000, #default number of entries per page
'negative': False, #if True negate behavior i.e. mark pages that DO NOT contain search string
'restart': False, #if restarting do not clean summary page
})
# call constructor of the super class
super(BasicBot, self).__init__(site=True, **kwargs)
# handle old -dry parameter
self._handle_dry_param(**kwargs)
# assign the generator to the bot
self.generator = generator
def _handle_dry_param(self, **kwargs):
"""
Read the dry parameter and set the simulate variable instead.
This is a private method. It prints a deprecation warning for the old
-dry parameter and sets the global simulate variable and informs
the user about this setting.
The constructor of the super class ignores it because it is not
part of self.availableOptions.
@note: You should omit this method in your own application.
@keyword dry: deprecated option to prevent changes on live wiki.
Use -simulate instead.
@type dry: bool
"""
if 'dry' in kwargs:
issue_deprecation_warning('dry argument',
'pywikibot.config.simulate', 1)
# use simulate variable instead
pywikibot.config.simulate = True
pywikibot.output('config.simulate was set to True')
def run(self):
counter = 1
onPageCount = 0
marked = 0
try:
if self.getOption('restart'):
self.saveProgress(self.getOption('outpage'), counter, marked, '', init=False, restart=True)
else:
self.saveProgress(self.getOption('outpage'), counter, marked, '', init=True, restart=False)
for page in self.generator:
pywikibot.output(u'Processing #%i (%i marked):%s' % (counter, marked, page.title(asLink=True)))
counter += 1
onPageCount += 1
if onPageCount >= int(self.getOption('maxlines')):
self.saveProgress(self.getOption('outpage'), counter-1, marked, page.title(asLink=True))
onPageCount = 0
if self.treat(page):
marked += 1
finally:
self.saveProgress(self.getOption('outpage'), counter, marked, page.title(asLink=True))
pywikibot.output(u'Processed: %i, Orphans:%i' % (counter,marked))
def treat(self, page):
"""Load the given page, do some changes, and save it."""
text = page.text
found = False
refActionNeeded = False
# TODO
# set of templates: {{Przypisy}}, {{Przypisy-lista}}, {{Mini przypisy}}, {{Uwagi}} lub {{Uwagi-lista}}, <references/>
for r in referencesT:
if r in text.lower():
if self.getOption('test'):
pywikibot.output(u'reference template found:%s' % r)
refActionNeeded = True
if not refActionNeeded:
return(False)
for t in refTemplates:
if t in text.lower():
found = True
if not found:
page.text = re.sub(ur'\n\{\{przypisy.*?\}\}', u'', text, flags=re.I)
if self.getOption('test'):
pywikibot.input('Waiting...')
pywikibot.output(page.text)
# if summary option is None, it takes the default i18n summary from
# i18n subdirectory with summary_key as summary key.
page.save(summary=self.getOption('summary'))
return(not found)
def saveProgress(self, pagename, counter, marked, lastPage, init=False, restart=False):
"""
log run progress
"""
outpage = pywikibot.Page(pywikibot.Site(), pagename)
if init:
outpage.text = u'Process started: ~~~~~'
elif restart:
outpage.text += u'\n:#Process restarted: ~~~~~'
else:
outpage.text += u'\n#' +str(counter) + u'#' + str(marked) + u' – ' + lastPage + u' – ~~~~~'
outpage.save(summary=u'Bot aktualizuje postęp prac (' + str(counter) + u'#' + str(marked) + u')')
return
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
# Process global arguments to determine desired site
local_args = pywikibot.handle_args(args)
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine on which pages
# to work on.
genFactory = pagegenerators.GeneratorFactory()
# Parse command line arguments
for arg in local_args:
# Catch the pagegenerators options
if genFactory.handleArg(arg):
continue # nothing to do here
# Now pick up your own options
arg, sep, value = arg.partition(':')
option = arg[1:]
if option in ('summary', 'text', 'outpage', 'maxlines'):
if not value:
pywikibot.input('Please enter a value for ' + arg)
options[option] = value
# take the remaining options as booleans.
# You will get a hint if they aren't pre-defined in your bot class
else:
options[option] = True
gen = genFactory.getCombinedGenerator()
if gen:
# The preloading generator is responsible for downloading multiple
# pages from the wiki simultaneously.
gen = pagegenerators.PreloadingGenerator(gen)
# pass generator and private options to the bot
bot = BasicBot(gen, **options)
bot.run() # guess what it does
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == '__main__':
main()
|
[
"mastigm+git@gmail.com"
] |
mastigm+git@gmail.com
|
a7266b63b4e8699dce12d35488b80d449c2de1ea
|
12c6b9f4bacb735ade3d8e51e6befa7f15a85d26
|
/scripts/demo/mutliapp.py
|
ce173080f49932e0c699510d1fa221640219445e
|
[] |
no_license
|
fkanvaly/image_registration
|
fbec6018f1f61aa68e5e28585390220736972a4e
|
220193c074ed6ca3b293b523fbe257553552053f
|
refs/heads/main
| 2023-07-12T08:02:51.862138
| 2021-08-22T12:00:43
| 2021-08-22T12:00:43
| 377,304,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
import streamlit as st
class MultiApp:
"""Framework for combining multiple streamlit applications.
Usage:
def foo():
st.title("Hello Foo")
def bar():
st.title("Hello Bar")
app = MultiApp()
app.add_app("Foo", foo)
app.add_app("Bar", bar)
app.run()
It is also possible to keep each application in a separate file.
import foo
import bar
app = MultiApp()
app.add_app("Foo", foo.app)
app.add_app("Bar", bar.app)
app.run()
"""
def __init__(self):
self.apps = []
def add_app(self, title, func):
"""Adds a new application.
Parameters
----------
func:
the python function to render this app.
title:
title of the app. Appears in the dropdown in the sidebar.
"""
self.apps.append({
"title": title,
"function": func
})
def run(self):
st.sidebar.title('Navigation')
app = st.sidebar.radio(
'Go To',
self.apps,
format_func=lambda app: app['title'])
app['function']()
|
[
"123azeDSQ"
] |
123azeDSQ
|
053f711b6ae47aea5d315e48b27d56fba7464f6d
|
59287fcc4ce0c0fdcfabc73f1fb924265845389a
|
/a_intro/progs/a.py
|
8cf9aa7d88ee69fe485bcd67cfb91615ac07796c
|
[] |
no_license
|
ebuonocore/NSIT_17_Calculabilite
|
787a9579b801727f005000166162a209fc16ace7
|
2450fd4f7ef2694a73ddbe36ea6549f76e2e862c
|
refs/heads/main
| 2023-05-06T04:33:34.509720
| 2021-05-20T21:54:32
| 2021-05-20T21:54:32
| 368,262,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 11:50:29 2019
Draws nested squares in an imperative style
@author: Eric Buonocore
"""
from turtle import *
import math
# imports the turtle library
def pen_style(niveau_max, niveau):
""" Modifie la taille et la couleur du stylo en fonction du niveau max et du niveau actuel
"""
m = math.sqrt(2)**niveau
dim = int(10 / m)
pensize(dim)
r = (niveau_max- niveau)/niveau_max
v = 0.1
b = niveau/niveau_max
col = (r, v, b)
pencolor(col)
# Initialisation phase
a = 200 # Length of the original side
n = 2 # Maximum depth
c = math.sqrt(2) # Reduction factor at each step
colormode(1)
clearscreen()
up()
goto(-a,a)
pensize(2)
down()
speed(0)
# Lower the pen so that it leaves a trace
# Body of the program
# Descent
for i in range(n):
pen_style(n,i)
forward (a/(c**i))
right(45)
# Cancel the one extra final turn
left(45)
for i in range(n-1, -1, -1):
pen_style(n,i)
forward(a/(c**(i))) # finish the half-segment already started
# Face the right direction to finish the last 3 sides of the square
right(90)
for r in range (3):
pen_style(n,i)
forward (2*a/(c**i))
right(90)
left(45)
exitonclick()
# Close the window created by turtle
mainloop()
|
[
"noreply@github.com"
] |
ebuonocore.noreply@github.com
|
6c6d324c7a9eea2c04da96f957aa1bd7d49f4170
|
eda52deedb0484146497c376c16e1b4c084e2e37
|
/Object_detection_main.py
|
49abf8faef8133fe0d867499a4435f3d7b1fb6e5
|
[
"MIT"
] |
permissive
|
sriragjayakumar/Table-column-detection
|
1d4cf952e0107abddd4bbd22101a0774b52e4a26
|
9542253525cf40bf1af8517ccaffb4a68a0b91c0
|
refs/heads/main
| 2023-03-18T11:43:08.319508
| 2021-03-18T14:45:53
| 2021-03-18T14:45:53
| 348,051,072
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,471
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 18 22:41:12 2021
@author: srirag
"""
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
from utils import label_map_util
from utils import visualization_utils as vis_util
def table_det(img_name,line_thickness=8,min_score_thresh=0.60):
sys.path.append("..")
CWD_PATH = os.getcwd()
Model_Folder = 'inference_graph'
PATH_TO_CKPT = os.path.join(CWD_PATH,Model_Folder,'frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
PATH_TO_IMAGE = os.path.join(CWD_PATH,img_name)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=1, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
image = cv2.imread(PATH_TO_IMAGE)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_expanded = np.expand_dims(image_rgb, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=line_thickness,
min_score_thresh=min_score_thresh)
return image
if __name__=='__main__':
input_img='tab1.jpg'
detected_image=table_det(input_img)
cv2.imwrite('Output.jpg',detected_image)
|
[
"sriragjayakumar@gmail.com"
] |
sriragjayakumar@gmail.com
|
cdf7745843d590141c77de2fc6e44db8ed1d145e
|
088d42e04689db579cd5d9e0d600ae4ebafd85ec
|
/Hackerrank.py
|
811d61b60b791838dc12326affe474ac7058040b
|
[] |
no_license
|
amankumar94/coding_practice
|
0c039f46330829dd21a5a1bfa9e9a164419be0bc
|
b1b5865f6272513471040db15f74e430e3b163fe
|
refs/heads/master
| 2022-12-08T12:47:58.136754
| 2020-09-06T00:13:13
| 2020-09-06T00:13:13
| 292,985,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
from itertools import permutations
def getTotalX(a, b):
# Write your code here
list_of_nums = []
init_num = max(a)
max_num = min(b)
while init_num <= max_num:
a_has_factors = 0
num_is_factor = 0
for element in a:
if init_num % element == 0:
a_has_factors += 1
for element in b:
if element % init_num == 0:
num_is_factor += 1
if a_has_factors == len(a) and num_is_factor == len(b):
list_of_nums.append(init_num)
init_num += 1
return len(list_of_nums)
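# Worked example (added): with a = [2, 4] and b = [16, 32, 96] the candidates
# run from max(a) = 4 up to min(b) = 16; of these, 4, 8 and 16 are divisible by
# every element of a and divide every element of b, so getTotalX(a, b) == 3.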
a = [2, 4]
b = [16, 32, 96]
# print(getTotalX(a, b))
def birthday(s, d, m):
    # s -> array of chocolate squares
    # d -> birthday day: the target sum of a segment
    # m -> birthday month: the number of consecutive pieces she wants to give him
    pos = 0
    # count every window of m consecutive squares whose sum equals d
    for i in range(len(s) - m + 1):
        if sum(s[i:i + m]) == d:
            pos += 1
    return pos
# print(birthday([4], 4, 1))
def migratoryBirds(arr):
count_dict ={}
for num in arr:
if num in count_dict.keys():
count_dict[num] += 1
else:
count_dict[num] = 1
max_bird = 0
max_freq = 0
for key in count_dict.keys():
if count_dict[key] >= max_freq and key<max_bird:
max_bird = key
max_freq = count_dict[key]
elif count_dict[key] > max_freq:
max_bird = key
max_freq= count_dict[key]
return max_bird
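# Worked example (added): in [1,2,3,4,5,4,3,2,1,3,4] both 3 and 4 occur three
# times; the key < max_bird branch keeps the smaller id on a tie, so the call
# below prints 3.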
print(migratoryBirds([1,2,3,4,5,4,3,2,1,3,4]))
def dayOfProgrammer(year):
isleapyear = False
if year < 1918:
if (year % 4 == 0):
isleapyear = True
if isleapyear == False:
return '13.09.' + str(year)
else:
return '12.09.' + str(year)
elif year > 1918:
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
isleapyear = True
if isleapyear == False:
return '13.09.' + str(year)
else:
return '12.09.' + str(year)
    else:
        return '26.09.1918'  # in the 1918 transition year, day 256 falls on 26 September
print(dayOfProgrammer(1800))
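# Worked example (added): 1800 predates 1918, so the Julian rule applies and
# 1800 % 4 == 0 makes it a leap year; the call above prints '12.09.1800'.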
#magic square cost reduction calculation
X = []
X.extend(list(map(int, '4 9 2'.split())))
X.extend(list(map(int, '3 5 7'.split())))
X.extend(list(map(int, '8 1 5'.split())))
print(X)
Ans = 81
for P in permutations(range(1, 10)):
if sum(P[0:3]) == 15 and sum(P[3:6]) == 15 and sum(P[0::3]) == 15 and sum(P[1::3]) == 15 and P[0] + P[4] + P[8] == 15 and (P[2] + P[4] + P[6] == 15):
print(P)
Ans = min(Ans, sum(abs(P[i] - X[i]) for i in range(0, 9)))
print(Ans)
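# Worked example (added): the grid above differs from the magic square
# 4 9 2 / 3 5 7 / 8 1 6 only in its last cell (5 vs 6), so the minimal
# conversion cost printed is 1.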
def reverseWords(s):
# temp_lst = []
# temp_str = ""
# for letter in s:
# if s != '\s':
# temp_str += letter
# print(temp_str)
# elif s== " ":
# print(temp_lst)
# temp_lst.append(temp_str)
# temp_lst.append(" ")
# temp_str = ""
# temp_lst = temp_lst[::-1]
# new_str = "".join(temp_lst)
# print(new_str.split())
s = "".join(s)
s = s.split(" ")
s = s[::-1]
s = " ".join(s)
s = [letter for letter in s]
print(s)
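# Worked example (added): the character list below joins to "the sky is blue",
# which is reversed word-by-word to "blue is sky the" and printed as a list of
# characters.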
s= ["t","h","e"," ","s","k","y"," ","i","s"," ","b","l","u","e"]
reverseWords(s)
|
[
"aman1994.srm@gmail.com"
] |
aman1994.srm@gmail.com
|
2ec5d2b57a549231c503b53bad41ba084eaa5180
|
bb493e881b1efb5966b048dabb3330861fc2348a
|
/Problem2_Even_Fibonaci_Nums.py
|
e89ea2a2ca8010b5af4dd39b709863fb26c92a17
|
[] |
no_license
|
sharon-ruane/coding_practice
|
1d13f669a2c218602a93e2c8a7991096465516a0
|
7f05098b48e5ef6915e148132ea8a7c6a7756063
|
refs/heads/master
| 2020-06-03T14:09:32.564956
| 2019-06-12T19:49:21
| 2019-06-12T19:49:21
| 191,598,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
# Problem2
# Even Fibonacci numbers
# Each new term in the Fibonacci sequence is generated by adding the previous two terms.
# By starting with 1 and 2, the first 10 terms will be:
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
# Considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
def even_fibonachi_sum(limit):
seq = [0, 1, 1, 2] # prob shouldn't have to hard code this
x = 1
while x < limit:
# print(seq[-2::1])
seq.append(sum(seq[-2::1]))
x = seq[-1]
even = [a for a in seq if a % 2 == 0]
return sum(even)
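# Note (added): unlike even_fibonachi_sum2 below, this version does not drop
# the final term that overshoots the limit, so for limit=100 the even term 144
# is still included in the sum.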
# print(even_fibonachi_sum(100))
def even_fibonachi_sum2(limit):
seq = []
len = 0
num = 0
while num < limit:
if len <= 1:
seq.append(len)
else:
seq.append(sum(seq[-2::1]))
len = len+1
num = seq[-1]
print(seq)
even = [a for a in seq if a % 2 == 0 and a < limit]
# when is best to remove the last function?
return sum(even)
print(even_fibonachi_sum2(100))
#
#
# can do recursively - this is a bit trippy, and I think it's slower...
def gen_seq(length):
if(length <= 1):
return length
else:
return gen_seq(length-1) + gen_seq(length-2)
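# Note (added): gen_seq recomputes both branches on every call, so its running
# time grows exponentially with length; caching results (e.g. with
# functools.lru_cache) would bring it back to linear time.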
def even_fibonachi_sum_rec(limit):
seq = []
len = 0
num = 0
while num < limit:
seq.append(gen_seq(len))
len = len + 1
num = seq[-1]
print(seq)
even = [a for a in seq if a % 2 == 0 and a < limit]
return sum(even)
print(even_fibonachi_sum_rec(100))
|
[
"sharon.ruane@gmail.com"
] |
sharon.ruane@gmail.com
|
86b6b59da4ff0bd5cd94ad890b04687008b97e5a
|
136de4786d4a90d75aa168bb8d26ebc85e017366
|
/app.py
|
12419ddf217f8a6973bd75de494f811f94c230f5
|
[] |
no_license
|
aaravpandya/topic-model-article
|
69a29353e5ffc3cd38f01050e6c50526012048ff
|
867a68b3a1a764daca94169246a9548023d5d5bc
|
refs/heads/master
| 2020-05-18T17:26:19.924308
| 2019-05-02T07:50:12
| 2019-05-02T07:50:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
from flask import Flask, request, Response, send_file, render_template, flash, redirect, url_for, jsonify
import io
from tempfile import NamedTemporaryFile
import nltk
import numpy as np
import pandas as pd
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
from nltk.corpus import wordnet
nltk.download('wordnet')
stemmer = SnowballStemmer('english')
def lemmatize_stemming(text):
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
result.append(lemmatize_stemming(token))
return result
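# Illustrative example (added; exact stems may vary with the NLTK data in use):
# preprocess("The judges are deciding the cases") drops stopwords and tokens of
# length <= 3, lemmatizes the rest as verbs and stems them, yielding something
# like ['judg', 'decid', 'case'].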
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/findtopic', methods=['POST'])
def findtopic():
text = request.form['text']
data = pd.read_csv('abcnews-date-text.csv', error_bad_lines=False)
data = data.iloc[0:5000,0:].values
data=pd.DataFrame(data,columns=["publish_date","headline_text"])
data_text = data['headline_text']
data_text['index'] = data_text.index
documents = data_text
data_text = data_text.drop(data_text.index[len(data_text)-1])
documents = documents.drop(documents.index[len(documents)-1])
doc_sample = documents[documents.index[40]]
print('original document: ')
words = []
for word in doc_sample.split(' '):
words.append(word)
documents=pd.DataFrame(documents)
processed_docs = documents['headline_text'].map(preprocess)
print(processed_docs[:10])
dictionary = gensim.corpora.Dictionary(processed_docs)
count = 0
for k, v in dictionary.iteritems():
print(k, v)
count += 1
if count > 10:
break
a = preprocess(text)
print (a)
other_corpus = [dictionary.doc2bow(a)]
unseen_doc = other_corpus[0]
print(unseen_doc)
#dictionary = dictionary.filter_extremes(no_below=15, no_above=0.5)
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
bow_corpus[40]
bow_doc_40 = bow_corpus[40]
for i in range(len(bow_doc_40)):
print("Word {} (\"{}\") appears {} time.".format(bow_doc_40[i][0],
dictionary[bow_doc_40[i][0]],
bow_doc_40[i][1]))
lda_model = gensim.models.ldamodel.LdaModel(bow_corpus, num_topics=1, id2word=dictionary)
vector = lda_model[unseen_doc]
print("new text")
output = ""
for index, score in sorted(vector,key=lambda tup: -1*tup[1]):
output = output + ("Score:{}, \n Topic: {}".format(score, lda_model.print_topic(index,10)))
return output
if __name__ == '__main__':
#app.run(debug=True)
app.run(host='0.0.0.0')
# app.run(debug=True)
|
[
"shiwang.romi@gmail.com"
] |
shiwang.romi@gmail.com
|
d1602560d544b4ced553267b4b55e496e58e3b3b
|
e4f45c96508207366ec192041404e5e0d21fe489
|
/ECOL565/tree_calculations.py
|
6b6149bed844b3b37c894c26e01035a067fce3a9
|
[] |
no_license
|
asherkhb/cousework
|
742c05cbb3a03c92ce02291f1becc3c5502e73c3
|
36df2bde2fd7cf68b805b4e5f874f71b1fc7bd2c
|
refs/heads/master
| 2020-05-24T15:50:08.971640
| 2015-11-09T22:30:34
| 2015-11-09T22:30:34
| 35,519,330
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
__author__ = 'asherkhb'
# tree_calculations.py
# Calculates tree metrics for two trees fed as arguments
#
# Usage: python tree_calculations.py <tree1> <tree2>
#
# Dependencies: DendroPy
from sys import argv
import dendropy
tree1_file = argv[1]
tree2_file = argv[2]
tree1 = dendropy.Tree.get_from_path(tree1_file, 'newick')
tree2 = dendropy.Tree.get_from_path(tree2_file, 'newick')
sym_diff = tree1.symmetric_difference(tree2)
#sym_diff = dendropy.treecalc.symmetric_difference(tree1, tree2)
pos_neg = tree1.false_positives_and_negatives(tree2)
#pos_neg = dendropy.treecalc.false_positives_and_negatives(tree1, tree2)
euc_dist = tree1.euclidean_distance(tree2)
#euc_dist = dendropy.treecalc.euclidean_distance(tree1, tree2)
rob_fol = tree1.robinson_foulds_distance(tree2)
#rob_fol = dendropy.treecalc.robinson_foulds_distance(tree1, tree2)
print("Tree Distances")
print("- Tree 1: %s" % tree1_file)
print("- Tree 2: %s" % tree2_file)
print('Symmetric Distance: %s' % str(sym_diff))
print('False Positives and Negatives: %s' % str(pos_neg))
print('Euclidean Distance: %s' % str(euc_dist))
print('Robinson_Foulds_Distance: %s' % str(rob_fol))
|
[
"asherkhb@gmail.com"
] |
asherkhb@gmail.com
|
250cab1ddcefe33e4ec725b9a411629b96eadbab
|
39d4504ec1da8975fac526d6801b94f4348b6b61
|
/research/object_detection/utils/visualization_utils.py
|
4a6f3295590def33574e84e94fed587f31809eb9
|
[
"Apache-2.0"
] |
permissive
|
vincentcheny/models
|
fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
refs/heads/master
| 2020-07-23T21:38:24.559521
| 2019-11-15T07:50:11
| 2019-11-15T07:50:11
| 207,712,649
| 1
| 0
|
Apache-2.0
| 2019-09-11T03:12:31
| 2019-09-11T03:12:31
| null |
UTF-8
|
Python
| false
| false
| 44,714
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index, include_masks=False,
include_keypoints=False, include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4-6]: masks (optional)
[4-6]: keypoints (optional)
[4-6]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be:
image - uint8 numpy array with shape (img_height, img_width, 3).
boxes - a numpy array of shape [N, 4].
classes - a numpy array of shape [N].
scores - a numpy array of shape [N] or None.
-- Optional positional arguments --
instance_masks - a numpy array of shape [N, image_height, image_width].
keypoints - a numpy array of shape [N, num_keypoints, 2].
track_ids - a numpy array of shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
    category_index: a dict that maps integer ids to category dicts. e.g.
      {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
    track_ids: [N, max_detections] int32 tensor of unique track ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
      Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
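# A minimal usage sketch; `images`, `boxes`, `classes` and `scores` below are
# hypothetical batched detector outputs, not values defined in this module:
#   annotated = draw_bounding_boxes_on_image_tensors(
#       images, boxes, classes, scores, category_index,
#       max_boxes_to_draw=10, min_score_thresh=0.5)
#   tf.summary.image('detections', annotated)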
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
      Default is True.
Returns:
A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if key != input_data_fields.original_image:
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_with_detections_list.append(
tf.concat([images_with_detections, images_with_groundtruth], axis=2))
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
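# A minimal usage sketch (`pil_img` is a hypothetical PIL.Image). Note that
# each keypoint row is (y, x), matching how the function unpacks coordinates:
#   kps = np.array([[0.25, 0.5], [0.75, 0.5]])  # normalized (y, x) pairs
#   draw_keypoints_on_image(pil_img, kps, color='blue', radius=3)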
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
    color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
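# A minimal usage sketch with hypothetical arrays; the mask must be a binary
# uint8 array, and the blend is written back into `image` in place:
#   img = np.zeros((100, 100, 3), dtype=np.uint8)
#   m = np.zeros((100, 100), dtype=np.uint8)
#   m[20:60, 20:60] = 1
#   draw_mask_on_image_array(img, m, color='blue', alpha=0.5)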
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
      boxes and plots all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
          prime_multiplier = _get_multiplier_for_color_randomness()
          box_to_color_map[box] = STANDARD_COLORS[
              (prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates)
return image
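# A minimal usage sketch; `image`, `boxes`, `classes` and `scores` are
# hypothetical detector outputs (numpy arrays shaped as documented above):
#   category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
#   annotated = visualize_boxes_and_labels_on_image_array(
#       image, boxes, classes, scores, category_index,
#       use_normalized_coordinates=True, min_score_thresh=0.3)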
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
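# A minimal usage sketch for both summary helpers; `losses` is a hypothetical
# 1-D float32 tensor:
#   add_cdf_image_summary(losses, 'loss_cdf')
#   add_hist_image_summary(losses, bins=np.linspace(0., 1., 50), name='loss_hist')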
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image'):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
      use_normalized_coordinates: Whether to assume boxes and keypoints are in
        normalized coordinates (as opposed to absolute coordinates).
        Default is True.
summary_name_prefix: A string prefix for each image summary.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
for a batched example. Note that, we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
          tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4),
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]])
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(
get_images, [], [tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right'):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(
eval_dict, self._category_index, self._max_boxes_to_draw,
self._min_score_thresh, self._use_normalized_coordinates)
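# A minimal usage sketch; `eval_dict` would come from something like
# eval_util.result_dict_for_batched_example(), and the names here are
# assumptions:
#   vis = VisualizeSingleFrameDetections(category_index, max_examples_to_draw=3)
#   eval_metric_ops = vis.get_estimator_eval_metric_ops(eval_dict)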
# --- file: /dashboard/migrations/0002_auto_20201203_1929.py | repo: Kgermando/e-s | license: Apache-2.0 ---
# Generated by Django 3.1.2 on 2020-12-03 18:29
from django.db import migrations, models
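# Note: DateTimeField(auto_now=True) makes Django overwrite the field on
# every save(), which suits a "last modified" timestamp; for a creation
# timestamp, auto_now_add=True is the conventional choice.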
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='forms_artisans',
name='created',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='forms_artisans',
name='date',
field=models.CharField(max_length=300),
),
migrations.AlterField(
model_name='forms_consultant',
name='created',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='forms_consultant',
name='date',
field=models.CharField(max_length=300),
),
migrations.AlterField(
model_name='forms_entreprise',
name='created',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='forms_entreprise',
name='date',
field=models.CharField(max_length=300),
),
migrations.AlterField(
model_name='forms_investisseur',
name='created',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='forms_partenaire',
name='created',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='opportunite',
name='created',
field=models.DateTimeField(auto_now=True),
),
]
# --- file: /venv/lib/python3.8/site-packages/django/contrib/postgres/constraints.py | repo: denokenya/django-schooling-rest-api | no license ---
from django.db.backends.ddl_references import Statement, Table
from django.db.models import Deferrable, F, Q
from django.db.models.constraints import BaseConstraint
from django.db.models.sql import Query
__all__ = ["ExclusionConstraint"]
class ExclusionConstraint(BaseConstraint):
template = "CONSTRAINT %(name)s EXCLUDE USING %(index_type)s (%(expressions)s)%(where)s%(deferrable)s"
def __init__(
self, *, name, expressions, index_type=None, condition=None, deferrable=None
):
if index_type and index_type.lower() not in {"gist", "spgist"}:
raise ValueError(
"Exclusion constraints only support GiST or SP-GiST indexes."
)
if not expressions:
raise ValueError(
"At least one expression is required to define an exclusion "
"constraint."
)
if not all(
isinstance(expr, (list, tuple)) and len(expr) == 2 for expr in expressions
):
raise ValueError("The expressions must be a list of 2-tuples.")
if not isinstance(condition, (type(None), Q)):
raise ValueError("ExclusionConstraint.condition must be a Q instance.")
if condition and deferrable:
raise ValueError("ExclusionConstraint with conditions cannot be deferred.")
if not isinstance(deferrable, (type(None), Deferrable)):
raise ValueError(
"ExclusionConstraint.deferrable must be a Deferrable instance."
)
self.expressions = expressions
self.index_type = index_type or "GIST"
self.condition = condition
self.deferrable = deferrable
super().__init__(name=name)
def _get_expression_sql(self, compiler, schema_editor, query):
expressions = []
for expression, operator in self.expressions:
if isinstance(expression, str):
expression = F(expression)
expression = expression.resolve_expression(query=query)
sql, params = compiler.compile(expression)
sql = sql % tuple(schema_editor.quote_value(p) for p in params)
expressions.append("%s WITH %s" % (sql, operator))
return expressions
def _get_condition_sql(self, compiler, schema_editor, query):
if self.condition is None:
return None
where = query.build_where(self.condition)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def constraint_sql(self, model, schema_editor):
query = Query(model, alias_cols=False)
compiler = query.get_compiler(connection=schema_editor.connection)
expressions = self._get_expression_sql(compiler, schema_editor, query)
condition = self._get_condition_sql(compiler, schema_editor, query)
return self.template % {
"name": schema_editor.quote_name(self.name),
"index_type": self.index_type,
"expressions": ", ".join(expressions),
"where": " WHERE (%s)" % condition if condition else "",
"deferrable": schema_editor._deferrable_constraint_sql(self.deferrable),
}
def create_sql(self, model, schema_editor):
return Statement(
"ALTER TABLE %(table)s ADD %(constraint)s",
table=Table(model._meta.db_table, schema_editor.quote_name),
constraint=self.constraint_sql(model, schema_editor),
)
def remove_sql(self, model, schema_editor):
return schema_editor._delete_constraint_sql(
schema_editor.sql_delete_check, model, schema_editor.quote_name(self.name)
)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
kwargs["expressions"] = self.expressions
if self.condition is not None:
kwargs["condition"] = self.condition
if self.index_type.lower() != "gist":
kwargs["index_type"] = self.index_type
if self.deferrable:
kwargs["deferrable"] = self.deferrable
return path, args, kwargs
def __eq__(self, other):
if isinstance(other, self.__class__):
return (
self.name == other.name
and self.index_type == other.index_type
and self.expressions == other.expressions
and self.condition == other.condition
and self.deferrable == other.deferrable
)
return super().__eq__(other)
def __repr__(self):
return "<%s: index_type=%s, expressions=%s%s%s>" % (
self.__class__.__qualname__,
self.index_type,
self.expressions,
"" if self.condition is None else ", condition=%s" % self.condition,
"" if self.deferrable is None else ", deferrable=%s" % self.deferrable,
)
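# A minimal usage sketch with a hypothetical model; range-overlap exclusion is
# the canonical use case (DateTimeRangeField comes from
# django.contrib.postgres.fields, and the '=' operator on a plain column
# typically requires the btree_gist extension):
#   class Reservation(models.Model):
#       room = models.ForeignKey('Room', on_delete=models.CASCADE)
#       timespan = DateTimeRangeField()
#
#       class Meta:
#           constraints = [
#               ExclusionConstraint(
#                   name='exclude_overlapping_reservations',
#                   expressions=[('timespan', '&&'), ('room', '=')],
#               ),
#           ]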
# --- file: /bruteforce/1476.py | repo: HeidiHyeji/ex-python | no license ---
import sys
E, S, M = map(int,sys.stdin.readline().split())#15,28,19
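# Brute-force search: the three counters cycle with periods 15 (E), 28 (S)
# and 19 (M). tmp = 15*i + E enumerates every year whose E-value matches;
# for each candidate, the residues mod 28 and mod 19 are mapped back to the
# 1-based ranges and compared with S and M.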
i = 0
while True:
tmp = 15*i+E
rs = tmp % 28 if tmp % 28 != 0 else 28
rm = tmp % 19 if tmp % 19 != 0 else 19
if rs == S and rm == M:
break
i = i+1
print(tmp)
# --- file: /xai/brain/wordbase/verbs/_surfaces.py | repo: cash2one/xai | license: MIT ---
from xai.brain.wordbase.verbs._surface import _SURFACE
# class header
class _SURFACES(_SURFACE):
def __init__(self,):
_SURFACE.__init__(self)
self.name = "SURFACES"
self.specie = 'verbs'
self.basic = "surface"
self.jsondata = {}
# --- file: /qa/rpc-tests/getchaintips.py | repo: nikolake/minerium | license: MIT ---
#!/usr/bin/env python2
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Copyright (c) 2014-2020 The Minerium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This leaves us with two chain tips; verify that getchaintips reports both.
from test_framework.test_framework import MineriumTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (MineriumTestFramework):
def run_test (self):
MineriumTestFramework.run_test (self)
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
# --- file: /scripts/label_image.py | repo: arun-kumark/Ionic-Android-Application | no license ---
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
# Global list for negative cases
unknown = ['agata potato', 'cashew', 'honneydew melon', 'nectarine', 'spanish pear', 'asterix potato', \
'fuji apple', 'kiwi', 'onion', 'plum', 'taiti lime', 'diamond peach', 'granny smith apple', \
'orange', 'watermelon', 'broccoli']
rolls = ['rolls round', 'rolls square', 'rolls bag']
chicken_wings = ['chicken wings uncut','chicken wings cut']
chicken_legs = ['chicken legs uncut', 'chicken legs cut']
french_fries = ['french fries thick', 'french fries thin', 'french fries wavy']
chicken_nuggets = ['chicken nugget cut', 'chicken nugget uncut']
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
input_mean=0, input_std=255):
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(file_reader, channels = 3,
name='png_reader')
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
name='gif_reader'))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
else:
image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
name='jpeg_reader')
float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
def load_labels(label_file):
  label = []
  proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
  for l in proto_as_ascii_lines:
    label.append(l.rstrip())
  return label
if __name__ == "__main__":
file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg"
model_file = "tf_files/retrained_graph.pb"
label_file = "tf_files/retrained_labels.txt"
input_height = 224
input_width = 224
input_mean = 128
input_std = 128
input_layer = "input"
output_layer = "final_result"
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
args = parser.parse_args()
if args.graph:
model_file = args.graph
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.input_layer:
input_layer = args.input_layer
if args.output_layer:
output_layer = args.output_layer
graph = load_graph(model_file)
t = read_tensor_from_image_file(file_name,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
  input_operation = graph.get_operation_by_name(input_name)
  output_operation = graph.get_operation_by_name(output_name)
with tf.Session(graph=graph) as sess:
results = sess.run(output_operation.outputs[0],
{input_operation.outputs[0]: t})
results = np.squeeze(results)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(label_file)
for i in top_k:
if labels[i] in unknown:
print ("Unknown")
elif labels[i] in chicken_nuggets:
print("chicken_nuggets")
elif labels[i] in rolls:
print("rolls")
elif labels[i] in chicken_wings:
print("chicken_wings")
elif labels[i] in chicken_legs:
print("chicken_legs")
elif labels[i] in french_fries:
print("french_fries")
else:
#print(labels[i], results[i])
print(labels[i])
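# Example invocation (a sketch; all paths below are placeholders):
#   python label_image.py --image=fries.jpg \
#       --graph=tf_files/retrained_graph.pb \
#       --labels=tf_files/retrained_labels.txt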
# --- file: /src/controller/schema.py | repo: mehsoy/jaws | license: MIT ---
""".. module:: schema """
import re
from voluptuous import Schema, Invalid, Required, Any, All
from application.system.user_role import UserRole
"""Contains all schemata used in controller package
``Schema`` objects are used validate incoming JSON formatted
requests.
"""
# Validators for schemata
def Role(value):
if value in [color.name for color in UserRole]:
return value
else:
        raise Invalid("Role doesn't exist.")
def Status(value):
if value == 'ACTIVE' or value == 'DEACTIVATED':
return value
else:
raise Invalid('Bad Request: Status has to be either ACTIVE or DEACTIVATED!')
def Instruction(value):
perm_pattern = re.compile('[+-=][rwx-]{0,3}')
if perm_pattern.match(value):
return value
else:
raise Invalid('Bad Request: invalid instruction syntax.')
def Tag_Type(value):
if value in ['user', 'group', 'other']:
return value
else:
        raise Invalid('Bad Request: invalid tag type.')
# Schemata are defined here
job_schema = Schema({
Required('workspace'): str,
Required('target'): str,
Required('a', default=False): bool,
Required('b', default=False): bool,
Required('e', default=False): bool,
Required('for_user', default=None): Any(None, str),
})
job_status_schema = Schema({
'priority': int,
'status': Status,
})
binary_status_schema = Schema({
Required('status'): Status,
})
role_schema = Schema({
Required('role'): Role,
})
patch_access_schema = Schema({
Required('tag_type'): Tag_Type,
Required('name', default=None): object,
Required('instruction'): Instruction,
})
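# A minimal usage sketch with a hypothetical payload: calling a schema
# validates the dict and fills in the declared defaults, and voluptuous
# raises MultipleInvalid when a required key is missing or a validator fails:
#   data = job_schema({'workspace': 'ws1', 'target': 'archive1'})
#   # -> {'workspace': 'ws1', 'target': 'archive1', 'a': False, 'b': False,
#   #     'e': False, 'for_user': None}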
# --- file: /inference.py | repo: T-C-J/shuiying | license: MIT ---
"""Translate an image to another image
An example of command-line usage is:
python inference.py --model pretrained/apple2orange.pb \
--input input_sample.jpg \
--output output_sample.jpg \
--image_size 256
"""
import tensorflow as tf
import os
from model import CycleGAN
import utils
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('model', '', 'model path (.pb)')
tf.flags.DEFINE_string('input', 'input_sample.jpg', 'input image path (.jpg)')
tf.flags.DEFINE_string('output', 'output_sample.jpg', 'output image path (.jpg)')
tf.flags.DEFINE_integer('image_size', '256', 'image size, default: 256')
def inference():
graph = tf.Graph()
with graph.as_default():
with tf.gfile.FastGFile(FLAGS.input, 'rb') as f:
image_data = f.read()
input_image = tf.image.decode_jpeg(image_data, channels=3)
input_image = tf.image.resize_images(input_image, size=(FLAGS.image_size, FLAGS.image_size))
input_image = utils.convert2float(input_image)
input_image.set_shape([FLAGS.image_size, FLAGS.image_size, 3])
with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file:
graph_def = tf.GraphDef()
graph_def.ParseFromString(model_file.read())
[output_image] = tf.import_graph_def(graph_def,
input_map={'input_image': input_image},
return_elements=['output_image:0'],
name='output')
with tf.Session(graph=graph) as sess:
generated = output_image.eval()
with open(FLAGS.output, 'wb') as f:
f.write(generated)
def main(unused_argv):
inference()
if __name__ == '__main__':
tf.app.run()
# --- file: /gcloudspeechtotext/models/recognition_config.py | repo: r3versi/gcloud-speech-to-text | no license ---
# coding: utf-8
"""
Cloud Speech-to-Text API
    Converts audio to text by applying powerful neural network models. <br> **PLEASE NOTE**: This API is provided by Google; besides the documentation provided below, you can find the Google API documentation [here](https://cloud.google.com/speech-to-text/docs/reference/rest). You can refer to the Google documentation as well, except for the URLs needed to call the API, which are documented here below.  # noqa: E501
OpenAPI spec version: v3.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RecognitionConfig(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'encoding': 'str',
'speech_contexts': 'list[SpeechContext]',
'model': 'str',
'audio_channel_count': 'int',
'diarization_config': 'SpeakerDiarizationConfig',
'enable_word_time_offsets': 'bool',
'language_code': 'str',
'profanity_filter': 'bool',
'use_enhanced': 'bool',
'metadata': 'RecognitionMetadata',
'sample_rate_hertz': 'int',
'enable_separate_recognition_per_channel': 'bool',
'enable_automatic_punctuation': 'bool',
'max_alternatives': 'int'
}
attribute_map = {
'encoding': 'encoding',
'speech_contexts': 'speechContexts',
'model': 'model',
'audio_channel_count': 'audioChannelCount',
'diarization_config': 'diarizationConfig',
'enable_word_time_offsets': 'enableWordTimeOffsets',
'language_code': 'languageCode',
'profanity_filter': 'profanityFilter',
'use_enhanced': 'useEnhanced',
'metadata': 'metadata',
'sample_rate_hertz': 'sampleRateHertz',
'enable_separate_recognition_per_channel': 'enableSeparateRecognitionPerChannel',
'enable_automatic_punctuation': 'enableAutomaticPunctuation',
'max_alternatives': 'maxAlternatives'
}
def __init__(self, encoding=None, speech_contexts=None, model=None, audio_channel_count=None, diarization_config=None, enable_word_time_offsets=None, language_code=None, profanity_filter=None, use_enhanced=None, metadata=None, sample_rate_hertz=None, enable_separate_recognition_per_channel=None, enable_automatic_punctuation=None, max_alternatives=None): # noqa: E501
"""RecognitionConfig - a model defined in Swagger""" # noqa: E501
self._encoding = None
self._speech_contexts = None
self._model = None
self._audio_channel_count = None
self._diarization_config = None
self._enable_word_time_offsets = None
self._language_code = None
self._profanity_filter = None
self._use_enhanced = None
self._metadata = None
self._sample_rate_hertz = None
self._enable_separate_recognition_per_channel = None
self._enable_automatic_punctuation = None
self._max_alternatives = None
self.discriminator = None
if encoding is not None:
self.encoding = encoding
if speech_contexts is not None:
self.speech_contexts = speech_contexts
if model is not None:
self.model = model
if audio_channel_count is not None:
self.audio_channel_count = audio_channel_count
if diarization_config is not None:
self.diarization_config = diarization_config
if enable_word_time_offsets is not None:
self.enable_word_time_offsets = enable_word_time_offsets
if language_code is not None:
self.language_code = language_code
if profanity_filter is not None:
self.profanity_filter = profanity_filter
if use_enhanced is not None:
self.use_enhanced = use_enhanced
if metadata is not None:
self.metadata = metadata
if sample_rate_hertz is not None:
self.sample_rate_hertz = sample_rate_hertz
if enable_separate_recognition_per_channel is not None:
self.enable_separate_recognition_per_channel = enable_separate_recognition_per_channel
if enable_automatic_punctuation is not None:
self.enable_automatic_punctuation = enable_automatic_punctuation
if max_alternatives is not None:
self.max_alternatives = max_alternatives
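    # A minimal usage sketch; the values here are illustrative only:
    #   config = RecognitionConfig(encoding='LINEAR16',
    #                              sample_rate_hertz=16000,
    #                              language_code='en-US')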
@property
def encoding(self):
"""Gets the encoding of this RecognitionConfig. # noqa: E501
Encoding of audio data sent in all `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV` audio files and required for all other audio formats. For details, see AudioEncoding. # noqa: E501
:return: The encoding of this RecognitionConfig. # noqa: E501
:rtype: str
"""
return self._encoding
@encoding.setter
def encoding(self, encoding):
"""Sets the encoding of this RecognitionConfig.
Encoding of audio data sent in all `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV` audio files and required for all other audio formats. For details, see AudioEncoding. # noqa: E501
:param encoding: The encoding of this RecognitionConfig. # noqa: E501
:type: str
"""
allowed_values = ["ENCODING_UNSPECIFIED", "LINEAR16", "FLAC", "MULAW", "AMR", "AMR_WB", "OGG_OPUS", "SPEEX_WITH_HEADER_BYTE"] # noqa: E501
if encoding not in allowed_values:
raise ValueError(
"Invalid value for `encoding` ({0}), must be one of {1}" # noqa: E501
.format(encoding, allowed_values)
)
self._encoding = encoding
@property
def speech_contexts(self):
"""Gets the speech_contexts of this RecognitionConfig. # noqa: E501
Array of SpeechContext. A means to provide context to assist the speech recognition. For more information, see [speech adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). # noqa: E501
:return: The speech_contexts of this RecognitionConfig. # noqa: E501
:rtype: list[SpeechContext]
"""
return self._speech_contexts
@speech_contexts.setter
def speech_contexts(self, speech_contexts):
"""Sets the speech_contexts of this RecognitionConfig.
Array of SpeechContext. A means to provide context to assist the speech recognition. For more information, see [speech adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). # noqa: E501
:param speech_contexts: The speech_contexts of this RecognitionConfig. # noqa: E501
:type: list[SpeechContext]
"""
self._speech_contexts = speech_contexts
@property
def model(self):
"""Gets the model of this RecognitionConfig. # noqa: E501
Which model to select for the given request. Select the model best suited to your domain to get best results. If a model is not explicitly specified, then we auto-select a model based on the parameters in the RecognitionConfig. <table> <tr> <td><b>Model</b></td> <td><b>Description</b></td> </tr> <tr> <td><code>command_and_search</code></td> <td>Best for short queries such as voice commands or voice search.</td> </tr> <tr> <td><code>phone_call</code></td> <td>Best for audio that originated from a phone call (typically recorded at an 8khz sampling rate).</td> </tr> <tr> <td><code>video</code></td> <td>Best for audio that originated from from video or includes multiple speakers. Ideally the audio is recorded at a 16khz or greater sampling rate. This is a premium model that costs more than the standard rate.</td> </tr> <tr> <td><code>default</code></td> <td>Best for audio that is not one of the specific audio models. For example, long-form audio. Ideally the audio is high-fidelity, recorded at a 16khz or greater sampling rate.</td> </tr> </table> # noqa: E501
:return: The model of this RecognitionConfig. # noqa: E501
:rtype: str
"""
return self._model
@model.setter
def model(self, model):
"""Sets the model of this RecognitionConfig.
Which model to select for the given request. Select the model best suited to your domain to get best results. If a model is not explicitly specified, then we auto-select a model based on the parameters in the RecognitionConfig. <table> <tr> <td><b>Model</b></td> <td><b>Description</b></td> </tr> <tr> <td><code>command_and_search</code></td> <td>Best for short queries such as voice commands or voice search.</td> </tr> <tr> <td><code>phone_call</code></td> <td>Best for audio that originated from a phone call (typically recorded at an 8khz sampling rate).</td> </tr> <tr> <td><code>video</code></td> <td>Best for audio that originated from from video or includes multiple speakers. Ideally the audio is recorded at a 16khz or greater sampling rate. This is a premium model that costs more than the standard rate.</td> </tr> <tr> <td><code>default</code></td> <td>Best for audio that is not one of the specific audio models. For example, long-form audio. Ideally the audio is high-fidelity, recorded at a 16khz or greater sampling rate.</td> </tr> </table> # noqa: E501
:param model: The model of this RecognitionConfig. # noqa: E501
:type: str
"""
self._model = model
@property
def audio_channel_count(self):
"""Gets the audio_channel_count of this RecognitionConfig. # noqa: E501
The number of channels in the input audio data. ONLY set this for MULTI-CHANNEL recognition. Valid values for LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are '1'-'254'. Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to one channel (mono). Note: We only recognize the first channel by default. To perform independent recognition on each channel set `enable_separate_recognition_per_channel` to 'true'. # noqa: E501
:return: The audio_channel_count of this RecognitionConfig. # noqa: E501
:rtype: int
"""
return self._audio_channel_count
@audio_channel_count.setter
def audio_channel_count(self, audio_channel_count):
"""Sets the audio_channel_count of this RecognitionConfig.
The number of channels in the input audio data. ONLY set this for MULTI-CHANNEL recognition. Valid values for LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are '1'-'254'. Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to one channel (mono). Note: We only recognize the first channel by default. To perform independent recognition on each channel set `enable_separate_recognition_per_channel` to 'true'. # noqa: E501
:param audio_channel_count: The audio_channel_count of this RecognitionConfig. # noqa: E501
:type: int
"""
self._audio_channel_count = audio_channel_count
@property
def diarization_config(self):
"""Gets the diarization_config of this RecognitionConfig. # noqa: E501
:return: The diarization_config of this RecognitionConfig. # noqa: E501
:rtype: SpeakerDiarizationConfig
"""
return self._diarization_config
@diarization_config.setter
def diarization_config(self, diarization_config):
"""Sets the diarization_config of this RecognitionConfig.
:param diarization_config: The diarization_config of this RecognitionConfig. # noqa: E501
:type: SpeakerDiarizationConfig
"""
self._diarization_config = diarization_config
@property
def enable_word_time_offsets(self):
"""Gets the enable_word_time_offsets of this RecognitionConfig. # noqa: E501
If `true`, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If `false`, no word-level time offset information is returned. The default is `false`. # noqa: E501
:return: The enable_word_time_offsets of this RecognitionConfig. # noqa: E501
:rtype: bool
"""
return self._enable_word_time_offsets
@enable_word_time_offsets.setter
def enable_word_time_offsets(self, enable_word_time_offsets):
"""Sets the enable_word_time_offsets of this RecognitionConfig.
If `true`, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If `false`, no word-level time offset information is returned. The default is `false`. # noqa: E501
:param enable_word_time_offsets: The enable_word_time_offsets of this RecognitionConfig. # noqa: E501
:type: bool
"""
self._enable_word_time_offsets = enable_word_time_offsets
@property
def language_code(self):
"""Gets the language_code of this RecognitionConfig. # noqa: E501
Required. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: \"en-US\". See [Language Support](https://cloud.google.com/speech-to-text/docs/languages) for a list of the currently supported language codes. # noqa: E501
:return: The language_code of this RecognitionConfig. # noqa: E501
:rtype: str
"""
return self._language_code
@language_code.setter
def language_code(self, language_code):
"""Sets the language_code of this RecognitionConfig.
Required. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: \"en-US\". See [Language Support](https://cloud.google.com/speech-to-text/docs/languages) for a list of the currently supported language codes. # noqa: E501
:param language_code: The language_code of this RecognitionConfig. # noqa: E501
:type: str
"""
self._language_code = language_code
@property
def profanity_filter(self):
"""Gets the profanity_filter of this RecognitionConfig. # noqa: E501
If set to `true`, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. \"f***\". If set to `false` or omitted, profanities won't be filtered out. # noqa: E501
:return: The profanity_filter of this RecognitionConfig. # noqa: E501
:rtype: bool
"""
return self._profanity_filter
@profanity_filter.setter
def profanity_filter(self, profanity_filter):
"""Sets the profanity_filter of this RecognitionConfig.
If set to `true`, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. \"f***\". If set to `false` or omitted, profanities won't be filtered out. # noqa: E501
:param profanity_filter: The profanity_filter of this RecognitionConfig. # noqa: E501
:type: bool
"""
self._profanity_filter = profanity_filter
@property
def use_enhanced(self):
"""Gets the use_enhanced of this RecognitionConfig. # noqa: E501
Set to true to use an enhanced model for speech recognition. If `use_enhanced` is set to true and the `model` field is not set, then an appropriate enhanced model is chosen if an enhanced model exists for the audio. If `use_enhanced` is true and an enhanced version of the specified model does not exist, then the speech is recognized using the standard version of the specified model. # noqa: E501
:return: The use_enhanced of this RecognitionConfig. # noqa: E501
:rtype: bool
"""
return self._use_enhanced
@use_enhanced.setter
def use_enhanced(self, use_enhanced):
"""Sets the use_enhanced of this RecognitionConfig.
Set to true to use an enhanced model for speech recognition. If `use_enhanced` is set to true and the `model` field is not set, then an appropriate enhanced model is chosen if an enhanced model exists for the audio. If `use_enhanced` is true and an enhanced version of the specified model does not exist, then the speech is recognized using the standard version of the specified model. # noqa: E501
:param use_enhanced: The use_enhanced of this RecognitionConfig. # noqa: E501
:type: bool
"""
self._use_enhanced = use_enhanced
@property
def metadata(self):
"""Gets the metadata of this RecognitionConfig. # noqa: E501
:return: The metadata of this RecognitionConfig. # noqa: E501
:rtype: RecognitionMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this RecognitionConfig.
:param metadata: The metadata of this RecognitionConfig. # noqa: E501
:type: RecognitionMetadata
"""
self._metadata = metadata
@property
def sample_rate_hertz(self):
"""Gets the sample_rate_hertz of this RecognitionConfig. # noqa: E501
Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of re-sampling). This field is optional for FLAC and WAV audio files, but is required for all other audio formats. For details, see AudioEncoding. # noqa: E501
:return: The sample_rate_hertz of this RecognitionConfig. # noqa: E501
:rtype: int
"""
return self._sample_rate_hertz
@sample_rate_hertz.setter
def sample_rate_hertz(self, sample_rate_hertz):
"""Sets the sample_rate_hertz of this RecognitionConfig.
Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of re-sampling). This field is optional for FLAC and WAV audio files, but is required for all other audio formats. For details, see AudioEncoding. # noqa: E501
:param sample_rate_hertz: The sample_rate_hertz of this RecognitionConfig. # noqa: E501
:type: int
"""
self._sample_rate_hertz = sample_rate_hertz
@property
def enable_separate_recognition_per_channel(self):
"""Gets the enable_separate_recognition_per_channel of this RecognitionConfig. # noqa: E501
This needs to be set to `true` explicitly and `audio_channel_count` > 1 to get each channel recognized separately. The recognition result will contain a `channel_tag` field to state which channel that result belongs to. If this is not true, we will only recognize the first channel. The request is billed cumulatively for all channels recognized: `audio_channel_count` multiplied by the length of the audio. # noqa: E501
:return: The enable_separate_recognition_per_channel of this RecognitionConfig. # noqa: E501
:rtype: bool
"""
return self._enable_separate_recognition_per_channel
@enable_separate_recognition_per_channel.setter
def enable_separate_recognition_per_channel(self, enable_separate_recognition_per_channel):
"""Sets the enable_separate_recognition_per_channel of this RecognitionConfig.
This needs to be set to `true` explicitly and `audio_channel_count` > 1 to get each channel recognized separately. The recognition result will contain a `channel_tag` field to state which channel that result belongs to. If this is not true, we will only recognize the first channel. The request is billed cumulatively for all channels recognized: `audio_channel_count` multiplied by the length of the audio. # noqa: E501
:param enable_separate_recognition_per_channel: The enable_separate_recognition_per_channel of this RecognitionConfig. # noqa: E501
:type: bool
"""
self._enable_separate_recognition_per_channel = enable_separate_recognition_per_channel
@property
def enable_automatic_punctuation(self):
"""Gets the enable_automatic_punctuation of this RecognitionConfig. # noqa: E501
If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no effect at all. The default 'false' value does not add punctuation to result hypotheses. Note: This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a premium feature. # noqa: E501
:return: The enable_automatic_punctuation of this RecognitionConfig. # noqa: E501
:rtype: bool
"""
return self._enable_automatic_punctuation
@enable_automatic_punctuation.setter
def enable_automatic_punctuation(self, enable_automatic_punctuation):
"""Sets the enable_automatic_punctuation of this RecognitionConfig.
If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no effect at all. The default 'false' value does not add punctuation to result hypotheses. Note: This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a premium feature. # noqa: E501
:param enable_automatic_punctuation: The enable_automatic_punctuation of this RecognitionConfig. # noqa: E501
:type: bool
"""
self._enable_automatic_punctuation = enable_automatic_punctuation
@property
def max_alternatives(self):
"""Gets the max_alternatives of this RecognitionConfig. # noqa: E501
Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of `SpeechRecognitionAlternative` messages within each `SpeechRecognitionResult`. The server may return fewer than `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of one. If omitted, will return a maximum of one. # noqa: E501
:return: The max_alternatives of this RecognitionConfig. # noqa: E501
:rtype: int
"""
return self._max_alternatives
@max_alternatives.setter
def max_alternatives(self, max_alternatives):
"""Sets the max_alternatives of this RecognitionConfig.
Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of `SpeechRecognitionAlternative` messages within each `SpeechRecognitionResult`. The server may return fewer than `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of one. If omitted, will return a maximum of one. # noqa: E501
:param max_alternatives: The max_alternatives of this RecognitionConfig. # noqa: E501
:type: int
"""
self._max_alternatives = max_alternatives
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RecognitionConfig, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognitionConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
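# --- usage sketch (illustrative, not part of the generated source; assumes the
# swagger-generated __init__ accepts the model fields as keyword arguments) ---
# config = RecognitionConfig(language_code='en-US',
#                            sample_rate_hertz=16000,
#                            enable_word_time_offsets=True)
# config.to_dict()  # recursively serializes nested models such as metadata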
|
[
"fluo392@gmail.com"
] |
fluo392@gmail.com
|
5e8d50005ab897c655e31ce7412c5ebb10c4f5db
|
dcf29102afcf78cc37073daedd2b4044c235e1e1
|
/Bisection.py
|
8f24d36ca278fadf62958fd5bd5ba3d1e6f0ecac
|
[] |
no_license
|
RonsonGallery/Numerical-analysis
|
88e9d4335645e5c2b3402cf4940715530dbed69f
|
91357e99f2bee4ee147158e2b54440bed4d709d7
|
refs/heads/master
| 2023-02-18T16:23:58.707028
| 2021-01-19T10:40:55
| 2021-01-19T10:40:55
| 306,577,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,960
|
py
|
#Roni Gerkerov - 316583145
#Eden Mozes - 315997049
import math
import sympy as sp
from numpy import log
x = sp.symbols('x')
f = sp.Function('f')
def func_calc(func,x,val):
return func.subs(x,val).evalf()
def f(x):
#x = sp.symbols('x')
#x**5 -5*x - +2
#(math.sin(x**2 + 5*x + 6))/(2*(2.718)**(-x))
return (0.5*2.718**x)*(math.sin(x**2 + 5*x + 6)) # 2.2718 was a typo for e ~ 2.718; ftx below uses 2.718
def bisection(a, b, epsilon): # Receives a range and returns the root in that range, if one exists
xl = a
xr = b
#k = round(-log(epsilon/(b-a))/log(2)) + 1
iterator = 0
#print(k)
while abs(xl - xr) >= epsilon:
c = (xl+xr)/2
prod = f(xl) * f(c)
if prod > 0: # f(xl) and f(c) share a sign, so the root lies in [c, xr]
xl = c
else: # sign change between xl and c, so the root lies in [xl, c]
xr = c
#if iterator == int(k):
#return None
iterator += 1
return c
def bisection_special(a, b, epsilon, f): # same as the previous one, but for a sympy derivative expression
xl = a
xr = b
while abs(xl - xr) >= epsilon:
c = (xl+xr)/2
prod = func_calc(f,x,xl) * func_calc(f,x,c)
if prod > 0: # same-sign product: the root lies in [c, xr]
xl = c
else: # sign change: the root lies in [xl, c]
xr = c
return c
iter = 0
def bigger_bisection(a, b, g): # receives a large range and uses the bisection function to find all possible roots
xl = a
xr = b
my_dict = {}
while xr - 0.1 >= xl - 0.1:
# print(xr)
my_dict[xr] = g(xr)
xr = round(xr - 0.100, 2)
current = 0
last = f(b)
current_key = 0
last_key = 0
roots = []
for key in my_dict:
current_key = key
current = my_dict[key]
#print(my_dict[key])
if current * last < 0:
#print(current, last)
print("Potential root in this range: " + str( current_key), str(last_key))
if bisection(current_key, last_key, 1e-10) != None:
roots.append(bisection(current_key, last_key, 1e-10))
last = current
last_key = current_key
return roots
def bigger_bisection_diff(a, b, g): # same as the previous one, only for the derivative version
xl = a
xr = b
my_dict = {}
while xr - 0.1 >= xl - 0.1:
#print(xr)
my_dict[xr] = func_calc(g,x,xr)
xr = round(xr - 0.100, 2)
current = 0
last = func_calc(g,x,b)
current_key = 0
last_key = 0
roots = []
for key in my_dict:
current_key = key
current = my_dict[key]
#print(my_dict[key])
if check_root(my_dict[key]):
roots.append(my_dict[key])
if current * last < 0:
#print(current, last)
#print(current_key,last_key)
roots.append(bisection_special(current_key, last_key, 1e-10,g))
last = current
last_key = current_key
return roots
"""
my_dict = {}
while xr - 0.1 >= xl-0.1:
# print(xr)
my_dict[xr] = f(xr)
xr = round(xr - 0.100,2)
current = 0
last = f(b)
current_key = 0
last_key = 0
roots = []
for key in my_dict:
current_key = key
current = my_dict[key]
if current * last < 0:
print(current,last)
roots.append(bisection(current_key,last_key,1e-10))
last = current
last_key = current_key
"""
def check_root(x):
# A tolerance is needed here: an exact floating-point f(x) == 0 comparison
# almost never succeeds, which silently filtered out every candidate root.
return abs(f(x)) < 1e-9
def ftx(x):
return 0.5 * (2.718 ** x) * (math.cos(x**2 + 6 + 5*x)*(5+2*x) + math.sin(x**2 + 6 + 5*x))
#print(key , current)
#answer = bisection(-5,5,1e-10)
a = -3
b = 1
x = sp.symbols('x')
solutions = bigger_bisection(a,b,f)
print(solutions)
#ftx = (sp.diff(f(x),x))
#check_roots = bigger_bisection_diff(a,b,ftx)
print("Potential roots for derivative:")
check_roots = bigger_bisection(a,b,ftx)
roots = list(filter(check_root,check_roots))
for root in roots:
solutions.append(root) # append the value itself; indexing the list by its own float values was a bug
print("Answer with bisection method gives the root at X = ",solutions)
#print(f(check_roots[0]))
#print(f(check_roots[1]))
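# --- minimal sketch of the core idea (illustrative, not part of the original):
# finding sqrt(2) by bisecting g(t) = t**2 - 2 over [1, 2]:
# def g(t): return t * t - 2
# lo, hi = 1.0, 2.0
# while hi - lo >= 1e-10:
#     mid = (lo + hi) / 2
#     lo, hi = (mid, hi) if g(lo) * g(mid) > 0 else (lo, mid)
# lo is now ~1.41421356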
|
[
"ronson537@gmail.com"
] |
ronson537@gmail.com
|
9c2185ac3b16e3c826891d95458499b40f42744f
|
d49faba12b010adf33ee0fa99f83741887f40c17
|
/machete/questions/models.py
|
a6e9cec751b61c37d4ca5f51c5286af5cbf845f6
|
[] |
no_license
|
rustyrazorblade/machete
|
db0c7a07f8d8efba256196d1b676d331fe8f76b9
|
47dd8b9dfe8f74edb286a3b38b9a06be1e63bbfa
|
refs/heads/master
| 2020-05-17T22:05:27.499482
| 2013-05-02T04:53:53
| 2013-05-02T04:53:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
import thunderdome
from machete.base.models import BaseVertex, BaseEdge
class Question(BaseVertex):
text = thunderdome.String()
class Answer(BaseVertex):
text = thunderdome.String()
@property
def question(self):
return self.inV() # the bare call discarded the traversal result
class HasAnswer(BaseEdge):
pass
|
[
"jon@jonhaddad.com"
] |
jon@jonhaddad.com
|
685c747e0f76ba7addacb084fdc786402ebe3ff4
|
6ccc87e32feadc811d8a77f119a766fc8b1dde6a
|
/tests/conftest.py
|
141427e988ad9a9f19fe4fa0189e179e2d2e4539
|
[
"MIT"
] |
permissive
|
ddhira123/visualkeras
|
45ccdf54477f86dcb0dfd3cb3b76a0ae9a57d921
|
6adb60c695dc29fdaa373d7e7a5b0feeb06c0e6a
|
refs/heads/master
| 2023-06-30T23:46:06.041331
| 2021-08-05T03:50:48
| 2021-08-05T03:50:48
| 392,893,038
| 0
| 0
|
MIT
| 2021-08-05T03:52:52
| 2021-08-05T03:40:43
|
Python
|
UTF-8
|
Python
| false
| false
| 7,315
|
py
|
import pytest
try:
import tensorflow as tf
HAS_TF = True
except ModuleNotFoundError:
HAS_TF = False
try:
import keras
HAS_KERAS = True
except ModuleNotFoundError:
HAS_KERAS = False
def get_functional_model(lib):
shape_x = 48
shape_y = 48
input_img = lib.layers.Input(shape=(shape_x, shape_y, 1), name='input_1') # input
layer_1 = lib.layers.Conv2D(1, (1, 1), padding='same', activation='relu', name='layer_1_1')(input_img)
layer_1 = lib.layers.Conv2D(1, (3, 3), padding='same', activation='relu', name='layer_1_2')(layer_1)
layer_2 = lib.layers.Conv2D(1, (1, 1), padding='same', activation='relu', name='layer_2_1')(input_img)
layer_2 = lib.layers.Conv2D(1, (5, 5), padding='same', activation='relu', name='layer_2_2')(layer_2)
layer_3 = lib.layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='layer_3_1')(input_img)
layer_3 = lib.layers.Conv2D(1, (1, 1), padding='same', activation='relu', name='layer_3_2')(layer_3)
input_img2 = lib.layers.Input(shape=(shape_x, shape_y, 1), name='input_2') # input
mid_1 = lib.layers.concatenate([layer_1, layer_2, layer_3, input_img2], axis=3, name='concat')
flat_1 = lib.layers.Flatten(name='flatten')(mid_1)
dense_1 = lib.layers.Dense(1, activation='relu', name='dense_1')(flat_1)
dense_2 = lib.layers.Dense(1, activation='relu', name='dense_2')(dense_1)
dense_3 = lib.layers.Dense(1, activation='relu', name='dense_3')(dense_2)
output = lib.layers.Dense(1, activation='softmax', name='dense_4')(dense_3)
model = lib.Model([input_img, input_img2], [output, mid_1])
return model
def get_functional_model_with_nested(lib):
shape_x = 48
shape_y = 48
input_img = lib.layers.Input(shape=(shape_x, shape_y, 1), name='input_1') # input
layer_1 = lib.layers.Conv2D(1, (1, 1), padding='same', activation='relu', name='layer_1_1')(input_img)
layer_1 = lib.layers.Conv2D(1, (3, 3), padding='same', activation='relu', name='layer_1_2')(layer_1)
layer_2 = lib.layers.Conv2D(1, (1, 1), padding='same', activation='relu', name='layer_2_1')(input_img)
layer_2 = lib.layers.Conv2D(1, (5, 5), padding='same', activation='relu', name='layer_2_2')(layer_2)
layer_3 = lib.layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='layer_3_1')(input_img)
layer_3 = lib.layers.Conv2D(1, (1, 1), padding='same', activation='relu', name='layer_3_2')(layer_3)
input_img2 = lib.layers.Input(shape=(shape_x, shape_y, 1), name='input_2') # input
mid_1 = lib.layers.concatenate([layer_1, layer_2, layer_3, input_img2], axis=3, name='concat')
flat_1 = lib.layers.Flatten(name='flatten')(mid_1)
dense_1 = lib.layers.Dense(1, activation='relu', name='dense_1')(flat_1)
dense_2 = lib.layers.Dense(1, activation='relu', name='dense_2')(dense_1)
dense_3 = lib.layers.Dense(1, activation='relu', name='dense_3')(dense_2)
subsubnet_in = lib.layers.Input(shape=(1,), name='sub_input')
subsubnet_l1 = lib.layers.Dense(10, activation='relu', name='sub_dense_1')(subsubnet_in)
subsubnet_l2 = lib.layers.Dense(10, activation='relu', name='sub_dense_2')(subsubnet_in)
subsubnet_m1 = lib.layers.concatenate([subsubnet_l1, subsubnet_l2], axis=1, name='sub_concatenate')
subsubnet_model = lib.Model([subsubnet_in], [subsubnet_m1], name='sub_model')
sub_out = subsubnet_model(dense_3)
output = lib.layers.Dense(1, activation='softmax', name='dense_4')(sub_out)
model = lib.Model([input_img, input_img2], [output, mid_1])
return model
def get_sequential_model(lib):
image_size = 8
model = lib.models.Sequential()
model.add(lib.layers.InputLayer(input_shape=(image_size, image_size, 3), name='input'))
model.add(lib.layers.ZeroPadding2D((1, 1), name='zero_padding'))
model.add(lib.layers.Conv2D(64, activation='relu', kernel_size=(3, 3), name='conv'))
model.add(lib.layers.MaxPooling2D((2, 2), strides=(2, 2), name='max_pooling'))
model.add(lib.layers.Flatten(name='flatten'))
model.add(lib.layers.Dense(1, activation='relu', name='dense_1'))
model.add(lib.layers.Dropout(0.5, name='dropout'))
model.add(lib.layers.Dense(1, activation='softmax', name='dense_2'))
return model
def get_sequential_model_with_nested(lib):
submodel = lib.models.Sequential()
submodel.add(lib.layers.Dense(1, activation='relu', name='sub_dense_1'))
submodel.add(lib.layers.Dropout(0.5, name='sub_dropout'))
submodel.add(lib.layers.Dense(1, activation='relu', name='sub_dense_2'))
image_size = 8
model = lib.models.Sequential()
model.add(lib.layers.InputLayer(input_shape=(image_size, image_size, 3), name='input'))
model.add(lib.layers.ZeroPadding2D((1, 1), name='zero_padding'))
model.add(lib.layers.Conv2D(64, activation='relu', kernel_size=(3, 3), name='conv'))
model.add(lib.layers.MaxPooling2D((2, 2), strides=(2, 2), name='max_pooling'))
model.add(lib.layers.Flatten(name='flatten'))
model.add(lib.layers.Dense(1, activation='relu', name='dense_1'))
model.add(lib.layers.Dropout(0.5, name='dropout'))
model.add(submodel)
model.add(lib.layers.Dense(1, activation='softmax', name='dense_2'))
return model
def pytest_generate_tests(metafunc):
if "functional_model" in metafunc.fixturenames:
metafunc.parametrize("functional_model", ["functional_model_tf", "functional_model_keras"], indirect=True)
if "sequential_model" in metafunc.fixturenames:
metafunc.parametrize("sequential_model", ["sequential_model_tf", "sequential_model_keras"], indirect=True)
if "model" in metafunc.fixturenames:
metafunc.parametrize("model", ["sequential_model_tf", "sequential_model_keras",
"functional_model_tf", "functional_model_keras",
"sequential_model_tf_with_nested", "sequential_model_keras_with_nested",
"functional_model_tf_with_nested", "functional_model_keras_with_nested"
],
indirect=True)
@pytest.fixture
def model(request):
return _get_models(request)
@pytest.fixture
def sequential_model(request):
return _get_models(request)
@pytest.fixture
def functional_model(request):
return _get_models(request)
def _get_models(request):
if request.param == "functional_model_tf":
return get_functional_model(tf.keras)
elif request.param == "functional_model_keras":
return get_functional_model(keras)
elif request.param == "sequential_model_tf":
return get_sequential_model(tf.keras)
elif request.param == "sequential_model_keras":
return get_sequential_model(keras)
elif request.param == "functional_model_tf_with_nested":
return get_functional_model_with_nested(tf.keras)
elif request.param == "functional_model_keras_with_nested":
return get_functional_model_with_nested(keras)
elif request.param == "sequential_model_tf_with_nested":
return get_sequential_model_with_nested(tf.keras)
elif request.param == "sequential_model_keras_with_nested":
return get_sequential_model_with_nested(keras)
else:
raise ValueError("invalid internal test config")
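# --- example consumer of these fixtures (illustrative, not part of the source):
# a test that takes the 'model' fixture runs once per entry in the
# pytest_generate_tests parametrization above, e.g.
# def test_model_has_layers(model):
#     assert len(model.layers) > 0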
|
[
"paulgavrikov@yahoo.de"
] |
paulgavrikov@yahoo.de
|
8ecd9ec1df16e13cb9b076d15190233ee81d77f0
|
bef2676a1768d2b2f723a2325bb07d61b5c1da39
|
/mjengo/bin/zlogger.py
|
11544277a9cceb2483dad26b31cfed839ade1f4e
|
[
"MIT"
] |
permissive
|
bilha-analytics/school
|
f7c15f6a6881a26b11371971d476c41162166ee0
|
d3491eb3f88386dcef35abe13ff8d494790a607d
|
refs/heads/master
| 2023-04-23T08:55:16.020982
| 2021-05-14T01:10:16
| 2021-05-14T01:10:16
| 294,672,089
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,557
|
py
|
'''
author: bg
goal: proper and consistent logging
type: util
how: use std logging module, format to liking,
learn: https://www.loggly.com/ultimate-guide/python-logging-basics/ , https://docs.python.org/3.5/howto/logging-cookbook.html ,
refactors: Do we want this as a class? What form; Singleton?
'''
import os
import sys, traceback
from datetime import datetime
import logging
from termcolor import colored
os.system('color')
DEFAULT_LOGGING_LEVEL = logging.NOTSET
LOGGER = None
APP_NAME = None
def startLogger(name, level=DEFAULT_LOGGING_LEVEL):
'''
Input:
name: Name of logger, say app name
level: Logging level. Default is everything @ NOTSET
Return: None
TODO: review at module Vs app level usage @ LOGGER object instance + basicConfig
'''
global LOGGER, APP_NAME
APP_NAME = name #"UNNAMED" if name is None else name
if LOGGER is None:
LOGGER = logging.getLogger( APP_NAME )
LOGGER.addHandler( logging.StreamHandler() )
logging.basicConfig()
setLogLevel( level )
print("Logger is started")
def setLogLevel( level=DEFAULT_LOGGING_LEVEL):
'''
Input: level: Logging level. Default is everything @ NOTSET
Return: None
'''
if LOGGER is not None:
LOGGER.setLevel( level )
def log(src, msg, ltype=logging.INFO, appName='zmoi'):
'''
For now using one instance for the entire app and its modules. Doing some name_formatting hack
The only call needed to get things working
Input:
src: App or module making the request
msg: Message to log. Can be any object type
type: level
appName: overarching app name. Very first time used
Return: None
'''
if LOGGER is None:
startLogger(appName)
logit = {
logging.DEBUG : LOGGER.debug,
logging.WARNING : LOGGER.warning,
logging.ERROR : LOGGER.error,
logging.CRITICAL : LOGGER.critical ,
} #INFO @ default;all else
colorit = {
logging.WARNING : 'yellow',
logging.ERROR : 'red',
logging.CRITICAL : 'red' ,
}# default = blue
nameit = {
logging.WARNING : "WARNING ",
logging.ERROR : "ERROR ",
logging.CRITICAL : "CRITICAL" ,
} # default = INFO
nm = nameit.get( ltype, "INFO    ") if APP_NAME is None else ""
msg_str = "{}: {} [{}] {}".format(
nm,
datetime.now(),
colored(src, colorit.get(ltype, 'blue') ),
msg )
log_ = logit.get(ltype, LOGGER.info)
log_(msg_str)
print(msg_str)
def logError(src, msg):
'''
Specific formatting for errors and provide stack trace on exceptions etc
Input:
src : source of log request
msg : message to go with exception output
Return: None
'''
e = sys.exc_info()[0]
log("{}".format(src), "{}: {}".format(msg, e), ltype=logging.ERROR )
print( traceback.format_exc() )
if __name__ == "__main__":
log(__name__, "Trying out the logger")
log("Main.MyModule", "The quick brown fox jumped over the lazy dogs!", logging.WARN)
log( __name__, "Yet another message here with a very very very very ong long long string string", logging.ERROR)
log( __name__, "Yet another message here", logging.CRITICAL)
|
[
"bilha.analytics@gmail.com"
] |
bilha.analytics@gmail.com
|
9cb139e239abbaa3191055c86ee06cfdc79f561f
|
5865cc1b70db72b7a9a9a07547f05a1f47959bb1
|
/math/0x01-plotting/100-gradient.py
|
bf7651d269addbcd5a85b11d97dd23e4a4f3424a
|
[] |
no_license
|
nildiert/holbertonschool-machine_learning
|
c8cefc3a784348f09128c0f4d82d65b9d56000c5
|
273f81feaa14fe24ac4db5d82be0d13299e857b8
|
refs/heads/master
| 2020-12-21T12:27:48.280880
| 2020-09-25T17:58:33
| 2020-09-25T17:58:33
| 236,429,499
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(5)
x = np.random.randn(2000) * 10
y = np.random.randn(2000) * 10
z = np.random.rand(2000) + 40 - np.sqrt(np.square(x) + np.square(y))
plt.scatter(x, y, c=z)
clrbar = plt.colorbar()
clrbar.set_label("elevation (m)")
plt.ylabel('y coordinate (m)')
plt.xlabel('x coordinate (m)')
plt.suptitle('Mountain Elevation')
plt.show()
|
[
"niljordan23@gmail.com"
] |
niljordan23@gmail.com
|
f026bccb96d874bc15ceeb09ae475077d68edd2f
|
dd364d0defd5164939be057f087576b5f076d610
|
/demo/rest_views.py
|
4dd1b2ffeef871363107fcf8b958e4d1afca2ec2
|
[] |
no_license
|
srikanthpragada/PYTHON_07_SEP_2018_WEBDEMO
|
31cc45bd29fb16fbbf1d6e65dd25cee0cad04405
|
227f333fa90a8aafc25a5116799663b03b64de87
|
refs/heads/master
| 2020-04-01T19:23:21.337333
| 2018-11-01T02:30:47
| 2018-11-01T02:30:47
| 153,551,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
from rest_framework import serializers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Book
class BookSerializer(serializers.ModelSerializer):
class Meta:
model = Book
fields = ('id', 'title', 'price', 'pubid')
@api_view(['GET', 'POST'])
def list_books(request):
if request.method == "GET":
books = Book.objects.all()
serializer = BookSerializer(books, many=True)
return Response(serializer.data)
else: # POST
print("Adding new books", request.data)
serializer = BookSerializer(data=request.data)
if serializer.is_valid():
serializer.save() # insert row into table
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400) # Bad request
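# --- usage sketch (illustrative; the URL prefix and field values are made up,
# since routing for list_books lives elsewhere) ---
# GET  /<prefix>/books/  -> serialized list of all Book rows
# POST /<prefix>/books/ with {"title": "T", "price": 100, "pubid": 1}
#      -> 201 with the saved row, or 400 with serializer.errors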
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
d26c8b7c1f124ad63556a6d7f37ada8c9460daa2
|
7e86a9bd9ec1f82838d114bf71ad0f6d0f12152c
|
/venv/Lib/site-packages/stellar_sdk/memo.py
|
34dbdb18e8a5f30b598f845a00eb162cedea6e34
|
[
"MIT"
] |
permissive
|
yunoUNo/fini
|
b39688e7203d61f031f2ae9686845b0beccd9b2a
|
a833bc64a3aaf94f7268ec6eac690aa68327dd96
|
refs/heads/master
| 2023-08-05T17:42:48.726825
| 2021-09-29T13:30:32
| 2021-09-29T13:30:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,226
|
py
|
import abc
from typing import Union
from . import xdr as stellar_xdr
from .exceptions import MemoInvalidException
from .utils import hex_to_bytes
__all__ = ["Memo", "NoneMemo", "TextMemo", "IdMemo", "HashMemo", "ReturnHashMemo"]
class Memo(object, metaclass=abc.ABCMeta):
"""The :class:`Memo` object, which represents the base class for memos for
use with Stellar transactions.
The memo for a transaction contains optional extra information about the
transaction taking place. It is the responsibility of the client to
interpret this value.
See the following implementations that serve a more practical use with the
library:
* :class:`NoneMemo` - No memo.
* :class:`TextMemo` - A string encoded using either ASCII or UTF-8, up to 28-bytes long.
* :class:`IdMemo` - A 64 bit unsigned integer.
* :class:`HashMemo` - A 32 byte hash.
* :class:`ReturnHashMemo` - A 32 byte hash intended to be interpreted as the hash of the transaction the sender is refunding.
See `Stellar's documentation on Transactions
<https://www.stellar.org/developers/guides/concepts/transactions.html#memo>`__
for more information on how memos are used within transactions, as well as
information on the available types of memos.
"""
@abc.abstractmethod
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`Memo`."""
@staticmethod
def from_xdr_object(xdr_object: stellar_xdr.Memo) -> "Memo":
"""Returns an Memo object from XDR memo object."""
xdr_types = {
stellar_xdr.MemoType.MEMO_TEXT: TextMemo,
stellar_xdr.MemoType.MEMO_ID: IdMemo,
stellar_xdr.MemoType.MEMO_HASH: HashMemo,
stellar_xdr.MemoType.MEMO_RETURN: ReturnHashMemo,
stellar_xdr.MemoType.MEMO_NONE: NoneMemo,
}
# TODO: Maybe we should raise Key Error here
memo_cls = xdr_types.get(xdr_object.type, NoneMemo)
return memo_cls.from_xdr_object(xdr_object) # type: ignore[attr-defined]
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
pass # pragma: no cover
class NoneMemo(Memo):
"""The :class:`NoneMemo`, which represents no memo for a transaction."""
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "NoneMemo":
"""Returns an :class:`NoneMemo` object from XDR memo object."""
return cls()
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`NoneMemo`."""
return stellar_xdr.Memo(type=stellar_xdr.MemoType.MEMO_NONE)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return True
def __str__(self):
return "<NoneMemo>"
class TextMemo(Memo):
"""The :class:`TextMemo`, which represents MEMO_TEXT in a transaction.
:param text: A string encoded using either ASCII or UTF-8, up to
28-bytes long.
:type text: str, bytes
:raises: :exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``text`` is not a valid text memo.
"""
def __init__(self, text: Union[str, bytes]) -> None:
if not isinstance(text, (str, bytes)):
raise MemoInvalidException(
f"TextMemo expects string or bytes type got a {type(text)}"
)
if not isinstance(text, bytes):
text = bytes(text, encoding="utf-8")
self.memo_text: bytes = text
length = len(self.memo_text)
if length > 28:
raise MemoInvalidException(
f"Text should be <= 28 bytes (ascii encoded), got {length} bytes."
)
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "TextMemo":
"""Returns an :class:`TextMemo` object from XDR memo object."""
assert xdr_object.text is not None
return cls(bytes(xdr_object.text))
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`TextMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_TEXT, text=self.memo_text
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return self.memo_text == other.memo_text
def __str__(self):
return f"<TextMemo [memo={self.memo_text}]>"
class IdMemo(Memo):
"""The :class:`IdMemo` which represents MEMO_ID in a transaction.
:param int memo_id: A 64 bit unsigned integer.
:raises:
:exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``id`` is not a valid id memo.
"""
def __init__(self, memo_id: int) -> None:
if memo_id < 0 or memo_id > 2 ** 64 - 1:
raise MemoInvalidException(
"IdMemo is an unsigned 64-bit integer and the max valid value is 18446744073709551615."
)
self.memo_id: int = memo_id
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "IdMemo":
"""Returns an :class:`IdMemo` object from XDR memo object."""
assert xdr_object.id is not None
return cls(xdr_object.id.uint64)
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`IdMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_ID, id=stellar_xdr.Uint64(self.memo_id)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return self.memo_id == other.memo_id
def __str__(self):
return f"<IdMemo [memo={self.memo_id}]>"
class HashMemo(Memo):
"""The :class:`HashMemo` which represents MEMO_HASH in a transaction.
:param memo_hash: A 32 byte hash hex encoded string.
:raises: :exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``memo_hash`` is not a valid hash memo.
"""
def __init__(self, memo_hash: Union[bytes, str]) -> None:
memo_hash = hex_to_bytes(memo_hash)
length = len(memo_hash)
if length != 32:
raise MemoInvalidException(
f"The length of HashMemo should be 32 bytes, got {length} bytes."
)
self.memo_hash: bytes = memo_hash # type: ignore[assignment]
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "HashMemo":
"""Returns an :class:`HashMemo` object from XDR memo object."""
assert xdr_object.hash is not None
return cls(xdr_object.hash.hash)
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`HashMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_HASH, hash=stellar_xdr.Hash(self.memo_hash)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return self.memo_hash == other.memo_hash
def __str__(self):
return f"<HashMemo [memo={self.memo_hash}]>"
class ReturnHashMemo(Memo):
"""The :class:`ReturnHashMemo` which represents MEMO_RETURN in a transaction.
MEMO_RETURN is typically used with refunds/returns over the network - it is
a 32 byte hash intended to be interpreted as the hash of the transaction
the sender is refunding.
:param memo_return: A 32 byte hash or hex encoded string intended to be interpreted as the
hash of the transaction the sender is refunding.
:raises: :exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``memo_return`` is not a valid return hash memo.
"""
def __init__(self, memo_return: Union[bytes, str]) -> None:
memo_return = hex_to_bytes(memo_return)
length = len(memo_return)
if length != 32:
raise MemoInvalidException(
f"The length of ReturnHashMemo should be 32 bytes, got {length} bytes."
)
self.memo_return: bytes = memo_return # type: ignore[assignment]
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "ReturnHashMemo":
"""Returns an :class:`ReturnHashMemo` object from XDR memo object."""
assert xdr_object.ret_hash is not None
return cls(xdr_object.ret_hash.hash)
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`ReturnHashMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_RETURN,
ret_hash=stellar_xdr.Hash(self.memo_return),
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return self.memo_return == other.memo_return
def __str__(self):
return f"<ReturnHashMemo [memo={self.memo_return}]>"
|
[
"quit5123@gmail.com"
] |
quit5123@gmail.com
|
7149333885f89e42070a2bb81b06de54af1b288f
|
da29f1f5b4459fbfec968bb694bedb9586f87b14
|
/new_algs/Number+theoretic+algorithms/Multiplication+algorithms/PrimeMultiplicationTable.py
|
563619a279e1a80bcdeddb9bef091618db4481e4
|
[] |
no_license
|
coolsnake/JupyterNotebook
|
547806a45a663f090f313dc3e70f779ad9b213c0
|
20d8df6172906337f81583dabb841d66b8f31857
|
refs/heads/master
| 2023-01-13T18:55:38.615312
| 2020-11-17T22:55:12
| 2020-11-17T22:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,785
|
py
|
import math
class PrimeMultiplicationTable(object):
def get_primes_3(self, num):
"""
Time Complexity = O(N) Space = O(N)
"""
if num <= 0:
return []
if num == 1:
return [2]
size = self.prime_bound(num)
res = []
count = 0
is_prime = [True]*size
is_prime[0] = False
is_prime[1] = False
for i in range(2, size): # range (not Py2-only xrange), consistent with the print() calls below
if is_prime[i]:
res.append(i)
count += 1
if count == num:
break
for j in range(0, count):
if i*res[j] >= size:
break
is_prime[i*res[j]] = False
if i%res[j] == 0:
break
return res
def get_primes_2(self, num):
"""
Time Complexity = O(NloglogN) Space = O(N)
"""
if num <= 0:
return []
if num == 1:
return [2]
size = self.prime_bound(num)
is_prime = [True]*size
is_prime[0] = False
is_prime[1] = False
sqrt_size = int(math.sqrt(size))+1
for i in range(2, sqrt_size):
if is_prime[i]:
for j in range(i*i, size, i):
is_prime[j] = False
res = []
count = 0
for j in range(0, size):
if is_prime[j]:
res.append(j)
count += 1
if count == num:
break
return res
def get_primes_1(self, num):
"""
Time Complexity < O(n^1.5) Space = O(1)
"""
if num <= 0:
return []
if num == 1:
return [2]
res = [2]
count = 1
target = 3
while count < num:
is_prime = True
for prime in res:
if prime > int(math.sqrt(target)):
break
if target % prime == 0:
is_prime = False
break
if is_prime:
res.append(target)
count += 1
target += 2
return res
def prime_bound(self, num):
"""
Approximate upper bound of the value of the nth prime
"""
if num <= 10:
size = 30
else:
factor = 1.3
size = int(num*math.log(num, math.e)*factor)
return size
def get_primes(self, num):
return self.get_primes_3(num)
def print_row(self, nums, name, width):
items = map(str, nums)
row = '{0: >{width}} |'.format(name, width = width + 1)
for item in items:
row += '{0: >{width}}'.format(item, width = width + 1)
print (row)
def print_cutting_line(self, length, width):
print ("-"*(length+2)*(width + 1))
def generate_prime_table(self, num):
"""
Generate the prime table with dynamic col widths
"""
if num <= 0 or num is None:
print ("the table is empty")
return
primes = self.get_primes(num)
# Dynamically calculate the maximum col width
size = self.prime_bound(num)
max_digits = len(str(size)) * 2
# Print the header row
self.print_row(primes, " "*max_digits, max_digits)
self.print_cutting_line(len(primes), max_digits)
# Print the multiplication table
for x in primes:
row = []
for y in primes:
row.append(x*y)
self.print_row(row, x, max_digits)
if __name__ == "__main__":
prime_multiplication = PrimeMultiplicationTable()
prime_multiplication.generate_prime_table(10)
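# --- expected behaviour (illustrative, not part of the original):
# prime_multiplication.get_primes(5) -> [2, 3, 5, 7, 11], and
# generate_prime_table(10) prints a 10x10 table whose (i, j) cell is the
# product of the i-th and j-th primes, with column widths sized from prime_bound().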
|
[
"chenqh@uci.edu"
] |
chenqh@uci.edu
|
6547e8279bbd5efdc686705d8244aa68569c4fbd
|
87112170096f33d0bb4426ad07fb038caf0513ac
|
/node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi
|
ecd8b58fe4ad058b024d9c891b26e4769f797809
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
nikithav/MusicLibrary
|
bd91d0f925c52e5b03072a080275dca0cc7ca6fb
|
472482d5001571d53a6a4b020b3704bee14e4f71
|
refs/heads/master
| 2020-03-27T22:35:33.864619
| 2016-07-19T20:16:13
| 2016-07-19T20:16:13
| 63,095,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,175
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 49,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/srikar/.node-gyp/0.10.33",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.28 node/v0.10.33 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/srikar/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/srikar/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "2",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "0.10.33",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/srikar/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}
|
[
"nvenkann@kent.edu"
] |
nvenkann@kent.edu
|
19859afd50d88112c2b7d43a86fdbd7d638132ed
|
3e7a0ee19fa6331e09c9c0bf3b1faffbc3ae45e1
|
/audioproc/hoa.py
|
2a184fd31336faf0abca78f5e2f158f8e59fd3cf
|
[
"MIT"
] |
permissive
|
penrin/audioproc
|
105084105f9ba1c75ecf8b0f7821d0d411d679d1
|
ac3df5015d87f2a1e2a7a86ac7f5b75ae8314c03
|
refs/heads/master
| 2020-12-07T13:32:08.798438
| 2020-11-10T01:51:23
| 2020-11-10T01:51:23
| 95,565,116
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,075
|
py
|
import sys
import numpy as np
import audioproc as ap
from scipy.special import spherical_jn, spherical_yn, sph_harm
def acn_index(N):
'''
ACN ordering
n: order, m: degree
'''
L = (int(np.floor(N)) + 1) ** 2
n_list = np.empty(L, dtype=np.int16)
m_list = np.empty(L, dtype=np.int16)
i = 0
for n in range(N + 1):
for m in range(-n, n + 1):
#print(n, m)
n_list[i] = n
m_list[i] = m
i += 1
return n_list, m_list
def hv_index(H, V):
'''
return n & m of #H#V Mixed-order Ambisonics
Chris Travis, "A New Mixed-order Scheme for Ambisonics signals",
Ambisonics Symposium 2009
'''
n_tmp, m_tmp = acn_index(H)
v = n_tmp - np.abs(m_tmp)
i = np.where(v <= V)[0]
n_list = np.copy(n_tmp[i])
m_list = np.copy(m_tmp[i])
return n_list, m_list
def sph_harm_realvalued(m, n, theta, phi):
if m < 0:
Y = np.sqrt(2) * (-1) * np.imag(sph_harm(m, n, theta, phi))
elif m == 0:
Y = np.real(sph_harm(m, n, theta, phi))
elif m > 0:
Y = np.sqrt(2) * (-1) ** int(m) * np.real(sph_harm(m, n, theta, phi))
return Y
def spherical_hn1(n, z):
return spherical_jn(n, z) + 1j * spherical_yn(n, z)
def spherical_hn2(n, z):
return spherical_jn(n, z) - 1j * spherical_yn(n, z)
class EncodeMatrix:
def setup_micarray(self, x, y, z, alpha=1):
self.r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
self.theta = np.arctan2(y, x)
self.phi = np.arctan2(np.sqrt(x ** 2 + y ** 2), z)
self.alpha = alpha
return
def hoa_encodematrix(self, order, wavenum):
n, m = acn_index(order)
return self.encodematrix(n, m, wavenum)
def hv_encodematrix(self, H, V, wavenum):
n, m = hv_index(H, V)
return self.encodematrix(n, m, wavenum)
def encodematrix(self, n, m, wavenum):
print('Calc. encode matrix')
# reshape
r_ = self.r.reshape(-1, 1, 1)
theta_ = self.theta.reshape(-1, 1, 1)
phi_ = self.phi.reshape(-1, 1, 1)
n_ = n.reshape(1, -1, 1)
m_ = m.reshape(1, -1, 1)
k_ = np.array(wavenum).reshape(1, 1, -1)
# spherical bessel function matrix
if (self.alpha == np.array([1])).all():
J = spherical_jn(n_, k_ * r_)
else:
J = self.alpha * spherical_jn(n_, k_ * r_)\
- 1.j * (1 - self.alpha)\
* spherical_jn(n_, k_ * r_, derivative=True)
# Spherical function matrix
Y = np.empty([r_.shape[0], n_.shape[1]], dtype=float) # np.float was removed in NumPy 1.24; builtin float is equivalent
for i in range(len(m)):
Y[:, i] = sph_harm_realvalued(m[i], n[i], self.theta, self.phi)
Y = Y.reshape(Y.shape[0], Y.shape[1], 1)
# Encoding matrix
JY = J * Y
Enc = np.empty([JY.shape[1], JY.shape[0], JY.shape[2]], dtype=JY.dtype)
for i in range(JY.shape[2]):
ap.progressbar(i, JY.shape[2])
Enc[:, :, i] = np.linalg.pinv(JY[:, :, i])
ap.progressbar(1)
return Enc
class DecodeMatrix:
def setup_loudspeakerarray(self, x, y, z):
self.r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
self.theta = np.arctan2(y, x)
self.phi = np.arctan2(np.sqrt(x ** 2 + y ** 2), z)
return
def decodematrix(self, n, m, wavenum, nearfieldmodel=False):
if nearfieldmodel:
Dec = self._decodematrix_nearfield(n, m, wavenum)
else:
Dec = self._decodematrix_planewave(n, m, wavenum)
return Dec
def hoa_decodematrix(self, order, wavenum, nearfieldmodel=False):
n, m = acn_index(order)
return self.decodematrix(n, m, wavenum, nearfieldmodel)
def hv_decodematrix(self, H, V, wavenum, nearfieldmodel=False):
n, m = hv_index(H, V)
return self.decodematrix(n, m, wavenum, nearfieldmodel)
def _decodematrix_planewave(self, n, m, wavenum):
print('Calc. decode matrix (plane wave model)')
print('nearfieldmodel=False is not yet supported')
return
def _decodematrix_nearfield(self, n, m, wavenum):
print('Calc. decode matrix (near field model)')
# reshape
r_ = self.r.reshape(1, -1, 1)
theta_ = self.theta.reshape(1, -1, 1)
phi_ = self.phi.reshape(1, -1, 1)
n_ = n.reshape(-1, 1, 1)
m_ = m.reshape(-1, 1, 1)
k_ = np.array(wavenum).reshape(1, 1, -1)
# Decoding matrix
Y = np.empty([n_.shape[0], r_.shape[1]], dtype=float) # see note above on np.float
for i in range(len(m)):
Y[i, :] = sph_harm_realvalued(m[i], n[i], self.theta, self.phi)
Y = Y.reshape(Y.shape[0], Y.shape[1], 1)
H = spherical_hn2(n_, k_ * r_)
C = 1j * k_ * H * Y
Dec = np.empty([C.shape[1], C.shape[0], C.shape[2]], dtype=C.dtype)
for i in range(C.shape[2]):
ap.progressbar(i, C.shape[2])
Dec[:, :, i] = np.linalg.pinv(C[:, :, i])
ap.progressbar(1)
return Dec
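# --- usage sketch (illustrative; the array geometry below is made up) ---
# import numpy as np
# x = np.array([0.05, -0.05]); y = np.zeros(2); z = np.zeros(2)
# enc = EncodeMatrix()
# enc.setup_micarray(x, y, z)                # Cartesian mic positions; alpha defaults to 1
# k = 2 * np.pi * np.array([500.0]) / 343.0  # wavenumbers for 500 Hz at c = 343 m/s
# E = enc.hoa_encodematrix(1, k)             # shape: (coefficients, mics, len(k))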
|
[
"hrs@penr.in"
] |
hrs@penr.in
|
c1cfc0961e408dded2fd95b998843ccb855de9c9
|
dfb63403ca20927eda6c2215acd9039f7a3afcc8
|
/wsl_distro_build/build_distro.py
|
b9203504ffc8b61806cbaa764f1fbbd311833c83
|
[] |
no_license
|
TUM-Core-Facility-Microbiome/ngstoolkit
|
d511d9e690ef2dc51691c678a73443dd4c285458
|
b333f9396266548b6af67be307f1eec388838ab8
|
refs/heads/main
| 2023-04-18T22:26:07.712478
| 2021-10-08T07:35:54
| 2021-10-08T07:35:54
| 380,019,157
| 1
| 1
| null | 2021-09-24T08:15:30
| 2021-06-24T18:35:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
import os
import pathlib
from wiesel.wsl_distributions import Dockerfile, DistributionTarFile
def build_ngstoolkit_wsl_distro(distro_name: str, ngstoolkit_version: str):
"""
Build the ngstoolkit WSL distribution.
This requires docker.
:param distro_name: name for distro
:param ngstoolkit_version: version number that will be saved in /usr/local/bin/wsl_distro_version.txt
:return:
"""
distro_from_dockerfile = Dockerfile(
dockerfile_path=os.path.join(pathlib.Path(__file__).parent.absolute(), "build-context", "Dockerfile"),
docker_context_path=os.path.join(pathlib.Path(__file__).parent.absolute(), "build-context"),
distribution_name=distro_name,
install_location=".",
version=2,
build_args={'ngstoolkit_version': ngstoolkit_version}
)
distro = distro_from_dockerfile.build(force=True)
if distro:
print(f"Successfully registered a WSL distribution named {distro.name!r}.")
def export(distro_name: str, ngstoolkit_version: str, remove_image: bool = False):
distro_from_dockerfile = Dockerfile(
dockerfile_path=os.path.join(pathlib.Path(__file__).parent.absolute(), "build-context", "Dockerfile"),
docker_context_path=os.path.join(pathlib.Path(__file__).parent.absolute(), "build-context"),
distribution_name=distro_name,
install_location=".",
version=2,
build_args={'ngstoolkit_version': ngstoolkit_version}
)
distro_from_dockerfile.build_tar_file(f"{distro_name}.tar", remove_image)
def import_from_tar(distro_name: str, tar_file_path: str):
distro_from_tar = DistributionTarFile(
distribution_name=distro_name,
tar_file=tar_file_path,
install_location=".",
version=2
)
distro = distro_from_tar.build(force=True)
if distro:
print(f"Successfully imported a WSL distribution named {distro.name!r}.")
|
[
"zsewa@outlook.de"
] |
zsewa@outlook.de
|
63d9898c8d7855db94d9afdddf237c38f1cc1a3b
|
264a1b67473cf734224fd3aefa6893ce2ff3fffc
|
/driver/art_driver/src/art.py
|
d3f64ac624e8d830f317c44bb773539540ad42b3
|
[] |
no_license
|
ChenZhiqiang12138/rrooss
|
b7b1e4d5658006afa4ebb861cbdfaeea73467df8
|
fa02a11328e7608e47b1c65bcd10b7f786b8ed12
|
refs/heads/master
| 2020-05-21T06:31:00.292237
| 2019-05-10T08:37:01
| 2019-05-10T08:37:01
| 185,946,925
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import threading
from ctypes import *
def fun_timer():
global timer
timer = threading.Timer(0.05, fun_timer)
timer.start()
lib.send_cmd(vel,angle)
if __name__=="__main__":
vel = 1500
angle = 1500
lib_path = os.path.abspath(os.path.join(os.getcwd(), "..")) + "/lib"+ "/libart_driver.so"
so = cdll.LoadLibrary
lib = so(lib_path)
#print lib
try:
car = "/dev/ttyUSB0"
if lib.art_racecar_init(38400, car) < 0:
raise IOError("art_racecar_init failed on %s" % car) # a bare 'raise' outside an except block is invalid
timer = threading.Timer(0.05, fun_timer)
timer.start()
while(1):
pass
except:
print "error"
finally:
print "finally"
|
[
"noreply@github.com"
] |
ChenZhiqiang12138.noreply@github.com
|
b0ee4ba7b95a7524a13ef8e3a388ffa639062ad5
|
2cffe2b1ccef3909f88445a2b1f408e57d051721
|
/apis/urls.py
|
ec3271511bab43218a13119ef34a9e5ea6fa93f9
|
[] |
no_license
|
jfarriagada/estacionamiento
|
80da7a298b9ef4fc837fff4c367ee68905cee16b
|
e518ad4917f2212318d8fa3905807b15d1b7f015
|
refs/heads/master
| 2021-01-11T19:45:30.715323
| 2017-01-18T22:27:23
| 2017-01-18T22:27:23
| 79,386,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
from django.conf.urls import url, include
from rest_framework import routers
from apis import views
router = routers.DefaultRouter()
# A single registration is enough: DefaultRouter derives both the
# 'parking-list' and 'parking-detail' URL names from base_name, so the
# duplicate register() call for the detail route was redundant.
router.register(r'parking', views.ParkingViewSet, base_name='parking')
urlpatterns = [
url(r'^api-', include(router.urls)),
]
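# --- resulting endpoints (illustrative, derived from the '^api-' prefix above):
# GET/POST  /api-parking/        -> ParkingViewSet list / create
# GET/PUT/PATCH/DELETE  /api-parking/<pk>/  -> ParkingViewSet detail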
|
[
"farriagada@MacBook-Pro-de-francisco.local"
] |
farriagada@MacBook-Pro-de-francisco.local
|
e79a3615b117d1d9c4adf31d8dfddca43c51eb47
|
6a15ca69993b6db29f8c8f0213ff17e8a4d8b65f
|
/finances/tools/create_credit_cards.py
|
7108af50c1950e93eab38cc84ef3d2f31bf8f102
|
[] |
no_license
|
julieqiu/python-finances
|
aa84de5bcf3dd48fce2b99bd4a63fbe0c4b2acfe
|
9e3223ba7e7927f9cceff8b4b331a8781decd78d
|
refs/heads/master
| 2020-03-23T05:49:58.904674
| 2019-01-11T01:25:52
| 2019-01-11T01:25:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
from recipes.database import db_session
from recipes.models import Blog
HOST_DOMESTICATE_ME = 'https://www.domesticate-me.com' # was CHASE_RESERVE_CC, a name the Blog entries below never reference
HOST_FOOD_FAITH_FITNESS = 'https://www.foodfaithfitness.com'
HOST_ORGANIZE_YOURSELLF_SKINNY = 'https://www.organizeyourselfskinny.com'
HOST_ANDIE_MITCHELL = 'https://www.andiemitchell.com'
BLOGS = [
Blog(
id=1,
host=HOST_DOMESTICATE_ME,
seed='{}/recipes-2'.format(HOST_DOMESTICATE_ME),
categories_root='{}/recipes-2'.format(HOST_DOMESTICATE_ME),
recipes_root=HOST_DOMESTICATE_ME,
),
Blog(
id=2,
host=HOST_FOOD_FAITH_FITNESS,
seed=HOST_FOOD_FAITH_FITNESS,
categories_root='{}/category'.format(HOST_FOOD_FAITH_FITNESS),
recipes_root=HOST_FOOD_FAITH_FITNESS,
),
Blog(
id=3,
host=HOST_ORGANIZE_YOURSELLF_SKINNY,
seed='{}/category/recipes'.format(HOST_ORGANIZE_YOURSELLF_SKINNY),
categories_root='{}/category'.format(HOST_ORGANIZE_YOURSELLF_SKINNY),
recipes_root='{}/201'.format(HOST_ORGANIZE_YOURSELLF_SKINNY),
),
Blog(
id=4,
host=HOST_ANDIE_MITCHELL,
seed='{}/category/recipes'.format(HOST_ANDIE_MITCHELL),
categories_root='{}/category/recipes'.format(HOST_ANDIE_MITCHELL),
recipes_root=HOST_ANDIE_MITCHELL,
),
]
def main():
with db_session() as session:
hosts = {blog.host for blog in session.query(Blog).all()}
for blog in BLOGS:
if blog.host not in hosts:
print('Creating blog for {}'.format(blog.host))
session.add(blog)
session.commit()
if __name__ == '__main__':
main()
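# --- behaviour note (illustrative, not part of the original):
# main() is idempotent: hosts already present in the Blog table are skipped,
# so rerunning the script only inserts the blogs that are missing.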
|
[
"julieyeqiu@gmail.com"
] |
julieyeqiu@gmail.com
|
f52e79a4c2596ca8c98360f6b9ea1cb4eb97f092
|
110907fd9804a46992123cf1a88326c87f136a1d
|
/pc/z64porter/z64lib.py
|
1ac2817556310acc7295f1470facd1d6fbce24be
|
[] |
no_license
|
Mooliecool/z64
|
7835ea813cc44ae2b8b23d0c99e5873bccf081dc
|
9c1216a2450c1aae263199e047782f4d3268cc03
|
refs/heads/master
| 2020-05-20T16:46:23.879011
| 2015-04-02T23:51:05
| 2015-04-02T23:51:05
| 33,337,193
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,947
|
py
|
from struct import unpack
#When editing scene maximums, the two are located below:
MAX_MM_SCENE = 105
MAX_OOT_SCENE = 109
# Scene max values ^---HERE
MM_SCENE_FMT = ">LLLL"
OOT_SCENE_FMT = ">LLLLL"
MM_SCENE_TABLE = 0xC5A250
OOT_SCENE_TABLE = 0xBA0BB0
BAD_MM_SCENES = [ 2, 7, 8, 51, 42 ]
oot_to_mm_acts = {
# OoT : MM
0x0000 : 0x0000, #Link
0x0008 : 0x0004, #Flame
0x0009 : 0x0005, #Door
0x0007 : 0x000F, #Dissipating flames
0x000A : 0x0006, #Chest
0x0010 : 0x0009, #Bomb
0x0011 : 0x000A, #Wallmaster
0x0012 : 0x000B, #Dodongo
0x0013 : 0x000C, #Keese
0x0014 : 0x0054, #Epona
0x0015 : 0x000E, #Collectables
0x0018 : 0x0010, #Fairies
0x0019 : 0x0011, #Cucco
0x001B : 0x0012, #Tektite
0x001C : 0x0013, #???
0x001D : 0x0014, #Peahat
0x001E : 0x0015, #Butterfly
0x0020 : 0x0016, #Bugs
0x0021 : 0x0017, #Fish
0x0023 : 0x0018, #Room changing plane
0x0025 : 0x0019, #Dinolfos/Lizafos
0x0026 : 0x001A, #Wooden post with red cloth
0x0029 : 0x0152, #Zelda
0x002D : 0x001D, #Bubble
0x002E : 0x001E, #Studded lifting door
# 0x0032 : 0x0020, #Boomerang (duplicate key: the entry below would override it)
0x0032 : 0x0022, #???
0x0037 : 0x0024, #Skulltula
0x0039 : 0x0027, #gameplay_keep stuffs
0x003B : 0x0028, #sounds
0x0049 : 0x0162, #Flame circle
0x004C : 0x002F, #Bombflowers
0x0055 : 0x0033, #Deku baba
0x005D : 0x0038, #Warp portals
0x005E : 0x0039, #Torch stand
0x005F : 0x003A, #Heart container
0x0060 : 0x003B, #Deku scrub
0x0065 : 0x003F, #Water
0x0068 : 0x0061, #Twisted hallway
#NOTE: oot one requires objects 0x71, 0x72 and 0x73 to be loaded
0x0069 : 0x003E, #Bubble (bouncing skull)
0x0077 : 0x0041, #Tree
0x008A : 0x0047, #Beamos
0x008D : 0x0172, #Flame Wall
0x008E : 0x004A, #Floormaster
0x0090 : 0x004C, #ReDead
0x0094 : 0x004F, #Butterflies (again)
0x0095 : 0x0050, #Skullwalltula
0x009D : 0x01E3, #Gravestone
0x00A1 : 0x0069, #Ruto
0x00B0 : 0x00A7, #I'M ON A BOAT
0x00B5 : 0x018E, #Flying rubble
#Zeth's:
0x00DD : 0x006C, #Like Like
0x0167 : 0x00bd, #Kakariko Roof Guy
0x0153 : 0x0248, #Music Box Grinder man
0x0162 : 0x017d, #Runningman/postman
0x019B : 0x00e2, #Doggie
0x01B9 : 0x00ef, #Gossip Stones
0x0178 : 0x01C7, #Hyrule/Town guard oot var 0000
0x0142 : 0x01C7, #Hyrule/Town guard oot var 0001
0x00B3 : 0x01C7, #Hyrule/Town guard
0x01CE : 0x0228, #Zora MM:normal oot var 0000
0x0186 : 0x00FA, #Walking Gerudo Guards oot Var < 3
0x01AE : 0x0242, #Gorons oot var < 0xD
0x01AF : 0x00EC, #Wolfos
0x01C0 : 0x00F1 #Guay
}
mm_to_oot_acts = {
# MM : OoT
0x0000 : 0x0000, #Link
0x0004 : 0x0008, #Flame
0x0005 : 0x0009, #Door
0x000F : 0x0007, #Dissipating flames
0x0006 : 0x000A, #Chest
0x0009 : 0x0010, #Bomb
0x000A : 0x0011, #Wallmaster
0x000B : 0x0012, #Dodongo
0x000C : 0x0013, #Keese
0x0054 : 0x0014, #Epona
0x000E : 0x0015, #Collectables
0x0010 : 0x0018, #Fairies
0x0011 : 0x0019, #Cucco
0x0012 : 0x001B, #Tektite
0x0013 : 0x001C, #???
0x0014 : 0x001D, #Peahat
0x0015 : 0x001E, #Butterfly
0x0016 : 0x0020, #Bugs
0x0017 : 0x0021, #Fish
0x0018 : 0x0023, #Room changing plane
0x0019 : 0x0025, #Dinolfos/Lizafos
0x001A : 0x0026, #Wooden post with red cloth
0x0152 : 0x0029, #Zelda
0x001D : 0x002D, #Bubble
# 0x000E : 0x002E, #Studded lifting door
0x001E : 0x002E, #Studded lifting door
0x0020 : 0x0032, #Boomerang
0x0022 : 0x0032, #???
0x0024 : 0x0037, #Skulltula
0x0027 : 0x0039, #gameplay_keep stuffs
0x0028 : 0x003B, #sounds
0x0162 : 0x0049, #Flame circle
0x002F : 0x004C, #Bombflowers
0x0033 : 0x0055, #Deku baba
0x0038 : 0x005D, #Warp portals
0x0039 : 0x005E, #Torch stand
0x003A : 0x005F, #Heart container
0x003B : 0x0060, #Deku scrub
0x003F : 0x0065, #Water
0x0061 : 0x0068, #Twisted hallway
0x003E : 0x0069, #Bubble (bouncing skull)
0x0041 : 0x0077, #Tree
0x0047 : 0x008A, #Beamos
0x0172 : 0x008D, #Flame Wall
0x004A : 0x008E, #Floormaster
0x004C : 0x0090, #ReDead
0x004F : 0x0094, #Butterflies (again)
0x0050 : 0x0095, #Skullwalltula
0x01E3 : 0x009D, #Gravestone
0x0069 : 0x00A1, #Ruto
0x00A7 : 0x00B0, #I'M ON A BOAT
0x018E : 0x00B5, #Flying rubble
0x01DA : 0x0090, #Gibodo (Use oot var -2 )
0x0235 : 0x0090, #Gibodos (Use oot var -2)
0x00ED : 0x01B0, #Stalchild
0x02A5 : 0x01B0, #Stalchild
0x0191 : 0x0115, #Skullkid
0x00E4 : 0x019E, #Beehive
0x00E5 : 0x01A0, #Crate
0x00E9 : 0x01AC, #Honey and darling
0x0105 : 0x0054, #Armos
0x0110 : 0x008D, #Firewall
0x028D : 0x01A8, #Cracked wall
0x0298 : 0x01A9, #cracked wall
0x0255 : 0x00CF, #cracked wall
0x0258 : 0x015B, #cracked wall
0x00F3 : 0x01C6, #Cow
0x0220 : 0x00E7, #Cremia/Child malon
0x01A4 : 0x01C5, #Malon/Romani oot var FFFF
0x021F : 0x01C5, #Malon/Romani(guess) oot var FFFF
0x02AF : 0x0112, #invisible collectibles
0x0066 : 0x00C7, #Withered Deku Baba
0x011F : 0x01C7, #Icicles
0x012D : 0x0055, #Bio deku baba
0x01BD : 0x011A, #Deku salesman
0x0274 : 0x011A, #Deku salesman
0x026E : 0x0142, #Guard
0x01EE : 0x019B, #racing dog
0x01F1 : 0x0021, #Laboratory fish
0x01F3 : 0x000D, #Poe
0x0208 : 0x006D, #Big poe
0x01CA : 0x0085, #Dampe
0x008F : 0x0121, #Freezard
0x0216 : 0x001C, #leever
#Zeth's:
0x006C : 0x00DD, #Like Like
0x00bd : 0x0167, #Kakariko Roof Guy
0x0248 : 0x0153, #Music Box Grinder man
0x017d : 0x0162, #Runningman/postman
0x00e2 : 0x019B, #Doggie
0x00ef : 0x01B9, #Gossip Stones
0x01C7 : 0x0142, #Hyrule/Town guard oot var 0001
0x00F8 : 0x01CE, #Zora MM:Swimming oot var 0000
0x0228 : 0x01CE, #Zora MM:normal oot var 0000
0x0231 : 0x01CE, #Zora MM:Guitarist oot var 0000
0x0238 : 0x01CE, #Zora MM:Drummer oot var 0000
0x0241 : 0x01CE, #Zora MM:Pianist oot var 0000
0x0252 : 0x01CE, #Zora MM:Singer oot var 0000
0x0260 : 0x01CE, #Zora MM:Swimming oot var 0000
0x00FA : 0x0186, #Walking Gerudo Guards oot Var < 3
0x0242 : 0x01AE, #Gorons oot var < 0xD
0x00EC : 0x01AF, #Wolfos
0x00F1 : 0x01C0 #Guay
}
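# Hypothetical helper (not in the original file): translate an MM actor number
# to its OoT equivalent via the table above, falling back to the input value
# when no pairing is known.
def mm_act_to_oot(mm_act):
    return mm_to_oot_acts.get(mm_act, mm_act)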
oot_to_mm_objs = {
# OoT : MM
0x000E : 0x000C, #Chests
0x000B : 0x0009, #Wallmaster
0x000C : 0x000A, #Dodongo
0x000D : 0x000B, #Keese
0x001A : 0x007D, #Epona
0x0013 : 0x000F, #Cucco
0x0016 : 0x0012, #Tektite
0x0017 : 0x0013, #???
0x0018 : 0x0014, #Peahat
0x001B : 0x0017, #Dinolfos/Lizafos
0x0076 : 0x005F, #Wooden post with red cloth
0x001D : 0x014B, #Zelda
0x0012 : 0x000E, #Bubble
0x0022 : 0x00BC, #???
0x0024 : 0x0020, #Skulltula
0x0031 : 0x002A, #bombflowers
0x0039 : 0x0031, #deku baba
0x0048 : 0x003E, #Warp portals
0x00A4 : 0x0080, #Torch stand
0x00BD : 0x0096, #Heart container
0x004A : 0x0040, #Deku scrub
0x0059 : 0x017E, #Water
0x0070 : 0x0088, #Twisted hallway
0x005D : 0x0051, #Bubble (bouncing skull)
0x007C : 0x0061, #Tree
0x008B : 0x006A, #Beamos
0x002C : 0x0153, #Flame Wall
0x000B : 0x0009, #Floormaster
0x0098 : 0x0075, #ReDead
0x0024 : 0x0020, #Skullwalltula
0x00A2 : 0x01C2, #Gravestone
0x00A3 : 0x00A2, #Ruto
0x0069 : 0x017F, #I'M ON A BOAT
0x0092 : 0x018D, #Flying rubble
0x00D4 : 0x00AB, #Like like
0x00EC : 0x00C2, #Kakariko roof guy
0x0133 : 0x00FF, #windmill man
0x013C : 0x0107, #Runningman/postman
0x016B : 0x0132, #Dog
0x0188 : 0x0143, #Gossip stones
0x0097 : 0x01B5, #Guards
0x00FE : 0x00D0, #Zora (Swimming and normal)
0x0167 : 0x0130, #Gerudo walkers
0x00C9 : 0x00A1, #Gorons
0x0183 : 0x0141, #Wolfos
0x0008 : 0x0006 #Guay
}
mm_to_oot_objs = {
# MM : OoT
0x000C : 0x000E, #Chests
0x0009 : 0x000B, #Wallmaster
0x000A : 0x000C, #Dodongo
0x000B : 0x000D, #Keese
0x007D : 0x001A, #Epona
0x000F : 0x0013, #Cucco
0x0012 : 0x0016, #Tektite
0x0013 : 0x0017, #???
0x0014 : 0x0018, #Peahat
0x0017 : 0x001B, #Dinolfos/Lizafos
0x005F : 0x0076, #Wooden post with red cloth
0x014B : 0x001D, #Zelda
0x000E : 0x0012, #Bubble
0x00BC : 0x0022, #???
0x0020 : 0x0024, #Skulltula
0x002A : 0x0031, #bombflowers
0x0031 : 0x0039, #deku baba
0x003E : 0x0048, #Warp portals
0x0080 : 0x00A4, #Torch stand
0x0096 : 0x00BD, #Heart container
0x0040 : 0x004A, #Deku scrub
0x017E : 0x0059, #Water
0x0088 : 0x0070, #Twisted hallway
0x0051 : 0x005D, #Bubble (bouncing skull)
0x0061 : 0x007C, #Tree
0x006A : 0x008B, #Beamos
0x0153 : 0x002C, #Flame Wall
0x0009 : 0x000B, #Floormaster
0x0075 : 0x0098, #ReDead
0x0020 : 0x0024, #Skullwalltula
0x01C2 : 0x00A2, #Gravestone
0x00A2 : 0x00A3, #Ruto
0x017F : 0x0069, #I'M ON A BOAT
0x018D : 0x0092, #Flying rubble
0x00AB : 0x00D4, #Like like
0x00C2 : 0x00EC, #Kakariko roof guy
0x00FF : 0x0133, #windmill man
0x0107 : 0x013C, #Runningman/postman
0x0132 : 0x016B, #Dog
0x0143 : 0x0188, #Gossip stones
0x01B5 : 0x0097, #Guards
0x00D0 : 0x00FE, #Zora (Swimming and normal)
0x0211 : 0x00FE, #Zora (guitarist)
0x0216 : 0x00FE, #Zora (drummer)
0x0220 : 0x00FE, #Zora (Pianist)
0x022B : 0x00FE, #Zora (Singer)
0x0130 : 0x0167, #Gerudo walkers
0x00A1 : 0x00C9, #Gorons
0x0141 : 0x0183, #Wolfos
0x0006 : 0x0008, #Guay
0x0142 : 0x0184, #Stalchildren
0x0192 : 0x010A, #Skullkid
0x01B9 : 0x0002, #beehive
0x0133 : 0x0170, #Crate
0x0140 : 0x0182, #Honey and darling
0x0030 : 0x0038, #Armos
0x0153 : 0x002C, #Firewall
0x0267 : 0x0074, #Cracked wall
0x0234 : 0x00B1, #Cracked wall
0x0203 : 0x002C, #cracked wall
0x01E0 : 0x00F1, #cracked wall
0x0146 : 0x018B, #Cow
0x00A7 : 0x00E0, #Cremia
0x00B7 : 0x00D0, #Malon/Romani
0x0031 : 0x0039, #Withered Deku Baba
0x0157 : 0x006B, #Icicles
0x015E : 0x0039, #Bio deku baba
0x01E5 : 0x0168, #Deku salesman
0x01B6 : 0x0097, #Guard
0x01C3 : 0x0009, #Poe
0x01F1 : 0x006D, #Big poe
0x01AF : 0x0089, #Dampe
0x00E4 : 0x0114, #Freezard
0x0201 : 0x0017 #leever
}
ActorTypes = {
0 : "type 0",
1 : "1 (Prop)",
2 : "2 (Link?)",
3 : "3 (Bomb)",
4 : "4 (NPC)",
5 : "5 (Enemy)",
6 : "6 (Prop)",
7 : "7 (Item/Action)",
8 : "8 (Miscellaneous)",
9 : "9 (Boss)",
10 : "type 10",
11 : "11 (Door?)",
12 : "type 12",
13 : "type 13",
14 : "type 14",
15 : "type 15" }
def mkMapActor(endianess, num = 0, x = 0, y = 0, z = 0, xr = 0, yr = 0, zr = 0, var = 0):
    return pack("%sHhhhhhhH" % endianess, num, x, y, z, xr, yr, zr, var)
def GenActorEntryFmt(endianess):
"""Returns FMT strings to be used with struct to unpack actor file pointers"""
return "%sLLLLxxxxLLxxxx" % endianess
def GenSceneEntryFmt(endianess):
"""Returns FMT strings to be used with struct to unpack scene file pointers"""
return "%sLLLLL" % endianess
def GenObjectEntryFmt(endianess):
"""Returns FMT strings to be used with struct to unpack object file pointers"""
return "%sLL" % endianess
def GenFileEntryFmt(endianess):
"""Returns FMT strings to be used with struct to unpack file pointers"""
return "%sLLLL" % endianess
def GenActorHeaderFmt(endianess):
"""Returns FMT strings to be used with struct to unpack actor headers"""
return "%sLLLLL" % endianess
def GenActorInfoFmt(endianess):
"""Returns FMT strings to be used with struct to unpack actor info (actor number, type, object)"""
return "%sHBxxxxxH" % endianess
def GenActorFileFmt(endianess,textlen,datalen,rodatalen,bsslen, no_rels):
"""Returns a FMT string to be used with struct to unpack a actor's parts"""
return "%s%is%is%is%is%is" % (endianess, textlen, datalen, rodatalen,
bsslen, no_rels*4)
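# Hypothetical usage sketch for the FMT helpers above (the entry bytes are
# made up for illustration): unpack one 16-byte file-table record into its
# virtual start/end and physical start/end offsets.
#   entry = '\x00\x00\x00\x00\x00\x00\x10\x60\x00\x00\x00\x00\x00\x00\x00\x00'
#   vstart, vend, pstart, pend = unpack(GenFileEntryFmt('>'), entry)
#   # -> (0x0, 0x1060, 0x0, 0x0), matching the 0x1060 signature FindFileTable scans for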
def FindFileTable(RomFile,endianess):
"""Returns filetable offset if found, else None"""
FileTableOffset = None
CurrentOffset = 0
BuildInfo = None
RomFile.seek(0)
for i in range(0,0x20000,16):
DD=RomFile.read(16)
if len(DD.split("@srd")) == 2: # build strings look like "zelda@srd44"
CurrentOffset = RomFile.tell()
RomFile.seek(RomFile.tell() - 16)
BuildInfo = CleanBuildInfo(RomFile.read(0x30))
break
for i in range(0,0x80,16):
CurrentOffset+=16
RomFile.seek(CurrentOffset)
DoubleWord = unpack( "%sQ" % endianess, RomFile.read(8) )[0]
if DoubleWord == 0x0000000000001060:
FileTableOffset = CurrentOffset
break
return FileTableOffset, BuildInfo
def CleanBuildInfo(RawBuildInfo):
"""Cleans raw build info (including the 0s)"""
CleanedBuildInfo = ''
for char in RawBuildInfo:
if char == '\x00':
CleanedBuildInfo += ' '
else: CleanedBuildInfo += char
del RawBuildInfo
while CleanedBuildInfo[-1] == ' ':
CleanedBuildInfo = CleanedBuildInfo[:-1]
return CleanedBuildInfo
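# Hypothetical example (made-up build string): CleanBuildInfo maps NUL padding
# to spaces and strips the trailing run, so
#   CleanBuildInfo('zelda@srd44\x00\x00\x0002-10-30\x00\x00\x00')
# returns 'zelda@srd44   02-10-30'.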
def FindNameTable(RomFile,endianess):
"""Returns nametable offset if found, else None"""
ret=None
for i in range(0x1060,0x10000,16):
RomFile.seek(i)
if unpack("%sQQ"%endianess,RomFile.read(16))== (0x6D616B65726F6D00, 0x626F6F7400000000): # ASCII "makerom\0" + "boot\0\0\0\0"
ret=i
break
return ret
def FindCode(RomFile,endianess):
"""Returns code's offsets if found, else None"""
ret=None
FileTableOff=FindFileTable(RomFile,endianess)[0]
for i in range(0, 0x300, 16):
RomFile.seek(FileTableOff+i)
vst,ve,pst,pe=unpack(">LLLL",RomFile.read(16))
if pe==0:
RomFile.seek(ve-16)
if unpack("%sQQ"%endianess,RomFile.read(16))==(0x6A6E8276E707B8E3,0x7D8A471D6A6E18F9):
ret=(vst,ve)
break
return ret
def ScanForHierarchies(File, endianess, FileBank):
"""Finds hierarchies within a zelda 64 resource file"""
hierarchies = []
OldPos = File.tell()
File.seek(0,2)
FileEnd = File.tell()
j = -1
for i in range(0, FileEnd, 4):
File.seek(i)
CurrentWord = unpack("%sL" % endianess,
File.read(4))[0]
if (CurrentWord >> 24 == FileBank and
CurrentWord&3 == 0 and
CurrentWord&0xFFFFFF < FileEnd):
NoPts = unpack("%sB" % endianess,
File.read(1))[0]
if NoPts < 255:
for j in range((CurrentWord&0xFFFFFF),
(CurrentWord&0xFFFFFF)+ NoPts * 4, 4):
File.seek(j)
_CurrentWord = unpack("%sL" % endianess,
File.read(4))[0]
if (_CurrentWord >> 24 != FileBank):
break
if (_CurrentWord&3 != 0):
break
if (_CurrentWord&0xFFFFFF > FileEnd):
break
if j == (CurrentWord&0xFFFFFF)+ NoPts * 4 - 4:
hierarchies.append(i)
File.seek(OldPos)
return hierarchies
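# Hypothetical usage sketch: look for skeleton headers inside an object file
# that is normally mapped to segment 0x06 (the file name is illustrative).
#   with open('object_sk2.zobj', 'rb') as f:
#       offsets = ScanForHierarchies(f, '>', 0x06)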
def ScanForAnimations(File, endianess, FileBank):
"""Finds animations within a zelda 64 resource file"""
return []
def FindEndOfFiles(File,SkipScene=0):
"""Finds the end offset within a ROM that is safe to write to"""
End = 0
FPos = FindFileTable( File ,">" )[0]+4
Entry = -1
while (Entry != 0):
File.seek(FPos)
Entry = unpack( ">L", File.read(4) )[0]
if (Entry > End):
End = Entry
FPos+=16
codeOff = FindCode( File,">" )[0]
for i in range( codeOff + 0xF9440, codeOff + 0xFB5E0, 0x20 ):
File.seek(i+4)
Entry = unpack( ">L", File.read(4) )[0]
if (Entry > End):
End = Entry
for i in range( codeOff + 0x10A6D0, codeOff + 0x10B360, 0x8 ):
File.seek(i+4)
Entry = unpack( ">L", File.read(4) )[0]
if (Entry > End):
End = Entry
c=0
for i in range( codeOff + 0x10CBB0, codeOff + 0x10CBB0 + (MAX_OOT_SCENE+1) * 0x14, 0x14 ):
File.seek(i)
Entry = unpack( ">LL", File.read(8) )
if(c==SkipScene and End <= Entry[0]):
End = Entry[0]
else:
if (Entry[1] > End):
End = Entry[1]
File.seek(Entry[0])
command = -1
while (command != 4):
command,ent,off = unpack(">BBxxL",File.read(8))
if ( command == 0x14 ):
break
off=(off & 0xFFFFFF) + Entry[0]
File.seek(off)
for i in range(ent):
st,en = unpack(">LL", File.read(8) )
if (en > End):
End = en
c+=1
return End
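# Hypothetical usage sketch: locate the first byte past every file referenced
# by the ROM's tables, ignoring one scene's own extent so that scene can be
# relocated (the file name and scene number are illustrative).
#   rom = open('oot-debug.z64', 'rb')
#   free_offset = FindEndOfFiles(rom, SkipScene=0x51)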
|
[
"spinout_182@yahoo.com@0b3f8760-7c76-11de-98bd-fdc22f85cdda"
] |
spinout_182@yahoo.com@0b3f8760-7c76-11de-98bd-fdc22f85cdda
|
1d38ca9420318d94c10d2d91f562ff836a49acf4
|
35d3c1ba1d3130414355917b6bb35009121075de
|
/scripts/test.py
|
2c169fc31cbd96e03b0bf819c7a449aa4136671a
|
[] |
no_license
|
raymondnuaa/oxfordreadingtree
|
191b852d331dca3e6fd5f3722958b7b433b1f7da
|
15acb5d221f870500166d7c0c669717c7ccec968
|
refs/heads/master
| 2021-07-10T18:24:02.959923
| 2016-11-13T15:28:49
| 2016-11-13T15:28:49
| 58,813,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
import PythonMagick
img = PythonMagick.Image("1900.png")
img.sample('128x128')
img.write('ax.png')
'''
import PythonMagick
# Rasterize the PDF at 300 dpi, then write it out as a JPEG. The density must
# be set before read() so ImageMagick rasterizes the PDF at that resolution.
pdf = "aa.pdf"
p = PythonMagick.Image()
p.density('300')
p.read(pdf)
p.write('timg3.jpg')
'''
import PythonMagick
img = PythonMagick.Image()
img.density("300")
img.read("G:/oxfordtree/b.pdf") # read in at 300 dpi
img.write("G:/oxfordtree/bq.png")
'''
'''
import os
from pyPdf import PdfFileReader, PdfFileWriter
from tempfile import NamedTemporaryFile
from PythonMagick import Image
reader = PdfFileReader(open("G:/oxfordtree/test/b.pdf", "rb"))
for page_num in xrange(reader.getNumPages()):
writer = PdfFileWriter()
writer.addPage(reader.getPage(page_num))
temp = NamedTemporaryFile(prefix=str(page_num), suffix=".pdf", delete=False)
writer.write(temp)
temp.close()
im = Image()
im.density("300") # DPI, for better quality
im.read(temp.name)
im.write("bbsome_%d.jpg" % (page_num))
os.remove(temp.name)
'''
|
[
"raymondnuaa@gmail.com"
] |
raymondnuaa@gmail.com
|
1bf99a77ebb7106b4099ecd2911a72cea32ea595
|
35e4efcbb9163101c72ebe02585e8ec7c39c104a
|
/apps/educacion/ed_ladera/models.py
|
1568808709ec8a8990306d539cbf938398064bff
|
[] |
no_license
|
Ivan252512/resiliencia
|
ff89a30844812fd3916a1d8c31b734745540b9af
|
e5f9f9cc76f222438476b6c21022fea1d49f41c3
|
refs/heads/master
| 2020-04-27T15:12:12.266210
| 2019-04-03T08:56:12
| 2019-04-03T08:56:12
| 174,436,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
from django.db import models
# Create your models here.
class LaderaDefEd(models.Model):
termino = models.CharField(max_length=30)
definicion = models.CharField(max_length=300)
# Create your models here.
class PostEdLadera(models.Model):
subtitulo = models.CharField(max_length=80, null=True, blank=True)
parrafo = models.CharField(max_length=800, null=True, blank=True)
descripcion = models.CharField(max_length=200, null=True, blank=True)
imagen = models.ImageField(upload_to = 'image/', null=True, blank=True)
youtube = models.BooleanField(default=False)
video = models.FileField(upload_to = 'video/', null=True, blank=True)
|
[
"ivanpineda@ciencias.unam.mx"
] |
ivanpineda@ciencias.unam.mx
|
4855150320dadf34f922d6a3f9a994c403185fd4
|
b94079ef7f5c5748897cb12635b96de545068c0b
|
/implementations/WGAN/model.py
|
f2cd9e598cad31a9ae308692c42fc42337e1c07b
|
[
"MIT"
] |
permissive
|
WN1695173791/animeface
|
80e33de33ce7ba29e9df4270389f12f025220f22
|
8836223dcbbdcbeec98bbc0d31c394cf7ea0f70b
|
refs/heads/master
| 2023-08-27T03:30:21.603566
| 2021-11-13T04:27:01
| 2021-11-13T04:27:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,289
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class Generator(nn.Module):
def __init__(self, latent_dim):
super(Generator, self).__init__()
self.conv = nn.Sequential(
nn.ConvTranspose2d(latent_dim, 1024, 4, 1, 0, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False),
nn.Tanh()
)
def forward(self, x):
x = x.view(x.size(0), x.size(1), 1, 1)
return self.conv(x)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(3, 64, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(128, 256, 4, 2, 1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(256, 512, 4, 2, 1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(512, 1024, 4, 2, 1, bias=False),
nn.BatchNorm2d(1024),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(1024, 1, 4, 1, 0, bias=False)
)
def forward(self, img):
return self.conv(img)
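# Hypothetical smoke test (not part of the original file): push random noise
# through both networks to confirm the expected 128x128 image shapes.
if __name__ == "__main__":
    G, D = Generator(latent_dim=100), Discriminator()
    z = torch.randn(4, 100)   # batch of 4 latent vectors
    fake = G(z)               # -> torch.Size([4, 3, 128, 128])
    score = D(fake)           # -> torch.Size([4, 1, 1, 1]), WGAN critic output
    print(fake.shape, score.shape)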
|
[
"blackie0110@gmail.com"
] |
blackie0110@gmail.com
|
6863c84f1e08fd765a78585b8b11b43528e1a653
|
8137e2d4c8a780243d705494f433f1ffa3936045
|
/sensor_portal/sensors/admin.py
|
6d167554f02de6567981ed31e17466232b4daea2
|
[] |
no_license
|
Cyberbyte-Studios/Sensor-Portal
|
ce8629d339950fa5b3842c7ca6d4ebc43c41aaf1
|
91ec0c148edfd9c12f3845b50f609bbf9907cdb3
|
refs/heads/master
| 2021-06-21T11:12:14.641949
| 2016-10-09T00:11:38
| 2016-10-09T00:11:38
| 70,187,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
from django.contrib import admin
from ordered_model.admin import OrderedModelAdmin
from .models import Sensor, Metric, Reading
class SensorAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'position', 'active', 'site')
search_fields = ('id', 'name', 'position', 'site__name')
list_filter = ('active', 'site')
class MetricAdmin(OrderedModelAdmin):
list_display = ('name', 'unit', 'move_up_down_links')
class ReadingAdmin(admin.ModelAdmin):
date_hierarchy = 'recorded'
search_fields = ('id', 'site__name', 'metric__name')
list_filter = ('sensor', 'metric')
list_display = ('id', 'metric', 'value', 'recorded')
class ReadingInline(admin.TabularInline):
model = Reading
admin.site.register(Sensor, SensorAdmin)
admin.site.register(Metric, MetricAdmin)
admin.site.register(Reading, ReadingAdmin)
|
[
"theatrepro11@gmail.com"
] |
theatrepro11@gmail.com
|
d8a5907bf59cb7c9a2c036734da17d6e6b78feb7
|
3df995fa02a43932ab2ea5fea26c06403f139f1f
|
/abc/abc159b.py
|
bb2f7da1ad146338f6f88aef886947811fafb18a
|
[] |
no_license
|
jojonki/atcoder
|
75fb7016dd90b3b7495f1ff558eedcdc755eac11
|
ec487b4e11835f25c6770f0115b98b7e93b16466
|
refs/heads/master
| 2021-06-23T09:56:05.636055
| 2021-03-13T03:38:50
| 2021-03-13T03:38:50
| 201,834,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
def main():
S = input()
def helper(s):
l, r = 0, len(s) -1
while l < r:
if s[l] != s[r]:
return False
l += 1
r -= 1
return True
N = len(S)
if helper(S) and helper(S[:(N-1)//2]) and helper(S[(N+3)//2-1:]):
print('Yes')
else:
print('No')
main()
|
[
"junki.ohmura@gmail.com"
] |
junki.ohmura@gmail.com
|
5613fef55e30f9a87c537f2b7e7e61555d1f36eb
|
033b29b6b1538d10e060e5734a1d7488a3fa03b4
|
/attic/strings-bytes/identifier_norm_writer.py
|
2dfb9b62829436add0cd1d70f288129e0a247d2c
|
[
"MIT"
] |
permissive
|
yuechuanx/fluent-python-code-and-notes
|
f99967416abc9c46be50d95f822b2ef3609f2d2d
|
2ae19fff8e1d292c6e8d163c99ca63e07259499c
|
refs/heads/master
| 2023-08-09T22:14:22.985987
| 2022-08-28T09:06:32
| 2022-08-28T09:06:32
| 229,009,764
| 2
| 0
|
MIT
| 2023-07-20T15:11:59
| 2019-12-19T08:30:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 256
|
py
|
src = """
café = 1
cafe\u0301 = 2
names = {(name, tuple(name)):value
for name, value in globals().items()
if not name.startswith('__')}
print(names)
"""
with open('identifier_norm.py', 'tw', encoding='utf8') as out:
out.write(src)
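# Hypothetical follow-up (not in the original file): executing the generated
# script shows that Python applies NFKC normalization to identifiers, so the
# precomposed 'café' and the combining-accent 'cafe\u0301' bind the same name,
# and the surviving value for that single variable is 2.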
|
[
"xiaoyuechuanz@163.com"
] |
xiaoyuechuanz@163.com
|
7d090276a374a3c55ab482e6d47a825b198232fc
|
09c6f66759f3a96c2665c77b5f1e8ae128e79513
|
/ML_ColoringBook/20191018_cartoonlization_1.py
|
114483c6316a0ea3674270276939cdea1b4a99be
|
[] |
no_license
|
waylen94/Machine-Learning-Case-Study
|
8ab4979e1284fcc5fd7a5e119c39efe5bf96387e
|
f2f7618cec5c20d51edd771b3f3e67be9128fcc9
|
refs/heads/master
| 2020-07-26T13:00:45.267002
| 2019-10-18T10:00:25
| 2019-10-18T10:00:25
| 208,652,265
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
# -*- coding: utf-8 -*-
import cv2
import os
def cartoonise(picture_name):
#capturing image effectively
imgInput_FileName = picture_name
edge_filename = 'edge_' + picture_name
saved_filename = 'cartoon_' + picture_name
num_bilateral = 7
print("Cartoonnizing" + imgInput_FileName)
#read image
img_rgb = cv2.imread(imgInput_FileName)
img_color = img_rgb
for _ in range(num_bilateral):
img_color = cv2.bilateralFilter(img_color,d=9,sigmaColor=9,sigmaSpace=7)
#gray and blur
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
img_blur = cv2.medianBlur(img_gray, 7)
#edge detection
img_edge = cv2.adaptiveThreshold(img_blur,255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,
blockSize=9,
C=2)
#transfer to color image
img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
img_cartoon = cv2.bitwise_and(img_color, img_edge)
cv2.imwrite(edge_filename , img_edge)
cv2.imwrite(saved_filename , img_cartoon)
cartoonise('image_001.jpg')
cartoonise('image_002.jpg')
cartoonise('testing001.jpg')
|
[
"763027562@qq.com"
] |
763027562@qq.com
|
74b909d448937f1a9f601e91163932784bb35eb9
|
40a18752fe454bbf029f3f39b7e84cf4403d4977
|
/Class Files/duplicates.py
|
f98140df95034eb29dccb47b04032c0bb60804c8
|
[] |
no_license
|
jcanning/Class_craftingQualityCode
|
51e63b3c08371c1816db48ee3af0ce6813d10712
|
745b9cc4fa0a5b49dd6d64bf9f6243c24c76ea2e
|
refs/heads/master
| 2021-01-01T05:35:45.441972
| 2013-05-01T21:53:09
| 2013-05-01T21:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
def remove_shared(L1, L2):
    """(list, list) -> NoneType
    Remove from L1 every value that also appears in L2; L2 is left unchanged.
>>> list_1 = [1, 2, 3, 4, 5, 6]
>>> list_2 = [2, 4, 5, 7]
>>> remove_shared(list_1, list_2)
>>> list_1
[1, 3, 6]
>>> list_2
[2, 4, 5, 7]
"""
for v in L2:
if v in L1:
L1.remove(v)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"JohnAllan@.(none)"
] |
JohnAllan@.(none)
|
ed689b5e193dc7ed24846cd0dfc9299fd29309fe
|
d51f222779a4289f074b821fa4c882a7dd144c33
|
/app/main/errors.py
|
537f163505e4a4da06f3589aa47e05762c87654d
|
[] |
no_license
|
Nanrou/blog_site
|
f4657250af26db6f25b24abf979cb1b4d1e56ae4
|
33fd0690b473987186a7b38c5d40ecc1d115e40d
|
refs/heads/master
| 2021-01-19T14:27:27.109925
| 2018-01-15T02:16:48
| 2018-01-15T02:16:48
| 100,903,152
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
from flask import render_template, request, jsonify
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'page not found'})
response.status_code = 404
return response
return render_template('main/404.html'), 404
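# Hypothetical usage sketch (assumes the blueprint above is registered on an
# app object): a JSON-only client gets the JSON body, a browser the HTML page.
#   with app.test_client() as c:
#       r = c.get('/missing', headers={'Accept': 'application/json'})
#       assert r.status_code == 404 and b'page not found' in r.data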
|
[
"kkkcomkkk@qq.com"
] |
kkkcomkkk@qq.com
|
954066244665d6ff6cafb2c5db58cff8997d5b2c
|
3d7d0b0abb93e9799161880339571877d753086d
|
/menuApp/migrations/0005_auto_20170130_1626.py
|
a3e87f2cf183db030a0f4a3c91bf018d7477b523
|
[
"Apache-2.0"
] |
permissive
|
che4web/canteen
|
efe26b1d5f46f3815929fd9b34dc5100b01ab9c7
|
5bba3bb391795c2d00ce18dad316fe3da57e37ee
|
refs/heads/master
| 2020-05-23T09:09:12.160405
| 2017-02-25T20:50:03
| 2017-02-25T20:50:03
| 80,439,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-01-30 11:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menuApp', '0004_categorymenu_order'),
]
operations = [
migrations.AlterModelOptions(
name='categorymenu',
options={'ordering': ['order'], 'verbose_name': 'категория', 'verbose_name_plural': 'категории'},
),
migrations.AddField(
model_name='dish',
name='img',
field=models.ImageField(blank=True, upload_to='', verbose_name='фото блюда'),
),
]
|
[
"kochergina@prognoz.ru"
] |
kochergina@prognoz.ru
|
bd32ec23fe4da9f7a873c505755008fde5a3955e
|
054b3171aec06fb64dd4dd4f50156621024aa59f
|
/login_register/migrations/0003_auto_20180405_1314.py
|
7a9ce60f7feaad0eb31645d9ff741d95b0f29b3b
|
[] |
no_license
|
TtTRz/ALG_x
|
4d21368a525d6a60ec9465a941260d059df542bd
|
6db7cd0b7893288ea7d232a40925ce2b975a1407
|
refs/heads/master
| 2020-03-08T13:30:36.919513
| 2018-05-14T04:09:49
| 2018-05-14T04:09:49
| 128,159,726
| 3
| 0
| null | 2018-04-14T14:48:02
| 2018-04-05T04:39:37
|
HTML
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
# Generated by Django 2.0.2 on 2018-04-05 13:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login_register', '0002_auto_20180405_1312'),
]
operations = [
migrations.AlterField(
model_name='user_role',
name='rolename',
field=models.CharField(default='访客', max_length=30, verbose_name='名称'),
),
]
|
[
"thelns@vip.qq.com"
] |
thelns@vip.qq.com
|
3ebc23f9675e254ad92cd1d6c65f6c23ef92026b
|
2b167e29ba07e9f577c20c54cb943861d0ccfa69
|
/numerical_analysis_backup/small-scale-multiobj/resource_usage2/arch4_new/ru_arch4_7.py
|
2c16791d14e838246ae3e2a686b864b63185fe48
|
[] |
no_license
|
LiYan1988/kthOld_OFC
|
17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f
|
b1237577ea68ad735a65981bf29584ebd889132b
|
refs/heads/master
| 2021-01-11T17:27:25.574431
| 2017-01-23T05:32:35
| 2017-01-23T05:32:35
| 79,773,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
import csv
from gurobipy import *
import numpy as np
from arch4_decomposition_new import Arch4_decompose
np.random.seed(2010)
num_cores=3
num_slots=80
filename = 'traffic_matrix.csv'
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
#%% arch4
beta = 0.01
m = Arch4_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=3000,mipgap=0.01, method=2)
m.create_model_sa(mipfocus=1,timelimit=25000,submipnodes=2000,heuristics=0.8)
m.sa_heuristic(ascending1=False,ascending2=False)
m.save_tensor(m.tensor_milp, 'tensor_milp_%.2e.csv'%beta)
m.save_tensor(m.tensor_heuristic, 'tensor_heuristic_%.2e.csv'%beta)
filename = 'milp_cnk_%.2e.csv'%beta
suclist = m.suclist_sa
m.write_result_csv(filename, suclist)
filename = 'heuristic_cnk_%.2e.csv'%beta
m.write_heuristic_result_csv(filename)
efficiency_milp = m.efficiency_milp
efficiency_heuristic = m.efficiency_heuristic
with open('efficiency_%.2e.csv'%beta, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['beta', 'milp', 'heuristic'])
writer.writerow([beta, efficiency_milp, efficiency_heuristic])
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
3304b1318fafc5c6e1b040e75f24f5bd5f4f19cb
|
97806f36a304b3526fab64f03f92f2e0d6bf3663
|
/content/tutorials/hsl-color-wheel/axonometry.py
|
78da1f96c4902a0286ede1553b15750eb28b7c9a
|
[] |
no_license
|
joanmas/PythonForDesigners
|
41762ce577a5abc687381c17aaaf8156991a8916
|
9e676f0cf0632dc404359bda3fd614fafc48a5bd
|
refs/heads/master
| 2023-03-03T06:25:48.087996
| 2021-02-16T15:02:25
| 2021-02-16T15:02:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
#!/usr/bin/env python3
# coding: utf-8
# ---------- #
# Axonometry #
# ---------- #
### Modules
# dependencies
from drawBot import newPage, width, height, translate
from drawBot import save, restore, scale, saveImage
from drawBot import newDrawing, endDrawing, savedState
# from the project folder
from HSLdonut import hslDonut
### Variables
discs = 16
rings = 22
ringThickness = 5
holeRadius = 45
### Instructions
if __name__ == '__main__':
newDrawing()
newPage(952, 488)
translate(width()*.27, height()*.25)
save()
for eachDisc in range(discs):
with savedState():
scale(1, .65)
hslDonut(rings,
ringThickness,
holeRadius,
fixedValue=eachDisc/(discs-1),
isLuminosityConst=True,
captions=False)
translate(0, 16)
restore()
translate(width()*.44, 0)
save()
for eachDisc in range(discs):
with savedState():
scale(1, .65)
hslDonut(rings,
ringThickness,
holeRadius,
fixedValue=eachDisc/(discs-1),
isLuminosityConst=True,
captions=False)
translate(0, 16)
restore()
saveImage('cd-roms.pdf')
endDrawing()
|
[
"hello@robertoarista.it"
] |
hello@robertoarista.it
|
41f182f5c51ff7dae4a0d1aa3db79e6e5f1d63b1
|
8b868defe347284513bbdc3d486d2542acdd4eea
|
/python/TBUtils.py
|
61b447febcacc33d928691abfe489ce44c9caaa6
|
[] |
no_license
|
ShashlikTB/H4TestBeam
|
bdcbe9e13e0c17851cba59556289e6a318fe7397
|
f1bbabfc0e64bf86bd3fe2616a32bc22777d28be
|
refs/heads/master
| 2021-01-21T04:54:19.602615
| 2016-06-09T22:34:34
| 2016-06-09T22:34:34
| 25,589,648
| 1
| 4
| null | 2016-06-09T22:26:49
| 2014-10-22T15:40:04
|
C++
|
UTF-8
|
Python
| false
| false
| 12,956
|
py
|
# Created 4/12/2014 B.Hirosky: Initial release
import sys, os, bz2, inspect, re, time, collections, StringIO, pickle
from commands import getoutput,getstatusoutput
from ROOT import *
def hit_continue(msg='Hit any key to continue'):
print
print msg
sys.stdout.flush()
raw_input('')
# a simple command string builder
def ccat(*arg):
cc=""
for i in range(len(arg)): cc=cc+" "+str(arg[i])
return cc
def checkDep(file,msg=""):
if not os.path.isfile(file):
print file, "not found"
if msg!="": print msg
sys.exit()
def checkEnv(var,msg=""):
if os.getenv(var)==None:
print var, "not found"
if msg!="": print msg
sys.exit()
def LoadLibs(tblib,*libs):
checkEnv(tblib,"Source the setup script")
tblib=str(os.getenv("TBLIB"))
for i in range(len(libs)):
lib=tblib+"/"+str(libs[i])
gSystem.Load(lib)
# A simple error logger class
# Instantiate as logger=Logger(num=1)
# Print information messages and up to num (default=1) occurances of each warning
# The Summary method provides statistics on all warnings
class Logger():
def __init__(self,max=1):
self.warnings={}
self.RED='\033[91m'
self.COL_OFF='\033[0m'
self.max=max
self.logfile=""
self.stdout=sys.stdout
print "Init logger, max print count =",max
def SetLogFile(self,logfile):
self.logfile=logfile
self.stdout = open(self.logfile, 'w') # output socket
def Info(self,*arg):
msg="Info: "+ccat(*arg)+"\n"
sys.stdout.write(msg)
if (self.logfile !=""): self.stdout.write(msg)
def Warn(self,*arg):
msg="Warning: "+ccat(*arg)+"\n"
if msg in self.warnings: self.warnings[msg]=self.warnings[msg]+1
else: self.warnings[msg]=1
if self.warnings[msg]<=self.max:
sys.stdout.write(self.RED+msg+self.COL_OFF)
if (self.logfile !=""): self.stdout.write(msg)
return True # message printed
return False # message just logged
def Fatal(self,*arg):
msg="**FATAL**: "+ccat(*arg)+"\n"
sys.stdout.write(self.RED+msg+self.COL_OFF)
if (self.logfile !=""): self.stdout.write(msg)
sys.exit(1)
def Summary(self):
output = StringIO.StringIO()
print >>output
print >>output,"="*40
print >>output," WARNING Summary"
print >>output,"="*40
print >>output
if len(self.warnings)==0: print >>output,"No Warnings reported"
else:
owarn = collections.OrderedDict(sorted(self.warnings.items()))
for a in owarn: print >>output,"(%5d) %s" % (owarn[a],a)
print >>output,"="*40
print >>output," WARNING Summary (end)"
print >>output,"="*40
print output.getvalue()
if (self.logfile !=""): self.stdout.write(output.getvalue())
output.close()
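# Hypothetical usage sketch for Logger (not in the original file):
#   log = Logger(max=2)                        # print each warning at most twice
#   log.Info("opened run file", fname)
#   log.Warn("bad checksum in spill", nspill)  # later repeats are only counted
#   log.Summary()                              # per-message warning statistics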
# hack to pass immutable data types "by reference" (under consideration)
class pyref():
def __init__(self,data):
self.data=[data]
def ref(self):
return self.data[0]
def TBOpen(fin):
if fin.endswith("bz2"): return bz2.BZ2File(fin,"r")
else: return open(fin,"r")
##############################
# data file parsers
##############################
def ParsePadeData(padeline):
try:
padeline=padeline.split()
pade_ts=long(padeline[0])
pade_transfer_size=int(padeline[1],16)<<8+int(padeline[2],16)
pade_board_id=int(padeline[3],16)
pade_hw_counter=int(padeline[4]+padeline[5]+padeline[6],16)
pade_ch_number=int(padeline[7],16)
eventNumber = int(padeline[8]+padeline[9],16)
waveform=(padeline[10:])
return (pade_ts,pade_transfer_size,pade_board_id,
pade_hw_counter,pade_ch_number,eventNumber,waveform)
    except (ValueError, IndexError) as e:
        print "Failed to parse PADE line %s due to %s" % (padeline, e)
        sys.exit()
# version for H4, ignore FNAL WC info
def ParsePadeSpillHeader(padeline):
spill = { 'number':0, 'pctime':0, 'nTrigWC':0, 'wcTime':0, 'status':0 }
padeline=padeline.split()
spill['number']=int(padeline[4])
pcTime=padeline[7]+" "+padeline[8]+" "+padeline[9]
spill['pcTime']=long(time.mktime(time.strptime(pcTime, "%m/%d/%Y %H:%M:%S %p")))
return spill
def ParsePadeBoardHeader(padeline):
if "error" in padeline:
log=Logger()
log.Fatal("Error in board header",padeline)
master = "Master" in padeline
padeline=re.sub('=', ' ', padeline).split()
boardID=int(padeline[5])
status=int(padeline[7],16)
trgStatus=int(padeline[9],16)
events=int(padeline[13],16)
memReg=int(padeline[16],16)
trigPtr=int(padeline[19],16)
pTemp=int(padeline[21],16)
sTemp=int(padeline[23],16)
return (master,boardID,status,trgStatus,events,memReg,trigPtr,pTemp,sTemp)
def readWCevent(fWC):
    endOfEvent=0
    nhits=0
    foundWC=False # set once this event's EVENT header has been seen
while 1:
wcline=fWC.readline()
if not wcline: break
if "SPILL" in wcline: continue
wcline=wcline.split()
if not foundWC and "EVENT" in wcline[0]: # found new event
trigWCrun=wcline[1]
trigWCspill=wcline[2]
foundWC=True
continue
elif "EVENT" in wcline[0]:
fWC.seek(endOfEvent)
break
if "Module" in wcline[0]:
tdcNum=int(wcline[1])
endOfEvent=fWC.tell()
if "Channel" in wcline[0]:
wire=int(wcline[1])
tdcCount=int(wcline[2])
eventDict[eventNumber].AddWCHit(tdcNum,wire,tdcCount) #!
endOfEvent=fWC.tell()
if DEBUG_LEVEL>1: event.GetWCChan(nhits).Dump()
nhits=nhits+1
def getWCspills(fWC,loc): # loc: byte offset of the spill (e.g. from wcLookup)
    endOfEvent=0
    nhits=0
    foundWC=False
    fWC.seek(loc)
while 1:
wcline=fWC.readline()
if not wcline: return -1
if "SPILL" in wcline: continue
wcline=wcline.split()
if not foundWC and "EVENT" in wcline[0]: # found new event
trigWCrun=wcline[1]
trigWCspill=wcline[2]
foundWC=True
continue
elif "EVENT" in wcline[0]:
fWC.seek(endOfEvent)
break
if "Module" in wcline[0]:
tdcNum=int(wcline[1])
endOfEvent=fWC.tell()
if "Channel" in wcline[0]:
wire=int(wcline[1])
tdcCount=int(wcline[2])
eventDict[eventNumber].AddWCHit(tdcNum,wire,tdcCount) #!
endOfEvent=fWC.tell()
if DEBUG_LEVEL>1: event.GetWCChan(nhits).Dump()
nhits=nhits+1
# WC Database lookup
# match WC spills w/in PAST 45 seconds of WC timestamp read by PADE
def wcLookup(tgttime, bound=45, filename="wcdb.txt"):
print "tgttime",tgttime
lookval=int(tgttime)/100 # seek matches w/in 100 second time range
try:
stat,spills=getstatusoutput("look "+str(lookval)+" "+filename) # binary search of file
if (int(stat)!=0) or len(spills)==0:
return (-1, None) # no lines match
spills=spills.split("\n")
for spill in spills: # scan spills within `bound` seconds of the PADE spill header time
print spill
split = re.split(' +', spill.strip())
sTime = float(split[0]) # spill time from WC controller
diff = tgttime-sTime # PADE read time - WC DAQ read time
print "diff",diff
if diff<0: # Moved past the spill in the db file
print "miss!"
return (-1, None)
if diff <= bound: # fuzzy time match
return( int(split[4]),split[3] ) # byte offset and filename
except IOError as e:
print "Failed to open file %s due to %s" % (filename, e)
return (-1,None)
# WC Database lookup [old version]
# match WC spills w/in 15 seconds of timestamp given by PADE
def wcLookup_(tgttime, bound=15, filename="wcdb.txt"):
lookval=long(tgttime)/1000 # seek matches w/in 1000 second time range
try:
stat,spills=getstatusoutput("look "+str(lookval)+" "+filename) # binary search of file
if (int(stat)!=0) or len(spills)==0:
return (-1, None) # no lines match
spills=spills.split("\n")
for spill in spills: # search spills <100 seconds from time in PADE spill header
split = re.split(' +', spill.strip())
sTime = float(split[0])
diff = sTime-tgttime
if abs(diff) <= bound: # fuzzy time match
return( int(split[4]),split[3] ) # byte offset and filename
elif (diff>bound): # Moved past the spill in the db file
return (-1, None)
except IOError as e:
print "Failed to open file %s due to %s" % (filename, e)
return (-1,None)
# WC Database lookup [older version]
# match WC spills w/in 15 seconds of timestamp given by PADE
def wcLookup__(tgttime, bound=15, filename="wcdb.txt"):
tgttime=float(tgttime)
try:
handle = open(filename, 'r')
withinBound = []
for line in handle: # todo: replace with binary search!
split = re.split(' +', line.strip())
sTime = float(split[0])
diff = sTime-tgttime
if abs(diff) <= bound: # fuzzy time match
return( int(split[4]),split[3] ) # byte offset and filename
elif (diff>bound): # Moved past the spill in the db file
return (-1, None)
except IOError as e:
print "Failed to open file %s due to %s" % (filename, e)
return (-1,None)
# find matching WC event number
def findWCEvent(fd,tgtevent):
wcline=fd.readline() # skip the first line, containing the SPILL number
while(1):
wcline=fd.readline()
if not wcline or "SPILL" in wcline: return -1
if "EVENT" in wcline:
thisevent=int(wcline.split()[2])
if thisevent-1==tgtevent: return fd.tell() # WC/PADE events start at 1/0
elif thisevent-1>tgtevent: return -1 # past the event number
def getTableXY(timeStamp):
checkEnv("TBHOME","Source the setup script")
tbhome=str(os.getenv("TBHOME"))
posFile=tbhome+"/doc/TablePositions.txt"
x=-999.0
y=-999.0
try:
inFile=open(posFile, "r")
for line in inFile:
if line.find(timeStamp)>-1:
line=line.split()
nf=len(line)
x=float(line[nf-2])
y=float(line[nf-1])
except IOError as e:
print "Failed to open file %s due to %s" % (posFile, e)
return (x,y)
def getRunData(timeStamp):
with open('runlist.dat', 'r') as f:
runlist = pickle.load(f)
print "search for",timeStamp
for run in runlist: # could move to a binary search
if run[0]==timeStamp:
print run
particle=run[3]
try:
vga=int(run[4],16)
except:
vga=0
momentum=run[5].replace("GeV","")
try: momentum=float(momentum)
except: momentum=0
# table location
try:
tableX=float(run[18])
except:
tableX=-999.
try:
tableY=float(run[19])
except:
tableY=-999.
# beam type
pid=0
if "elec" in particle:
pid=11
elif "posi" in particle:
pid=-11
elif "muo" in particle:
pid=12
elif "pion" in particle:
pid=211
elif "prot" in particle:
pid=2212
elif "las" in particle:
pid=-22
# gain setting
pga_lna=run[6]
gain=6 # default is Mid_High = 0110 binary
if "Low_" in pga_lna: gain=gain-4
#elif "Mid_" in pga_lna: gain=gain
elif "High_" in pga_lna: gain=gain+4
elif "VHigh_" in pga_lna: gain=gain+8
if "_Low" in pga_lna: gain=gain-2
elif "_Mid" in pga_lna: gain=gain-1
#elif "_High" in pga_lna: gain=gain
gain=gain+vga<<4
try:
angle=float(run[20])
except:
angle=0
return (pid,momentum,gain,tableX,tableY,angle)
return []
def lastRunDat():
if not os.path.isfile('runlist.dat') : return "00000000_000000"
with open('runlist.dat', 'r') as f:
runlist = pickle.load(f)
runs=len(runlist)
last=runlist[runs-1][2].replace(".txt","").replace("rec_capture_","")
return last
def dumpRunDat():
with open('runlist.dat', 'r') as f:
runlist = pickle.load(f)
runs=len(runlist)
for a in range(len(runlist)):
print runlist[a]
|
[
"bob.hirosky@gmail.com"
] |
bob.hirosky@gmail.com
|
f36722fa138b892831049e06bd6d310de8047724
|
912324171c1181eb1ea033dd57414c3ef28b6b74
|
/LR/lr/util/decorators.py
|
147bcc600068122ff0dba2a51bb169ece26d09a1
|
[
"Apache-2.0"
] |
permissive
|
aworkman/LearningRegistry
|
0161c41978ee3376e51008003fb43f2703d3a261
|
cf9e9071cba50953f1231cd92fca1c64d9864eb3
|
refs/heads/master
| 2021-01-15T23:06:54.083832
| 2012-02-01T17:06:49
| 2012-02-01T17:06:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,136
|
py
|
'''
Created on Oct 11, 2011
@author: jklo
'''
from uuid import uuid1
import json
import logging
from contextlib import closing
from pylons import config
import time
import urllib2
import ijson
from ijson.parse import items
import os
import urllib
log = logging.getLogger(__name__)
def ForceCouchDBIndexing():
json_headers = {"Content-Type": "application/json"}
couch = {
"url": config["couchdb.url"],
"resource_data": config["couchdb.db.resourcedata"]
}
def indexTestData(obj):
opts = {
"startkey":"_design/",
"endkey": "_design0",
"include_docs": True
}
design_docs = obj.db.view('_all_docs', **opts)
for row in design_docs:
if "views" in row.doc and len(row.doc["views"].keys()) > 0:
for view in row.doc["views"].keys():
# view = row.doc["views"].keys()[0]
view_name = "{0}/_view/{1}".format( row.key, view)
index_opts = { "limit": 1, "descending": 'true'}
if "reduce" in row.doc["views"][view]:
index_opts["reduce"] = 'false'
log.error("Indexing: {0}".format( view_name))
req = urllib2.Request("{url}/{resource_data}/{view}?{opts}".format(view=view_name, opts=urllib.urlencode(index_opts), **couch),
headers=json_headers)
res = urllib2.urlopen(req)
# view_result = obj.db.view(view_name, **index_opts)
log.error("Indexed: {0}, got back: {1}".format(view_name, json.dumps(res.read())))
else:
log.error("Not Indexing: {0}".format( row.key))
def test_decorator(fn):
def test_decorated(self, *args, **kw):
try:
#print "Wrapper Before...."
indexTestData(self)
fn(self, *args, **kw)
except :
raise
finally:
indexTestData(self)
#print "Wrapper After...."
return test_decorated
return test_decorator
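# Hypothetical usage sketch: the factory returns a decorator, so a test method
# is wrapped like this (class and method names are illustrative); the views
# are re-indexed both before and after the test body runs.
#   class ResourceDataTest(TestController):
#       @ForceCouchDBIndexing()
#       def test_views(self):
#           ...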
def PublishTestDocs(sourceData, prefix, sleep=0, force_index=True):
json_headers = {"Content-Type": "application/json"}
test_data_log = "test-data-%s.log" % prefix
couch = {
"url": config["couchdb.url"],
"resource_data": config["couchdb.db.resourcedata"]
}
def writeTestData(obj):
if not hasattr(obj, "test_data_ids"):
obj.test_data_ids = {}
obj.test_data_ids[prefix] = []
with open(test_data_log, "w") as plog:
for doc in sourceData:
doc["doc_ID"] = prefix+str(uuid1())
obj.app.post('/publish', params=json.dumps({"documents": [ doc ]}), headers=json_headers)
plog.write(doc["doc_ID"] + os.linesep)
obj.test_data_ids[prefix].append(doc["doc_ID"])
if sleep > 0:
time.sleep(sleep)
def indexTestData(obj):
if force_index == False:
return
opts = {
"startkey":"_design/",
"endkey": "_design0",
"include_docs": True
}
design_docs = obj.db.view('_all_docs', **opts)
for row in design_docs:
if "views" in row.doc and len(row.doc["views"].keys()) > 0:
for view in row.doc["views"].keys():
# view = row.doc["views"].keys()[0]
view_name = "{0}/_view/{1}".format( row.key, view)
index_opts = { "limit": 1, "descending": 'true'}
if "reduce" in row.doc["views"][view]:
index_opts["reduce"] = 'false'
log.error("Indexing: {0}".format( view_name))
req = urllib2.Request("{url}/{resource_data}/{view}?{opts}".format(view=view_name, opts=urllib.urlencode(index_opts), **couch),
headers=json_headers)
res = urllib2.urlopen(req)
# view_result = obj.db.view(view_name, **index_opts)
log.error("Indexed: {0}, got back: {1}".format(view_name, json.dumps(res.read())))
else:
log.error("Not Indexing: {0}".format( row.key))
def cacheTestData(obj):
req = urllib2.Request("{url}/{resource_data}/_all_docs?include_docs=true".format(**couch),
data=json.dumps({"keys":obj.test_data_ids[prefix]}),
headers=json_headers)
res = urllib2.urlopen(req)
docs = list(items(res, 'rows.item.doc'))
if not hasattr(obj, "test_data_sorted"):
obj.test_data_sorted = {}
obj.test_data_sorted[prefix] = sorted(docs, key=lambda k: k['node_timestamp'])
def removeTestData(obj):
for doc_id in obj.test_data_ids[prefix]:
try:
del obj.db[doc_id]
except Exception as e:
print e.message
try:
del obj.db[doc_id+"-distributable"]
except Exception as e:
print e.message
try:
del obj.test_data_ids[prefix]
except Exception as e:
print e.message
    try:
        del obj.test_data_sorted[prefix] # presumably meant to drop the cached sorted copy
    except Exception as e:
        print e.message
def test_decorator(fn):
def test_decorated(self, *args, **kw):
try:
#print "Wrapper Before...."
writeTestData(self)
indexTestData(self)
cacheTestData(self)
fn(self, *args, **kw)
except :
raise
finally:
removeTestData(self)
indexTestData(self)
#print "Wrapper After...."
return test_decorated
return test_decorator
|
[
"jim.klo@sri.com"
] |
jim.klo@sri.com
|
27339df49ba8e212dfde10d2caa99ea5666d1985
|
d6cc63e9c21cf3d806397aa8dc18c3927b53ebc8
|
/DjangoDemoProject/wsgi.py
|
04d0b0444588ba1532a063e4b9616e14be225f53
|
[] |
no_license
|
Yogesh-Shaligram/Ecommerces
|
03614c396abaef14d9edb1a52dc77e884265c6cd
|
408da57fb35c5550f8dd1ff2bd9336f953609714
|
refs/heads/master
| 2023-06-24T22:24:04.282410
| 2021-07-28T16:59:09
| 2021-07-28T16:59:09
| 390,432,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for DjangoDemoProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoDemoProject.settings')
application = get_wsgi_application()
|
[
"yogesh.shaligram98@gmail.com"
] |
yogesh.shaligram98@gmail.com
|
041066009b160414b3a5f7a2b9dd2e8ceba37856
|
e22a696ab7c2a0ecd14136acbcd0c999b905ec8d
|
/DAS/codes/trail.py
|
5153b052415b12eb0b849863c03deb77c0525656
|
[] |
no_license
|
99002646/Genesis
|
5faaf9ce2165556a52c57bfa468a081aeaefd2af
|
edfd755aa6e42153b1217ad1620b1040c4bdd2d3
|
refs/heads/main
| 2023-02-03T22:29:07.970692
| 2020-12-24T09:25:55
| 2020-12-24T09:25:55
| 309,572,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
import numpy as np
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Your Parameters
amp = 1 # 1V (Amplitude)
f = 1000 # 1kHz (Frequency)
fs = 200000 # 200kHz (Sample Rate)
T = 1/f
Ts = 1/fs
# Select if you want to display the sine as a continuous wave
# True = continuous (not able to zoom in x-direction)
# False = non-continuous (able to zoom)
continous = True
x = np.arange(fs)
y = [ amp*np.sin(2*np.pi*f * (i/fs)) for i in x]
class Scope(object):
def __init__(self, ax, maxt=2*T, dt=Ts):
self.ax = ax
self.dt = dt
self.maxt = maxt
self.tdata = [0]
self.ydata = [0]
self.line = Line2D(self.tdata, self.ydata)
self.ax.add_line(self.line)
self.ax.set_ylim(-amp, amp)
self.ax.set_xlim(0, self.maxt)
def update(self, y):
lastt = self.tdata[-1]
if continous :
if lastt > self.tdata[0] + self.maxt:
self.ax.set_xlim(lastt-self.maxt, lastt)
t = self.tdata[-1] + self.dt
self.tdata.append(t)
self.ydata.append(y)
self.line.set_data(self.tdata, self.ydata)
return self.line,
def sineEmitter():
for i in x:
yield y[i]
fig, ax = plt.subplots()
scope = Scope(ax)
# pass a generator in "sineEmitter" to produce data for the update func
ani = animation.FuncAnimation(fig, scope.update, sineEmitter, interval=10,
blit=True)
plt.show()
|
[
"noreply@github.com"
] |
99002646.noreply@github.com
|
323800e9c6b4f20af345fb68cbaced088e6fb955
|
9a0858d1fdccdc0114cd1a33ab2364cf504d18e8
|
/Семинар1. Задача 5.py
|
693554e9e2614351b9e2e7bfe3c22d051ccc38ea
|
[] |
no_license
|
DenisGlebov96/-
|
6d458d92f879dd04a7c18c4fdecc0332680f396d
|
a9175aff572d6ab589d3735a596d0c19fe6fc57b
|
refs/heads/master
| 2022-12-30T22:38:03.459551
| 2020-10-14T18:29:16
| 2020-10-14T18:29:16
| 300,227,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
a = input()
for i in reversed(a):
print(i,end='')
|
[
"noreply@github.com"
] |
DenisGlebov96.noreply@github.com
|
aac12304f8e7507d5574691b3321be28f30fc98f
|
8a89ba166e6740c0082d56c6af4021c7e8c93686
|
/FindTheUnknownDigit.py
|
74b48172f60de57ceea26e4f3095d3107bce201d
|
[] |
no_license
|
xylonlin/codewars
|
db26b198efa15919dbfa501d47fde9b60146c999
|
e23a6dabbdb9cdeb1dabbc3d3a57e46451fa092a
|
refs/heads/master
| 2020-03-28T10:22:38.785845
| 2018-09-22T12:18:55
| 2018-09-22T12:18:55
| 148,103,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
#codewars link
#https://www.codewars.com/kata/546d15cebed2e10334000ed9/train/python
def solve_runes(runes):
temp = runes
print(temp)
for i in "0123456789":
if i in temp or i == '0' and (temp[0] in '0?' and \
temp[1] in '?1234567890' or \
any([ True for i,s in enumerate(temp) \
if s in '=+-*' and temp[i+1] == '?'and i+2 < len(temp) and temp[i+2] in '?0123456789'])):
continue
temp = temp.replace('?',i)
temp = temp.replace('=','==')
print(temp)
if eval(temp):
return int(i)
temp = runes
return -1
test = "?38???+595???=833444"
print(solve_runes(test))
|
[
"xylonlin@tencent.com"
] |
xylonlin@tencent.com
|
4dd9b94c80c34f8e6191e417b3ac9124d3613601
|
1c33942c013776c6b4a2e1d26be0a3ac28f36c91
|
/chainer/layer_function.py
|
190944e8fd3e6a0144e3ce4bdc7589af3060c663
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
asrlabncku/RAP
|
d9845868204bd5b687b77bb3e39836eded5b1da7
|
11fab37c8d98257ec0aed1b306aa9709a3a51328
|
refs/heads/master
| 2023-02-02T01:53:02.576516
| 2020-12-23T07:49:10
| 2020-12-23T07:49:10
| 280,380,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,026
|
py
|
import math
import chainer
from chainer import cuda
from chainer.variable import Variable
# from chainer.functions import clipped_relu as f_clipped_relu
# from chainer.functions import crelu as f_crelu
# from chainer.functions import elu as f_elu
# from chainer.functions import hard_sigmoid as f_hard_sigmoid
# from chainer.functions import leaky_relu as f_leaky_relu
# from chainer.functions import log_softmax as f_log_softmax
# from chainer.functions import maxout as f_maxout
# from chainer.functions import relu as f_relu
# from chainer.functions import sigmoid as f_sigmoid
# from chainer.functions import softmax as f_softmax
# from chainer.functions import softplus as f_softplus
# from chainer.functions import tanh as f_tanh
# from chainer.functions import dropout as f_dropout
# from chainer.functions import gaussian as f_gaussian
# from chainer.functions import average_pooling_2d as f_average_pooling_2d
# from chainer.functions import max_pooling_2d as f_max_pooling_2d
# from chainer.functions import spatial_pyramid_pooling_2d as f_spatial_pyramid_pooling_2d
# from chainer.functions import unpooling_2d as f_unpooling_2d
# from chainer.functions import reshape as f_reshape
# from chainer.functions import softmax_cross_entropy as f_softmax_cross_entropy
class Function(object):
def __call__(self, x):
raise NotImplementedError()
def from_dict(self, dict):
for attr, value in dict.iteritems():
setattr(self, attr, value)
def to_dict(self):
dict = {}
for attr, value in self.__dict__.iteritems():
dict[attr] = value
return dict
class Activation(object):
def __init__(self, nonlinearity="relu"):
self.nonlinearity = nonlinearity
def to_function(self):
if self.nonlinearity.lower() == "clipped_relu":
return clipped_relu()
if self.nonlinearity.lower() == "crelu":
return crelu()
if self.nonlinearity.lower() == "elu":
return elu()
if self.nonlinearity.lower() == "hard_sigmoid":
return hard_sigmoid()
if self.nonlinearity.lower() == "leaky_relu":
return leaky_relu()
if self.nonlinearity.lower() == "relu":
return relu()
if self.nonlinearity.lower() == "sigmoid":
return sigmoid()
if self.nonlinearity.lower() == "softmax":
return softmax()
if self.nonlinearity.lower() == "softplus":
return softplus()
if self.nonlinearity.lower() == "tanh":
return tanh()
if self.nonlinearity.lower() == "bst":
return bst()
raise NotImplementedError()
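# Hypothetical usage sketch: Activation is a string-to-callable factory over
# the wrapper classes defined below, e.g.
#   act = Activation("leaky_relu").to_function()  # -> a leaky_relu instance
#   y = act(x)                                    # chainer.functions.leaky_relu(x, 0.2)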
from chainer.functions.eBNN import function_bst
class bst(Function):
def __init__(self):
self._function = "bst"
def __call__(self, x):
return function_bst.bst(x)
class clipped_relu(Function):
def __init__(self, z=20.0):
self._function = "clipped_relu"
self.z = z
def __call__(self, x):
return chainer.functions.clipped_relu(x, self.z)
class crelu(Function):
def __init__(self, axis=1):
self._function = "crelu"
self.axis = axis
def __call__(self, x):
return chainer.functions.crelu(x, self.axis)
class elu(Function):
def __init__(self, alpha=1.0):
self._function = "elu"
self.alpha = alpha
def __call__(self, x):
return chainer.functions.elu(x, self.alpha)
class hard_sigmoid(Function):
    def __init__(self):
        self._function = "hard_sigmoid"
def __call__(self, x):
return chainer.functions.hard_sigmoid(x)
class leaky_relu(Function):
def __init__(self, slope=0.2):
self._function = "leaky_relu"
self.slope = slope
def __call__(self, x):
return chainer.functions.leaky_relu(x, self.slope)
class log_softmax(Function):
def __init__(self, use_cudnn=True):
self._function = "log_softmax"
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.log_softmax(x, self.use_cudnn)
class maxout(Function):
def __init__(self, pool_size, axis=1):
self._function = "maxout"
self.pool_size = pool_size
self.axis = axis
def __call__(self, x):
return chainer.functions.maxout(x, self.pool_size, self.axis)
class relu(Function):
def __init__(self, use_cudnn=True):
self._function = "relu"
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.relu(x, self.use_cudnn)
class sigmoid(Function):
def __init__(self, use_cudnn=True):
self._function = "sigmoid"
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.sigmoid(x, self.use_cudnn)
class softmax(Function):
    def __init__(self, use_cudnn=True):
        self._function = "softmax"
        self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.softmax(x, self.use_cudnn)
class softplus(Function):
def __init__(self, use_cudnn=True):
self._function = "softplus"
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.softplus(x, self.use_cudnn)
class tanh(Function):
def __init__(self, use_cudnn=True):
self._function = "tanh"
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.tanh(x, self.use_cudnn)
class dropout_comm_test(Function):
def __init__(self, ratio=0.5):
self._function = "dropout_comm_test"
self.ratio = ratio
def __call__(self, x, train=True):
if not train:
return chainer.functions.dropout(x, self.ratio, True)
return x
class dropout_comm_train(Function):
def __init__(self, ratio=0.5):
self._function = "dropout_comm_train"
self.ratio = ratio
def __call__(self, x, train=True):
if train:
return chainer.functions.dropout(x, self.ratio, True)
return x
class dropout(Function):
def __init__(self, ratio=0.5):
self._function = "dropout"
self.ratio = ratio
def __call__(self, x, train=True):
return chainer.functions.dropout(x, self.ratio, train)
class gaussian_noise(Function):
def __init__(self, std=0.3):
self._function = "gaussian_noise"
self.std = std
def __call__(self, x):
xp = cuda.get_array_module(x.data)
ln_var = math.log(self.std ** 2)
noise = chainer.functions.gaussian(Variable(xp.zeros_like(x.data)), Variable(xp.full_like(x.data, ln_var)))
return x + noise
class average_pooling_2d(Function):
def __init__(self, ksize, stride=None, pad=0, use_cudnn=True):
self._function = "average_pooling_2d"
self.ksize = ksize
self.stride = stride
self.pad = pad
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.average_pooling_2d(x, self.ksize, self.stride, self.pad, self.use_cudnn)
class max_pooling_2d(Function):
def __init__(self, ksize, stride=None, pad=0, cover_all=True, use_cudnn=True):
self._function = "max_pooling_2d"
self.ksize = ksize
self.stride = stride
self.pad = pad
self.cover_all = cover_all
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.max_pooling_2d(x, self.ksize, self.stride, self.pad, self.cover_all, self.use_cudnn)
class spatial_pyramid_pooling_2d(Function):
def __init__(self, pyramid_height, pooling_class, use_cudnn=True):
self._function = "spatial_pyramid_pooling_2d"
self.pyramid_height = pyramid_height
self.pooling_class = pooling_class
self.use_cudnn = use_cudnn
def __call__(self, x):
return chainer.functions.spatial_pyramid_pooling_2d(x, self.pyramid_height, self.pooling_class, self.use_cudnn)
class unpooling_2d(Function):
def __init__(self, ksize, stride=None, pad=0, outsize=None, cover_all=True):
self._function = "unpooling_2d"
self.ksize = ksize
self.stride = stride
self.pad = pad
self.outsize = outsize
self.cover_all = cover_all
def __call__(self, x):
return chainer.functions.unpooling_2d(x, self.ksize, self.stride, self.pad, self.outsize, self.cover_all)
class reshape(Function):
def __init__(self, shape):
self._function = "reshape"
self.shape = shape
def __call__(self, x):
return chainer.functions.reshape(x, self.shape)
class reshape_1d(Function):
def __init__(self):
self._function = "reshape_1d"
def __call__(self, x):
batchsize = x.data.shape[0]
return chainer.functions.reshape(x, (batchsize, -1))
class softmax_cross_entropy(Function):
def __init__(self):
self._function = "softmax_cross_entropy"
def __call__(self, x, t):
return chainer.functions.softmax_cross_entropy(x,t)
|
[
"monica43a@gmail.com"
] |
monica43a@gmail.com
|
2ba61dfc1f4009952ba24d1ad2f07b48321ea0be
|
d8004ee845f8d9b883f4ff9ebc28e262700cfba5
|
/Anagram solver.py
|
974b6ff67060b912b8534a332f4ef008bc609d61
|
[] |
no_license
|
Bhavan24/Anagram_solver
|
8b1dc1b5c3ca9102f3eba558c8c9a3d02261c755
|
160a28cbc0b95b7d27e9bc19d701f31035a21809
|
refs/heads/main
| 2023-02-14T21:25:27.313678
| 2021-01-09T13:22:41
| 2021-01-09T13:22:41
| 326,018,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from itertools import permutations

user_input = input('Enter your anagram: ')

# Read the word list once instead of reopening the file for every permutation.
with open("WordList.txt", "r") as a_file:
    words = {line.strip() for line in a_file}

# Building a set of spellings drops duplicate permutations of repeated letters.
for spelling in {''.join(data) for data in permutations(user_input)}:
    if spelling in words:
        print(spelling)
|
[
"noreply@github.com"
] |
Bhavan24.noreply@github.com
|
020c7f5f4602d40cbdfc38168fc2605b321b5420
|
65532d899ee8dde699d176677397d41605822bd3
|
/componentspython/configure_env.py
|
53e0893473a899e554197a7dd07002de0fce8b68
|
[
"Apache-2.0"
] |
permissive
|
Mirantis/mos-components-ci
|
2d07c5460ea9b2f689119f15814fc464c8075441
|
9fbf056ba47a5d278869f8a9c90f4091bd2fc19a
|
refs/heads/master
| 2021-01-10T16:23:54.267826
| 2016-04-18T14:31:30
| 2016-04-18T14:31:30
| 52,099,218
| 1
| 4
|
Apache-2.0
| 2020-02-26T11:57:42
| 2016-02-19T16:04:42
|
Shell
|
UTF-8
|
Python
| false
| false
| 7,247
|
py
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import getpass
import hashlib
import logging
import os
import settings
from sys import argv
import urllib
from glanceclient.client import Client as Glance
from keystoneclient.v2_0 import Client as Keystone
from novaclient.client import Client as Nova
def logger_func():
log_file = os.environ.get("CONFIGURE_ENV_LOG", "configure_env_log.txt")
if log_file.startswith('/'):
logfile = log_file
else:
logfile = os.path.join(os.path.join(os.getcwd()), log_file)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(levelname)s %(filename)s:'
'%(lineno)d -- %(message)s',
filename=logfile,
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s %(filename)s:'
'%(lineno)d -- %(message)s')
console.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(console)
return logger
LOGGER = logger_func()
class Common():
# This script adds Images to glance and configures security groups.
def __init__(self, controller_ip, keystone_proto='http'):
self.controller_ip = controller_ip
self.keystone_proto = keystone_proto
def _get_auth_url(self):
LOGGER.debug('Slave-01 is {0}'.format(self.controller_ip))
return '{0}://{1}:5000/v2.0/'.format(self.keystone_proto,
self.controller_ip)
def goodbye_security(self, pkey_path):
auth_url = self._get_auth_url()
nova = Nova("2", settings.SERVTEST_USERNAME,
settings.SERVTEST_PASSWORD, settings.SERVTEST_TENANT,
auth_url, service_type='compute', no_cache=True,
insecure=True)
LOGGER.info('Permit all TCP and ICMP in security group default')
secgroup = nova.security_groups.find(name='default')
for rule in secgroup.rules:
nova.security_group_rules.delete(rule['id'])
nova.security_group_rules.create(secgroup.id,
ip_protocol='tcp',
from_port=1,
to_port=65535)
nova.security_group_rules.create(secgroup.id,
ip_protocol='udp',
from_port=1,
to_port=65535)
nova.security_group_rules.create(secgroup.id,
ip_protocol='icmp',
from_port=-1,
to_port=-1)
key_name = getpass.getuser()
if not nova.keypairs.findall(name=key_name):
LOGGER.info("Adding keys")
with open(os.path.expanduser(pkey_path)) as fpubkey:
nova.keypairs.create(name=key_name, public_key=fpubkey.read())
try:
nova.flavors.find(name='sahara')
except Exception:
LOGGER.info("Adding sahara flavor")
nova.flavors.create('sahara', 2048, 1, 40)
def check_image(self, url, image, md5,
path=settings.SERVTEST_LOCAL_PATH):
download_url = "{0}/{1}".format(url, image)
local_path = os.path.expanduser("{0}/{1}".format(path, image))
LOGGER.debug('Check md5 {0} of image {1}/{2}'.format(md5, path, image))
if not os.path.isfile(local_path):
urllib.urlretrieve(download_url, local_path)
if md5:
with open(local_path, mode='rb') as fimage:
digits = hashlib.md5()
while True:
buf = fimage.read(4096)
if not buf:
break
digits.update(buf)
md5_local = digits.hexdigest()
if md5_local != md5:
LOGGER.debug('MD5 is not correct, download {0} to {1}'.format(
download_url, local_path))
urllib.urlretrieve(download_url, local_path)
def image_import(self, properties, local_path, image, image_name):
LOGGER.info('Import image {0}/{1} to glance'.format(local_path, image))
auth_url = self._get_auth_url()
LOGGER.debug('Auth URL is {0}'.format(auth_url))
keystone = Keystone(username=settings.SERVTEST_USERNAME,
password=settings.SERVTEST_PASSWORD,
tenant_name=settings.SERVTEST_TENANT,
auth_url=auth_url,
verify=False)
token = keystone.auth_token
LOGGER.debug('Token is {0}'.format(token))
glance_endpoint = keystone.service_catalog.url_for(
service_type='image', endpoint_type='publicURL')
        LOGGER.debug('Glance endpoint is {0}'.format(glance_endpoint))
glance = Glance("2", endpoint=glance_endpoint, token=token,
insecure=True)
LOGGER.debug('Importing {0}'.format(image))
with open(os.path.expanduser('{0}/{1}'.format(local_path,
image))) as fimage:
image = glance.images.create(name=image_name,
disk_format='qcow2',
container_format='bare',
visibility='public',
properties=str(properties))
glance.images.upload(image.id, fimage)
for tag_name, value in properties.iteritems():
glance.image_tags.update(image.id, tag_name)
tag = {tag_name: value}
glance.images.update(image.id, **tag)
def main():
controller = argv[1]
public_key_path = argv[2]
mos_version = argv[3]
keystone_proto = argv[4]
common_func = Common(controller, keystone_proto)
for image_info in settings.images:
if mos_version in image_info['mos_versions']:
LOGGER.debug(image_info)
common_func.check_image(
image_info['url'],
image_info['image'],
image_info['md5sum'])
common_func.image_import(
image_info['meta'],
settings.SERVTEST_LOCAL_PATH,
image_info['image'],
image_info['name'])
common_func.goodbye_security(public_key_path)
LOGGER.info('All done !')
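# Entry point guard (assumed invocation, mirroring the argv order read in main():
#   python configure_env.py <controller_ip> <public_key_path> <mos_version> <keystone_proto>)
if __name__ == '__main__':
    main()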
|
[
"vrovachev@mirantis.com"
] |
vrovachev@mirantis.com
|
41ef6c08789a5dc38f99ad6c193e5ba387c28397
|
a984fa1a01a6b5153483b5abef634c926ffd3065
|
/scripts/Test scripts/test3.py
|
d6050fe765d0f8d8ac23f6f211a5467256dcd662
|
[] |
no_license
|
jkapilivsky/IG---Valeria
|
585ceaa2ea7198ffc128836ed9a07f77b1af0cf5
|
db465f51771ef008c6d96abada388b59729ea557
|
refs/heads/master
| 2022-06-27T12:07:17.863574
| 2018-08-27T03:25:35
| 2018-08-27T03:25:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
def subtract():
    num = 150
    # Count down from 150, printing each value, and exit once we reach 140.
    for x in range(150):
        num -= 1
        print(num)
        if num == 140:
            quit()

subtract()  # the original file defines the function but never calls it
|
[
"jamie.kapilivsky@gmail.com"
] |
jamie.kapilivsky@gmail.com
|
54bb06e4261361caee62ccf3d67c664134ce721a
|
94889e022b2ffd80d17a626b68df597f14a028f7
|
/auto_python4/common/handledata.py
|
79597a5b02ef5313e1cec84a8490ade28946b289
|
[] |
no_license
|
wushengling/AutoTest
|
2059b799374aa7794435d28642246d77580ca018
|
6698f2f275ed4ce47e197d87e9e5a0cda2a8d6a0
|
refs/heads/master
| 2022-07-02T17:58:45.651811
| 2020-04-19T11:15:02
| 2020-04-19T11:15:02
| 188,354,973
| 4
| 0
| null | 2022-06-21T03:15:04
| 2019-05-24T04:59:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
#coding:utf-8
# Workaround for VS Code not resolving the package root on sys.path
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import re
from common.myconfig import conf
class CaseData():
    '''This class stores data extracted while test cases run, for use by later test cases.'''
pass
def replace_data(s):
r1= r"#(.+?)#"
while re.search(r1,s):
res = re.search(r1,s)
data = res.group()
key = res.group(1)
try:
value = conf.get("test_data",key)
except Exception:
value = getattr(CaseData,key)
finally:
s = re.sub(data,value,s,1)
return s
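if __name__ == "__main__":
    # Minimal usage sketch with a hypothetical key: a previous case stored a
    # token on CaseData, and "#token#" placeholders are substituted with it.
    CaseData.token = "abc123"
    print(replace_data("Authorization: Bearer #token#"))  # -> Authorization: Bearer abc123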
|
[
"740776409@qq.com"
] |
740776409@qq.com
|
12edd86424b96b1e5788cbd1593107b8c86e375a
|
e6640746bc6fd5cbe272b764e7744239545b8a94
|
/check.py
|
35e33b7abb58fb7989401b95ed1302b2baea12cb
|
[] |
no_license
|
avoredo/weather-vk-bot
|
014acbe9d5a9175f79bcc8f3f799d2ea9caf4f05
|
373efde40ae2b1e9fea2a81a8b0a1a9ec59ed065
|
refs/heads/master
| 2020-06-05T10:34:08.387628
| 2020-01-08T14:46:35
| 2020-01-08T14:46:35
| 192,410,737
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
# -*- coding: utf-8 -*-
import time
import requests
import ast
import locale

# Prefer Russian month names in strftime output; fall back to the system default.
try:
    locale.setlocale(locale.LC_TIME, 'ru_RU')
except locale.Error:
    locale.setlocale(locale.LC_ALL, '')
def check(place):
url = 'https://meteoinfo.ru/hmc-output/meteoalert/map_fed_data.php'
html = requests.get(url).text
    # The endpoint returns a Python-literal-style dict; parse it safely
    # instead of calling eval() on remote content.
    data = ast.literal_eval(html)
places = {
'орел': '51',
'орёл': '51',
'воронеж': '77',
'курск': '33',
'калуга': '27',
'брянск': '7',
'ярославль': '81',
'смоленск': '60',
'тамбов': '65',
'кострома': '36',
'тверь': '70',
'владимир': '75',
'тула': '68',
'мо': '43',
'московская область': '43',
'белгород': '6',
'рязань': '57',
'липецк': '38',
'иваново': '19',
'москва': '102'
}
area = data[places[place]]
intsn = {
'0': 'Зеленый',
'1': 'Зеленый',
'2': 'Желтый',
'3': 'Оранжевый',
'4': 'Красный'
}
areas = {
'белгород': 'Белгородская область',
'брянск': 'Брянская область',
'владимир': 'Владимирская область',
'воронеж': 'Воронежская область',
'иваново': 'Ивановская область',
'калуга': 'Калужская область',
'кострома': 'Костромская область',
'курск': 'Курская область',
'липецк': 'Липецкая область',
'мо': 'Московская область',
'московская область': 'Московская область',
'орел': 'Орловская область',
'орёл': 'Орловская область',
'рязань': 'Рязанская область',
'смоленск': 'Смоленская область',
'тамбов': 'Тамбовская область',
'тверь': 'Тверская область',
'тула': 'Тульская область',
'ярославль': 'Ярославская область',
'москва': 'Москва'
}
alert = {
'Гроза': '⛈️',
'Дождь': '☔',
'Ветер': '💨',
'Заморозки': '➖',
'Туман': '🌫️',
'Очень низкая температура': '🌡️🔻',
'Очень высокая температура': '🌡️🔺',
'Высокая температура': '🌡️🔺',
'сильная жара': '🌡️🔺',
'Высокая пожароопасность': '🔥',
'Пожарная опасность': '🔥',
'Паводок': '💧',
'Пыльная (песчаная) буря': '🌪️',
'Прочие опасности': '❗',
'Снег/Обледенение': '❄️',
'Прибрежные события': '🏖️',
'Лавины': '⛰️',
'Сель': '⛰️',
'Наводнение': '🌊',
'Гололедно - изморозевое отложение': '⛸️',
'Оповещения о погоде не требуется': ''
}
    length = len(area)
text = []
for k in range(0, length):
weather = area[str(k)]["3"]
intensity = str(area[str(k)]['2'])[0]
start_time = time.strftime('%H:%M %d %B', time.localtime(int(area[str(k)]['0'])))
end_time = time.strftime('%H:%M %d %B', time.localtime(int(area[str(k)]['1'])))
if int(time.strftime('%H', time.localtime(int(area[str(k)]['0'])))) == int(time.strftime('%H', time.localtime())):
start_from = ''
else:
start_from = ' c ' + str(start_time)
r = area[str(k)]['4']
if r == '':
remark = 'Уточнений нет'
else:
remark = r
text.append('🗺️ Регион: ' + areas[str(place)] + '\n' + '⚠️ Оповещение: ' + weather + alert[weather] + '\n' + '🕑 Период предупреждения — ' + start_from + ' до ' + end_time + '\n' + '📝 Уточнения: ' + remark + '\n' + '❗️ Уровень: ' + intsn[intensity] + '\n')
return '\n\n'.join(text)
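if __name__ == '__main__':
    # Quick manual check (assumes network access to meteoinfo.ru):
    print(check('москва'))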
|
[
"georgybombelo@gmail.com"
] |
georgybombelo@gmail.com
|
d409d9b0d081d9962a79d6d88693067405aab5b0
|
b5ee0c8f0dfc58b2065b361dbc5d530ec9ae9981
|
/joblog/__init__.py
|
e44fe10ebef26bed2486e358fa706c2bd35bc0e5
|
[] |
no_license
|
Esmaeili/joblog
|
523152eaacbed446231f332b947652447b402bde
|
45614d05872f28166ef6618b7cc4610d38e60b23
|
refs/heads/master
| 2020-12-27T10:13:37.990310
| 2013-01-31T18:18:17
| 2013-01-31T18:18:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
from .joblog import Job, JobFactory
|
[
"beaumont@hawaii.edu"
] |
beaumont@hawaii.edu
|
d6d6f8eda2c0111a45ed400d96dc7173bcd437cc
|
37508e5ea95f5404e7afc073a64a8007367254f0
|
/apps/organization/migrations/0001_initial.py
|
27b0c7185a6ba4b08f6ae75923e837d314ae7970
|
[] |
no_license
|
Snow670/EDonline
|
5ea6e96f05c5f406856cc791cff3ebcccf8b504a
|
1da1c546f4a880621d9ec1a3c6139f3d76f030f6
|
refs/heads/master
| 2022-11-06T04:15:39.345033
| 2020-06-18T13:01:40
| 2020-06-18T13:01:40
| 271,508,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,191
|
py
|
# Generated by Django 3.0.7 on 2020-06-10 01:51
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CityDict',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='城市')),
('desc', models.CharField(max_length=200, verbose_name='描述')),
('add_time', models.DateTimeField(default=datetime.datetime.now)),
],
options={
'verbose_name': '城市',
'verbose_name_plural': '城市',
},
),
migrations.CreateModel(
name='CourseOrg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='机构名称')),
('desc', models.TextField(verbose_name='机构描述')),
('click_nums', models.IntegerField(default=0, verbose_name='点击数')),
('fav_nums', models.IntegerField(default=0, verbose_name='收藏数')),
('image', models.ImageField(upload_to='org/%Y%m', verbose_name='封面图')),
('address', models.CharField(max_length=150, verbose_name='机构地址')),
('add_time', models.DateTimeField(default=datetime.datetime.now)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.CityDict', verbose_name='所在城市')),
],
options={
'verbose_name': '课程机构',
'verbose_name_plural': '课程机构',
},
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='教师名')),
('work_years', models.IntegerField(default=0, verbose_name='工作年限')),
('work_company', models.CharField(max_length=50, verbose_name='就职公司')),
('work_position', models.CharField(max_length=50, verbose_name='公司职位')),
('points', models.CharField(max_length=50, verbose_name='教学特点')),
('click_nums', models.IntegerField(default=0, verbose_name='点击数')),
('fav_nums', models.IntegerField(default=0, verbose_name='收藏数')),
('add_time', models.DateTimeField(default=datetime.datetime.now)),
('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.CourseOrg', verbose_name='所属机构')),
],
options={
'verbose_name': '教师',
'verbose_name_plural': '教师',
},
),
]
|
[
"1419517126@qq.com"
] |
1419517126@qq.com
|
10ded9deb1efd0dc17c9e96152f66b7aec0396c1
|
eaceab983a69a3394b41c8de538ea224651d83cc
|
/UMOD_PAPER/src/PED2HTML.py
|
2fa40a949a0c1d5435f42a27e7389ee5598f541b
|
[] |
no_license
|
wavefancy/CircularPedigreeTree
|
01e70bf6a74a457e22dee330384da119a2a0d1b4
|
783283b49467aff156227e02e4aab5f5906783a6
|
refs/heads/master
| 2020-06-01T13:05:32.740511
| 2020-04-07T07:36:06
| 2020-04-07T07:36:06
| 190,789,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,234
|
py
|
#!/usr/bin/env python3
"""
Convert ped file to newick format.
@Author: wavefancy@gmail.com
Usage:
PED2HTML.py [--html base_file] [-d int] [--notext] [-c color] [--sweep txt] [--degree int]
PED2HTML.py -h | --help | -v | --version | -f | --format
Notes:
    1. Read a single-family ped file from stdin, and output results to stdout.
2. See example by -f.
Options:
--html base_file html template file, default base.html
-d int distance between mating partner, default 2000.
-c color Set the line color, default #05668D.
--sweep txt Target node name for sweep arc, e.g. 115,117.
--notext Do not show text on the figure, default show.
--degree int Set the layout circular degree, default 360.
-h --help Show this screen.
-v --version Show version.
-f --format Show input/output file format example.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
def ShowFormat():
'''Input File format example:'''
print('''
#input example
------------------------
c1 1
c2 2
c3 5
''');
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
# print(args)
if(args['--format']):
ShowFormat()
sys.exit(-1)
basehtml = args['--html'] if args['--html'] else 'base.html'
from ete3 import Tree,TreeNode
#read ped file from stdin.
ped_data = {} #map for name -> raw data.
node_data = {} #map for name -> TreeNode
for line in sys.stdin:
line = line.strip()
if line and line[0] != '#': #skip comment line.
ss = line.split()
ped_data[ss[1]] = ss
n = TreeNode(name=ss[1])
n.add_feature('raw',ss)
node_data[ss[1]] = n
# for k,v in node_data.items():
# print(v.write(format=2,features=['raw']))
#find the root node, and convert results to josn.
#Check data integrity.
m_error = False
for _, data in ped_data.items():
if data[2] != '0' and data[2] not in ped_data.keys():
m_error = True
            sys.stderr.write('ERROR: missing declaration for father: %s\n' % (data[2]))
if data[3] != '0' and data[3] not in ped_data.keys():
m_error = True
            sys.stderr.write('ERROR: missing declaration for mother: %s\n' % (data[3]))
if m_error:
sys.exit(-1)
T = Tree()
# def checkAddNode(name):
# if name != '0' and name not in NodeMap:
# NodeMap[name] = Node(name)
for name,data in ped_data.items():
#set node children.
[node_data[x].add_child(child=node_data[data[1]]) for x in data[2:4] if x != '0']
#set mating info.
if data[2] != '0' and data[3] != '0':
node_data[data[2]].add_feature('mate',node_data[data[3]].raw)
node_data[data[3]].add_feature('mate',node_data[data[2]].raw)
elif data[2] == '0' and data[3] == '0':
pass
else:
sys.stderr.write('ERROR: Please set full parent info. Error at: %s\n'%('\t'.join(data)))
sys.exit(-1)
# T.add_child(child=node_data['f1'])
# print(T.write(format=1))
# for k,v in node_data.items():
# print(v.name)
# print(v.write(format=2,features=['name','mate']))
root = ''
for name,data in ped_data.items():
if data[2] == '0' and data[3] == '0':
# mateName = node_data[name]
if 'mate' in node_data[name].features:
mdata = node_data[name].mate
# print(mdata)
                if mdata[2] == '0' and mdata[3] == '0':
                    # print("ROOT NAME:" + name)
                    # Indeed there are two roots; we arbitrarily pick one of them.
                    root = node_data[name]
                    break
# print(root)
# update node name for output.
for k,v in node_data.items():
n = '_'.join(v.raw)
if 'mate' in v.features:
n = n + '||' + '_'.join(v.mate)
v.name = n
T.add_child(root)
treeData = T.write(format=1)[:-1] + 'root||root:1;'
# print(out)
ss = ''
if args['--sweep']:
temp = args['--sweep'].split(',')
ss = 'SWEEP_ARC_NODE=new Set([' + str(temp)[1:-1] +'])'
with open(basehtml,'r') as bf:
for line in bf:
line = line.replace('__treeData__',treeData)
if args['-d']:
line = line.replace('DISTANCE_PARTNER=2000','DISTANCE_PARTNER='+args['-d'])
if args['--notext']:
line = line.replace('SHOW_TEXT=true','SHOW_TEXT=false')
if args['-c']:
line = line.replace('#05668D',args['-c'])
if args['--sweep']:
# ss = 'SWEEP_ARC_NODE=new Set([' + args['--sweep']+'])'
line = line.replace('SWEEP_ARC_NODE=new Set()',ss)
if args['--degree']:
line = line.replace('LAYOUT_DEGREE=360','LAYOUT_DEGREE='+args['--degree'])
sys.stdout.write('%s'%(line))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
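# Example invocation (hypothetical file names, matching the docopt usage above):
#   cat family.ped | ./PED2HTML.py --html base.html -d 1500 --degree 300 > pedigree.html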
|
[
"wavefancy@gmail.com"
] |
wavefancy@gmail.com
|
5afeb293e7de68b98386f7002a1bca0ce280583c
|
028e1f1544573e9dc85a7f267257085a076305c1
|
/models/base_res101/model.py
|
27b6230ad4ab7066b5be01a62118213a385c21f4
|
[
"MIT"
] |
permissive
|
cadkins052/tab-vcr
|
e5333b05c7a1afbdf81a9f482b2980f535f5b332
|
ea713a6ef7ca54eb3123d8729dfc26dc604644c5
|
refs/heads/master
| 2020-11-26T19:27:21.267397
| 2019-12-24T03:11:39
| 2019-12-24T03:11:39
| 229,185,220
| 0
| 0
|
MIT
| 2019-12-20T03:46:05
| 2019-12-20T03:46:04
| null |
UTF-8
|
Python
| false
| false
| 10,110
|
py
|
from typing import Dict, List, Any
import torch
import torch.nn as nn
from torchvision.models import resnet
from torch.nn.modules import BatchNorm2d,BatchNorm1d
from utils.pytorch_misc import Flattener
import torch.nn.functional as F
import torch.nn.parallel
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, FeedForward, InputVariationalDropout, TimeDistributed
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.modules.matrix_attention import BilinearMatrixAttention
from utils.detector_101 import SimpleDetector
from allennlp.nn.util import masked_softmax, weighted_sum, replace_masked_values
from allennlp.nn import InitializerApplicator
@Model.register("LSTMBatchNormFreezeDetGlobalFullRes101NoFinalImage")
class LSTMBatchNormFreezeDetGlobalFullRes101NoFinalImage(Model):
def __init__(self,
vocab: Vocabulary,
option_encoder: Seq2SeqEncoder,
input_dropout: float = 0.3,
initializer: InitializerApplicator = InitializerApplicator(),
):
super(LSTMBatchNormFreezeDetGlobalFullRes101NoFinalImage, self).__init__(vocab)
self.rnn_input_dropout = TimeDistributed(InputVariationalDropout(input_dropout)) if input_dropout > 0 else None
self.detector = SimpleDetector(pretrained=True, average_pool=True, semantic=False, final_dim=512)
# freeze everything related to conv net
for submodule in self.detector.backbone.modules():
# if isinstance(submodule, BatchNorm2d):
# submodule.track_running_stats = False
for p in submodule.parameters():
p.requires_grad = False
for submodule in self.detector.after_roi_align.modules():
# if isinstance(submodule, BatchNorm2d):
# submodule.track_running_stats = False
for p in submodule.parameters():
p.requires_grad = False
self.image_BN = BatchNorm1d(512)
self.option_encoder = TimeDistributed(option_encoder)
self.option_BN = torch.nn.Sequential(
BatchNorm1d(512)
)
self.query_BN = torch.nn.Sequential(
BatchNorm1d(512)
)
self.final_mlp = torch.nn.Sequential(
torch.nn.Linear(1024, 512),
torch.nn.ReLU(inplace=True),
)
self.final_BN = torch.nn.Sequential(
BatchNorm1d(512)
)
self.final_mlp_linear = torch.nn.Sequential(
torch.nn.Linear(512,1)
)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
    # receives redundant parameters for convenience
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
def embed_span(self, span, span_tags, span_mask, object_reps):
"""
:param span: Thing that will get embed and turned into [batch_size, ..leading_dims.., L, word_dim]
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :param span_mask: [batch_size, ..leading_dims.., L]
:return:
"""
retrieved_feats = self._collect_obj_reps(span_tags, object_reps)
span_rep = torch.cat((span['bert'], retrieved_feats), -1)
# add recurrent dropout here
if self.rnn_input_dropout:
span_rep = self.rnn_input_dropout(span_rep)
return span_rep, retrieved_feats
def forward(self,
images: torch.Tensor,
objects: torch.LongTensor,
segms: torch.Tensor,
boxes: torch.Tensor,
box_mask: torch.LongTensor,
question: Dict[str, torch.Tensor],
question_tags: torch.LongTensor,
question_mask: torch.LongTensor,
answers: Dict[str, torch.Tensor],
answer_tags: torch.LongTensor,
answer_mask: torch.LongTensor,
metadata: List[Dict[str, Any]] = None,
label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
"""
:param images: [batch_size, 3, im_height, im_width]
:param objects: [batch_size, max_num_objects] Padded objects
:param boxes: [batch_size, max_num_objects, 4] Padded boxes
:param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
:param question: AllenNLP representation of the question. [batch_size, num_answers, seq_length]
:param question_tags: A detection label for each item in the Q [batch_size, num_answers, seq_length]
:param question_mask: Mask for the Q [batch_size, num_answers, seq_length]
:param answers: AllenNLP representation of the answer. [batch_size, num_answers, seq_length]
:param answer_tags: A detection label for each item in the A [batch_size, num_answers, seq_length]
:param answer_mask: Mask for the As [batch_size, num_answers, seq_length]
:param metadata: Ignore, this is about which dataset item we're on
:param label: Optional, which item is valid
        :return: dict with label logits and probabilities (and loss when label is given)
"""
        # Trim off padded boxes that are longer than needed; DataParallel
        # pads with more zeros than necessary.
max_len = int(box_mask.sum(1).max().item())
objects = objects[:, :max_len]
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
segms = segms[:, :max_len]
obj_reps = self.detector(images=images, boxes=boxes, box_mask=box_mask, classes=objects, segms=segms)
# option part
batch_size, num_options, padded_seq_len, _ = answers['bert'].shape
options, option_obj_reps = self.embed_span(answers, answer_tags, answer_mask, obj_reps['obj_reps'])
assert (options.shape == (batch_size, num_options, padded_seq_len, 1280))
option_rep = self.option_encoder(options, answer_mask) # (batch_size, 4, seq_len, emb_len(512))
option_rep = replace_masked_values(option_rep, answer_mask[...,None], 0)
seq_real_length = torch.sum(answer_mask, dim=-1, dtype=torch.float) # (batch_size, 4)
seq_real_length = seq_real_length.view(-1,1) # (batch_size * 4,1)
option_rep = option_rep.sum(dim=2) # (batch_size, 4, emb_len(512))
option_rep = option_rep.view(batch_size * num_options,512) # (batch_size * 4, emb_len(512))
option_rep = option_rep.div(seq_real_length) # (batch_size * 4, emb_len(512))
option_rep = self.option_BN(option_rep)
option_rep = option_rep.view(batch_size, num_options, 512) # (batch_size, 4, emb_len(512))
# query part
batch_size, num_options, padded_seq_len, _ = question['bert'].shape
query, query_obj_reps = self.embed_span(question, question_tags, question_mask, obj_reps['obj_reps'])
assert (query.shape == (batch_size, num_options, padded_seq_len, 1280))
query_rep = self.option_encoder(query, question_mask) # (batch_size, 4, seq_len, emb_len(512))
query_rep = replace_masked_values(query_rep, question_mask[...,None], 0)
seq_real_length = torch.sum(question_mask, dim=-1, dtype=torch.float) # (batch_size, 4)
seq_real_length = seq_real_length.view(-1,1) # (batch_size * 4,1)
query_rep = query_rep.sum(dim=2) # (batch_size, 4, emb_len(512))
query_rep = query_rep.view(batch_size * num_options,512) # (batch_size * 4, emb_len(512))
query_rep = query_rep.div(seq_real_length) # (batch_size * 4, emb_len(512))
query_rep = self.query_BN(query_rep)
query_rep = query_rep.view(batch_size, num_options, 512) # (batch_size, 4, emb_len(512))
# image part
# assert (obj_reps['obj_reps'][:,0,:].shape == (batch_size, 512))
# images = obj_reps['obj_reps'][:,0,:] # the background i.e. whole image
# images = self.image_BN(images)
# images = images[:,None,:]
# images = images.repeat(1,4,1) # (batch_size, 4, 512)
# assert (images.shape == (batch_size, num_options,512))
query_option_image_cat = torch.cat((option_rep,query_rep),-1)
assert (query_option_image_cat.shape == (batch_size,num_options, 512*2))
query_option_image_cat = self.final_mlp(query_option_image_cat)
query_option_image_cat = query_option_image_cat.view(batch_size*num_options,512)
query_option_image_cat = self.final_BN(query_option_image_cat)
query_option_image_cat = query_option_image_cat.view(batch_size,num_options,512)
logits = self.final_mlp_linear(query_option_image_cat)
logits = logits.squeeze(2)
class_probabilities = F.softmax(logits, dim=-1)
output_dict = {"label_logits": logits, "label_probs": class_probabilities}
if label is not None:
loss = self._loss(logits, label.long().view(-1))
self._accuracy(logits, label)
output_dict["loss"] = loss[None]
# print ('one pass')
return output_dict
def get_metrics(self,reset=False):
return {'accuracy': self._accuracy.get_metric(reset)}
|
[
"deanplayerljx@gmail.com"
] |
deanplayerljx@gmail.com
|
7651f52a6e70bf69a76a880ee086f1b872405b0a
|
8a8c9517e0107802c5abfea72f8fe78d73569879
|
/Books/forms.py
|
2b1698a52ae85f2c0b3cbd744a5e8b185c2ecfa7
|
[] |
no_license
|
SrivastavaRishabh/Projects
|
b4d2b841cf6051e9def55bbf123b70f677e06938
|
e768e43a3f9c31d48370629f0c97c8249f9619ce
|
refs/heads/master
| 2020-03-24T21:44:24.220022
| 2018-08-13T13:43:39
| 2018-08-13T13:43:39
| 143,047,422
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
from django import forms
from .models import Books
class EntryForm(forms.ModelForm):
class Meta:
model = Books
fields = ('name', 'isbn', 'pages', 'image', 'description', 'genre',
'publisher', 'authors', 'pubdate')
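# Hypothetical usage sketch in a view (names assumed, not from this repo):
#   form = EntryForm(request.POST, request.FILES)
#   if form.is_valid():
#       form.save()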
|
[
"rishabh@testpress.in"
] |
rishabh@testpress.in
|
5f2721ffbb6a6b15822f2107e3fc1814431d1975
|
a969f4d87360010bb0ae7fff1373bb0b92e2b21a
|
/badger/models.py
|
b6487c6f80ff849f8e103af68ab27c0771bd2b24
|
[] |
no_license
|
philratcliffe/django_badger
|
b059e52025930696352020a318dc1a7100a47193
|
1f136741f391c918ed75862373e4a858e63d2f40
|
refs/heads/master
| 2020-04-05T04:09:23.694598
| 2019-01-23T13:14:14
| 2019-01-23T13:14:14
| 156,539,879
| 0
| 0
| null | 2018-11-23T10:18:57
| 2018-11-07T11:59:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
import itertools
from django.template.defaultfilters import slugify
from django.conf import settings
from django.db import models
from django.urls import reverse
from model_utils.models import TimeStampedModel
from .validators import validate_employee_name
class Badge(TimeStampedModel):
name = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
    def save(self, *args, **kwargs):
        base_slug = slugify(self.name)
        slug = base_slug
        # Append -1, -2, ... until the slug is unique among Badges. The original
        # checked Employee here, which looks like a copy-paste bug, and also
        # compounded suffixes (slug-1-2) instead of replacing them.
        for x in itertools.count(1):
            if not Badge.objects.filter(slug=slug).exists():
                break
            slug = '%s-%d' % (base_slug, x)
        self.slug = slug
        super(Badge, self).save(*args, **kwargs)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('badger:badge_detail', args=[self.slug])
class Meta:
ordering = ["name"]
class BadgeAwarded(TimeStampedModel):
badge = models.ForeignKey(Badge, on_delete=models.CASCADE)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='badges_awarded',
)
def __str__(self):
return self.badge.name
class Meta:
verbose_name_plural = "BadgesAwarded"
# A Badge can only be awarded once to a user
unique_together = ('user', 'badge')
class Employee(TimeStampedModel):
first_name = models.CharField(
max_length=30, validators=[validate_employee_name])
last_name = models.CharField(
max_length=30, validators=[validate_employee_name])
badges = models.ManyToManyField(Badge, blank=True)
slug = models.SlugField(unique=True)
    def save(self, *args, **kwargs):
        base_slug = slugify("{} {}".format(self.first_name, self.last_name))
        slug = base_slug
        # Same de-duplication as Badge.save, without compounding suffixes.
        for x in itertools.count(1):
            if not Employee.objects.filter(slug=slug).exists():
                break
            slug = '%s-%d' % (base_slug, x)
        self.slug = slug
        super(Employee, self).save(*args, **kwargs)
def __str__(self):
return "{} {}".format(self.first_name, self.last_name)
def get_absolute_url(self):
return reverse('badger:employee_detail', args=[self.slug])
class Meta:
ordering = ["last_name"]
|
[
"phil@philratcliffe.co.uk"
] |
phil@philratcliffe.co.uk
|
4019e75fe7c301209c534516e97c8758f8c51c65
|
efad856f87ce545e640633112c094363e03d98a1
|
/venv/bin/easy_install-3.7
|
dffc980f922be2122df9a0f4404b113c74031daf
|
[] |
no_license
|
Gabriel-Tales/PayBot_Whatts_App
|
1e18f83b7726a9824d2b5aa22a1bb30784a78f05
|
23a052f41c94a2ec578090547d79a5e1eceecb65
|
refs/heads/master
| 2022-12-01T16:23:08.766578
| 2019-05-30T15:32:20
| 2019-05-30T15:32:20
| 189,235,859
| 0
| 0
| null | 2022-11-22T03:50:43
| 2019-05-29T13:55:35
|
Python
|
UTF-8
|
Python
| false
| false
| 262
|
7
|
#!/root/Projetos/python/zipzop/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gabrieltales.pinho@gmail.com"
] |
gabrieltales.pinho@gmail.com
|
9f1d0915a82bbebeff6a08afaad893423aae0abf
|
84d1d0e86f85ff945f65c9ce83c6f75477c52022
|
/sample.py
|
baa1010924b1e3d717a7e3a544700b18c5585e11
|
[] |
no_license
|
kodamitsuyoshi/tradingbot
|
d761128f9a7cb87e4e920eab72fb18733b91799e
|
8933560f50f1592a4db36756f2cb9cdf82cbc2b7
|
refs/heads/master
| 2020-03-10T17:40:22.444458
| 2018-05-08T17:47:00
| 2018-05-08T17:47:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,722
|
py
|
from bitmexwebsocket import BitmexWebsocket
import sys
import time
import numpy as np
import pandas as pd
from datetime import datetime
from notify import Line_Notify

def date_from_timestamp(s=None):
    if s is None:
        s = round(time.time())
    # Convert a unix timestamp to a JST (UTC+9) date string.
    return str(datetime.fromtimestamp(s + 60 * 60 * 9))
if __name__ == '__main__':
LINE_TOKEN=sys.argv[1]
start=time.time()
bs =BitmexWebsocket()
#bs=BitMEXWebsocket(endpoint="https://testnet.bitmex.com/api/v1", symbol="XBTUSD", api_key=None, api_secret=None)
#bs=BitMEXWebsocket(endpoint="https://www.bitmex.com/api/v1", symbol="XBTUSD", api_key=None, api_secret=None)
#bs._get_url()
same_count_index = {"Sell":0,"Buy":0}
b_buy =(0,0)
b_sell=(0,0)
position = 0 # -1 0 1
target=5
sleep_num=1
print("loop")
    while bs.ws.sock.connected:  # run while the socket stays connected (the original iterated over a bool)
#print("test")
md=bs.market_depth()
bsres = md[md.side=='Sell' ].sort_values(by=["price","size"])
bbres = md[md.side=='Buy'].sort_values(by=["price","size"])
sell_min = bsres.price.min()
sell_size = bsres[bsres.price==sell_min]["size"].values[0]
buy_max = bbres.price.max()
buy_size = bbres[bbres.price==buy_max]["size"].values[0]
ticker = bs.get_ticker()
c_sell=(sell_min,sell_size)
c_buy =(buy_max,buy_size)
#print( c_sell,c_buy ,ticker["last"] )
print ("CURRENT:",ticker["last"] ,"POSSTION:",position)
time.sleep(sleep_num)
if (b_buy == c_buy):
same_count_index["Buy"] += 1
else:
same_count_index["Buy"] = 0
if(b_sell == c_sell ):
same_count_index["Sell"] += 1
else:
same_count_index["Sell"] = 0
if (position !=-1 and same_count_index["Buy"]>target and same_count_index["Sell"]<target):
position = -1
print("SELL")
#bs.reset_market_depth()
if (position !=1 and same_count_index["Sell"]>target and same_count_index["Buy"]<target):
position = 1
print("BUY")
#bs.reset_market_depth()
if (same_count_index["Sell"]>target and same_count_index["Buy"]>target):
if (position==1):
print("BUY CLOSE")
position =0
elif(position==-1):
print("SELL CLOSE")
position =0
same_count_index = {"Sell":0,"Buy":0}
bs.reset_market_depth()
print("RESET!!")
b_sell = c_sell
b_buy = c_buy
print(same_count_index)
bs.exit()
print(time.time()-start)
|
[
"3nan.mkoda@gmail.com"
] |
3nan.mkoda@gmail.com
|
a38915a88964170726d29431be29c2924d72c2cd
|
4f8b0cb6779752f3c0b61c0e403f011f20e16a5e
|
/venv/Scripts/easy_install-3.7-script.py
|
4e766e84754e05f97bc5c469c4542cee44f68c02
|
[] |
no_license
|
FredoCeroAnon/TallerGitHub
|
c8086d45f425ffd73a3cb3c0e658b4abe3435824
|
d5e3c2439ee306b4d140134ace9b60174750c01b
|
refs/heads/master
| 2020-05-14T11:32:46.442694
| 2019-04-16T23:12:09
| 2019-04-16T23:12:09
| 181,779,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
#!C:\Users\LUISALFREDO\PycharmProjects\TallerGitHub\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"luisalfredo9905@gmail.com"
] |
luisalfredo9905@gmail.com
|
bb93cb7b1fe4a984fbc93ec52a9b316230cc8390
|
cb9bc5926279b2396c8b5489171fd0f43b5eaa50
|
/scripts/py-callgraph
|
0f2334b72285c514fcddb5f4c21c597db73321df
|
[
"MIT"
] |
permissive
|
roy2220/systemtap-python-tools
|
008e1e615de325091896fa301fe196f25f7007f5
|
2bb850f4ce70cb9175c2dbeb0752755b0faa9bb7
|
refs/heads/master
| 2021-05-04T08:49:48.730066
| 2016-09-29T22:15:11
| 2016-09-29T22:15:11
| 70,374,292
| 0
| 0
| null | 2016-10-09T03:31:26
| 2016-10-09T03:31:25
| null |
UTF-8
|
Python
| false
| false
| 1,677
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import subprocess
import sys
from common import abspath, build_stap_args, gen_tapset_macros
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('-x', '--pid', help='PID to profile', required=True)
argparser.add_argument('-n', '--ncalls',
help='Number of times to record function execution',
default='20')
argparser.add_argument('-t', '--trigger', help='Trigger function',
required=True)
argparser.add_argument('--py3', action='store_true',
help='Pass when profiling Python 3 programs')
argparser.add_argument('-v', '--verbose', action='store_true')
args, extra_args = argparser.parse_known_args()
main_pid = str(args.pid)
if args.py3:
tapset_dir = abspath('../tapset/python3')
else:
tapset_dir = abspath('../tapset/python2')
gen_tapset_macros(main_pid, tapset_dir)
stap_cmd = ['stap', abspath('py-callgraph.stp'), args.trigger, args.ncalls,
'-I', tapset_dir]
stap_cmd.extend(build_stap_args(main_pid))
    limits = ['-D', 'MAXSTRINGLEN=4096', '-D', 'MAXBACKTRACE=200',
              '-D', 'MAXMAPENTRIES=10240']
stap_cmd.extend(limits)
stap_cmd.extend(extra_args)
if args.verbose:
print(" ".join(stap_cmd))
p = subprocess.Popen(stap_cmd)
p.wait()
if p.returncode != 0:
print("Error running stap script (exit code {}). "
"You may need to pass --py3.".format(p.returncode), file=sys.stderr)
if __name__ == '__main__':
main()
|
[
"freemaneben@gmail.com"
] |
freemaneben@gmail.com
|
|
e73a9385df2d135e22c411c5d7930ecc8f37b31d
|
50ac1b24ecab60da963f143dad2018c0f82301d1
|
/urls.py
|
5553a1c61fcd500f1933b856105d20a50446b7f3
|
[] |
no_license
|
odero/django_oauth
|
bc70dfb5c3e157cd46a77755ee640acd41a20b05
|
d2455f6f67b9ed96d91c8db76e44704cf7e7429c
|
refs/heads/master
| 2020-06-04T16:51:03.866400
| 2011-06-09T19:08:53
| 2011-06-09T19:08:53
| 1,870,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
(r'^admin/', include(admin.site.urls)),
(r'^login/$', 'django.contrib.auth.views.login', {'template_name':'admin/login.html'}),
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
urlpatterns += patterns(
'django_oauth.server.views',
# Example:
# (r'^oa_server/', include('oa_server.foo.urls')),
(r'^oauth/request_token/$', 'request_token'),
(r'^oauth/authorize/$', 'authorize'),
(r'^oauth/access_token/$', 'access_token'),
(r'^oauth/resource/$', 'get_resource'),
(r'^api/register/$', 'register'),
(r'^api/applications/$', 'applications'),
(r'^api/logout/$', 'logout'),
)
|
[
"billyx5@gmail.com"
] |
billyx5@gmail.com
|
1258569d923b14536a0ab54b19113ff8c54e5152
|
2555654319106963d7d833dacf2c870f073f2950
|
/serwer/rest_srv/serializers.py
|
0f1247b94d26fbfac4acc744fdc8f5872d4fa13e
|
[] |
no_license
|
LuzikArbuzik/Server-Python
|
0462c40b68bb5955f5ec750934f88261abbcc860
|
594650c3c525278837567ae01d7bfdcdb7e85c91
|
refs/heads/master
| 2021-01-21T20:19:11.638397
| 2017-05-23T21:37:13
| 2017-05-23T21:37:13
| 92,220,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,118
|
py
|
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_srv.models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email')
class OrderSerializer(serializers.HyperlinkedModelSerializer):
    # NOTE: shadowed by the second OrderSerializer defined further down;
    # only the later definition is in effect after import.
    class Meta:
        model = Order
        fields = ('url', 'restaurant_name')
class AddressSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Address
fields = ('url', 'id', 'city', 'street', 'address_num', 'door_num')
class ClientSerializer(serializers.HyperlinkedModelSerializer):
address = AddressSerializer(many=False, read_only=False)
class Meta:
model = Client
fields = ('url', 'id', 'first_name', 'last_name', 'phone_number', 'address')
def create(self, validated_data):
address_data = validated_data.pop('address')
address = None
if Address.objects.filter(**address_data).exists():
address = Address.objects.get(**address_data)
else:
address = Address.objects.create(**address_data)
address.save()
client = Client.objects.create(address=address, **validated_data)
return client
class DishSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Dish
fields = ('url', 'name', 'quantity')
class RestaurantSerializer(serializers.HyperlinkedModelSerializer):
address = AddressSerializer(many=False, read_only=False)
class Meta:
model = Restaurant
fields = ('url', 'address', 'name')
def create(self, validated_data):
address_data = validated_data.pop('address')
address, acreated = Address.objects.get_or_create(**address_data)
if acreated:
address.save()
restaurant = Restaurant.objects.create(address=address, **validated_data)
return restaurant
class OrderSerializer(serializers.ModelSerializer):
dishes = DishSerializer(many=True)
client = ClientSerializer(many=False, read_only=False)
restaurant = RestaurantSerializer(many=False, read_only=False)
class Meta:
model = Order
fields = ('url', 'dishes', 'client', 'restaurant')
def create(self, validated_data):
dishes = []
restaurant_data = {}
restaurant_addr_data = {}
client_data = {}
client_addr_data = {}
if 'dishes' in validated_data:
dishes = validated_data.pop('dishes')
if 'restaurant' in validated_data:
restaurant_data = validated_data.pop('restaurant')
if 'address' in restaurant_data:
restaurant_addr_data = restaurant_data.pop('address')
if 'client' in validated_data:
client_data = validated_data.pop('client')
if 'address' in client_data:
client_addr_data = client_data.pop('address')
restaurant_addr = Address.objects.create(**restaurant_addr_data)
restaurant_addr.save()
restaurant = Restaurant.objects.create(**restaurant_data, address=restaurant_addr)
restaurant.save()
client_addr = Address.objects.create(**client_addr_data)
client_addr.save()
client = Client.objects.create(**client_data, address=client_addr)
client.save()
order = Order.objects.create(**validated_data, client=client, restaurant=restaurant)
order.save()
# restaurant = Restaurant.objects.create(**)
# client = Client()
for dish in dishes:
d, created = Dish.objects.get_or_create(order=order, **dish)
if created is True:
d.save()
return order
# Creating a restaurant -> example
# Send this via POST to 127.0.0.1:8000/restaurants/: {"menu": {"dishes": [{"name": "burak"}]}, "address": {"city": "warsaw"}}
# This creates a restaurant in the database whose menu holds a list of dishes, plus an address object;
# in other words, records are created in 4 tables: restaurant, dishes, menu, address
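# Minimal request sketch for the example above (a sketch assuming the dev
# server runs locally; field names taken from RestaurantSerializer, values invented):
#
#   import requests
#   requests.post('http://127.0.0.1:8000/restaurants/',
#                 json={'name': 'Burak Bar',
#                       'address': {'city': 'warsaw', 'street': 'Prosta',
#                                   'address_num': '1', 'door_num': '2'}})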
|
[
"jablonski.bartosz93@gmail.com"
] |
jablonski.bartosz93@gmail.com
|
16f30bf6fd6afc00cb0ded311baf89c145a1e50c
|
bc67fe1a95800fc008e168cf72b67830dd0d8ad4
|
/scripts/train_gold.py
|
13e64adff9a28b9f0a0ede53fefa8dd6eb7ef268
|
[
"MIT"
] |
permissive
|
Ekeany/Dawid-Skene
|
635a3e347239371b55202720f849627a106360e9
|
288b64bffd6e2796b3b34ffaa47286afadd4d23e
|
refs/heads/master
| 2020-05-24T11:19:25.238701
| 2019-05-17T16:57:52
| 2019-05-17T16:57:52
| 187,246,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score,f1_score
from gold_sample import m_sample
from io import StringIO
import pandas as pd
import numpy as np
def generate_prediction_file(y_test, y_pred, sen_id, file_name, approach_name):  # Generates the F-score, accuracy and prediction-probability table
    f = open(file_name + '.txt', 'w')  # Open the output file for writing
    score = accuracy_score(y_test, y_pred) * 100  # Get the accuracy
    generate_confusionMatrix(y_test, y_pred, file_name)  # Generate the confusion matrix
    f_score = f1_score(y_test, y_pred, average='binary', pos_label='pos')  # Calculate the F-score
    probability_tb = c.predict_proba(x_test)  # Probabilities for the pos and neg classes
    label_tb = c.predict(x_test)  # c is the decision tree model declared at module level below
result = approach_name+": The accuracy is :" + str(round(score, 1)) + "%\n\n"
result += "\n\nThe F-Score is:" + str(f_score)+"\n"
print(result)
f.write(result)
pos_prob = list(probability_tb[:, 1])
neg_prob = list(probability_tb[:, 0])
Data = {
'Neg_Prob': neg_prob,
'Pos_Prob': pos_prob,
'Pre_Label': label_tb,
'Sentence ID': sen_id
}
mdf = pd.DataFrame(Data, columns=['Sentence ID', 'Neg_Prob', 'Pos_Prob', 'Pre_Label'])
    f.write(mdf.to_string())  # Output to file
    f.close()

def generate_confusionMatrix(y_test, y_pred, file_name):  # generate the confusion matrix
TP=0
TN=0
FP=0
FN=0
for m in range(len(y_test)):
if (y_test[m]=='pos')& (y_pred[m]=='pos'):
TP=TP+1
if (y_test[m]=='pos')&(y_pred[m]=='neg'):
FN=FN+1
if (y_test[m]=='neg')&(y_pred[m]=='pos'):
FP=FP+1
if (y_test[m]=='neg')&(y_pred[m]=='neg'):
TN=TN+1
print("TP: ",TP,"TN: ",TN,"FP: ",FP,"FN: ",FN)
#----------------------------------------------------------------------------------------
training_set=m_sample
c=DecisionTreeClassifier(min_samples_split=100,random_state=0) #Building decision tree model
features=list(training_set.columns[1:-1])
x_train=training_set.loc[:,features]
y_train=training_set.loc[:,"class"]
model=c.fit(x_train,y_train)
test=open('../data/test.csv',encoding='UTF-8') #Import testing set to predict
testing_set=pd.read_csv(test)
x_test=testing_set.loc[:,features] #split the features in testing set
y_test=testing_set.loc[:,"class"]
y_pred = c.predict(x_test)  # Show the result and export it into a file
sen_id=testing_set.loc[:,'id']
generate_prediction_file(y_test,y_pred,sen_id,'../results/train_gold',"Gold Sample")
|
[
"noreply@github.com"
] |
Ekeany.noreply@github.com
|
bf7039aa3899de3043bc3db68c5610d1ac4283fb
|
4bc6028ed8ba403b69adfd6f5cbd139baece0f4d
|
/basic_python/multi_thread/produce_consume_demo2.py
|
c18acdfccda9eb4b6e2336e247e9fd341d63cff3
|
[] |
no_license
|
xrw560/learn-pyspark
|
0ef9ed427ff887ceed1c5e5773bf97ed25ecae04
|
618d16dafd73165e714111670119d9cdecc0bf1f
|
refs/heads/master
| 2020-03-07T00:12:36.885000
| 2019-01-04T09:51:32
| 2019-01-04T09:51:32
| 127,152,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
#!/usr/bin/python
# -*- encoding:utf-8 -*-
"""
@author: zhouning
@file:produce_consume_demo2.py
@time:2018/7/30 18:51
@desc:生产者消费者
"""
import threading
import time
condition = threading.Condition()
products = 0
class Producer(threading.Thread):
def run(self):
global products
while True:
if condition.acquire():
if products < 10:
products += 1
print("Producer(%s): deliver one, now produces:%s" % (self.name, products))
                    condition.notify()  # notify() does not release the lock, so release() below is required
condition.release()
else:
print("Producer(%s): already 10, stop deliver, now products: %s " % (self.name, products))
                    condition.wait()  # wait() releases the lock automatically
time.sleep(2)
class Consumer(threading.Thread):
def run(self):
global products
while True:
if condition.acquire():
if products > 1:
products -= 1
print("Consumer(%s): consume one, now produces:%s" % (self.name, products))
condition.notify()
condition.release()
else:
print("Consumer(%s): only 1, stop consume, products: %s" % (self.name, products))
condition.wait()
if __name__ == "__main__":
for p in range(0, 2):
p = Producer()
p.start()
for c in range(0, 3):
c = Consumer()
c.start()
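# For reference, the idiomatic way to express this pattern today is a bounded
# queue, which does all the locking internally (a sketch, not part of the
# original demo):
#
#   import queue
#   q = queue.Queue(maxsize=10)
#   # producer thread: q.put(item)   # blocks while the queue is full
#   # consumer thread: q.get()       # blocks while the queue is empty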
|
[
"ncutits@163.com"
] |
ncutits@163.com
|
c3c6c6d1d9393e69ac648a65ecc9a297ae9e8921
|
cdbc1e37586bd6b08bed4ee3c97831250b585699
|
/pre_process/extract_bboxes.py
|
9b515f7efd5ad8ddaeabb89e6d4b293b6ccd56c0
|
[] |
no_license
|
PeterZhouSZ/hf2vad
|
efdf66e3191e0b9597cfeec07cbc16c691bd5afd
|
f909f427001e7bbabf8994e8992bd3e72334f547
|
refs/heads/master
| 2023-08-06T16:11:57.432201
| 2021-10-09T04:35:22
| 2021-10-09T04:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,329
|
py
|
import numpy as np
import os
import argparse
import cv2
import torch
from tqdm import tqdm
from datasets.dataset import get_dataset, img_tensor2numpy, img_batch_tensor2numpy
from pre_process.mmdet_utils import init_detector, inference_detector
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
DATASET_CFGS = {
"ped2": {"conf_thr": 0.5, "min_area": 10 * 10, "cover_thr": 0.6, "binary_thr": 18, "gauss_mask_size": 3},
"avenue": {"conf_thr": 0.25, "min_area": 40 * 40, "cover_thr": 0.6, "binary_thr": 18, "gauss_mask_size": 5},
"shanghaitech": {"conf_thr": 0.5, "min_area": 40 * 40, "cover_thr": 0.65, "binary_thr": 15, "gauss_mask_size": 5}
}
def getObjBboxes(img, model, dataset_name):
result = inference_detector(model, img)
CONF_THR = DATASET_CFGS[dataset_name]["conf_thr"]
MIN_AREA = DATASET_CFGS[dataset_name]["min_area"]
# bboxes = show_result(img, result, model.CLASSES, score_thr)
bbox_result = result
bboxes = np.vstack(bbox_result)
scores = bboxes[:, -1] # x1,y1,x2,y2,class_score
bboxes = bboxes[scores > CONF_THR, :]
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
bbox_areas = (y2 - y1 + 1) * (x2 - x1 + 1)
return bboxes[bbox_areas >= MIN_AREA, :4]
def delCoverBboxes(bboxes, dataset_name):
assert bboxes.ndim == 2
assert bboxes.shape[1] == 4
COVER_THR = DATASET_CFGS[dataset_name]["cover_thr"]
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
bbox_areas = (y2 - y1 + 1) * (x2 - x1 + 1)
sort_idx = bbox_areas.argsort() # Index of bboxes sorted in ascending order by area size
keep_idx = []
for i in range(sort_idx.size): # calculate overlap with i-th bbox
# Calculate the point coordinates of the intersection
x11 = np.maximum(x1[sort_idx[i]], x1[sort_idx[i + 1:]])
y11 = np.maximum(y1[sort_idx[i]], y1[sort_idx[i + 1:]])
x22 = np.minimum(x2[sort_idx[i]], x2[sort_idx[i + 1:]])
y22 = np.minimum(y2[sort_idx[i]], y2[sort_idx[i + 1:]])
# Calculate the intersection area
w = np.maximum(0, x22 - x11 + 1)
h = np.maximum(0, y22 - y11 + 1)
overlaps = w * h
ratios = overlaps / bbox_areas[sort_idx[i]]
num = ratios[ratios > COVER_THR]
if num.size == 0:
keep_idx.append(sort_idx[i])
return bboxes[keep_idx]
def getFgBboxes(cur_img, img_batch, bboxes, dataset_name):
area_thr = DATASET_CFGS[dataset_name]["min_area"]
binary_thr = DATASET_CFGS[dataset_name]["binary_thr"]
gauss_mask_size = DATASET_CFGS[dataset_name]["gauss_mask_size"]
extend = 2
sum_grad = 0
for i in range(img_batch.shape[0] - 1):
img1 = img_batch[i, :, :, :]
img2 = img_batch[i + 1, :, :, :]
img1 = cv2.GaussianBlur(img1, (gauss_mask_size, gauss_mask_size), 0)
img2 = cv2.GaussianBlur(img2, (gauss_mask_size, gauss_mask_size), 0)
grad = cv2.absdiff(img1, img2)
sum_grad = grad + sum_grad
sum_grad = cv2.threshold(sum_grad, binary_thr, 255, cv2.THRESH_BINARY)[1] # temporal gradient
for bbox in bboxes:
bbox_int = bbox.astype(np.int32)
extend_y1 = np.maximum(0, bbox_int[1] - extend)
extend_y2 = np.minimum(bbox_int[3] + extend, sum_grad.shape[0])
extend_x1 = np.maximum(0, bbox_int[0] - extend)
extend_x2 = np.minimum(bbox_int[2] + extend, sum_grad.shape[1])
sum_grad[extend_y1:extend_y2 + 1, extend_x1:extend_x2 + 1] = 0
sum_grad = cv2.cvtColor(sum_grad, cv2.COLOR_BGR2GRAY)
contours, hierarchy = cv2.findContours(sum_grad, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
fg_bboxes = []
for c in contours:
x, y, w, h = cv2.boundingRect(c)
sum_grad = cv2.rectangle(sum_grad, (x, y), (x + w, y + h), color=255, thickness=1)
area = (w + 1) * (h + 1)
if area > area_thr and w / h < 10 and h / w < 10:
extend_x1 = np.maximum(0, x - extend)
extend_y1 = np.maximum(0, y - extend)
extend_x2 = np.minimum(x + w + extend, sum_grad.shape[1])
extend_y2 = np.minimum(y + h + extend, sum_grad.shape[0])
fg_bboxes.append([extend_x1, extend_y1, extend_x2, extend_y2])
return np.array(fg_bboxes)
def obj_bboxes_extraction(dataset_root, dataset_name, mode):
# mmdet config file and pre-trained model weights
mm_det_config_file = 'assets/latest_version_cascade_rcnn_r101_fpn_1x.py'
mm_det_ckpt_file = 'assets/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth'
dataset = get_dataset(dataset_name=dataset_name,
dir=os.path.join(dataset_root, dataset_name),
context_frame_num=1, mode=mode)
mm_det_model = init_detector(mm_det_config_file, mm_det_ckpt_file, device="cuda:0")
all_bboxes = list()
for idx in tqdm(range(len(dataset)), total=len(dataset)):
batch, _ = dataset.__getitem__(idx)
        # the center frame of the context window
cur_img = img_tensor2numpy(batch[1])
h, w = cur_img.shape[0], cur_img.shape[1]
obj_bboxes = getObjBboxes(cur_img, mm_det_model, dataset_name)
# filter some overlapped bbox
obj_bboxes = delCoverBboxes(obj_bboxes, dataset_name)
fg_bboxes = getFgBboxes(cur_img, img_batch_tensor2numpy(batch), obj_bboxes, dataset_name)
if fg_bboxes.shape[0] > 0:
cur_bboxes = np.concatenate((obj_bboxes, fg_bboxes), axis=0)
else:
cur_bboxes = obj_bboxes
all_bboxes.append(cur_bboxes)
np.save(os.path.join(os.path.join(dataset_root, dataset_name),
'%s_bboxes_%s.npy' % (dataset_name, mode)), all_bboxes)
print('bboxes saved!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--proj_root", type=str, default="/home/liuzhian/hdd4T/code/hf2vad", help='project root path')
parser.add_argument("--dataset_name", type=str, default="ped2", help='dataset name')
parser.add_argument("--mode", type=str, default="train", help='train or test data')
args = parser.parse_args()
obj_bboxes_extraction(dataset_root=os.path.join(args.proj_root, "data"),
dataset_name=args.dataset_name,
mode=args.mode)
|
[
"csliuzhian@mail.scut.edu.cn"
] |
csliuzhian@mail.scut.edu.cn
|
1797cc28eea736cc574b2a6da4546b8189e379b5
|
9923e30eb99716bfc179ba2bb789dcddc28f45e6
|
/apimatic/python_generic_lib/Samsara+API-Python/samsaraapi/models/address.py
|
2b975d18c5a4bcc100d752ae06f73c387069e96d
|
[
"MIT"
] |
permissive
|
silverspace/samsara-sdks
|
cefcd61458ed3c3753ac5e6bf767229dd8df9485
|
c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa
|
refs/heads/master
| 2020-04-25T13:16:59.137551
| 2019-03-01T05:49:05
| 2019-03-01T05:49:05
| 172,804,041
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,681
|
py
|
# -*- coding: utf-8 -*-
"""
samsaraapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import samsaraapi.models.contact
import samsaraapi.models.address_geofence
import samsaraapi.models.tag_metadata
class Address(object):
"""Implementation of the 'Address' model.
Information about an address/geofence. Geofences are either a circle or a
polygon.
Attributes:
contacts (list of Contact): TODO: type description here.
formatted_address (string): The full address associated with this
address/geofence, as it might be recognized by maps.google.com
geofence (AddressGeofence): The geofence that defines this address and
its bounds. This can either be a circle, or a polygon - only one
key should be provided, depending on the geofence type.
id (long|int): ID of the address
name (string): Name of the address or geofence
notes (string): Notes associated with an address.
tags (list of TagMetadata): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"contacts":'contacts',
"formatted_address":'formattedAddress',
"geofence":'geofence',
"id":'id',
"name":'name',
"notes":'notes',
"tags":'tags'
}
def __init__(self,
contacts=None,
formatted_address=None,
geofence=None,
id=None,
name=None,
notes=None,
tags=None):
"""Constructor for the Address class"""
# Initialize members of the class
self.contacts = contacts
self.formatted_address = formatted_address
self.geofence = geofence
self.id = id
self.name = name
self.notes = notes
self.tags = tags
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
contacts = None
if dictionary.get('contacts') != None:
contacts = list()
for structure in dictionary.get('contacts'):
contacts.append(samsaraapi.models.contact.Contact.from_dictionary(structure))
formatted_address = dictionary.get('formattedAddress')
geofence = samsaraapi.models.address_geofence.AddressGeofence.from_dictionary(dictionary.get('geofence')) if dictionary.get('geofence') else None
id = dictionary.get('id')
name = dictionary.get('name')
notes = dictionary.get('notes')
tags = None
if dictionary.get('tags') != None:
tags = list()
for structure in dictionary.get('tags'):
tags.append(samsaraapi.models.tag_metadata.TagMetadata.from_dictionary(structure))
# Return an object of this model
return cls(contacts,
formatted_address,
geofence,
id,
name,
notes,
tags)
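# Hypothetical usage sketch: build an Address from an API-style payload
# (field values invented for illustration):
#
#   payload = {'formattedAddress': '350 Rhode Island St, San Francisco',
#              'id': 123, 'name': 'HQ', 'notes': 'Main office'}
#   addr = Address.from_dictionary(payload)
#   print(addr.name, addr.formatted_address)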
|
[
"greg@samsara.com"
] |
greg@samsara.com
|
fabb5d55230bd7f608c260d329d753b3fc9ad165
|
dd16094e1128c7b5708df537b3dd9189db53511d
|
/wellen/wellen/urls.py
|
67d0faf994e00af1ea3d5a4cec4bebd35bbfbc51
|
[] |
no_license
|
jpcvandam/acaciadata
|
30937ec770762ea78dfa2f21a98ca5e4c09ee699
|
d2c6a1f13d8eb9944000e00f9a4ff19979969989
|
refs/heads/master
| 2021-01-21T08:58:06.598229
| 2015-10-19T13:27:36
| 2015-10-19T13:27:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from .views import HomeView, DashGroupView
admin.autodiscover()
urlpatterns = patterns('wellen.views',
url(r'^$', HomeView.as_view(), name='home'),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^data/', include('acacia.data.urls',namespace='acacia')),
url(r'^(?P<name>[\w\s]+)$', DashGroupView.as_view(), name='wellen-dashboard'),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.IMG_URL, document_root=settings.IMG_ROOT)
from django.contrib.auth import views as auth_views
urlpatterns += patterns('',
url(r'^password/change/$',
auth_views.password_change,
name='password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
name='password_reset'),
url(r'^accounts/password/reset/done/$',
auth_views.password_reset_done,
name='password_reset_done'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='password_reset_complete'),
url(r'^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='password_reset_confirm'),
url(r'^accounts/', include('registration.backends.default.urls'))
)
|
[
"tkleinen@gmail.com"
] |
tkleinen@gmail.com
|
143e7bdddeb47fa9368a9a91853b3f277b78725a
|
e3cfd7e0b30b9605f0f9d83876b79d9511b02b58
|
/vrtManager/create.py
|
3a755014a55ad8a2b565da2448cce757df447cba
|
[
"Apache-2.0"
] |
permissive
|
AliasRK/WebVirtCloud-B7
|
56dd8539375a3e0c06b77582e0a1c707ff15d7da
|
3694f5615bae9fd071d4400bc129918d277775af
|
refs/heads/master
| 2020-03-24T17:58:04.601169
| 2018-07-27T19:14:20
| 2018-07-27T19:14:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,939
|
py
|
import string
from vrtManager import util
from vrtManager.connection import wvmConnect
from webvirtcloud.settings import QEMU_CONSOLE_DEFAULT_TYPE
from webvirtcloud.settings import INSTANCE_VOLUME_DEFAULT_FILE_EXTENSION
from webvirtcloud.settings import INSTANCE_VOLUME_DEFAULT_FORMAT
def get_rbd_storage_data(stg):
xml = stg.XMLDesc(0)
ceph_user = util.get_xml_path(xml, "/pool/source/auth/@username")
def get_ceph_hosts(doc):
hosts = []
for host in doc.xpath("/pool/source/host"):
name = host.prop("name")
if name:
hosts.append({'name': name, 'port': host.prop("port")})
return hosts
ceph_hosts = util.get_xml_path(xml, func=get_ceph_hosts)
secret_uuid = util.get_xml_path(xml, "/pool/source/auth/secret/@uuid")
return ceph_user, secret_uuid, ceph_hosts
class wvmCreate(wvmConnect):
image_extension = INSTANCE_VOLUME_DEFAULT_FILE_EXTENSION
image_format = INSTANCE_VOLUME_DEFAULT_FORMAT
def get_storages_images(self):
"""
Function return all images on all storages
"""
images = []
storages = self.get_storages(only_actives=True)
for storage in storages:
stg = self.get_storage(storage)
try:
stg.refresh(0)
except:
pass
for img in stg.listVolumes():
if img.endswith('.iso'):
pass
else:
images.append(img)
return images
def get_os_type(self):
"""Get guest capabilities"""
return util.get_xml_path(self.get_cap_xml(), "/capabilities/guest/os_type")
def get_host_arch(self):
"""Get guest capabilities"""
return util.get_xml_path(self.get_cap_xml(), "/capabilities/host/cpu/arch")
def create_volume(self, storage, name, size, image_format=image_format, metadata=False, image_extension=image_extension):
size = int(size) * 1073741824
stg = self.get_storage(storage)
storage_type = util.get_xml_path(stg.XMLDesc(0), "/pool/@type")
if storage_type == 'dir':
name += '.' + image_extension
alloc = 0
else:
alloc = size
metadata = False
xml = """
<volume>
<name>%s</name>
<capacity>%s</capacity>
<allocation>%s</allocation>
<target>
<format type='%s'/>
</target>
</volume>""" % (name, size, alloc, image_format)
stg.createXML(xml, metadata)
try:
stg.refresh(0)
except:
pass
vol = stg.storageVolLookupByName(name)
return vol.path()
def get_volume_type(self, path):
vol = self.get_volume_by_path(path)
vol_type = util.get_xml_path(vol.XMLDesc(0), "/volume/target/format/@type")
if vol_type == 'unknown':
return 'raw'
if vol_type:
return vol_type
else:
return 'raw'
def get_volume_path(self, volume):
storages = self.get_storages(only_actives=True)
for storage in storages:
stg = self.get_storage(storage)
if stg.info()[0] != 0:
stg.refresh(0)
for img in stg.listVolumes():
if img == volume:
vol = stg.storageVolLookupByName(img)
return vol.path()
def get_storage_by_vol_path(self, vol_path):
vol = self.get_volume_by_path(vol_path)
return vol.storagePoolLookupByVolume()
def clone_from_template(self, clone, template, metadata=False):
vol = self.get_volume_by_path(template)
stg = vol.storagePoolLookupByVolume()
storage_type = util.get_xml_path(stg.XMLDesc(0), "/pool/@type")
format = util.get_xml_path(vol.XMLDesc(0), "/volume/target/format/@type")
if storage_type == 'dir':
clone += '.img'
else:
metadata = False
xml = """
<volume>
<name>%s</name>
<capacity>0</capacity>
<allocation>0</allocation>
<target>
<format type='%s'/>
</target>
</volume>""" % (clone, format)
stg.createXMLFrom(xml, vol, metadata)
clone_vol = stg.storageVolLookupByName(clone)
return clone_vol.path()
def _defineXML(self, xml):
self.wvm.defineXML(xml)
def delete_volume(self, path):
vol = self.get_volume_by_path(path)
vol.delete()
def create_instance(self, name, memory, vcpu, host_model, uuid, images, cache_mode, networks, virtio, mac=None):
"""
Create VM function
"""
memory = int(memory) * 1024
if self.is_kvm_supported():
hypervisor_type = 'kvm'
else:
hypervisor_type = 'qemu'
xml = """
<domain type='%s'>
<name>%s</name>
<description>None</description>
<uuid>%s</uuid>
<memory unit='KiB'>%s</memory>
<vcpu>%s</vcpu>""" % (hypervisor_type, name, uuid, memory, vcpu)
if host_model:
xml += """<cpu mode='host-model'/>"""
xml += """<os>
<type arch='%s'>%s</type>
<boot dev='hd'/>
<boot dev='cdrom'/>
<bootmenu enable='yes'/>
</os>""" % (self.get_host_arch(), self.get_os_type())
xml += """<features>
<acpi/><apic/><pae/>
</features>
<clock offset="utc"/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>"""
disk_letters = list(string.lowercase)
for image, img_type in images.items():
stg = self.get_storage_by_vol_path(image)
stg_type = util.get_xml_path(stg.XMLDesc(0), "/pool/@type")
if stg_type == 'rbd':
ceph_user, secret_uuid, ceph_hosts = get_rbd_storage_data(stg)
xml += """<disk type='network' device='disk'>
<driver name='qemu' type='%s' cache='%s'/>
<auth username='%s'>
<secret type='ceph' uuid='%s'/>
</auth>
<source protocol='rbd' name='%s'>""" % (img_type, cache_mode, ceph_user, secret_uuid, image)
if isinstance(ceph_hosts, list):
for host in ceph_hosts:
if host.get('port'):
xml += """
<host name='%s' port='%s'/>""" % (host.get('name'), host.get('port'))
else:
xml += """
<host name='%s'/>""" % host.get('name')
xml += """
</source>"""
else:
xml += """<disk type='file' device='disk'>
<driver name='qemu' type='%s' cache='%s'/>
<source file='%s'/>""" % (img_type, cache_mode, image)
if virtio:
xml += """<target dev='vd%s' bus='virtio'/>""" % (disk_letters.pop(0),)
else:
xml += """<target dev='sd%s' bus='ide'/>""" % (disk_letters.pop(0),)
xml += """</disk>"""
xml += """ <disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file=''/>
<target dev='hda' bus='ide'/>
<readonly/>
<address type='drive' controller='0' bus='1' target='0' unit='1'/>
</disk>"""
for net in networks.split(','):
xml += """<interface type='network'>"""
if mac:
xml += """<mac address='%s'/>""" % mac
xml += """<source network='%s'/>
<filterref filter='clean-traffic'/>""" % net
if virtio:
xml += """<model type='virtio'/>"""
xml += """</interface>"""
xml += """ <input type='mouse' bus='ps2'/>
<input type='tablet' bus='usb'/>
<graphics type='%s' port='-1' autoport='yes' passwd='%s' listen='127.0.0.1'/>
<console type='pty'/>
<video>
<model type='cirrus'/>
</video>
<memballoon model='virtio'/>
</devices>
</domain>""" % (QEMU_CONSOLE_DEFAULT_TYPE, util.randomPasswd())
self._defineXML(xml)
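# A minimal usage sketch (assumptions: wvmConnect's constructor takes
# (host, login, passwd, conn_type) as in WebVirtCloud's other managers, and
# 'default' names an existing dir-backed storage pool and network; CONN_SSH
# and some_uuid are placeholders):
#
# conn = wvmCreate('qemu-host', 'admin', 'secret', CONN_SSH)
# path = conn.create_volume('default', 'disk1', 20)   # 20 GiB, qcow2 by default
# conn.create_instance('vm1', 2048, 2, True, some_uuid,
#                      {path: conn.get_volume_type(path)},
#                      'writeback', 'default', virtio=True)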
|
[
"r.v.mirchev@gmail.com"
] |
r.v.mirchev@gmail.com
|
558f941e1f5daf39d0d1e2e9416df55369a27e83
|
f5070c669f20f89dc23de19db94d662fd245eebb
|
/s3recovery/ut/s3recovery_recover_tests.py
|
0e10d0ded6e0888ff61e0200ab470865a3153e38
|
[
"Apache-2.0"
] |
permissive
|
kaustubh-d/cortx-s3server
|
8471e9581d6843e260e048a911ddb8beb0af4316
|
43cffc0e3e9e261e9956dfcf90b1e75e97cd367d
|
refs/heads/main
| 2022-12-18T18:03:03.438484
| 2020-09-24T15:33:39
| 2020-09-24T15:33:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,153
|
py
|
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
#!/usr/bin/python3.6
import mock
import unittest
from s3backgrounddelete.cortx_s3_kv_api import CORTXS3KVApi
from s3backgrounddelete.cortx_s3_success_response import CORTXS3SuccessResponse
from s3recovery.s3recovercorruption import S3RecoverCorruption
from s3recovery.s3recoverybase import S3RecoveryBase
from s3recovery.config import Config
class S3RecoverCorruptionTestCase(unittest.TestCase):
@mock.patch.object(S3RecoveryBase, 'initiate')
@mock.patch.object(S3RecoveryBase, 'dry_run')
@mock.patch.object(S3RecoverCorruption, 'check_consistency')
    def test_check_consistency_check_for_recover(self, mock_check_consistency, mock_dry_run, mock_initiate):
# Tests to check consistency check is used during recover option
mockS3RecoverCorruption = S3RecoverCorruption()
mock_initiate.return_value = None
mock_dry_run.return_value = {}
mock_check_consistency.return_value = None
mockS3RecoverCorruption.recover_corruption("Global bucket index",
Config.global_bucket_index_id,
Config.global_bucket_index_id_replica,
"Bucket metadata index",
Config.bucket_metadata_index_id,
Config.bucket_metadata_index_id_replica)
self.assertTrue(mock_initiate.called)
self.assertTrue(mock_dry_run.called)
self.assertTrue(mock_check_consistency.called)
# Assert Consistency and other mock calls
self.assertEqual(S3RecoveryBase.initiate.call_count, 2)
self.assertEqual(S3RecoveryBase.dry_run.call_count, 2)
self.assertEqual(S3RecoverCorruption.check_consistency.call_count, 1)
@mock.patch.object(S3RecoveryBase, 'initiate')
@mock.patch.object(S3RecoveryBase, 'dry_run')
@mock.patch.object(S3RecoverCorruption, 'restore_data')
    def test_check_restore_for_recover(self, mock_restore_data, mock_dry_run, mock_initiate):
# Tests to check restore (PutKV) is used during recover option
mockS3RecoverCorruption = S3RecoverCorruption()
mock_initiate.return_value = None
mock_dry_run.return_value = {}
mock_restore_data.return_value = None
mockS3RecoverCorruption.recover_corruption("Global bucket index",
Config.global_bucket_index_id,
Config.global_bucket_index_id_replica,
"Bucket metadata index",
Config.bucket_metadata_index_id,
Config.bucket_metadata_index_id_replica)
self.assertTrue(mock_initiate.called)
self.assertTrue(mock_dry_run.called)
self.assertTrue(mock_restore_data.called)
# Assert PutKV and other mock calls
self.assertEqual(S3RecoveryBase.initiate.call_count, 2)
self.assertEqual(S3RecoveryBase.dry_run.call_count, 2)
self.assertEqual(S3RecoverCorruption.restore_data.call_count, 1)
def test_inconsistent_data_entries(self):
# Tests to check consistency check works for empty indexes
mockS3RecoverCorruption = S3RecoverCorruption()
mockS3RecoverCorruption.list_result = {
"key1": "value1",
}
mockS3RecoverCorruption.metadata_result = {
"617326/key2": "value2",
}
mockS3RecoverCorruption.check_consistency()
# Assert inconsistent data should not be recovered
self.assertEqual(len(mockS3RecoverCorruption.common_keys), 0)
self.assertEqual(mockS3RecoverCorruption.common_keys, [])
def test_consistent_data_entries(self):
# Tests to check consistent data is recovered during recovery
mockS3RecoverCorruption = S3RecoverCorruption()
mockS3RecoverCorruption.list_result = {
"key1": "value1",
"key2": "value2"
}
mockS3RecoverCorruption.metadata_result = {
"617326/key1": "value1",
"617326/key2": "value2"
}
mockS3RecoverCorruption.check_consistency()
# Assert for data to be recovered
self.assertEqual(len(mockS3RecoverCorruption.common_keys), 2)
self.assertEqual(mockS3RecoverCorruption.common_keys, ["key1","key2"])
def test_partial_inconsistent_data_entries(self):
        # Tests to check inconsistent data is not recovered during recovery
mockS3RecoverCorruption = S3RecoverCorruption()
mockS3RecoverCorruption.list_result = {
"key1": "value1",
"key2": "value2",
"key3": "value3"
}
mockS3RecoverCorruption.metadata_result = {
"617326/key3": "value3",
"617326/key4": "value4",
"617326/key5": "value5"
}
mockS3RecoverCorruption.check_consistency()
# Assert inconsistent data should not be recovered
self.assertEqual(len(mockS3RecoverCorruption.common_keys), 1)
self.assertEqual(mockS3RecoverCorruption.common_keys, ["key3"])
@mock.patch.object(CORTXS3KVApi, 'put')
def test_restore_data_none_index_list(self, mock_put):
# Test 'restore_data' when list: 'list_result' is None
mockS3RecoverCorruption = S3RecoverCorruption()
mockS3RecoverCorruption.list_result = None
mockS3RecoverCorruption.restore_data('global_list_index_id',
'replica_list_index_id',
'global_metadata_index_id',
'replica_metadata_index_id'
)
self.assertEqual(mock_put.call_count, 0)
@mock.patch.object(CORTXS3KVApi, 'put')
def test_restore_data_none_metadata_list(self, mock_put):
# Test 'restore_data' when dict: 'metadata_result' is None
mockS3RecoverCorruption = S3RecoverCorruption()
mockS3RecoverCorruption.list_result = dict()
mockS3RecoverCorruption.metadata_result = None
mockS3RecoverCorruption.restore_data('global_list_index_id',
'replica_list_index_id',
'global_metadata_index_id',
'replica_metadata_index_id'
)
self.assertEqual(mock_put.call_count, 0)
@mock.patch.object(CORTXS3KVApi, 'put')
def test_restore_data_empty_index_list(self, mock_put):
# Test 'restore_data' when dict: 'list_result' is empty
mockS3RecoverCorruption = S3RecoverCorruption()
mockS3RecoverCorruption.list_result = dict()
mockS3RecoverCorruption.metadata_result = dict()
mockS3RecoverCorruption.restore_data('global_list_index_id',
'replica_list_index_id',
'global_metadata_index_id',
'replica_metadata_index_id'
)
self.assertEqual(mock_put.call_count, 0)
@mock.patch.object(CORTXS3KVApi, 'put')
def test_restore_data_non_empty_index_list(self, mock_put):
# Test 'restore_data' when dict: 'list_result' & 'metadata_result' is not empty
mockS3RecoverCorruption = S3RecoverCorruption()
mockS3RecoverCorruption.metadata_result = {
r'123/key3': 'value3'
}
mockS3RecoverCorruption.list_result = {
'key1': 'value1',
'key2': 'value2'
}
mockS3RecoverCorruption.common_keys = ['key1', 'key3']
mock_put.return_value = True, CORTXS3SuccessResponse("body".encode('utf-8'))
mockS3RecoverCorruption.restore_data('global_list_index_id',
'replica_list_index_id',
'global_metadata_index_id',
'replica_metadata_index_id')
self.assertEqual(mock_put.call_count, 4) # 2 calls each to CORTXS3KVApi::put, for key1 and key3
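# A minimal sketch (an assumption, inferred from the expectations in the tests
# above) of what S3RecoverCorruption.check_consistency appears to do: keep only
# keys present in the bucket list index AND, under an "<account-id>/" prefix,
# in the bucket metadata index.
def check_consistency_sketch(list_result, metadata_result):
    # strip the "<account-id>/" prefix from metadata keys before comparing
    metadata_keys = {k.split('/', 1)[-1] for k in metadata_result}
    return [k for k in list_result if k in metadata_keys]

# e.g., matching test_partial_inconsistent_data_entries:
# check_consistency_sketch({'key1': 1, 'key2': 2, 'key3': 3},
#                          {'617326/key3': 3, '617326/key4': 4}) -> ['key3']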
|
[
"noreply@github.com"
] |
kaustubh-d.noreply@github.com
|
65a182d71eda63a105602f86927d6443abe837d3
|
222a67d44ea67372f61c090d467bf43f874ee769
|
/prac_08/silver_service_taxi.py
|
2c7365032ce11dd7b40fe12179abc3fa425cac62
|
[] |
no_license
|
JarrodPW/cp1404practicals
|
1504f7cd44fff202b2b020c90fbff508d663e16b
|
7e508fe6dcce8f0e5879a27a5c777352a66537b1
|
refs/heads/master
| 2023-08-24T05:35:37.214840
| 2021-10-11T22:27:29
| 2021-10-11T22:27:29
| 397,093,991
| 0
| 0
| null | 2021-10-05T03:01:17
| 2021-08-17T04:17:03
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
"""
prac 08
SilverServiceTaxi Class
"""
from prac_08.taxi import Taxi
class SilverServiceTaxi(Taxi):
price_per_km = 1.23
flagfall = 4.5
    def __init__(self, name, fuel, fanciness=1):
super().__init__(name, fuel)
self.fanciness = float(fanciness)
self.price_per_km *= fanciness
def get_fare(self):
return super().get_fare() + self.flagfall
def __str__(self):
"""Return a string like a Car but with current fare distance."""
return f"{super().__str__()} plus flagfall of {self.flagfall:.2f}"
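# A minimal usage sketch (assumption: prac_08.taxi.Taxi follows the CP1404
# scaffold, with drive() and get_fare() based on distance travelled):
# taxi = SilverServiceTaxi("Hughie", 100, fanciness=2)
# taxi.drive(18)
# print(taxi)                              # ... plus flagfall of 4.50
# print(f"fare: ${taxi.get_fare():.2f}")   # distance fare (x2 fanciness) + flagfall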
|
[
"jarrod.paynewatson@my.jcu.edu.au"
] |
jarrod.paynewatson@my.jcu.edu.au
|
ee29f93552f28f3f2169f28fead98430500fabf2
|
154d42c739793fbf61df002f9d8c659b07a0fcac
|
/19_friend_date/friend_date.py
|
8a95efc01a96105034214e3b1588a8fc5e752352
|
[] |
no_license
|
mcodemax/PythonDSPrac
|
048aced3d4e58e9091dc25404160c7702bd6c597
|
e62dc6efb4bedb0fb9f7e94448f22330581fb571
|
refs/heads/master
| 2023-05-06T19:44:30.122344
| 2021-05-25T00:37:21
| 2021-05-25T00:37:21
| 369,969,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
def friend_date(a, b):
"""Given two friends, do they have any hobbies in common?
- a: friend #1, a tuple of (name, age, list-of-hobbies)
- b: same, for friend #2
    Returns True if they have any hobbies in common, False if not.
>>> elmo = ('Elmo', 5, ['hugging', 'being nice'])
>>> sauron = ('Sauron', 5000, ['killing hobbits', 'chess'])
>>> gandalf = ('Gandalf', 10000, ['waving wands', 'chess'])
>>> friend_date(elmo, sauron)
False
>>> friend_date(sauron, gandalf)
True
"""
a_hobbies = set(a[2])
b_hobbies = set(b[2])
    return bool(a_hobbies & b_hobbies)
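# A quick way to exercise the examples in the docstring above (a sketch; the
# module ships no runner of its own):
if __name__ == '__main__':
    import doctest
    doctest.testmod()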
|
[
"mwalterjohnson7@gmail.com"
] |
mwalterjohnson7@gmail.com
|
217f34413d63eb6f3d1186e468fd660c6e515573
|
303f5990985aba9a1884053baf08ff0d47c3a5de
|
/build/ork/tod/catkin_generated/pkg.installspace.context.pc.py
|
c9c7b014ef7ddc0718c98663eb475ce45d5460a1
|
[] |
no_license
|
Brendon2016/Tsing-Siemens-Competiton
|
d1339e162c8582a59aa153f29bec0180c4e261d7
|
8ffc7ee931a877898342fdd3593e92b633e22bce
|
refs/heads/master
| 2020-03-28T23:30:48.267738
| 2018-09-18T13:53:33
| 2018-09-18T13:53:33
| 149,294,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "object_recognition_tod"
PROJECT_SPACE_DIR = "/home/h/catkin_ws/install"
PROJECT_VERSION = "0.5.6"
|
[
"251311876@qq.com"
] |
251311876@qq.com
|
561b48e0f9faad2ba29366f95238236a3b524eb6
|
05b0bcbef59def8743ae56e4110153837ff24ec1
|
/Scripts/get_model_weights.py
|
23d806b38b1582bb4e772757722662ce44fa20a4
|
[] |
no_license
|
AkselAllas/Bachelor_thesis
|
fdb468d142d209e593729ff205fe12a3ae4a244e
|
50aebb8a3ad60f9604c1bb525f6c5949fc4c0f0b
|
refs/heads/master
| 2020-04-30T15:38:40.391119
| 2020-01-15T07:03:54
| 2020-01-15T07:03:54
| 176,925,588
| 0
| 0
| null | null | null | null |
MacCentralEurope
|
Python
| false
| false
| 4,808
|
py
|
#!/usr/bin/env python
# coding: utf-8
#Import all the dependencies
#This disables python on GPU
#import os
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"
from sklearn.utils import class_weight
from keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from keras.optimizers import SGD, RMSprop, adam
from keras.utils import np_utils
from keras import backend as K
import numpy as np
from sklearn.model_selection import train_test_split
from scipy import ndarray
import time
import sys
import matplotlib
import matplotlib.pyplot as plt
from s2_preprocessor import *
from s2_model import *
from plotter import *
version_start = str(sys.argv[1])
#Because fit_generator needs different data preprocessing functions, then we define functions for windowing in this script
def input_windows_preprocessing(preprocessor_X_output, preprocessor_Y_output, s2_preprocessor):
nb_tile_pixels = s2_preprocessor.tile_dimension*s2_preprocessor.tile_dimension
dim = (s2_preprocessor.window_dimension,s2_preprocessor.window_dimension,s2_preprocessor.nb_images)
input_data = preprocessor_X_output.astype('float32')
input_labels = np.reshape(preprocessor_Y_output,(nb_tile_pixels,s2_preprocessor.nb_classes))
#Get Region of Interest mask from loaded array
ROI_mask = input_data[:,:,0,5]
X_2D_nowindows = input_data[:,:,:,0:5]
reshaped_ROI_mask = np.reshape(ROI_mask,(nb_tile_pixels))
valid_pixels_count = np.count_nonzero(reshaped_ROI_mask)
X = np.zeros((0,s2_preprocessor.nb_bands,*dim))
Y = np.zeros((0,s2_preprocessor.nb_classes))
X = np.concatenate((X,np.zeros((valid_pixels_count, s2_preprocessor.nb_bands, *dim))),axis=0)
Y = np.concatenate((Y,np.zeros((valid_pixels_count, s2_preprocessor.nb_classes))))
for j in range(s2_preprocessor.nb_images):
for i in range(s2_preprocessor.nb_bands):
padded_overpad = skimage.util.pad(X_2D_nowindows[:s2_preprocessor.tile_dimension,:,i,j],4,'reflect')
padded = padded_overpad[:-1,:-1].copy() #Copy is made so that next view_as_windows wouldn't throw warning about being unable to provide views. Without copy() interestingly enough, it doesn't take extra RAM, just throws warnings.
windows = skimage.util.view_as_windows(padded,(s2_preprocessor.window_dimension,s2_preprocessor.window_dimension))
reshaped_windows = np.reshape(windows,(nb_tile_pixels,s2_preprocessor.window_dimension,s2_preprocessor.window_dimension))
k=0
l=0
for mask_element in reshaped_ROI_mask:
if(mask_element==True):
X[k,i,:,:,j] = reshaped_windows[l]
Y[k] = input_labels[l]
k+=1
l+=1
return X,Y
s2_preprocessor_params = {'input_dimension':5120, #5120
'label_dir':'./Label_tifs/',
'data_dir':'./Data/',
'input_data_dir':'./Big_tile_data/',
'region_of_interest_shapefile':'./ROI/ROI.shp',
'window_dimension':8,
'tile_dimension':512,
'nb_images':5,
'nb_bands':22,
'nb_steps':8, #This is unused!! #nb_steps defines how many parts the tile will be split into for training
'rotation_augmentation':0,
'flipping_augmentation':0
}
s2_preprocessor = s2_preprocessor(**s2_preprocessor_params)
class_weights = np.load("class_weights.npy")
optimizer_params = {
'lr':0.001,
}#'clipvalue':0.5,
#Callback for CTRL+Z to stop training
stop_cb = SignalStopping()
filepath="best_model.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early_stopping_params = {
'monitor':'val_loss',
'min_delta':0.,
'patience':5,
'verbose':1,
#'mode':'auto'
}
s2_model_params = {
's2_preprocessor' : s2_preprocessor,
'batch_size' : 512,
'nb_epochs' : 1000,
'nb_filters' : [32, 32, 64],
'max_pool_size' : [2,2,1],
'conv_kernel_size' : [3,3,3],
'optimizer' : SGD(**optimizer_params),
'loss_function' : 'categorical_crossentropy',
'metrics' : ['mse', 'accuracy'],
'version' : '0',
'cb_list' : [EarlyStopping(**early_stopping_params),stop_cb,checkpoint]
}
s2_model = s2_model(**s2_model_params)
#for layer in s2_model.model.layers:
# print(layer.get_config())
# print(np.array(layer.get_weights()).shape)
conv_layer_one_weights = np.array(s2_model.model.layers[0].get_weights()[0])
print(np.array(conv_layer_one_weights[:,:,0,13,0]).shape)
#conv_layer_two_weights = np.array(s2_model.model.layers[4].get_weights()[0])
#conv_layer_three_weights = np.array(s2_model.model.layers[8].get_weights()[0])
#print(s2_model.model.layers[0].get_config())
for i in range(22):
    # str(i) is required here: concatenating an int to a str raises TypeError.
    # (Translated from Estonian: "the sum of the absolute values of the filter
    # weights at index i is")
    print("The sum of the absolute values of the filter weights at index " + str(i) + " is: " + str(np.sum(np.absolute(conv_layer_one_weights[:,:,:,i,:]))))
|
[
"allasaksel@gmail.com"
] |
allasaksel@gmail.com
|
b1814ccda60066a724e595cb3887d46398bfb2bf
|
3fc8150fe6cd3dd7f800341a1c193a628f6c60ad
|
/App/settings.py
|
ddce24681fc319deb2a1d5bf6260bdd204221d4b
|
[
"MIT"
] |
permissive
|
passed-by/WeatherTornado
|
9fbe8b179115dac866745b4aa50c71bc06164e94
|
05f1ccc56b93c3267d65fbee312de4b99fb1a5e4
|
refs/heads/master
| 2020-08-02T09:39:54.257203
| 2019-09-27T11:43:30
| 2019-09-27T11:43:30
| 211,306,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# settings.py: project configuration
import os
from tornado.options import define, options
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
define('port', default=8082, type=int)
define('debug', default=True, type=bool)
app_settings = {
"debug": options.debug,
"template_path": os.path.join(BASE_DIR, 'templates'),
"static_path": os.path.join(BASE_DIR, 'static'),
}
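# A minimal sketch of wiring these settings into a Tornado app (assumption:
# the request handlers are defined elsewhere in the project):
#
# import tornado.ioloop
# import tornado.web
# from App.settings import app_settings, options
#
# app = tornado.web.Application(handlers=[...], **app_settings)
# app.listen(options.port)
# tornado.ioloop.IOLoop.current().start()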
|
[
"2809196989@qq.com"
] |
2809196989@qq.com
|
aa07d79b5463041171e9c12a10d01361b10412d2
|
f653f96c26501523d36f67330186e546b9067749
|
/20/00/9.py
|
8a4e8ca785dcdfa995155a8d1c1d57d37d7b58ce
|
[
"CC0-1.0"
] |
permissive
|
pylangstudy/201711
|
6d5fb40d3bf0b1be5310e6c85ac23c76a7f9db56
|
be6222dde61373f67d25a2c926868b602463c5cc
|
refs/heads/master
| 2022-11-13T22:11:52.127874
| 2017-11-29T23:32:17
| 2017-11-29T23:32:17
| 109,062,692
| 0
| 1
| null | 2022-10-20T07:22:56
| 2017-10-31T23:22:16
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
py
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('bar')
parser.parse_args(['XXX'])
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--foo-bar', '--foo')
parser.add_argument('-x', '-y')
parser.parse_args('-f 1 -x 2'.split())
parser.parse_args('--foo 1 -y 2'.split())
parser = argparse.ArgumentParser()
parser.add_argument('--foo', dest='bar')
parser.parse_args('--foo XXX'.split())
|
[
"pylangstudy@yahoo.co.jp"
] |
pylangstudy@yahoo.co.jp
|
8c222de0453c329c322e024d9cbbc2e7f7988275
|
37448df3b6375f69471d2c8723ada56505d3dbbc
|
/apps/entity/apps.py
|
bc2f2558cf2082ba8093a0670a171d0e5e475865
|
[] |
no_license
|
williamsko/payhouse
|
4df63ac32069b2803d53451089ff572de054eea9
|
bcee45b09f0a889e35accd652c02522b15edbf7a
|
refs/heads/master
| 2022-12-08T23:52:22.206285
| 2020-02-09T20:23:36
| 2020-02-09T20:23:36
| 209,165,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
from django.apps import AppConfig
class EntityConfig(AppConfig):
name = 'entity'
def ready(self):
import entity.signals # noqa
|
[
"wdesouza-prestataire@ina.fr"
] |
wdesouza-prestataire@ina.fr
|
8183baa66dbb0e983f4690fd1ca40a8bc1f5d061
|
14c1ae3c47863babc40e473c8d55383459612e2b
|
/mysignal/mysignal.py
|
933287a23881a705566351a28ea31de5a129bb91
|
[
"WTFPL"
] |
permissive
|
hz-b/MSc_FOFB-Simulation
|
009852321dad92dd88912c1cc818d544e8f50f78
|
dc9a43f54bf7e570915d0360d102ad04f3cb761d
|
refs/heads/master
| 2021-01-12T01:42:45.320526
| 2016-08-22T09:43:37
| 2016-08-22T09:43:37
| 78,421,032
| 1
| 0
| null | 2017-01-09T11:01:37
| 2017-01-09T11:01:37
| null |
UTF-8
|
Python
| false
| false
| 7,858
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
import sympy as sy
def poly_to_sympy(num, den, symbol='s', simplify=True):
""" Convert Scipy's LTI instance to Sympy expression """
s = sy.Symbol(symbol)
G = sy.Poly(num, s) / sy.Poly(den, s)
return sy.simplify(G) if simplify else G
def poly_from_sympy(xpr, symbol='s'):
""" Convert Sympy transfer function polynomial to Scipy LTI """
s = sy.Symbol(symbol)
num, den = sy.simplify(xpr).as_numer_denom() # expressions
p_num_den = sy.poly(num, s), sy.poly(den, s) # polynomials
c_num_den = [sy.expand(p).all_coeffs() for p in p_num_den] # coefficients
# convert to floats
l_num, l_den = [sy.lambdify((), c)() for c in c_num_den]
return l_num, l_den
def TF_from_signal(y, u, fs, method='correlation', plot=False, plottitle=''):
if len(y.shape) == 1:
y = y.reshape((1, y.size))
M, N = y.shape
H_all = np.zeros((M, int(N/2)), dtype=complex)
fr = np.fft.fftfreq(N, 1/fs)[:int(N/2)]
if method == "correlation":
a = signal.correlate(u, u, "same")
else:
a = u
if plot:
plt.figure()
for k in range(M):
if method == "correlation":
c = signal.correlate(y[k, :], u, "same")
else:
c = y[k, :]
A = np.fft.fft(a)
idx = np.where(A == 0)[0]
C = np.fft.fft(c)
C[idx] = 0
A[idx] = 1
H = C / A
H = H[:int(N/2)]
H_all[k, :] = H
if plot:
ax1 = plt.subplot(211)
ax1.plot(fr, abs(H))
ax2 = plt.subplot(212)
ax2.plot(fr, np.unwrap(np.angle(H)))
if plot:
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.grid(which="both")
ax2.set_xscale('log')
ax2.grid(which="both")
ax1.set_title(plottitle)
return H_all, fr
class TF(signal.TransferFunction):
""" Transfer function
"""
def __init__(self, *args):
if len(args) not in [2, 4]:
raise ValueError("2 (num, den) or 4 (A, B, C, D) arguments "
"expected, not {}.".format((len(args))))
if len(args) == 2:
super().__init__(args[0], args[1])
else:
A, B, C, D = args
n, d = signal.ss2tf(A, B, C, D)
super().__init__(n, d)
def __neg__(self):
return TF(-self.num, self.den)
def __mul__(self, other):
self_s = self.to_sympy()
other_s = self._check_other(other)
return TF.from_sympy(self_s * other_s)
def __truediv__(self, other):
self_s = self.to_sympy()
other_s = self._check_other(other)
return TF.from_sympy(self_s / other_s)
def __rtruediv__(self, other):
self_s = self.to_sympy()
other_s = self._check_other(other)
return TF.from_sympy(other_s / self_s)
def __add__(self, other):
self_s = self.to_sympy()
other_s = self._check_other(other)
return TF.from_sympy(self_s + other_s)
def __sub__(self, other):
self_s = self.to_sympy()
other_s = self._check_other(other)
return TF.from_sympy(self_s - other_s)
def __rsub__(self, other):
self_s = self.to_sympy()
other_s = self._check_other(other)
return TF.from_sympy(other_s - self_s)
def _check_other(self, other):
if type(other) in [int, float, complex]:
return other
else:
return other.to_sympy()
# symmetric behaviour for commutative operators
__rmul__ = __mul__
__radd__ = __add__
def to_sympy(self, symbol='s', simplify=True):
""" Convert Scipy's LTI instance to Sympy expression """
return poly_to_sympy(self.num, self.den, 's', simplify)
    @staticmethod
    def from_sympy(xpr, symbol='s'):
""" Convert Sympy transfer function polynomial to Scipy LTI """
num, den = poly_from_sympy(xpr, symbol)
return TF(num, den)
def as_poly_s(self):
return self.to_sympy()
def as_poly_z(self, Ts):
[numz], denz, _ = signal.cont2discrete((self.num, self.den), Ts,
method='bilinear')
return poly_to_sympy(numz, denz, 'z')
def apply_f(self, u, x, Ts):
if self.den.size == 1 and self.num.size == 1:
return u*self.num[0]/self.den[0], x
if type(u) is not np.ndarray:
u = np.array([[u]]).T
else:
if u.ndim == 1:
u = u.reshape((u.size, 1))
elif u.shape[1] != 1:
u = u.T
A_t, B_t, C_t, D_t = signal.tf2ss(self.num, self.den)
(A, B, C, D, _) = signal.cont2discrete((A_t, B_t, C_t, D_t), Ts,
method='bilinear')
A = np.kron(np.eye(u.size), A)
B = np.kron(np.eye(u.size), B)
C = np.kron(np.eye(u.size), C)
D = np.kron(np.eye(u.size), D)
x_vec = x.reshape((x.size, 1))
x1_vec = A.dot(x_vec) + B.dot(u)
y = C.dot(x_vec) + D.dot(u)
# put back in same order
if type(u) is not np.ndarray:
y = y[0, 0]
else:
if u.ndim == 1:
y = y.reshape(y.size)
elif u.shape[1] != 1:
y = y.T
if np.any(abs(y.imag) > 0):
print('y has complex part {}'.format(y))
print((A, B, C, D))
return y.reshape(y.size).real, x1_vec.reshape(x.shape)
def plot_hw(self, w=None, ylabel=None, bode=False, xscale='log', yscale='log',
figsize=None):
w, H = signal.freqresp((self.num,self.den), w)
if bode:
y = 20*np.log10(abs(H))
x = w
yscale = 'linear'
xlabel = r"Angular frequency $\omega$ [in rad/s]"
else:
if yscale == 'db':
y = 20*np.log10(abs(H))
yscale = 'linear'
else:
y = abs(H)
yscale = 'log'
x = w/2/np.pi
xlabel = r"Frequency f [in Hz]"
plt.figure(figsize=figsize)
plt.subplot(2, 1, 1)
plt.plot(x, y)
plt.yscale(yscale)
plt.xlabel(xlabel)
plt.xscale(xscale)
#plt.yticks(np.arange(-120,20,30))
plt.grid(which="both")
plt.ylabel(ylabel if ylabel is not None else "Amplitude")
plt.subplot(2, 1, 2)
plt.plot(x, np.unwrap(np.angle(H))*180/np.pi)
plt.xscale(xscale)
plt.grid(which="both")
plt.yticks(np.arange(-110,30,40))
plt.xlabel(xlabel)
plt.ylabel("Phase [in deg]")
        plt.tight_layout()
def plot_step(self, ylabel=None, figsize=None):
t, y = signal.step((self.num, self.den))
n_zeros = int(t.size * 0.1)
T = t[1]
r = np.concatenate((np.zeros(n_zeros), np.ones(t.size)))
t = np.concatenate(((np.arange(n_zeros)-n_zeros)*T, t))
y = np.concatenate((np.zeros(n_zeros), y))
plt.figure(figsize=figsize)
plt.plot(t, r)
plt.plot(t, y)
plt.xlabel('Time [in s]')
plt.ylabel(ylabel if ylabel is not None else "Amplitude")
plt.tight_layout()
class PID(TF):
def __init__(self, P, I, D):
tf = TF([P], [1])
if I != 0:
tf += TF([I], [1, 0])
if D != 0:
tf += TF([D, 0], [D/8, 1])
super().__init__(tf.num, tf.den)
self.kP = P
self.kI = I
self.kD = D
def apply_fd(self, e, Ts):
return (self.kP*e[:, -1] + self.kI*np.sum(e, axis=1)*Ts
+ self.kD*(e[:, -1]-e[:, -2])/Ts)
def apply_f(self, e, x, Ts):
return TF.apply_f(self, e, x, Ts)
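# A minimal usage sketch (assumption: run as a script with matplotlib
# available); it closes a PI loop around a first-order plant using the
# sympy-backed operators defined above and plots the step response:
if __name__ == '__main__':
    G = TF([1], [1, 1])           # plant 1/(s+1)
    K = PID(2.0, 1.0, 0.0)        # PI controller
    T_cl = (K * G) / (1 + K * G)  # closed-loop transfer function
    T_cl.plot_step(ylabel='y(t)')
    plt.show()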
|
[
"olivier@churlaud.com"
] |
olivier@churlaud.com
|
71c57adceb32539990a21ff9eaf0cd83f2419dbf
|
2a608e9830157ba2842d3ce2f9eb5acac0d304eb
|
/day_6_part_2.py
|
bfb0d36c7d8eaa9901a76d4f4a98045c56d88d0e
|
[
"MIT"
] |
permissive
|
Korred/advent_of_code_2016
|
71709773176dee4cb736b88ce459d4bfe1ae8133
|
8b6e9b51569bf75da9b0fc56cc9e7c1c4dddeb5e
|
refs/heads/master
| 2021-01-11T10:37:15.441378
| 2016-12-22T11:38:35
| 2016-12-22T11:38:35
| 76,345,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,783
|
py
|
recording = '''jtfxgqec
zxoeuddn
anlfufma
dxuuyxkg
ttnewhlw
sjoyeiry
rgfwwdhw
qymxsllk
forftdvy
rzmnmewh
hogawihi
mtsyexba
mrjzqqfk
ypmkexpg
pjuyopgv
rtqquvaj
evubmlrq
bqlrtuce
ndidnbps
vqukosam
mzdyfkcd
rrbwdimb
uhnvxgly
aaimxpcv
acxvinqj
muaeikzy
lhzbosjd
fflqqiit
unfhzfrs
gmwoyvob
cculubmy
zqbugcwa
ijouicwt
bildjjww
ugksmnps
ivawibvu
igzteede
foehssxo
pkeevvlt
xumuixyw
okhhtycj
xhblffye
iqapgjqe
lkhpntum
wuzxgwow
bkkpfguu
bnqctsdi
cwncjrwn
eivhabsi
bwdicgfm
kowiourk
dhbzuztx
gibitfxo
wmrlhenb
wfzmjvwh
zddjirfg
fafhmiwf
ddhvufhg
qdwnlzqp
nhsnngut
uacmfgop
morcixux
sfdxrgqy
tezzvctv
dnnmtkfp
dygdzcib
efurreri
npvpklix
svpbdgyw
mcntltzd
inwkhxlx
sajfgeoi
nwkqrspt
qtzqsksv
mtncajjk
etarsvxr
eyaeeauy
gqnctylg
uerywmma
hjrxhtjb
zdsdyfzp
zhgrrhvd
yvxqyalf
rlgwftff
xczvgpzq
yydydclu
rzltbrro
jforpzau
zskadlfz
dqbqdsgv
bcwjltvc
byfoamgd
cpefdmso
ocuetyke
vlqrfnpp
ggikwydh
eakpyuov
osaguhlz
ylmrfvee
nvdvqpzm
pudbbuhh
bwmqdpyv
proscvgy
cetkcpjw
sbhcqeya
fgnyltmf
qcspgopp
bdhnemmy
tczkhihl
yduxunvr
dtxerncl
xnxeaayt
rvlcbgts
vpavzjqs
oueloufw
mubbhyna
nptmeppg
ojjfbuzz
lusboycs
gurmmorr
kefddaka
cpvpszit
bfvthzpm
owgcvdjo
simxphmv
rxedvjyw
hmeieuxr
vgqhcapz
vwtvbain
aobnhdsx
hkpshsjs
jxgegczu
xbsfxesk
pqhifeaj
triurorr
rnkufaxl
hmrqfoaw
veghzoxa
zbvgbpcm
rqrnbylj
txaawlta
uuksnfel
jqvycrvw
cdttmdpc
wojvbrzp
qvnuinon
gnpguyvh
cgbkpzbu
pdaqhlan
muiykslt
prvzlunm
whhcrchz
cahjhrkl
zifdgfpq
wanlienf
sfrnozvi
mwmykvyh
fbdfzgut
wfrviilb
ucaopfgo
fjhuikma
hdmizjdj
xngpfwvn
rueojtjg
xvtssxtx
vvcgzidf
xtehcxki
xksbfbso
osnzpqmy
isrnjkxh
utleakmz
dthmtbdt
plregxuh
amoeprsy
tmyhzhqd
csxqavbe
jmojlysw
slebxnbl
ldzryqmj
ajejyudk
ynhgnjhw
mdibxxxw
rvtcmesd
jmnwqddq
hppfoplc
nrcbjynz
kcqnjzue
mthvgjxm
ykztdbcv
etqqnhuz
tezkopgq
fwhwkqmz
fozpkzfy
hbbtlcog
hdvjqwyh
xuljsrvz
abskreoo
aedeydgc
dcyigvqf
ntpcvvgk
iiwgzkhl
zofhlqlx
veumtlae
qibdapwq
xpgpwirt
wvnnautq
wfhlgmdg
yqcrvdgx
srdufrbu
vycrvkpx
flwxzkim
enxayqxm
dgpntiaj
qedfutmp
vfdovine
dgrvjfjt
dqxxjahk
hnxpblyp
nnadwbsc
krmqqgwf
efykkzeb
lkrmrwqw
vfzayrwt
chopbnyf
vbydrtln
azmlestl
sqcyddvi
zdcubjok
afshwptc
sjgpuoch
bnfylydl
rsyxsbzi
psyuvyzx
npngqypd
xejayhdk
aqfmvjfi
tpffksph
uekwkjnj
ljsjimwm
hbgzjlig
ngssshxx
icitlosb
unxryqyt
nzpujfti
lupxnzhe
kxglfnic
ecewosbs
htlqxpiq
clqgnyfd
yyiozvar
mbvjgmyc
srhwhlin
casmlryr
ebuzskkp
iewhdqtr
oyidcobe
avptvltf
mfheqaxl
shqnezrq
xrpkzuvb
soxdjwba
aitmzlds
rpmpozpd
ccgxauky
gsstsjyx
bzeolqal
vfhddmuc
wfbbmqfv
pumxmnhj
qumdxkns
xymraott
uthlccig
ezpalags
giftxymr
ujjacleo
cgwgmktp
istetgdl
azedmaao
bnlfwyoq
orcwhbek
amswhkum
yxupesxu
mlzvqsrg
solkxzby
tbaxnjdu
xwbsiquk
hsftntsn
ajraaorz
mwmycrff
ymnbrbpj
uyfscatq
kzkgmbeh
libgpgnr
kxlgthxc
vzjbobyx
isqessab
ehursvof
guwrjnbi
xivkphwn
rurrmdmi
nqijeuzq
jambocej
qrtidktb
sbzvehmq
aikgzrsq
lgydnujf
twafyzry
nxhtklba
xhyaqyqe
xgvdfcrf
wdieppsd
iabrfmdm
doijaavc
oxydttkg
qsqiofwv
titrvjym
mwojqcku
tewiyhjx
jlqbksqd
knycvoks
tmcbnvhv
ekksoxmz
mgvommal
hrosnzeu
fzeymbek
evqxcukn
ilkpvdvl
rclpjbkb
tdpitlei
zvvzuucc
pzdgwnfz
mralxxlz
wywkawzh
hmazaakd
llltvbex
ihsmefpz
rzzgkjyz
srjqpeoq
jrczcdna
uuyskwop
yeuiaepa
vzppcwnn
oqhxixdo
xkwpfsij
cmsoiogl
ngbmaeue
lmqttyrj
yhgjxfmx
lwfgjnyp
ibbkjgra
gaxsotzr
paugisvs
pcqqauqi
pweuwnqs
jcbrscrj
ovtsgcnh
oscsgtqn
hkpwmhwk
pmdgwclk
owmskdhh
qutyussr
atdkvmzl
oqslriwe
wafjwfxp
ipcqlsxv
kzurbnoh
lfhfzwqo
ucybqwrj
tgnblzgm
lhwlniea
tlxymfbu
bcyvlkvt
glpacpjk
rjagzpnu
fyjpvhaq
cjtzwtdu
dkaqawts
pjoovtlv
xsnwqixw
swcftfed
cadigksp
fnsmxccx
cbxmdxvb
hpyqnpjq
jzpvphmo
kdkpubul
kiajwwta
uyeuctbe
yetyzqxw
fgeemnbl
brprbvgj
xszwwlea
ygunyguo
jwplrcbq
fejndxnx
oxsmkcqm
ldwkbpsk
cmzuxrst
jaoadiiu
oxcpkgbc
nyulhuci
bdwfqtkv
ehxvnzyd
cizuemsb
lbqyqduk
kqweswcd
tqnicuzh
utyaiaeu
osjdgvtj
qmrxcaoa
qiltxgvv
qklfgyss
lpjebmuo
bvebkous
yifrmeoa
jzgntlep
wadcknde
kaikclag
tucuhehr
bvwhuwzn
uvlecxgy
rzyxjhmo
dyyfwjgv
vocjkohi
ylyflktq
raltxpqg
eitypruw
pfbmopgm
qerushjt
xykophcv
amjhrlhi
uqkjhdhn
kkohprfw
hvsmtnfd
uxgiqmqc
npxwplcj
ltchgces
exiyyief
ysmvbqso
zpyvuhqz
lkvwronk
vxilskkl
cxfypwcd
jhrczkmf
rdedtejq
gmxcrlzi
jumwfmnn
gkynzdtd
dfdkxggc
yldclxhz
fsxvbwyj
ioiupzio
lxyqvncv
rsgsviny
osgcimej
tecqrgkq
tozohtwt
kmlowfrf
hhpiukqe
xlxlkjwf
ntvtoexx
zzvsvdow
yluidajg
vumkynvp
vaxipwwg
pqymmoif
sgjzogut
jppwszzn
gvvaibqu
lwjotuil
srflotab
ibnblmjm
kvcsdivb
wqrpzmvr
gcmqdezs
vrizdyfo
vtqnsjbf
jwocjmvb
fjkiiowl
ctjhmmrq
pcckqfki
wqolxgfg
gbsdyrbc
giqmfqwb
fodfpvyl
nxdzwvzz
hpnatltw
adjjyhjd
aoguhvmv
yyeanoir
baojaygs
ovkebbjb
pmykvfex
zeooykoa
uuozuxjb
kxxvbhbr
jxbchjlr
qhiwdonk
dnvfwwfh
kjfrlslh
wionbrdf
qgkjarob
kwplsxso
txgelygh
vlmziqwf
wbetqqkp
qfkocear
wrvonhyr
sbiqrcri
lnwzitce
bctyrwph
kallfwzc
zfqwanet
bevnljjr
kwqsktan
gjviqwlu
zflsnpig
wzaufqvr
uvxhutav
diejbica
ojciaexn
zyjoxrwi
djkodeiz
gsinkcqk
jkonssuq
eychyabp
fkcogwnr
kkioyrnn
inqxlztu
cqnbxxks
ipwmpdmm
moozfajm
irjaimrw
ojihmanb
hzoszxzc
ajjvxqqi
ohkfkijd
nlsahrpv
zizxtmxa
gjtnrurd
pyqghfuj
fltnnyfe
goxagvfp
nplhpkiy
dlwgyvby
fzrfhcgh
zaiuostp
jdjojfkw
thksqbjh
qopcwnht
ewkljwho
qguaeaac
wxzzxgcc
nlnuuhdu
ihtzrqay
nmtdbkhp
yasxhulm
drzjobfy
qpgcjdxn
aegbxmjb
bbuxsffr
zevjcgzn
pgbqezxk
qdlepjko
zbtzvicm
ssjdcggg
ugrtxalo
tsbvnppt
rboleppu
gywfqiwz
skgzeqhu
hzuggbcf
dkegaxap
zijcjrkm
jtfkeoog
fyvtrvig
gophbeoj
ieatnihe
vlaauxgz
mxnheqkz
mftwybny
ebawojuj
dyrvecbs
lrrcwang
qswijdeu
wkuszdax
ecaokzfc
pmbznspx
tjqrztdv
mwdxruge
whutfdqy
zpfwqvox
fkqapoid
bodleqbn
kpxiuodk
johmsncc
enhamlol
yhtydoss'''.split("\n")
from collections import Counter
# get length of message
m_len = len(recording[0])
occurrence_list = [[] for i in range(m_len)]
code = ""
for line in recording:
    for pos, ch in enumerate(line):
        occurrence_list[pos].append(ch)
for entry in occurrence_list:
    mc = Counter(entry).most_common()[-1][0]  # <--- only this was changed: take the LEAST common character
    code += mc
print("Code: ", code)
|
[
"noreply@github.com"
] |
Korred.noreply@github.com
|
ebe40c80ffb741da326f2574f07345b6dafe1bc5
|
0bd284f7cc7fab96c76998cc8043a5474fe83671
|
/full_program/part15B_merge_diff_chunks.py
|
35c2b601abf4dc5d7d28d85367532a69648fe137
|
[] |
no_license
|
bgallag6/SolarProject
|
46fd40c66a1a33b19ed0b96271abda1f0a367d83
|
53f844604a2e819b24815f5b13f35fc154539d28
|
refs/heads/master
| 2021-01-11T23:35:57.410978
| 2019-04-03T18:53:39
| 2019-04-03T18:53:39
| 78,604,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 6 16:04:57 2018
@author: Brendan
"""
import numpy as np
import sys
import yaml
import os
size = int(sys.argv[1])
stream = open('specFit_config.yaml', 'r')
cfg = yaml.safe_load(stream)
directory = cfg['fits_dir']
date = cfg['date']
wavelength = cfg['wavelength']
mmap_derotate = cfg['mmap_derotate']
save_temp = cfg['save_temp']
#directory = 'S:'
#date = '20130626'
#wavelength = 171
#size = 16
cube_temp = []
# load derotated cube chunks
for i in range(size):
temp = np.load('%s/DATA/Temp/%s/%i/chunk_%i_of_%i.npy' % (directory, date, wavelength, i+1, size))
cube_temp.append(temp)
cube_final = np.vstack(cube_temp) # stack chunks into final derotated array
del cube_temp
if mmap_derotate == "y":
orig_shape = np.array([cube_final.shape[0], cube_final.shape[1], cube_final.shape[2]])
# create memory-mapped array with similar datatype and shape to original array
mmap_arr = np.memmap('%s/DATA/Temp/%s/%i/derotated_mmap.npy' % (directory, date, wavelength), dtype='%s' % cube_final.dtype, mode='w+', shape=tuple(orig_shape))
# write data to memory-mapped array
mmap_arr[:] = cube_final[:]
# save memory-mapped array dimensions to use when loading
np.save('%s/DATA/Temp/%s/%i/derotated_mmap_shape.npy' % (directory, date, wavelength), orig_shape)
# save original array if specified
if save_temp == "y":
np.save('%s/DATA/Temp/%s/%i/derotated.npy' % (directory, date, wavelength), cube_final)
if save_temp == "n":
for j in range(size):
fn = '%s/DATA/Temp/%s/%i/chunk_%i_of_%i.npy' % (directory, date, wavelength, j+1, size)
## if file exists, delete it ##
if os.path.isfile(fn):
os.remove(fn)
else: ## Show an error ##
print("Error: %s file not found" % fn)
# flush memory changes to disk, then remove memory-mapped object and original array
del mmap_arr
del cube_final
else:
np.save('%s/DATA/Temp/%s/%i/derotated.npy' % (directory, date, wavelength), cube_final)
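# A minimal sketch of reading the memory-mapped cube back later, using the
# saved shape file (assumptions: the same directory/date/wavelength values,
# and the original dtype, taken here to be float32):
#
# shape = np.load('%s/DATA/Temp/%s/%i/derotated_mmap_shape.npy' % (directory, date, wavelength))
# cube = np.memmap('%s/DATA/Temp/%s/%i/derotated_mmap.npy' % (directory, date, wavelength),
#                  dtype='float32', mode='r', shape=tuple(shape))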
|
[
"bgallag6@gmu.edu"
] |
bgallag6@gmu.edu
|
a35de49d058abb362dd85af4f6b2e58e2d3d16a1
|
24fef9c1cdd1829bd8160feeafa9982530b9e17f
|
/examples/simple.py
|
a1ac45f869972b350fb71c2107fe190175e8645f
|
[
"BSD-3-Clause"
] |
permissive
|
sprockets/sprockets.logging
|
e011e6ba151ad8d20f5e15c1aa8b8bc65865fc46
|
37ff1180f91696547a25fed2228c829c1a8fcb17
|
refs/heads/master
| 2021-01-16T19:00:22.419596
| 2015-12-11T17:59:01
| 2015-12-11T17:59:01
| 37,081,330
| 1
| 2
|
BSD-3-Clause
| 2021-11-01T12:04:27
| 2015-06-08T17:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
import logging
import sys
import sprockets.logging
formatter = logging.Formatter('%(levelname)s %(message)s {%(context)s}')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
handler.addFilter(sprockets.logging.ContextFilter(properties=['context']))
logging.Logger.root.addHandler(handler)
logging.Logger.root.setLevel(logging.DEBUG)
# Outputs: INFO Hi there {None}
logging.info('Hi there')
# Outputs: INFO No KeyError {bah}
logging.info('No KeyError', extra={'context': 'bah'})
# Outputs: INFO Now with context! {foo}
adapted = logging.LoggerAdapter(logging.Logger.root, extra={'context': 'foo'})
adapted.info('Now with context!')
|
[
"daves@aweber.com"
] |
daves@aweber.com
|
5afb442366deb72a4ed57ace517a81b771406e4c
|
283ce303a34e3632568ffc5fb3502fc6cb8e0525
|
/python_file/blog.py
|
a778224f7aef4571e9014298d95bbedf52fb25a3
|
[] |
no_license
|
billkunghappy/The_main_page
|
d245bed6cd6e8ce3724f8944ca9a26ae7f3da2d7
|
ba08670e214527da3d3c11cd8c4c9a80be5f8987
|
refs/heads/master
| 2021-01-22T07:13:43.468146
| 2014-12-18T15:04:05
| 2014-12-18T15:04:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,642
|
py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import basehandler
from basehandler import BaseHandler
from google.appengine.ext import db
import signup
from signup import User_data
from signup import hash_salt
import hashlib
# delete parent in db
# def blog_key(name='default'):
# return db.Key.from_path('blogs', name)
def hash_str(s):
return hashlib.md5(s).hexdigest()
def make_secure_val(s,salt):
return "%s|%s" % (s, hash_str(s+salt))
def check_secure_val(h,salt):
val=h.split('|')[0]
if h==make_secure_val(val,salt):
return val
class Post(db.Model):
subject=db.StringProperty(required=True)
content=db.TextProperty(required=True)
created=db.DateTimeProperty(auto_now_add=True)
username=db.StringProperty(required=True)
def as_dict(self):
time_fmt = '%c'
d = {'subject': self.subject,
'content': self.content,
'created': self.created.strftime(time_fmt),
'username': self.username}
return d
# Last_time=db.DateTimeProperty(auto_now=True)
# def render(self):
# self._render_text =self.content.replace('\n','<br>')
# return self.render_str('post.html',p=self)
class Newpost(BaseHandler):
def get(self):
cookie=self.request.cookies.get('user_key')
user=None
if cookie:
user=check_secure_val(cookie,hash_salt)
if user:
userkey=db.get(user)
if userkey:
userhash=make_secure_val(str(userkey.username),hash_salt)
self.response.headers.add_header('Set-Cookie','user=%s'%userhash)
self.render('blog_input.html')
else:
self.redirect("/Blog")
def post(self):
subject=self.request.get("subject")
content=self.request.get("content")
name=self.request.cookies.get('user')
username=check_secure_val(name,hash_salt)
terror=""
werror=""
if username:
if subject!="" and content!="":
w=Post(subject=subject,content=content,username=username)
w.put()
self.redirect('/Blog')
else:
if subject=="":
terror="You didn't enter the title!"
if content=="":
werror="You didn't enter any word!"
self.render('blog_input.html',subject=subject,content=content,terror=terror,werror=werror)
else:
self.redirect('/Blog')
# class Blog_new(BaseHandler):
# def get(self):
# key.db.from_path('post',int(post_id),parent=blog_key)
# post=db.get(key)
# if not post:
# self.error(404)
# return
# self.render("blog_new",post=post)
class Blog(BaseHandler):
def get(self):
posts=db.GqlQuery('select * from Post order by created desc limit 10')
if self.format == 'html':
self.render('blog.html',posts=posts)
else:
return self.render_json([p.as_dict() for p in posts])
|
[
"billkung.happy@gmail.com"
] |
billkung.happy@gmail.com
|
c48e0acd0b93aaa16ad29b6b933ddccd5bbcaa1e
|
18b053edfbc8b58a2a9c25f65508d251440fb419
|
/ap2final.py
|
d9d0c3d40b032a71b4d866a4ab4933c394417131
|
[] |
no_license
|
pedromsilva99/labi_weather_tcp
|
3bd46fac4dc53cf6af6762ef8da184a84bccd102
|
f112b682a591b28ffb70201a446f7226a4cfe32b
|
refs/heads/main
| 2023-03-26T02:16:16.328684
| 2021-03-25T22:14:04
| 2021-03-25T22:14:04
| 351,587,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,557
|
py
|
import sys, socket, csv, json, hashlib, binascii
from random import randint
from Crypto.Cipher import AES
import base64
def main():
tcp_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_s.connect(("193.136.92.147", 8080))
#res = input("Quer usar mensagens encriptadas? Y/n ")
#if res in ['Y', 'y']:
# encrypt(tcp_s)
#else:
#f_connect(tcp_s)
f_connect(tcp_s)
dict_data = tcp_s.recv(4096).decode("utf-8")
print(dict_data)
fich = open('Dados.csv','w')
writer = csv.DictWriter(fich, fieldnames=['WIND','HUMIDITY','TEMPERATURE'], delimiter=",")
writer.writeheader()
temps = 0
cont = 0
while 1:
j_data = tcp_s.recv(4096).decode("utf-8")
try:
data_s = json.loads(j_data)
print_csv(data_s, fich, writer)
weather_info(data_s, cont, temps)
if cont == 3:
cont = 0
temps = 0
cont = cont + 1
temps = temps + data_s['TEMPERATURE']
except:
continue
print(j_data)
fich.close()
tcp_s.close()
def weather_info(data_s, cont, temps): # function to print the weather information to the terminal
if cont == 3:
media = temps/3
if media < 20:
print("A média da temperatura é %f. Leve um casaco!" % (media))
else:
print("A média da temperatura é %f. Está um tempo agradável." % (media))
def print_csv(data_s, fich, writer): # function to write the information to the CSV document
writer.writerow({'WIND': data_s["WIND"], 'HUMIDITY': data_s["HUMIDITY"], 'TEMPERATURE': data_s["TEMPERATURE"]})
fich.flush()
def f_connect(tcp_s): # function to receive the TOKEN and send the READ request
con_data = "CONNECT\n"
tcp_s.send(con_data.encode("utf-8"))
data = tcp_s.recv(4096).decode("utf-8")
print(data)
try:
dict_token = json.loads(data)
read_data = ("READ "+str(dict_token["TOKEN"])+"\n")
tcp_s.send(read_data.encode("utf-8"))
except:
main()
# We could not include the encryption part due to an error in the recv_data function
#~ def encrypt(tcp_s):
#~ p = 2**33
#~ g = 49985642365
#~ a = randint(0,9)
#~ A = pow(g,a,p)
#~ con_data = "CONNECT "+str(A)+","+str(p)+","+str(g)+"\n"
#~ tcp_s.send(con_data.encode("utf-8"))
#~ data = tcp_s.recv(4096).decode("utf-8")
#~ print(data)
#~ try:
#~ raw_B = json.loads(data)
#~ B = raw_B['B']
#~ read_data = "READ "+str(raw_B['TOKEN'])+"\n"
#~ except:
#~ encrypt(tcp_s)
#~ X = pow(B,a,p)
#~ key = hashlib.md5()
#~ key.update(str(X).encode("utf-8"))
#~ X = key.hexdigest()
#~ X = X[0:16]
#~ cipher = AES.new(X)
#~ lst_block = len(read_data) % cipher.block_size
#~ if lst_block != cipher.block_size :
#~ p = cipher.block_size - len(read_data)
#~ read_data = read_data + chr(p) * p
#~ data = cipher.encrypt(read_data)
#~ data = base64.b64encode(data)+"\n".encode("utf-8")
#~ tcp_s.send(data)
#~ data = recv_data(tcp_s, X).decode("utf-8")
#~ fich = open('Dados.csv','w')
#~ writer = csv.DictWriter(fich, fieldnames=['WIND','HUMIDITY','TEMPERATURE'], delimiter=",")
#~ writer.writeheader()
#~ temps = 0
#~ cont = 0
#~ while 1:
#~ try:
#~ data = json.loads(recv_data(tcp_s, X).decode("utf-8"))
#~ print_csv(data, fich, writer)
#~ weather_info(data, cont, temps)
#~ if cont == 3:
#~ cont = 0
#~ temps = 0
#~ cont = cont + 1
#~ temps = temps + data_s['TEMPERATURE']
#~ print(data)
#~ except:
#~ continue
#~ fich.close()
#~ tcp_s.close()
#~ def recv_data(tcp_s, X):
#~ cipher = AES.new(X)
#~ data = tcp_s.recv(4096)
#~ data = base64.b64decode(data)
#~ data = cipher.decrypt(data)
#~ p = data[len(data)-1]
#~ data = data[0:len(data)-p]
#~ return data
main()
|
[
"pedromsilva99@ua.pt"
] |
pedromsilva99@ua.pt
|
fb48dd21d053b996a5c472cecc4e2896db4f5c60
|
2729328611dcacaeeae2d9e6f0937f32d7b6d33e
|
/scripts/1d-mol.py
|
8626d83dc45643a181c4a472402969ad66c1f160
|
[] |
no_license
|
BourgValentin/B2-Python
|
3563994b2890bf56600f9a0a7ecb08f261db9a76
|
aedb6246492243b1454ab6e861d689838410b262
|
refs/heads/master
| 2020-04-02T09:14:27.023888
| 2018-11-11T21:27:13
| 2018-11-11T21:27:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
#!/usr/bin/python3.6
####################
# The game is to guess a number between 1 and 100
#
# You can press 'q' to quit at any time
#
# Pressing CTRL+C will display a message and then exit the program
####################
import re
import random
import signal
import sys
pattern = re.compile("^[1-9][0-9]?$|^100$|^[q]$")
def kill(sig, frame):
print('Tu as entré CTRL+C, le programme va s\'arréter')
sys.exit(0)
signal.signal(signal.SIGINT, kill)
print("Trouvez un nombre entre 1 et 100 : ")
random_nbr = random.randint(1,100)
user_input = input("Saisie :")
while not pattern.match(user_input):
print("Erreur : Veuillez entrer un nombre ou q pour quitter : ")
user_input = input("Saisie :")
while pattern.match(user_input) and user_input != 'q':
if int(user_input) > random_nbr:
print("Plus petit : ")
user_input = input("Saisie :")
elif int(user_input) < random_nbr:
print("Plus grand : ")
user_input = input("Saisie :")
elif int(user_input) == random_nbr:
print("Vous avez gagné !")
break
if user_input == 'q':
print("La réponse était : ", random_nbr)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
e6038327cf20250247c84a651dcc30768538ba60
|
9402b2b379d633959431711ea2151ebdb58e30db
|
/trackme/bin/getlistdef.py
|
dff23efcc86e7afc8ddb8bd66d65dad83ff20d6e
|
[
"Apache-2.0"
] |
permissive
|
densetax2/trackme
|
2ffdcfc78cb40f1a240e9325fec008d8f934604d
|
7143d807f67529f2f2faa1fb8d5fbfd388dc45d1
|
refs/heads/master
| 2022-11-17T21:14:42.342309
| 2020-07-05T12:41:21
| 2020-07-05T12:41:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,413
|
py
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import app
import os,sys
splunkhome = os.environ['SPLUNK_HOME']
sys.path.append(os.path.join(splunkhome, 'etc', 'apps', 'trackme', 'lib'))
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
from splunklib import six
@Configuration()
class CountMatchesCommand(StreamingCommand):
""" Counts the number of non-overlapping matches to a regular expression in a set of fields.
##Syntax
.. code-block::
countmatches fieldname=<field> pattern=<regular_expression> <field-list>
##Description
A count of the number of non-overlapping matches to the regular expression specified by `pattern` is computed for
each record processed. The result is stored in the field specified by `fieldname`. If `fieldname` exists, its value
is replaced. If `fieldname` does not exist, it is created. Event records are otherwise passed through to the next
pipeline processor unmodified.
##Example
Count the number of words in the `text` of each tweet in tweets.csv and store the result in `word_count`.
.. code-block::
| inputlookup tweets | countmatches fieldname=word_count pattern="\\w+" text
"""
fieldname = Option(
doc='''
**Syntax:** **fieldname=***<fieldname>*
**Description:** Name of the field that will hold the match count''',
require=True, validate=validators.Fieldname())
outname = Option(
doc='''
**Syntax:** **outname=***<outname>*
**Description:** Name of the outpuf field that will hold the index name''',
require=True, validate=validators.Fieldname())
pattern = Option(
doc='''
**Syntax:** **pattern=***<regular-expression>*
**Description:** Regular expression pattern to match''',
require=True, validate=validators.RegularExpression())
def stream(self, records):
self.logger.debug('CountMatchesCommand: %s', self) # logs command line
pattern = self.pattern
outname = self.outname
count = 0
whitelist = ""
for record in records:
for fieldname in self.fieldnames:
matches = pattern.findall(six.text_type(record[fieldname].decode("utf-8")))
count += len(matches)
record[self.fieldname] = count
if whitelist != "":
whitelist = str(whitelist) + "|" + str(record)
else:
whitelist = str(record)
# whitelist is empty
if count == 0:
whitelist = "[('" + str(outname) + "', '*')]"
yield {'_raw': str(whitelist)}
dispatch(CountMatchesCommand, sys.argv, sys.stdin, sys.stdout, __name__)
|
[
"guilhem.marchand@gmail.com"
] |
guilhem.marchand@gmail.com
|
aaa5ad87911a42ebbb247e8b73f56da709fefbcd
|
7bd0954e956993df19d833810f9d71b60e2ebb9a
|
/phasor/matrix/linalg.py
|
3a6788b4d1a7ce9f510a4e048f3678bca9bf17d9
|
[
"Apache-2.0"
] |
permissive
|
aa158/phasor
|
5ee0cec4f816b88b0a8ac298c330ed48458ec3f2
|
fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d
|
refs/heads/master
| 2021-10-22T09:48:18.556091
| 2019-03-09T18:56:05
| 2019-03-09T18:56:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
#from builtins import zip, range
import numpy as np
def stackrange(rangetup):
rangestack = []
for v in rangetup:
try:
iter(v)
except TypeError:
rangestack.append(list(range(v)))
else:
rangestack.append(v)
iterstack = [iter(rangestack[0])]
tupstack = [()]
while True:
while len(iterstack) < len(rangestack):
if not iterstack:
break
try:
nval = next(iterstack[-1])
tupstack.append(tupstack[-1] + (nval,))
iterstack.append(iter(rangestack[len(iterstack)]))
except StopIteration:
iterstack.pop()
tupstack.pop()
continue
if not iterstack:
break
ptup = tupstack.pop()
for v in iterstack.pop():
yield ptup + (v,)
def linalg_solve_bcast(M, V):
#print(M.shape, V.shape)
#assert(M.shape[2:] == V.shape[1:])
if M.shape[:-2] == () and V.shape[:-1] == ():
return np.linalg.solve(M, V)
else:
b = np.broadcast(M[..., 0, 0], V[..., 0])
rtype = np.find_common_type([], [M.dtype, V.dtype])
rvec = np.empty(b.shape + M.shape[-1:], dtype = rtype)
idx = 0
for idx in stackrange(b.shape):
idxM = tuple((0 if iM == 1 else iB) for iM, iB in zip(M.shape[:-2], idx))
idxV = tuple((0 if iV == 1 else iB) for iV, iB in zip(V.shape[:-1], idx))
Mred = M[idxM + (slice(None), slice(None))]
Vred = V[idxV + (slice(None),)]
Vsol = np.linalg.solve(Mred, Vred)
rvec[idx + (slice(None),)] = Vsol
return rvec
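# A minimal usage sketch: solve a batch of 3x3 systems where M is shared
# across the batch (leading dim 1) and V varies, exercising the broadcast
# path above. (Note: the module's np.find_common_type call requires NumPy < 2.)
if __name__ == '__main__':
    M = np.random.rand(1, 3, 3) + 3 * np.eye(3)   # (1, 3, 3), well conditioned
    V = np.random.rand(5, 3)                      # (5, 3)
    X = linalg_solve_bcast(M, V)                  # -> (5, 3)
    MX = np.einsum('...ij,...j->...i', np.broadcast_to(M, (5, 3, 3)), X)
    print(np.allclose(MX, V))                     # True: each system is solved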
|
[
"Lee.McCuller@gmail.com"
] |
Lee.McCuller@gmail.com
|
8e639334544826247ad7d472b44bb501b4a7028c
|
bf9b262fb305e2ed815e079323115fbc4c7f6bfb
|
/leetcode_198.py
|
d7bf652f5f62f6767a0e58a1d52dab2e3e6c9255
|
[] |
no_license
|
Rockrs/Algorithm_DS
|
5c3a7a2dc0214b32f664f14194a09957e384b747
|
b3e462a6cba7a164818e71e480ca9f8cb91e2bca
|
refs/heads/master
| 2023-02-10T13:42:52.605408
| 2021-01-05T02:16:32
| 2021-01-05T02:16:32
| 288,692,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
from typing import List

class Solution:
def rob(self, nums: List[int]) -> int:
if len(nums)==0:
return 0
for i in range(len(nums)-3,-1,-1):
if i==len(nums)-3:
nums[i]+= nums[i+2]
else:
nums[i]+= max(nums[i+2],nums[i+3])
return max(nums)
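# A quick check of the bottom-up recurrence above:
# Solution().rob([2, 7, 9, 3, 1]) -> 12 (rob houses worth 2 + 9 + 1)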
|
[
"shrmabhishek2012@gmail.com"
] |
shrmabhishek2012@gmail.com
|