Each row of this dump describes one source file. The columns, their types, and the observed value ranges or class counts are:

| column | dtype | observed range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | 0–69 items |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | 1 item |
| author_id | string | length 0–212 |

Sample rows follow, one record per file, with the `content` column shown as a code listing.
---

**/exchange_rater/services/shared/utils/math_utils.py** · repo `eduardolujan/exchange_rates` @ `refs/heads/main` · Python (`py`, UTF-8, 483 bytes) · license_type: no_license · detected_licenses: []
ids: blob `518ce45cbabbf0f9d33a1e3185282ce2e4d73607` · directory `66288980da0b51d46e3c3d042711e3b33f3f081a` · content `3e8057bd603ac16049f99e0f74793d55235faa6a` · snapshot `77bb3bf809ec9a41a418af067d058ebf90aa9d6e` · revision `dc2ac2a0a18bf400bbc4a4de06bf441c7043ef0f`
visit_date 2023-02-04T12:00:20.683137 · revision/committer_date 2020-12-27T01:12:51 · github_id 320,956,914 · star_events_count 1 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
# -*- coding: utf-8 -*-
import math
def truncate(number, decimals=0):
"""
Returns a value truncated to a specific number of decimal places.
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer.")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more.")
elif decimals == 0:
return math.trunc(number)
factor = 10.0 ** decimals
return math.trunc(number * factor) / factor
```
authors: ["eduardo.lujan.p@gmail.com"] · author_id: eduardo.lujan.p@gmail.com
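A quick check of the `truncate` helper above (illustrative values, not part of the dataset row):

```python
assert truncate(3.14159, 2) == 3.14
assert truncate(-1.999) == -1        # decimals=0 falls back to math.trunc
assert truncate(1.005, 1) == 1.0     # truncates toward zero, never rounds
```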
---

**/beginDjango/firstProject/news/forms.py** · repo `reboot-13/first_django_project` @ `refs/heads/main` · Python (`py`, UTF-8, 1,037 bytes) · license_type: no_license · detected_licenses: []
ids: blob `0c073690cd3bab9f31f78e34018b57cd06cb07d6` · directory `246438f5e84c83755253e26740857a339aa5ad0f` · content `5e99c89c60d3e896f30857260bb58049f5b77eb4` · snapshot `2a93cd1dee1de3d01dfbddb8ff551b5030b2a811` · revision `73b666a5ba790496b7f9986f71696e429f556c95`
visit_date 2023-06-09T21:02:41.159688 · revision/committer_date 2021-07-04T10:54:02 · github_id 382,825,892 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
from .models import Article
from django.forms import ModelForm, TextInput, Textarea, DateTimeInput
class ArticleForm(ModelForm):
class Meta:
model = Article
fields = ['author_name', 'title', 'anons', 'full_text', 'date']
widgets = {
'author_name': TextInput(attrs={
'class': 'form-control',
                'placeholder': 'Имя автора'  # "Author name"
}),
'title': TextInput(attrs={
'class': 'form-control',
                'placeholder': 'Название статьи'  # "Article title"
}),
'anons': TextInput(attrs={
'class': 'form-control',
                'placeholder': 'Анонс статьи'  # "Article teaser"
}),
'full_text': Textarea(attrs={
'class': 'form-control',
                'placeholder': 'Текст статьи'  # "Article text"
}),
'date': DateTimeInput(attrs={
'class': 'form-control',
                'placeholder': 'Дата'  # "Date"
})
}
```
authors: ["roma.ermilov.02@gmail.com"] · author_id: roma.ermilov.02@gmail.com
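A minimal sketch of how this `ModelForm` might be exercised, assuming a configured Django project with the `Article` model fields listed above (all data values here are made up):

```python
form = ArticleForm(data={
    'author_name': 'Ivan',
    'title': 'Hello',
    'anons': 'Teaser',
    'full_text': 'Body text',
    'date': '2021-07-04 10:54',
})
if form.is_valid():
    article = form.save()  # creates and returns an Article row
```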
---

**/study_further/python_core/decorator_demo3.py** · repo `chenmingrang/python_study` @ `refs/heads/master` · Python (`py`, UTF-8, 505 bytes) · license_type: no_license · detected_licenses: []
ids: blob `f3edf3d502f972e8a9a02a4641e0e7a0d768e175` · directory `5c4e7323aed9ab4bfef31aecbaf1d5f0c650d05d` · content `733c3266abb1a51faff9095e49f71372645096cd` · snapshot `dbb4f65bae75033625e5935c95ef5e34d442d440` · revision `c6e0a3d1482f1154f26c208d9fb5563de353b82f`
visit_date 2020-09-03T02:36:53.315299 · revision/committer_date 2018-01-10T06:02:13 · github_id 94,408,408 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
def outer1(func):
print("==outer1==")
def inner1():
print("==inner1==")
func()
return inner1
def outer2(func):
print("==outer2==")
def inner2():
print("==inner2==")
func()
return inner2
# As soon as the Python interpreter reaches these lines it applies the decorators; decoration does not wait until the function is called
@outer1
@outer2
def foo():
print("==foo==")
# Decoration has already happened by the time foo is called
print("*="*15, "foo is called", "*="*15)
foo()
```
authors: ["chenmingrang@126.com"] · author_id: chenmingrang@126.com
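Because stacked decorators apply bottom-up at definition time, `foo = outer1(outer2(foo))`, so "==outer2==" prints before "==outer1==". Running the script above prints:

```
==outer2==
==outer1==
*=*=*=*=*=*=*=*=*=*=*=*=*=*=*= foo is called *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
==inner1==
==inner2==
==foo==
```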
---

**/mainapp/mainapp/urls.py** · repo `KhayrulloevDD/deals-csv` @ `refs/heads/main` · Python (`py`, UTF-8, 796 bytes) · license_type: no_license · detected_licenses: []
ids: blob `25b7a9381c7fab8b704950f07857e10d2912c034` · directory `40f12fcbc62daaacc518cddbb7a289ed4b2ff2eb` · content `f76eab325ac2c51ef1619ef9bf3bee6f63dc155b` · snapshot `6100f3bd772c09139f1c12bee3a3d5dccd1120cf` · revision `510afdb8a3716adb9090c21136230df4217c7ee9`
visit_date 2023-07-05T12:52:35.523506 · revision/committer_date 2021-08-02T10:24:12 · github_id 387,248,178 · star_events_count 1 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
"""mainapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('readcsv.urls'))
]
```
authors: ["dodullo.khayrulloev@gmail.com"] · author_id: dodullo.khayrulloev@gmail.com
---

**/problem_set_1.1.py** · repo `Ramesh29/Introduction-to-Computer-Science-and-Programming-Using-Python` @ `refs/heads/master` · Python (`py`, UTF-8, 410 bytes) · license_type: permissive · detected_licenses: ["Giftware"]
ids: blob `d2f3e9e7ba601cf371f65a1dbdaf14f28dbfe5c8` · directory `fde354b21d6e12b71ee06849329fef9820bdfbf5` · content `b019a6633ce599feb5e4829233a49dc00ec11e44` · snapshot `492e0abec60e298e5e9efe79a401b037a245314d` · revision `4fecdc727eaa830ac49ab7e70381264b1a4f89f9`
visit_date 2023-03-10T12:00:49.693911 · revision/committer_date 2021-02-26T02:30:03 · github_id 279,165,228 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
"""
Assume s is a string of lower case characters.
Write a program that counts up the number of vowels contained in the string s. Valid vowels are: 'a', 'e', 'i', 'o', and 'u'. For example, if s = 'azcbobobegghakl', your program should print:
Number of vowels: 5
"""
s = "ramesh"
vowels = ['a', 'e', 'i', 'o', 'u']  # renamed from `list` to avoid shadowing the built-in
count = 0
for c in s:
    if c in vowels:
        count = count + 1
print("Number of vowels:", count)
```
authors: ["boolean29@gmail.com"] · author_id: boolean29@gmail.com
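For the hard-coded `s = "ramesh"`, the script prints `Number of vowels: 2` (the vowels are 'a' and 'e').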
---

**/lib/other.py** · repo `runhani/py-faster-rcnn-resnet-imagenet` @ `refs/heads/master` · Python (`py`, UTF-8, 173 bytes) · license_type: permissive · detected_licenses: ["MIT", "LicenseRef-scancode-generic-cla", "BSD-2-Clause"]
ids: blob `a6a81aaefd9aed9ccf620c41df2ec4d01a782f94` · directory `b69312f2e6fcfe7b6820424d6c23df334a619f32` · content `f778b41fec4365f2ab4420a7acb73226280f6178` · snapshot `96e021d699670859960bbe1b3f7ec5613a614fab` · revision `da1b65537b14e7580ebe257a50b0f1342e1d9828`
visit_date 2021-05-10T14:46:08.386181 · revision/committer_date 2018-01-24T05:51:02 · github_id 118,530,513 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at 2018-01-22T23:44:19 · gha_created_at 2018-01-22T23:44:18 · gha_language null · is_vendor false · is_generated false

```python
def get_dataset_split_name(im_file):
parts = im_file.split("/")
for p in parts[::-1]:
if p in ['train', 'val', 'test']:
return p
return None
```
authors: ["tianzhi0549@163.com"] · author_id: tianzhi0549@163.com
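A usage sketch for `get_dataset_split_name` (the paths are hypothetical):

```python
assert get_dataset_split_name('/data/ILSVRC/val/img_001.JPEG') == 'val'
assert get_dataset_split_name('/data/misc/img_001.JPEG') is None
```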
---

**/src/dblog/dblog/settings.py** · repo `EfraimGENC/dblog` @ `refs/heads/main` · Python (`py`, UTF-8, 5,327 bytes) · license_type: no_license · detected_licenses: []
ids: blob `0f55465514af40569da7b3a7ce2de5e8a893375b` · directory `f39fe325b2dec9786a82ac52316e5654093795e1` · content `bad3484a82ac5b67db00876ba3b1c523a210bf15` · snapshot `0afc6bc5dd0de909f27583858796d5c3557ba94a` · revision `36a857ad3136085b8158c87bea9f3eebfda20d23`
visit_date 2023-08-26T07:41:07.618969 · revision/committer_date 2021-11-01T22:38:32 · github_id 422,436,906 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
"""
Django settings for dblog project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
from django.utils.translation import ugettext_lazy as _
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, 'dblog/.env'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.humanize',
'django.contrib.postgres',
# Project's Apps
'apps.account',
'apps.home',
'apps.blog',
    # Third-party apps
'phonenumber_field',
'imagekit',
'rest_framework',
'rest_framework.authtoken',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dblog.urls.main'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': env('DATABASE_NAME'),
'USER': env('DATABASE_USER'),
'PASSWORD': env('DATABASE_PASS'),
'HOST': 'db',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'tr-tr'
TIME_ZONE = 'Europe/Istanbul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/staticfiles/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "staticfiles"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Custom Account
AUTH_USER_MODEL = 'account.Profile'
AUTH_PROFILE_MODULE = 'account.Profile'
# Locale
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
LANGUAGES = (
('tr-tr', _('Turkish')),
('en-us', _('English')),
)
# Sites Framework
SITE_ID = 1
# Email Defaults
# EMAIL_HOST='smtp.yandex.com.tr'
# EMAIL_PORT=465
# EMAIL_HOST_USER='noreply@domain.com'
# EMAIL_HOST_PASSWORD='supersecret'
# EMAIL_USE_TLS=False
# EMAIL_USE_SSL=True
# DEFAULT_FROM_EMAIL = 'Name <noreply@domain.com>'
# REST Framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
],
'DEFAULT_FILTER_BACKENDS': [
'rest_framework.filters.OrderingFilter',
'rest_framework.filters.SearchFilter',
'django_filters.rest_framework.DjangoFilterBackend',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 100,
}
```
authors: ["efraim@kavimdigital.com"] · author_id: efraim@kavimdigital.com
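The settings above pull secrets from `dblog/.env` via django-environ. A hypothetical `.env` for local development, shown as comments (key names are taken from the `env(...)` calls above; the values are placeholders):

```python
# dblog/.env is a plain key=value file, not Python:
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   DATABASE_NAME=dblog
#   DATABASE_USER=dblog
#   DATABASE_PASS=change-me
```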
---

**/weatherApp/weatherApp/urls.py** · repo `arizona167/weather-App` @ `refs/heads/master` · Python (`py`, UTF-8, 799 bytes) · license_type: no_license · detected_licenses: []
ids: blob `a098ab4734cc42713bce829f2aadc75ca70433f5` · directory `f7f4efda78012c4207c44ec1d4711aaf5e8d6a6b` · content `3ae05fe80f4a8803b62d2db4abdc1011874d7d73` · snapshot `81709e709a4a12cd71492f56a216b747d2583bdd` · revision `e1afb7955c0e6ec3b79e8a1da4de38458322a960`
visit_date 2020-06-22T22:26:14.157418 · revision/committer_date 2019-07-23T11:08:43 · github_id 198,416,313 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
"""weatherApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('weather.urls'))
]
```
authors: ["igwegbearinze@gmail.com"] · author_id: igwegbearinze@gmail.com
---

**/venv/Turtles/Random.py** · repo `Madoune-SidAli/PythonTutorials` @ `refs/heads/master` · Python (`py`, UTF-8, 361 bytes) · license_type: no_license · detected_licenses: []
ids: blob `299cb20505e402e26105a20c9fc54d0e79d8948a` · directory `702ac710929e7b852e5dde2a9a489c16939abe2a` · content `2c37b6fdda1cddbd59b74a2393c570daba505468` · snapshot `cb5b00e19a76632f423b0ce74b49c20747a378f8` · revision `0aecde9bf373e1449b1affeba0179760af04e132`
visit_date 2020-07-02T10:07:20.998595 · revision/committer_date 2019-08-14T15:42:58 · github_id 201,496,486 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
import turtle
import random
colors = ["red", "orange", "yellow", "green", "blue", "purple"]
t = turtle.Turtle()
t.width(20)
for step in range(100):
# Change this to use a random number.
angle = random.randint(-90,90)
# Change this to use a random color.
color = random.choice(colors)
t.color(color)
t.right(angle)
t.forward(10)
```
authors: ["madoune.sidali@gmail.com"] · author_id: madoune.sidali@gmail.com
---

**/Course1- Algorithmic Toolbox/week2_algorithmic_warmup/2_last_digit_of_fibonacci_number/fibonacci_last_digit.py** · repo `praneeth1808/DSA-Coursera` @ `refs/heads/master` · Python (`py`, UTF-8, 415 bytes) · license_type: no_license · detected_licenses: []
ids: blob `166e19e6e6fa003af0ca706d5bfc2c23f59c0025` · directory `2cd518db5301e9e38269dbcc773af72c64ae9d47` · content `b3dc67aac62a0127f6cec49d996a33b1746f7ad0` · snapshot `a0acb37550f336de10559ab9d89b32ac5507b5a3` · revision `5c3433eab504d0a10841cdf00d082e628f110d3b`
visit_date 2023-05-08T09:18:08.947894 · revision/committer_date 2021-06-01T12:48:01 · github_id 275,324,169 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
# Uses python3
import sys
def get_fibonacci_last_digit_naive(n):
    # Last digits of F(0)..F(59); they repeat with period 60 (the Pisano period for modulus 10)
    fib_last_digits = [0, 1, 1, 2, 3, 5, 8, 3, 1, 4, 5, 9, 4, 3, 7, 0, 7, 7, 4, 1, 5, 6, 1, 7, 8, 5, 3, 8, 1, 9, 0, 9, 9, 8, 7, 5, 2, 7, 9, 6, 5, 1, 6, 7, 3, 0, 3, 3, 6, 9, 5, 4, 9, 3, 2, 5, 7, 2, 9, 1]
    return fib_last_digits[n % 60]
if __name__ == '__main__':
# input = sys.stdin.read()
n = int(input())
print(get_fibonacci_last_digit_naive(n))
```
authors: ["praneeth3331997@gmail.com"] · author_id: praneeth3331997@gmail.com
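The hard-coded table works because Fibonacci last digits repeat with period 60. A minimal sketch that derives the same table instead of hardcoding it:

```python
def fib_last_digit_table(period=60):
    """Last digits of F(0)..F(period-1), computed mod 10."""
    table = [0, 1]
    for _ in range(period - 2):
        table.append((table[-1] + table[-2]) % 10)
    return table

# Matches the hard-coded list above, e.g. the first few entries:
assert fib_last_digit_table()[:8] == [0, 1, 1, 2, 3, 5, 8, 3]
```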
---

**/MyTraining_latest/MyTraining/001.Interview_programs/General_interview_programs/Armstrong.py** · repo `anubeig/python-material` @ `refs/heads/master` · Python (`py`, UTF-8, 490 bytes) · license_type: no_license · detected_licenses: []
ids: blob `0fdd40f2c6759e234a4ad52b5bbc7a0847a796b3` · directory `0d2b74a707e21d85d6e663a8b558ee72e81f92f9` · content `6ca0ca567d004fcb5dfd267526e9368c2c2712b7` · snapshot `1e0f0d51f74206e01e9f1bec97a2e81e0a125c68` · revision `aef734073f40b2b45104726becf6205aa313283f`
visit_date 2022-07-21T00:50:43.417845 · revision/committer_date 2019-09-28T12:23:13 · github_id 211,494,935 · star_events_count 0 · fork_events_count 1 · gha_license_id null · gha_event_created_at 2022-07-13T19:14:45 · gha_created_at 2019-09-28T12:11:04 · gha_language Python · is_vendor false · is_generated false

```python
#Armstrong number
num = int(input("Enter a number: "))
# Changed num variable to string,
# and calculated the length (number of digits)
order = len(str(num))
# initialize the running total (named `total` so it does not shadow the built-in sum)
total = 0
# raise each digit to the power of the digit count and accumulate
temp = num
while temp > 0:
    digit = temp % 10
    print(digit)  # debug: current digit
    total += digit ** order
    print(total)  # debug: running total so far
    temp = temp // 10
    print(temp)   # debug: remaining digits
# display the result
if num == total:
    print(num, "is an Armstrong number")
else:
    print(num, "is not an Armstrong number")
```
authors: ["anu.mogal4@gmail.com"] · author_id: anu.mogal4@gmail.com
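Worked example: for input 153, `order` is 3 and the loop accumulates 3**3 + 5**3 + 1**3 = 27 + 125 + 1 = 153, so 153 is reported as an Armstrong number.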
---

**/scrapyTest/settings.py** · repo `fver1004/Library_Information_Extract` @ `refs/heads/master` · Python (`py`, UTF-8, 3,169 bytes) · license_type: permissive · detected_licenses: ["BSD-3-Clause", "Apache-2.0"]
ids: blob `86fce9123b4b88751dd8d3f74ed6146220fd8d22` · directory `fc785c142e4d38138db557bbd862bd651c293340` · content `4b0f571919d66219d17632df2d21d0124ea91964` · snapshot `132c9e7438f66d22da262cf6d4abb79d5655e4d4` · revision `39a991b5963489874fe358c6d157547dc57e80c9`
visit_date 2021-01-20T05:13:50.811099 · revision/committer_date 2017-05-26T09:53:08 · github_id 89,762,789 · star_events_count 5 · fork_events_count 1 · gha_license_id null · gha_event_created_at 2017-05-22T02:48:41 · gha_created_at 2017-04-29T04:07:41 · gha_language Python · is_vendor false · is_generated false

```python
# -*- coding: utf-8 -*-
# Scrapy settings for scrapyTest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapyTest'
SPIDER_MODULES = ['scrapyTest.spiders']
NEWSPIDER_MODULE = 'scrapyTest.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapyTest (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapyTest.middlewares.ScrapytestSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrapyTest.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrapyTest.pipelines.ScrapytestPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
```
authors: ["fver1004@gmail.com"] · author_id: fver1004@gmail.com
---

**/robot1/btc_usdt/robot_fun.py** · repo `seanxxxx/coinx` @ `refs/heads/master` · Python (`py`, UTF-8, 10,203 bytes) · license_type: no_license · detected_licenses: []
ids: blob `fca30051d0949eb304f4d934b5a6fa5a7e75edca` · directory `82ca64c6a819f3e2cb41057f2df9f758cedee28a` · content `e0d3222356ab5c9b8cebcbffc74381b08a17dec8` · snapshot `619a18f9b2d7f83076083055bfccf0c5e404f665` · revision `eb1a7ed430c546cf02ddcc79f436200b218d5244`
visit_date 2023-01-28T03:09:10.358463 · revision/committer_date 2018-09-07T07:49:19 · github_id 146,564,986 · star_events_count 0 · fork_events_count 1 · gha_license_id null · gha_event_created_at 2022-12-20T14:20:06 · gha_created_at 2018-08-29T07:52:37 · gha_language Python · is_vendor false · is_generated false

```python
# -*- coding:utf-8 -*-
import random,requests
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='coinx.log',
filemode='w')
# Define a handler that prints INFO-and-above log records to sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# Set the console log format
#format='%(asctime)s - %(levelname)s: %(message)s'
formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')
console.setFormatter(formatter)
# Attach the console handler to the root logger
logging.getLogger('').addHandler(console)
# Pick a random price from an interval centered on the given price
def getPrice(lastPrice):
minusNum = lastPrice-lastPrice*random.uniform(0.0001,0.001)
addNum = lastPrice+lastPrice*random.uniform(0.0001,0.001)
price = round(random.uniform(minusNum,addNum),2)
logging.info(u"当前获取的价格区间是:[%f,%f]" % (minusNum,addNum))
return price
# Fetch the latest BTC/USDT trade price from Bitstamp
def getMarketPrice(url):
req = requests.post(url)
lastPrice = req.json()['last']
logging.info("----------------------------------------------------------------")
logging.info("********** 当前BTC/USDT市场交易价格为:【%s】**********" %lastPrice)
logging.info("----------------------------------------------------------------")
return lastPrice
# Obtain the user's access token
def get_access_token(url_login,email,password):
headers = {"content-type": "application/json"}
login_data = {"email": email, "password": password}
login_request = requests.post(url_login, headers=headers, json=login_data)
access_token = login_request.json()['data']['access_token']
# logging.info("access_token:"+access_token)
# print(login_request.text)
return access_token
# Place a buy order
def order_buy(email,price,url_order,access_token,tradePairCode,count):
price = getPrice(price)
# count = 0
# if tradePairCode == 'btc_usdt':
# count = round(random.uniform(0.001, data['tradePair']['btc_usdtMaxCount']), 3)
# elif tradePairCode == 'eth_usdt':
# count = round(random.uniform(0.001, data['tradePair']['eth_usdtMaxCount']), 3)
# elif tradePairCode == 'xrp_usdt':
# count = round(random.uniform(0.001, data['tradePair']['xrp_usdtMaxCount']), 3)
# elif tradePairCode == 'bch_usdt':
# count = round(random.uniform(0.001, data['tradePair']['bch_usdtMaxCount']), 3)
# elif tradePairCode == 'eos_usdt':
# count = round(random.uniform(0.001, data['tradePair']['eos_usdtMaxCount']), 3)
# elif tradePairCode == 'ltc_usdt':
# count = round(random.uniform(0.001, data['tradePair']['ltc_usdtMaxCount']), 3)
# elif tradePairCode == 'ada_usdt':
# count = round(random.uniform(0.001, data['tradePair']['ada_usdtMaxCount']), 3)
# elif tradePairCode == 'xlm_usdt':
# count = round(random.uniform(0.001, data['tradePair']['xlm_usdtMaxCount']), 3)
# elif tradePairCode == 'iota_usdt':
# count = round(random.uniform(0.001, data['tradePair']['iota_usdtMaxCount']), 3)
# elif tradePairCode == 'eth_btc':
# count = round(random.uniform(0.001, data['tradePair']['eth_btcMaxCount']), 3)
# elif tradePairCode == 'xrp_btc':
# count = round(random.uniform(0.001, data['tradePair']['xrp_btcMaxCount']), 3)
# elif tradePairCode == 'bch_btc':
# count = round(random.uniform(0.001, data['tradePair']['bch_btcMaxCount']), 3)
# elif tradePairCode == 'eos_btc':
# count = round(random.uniform(0.001, data['tradePair']['eos_btcMaxCount']), 3)
# elif tradePairCode == 'ltc_btc':
# count = round(random.uniform(0.001, data['tradePair']['ltc_btcMaxCount']), 3)
# elif tradePairCode == 'ada_btc':
# count = round(random.uniform(0.001, data['tradePair']['ada_btcMaxCount']), 3)
# elif tradePairCode == 'xlm_btc':
# count = round(random.uniform(0.001, data['tradePair']['xlm_btcMaxCount']), 3)
# elif tradePairCode == 'iota_btc':
# count = round(random.uniform(0.001, data['tradePair']['iota_btcMaxCount']), 3)
# elif tradePairCode == 'xrp_eth':
# count = round(random.uniform(0.001, data['tradePair']['xrp_ethMaxCount']), 3)
# elif tradePairCode == 'eos_eth':
# count = round(random.uniform(0.001, data['tradePair']['eos_ethMaxCount']), 3)
# elif tradePairCode == 'ltc_eth':
# count = round(random.uniform(0.001, data['tradePair']['ltc_ethMaxCount']), 3)
# elif tradePairCode == 'ada_eth':
# count = round(random.uniform(0.001, data['tradePair']['ada_ethMaxCount']), 3)
# elif tradePairCode == 'xlm_eth':
# count = round(random.uniform(0.001, data['tradePair']['xlm_ethMaxCount']), 3)
# elif tradePairCode == 'iota_eth':
# count = round(random.uniform(0.001, data['tradePair']['iota_ethMaxCount']), 3)
chip_order_headers = {"content-type":"application/json","access_token":access_token}
order_data = {
"direction":"buy",
"orderType":'100',
"price":price,
"count":count,
"tradePairCode":tradePairCode
}
buy_request = requests.post(url_order, headers=chip_order_headers, json=order_data)
#print(up_buy_request.text)
logging.info(u"交易币对: %s 用户:%s 挂【买】单的价格:%s 挂【买】单的数量:%s"
% (tradePairCode.upper(),email.split('@')[0],price,count)+u" 状态:"+buy_request.json()['msg'])
return buy_request
# Place a sell order
def order_sell(email,price,url_order,access_token,tradePairCode,count):
price = getPrice(price)
# count = 0
# if tradePairCode == 'btc_usdt':
# count = round(random.uniform(0.001, data['tradePair']['btc_usdtMaxCount']), 3)
# elif tradePairCode == 'eth_usdt':
# count = round(random.uniform(0.001, data['tradePair']['eth_usdtMaxCount']), 3)
# elif tradePairCode == 'xrp_usdt':
# count = round(random.uniform(0.001, data['tradePair']['xrp_usdtMaxCount']), 3)
# elif tradePairCode == 'bch_usdt':
# count = round(random.uniform(0.001, data['tradePair']['bch_usdtMaxCount']), 3)
# elif tradePairCode == 'eos_usdt':
# count = round(random.uniform(0.001, data['tradePair']['eos_usdtMaxCount']), 3)
# elif tradePairCode == 'ltc_usdt':
# count = round(random.uniform(0.001, data['tradePair']['ltc_usdtMaxCount']), 3)
# elif tradePairCode == 'ada_usdt':
# count = round(random.uniform(0.001, data['tradePair']['ada_usdtMaxCount']), 3)
# elif tradePairCode == 'xlm_usdt':
# count = round(random.uniform(0.001, data['tradePair']['xlm_usdtMaxCount']), 3)
# elif tradePairCode == 'iota_usdt':
# count = round(random.uniform(0.001, data['tradePair']['iota_usdtMaxCount']), 3)
# elif tradePairCode == 'eth_btc':
# count = round(random.uniform(0.001, data['tradePair']['eth_btcMaxCount']), 3)
# elif tradePairCode == 'xrp_btc':
# count = round(random.uniform(0.001, data['tradePair']['xrp_btcMaxCount']), 3)
# elif tradePairCode == 'bch_btc':
# count = round(random.uniform(0.001, data['tradePair']['bch_btcMaxCount']), 3)
# elif tradePairCode == 'eos_btc':
# count = round(random.uniform(0.001, data['tradePair']['eos_btcMaxCount']), 3)
# elif tradePairCode == 'ltc_btc':
# count = round(random.uniform(0.001, data['tradePair']['ltc_btcMaxCount']), 3)
# elif tradePairCode == 'ada_btc':
# count = round(random.uniform(0.001, data['tradePair']['ada_btcMaxCount']), 3)
# elif tradePairCode == 'xlm_btc':
# count = round(random.uniform(0.001, data['tradePair']['xlm_btcMaxCount']), 3)
# elif tradePairCode == 'iota_btc':
# count = round(random.uniform(0.001, data['tradePair']['iota_btcMaxCount']), 3)
# elif tradePairCode == 'xrp_eth':
# count = round(random.uniform(0.001, data['tradePair']['xrp_ethMaxCount']), 3)
# elif tradePairCode == 'eos_eth':
# count = round(random.uniform(0.001, data['tradePair']['eos_ethMaxCount']), 3)
# elif tradePairCode == 'ltc_eth':
# count = round(random.uniform(0.001, data['tradePair']['ltc_ethMaxCount']), 3)
# elif tradePairCode == 'ada_eth':
# count = round(random.uniform(0.001, data['tradePair']['ada_ethMaxCount']), 3)
# elif tradePairCode == 'xlm_eth':
# count = round(random.uniform(0.001, data['tradePair']['xlm_ethMaxCount']), 3)
# elif tradePairCode == 'iota_eth':
# count = round(random.uniform(0.001, data['tradePair']['iota_ethMaxCount']), 3)
chip_order_headers = {"content-type":"application/json","access_token":access_token}
order_data = {
"direction":"sell",
"orderType":'100',
"price":price,
"count":count,
"tradePairCode":tradePairCode
}
sell_request = requests.post(url_order, headers=chip_order_headers, json=order_data)
#print(up_buy_request.text)
logging.info(u"交易币对: %s 用户:%s 挂【卖】单的价格:%s 挂【卖】单的数量:%s"
% (tradePairCode.upper(),email.split('@')[0],price,count)+u" 状态:"+sell_request.json()['msg'])
return sell_request
# Place an order
def takeOrder(orderType,price,count,url_order,access_token):
chip_order_headers = {"content-type":"application/json","access_token":access_token}
order_data = {
"direction":orderType,
"orderType":'200',
"price":price,
"count":count,
"tradePairCode":"btc_usdt"
}
request = requests.post(url_order, headers=chip_order_headers, json=order_data)
return request
# Cancel an order
def cancelOrder(url_cancel,orderId,remark,access_token):
cancel_headers = {"content-type":"application/json","access_token":access_token}
order_data = {
"orderId":orderId,
"remark":remark
}
cancel_request = requests.post(url_cancel, headers=cancel_headers, json=order_data)
return cancel_request
def batchBuyOrder(price,count,url_order,access_token):
return
def batchSellOrder(price,count,url_order,access_token):
return
```
authors: ["xuxuan@lanlingdai.net"] · author_id: xuxuan@lanlingdai.net
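A quick sanity check for `getPrice` above (a sketch; the extra 0.01 of slack covers the final `round(..., 2)`):

```python
for _ in range(1000):
    p = getPrice(10000.0)
    # getPrice draws from an interval within +/-0.1% of the input price
    assert abs(p - 10000.0) <= 10000.0 * 0.001 + 0.01
```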
---

**/scripts/conversion_hosts/virt-v2v-wrapper.py** · repo `jthadden/RHS-Infrastructure_Migration` @ `refs/heads/master` · Python (`py`, UTF-8, 28,974 bytes) · license_type: permissive · detected_licenses: ["Apache-2.0"]
ids: blob `9adea1167d1ab7652373a420618b940fa3669dac` · directory `96721554b1da5a4ccf8c3b0c88c06d0f6d79a6b1` · content `dcf5c9bb5c08271ca04de79e4973e42c3f3e7937` · snapshot `7ab55f3d97ceef69f5830c4e17b2d66c186c9a8e` · revision `dc638868b00cdbfe5635f8e4cb4bbf9381efb6ed`
visit_date 2020-04-09T17:47:40.017291 · revision/committer_date 2018-11-20T07:45:58 · github_id 160,491,413 · star_events_count 0 · fork_events_count 0 · gha_license_id Apache-2.0 · gha_event_created_at 2018-12-05T09:13:18 · gha_created_at 2018-12-05T09:13:18 · gha_language null · is_vendor false · is_generated false

```python
#!/usr/bin/python2
#
# Copyright (c) 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from contextlib import contextmanager
import json
import logging
import os
import pycurl
import re
import signal
import sys
import tempfile
import time
import ovirtsdk4 as sdk
import six
from urlparse import urlparse
if six.PY2:
import subprocess32 as subprocess
DEVNULL = open(os.devnull, 'r+')
else:
import subprocess
xrange = range
DEVNULL = subprocess.DEVNULL
# Wrapper version
VERSION = "6"
LOG_LEVEL = logging.DEBUG
STATE_DIR = '/tmp'
TIMEOUT = 300
VDSM_LOG_DIR = '/var/log/vdsm/import'
VDSM_MOUNTS = '/rhev/data-center/mnt'
VDSM_UID = 36
VDSM_CA = '/etc/pki/vdsm/certs/cacert.pem'
# For now there are limited possibilities in how we can select allocation type
# and format. The best thing we can do now is to base the allocation on type of
# target storage domain.
PREALLOCATED_STORAGE_TYPES = (
sdk.types.StorageType.CINDER,
sdk.types.StorageType.FCP,
sdk.types.StorageType.GLUSTERFS,
sdk.types.StorageType.ISCSI,
sdk.types.StorageType.POSIXFS,
)
# Tweaks
VDSM = True
# We cannot use the libvirt backend in virt-v2v and have to use direct backend
# for several reasons:
# - it is necessary on oVirt host when running as root; and we need to run as
# root when using export domain as target (we use vdsm user for other
# targets)
# - SSH transport method cannot be used with libvirt because it does not pass
# SSH_AUTH_SOCK env. variable to the QEMU process
DIRECT_BACKEND = True
def error(msg):
"""
Function to produce an error and terminate the wrapper.
WARNING: This can be used only at the early initialization stage! Do NOT
use this once the password files are written or there are any other
temporary data that should be removed at exit. This function uses
sys.exit() which overcomes the code responsible for removing the files.
"""
logging.error(msg)
sys.stderr.write(msg)
sys.exit(1)
def make_vdsm(data):
"""Makes sure the process runs as vdsm user"""
uid = os.geteuid()
if uid == VDSM_UID:
# logging.debug('Already running as vdsm user')
return
elif uid == 0:
# We need to drop privileges and become vdsm user, but we also need the
# proper environment for the user which is tricky to get. The best
# thing we can do is spawn another instance. Unfortunately we have
# already read the data from stdin.
# logging.debug('Starting instance as vdsm user')
cmd = '/usr/bin/sudo'
args = [cmd, '-u', 'vdsm']
args.extend(sys.argv)
wrapper = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = wrapper.communicate(json.dumps(data))
# logging.debug('vdsm instance finished')
sys.stdout.write(out)
sys.stderr.write(err)
# logging.debug('Terminating root instance')
sys.exit(wrapper.returncode)
sys.stderr.write('Need to run as vdsm user or root!\n')
sys.exit(1)
def daemonize():
"""Properly deamonizes the process and closes file desriptors."""
sys.stderr.flush()
sys.stdout.flush()
pid = os.fork()
if pid != 0:
# Nothing more to do for the parent
sys.exit(0)
os.setsid()
pid = os.fork()
if pid != 0:
# Nothing more to do for the parent
sys.exit(0)
os.umask(0)
os.chdir('/')
dev_null = open('/dev/null', 'w')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
    # Re-initialize cURL. This is necessary to re-initialize the PKCS #11
# security tokens in NSS. Otherwise any use of SDK after the fork() would
# lead to the error:
#
# A PKCS #11 module returned CKR_DEVICE_ERROR, indicating that a
# problem has occurred with the token or slot.
#
pycurl.global_cleanup()
pycurl.global_init(pycurl.GLOBAL_ALL)
class OutputParser(object):
COPY_DISK_RE = re.compile(br'.*Copying disk (\d+)/(\d+) to.*')
DISK_PROGRESS_RE = re.compile(br'\s+\((\d+\.\d+)/100%\)')
NBDKIT_DISK_PATH_RE = re.compile(
br'nbdkit: debug: Opening file (.*) \(.*\)')
OVERLAY_SOURCE_RE = re.compile(
br' *overlay source qemu URI: json:.*"file\.path": ?"([^"]+)"')
VMDK_PATH_RE = re.compile(
br'/vmfs/volumes/(?P<store>[^/]*)/(?P<vm>[^/]*)/'
+ br'(?P<disk>.*)(-flat)?\.vmdk')
RHV_DISK_UUID = re.compile(br'disk\.id = \'(?P<uuid>[a-fA-F0-9-]*)\'')
def __init__(self, v2v_log):
self._log = open(v2v_log, 'rbU')
self._current_disk = None
self._current_path = None
def parse(self, state):
line = None
while line != b'':
line = self._log.readline()
m = self.COPY_DISK_RE.match(line)
if m is not None:
try:
self._current_disk = int(m.group(1))-1
self._current_path = None
state['disk_count'] = int(m.group(2))
logging.info('Copying disk %d/%d',
self._current_disk+1, state['disk_count'])
if state['disk_count'] != len(state['disks']):
logging.warning(
'Number of supplied disk paths (%d) does not match'
' number of disks in VM (%s)',
len(state['disks']),
state['disk_count'])
except ValueError:
logging.exception('Conversion error')
# VDDK
m = self.NBDKIT_DISK_PATH_RE.match(line)
if m is not None:
self._current_path = m.group(1).decode()
if self._current_disk is not None:
logging.info('Copying path: %s', self._current_path)
self._locate_disk(state)
# SSH
m = self.OVERLAY_SOURCE_RE.match(line)
if m is not None:
path = m.group(1).decode()
                # Transform path to be relative to storage
self._current_path = self.VMDK_PATH_RE.sub(
br'[\g<store>] \g<vm>/\g<disk>', path)
if self._current_disk is not None:
logging.info('Copying path: %s', self._current_path)
self._locate_disk(state)
m = self.DISK_PROGRESS_RE.match(line)
if m is not None:
if self._current_path is not None and \
self._current_disk is not None:
try:
state['disks'][self._current_disk]['progress'] = \
float(m.group(1))
logging.debug('Updated progress: %s', m.group(1))
except ValueError:
logging.exception('Conversion error')
else:
logging.debug('Skipping progress update for unknown disk')
m = self.RHV_DISK_UUID.match(line)
if m is not None:
path = state['disks'][self._current_disk]['path']
disk_id = m.group('uuid')
state['internal']['disk_ids'][path] = disk_id
logging.debug('Path \'%s\' has disk id=\'%s\'', path, disk_id)
return state
def close(self):
self._log.close()
def _locate_disk(self, state):
if self._current_disk is None:
# False alarm, not copying yet
return
# NOTE: We assume that _current_disk is monotonic
for i in xrange(self._current_disk, len(state['disks'])):
if state['disks'][i]['path'] == self._current_path:
if i == self._current_disk:
# We have correct index
logging.debug('Found path at correct index')
else:
# Move item to current index
logging.debug('Moving path from index %d to %d', i,
self._current_disk)
d = state['disks'].pop(i)
state['disks'].insert(self._current_disk, d)
return
# Path not found
logging.debug('Path \'%s\' not found in %r', self._current_path,
state['disks'])
state['disks'].insert(
self._current_disk,
{
'path': self._current_path,
'progress': 0,
})
@contextmanager
def log_parser(v2v_log):
parser = None
try:
parser = OutputParser(v2v_log)
yield parser
finally:
if parser is not None:
parser.close()
@contextmanager
def sdk_connection(data):
connection = None
url = urlparse(data['rhv_url'])
username = url.username if url.username is not None else 'admin@internal'
try:
insecure = data['insecure_connection']
connection = sdk.Connection(
url=str(data['rhv_url']),
username=str(username),
password=str(data['rhv_password']),
ca_file=str(data['rhv_cafile']),
log=logging.getLogger(),
insecure=insecure,
)
yield connection
finally:
if connection is not None:
connection.close()
def is_iso_domain(path):
"""
Check if domain is ISO domain. @path is path to domain metadata file
"""
try:
logging.debug('is_iso_domain check for %s', path)
with open(path, 'r') as f:
for line in f:
if line.rstrip() == 'CLASS=Iso':
return True
except OSError:
logging.exception('Failed to read domain metadata')
except IOError:
logging.exception('Failed to read domain metadata')
return False
def find_iso_domain():
"""
Find path to the ISO domain from available domains mounted on host
"""
if not os.path.isdir(VDSM_MOUNTS):
logging.error('Cannot find RHV domains')
return None
for sub in os.walk(VDSM_MOUNTS):
if 'dom_md' in sub[1]:
# This looks like a domain so focus on metadata only
try:
del sub[1][sub[1].index('master')]
except ValueError:
pass
try:
del sub[1][sub[1].index('images')]
except ValueError:
pass
continue
if 'blockSD' in sub[1]:
# Skip block storage domains, we don't support ISOs there
del sub[1][sub[1].index('blockSD')]
if 'metadata' in sub[2] and \
os.path.basename(sub[0]) == 'dom_md' and \
is_iso_domain(os.path.join(sub[0], 'metadata')):
return os.path.join(
os.path.dirname(sub[0]),
'images',
'11111111-1111-1111-1111-111111111111')
return None
def write_state(state):
state = state.copy()
del state['internal']
with open(state_file, 'w') as f:
json.dump(state, f)
def wrapper(data, state, v2v_log, agent_sock=None):
v2v_args = [
'/usr/bin/virt-v2v', '-v', '-x',
data['vm_name'],
'-of', data['output_format'],
'--bridge', 'ovirtmgmt',
'--root', 'first'
]
if data['transport_method'] == 'vddk':
v2v_args.extend([
'-i', 'libvirt',
'-ic', data['vmware_uri'],
'-it', 'vddk',
'-io', 'vddk-libdir=%s' % '/opt/vmware-vix-disklib-distrib',
'-io', 'vddk-thumbprint=%s' % data['vmware_fingerprint'],
'--password-file', data['vmware_password_file'],
])
elif data['transport_method'] == 'ssh':
v2v_args.extend([
'-i', 'vmx',
'-it', 'ssh',
])
if 'rhv_url' in data:
v2v_args.extend([
'-o', 'rhv-upload',
'-oc', data['rhv_url'],
'-os', data['rhv_storage'],
'-op', data['rhv_password_file'],
'-oo', 'rhv-cafile=%s' % data['rhv_cafile'],
'-oo', 'rhv-cluster=%s' % data['rhv_cluster'],
'-oo', 'rhv-direct',
])
if data['insecure_connection']:
v2v_args.extend(['-oo', 'rhv-verifypeer=%s' %
('false' if data['insecure_connection'] else
'true')])
elif 'export_domain' in data:
v2v_args.extend([
'-o', 'rhv',
'-os', data['export_domain'],
])
if 'allocation' in data:
v2v_args.extend([
'-oa', data['allocation']
])
if 'network_mappings' in data:
for mapping in data['network_mappings']:
v2v_args.extend(['--bridge', '%s:%s' %
(mapping['source'], mapping['destination'])])
# Prepare environment
env = os.environ.copy()
env['LANG'] = 'C'
if DIRECT_BACKEND:
logging.debug('Using direct backend. Hack, hack...')
env['LIBGUESTFS_BACKEND'] = 'direct'
if 'virtio_win' in data:
env['VIRTIO_WIN'] = data['virtio_win']
if agent_sock is not None:
env['SSH_AUTH_SOCK'] = agent_sock
proc = None
with open(v2v_log, 'w') as log:
logging.info('Starting virt-v2v as: %r, environment: %r',
v2v_args, env)
proc = subprocess.Popen(
v2v_args,
stdin=DEVNULL,
stderr=subprocess.STDOUT,
stdout=log,
env=env,
)
try:
state['started'] = True
state['pid'] = proc.pid
write_state(state)
with log_parser(v2v_log) as parser:
while proc.poll() is None:
state = parser.parse(state)
write_state(state)
time.sleep(5)
logging.info('virt-v2v terminated with return code %d',
proc.returncode)
state = parser.parse(state)
except Exception:
logging.exception('Error while monitoring virt-v2v')
if proc.poll() is None:
logging.info('Killing virt-v2v process')
proc.kill()
state['return_code'] = proc.returncode
write_state(state)
if proc.returncode != 0:
state['failed'] = True
write_state(state)
def write_password(password, password_files):
pfile = tempfile.mkstemp(suffix='.v2v')
password_files.append(pfile[1])
os.write(pfile[0], bytes(password.encode('utf-8')))
os.close(pfile[0])
return pfile[1]
def spawn_ssh_agent(data):
try:
out = subprocess.check_output(['ssh-agent'])
logging.debug('ssh-agent: %s' % out)
sock = re.search(br'^SSH_AUTH_SOCK=([^;]+);', out, re.MULTILINE)
pid = re.search(br'^echo Agent pid ([0-9]+);', out, re.MULTILINE)
if not sock or not pid:
logging.error(
'Incomplete match of ssh-agent output; sock=%r; pid=%r',
sock, pid)
return None, None
agent_sock = sock.group(1).decode()
agent_pid = int(pid.group(1))
logging.info('SSH Agent started with PID %d', agent_pid)
except subprocess.CalledProcessError:
logging.error('Failed to start ssh-agent')
return None, None
env = os.environ.copy()
env['SSH_AUTH_SOCK'] = agent_sock
cmd = ['ssh-add']
if 'ssh_key_file' in data:
logging.info('Using custom SSH key')
cmd.append(data['ssh_key_file'])
else:
logging.info('Using SSH key(s) from ~/.ssh')
ret_code = subprocess.call(cmd, env=env)
if ret_code != 0:
logging.error('Failed to add SSH keys to the agent! ssh-add'
' terminated with return code %d', ret_code)
os.kill(agent_pid, signal.SIGTERM)
return None, None
return agent_pid, agent_sock
def check_install_drivers(data):
if 'virtio_win' in data and os.path.isabs(data['virtio_win']):
full_path = data['virtio_win']
else:
iso_domain = find_iso_domain()
iso_name = data.get('virtio_win')
if iso_name is not None:
if iso_domain is None:
error('ISO domain not found')
else:
if iso_domain is None:
# This is not an error
logging.warning('ISO domain not found' +
' (but install_drivers is true).')
data['install_drivers'] = False
return
# (priority, pattern)
patterns = [
(4, br'RHV-toolsSetup_([0-9._]+)\.iso'),
(3, br'RHEV-toolsSetup_([0-9._]+)\.iso'),
(2, br'oVirt-toolsSetup_([a-z0-9._-]+)\.iso'),
(1, br'virtio-win-([0-9.]+).iso'),
]
patterns = [(p[0], re.compile(p[1], re.IGNORECASE))
for p in patterns]
best_name = None
best_version = None
best_priority = -1
for fname in os.listdir(iso_domain):
if not os.path.isfile(os.path.join(iso_domain, fname)):
continue
for priority, pat in patterns:
m = pat.match(fname)
if not m:
continue
version = m.group(1)
logging.debug('Matched ISO %r (priority %d)',
fname, priority)
if best_version is None or \
(best_version < version and
best_priority <= priority):
best_name = fname
best_version = version
if best_name is None:
# Nothing found, this is not an error
logging.warn('Could not find any ISO with drivers' +
' (but install_drivers is true).')
data['install_drivers'] = False
return
iso_name = best_name
full_path = os.path.join(iso_domain, iso_name)
if not os.path.isfile(full_path):
error("'virtio_win' must be a path or file name of image in "
"ISO domain")
data['virtio_win'] = full_path
logging.info("virtio_win (re)defined as: %s", data['virtio_win'])
def handle_cleanup(data, state):
with sdk_connection(data) as conn:
disks_service = conn.system_service().disks_service()
transfers_service = conn.system_service().image_transfers_service()
disk_ids = state['internal']['disk_ids'].values()
# First stop all active transfers...
try:
transfers = transfers_service.list()
transfers = [t for t in transfers if t.image.id in disk_ids]
if len(transfers) == 0:
logging.debug('No active transfers to cancel')
for transfer in transfers:
logging.info('Canceling transfer id=%s for disk=%s',
transfer.id, transfer.image.id)
transfer_service = transfers_service.image_transfer_service(
transfer.id)
transfer_service.cancel()
# The incomplete disk will be removed automatically
disk_ids.remove(transfer.image.id)
except sdk.Error:
logging.exception('Failed to cancel transfers')
# ... then delete the uploaded disks
logging.info('Removing disks: %r', disk_ids)
endt = time.time() + TIMEOUT
while len(disk_ids) > 0:
for disk_id in disk_ids:
try:
disk_service = disks_service.disk_service(disk_id)
disk = disk_service.get()
if disk.status != sdk.types.DiskStatus.OK:
continue
logging.info('Removing disk id=%s', disk_id)
disk_service.remove()
disk_ids.remove(disk_id)
except sdk.Error:
logging.exception('Failed to remove disk id=%s',
disk_id)
if time.time() > endt:
logging.error('Timed out waiting for disks: %r', disk_ids)
break
time.sleep(1)
###########
# Read and parse input -- hopefully this should be safe to do as root
data = json.load(sys.stdin)
# NOTE: this is just pre-check to find out whether we can run as vdsm user at
# all. This is not validation of the input data!
if 'export_domain' in data:
# Need to be root to mount NFS share
VDSM = False
# Cannot use libvirt backend as root on VDSM host due to permissions
DIRECT_BACKEND = True
if VDSM:
make_vdsm(data)
# Logging is delayed until we know which user runs the wrapper. Otherwise we
# would have two logs.
log_tag = '%s-%d' % (time.strftime('%Y%m%dT%H%M%S'), os.getpid())
v2v_log = os.path.join(VDSM_LOG_DIR, 'v2v-import-%s.log' % log_tag)
wrapper_log = os.path.join(VDSM_LOG_DIR, 'v2v-import-%s-wrapper.log' % log_tag)
state_file = os.path.join(STATE_DIR, 'v2v-import-%s.state' % log_tag)
logging.basicConfig(
level=LOG_LEVEL,
filename=wrapper_log,
format='%(asctime)s:%(levelname)s: %(message)s (%(module)s:%(lineno)d)')
logging.info('Wrapper version %s, uid=%d', VERSION, os.getuid())
logging.info('Will store virt-v2v log in: %s', v2v_log)
logging.info('Will store state file in: %s', state_file)
password_files = []
try:
# Make sure all the needed keys are in data. This is rather poor
# validation, but...
if 'vm_name' not in data:
error('Missing vm_name')
# Output file format (raw or qcow2)
if 'output_format' in data:
if data['output_format'] not in ('raw', 'qcow2'):
error('Invalid output format %r, expected raw or qcow2' %
data['output_format'])
else:
data['output_format'] = 'raw'
# Transports (only VDDK for now)
if 'transport_method' not in data:
error('No transport method specified')
if data['transport_method'] not in ('ssh', 'vddk'):
error('Unknown transport method: %s', data['transport_method'])
if data['transport_method'] == 'vddk':
for k in [
'vmware_fingerprint',
'vmware_uri',
'vmware_password',
]:
if k not in data:
error('Missing argument: %s' % k)
# Targets (only export domain for now)
if 'rhv_url' in data:
for k in [
'rhv_cluster',
'rhv_password',
'rhv_storage',
]:
if k not in data:
error('Missing argument: %s' % k)
if 'rhv_cafile' not in data:
logging.info('Path to CA certificate not specified,'
' trying VDSM default: %s', VDSM_CA)
data['rhv_cafile'] = VDSM_CA
elif 'export_domain' in data:
pass
else:
error('No target specified')
# Network mappings
if 'network_mappings' in data:
if isinstance(data['network_mappings'], list):
for mapping in data['network_mappings']:
if not all(k in mapping for k in ("source", "destination")):
error("Both 'source' and 'destination' must be provided"
+ " in network mapping")
else:
error("'network_mappings' must be an array")
# Virtio drivers
if 'virtio_win' in data:
# This is for backward compatibility
data['install_drivers'] = True
if 'install_drivers' in data:
check_install_drivers(data)
else:
data['install_drivers'] = False
# Insecure connection
if 'insecure_connection' not in data:
data['insecure_connection'] = False
if data['insecure_connection']:
logging.info('SSL verification is disabled for oVirt SDK connections')
# Allocation type
if 'allocation' in data:
if data['allocation'] not in ('preallocated', 'sparse'):
error('Invalid value for allocation type: %r' % data['allocation'])
else:
# Check storage domain type and decide on suitable allocation type
# Note: This is only temporary. We should get the info from the caller
# in the future.
domain_type = None
with sdk_connection(data) as c:
service = c.system_service().storage_domains_service()
domains = service.list(search='name="%s"' %
str(data['rhv_storage']))
if len(domains) != 1:
                error('Found %d domains matching "%s"!' % (len(domains), data['rhv_storage']))
domain_type = domains[0].storage.type
logging.info('Storage domain "%s" is of type %r', data['rhv_storage'],
domain_type)
data['allocation'] = 'sparse'
if domain_type in PREALLOCATED_STORAGE_TYPES:
data['allocation'] = 'preallocated'
logging.info('... selected allocation type is %s', data['allocation'])
#
# NOTE: don't use error() beyond this point!
#
# Store password(s)
logging.info('Writing password file(s)')
if 'vmware_password' in data:
data['vmware_password_file'] = write_password(data['vmware_password'],
password_files)
if 'rhv_password' in data:
data['rhv_password_file'] = write_password(data['rhv_password'],
password_files)
if 'ssh_key' in data:
data['ssh_key_file'] = write_password(data['ssh_key'],
password_files)
# Create state file before dumping the JSON
state = {
'disks': [],
'internal': {
'disk_ids': {},
},
}
try:
if 'source_disks' in data:
logging.debug('Initializing disk list from %r',
data['source_disks'])
for d in data['source_disks']:
state['disks'].append({
'path': d,
'progress': 0})
state['disk_count'] = len(data['source_disks'])
write_state(state)
# Send some useful info on stdout in JSON
print(json.dumps({
'v2v_log': v2v_log,
'wrapper_log': wrapper_log,
'state_file': state_file,
}))
# Let's get to work
logging.info('Daemonizing')
daemonize()
agent_pid = None
agent_sock = None
if data['transport_method'] == 'ssh':
agent_pid, agent_sock = spawn_ssh_agent(data)
if agent_pid is None:
raise RuntimeError('Failed to start ssh-agent')
wrapper(data, state, v2v_log, agent_sock)
if agent_pid is not None:
os.kill(agent_pid, signal.SIGTERM)
except Exception:
# No need to log the exception, it will get logged below
        logging.error('An error occurred, finishing state file...')
state['failed'] = True
write_state(state)
raise
finally:
if 'failed' in state:
# Perform cleanup after failed conversion
logging.debug('Cleanup phase')
try:
handle_cleanup(data, state)
finally:
state['finished'] = True
write_state(state)
# Remove password files
logging.info('Removing password files')
for f in password_files:
try:
os.remove(f)
except OSError:
logging.exception('Error while removing password file: %s' % f)
state['finished'] = True
write_state(state)
except Exception:
logging.exception('Wrapper failure')
# Remove password files
logging.info('Removing password files')
for f in password_files:
try:
os.remove(f)
except OSError:
logging.exception('Error removing password file: %s' % f)
# Re-raise original error
raise
logging.info('Finished')
```
authors: ["mperezco@redhat.com"] · author_id: mperezco@redhat.com
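The wrapper reads a JSON document from stdin and validates the keys checked above. A hypothetical input for a VDDK-based conversion to oVirt (only the key names come from the script; all values here are made up):

```python
example_input = {
    "vm_name": "my-vm",
    "transport_method": "vddk",        # or "ssh"
    "vmware_uri": "vpx://administrator@vcenter.example.com/dc/cluster/host",
    "vmware_fingerprint": "AA:BB:CC:DD:EE:FF:00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD",
    "vmware_password": "secret",
    "rhv_url": "https://engine.example.com/ovirt-engine/api",
    "rhv_cluster": "Default",
    "rhv_storage": "data",
    "rhv_password": "secret",
    # optional keys: "rhv_cafile" (defaults to the VDSM CA),
    # "output_format" ("raw" by default), "insecure_connection" (False by default)
}
```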
---

**/features/steps/product_page_select_departmen.py** · repo `lion7500000/python-selenium-automation` @ `refs/heads/master` · Python (`py`, UTF-8, 971 bytes) · license_type: no_license · detected_licenses: []
ids: blob `5e9625f39b845f6d5fc10816f8c8718226a23d79` · directory `82470c6c4819f8b874c92b2e036c6bb6dd1d365b` · content `ac373b24adf88e943ffbfa1f09c85fb49c2b9ed2` · snapshot `87589891211bf327bffda50134c4da918709fd2b` · revision `a2f3701661ddf713edb9750150a97e7cd8adb967`
visit_date 2021-08-08T10:16:44.644725 · revision/committer_date 2021-07-29T03:10:54 · github_id 210,194,357 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at 2019-09-22T18:23:33 · gha_created_at 2019-09-22T18:23:32 · gha_language null · is_vendor false · is_generated false

```python
from selenium.webdriver.common.by import By
from behave import then, when
# Step functions renamed to be unique: behave registers them via the decorators,
# but reusing one function name shadows the earlier definitions in the module.
@when('Select Books department')
def select_books_department(context):
    context.app.menu_page.select_books_department()

@when('Select Amazon Fresh department')
def select_amazon_fresh_department(context):
    context.app.menu_page.select_amazon_fresh_department()

@when('Search for {text}')
def input_search_text(context, text):
    context.app.menu_page.input_search_text_in_select_departmen(text)

@when('Search product {text}')
def input_search_product_text(context, text):
    context.app.menu_page.input_search_text_in_select_departmen(text)

@then('{departmen} department is selected')
def verify_select_departmen(context, departmen):
    context.app.menu_page.verify_select_departmen(departmen)

@then('{departmen} department is selected in departmen')
def verify_select_departmen_in_departmen(context, departmen):
    context.app.menu_page.verify_select_departmen(departmen)
```
authors: ["lion7500000@gmail.com"] · author_id: lion7500000@gmail.com
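A hypothetical feature-file fragment (Gherkin) that the steps above would match, shown here as comments:

```python
# When Select Books department
# When Search for laptop
# Then Books department is selected
```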
---

**/implicit fields/IMGAN/model.py** · repo `Xemnas0/3d-shape-reconstruction` @ `refs/heads/master` · Python (`py`, UTF-8, 27,004 bytes) · license_type: permissive · detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"]
ids: blob `64fae846bf8afbc0467465427c8d3fdc5bb020dd` · directory `7328d17dad85fc1607d506321a1d6bdfa2f76c5c` · content `04b28a14970fa00ca1e86b98a5ed50c3cfa524d7` · snapshot `5cfcbb45ac2eaaf31d01c18fead983acb74f83fb` · revision `9f01f3071b7b7a266629fe2e70e796ee332b5436`
visit_date 2020-07-28T21:31:28.294418 · revision/committer_date 2019-12-23T10:14:00 · github_id 209,544,611 · star_events_count 0 · fork_events_count 0 · gha_license_id null · gha_event_created_at null · gha_created_at null · gha_language null · is_vendor false · is_generated false

```python
import os
import sys
import time
import math
from glob import glob
import tensorflow as tf
import numpy as np
import h5py
import cv2
import mcubes
from tqdm import tqdm, trange
from ops import *
class IMAE(object):
def __init__(self, sess, real_size, batch_size_input, is_training=False, z_dim=128, ef_dim=32, gf_dim=128,
dataset_name='default', checkpoint_dir=None, sample_dir=None, data_dir='./data'):
"""
Args:
too lazy to explain
"""
self.sess = sess
# progressive training
# 1-- (16, 16*16*16)
# 2-- (32, 16*16*16*2)
# 3-- (64, 32*32*32)
# 4-- (128, 32*32*32*4)
self.real_size = real_size # output point-value voxel grid size in training
self.batch_size_input = batch_size_input # training batch size (virtual, batch_size is the real batch_size)
self.batch_size = 16 * 16 * 16 * 4 # adjust batch_size according to gpu memory size in training
if self.batch_size_input < self.batch_size:
self.batch_size = self.batch_size_input
self.input_size = 64 # input voxel grid size
self.z_dim = z_dim
self.ef_dim = ef_dim
self.gf_dim = gf_dim
self.dataset_name = dataset_name
self.checkpoint_dir = checkpoint_dir
self.data_dir = data_dir
if os.path.exists(self.data_dir + '/' + self.dataset_name + '.hdf5'):
self.data_dict = h5py.File(self.data_dir + '/' + self.dataset_name + '.hdf5', 'r')
self.data_points = self.data_dict['points_' + str(self.real_size)][:]
self.data_values = self.data_dict['values_' + str(self.real_size)][:]
self.data_voxels = self.data_dict['voxels'][:]
if self.batch_size_input != self.data_points.shape[1]:
print("error: batch_size!=data_points.shape")
exit(0)
if self.input_size != self.data_voxels.shape[1]:
print("error: input_size!=data_voxels.shape")
exit(0)
else:
if is_training:
print("error: cannot load " + self.data_dir + '/' + self.dataset_name + '.hdf5')
exit(0)
else:
print("warning: cannot load " + self.data_dir + '/' + self.dataset_name + '.hdf5')
if not is_training:
self.real_size = 64 # output point-value voxel grid size in testing
self.test_size = 32 # related to testing batch_size, adjust according to gpu memory size
self.batch_size = self.test_size * self.test_size * self.test_size # do not change
# get coords
dima = self.test_size
dim = self.real_size
self.aux_x = np.zeros([dima, dima, dima], np.uint8)
self.aux_y = np.zeros([dima, dima, dima], np.uint8)
self.aux_z = np.zeros([dima, dima, dima], np.uint8)
multiplier = int(dim / dima)
multiplier2 = multiplier * multiplier
multiplier3 = multiplier * multiplier * multiplier
for i in range(dima):
for j in range(dima):
for k in range(dima):
self.aux_x[i, j, k] = i * multiplier
self.aux_y[i, j, k] = j * multiplier
self.aux_z[i, j, k] = k * multiplier
self.coords = np.zeros([multiplier3, dima, dima, dima, 3], np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
self.coords[i * multiplier2 + j * multiplier + k, :, :, :, 0] = self.aux_x + i
self.coords[i * multiplier2 + j * multiplier + k, :, :, :, 1] = self.aux_y + j
self.coords[i * multiplier2 + j * multiplier + k, :, :, :, 2] = self.aux_z + k
self.coords = (self.coords + 0.5) / dim * 2.0 - 1.0
self.coords = np.reshape(self.coords, [multiplier3, self.batch_size, 3])
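            # (+0.5 maps voxel indices to cell centres; the division and
            # scaling place the sample coordinates on [-1, 1], matching the
            # convention used for the training points in train())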
self.build_model()
def build_model(self):
self.vox3d = tf.placeholder(shape=[1, self.input_size, self.input_size, self.input_size, 1], dtype=tf.float32)
self.z_vector = tf.placeholder(shape=[1, self.z_dim], dtype=tf.float32)
self.point_coord = tf.placeholder(shape=[self.batch_size, 3], dtype=tf.float32)
self.point_value = tf.placeholder(shape=[self.batch_size, 1], dtype=tf.float32)
self.E = self.encoder(self.vox3d, phase_train=True, reuse=False)
self.G = self.generator(self.point_coord, self.E, phase_train=True, reuse=False)
self.sE = self.encoder(self.vox3d, phase_train=False, reuse=True)
self.sG = self.generator(self.point_coord, self.sE, phase_train=False, reuse=True)
self.zG = self.generator(self.point_coord, self.z_vector, phase_train=False, reuse=True)
self.loss = tf.reduce_mean(tf.square(self.point_value - self.G))
self.saver = tf.train.Saver(max_to_keep=10)
def generator(self, points, z, phase_train=True, reuse=False):
with tf.variable_scope("simple_net") as scope:
if reuse:
scope.reuse_variables()
zs = tf.tile(z, [self.batch_size, 1])
pointz = tf.concat([points, zs], 1)
print("pointz", pointz.shape)
h1 = lrelu(linear(pointz, self.gf_dim * 16, 'h1_lin'))
h1 = tf.concat([h1, pointz], 1)
h2 = lrelu(linear(h1, self.gf_dim * 8, 'h4_lin'))
h2 = tf.concat([h2, pointz], 1)
h3 = lrelu(linear(h2, self.gf_dim * 4, 'h5_lin'))
h3 = tf.concat([h3, pointz], 1)
h4 = lrelu(linear(h3, self.gf_dim * 2, 'h6_lin'))
h4 = tf.concat([h4, pointz], 1)
h5 = lrelu(linear(h4, self.gf_dim, 'h7_lin'))
h6 = tf.nn.sigmoid(linear(h5, 1, 'h8_lin'))
return tf.reshape(h6, [self.batch_size, 1])
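    # The generator is an IM-NET-style implicit decoder: an MLP maps a 3D
    # coordinate concatenated with the shape code z to an occupancy value in
    # [0, 1], re-concatenating `pointz` at every layer as a skip connection.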
def encoder(self, inputs, phase_train=True, reuse=False):
with tf.variable_scope("encoder") as scope:
if reuse:
scope.reuse_variables()
d_1 = conv3d(inputs, shape=[4, 4, 4, 1, self.ef_dim], strides=[1, 2, 2, 2, 1], scope='conv_1')
d_1 = lrelu(batch_norm(d_1, phase_train))
d_2 = conv3d(d_1, shape=[4, 4, 4, self.ef_dim, self.ef_dim * 2], strides=[1, 2, 2, 2, 1], scope='conv_2')
d_2 = lrelu(batch_norm(d_2, phase_train))
d_3 = conv3d(d_2, shape=[4, 4, 4, self.ef_dim * 2, self.ef_dim * 4], strides=[1, 2, 2, 2, 1],
scope='conv_3')
d_3 = lrelu(batch_norm(d_3, phase_train))
d_4 = conv3d(d_3, shape=[4, 4, 4, self.ef_dim * 4, self.ef_dim * 8], strides=[1, 2, 2, 2, 1],
scope='conv_4')
d_4 = lrelu(batch_norm(d_4, phase_train))
d_5 = conv3d(d_4, shape=[4, 4, 4, self.ef_dim * 8, self.z_dim], strides=[1, 1, 1, 1, 1], scope='conv_5',
padding="VALID")
d_5 = tf.nn.sigmoid(d_5)
return tf.reshape(d_5, [1, self.z_dim])
def train(self, config):
ae_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.loss)
self.sess.run(tf.global_variables_initializer())
batch_idxs = len(self.data_points)
batch_index_list = np.arange(batch_idxs)
batch_num = int(self.batch_size_input / self.batch_size)
if self.batch_size_input % self.batch_size != 0:
print("batch_size_input % batch_size != 0")
exit(0)
counter = 0
start_time = time.time()
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
counter = checkpoint_counter + 1
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for epoch in range(counter, config.epoch):
np.random.shuffle(batch_index_list)
avg_loss = 0
avg_num = 0
pbar = tqdm(range(0, batch_idxs))
for idx in pbar:
for minib in range(batch_num):
dxb = batch_index_list[idx]
batch_voxels = self.data_voxels[dxb:dxb + 1]
batch_points_int = self.data_points[dxb, minib * self.batch_size:(minib + 1) * self.batch_size]
batch_points = (batch_points_int + 0.5) / self.real_size * 2.0 - 1.0
batch_values = self.data_values[dxb, minib * self.batch_size:(minib + 1) * self.batch_size]
# Update AE network
_, errAE = self.sess.run([ae_optim, self.loss],
feed_dict={
self.vox3d: batch_voxels,
self.point_coord: batch_points,
self.point_value: batch_values,
})
avg_loss += errAE
avg_num += 1
if (idx % 16 == 0):
pbar.set_description("Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, loss: %.8f, avgloss: %.8f" % (
epoch, config.epoch, idx, batch_idxs, time.time() - start_time, errAE, avg_loss / avg_num))
if idx == batch_idxs - 1:
model_float = np.zeros([self.real_size, self.real_size, self.real_size], np.float32)
real_model_float = np.zeros([self.real_size, self.real_size, self.real_size], np.float32)
for minib in range(batch_num):
dxb = batch_index_list[idx]
batch_voxels = self.data_voxels[dxb:dxb + 1]
batch_points_int = self.data_points[dxb, minib * self.batch_size:(minib + 1) * self.batch_size]
batch_points = (batch_points_int + 0.5) / self.real_size * 2.0 - 1.0
batch_values = self.data_values[dxb, minib * self.batch_size:(minib + 1) * self.batch_size]
model_out = self.sess.run(self.sG,
feed_dict={
self.vox3d: batch_voxels,
self.point_coord: batch_points,
})
model_float[
batch_points_int[:, 0], batch_points_int[:, 1], batch_points_int[:, 2]] = np.reshape(
model_out, [self.batch_size])
real_model_float[
batch_points_int[:, 0], batch_points_int[:, 1], batch_points_int[:, 2]] = np.reshape(
batch_values, [self.batch_size])
img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8)
img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8)
img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8)
cv2.imwrite(config.sample_dir + "/" + str(epoch) + "_1t.png", img1)
cv2.imwrite(config.sample_dir + "/" + str(epoch) + "_2t.png", img2)
cv2.imwrite(config.sample_dir + "/" + str(epoch) + "_3t.png", img3)
img1 = np.clip(np.amax(real_model_float, axis=0) * 256, 0, 255).astype(np.uint8)
img2 = np.clip(np.amax(real_model_float, axis=1) * 256, 0, 255).astype(np.uint8)
img3 = np.clip(np.amax(real_model_float, axis=2) * 256, 0, 255).astype(np.uint8)
cv2.imwrite(config.sample_dir + "/" + str(epoch) + "_1i.png", img1)
cv2.imwrite(config.sample_dir + "/" + str(epoch) + "_2i.png", img2)
cv2.imwrite(config.sample_dir + "/" + str(epoch) + "_3i.png", img3)
print("[sample]")
if idx == batch_idxs - 1:
self.save(config.checkpoint_dir, epoch)
def test_interp(self, config):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
interp_size = 8
idx1 = 0
idx2 = 3
batch_voxels1 = self.data_voxels[idx1:idx1 + 1]
batch_voxels2 = self.data_voxels[idx2:idx2 + 1]
model_z1 = self.sess.run(self.sE,
feed_dict={
self.vox3d: batch_voxels1,
})
model_z2 = self.sess.run(self.sE,
feed_dict={
self.vox3d: batch_voxels2,
})
batch_z = np.zeros([interp_size, self.z_dim], np.float32)
for i in range(interp_size):
batch_z[i] = model_z2 * i / (interp_size - 1) + model_z1 * (interp_size - 1 - i) / (interp_size - 1)
dima = self.test_size
dim = self.real_size
multiplier = int(dim / dima)
multiplier2 = multiplier * multiplier
for t in range(interp_size):
model_float = np.zeros([self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
minib = i * multiplier2 + j * multiplier + k
model_out = self.sess.run(self.zG,
feed_dict={
self.z_vector: batch_z[t:t + 1],
self.point_coord: self.coords[minib],
})
model_float[self.aux_x + i + 1, self.aux_y + j + 1, self.aux_z + k + 1] = np.reshape(model_out,
[
self.test_size,
self.test_size,
self.test_size])
img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8)
img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8)
img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8)
cv2.imwrite(config.sample_dir + "/interp/" + str(t) + "_1t.png", img1)
cv2.imwrite(config.sample_dir + "/interp/" + str(t) + "_2t.png", img2)
cv2.imwrite(config.sample_dir + "/interp/" + str(t) + "_3t.png", img3)
thres = 0.5
vertices, triangles = mcubes.marching_cubes(model_float, thres)
# mcubes.export_mesh(vertices, triangles, config.sample_dir + "/interp/" + "out" + str(t) + ".dae", str(t))
mcubes.export_obj(vertices, triangles, config.sample_dir + "/interp/" + "out" + str(t) + ".obj")
print("[sample interpolation]")
def test(self, config):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
dima = self.test_size
dim = self.real_size
multiplier = int(dim / dima)
multiplier2 = multiplier * multiplier
for t in range(16):
model_float = np.zeros([self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32)
batch_voxels = self.data_voxels[t:t + 1]
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
minib = i * multiplier2 + j * multiplier + k
model_out = self.sess.run(self.sG,
feed_dict={
self.vox3d: batch_voxels,
self.point_coord: self.coords[minib],
})
model_float[self.aux_x + i + 1, self.aux_y + j + 1, self.aux_z + k + 1] = np.reshape(model_out,
[
self.test_size,
self.test_size,
self.test_size])
img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8)
img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8)
img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8)
cv2.imwrite(config.sample_dir + "/ae/" + str(t) + "_1t.png", img1)
cv2.imwrite(config.sample_dir + "/ae/" + str(t) + "_2t.png", img2)
cv2.imwrite(config.sample_dir + "/ae/" + str(t) + "_3t.png", img3)
thres = 0.5
# Generated sample
vertices, triangles = mcubes.marching_cubes(model_float, thres)
# mcubes.export_mesh(vertices, triangles, config.sample_dir + "/" + "out" + str(t) + ".dae", str(t))
mcubes.export_obj(vertices, triangles, config.sample_dir + "/ae/" + "out" + str(t) + ".obj")
# Original sample
batch_voxels = batch_voxels[0, ..., 0]
vertices, triangles = mcubes.marching_cubes(batch_voxels, thres)
mcubes.export_obj(vertices, triangles, config.sample_dir + "/ae/" + "out" + str(t) + "_original" + ".obj")
print("[sample]")
def get_z(self, config):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
hdf5_path = self.data_dir + '/' + self.dataset_name + '_z.hdf5'
chair_num = len(self.data_voxels)
hdf5_file = h5py.File(hdf5_path, mode='w')
hdf5_file.create_dataset("zs", [chair_num, self.z_dim], np.float32)
for idx in tqdm(range(0, chair_num)):
# print(idx)
batch_voxels = self.data_voxels[idx:idx + 1]
z_out = self.sess.run(self.sE,
feed_dict={
self.vox3d: batch_voxels,
})
hdf5_file["zs"][idx, :] = np.reshape(z_out, [self.z_dim])
print(hdf5_file["zs"].shape)
hdf5_file.close()
print("[z]")
def test_z(self, config, batch_z, dim):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
dima = self.test_size
multiplier = int(dim / dima)
multiplier2 = multiplier * multiplier
multiplier3 = multiplier * multiplier * multiplier
# get coords 256
aux_x = np.zeros([dima, dima, dima], np.int32)
aux_y = np.zeros([dima, dima, dima], np.int32)
aux_z = np.zeros([dima, dima, dima], np.int32)
for i in range(dima):
for j in range(dima):
for k in range(dima):
aux_x[i, j, k] = i * multiplier
aux_y[i, j, k] = j * multiplier
aux_z[i, j, k] = k * multiplier
coords = np.zeros([multiplier3, dima, dima, dima, 3], np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
coords[i * multiplier2 + j * multiplier + k, :, :, :, 0] = aux_x + i
coords[i * multiplier2 + j * multiplier + k, :, :, :, 1] = aux_y + j
coords[i * multiplier2 + j * multiplier + k, :, :, :, 2] = aux_z + k
coords = (coords + 0.5) / dim * 2.0 - 1.0
coords = np.reshape(coords, [multiplier3, self.batch_size, 3])
for t in tqdm(range(batch_z.shape[0])):
model_float = np.zeros([dim + 2, dim + 2, dim + 2], np.float32)
for i in tqdm(range(multiplier)):
for j in range(multiplier):
for k in range(multiplier):
# print(t, i, j, k)
minib = i * multiplier2 + j * multiplier + k
model_out = self.sess.run(self.zG,
feed_dict={
self.z_vector: batch_z[t:t + 1],
self.point_coord: coords[minib],
})
model_float[aux_x + i + 1, aux_y + j + 1, aux_z + k + 1] = np.reshape(model_out,
[dima, dima, dima])
img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8)
img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8)
img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8)
cv2.imwrite(config.sample_dir + "/" + str(t) + "_1t.png", img1)
cv2.imwrite(config.sample_dir + "/" + str(t) + "_2t.png", img2)
cv2.imwrite(config.sample_dir + "/" + str(t) + "_3t.png", img3)
thres = 0.5
vertices, triangles = mcubes.marching_cubes(model_float, thres)
# mcubes.export_mesh(vertices, triangles, config.sample_dir + "/" + "out" + str(t) + ".dae", str(t))
mcubes.export_obj(vertices, triangles, config.sample_dir + "/" + "out" + str(t) + ".obj")
# print("[sample GAN]")
def test_z_pc(self, config, batch_z, dim):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
dima = self.test_size
multiplier = int(dim / dima)
multiplier2 = multiplier * multiplier
multiplier3 = multiplier * multiplier * multiplier
# get coords 256
aux_x = np.zeros([dima, dima, dima], np.int32)
aux_y = np.zeros([dima, dima, dima], np.int32)
aux_z = np.zeros([dima, dima, dima], np.int32)
for i in range(dima):
for j in range(dima):
for k in range(dima):
aux_x[i, j, k] = i * multiplier
aux_y[i, j, k] = j * multiplier
aux_z[i, j, k] = k * multiplier
coords = np.zeros([multiplier3, dima, dima, dima, 3], np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
coords[i * multiplier2 + j * multiplier + k, :, :, :, 0] = aux_x + i
coords[i * multiplier2 + j * multiplier + k, :, :, :, 1] = aux_y + j
coords[i * multiplier2 + j * multiplier + k, :, :, :, 2] = aux_z + k
coords = (coords + 0.5) / dim * 2.0 - 1.0
coords = np.reshape(coords, [multiplier3, self.batch_size, 3])
n_pc_points = 2048
thres = 0.5
hdf5_file = h5py.File(self.dataset_name + "_im_gan_sample.hdf5", 'w')
hdf5_file.create_dataset("points", [batch_z.shape[0], n_pc_points, 3], np.float32)
for t in range(batch_z.shape[0]):
print(t)
model_float = np.zeros([dim + 2, dim + 2, dim + 2], np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
minib = i * multiplier2 + j * multiplier + k
model_out = self.sess.run(self.zG,
feed_dict={
self.z_vector: batch_z[t:t + 1],
self.point_coord: coords[minib],
})
model_float[aux_x + i + 1, aux_y + j + 1, aux_z + k + 1] = np.reshape(model_out,
[dima, dima, dima])
vertices, triangles = mcubes.marching_cubes(model_float, thres)
mcubes.export_mesh(vertices, triangles, config.sample_dir + "/" + "out" + str(t) + ".dae", str(t))
np.random.shuffle(vertices)
vertices = (vertices - dim / 2 - 0.5) / dim
vertices_out = np.zeros([n_pc_points, 3], np.float32)
vertices_len = vertices.shape[0]
for i in range(n_pc_points):
vertices_out[i] = vertices[i % vertices_len]
hdf5_file["points"][t, :, :] = vertices_out
hdf5_file.close()
@property
def model_dir(self):
return "{}_{}".format(
self.dataset_name, self.input_size)
def save(self, checkpoint_dir, step):
model_name = "IMAE.model"
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
import re
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            counter = int(next(re.finditer(r"(\d+)(?!.*\d)", ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
|
[
"fnuzzo@kth.se"
] |
fnuzzo@kth.se
|
0d1aa2e4ed4578759cc2d39895281cc4a4ccc2d7
|
b5f6b262cc1f599b9ca10cec6475831b1f8c812b
|
/app/crud.py
|
8d214e87fb37d774a89e90feb20bc0e732759df2
|
[] |
no_license
|
githubgobi/fastapi-study
|
5f0a59907182967a4417c6340963ed6baddde011
|
bce9f6f3ba6dc4c74f7320a7fe92b9a05698a070
|
refs/heads/main
| 2023-03-03T19:48:57.325092
| 2021-02-15T03:44:47
| 2021-02-15T03:44:47
| 338,316,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
from sqlalchemy.orm import Session
from . import models, schemas
def get_user_by_username(db: Session, username: str):
return db.query(models.UserInfo).filter(models.UserInfo.username == username).first()
def get_user(db: Session, user_id: int):
    return db.query(models.UserInfo).filter(models.UserInfo.id == user_id).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.UserInfo).offset(skip).limit(limit).all()
def create_user(db: Session, user: schemas.UserCreate):
fake_hashed_password = user.password + "notreallyhashed"
db_user = models.UserInfo(username=user.username, password=fake_hashed_password, fullname=user.fullname)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
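# Usage sketch (illustrative only): assumes UserCreate is a pydantic schema
# with username/password/fullname fields, matching its use in create_user.
def demo_create_user(db: Session) -> models.UserInfo:
    return create_user(db, schemas.UserCreate(username="ada",
                                              password="secret",
                                              fullname="Ada Lovelace"))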
|
[
"IGS@IGS-0075.igsc.in"
] |
IGS@IGS-0075.igsc.in
|
4da370404c3fda913fbc8f6e04a7c737f2165fff
|
445ca5459dfe1a59b3acee8b5ce29e2e610e4631
|
/week8/informatics4/E.py
|
d3db7062076af7a9e88c9fe26a51fe3f79b05931
|
[] |
no_license
|
luizasabyr/WebDevelopment2020
|
6cd0328bf9c3053055541631c18ee42c6dda78d3
|
abec4f7674b4833fe93a0430460f1d31a8462469
|
refs/heads/master
| 2020-12-23T14:05:30.676259
| 2020-03-27T06:08:02
| 2020-03-27T06:08:02
| 237,175,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
n = int(input())
a = list(map(int,input().strip().split()))[:n]
length = len(a)
cnt = 0
for i in range(1, length):
if (a[i - 1] > 0) and (a[i] > 0) or (a[i - 1] < 0) and (a[i] < 0):
cnt = 1
break
if cnt == 1:
print("YES")
else:
print("NO")
|
[
"noreply@github.com"
] |
luizasabyr.noreply@github.com
|
081c6fd251ac0a825028f8d9824d00d75264742d
|
8155b79931c702175e697e7bcda8cafee881b0df
|
/src/Payload2Std.py
|
874b705419cf395df3f79f2899e5d1923206a536
|
[
"MIT"
] |
permissive
|
Hing9/Payload2Std-IDA
|
cf68a03f07d28fce3b704ea02d3720ce078b4ff3
|
ffaaa903301cbc700667bd8052d3027e482d6601
|
refs/heads/master
| 2020-05-30T06:22:28.338863
| 2019-05-31T19:06:51
| 2019-05-31T19:06:51
| 189,578,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
from __future__ import print_function
import idaapi
def PLUGIN_ENTRY():
from Payload2Std.plugin import Payload2StdPlugin
return Payload2StdPlugin()
|
[
"soj0703@naver.com"
] |
soj0703@naver.com
|
977ef34be9f77e2b77cd391bfbee7e8541ad6414
|
330d17694e530bc75f703ba7af93037b30c19b5b
|
/yt_scripts/SpectraTools.py
|
5e7ab2eb3919803e0e89131c893fcf415ba7e68b
|
[] |
no_license
|
aemerick/projects
|
3d7b64ec1d64d157739c9b2b2e79eb9f1314bca5
|
3a49420e038459d814a36e7b02f5c18d4d7fb7ff
|
refs/heads/master
| 2021-06-03T04:01:59.823590
| 2019-09-26T04:39:55
| 2019-09-26T04:39:55
| 18,304,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,134
|
py
|
import numpy as np
import astropy.constants as C
def _generate_species():
"""
Populates a dictionary containing the constants for species used to
calculate equivalent width.
"""
speciesList = {'Lya':\
{'f': 0.4164, # oscillator strength
'glambda' : 7616.0, # cm/s
'wl0' : 1215.7, # Angstroms
}\
}
return speciesList
def Wlambda(NArray, bArray, species=None, speciesDict=None):
    """
    Given parameters, calculate the equivalent width of
    a given line.
    species:
        Can specify a species to load pre-calculated constants (oscillator
        strength, gamma, etc.). Currently supports: Lyman Alpha as "Lya"
    speciesDict:
        Can be used to specify constants for the species directly, using the
        keys 'f', 'glambda' and 'wl0'. If both species and speciesDict are
        specified, speciesDict takes priority.
    """
c = C.c.value * 100.0 # speed of light in cm/s
    if speciesDict is None:
        # no explicit constants given; look the species up in the known list
        speciesList = _generate_species()  # make dict of species
        speciesDict = speciesList[species]
Wl = np.zeros( np.size(NArray) )
t0 = _tau_o(NArray,bArray,speciesDict)
glambda = speciesDict['glambda']
limit = 1.25393 # as defined in Draine 9.27
for i in np.arange(np.size(Wl)):
N = NArray[i] # cm^-2
b = bArray[i] * 1000.0 * 100.0 # km/s -> cm/s
tau = t0[i]
if tau <= limit:
Wl[i] = (np.pi)**0.5 * b * tau / (c * (1.0 + tau/(2.0*(2.0**0.5))))
else:
Wl[i] = (\
(2.0*b/c)**2 * np.log(tau/np.log(2.0)) +\
b*glambda*(tau-limit)/(c*c*(np.pi)**0.5)\
)**0.5
return Wl*speciesDict['wl0']*1000.0 # returns wlambda
def _tau_o(NArray,bArray,speciesDict):
"""
Calculate tau o as per Draine 9.8 - 9.10. Ignoring the correction for
stimulated emission, as valid only for non-radio transitions.
"""
    # convert things to the right units and pull from speciesDict
f = speciesDict['f']
wl0 = speciesDict['wl0'] * 1.0E-8 # converts A -> cm
bArray = bArray * 1000.0 * 100.0 # converts km/s -> cm/s
const = 1.497E-2 # constant. cm^2/s
to = const * NArray*f*wl0/bArray
return to
def calcN(Wlambda,species,b=30.0):
"""
Calculates the column density of a given line provided the
equivalent width and species. For saturated lines, the doppler broadening
    value is important. If no b value is provided, b is assumed to be 30 km/s.
Parameters
----------
W :
Equivalent widths in mA
species : string
Species name. Currently supports "Lya"
    b : optional
Doppler broadening values. Must be an array of length equal to that
of the equivalent width array. If not, b is assumed to be 30 km/s.
Default = 30 km/s
"""
speciesList = _generate_species()
speciesDict = speciesList[species]
    wo = speciesDict['wl0'] # natural line wavelength in A ('wl0' is the key set in _generate_species)
f = speciesDict['f'] # oscillator strength
if np.size(Wlambda) != np.size(b):
b = 30.0 # km/s
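    # NOTE: the regime thresholds in the if/elif chain and the three
    # per-regime inversion stubs below are unfinished placeholders; `N` is
    # not yet defined inside the stubs.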
def linear(Wl,b):
return N
def flat(Wl,b):
return N
def damped(Wl,b):
return N
cases = {'linear' : linear,
'flat' : flat,
'damped' : damped}
for i in np.arange(0,np.size(Wlambda)):
Wl = Wlambda[i]
        W = Wlambda / (1000.0 * wo)  # mA / (1000 * A) -> dimensionless
if Wl :
case = "linear"
elif Wl :
case = "flat"
elif Wl :
case = "damped"
N = cases[case](Wl,b)
return N
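# Usage sketch (hypothetical inputs, not part of the original module):
# equivalent widths in mA for a thin and a saturated Lyman-alpha absorber.
if __name__ == '__main__':
    example_N = np.array([1.0e13, 1.0e16])  # column densities, cm^-2
    example_b = np.array([20.0, 30.0])      # Doppler parameters, km/s
    print(Wlambda(example_N, example_b, species='Lya'))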
|
[
"emerick@astro.columbia.edu"
] |
emerick@astro.columbia.edu
|
e725c508ad8d3854f0ba54753f994f0bcc0e39e6
|
fe5ed850257cc8af4df10de5cffe89472eb7ae0b
|
/19.框架学习/爬虫学习/01.爬虫三大库/04.Lxml库和Xpath语法/3种爬虫模式对比.py
|
76144bd637c9d50b98d29e84c63b2fd44cdae50e
|
[] |
no_license
|
hujianli94/Python-code
|
a0e6fe6362868407f31f1daf9704063049042d9e
|
fe7fbf59f1bdcbb6ad95a199262dd967fb04846c
|
refs/heads/master
| 2020-09-13T01:11:34.480999
| 2019-11-19T05:29:59
| 2019-11-19T05:29:59
| 222,614,662
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,908
|
py
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
# author: 18793
# Date: 2019/7/12 16:01
# filename: 3种爬虫模式对比.py
# Scraped data is only returned, not stored.
import requests
import re
from bs4 import BeautifulSoup
from lxml import etree
import time
# request headers
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
}
urls = ["https://www.qiushibaike.com/text/page/{}".format(str(i)) for i in range(1, 5)]  # build the list of page URLs
def re_scraper(url):
    '''
    :param url: page URL
    :return: the last item parsed with regular expressions
    '''
res = requests.get(url,headers=headers)
ids = re.findall("<h2>(.*?)</h2>", res.text, re.S)
contents = re.findall('<div class="content">.*?<span>(.*?)</span>', res.text, re.S)
laughs = re.findall('<span class="stats-vote"><i class="number">(\d+)</i> 好笑</span>', res.text, re.S)
comments = re.findall('<i class="number">(\d+)</i> 评论', res.text, re.S)
for id, content, laugh, comment in zip(ids, contents, laughs, comments):
info = {
"id": id,
"content": content,
"laugh": laugh,
"comment": comments[0]
}
return info
def bs_scraper(url):
    '''
    :param url: page URL
    :return: the last item parsed with BeautifulSoup
    '''
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text, 'lxml')
ids = soup.select(" a > h2")
contents = soup.select("div > span")
laughs = soup.select("span.stats-vote > i")
comments = soup.select("i.number")
for id, content, laugh, comment in zip(ids, contents, laughs, comments):
info = {
'id': id.get_text(),
'content': content.get_text(),
'laugh': laugh.get_text(),
'comment': comment.get_text()
}
return info
def lxml_scraper(url):
    '''
    :param url: page URL
    :return: an item parsed with lxml/XPath
    '''
res = requests.get(url, headers=headers)
selector = etree.HTML(res.text)
url_infos = selector.xpath('//div[@class="article block untagged mb15 typs_hot"]')
try:
for url_info in url_infos:
id = url_info.xpath("div[1]/a[2]/h2/text()")[0]
content = url_info.xpath("a[1]/div/span/text()")[0]
laugh = url_info.xpath("div[2]/span[1]/i/text()")[0]
comment = url_info.xpath("div[2]/span[2]/a/i/text()")[0]
info = {
"id": id,
"content": content,
"laugh": laugh,
"comment": comment
}
return info
except IndexError:
        pass  # ignore the exception
if __name__ == '__main__':
    for name, scraper in [("RE_expressions", re_scraper), ("BeautifulSoup", bs_scraper), ("Lxml", lxml_scraper)]:
start = time.time()
for url in urls:
scraper(url)
end = time.time()
print(name, end - start)
|
[
"1879324764@qq.com"
] |
1879324764@qq.com
|
90c7b357e310a8f120241367bacc3be511f44ba6
|
d2d02766d4ff17b7dcf8b5439d99ddc015649b93
|
/Topico1/settings.py
|
cbae2588c94ef34c6016cdde7e9745c1d38c1567
|
[] |
no_license
|
mathyas221/Encuesta-Gap
|
add0e405cc86fce8536f57fa26e12a514c0a4763
|
d4e522bfbefb7732d29680904e4c20d68a8d5e34
|
refs/heads/master
| 2020-06-04T05:48:45.861388
| 2019-06-14T07:28:04
| 2019-06-14T07:28:04
| 191,894,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,279
|
py
|
"""
Django settings for Topico1 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-go04b_4l6eb)ppy)4*&b1uc4)$460k!2f+n95^9f7(7((*tbt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Questions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Topico1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Topico1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
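# Note (sketch, not part of this settings module): in development, media
# files are commonly served by adding this to Topico1/urls.py:
# from django.conf import settings
# from django.conf.urls.static import static
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)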
|
[
"mathyastejos@gmail.com"
] |
mathyastejos@gmail.com
|
9fb284dae3774205aaf73fcaaefeff2b2630655e
|
fc0259a7d33d996d087e711e660421558e126014
|
/working-time.py
|
d96447ab96ad7d9e5266b17fa9d564261d2ca010
|
[] |
no_license
|
marthemagnificent/working_time
|
a7d927fdf7194b7bc4ef262daa11e4895b421ec4
|
76a27257b6c6d417c97ad12096b9cb70953b1a05
|
refs/heads/main
| 2023-02-28T00:03:40.637547
| 2021-02-03T23:41:46
| 2021-02-03T23:41:46
| 335,401,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
# Name: Markayla Brown
# working-time.py
#
# Problem: Calculate the working time of employees on a team and output the total.
# Certification of Authenticity:
# <include one of the following>
# I certify that this assignment is entirely my own work.
# I certify that this assignment is my own work, but I
# discussed it with: <Name(s)>
def main():
#variables for time
hours, minutes = 0, 0
tempHours, tempMinutes = 0, 0
    #ask the user how many employees are on the team
    employees = int(input("How many employees are on your team? "))
#loop to count the employees
for i in range(employees):
#creates a clean number for the sentence asking about each employee
empsent = i + 1
        #sets the times entered to temp values to be added to actual time
        #these variables hold the user amounts; input is expected as
        #"hours, minutes" (e.g. 7, 30), which eval unpacks as a tuple
        newHours, newMinutes = eval(input("How much time did employee #" + str(empsent) + " work? "))
#add input hours to temp for math
tempHours = tempHours + newHours
#adds input minutes to temp for math
tempMinutes = tempMinutes + newMinutes
#math to turn the minutes into hours
carryHours = tempMinutes // 60
#math to get remainder minutes from full minute amount
minutes = tempMinutes % 60
#add carry over hours to hours
hours = tempHours + carryHours
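    # worked example: tempMinutes = 130 gives carryHours = 2 and
    # minutes = 10, i.e. 130 loose minutes become 2 hours 10 minutes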
print("Your team has worked " + str(hours) + " hours and " + str(minutes) +" minutes")
main()
|
[
"markaylabrown45@yahoo.com"
] |
markaylabrown45@yahoo.com
|
5d1033b72f189c41bc453a4359642380538f0a22
|
9ab010aa3407437dab0811349b681be72fc94baf
|
/src/2020_06_02/for05.py
|
3c0a17d51a149955fda0edaf314fa54a1892732b
|
[] |
no_license
|
lilacman888/pythonExam
|
9bbbaedb9c301cd0e16da7bf7f946a09fac9ef81
|
6b9974a1abcbbeace6cabfdecac8dd3e04c1b437
|
refs/heads/master
| 2022-09-17T09:58:21.247717
| 2020-06-02T08:13:45
| 2020-06-02T08:13:45
| 267,808,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# Loop practice: the for statement
# for <variable> in range():
#     statements to repeat
# Write a program that computes the sum of the odd numbers and the sum of
# the even numbers from 1 to 100, using only a single for loop.
odd = even = 0
for i in range(1, 101):  # 1 to 100
    if i % 2 == 0:  # even
        even += i
    else:
        odd += i
print('Sum of odd numbers from 1 to 100 : ', odd)
print('Sum of even numbers from 1 to 100 : ', even)
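# Cross-check with stepped ranges (sketch): the closed-form sums of the
# same two series.
assert odd == sum(range(1, 101, 2))    # 1 + 3 + ... + 99  == 2500
assert even == sum(range(2, 101, 2))   # 2 + 4 + ... + 100 == 2550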
|
[
"lilacman888@gmail.com"
] |
lilacman888@gmail.com
|
601c83701c4d7f4cda5562602cc5a382576b7441
|
2581689b5867c4004d735d45cbe502a8ba4c70aa
|
/main.py
|
5416c29a9908f78c630af3e5de3e6a0c9672f909
|
[] |
no_license
|
Arseni1919/DRL_course_exercise_1
|
583912c6bbcda57c9b7eaa59cf30edc18667591a
|
fe8c19ca7888d2c5980ebb11339d0eea29708702
|
refs/heads/master
| 2023-08-10T16:36:00.299455
| 2021-10-04T14:30:20
| 2021-10-04T14:30:20
| 267,364,210
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,884
|
py
|
# -*- coding: utf-8 -*-
from World import World
import numpy as np
import random
# random.seed(0)
# --------------------- PARAMETERS --------------------- #
r = -0.04
teta = 0.0001
omega = 0.9
# ------------------------------------------------------ #
# --------------------- CONSTANTS ---------------------- #
N = 1
E = 2
S = 3
W = 4
ACTIONS = [N, E, S, W]
FINAL_STATE_CELLS = [0, 6, 12, 13, 14]
BAD_CELLS = [0, 6, 13, 14]
GOOD_CELLS = [12]
# ------------------------------------------------------ #
class Cell:
def __init__(self, num, n, s, w, e):
self.num = num
self.n = n
self.e = e
self.s = s
self.w = w
# create cells and tell them who are their neighbours
field = {
0: Cell(0, 0, 1, 0, 4),
1: Cell(1, 0, 2, 1, 5),
2: Cell(2, 1, 3, 2, 6),
3: Cell(3, 2, 3, 3, 7),
4: Cell(4, 4, 5, 0, 8),
5: Cell(5, 4, 6, 1, 9),
6: Cell(6, 5, 7, 2, 10),
7: Cell(7, 6, 7, 3, 11),
8: Cell(8, 8, 9, 4, 12),
9: Cell(9, 8, 10, 5, 13),
10: Cell(10, 9, 11, 6, 14),
11: Cell(11, 10, 11, 7, 15),
12: Cell(12, 12, 13, 8, 12),
13: Cell(13, 12, 14, 9, 13),
14: Cell(14, 13, 15, 10, 14),
15: Cell(15, 14, 15, 11, 15),
}
def transition_model(new_state, state, action):
    if state in FINAL_STATE_CELLS:
        raise ValueError('Game Over')
    cell = field[state]
    # the intended direction succeeds with 0.8; each perpendicular slip has 0.1
    intended_and_slips = {
        N: (cell.n, (cell.w, cell.e)),
        S: (cell.s, (cell.w, cell.e)),
        W: (cell.w, (cell.n, cell.s)),
        E: (cell.e, (cell.n, cell.s)),
    }
    intended, slips = intended_and_slips[action]
    # accumulate rather than early-return, so probability folded back by a
    # wall is not lost when the intended cell coincides with a slip cell
    prob = 0.8 if new_state == intended else 0.0
    for slip in slips:
        if new_state == slip:
            prob += 0.1
    return prob
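# Sanity sketch: from a non-terminal state, the outgoing probabilities of a
# fixed action sum to one (wall bounces fold mass back onto existing cells).
assert abs(sum(transition_model(s2, 1, E) for s2 in range(16)) - 1.0) < 1e-9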
def reward_function(state):
if state in BAD_CELLS:
return -1
if state in GOOD_CELLS:
return 1
return r
def initiate_values(nStates):
values = {}
for state in range(nStates):
values[state] = 0
return values
def max_action_value(curr_world, state, curr_values):
if state in FINAL_STATE_CELLS:
return reward_function(state), 1
value = None
best_action = 1
for counter, action in enumerate(ACTIONS):
curr_sum = get_value_on_action(state, action, curr_values, curr_world)
if counter == 0:
value = curr_sum
if curr_sum > value:
value = curr_sum
best_action = action
return value, best_action
def get_policy(curr_world, curr_values):
curr_policy = []
for state in range(curr_world.nStates):
_, action = max_action_value(curr_world, state, curr_values)
curr_policy.append([action])
return np.array(curr_policy)
# ------------------------------ #
def value_iteration(curr_world):
values = initiate_values(curr_world.nStates)
delta = teta
while not delta < teta:
delta = 0
for state in range(curr_world.nStates):
value = values[state]
values[state], _ = max_action_value(curr_world, state, values)
delta = max(delta, abs(value - values[state]))
policy = get_policy(curr_world, values)
return values, policy
# ------------------------------ #
def initialize_policy(curr_world):
policy = []
for state in range(curr_world.nStates):
# policy.append([random.choice(ACTIONS)])
policy.append([1])
return policy
def get_value_on_action(state, action, values, curr_world):
if state in FINAL_STATE_CELLS:
return reward_function(state)
value = 0
for new_state in range(curr_world.nStates):
value += transition_model(new_state, state, action) * (reward_function(state) + omega * values[new_state])
return value
def policy_evaluation(policy, curr_world):
values = initiate_values(curr_world.nStates)
delta = teta
while not delta < teta:
delta = 0
for state in range(curr_world.nStates):
value = values[state]
values[state] = get_value_on_action(state, policy[state][0], values, curr_world)
delta = max(delta, abs(value - values[state]))
return values
def policy_improvement(values, curr_world):
policy = []
for state in range(curr_world.nStates):
max_q_val = None
max_action_val = ACTIONS[0]
for index, action in enumerate(ACTIONS):
q_val = get_value_on_action(state, action, values, curr_world)
if index == 0:
max_q_val = q_val
max_action_val = action
if q_val > max_q_val:
max_q_val = q_val
max_action_val = action
policy.append([max_action_val])
return policy
# ------------------------------ #
def policy_iteration(curr_world):
policy = initialize_policy(curr_world)
values = {}
policy_stable = False
while not policy_stable:
values = policy_evaluation(policy, curr_world)
new_policy = policy_improvement(values, curr_world)
policy_stable = True
for i in range(len(policy)):
if policy[i][0] != new_policy[i][0]:
policy_stable = False
break
policy = new_policy
world.plot_value(values)
world.plot_policy(np.array(policy))
return values, np.array(policy)
# ------------------------------ #
if __name__ == "__main__":
world = World()
# world.plot()
# final_values, final_policy = value_iteration(world)
final_values, final_policy = policy_iteration(world)
# world.plot_value(final_values)
# world.plot_policy(final_policy)
|
[
"1919ars@gmail.com"
] |
1919ars@gmail.com
|
0826cc895b7abf8174eba312935e6a86428910af
|
35e5093651eb7213b7f8f0cd0ace352913f64dfb
|
/gip/helper_pedidos.py
|
0e0a3643ec407bcc0249ccab7be787ecb8d538f1
|
[] |
no_license
|
juanma2/GIP
|
aaf62f38fda9c8baac8bb36b7fc8d3a0ed3425d2
|
fe26b5b2b00a519aafd8bf03e0cfc5bc467e5146
|
refs/heads/master
| 2021-01-19T16:19:13.735368
| 2017-04-14T12:47:47
| 2017-04-14T12:47:47
| 88,261,580
| 0
| 0
| null | 2017-04-14T11:18:02
| 2017-04-14T11:18:02
| null |
UTF-8
|
Python
| false
| false
| 4,285
|
py
|
import datetime
import ast
from gip.models import Pedidos, HistoricoListas
from django.contrib.auth.models import User
def send_order(pedido, proveedor, c_pedido):
    cliente = pedido['cliente']
    print("***************************************************************")
    print("implement email, or, whatever needed in gip/helper_pedidos.py")
    print(cliente)
    orden = pedido['orden']
    print(orden)
    precio = pedido['precio']
    print(precio)
    total = 0.0
    u = User.objects.filter(id=pedido['cliente']['user_id'])
    for i in pedido['orden']:
        total += orden[i] * precio[i]
    print(total)
    # once the pedido is ready, all of its items should be set as "active"
    p = Pedidos(producto_serializado=pedido, proveedor_id=proveedor.id, total=total, fecha_creacion=datetime.datetime.now())
    p.save()
    p.cliente.add(pedido['cliente']['id'])
    print("let's try")
    try:
        print(p.id)
        history = HistoricoListas(pedido_id=p, listas_serializado=c_pedido)
        history.save()
        print("history saved!!")
    except Exception:
        print("something went wrong with order " + str(p.id) + " trying to save listas... but we are not going to block it")
    print("***************************************************************")
    return True
def generate_modales_historico(pedidos):
    print("we are trying to get the historical pedidos")
modales = ''
for pedido in pedidos:
header = """
<div class="modal" onclick="" id="modaldemostrarpedido_{0}">
<div class="modal-dialog modal-lg" id="customer-order">
<div class="modal-content">
<div class="modal-header modal-header-info">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">X</button>
<h4 class="modal-title">ID.Pedido: {1}</h4>
</div>
<div class="modal-body">
<table class="table">
<thead>
<tr>
<th>Referencia</th>
<th colspan="2">Nombre del producto</th>
<th>Precio/ud</th>
<th>Total</th>
</tr>
</thead>
<tbody>
""".format(pedido.codigo,pedido.codigo)
products = ast.literal_eval(pedido.producto_serializado)
body = ''
for i in products['orden']:
body += """
<tr>
<td>{0}</td>
<td>
<!-- <a href="s#fichamodal" data-toggle="modal">
<img src="img/.jpg" alt="">
</a> -->
</td>
<td><a href="s#fichamodal" data-toggle="modal">{1}</a></td>
<td>{2}</td>
<td>{3}</td>
</tr>
""".format(i,products['descripcion'][i].encode("utf-8"),products['orden'][i], products['orden'][i] * products['precio'][i])
foot = """
<tfoot>
<tr>
<th colspan="4" class="text-right">Total</th>
<th>{0}</th>
<th></th>
</tr>
</tfoot>
</table>
</div>
<div class="modal-footer">
<button type="reset" class="btn btn-default" data-dismiss="modal">Cerrar</button>
</div>
</div>
</div>
<!--acaba modal muestra pedido tipo 1-->
</div>""".format(pedido.total)
modales += header+body+foot
    print(modales)
return modales
|
[
"bvcelari@gmail.com"
] |
bvcelari@gmail.com
|
c9d088d88e05ade0be73cd950e96e589c8977c4f
|
66a89bb2ded504f77f9fb9af2b365a4888c01004
|
/graph_construction/module_maps/build_module_maps.py
|
1c555e7b68977736e1d6e6b35b08de82a026659f
|
[] |
no_license
|
GageDeZoort/interaction_network_paper
|
1f4cf5a8673d4dd67341214cf2eb27c9fee660b2
|
45d313611009b38b2d17b5d20d51ae6a80ebf4a4
|
refs/heads/pytorch_geometric
| 2023-04-19T02:45:57.717764
| 2021-10-08T23:12:19
| 2021-10-08T23:12:19
| 337,429,781
| 21
| 10
| null | 2022-01-05T19:57:20
| 2021-02-09T14:24:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,594
|
py
|
import os
import sys
import numpy as np
import pandas as pd
import trackml
from trackml.dataset import load_event
from trackml.dataset import load_dataset
pixel_layers = [(8,2), (8,4), (8,6), (8,8),
(7,14), (7,12), (7,10),
(7,8), (7,6), (7,4), (7,2),
(9,2), (9,4), (9,6), (9,8),
(9,10), (9,12), (9,14)]
layer_pairs = [(0,1), (1,2), (2,3),
(0,4), (1,4), (2,4),
(4,5), (5,6), (6,7), (7,8), (8,9), (9,10),
(0,11), (1,11), (2,11),
(11,12), (12,13), (13,14), (14,15), (15,16), (16,17)]
pt_min = float(sys.argv[1])
train_sample = 2
indir = '/tigress/jdezoort/train_{}'.format(train_sample)
evtid_base = 'event00000'
evtids = os.listdir(indir) #[evtid_base+str(i) for i in np.arange(1000, , 1)]
evtids = [evtid.split('-')[0] for evtid in evtids if 'hits' in evtid]
module_labels = {}
hits, cells, particles, truth = load_event(os.path.join(indir, evtids[0]))
hits_by_loc = hits.groupby(['volume_id', 'layer_id'])
hits = pd.concat([hits_by_loc.get_group(pixel_layers[i]).assign(layer=i)
for i in range(len(pixel_layers))])
for lid, lhits in hits.groupby('layer'):
module_labels[lid] = np.unique(lhits['module_id'].values)
module_maps = {(i,j): np.zeros((np.max(module_labels[i])+1, np.max(module_labels[j])+1))
for (i,j) in layer_pairs}
total_connections = []
for i, evtid in enumerate(evtids):
print(i, evtid)
hits, cells, particles, truth = load_event(os.path.join(indir, evtid))
hits_by_loc = hits.groupby(['volume_id', 'layer_id'])
hits = pd.concat([hits_by_loc.get_group(pixel_layers[i]).assign(layer=i)
for i in range(len(pixel_layers))])
pt = np.sqrt(particles.px**2 + particles.py**2)
particles['pt'] = pt
particles = particles[pt > pt_min]
truth = (truth[['hit_id', 'particle_id']]
.merge(particles[['particle_id', 'pt']], on='particle_id'))
r = np.sqrt(hits.x**2 + hits.y**2)
phi = np.arctan2(hits.y, hits.x)
hits = (hits[['hit_id', 'z', 'layer', 'module_id']]
.assign(r=r, phi=phi)
.merge(truth[['hit_id', 'particle_id', 'pt']], on='hit_id'))
hits = (hits.loc[
hits.groupby(['particle_id', 'layer'], as_index=False).r.idxmin()
]).assign(evtid=evtid)
hits_by_loc = hits.groupby('layer')
for lp in layer_pairs:
hits0 = hits_by_loc.get_group(lp[0])
hits1 = hits_by_loc.get_group(lp[1])
keys = ['evtid', 'particle_id', 'module_id', 'r', 'phi', 'z']
hit_pairs = hits0[keys].reset_index().merge(
hits1[keys].reset_index(), on='evtid', suffixes=('_1', '_2'))
pid1, pid2 = hit_pairs['particle_id_1'], hit_pairs['particle_id_2']
hit_pairs = hit_pairs[pid1==pid2]
mid1, mid2 = hit_pairs['module_id_1'].values, hit_pairs['module_id_2'].values
r1, r2 = hit_pairs['r_1'].values, hit_pairs['r_2'].values
        for m in range(len(mid1)):  # m, so the outer enumerate index i is not shadowed
            module_maps[lp][mid1[m]][mid2[m]] += 1
connections = 0
for module_map in module_maps.values():
connections += np.sum(module_map > 0)
total_connections.append(connections)
pt_lookup = {0.5: '0p5', 0.6: '0p6', 0.7: '0p7', 0.8: '0p8',
0.9: '0p9', 1.0: '1', 1.1: '1p1', 1.2: '1p2',
1.3: '1p3', 1.4: '1p4', 1.5: '1p5', 1.6: '1p6',
1.7: '1p7', 1.8: '1p8', 1.9: '1p9', 2.0: '2'}
pt_str = pt_lookup[pt_min]
with open(f'module_map_{train_sample}_{pt_str}GeV.npy', 'wb') as f:
np.save(f, module_maps)
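# Verification sketch: np.save pickled a Python dict, so reading it back
# needs allow_pickle=True plus .item() to recover the dict.
reloaded = np.load(f'module_map_{train_sample}_{pt_str}GeV.npy',
                   allow_pickle=True).item()
assert set(reloaded.keys()) == set(module_maps.keys())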
|
[
"jgdezoort@gmail.com"
] |
jgdezoort@gmail.com
|
9a0d1b2385be47262406834cb80e1f15cf4e0136
|
cfd3789ab9e5138d584a5c35ece1fb68a40b8f43
|
/day_16/day16_1.py
|
c33ec7cd6004feadb8acfa265e5bef2ecfe90c39
|
[] |
no_license
|
math-foo/advent-of-code-2015
|
d685ee845b1fc2343970b21e2b7cdea67af72681
|
fff7b82f63b379305e8d09593764829fe23564df
|
refs/heads/master
| 2021-01-10T07:22:18.374502
| 2016-01-01T02:14:59
| 2016-01-01T02:14:59
| 48,017,153
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
#!/usr/bin/python
aunts = open('input.txt', 'r').readlines()
sues = {}
for aunt in aunts:
    elems = aunt.split(' ')
    sue_info = {}
    for i in range(2, len(elems), 2):
        key = elems[i].strip(':')
        value = int(elems[i + 1].strip(','))
        sue_info[key] = value
    sues[elems[1].strip(':')] = sue_info
known_info = {'children': 3,
              'cats': 7,
              'samoyeds': 2,
              'pomeranians': 3,
              'akitas': 0,
              'vizslas': 0,
              'goldfish': 5,
              'trees': 3,
              'cars': 2,
              'perfumes': 1}
for sue in sues:
    sue_info = sues[sue]
    match = True
    for key in sue_info:
        if sue_info[key] != known_info[key]:
            match = False
            break
    if match:
        print(sue)
        break
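# Cross-check (sketch): the same filter expressed with all(); picks the
# same first matching aunt as the loop above.
matches = [s for s, info in sues.items()
           if all(known_info[k] == v for k, v in info.items())]
print(matches[0] if matches else None)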
|
[
"caelyn@ceruleanrodent.com"
] |
caelyn@ceruleanrodent.com
|
11466c7195688c9844d0b358637f508dadb88727
|
710e96b1435bc43cc260512df75af5dd3b2afd13
|
/code/1086.py
|
0b5a7f95a7b670d6d483a0922ab29b01df6a596b
|
[
"MIT"
] |
permissive
|
minssoj/Learning_Algorithm_Up
|
94ca8166c9a5d87917cf033ad8415871684241c4
|
45ec4e2eb4c07c9ec907a74dbd31370e1645c50b
|
refs/heads/main
| 2023-01-08T20:52:32.983756
| 2020-11-05T17:49:45
| 2020-11-05T17:49:45
| 301,926,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
# [Basics - comprehensive] Calculate the storage size of an image file (explained)
# minso.jeong@daum.net
'''
Problem link : https://www.codeup.kr/problem.php?id=1086
'''
w, h, b = map(int, input().split())
print('{:.2f} MB'.format(w * h * b / (8 * (2 ** 20))))
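# Worked example: a 1024 x 768 image at 24 bits per pixel takes
# 1024 * 768 * 24 / (8 * 2**20) = 2.25, so the program prints "2.25 MB".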
|
[
"minso.jeong@daum.net"
] |
minso.jeong@daum.net
|
91a2a80b3abc43fae5326615100f02782ef09b2d
|
b5ed3abaf6025745a1755c36db67e257311b8070
|
/math_m2/trailing_zeros.py
|
dda2066ee2d6753cad95449f36ddcc458fc80c10
|
[] |
no_license
|
larafonse/cti-ips-2020
|
bf2de6444eeca64a35d4854bc093480f1f4b2ceb
|
2ecdd9fe8a59eea83c90fba3ef6b18fb1cdefdda
|
refs/heads/master
| 2022-11-28T12:03:03.919506
| 2020-08-11T04:19:44
| 2020-08-11T04:19:44
| 277,982,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
def fast_trailing_zero_factorial(n):
i = 5
count = 0
while(n/i >= 1):
count+=(n//i)
i*=5
return count
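# Sanity check (Legendre's formula): trailing zeros of n! equal the sum of
# floor(n / 5**k) over k >= 1, since factors of 2 always outnumber the 5s.
# For n = 100: floor(100/5) + floor(100/25) = 20 + 4 = 24.
assert fast_trailing_zero_factorial(100) == 24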
if __name__ == "__main__":
print(fast_trailing_zero_factorial(int(input())))
|
[
"nlarafonseca@csumb.edu"
] |
nlarafonseca@csumb.edu
|
3ed94483e209e0a44ee8dd48855dccf9115e3edf
|
0a483ba886af2b75e0b4529e211b777f32cb927f
|
/venv/bin/jupyter-trust
|
cf4fc483dab57da16edbb4b0a6a414045583265c
|
[] |
no_license
|
controlidco/python-data-exercises
|
d6796a0eee9d5e735da0f16bc5c1d4c8f35b5b58
|
b90666153f072e2c6e1777b905021097846cb846
|
refs/heads/master
| 2020-09-25T03:46:09.867917
| 2019-07-31T13:44:38
| 2019-07-31T13:44:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
#!/Users/otniel/Developer/pythoncourse/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from nbformat.sign import TrustNotebookApp
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(TrustNotebookApp.launch_instance())
|
[
"benamara.otniel@gmail.com"
] |
benamara.otniel@gmail.com
|
|
60fa2920856e582a27596de204b598f28ef7a603
|
4b1009917c0c0ff2dcb0aa9df450d735a72e86cb
|
/display-server/node_modules/socket.io/node_modules/engine.io/node_modules/ws/node_modules/bufferutil/build/config.gypi
|
c14531a0c15c6c9b1d8ac723701d7ee6c53678a9
|
[
"MIT"
] |
permissive
|
stu-smith/jarvis
|
4f793dc29f7cdff2bd5163ebee528679a6b71ba9
|
d0b50e86e88a54e6f1984530d953d03490146370
|
refs/heads/master
| 2021-01-10T03:07:00.916466
| 2016-02-16T20:18:48
| 2016-02-16T20:18:48
| 51,866,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,154
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/stu/.node-gyp/0.10.37",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.28 node/v0.10.37 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/stu/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/stu/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "2",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "0.10.37",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/stu/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}
|
[
"stusmith1977@gmail.com"
] |
stusmith1977@gmail.com
|
11ba89ca34f48062674ca6cc401253eca058c762
|
944086a35957e5831bb10e10cb924ef9ca9cdd07
|
/ingredients/commons.py
|
d51876296490bec4189b18372ff4da71f312f569
|
[
"Apache-2.0"
] |
permissive
|
kata-ai/wikiner
|
cc0256b308bdd468529f96da23c18c6d52caa9f1
|
bb2090c51881dd08f19ba852c38f739f93fe7697
|
refs/heads/master
| 2022-12-23T05:08:46.407929
| 2019-12-18T02:27:25
| 2019-12-18T02:27:25
| 223,879,867
| 4
| 2
| null | 2022-12-16T18:32:15
| 2019-11-25T06:45:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
import re
def tag(sentence, tokens, predicted):
results, prev, last_idx = [], None, 0
for i, _ in enumerate(tokens):
pred = "O"
pred_label = "O"
try:
pred = predicted[i]
pred_label = predicted[i].split("-")
# idx = np.argmax(pred)
# score = pred[idx]
except Exception as e:
pass
if len(pred_label) == 2:
prefix, label = pred_label
else:
prefix = 'O'
label = pred
start_idx = last_idx + sentence[last_idx:].index(tokens[i])
end_idx = start_idx + len(tokens[i])
if prefix in ['I', 'E', 'O']:
if label == prev:
results[-1]['end'] = end_idx
else:
# mislabelled or 'O'
results.append({
'start': start_idx,
'end': end_idx,
'tagname': label,
})
elif prefix in ['B', 'S']:
results.append({
'start': start_idx,
'end': end_idx,
'tagname': label,
})
last_idx = end_idx
prev = label
for i, pred in enumerate(results):
results[i]['span'] = sentence[pred['start']:pred['end']]
return results
def word_tokenize(sentence, sep=r'(\W+)?'):
return [x.strip() for x in re.split(sep, sentence) if x.strip()]
|
[
"fariz@yesbossnow.com"
] |
fariz@yesbossnow.com
|
81f6a58a5384c9dbc0bfaa1102c46721e4091001
|
a7f855efff14e0b15cffb3f035d8dc9f7f102afe
|
/mfb/extraWin/UTpackages/UTisocontour/isocontour.py
|
d857eb7a8199fd36b5ec4c785e21cb4375b49836
|
[] |
no_license
|
BlenderCN-Org/FlipbookApp
|
76fcd92644c4e18dd90885eeb49e5aecae28f6f0
|
0df2acebf76b40105812d2e3af8f0ef4784ab74c
|
refs/heads/master
| 2020-05-27T14:33:25.330291
| 2014-07-10T17:47:29
| 2014-07-10T17:47:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,128
|
py
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.4
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_isocontour', [dirname(__file__)])
except ImportError:
import _isocontour
return _isocontour
if fp is not None:
try:
_mod = imp.load_module('_isocontour', fp, pathname, description)
finally:
fp.close()
return _mod
_isocontour = swig_import_helper()
del swig_import_helper
else:
import _isocontour
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
CONTOUR_UCHAR = _isocontour.CONTOUR_UCHAR
CONTOUR_USHORT = _isocontour.CONTOUR_USHORT
CONTOUR_FLOAT = _isocontour.CONTOUR_FLOAT
CONTOUR_2D = _isocontour.CONTOUR_2D
CONTOUR_3D = _isocontour.CONTOUR_3D
CONTOUR_REG_2D = _isocontour.CONTOUR_REG_2D
CONTOUR_REG_3D = _isocontour.CONTOUR_REG_3D
NO_COLOR_VARIABLE = _isocontour.NO_COLOR_VARIABLE
class DatasetInfo(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DatasetInfo, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DatasetInfo, name)
__repr__ = _swig_repr
__swig_setmethods__["datatype"] = _isocontour.DatasetInfo_datatype_set
__swig_getmethods__["datatype"] = _isocontour.DatasetInfo_datatype_get
if _newclass:datatype = _swig_property(_isocontour.DatasetInfo_datatype_get, _isocontour.DatasetInfo_datatype_set)
__swig_setmethods__["meshtype"] = _isocontour.DatasetInfo_meshtype_set
__swig_getmethods__["meshtype"] = _isocontour.DatasetInfo_meshtype_get
if _newclass:meshtype = _swig_property(_isocontour.DatasetInfo_meshtype_get, _isocontour.DatasetInfo_meshtype_set)
__swig_setmethods__["nvars"] = _isocontour.DatasetInfo_nvars_set
__swig_getmethods__["nvars"] = _isocontour.DatasetInfo_nvars_get
if _newclass:nvars = _swig_property(_isocontour.DatasetInfo_nvars_get, _isocontour.DatasetInfo_nvars_set)
__swig_setmethods__["ntime"] = _isocontour.DatasetInfo_ntime_set
__swig_getmethods__["ntime"] = _isocontour.DatasetInfo_ntime_get
if _newclass:ntime = _swig_property(_isocontour.DatasetInfo_ntime_get, _isocontour.DatasetInfo_ntime_set)
__swig_setmethods__["dim"] = _isocontour.DatasetInfo_dim_set
__swig_getmethods__["dim"] = _isocontour.DatasetInfo_dim_get
if _newclass:dim = _swig_property(_isocontour.DatasetInfo_dim_get, _isocontour.DatasetInfo_dim_set)
__swig_setmethods__["orig"] = _isocontour.DatasetInfo_orig_set
__swig_getmethods__["orig"] = _isocontour.DatasetInfo_orig_get
if _newclass:orig = _swig_property(_isocontour.DatasetInfo_orig_get, _isocontour.DatasetInfo_orig_set)
__swig_setmethods__["span"] = _isocontour.DatasetInfo_span_set
__swig_getmethods__["span"] = _isocontour.DatasetInfo_span_get
if _newclass:span = _swig_property(_isocontour.DatasetInfo_span_get, _isocontour.DatasetInfo_span_set)
__swig_setmethods__["minext"] = _isocontour.DatasetInfo_minext_set
__swig_getmethods__["minext"] = _isocontour.DatasetInfo_minext_get
if _newclass:minext = _swig_property(_isocontour.DatasetInfo_minext_get, _isocontour.DatasetInfo_minext_set)
__swig_setmethods__["maxext"] = _isocontour.DatasetInfo_maxext_set
__swig_getmethods__["maxext"] = _isocontour.DatasetInfo_maxext_get
if _newclass:maxext = _swig_property(_isocontour.DatasetInfo_maxext_get, _isocontour.DatasetInfo_maxext_set)
__swig_setmethods__["minvar"] = _isocontour.DatasetInfo_minvar_set
__swig_getmethods__["minvar"] = _isocontour.DatasetInfo_minvar_get
if _newclass:minvar = _swig_property(_isocontour.DatasetInfo_minvar_get, _isocontour.DatasetInfo_minvar_set)
__swig_setmethods__["maxvar"] = _isocontour.DatasetInfo_maxvar_set
__swig_getmethods__["maxvar"] = _isocontour.DatasetInfo_maxvar_get
if _newclass:maxvar = _swig_property(_isocontour.DatasetInfo_maxvar_get, _isocontour.DatasetInfo_maxvar_set)
def _dim(self) -> "void" : return _isocontour.DatasetInfo__dim(self)
def _orig(self) -> "void" : return _isocontour.DatasetInfo__orig(self)
def _span(self) -> "void" : return _isocontour.DatasetInfo__span(self)
def _minext(self) -> "void" : return _isocontour.DatasetInfo__minext(self)
def _maxext(self) -> "void" : return _isocontour.DatasetInfo__maxext(self)
def __init__(self):
this = _isocontour.new_DatasetInfo()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_DatasetInfo
__del__ = lambda self : None;
DatasetInfo_swigregister = _isocontour.DatasetInfo_swigregister
DatasetInfo_swigregister(DatasetInfo)
class Seed(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Seed, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Seed, name)
__repr__ = _swig_repr
__swig_setmethods__["min"] = _isocontour.Seed_min_set
__swig_getmethods__["min"] = _isocontour.Seed_min_get
if _newclass:min = _swig_property(_isocontour.Seed_min_get, _isocontour.Seed_min_set)
__swig_setmethods__["max"] = _isocontour.Seed_max_set
__swig_getmethods__["max"] = _isocontour.Seed_max_get
if _newclass:max = _swig_property(_isocontour.Seed_max_get, _isocontour.Seed_max_set)
__swig_setmethods__["cell_id"] = _isocontour.Seed_cell_id_set
__swig_getmethods__["cell_id"] = _isocontour.Seed_cell_id_get
if _newclass:cell_id = _swig_property(_isocontour.Seed_cell_id_get, _isocontour.Seed_cell_id_set)
def __init__(self):
this = _isocontour.new_Seed()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_Seed
__del__ = lambda self : None;
Seed_swigregister = _isocontour.Seed_swigregister
Seed_swigregister(Seed)
class SeedData(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SeedData, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SeedData, name)
__repr__ = _swig_repr
__swig_setmethods__["nseeds"] = _isocontour.SeedData_nseeds_set
__swig_getmethods__["nseeds"] = _isocontour.SeedData_nseeds_get
if _newclass:nseeds = _swig_property(_isocontour.SeedData_nseeds_get, _isocontour.SeedData_nseeds_set)
__swig_setmethods__["seeds"] = _isocontour.SeedData_seeds_set
__swig_getmethods__["seeds"] = _isocontour.SeedData_seeds_get
if _newclass:seeds = _swig_property(_isocontour.SeedData_seeds_get, _isocontour.SeedData_seeds_set)
def __init__(self):
this = _isocontour.new_SeedData()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_SeedData
__del__ = lambda self : None;
SeedData_swigregister = _isocontour.SeedData_swigregister
SeedData_swigregister(SeedData)
class Signature(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Signature, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Signature, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _isocontour.Signature_name_set
__swig_getmethods__["name"] = _isocontour.Signature_name_get
if _newclass:name = _swig_property(_isocontour.Signature_name_get, _isocontour.Signature_name_set)
__swig_setmethods__["nval"] = _isocontour.Signature_nval_set
__swig_getmethods__["nval"] = _isocontour.Signature_nval_get
if _newclass:nval = _swig_property(_isocontour.Signature_nval_get, _isocontour.Signature_nval_set)
__swig_setmethods__["fx"] = _isocontour.Signature_fx_set
__swig_getmethods__["fx"] = _isocontour.Signature_fx_get
if _newclass:fx = _swig_property(_isocontour.Signature_fx_get, _isocontour.Signature_fx_set)
__swig_setmethods__["fy"] = _isocontour.Signature_fy_set
__swig_getmethods__["fy"] = _isocontour.Signature_fy_get
if _newclass:fy = _swig_property(_isocontour.Signature_fy_get, _isocontour.Signature_fy_set)
def getFx(self, *args) -> "void" : return _isocontour.Signature_getFx(self, *args)
def getFy(self, *args) -> "void" : return _isocontour.Signature_getFy(self, *args)
def __init__(self):
this = _isocontour.new_Signature()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_Signature
__del__ = lambda self : None;
Signature_swigregister = _isocontour.Signature_swigregister
Signature_swigregister(Signature)
class SliceData(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SliceData, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SliceData, name)
__repr__ = _swig_repr
__swig_setmethods__["width"] = _isocontour.SliceData_width_set
__swig_getmethods__["width"] = _isocontour.SliceData_width_get
if _newclass:width = _swig_property(_isocontour.SliceData_width_get, _isocontour.SliceData_width_set)
__swig_setmethods__["height"] = _isocontour.SliceData_height_set
__swig_getmethods__["height"] = _isocontour.SliceData_height_get
if _newclass:height = _swig_property(_isocontour.SliceData_height_get, _isocontour.SliceData_height_set)
__swig_setmethods__["datatype"] = _isocontour.SliceData_datatype_set
__swig_getmethods__["datatype"] = _isocontour.SliceData_datatype_get
if _newclass:datatype = _swig_property(_isocontour.SliceData_datatype_get, _isocontour.SliceData_datatype_set)
__swig_setmethods__["ucdata"] = _isocontour.SliceData_ucdata_set
__swig_getmethods__["ucdata"] = _isocontour.SliceData_ucdata_get
if _newclass:ucdata = _swig_property(_isocontour.SliceData_ucdata_get, _isocontour.SliceData_ucdata_set)
__swig_setmethods__["usdata"] = _isocontour.SliceData_usdata_set
__swig_getmethods__["usdata"] = _isocontour.SliceData_usdata_get
if _newclass:usdata = _swig_property(_isocontour.SliceData_usdata_get, _isocontour.SliceData_usdata_set)
__swig_setmethods__["fdata"] = _isocontour.SliceData_fdata_set
__swig_getmethods__["fdata"] = _isocontour.SliceData_fdata_get
if _newclass:fdata = _swig_property(_isocontour.SliceData_fdata_get, _isocontour.SliceData_fdata_set)
def __init__(self):
this = _isocontour.new_SliceData()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_SliceData
__del__ = lambda self : None;
SliceData_swigregister = _isocontour.SliceData_swigregister
SliceData_swigregister(SliceData)
class Contour2dData(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Contour2dData, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Contour2dData, name)
__repr__ = _swig_repr
__swig_setmethods__["nvert"] = _isocontour.Contour2dData_nvert_set
__swig_getmethods__["nvert"] = _isocontour.Contour2dData_nvert_get
if _newclass:nvert = _swig_property(_isocontour.Contour2dData_nvert_get, _isocontour.Contour2dData_nvert_set)
__swig_setmethods__["nedge"] = _isocontour.Contour2dData_nedge_set
__swig_getmethods__["nedge"] = _isocontour.Contour2dData_nedge_get
if _newclass:nedge = _swig_property(_isocontour.Contour2dData_nedge_get, _isocontour.Contour2dData_nedge_set)
def __init__(self):
this = _isocontour.new_Contour2dData()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_Contour2dData
__del__ = lambda self : None;
Contour2dData_swigregister = _isocontour.Contour2dData_swigregister
Contour2dData_swigregister(Contour2dData)
class Contour3dData(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Contour3dData, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Contour3dData, name)
__repr__ = _swig_repr
__swig_setmethods__["nvert"] = _isocontour.Contour3dData_nvert_set
__swig_getmethods__["nvert"] = _isocontour.Contour3dData_nvert_get
if _newclass:nvert = _swig_property(_isocontour.Contour3dData_nvert_get, _isocontour.Contour3dData_nvert_set)
__swig_setmethods__["ntri"] = _isocontour.Contour3dData_ntri_set
__swig_getmethods__["ntri"] = _isocontour.Contour3dData_ntri_get
if _newclass:ntri = _swig_property(_isocontour.Contour3dData_ntri_get, _isocontour.Contour3dData_ntri_set)
__swig_setmethods__["vfun"] = _isocontour.Contour3dData_vfun_set
__swig_getmethods__["vfun"] = _isocontour.Contour3dData_vfun_get
if _newclass:vfun = _swig_property(_isocontour.Contour3dData_vfun_get, _isocontour.Contour3dData_vfun_set)
__swig_setmethods__["colorvar"] = _isocontour.Contour3dData_colorvar_set
__swig_getmethods__["colorvar"] = _isocontour.Contour3dData_colorvar_get
if _newclass:colorvar = _swig_property(_isocontour.Contour3dData_colorvar_get, _isocontour.Contour3dData_colorvar_set)
__swig_setmethods__["fmin"] = _isocontour.Contour3dData_fmin_set
__swig_getmethods__["fmin"] = _isocontour.Contour3dData_fmin_get
if _newclass:fmin = _swig_property(_isocontour.Contour3dData_fmin_get, _isocontour.Contour3dData_fmin_set)
__swig_setmethods__["fmax"] = _isocontour.Contour3dData_fmax_set
__swig_getmethods__["fmax"] = _isocontour.Contour3dData_fmax_get
if _newclass:fmax = _swig_property(_isocontour.Contour3dData_fmax_get, _isocontour.Contour3dData_fmax_set)
def __init__(self):
this = _isocontour.new_Contour3dData()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_Contour3dData
__del__ = lambda self : None;
Contour3dData_swigregister = _isocontour.Contour3dData_swigregister
Contour3dData_swigregister(Contour3dData)
class ConDataset(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ConDataset, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ConDataset, name)
__repr__ = _swig_repr
__swig_setmethods__["vnames"] = _isocontour.ConDataset_vnames_set
__swig_getmethods__["vnames"] = _isocontour.ConDataset_vnames_get
if _newclass:vnames = _swig_property(_isocontour.ConDataset_vnames_get, _isocontour.ConDataset_vnames_set)
__swig_setmethods__["nsfun"] = _isocontour.ConDataset_nsfun_set
__swig_getmethods__["nsfun"] = _isocontour.ConDataset_nsfun_get
if _newclass:nsfun = _swig_property(_isocontour.ConDataset_nsfun_get, _isocontour.ConDataset_nsfun_set)
__swig_setmethods__["sfun"] = _isocontour.ConDataset_sfun_set
__swig_getmethods__["sfun"] = _isocontour.ConDataset_sfun_get
if _newclass:sfun = _swig_property(_isocontour.ConDataset_sfun_get, _isocontour.ConDataset_sfun_set)
__swig_setmethods__["data"] = _isocontour.ConDataset_data_set
__swig_getmethods__["data"] = _isocontour.ConDataset_data_get
if _newclass:data = _swig_property(_isocontour.ConDataset_data_get, _isocontour.ConDataset_data_set)
__swig_setmethods__["plot"] = _isocontour.ConDataset_plot_set
__swig_getmethods__["plot"] = _isocontour.ConDataset_plot_get
if _newclass:plot = _swig_property(_isocontour.ConDataset_plot_get, _isocontour.ConDataset_plot_set)
def getSignature(self, *args) -> "Signature *" : return _isocontour.ConDataset_getSignature(self, *args)
def __init__(self):
this = _isocontour.new_ConDataset()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _isocontour.delete_ConDataset
__del__ = lambda self : None;
ConDataset_swigregister = _isocontour.ConDataset_swigregister
ConDataset_swigregister(ConDataset)
def setVerboseLevel(*args) -> "void" :
return _isocontour.setVerboseLevel(*args)
setVerboseLevel = _isocontour.setVerboseLevel
def newDatasetUnstr(*args) -> "ConDataset *" :
return _isocontour.newDatasetUnstr(*args)
newDatasetUnstr = _isocontour.newDatasetUnstr
def newDatasetReg(*args) -> "ConDataset *" :
return _isocontour.newDatasetReg(*args)
newDatasetReg = _isocontour.newDatasetReg
def loadDataset(*args) -> "ConDataset *" :
return _isocontour.loadDataset(*args)
loadDataset = _isocontour.loadDataset
def getDatasetInfo(*args) -> "DatasetInfo *" :
return _isocontour.getDatasetInfo(*args)
getDatasetInfo = _isocontour.getDatasetInfo
def getVariableNames(*args) -> "char **" :
return _isocontour.getVariableNames(*args)
getVariableNames = _isocontour.getVariableNames
def getSeedCells(*args) -> "SeedData *" :
return _isocontour.getSeedCells(*args)
getSeedCells = _isocontour.getSeedCells
def getNumberOfSignatures(*args) -> "int" :
return _isocontour.getNumberOfSignatures(*args)
getNumberOfSignatures = _isocontour.getNumberOfSignatures
def getSignatureFunctions(*args) -> "Signature *" :
return _isocontour.getSignatureFunctions(*args)
getSignatureFunctions = _isocontour.getSignatureFunctions
def getSignatureValues(*args) -> "float *" :
return _isocontour.getSignatureValues(*args)
getSignatureValues = _isocontour.getSignatureValues
def getSlice(*args) -> "SliceData *" :
return _isocontour.getSlice(*args)
getSlice = _isocontour.getSlice
def getContour2d(*args) -> "Contour2dData *" :
return _isocontour.getContour2d(*args)
getContour2d = _isocontour.getContour2d
def getContour3d(*args) -> "Contour3dData *" :
return _isocontour.getContour3d(*args)
getContour3d = _isocontour.getContour3d
def saveContour2d(*args) -> "void" :
return _isocontour.saveContour2d(*args)
saveContour2d = _isocontour.saveContour2d
def saveContour3d(*args) -> "void" :
return _isocontour.saveContour3d(*args)
saveContour3d = _isocontour.saveContour3d
def writeIsoComponents(*args) -> "void" :
return _isocontour.writeIsoComponents(*args)
writeIsoComponents = _isocontour.writeIsoComponents
def clearDataset(*args) -> "void" :
return _isocontour.clearDataset(*args)
clearDataset = _isocontour.clearDataset
def newDatasetRegFloat3D(*args) -> "ConDataset *" :
return _isocontour.newDatasetRegFloat3D(*args)
newDatasetRegFloat3D = _isocontour.newDatasetRegFloat3D
def delDatasetReg(*args) -> "void" :
return _isocontour.delDatasetReg(*args)
delDatasetReg = _isocontour.delDatasetReg
def delContour3d(*args) -> "void" :
return _isocontour.delContour3d(*args)
delContour3d = _isocontour.delContour3d
def newDatasetRegShort3D(*args) -> "ConDataset *" :
return _isocontour.newDatasetRegShort3D(*args)
newDatasetRegShort3D = _isocontour.newDatasetRegShort3D
def newDatasetRegUchar3D(*args) -> "ConDataset *" :
return _isocontour.newDatasetRegUchar3D(*args)
newDatasetRegUchar3D = _isocontour.newDatasetRegUchar3D
def setOrig3D(*args) -> "void" :
return _isocontour.setOrig3D(*args)
setOrig3D = _isocontour.setOrig3D
def setSpan3D(*args) -> "void" :
return _isocontour.setSpan3D(*args)
setSpan3D = _isocontour.setSpan3D
def newDatasetRegFloat2D(*args) -> "ConDataset *" :
return _isocontour.newDatasetRegFloat2D(*args)
newDatasetRegFloat2D = _isocontour.newDatasetRegFloat2D
def newDatasetRegShort2D(*args) -> "ConDataset *" :
return _isocontour.newDatasetRegShort2D(*args)
newDatasetRegShort2D = _isocontour.newDatasetRegShort2D
def newDatasetRegUchar2D(*args) -> "ConDataset *" :
return _isocontour.newDatasetRegUchar2D(*args)
newDatasetRegUchar2D = _isocontour.newDatasetRegUchar2D
def setOrig2D(*args) -> "void" :
return _isocontour.setOrig2D(*args)
setOrig2D = _isocontour.setOrig2D
def setSpan2D(*args) -> "void" :
return _isocontour.setSpan2D(*args)
setSpan2D = _isocontour.setSpan2D
def getContour3dData(*args) -> "void" :
return _isocontour.getContour3dData(*args)
getContour3dData = _isocontour.getContour3dData
def getContour2dData(*args) -> "void" :
return _isocontour.getContour2dData(*args)
getContour2dData = _isocontour.getContour2dData
# This file is compatible with both classic and new-style classes.
string2Float = _isocontour.string2Float
getSliceArray = _isocontour.getSliceArray
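# --- Hedged usage sketch (not part of the generated wrapper) ---
# The C-side signatures are not visible from this file, so the argument
# lists below are assumptions inferred from the function names and the
# constants defined above; a typical session might resemble:
#
#   setVerboseLevel(1)
#   data = loadDataset(CONTOUR_FLOAT, CONTOUR_REG_3D, "volume.rawiv")
#   info = getDatasetInfo(data)
#   surface = getContour3d(data, 0, 0, 1.5, NO_COLOR_VARIABLE)
#   saveContour3d(surface, "isosurface.raw")
#   clearDataset(data)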
|
[
"mike.c.pan@gmail.com"
] |
mike.c.pan@gmail.com
|
d06216331ce8ca32ca14753c0bc10553d05e2cda
|
e234808a354c2aab816d6dac98b8fded08139bec
|
/olympiads_mospolytech/account/admin.py
|
1251e5c33ecd5bca7b9a923f9f3fc0a3268ab1c6
|
[] |
no_license
|
yellowpearl/mpolymps
|
ac7c15788fa66afa8028622b6dcb140969196451
|
c4e14b8a46af5b788ba5077fa6705a13b0c09382
|
refs/heads/master
| 2023-03-25T20:46:49.615667
| 2021-03-26T11:32:55
| 2021-03-26T11:32:55
| 326,702,383
| 0
| 0
| null | 2021-01-27T18:56:51
| 2021-01-04T14:03:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 262
|
py
|
from django.contrib import admin

from .models import EmailConfirmation, OlympsUser, ResetPasswords, Chat, Message
admin.site.register(EmailConfirmation)
admin.site.register(OlympsUser)
admin.site.register(ResetPasswords)
admin.site.register(Chat)
admin.site.register(Message)
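# --- Hedged sketch (not in the original file) ---
# Django also supports decorator-based registration with a ModelAdmin,
# which would replace the plain register call for OlympsUser above;
# the list_display fields here are hypothetical:
#
#   @admin.register(OlympsUser)
#   class OlympsUserAdmin(admin.ModelAdmin):
#       list_display = ("id", "user")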
|
[
"yellowpearl@yandex.ru"
] |
yellowpearl@yandex.ru
|
5bd0a70c30bf7375ce96d66edd4d0bde895f6c18
|
05bdb561010ba50d3b5a70e8cbe2ed29572b961c
|
/class4/while1.py
|
ecd826a2b47fe15dbe603af2bb3618718fb04e16
|
[] |
no_license
|
FundamentalsModernSoftware/fall2018
|
4d45137ceff9323627e934789cc65c94c8148e86
|
c2fb9e4596d1d0fd51afdd5e294022e3aebefdf9
|
refs/heads/master
| 2020-03-26T19:18:08.146935
| 2018-10-10T18:32:10
| 2018-10-10T18:32:10
| 145,257,676
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
i = 1
while i <= 4:
print(i)
i = i + 1
print('Done!')
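# Equivalent for-loop version (same output, left commented so the script's
# behaviour is unchanged):
#   for i in range(1, 5):
#       print(i)
#   print('Done!')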
|
[
"james.grimmelmann@cornell.edu"
] |
james.grimmelmann@cornell.edu
|
f3e883b27fc8bdb5821e68751c7cb54ce59df007
|
85fc4f8cc2a700151ef8c53672cb9222741f778f
|
/CheckOut_ApkName/check_update_apkname.py
|
0c9b3e38a8a0b4b1e6702c97cf03dcb81e105a48
|
[] |
no_license
|
Dragon-Zpl/MyGoogleCrawl
|
f148c1d478d1f18738dc3555ea023e76fc98230f
|
9471357447dd09c62c223c9307cbe7774f9b164d
|
refs/heads/master
| 2020-04-26T22:38:46.022085
| 2019-04-16T06:51:49
| 2019-04-16T06:51:49
| 173,878,780
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,976
|
py
|
import asyncio
import time
from random import choice
from AllSetting import GetSetting
from CrawlProxy.ForeignProxyCrawl.crawl_foreigh_auto import crawl_fn
from Database_Option.Get_Mysql_pool import GetMysqlPool
from Database_Option.redis_option import RedisOption
from Parsing import ParsingData
from Request_Web.AllRequest import InitiateRequest
class CheckUpdateApkname:
def __init__(self):
self.setting = GetSetting()
self.loop = self.setting.get_loop()
self.session = self.setting.get_session()
self.lock = asyncio.Lock()
self.crawl_proxy = crawl_fn()
self.parsing = ParsingData()
self.get_pool = GetMysqlPool()
self.loop.run_until_complete(asyncio.ensure_future(self.get_pool.init_pool()))
self.get_redis = RedisOption()
self._Request = InitiateRequest()
self.apknames = set()
self.proxies = []
self.all_data_list = []
self.printf = self.setting.get_logger()
self.country_dict = {
# 'us': '&hl=en&gl=us',
'zh': '&hl=zh&gl=us',
'zhtw': '&hl=zh_TW&gl=us',
'ko': '&hl=ko&gl=us',
'ar': '&hl=ar&gl=us',
'jp': '&hl=ja&gl=us',
}
    async def _get_proxy(self):
        async with self.lock:
            if len(self.proxies) < 3:
                self.proxies = await self.crawl_proxy.run(self.session)
        try:
            return choice(self.proxies)
        except IndexError:
            # choice() raises IndexError on an empty pool; the original fell
            # through and returned None here -- return the retried value instead.
            return await self._get_proxy()
    async def check_app_version(self, data, retries=3, proxy=None):
        """
        Check whether the US version of the package has been updated.
        """
        now_pkgname = data["pkgname"]
        now_app_version = await self.get_pool.find_pkgname(now_pkgname)
        apk_url = "https://play.google.com/store/apps/details?id=" + now_pkgname
        for i in range(retries):
            if proxy is None:
                proxy = await self._get_proxy()
            try:
                datas = await self._Request.get_request(self.session, apk_url, proxy)
                if datas:
                    analysis_data = self.parsing.analysis_country_data(datas)
                    # Not downloadable yet: report "no update" and stop.
                    if analysis_data is None:
                        data_return = {}
                        data_return["pkgname"] = now_pkgname
                        data_return["is_update"] = 0
                        return data_return, None
                    analysis_data["country"] = "us"
                    analysis_data["pkgname"] = now_pkgname
                    analysis_data["url"] = apk_url
                    check_app_version = analysis_data["app_version"]
                    change_time = self.parsing.change_time('us', analysis_data["update_time"])
                    if change_time is not None:
                        analysis_data["update_time"] = change_time
                    # No update when the stored version exists and the checked
                    # version either matches it or is empty.
                    if now_app_version is not None and (check_app_version == now_app_version or check_app_version is None):
                        data_return = {}
                        data_return["app_version"] = now_app_version
                        data_return["pkgname"] = now_pkgname
                        data_return["is_update"] = 0
                    else:
                        data_return = {}
                        data_return["app_version"] = check_app_version
                        data_return["pkgname"] = now_pkgname
                        data_return["is_update"] = 1
                    return data_return, analysis_data
                else:
                    self.printf.info("data is none")
            except Exception as e:
                if str(e) == "":
                    self.printf.info("bad data: " + str(data))
                self.printf.info(str(e))
        else:
            # All retries failed: flag the package to be pushed back into redis.
            self.printf.info('retries exhausted, pushing the package back into redis')
            data_return = {}
            data_return["pkgname"] = now_pkgname
            data_return["is_update"] = 2
            return data_return, None
    async def check_other_country(self, data, retries=3, proxy=None):
        '''
        Fetch store data for the other locales in self.country_dict.
        '''
        for country in self.country_dict:
            pkgname = data["pkgname"]
            apk_url = "https://play.google.com/store/apps/details?id=" + pkgname + self.country_dict[country]
            if proxy is None:
                proxy = await self._get_proxy()
            for i in range(retries):
                try:
                    datas = await self._Request.get_request(self.session, apk_url, proxy)
                    if datas:
                        check_app_data = self.parsing.analysis_country_data(datas)
                        if check_app_data is None:
                            break
                        check_app_data["pkgname"] = pkgname
                        check_app_data["country"] = country
                        check_app_data["url"] = apk_url
                        change_time = self.parsing.change_time(country, check_app_data["update_time"])
                        if change_time is not None:
                            check_app_data["update_time"] = change_time
                        self.all_data_list.append(check_app_data)
                        break
                except Exception as e:
                    if str(e) == "":
                        self.printf.info("bad data: " + str(data))
                    self.printf.info(str(e))
            else:
                return None
    def _get_pkgdata_redis(self, start):
        """
        Pull up to 100 pkg records from redis, for at most 20 seconds.
        """
        pkg_datas = []
        for i in range(100):
            end = time.time()
            if (end - start) > 20:
                return pkg_datas
            pkg_data = self.get_redis.get_redis_pkgname()
            pkg_datas.append(pkg_data)
        return pkg_datas
def _build_check_tasks(self, results):
        '''
        Build the task queue that checks the US store information.
        :param results: pkg records pulled from redis
        :return: list of pending check_app_version tasks
        '''
check_tasks = []
for result in results:
task = asyncio.ensure_future(self.check_app_version(result))
check_tasks.append(task)
return check_tasks
def _task_ensure_future(self, func, data, tasks):
task = asyncio.ensure_future(func(data))
tasks.append(task)
    def _build_other_insert(self, check_results):
        '''
        Walk the US-based check results: push retry entries back into redis,
        queue mysql inserts for the US data, and queue checks for the other
        locales where an update was detected.
        :param check_results: (data_return, analysis_data) tuples
        :return: mysql-insert task list and other-country check task list
        '''
        save_mysql_tasks = []
        check_other_tasks = []
        for check_result in check_results:
            try:
                data_return, analysis_data = check_result
                if data_return is not None and data_return["is_update"] == 2:
                    self.get_redis.update_pkgname_redis(data_return)
                if analysis_data is not None:
                    self._task_ensure_future(self.get_pool.insert_mysql_, analysis_data, save_mysql_tasks)
                if data_return is not None and data_return["is_update"] == 1:
                    self._task_ensure_future(self.check_other_country, data_return, check_other_tasks)
            except Exception as e:
                self.printf.info('error: ' + str(e))
        return save_mysql_tasks, check_other_tasks
    def run(self):
        """
        Pull pkg data from redis -> check whether the US package updated ->
        update redis -> fetch other-locale data for packages with new
        versions (US as the baseline) -> store results into mysql.
        """
while True:
start = time.time()
pkg_datas = self._get_pkgdata_redis(start)
check_tasks = self._build_check_tasks(pkg_datas)
if len(check_tasks) >= 1:
check_results = self.loop.run_until_complete(asyncio.gather(*check_tasks))
save_mysql_tasks, check_other_tasks = self._build_other_insert(check_results)
if len(check_other_tasks) >= 1:
self.loop.run_until_complete(asyncio.wait(check_other_tasks))
for result_list in self.all_data_list:
if result_list is not None:
task = self.get_pool.insert_mysql_(result_list)
save_mysql_tasks.append(task)
self.all_data_list = []
if len(save_mysql_tasks) >= 1:
self.loop.run_until_complete(asyncio.wait(save_mysql_tasks))
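# --- Hedged usage sketch (not in the original file) ---
# run() blocks forever, so a driver script stays minimal; the redis/mysql
# services and GetSetting configuration are assumed to be set up elsewhere
# in the project.
if __name__ == "__main__":
    checker = CheckUpdateApkname()
    checker.run()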
|
[
"peilong.zhuang@office.feng.com"
] |
peilong.zhuang@office.feng.com
|
e8f2bfbfd36c90a2ef801eb317ca5acb38f55ceb
|
385fc235ccd59307f611ab8d3f9bfa025ffab9cd
|
/helloPV.py
|
62b154793fd5854e08b5c3711c4435a944bb1e68
|
[] |
no_license
|
denkovade/hellopy
|
136193811eea357a26a2ef2499fada91e52fa9ae
|
311184a51709304a5ba4fe0759d2aa9fe8b1ba48
|
refs/heads/master
| 2021-01-09T05:30:15.024229
| 2017-02-06T07:48:52
| 2017-02-06T07:48:52
| 80,781,037
| 0
| 0
| null | 2017-02-06T06:00:24
| 2017-02-02T23:57:33
|
Python
|
UTF-8
|
Python
| false
| false
| 124
|
py
|
#!/usr/bin/env python
def hello():
return "Hello, PV!"
def main():
    print(hello())  # parenthesized so it runs under Python 2 and 3
if __name__ == "__main__":
main()
|
[
"denkova.de@gmail.com"
] |
denkova.de@gmail.com
|
b8c51955e251f8f1f7397c08dbb129d119c9879e
|
ecb6b752523a126ef17895854b18e02df41c4cfe
|
/app_backend/views/user.py
|
63e2ce8ff4c284a65c2a17364590cdc75c896040
|
[
"MIT"
] |
permissive
|
zhanghe06/bearing_project
|
cd6a1b2ba509392da37e5797a3619454ca464276
|
25729aa7a8a5b38906e60b370609b15e8911ecdd
|
refs/heads/master
| 2023-05-27T17:23:22.561045
| 2023-05-23T09:26:07
| 2023-05-23T09:39:14
| 126,219,603
| 2
| 5
|
MIT
| 2022-12-08T03:11:27
| 2018-03-21T17:54:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 21,015
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: user.py
@time: 2018-04-04 17:33
"""
from __future__ import unicode_literals
import json
from datetime import datetime
from flask import (
request,
flash,
render_template,
url_for,
redirect,
abort,
jsonify,
Blueprint,
)
from flask_babel import gettext as _
from flask_login import login_required, current_user
from app_backend import app
from app_backend import excel
from app_backend.api.buyer_order import count_buyer_order
from app_backend.api.delivery import count_delivery
from app_backend.api.enquiry import count_enquiry
from app_backend.api.purchase import count_purchase
from app_backend.api.quotation import count_quotation
from app_backend.api.sales_order import count_sales_order
from app_backend.api.user import (
get_user_rows,
get_user_pagination,
get_user_row_by_id,
add_user,
edit_user,
user_current_stats,
user_former_stats)
from app_backend.api.user_auth import (
add_user_auth,
edit_user_auth, get_user_auth_row)
from app_backend.forms.user import (
UserSearchForm,
UserAddForm,
UserEditForm,
)
from app_backend.models.model_bearing import User
from app_backend.permissions import permission_role_administrator
from app_backend.permissions.user import (
permission_user_section_add,
permission_user_section_search,
permission_user_section_stats,
permission_user_section_export,
permission_user_section_get,
permission_user_section_edit,
permission_user_section_del,
)
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT_OPTION
from app_common.maps.operations import OPERATION_EXPORT, OPERATION_DELETE
from app_common.maps.status_delete import (
STATUS_DEL_OK,
STATUS_DEL_NO)
from app_common.maps.status_verified import STATUS_VERIFIED_OK
from app_common.maps.type_auth import TYPE_AUTH_ACCOUNT
from app_common.tools import json_default
# Blueprint definition
bp_user = Blueprint('user', __name__, url_prefix='/user')

# Load configuration
DOCUMENT_INFO = app.config.get('DOCUMENT_INFO', {})
PER_PAGE_BACKEND = app.config.get('PER_PAGE_BACKEND', 20)
AJAX_SUCCESS_MSG = app.config.get('AJAX_SUCCESS_MSG', {'result': True})
AJAX_FAILURE_MSG = app.config.get('AJAX_FAILURE_MSG', {'result': False})
@bp_user.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_user_section_search.require(http_exception=403)
def lists():
    """
    User list
    :return:
    """
    template_name = 'user/lists.html'
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user lists')
    # Search conditions
    form = UserSearchForm(request.form)
    search_condition = [
        User.status_delete == STATUS_DEL_NO,
    ]
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Search Failure'), 'danger')
            # Handle csrf_token errors separately (a bare map() is lazy on
            # Python 3, so iterate explicitly)
            if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
                for csrf_error in form.csrf_token.errors:
                    flash(csrf_error, 'danger')
        else:
            if form.name.data:
                search_condition.append(User.name == form.name.data)
            if form.role_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
                search_condition.append(User.role_id == form.role_id.data)
            if form.start_create_time.data:
                search_condition.append(User.create_time >= form.start_create_time.data)
            if form.end_create_time.data:
                search_condition.append(User.create_time <= form.end_create_time.data)
            # Handle export
            if form.op.data == OPERATION_EXPORT:
                # Check export permission
                if not permission_user_section_export.can():
                    abort(403)
                column_names = User.__table__.columns.keys()
                query_sets = get_user_rows(*search_condition)
                return excel.make_response_from_query_sets(
                    query_sets=query_sets,
                    column_names=column_names,
                    file_type='csv',
                    file_name='%s.csv' % _('user lists')
                )
            # Bulk delete
            if form.op.data == OPERATION_DELETE:
                # Check delete permission
                if not permission_user_section_del.can():
                    abort(403)
                user_ids = request.form.getlist('user_id')
                # Refuse deletion while any selected user is still in use
                permitted = True
                for user_id in user_ids:
                    # Check whether the user is currently referenced
                    # 1. Quotations
                    if count_quotation(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
                        ext_msg = _('Currently In Use')
                        flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                        permitted = False
                        break
                    # 2. Sales orders
                    if count_sales_order(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
                        ext_msg = _('Currently In Use')
                        flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                        permitted = False
                        break
                    # 3. Deliveries
                    if count_delivery(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
                        ext_msg = _('Currently In Use')
                        flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                        permitted = False
                        break
                    # 4. Enquiries
                    if count_enquiry(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
                        ext_msg = _('Currently In Use')
                        flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                        permitted = False
                        break
                    # 5. Buyer orders
                    if count_buyer_order(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
                        ext_msg = _('Currently In Use')
                        flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                        permitted = False
                        break
                    # 6. Purchases
                    if count_purchase(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
                        ext_msg = _('Currently In Use')
                        flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                        permitted = False
                        break
                if permitted:
                    result_total = True
                    for user_id in user_ids:
                        current_time = datetime.utcnow()
                        user_data = {
                            'status_delete': STATUS_DEL_OK,
                            'delete_time': current_time,
                            'update_time': current_time,
                        }
                        result = edit_user(user_id, user_data)
                        result_total = result_total and result
                    if result_total:
                        flash(_('Del Success'), 'success')
                    else:
                        flash(_('Del Failure'), 'danger')
    # Pagination data
    pagination = get_user_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render template
    return render_template(
        template_name,
        form=form,
        pagination=pagination,
        **document_info
    )
# @bp_user.route('/search.html', methods=['GET', 'POST'])
# @login_required
# @permission_user_section_search.require(http_exception=403)
# def search():
#     """
#     User search
#     :return:
#     """
#     template_name = 'customer/search_modal.html'
#     # Document info
#     document_info = DOCUMENT_INFO.copy()
#     document_info['TITLE'] = _('Customer Search')
#
#     # Search conditions
#     form = UserSearchForm(request.form)
#     form.owner_uid.choices = get_sales_user_list()
#     # app.logger.info('')
#
#     search_condition = [
#         Customer.status_delete == STATUS_DEL_NO,
#     ]
#     if request.method == 'POST':
#         # Form validation failed
#         if not form.validate_on_submit():
#             flash(_('Search Failure'), 'danger')
#             # Handle csrf_token errors separately
#             if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
#                 map(lambda x: flash(x, 'danger'), form.csrf_token.errors)
#         else:
#             if form.company_type.data != default_choice_option_int:
#                 search_condition.append(Customer.company_type == form.company_type.data)
#             if form.company_name.data:
#                 search_condition.append(Customer.company_name.like('%%%s%%' % form.company_name.data))
#     # Pagination data
#     pagination = get_customer_pagination(form.page.data, PER_PAGE_BACKEND_MODAL, *search_condition)
#
#     # Render template
#     return render_template(
#         template_name,
#         form=form,
#         pagination=pagination,
#         **document_info
#     )
@bp_user.route('/<int:user_id>/info.html')
@login_required
@permission_user_section_get.require(http_exception=403)
def info(user_id):
    """
    User detail
    :param user_id:
    :return:
    """
    # Detail data
    user_info = get_user_row_by_id(user_id)
    # Check the resource exists
    if not user_info:
        abort(404)
    # Check whether the resource was deleted
    if user_info.status_delete == STATUS_DEL_OK:
        abort(410)
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user info')
    # Render template
    return render_template('user/info.html', user_info=user_info, **document_info)
@bp_user.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_user_section_add.require(http_exception=403)
def add():
    """
    Create a user
    :return:
    """
    template_name = 'user/add.html'
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user add')
    # Load the creation form
    form = UserAddForm(request.form)
    # GET: render the creation page
    if request.method == 'GET':
        # Render the page
        return render_template(
            template_name,
            form=form,
            **document_info
        )
    # POST: handle the creation request
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Add Failure'), 'danger')
            return render_template(
                template_name,
                form=form,
                **document_info
            )
        # Form validation passed
        # Create the user's basic information
        current_time = datetime.utcnow()
        user_data = {
            'name': form.name.data,
            'salutation': form.salutation.data,
            'mobile': form.mobile.data,
            'tel': form.tel.data,
            'fax': form.fax.data,
            'email': form.email.data,
            'role_id': form.role_id.data,
            'create_time': current_time,
            'update_time': current_time,
        }
        user_id = add_user(user_data)
        if not user_id:
            flash(_('Add Failure'), 'danger')
            return render_template(
                template_name,
                form=form,
                **document_info
            )
        # Create the user's authentication record
        user_auth_data = {
            'user_id': user_id,
            'type_auth': TYPE_AUTH_ACCOUNT,
            'auth_key': form.name.data,
            'auth_secret': '123456',  # default password
            'status_verified': STATUS_VERIFIED_OK,
            'create_time': current_time,
            'update_time': current_time,
        }
        result = add_user_auth(user_auth_data)
        if result:
            flash(_('Add Success'), 'success')
            return redirect(request.args.get('next') or url_for('user.lists'))
        # Creation failed
        else:
            flash(_('Add Failure'), 'danger')
            return render_template(
                template_name,
                form=form,
                **document_info
            )
@bp_user.route('/<int:user_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_user_section_edit.require(http_exception=403)
def edit(user_id):
    """
    Edit a user
    """
    user_info = get_user_row_by_id(user_id)
    # Check the resource exists
    if not user_info:
        abort(404)
    # Check whether the resource was deleted
    if user_info.status_delete == STATUS_DEL_OK:
        abort(410)
    template_name = 'user/edit.html'
    # Load the edit form
    form = UserEditForm(request.form)
    form.id.data = user_id  # id is only used for duplicate checks during editing
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user edit')
    # GET: render the edit page
    if request.method == 'GET':
        # Populate the form
        form.id.data = user_info.id
        form.name.data = user_info.name
        form.salutation.data = user_info.salutation
        form.mobile.data = user_info.mobile
        form.tel.data = user_info.tel
        form.fax.data = user_info.fax
        form.email.data = user_info.email
        form.role_id.data = user_info.role_id
        form.create_time.data = user_info.create_time
        form.update_time.data = user_info.update_time
        # Render the page
        return render_template(
            template_name,
            user_id=user_id,
            form=form,
            **document_info
        )
    # POST: handle the edit request
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Edit Failure'), 'danger')
            # flash(form.errors, 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        # Non-admin roles may only edit their own information
        if not permission_role_administrator.can():
            if getattr(current_user, 'id') != form.id.data:
                flash(_('Permission denied, only the user\'s own information can be modified'), 'danger')
                # flash(form.errors, 'danger')
                return render_template(
                    template_name,
                    user_id=user_id,
                    form=form,
                    **document_info
                )
        # Form validation passed
        # Update the user's basic information
        current_time = datetime.utcnow()
        user_data = {
            'name': form.name.data,
            'salutation': form.salutation.data,
            'mobile': form.mobile.data,
            'tel': form.tel.data,
            'fax': form.fax.data,
            'email': form.email.data,
            'role_id': form.role_id.data,
            'update_time': current_time,
        }
        result = edit_user(user_id, user_data)
        if not result:
            # Edit failed
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        user_auth_row = get_user_auth_row(user_id=user_id)
        if not user_auth_row:
            # Edit failed
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        # Update the user's authentication record
        user_auth_data = {
            'user_id': user_id,
            'type_auth': TYPE_AUTH_ACCOUNT,
            'auth_key': form.name.data,
            'update_time': current_time,
        }
        result = edit_user_auth(user_auth_row.id, user_auth_data)
        if not result:
            # Edit failed
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        # Edit succeeded
        flash(_('Edit Success'), 'success')
        return redirect(request.args.get('next') or url_for('user.lists'))
@bp_user.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
    """
    Delete a user (AJAX)
    :return:
    """
    ajax_success_msg = AJAX_SUCCESS_MSG.copy()
    ajax_failure_msg = AJAX_FAILURE_MSG.copy()
    # Check delete permission
    if not permission_user_section_del.can():
        ext_msg = _('Permission Denied')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check the request method
    if not (request.method == 'GET' and request.is_xhr):
        ext_msg = _('Method Not Allowed')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check request arguments
    user_id = request.args.get('user_id', 0, type=int)
    if not user_id:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    user_info = get_user_row_by_id(user_id)
    # Check the resource exists
    if not user_info:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check whether the resource was already deleted
    if user_info.status_delete == STATUS_DEL_OK:
        ext_msg = _('Already deleted')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check whether the user is currently in use (quotations / orders)
    if count_quotation(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
        ext_msg = _('Currently In Use')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    current_time = datetime.utcnow()
    user_data = {
        'status_delete': STATUS_DEL_OK,
        'delete_time': current_time,
        'update_time': current_time,
    }
    result = edit_user(user_id, user_data)
    if result:
        ajax_success_msg['msg'] = _('Del Success')
        return jsonify(ajax_success_msg)
    else:
        ajax_failure_msg['msg'] = _('Del Failure')
        return jsonify(ajax_failure_msg)
@bp_user.route('/ajax/stats', methods=['GET', 'POST'])
@login_required
def ajax_stats():
    """
    User stats data (AJAX)
    :return:
    """
    time_based = request.args.get('time_based', 'hour')
    result_user_current = user_current_stats(time_based)
    result_user_former = user_former_stats(time_based)
    line_chart_data = {
        'labels': [label for label, _ in result_user_current],
        'datasets': [
            {
                'label': 'Active',
                'backgroundColor': 'rgba(220,220,220,0.5)',
                'borderColor': 'rgba(220,220,220,1)',
                'pointBackgroundColor': 'rgba(220,220,220,1)',
                'pointBorderColor': '#fff',
                'pointBorderWidth': 2,
                'data': [data for _, data in result_user_current]
            },
            {
                'label': 'Former',
                'backgroundColor': 'rgba(151,187,205,0.5)',
                'borderColor': 'rgba(151,187,205,1)',
                'pointBackgroundColor': 'rgba(151,187,205,1)',
                'pointBorderColor': '#fff',
                'pointBorderWidth': 2,
                'data': [data for _, data in result_user_former]
            }
        ]
    }
    return json.dumps(line_chart_data, default=json_default)
@bp_user.route('/stats.html')
@login_required
@permission_user_section_stats.require(http_exception=403)
def stats():
    """
    User stats page
    :return:
    """
    # Stats granularity
    time_based = request.args.get('time_based', 'hour')
    if time_based not in ['hour', 'date', 'month']:
        abort(404)
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user stats')
    # Render template
    return render_template(
        'user/stats.html',
        time_based=time_based,
        **document_info
    )
@bp_user.route('/<int:user_id>/stats.html')
@login_required
@permission_user_section_stats.require(http_exception=403)
def stats_item(user_id):
    """
    User stats detail
    :param user_id:
    :return:
    """
    user_info = get_user_row_by_id(user_id)
    # Check the resource exists
    if not user_info:
        abort(404)
    # Check whether the resource was deleted
    if user_info.status_delete == STATUS_DEL_OK:
        abort(410)
    # Stats data
    user_stats_item_info = get_user_row_by_id(user_id)
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user stats item')
    # Render template
    return render_template(
        'user/stats_item.html',
        user_stats_item_info=user_stats_item_info,
        **document_info
    )
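# --- Hedged usage sketch (not in the original file) ---
# The blueprint is presumably registered on the Flask app elsewhere in
# app_backend; a minimal registration would look like:
#
#   from app_backend.views.user import bp_user
#   app.register_blueprint(bp_user)   # routes served under /user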
|
[
"zhang_he06@163.com"
] |
zhang_he06@163.com
|
81a4be138e4d622067c8481ce5bd36adc911a700
|
f409b79a66ffe524b1d67bb87402bcc184473594
|
/app/models/music_playlist.py
|
9c66bb37eaac6fce92fc9cd3d89fcd62631fd06d
|
[] |
no_license
|
LaTCheatam/sound-burrow
|
47bdfc5fb7a895ab976334df753c5d86d63725c2
|
ba0ee7796eee4c79587374d0312db46aa0a4aae8
|
refs/heads/main
| 2023-04-26T14:28:22.168720
| 2021-05-10T20:05:28
| 2021-05-10T20:05:28
| 364,056,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
from .db import db
music_playlist = db.Table(
"music_playlists",
db.Column(
"playlist_id",
db.Integer,
db.ForeignKey("playlists.id"),
primary_key=True),
db.Column(
"music_id",
db.Integer,
db.ForeignKey("musics.id"),
primary_key=True)
)
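# --- Hedged usage sketch (not in the original file) ---
# This join table typically backs a many-to-many relationship; assuming
# Playlist and Music models exist in this package, the wiring would be:
#
#   musics = db.relationship(
#       "Music", secondary=music_playlist, back_populates="playlists")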
|
[
"obsidyenmoon@gmail.com"
] |
obsidyenmoon@gmail.com
|
23a59206c4ecad33e41d00479191210ddc7d3d39
|
1597f5dd0fc2f75c516a4ec63006d87b422fc884
|
/Legacy/Character_sheet/races.py
|
c51b3ba71d14711feee432be2547a81361067b80
|
[] |
no_license
|
Dan-Mead/DnD
|
e885036ac8d74c913a6d815115096b8425f2bba7
|
e55285df6e02eb6c27393bdf906f0898ebce82a9
|
refs/heads/master
| 2023-04-30T22:33:24.301837
| 2021-03-01T16:57:09
| 2021-03-01T16:57:09
| 274,371,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,228
|
py
|
import inspect
import sys
import helper_functions as f
from glossary import attrs, skills_dict
class race:
def add_race_modifiers(self, char):
char.info.Race = self.race_name
char.stats.size.Race = self.size
char.stats.speed.Race = self.speed
char.proficiencies.languages.Race = self.languages
for trait in vars(self).keys():
if trait == 'attributes':
for attr in self.attributes:
char.attributes[attr[0]]['race'] = attr[1]
elif trait == 'skills':
for skill in self.skills:
char.skills[skill].prof += [self.race_name]
elif trait == 'feats':
from feats import get_feat
for feat in self.feats:
new_feat = get_feat(feat, self.race_name)
char.feats[feat] = new_feat
char.feats[feat].initial_effects(char)
elif trait == 'features':
from features import get_feature
for feature in self.features:
new_feature = get_feature(feature)
char.features[self.race_name][feature] = new_feature
new_feature.initial_effects(char)
            elif trait not in ['race_name', 'size', 'speed', 'languages']:
                raise Exception(f"Unhandled race trait: {trait!r}.")
def get_race(char, race_choice):
races = {}
for race in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if not race[1].__subclasses__():
races[race[0].replace("_", " ")] = race[1]
race = races[race_choice](char)
return race
class Human_Base(race):
def __init__(self, char):
self.race_name = "Human"
self.size = 'Medium'
self.speed = 30
self.languages = f.add_language(char.proficiencies.languages, 'Common',
1)
class Human(Human_Base):
def __init__(self, char):
super().__init__(char)
self.attributes = [(attr, 1) for attr in attrs]
class Human_Variant(Human_Base):
def __init__(self, char):
super().__init__(char)
self.attributes = [(attr, 1) for attr in f.add_attributes(attrs, 2)]
self.skills = f.add_skill(char.skills, skills_dict.keys(), 1)
self.feats = f.add_feat(char, 1)
class Half_Orc(race):
def __init__(self, char):
self.race_name = "Half-Orc"
self.size = "Medium"
self.speed = 30
self.attributes = [("STR", 2), ("CON", 1), ("INT", -2)]
self.features = ["Darkvision", "Relentless Endurance", "Savage Attacks"]
self.skills = ["intimidation"]
self.languages = ["Common", "Orc"]
class Test(race):
def __init__(self, char):
self.race_name = "Test Race"
self.size = "Medium"
self.speed = 30
self.languages = ["Common"]
# self.languages = f.add_language(char.proficiencies.languages, 'Common', 1)
# self.attributes = [(attr, 1) for attr in f.add_attributes(attrs, 2)]
# self.feats = f.add_feat(char, 1)
self.features = ["Darkvision", "Relentless Endurance", "Savage Attacks"]
|
[
"danmead8@gmail.com"
] |
danmead8@gmail.com
|
f84017b04e420b59e0e68cb32ea7d83c2b3eb0e9
|
27df47cd5284b298dd600e367f9ec2437fb3bc22
|
/setup.whd.py
|
2cd0c09528091a9b7d219bbe1fd2baa262e24581
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
ashwani2k/cc-utils
|
29b9c381f652903065f44de33c408fdf1e1f2df9
|
414921992403013d903725a5649103900d88c95d
|
refs/heads/master
| 2022-11-17T16:30:46.137421
| 2020-07-10T08:12:02
| 2020-07-10T08:12:02
| 278,617,577
| 0
| 0
| null | 2020-07-10T11:32:47
| 2020-07-10T11:32:47
| null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
import setuptools
import os
own_dir = os.path.abspath(os.path.dirname(__file__))
def requirements():
yield 'gardener-cicd-libs'
yield 'gardener-cicd-cli'
with open(os.path.join(own_dir, 'requirements.whd.txt')) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
def modules():
return [
]
def version():
with open(os.path.join(own_dir, 'ci', 'version')) as f:
return f.read().strip()
setuptools.setup(
name='gardener-cicd-whd',
version=version(),
description='Gardener CI/CD Webhook Dispatcher',
    python_requires='>=3.8',  # '>=3.8.*' is invalid PEP 440: the .* suffix only pairs with == / !=
py_modules=modules(),
packages=['whd'],
package_data={
        'ci': ['version'],
},
install_requires=list(requirements()),
entry_points={
},
)
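# --- Hedged usage note (not in the original file) ---
# setuptools does not care about the script's filename, so a wheel can be
# built directly from this file:
#
#   python setup.whd.py bdist_wheel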
|
[
"christian.cwienk@sap.com"
] |
christian.cwienk@sap.com
|
61f58dbbbbf3a9c7c2141c31686ef310aeedc77b
|
c4ee9811b04b5340068a1f0f59469a1f3187b892
|
/fermentation/Drivers/MAX31865.py
|
03284ae6b989c9132e67f921770641fe43e18f11
|
[] |
no_license
|
aunsbjerg/fermentationpi
|
c3037f728ea370a04a539490398f83de045575cb
|
ad48862e187dcb1f47f006def73ddaedf20e0ffe
|
refs/heads/master
| 2020-05-14T09:14:25.637521
| 2019-04-22T15:29:44
| 2019-04-22T16:11:45
| 181,735,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,865
|
py
|
#!/usr/bin/python
# -*- coding: utf-8; python-indent-offset: 4; -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Stephen P. Smith
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import math
import RPi.GPIO as GPIO
def resistance_to_celsius(resistance, rtd_nominal=100.0):
"""
Converts a resistance value to temperature in celsius, given a nominal RTD value.
http://www.analog.com/media/en/technical-documentation/application-notes/AN709_0.pdf
"""
RTD_A = 3.9083e-3
RTD_B = -5.775e-7
Z1 = -RTD_A
Z2 = RTD_A * RTD_A - (4 * RTD_B)
Z3 = (4 * RTD_B) / rtd_nominal
Z4 = 2 * RTD_B
temp = Z2 + (Z3 * resistance)
temp = (math.sqrt(temp) + Z1) / Z4
if temp >= 0:
return temp
rpoly = resistance
temp = -242.02
temp += 2.2228 * rpoly
rpoly *= resistance # square
temp += 2.5859e-3 * rpoly
rpoly *= resistance # ^3
temp -= 4.8260e-6 * rpoly
rpoly *= resistance # ^4
temp -= 2.8183e-8 * rpoly
rpoly *= resistance # ^5
temp += 1.5243e-10 * rpoly
return temp
class MAX31865:
"""
Reading Temperature from the MAX31865 with GPIO using the Raspberry Pi.
Any 4 pins can be used to establish software based SPI to MAX31865.
Adapted from: https://github.com/hackenbergstefan/MAX31865
"""
REGISTERS = {
'config': 0,
'rtd_msb': 1,
'rtd_lsb': 2,
'high_fault_threshold_msb': 3,
'high_fault_threshold_lsb': 4,
'low_fault_threshold_msb': 5,
'low_fault_threshold_lsb': 6,
'fault_status': 7,
}
"""
Definition of register addresses. (https://datasheets.maximintegrated.com/en/ds/MAX31865.pdf)
Name ReadAddress WriteAddress PorState Access
Configuration 00h 80h 00h R/W
RTD MSBs 01h — 00h R
RTD LSBs 02h — 00h R
High Fault Threshold MSB 03h 83h FFh R/W
High Fault Threshold LSB 04h 84h FFh R/W
Low Fault Threshold MSB 05h 85h 00h R/W
Low Fault Threshold LSB 06h 86h 00h R/W
Fault Status 07h — 00h R
"""
REGISTERS_WRITE_MASK = 0x80
"""Mask to be ORed to register addresses when writing."""
REGISTER_CONFIGURATION_ONE_SHOT = 0b10100010
"""
    Configuration 0b10100010 == 0xA2:
bit 7: Vbias -> 1 (ON)
bit 6: Conversion Mode -> 0 (MANUAL)
bit 5: 1-shot -> 1 (ON)
bit 4: 3-wire select -> 0 (2 or 4 wire config)
bit 3-2: fault detection cycle -> 0 (none)
bit 1: fault status clear -> 1 (clear any fault)
bit 0: 50/60 Hz filter select -> 0 (60Hz)
"""
REGISTER_CONFIGURATION_ONE_SHOT_3_WIRE = REGISTER_CONFIGURATION_ONE_SHOT | 0b00010000
"""
Configuration 0b10110010 == 0xB2:
bit 7: Vbias -> 1 (ON)
bit 6: Conversion Mode -> 0 (MANUAL)
bit 5: 1-shot -> 1 (ON)
bit 4: 3-wire select -> 1 (3 wire config)
bit 3-2: fault detection cycle -> 0 (none)
bit 1: fault status clear -> 1 (clear any fault)
bit 0: 50/60 Hz filter select -> 0 (60Hz)
"""
def __init__(self, cs_pin, miso_pin, mosi_pin, clk_pin, ref_resistor=430.0, rtd_nominal=100.0, number_of_wires=2):
        assert 2 <= number_of_wires <= 4, "number_of_wires must be 2, 3, or 4"
self._offset = 0.0
self._cs_pin = cs_pin
self._miso_pin = miso_pin
self._mosi_pin = mosi_pin
self._clk_pin = clk_pin
self._ref_resistor = ref_resistor
self._rtd_nominal = rtd_nominal
self._number_of_wires = number_of_wires
self._setup_GPIO()
def _setup_GPIO(self):
"""
Setup GPIOs for SPI connection:
CS: Chip Select (also called SS)
CLK: Serial Clock
MISO: Master In Slave Out (SDO at slave)
MOSI: Master Out Slave In (SDI at slave)
"""
GPIO.setup(self._cs_pin, GPIO.OUT)
GPIO.setup(self._miso_pin, GPIO.IN)
GPIO.setup(self._mosi_pin, GPIO.OUT)
GPIO.setup(self._clk_pin, GPIO.OUT)
GPIO.output(self._cs_pin, GPIO.HIGH)
GPIO.output(self._clk_pin, GPIO.LOW)
GPIO.output(self._mosi_pin, GPIO.LOW)
def __enter__(self):
return self
def __exit__(self, *k):
pass
def offset(self, offset):
"""
Adjust the temperature offset in celsius.
Offset will be added to the temperature reading in temperature()
"""
self._offset = offset
def temperature(self):
"""
Read out temperature. Conversion to °C included.
"""
rtd = self._read_rtd()
resistance = self._read_resistance(rtd)
return resistance_to_celsius(resistance, rtd_nominal=self._rtd_nominal) + self._offset
def _write_register(self, register, data):
"""
Write data to register.
:param register: Either name or address of register.
:param data: Single byte to be written.
"""
GPIO.output(self._cs_pin, GPIO.LOW)
if isinstance(register, str):
register = self.REGISTERS[register]
register |= self.REGISTERS_WRITE_MASK
self._send(register)
self._send(data)
GPIO.output(self._cs_pin, GPIO.HIGH)
def _read_register(self, register):
"""
Read data from register.
:param register: Either name or address of register.
:return: One byte of data.
"""
GPIO.output(self._cs_pin, GPIO.LOW)
if isinstance(register, str):
register = self.REGISTERS[register]
self._send(register)
data = self._recv()
GPIO.output(self._cs_pin, GPIO.HIGH)
return data
def _read_registers(self):
"""
Read all registers.
:return: List of 8 bytes data.
"""
# NOTE: Reusage of self.read_register is slower but more clean.
data = [self._read_register(r) for r in range(len(self.REGISTERS))]
return data
def _read_rtd(self):
"""
Read RTD from sensor board
"""
if self._number_of_wires == 3:
self._write_register('config', MAX31865.REGISTER_CONFIGURATION_ONE_SHOT_3_WIRE)
else:
self._write_register('config', MAX31865.REGISTER_CONFIGURATION_ONE_SHOT)
# Sleep to wait for conversion (Conversion time is less than 100ms)
time.sleep(0.1)
temp = self._read_register('rtd_msb')
temp = (temp << 8) | self._read_register('rtd_lsb')
# Check if error bit was set
if temp & 0x01:
raise MAX31865FaultError(self)
return temp >> 1
    def _read_resistance(self, rtd):
        # Float literal keeps the ratio correct under Python 2's integer division too.
        resistance = rtd / 32768.0
        return resistance * self._ref_resistor
def _send(self, byte):
"""
Send one byte via configured SPI.
"""
for bit in range(8):
GPIO.output(self._clk_pin, GPIO.HIGH)
if (byte & 0x80):
GPIO.output(self._mosi_pin, GPIO.HIGH)
else:
GPIO.output(self._mosi_pin, GPIO.LOW)
byte <<= 1
GPIO.output(self._clk_pin, GPIO.LOW)
def _recv(self):
"""
Receive one byte via configured SPI.
"""
byte = 0x00
for bit in range(8):
GPIO.output(self._clk_pin, GPIO.HIGH)
byte <<= 1
if GPIO.input(self._miso_pin):
byte |= 0x1
GPIO.output(self._clk_pin, GPIO.LOW)
return byte
class MAX31865FaultError(Exception):
"""
Fault handling of MAX31865.
MAX31865 includes onchip fault detection.
TODO: Improve fault detection. Currently only status register is read.
"""
def __init__(self, max31865):
self.max31865 = max31865
super(MAX31865FaultError, self).__init__(self.status_message())
def status_message(self):
"""
10 Mohm resistor is on breakout board to help
detect cable faults
bit 7: RTD High Threshold / cable fault open
bit 6: RTD Low Threshold / cable fault short
bit 5: REFIN- > 0.85 x VBias -> must be requested
bit 4: REFIN- < 0.85 x VBias (FORCE- open) -> must be requested
bit 3: RTDIN- < 0.85 x VBias (FORCE- open) -> must be requested
bit 2: Overvoltage / undervoltage fault
bits 1,0 don't care
"""
status = self.max31865._read_register('fault_status')
if status & 0x80:
return "High threshold limit (Cable fault/open)"
if status & 0x40:
return "Low threshold limit (Cable fault/short)"
if status & 0x04:
return "Overvoltage or Undervoltage Error"
|
[
"mikkelaunsbjerg@gmail.com"
] |
mikkelaunsbjerg@gmail.com
|
4e8f0f09dabd4343c18439e5ff5521876f172bb6
|
e2b0849c0fb6575aadc57cb91b473990317b9162
|
/Arreglos/Multidimensionales/Help/Multiplicacion.py
|
27f4443735f10d7828c362084fc4d45d9bca5df0
|
[] |
no_license
|
YaelGF/Estructura-Datos
|
486e9c544e2359daaf48dbf2ce049a98b6d2bb4d
|
3e5d96862629569cae3c6cdcc3fb834752ac54da
|
refs/heads/main
| 2023-04-10T22:19:52.077363
| 2021-04-22T18:51:03
| 2021-04-22T18:51:03
| 345,890,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
import numpy as np
# Matrix multiplication via an explicit triple loop
arr1 = np.array([[1,2,3],[4,5,6],[7,8,9]])
arr2 = np.array([[9,8,7],[6,5,4],[3,2,1]])
arr = np.zeros((3,3))
for r in range(0,3):
for c in range(0,3):
for k in range(0,3):
arr[r,c] += arr1[r,k] * arr2[k,c]
print(arr)
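# The triple loop computes the same thing as numpy's built-in matrix
# product; this cross-check should always pass.
assert np.array_equal(arr, arr1 @ arr2)
print(arr1 @ arr2)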
|
[
"1719110736@utectulancingo.edu.mx"
] |
1719110736@utectulancingo.edu.mx
|
a5f5319c815f5da20cfbecf8e71dfa35a6cd7a84
|
06bd9a58a56202096713ea56b8508e2eb6ac2d01
|
/test.py
|
0f610fa15685c4975d272a27eaafc88d1d50d773
|
[] |
no_license
|
patelanuj28/bottle
|
f4656396780ded39d0403f3fb9324f7770e96aca
|
f4b7a7ee3146f16a472094b2d4054242e5bfa65f
|
refs/heads/master
| 2020-05-29T19:21:03.533546
| 2013-12-08T04:23:06
| 2013-12-08T04:23:06
| 15,018,073
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from bottle import route, run
from bottle import * # or route
#from function import *
debug(True)
@get('/')       # or @route('/')
@get('/login')  # or @route('/login')
def login():
return '''
<form action="/login" method="post">
Username: <input name="username" type="text" />
Password: <input name="password" type="password" />
<input value="Login" type="submit" />
</form> '''
def check_login(username, password):
    # Renamed from "check_logib" and the stray `self` parameter removed so
    # that the call in do_login() actually resolves.
    if username == "admin" and password == "admin":
        return True
    else:
        return False
@post('/login')  # or @route('/login', method='POST')
def do_login():
username = request.forms.get('username')
password = request.forms.get('password')
if check_login(username, password):
return "<p>Your login information was correct.</p>"
else:
return "<p>Login failed.</p>"
@error(404)
def error404(error):
return 'Nothing here, sorry'
run(host='localhost', port=8080, debug=True)
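# Illustrative hardening note (editorial addition): a real credential check
# would avoid a literal comparison; one common sketch uses a constant-time
# comparison from the standard library, e.g.:
#
#     import hmac
#     def check_login(username, password):
#         return username == "admin" and hmac.compare_digest(password, "admin")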
|
[
"patelanuj28@gmail.com"
] |
patelanuj28@gmail.com
|
4b0cf62503921723253b96546bf05b8cfd198c7b
|
e9d0a40f78a8019102a824ab80285c76410c41ec
|
/tests/operators/gpu/test_ms_resize_nearest_neighbor_grad.py
|
cd90af1f88cb19b3c85d5f46d96627100d6eb36a
|
[
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
x200510iong/akg
|
80aefca89738dd6190132971c8cd291fc2b38f4a
|
e7c12916b3870d3ba4cfa9b3dcec3eef55915429
|
refs/heads/master
| 2022-12-23T21:37:37.673056
| 2020-09-27T07:36:27
| 2020-09-27T07:36:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,036
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import math
import numpy as np
import time
import akg.topi as topi
from akg.ops.poly_gpu import resize_nearest_neighbor_grad_manual, resize_nearest_neighbor_grad_auto
from gen_random import random_gaussian
from akg.utils import kernel_exec as utils
from tensorio import compare_tensor
def resize_nearest_grad(grad, size, align_corners, dtype):
inshape = grad.shape
if align_corners:
scale_h = (inshape[2] - 1) / (size[0] - 1)
scale_w = (inshape[3] - 1) / (size[1] - 1)
else:
scale_h = inshape[2] / size[0]
scale_w = inshape[3] / size[1]
oshape = (inshape[0], inshape[1], size[0], size[1])
output = np.full(oshape, np.nan, dtype)
for n in range(oshape[0]):
for c in range(oshape[1]):
for h in range(oshape[2]):
for w in range(oshape[3]):
                    if align_corners:
                        in_h = int(round(scale_h * h))
                        in_w = int(round(scale_w * w))
                    else:
                        # the bare floor() was undefined here; use math.floor
                        in_h = int(math.floor(scale_h * h))
                        in_w = int(math.floor(scale_w * w))
in_h = max(min(in_h, inshape[2]-1), 0)
in_w = max(min(in_w, inshape[3]-1), 0)
output[n, c, h, w] = grad[n, c, in_h, in_w]
return output
def gen_data(shape, size, align_corners, dtype):
support_list = {"float16": np.float16, "float32": np.float32}
grad = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
expect = resize_nearest_grad(grad, size, align_corners, dtype)
outshape = [shape[0], shape[1], size[0], size[1]]
output = np.full(outshape, np.nan, dtype)
return grad, output, expect
def test_ms_resize_grad(shape, size, dtype, align_corners, poly_sch=False):
op_attr = [size, align_corners]
if poly_sch:
mod = utils.op_build(resize_nearest_neighbor_grad_auto, [shape], [dtype], op_attr, attrs={"target":"cuda"})
else:
mod = utils.op_build(resize_nearest_neighbor_grad_manual, [shape], [dtype], op_attr)
data, output, expect = gen_data(shape, size, align_corners, dtype)
output = utils.mod_launch(mod, (data, output), expect = expect)
    compare_res = compare_tensor(output, expect, rtol=5e-03, atol=1e-08)
    assert compare_res, "output mismatch with expected result"
if __name__ == '__main__':
test_ms_resize_grad((32, 32, 64, 64), (128, 128), 'float16', True)
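# Worked example (editorial addition): for the call above, the gradient has
# spatial shape (64, 64) and is mapped onto (128, 128) with align_corners=True,
# so scale_h = (64 - 1) / (128 - 1) ~= 0.496 and output row h reads input row
# round(0.496 * h).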
|
[
"zhangrenwei1@huawei.com"
] |
zhangrenwei1@huawei.com
|
4e6e1573f29a52149bacc3d5e2dfb0c49392cbf4
|
7ff555c8adf2dccc4195eb28567897d61594bd2c
|
/nav/publisher.py
|
2f8aa737dae67e647d17376f5945696e9c1405bb
|
[] |
no_license
|
TetrisCat/auto_nav
|
af4d8ba799e03f077c534648b353d51cf4615d9e
|
46c1c55a646dde98f3cc1f010adb264397918224
|
refs/heads/master
| 2020-12-18T20:18:49.862413
| 2020-04-24T12:41:29
| 2020-04-24T12:41:29
| 235,510,835
| 0
| 0
| null | 2020-01-22T06:12:31
| 2020-01-22T06:12:31
| null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
import rospy
import time
import math
from std_msgs.msg import String
class pubRpi:
def __init__(self):
self.node = rospy.init_node('pub2rpi',anonymous = True)
self.pubR = rospy.Publisher('cmd_rotate',String,queue_size = 10)
self.pubS = rospy.Publisher('cmd_stepper',String,queue_size = 10)
def publish_rotate(self,signal):
valtoPub = '-1' if signal < 0 else '1'
self.pubR.publish(valtoPub)
def publish_stepper(self,signal):
valtoPub = '-1' if signal < -5 else '10' if signal < 5 else '1'
self.pubS.publish(valtoPub)
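# Illustrative usage (editorial addition; assumes a running ROS master):
#
#     pub = pubRpi()
#     pub.publish_rotate(-3)   # publishes '-1' (negative signal)
#     pub.publish_stepper(2)   # publishes '10' (-5 <= signal < 5)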
|
[
"adricpjw@gmail.com"
] |
adricpjw@gmail.com
|
4f467685fd2537342ece8b024cefcc5f2c17aab5
|
945e10c87af8c555dfde1ed96597598473816039
|
/contact/models.py
|
d198b82b316e394cb9e5f45ba3be1febc50fc92b
|
[] |
no_license
|
samuelbustamante/sanluisautomotores
|
87943cd99091d86e897ed9f41e9fcb10568b3f36
|
3ed4a6490f945fd060fd71010d1eb06bcff5abab
|
refs/heads/master
| 2020-04-06T16:19:09.037639
| 2012-08-29T15:43:49
| 2012-08-29T15:43:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
class Message(models.Model):
full_name = models.CharField(max_length=100)
email = models.EmailField()
message = models.TextField()
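# Illustrative sketch (editorial addition, not part of the original app): a
# matching ModelForm could expose the three fields above, e.g.:
#
#     from django.forms import ModelForm
#
#     class MessageForm(ModelForm):
#         class Meta:
#             model = Message
#             fields = ['full_name', 'email', 'message']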
|
[
"samuelbustamant@gmail.com"
] |
samuelbustamant@gmail.com
|
00226ee05791fa621c70b3d172311a1631ccaef2
|
1298b777c595babcaa2592ec67a8cb5455b2225f
|
/biedronki.py
|
eca9e342092ba3aae9d19c38086a32f83cb4fb4a
|
[] |
no_license
|
mateusz-bondarczuk/Biedronki
|
50e5df9efe2d0d289d851947546c446c997b3909
|
6ea75d1a58c229936bd7a9a568dbd6f206c05259
|
refs/heads/master
| 2021-04-03T13:01:40.079815
| 2020-03-18T22:29:45
| 2020-03-18T22:29:45
| 248,356,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,274
|
py
|
#!/usr/bin/python3
# Copyright (C) 2020 Mateusz Bondarczuk
# Written with the help of the handbook "PYTHON Kurs Programowania Na Prostych Przykładach", Biblioteczka Komputer Świat
import pygame
import os
import random
import threading
pygame.init()
# game window size
szer = 600
wys = 600
# list of direction values for ladybug movement
#wektory = [-10, 0, 10]
coPokazuje = "menu"
punkty = 0.0
vx, vy = 0, 0
iloscBiedronek = 10
screen = pygame.display.set_mode((szer,wys))
def napisz(tekst, x, y, rozmiar) :
cz = pygame.font.SysFont("Conacry", rozmiar)
rend = cz.render(tekst, 1, (255,100,100))
x = (szer - rend.get_rect().width)/2
# y = (wys - rend.get_rect().height)/2
screen.blit(rend, (x,y))
def dodPunkt():
global punkty
if coPokazuje == "gramy" :
punkty += 0.1
def zerPunkty():
global punkty
punkty = 0
class Biedronka() :
def __init__(self, x, y, vx, vy):
self.x = x
self.y = y
self.vx = vx
self.vy = vy
self.szerB = 32
self.wysB = 32
self.ksztalt = pygame.Rect(self.x, self.y, self.szerB, self.wysB)
self.grafika = pygame.image.load(os.path.join('bied.png'))
def rysuj(self):
screen.blit(self.grafika, (self.x, self.y))
def ruch(self):
self.x += self.vx
self.y += self.vy
self.ksztalt = pygame.Rect(self.x, self.y, self.szerB, self.wysB)
def czyZezarla(self, robal):
if self.ksztalt.colliderect(robal):
return True
else:
return False
class Mszyca():
def __init__(self, x, y):
self.x = x
self.y = y
self.szerM = 32
self.wysM = 32
self.ksztalt = pygame.Rect(self.x, self.y, self.szerM, self.wysM)
self.grafika = pygame.image.load(os.path.join('mszyca.png'))
def rysuj(self):
screen.blit(self.grafika, (self.x, self.y))
def ruch(self, vx, vy):
self.x += vx
self.y += vy
self.ksztalt = pygame.Rect(self.x, self.y, self.szerM, self.wysM)
# create the ladybugs
biedry = []
def stworzBiedry():
global biedry
for i in range(iloscBiedronek):
#bx = random.randint(0, 568)
#by = random.randint(0, 568)
        # creates ladybugs at a given screen position (bx, by), moving in one of 8 directions (e.g. 10,10 or 10,0)
#biedra = Biedronka(bx, by, random.choice(wektory), random.choice(wektory))
biedra = Biedronka(random.randint(0, 568), random.randint(0, 568), random.randint(-10, 10), random.randint(-10, 10))
        # eliminate ladybugs that stand still
while biedra.vx == 0 and biedra.vy == 0 :
#biedra = Biedronka(bx, by, random.choice(wektory), random.choice(wektory))
#biedra = Biedronka(bx, by, random.randint(-10, 10), random.randint(-10, 10))
biedra = Biedronka(random.randint(0, 568), random.randint(0, 568), random.randint(-10, 10), random.randint(-10, 10))
biedry.append(biedra)
stworzBiedry()
while True:
dodPunkt()
    # react to key presses and clicks on the window icons
for event in pygame.event.get() :
if event.type == pygame.QUIT :
pygame.quit()
quit()
        # aphid movement
if event.type == pygame.KEYDOWN :
            # move up
if event.key == pygame.K_UP :
vx = 0
vy = -10
            # move down
elif event.key == pygame.K_DOWN :
vx = 0
vy = 10
            # move left
elif event.key == pygame.K_LEFT :
vx = -10
vy = 0
            # move right
elif event.key == pygame.K_RIGHT :
vx = 10
vy = 0
elif event.key == pygame.K_ESCAPE :
pygame.quit()
quit()
elif event.key == pygame.K_SPACE :
if coPokazuje != "gramy" :
                    # create a stationary aphid at a random spot on the board
mx = random.randint(0, 568)
my = random.randint(0, 568)
m = Mszyca(mx, my)
vx, vy = 0, 0
coPokazuje = "gramy"
zerPunkty()
                    # remove the old ladybugs
biedry = []
                    # create new ladybugs
stworzBiedry()
screen.fill((0,128,0))
if coPokazuje == "menu" :
napisz("Naciśnij spację aby rozpocząć.", 20, 300, 36)
grafika = pygame.image.load(os.path.join("bied.png"))
for i in range(5):
x = random.randint(100, 500)
y = random.randint(100, 200)
screen.blit(grafika, (x, y))
pygame.time.wait(500)
elif coPokazuje == "gramy":
        # draw the ladybugs on the board and set them in motion
for b in biedry:
b.rysuj()
b.ruch()
        # make them bounce off the edges of the board
for b in biedry:
            # bounce off the left and right walls
if b.x <= 0 or (b.x + b.szerB) >= szer :
b.vx = b.vx * -1
            # bounce off the top and bottom walls
elif b.y <= 0 or (b.y + b.wysB) >= wys :
b.vy = b.vy * -1
        # set the aphid in motion
m.ruch(vx, vy)
        # draw the aphid on the screen
m.rysuj()
        # aphid bounces off the left and right walls
if m.x <= 0 or (m.x + m.szerM) >= szer :
vx = vx * -1
        # aphid bounces off the top and bottom walls
elif m.y <= 0 or (m.y + m.wysM) >= wys :
vy = vy * -1
        # when a ladybug collides with the aphid
for b in biedry :
if b.czyZezarla(m.ksztalt) :
coPokazuje = "koniec"
napisz("PUNKTY: " + str(round(punkty)), 100, 50, 32)
        # movement speed of the objects
pygame.time.wait(80)
elif coPokazuje == "koniec" :
napisz("KONIEC GRY!!!", 100, 150, 56)
napisz("PUNKTY: "+str(round(punkty)), 100, 350, 32)
napisz("naciśnij spację aby zagrać jeszcze raz ", 100, 400, 28)
napisz("lub ESC aby zakończyć grę ", 100, 430, 28)
    # refresh the screen
pygame.display.update()
|
[
"mateusz.bondarczuk@gmail.com"
] |
mateusz.bondarczuk@gmail.com
|
9ff1e4fa352ee15f2ec730f85626ad1c7a382d0d
|
f6e919b03d4bbde65bfa26ee59e05568f623a65d
|
/Dis.py
|
1c743a9bfe8662d35a198c1fe0eaacfbfde23f1f
|
[] |
no_license
|
chrisleewoo/soundbug
|
94e223505880ad9708d22f7f35b437a5f0c5e941
|
0eab42c69a320cf1b7e02a65edb9de6e7f65e45c
|
refs/heads/master
| 2020-09-25T09:16:44.395825
| 2019-12-05T21:23:38
| 2019-12-05T21:23:38
| 225,972,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
import sys
import dis
with open('code2Trace.dat') as c2T:
cnt=1
new_string = ''
for line in c2T:
#read_line = t2D.readline()
        if line.strip() == '**EOT**':
            # lines from file iteration keep their trailing newline, so strip
            # before comparing; the with-block already closes the file
            break
if (".py" in line):
chopped_line = ''
marker = False
for c in line:
#need to remove the front bit
if c.startswith('\t'):
pass
elif (marker):
chopped_line += c
cnt += 1
#new_string += chopped_line
if c == ':':
marker = True
new_string += chopped_line
#print( chopped_line)
#dis.dis('this = 2')
#print(new_string)
dis.dis(new_string)
print('**EOD**')
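# Illustrative note (editorial addition): dis.dis also accepts a source string
# directly and compiles it before disassembling, e.g. dis.dis('x = 1 + 2')
# prints the CPython bytecode for that statement.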
|
[
"noreply@github.com"
] |
chrisleewoo.noreply@github.com
|
606e8d98c93f59c9b0e6a06f3d12dd5a142a0fb8
|
16f2b1f89179cb6f398db396991e0fdec483a942
|
/mainapp/migrations/0018_auto_20210215_2247.py
|
2d1f2bf0a74ee3f503afa7bd07b444517f99f6ba
|
[] |
no_license
|
alexeyklem/shop
|
a497c2ce340461da63f40cab28bf7e34ff41b215
|
78b47c069e899f4bdfe6a424b27c3e84ca062738
|
refs/heads/master
| 2023-03-02T06:19:07.265287
| 2021-02-15T22:29:42
| 2021-02-15T22:29:42
| 335,368,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
# Generated by Django 3.1.6 on 2021-02-15 19:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainapp', '0017_auto_20210212_0248'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.customer', verbose_name='Владелец'),
),
migrations.AlterField(
model_name='cartproduct',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.customer', verbose_name='Покупатель'),
),
migrations.AlterField(
model_name='customer',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
),
migrations.AlterField(
model_name='order',
name='customer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_orders', to='mainapp.customer', verbose_name='Покупатель'),
),
]
|
[
"KlepaN567@gmail.com"
] |
KlepaN567@gmail.com
|
d299b1906bc983e5616962d0ea76e3ed65cd2b48
|
d6d87140d929262b5228659f89a69571c8669ec1
|
/airbyte-cdk/python/unit_tests/sources/declarative/auth/test_token_auth.py
|
2c54e8883b8dcdf127259eb2b1314dba794d2b27
|
[
"MIT",
"Elastic-2.0"
] |
permissive
|
gasparakos/airbyte
|
b2bb2246ec6a10e1f86293da9d86c61fc4a4ac65
|
17c77fc819ef3732fb1b20fa4c1932be258f0ee9
|
refs/heads/master
| 2023-02-22T20:42:45.400851
| 2023-02-09T07:43:24
| 2023-02-09T07:43:24
| 303,604,219
| 0
| 0
|
MIT
| 2020-10-13T06:18:04
| 2020-10-13T06:06:17
| null |
UTF-8
|
Python
| false
| false
| 3,666
|
py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import logging
import pytest
import requests
from airbyte_cdk.sources.declarative.auth.token import ApiKeyAuthenticator, BasicHttpAuthenticator, BearerAuthenticator
from requests import Response
LOGGER = logging.getLogger(__name__)
resp = Response()
config = {"username": "user", "password": "password", "header": "header"}
options = {"username": "user", "password": "password", "header": "header"}
@pytest.mark.parametrize(
"test_name, token, expected_header_value",
[
("test_static_token", "test-token", "Bearer test-token"),
("test_token_from_config", "{{ config.username }}", "Bearer user"),
("test_token_from_options", "{{ options.username }}", "Bearer user"),
],
)
def test_bearer_token_authenticator(test_name, token, expected_header_value):
"""
Should match passed in token, no matter how many times token is retrieved.
"""
token_auth = BearerAuthenticator(token, config, options=options)
header1 = token_auth.get_auth_header()
header2 = token_auth.get_auth_header()
prepared_request = requests.PreparedRequest()
prepared_request.headers = {}
token_auth(prepared_request)
assert {"Authorization": expected_header_value} == prepared_request.headers
assert {"Authorization": expected_header_value} == header1
assert {"Authorization": expected_header_value} == header2
@pytest.mark.parametrize(
"test_name, username, password, expected_header_value",
[
("test_static_creds", "user", "password", "Basic dXNlcjpwYXNzd29yZA=="),
("test_creds_from_config", "{{ config.username }}", "{{ config.password }}", "Basic dXNlcjpwYXNzd29yZA=="),
("test_creds_from_options", "{{ options.username }}", "{{ options.password }}", "Basic dXNlcjpwYXNzd29yZA=="),
],
)
def test_basic_authenticator(test_name, username, password, expected_header_value):
"""
Should match passed in token, no matter how many times token is retrieved.
"""
token_auth = BasicHttpAuthenticator(username=username, password=password, config=config, options=options)
header1 = token_auth.get_auth_header()
header2 = token_auth.get_auth_header()
prepared_request = requests.PreparedRequest()
prepared_request.headers = {}
token_auth(prepared_request)
assert {"Authorization": expected_header_value} == prepared_request.headers
assert {"Authorization": expected_header_value} == header1
assert {"Authorization": expected_header_value} == header2
@pytest.mark.parametrize(
"test_name, header, token, expected_header, expected_header_value",
[
("test_static_token", "Authorization", "test-token", "Authorization", "test-token"),
("test_token_from_config", "{{ config.header }}", "{{ config.username }}", "header", "user"),
("test_token_from_options", "{{ options.header }}", "{{ options.username }}", "header", "user"),
],
)
def test_api_key_authenticator(test_name, header, token, expected_header, expected_header_value):
"""
Should match passed in token, no matter how many times token is retrieved.
"""
token_auth = ApiKeyAuthenticator(header=header, api_token=token, config=config, options=options)
header1 = token_auth.get_auth_header()
header2 = token_auth.get_auth_header()
prepared_request = requests.PreparedRequest()
prepared_request.headers = {}
token_auth(prepared_request)
assert {expected_header: expected_header_value} == prepared_request.headers
assert {expected_header: expected_header_value} == header1
assert {expected_header: expected_header_value} == header2
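# Illustrative usage outside pytest (editorial addition): the authenticators
# can be exercised directly with the same config/options interpolation, e.g.:
#
#     auth = BearerAuthenticator("{{ config.username }}", config, options=options)
#     print(auth.get_auth_header())   # {'Authorization': 'Bearer user'}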
|
[
"noreply@github.com"
] |
gasparakos.noreply@github.com
|
1030289cd14076ec5a0b85f5b22cb1dce6bd11df
|
7dbbf1d7bc43c13f7fcb5f337a6b3664dba09129
|
/pydbgen/pbclass/protoc_gen_json.py
|
807718cc31acba2cd9237246d9b1700dea12839b
|
[
"MIT"
] |
permissive
|
ppolxda/pydbgen
|
12b082a93539e3aef6a002cd895acc5ca8589df6
|
000d1eb87c272a9549c99cd890c9866ae0c56147
|
refs/heads/master
| 2022-06-09T11:25:42.656398
| 2019-12-22T06:01:14
| 2019-12-22T06:01:14
| 200,993,919
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,162
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
import six
import json
import argparse
import itertools
from collections import OrderedDict
from google.protobuf.compiler import plugin_pb2 as plugin
from google.protobuf.descriptor_pb2 import FieldOptions
from google.protobuf.descriptor_pb2 import DescriptorProto
from google.protobuf.descriptor_pb2 import EnumDescriptorProto
from google.protobuf.descriptor import FieldDescriptor
from pydbgen.pbclass import data_define_pb2
MY_OPTIONS = [
getattr(data_define_pb2, key)
for key in FieldOptions._extensions_by_name.keys()
if hasattr(data_define_pb2, key)
]
LABEL_CHANGE = {
FieldDescriptor.LABEL_OPTIONAL: 'optional',
FieldDescriptor.LABEL_REQUIRED: 'required',
FieldDescriptor.LABEL_REPEATED: 'repeated',
}
TYPE_CHANGE = {
FieldDescriptor.TYPE_DOUBLE: 'double',
FieldDescriptor.TYPE_FLOAT: 'float',
FieldDescriptor.TYPE_INT64: 'int64',
FieldDescriptor.TYPE_UINT64: 'uint64',
FieldDescriptor.TYPE_INT32: 'int32',
FieldDescriptor.TYPE_FIXED64: 'fixed64',
FieldDescriptor.TYPE_FIXED32: 'fixed32',
FieldDescriptor.TYPE_BOOL: 'bool',
FieldDescriptor.TYPE_STRING: 'string',
FieldDescriptor.TYPE_GROUP: 'group',
FieldDescriptor.TYPE_MESSAGE: 'message',
FieldDescriptor.TYPE_BYTES: 'bytes',
FieldDescriptor.TYPE_UINT32: 'uint32',
FieldDescriptor.TYPE_ENUM: 'enum',
FieldDescriptor.TYPE_SFIXED32: 'sfixed32',
FieldDescriptor.TYPE_SFIXED64: 'sfixed64',
FieldDescriptor.TYPE_SINT32: 'sint32',
FieldDescriptor.TYPE_SINT64: 'sint64'
}
TYPE_DEFVAL = {
FieldDescriptor.TYPE_DOUBLE: 0.0,
FieldDescriptor.TYPE_FLOAT: 0.0,
FieldDescriptor.TYPE_INT64: 0,
FieldDescriptor.TYPE_UINT64: 0,
FieldDescriptor.TYPE_INT32: 0,
FieldDescriptor.TYPE_FIXED64: 0,
FieldDescriptor.TYPE_FIXED32: 0,
FieldDescriptor.TYPE_BOOL: False,
FieldDescriptor.TYPE_STRING: '',
FieldDescriptor.TYPE_GROUP: '',
FieldDescriptor.TYPE_MESSAGE: '',
FieldDescriptor.TYPE_BYTES: '',
FieldDescriptor.TYPE_UINT32: 0,
FieldDescriptor.TYPE_ENUM: 0,
FieldDescriptor.TYPE_SFIXED32: 0,
FieldDescriptor.TYPE_SFIXED64: 0,
FieldDescriptor.TYPE_SINT32: 0,
FieldDescriptor.TYPE_SINT64: 0
}
class EnumPathIndex(object):
"""PROTOC PATH INDEX."""
NAME = 1
FIELD = 2
NESTED = 3
MESSAGE = 4
ENUM = 5
SERVICE = 6
class Cmdoptions(object):
def __init__(self):
parser = argparse.ArgumentParser(
description='pydbgen.pbclass.protoc_gen_json')
parser.add_argument('-o', '--output',
type=str, default=None,
help='ouput path')
parser.add_argument('-e', '--encoding', default='utf8',
help='output encoding(default: utf8)')
args = parser.parse_args()
self.output = args.output
self.encoding = args.encoding
def strip(val):
while val and val[0] == '/':
val = val[1:]
return val.strip()
def _locations(locations, pathtype, i, last_path=tuple()):
# location.leading_comments
# location.trailing_comments
# location.leading_detached_comments
# result = locations[local_path + (EnumPathIndex.FIELD, i)]
full_path = last_path + (pathtype, i)
result = locations.get(full_path, None)
if result is None:
class EnumLog(object):
trailing_comments = ''
leading_comments = ''
leading_detached_comments = ''
return EnumLog
return result
def default_json(name, typename, comment='', fields={},
options={}, nesteds={}, enums={}):
assert isinstance(fields, dict)
assert isinstance(options, dict)
assert isinstance(nesteds, dict)
return OrderedDict([
("type", typename),
("name", name),
("comment", comment),
("fields", fields),
("options", options),
("enums", enums),
("nesteds", nesteds),
])
def field_json(name, value, type, defval,
               comment, options=None, soptions=None):
    # avoid mutable default arguments: update() on a shared default dict
    # would leak option state across calls
    options = dict(options or {})
    options.update(soptions or {})
return OrderedDict([
("name", name),
("value", value),
("type", type),
("defval", defval),
("comment", comment),
("options", options),
])
def enums2json(items, locations, path=tuple()):
# assert isinstance(items, list)
assert isinstance(locations, dict)
assert isinstance(path, tuple)
# location.leading_comments
# location.trailing_comments
# location.leading_detached_comments
result = {}
for index, item in enumerate(items):
local_path = (EnumPathIndex.ENUM, index)
cur_path = path + local_path
assert isinstance(item, EnumDescriptorProto)
result[item.name] = default_json(
item.name, 'enum',
comment=strip(_locations(locations, EnumPathIndex.ENUM, index, path).leading_comments), # noqa
fields=OrderedDict([(
v.name, field_json(
v.name, v.number, 'int32', 0,
strip(_locations(locations, EnumPathIndex.FIELD, i, cur_path).trailing_comments))) # noqa
for i, v in enumerate(item.value)
]))
return result
def message2json(items, locations, path=tuple()):
# assert isinstance(items, list)
assert isinstance(locations, dict)
assert isinstance(path, tuple)
result = {}
for index, item in enumerate(items):
local_path = (EnumPathIndex.MESSAGE, index)
cur_path = path + local_path
assert isinstance(item, DescriptorProto)
result[item.name] = default_json(
item.name, 'message',
comment=strip(_locations(locations, EnumPathIndex.MESSAGE, index).leading_comments), # noqa
nesteds=message2json(item.nested_type, locations, cur_path),
enums=enums2json(item.enum_type, locations, cur_path),
fields=OrderedDict([(
v.name, field_json(
v.name, v.number,
TYPE_CHANGE.get(v.type, '--'),
v.default_value if v.default_value else TYPE_DEFVAL.get(v.type, ''), # noqa
strip(_locations(locations, EnumPathIndex.FIELD, i, cur_path).trailing_comments), # noqa
options=OrderedDict([
('label', LABEL_CHANGE[v.label]),
('type_name', v.type_name),
('extendee', v.extendee),
('default_value', v.default_value),
('json_name', v.json_name),
]),
soptions=OrderedDict([
(
val.name,
v.options.Extensions[val]
)
for val in MY_OPTIONS
if v.options.HasExtension(val)
]))
) for i, v in enumerate(item.field)
]),
options=OrderedDict([
('message_set_wire_format', item.options.message_set_wire_format), # noqa
('no_standard_descriptor_accessor', item.options.no_standard_descriptor_accessor), # noqa
('deprecated', item.options.deprecated), # noqa
])
)
return result
def generate_json(request, step_files=['pydbgen', 'google/protobuf']):
for filename in request.file_to_generate:
output = OrderedDict([
("type", "root"),
("name", "root"),
("package", "root"),
("filename", filename),
("comment", "root"),
("enums", {}),
("nesteds", {}),
])
for proto_file in request.proto_file:
step = False
for i in step_files:
if proto_file.name.replace('\\', '/').find(i) >= 0:
step = True
if step:
continue
if proto_file.name != filename:
continue
output['filename'] = proto_file.name
output['package'] = proto_file.package
locations = proto_file.source_code_info.location
locations = {
tuple(location.path): location
for location in locations
}
enums = enums2json(proto_file.enum_type, locations)
inset = set(output['enums'].keys()).intersection(set(enums.keys()))
if inset:
raise TypeError('enum name duplicate[{}]'.format(inset))
output['enums'].update(enums)
msgs = message2json(proto_file.message_type, locations)
inset = set(output['nesteds'].keys()).intersection(
set(msgs.keys()))
if inset:
raise TypeError('messages name duplicate[{}]'.format(inset))
output['nesteds'].update(msgs)
yield filename, output
def generate_code(opts, request, response):
for filename, output in generate_json(request):
fout = response.file.add()
if opts.output:
fout.name = opts.output
else:
fout.name = filename + '.json'
fout.content = json.dumps(output, indent=4)
# open('test.json', 'w').write(
# json.dumps(generate_json(request), indent=4))
def main():
# Read request message from stdin
OPTS = Cmdoptions()
if six.PY2:
DATA = sys.stdin.read()
else:
DATA = sys.stdin.buffer.read()
# open('test.dat', 'wb').write(DATA)
# DATA = open('test.dat', 'rb').read()
# Parse request
REQUEST = plugin.CodeGeneratorRequest()
REQUEST.ParseFromString(DATA)
# Create response
RESPONSE = plugin.CodeGeneratorResponse()
# Generate code
generate_code(OPTS, REQUEST, RESPONSE)
# Serialise response message
OUTPUT = RESPONSE.SerializeToString()
# Write to stdout
if six.PY2:
sys.stdout.write(OUTPUT)
else:
sys.stdout.buffer.write(OUTPUT)
if __name__ == '__main__':
main()
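# Illustrative invocation (editorial addition; the plugin path and output
# directory below are hypothetical): as a protoc plugin this module reads a
# serialized CodeGeneratorRequest on stdin, e.g.:
#
#     protoc --plugin=protoc-gen-json=./protoc_gen_json.py \
#            --json_out=./build your.proto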
|
[
"ppol850564@gmail.com"
] |
ppol850564@gmail.com
|
edb66058fa8c0b5659b39a912a8d1c75956908eb
|
bffaa3797ad90d2d48b636e0f2e974dbb870f078
|
/pik/core/models/historized.py
|
c75da492b294980e5535bf47545e6350a17a56a5
|
[
"MIT"
] |
permissive
|
pik-software/pik-django-utils
|
df061ef7e9a59f97db85468164e7dc470d197b07
|
84ff77ef359f333e53232e09db8a59beed8624b4
|
refs/heads/master
| 2023-07-07T00:00:45.624024
| 2021-12-17T07:26:51
| 2021-12-17T07:26:51
| 127,616,073
| 8
| 4
|
MIT
| 2023-09-13T13:49:42
| 2018-04-01T09:38:48
|
Python
|
UTF-8
|
Python
| false
| false
| 202
|
py
|
from django.db import models
from simple_history.models import HistoricalRecords
class Historized(models.Model):
history = HistoricalRecords(inherit=True)
class Meta:
abstract = True
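# Illustrative usage (editorial addition): with HistoricalRecords(inherit=True)
# any concrete subclass gets its own history table, e.g.:
#
#     class Invoice(Historized):
#         number = models.CharField(max_length=32)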
|
[
"pahaz.white@gmail.com"
] |
pahaz.white@gmail.com
|
fa6d2837020c359156534f31435378b02d606b4a
|
90d13ffb6fa1988242886c3e55e4b555fa7d8ad1
|
/Three_Part_Dev_Michael/2013_10_07_test/plan/__init__.py
|
34572cf0774869c49e649315d8a94c30ac5bf348
|
[] |
no_license
|
mclumd/erewhon_systems
|
2c798cd303ca2cb19e80c93c88303af8b9aed5a6
|
93655a96415a01d8f5e49a1f2c244cbfd22b65f2
|
refs/heads/master
| 2021-01-17T16:22:53.528996
| 2016-08-03T19:35:52
| 2016-08-03T19:35:52
| 64,771,684
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13
|
py
|
import pyplan
|
[
"cmaxey@payday.cs.umd.edu"
] |
cmaxey@payday.cs.umd.edu
|
f5de1971d194c0eb53a46ad2d4c31fcb29907bf3
|
d2970ef359537f553e86dc05015b265611bd8f4f
|
/Akash/iD Game Plan Examples/BlockCipher.py
|
ccbc84938982ca4f5510095ed98347aebd0ff130
|
[] |
no_license
|
idcrypt3/camp_2019_07_07
|
cc68c28f9c84a0ad6ac893cb65a0a48502a09af6
|
4c748b60f1553072dbda9d4d226b39a32548521f
|
refs/heads/master
| 2020-06-17T08:23:30.734953
| 2019-07-17T16:29:55
| 2019-07-17T16:29:55
| 195,860,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,528
|
py
|
## Code snippet 1
def pad_message(message, block_size=4):
# Take string message as input and return blocks of message bytes (integers)
message_list = []
chunk = 0
block_count = len(message) // block_size + 1
for c in range(block_count * block_size):
# Shift byte right to make space for the next byte. Most significant bit is from the first character!
chunk = chunk << 8
if c < len(message):
chunk += ord(message[c])
else:
chunk += 0
# Add the chunk if it exceeds block size - 1 (since the next character would push it past the block size)
if chunk.bit_length() > (block_size - 1) * 8:
message_list.append(chunk)
chunk = 0
return message_list
## Code snippet 2
def rebuild_message(message_list, block_size=4):
message = ""
for i in range(len(message_list)):
chunk = message_list[i]
for c in range(block_size):
number = (chunk >> (8 * (block_size - 1 - c))) % 2 ** 8
message += chr(number)
return message
## Code snippet 3
def apply_shift(message_list, key, block_size=4):
# Shift characters up Unicode value based on key value and block count
cipher_list = []
bit_max = block_size * 8
for i in range(len(message_list)):
# Iterate through each chunk in the message list
chunk = message_list[i]
# Rotate the bits in the chunk
carry = chunk % (2 ** key)
carry = carry << (bit_max - key)
cipher = (chunk >> key) + carry
cipher_list.append(cipher)
return cipher_list
## Code snippet 4
def undo_shift(cipher_list, key, block_size=4):
# Rotate bits back to original position
message_list = []
bit_max = block_size * 8
for i in range(len(cipher_list)):
# Iterate through each chunk in the message list
chunk = cipher_list[i]
# Rotate the bits in the chunk
carry = chunk % (2 ** (bit_max - key))
carry = carry << key
number = (chunk >> (bit_max - key)) + carry
message_list.append(number)
return message_list
## Code snippet 5
plaintext = "abcdefGHIJKLMNOpqr!@#$%123"
# Set the key as the number of bits to rotate in each block
key = 20
text_list = pad_message(plaintext)
# print(text_list)
cipher_list = apply_shift(text_list, key)
# print(cipher_list)
cipher = rebuild_message(cipher_list)
print(cipher)
message_list = undo_shift(cipher_list, key)
message = rebuild_message(message_list)
print(message)
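# Round-trip check (editorial addition): apply_shift rotates each 32-bit block
# right by `key` bits and undo_shift rotates right by the remaining 32 - key
# bits, i.e. a full rotation, so decryption must recover the plaintext (up to
# the zero-byte padding added by pad_message).
assert message.rstrip('\x00') == plaintext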
|
[
"idcrypt3@gmail.com"
] |
idcrypt3@gmail.com
|
127526fece6a1143164daa6c117d6a64beca8f84
|
a524f7ab59b8c9fa124c68d6e17a1b4cd0c0062b
|
/DFS/increasingOrderSearchTree/Solution.py
|
ccb8baefbffdf62dc9c1552c2d89a203ed6388e5
|
[] |
no_license
|
sulenn/leetcode_python
|
796b1c9cc52446717f01cda8075eb54db479d4cb
|
238880a43fac9f2abdfb4202e5d03ce4f1b1e95d
|
refs/heads/master
| 2020-08-06T22:33:06.111130
| 2019-12-15T12:52:51
| 2019-12-15T12:52:51
| 213,183,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def increasingBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
valueList = self.inOrder(root)
if not valueList:
return None
        # chained assignment: both names must reference the same new node
        root = curRoot = TreeNode(valueList[0])
for i in valueList[1:]:
curRoot.right = TreeNode(i)
curRoot = curRoot.right
return root
def inOrder(self, root):
if not root:
return []
valueList = []
valueList += self.inOrder(root.left)
valueList.append(root.val)
valueList += self.inOrder(root.right)
return valueList
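# Illustrative trace (editorial addition): for a BST with root 5, left child 3
# and right child 6, inOrder yields [3, 5, 6], so increasingBST returns the
# right-skewed chain 3 -> 5 -> 6.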
|
[
"273409891@qq.com"
] |
273409891@qq.com
|
51c350f3ab04036faeac750ca7a1c092a00b985f
|
e753c46bd9bef1a81ef2c48826877c6cc604248d
|
/exercises/fizz.py
|
1d5d1b277c115da2a934b92bd14858592cf99a47
|
[] |
no_license
|
martadrozsa/curso-coursera-python
|
30699b24898ab4b8abf4e86b6473220a70863b39
|
aebbd3a75718d2834c63bd2ff5385312997a8e7f
|
refs/heads/main
| 2023-07-03T06:42:33.012412
| 2021-08-04T18:57:25
| 2021-08-04T18:57:25
| 392,355,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
# Read an integer from the input and print "Fizz" if it is divisible by 3.
# Otherwise, print the same number that was given as input.
number = int(input("Enter number: "))
remainder = number % 3
is_divisible = remainder == 0
if is_divisible:
print("Fizz")
else:
print(number)
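# Worked example (editorial addition): for input 9, remainder = 9 % 3 = 0, so
# the program prints "Fizz"; for input 10, remainder = 1 and it prints 10.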
|
[
"marta.denisczwicz@gmail.com"
] |
marta.denisczwicz@gmail.com
|
2e2ea0ae1f07937be98c14ecad25d65168805933
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/ospf/lsustats1h.py
|
43030853dd76a0d3121f6c35a084d838e8d89808
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37,960
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LsuStats1h(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.ospf.LsuStats1h", "Ospf Lsu Packets")
counter = CounterMeta("lsuPeerTxPkts", CounterCategory.COUNTER, "packets", "LSU Packets To Peer")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "lsuPeerTxPktsLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "lsuPeerTxPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "lsuPeerTxPktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "lsuPeerTxPktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "lsuPeerTxPktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "lsuPeerTxPktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "lsuPeerTxPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "lsuPeerTxPktsBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "lsuPeerTxPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "lsuPeerTxPktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "lsuPeerTxPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "lsuPeerTxPktsRate"
meta._counters.append(counter)
counter = CounterMeta("lsuForLsreqPkts", CounterCategory.COUNTER, "packets", "LSU Packets For LSREQ Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "lsuForLsreqPktsLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "lsuForLsreqPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "lsuForLsreqPktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "lsuForLsreqPktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "lsuForLsreqPktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "lsuForLsreqPktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "lsuForLsreqPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "lsuForLsreqPktsBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "lsuForLsreqPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "lsuForLsreqPktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "lsuForLsreqPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "lsuForLsreqPktsRate"
meta._counters.append(counter)
counter = CounterMeta("lsuRexmitPkts", CounterCategory.COUNTER, "packets", "LSU Retransmission Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "lsuRexmitPktsLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "lsuRexmitPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "lsuRexmitPktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "lsuRexmitPktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "lsuRexmitPktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "lsuRexmitPktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "lsuRexmitPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "lsuRexmitPktsBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "lsuRexmitPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "lsuRexmitPktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "lsuRexmitPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "lsuRexmitPktsRate"
meta._counters.append(counter)
counter = CounterMeta("lsuFirstTxPkts", CounterCategory.COUNTER, "packets", "LSU First Tx Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "lsuFirstTxPktsLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "lsuFirstTxPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "lsuFirstTxPktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "lsuFirstTxPktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "lsuFirstTxPktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "lsuFirstTxPktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "lsuFirstTxPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "lsuFirstTxPktsBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "lsuFirstTxPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "lsuFirstTxPktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "lsuFirstTxPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "lsuFirstTxPktsRate"
meta._counters.append(counter)
meta.moClassName = "ospfLsuStats1h"
meta.rnFormat = "CDospfLsuStats1h"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Ospf Lsu Packets stats in 1 hour"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.ospf.IfStats")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.ospf.LsuStats")
meta.rnPrefixes = [
('CDospfLsuStats1h', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "lsuFirstTxPktsAvg", "lsuFirstTxPktsAvg", 48890, PropCategory.IMPLICIT_AVG)
prop.label = "LSU First Tx Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsAvg", prop)
prop = PropMeta("str", "lsuFirstTxPktsBase", "lsuFirstTxPktsBase", 48885, PropCategory.IMPLICIT_BASELINE)
prop.label = "LSU First Tx Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsBase", prop)
prop = PropMeta("str", "lsuFirstTxPktsCum", "lsuFirstTxPktsCum", 48886, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LSU First Tx Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsCum", prop)
prop = PropMeta("str", "lsuFirstTxPktsLast", "lsuFirstTxPktsLast", 48884, PropCategory.IMPLICIT_LASTREADING)
prop.label = "LSU First Tx Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsLast", prop)
prop = PropMeta("str", "lsuFirstTxPktsMax", "lsuFirstTxPktsMax", 48889, PropCategory.IMPLICIT_MAX)
prop.label = "LSU First Tx Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsMax", prop)
prop = PropMeta("str", "lsuFirstTxPktsMin", "lsuFirstTxPktsMin", 48888, PropCategory.IMPLICIT_MIN)
prop.label = "LSU First Tx Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsMin", prop)
prop = PropMeta("str", "lsuFirstTxPktsPer", "lsuFirstTxPktsPer", 48887, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LSU First Tx Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsPer", prop)
prop = PropMeta("str", "lsuFirstTxPktsRate", "lsuFirstTxPktsRate", 48895, PropCategory.IMPLICIT_RATE)
prop.label = "LSU First Tx Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsRate", prop)
prop = PropMeta("str", "lsuFirstTxPktsSpct", "lsuFirstTxPktsSpct", 48891, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LSU First Tx Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsSpct", prop)
prop = PropMeta("str", "lsuFirstTxPktsThr", "lsuFirstTxPktsThr", 48892, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LSU First Tx Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("lsuFirstTxPktsThr", prop)
prop = PropMeta("str", "lsuFirstTxPktsTr", "lsuFirstTxPktsTr", 48894, PropCategory.IMPLICIT_TREND)
prop.label = "LSU First Tx Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsTr", prop)
prop = PropMeta("str", "lsuFirstTxPktsTrBase", "lsuFirstTxPktsTrBase", 48893, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "LSU First Tx Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuFirstTxPktsTrBase", prop)
prop = PropMeta("str", "lsuForLsreqPktsAvg", "lsuForLsreqPktsAvg", 48911, PropCategory.IMPLICIT_AVG)
prop.label = "LSU Packets For LSREQ Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsAvg", prop)
prop = PropMeta("str", "lsuForLsreqPktsBase", "lsuForLsreqPktsBase", 48906, PropCategory.IMPLICIT_BASELINE)
prop.label = "LSU Packets For LSREQ Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsBase", prop)
prop = PropMeta("str", "lsuForLsreqPktsCum", "lsuForLsreqPktsCum", 48907, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LSU Packets For LSREQ Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsCum", prop)
prop = PropMeta("str", "lsuForLsreqPktsLast", "lsuForLsreqPktsLast", 48905, PropCategory.IMPLICIT_LASTREADING)
prop.label = "LSU Packets For LSREQ Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsLast", prop)
prop = PropMeta("str", "lsuForLsreqPktsMax", "lsuForLsreqPktsMax", 48910, PropCategory.IMPLICIT_MAX)
prop.label = "LSU Packets For LSREQ Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsMax", prop)
prop = PropMeta("str", "lsuForLsreqPktsMin", "lsuForLsreqPktsMin", 48909, PropCategory.IMPLICIT_MIN)
prop.label = "LSU Packets For LSREQ Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsMin", prop)
prop = PropMeta("str", "lsuForLsreqPktsPer", "lsuForLsreqPktsPer", 48908, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LSU Packets For LSREQ Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsPer", prop)
prop = PropMeta("str", "lsuForLsreqPktsRate", "lsuForLsreqPktsRate", 48916, PropCategory.IMPLICIT_RATE)
prop.label = "LSU Packets For LSREQ Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsRate", prop)
prop = PropMeta("str", "lsuForLsreqPktsSpct", "lsuForLsreqPktsSpct", 48912, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LSU Packets For LSREQ Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsSpct", prop)
prop = PropMeta("str", "lsuForLsreqPktsThr", "lsuForLsreqPktsThr", 48913, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LSU Packets For LSREQ Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("lsuForLsreqPktsThr", prop)
prop = PropMeta("str", "lsuForLsreqPktsTr", "lsuForLsreqPktsTr", 48915, PropCategory.IMPLICIT_TREND)
prop.label = "LSU Packets For LSREQ Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsTr", prop)
prop = PropMeta("str", "lsuForLsreqPktsTrBase", "lsuForLsreqPktsTrBase", 48914, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "LSU Packets For LSREQ Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuForLsreqPktsTrBase", prop)
prop = PropMeta("str", "lsuPeerTxPktsAvg", "lsuPeerTxPktsAvg", 48932, PropCategory.IMPLICIT_AVG)
prop.label = "LSU Packets To Peer average value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsAvg", prop)
prop = PropMeta("str", "lsuPeerTxPktsBase", "lsuPeerTxPktsBase", 48927, PropCategory.IMPLICIT_BASELINE)
prop.label = "LSU Packets To Peer baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsBase", prop)
prop = PropMeta("str", "lsuPeerTxPktsCum", "lsuPeerTxPktsCum", 48928, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LSU Packets To Peer cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsCum", prop)
prop = PropMeta("str", "lsuPeerTxPktsLast", "lsuPeerTxPktsLast", 48926, PropCategory.IMPLICIT_LASTREADING)
prop.label = "LSU Packets To Peer current value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsLast", prop)
prop = PropMeta("str", "lsuPeerTxPktsMax", "lsuPeerTxPktsMax", 48931, PropCategory.IMPLICIT_MAX)
prop.label = "LSU Packets To Peer maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsMax", prop)
prop = PropMeta("str", "lsuPeerTxPktsMin", "lsuPeerTxPktsMin", 48930, PropCategory.IMPLICIT_MIN)
prop.label = "LSU Packets To Peer minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsMin", prop)
prop = PropMeta("str", "lsuPeerTxPktsPer", "lsuPeerTxPktsPer", 48929, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LSU Packets To Peer periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsPer", prop)
prop = PropMeta("str", "lsuPeerTxPktsRate", "lsuPeerTxPktsRate", 48937, PropCategory.IMPLICIT_RATE)
prop.label = "LSU Packets To Peer rate"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsRate", prop)
prop = PropMeta("str", "lsuPeerTxPktsSpct", "lsuPeerTxPktsSpct", 48933, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LSU Packets To Peer suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsSpct", prop)
prop = PropMeta("str", "lsuPeerTxPktsThr", "lsuPeerTxPktsThr", 48934, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LSU Packets To Peer thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("lsuPeerTxPktsThr", prop)
prop = PropMeta("str", "lsuPeerTxPktsTr", "lsuPeerTxPktsTr", 48936, PropCategory.IMPLICIT_TREND)
prop.label = "LSU Packets To Peer trend"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsTr", prop)
prop = PropMeta("str", "lsuPeerTxPktsTrBase", "lsuPeerTxPktsTrBase", 48935, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "LSU Packets To Peer trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuPeerTxPktsTrBase", prop)
prop = PropMeta("str", "lsuRexmitPktsAvg", "lsuRexmitPktsAvg", 48953, PropCategory.IMPLICIT_AVG)
prop.label = "LSU Retransmission Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsAvg", prop)
prop = PropMeta("str", "lsuRexmitPktsBase", "lsuRexmitPktsBase", 48948, PropCategory.IMPLICIT_BASELINE)
prop.label = "LSU Retransmission Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsBase", prop)
prop = PropMeta("str", "lsuRexmitPktsCum", "lsuRexmitPktsCum", 48949, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LSU Retransmission Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsCum", prop)
prop = PropMeta("str", "lsuRexmitPktsLast", "lsuRexmitPktsLast", 48947, PropCategory.IMPLICIT_LASTREADING)
prop.label = "LSU Retransmission Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsLast", prop)
prop = PropMeta("str", "lsuRexmitPktsMax", "lsuRexmitPktsMax", 48952, PropCategory.IMPLICIT_MAX)
prop.label = "LSU Retransmission Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsMax", prop)
prop = PropMeta("str", "lsuRexmitPktsMin", "lsuRexmitPktsMin", 48951, PropCategory.IMPLICIT_MIN)
prop.label = "LSU Retransmission Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsMin", prop)
prop = PropMeta("str", "lsuRexmitPktsPer", "lsuRexmitPktsPer", 48950, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LSU Retransmission Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsPer", prop)
prop = PropMeta("str", "lsuRexmitPktsRate", "lsuRexmitPktsRate", 48958, PropCategory.IMPLICIT_RATE)
prop.label = "LSU Retransmission Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsRate", prop)
prop = PropMeta("str", "lsuRexmitPktsSpct", "lsuRexmitPktsSpct", 48954, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LSU Retransmission Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsSpct", prop)
prop = PropMeta("str", "lsuRexmitPktsThr", "lsuRexmitPktsThr", 48955, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LSU Retransmission Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("lsuRexmitPktsThr", prop)
prop = PropMeta("str", "lsuRexmitPktsTr", "lsuRexmitPktsTr", 48957, PropCategory.IMPLICIT_TREND)
prop.label = "LSU Retransmission Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsTr", prop)
prop = PropMeta("str", "lsuRexmitPktsTrBase", "lsuRexmitPktsTrBase", 48956, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "LSU Retransmission Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("lsuRexmitPktsTrBase", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
80141b88864352a3da42ab6cd661e2218e992d2c
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/starshield/apis/DeleteDNSRecordRequest.py
|
f0c3297faa9d2743710cb3f081ef643dc9e9ae02
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168
| 2023-08-30T12:00:25
| 2023-08-30T12:00:25
| 126,276,169
| 18
| 36
|
Apache-2.0
| 2023-09-07T06:54:49
| 2018-03-22T03:47:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DeleteDNSRecordRequest(JDCloudRequest):
"""
"""
def __init__(self, parameters, header=None, version="v1"):
super(DeleteDNSRecordRequest, self).__init__(
'/zones/{zone_identifier}/dns_records/{identifier}', 'DELETE', header, version)
self.parameters = parameters
class DeleteDNSRecordParameters(object):
def __init__(self,zone_identifier, identifier):
"""
        :param zone_identifier: ID of the zone that owns the DNS record
        :param identifier: ID of the DNS record to delete
"""
self.zone_identifier = zone_identifier
self.identifier = identifier
|
[
"jdcloud-api@jd.com"
] |
jdcloud-api@jd.com
|
ef9c19b3eb103b74a30574d32c605d338f2bd7a8
|
8285b710101a4da39b1bfb7e2900d590c4b1ef13
|
/08-distance-calc.py
|
5fd9e0fdaaec4ae06407a1b6e08764e244321141
|
[] |
no_license
|
aiyenggar/regioncitations
|
1097cbe1f94552eae5cadc4e551d95157eb5d8ab
|
b8ba34b83522b404d652ca4fdc1ae5aeb193a438
|
refs/heads/master
| 2021-04-29T22:54:33.538482
| 2020-03-23T05:06:28
| 2020-03-23T05:06:28
| 78,220,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,846
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 08:44:34 2019
@author: aiyenggar
"""
import csv
import pandas as pd
import geopy.distance
import time
def dump(dictionary, filename):
with open(filename, 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(['latlong', 'urban_area', 'minimum_distance'])
for nextkey in dictionary.keys():
spl = nextkey.split(":")
writer.writerow([spl[0], spl[1], dictionary[nextkey]])
csvFile.close()
# read the latlongid to ua1 mapping into latlong_urbanarea
latlong_urbanarea = pd.read_csv(ut.latlongUrbanAreaFile, usecols = ['latlongid', 'ua1', 'latitude', 'longitude'], dtype={'latlongid':int, 'ua1':int, 'latitude':float, 'longitude':float})
# set mindist to the circumference of the earth (a high value)
latlong_urbanarea['mindist']=round(2 * 3.14159 * 6371,2)
latlong_urbanarea['near_latlong']=""
latlong_urbanarea['near_urbanarea']=""
latlong_urbanarea.sort_values(['latitude', 'longitude'], ascending=[True, True])
# we want to restrict our search for an urban area nearby to a bounding box +- 0.3 degrees on latitutde but not longitude
treshold = 0.30
dist_dict = {}
# master is that pandas table where the point is already identified within an urbanarea
master = latlong_urbanarea[latlong_urbanarea['ua1'] != -1]
missing = latlong_urbanarea[latlong_urbanarea['ua1'] == -1]
csvFile = open(ut.distancesFile, 'w')
writer = csv.writer(csvFile)
writer.writerow(['l_latlongid', 'r_latlongid', 'distance'])
neighbours = {}
prev_line_seen=0
treshold_lines=1500
max_lines = len(latlong_urbanarea.index)
# we look for unlabelled points in the vicinity of labelled points (rather than the other way)
for mindex, masterow in master.iterrows():
a = masterow['latitude']
b = masterow['longitude']
l=(a,b)
# all unlabelled points within the bounding box of this labelled point
lowert = a - treshold
highert = a + treshold
cutdf = missing[(missing['latitude'] < highert) & (missing['latitude'] > lowert)]
for nindex, nrow in cutdf.iterrows():
c = nrow['latitude']
d = nrow['longitude']
r=(c, d)
key = tuple([a, b, c, d])
if key not in dist_dict:
distance = round(geopy.distance.geodesic(l,r).km,2)
dist_dict[key] = distance
# save all the calculated distances so as to avoid calculating again
if dist_dict[key] < 30.01: # need to write only once
writer.writerow([nrow['latlongid'], masterow['latlongid'], dist_dict[key]])
if (mindex > prev_line_seen + treshold_lines):
print(time.strftime("%Y-%m-%d %H:%M:%S") + " Processed till index " + str(mindex) + " of " + str(max_lines))
prev_line_seen = mindex
csvFile.flush()
dist_dict = {}
csvFile.close()
|
[
"aiyenggar@users.noreply.github.com"
] |
aiyenggar@users.noreply.github.com
|
e0cfacb1680baaad5f710f171d3d8177c3a5eea0
|
c89543dd926c1787c40616ed174a3d1371c54449
|
/superset/tags/core.py
|
6c4f56a2e66f217a3a8df5a3026d0e3a310987de
|
[
"Apache-2.0",
"OFL-1.1"
] |
permissive
|
j420247/incubator-superset
|
7c7bff330393f0e91f5e67782f35efe8c735250a
|
c9b9b7404a2440a4c9d3173f0c494ed40f7fa2bd
|
refs/heads/master
| 2023-03-11T21:53:16.827919
| 2023-02-03T19:04:17
| 2023-02-03T19:04:17
| 157,780,350
| 1
| 1
|
Apache-2.0
| 2023-03-07T00:14:51
| 2018-11-15T22:24:29
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,962
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel
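# Attach/detach tag-updater hooks on SQLAlchemy model events so object tags stay in sync.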
def register_sqla_event_listeners() -> None:
import sqlalchemy as sqla
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.sql_lab import SavedQuery
from superset.tags.models import (
ChartUpdater,
DashboardUpdater,
DatasetUpdater,
FavStarUpdater,
QueryUpdater,
)
sqla.event.listen(SqlaTable, "after_insert", DatasetUpdater.after_insert)
sqla.event.listen(SqlaTable, "after_update", DatasetUpdater.after_update)
sqla.event.listen(SqlaTable, "after_delete", DatasetUpdater.after_delete)
sqla.event.listen(Slice, "after_insert", ChartUpdater.after_insert)
sqla.event.listen(Slice, "after_update", ChartUpdater.after_update)
sqla.event.listen(Slice, "after_delete", ChartUpdater.after_delete)
sqla.event.listen(Dashboard, "after_insert", DashboardUpdater.after_insert)
sqla.event.listen(Dashboard, "after_update", DashboardUpdater.after_update)
sqla.event.listen(Dashboard, "after_delete", DashboardUpdater.after_delete)
sqla.event.listen(FavStar, "after_insert", FavStarUpdater.after_insert)
sqla.event.listen(FavStar, "after_delete", FavStarUpdater.after_delete)
sqla.event.listen(SavedQuery, "after_insert", QueryUpdater.after_insert)
sqla.event.listen(SavedQuery, "after_update", QueryUpdater.after_update)
sqla.event.listen(SavedQuery, "after_delete", QueryUpdater.after_delete)
def clear_sqla_event_listeners() -> None:
import sqlalchemy as sqla
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.sql_lab import SavedQuery
from superset.tags.models import (
ChartUpdater,
DashboardUpdater,
DatasetUpdater,
FavStarUpdater,
QueryUpdater,
)
sqla.event.remove(SqlaTable, "after_insert", DatasetUpdater.after_insert)
sqla.event.remove(SqlaTable, "after_update", DatasetUpdater.after_update)
sqla.event.remove(SqlaTable, "after_delete", DatasetUpdater.after_delete)
sqla.event.remove(Slice, "after_insert", ChartUpdater.after_insert)
sqla.event.remove(Slice, "after_update", ChartUpdater.after_update)
sqla.event.remove(Slice, "after_delete", ChartUpdater.after_delete)
sqla.event.remove(Dashboard, "after_insert", DashboardUpdater.after_insert)
sqla.event.remove(Dashboard, "after_update", DashboardUpdater.after_update)
sqla.event.remove(Dashboard, "after_delete", DashboardUpdater.after_delete)
sqla.event.remove(FavStar, "after_insert", FavStarUpdater.after_insert)
sqla.event.remove(FavStar, "after_delete", FavStarUpdater.after_delete)
sqla.event.remove(SavedQuery, "after_insert", QueryUpdater.after_insert)
sqla.event.remove(SavedQuery, "after_update", QueryUpdater.after_update)
sqla.event.remove(SavedQuery, "after_delete", QueryUpdater.after_delete)
|
[
"noreply@github.com"
] |
j420247.noreply@github.com
|
3268c3ee936318afe2dd9aace2de1008066d265e
|
f2d5498e47ff24810c462ea9b05d6520beb65eea
|
/4-auth/bookshelf/crud.py
|
6ee765b53e700698947dc0d92c8490b9ebf66c58
|
[] |
no_license
|
kennethleekk/Hackhub-python
|
b3388d47fbd4540bab7e6f727c667b15d8c5ba92
|
44c9adc68cf778383e695fd8a6629aed700f5c17
|
refs/heads/master
| 2022-12-04T17:17:03.255394
| 2020-08-20T13:30:04
| 2020-08-20T13:30:04
| 289,013,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,241
|
py
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from bookshelf import get_model, oauth2, storage
from flask import Blueprint, current_app, redirect, render_template, request, \
session, url_for
from google.oauth2 import id_token
from google.auth.transport import requests
sonbalance = 100
crud = Blueprint('crud', __name__)
def upload_image_file(file):
"""
Upload the user-uploaded file to Google Cloud Storage and retrieve its
publicly-accessible URL.
"""
if not file:
return None
public_url = storage.upload_file(
file.read(),
file.filename,
file.content_type
)
current_app.logger.info(
"Uploaded file %s as %s.", file.filename, public_url)
return public_url
@crud.route("/")
def main_route():
if 'profile' in session:
return list_mine()
else:
return render_template("main_for_anonymous.html")
@crud.route("/search")
def list():
token = request.args.get('page_token', None)
if token:
token = token.encode('utf-8')
books, next_page_token = get_model().list(cursor=token)
return render_template(
"list.html",
books=books,
next_page_token=next_page_token)
@crud.route("/appDetails_new")
def appDetails_new():
data = request.form.to_dict(flat=True)
books = get_model().read(data['id'])
return json.dumps(books)
@crud.route("/appDetails_onlyone")
def appDetails_onlyone():
appList = [
{
"title": "Funds Transfer",
"identifier": "fundstransfer",
"thumbnail": "./appJSONs/thumbnails/image1.jpg",
"keywords": "fundstransfer banking digital customerfacing retail rbwm allscreens",
"content" : {
"descriptionText": "Description text goes here .......maximum limit 200 chars",
"additionalResources": [
"./appJSONs/resources/fundstransfer/image1",
"./appJSONs/resources/fundstransfer/image2",
"./appJSONs/resources/fundstransfer/video1"
],
"githubLink" : "https://github.com/snehaagarwal6108/fundTranser.git",
"nexusLink" : "http://nexus/abc"
}
}
]
return json.dumps(appList)
@crud.route("/appDetails")
def appDetails():
appList = [
{
"title": "Funds Transfer",
"identifier": "fundstransfer",
"thumbnail": "./appJSONs/thumbnails/image1.jpg",
"keywords": "fundstransfer banking digital customerfacing retail rbwm allscreens",
"content" : {
"descriptionText": "Description text goes here .......maximum limit 200 chars",
"additionalResources": [
"./appJSONs/resources/fundstransfer/image1",
"./appJSONs/resources/fundstransfer/image2",
"./appJSONs/resources/fundstransfer/video1"
],
"githubLink" : "https://github.com/snehaagarwal6108/fundTranser.git",
"nexusLink" : "http://nexus/abc"
}
},
{
"title": "Login",
"identifier": "login",
"thumbnail": "./thumbnails/fundstransfer",
"keywords": "input userlogin login allscreens",
"content" : {
"descriptionText": "Description text goes here .......maximum limit 200 chars",
"additionalResources": [
"./appJSONs/resources/fundstransfer/image1",
"./appJSONs/resources/fundstransfer/image2",
"./appJSONs/resources/fundstransfer/video1"
],
"githubLink" : "https://github.com/snehaagarwal6108/loginPage.git",
"nexusLink" : "http://nexus/abc"
}
},
{
"title": "Calendar",
"identifier": "calendar",
"thumbnail": "./thumbnails/calendar",
"keywords": "calendar date dateandtime datetime",
"content" : {
"descriptionText": "Description text goes here .......maximum limit 200 chars",
"additionalResources": [
"./appJSONs/resources/fundstransfer/image1",
"./appJSONs/resources/fundstransfer/image2",
"./appJSONs/resources/fundstransfer/video1"
],
"githubLink" : "https://github.com/snehaagarwal6108/calendar.git",
"nexusLink" : "http://nexus/abc"
}
},
{
"title": "Bill Payment",
"identifier": "billpayment",
"thumbnail": "./appJSONs/thumbnails/image1.jpg",
"keywords":"billpayment digital retail rbwm allscreens",
"content" : {
"descriptionText": "Description text goes here .......maximum limit 200 chars",
"additionalResources": [
"./appJSONs/resources/fundstransfer/image1",
"./appJSONs/resources/fundstransfer/image2",
"./appJSONs/resources/fundstransfer/video1"
],
"githubLink" : "https://github.com/pallaviteli/stencil",
"nexusLink" : "http://nexus/abc"
}
},
{
"title": "Mailbox",
"thumbnail": "./appJSONs/thumbnails/image1.jpg",
"keywords": "mailbox email notification",
"identifier": "mailbox",
"content" : {
"descriptionText": "Description text goes here .......maximum limit 200 chars",
"additionalResources": [
"./appJSONs/resources/fundstransfer/image1",
"./appJSONs/resources/fundstransfer/image2",
"./appJSONs/resources/fundstransfer/video1"
],
"githubLink" : "https://github.com/snehaagarwal6108/MailBox.git",
"nexusLink" : "http://nexus/abc"
}
},
{
"title": "Pin Reset",
"identifier": "pinreset",
"thumbnail": "./appJSONs/thumbnails/image1.jpg",
"keywords":
"banking pinreset digital retail rbwm allscreens",
"content" : {
"descriptionText": "Description text goes here .......maximum limit 200 chars",
"additionalResources": [
"./appJSONs/resources/fundstransfer/image1",
"./appJSONs/resources/fundstransfer/image2",
"./appJSONs/resources/fundstransfer/video1"
],
"githubLink" : "https://github.com/pallaviteli/stencil",
"nexusLink" : "http://nexus/abc"
}
}
]
return json.dumps(appList)
@crud.route('/appList', methods=['GET'])
def appList():
appList = get_model().getAppList()
return json.dumps(appList)
@crud.route("/searchEvent")
def listEvent():
token = request.args.get('page_token', None)
if token:
token = token.encode('utf-8')
events, next_page_token = get_model().listEvent(cursor=token)
return render_template(
"listEvent.html",
events=events,
next_page_token=next_page_token)
# [START list_mine]
@crud.route("/mine")
@oauth2.required
def list_mine():
token = request.args.get('page_token', None)
if token:
token = token.encode('utf-8')
books, next_page_token = get_model().list_by_user(
user_id=session['profile']['email'],
cursor=token)
userRole = get_model().getUserRole(userId=session['profile']['email'])
session['userRole'] = userRole
return render_template(
"list.html",
books=books,
next_page_token=next_page_token)
# [END list_mine]
# [START list_search]
@crud.route("/list_search", methods=['GET', 'POST'])
def list_search():
token = request.args.get('page_token', None)
if token:
token = token.encode('utf-8')
books, next_page_token = get_model().list_by_filter(
_description=request.form.get('description', ''),
cursor=token)
return render_template(
"list_search.html",
books=books,
next_page_token=next_page_token,
description=request.form.get('description', ''))
# [END list_search]
@crud.route('/<id>')
def view(id):
book = get_model().read(id)
return render_template("view.html", book=book)
@crud.route('/detail/<id>')
def detailview(id):
book = get_model().read(id)
return render_template("viewForm.html", book=book)
@crud.route('/event/<id>')
def viewEvent(id):
event = get_model().readEvent(id)
return render_template("viewEvent.html", event=event)
# [START add]
@crud.route('/add', methods=['GET', 'POST'])
def add():
if request.method == 'POST':
data = request.form.to_dict(flat=True)
# If an image was uploaded, update the data to point to the new image.
image_url = upload_image_file(request.files.get('image'))
if image_url:
data['imageUrl'] = image_url
# If the user is logged in, associate their profile with the new book.
if 'profile' in session:
data['createdBy'] = session['profile']['name']
data['createdById'] = session['profile']['email']
book = get_model().create(data)
return redirect(url_for('.view', id=book['id']))
return render_template("form.html", action="Add", book={})
# [END add]
# [START addEvent]
@crud.route('/addEvent', methods=['GET', 'POST'])
def addEvent():
if request.method == 'POST':
data = request.form.to_dict(flat=True)
# If an image was uploaded, update the data to point to the new image.
#image_url = upload_image_file(request.files.get('image'))
#if image_url:
#data['imageUrl'] = image_url
# If the user is logged in, associate their profile with the new book.
if 'profile' in session:
data['createdBy'] = session['profile']['name']
data['createdById'] = session['profile']['email']
event = get_model().createEvent(data)
return redirect(url_for('.viewEvent', id=event['id']))
return render_template("formEvent.html", action="Add", event={})
# [END addEvent]
# [START hackhub]
@crud.route('/hackhub', methods=['GET', 'POST', 'DELETE', 'PUT'])
def hackhub():
if request.method == 'GET':
requestdata = request.form.to_dict(flat=True)
if 'id' in requestdata:
resonsedata = get_model().getHackhubDetail(requestdata['id'])
else:
resonsedata = get_model().getHackhubList()
return json.dumps(resonsedata)
if request.method == 'POST':
requestdata = request.form.to_dict(flat=True)
#for k, v in requestdata.items():
#print(k, v)
        # upload each provided attachment and swap the raw file for its public URL
        for i in range(1, 6):
            field = 'attachment%d' % i
            attachment = upload_image_file(request.files.get(field))
            if attachment:
                requestdata['attachmenturl%d' % i] = attachment
                requestdata.pop(field, None)
resonsedata = get_model().createHackhub(requestdata)
return json.dumps(resonsedata)
if request.method == 'DELETE':
requestdata = request.form.to_dict(flat=True)
resonsedata = get_model().deleteHackhub(requestdata['id'])
return json.dumps(resonsedata)
if request.method == 'PUT':
requestdata = request.form.to_dict(flat=True)
        # upload each provided attachment and swap the raw file for its public URL
        for i in range(1, 6):
            field = 'attachment%d' % i
            attachment = upload_image_file(request.files.get(field))
            if attachment:
                requestdata['attachmenturl%d' % i] = attachment
                requestdata.pop(field, None)
resonsedata = get_model().updateHackhub(requestdata)
return json.dumps(resonsedata)
# [END hackhub]
@crud.route('/hackhub/<id>', methods=['GET'])
def hackhubid(id):
if request.method == 'GET':
resonsedata = get_model().getHackhubDetail(id)
return json.dumps(resonsedata)
# [END hackhubid]
# [START addEventNoCheck]
@crud.route('/addEventNoCheck', methods=['GET', 'POST'])
def addEventNoCheck():
if request.method == 'POST':
data = request.form.to_dict(flat=True)
# If an image was uploaded, update the data to point to the new image.
#image_url = upload_image_file(request.files.get('image'))
#if image_url:
#data['imageUrl'] = image_url
image_url = upload_image_file(request.files.get('present'))
event = get_model().createEvent(data)
return json.dumps(image_url)
    return render_template("formEvent.html", action="Add", event={})
@crud.route('/<id>/edit', methods=['GET', 'POST'])
def edit(id):
book = get_model().read(id)
if request.method == 'POST':
data = request.form.to_dict(flat=True)
image_url = upload_image_file(request.files.get('image'))
if image_url:
data['imageUrl'] = image_url
book = get_model().update(data, id)
return redirect(url_for('.view', id=book['id']))
return render_template("form.html", action="Edit", book=book)
@crud.route('/<id>/editEvent', methods=['GET', 'POST'])
def editEvent(id):
event = get_model().readEvent(id)
if request.method == 'POST':
data = request.form.to_dict(flat=True)
image_url = upload_image_file(request.files.get('image'))
#if image_url:
#data['imageUrl'] = image_url
event = get_model().update(data, id)
return redirect(url_for('.viewEvent', id=event['id']))
return render_template("formEvent.html", action="Edit", event=event)
@crud.route('/<id>/delete')
def delete(id):
get_model().delete(id)
return redirect(url_for('.list'))
@crud.route('/balance')
def balance():
return {'name': 'son', 'balance': sonbalance}
@crud.route('/oauth/<token>', methods=['GET', 'POST'])
def oauth(token):
print(token)
try:
print("123")
# Specify the CLIENT_ID of the app that accesses the backend:
idinfo = id_token.verify_oauth2_token(token, requests.Request(), "1062436335774-1ch8mfvedkbggu8gtpr76106t0k63aru.apps.googleusercontent.com")
print("abc")
# Or, if multiple clients access the backend server:
# idinfo = id_token.verify_oauth2_token(token, requests.Request())
# if idinfo['aud'] not in [CLIENT_ID_1, CLIENT_ID_2, CLIENT_ID_3]:
# raise ValueError('Could not verify audience.')
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise ValueError('Wrong issuer.')
# If auth request is from a G Suite domain:
# if idinfo['hd'] != GSUITE_DOMAIN_NAME:
# raise ValueError('Wrong hosted domain.')
# ID token is valid. Get the user's Google Account ID from the decoded token.
userid = idinfo['sub']
print(idinfo)
except ValueError:
print("ValueError")
# Invalid token
        # idinfo is never assigned when verification fails, so return an error instead
        return "Invalid token"
return idinfo
|
[
"56199344+kennethleekk@users.noreply.github.com"
] |
56199344+kennethleekk@users.noreply.github.com
|
9ad605cc33ca49dfd845db8299e039635bb5dc13
|
170a0006eed0c22a463b2304acca579805954aba
|
/ai/main.py
|
dc91cfb278f70a9caf589acc873db3789462526d
|
[] |
no_license
|
nav3van/trading_bot
|
2b5c4707a6ba365b68bd771556d9298c4cbe84f7
|
532c588e712a38746d0f97c445b36db91a8dac84
|
refs/heads/master
| 2023-02-10T19:51:53.586869
| 2021-01-08T21:13:44
| 2021-01-08T21:13:44
| 328,013,563
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,374
|
py
|
#!/usr/bin/env python3.6
import argparse
import functools
import multiprocessing
import subprocess
import matplotlib.pyplot as plt
import numpy as np
from ai.tf import neural_net
def get_training_data(training_steps):
# Make an array of 300 samples values starting with -1 and ending with 1
sample_data = np.linspace(-1, 1, training_steps)
# Restructure our sample data so each value is in it's own array instead of
# having all values in the same giant array. 1d -> 2d
x_train = sample_data[:, np.newaxis]
# Create a normal distribution to introduce noise into the sample data
# mean = 0
# standard deviation = 0.05
# shape = same as our x_data's shape
noise = np.random.normal(0, 0.05, x_train.shape)
y_train = np.square(x_train) - 0.5 + noise
return x_train, y_train
def get_plot(x_data, y_data, show_plot=False):
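    # Wraps a scatter plot of the training data; update() redraws the fitted
    # line over it whenever plotting is enabled.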
class Plot:
def __init__(self, _x_data, _y_data, _show_plot):
self._show_plot = _show_plot
if self._show_plot:
fig = plt.figure()
self._x_data = _x_data
self._y_data = _y_data
self._plot_lines = []
# 1 plot on a grid with 1 row and 1 column
self._grid = fig.add_subplot(1, 1, 1)
self._grid.scatter(
self._x_data,
self._y_data
)
self._plot = functools.partial(
self._grid.plot,
color='r',
linestyle='-',
linewidth=5
)
def update(self, predicted_value):
if self._show_plot:
if self._plot_lines:
self._grid.lines.remove(self._plot_lines[0])
self._plot_lines = self._plot(
self._x_data,
predicted_value,
)
plt.pause(
interval=0.1
)
return Plot(x_data, y_data, show_plot)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('training_steps', default=1000, nargs='?', type=int)
parser.add_argument('--dashboard', action='store_true')
parser.add_argument('--plot', action='store_true')
return parser.parse_args()
def main():
args = parse_args()
training_steps = args.training_steps
tensorboard_dashboard = args.dashboard
show_plot = args.plot
x_train, y_train = get_training_data(training_steps)
queue = multiprocessing.Queue()
if tensorboard_dashboard:
proc = subprocess.Popen(
['tensorboard', '--host=127.0.0.1', '--logdir=logs', '--port=8080'],
stdout=subprocess.PIPE
)
multiprocessing.Process(
target=neural_net.run,
args=(
queue,
training_steps,
x_train,
y_train,
)
).start()
# plot = get_plot(x_train, y_train, show_plot=show_plot)
# for i in range(training_steps):
# predicted_value, error_rate = queue.get()
# if i % 50 == 0:
# plot.update(predicted_value)
# print(f'{i}/{training_steps}) Error Rate: {error_rate}')
input('Done!')
try:
proc.kill()
except NameError:
pass
if __name__ == '__main__':
main()
|
[
"ehenri@arbor.net"
] |
ehenri@arbor.net
|
03206ecbe04795a00eb3793a7ae1b72424c0a606
|
8ad15ebf0d8107eb2e4b08eb24335183d6f1cd5e
|
/Chapter_03/计算字符个数.py
|
cfbd069c30217150121df5ffadbbb5c25528909f
|
[] |
no_license
|
Zhang-Zhi-ZZ/PythonPKUPractice
|
eca808854937e26364c71762c3852a51a35bf838
|
71bc32fb99212fd5fac63eff517ad1205a03c35c
|
refs/heads/master
| 2022-06-10T06:36:16.170178
| 2020-05-02T03:52:28
| 2020-05-02T03:52:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
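# Count occurrences of the last whitespace-separated token inside the first
# token of the input, case-insensitively.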
s = str(input())
s1 = str(s.lower().split()[0])
c = str(s.lower().split()[-1])
if len(s1) == 0:
exit()
if len(c) == 0:
exit()
if len(c) >= 1:
print(s1.count(c))
|
[
"zhang325200@gmail.com"
] |
zhang325200@gmail.com
|
d7a21bdd148944476cbbb6d6c874aa4ac9b4984e
|
f5cfca032f9074bf4d8844e64566e09112595626
|
/zendesk_tickets_machine/tickets/migrations/0012_auto_20161214_0157.py
|
d78c0e15bec5517b6b4b319f780d75caa2eb17fd
|
[
"MIT"
] |
permissive
|
prontotools/zendesk-tickets-machine
|
15b08cbce4264f042bfdb58e6f16792b84f5c05e
|
1a0376038021e6277739907bb4e7393acebde8c6
|
refs/heads/develop
| 2021-06-20T10:31:48.873580
| 2019-07-25T00:09:12
| 2019-07-25T00:09:12
| 75,986,569
| 3
| 8
|
MIT
| 2021-06-10T20:19:37
| 2016-12-09T00:27:45
|
Python
|
UTF-8
|
Python
| false
| false
| 846
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-14 01:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tickets', '0011_auto_20161214_0146'),
]
operations = [
migrations.AlterField(
model_name='ticket',
name='private_comment',
field=models.CharField(blank=True, max_length=500, null=True),
),
migrations.AlterField(
model_name='ticket',
name='tags',
field=models.CharField(blank=True, max_length=300, null=True),
),
migrations.AlterField(
model_name='ticket',
name='zendesk_ticket_id',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
[
"kan@prontomarketing.com"
] |
kan@prontomarketing.com
|
786f9d675bff9f75ea206a483a300b5a7869433e
|
c16fb74fd2fd69d65cd813a3d23d5e7b61f9808f
|
/xueqiu/downloader_p3.py
|
ae3cf3e67a90d53a71fb48d1464ac0d7f9a45786
|
[] |
no_license
|
nightqiuhua/selenium_crawler_xueqiu
|
9d3f9d10b2fdb5a479269e6d14cc52c97945ec31
|
0c68eeed7033c28f81def5f94351f2fbb42ca079
|
refs/heads/master
| 2020-03-20T17:54:53.943464
| 2018-06-16T13:36:45
| 2018-06-16T13:36:45
| 137,568,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,375
|
py
|
import urllib.request
import urllib.parse
import socket
from datetime import datetime
import time
import random
import gzip
import re
import json
from selenium import webdriver
DEFAULT_DELAY = 2
DEFAULT_TIMEOUT = 200
DEFAULT_RETRIES = 1
DEFAULT_CHROME_PATH = 'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe'
DEFAULT_SEED_URL = 'https://xueqiu.com/hq'
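# Throttle enforces a minimum delay between consecutive requests to the same domain.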
class Throttle:
def __init__(self,delay):
self.delay = delay
self.domains = {}
def wait(self,url):
domain = urllib.parse.urlparse(url).netloc
last_accessed = self.domains.get(domain)
if self.delay > 0 and last_accessed is not None:
sleep_sec = self.delay-(datetime.now() - last_accessed).seconds
if sleep_sec>0:
time.sleep(sleep_sec)
self.domains[domain] = datetime.now()
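# Downloader drives a Chrome webdriver: it visits a seed URL first (to pick up
# cookies), then fetches target pages, optionally caching results keyed by URL.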
class Downloader:
def __init__(self,delay=DEFAULT_DELAY,proxies=None,num_retries=DEFAULT_RETRIES,timeout=DEFAULT_TIMEOUT,driver_path=DEFAULT_CHROME_PATH,seed_url=DEFAULT_SEED_URL,cache=None):
socket.setdefaulttimeout(timeout)
self.throttle = Throttle(delay)
self.num_tries=num_retries
self.cache = cache
self.driver = webdriver.Chrome(driver_path)
self.seed_url = seed_url
def __call__(self,url):
result = None
if self.cache:
try:
result = self.cache[url]
except KeyError:
pass
else:
if self.num_tries > 0 and 500<= result['code'] <600:
result = None
if result is None:
self.throttle.wait(url)
result = self.download(url,s_url=self.seed_url,num_tries=self.num_tries)
if self.cache:
self.cache[url] = result
#print(result['html'])
return result['html']
def download(self,url,s_url,num_tries):
print('Downloading seed url:',s_url)
self.driver.get(s_url)
time.sleep(3)
print('Downloading:',url)
try:
            # send the request
self.driver.get(url)
time.sleep(2)
html = self.driver.page_source
code = 200
except Exception as e:
print('Download error',e)
html = ' '
if hasattr(e,'code'):
code = e.code
if num_tries>0 and 500<=code<600:
                    html = self.download(url, s_url, num_tries-1)
else:
code = -1
#self.driver.close()
return {'html':html,'code':code}
if __name__ == '__main__':
seed_url = 'https://xueqiu.com/stock/cata/stocklist.json?page=3&size=90&order=desc&orderby=percent&type=11%2C12'
D = Downloader()
html = D(url=seed_url)
print('html=',html)
print('type(html)',type(html))
|
[
"208408@whut.edu.cn"
] |
208408@whut.edu.cn
|
4981ccd67c286b157f71d1c1322ab43d69b68044
|
508e7a80242e68748b9d98626aa80931d341654e
|
/utils/datadownload.py
|
9b2321d50498ebc971772579811969c71d71c19d
|
[] |
no_license
|
Frans06/tsprediction
|
974c2033536b41d660d0bb0cfd4235f48cec5dbc
|
2939da9f1c19029e00aadd11060f71b830411af5
|
refs/heads/master
| 2020-03-30T16:27:20.075665
| 2018-10-15T16:22:31
| 2018-10-15T16:22:31
| 151,409,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
"""
This module generates simple synthetic time-series data for testing and
training, and can plot the generated series.
Example:
    import and use the class::
        from utils.datadownload import Data
Todo:
* For module TODOs
* You have to also use ``sphinx.ext.todo`` extension
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
class Data():
'''
Create or dowload some data for test or train
'''
SEQ_LEN = 10
    def __init__(self):
self.to_csv('train.csv', 1000) # 1000 sequences
self.to_csv('valid.csv', 50)
def create_time_series(self):
'''
create a random time series as signal data
'''
freq = (np.random.random()*0.5) + 0.1
ampl = np.random.random() + 0.5 # 0.5 to 1.5
x_axis = np.sin(np.arange(0, self.SEQ_LEN) * freq) * ampl
return x_axis
def to_csv(self, filename, sequences):
'''
write data to csv
'''
with open(filename, 'w') as ofp:
for line in range(0, sequences):
seq = self.create_time_series()
line = ",".join(map(str, seq))
ofp.write(line + '\n')
def plot(self):
'''
plotting generated data
'''
for _ in range(0, 5):
sns.tsplot(self.create_time_series())# 5 series
if __name__ == "__main__":
DATA = Data()
DATA.plot()
plt.show()
|
[
"franscaraveli@gmail.com"
] |
franscaraveli@gmail.com
|
6d7c3b2284ba2801f5a452ecb0b796a039689a55
|
61744d85bbf2aefdf0fc27006acc2e742db9e152
|
/misoKG-master/unittests/test_util.py
|
ffae33d812ddb34df8199afecdbbf1d399e827ab
|
[] |
no_license
|
sunatthegilddotcom/perovskite-4
|
896da29694830a6b98c33050f1aa41258310bd59
|
dd21c8b6217c5859783a6a92e9b082aeea98f9e8
|
refs/heads/master
| 2021-07-03T13:36:08.618860
| 2017-09-25T02:18:44
| 2017-09-25T02:18:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
import numpy
from moe.optimal_learning.python.data_containers import HistoricalData
from moe.optimal_learning.python.geometry_utils import ClosedInterval
from moe.optimal_learning.python.python_version.domain import TensorProductDomain as pythonTensorProductDomain
from moe.optimal_learning.python.python_version.gaussian_process import GaussianProcess
from multifidelity_KG.model.covariance_function import MixedSquareExponential
from pes.covariance import ProductKernel
__author__ = 'jialeiwang'
def get_random_gp_data(space_dim, num_is, num_data_each_is, kernel_name):
""" Generate random gp data
:param space_dim:
:param num_is:
:param num_data_each_is:
:param kernel_name: currently it's either 'mix_exp' or 'prod_ker'
:return:
"""
sample_var = 0.01
if kernel_name == "mix_exp":
hyper_params = numpy.random.uniform(size=(num_is+1)*(space_dim+1))
cov = MixedSquareExponential(hyper_params, space_dim+1, num_is)
elif kernel_name == "prod_ker":
        hyper_params = numpy.random.uniform(size=(num_is+1)*(num_is+2)//2+space_dim+1)
cov = ProductKernel(hyper_params, space_dim+1, num_is+1)
else:
raise NotImplementedError("invalid kernel")
python_search_domain = pythonTensorProductDomain([ClosedInterval(bound[0], bound[1]) for bound in numpy.repeat([[-10., 10.]], space_dim+1, axis=0)])
data = HistoricalData(space_dim+1)
init_pts = python_search_domain.generate_uniform_random_points_in_domain(2)
init_pts[:,0] = numpy.zeros(2)
data.append_historical_data(init_pts, numpy.zeros(2), numpy.ones(2) * sample_var)
gp = GaussianProcess(cov, data)
points = python_search_domain.generate_uniform_random_points_in_domain(num_data_each_is)
for pt in points:
for i in range(num_is):
pt[0] = i
val = gp.sample_point_from_gp(pt, sample_var)
data.append_sample_points([[pt, val, sample_var], ])
gp = GaussianProcess(cov, data)
return hyper_params, data
|
[
"hwcxy2008@yahoo.com"
] |
hwcxy2008@yahoo.com
|
f97951372cdf93323daf822c02f98334af5e9cc1
|
951a84f6fafa763ba74dc0ad6847aaf90f76023c
|
/Solu1038.py
|
83fd33ead5ac7a894bb8650f3c1a5cb59e1b7494
|
[] |
no_license
|
SakuraGo/leetcodepython3
|
37258531f1994336151f8b5c8aec5139f1ba79f8
|
8cedddb997f4fb6048b53384ac014d933b6967ac
|
refs/heads/master
| 2020-09-27T15:55:28.353433
| 2020-02-15T12:00:02
| 2020-02-15T12:00:02
| 226,550,406
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
# 1038. Binary Search Tree to Greater Sum Tree
# Given the root of a binary search tree with distinct node values, modify the tree so that
# every node's new value equals the sum of all values in the original tree that are greater
# than or equal to node.val.
#
# As a reminder, a binary search tree satisfies these constraints:
#
# The left subtree of a node contains only nodes with keys less than the node's key.
# The right subtree of a node contains only nodes with keys greater than the node's key.
# The left and right subtrees must each also be binary search trees.
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    ## reverse in-order traversal (right, node, left): accumulate the running sum and rewrite each node's val
def __init__(self):
self._sum = 0
def houxubianli(self,node:TreeNode):
if node is None:
return
if node.right is not None:
self.houxubianli(node.right)
self._sum += node.val
node.val = self._sum
if node.left is not None:
self.houxubianli(node.left)
def bstToGst(self, root: TreeNode) -> TreeNode:
if root is not None:
self.houxubianli(root)
return root
|
[
"452681917@qq.com"
] |
452681917@qq.com
|
6550363a57ad1b6db87f3cf5d6435274ef15d7aa
|
bb53229d1f296f8b7b3f7eb623673031474a4664
|
/robot/envs/spaces/utils.py
|
68b587bb7aabf4062db73c3a283532a2a35d4e9a
|
[] |
no_license
|
hzaskywalker/torch_robotics
|
03f2de64972d47752e45ae0a0c30334bf6c52c6c
|
0f3d5a46e81d734d514bffcbf4ed84cdcdbd4c86
|
refs/heads/master
| 2023-07-28T17:04:17.915787
| 2021-09-14T04:30:55
| 2021-09-14T04:30:55
| 405,022,434
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
import numpy as np
import torch
from collections import OrderedDict
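# Map numpy dtypes to the torch dtypes used when converting arrays to tensors.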
TYPE_DICT = {
np.dtype('float32'): torch.float,
np.dtype('float64'): torch.float,
np.dtype('int64'): torch.long,
np.dtype('int32'): torch.long,
}
def cat(out, dim):
if isinstance(out[0], np.ndarray):
return np.concatenate(out, axis=dim)
else:
return torch.cat(out, dim=dim)
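# Flatten an ndarray/tensor (or a nested OrderedDict of them) into one flat
# vector, keeping the leading batch dimension when is_batch is True.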
def serialize(v, is_batch):
if isinstance(v, np.ndarray) or isinstance(v, torch.Tensor):
if is_batch:
return v.reshape(v.shape[0], -1)
else:
return v.reshape(-1)
else:
return cat([serialize(v, is_batch) for _, v in v.items()], dim=-1)
def size(shape):
if isinstance(shape, OrderedDict):
return sum([size(i) for i in shape.values()])
elif isinstance(shape, tuple) or isinstance(shape, list):
return int(np.prod(shape))
else:
raise NotImplementedError
return shape.size
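# Inverse of serialize: reshape a flat vector back into the given shape spec.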
def deserialize(v, shape, is_batch):
if isinstance(v, np.ndarray) or isinstance(v, torch.Tensor):
if is_batch:
return v.reshape(v.shape[0], *shape)
else:
return v.reshape(*shape)
elif isinstance(shape, OrderedDict):
raise NotImplementedError
l = 0
out = OrderedDict()
for i, spec in shape.items():
s = size(spec)
d = v[l:l + s] if not is_batch else v[:, l:l + s]
out[i] = deserialize(d, spec, is_batch)
l += s
return out
def to_numpy(v):
if isinstance(v, np.ndarray):
return v
elif isinstance(v, torch.Tensor):
return v.detach().cpu().numpy()
elif isinstance(v, OrderedDict):
return OrderedDict([(i, to_numpy(v))for i, v in v.items()])
else:
raise NotImplementedError
def to_tensor(data, device):
if isinstance(data, torch.Tensor):
return data.to(device)
elif isinstance(data, np.ndarray):
return torch.tensor(data, dtype=TYPE_DICT[data.dtype], device=device)
elif isinstance(data, OrderedDict):
return OrderedDict([(i, to_tensor(v, device)) for i, v in data.items()])
else:
raise NotImplementedError
|
[
"hzaskywalker@gmail.com"
] |
hzaskywalker@gmail.com
|
a4c01c7f985b89905f2d4d7b79e5eff433bea81d
|
ce7f06c834579bc10385d0ee3a1238f66bc9bad4
|
/Malware_Imaging/project/models/simple_colored_cnns.py
|
fb0607cdfe7c354255e7942a9766fd8fd9227f59
|
[
"MIT"
] |
permissive
|
FullPint/INSuRE-Malware-Imaging
|
90cdada4e731c9ea1de8223538da9c338ba80e73
|
e1906821305b0eb32e85605c23e4c8821dd156fb
|
refs/heads/master
| 2020-05-17T06:24:52.442688
| 2019-05-03T02:39:11
| 2019-05-03T02:39:11
| 183,558,149
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
# User Defined Imports
from Malware_Imaging.project.models.models_config import DEFAULT_INPUT_SIZES
from Malware_Imaging.project.models.image_generators import get_simple_color_settings
from Malware_Imaging.project.models.make_simple_gray_scale_cnn import (
make_simple_gray_scale_cnn,
)
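# Each runner trains the simple CNN at one of three preset color input sizes,
# wiring up the matching train/validation generator settings.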
def run_small_colored_cnn():
size = DEFAULT_INPUT_SIZES["SMALL-COLOR"]
train_gen_settings, valid_gen_settings, train_flow_settings, valid_flow_settings = get_simple_color_settings(
size
)
make_simple_gray_scale_cnn(
size,
"small_colored_v1",
train_gen_settings,
valid_gen_settings,
train_flow_settings,
valid_flow_settings,
)
def run_medium_colored_cnn():
size = DEFAULT_INPUT_SIZES["MEDIUM-COLOR"]
train_gen_settings, valid_gen_settings, train_flow_settings, valid_flow_settings = get_simple_color_settings(
size
)
make_simple_gray_scale_cnn(
size,
"medium_colored_sv1",
train_gen_settings,
valid_gen_settings,
train_flow_settings,
valid_flow_settings,
)
def run_large_colored_cnn():
size = DEFAULT_INPUT_SIZES["LARGE-COLOR"]
train_gen_settings, valid_gen_settings, train_flow_settings, valid_flow_settings = get_simple_color_settings(
size
)
make_simple_gray_scale_cnn(
size,
"large_colored_v1",
train_gen_settings,
valid_gen_settings,
train_flow_settings,
valid_flow_settings,
)
|
[
"davila.alec@gmail.com"
] |
davila.alec@gmail.com
|
f9da787f6c75897ec064f52e7a4927aafdffb2ad
|
78faa6c18d7ced1338323cf542d2abfc436f3ff2
|
/mighty_rover/nodes/move_obs_3.py
|
7ea30c89a6592a2b89f45c26a4922e07a1e44823
|
[] |
no_license
|
klab-2021/pf1_dev
|
06a78ea73851fc6c9c03ef3d8e94aed01bcda139
|
1cff5fc7df22ee2dffdd74c3c12a9dc92692948b
|
refs/heads/master
| 2023-06-30T20:19:16.380880
| 2021-07-27T09:40:24
| 2021-07-27T09:40:24
| 389,831,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
#!/usr/bin/env python
#################################################################################
# Copyright 2018 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
# Authors: Gilbert #
import rospy
import time
from geometry_msgs.msg import Twist
from gazebo_msgs.msg import ModelState, ModelStates
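# Oscillates the 'unit_cylinder_3' obstacle along the x axis in Gazebo by
# alternately publishing positive and negative linear velocities.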
class Moving():
def __init__(self):
self.pub_model = rospy.Publisher('gazebo/set_model_state', ModelState, queue_size=1)
self.moving()
def moving(self):
while not rospy.is_shutdown():
obstacle = ModelState()
model = rospy.wait_for_message('gazebo/model_states', ModelStates)
for i in range(len(model.name)):
if model.name[i] == 'unit_cylinder_3':
obstacle.model_name = 'unit_cylinder_3'
obstacle.pose = model.pose[i]
obstacle.twist = Twist()
obstacle.twist.linear.x = 6
self.pub_model.publish(obstacle)
time.sleep(4)
if model.name[i] == 'unit_cylinder_3':
obstacle.model_name = 'unit_cylinder_3'
obstacle.pose = model.pose[i]
obstacle.twist = Twist()
obstacle.twist.linear.x = -5
self.pub_model.publish(obstacle)
time.sleep(4)
def main():
rospy.init_node('moving_obstacle')
moving = Moving()
if __name__ == '__main__':
main()
|
[
"klab2021.pf1@gmail.com"
] |
klab2021.pf1@gmail.com
|
62a0bf5fe5020083f2fbc613f042dd35a512e463
|
a2672fe19f0dfc29fd8000837c1b01f4d10ad227
|
/comment/views.py
|
d8dd2611d9655a118ea9e98456e779aeada94a93
|
[
"MIT"
] |
permissive
|
HaibaraAi123/DjangoBlog-chenfeng123.cn
|
fa6fa4eb446acba713291e6976a9e6dddd73b0e9
|
ca5a90a4ad91e383a5ff25131488527f5733e216
|
refs/heads/master
| 2022-11-23T22:21:18.343102
| 2020-08-05T06:46:25
| 2020-08-05T06:46:25
| 285,197,046
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from myblog.models import Article
from .forms import CommentForm
from .models import BlogComment
# Create your views here.
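# Post a top-level comment or a nested reply on an article (login required).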
@login_required(login_url='/userprofile/login')
def post_comment(request, article_id, parent_comment_id=None):
article = get_object_or_404(Article, id=article_id)
if request.method == 'POST':
comment_form = CommentForm(request.POST)
if comment_form.is_valid():
new_comment = comment_form.save(commit=False)
new_comment.article = article
new_comment.author = request.user
            # second-level reply: attach it under the root comment of the thread
if parent_comment_id:
                parent_comment = BlogComment.objects.get(id=parent_comment_id)
new_comment.parent_id = parent_comment.get_root().id
                new_comment.reply_to = parent_comment.author
new_comment.save()
return HttpResponse('200')
new_comment.save()
return redirect("myblog:detail", id=article_id)
else:
return HttpResponse("error!")
elif request.method == 'GET':
comment_form = CommentForm()
context = {
'comment_form': comment_form,
'article_id': article_id,
'parent_comment_id': parent_comment_id,
}
return render(request, 'comment/reply.html', context)
else:
return HttpResponse("Error! Only get or post!")
|
[
"1224574671@qq.com"
] |
1224574671@qq.com
|
9fbe50b06a7870c22d1e6c6b98cd817577078729
|
d70cdc9a46676acb4c4396cd69f1e8253921858b
|
/app/views/__init__.py
|
e78a3cbea7c080c7ad4312988d3cb8c9d64732a5
|
[] |
no_license
|
Lemon000/Blog
|
c57105b1260bcfa13bb413713fb8e2bd57400b15
|
4654a94a72039c8b4664c15787ca6d5a56c67d61
|
refs/heads/master
| 2022-12-13T04:31:46.968468
| 2020-01-08T08:41:45
| 2020-01-08T08:41:45
| 232,509,848
| 0
| 0
| null | 2022-12-08T01:07:18
| 2020-01-08T07:57:39
|
HTML
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
from .main import main
from .user import user
from .posts import posts
from .admin import admin
# blueprint configuration tuple
DEFAULT_BLUEPRINT = (
    # (blueprint, URL prefix) pairs
(main, ''),
(user, '/user'),
(posts, '/posts'),
(admin, '/admin')
)
# register the blueprints on the app
def config_blueprint(app):
for blue_print, url_prefix in DEFAULT_BLUEPRINT:
app.register_blueprint(blue_print, url_prefix=url_prefix)
|
[
"2221487809@qq.com"
] |
2221487809@qq.com
|
d8a696dd4e3ed56236c684116cb6ccc574e6e05a
|
baeebadd5786ee0e96626741da650d5c2e277b52
|
/lib/bes/shell_framework/shell_framework_defaults.py
|
84933a2b7fb902449ecc26b067f1a36fb9ecd007
|
[
"Apache-2.0"
] |
permissive
|
reconstruir/bes
|
e75b25868d0a97a1a167d92b62e11996cb6c4774
|
b9dd35b518848cea82e43d5016e425cc7dac32e5
|
refs/heads/master
| 2023-08-31T21:11:45.482193
| 2023-08-19T20:07:04
| 2023-08-19T20:07:04
| 421,598,217
| 0
| 1
|
NOASSERTION
| 2022-12-21T11:50:29
| 2021-10-26T22:08:30
|
Python
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os
class shell_framework_defaults(object):
ADDRESS = 'https://gitlab.com/rebuilder/bes_shell.git'
FRAMEWORK_BASENAME = 'bes_shell_framework'
REVISION_BASENAME = 'bes_shell_framework_revision.txt'
REVISION = 'latest'
DEST_DIR = os.getcwd()
|
[
"git@fateware.com"
] |
git@fateware.com
|
637ccc02876759cda876e955d0f064102b0fe89d
|
89b6e970ed3ac0e02b6ea04482510d0fbd84983e
|
/tests.py
|
23eaa500119987d13380767c51892f133c83dbab
|
[] |
no_license
|
sirjordan/yamp
|
00aab9361621c384610a9c9af2d608d445aef709
|
28e364815b2447823f890f1e95b918f17ffe1919
|
refs/heads/master
| 2021-01-11T17:06:21.664118
| 2017-01-22T18:45:12
| 2017-01-22T18:45:12
| 79,719,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
import unittest
import dices
class YampDiceTests(unittest.TestCase):
def test_roll_dices(self):
_dices = dices.roll_dices()
self.assertEqual(len(_dices), 5)
def test_score_matching(self):
self.assertEqual(dices.score_matching([1, 1, 1, 3, 4], 1), 3)
self.assertEqual(dices.score_matching([2, 2, 2, 5, 6], 2), 6)
self.assertEqual(dices.score_matching([3, 3, 3, 3, 4], 3), 12)
self.assertEqual(dices.score_matching([4, 4, 5, 5, 5], 4), 8)
self.assertEqual(dices.score_matching([1, 1, 2, 2, 5], 5), 5)
self.assertEqual(dices.score_matching([1, 3, 6, 6, 6], 6), 18)
self.assertEqual(dices.score_matching([1, 3, 6, 6, 6], 2), 0)
def test_score_n_of_a_kind_throw_ex(self):
args = ([1, 3, 6, 6, 6], 5)
self.assertRaises(ValueError, dices.score_n_of_a_kind, *args)
def test_score_n_of_a_kind(self):
self.assertEqual(dices.score_n_of_a_kind([2, 3, 4, 4, 4], 3), 17)
self.assertEqual(dices.score_n_of_a_kind([4, 5, 5, 5, 5], 4), 24)
self.assertEqual(dices.score_n_of_a_kind([1, 2, 3, 4, 5], 4), 0)
def test_count_n_of_a_kind(self):
self.assertEqual(dices.count_equal([2, 3, 4, 4, 4], 4), 3)
self.assertEqual(dices.count_equal([2, 3, 4, 4, 4], 2), 1)
self.assertEqual(dices.count_equal([2, 3, 4, 4, 4], 6), 0)
def test_score_full(self):
self.assertEqual(dices.score_full([2, 2, 5, 5, 5]), 25)
self.assertEqual(dices.score_full([2, 2, 5, 5, 1]), 0)
self.assertEqual(dices.score_full([3, 1, 3, 1, 1]), 25)
self.assertEqual(dices.score_full([1, 2, 3, 4, 5]), 0)
self.assertEqual(dices.score_full([2, 3, 3, 3, 2]), 25)
def test_score_chance(self):
self.assertEqual(dices.score_chance([1, 1, 3, 3, 5]), 13)
def test_score_yamp(self):
self.assertEqual(dices.score_yamp([1, 1, 1, 1, 1]), 50)
self.assertEqual(dices.score_yamp([1, 2, 1, 1, 1]), 0)
def test_score_straight(self):
self.assertEqual(dices.score_straight([1, 2, 3, 4, 6], 4), 30)
self.assertEqual(dices.score_straight([2, 3, 4, 5, 6], 4), 30)
self.assertEqual(dices.score_straight([1, 2, 3, 4, 5], 5), 40)
self.assertEqual(dices.score_straight([2, 3, 4, 5, 6], 5), 40)
self.assertEqual(dices.score_straight([1, 2, 3, 2, 2], 5), 0)
self.assertEqual(dices.score_straight([2, 3, 2, 5, 3], 5), 0)
self.assertEqual(dices.score_straight([1, 3, 4, 5, 6], 4), 30)
self.assertEqual(dices.score_straight([6, 5, 4, 3, 1], 4), 30)
self.assertEqual(dices.score_straight([6, 2, 3, 4, 5], 5), 40)
def test_score_straight_throw_ex(self):
args = ([1, 3, 6, 6, 6], 3)
self.assertRaises(ValueError, dices.score_straight, *args)
if __name__ == '__main__':
unittest.main()
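# Hedged sketch (not part of the original test module): one plausible
# implementation of dices.score_matching, inferred purely from the
# assertions above -- score a face as its value times its count.
def score_matching_sketch(dice, face):
    # e.g. [1, 1, 1, 3, 4] with face 1 -> 1 * 3 = 3
    return face * dice.count(face)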
|
[
"sirjordan1988@gmail.com"
] |
sirjordan1988@gmail.com
|
3ad6baac15e6e6e76d87073d0fed291bcbd36b0d
|
fd5fa2545f42f6f216c543ac6c741e02909dcc60
|
/src/modules/web/java.py
|
af09045c5ded92c9928953a2c2ff44af21773283
|
[] |
no_license
|
noptrix/nullscan
|
2a192d6ee9d6498cccfbc90b5b240ed2d00559a8
|
ddc052c8d7d43a60fc00ea40d85111d5bd7a282e
|
refs/heads/master
| 2023-02-12T16:33:11.547160
| 2021-01-06T20:00:32
| 2021-01-06T20:00:32
| 255,212,633
| 52
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,992
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*- ########################################################
# ____ _ __ #
# ___ __ __/ / /__ ___ ______ ______(_) /___ __ #
# / _ \/ // / / (_-</ -_) __/ // / __/ / __/ // / #
# /_//_/\_,_/_/_/___/\__/\__/\_,_/_/ /_/\__/\_, / #
# /___/ team #
# #
# nullscan #
# A modular framework designed to chain and automate security tests #
# #
# FILE #
# java.py #
# #
# AUTHOR #
# noptrix@nullsecurity.net #
# #
################################################################################
# sys imports
# own imports
from modules.libs.base import Base, tool, timeout
class Java(Base):
""" Java module """
def __init__(self, target, opts):
""" init """
Base.__init__(self, target, opts)
self.host, self.port, self.scheme, self.path = self._parse_url(self.target)
return
@tool
def jexboss_web(self):
"""
DESCR: Check for known java deserialization vulns against JBoss, Jenkins,
and Apache Struts2. (ext)
TOOLS: jexboss
"""
self._jexboss(self.host, self.port, 'jexboss_web', scheme=self.scheme)
return
# EOF
|
[
"noptrix@nullsecurity.net"
] |
noptrix@nullsecurity.net
|
befad40b0a452ae5a9050b897e18ce3770d880a1
|
22557e9243fe8cae4205c738eeb0c0fe8a74a996
|
/src/main/python/SentimentAnalayzerRNNFinal.py
|
b1b82ed3252b1fdec728cd91c01cc74b19e0300e
|
[] |
no_license
|
piyumalanthony/sentiment-tagger
|
6896c3c5bf241ad3affd54af53b86fe44101010f
|
e4515621085fd2dde1b0c2e09e5bbbfe266d5477
|
refs/heads/master
| 2020-12-19T01:26:16.816291
| 2019-06-12T23:14:56
| 2019-06-12T23:14:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,469
|
py
|
# coding: utf-8
import numpy as np
import pandas as pd
import datetime
from random import randint
from gensim.models import word2vec
from gensim.models.fasttext import FastText
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
word2vec_model_name = "../../../corpus/analyzed/saved_models/word2vec_model_skipgram_300_10"
# word2vec_model_name = "../../../corpus/analyzed/saved_models/fasttext_model_skipgram_300_10"
# word2vec_model_name = "../../../corpus/analyzed/saved_models/wiki.si.bin"
num_features = 300
max_sentence_length = 50
batchSize = 24
lstmUnits = 64
numClasses = 2
iterations = 30000
labels = tf.placeholder(tf.int32, [batchSize, numClasses])
data = tf.placeholder(tf.float32, [batchSize, max_sentence_length, num_features])
def main():
# convert_to_vectors()
train_data_vectors, train_data_labels, test_data_vectors, test_data_labels = load_vectors()
print("Running tesnsorflow simulation.....")
loss, accuracy, prediction_values, optimizer = neural_network_model()
train_neural_network(loss, accuracy, optimizer, train_data_vectors, train_data_labels)
accuracy, precision, recall, f1 = measure_neural_network(accuracy, prediction_values, test_data_vectors, test_data_labels)
print("Accuracy: ", accuracy)
print("Precision: ", precision)
print("Recall: ", recall)
print("F1 Score: ", f1)
def convert_to_vectors():
comments = pd.read_csv("../../../corpus/analyzed/comments_tagged_remove.csv", ";")
train_data, test_data = train_test_split(comments, test_size=0.4, random_state=0)
train_data_vectors, train_data_labels = comments_to_vectors(train_data)
test_data_vectors, test_data_labels = comments_to_vectors(test_data)
np.save('./vectors/train_data_vectors.npy', train_data_vectors)
np.save('./vectors/train_data_labels.npy', train_data_labels)
np.save('./vectors/test_data_vectors.npy', test_data_vectors)
np.save('./vectors/test_data_labels.npy', test_data_labels)
def load_vectors():
train_data_vectors = np.load('./vectors/train_data_vectors.npy')
train_data_labels = np.load('./vectors/train_data_labels.npy')
test_data_vectors = np.load('./vectors/test_data_vectors.npy')
test_data_labels = np.load('./vectors/test_data_labels.npy')
return train_data_vectors, train_data_labels, test_data_vectors, test_data_labels
def comments_to_vectors(data):
    model = word2vec.Word2Vec.load(word2vec_model_name)  # load the gensim word2vec model used for the reported results
    # model = FastText.load_fasttext_format("../../../corpus/analyzed/saved_models/wiki.si.bin")  # alternative: pretrained fastText model
    # model = FastText.load_fasttext_format("../../../corpus/analyzed/saved_models/fasttext_model_skipgram_300.bin")  # alternative: self-trained fastText model
comment_vectors = []
comment_labels = []
for comment in data["comment"]:
comment_vectors.append(get_sentence_vector(model, comment))
for label in data["label"]:
if label == "POSITIVE":
comment_labels.append([0, 1])
else:
comment_labels.append([1, 0])
return np.array(comment_vectors), comment_labels
def get_sentence_vector(model, sentence):
sentence_vector = np.zeros([max_sentence_length, num_features])
counter = 0
index2word_set = set(model.wv.index2word)
for word in sentence.split():
if word in index2word_set:
sentence_vector[counter] = model[word]
counter += 1
if (counter == max_sentence_length):
break
else:
print("word not in word2vec model: " + word)
return sentence_vector
def get_batch(size, data, label):
batch_data = np.empty((size, max_sentence_length, num_features), dtype=float)
batch_label = []
for i in range(size):
random_int = randint(0, len(data) - 1)
batch_data[i] = data[random_int]
batch_label.append(label[random_int])
return batch_data, batch_label
def get_batch_order(size, data, label, batch_no):
batch_data = data[batch_no * size : (batch_no + 1) * size]
batch_label = label[batch_no * size : (batch_no + 1) * size]
return batch_data, batch_label
def neural_network_model():
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstmUnits)
lstm_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell, output_keep_prob=0.75)
value, _ = tf.nn.dynamic_rnn(lstm_cell, data, dtype=tf.float32)
weight = tf.Variable(tf.truncated_normal([lstmUnits, numClasses]))
bias = tf.Variable(tf.constant(0.1, shape=[numClasses]))
value = tf.transpose(value, [1, 0, 2])
last = tf.gather(value, int(value.get_shape()[0]) - 1)
prediction = (tf.matmul(last, weight) + bias)
correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
prediction_values = tf.argmax(prediction, 1)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=labels))
optimizer = tf.train.AdamOptimizer().minimize(loss)
return loss, accuracy, prediction_values, optimizer
def train_neural_network(loss, accuracy, optimizer, train_data, train_labels):
sess = tf.InteractiveSession()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
tf.summary.scalar('Loss', loss)
tf.summary.scalar('Accuracy', accuracy)
merged = tf.summary.merge_all()
logdir = "tensorboard/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + "/"
writer = tf.summary.FileWriter(logdir, sess.graph)
for i in range(iterations):
# Next Batch of reviews
next_batch, next_batch_labels = get_batch(batchSize, train_data, train_labels)
sess.run(optimizer, {data: next_batch, labels: next_batch_labels})
# Write summary to Tensorboard
if (i % 50 == 0):
summary = sess.run(merged, {data: next_batch, labels: next_batch_labels})
writer.add_summary(summary, i)
# Save the network every 10,000 training iterations
if (i % 9999 == 0 and i != 0):
save_path = saver.save(sess, "models/pretrained_lstm.ckpt", global_step=i)
print("saved to %s" % save_path)
writer.close()
def measure_neural_network(accuracy, prediction_values, test_data, test_labels):
sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint('models'))
overall_accuracy = 0
all_predictions = []
test_iterations = 80
for i in range(test_iterations):
next_batch, next_batch_labels = get_batch_order(batchSize, test_data, test_labels, i)
accuracy_this_batch = (sess.run(accuracy, {data: next_batch, labels: next_batch_labels})) * 100
predictions_this_batch = sess.run(prediction_values, {data: next_batch, labels: next_batch_labels})
overall_accuracy = overall_accuracy + accuracy_this_batch
all_predictions = all_predictions + predictions_this_batch.tolist()
print("Accuracy for this batch:", accuracy_this_batch)
true_labels = tf.argmax(test_labels, 1).eval()
precision = precision_score(true_labels.tolist()[0:batchSize * test_iterations], all_predictions)
f1 = f1_score(true_labels.tolist()[0:batchSize * test_iterations], all_predictions)
recall = recall_score(true_labels.tolist()[0:batchSize * test_iterations], all_predictions)
overall_accuracy = overall_accuracy / (test_iterations * 100)
print(confusion_matrix(true_labels.tolist()[0:batchSize * test_iterations], all_predictions).ravel())
return overall_accuracy, precision, recall, f1
main()
# Back-calculating FN and FP counts from the reported recall and precision:
#   recall    R = 0.891712707182    ->  fn = tp * (1 - R) / R = tp * 0.12143742255306716
#   precision P = 0.853146853146853 ->  fp = tp * (1 - P) / P = tp * 0.1721311475409838
# With tp = 885:
#   fn = 885 * (1 - R) / R
#   fp = 885 * (1 - P) / P
#
# fasttext
# ('Accuracy: ', 0.8619791641831398)
# ('Precision: ', 0.8772874058127018)
# ('Recall: ', 0.8419421487603306)
# ('F1 Score: ', 0.8592514496573538)
# ('Accuracy: ', 0.8661458320915699)
# ('Precision: ', 0.8967813540510544)
# ('Recall: ', 0.8347107438016529)
# ('F1 Score: ', 0.864633493846977)
# skipgram 300_10
# [852 100 160 808]
# Accuracy: 0.8651041679084301
# Precision: 0.8898678414096917
# Recall: 0.8347107438016529
# F1 Score: 0.861407249466951
# gensim.fastext 300_10 homemade
# [865 87 159 809]
# Accuracy: 0.8697916679084301
# Precision: 0.9029017857142857
# Recall: 0.8357438016528925
# F1 Score: 0.8680257510729613
# [855 97 143 825]
# Accuracy: 0.8750000044703483
# Precision: 0.8947939262472885
# Recall: 0.8522727272727273
# F1 Score: 0.873015873015873
# fasttext pretrained
# [821 131 195 773]
# Accuracy: 0.8333333313465119
# Precision: 0.8550884955752213
# Recall: 0.7985537190082644
# F1 Score: 0.8258547008547009
# [803 149 200 768]
# Accuracy: 0.8192708320915699
# Precision: 0.8375136314067612
# Recall: 0.7933884297520661
# F1 Score: 0.8148541114058356
# fasttext homemade
# [872 80 171 797]
# Accuracy: 0.8703125007450581
# Precision: 0.9087799315849487
# Recall: 0.8233471074380165
# F1 Score: 0.8639566395663957
# [861 91 148 820]
# Accuracy: 0.8770833320915699
# Precision: 0.9001097694840834
# Recall: 0.8471074380165289
# F1 Score: 0.8728046833422033
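# Hedged worked example (not in the original file): the FN/FP
# back-calculation from the comments above, as executable code.
def _fn_fp_from_scores(tp=885, recall=0.891712707182, precision=0.853146853146853):
    fn = tp * (1 - recall) / recall        # from recall = tp / (tp + fn)
    fp = tp * (1 - precision) / precision  # from precision = tp / (tp + fp)
    return fn, fp  # roughly (107.5, 152.3)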
|
[
"theisuru@gmail.com"
] |
theisuru@gmail.com
|
84f5662ca89b35ab581b56f3308fd2648169d269
|
789fe602dbd2d36fcd42cfd729758790a526055d
|
/collections_orderdict.py
|
5ba3ea87c139167936740aaa0201bc276d4a3423
|
[] |
no_license
|
charukhandelwal/Hackerrank
|
604b6be1b11c718b9c99e4a3eae9544ff629576d
|
8ac84c481804d5b7a00fffc566312037ccb00685
|
refs/heads/master
| 2022-12-29T01:12:11.247527
| 2020-10-17T17:10:50
| 2020-10-17T17:10:50
| 304,927,588
| 1
| 0
| null | 2020-10-17T17:10:51
| 2020-10-17T17:03:23
| null |
UTF-8
|
Python
| false
| false
| 2,249
|
py
|
"""
collections.OrderedDict
An OrderedDict is a dictionary that remembers the order of the keys that were inserted first. If a new entry overwrites an existing entry, the original insertion position is left unchanged.
Example
Code
>>> from collections import OrderedDict
>>>
>>> ordinary_dictionary = {}
>>> ordinary_dictionary['a'] = 1
>>> ordinary_dictionary['b'] = 2
>>> ordinary_dictionary['c'] = 3
>>> ordinary_dictionary['d'] = 4
>>> ordinary_dictionary['e'] = 5
>>>
>>> print ordinary_dictionary
{'a': 1, 'c': 3, 'b': 2, 'e': 5, 'd': 4}
>>>
>>> ordered_dictionary = OrderedDict()
>>> ordered_dictionary['a'] = 1
>>> ordered_dictionary['b'] = 2
>>> ordered_dictionary['c'] = 3
>>> ordered_dictionary['d'] = 4
>>> ordered_dictionary['e'] = 5
>>>
>>> print ordered_dictionary
OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)])
Task
You are the manager of a supermarket.
You have a list of N items together with their prices that consumers bought on a particular day.
Your task is to print each item_name and net_price in order of its first occurrence.
item_name = Name of the item.
net_price = Quantity of the item sold multiplied by the price of each item.
Input Format
The first line contains the number of items, N.
The next N lines contains the item's name and price, separated by a space.
Constraints
0<N≤100
Output Format
Print the item_name and net_price in order of its first occurrence.
Sample Input
9
BANANA FRIES 12
POTATO CHIPS 30
APPLE JUICE 10
CANDY 5
APPLE JUICE 10
CANDY 5
CANDY 5
CANDY 5
POTATO CHIPS 30
Sample Output
BANANA FRIES 12
POTATO CHIPS 60
APPLE JUICE 20
CANDY 20
Explanation
BANANA FRIES: Quantity bought: 1, Price: 12
Net Price: 12
POTATO CHIPS: Quantity bought: 2, Price: 30
Net Price: 60
APPLE JUICE: Quantity bought: 2, Price: 10
Net Price: 20
CANDY: Quantity bought: 4, Price: 5
Net Price: 20
"""
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import OrderedDict, defaultdict
od = OrderedDict()     # remembers first-occurrence order of item names
m = defaultdict(list)  # item name -> list of per-purchase prices
for i in xrange(input()):
    k = raw_input().split()
    # all tokens but the last form the item name; the last token is the price
    k[0] = ' '.join(k[0:len(k) - 1])
    k[1] = int(k[len(k) - 1])
    k = k[0:2]
    m[k[0]].append(k[1])
    od[k[0]] = i
for i in od:
    print i, sum(m[i])
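# Hedged Python 3 port (not part of the original Python 2 submission; left
# commented out so it does not clash with the loop above -- run it instead).
# rsplit does the name/price split in one step, and a plain dict keeps
# insertion order on Python 3.7+.
# net = {}
# for _ in range(int(input())):
#     name, price = input().rsplit(' ', 1)
#     net[name] = net.get(name, 0) + int(price)
# for name, total in net.items():
#     print(name, total)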
|
[
"ayush.aceit@gmail.com"
] |
ayush.aceit@gmail.com
|
c42471b2570abccab19f497acfe0fc3d2f29ebf5
|
0869d7edac80e8aebe951682a2cc311a083eade3
|
/Python/example_controllers/core_concepts/image_capture.py
|
d702e8daf8c765c5d81bbd10f4bc8da68847dedd
|
[
"BSD-2-Clause"
] |
permissive
|
threedworld-mit/tdw
|
7d5b4453832647733ff91ad7a7ce7ec2320454c1
|
9df96fba455b327bb360d8dd5886d8754046c690
|
refs/heads/master
| 2023-09-01T11:45:28.132298
| 2023-08-31T16:13:30
| 2023-08-31T16:13:30
| 245,492,977
| 427
| 75
|
BSD-2-Clause
| 2023-09-14T17:36:12
| 2020-03-06T18:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.add_ons.third_person_camera import ThirdPersonCamera
from tdw.add_ons.image_capture import ImageCapture
from tdw.backend.paths import EXAMPLE_CONTROLLER_OUTPUT_PATH
"""
Example implementation of the ImageCapture add-on.
"""
c = Controller()
object_id = c.get_unique_id()
camera = ThirdPersonCamera(position={"x": 2, "y": 1.6, "z": -0.6},
look_at=object_id,
avatar_id="a")
c.add_ons.append(camera)
# Add the ImageCapture add-on.
path = EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath("image_capture")
print(f"Images will be save to: {path.resolve()}")
capture = ImageCapture(path=path, avatar_ids=["a"], pass_masks=["_img", "_id"])
c.add_ons.append(capture)
# This will create the scene and the object.
# Then, the ThirdPersonCamera add-on will create an avatar.
# Then, the ImageCapture add-on will save an image to disk.
resp = c.communicate([TDWUtils.create_empty_room(12, 12),
c.get_add_object(model_name="iron_box",
position={"x": 1, "y": 0, "z": -0.5},
object_id=object_id)])
c.communicate({"$type": "terminate"})
|
[
"alters@mit.edu"
] |
alters@mit.edu
|
e83dff7ce94dc215a8a6efb2a0e98eab8e9361bd
|
f347ddf8f11b748b09646aabd3c4d807e49d6e86
|
/reports/views/personals.py
|
9d672995cc9fd30458c06d69b93be67d73c12078
|
[] |
no_license
|
gitavk/fcbp
|
b630a8570b46557ee0ffd20ae1baa57741147766
|
02ffcc54a805861a098952b388bfd28ec69b176a
|
refs/heads/master
| 2021-01-17T02:19:58.572362
| 2018-11-12T07:09:07
| 2018-11-12T07:09:07
| 39,645,922
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,676
|
py
|
"""
Statistic of the client Personals.
"""
# -*- coding: utf-8 -*-
from datetime import timedelta, datetime
from collections import defaultdict
from django.db.models import Sum
from django.utils.translation import ugettext as _
from employees.models import Employee
from clients.models import ClientPersonal, UseClientPersonal
from products.models import Personal
from reports import styles
from .base import Report
class ActivePersonal(Report):
file_name = 'list_active_personal'
sheet_name = 'report'
tpl_start_row = 7
table_headers = [
(_('client'), 6000),
(_('# uid'), 2000),
(_('card number'), 2000),
(_('phone'), 4000),
(_('tariff'), 6000),
(_('attribute'), 2000),
(_('amount'), 2000),
(_('date begin'), 4000),
(_('date end'), 4000),
(_('used'), 2000),
(_('last visit'), 3000),
(_('prolongation'), 2000),
(_('tariff club card'), 2000),
(_('club card period'), 6000),
(_('schedule s'), 6000),
(_('extra clients'), 6000),
(_('coach'), 4000),
]
table_styles = {
7: styles.styled,
8: styles.styled,
10: styles.styled,
14: styles.style_cw
}
def initial(self, request, *args, **kwargs):
super(ActivePersonal, self).initial(request, *args, **kwargs)
self.total_main_rows = 0
self.products = defaultdict(int)
        self.row_heiht = 26*20  # row height: 26 pt, in xlwt's 1/20-point units
try:
personal_id = int(self.request.query_params.get('pc'))
self.personal = Personal.objects.get(pk=personal_id)
except (ValueError, Personal.DoesNotExist):
self.personal = 'all'
def get_fdate(self):
return datetime.now()
def get_title(self, **kwargs):
msg = _('list active club cards')
msg += _(' created at: {date}.')
tdate = self.get_fdate().strftime('%d.%m.%Y %H:%M')
return msg.format(date=tdate)
def get_data(self):
rows = []
data = ClientPersonal.objects.filter(status=1).order_by('date_end')
if self.personal != 'all':
data = data.filter(personal=self.personal)
for row in data:
fname = row.client.full_name
uid = row.client.uid
card = row.client.card
phone = row.client.mobile or row.client.phone or ''
tariff = row.personal.short_name
amount = row.summ_amount
date_begin = row.date_begin.strftime('%d.%m.%Y')
date_end = row.date_end.strftime('%d.%m.%Y')
# last visits info
visits = row.visits.all()
if visits:
last_visit = visits.last().date.strftime('%d.%m.%Y')
else:
last_visit = ''
prolongation = row.prolongation.aggregate(
days=Sum('days')).get('days', '')
            # club card data, if needed for the current personal card
cc_name = ''
cc_period = ''
if row.product.club_card_only and row.client.active_cc_first:
club_card = row.client.active_cc_first
cc_name = club_card.short_name
dbegin = club_card.date_begin.strftime('%d.%m.%Y')
dend = club_card.date_end.strftime('%d.%m.%Y')
cc_period = "{}-{}".format(dbegin, dend)
            # generate the credit payment schedule
schedule = []
for payment in row.schedule_payments():
if payment[0] <= self.get_fdate():
continue
pdate = payment[0].strftime('%d.%m.%Y')
pamount = "{:,}".format(payment[1]).replace(',', ' ')
schedule.append("%s - %s" % (pamount, pdate))
schedule = "; \n".join(schedule)
# extra clients info
extra_clients = row.get_extra_clients
main_ecs = ", ".join([ec.initials for ec in extra_clients])
instructor = row.instructor.initials if row.instructor else ''
# line for main client
self.total_main_rows += 1
self.products[tariff] += 1
rows.append((
fname, uid, card, phone, tariff, 1, amount,
date_begin, date_end, len(visits), last_visit,
prolongation, cc_name, cc_period,
schedule, main_ecs, instructor
))
            # lines for extra clients
for curr_ec in extra_clients:
fname = curr_ec.full_name
uid = curr_ec.uid
phone = curr_ec.mobile or curr_ec.phone or ''
                # club card data, if needed for the current personal card
cc_name = ''
cc_period = ''
if row.product.club_card_only and curr_ec.active_cc_first:
club_card = curr_ec.active_cc_first
cc_name = club_card.short_name
dbegin = club_card.date_begin.strftime('%d.%m.%Y')
dend = club_card.date_end.strftime('%d.%m.%Y')
cc_period = "{}-{}".format(dbegin, dend)
slave_ecs = [ec for ec in extra_clients if ec != curr_ec]
slave_ecs += [row.client]
slave_ecs = ",".join([ec.initials for ec in slave_ecs])
self.products[tariff] += 1
                # keep the card column empty so the extra-client rows stay
                # aligned with the 17-column header (extra clients carry no card)
                rows.append((
                    fname, uid, '', phone, tariff, 0, amount, date_begin, date_end,
                    len(visits), last_visit, prolongation, cc_name, cc_period,
                    schedule, slave_ecs, instructor
                ))
return rows
def write_heads(self):
self.ws.write_merge(
self.row_num, self.row_num, 3, 5, _('tariff'), styles.styleh)
if self.personal == 'all':
self.ws.write(self.row_num, 6, _('all'), styles.styleh)
else:
self.ws.write_merge(
self.row_num, self.row_num, 6, 9,
self.personal.name, styles.styleh)
self.row_num += 2
super(ActivePersonal, self).write_heads()
def write_bottom(self):
self.ws.write_merge(
self.row_num, self.row_num, 0, 1, _('total cards'))
self.ws.write(self.row_num, 2, self.total_main_rows, styles.styleh)
for row_num, product in enumerate(self.products, self.row_num + 1):
self.ws.write_merge(row_num, row_num, 0, 1, product)
self.ws.write(
row_num, 2, self.products.get(product), styles.styleh)
class UsePersonals(Report):
"""Personals visits by dates with employee info"""
file_name = 'trainers_personal'
sheet_name = 'report'
tpl_start_row = 7
table_headers = [
(_('date'), 3000),
(_('time'), 3000),
(_('card number'), 2000),
(_('client'), 6000),
(_('tariff'), 6000),
(_('coach'), 5000),
]
table_styles = {
0: styles.styled,
1: styles.stylet,
}
def initial(self, request, *args, **kwargs):
super(UsePersonals, self).initial(request, *args, **kwargs)
self.products = defaultdict(int)
try:
personal_id = int(self.request.query_params.get('c'))
self.instructor = Employee.objects.get(pk=personal_id)
except (ValueError, Employee.DoesNotExist):
self.instructor = 'all'
def get_title(self, **kwargs):
msg = _('use personals by trainers')
msg += _(' created at: {date}.')
date = datetime.now().strftime('%d.%m.%Y %H:%M')
return msg.format(date=date)
def write_title(self):
super(UsePersonals, self).write_title()
msg = _('from: {fdate} to {tdate}')
fdate = self.get_fdate().strftime('%d.%m.%Y')
tdate = self.get_tdate().strftime('%d.%m.%Y')
msg = msg.format(fdate=fdate, tdate=tdate)
ln_head = len(self.table_headers) - 1
self.ws.write_merge(1, 1, 0, ln_head, msg, styles.styleh)
def get_data(self):
rows = []
fdate = self.get_fdate()
tdate = self.get_tdate() + timedelta(1)
tdate = tdate.replace(hour=0, minute=0, second=0)
data = UseClientPersonal.objects.filter(
date__range=(fdate, tdate)).order_by('date')
if self.instructor != 'all':
data = data.filter(instructor=self.instructor)
for row in data:
client = row.client_personal.client.full_name
card = row.client_personal.client.card
tariff = row.client_personal.product.short_name
self.products[tariff] += 1
instructor = row.instructor.initials if row.instructor else ''
rows.append((
row.date, row.date.strftime('%H:%M'), card,
client, tariff, instructor
))
return rows
def write_heads(self):
self.ws.write_merge(
self.row_num, self.row_num, 0, 2, _('coach'), styles.styleh)
if self.instructor == 'all':
self.ws.write(self.row_num, 3, _('all'), styles.styleh)
else:
self.ws.write_merge(
self.row_num, self.row_num, 3, 6,
self.instructor.initials, styles.styleh)
self.row_num += 2
super(UsePersonals, self).write_heads()
def write_bottom(self):
self.ws.write_merge(
self.row_num, self.row_num, 0, 3,
_('total use personals by period'))
self.ws.write(self.row_num, 4,
sum(self.products.values()), styles.styleh)
self.row_num += 1
self.ws.write_merge(
self.row_num, self.row_num, 0, 3, _('total by tariff'))
for row_num, product in enumerate(self.products, self.row_num + 1):
self.ws.write_merge(row_num, row_num, 0, 1, product)
self.ws.write(
row_num, 2, self.products.get(product), styles.styleh)
class TotalPersonals(Report):
file_name = 'total_personals'
sheet_name = 'total_personals'
tpl_start_row = 5
table_headers = [
(_('tariff'), 10000),
(_('total'), 4000),
]
def get_title(self, **kwargs):
return _('total personals')
def write_title(self):
super(TotalPersonals, self).write_title()
msg = _('from: {fdate} to {tdate}')
fdate = self.get_fdate().strftime('%d.%m.%Y')
tdate = self.get_tdate().strftime('%d.%m.%Y')
msg = msg.format(fdate=fdate, tdate=tdate)
ln_head = len(self.table_headers) - 1
self.ws.write_merge(1, 1, 0, ln_head, msg, styles.styleh)
def personals_list(self):
fdate = self.get_fdate().date()
tdate = self.get_tdate().date() + timedelta(1)
personals = ClientPersonal.objects.filter(payment__date__range=(fdate, tdate))
# generate filter to get only first payment
filter_pk = []
for personal in personals:
if personal.first_payment.date.date() >= fdate:
filter_pk.append(personal.pk)
return personals.filter(pk__in=filter_pk)
def get_data(self):
rows = []
personals = self.personals_list()
data_pks = personals.values('personal__pk')
data = Personal.objects.filter(
pk__in=data_pks).order_by('max_visit', 'short_name')
total = 0
for row in data:
line = []
line.append(row.short_name)
cnt = personals.filter(personal=row).count()
line.append(cnt)
total += cnt
rows.append(line)
rows.append((_('total'), total))
return rows
def write_bottom(self):
pass
|
[
"avk@alarstudios.com"
] |
avk@alarstudios.com
|
15e5e94a53f74e158e336e3611b445f5c8cb02a2
|
99c4761c4f3388708152dcc21d0415eac76e80c1
|
/wequant/ltc_revere.py
|
0c57bfe89bc71b7f46eca598e5957c2655fbb9ef
|
[] |
no_license
|
zhuoyikang/finance
|
e7e43f0c6eba357db6c5a5989f4b0429ae82c0db
|
2489ab49c783c3f1ba15fc37fb0de8a792edbc0a
|
refs/heads/master
| 2021-09-23T22:08:01.401791
| 2018-09-28T06:43:31
| 2018-09-28T06:43:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,058
|
py
|
# Note: this strategy is for reference and learning only; profits are not guaranteed.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# The strategy code has three parts: 1) the PARAMS variable, 2) the initialize
# function and 3) the handle_data function. Read on as directed, or just click
# the backtest button to run it and inspect the results.
# Strategy name: Bollinger Band (BOLL) strategy
# Detailed write-up: https://wequant.io/study/strategy.boll.html
# Keywords: price channel, price breakout.
# Method:
# 1) Build a price band from the mean and the standard deviation
# 2) Treat a price move beyond a band as a breakout signal: classically,
#    buy on an upward breakout and sell on a downward one (this file
#    deliberately inverts the signals -- see handle_data below)
import numpy as np
import talib
# Reading note 1 (safe to skip on a first read):
# PARAMS sets the program parameters: backtest start/end time, slippage,
# commission, initial cash and positions. Modify them following the same
# format; see the beginners' API docs for details.
PARAMS = {
    "start_time": "2017-09-06 00:00:00",
    "end_time": "2017-09-08 8:00:00",
    "commission": 0.001,  # trading commission
    "slippage": 0.001,  # trading slippage
    "account_initial": {"huobi_cny_cash": 100000,
                        "huobi_cny_ltc": 0},
}
# Reading note 2 (come back here when a variable is unclear):
# initialize is one of the two core functions (the other is handle_data);
# it initializes the strategy variables -- both the required ones and any
# optional ones the user adds for convenience.
def initialize(context):
    # Backtest frequency, one of: "1m", "5m", "15m", "30m", "60m", "4h", "1d", "1w"
    context.frequency = "30m"
    # Backtest benchmark. Bitcoin: "huobi_cny_btc", Litecoin: "huobi_cny_ltc", Ethereum: "huobi_cny_eth"
    context.benchmark = "huobi_cny_ltc"
    # Backtest security, same symbols as above
    context.security = "huobi_cny_ltc"
    # Bollinger Band parameters
    # Band length (look-back window of 14 bars)
    context.user_data.period_window = 14
    # Band width (2 standard deviations)
    context.user_data.standard_deviation_range = 2
    context.user_data.bbands_opt_width_m = 60
# Reading note 3, the core strategy logic:
# handle_data defines the execution logic. It is called on each bar generated
# at the configured frequency until the program ends; see the beginners'
# docs for details on handle_data and bars.
def handle_data(context):
    # Fetch historical data
    hist = context.data.get_price(context.security, count=context.user_data.period_window + context.user_data.bbands_opt_width_m + 1, frequency=context.frequency)
    if len(hist.index) < (context.user_data.period_window + context.user_data.bbands_opt_width_m + 1):
        context.log.warn("Not enough bars yet, waiting for the next bar...")
        return
    # Closing prices
    prices = np.array(hist["close"])
    # Initialize the long/short signals
    long_signal_triggered = False
    short_signal_triggered = False
    # Compute the upper/middle/lower Bollinger Bands with talib
    upper, middle, lower = talib.BBANDS(prices, timeperiod=context.user_data.period_window, nbdevup=context.user_data.standard_deviation_range, nbdevdn=context.user_data.standard_deviation_range, matype=talib.MA_Type.SMA)
    # Latest price
    current_price = context.data.get_current_price(context.security)
    # Generate the trading signals. NOTE: the assignments below invert the
    # classic breakout rules (compare the commented-out lines), in line with
    # the file's "revere"/reverse name.
    if current_price > upper[-1]:  # crossed the upper band (classic: buy; here: sell)
        # long_signal_triggered = True
        short_signal_triggered = True
    if current_price < lower[-1]:  # crossed the lower band (classic: sell; here: buy)
        # short_signal_triggered = True
        long_signal_triggered = True
    context.log.info("Current price: %s, upper band: %s, lower band: %s" % (current_price, upper[-1], lower[-1]))
    # Buy or sell on the signals
    if short_signal_triggered:
        context.log.info("Price crossed the upper band; sell signal (inverted strategy)")
        if context.account.huobi_cny_ltc >= HUOBI_CNY_LTC_MIN_ORDER_QUANTITY:
            # Sell signal with a non-empty position: clear it with a market order
            context.log.info("Selling %s" % context.security)
            context.log.info("Sell quantity: %s" % context.account.huobi_cny_ltc)
            context.order.sell(context.security, quantity=str(context.account.huobi_cny_ltc))
        else:
            context.log.info("Position too small, cannot sell")
    elif long_signal_triggered:
        context.log.info("Price crossed the lower band; buy signal (inverted strategy)")
        if context.account.huobi_cny_cash >= HUOBI_CNY_LTC_MIN_ORDER_CASH_AMOUNT:
            # Buy signal with cash on hand: go all-in with a market order
            context.log.info("Buying %s" % context.security)
            context.log.info("Order amount: %s CNY" % context.account.huobi_cny_cash)
            context.order.buy(context.security, cash_amount=str(context.account.huobi_cny_cash))
        else:
            context.log.info("Not enough cash to place the order")
    else:
        context.log.info("No trading signal; waiting for the next bar")
|
[
"zhuoyikang@gmail.com"
] |
zhuoyikang@gmail.com
|
cd6c3e95476cb104ea83f9a922640fb929d2fc7a
|
5b3e01b0dbc683c79c222c26aac0aacb642cc2e2
|
/prompt_toolkit/contrib/ssh/server.py
|
ad68c577f29dc6714f7ac86467924b128f8087d8
|
[
"BSD-3-Clause"
] |
permissive
|
Stabledog/python-prompt-toolkit
|
31b15c46f482dad8baa862b0089ee3fe11cf1304
|
4eabc69ad69f00d8f19070798140ec47f3d13a25
|
refs/heads/master
| 2020-05-31T03:03:36.537595
| 2019-06-02T14:28:09
| 2019-06-02T19:10:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,374
|
py
|
"""
Utility for running a prompt_toolkit application in an asyncssh server.
"""
import asyncio
import traceback
from typing import Awaitable, Callable, Optional, TextIO, cast
import asyncssh
from prompt_toolkit.application.current import AppSession, create_app_session
from prompt_toolkit.data_structures import Size
from prompt_toolkit.input.posix_pipe import PosixPipeInput
from prompt_toolkit.output.vt100 import Vt100_Output
__all__ = [
'PromptToolkitSession',
'PromptToolkitSSHServer',
]
class PromptToolkitSession(asyncssh.SSHServerSession):
def __init__(self, interact: Callable[[], Awaitable[None]]) -> None:
self.interact = interact
self._chan = None
self.app_session: Optional[AppSession] = None
        # PipeInput object, for sending input in the CLI.
        # (This is something that we can use in the prompt_toolkit event loop,
        # but still write data into manually.)
self._input = PosixPipeInput()
# Output object. Don't render to the real stdout, but write everything
# in the SSH channel.
class Stdout:
def write(s, data):
if self._chan is not None:
self._chan.write(data.replace('\n', '\r\n'))
def flush(s):
pass
self._output = Vt100_Output(cast(TextIO, Stdout()),
self._get_size, write_binary=False)
def _get_size(self) -> Size:
"""
Callable that returns the current `Size`, required by Vt100_Output.
"""
if self._chan is None:
return Size(rows=20, columns=79)
else:
width, height, pixwidth, pixheight = self._chan.get_terminal_size()
return Size(rows=height, columns=width)
def connection_made(self, chan):
self._chan = chan
def shell_requested(self) -> bool:
return True
def session_started(self) -> None:
asyncio.ensure_future(self._interact())
async def _interact(self) -> None:
if self._chan is None:
# Should not happen.
raise Exception('`_interact` called before `connection_made`.')
# Disable the line editing provided by asyncssh. Prompt_toolkit
# provides the line editing.
self._chan.set_line_mode(False)
with create_app_session(input=self._input, output=self._output) as session:
self.app_session = session
try:
await self.interact()
except BaseException:
traceback.print_exc()
finally:
# Close the connection.
self._chan.close()
def terminal_size_changed(self, width, height, pixwidth, pixheight):
# Send resize event to the current application.
if self.app_session and self.app_session.app:
self.app_session.app._on_resize()
def data_received(self, data, datatype):
self._input.send_text(data)
class PromptToolkitSSHServer(asyncssh.SSHServer):
"""
Run a prompt_toolkit application over an asyncssh server.
This takes one argument, an `interact` function, which is called for each
connection. This should be an asynchronous function that runs the
prompt_toolkit applications. This function runs in an `AppSession`, which
means that we can have multiple UI interactions concurrently.
Example usage:
.. code:: python
async def interact() -> None:
await yes_no_dialog("my title", "my text").run_async()
prompt_session = PromptSession()
text = await prompt_session.prompt_async("Type something: ")
print_formatted_text('You said: ', text)
server = PromptToolkitSSHServer(interact=interact)
loop = get_event_loop()
loop.run_until_complete(
asyncssh.create_server(
lambda: MySSHServer(interact),
"",
port,
server_host_keys=["/etc/ssh/..."],
)
)
loop.run_forever()
"""
def __init__(self, interact: Callable[[], Awaitable[None]]) -> None:
self.interact = interact
def begin_auth(self, username):
# No authentication.
return False
def session_requested(self) -> PromptToolkitSession:
return PromptToolkitSession(self.interact)
|
[
"jonathan@slenders.be"
] |
jonathan@slenders.be
|
8ef9b4a37e8c02b5e9db9da4aabde348a53e9097
|
9b1d4f059d9883d9f06a32e7c6c640376f27b7eb
|
/ProyectoDjango/asgi.py
|
48acbd1449daa9dc14881c9151124cae158bbd89
|
[
"MIT"
] |
permissive
|
JeanContreras12/ColungaRepo
|
dccdf5d5ab7c632edc7db58ac3b8b4b70c70feb6
|
af59e07f31b3d56ebdf02431a2967134985c1624
|
refs/heads/main
| 2023-06-12T18:43:27.619078
| 2021-07-09T15:23:35
| 2021-07-09T15:23:35
| 371,485,778
| 1
| 0
|
MIT
| 2021-07-06T15:56:20
| 2021-05-27T19:40:54
|
CSS
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
ASGI config for ProyectoDjango project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProyectoDjango.settings')
application = get_asgi_application()
|
[
"j.contrerasmanriquez@uandresbello.edu"
] |
j.contrerasmanriquez@uandresbello.edu
|
999257106ee01f01b171590672d6dd8c48ff5e0a
|
0a93b53463fbe848913b7ce6227f6b3f4713fdf2
|
/Visualization/dahakianapi/formrun.py
|
ff7da07f1958c9c739050f97f79442ca585972ab
|
[
"MIT"
] |
permissive
|
kopok2/BooXchange-Recommendation
|
83c53f4ad4ffcd53b1533fc8a28f3aaee9187540
|
b0e6327a8d4d0eb4719488c53469ac2018c3ee53
|
refs/heads/master
| 2021-07-06T09:11:30.853943
| 2020-11-29T21:35:09
| 2020-11-29T21:35:09
| 211,632,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,113
|
py
|
"""Form run interface module
This module implements form running interface used to run app's forms in parallel.
"""
import sys
import os
import subprocess
from multiprocessing import Process
from dahakianapi.asynccomm import AsyncCommPoster
from json import JSONDecodeError
python_interpreter_path = sys.executable
class FormRunInterface:
"""Form start interface."""
def __init__(self, path, name, direct_path=False):
"""Initialize interface process."""
self.path = path
self.name = name
self.direct_path = direct_path
self.main_process = Process(target=self.subprocess_caller)
if not self.direct_path:
self.scan_interface = AsyncCommPoster('no_target', self.name)
def subprocess_caller(self):
"""Run form in subprocess."""
if not self.direct_path:
subprocess.call([python_interpreter_path, os.getcwd() + '\\' + self.path])
else:
subprocess.call([python_interpreter_path, self.path])
def run(self):
"""Start a process."""
self.main_process.start()
def reset(self):
"""Reset form to be rerunned."""
self.main_process = Process(target=self.subprocess_caller)
def scan_for_run(self):
"""Scan whether form is about to be run."""
try:
curr_msg = self.scan_interface.read()
if curr_msg['cmd'] == "Run":
self.scan_interface.post_to_self('None')
return True
else:
return False
        except Exception:
            print(sys.exc_info()[0])
            return False
def scan_for_killed(self):
"""Scan whether form is about to be killed."""
try:
curr_msg = self.scan_interface.read()
if curr_msg['cmd'] == "Killed":
self.scan_interface.post_to_self('None')
return True
elif curr_msg['cmd'] == "Kill":
return False
else:
return False
except JSONDecodeError:
print(sys.exc_info()[0])
return False
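# Hedged usage sketch (not in the original module): launching a form script
# in a parallel process. The script path is a placeholder; direct_path=True
# skips the AsyncCommPoster scan interface.
if __name__ == '__main__':
    runner = FormRunInterface('forms/example_form.py', 'example', direct_path=True)
    runner.run()                 # spawn the subprocess
    runner.main_process.join()   # wait for the form to exit
    runner.reset()               # prepare for a possible rerun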
|
[
"oleszek.karol@gmail.com"
] |
oleszek.karol@gmail.com
|
31457aaac9611a052fe9d52bae408f75a2d42776
|
e692c25a70b0930f16805768ab808aba60b05063
|
/oop.py
|
853b711ad0b8965748fbd25198393b5ae203affa
|
[] |
no_license
|
VICUBARAL/firststeps
|
9a7317f7fe2085fdc0c004e9940d2050a62fcc4a
|
a52892272e883b708cd25c37f78d9c429f6f5ba5
|
refs/heads/master
| 2023-04-23T15:51:39.409257
| 2021-05-13T17:21:43
| 2021-05-13T17:21:43
| 367,093,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
class Employee:
    num_of_emps = 0
    raise_amt = 1.04
    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.email = first + '.' + last + '@email.com'
        self.pay = pay
        Employee.num_of_emps += 1
    def fullname(self):
        return '{} {}'.format(self.first, self.last)
    def apply_raise(self):
        self.pay = int(self.pay * self.raise_amt)
    @classmethod
    def set_raise_amt(cls, amount):
        cls.raise_amt = amount
emp_1 = Employee('Victoria', 'Baral', 50000)
emp_2 = Employee('Prueba', 'Empleado', 60000)
print(Employee.raise_amt)
print(emp_1.raise_amt)
print(emp_2.raise_amt)
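# Hedged extension (not in the original script): set_raise_amt is defined
# above but never called; this shows the class-level update it performs.
Employee.set_raise_amt(1.05)
print(Employee.raise_amt)  # 1.05
print(emp_1.raise_amt)     # 1.05 -- instances pick up the new class attribute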
|
[
"62349218+VictoriaBaral@users.noreply.github.com"
] |
62349218+VictoriaBaral@users.noreply.github.com
|
1dd64e033bff0788b62d6ca278fc90b00e628a23
|
a5f1bf55a73268b556e070a8ad3391c87e2d0e8f
|
/consumo_eletrico.py
|
edf4c6cb83e58cd75430198a4688bc07653aed56
|
[] |
no_license
|
TerminalBen/dash_graphs
|
f85d24c5b8daa27a87fd89588c12b9585d56bdc0
|
22be18a697b99c444e1a7de93a72c777f7231231
|
refs/heads/master
| 2023-08-17T00:50:27.688704
| 2021-01-30T11:46:19
| 2021-01-30T11:46:19
| 275,002,341
| 0
| 0
| null | 2021-09-22T19:19:47
| 2020-06-25T19:50:09
|
Python
|
UTF-8
|
Python
| false
| false
| 8,659
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
import consumo_eletrico_data as ed
external_stylesheets1 = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
years=[2016,2017,2018,2019,2020]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets1)
server = app.server
app.layout = html.Div(children=[
html.H1(children='Apuramento Faturas Eletricidade'),
html.Div(children='''
Valores em Kilowatts
'''),
dcc.Graph(
id='MS',
figure={
'data': [
{'x': ed.year, 'y': ed.ms2017,
'type': 'bar', 'name': '2017'},
{'x': ed.year, 'y': ed.ms2018,
'type': 'bar', 'name': '2018'},
{'x': ed.year, 'y': ed.ms2019,
'type': 'bar', 'name': '2019'},
{'x': ed.year, 'y': ed.ms2020,
'type': 'bar', 'name': '2020'},
],
'layout': go.Layout(
title='Monte Sossego Rua 1',
yaxis={'title': 'Valores em Kw'},
colorway = ['#ff7f0e', '#2ca02c', '#d62728', '#9467bd'],
),
}
),
html.Div([
html.P ('Houve uma baixa no consumo de energia no mês de Dezembro de 2020, por estar com 2 (dois) aparelhos de ar condicionado avariados.')
],
style = {'textAlign':'right','margin-top':1,'font-size':11}),
dcc.Graph(
id='sum_ms',
figure={
'data': [go.Pie(labels=['2017','2018', '2019','2020'], values=[ed.s1, ed.s2, ed.s3,ed.sms2020], hole=0.3,sort=False)],
'layout': go.Layout(
title='Comparação Anual',
#yaxis={'title': 'Valores em Kw'},
colorway = ['#ff7f0e', '#2ca02c', '#d62728', '#9467bd'],
),
}
#figure.update_traces(textinfo='value')
),
dcc.Graph(
id='Camp',
figure={
'data': [
{'x': ed.year, 'y': ed.camp2016,
'type':'bar','name': '2016'},
{'x': ed.year, 'y': ed.camp2017,
'type': 'bar', 'name': '2017'},
{'x': ed.year, 'y': ed.camp2018,
'type': 'bar', 'name': '2018'},
{'x': ed.year, 'y': ed.camp2019,
'type': 'bar', 'name': '2019'},
{'x': ed.year, 'y': ed.camp2020,
'type': 'bar', 'name': '2020'},
],
'layout': go.Layout (
title='Consumo Eletricidade Campinho',
yaxis={'title':'Valores em Kw (x1000)'},
)
}
),
#html.Div([
# html.P ('Valores em Kilowats')
# ],
# style = {'textAlign':'left','margin-top':0,'font-size':11}
#),
html.Div([
html.P ('Inicio funcionamento Painel solar em 03 de Setembro de 2018'),
html.P ('Arranque das camaras Frigorificas em 01 de Maio de 2018'),
html.P ('Houve um aumento de consumo de energia no mês de dezembro de 2020, devido à fraca produção do sistema solar.')
],
style = {'textAlign':'right','margin-top':0.5,'font-size':11}
),
dcc.Graph(
id='sum_camp',
figure={
'data': [go.Pie(labels=['2016','2017','2018', '2019','2020'], values=[ed.scamp2016,ed.s4, ed.s5, ed.s6,ed.scamp2020], hole=0.3,sort=False)],
'layout': {'title': 'Comparação Anual'}
}
),
dcc.Graph(
id='sj',
figure={
'data': [
{'x': ed.year, 'y': ed.sj2018,
'type': 'bar', 'name': '2018'},
{'x': ed.year, 'y': ed.sj2019,
'type': 'bar', 'name': '2019'},
{'x': ed.year, 'y': ed.sj2020,
'type': 'bar', 'name': '2020',
},
],
'layout': go.Layout(
title='Minimercado Rua São João',
yaxis={'title': 'Valores em Kw'},
colorway = ['#2ca02c', '#d62728', '#9467bd'],
)
}
),
html.Div([
html.P ('Colocação painel solar 26 de Agosto de 2018'),
html.P ('Houve um aumento de consumo de energia no mês de dezembro de 2020, devido à fraca produção do sistema solar.')
],
style = {'textAlign':'right','margin-top':1,'font-size':11}),
dcc.Graph(
id='sum_sj',
figure={
'data': [go.Pie(labels=['2018', '2019','2020'], values=[ed.s8, ed.s9,ed.ssj2020], hole=0.3,sort=False)],
'layout': {'title': 'Comparação Anual','colorway':['#2ca02c', '#d62728', '#9467bd']}
}
),
dcc.Graph(
id='escr_cid',
figure={
'data': [
{'x': ed.year, 'y': ed.ec2016,
'type':'bar','name':'2016'},
{'x': ed.year, 'y': ed.ec2017,
'type': 'bar', 'name': '2017'},
{'x': ed.year, 'y': ed.ec2018,
'type': 'bar', 'name': '2018'},
{'x': ed.year, 'y': ed.ec2019,
'type': 'bar', 'name': '2019'},
{'x': ed.year, 'y': ed.ec2020,
'type': 'bar', 'name': '2020'},
],
'layout': go.Layout (
title='Consumo eletricidade Posto de Venda/Escritório',
yaxis={'title':'Valores em Kw'},
#xaxis={'title':'Paineis Solares em funcionamento a partir de 31 de Março de 2017'}
)
}
),
html.Div([
html.P ('Paineis Solares em funcionamento a partir de 31 de Março de 2017'),
html.P ('Junçao de Contadores Posto de Venda/Escritorio em 14 de Julho de 2018')
],
style = {'textAlign':'right','margin-top':0.5,'font-size':11}
),
dcc.Graph(
id='sum_cid',
figure={
'data': [go.Pie(labels=['2016','2017','2018', '2019','2020'], values=[ed.sumec2016,ed.sumec2017, ed.sumec2018, ed.sumec2019,ed.sec2020], hole=0.3,sort=False)],
'layout': {'title': 'Comparação Anual','colorway':[]}
}
),
dcc.Graph(
id='rib',
figure={
'data': [
{'x': ed.year, 'y': ed.rib2017,
'type': 'bar', 'name': '2017'},
{'x': ed.year, 'y': ed.rib2018,
'type': 'bar', 'name': '2018'},
{'x': ed.year, 'y': ed.rib2019,
'type': 'bar', 'name': '2019'},
{'x': ed.year, 'y': ed.rib2020,
'type': 'bar', 'name': '2020'},
],
'layout': go.Layout(
title='Ribeirinha',
yaxis={'title': 'Valores em Kw'},
colorway = ['#ff7f0e', '#2ca02c', '#d62728', '#9467bd'],
),
}
),
dcc.Graph(
id='sum_rib',
figure={
'data': [go.Pie(labels=['2017', '2018', '2019','2020'], values=[ed.s16, ed.s17, ed.s18,ed.srib2020], hole=0.3,sort=False)],
'layout': go.Layout(
title='Comparação Anual',
#yaxis={'title': 'Valores em Kw'},
colorway = ['#ff7f0e', '#2ca02c', '#d62728', '#9467bd'],
),
}
),
dcc.Graph(
id='pn',
figure={
'data': [
{'x': ed.year, 'y': ed.pn2017,
'type': 'bar', 'name': '2017'},
{'x': ed.year, 'y': ed.pn2018,
'type': 'bar', 'name': '2018'},
{'x': ed.year, 'y': ed.pn2019,
'type': 'bar', 'name': '2019'},
{'x': ed.year, 'y': ed.pn2020,
'type': 'bar', 'name': '2020'},
],
'layout': go.Layout(
title='Porto Novo',
yaxis={'title': 'Valores em Kw'},
colorway = ['#ff7f0e', '#2ca02c', '#d62728', '#9467bd'],
),
}
),
html.Div([
html.P ('Colocação painel solar 29 de Setembro de 2018'),
#html.P ('Valores em Kilowats')
],
style = {'textAlign':'right','margin-top':1,'font-size':11}
),
dcc.Graph(
id='sum_pn',
figure={
'data': [go.Pie(labels=['2017','2018', '2019','2020'], values=[ed.s19, ed.s20, ed.s21,ed.spn2020], hole=0.3,sort=False)],
'layout': {'title': 'Comparação Anual','colorway':['#ff7f0e', '#2ca02c', '#d62728', '#9467bd']}
}
),
])
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"bentolima100@gmail.com"
] |
bentolima100@gmail.com
|
fcef827ee14df9e33527c492c1072a3edffbd1f4
|
cb00fe13083728c540076600e41710081b7ef0ce
|
/site/venv/Lib/site-packages/pip/_vendor/urllib3/util/queue.py
|
88842e94c8f8b3d508d6866edf9ed1e12da053ac
|
[] |
no_license
|
Wamadahama/ner-framework
|
069464342512bab9f429b11be735f6cb487afb74
|
4cb699469ec8733f74cb67f67af995e734231974
|
refs/heads/master
| 2023-03-28T14:28:42.791453
| 2021-03-29T22:27:33
| 2021-03-29T22:27:33
| 209,624,138
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
import collections
from ..packages import six
from ..packages.six.moves import queue
if six.PY2:
# Queue is imported for side effects on MS Windows. See issue #229.
pass
class LifoQueue(queue.Queue):
def _init(self, _):
self.queue = collections.deque()
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
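# Hedged illustration (not part of urllib3): the deque-backed overrides
# above give Queue last-in-first-out behaviour, like the stdlib LifoQueue.
def _lifo_demo():
    q = LifoQueue()
    q.put('a')
    q.put('b')
    return q.get()  # -> 'b': the most recently put item comes out first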
|
[
"evanscruzen@gmail.com"
] |
evanscruzen@gmail.com
|
1a36a483e5e54b2562473bff6396d0d53625c163
|
58405d760478914f861239920fd613b2fef624e1
|
/tests/test_source.py
|
8b1ab98db7201698a7df72357c4dc5606c4b606e
|
[
"LicenseRef-scancode-sata"
] |
permissive
|
HASSAN1A/News-App
|
61a560bd24aea6b3fda957d5e6afed1d510d268f
|
cda736e7274b54b4ee53ee671c6e2c6ac8dd39d1
|
refs/heads/master
| 2023-03-25T04:31:36.846114
| 2020-10-20T16:13:22
| 2020-10-20T16:13:22
| 304,731,238
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import unittest
from app.models import Source
class SourceTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Source class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_source = Source('abc-news','ABC News','A thrilling news source')
def test_instance(self):
self.assertTrue(isinstance(self.new_source,Source))
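# Hedged sketch (not part of the test module): a minimal shape for
# app.models.Source, inferred from the constructor call in setUp above;
# named SourceSketch so it does not shadow the imported class.
class SourceSketch:
    def __init__(self, id, name, description):
        self.id = id
        self.name = name
        self.description = description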
|
[
"okothhassanjuma@gmail.com"
] |
okothhassanjuma@gmail.com
|
dad3a89b7b3fc9dc24c4542db1b382193308b89f
|
b096d5258393144cf33c755eec6e1a9dadd7a4ee
|
/sourceControlApp/migrations/0017_auto_20141119_0507.py
|
3607274abde18f18eb9f7acb9bba438b2ab3ce75
|
[] |
no_license
|
gittrdone/sourcecontrol
|
c5b41ad40d475cca95863a889d88b718b36bd672
|
1e1fab1c11c01d71b424bb486353b3002a5a6f73
|
refs/heads/master
| 2020-05-17T05:24:52.339606
| 2015-04-08T04:46:02
| 2015-04-08T04:46:02
| 23,799,825
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sourceControlApp', '0016_auto_20141119_0343'),
]
operations = [
migrations.AlterField(
model_name='codeauthor',
name='repository',
field=models.ForeignKey(blank=True, to='sourceControlApp.GitStore', null=True),
),
migrations.AlterField(
model_name='commit',
name='repository',
field=models.ForeignKey(blank=True, to='sourceControlApp.GitStore', null=True),
),
migrations.AlterField(
model_name='usergitstore',
name='git_store',
field=models.ForeignKey(blank=True, to='sourceControlApp.GitStore', null=True),
),
]
|
[
"tpatikorn@hotmail.com"
] |
tpatikorn@hotmail.com
|
c25887904d18b0e4aa6748423e20f5ff86b50d0c
|
fd119ce1f9ca063f49a5ec0c80b428957eaba62e
|
/part_3_proj_traj .py
|
c1ceaccb19dd456cb69219548caf244e3ad2a346
|
[] |
no_license
|
siba987/Embedded-Systems-Project
|
35c4e77d2f0ede7a190f704a53cd4d48e4f9f45a
|
3dd47669906e4912eb850a6e469a62e969f7d176
|
refs/heads/master
| 2020-03-15T05:46:25.446408
| 2018-05-03T19:12:17
| 2018-05-03T19:12:17
| 131,993,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,498
|
py
|
import tuio
import socket
import time
import math
import datetime
TCP_IP = '192.168.1.1'
TCP_PORT = 2001
BUFFER_SIZE = 1024
forward = b'\xff\0\x01\0\xff'
bckwd = b'\xff\0\x02\0\xff'
stop = b'\xff\0\x00\0\xff'
rot_r = b'\xFF\x00\x03\x00\xFF'
rot_l = b'\xFF\x00\x04\x00\xFF'
save_cam_angle = b'\xFF\x32\x00\x00\xFF'
reset_cam_angle = b'\xFF\x33\x00\x00\xFF'
def new_cnct(): #setting up a new connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
return s
def set_speed_high(): #function for setting a relatively higher speed
    skt = new_cnct()
    skt.send(b'\xFF\x02\x01\x12\xFF')  # note: currently the same speed bytes as set_speed_med
    skt.send(b'\xFF\x02\x02\x12\xFF')
    skt.close()
def set_speed_med(): #function for medium speed
skt = new_cnct()
skt.send(b'\xFF\x02\x01\x12\xFF')
skt.send(b'\xFF\x02\x02\x12\xFF')
skt.close()
def set_speed_low(): #function for lower speed
skt = new_cnct()
skt.send(b'\xFF\x02\x01\x0a\xFF')
skt.send(b'\xFF\x02\x02\x0a\xFF')
skt.close()
def move_fwd_2(): #function to move forward with own socket connection
skt = new_cnct()
skt.send(forward)
skt.close()
def move_fwd(s): #function which when passed socket object only sends forward hex command
s.send(forward)
def move_stp(s): #function which when passed socket object only sends stop hex command
s.send(stop)
def move_stp_2(): #function to stop with own socket connection
skt = new_cnct()
skt.send(stop)
skt.close()
def rotate_r_3(): #function to rotate right incrementally (with delay) with own socket connection
skt = new_cnct()
skt.send(rot_r)
time.sleep(0.03)
skt.send(stop)
skt.close()
def rotate_l_3(): #function to rotate left incrementally (with delay) with own socket connection
skt = new_cnct()
skt.send(rot_l)
time.sleep(0.03)
skt.send(stop)
skt.close()
def check_direction(agl_r, agl_s, is_right): #returning final angle robot must reach
if(is_right):
final_angle = agl_s - agl_r
else:
final_angle = agl_s + agl_r
return final_angle
def check_overflow(agl_r, agl_s, is_right):
if(is_right):
dif = agl_s - agl_r
if(dif < 0):
agl_s += 360
else:
sum_agl = agl_s + agl_r
if(sum_agl > 360):
agl_s -= 360
return agl_s
def check_case(sx, sy, ex, ey): #function to determine position of end fm relative to robot and calculate theta
#pass in start and end coordinates
theta = math.degrees(math.atan(abs(ey-sy)/abs(ex-sx)))
if(sx>ex):
if(sy<ey):
case = 'nw'
else:
case = 'sw'
else:
if(sy<ey):
case = 'ne'
else:
case = 'se'
return theta, case
def det_rot_angle(agl_s, theta, case):
#agl_s corresponds to starting angle of the robot
#rot_angle is the angle the robot must rotate by to reach correct axis
if(case == "nw"):
if((agl_s > (180-theta)) and (agl_s < 360)):
rot_angle = agl_s - 180 + theta
is_right = 1
else:
rot_angle = 180 - agl_s - theta
is_right = 0
elif(case == "se"):
if(agl_s > 0) and (agl_s <= 180):
rot_angle = agl_s + theta
# rot_angle = -theta
is_right = 1
elif(agl_s > 180) and (agl_s <= (360 - theta)):
rot_angle = 360 - agl_s - theta
is_right = 0
else:
rot_angle = agl_s - 360 + theta
is_right = 1
elif(case == "ne"):
if(agl_s < theta):
rot_angle = theta - agl_s
is_right = 0
elif((agl_s > theta) and (agl_s < 270)):
rot_angle = agl_s - theta
is_right = 1
elif(agl_s > 270) and (agl_s < 360):
rot_angle = 360 - agl_s + theta
is_right = 0
elif(case == "sw"):
if(agl_s < (180 + theta)):
rot_angle = 180 - agl_s + theta
is_right = 0
elif(agl_s >= (180 + theta)):
rot_angle = agl_s - 180 - theta
is_right = 1
agl_s = check_overflow(rot_angle, agl_s, is_right) #accounting for overflow of >360 or <0, change starting angle value if necessary
rot_angle = check_direction(rot_angle, agl_s, is_right)
return rot_angle, is_right
def det_fm_info_end(): #function to retrieve information of goal/end point
tracking = tuio.Tracking()
while 1:
tracking.update()
for obj in tracking.objects():
if(obj.id == 3):
                print(obj)
tracking.stop()
return obj.angle, obj.xpos, obj.ypos
def det_fm_info_start(): #function to retrieve initial information of robot's starting point
tracking = tuio.Tracking()
while 1:
tracking.update()
for obj in tracking.objects():
if(obj.id == 0):
# print obj
tracking.stop()
return obj.angle, obj.xpos, obj.ypos
def check_xy(xr, yr, goal_x, goal_y, xy_range): #checking if x and y position of robot is within specified error range
if(xr <= (goal_x + xy_range)) and (xr >= (goal_x - xy_range)):
if(yr <= (goal_y + xy_range)) and (yr >= (goal_y - xy_range)):
return True
return False
def det_next_pos(idx): #keeping track of destination x and y coordinates
x_points = [0.85, 0.83, 0.77, 0.68, 0.56, 0.44, 0.32, 0.23, 0.17, 0.15]
y_points = [0.5, 0.72, 0.84, 0.8, 0.62, 0.38, 0.2, 0.16, 0.28, 0.5]
return x_points[idx], y_points[idx]
def main():
move_stp_2()
set_speed_med()
agl_range = 8
xy_range = 0.01
for i in range(1, 10):
ts = time.time()
cur_ts = ts
        ar, xr, yr = det_fm_info_start() #getting initial robot information
xn, yn = det_next_pos(i) #determining next position information
theta, case = check_case(xr, yr, xn, yn) #retrieving theta and scenario information
agl, is_right = det_rot_angle(ar, theta, case) #determining final angle robot must rotate to and in which direction
while cur_ts < ts + 4: #while loop is broken once current position path surpasses 4s, cur_time is compared with time the robot began at for this position
cur_ts = time.time()#current time is tracked
ar, xr, yr = det_fm_info_start() #must continually update robot position as it moves
theta, case = check_case(xr, yr, xn, yn) #allowing for error corrections along the way
agl, is_right = det_rot_angle(ar, theta, case)
if(((ar < (agl+agl_range)) and (ar > (agl-agl_range))) and not(check_xy(xr, yr, xn, yn, xy_range))):
move_fwd_2() #moves forward if within angle range but not x/y range
elif(not((ar < (agl+agl_range)) and (ar > (agl-agl_range)))):
#rotates in appropriate direction depending on assigned direction
if(is_right):
rotate_r_3()
else:
rotate_l_3()
else:
move_stp_2()
print(xr, yr, xn, yn, cur_ts)
main()
|
[
"noreply@github.com"
] |
siba987.noreply@github.com
|
3a135b64acbaac7fbd448db5267cdfe1e8981cea
|
ba4bcfdfc70062bc904bd6490faaf66c5c4c7345
|
/TestResult_Project_ver0.3/DataBase_Results/Perform_Score/spec2000-1core/perform_spec2000_1core_CFP_2csv.py
|
89cbd10b46e1847be6a84d16629eaa295aadd83b
|
[] |
no_license
|
jianxiamage/Proj_TestResults
|
8ac5cf9c2eb20685e6e03aea8999a07768154bf4
|
4a5c2c7c44babad754ac0755787022308888713d
|
refs/heads/master
| 2022-11-28T09:23:34.617677
| 2020-04-23T07:44:03
| 2020-04-23T07:44:03
| 203,519,769
| 0
| 0
| null | 2022-11-22T00:34:30
| 2019-08-21T06:20:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys  # import modules
import os
import traceback
import ConfigParser
reload(sys)
sys.setdefaultencoding('utf-8')
#------------------------------------------
ResultPath='/data/'
detailDir='Detail'
PointsPath='Points_Files'
#------------------------------------------
#section = 'spec2000-1core-CFP'
#------------------------------------------
#Prevent option names in the ini file from being automatically lower-cased
class myconf(ConfigParser.ConfigParser):
def __init__(self,defaults=None):
ConfigParser.ConfigParser.__init__(self,defaults=None)
def optionxform(self, optionstr):
return optionstr
#Write the ini section's option names at the top of the csv file to label each field.
#Note that the header row is written to the csv in overwrite mode ("w").
def read_iniHead(section,inputFile,outputFile):
config = myconf()
config.readfp(open(inputFile))
f = open(outputFile,"w")
options = config.options(section)
optionStr = ','.join(options)
print(optionStr)
    f.write('Tag,node_num,' + optionStr + '\n')
    f.close()
#Append each field's value as a row in the csv file
def read_ini(section,inputFile,outputFile,num,Tag):
config = myconf()
config.readfp(open(inputFile))
f = open(outputFile,"a")
    values = []
    #section = 'stream-1core'
    for option in config.options(section):
        value = config.get(section, option)
        #print 'section:%s,option:%s,value:%s' %(section,option,value)
        print(value)
        values.append(value)  # keep option order so columns stay aligned with the header row
    print('===============================================')
    values_Str = ','.join(values)
    print(values_Str)
    f.write(Tag + ',' + num + ',' + values_Str+'\n')
    f.close()
print('===============================================')
return 0
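# Example of the flattening performed above, with hypothetical ini content:
#   [spec2000-1core-CFP]
#   168.wupwise = 123.4
#   171.swim = 98.7
# read_iniHead writes the header "Tag,node_num,168.wupwise,171.swim" and each
# read_ini call appends a row such as "v1.0,1,123.4,98.7".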
if __name__=='__main__':
try:
        #input arguments
        test_type = sys.argv[1]
        test_platform = sys.argv[2]
        test_case = sys.argv[3]
        test_Tag = sys.argv[4]
        section = 'spec2000-1core-CFP'
        #build the target file name
        caseDir='spec2000-1core' #distinguishes CFP (floating point) from integer; the input argument contains CFP, which must be dropped from the directory name here
ResultIniPath = ResultPath + str(test_type) + '/' + str(test_platform) + '/' + str(detailDir) + '/' + str(caseDir) + '/' + str(PointsPath)
iniFilePre = test_case + '_'
iniFileEnd = '.ini'
        MaxCount=3 #at most 3 concurrent nodes
#iniFileName='stream_1core_1.ini'
iniFileName = ResultIniPath + '/' + iniFilePre + '1' + iniFileEnd
#csvFileName='stream_1cor.csv'
csvFileName = ResultIniPath + '/' + test_case +'.csv'
result_code = read_iniHead(section,iniFileName,csvFileName)
        #iterate over the ini files of all concurrent nodes (normally 3)
for i in range(1,MaxCount+1):
#iniFileName = iniFilePre + str(i) + iniFileEnd
iniFileName = ResultIniPath + '/' + iniFilePre + str(i) + iniFileEnd
print(iniFileName)
print('-----------------------')
result_code = read_ini(section,iniFileName,csvFileName,str(i),test_Tag)
except Exception as E:
#print('str(Exception):', str(Exception))
print('str(e):', str(E))
#print('repr(e):', repr(E))
#print('traceback.print_exc(): ', traceback.print_exc())
sys.exit(1)
|
[
"jianxiamage@163.com"
] |
jianxiamage@163.com
|
f22f654240c4e2b94bb4442b5a54342c12031ee0
|
46390b01256fd4a0dbf3de12b5b3b2248b36a3d5
|
/gallery/plot_transforms.py
|
032dd584c26ea395043052559bde511dcc1c2afa
|
[
"BSD-3-Clause"
] |
permissive
|
DevPranjal/vision
|
ba7e4f79b17189ff621e718d21ce3380f64b80df
|
ec40ac3ab84b90b2bb422f98b4d57b89d424676c
|
refs/heads/master
| 2023-06-29T23:15:13.577246
| 2021-06-10T14:10:39
| 2021-06-10T14:10:39
| 375,726,988
| 2
| 0
|
BSD-3-Clause
| 2021-06-10T14:30:58
| 2021-06-10T14:30:58
| null |
UTF-8
|
Python
| false
| false
| 10,421
|
py
|
"""
==========================
Illustration of transforms
==========================
This example illustrates the various transforms available in :ref:`the
torchvision.transforms module <transforms>`.
"""
from PIL import Image
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as T
plt.rcParams["savefig.bbox"] = 'tight'
orig_img = Image.open(Path('assets') / 'astronaut.jpg')
# if you change the seed, make sure that the randomly-applied transforms
# properly show that the image can be both transformed and *not* transformed!
torch.manual_seed(0)
def plot(imgs, with_orig=True, row_title=None, **imshow_kwargs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
num_rows = len(imgs)
num_cols = len(imgs[0]) + with_orig
fig, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
for row_idx, row in enumerate(imgs):
row = [orig_img] + row if with_orig else row
for col_idx, img in enumerate(row):
ax = axs[row_idx, col_idx]
ax.imshow(np.asarray(img), **imshow_kwargs)
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
if with_orig:
axs[0, 0].set(title='Original image')
axs[0, 0].title.set_size(8)
if row_title is not None:
for row_idx in range(num_rows):
axs[row_idx, 0].set(ylabel=row_title[row_idx])
plt.tight_layout()
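# For instance, plot([img1, img2]) draws a single row consisting of the
# original image followed by img1 and img2; passing a list of lists draws
# one row per inner list.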
####################################
# Pad
# ---
# The :class:`~torchvision.transforms.Pad` transform
# (see also :func:`~torchvision.transforms.functional.pad`)
# fills image borders with some pixel values.
padded_imgs = [T.Pad(padding=padding)(orig_img) for padding in (3, 10, 30, 50)]
plot(padded_imgs)
####################################
# Resize
# ------
# The :class:`~torchvision.transforms.Resize` transform
# (see also :func:`~torchvision.transforms.functional.resize`)
# resizes an image.
resized_imgs = [T.Resize(size=size)(orig_img) for size in (30, 50, 100, orig_img.size)]
plot(resized_imgs)
####################################
# CenterCrop
# ----------
# The :class:`~torchvision.transforms.CenterCrop` transform
# (see also :func:`~torchvision.transforms.functional.center_crop`)
# crops the given image at the center.
center_crops = [T.CenterCrop(size=size)(orig_img) for size in (30, 50, 100, orig_img.size)]
plot(center_crops)
####################################
# FiveCrop
# --------
# The :class:`~torchvision.transforms.FiveCrop` transform
# (see also :func:`~torchvision.transforms.functional.five_crop`)
# crops the given image into four corners and the central crop.
(top_left, top_right, bottom_left, bottom_right, center) = T.FiveCrop(size=(100, 100))(orig_img)
plot([top_left, top_right, bottom_left, bottom_right, center])
####################################
# Grayscale
# ---------
# The :class:`~torchvision.transforms.Grayscale` transform
# (see also :func:`~torchvision.transforms.functional.to_grayscale`)
# converts an image to grayscale.
gray_img = T.Grayscale()(orig_img)
plot([gray_img], cmap='gray')
####################################
# Random transforms
# -----------------
# The following transforms are random, which means that the same transformer
# instance will produce a different result each time it transforms a given image.
#
# ColorJitter
# ~~~~~~~~~~~
# The :class:`~torchvision.transforms.ColorJitter` transform
# randomly changes the brightness, saturation, and other properties of an image.
jitter = T.ColorJitter(brightness=.5, hue=.3)
jitted_imgs = [jitter(orig_img) for _ in range(4)]
plot(jitted_imgs)
####################################
# GaussianBlur
# ~~~~~~~~~~~~
# The :class:`~torchvision.transforms.GaussianBlur` transform
# (see also :func:`~torchvision.transforms.functional.gaussian_blur`)
# performs a Gaussian blur on an image.
blurrer = T.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 5))
blurred_imgs = [blurrer(orig_img) for _ in range(4)]
plot(blurred_imgs)
####################################
# RandomPerspective
# ~~~~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomPerspective` transform
# (see also :func:`~torchvision.transforms.functional.perspective`)
# performs random perspective transform on an image.
perspective_transformer = T.RandomPerspective(distortion_scale=0.6, p=1.0)
perspective_imgs = [perspective_transformer(orig_img) for _ in range(4)]
plot(perspective_imgs)
####################################
# RandomRotation
# ~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomRotation` transform
# (see also :func:`~torchvision.transforms.functional.rotate`)
# rotates an image with random angle.
rotater = T.RandomRotation(degrees=(0, 180))
rotated_imgs = [rotater(orig_img) for _ in range(4)]
plot(rotated_imgs)
####################################
# RandomAffine
# ~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomAffine` transform
# (see also :func:`~torchvision.transforms.functional.affine`)
# performs random affine transform on an image.
affine_transformer = T.RandomAffine(degrees=(30, 70), translate=(0.1, 0.3), scale=(0.5, 0.75))
affine_imgs = [affine_transformer(orig_img) for _ in range(4)]
plot(affine_imgs)
####################################
# RandomCrop
# ~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomCrop` transform
# (see also :func:`~torchvision.transforms.functional.crop`)
# crops an image at a random location.
cropper = T.RandomCrop(size=(128, 128))
crops = [cropper(orig_img) for _ in range(4)]
plot(crops)
####################################
# RandomResizedCrop
# ~~~~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomResizedCrop` transform
# (see also :func:`~torchvision.transforms.functional.resized_crop`)
# crops an image at a random location, and then resizes the crop to a given
# size.
resize_cropper = T.RandomResizedCrop(size=(32, 32))
resized_crops = [resize_cropper(orig_img) for _ in range(4)]
plot(resized_crops)
####################################
# RandomInvert
# ~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomInvert` transform
# (see also :func:`~torchvision.transforms.functional.invert`)
# randomly inverts the colors of the given image.
inverter = T.RandomInvert()
invertered_imgs = [inverter(orig_img) for _ in range(4)]
plot(invertered_imgs)
####################################
# RandomPosterize
# ~~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomPosterize` transform
# (see also :func:`~torchvision.transforms.functional.posterize`)
# randomly posterizes the image by reducing the number of bits
# of each color channel.
posterizer = T.RandomPosterize(bits=2)
posterized_imgs = [posterizer(orig_img) for _ in range(4)]
plot(posterized_imgs)
####################################
# RandomSolarize
# ~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomSolarize` transform
# (see also :func:`~torchvision.transforms.functional.solarize`)
# randomly solarizes the image by inverting all pixel values above
# the threshold.
solarizer = T.RandomSolarize(threshold=192.0)
solarized_imgs = [solarizer(orig_img) for _ in range(4)]
plot(solarized_imgs)
####################################
# RandomAdjustSharpness
# ~~~~~~~~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomAdjustSharpness` transform
# (see also :func:`~torchvision.transforms.functional.adjust_sharpness`)
# randomly adjusts the sharpness of the given image.
sharpness_adjuster = T.RandomAdjustSharpness(sharpness_factor=2)
sharpened_imgs = [sharpness_adjuster(orig_img) for _ in range(4)]
plot(sharpened_imgs)
####################################
# RandomAutocontrast
# ~~~~~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomAutocontrast` transform
# (see also :func:`~torchvision.transforms.functional.autocontrast`)
# randomly applies autocontrast to the given image.
autocontraster = T.RandomAutocontrast()
autocontrasted_imgs = [autocontraster(orig_img) for _ in range(4)]
plot(autocontrasted_imgs)
####################################
# RandomEqualize
# ~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomEqualize` transform
# (see also :func:`~torchvision.transforms.functional.equalize`)
# randomly equalizes the histogram of the given image.
equalizer = T.RandomEqualize()
equalized_imgs = [equalizer(orig_img) for _ in range(4)]
plot(equalized_imgs)
####################################
# AutoAugment
# ~~~~~~~~~~~
# The :class:`~torchvision.transforms.AutoAugment` transform
# automatically augments data based on a given auto-augmentation policy.
# See :class:`~torchvision.transforms.AutoAugmentPolicy` for the available policies.
policies = [T.AutoAugmentPolicy.CIFAR10, T.AutoAugmentPolicy.IMAGENET, T.AutoAugmentPolicy.SVHN]
augmenters = [T.AutoAugment(policy) for policy in policies]
imgs = [
[augmenter(orig_img) for _ in range(4)]
for augmenter in augmenters
]
row_title = [str(policy).split('.')[-1] for policy in policies]
plot(imgs, row_title=row_title)
####################################
# Randomly-applied transforms
# ---------------------------
#
# Some transforms are randomly-applied given a probability ``p``. That is, the
# transformed image may actually be the same as the original one, even when
# called with the same transformer instance!
#
# RandomHorizontalFlip
# ~~~~~~~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomHorizontalFlip` transform
# (see also :func:`~torchvision.transforms.functional.hflip`)
# performs horizontal flip of an image, with a given probability.
hflipper = T.RandomHorizontalFlip(p=0.5)
transformed_imgs = [hflipper(orig_img) for _ in range(4)]
plot(transformed_imgs)
####################################
# RandomVerticalFlip
# ~~~~~~~~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomVerticalFlip` transform
# (see also :func:`~torchvision.transforms.functional.vflip`)
# performs vertical flip of an image, with a given probability.
vflipper = T.RandomVerticalFlip(p=0.5)
transformed_imgs = [vflipper(orig_img) for _ in range(4)]
plot(transformed_imgs)
####################################
# RandomApply
# ~~~~~~~~~~~
# The :class:`~torchvision.transforms.RandomApply` transform
# randomly applies a list of transforms, with a given probability.
applier = T.RandomApply(transforms=[T.RandomCrop(size=(64, 64))], p=0.5)
transformed_imgs = [applier(orig_img) for _ in range(4)]
plot(transformed_imgs)
|
[
"noreply@github.com"
] |
DevPranjal.noreply@github.com
|
4d73ef6e2788f7795f74025da206752fac277b97
|
02aafed62a17b714079326abb4f77f089f4623f6
|
/touchscreen/testcal.py
|
d3cd19431305ff1399ec60a6343843616bb024c8
|
[
"MIT"
] |
permissive
|
amirgon/lv_mpy_examples
|
89e08197a3a556945823c65873853a761160a676
|
a4ccb16b17f915fb85d66facec2978166151af2b
|
refs/heads/main
| 2023-03-19T00:38:19.300618
| 2021-03-12T09:59:40
| 2021-03-12T09:59:40
| 344,953,085
| 0
| 0
|
MIT
| 2021-03-05T22:45:54
| 2021-03-05T22:45:53
| null |
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
#!/opt/bin/lv_micropython -i
import lvgl as lv
import display_driver
import time
from micropython import const
CIRCLE_SIZE = const(20)
TP_MAX_VALUE = const(10000)
def check():
point = lv.point_t()
indev = lv.indev_get_act()
indev.get_point(point)
print("click position: x: %d, y: %d"%(point.x,point.y))
circ_area.set_pos(point.x - CIRCLE_SIZE // 2,
point.y - CIRCLE_SIZE // 2)
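# check() is wired up below as the big transparent button's PRESSING handler,
# so the circle marker follows the touch point while the screen is pressed.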
def show_text(txt):
label_main.set_text(txt)
label_main.set_align(lv.label.ALIGN.CENTER)
label_main.set_pos((HRES - label_main.get_width() ) // 2,
(VRES - label_main.get_height()) // 2)
disp = lv.scr_act().get_disp()
HRES = disp.driver.hor_res
VRES = disp.driver.ver_res
# Create a big transparent button screen to receive clicks
style_transp = lv.style_t()
style_transp.init()
style_transp.set_bg_opa(lv.STATE.DEFAULT, lv.OPA.TRANSP)
big_btn = lv.btn(lv.scr_act(), None)
big_btn.set_size(TP_MAX_VALUE, TP_MAX_VALUE)
big_btn.add_style(lv.btn.PART.MAIN,style_transp)
big_btn.set_layout(lv.LAYOUT.OFF)
label_main = lv.label(lv.scr_act(), None)
style_circ = lv.style_t()
style_circ.init()
show_text("Click/drag on screen\n" + \
"to check calibration")
big_btn.set_event_cb(lambda obj, event: check() if event == lv.EVENT.PRESSING else None)
circ_area = lv.obj(lv.scr_act(), None)
circ_area.set_size(CIRCLE_SIZE, CIRCLE_SIZE)
circ_area.add_style(lv.STATE.DEFAULT,style_circ)
circ_area.set_click(False)
|
[
"uli.raich@gmail.com"
] |
uli.raich@gmail.com
|
b8009fdb590637a563909e6f62d86b7b016014d8
|
839b8d3b691bc07fdd48d2a569d8b86c3cd8b4d1
|
/tests/test_iterator.py
|
6a566b89b5a14be81ca0b696f16625adfe31364a
|
[
"MIT"
] |
permissive
|
arghavanMor/flutes
|
15bdd23fa1e289769a770c3093f2325f5b2525ab
|
3b7c518ad8d7d1386cea623a10e844db340826fe
|
refs/heads/master
| 2023-05-14T04:59:00.540352
| 2020-07-20T22:11:30
| 2020-07-20T22:11:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,889
|
py
|
import operator
import pytest
import flutes
from .utils import check_iterator
def test_chunk() -> None:
check_iterator(flutes.chunk(3, range(10)),
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]])
check_iterator(flutes.chunk(6, range(5)),
[[0, 1, 2, 3, 4]])
def test_take() -> None:
check_iterator(flutes.take(5, range(10000000)),
[0, 1, 2, 3, 4])
check_iterator(flutes.take(5, range(2)),
[0, 1])
def test_drop() -> None:
check_iterator(flutes.drop(5, range(10)),
[5, 6, 7, 8, 9])
check_iterator(flutes.drop(5, range(2)), # type: ignore[misc]
[])
def test_drop_until() -> None:
check_iterator(flutes.drop_until(lambda x: x > 5, range(10)),
[6, 7, 8, 9])
def test_split_by() -> None:
check_iterator(flutes.split_by(range(10), criterion=lambda x: x % 3 == 0),
[[1, 2], [4, 5], [7, 8]])
check_iterator(flutes.split_by(" Split by: ", empty_segments=True, separator=' '),
[[], ['S', 'p', 'l', 'i', 't'], ['b', 'y', ':'], []])
def test_scanl() -> None:
check_iterator(flutes.scanl(operator.add, [1, 2, 3, 4], 0),
[0, 1, 3, 6, 10])
check_iterator(flutes.scanl(lambda s, x: x + s, ['a', 'b', 'c', 'd']),
['a', 'ba', 'cba', 'dcba'])
def test_scanr() -> None:
check_iterator(flutes.scanr(operator.add, [1, 2, 3, 4], 0),
[10, 9, 7, 4, 0])
check_iterator(flutes.scanr(lambda s, x: x + s, ['a', 'b', 'c', 'd']),
['abcd', 'bcd', 'cd', 'd'])
def test_LazyList() -> None:
l = flutes.LazyList(range(100))
assert l[50] == 50
assert l[70:90] == list(range(70, 90))
assert l[-2] == 98
l = flutes.LazyList(range(100))
with pytest.raises(TypeError, match="__len__"):
len(l)
for i, x in enumerate(l):
assert i == x
assert len(l) == 100
for i, x in enumerate(l):
assert i == x
def test_Range() -> None:
def _check_range(*args):
r = flutes.Range(*args)
gold = list(range(*args))
assert len(r) == len(gold)
check_iterator(r, gold)
assert r[1:-1] == gold[1:-1]
assert r[-2] == gold[-2]
_check_range(10)
_check_range(1, 10 + 1)
_check_range(1, 11, 2)
def test_MapList() -> None:
l = flutes.MapList(lambda x: x * x, list(range(100)))
assert l[15] == 15 * 15
check_iterator(l[20:-10], [x * x for x in range(20, 100 - 10)])
assert len(l) == 100
check_iterator(l, [x * x for x in range(100)])
import bisect
a = [1, 2, 3, 4, 5]
pos = bisect.bisect_left(flutes.MapList(lambda x: x * x, a), 10)
assert pos == 3
b = [2, 3, 4, 5, 6]
pos = bisect.bisect_left(flutes.MapList(lambda i: a[i] * b[i], flutes.Range(len(a))), 10)
assert pos == 2
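    # MapList applies its function lazily on indexing, so the bisect searches
    # above only evaluate the O(log n) mapped values that the binary search
    # actually touches.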
|
[
"huzecong@gmail.com"
] |
huzecong@gmail.com
|
ab48d81b092f72c832d2f75826e46cf93efab8ef
|
c0b02bf77168b5770a86e8beb341738083a253a7
|
/pymada/setup.py
|
9a794523db9d1cd19191f746c8835d6eb25b977c
|
[] |
no_license
|
nhoss2/pymada
|
61fe4df9c107b66df14bf1384716f370e6d23102
|
ec537e938a593e830ca82249b82531a881430dd5
|
refs/heads/master
| 2022-12-13T20:44:09.745576
| 2020-05-27T17:05:46
| 2020-05-27T17:05:46
| 204,399,744
| 0
| 1
| null | 2022-12-08T09:31:54
| 2019-08-26T05:06:07
|
Python
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
from setuptools import setup, find_packages
setup(
name="pymada",
version="0.1.0",
url="https://github.com/nhoss2/pymada",
license="",
author="Nafis Hossain",
author_email="nafis@labs.im",
description="pymada",
packages=find_packages(),
install_requires=[
'apache-libcloud',
'django',
'djangorestframework',
'requests',
'gunicorn',
'flask',
'kubernetes',
'cryptography',
'click',
'pyyaml',
'tabulate',
'pillow'
],
entry_points={
'console_scripts': ['pymada=pymada.cli:cli']
},
classifiers=[],
)
|
[
"nafis@labs.im"
] |
nafis@labs.im
|
cffaedfae2b94bc933a329ab61f87cb4dacae1e5
|
5862c7e8df5f756867e2ac05e3d37d23e2d2c5b6
|
/network_caller/net_translate.py
|
7e2ee781c310b455854e26b1f479f2c4d91f67dd
|
[] |
no_license
|
GKaramiMP/ASL2PET
|
1015f74b47e0604ec38f5f596d79ecf2999bcb3b
|
4b4f816581b61beda67b5874e1026c98abc117b1
|
refs/heads/master
| 2022-07-01T20:26:13.531824
| 2020-05-12T09:13:17
| 2020-05-12T09:13:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,368
|
py
|
import time
import shutil
import os
# from functions.densenet_unet import _densenet_unet
# from functions.networks.dense_unet2 import _densenet_unet
import numpy as np
import SimpleITK as sitk
import tensorflow as tf
import logging
from cnn.multi_stage_denseunet import multi_stage_densenet
from cnn.unet import unet
# import wandb
from reader import *
from reader.data_reader import *
from reader.image_class import *
from losses.ssim_loss import SSIM,multistage_SSIM
from losses.mse import mean_squared_error
from threads import *
from settings import *
import psutil
from shutil import copyfile
from reader.patch_extractor import _patch_extractor_thread
from reader.data_reader import _read_data
# --------------------------------------------------------------------------------------------------------
class net_translate:
def __init__(self,data_path,server_path , Logs):
settings.init()
self.data_path=data_path
self.validation_samples=200
self.Logs = Logs
self.LOGDIR = server_path + self.Logs + '/'
self.learning_rate = .001
self.total_epochs=1000
self.no_sample_per_each_itr = 1000
self.sample_no = 2000000
self.img_width = 500
self.img_height = 500
self.asl_size = 77
self.pet_size = 63
self.display_validation_step=5
self.batch_no_validation=10
self.batch_no=10
self.parent_path = '/exports/lkeb-hpc/syousefi/Code/'
self.chckpnt_dir = self.parent_path + self.Logs + '/unet_checkpoints/'
def copytree(self,src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def save_file(self,file_name,txt):
with open(file_name, 'a') as file:
file.write(txt)
def count_number_trainable_params(self):
'''
Counts the number of trainable variables.
'''
tot_nb_params = 0
for trainable_variable in tf.trainable_variables():
shape = trainable_variable.get_shape() # e.g [D,F] or [W,H,C]
current_nb_params = self.get_nb_params_shape(shape)
tot_nb_params = tot_nb_params + current_nb_params
return tot_nb_params
def get_nb_params_shape(self,shape):
'''
        Computes the total number of params for a given shape.
        Works for any shape, e.g. [D,F] or [W,H,C]: computes D*F or W*H*C.
'''
nb_params = 1
for dim in shape:
nb_params = nb_params * int(dim)
return nb_params
def run_net(self):
self.alpha_coeff=1
'''read path of the images for train, test, and validation'''
_rd = _read_data(self.data_path)
train_data, validation_data, test_data=_rd.read_data_path()
# ======================================
bunch_of_images_no=1
_image_class_vl = image_class(validation_data,
bunch_of_images_no=bunch_of_images_no,
is_training=0,inp_size=self.asl_size,out_size=self.pet_size)
_patch_extractor_thread_vl = _patch_extractor_thread(_image_class=_image_class_vl,
img_no=bunch_of_images_no,
mutex=settings.mutex,
is_training=0,
)
_fill_thread_vl = fill_thread(validation_data,
_image_class_vl,
mutex=settings.mutex,
is_training=0,
patch_extractor=_patch_extractor_thread_vl,
)
_read_thread_vl = read_thread(_fill_thread_vl, mutex=settings.mutex,
validation_sample_no=self.validation_samples, is_training=0)
_fill_thread_vl.start()
_patch_extractor_thread_vl.start()
_read_thread_vl.start()
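        # Each data split is fed by a three-stage producer pipeline: fill_thread
        # loads volumes, the patch extractor cuts patches, and read_thread hands
        # batches to the session; the training pipeline below mirrors this
        # validation setup.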
# ======================================
bunch_of_images_no = 7
_image_class_tr = image_class(train_data,
bunch_of_images_no=bunch_of_images_no,
is_training=1,inp_size=self.asl_size,out_size=self.pet_size
)
_patch_extractor_thread_tr = _patch_extractor_thread(_image_class=_image_class_tr,
img_no=bunch_of_images_no,
mutex=settings.mutex,
is_training=1,
)
_fill_thread = fill_thread(train_data,
_image_class_tr,
mutex=settings.mutex,
is_training=1,
patch_extractor=_patch_extractor_thread_tr,
)
_read_thread = read_thread(_fill_thread, mutex=settings.mutex, is_training=1)
_fill_thread.start()
_patch_extractor_thread_tr.start()
_read_thread.start()
# ======================================
# asl_plchld= tf.placeholder(tf.float32, shape=[None, None, None, 1])
# t1_plchld= tf.placeholder(tf.float32, shape=[None, None, None, 1])
# pet_plchld= tf.placeholder(tf.float32, shape=[None, None, None, 1])
asl_plchld = tf.placeholder(tf.float32, shape=[None, self.asl_size, self.asl_size, 1])
t1_plchld = tf.placeholder(tf.float32, shape=[None, self.asl_size, self.asl_size, 1])
pet_plchld = tf.placeholder(tf.float32, shape=[None, self.pet_size, self.pet_size, 1])
ave_loss_vali = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool, name='is_training')
is_training_bn = tf.placeholder(tf.bool, name='is_training_bn')
# cnn_net = unet() # create object
# y,augmented_data = cnn_net.unet(t1=t1_plchld, asl=asl_plchld, pet=pet_plchld, is_training_bn=is_training_bn)
msdensnet = multi_stage_densenet()
y,augmented_data,loss_upsampling11,loss_upsampling2 = msdensnet.multi_stage_densenet(asl_img=asl_plchld,
t1_img=t1_plchld,
pet_img=pet_plchld,
input_dim=77,
is_training=is_training)
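        # y is the network's PET prediction; loss_upsampling11 and
        # loss_upsampling2 are intermediate-stage outputs that feed the
        # multi-stage SSIM cost defined below.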
show_img=augmented_data[0][:, :, :, 0, np.newaxis]
tf.summary.image('00: input_asl', show_img, 3)
show_img = augmented_data[1][:, :, :, 0, np.newaxis]
tf.summary.image('01: input_t1', show_img, 3)
show_img = augmented_data[2][:, :, :, 0, np.newaxis]
tf.summary.image('02: target_pet', show_img, 3)
show_img = y[:, :, :, 0, np.newaxis]
tf.summary.image('03: output_pet', show_img, 3)
#
# show_img = loss_upsampling11[:, :, :, 0, np.newaxis]
# tf.summary.image('04: loss_upsampling11', show_img, 3)
# #
# show_img = loss_upsampling22[:, :, :, 0, np.newaxis]
# tf.summary.image('05: loss_upsampling22', show_img, 3)
print('*****************************************')
print('*****************************************')
print('*****************************************')
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# devices = sess.list_devices()
# print(devices)
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
print('*****************************************')
print('*****************************************')
print('*****************************************')
train_writer = tf.summary.FileWriter(self.LOGDIR + '/train' , graph=tf.get_default_graph())
# train_writer.flush()
validation_writer = tf.summary.FileWriter(self.LOGDIR + '/validation' , graph=sess.graph)
# validation_writer.flush()
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)
# train_writer.close()
# validation_writer.close()
loadModel = 0
# self.loss = ssim_loss()
with tf.name_scope('cost'):
# ssim_val,denominator,ssim_map=SSIM(x1=augmented_data[-1], x2=y,max_val=1.0)
# cost = tf.reduce_mean((1.0 - ssim_val), name="cost")
ssim_val=tf.reduce_mean(multistage_SSIM(x1=pet_plchld, x2=y,level1=loss_upsampling11, level2=loss_upsampling2,max_val=1.5)[0])
cost = tf.reduce_mean((ssim_val), name="cost")
# mse=mean_squared_error(labels=augmented_data[-1],logit=y)
# cost = tf.reduce_mean(mse , name="cost")
tf.summary.scalar("cost", cost)
# tf.summary.scalar("denominator", denominator)
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, ).minimize(cost)
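        # Running the optimizer under the UPDATE_OPS control dependency is the
        # standard TF1 pattern for making batch-norm moving statistics update
        # on every training step.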
with tf.name_scope('validation'):
average_validation_loss = ave_loss_vali
tf.summary.scalar("average_validation_loss", average_validation_loss)
sess.run(tf.global_variables_initializer())
logging.debug('total number of variables %s' % (
np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
summ=tf.summary.merge_all()
point = 0
itr1 = 0
if loadModel:
chckpnt_dir=''
ckpt = tf.train.get_checkpoint_state(chckpnt_dir)
saver.restore(sess, ckpt.model_checkpoint_path)
point=np.int16(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
itr1=point
# with tf.Session() as sess:
print("Number of trainable parameters: %d" % self.count_number_trainable_params())
# patch_radius = 49
'''loop for epochs'''
for epoch in range(self.total_epochs):
while self.no_sample_per_each_itr*int(point/self.no_sample_per_each_itr)<self.sample_no:
print("epoch #: %d" %(epoch))
startTime = time.time()
step = 0
self.beta_coeff=1+1 * np.exp(-point/2000)
# =============validation================
if itr1 % self.display_validation_step ==0:
'''Validation: '''
loss_validation = 0
acc_validation = 0
validation_step = 0
dsc_validation=0
while (validation_step * self.batch_no_validation <settings.validation_totalimg_patch):
[validation_asl_slices, validation_pet_slices,validation_t1_slices] = _image_class_vl.return_patches_validation( validation_step * self.batch_no_validation, (validation_step + 1) *self.batch_no_validation)
if (len(validation_asl_slices)<self.batch_no_validation) | (len(validation_pet_slices)<self.batch_no_validation) | (len(validation_t1_slices)<self.batch_no_validation) :
_read_thread_vl.resume()
time.sleep(0.5)
# print('sleep 3 validation')
continue
tic=time.time()
[loss_vali,out,augmented_dataout,] = sess.run([ cost,y,augmented_data,],
feed_dict={asl_plchld:validation_asl_slices ,
t1_plchld: validation_t1_slices,
pet_plchld: validation_pet_slices,
is_training: False,
ave_loss_vali: -1,
is_training_bn:False,
})
elapsed=time.time()-tic
loss_validation += loss_vali
validation_step += 1
if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):
print('nan problem')
process = psutil.Process(os.getpid())
print(
'%d - > %d: elapsed_time:%d loss_validation: %f, memory_percent: %4s' % (
validation_step,validation_step * self.batch_no_validation
, elapsed, loss_vali, str(process.memory_percent()),
))
# end while
settings.queue_isready_vl = False
acc_validation = acc_validation / (validation_step)
loss_validation = loss_validation / (validation_step)
dsc_validation = dsc_validation / (validation_step)
if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):
print('nan problem')
_fill_thread_vl.kill_thread()
print('******Validation, step: %d , accuracy: %.4f, loss: %f*******' % (
itr1, acc_validation, loss_validation))
[sum_validation] = sess.run([summ],
feed_dict={asl_plchld: validation_asl_slices,
t1_plchld: validation_t1_slices,
pet_plchld: validation_pet_slices,
is_training: False,
ave_loss_vali: loss_validation,
is_training_bn: False,
})
validation_writer.add_summary(sum_validation, point)
validation_writer.flush()
print('end of validation---------%d' % (point))
# end if
'''loop for training batches'''
while(step*self.batch_no<self.no_sample_per_each_itr):
[train_asl_slices,train_pet_slices,train_t1_slices] = _image_class_tr.return_patches( self.batch_no)
if (len(train_asl_slices)<self.batch_no)|(len(train_pet_slices)<self.batch_no)\
|(len(train_t1_slices)<self.batch_no):
#|(len(train_t1_slices)<self.batch_no):
time.sleep(0.5)
_read_thread.resume()
continue
tic=time.time()
[ loss_train1,out,augmented_dataout,opt] = sess.run([ cost,y,augmented_data,optimizer],
feed_dict={asl_plchld: train_asl_slices,
t1_plchld: train_t1_slices,
pet_plchld: train_pet_slices,
is_training: True,
ave_loss_vali: -1,
is_training_bn: True})
elapsed=time.time()-tic
[sum_train] = sess.run([summ],
feed_dict={asl_plchld: train_asl_slices,
t1_plchld: train_t1_slices,
pet_plchld: train_pet_slices,
is_training: False,
ave_loss_vali: loss_train1,
is_training_bn: False
})
train_writer.add_summary(sum_train,point)
train_writer.flush()
step = step + 1
process = psutil.Process(os.getpid())
print(
'point: %d, elapsed_time:%d step*self.batch_no:%f , LR: %.15f, loss_train1:%f,memory_percent: %4s' % (
int((point)),elapsed,
step * self.batch_no, self.learning_rate, loss_train1,
str(process.memory_percent())))
point=int((point))#(self.no_sample_per_each_itr/self.batch_no)*itr1+step
if point%100==0:
                    '''saving model mid-epoch'''
chckpnt_path = os.path.join(self.chckpnt_dir,
('densenet_unet_inter_epoch%d_point%d.ckpt' % (epoch, point)))
saver.save(sess, chckpnt_path, global_step=point)
itr1 = itr1 + 1
point=point+1
endTime = time.time()
#==============end of epoch:
            '''saving model after each epoch'''
chckpnt_path = os.path.join(self.chckpnt_dir, 'densenet_unet.ckpt')
saver.save(sess, chckpnt_path, global_step=epoch)
print("End of epoch----> %d, elapsed time: %d" % (epoch, endTime - startTime))
|
[
"s.yousefi.radi@lumc.nl"
] |
s.yousefi.radi@lumc.nl
|
dc963bbe8bfec548b608d7fb9fe92a5d0c40a51c
|
cdaa450bde480c84341cf2c5f8b6906d86c3db34
|
/E-commerce/store/models.py
|
c0d6ad2de67dc312b4904891d52c81cf3a642395
|
[] |
no_license
|
MUSKANJASSAL/drf
|
6194c5baa9002b168112a37e56c0878efbff47e2
|
9c5b8ee53780412a49559c60ff9180a7efc62f85
|
refs/heads/master
| 2023-04-05T11:22:03.176193
| 2021-04-10T04:42:17
| 2021-04-10T04:42:17
| 330,596,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,843
|
py
|
# from django.contrib.auth.models import User
# Create your models here.
# Build tables in database
from django.conf import settings
from django.db import models
from django.urls import reverse
class ProductManager(models.Manager):
def get_queryset(self):
return super(ProductManager, self).get_queryset().filter(is_active=True)
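# Usage sketch: Product.objects.all() returns every product, while
# Product.products.all() goes through this manager and yields only rows
# with is_active=True.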
class Category(models.Model):
name = models.CharField(max_length=255, db_index=True)
slug = models.SlugField(max_length=255, unique=True)
class Meta:
verbose_name_plural = 'categories'
def get_absolute_url(self):
return reverse('store:category_list', args=[self.slug])
def __str__(self):
return self.name
class Product(models.Model):
category = models.ForeignKey(Category, related_name='product', on_delete=models.CASCADE)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='product_creator')
title = models.CharField(max_length=255)
author = models.CharField(max_length=255, default='admin')
description = models.TextField(blank=True)
image = models.ImageField(upload_to='images/', default='images/default.png')
slug = models.SlugField(max_length=255)
price = models.DecimalField(max_digits=6, decimal_places=2)
in_stock = models.BooleanField(default=True)
is_active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
objects = models.Manager()
products = ProductManager()
class Meta:
verbose_name_plural = 'Products'
ordering = ('-created',)
def get_absolute_url(self):
return reverse('store:product_detail', args=[self.slug])
def __str__(self):
return self.title
|
[
"muskan124.jassal@gmail.com"
] |
muskan124.jassal@gmail.com
|
9d3917531b8c8ec0f17ba1d7b8e711c1d9ddec5d
|
c61ffae44fa892107308680d68dbd74ebe084252
|
/TensorFlow2/config.py
|
1b665b98d795594866a0e139341ef9d8891d4d85
|
[
"MIT"
] |
permissive
|
jalehman27/VRP_DRL_MHA
|
37236f2ff7043c7f8b57d3904ef7b48e43f5ff1d
|
ef59ecc091bab77e112534d3e3ae73bddeeeeadc
|
refs/heads/master
| 2023-03-23T06:24:44.464120
| 2021-01-12T02:14:48
| 2021-01-12T02:14:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,210
|
py
|
import pickle
import os
import argparse
from datetime import datetime
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', metavar = 'M', type = str, default = 'train', choices = ['train', 'test'], help = 'train or test')
parser.add_argument('--seed', metavar = 'SE', type = int, default = 123, help = 'random seed number for inference, reproducibility')
parser.add_argument('-n', '--n_customer', metavar = 'N', type = int, default = 20, help = 'number of customer nodes, time sequence')
# train config
parser.add_argument('-b', '--batch', metavar = 'B', type = int, default = 512, help = 'batch size')
parser.add_argument('-bs', '--batch_steps', metavar = 'BS', type = int, default = 2500, help = 'number of samples = batch * batch_steps')
parser.add_argument('-bv', '--batch_verbose', metavar = 'BV', type = int, default = 10, help = 'print and logging during training process')
parser.add_argument('-nr', '--n_rollout_samples', metavar = 'R', type = int, default = 10000, help = 'baseline rollout number of samples')
parser.add_argument('-e', '--epochs', metavar = 'E', type = int, default = 20, help = 'total number of samples = epochs * number of samples')
parser.add_argument('-em', '--embed_dim', metavar = 'EM', type = int, default = 128, help = 'embedding size')
parser.add_argument('-nh', '--n_heads', metavar = 'NH', type = int, default = 8, help = 'number of heads in MHA')
parser.add_argument('-c', '--tanh_clipping', metavar = 'C', type = float, default = 10., help = 'improve exploration; clipping logits')
parser.add_argument('-ne', '--n_encode_layers', metavar = 'NE', type = int, default = 3, help = 'number of MHA encoder layers')
parser.add_argument('--lr', metavar = 'LR', type = float, default = 1e-4, help = 'initial learning rate')
parser.add_argument('-wb', '--warmup_beta', metavar = 'WB', type = float, default = 0.8, help = 'exponential moving average, warmup')
parser.add_argument('-we', '--wp_epochs', metavar = 'WE', type = int, default = 1, help = 'warmup epochs')
parser.add_argument('--islogger', action = 'store_false', help = 'flag csv logger default true')
parser.add_argument('-ld', '--log_dir', metavar = 'LD', type = str, default = './Csv/', help = 'csv logger dir')
parser.add_argument('-wd', '--weight_dir', metavar = 'MD', type = str, default = './Weights/', help = 'model weight save dir')
parser.add_argument('-pd', '--pkl_dir', metavar = 'PD', type = str, default = './Pkl/', help = 'pkl save dir')
parser.add_argument('-cd', '--cuda_dv', metavar = 'CD', type = str, default = '0', help = 'os CUDA_VISIBLE_DEVICE')
args = parser.parse_args()
return args
class Config():
def __init__(self, **kwargs):
for k, v in kwargs.items():
self.__dict__[k] = v
self.task = 'VRP%d_%s'%(self.n_customer, self.mode)
self.dump_date = datetime.now().strftime('%m%d_%H_%M')
for x in [self.log_dir, self.weight_dir, self.pkl_dir]:
os.makedirs(x, exist_ok = True)
self.pkl_path = self.pkl_dir + self.task + '.pkl'
self.n_samples = self.batch * self.batch_steps
def dump_pkl(args, verbose = True, param_log = True):
cfg = Config(**vars(args))
with open(cfg.pkl_path, 'wb') as f:
pickle.dump(cfg, f)
print('--- save pickle file in %s ---\n'%cfg.pkl_path)
if verbose:
print(''.join('%s: %s\n'%item for item in vars(cfg).items()))
if param_log:
path = '%sparam_%s_%s.csv'%(cfg.log_dir, cfg.task, cfg.dump_date)#cfg.log_dir = ./Csv/
with open(path, 'w') as f:
f.write(''.join('%s,%s\n'%item for item in vars(cfg).items()))
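# Usage sketch (with the defaults above): running `python config.py` dumps
# ./Pkl/VRP20_train.pkl and writes param_VRP20_train_<date>.csv under ./Csv/.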
def load_pkl(pkl_path, verbose = True):
if not os.path.isfile(pkl_path):
raise FileNotFoundError('pkl_path')
with open(pkl_path, 'rb') as f:
cfg = pickle.load(f)
if verbose:
print(''.join('%s: %s\n'%item for item in vars(cfg).items()))
os.environ['CUDA_VISIBLE_DEVICE'] = cfg.cuda_dv
return cfg
def file_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', metavar = 'P', type = str,
default = 'Pkl/VRP20_train.pkl', help = 'file path, pkl or h5 only')
args = parser.parse_args()
return args
def test_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', metavar = 'P', type = str, required = True,
help = 'Weights/VRP***_train_epoch***.h5, h5 file required')
parser.add_argument('-b', '--batch', metavar = 'B', type = int, default = 2, help = 'batch size')
parser.add_argument('-n', '--n_customer', metavar = 'N', type = int, default = 20, help = 'number of customer nodes, time sequence')
parser.add_argument('-s', '--seed', metavar = 'S', type = int, default = 123, help = 'random seed number for inference, reproducibility')
parser.add_argument('-t', '--txt', metavar = 'T', type = str, help = 'if you wanna test out on text file, example: ../OpenData/A-n53-k7.txt')
parser.add_argument('-d', '--decode_type', metavar = 'D', type = str, required = True, choices = ['greedy', 'sampling'], help = 'greedy or sampling required')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = arg_parser()
dump_pkl(args)
# cfg = load_pkl(file_parser().path)
# for k, v in vars(cfg).items():
# print(k, v)
# print(vars(cfg)[k])#==v
|
[
"310rnomeado@gmail.com"
] |
310rnomeado@gmail.com
|