Dataset columns (name: type, observed value range):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2 to 616)
content_id: string (length 40)
detected_licenses: list (length 0 to 69)
license_type: string (2 classes)
repo_name: string (length 5 to 118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4 to 63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k to 686M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2 to 10.3M)
extension: string (246 classes)
content: string (length 2 to 10.3M)
authors: list (length 1)
author_id: string (length 0 to 212)
077ec552dbc44f8f13d2f90ce68f5e930071324a
|
fd0bf99070d83466101869f1a13e3cc408a2a101
|
/python/046_Fastq_PE_Filter_by_ID.py
|
65884572200d11e7fd2a41d45cf066a36dc4366c
|
[] |
no_license
|
hkkenneth/lihs
|
eabf11173b5f09bdf70ebb6bb58e9bde711e03d8
|
02360939ca9e06e041ce21c99b729a2e12a28411
|
refs/heads/master
| 2021-01-10T10:04:05.807656
| 2013-03-04T15:24:58
| 2013-03-04T15:24:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
# Author: Kenneth Lui <hkkenneth@gmail.com>
# Last Updated on: 20-02-2013
## Usage: python ~/code/python/046_Fastq_PE_Filter_by_ID.py <ID LIST> <IN LIST OUTPUT FASTQ> <OUT LIST OUTPUT FASTQ> <INPUT FASTQ> <READ ID SEPARATOR (DEFAULT SPACE)>
## This is PE in the sense that it tries to parse the paired ID!!
## IDs in the list are only the "common part" (e.g. no /1 , /2) of the IDs
## Takes a fastq file and splits the sequences into 2 files according to whether the id exists in the list
## For the in-list output fastq file, the order is the same as the id list
## If an output fastq is not needed, use "-"
import sys
if len(sys.argv) < 4:
    raise SystemExit('use grep "##" ~/code/python/019f_Fastq_PE_Remove_Duplicates_With_Stat.py to get usage')
def linesToFile(lines, f):
for line in lines:
f.write(line)
read_id_sep = " "
if len(sys.argv) > 5:
read_id_sep = sys.argv[5]
id_dict = {}
id_list = []
for line in open(sys.argv[1], 'r'):
id_list.append(line[:-1])
id_dict[line[:-1]] = []
if sys.argv[2] == "-":
f1 = None
else:
f1 = open(sys.argv[2], 'w')
if sys.argv[3] == "-":
f2 = None
else:
f2 = open(sys.argv[3], 'w')
f1in = open(sys.argv[4], 'r')
f1lines = f1in.readlines()
i = 0
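# Each FASTQ record spans 4 lines; walk through the input 4 lines at a time.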
while i < len(f1lines):
id = f1lines[i][1:f1lines[i].find(read_id_sep)]
if id in id_dict:
if f1 is not None:
id_dict[id] = f1lines[i:(i+4)]
elif f2 is not None:
linesToFile(f1lines[i:(i+4)], f2)
i += 4
if f1 is not None:
for id in id_list:
if id in id_dict:
linesToFile(id_dict[id], f1)
if f1 is not None:
f1.close()
if f2 is not None:
f2.close()
|
[
"hkkenneth@gmail.com"
] |
hkkenneth@gmail.com
|
a85b8f79b55a165efd14937ca5ad057cb91f0f75
|
8390a7853ac31d5163e3111b7ce7b298c974ebfe
|
/fengzhuang/page/dzh_page.py
|
924857a367ff7569681d24e53c77b03125b35736
|
[] |
no_license
|
pivosxbmc/ALL_project
|
36d37f19a5f03558ea1e162f518a261fecfa0f59
|
34f47a39432eb1e1dff94a0420f7eaa1e2c49b4b
|
refs/heads/master
| 2023-01-24T15:23:20.930950
| 2020-11-26T10:02:47
| 2020-11-26T10:02:47
| 305,939,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
#coding=utf-8
'Handler for the next page'
import sys
sys.path.append('D:\\jx\\fengzhuang')
from base.find_element import FindElement
import time
class H_multi_Page(object):
"""H5界面"""
def __init__(self, driver):
self.elements = FindElement(driver,'Multi')
def get_multi_H5_button_element(self):
return self.elements.get_multi_element('H_home_click')
def get_multi_home_msgbox_button_element(self):
return self.elements.get_multi_element('H_home_msgbox_button')
|
[
"1094491399@qq.com"
] |
1094491399@qq.com
|
d891da04d501abe4b1f6da6ca84babc9ccac723d
|
d7fb8eacd8a1aae8fe6eb49111f93090b7e87ce0
|
/backend/tstcr2020102701_dev_14091/settings.py
|
e74bd1f3e5a9f57d885d8b38f60ca2550b592ad3
|
[] |
no_license
|
crowdbotics-apps/tstcr2020102701-dev-14091
|
4d5bcfc2b0aa29e67cebcd8948258b75e8ad9c6b
|
cc6ba4999444c7e93943f76af75c2506048bf2b6
|
refs/heads/master
| 2023-01-03T05:09:02.457778
| 2020-10-28T21:59:17
| 2020-10-28T21:59:17
| 307,772,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,125
|
py
|
"""
Django settings for tstcr2020102701_dev_14091 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tstcr2020102701_dev_14091.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tstcr2020102701_dev_14091.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
70b49f18abf43bf5e67f80ab9e45eba1399e8cd0
|
e8274f167fd219ef78241ba8ea89e5d5875ed794
|
/cloud/quantum/quantum/openstack/common/lockutils.py
|
9f4eddf57c95bd92e8539b8e2c97039dc97bb433
|
[
"Apache-2.0"
] |
permissive
|
virt2x/folsomCloud
|
02db0147f7e0f2ab0375faf4f36ca08272084152
|
e6fd612dd77f35a72739cf4d4750e9795c0fa508
|
refs/heads/master
| 2021-01-01T17:26:28.405651
| 2013-10-17T12:36:04
| 2013-10-17T12:36:04
| 13,647,787
| 0
| 1
| null | 2020-07-24T08:25:22
| 2013-10-17T12:10:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,455
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from quantum.openstack.common import cfg
from quantum.openstack.common import fileutils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory to use for lock files')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError, e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
    ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
    method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
cleanup_dir = True
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
'for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to cleanup
# the locks left behind by unit tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
return retval
return inner
return wrap
|
[
"quan.xu@intel.com"
] |
quan.xu@intel.com
|
96e74a51787d9206d2e4ddd5c9531473c08384c5
|
593dff0c5746603268417a702a00cd3355f47f3a
|
/hq_extracter.py
|
0637fa456196047256f299fb62689b0330057cc7
|
[] |
no_license
|
vc2309/Blue-sky-tools
|
e508f2cb0fd240a95b812ed53f2ac6ed3ea1cd64
|
055b06cc9865808f3d0665dc9c95aba6b401fe69
|
refs/heads/master
| 2021-09-10T18:55:52.616954
| 2018-03-31T07:14:01
| 2018-03-31T07:14:01
| 104,192,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
import pandas as pd
ifiles=['sjs_all_feb_report_m.csv']
floors=['HQ - G/F'
,'HQ - 2S'
,'HQ - 2N'
,'HQ - 3S'
,'HQ - 3N'
,'HQ - 4N'
,'HQ - 5S'
,'HQ - 5N'
,'HQ - 6S'
,'HQ - 6N'
,'HQ - 7S'
,'HQ - 7N'
,'HQ - 8S'
,'HQ - 8N'
,'HQ - 9S'
,'HQ - 9N'
,'HQ - AC'
,'HQ - 11'
,'HQ - 12'
,'HQ - 13'
,'HQ - Lift'
,'HQ - 10']
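# Keep only the rows whose 'location' is one of the HQ floors listed above and write them to a new CSV.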
def extract_hq(file):
print("here")
df=pd.read_csv(file)
hq_df=pd.DataFrame()
floor=[]
for f in floors:
floor.append(df[df['location']==f])
hq_df=pd.concat(floor)
print(hq_df.head())
hq_df.to_csv('hq_jan.csv')
def main():
for file in ifiles:
extract_hq(file)
if __name__=='__main__' :
print("ok")
main()
|
[
"wishiknew.vishnu@gmail.com"
] |
wishiknew.vishnu@gmail.com
|
1c90deae299ed6a990528539c555580748edee2a
|
bc441bb06b8948288f110af63feda4e798f30225
|
/tuna_service_sdk/model/pipeline/build_pb2.pyi
|
b2d4c34548e7bc31341d04a0ced2cc56bb0cfe4a
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,876
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from tuna_service_sdk.model.pipeline.build_status_pb2 import (
BuildStatus as tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus,
)
from tuna_service_sdk.model.pipeline.git_meta_pb2 import (
GitMeta as tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Build(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Artifact(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
packageName = ... # type: typing___Text
versionName = ... # type: typing___Text
ctime = ... # type: typing___Text
packageId = ... # type: typing___Text
versionId = ... # type: typing___Text
def __init__(self,
*,
packageName : typing___Optional[typing___Text] = None,
versionName : typing___Optional[typing___Text] = None,
ctime : typing___Optional[typing___Text] = None,
packageId : typing___Optional[typing___Text] = None,
versionId : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Build.Artifact: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Build.Artifact: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"ctime",b"ctime",u"packageId",b"packageId",u"packageName",b"packageName",u"versionId",b"versionId",u"versionName",b"versionName"]) -> None: ...
id = ... # type: typing___Text
sender = ... # type: typing___Text
created = ... # type: builtin___int
yaml_string = ... # type: typing___Text
number = ... # type: typing___Text
events = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
@property
def git_meta(self) -> tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta: ...
@property
def artifact(self) -> Build.Artifact: ...
@property
def status(self) -> tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
git_meta : typing___Optional[tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta] = None,
sender : typing___Optional[typing___Text] = None,
artifact : typing___Optional[Build.Artifact] = None,
created : typing___Optional[builtin___int] = None,
yaml_string : typing___Optional[typing___Text] = None,
status : typing___Optional[tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus] = None,
number : typing___Optional[typing___Text] = None,
events : typing___Optional[typing___Iterable[typing___Text]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Build: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Build: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"artifact",b"artifact",u"git_meta",b"git_meta",u"status",b"status"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"artifact",b"artifact",u"created",b"created",u"events",b"events",u"git_meta",b"git_meta",u"id",b"id",u"number",b"number",u"sender",b"sender",u"status",b"status",u"yaml_string",b"yaml_string"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
a651b0618f5c534aaaa2bbb5b2564e0bd4c71eec
|
cfe487ab4cd3631ee561f932f99c922d9818c63e
|
/bankingsystem/urls.py
|
9b1c997e3d047dd6bb515cd1000c8237902cec26
|
[] |
no_license
|
rsoorya/basic-banking-system
|
5ab1a228d927e616c5b137dbae2ce293eaf9d686
|
473df1556026b2e76fe9fa0c04822c7f6027a44c
|
refs/heads/master
| 2023-03-20T08:01:08.227325
| 2021-03-11T19:06:13
| 2021-03-11T19:06:13
| 346,365,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
"""bankingsystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('banktransfers.urls'))
]
|
[
"sooryaramarao18@gmail.com"
] |
sooryaramarao18@gmail.com
|
3066b1e5e881cfd20fc1567f1c361379ae7cbc4e
|
966939f62c0c84b71f4f79db4a8b19cb70b6eaa2
|
/patientrecord/myapp/urls.py
|
53d8a54abc99a528e0900e30381f08a001afb23a
|
[] |
no_license
|
mudassir-cm/djangoprojects
|
ee4eba71d90631e2c75925b8b1fa3dea3a88420f
|
b6cbd3746a8795778c28b6ef22d726dbfe0c88a9
|
refs/heads/master
| 2023-06-22T00:32:36.381689
| 2021-07-15T16:05:20
| 2021-07-15T16:05:20
| 373,089,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
from django.urls import path
from myapp import views
urlpatterns = [
path('dashboard/', views.dashboard, name='dashboard'),
path('usersignup/', views.usersignup, name='usersignup'),
path('userlogin/', views.userlogin, name='userlogin'),
path('userlogout/', views.userlogout, name='userlogout'),
path('addpatient/', views.addpatient, name='addpatient'),
path('updatepatient/<int:id>/', views.updatepatient, name='updatepatient'),
path('deletepatient/<int:id>/', views.deletepatient, name='deletepatient'),
path('patienthistory/<int:id>/', views.patienthistory, name='patienthistory'),
path('addpatienthistory/<int:id>/', views.addpatienthistory, name='addpatienthistory'),
path('deletepatienthistory/<int:id>/', views.deletepatienthistory, name='deletepatienthistory'),
path('updatepatienthistory/<int:id>', views.updatepatienthistory, name='updatepatienthistory'),
]
|
[
"mudasir2021@gmail.com"
] |
mudasir2021@gmail.com
|
9ac476d6b34148a27108c3759f98e25aad93b6dd
|
5cd7b7ffc787587faa3bbd1dae6f2c9255961ca2
|
/graph_traversals.py
|
fbbfe6397fe31b98fefda607de0f28536d39cd28
|
[] |
no_license
|
caroljunq/data-structure-exercises
|
af5d71c4ec07282f4dadcbf86e09e78ffa72eae6
|
b884b1c88220381dfa5c36d954f4761a35f990df
|
refs/heads/master
| 2020-03-08T01:38:31.973299
| 2019-04-02T12:26:08
| 2019-04-02T12:26:08
| 127,836,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
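# Breadth-first search: prints each vertex in visit order, starting from the given vertex.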
def BFS1(graph,start):
queue = [start]
visited = [start]
while queue:
v = queue.pop(0)
print(v)
for u in graph[v]:
if u not in visited:
visited.append(u)
queue.append(u)
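# BFS over a weighted adjacency list; accumulates edge weights along the BFS tree and prints the resulting distances.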
def BFS_distance(graph,start):
queue = [start]
visited = [start]
distances = {k: 0 for k in graph.keys()}
while queue:
v = queue.pop(0)
for u,d in graph[v]:
if u not in visited:
visited.append(u)
queue.append(u)
distances[u] += distances[v] + d
print(distances)
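# Recursive depth-first search; returns the vertices in the order they were first visited.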
def dfs(graph, start, visited):
visited.append(start)
for v in graph[start]:
if v not in visited:
dfs(graph,v,visited)
return visited
graph1 = {
'A': ['B','S'],
'B': ['A'],
'C': ['S','D','E','F'],
'D': ['C'],
'E': ['C','H'],
'F': ['C','G'],
'G': ['F','H','S'],
'H': ['E','G'],
'S':['A','C','G']
}
# BFS1(graph1,'A')
graph2 = {
'A': [('C',5),('B',5),('E',5)],
'B': [('A',5),('D',3)],
'C': [('A',5),('E',6),('D',4)],
'D': [('B',3),('C',4)],
'E': [('A',5),('C',6)],
}
graph3 = {
'A': [('B',3)],
'B': [('C',6),('D',1),('E',5)],
'C': [('E',6)],
'D': [('E',7)],
'E': [],
}
BFS_distance(graph3,'A')
print(dfs(graph1,'A',[]))
|
[
"junqueiracarolina@live.com"
] |
junqueiracarolina@live.com
|
8321df0e4633da445505ba2fe5a951ef62ef9419
|
00b8dff516dde0bb5b05fe82ee9bed20b80ce410
|
/PythonCode Meilenstein 6/venv/Lib/site-packages/tensorflow_core/_api/v2/compat/v1/ragged/__init__.py
|
8c32616f901edd904041b1ccb18863f89b4bc128
|
[] |
no_license
|
georgerich/MyCo-Gruppe-3
|
2f6ef26f9eb7ff0745c045fc84f8300d31e51d03
|
fae4426f8b1e56c01906762686c6eb287073fc5b
|
refs/heads/master
| 2020-08-27T13:45:06.672132
| 2020-01-30T03:36:49
| 2020-01-30T03:36:49
| 217,384,271
| 5
| 1
| null | 2020-01-29T23:18:03
| 2019-10-24T19:55:31
|
Python
|
UTF-8
|
Python
| false
| false
| 8,415
|
py
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Ragged Tensors.
This package defines ops for manipulating ragged tensors (`tf.RaggedTensor`),
which are tensors with non-uniform shapes. In particular, each `RaggedTensor`
has one or more *ragged dimensions*, which are dimensions whose slices may have
different lengths. For example, the inner (column) dimension of
`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices
(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. For a more detailed
description of ragged tensors, see the `tf.RaggedTensor` class documentation
and the [Ragged Tensor Guide](/guide/ragged_tensors).
### Additional ops that support `RaggedTensor`
Arguments that accept `RaggedTensor`s are marked in **bold**.
* `tf.batch_gather`(**params**, **indices**, name=`None`)
* `tf.bitwise.bitwise_and`(**x**, **y**, name=`None`)
* `tf.bitwise.bitwise_or`(**x**, **y**, name=`None`)
* `tf.bitwise.bitwise_xor`(**x**, **y**, name=`None`)
* `tf.bitwise.invert`(**x**, name=`None`)
* `tf.bitwise.left_shift`(**x**, **y**, name=`None`)
* `tf.bitwise.right_shift`(**x**, **y**, name=`None`)
* `tf.clip_by_value`(**t**, clip_value_min, clip_value_max, name=`None`)
* `tf.concat`(**values**, axis, name=`'concat'`)
* `tf.debugging.check_numerics`(**tensor**, message, name=`None`)
* `tf.dtypes.cast`(**x**, dtype, name=`None`)
* `tf.dtypes.complex`(**real**, **imag**, name=`None`)
* `tf.dtypes.saturate_cast`(**value**, dtype, name=`None`)
* `tf.dynamic_partition`(**data**, **partitions**, num_partitions, name=`None`)
* `tf.expand_dims`(**input**, axis=`None`, name=`None`, dim=`None`)
* `tf.gather_nd`(**params**, **indices**, name=`None`, batch_dims=`0`)
* `tf.gather`(**params**, **indices**, validate_indices=`None`, name=`None`, axis=`None`, batch_dims=`0`)
* `tf.identity`(**input**, name=`None`)
* `tf.io.decode_base64`(**input**, name=`None`)
* `tf.io.decode_compressed`(**bytes**, compression_type=`''`, name=`None`)
* `tf.io.encode_base64`(**input**, pad=`False`, name=`None`)
* `tf.math.abs`(**x**, name=`None`)
* `tf.math.acos`(**x**, name=`None`)
* `tf.math.acosh`(**x**, name=`None`)
* `tf.math.add_n`(**inputs**, name=`None`)
* `tf.math.add`(**x**, **y**, name=`None`)
* `tf.math.angle`(**input**, name=`None`)
* `tf.math.asin`(**x**, name=`None`)
* `tf.math.asinh`(**x**, name=`None`)
* `tf.math.atan2`(**y**, **x**, name=`None`)
* `tf.math.atan`(**x**, name=`None`)
* `tf.math.atanh`(**x**, name=`None`)
* `tf.math.ceil`(**x**, name=`None`)
* `tf.math.conj`(**x**, name=`None`)
* `tf.math.cos`(**x**, name=`None`)
* `tf.math.cosh`(**x**, name=`None`)
* `tf.math.digamma`(**x**, name=`None`)
* `tf.math.divide_no_nan`(**x**, **y**, name=`None`)
* `tf.math.divide`(**x**, **y**, name=`None`)
* `tf.math.equal`(**x**, **y**, name=`None`)
* `tf.math.erf`(**x**, name=`None`)
* `tf.math.erfc`(**x**, name=`None`)
* `tf.math.exp`(**x**, name=`None`)
* `tf.math.expm1`(**x**, name=`None`)
* `tf.math.floor`(**x**, name=`None`)
* `tf.math.floordiv`(**x**, **y**, name=`None`)
* `tf.math.floormod`(**x**, **y**, name=`None`)
* `tf.math.greater_equal`(**x**, **y**, name=`None`)
* `tf.math.greater`(**x**, **y**, name=`None`)
* `tf.math.imag`(**input**, name=`None`)
* `tf.math.is_finite`(**x**, name=`None`)
* `tf.math.is_inf`(**x**, name=`None`)
* `tf.math.is_nan`(**x**, name=`None`)
* `tf.math.less_equal`(**x**, **y**, name=`None`)
* `tf.math.less`(**x**, **y**, name=`None`)
* `tf.math.lgamma`(**x**, name=`None`)
* `tf.math.log1p`(**x**, name=`None`)
* `tf.math.log_sigmoid`(**x**, name=`None`)
* `tf.math.log`(**x**, name=`None`)
* `tf.math.logical_and`(**x**, **y**, name=`None`)
* `tf.math.logical_not`(**x**, name=`None`)
* `tf.math.logical_or`(**x**, **y**, name=`None`)
* `tf.math.logical_xor`(**x**, **y**, name=`'LogicalXor'`)
* `tf.math.maximum`(**x**, **y**, name=`None`)
* `tf.math.minimum`(**x**, **y**, name=`None`)
* `tf.math.multiply`(**x**, **y**, name=`None`)
* `tf.math.negative`(**x**, name=`None`)
* `tf.math.not_equal`(**x**, **y**, name=`None`)
* `tf.math.pow`(**x**, **y**, name=`None`)
* `tf.math.real`(**input**, name=`None`)
* `tf.math.reciprocal`(**x**, name=`None`)
* `tf.math.reduce_any`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)
* `tf.math.reduce_max`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)
* `tf.math.reduce_mean`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)
* `tf.math.reduce_min`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)
* `tf.math.reduce_prod`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)
* `tf.math.reduce_sum`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)
* `tf.math.rint`(**x**, name=`None`)
* `tf.math.round`(**x**, name=`None`)
* `tf.math.rsqrt`(**x**, name=`None`)
* `tf.math.sign`(**x**, name=`None`)
* `tf.math.sin`(**x**, name=`None`)
* `tf.math.sinh`(**x**, name=`None`)
* `tf.math.sqrt`(**x**, name=`None`)
* `tf.math.square`(**x**, name=`None`)
* `tf.math.squared_difference`(**x**, **y**, name=`None`)
* `tf.math.subtract`(**x**, **y**, name=`None`)
* `tf.math.tan`(**x**, name=`None`)
* `tf.math.truediv`(**x**, **y**, name=`None`)
* `tf.math.unsorted_segment_max`(**data**, **segment_ids**, num_segments, name=`None`)
* `tf.math.unsorted_segment_mean`(**data**, **segment_ids**, num_segments, name=`None`)
* `tf.math.unsorted_segment_min`(**data**, **segment_ids**, num_segments, name=`None`)
* `tf.math.unsorted_segment_prod`(**data**, **segment_ids**, num_segments, name=`None`)
* `tf.math.unsorted_segment_sqrt_n`(**data**, **segment_ids**, num_segments, name=`None`)
* `tf.math.unsorted_segment_sum`(**data**, **segment_ids**, num_segments, name=`None`)
* `tf.ones_like`(**tensor**, dtype=`None`, name=`None`, optimize=`True`)
* `tf.rank`(**input**, name=`None`)
* `tf.realdiv`(**x**, **y**, name=`None`)
* `tf.reduce_all`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)
* `tf.size`(**input**, name=`None`, out_type=`tf.int32`)
* `tf.squeeze`(**input**, axis=`None`, name=`None`, squeeze_dims=`None`)
* `tf.stack`(**values**, axis=`0`, name=`'stack'`)
* `tf.strings.as_string`(**input**, precision=`-1`, scientific=`False`, shortest=`False`, width=`-1`, fill=`''`, name=`None`)
* `tf.strings.join`(**inputs**, separator=`''`, name=`None`)
* `tf.strings.length`(**input**, name=`None`, unit=`'BYTE'`)
* `tf.strings.reduce_join`(**inputs**, axis=`None`, keepdims=`False`, separator=`''`, name=`None`)
* `tf.strings.regex_full_match`(**input**, pattern, name=`None`)
* `tf.strings.regex_replace`(**input**, pattern, rewrite, replace_global=`True`, name=`None`)
* `tf.strings.strip`(**input**, name=`None`)
* `tf.strings.substr`(**input**, pos, len, name=`None`, unit=`'BYTE'`)
* `tf.strings.to_hash_bucket_fast`(**input**, num_buckets, name=`None`)
* `tf.strings.to_hash_bucket_strong`(**input**, num_buckets, key, name=`None`)
* `tf.strings.to_hash_bucket`(**input**, num_buckets, name=`None`)
* `tf.strings.to_hash_bucket`(**input**, num_buckets, name=`None`)
* `tf.strings.to_number`(**input**, out_type=`tf.float32`, name=`None`)
* `tf.strings.unicode_script`(**input**, name=`None`)
* `tf.tile`(**input**, multiples, name=`None`)
* `tf.truncatediv`(**x**, **y**, name=`None`)
* `tf.truncatemod`(**x**, **y**, name=`None`)
* `tf.where`(**condition**, **x**=`None`, **y**=`None`, name=`None`)
* `tf.zeros_like`(**tensor**, dtype=`None`, name=`None`, optimize=`True`)
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.ops.ragged.ragged_array_ops import boolean_mask
from tensorflow.python.ops.ragged.ragged_array_ops import stack_dynamic_partitions
from tensorflow.python.ops.ragged.ragged_concat_ops import stack
from tensorflow.python.ops.ragged.ragged_factory_ops import constant
from tensorflow.python.ops.ragged.ragged_factory_ops import constant_value
from tensorflow.python.ops.ragged.ragged_factory_ops import placeholder
from tensorflow.python.ops.ragged.ragged_functional_ops import map_flat_values
from tensorflow.python.ops.ragged.ragged_math_ops import range
from tensorflow.python.ops.ragged.ragged_tensor_value import RaggedTensorValue
from tensorflow.python.ops.ragged.segment_id_ops import row_splits_to_segment_ids
from tensorflow.python.ops.ragged.segment_id_ops import segment_ids_to_row_splits
del _print_function
|
[
"chzeit02@hs-esslingen.de"
] |
chzeit02@hs-esslingen.de
|
709471af2874d6df8eb12fa07ee2cbbce98113aa
|
cba0d86ea6e8f21a2616a0ebcdf53a96449d3ccb
|
/python/codestat.py
|
ce8de83199feb2bf70eedc13625344a2e8fc2bbb
|
[] |
no_license
|
zaopuppy/base
|
1bfb4b6bddc60c0138872f4a62ea0518aa4054df
|
6bf09bb02120a8edeb1160da0332d5465e6dd556
|
refs/heads/master
| 2020-04-15T09:58:47.744981
| 2015-01-17T08:20:15
| 2015-01-17T08:20:15
| 8,814,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,911
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import os.path
from functools import reduce
if sys.version_info.major != 3:
raise Exception("Wrong python major version")
class FileInfo():
def __init__(self):
pass
class TextStatisticHandler():
def __init__(self):
self.last_byte = None
self.line_no = 0
def handle(self, buf):
self.last_byte = buf[-1]
self.line_no = reduce(lambda x, _: x + 1,
filter(lambda x: x == ord('\r'), buf),
self.line_no)
def end(self):
if self.last_byte is None:
return
if self.last_byte not in b'\r\n':
self.line_no += 1
def get(self):
info = FileInfo()
info.line_no = self.line_no
return info
def dump(self):
return self.line_no
class XmlStatisticHandler():
def __init__(self):
self.last_byte = None
self.line_no = 0
def handle(self, buf):
self.last_byte = buf[-1]
def end(self):
if self.last_byte is None:
return
def get(self):
info = FileInfo()
info.line_no = self.line_no
return info
def dump(self):
return self.line_no
class PythonStatisticHandler():
def __init__(self):
self.line_no = 0
self.begin_of_line = True
self.ignore_to_end = False
def handle(self, buf):
for b in buf:
if b == ord('#'):
if self.begin_of_line:
self.ignore_to_end = True
self.begin_of_line = False
elif b == ord('\n'):
if not self.ignore_to_end and not self.begin_of_line:
self.line_no += 1
self.ignore_to_end = False
self.begin_of_line = True
elif b in b' \r\t':
# begin_of_line = False
pass
else:
self.begin_of_line = False
def end(self):
if not self.begin_of_line and not self.ignore_to_end:
self.line_no += 1
def get(self):
info = FileInfo()
info.line_no = self.line_no
return info
def dump(self):
return self.line_no
class CppStatisticHandler():
COMMENT_NONE = 0
# "//"
COMMENT_LINE = 1
# "/" --+--> "//"
# |
# +--> "/*"
COMMENT_PRE = 2
# "/* "
COMMENT_BLOCK = 3
# "*" --> "*/"
COMMENT_POST_BLOCK = 4
def __init__(self):
self.line_no = 0
self.comment_type = self.COMMENT_NONE
# for skipping blank line
self.has_code = False
def handle(self, buf):
for b in buf:
# print("type: {}, b: {}".format(self.comment_type, chr(b)))
if self.comment_type == self.COMMENT_NONE:
if b == ord('/'):
self.comment_type = self.COMMENT_PRE
elif b in b' \r\t':
# ignore
pass
elif b == ord('\n'):
if self.has_code:
self.line_no += 1
self.has_code = False
else:
self.has_code = True
elif self.comment_type == self.COMMENT_LINE:
if b == ord('\n'):
self.comment_type = self.COMMENT_NONE
self.has_code = False
elif self.comment_type == self.COMMENT_PRE:
if b == ord('/'):
self.comment_type = self.COMMENT_LINE
elif b == ord('*'):
self.comment_type = self.COMMENT_BLOCK
else:
if b == ord('\n'):
self.line_no += 1
self.has_code = False
else:
self.has_code = True
self.comment_type = self.COMMENT_NONE
elif self.comment_type == self.COMMENT_BLOCK:
if b == ord('*'):
self.comment_type = self.COMMENT_POST_BLOCK
elif b == ord('\n'):
if self.has_code:
self.line_no += 1
self.has_code = False
elif self.comment_type == self.COMMENT_POST_BLOCK:
if b == ord('/'):
self.comment_type = self.COMMENT_NONE
elif b == ord('\n'):
self.has_code = False
else:
raise Exception("Unknown comment type, something was wrong, tell me: zhaoyi.zero@gmail.com")
def end(self):
if self.has_code:
self.line_no += 1
def get(self):
info = FileInfo()
info.line_no = self.line_no
return info
def dump(self):
return self.line_no
def statistic_text(f):
handler = TextStatisticHandler()
with open(f, "rb") as fp:
for buf in iter(lambda: fp.read(1024), b''):
handler.handle(buf)
handler.end()
print("{}: {}".format(f, handler.dump()))
return handler.get()
def statistic_xml(f):
handler = XmlStatisticHandler()
with open(f, "rb") as fp:
for buf in iter(lambda: fp.read(1024), b''):
handler.handle(buf)
handler.end()
handler.end()
print("{}: {}".format(f, handler.dump()))
return handler.get()
# doesn't support unicode file yet
def statistic_python(f):
handler = PythonStatisticHandler()
with open(f, "rb") as fp:
for buf in iter(lambda: fp.read(1024), b''):
handler.handle(buf)
handler.end()
print("{}: {}".format(f, handler.dump()))
return handler.get()
def statistic_cpp(f):
handler = CppStatisticHandler()
with open(f, "rb") as fp:
for buf in iter(lambda: fp.read(1024), b''):
handler.handle(buf)
handler.end()
print("{}: {}".format(f, handler.dump()))
return handler.get()
STATISTIC_HANDLERS = {
".py": statistic_python,
".cc": statistic_cpp,
".java": statistic_cpp,
".txt": statistic_text,
}
def get_type_by_file_name(file_name):
""" all in lower cause """
file_name = file_name.lower()
idx = file_name.rfind(".")
if idx >= 0:
return file_name[idx:]
else:
return None
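# Walk a directory tree and yield per-file statistics (None for unsupported file types).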
def statistic_dir(d):
for dir_path, dir_names, file_names in os.walk(d):
for f in file_names:
yield statistic_file(os.path.join(dir_path, f))
def statistic_file(f):
file_names = os.path.basename(f)
file_type = get_type_by_file_name(file_names)
handler = STATISTIC_HANDLERS.get(file_type, None)
if handler is None:
print(type(f))
print("file [{}] (as type {}) doesn't support".format(f, file_type))
return None
info = handler(f)
info.type = file_type
return info
def statistic(f):
stat_info = {}
if os.path.isdir(f):
for info in statistic_dir(f):
if info is None:
continue
if info.type not in stat_info.keys():
stat_info[info.type] = 0
stat_info[info.type] += info.line_no
else:
stat_info["XX"] = statistic_file(f).line_no
for item in stat_info.items():
print("{}: {}".format(item[0], item[1]))
def main():
args = sys.argv[1:]
file_list = args
# file_list = filter(lambda x: not x.startswith("--"), args)
# opt_list = filter(lambda x: x.startswith("--"), args)
for f in file_list:
statistic(f)
if __name__ == "__main__":
main()
|
[
"zhaoyi.zero@gmail.com"
] |
zhaoyi.zero@gmail.com
|
0b3c1a3a47cc1a7f7fa584af3bf3fc47ba2f9311
|
8a8082e184835f051ba5369a028b33ef7841a4c9
|
/src/slacken/__init__.py
|
b0c75ad6836aa3dd5ece6746cc4ef667459cabd0
|
[
"MIT"
] |
permissive
|
alistair-broomhead/slacken
|
f49d32830e2d92dc8f0c2a1c1eb86f7366f46406
|
d8c2a6ae35b2ae982e97fb1782e8a5a6340c5605
|
refs/heads/master
| 2021-03-12T21:42:15.696327
| 2013-05-30T16:38:05
| 2013-05-30T16:38:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
"""
Tools for working with restful apis
"""
__author__ = 'Alistair Broomhead'
from slacken.dicttypes import AttrDict
from slacken.rest_access import RESTaccess
from slacken.xml_accessor import XMLAccessor
|
[
"alistair.broomhead@mindcandy.com"
] |
alistair.broomhead@mindcandy.com
|
d6975104cb7ecc5f7e92a88667057e945d1059b0
|
2016f22390c4d91d848a142805411085e90935fa
|
/core/forms.py
|
9a06ef4295d643b1215e10cf219f5ceff9a90ebd
|
[] |
no_license
|
matheuslins/SearchSystem
|
c4de76fbca0e5833deff9f22b7f1e5d5fcb623fc
|
37f3c36ddbb078991dc1cd2d68051109c99ed520
|
refs/heads/master
| 2021-01-12T11:40:43.454847
| 2016-11-07T14:02:00
| 2016-11-07T14:02:00
| 72,256,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
from django import forms
from .models import Box, BoxLog
class BoxForm(forms.ModelForm):
class Meta:
model = Box
fields = ['name', 'number', 'content']
class BoxLogForm(forms.ModelForm):
class Meta:
model = BoxLog
fields = ['box']
|
[
"msl@cin.ufpe.br"
] |
msl@cin.ufpe.br
|
e62326a6ccb2c56f1fd73a7e059c705feebf05ab
|
78632e8b5a9e2193ad313731d1adbf769fa989b3
|
/day4/puzzle.py
|
87d4e5d9e6c11e957d472684c4438e9fdb846726
|
[] |
no_license
|
SudheerBabuGitHub/AOC2020
|
0a1677175b8bf89f6b2e9b2784074291bee4edbe
|
dfd40e9f4f3a04aba8f4f9fabb2b63676bdd2671
|
refs/heads/master
| 2023-02-09T05:13:20.727016
| 2020-12-25T02:25:18
| 2020-12-25T02:25:18
| 318,072,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,029
|
py
|
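# Advent of Code 2020, day 4: validate passport records field by field.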
def get_byr (record):
#byr (Birth Year) - four digits; at least 1920 and at most 2002.
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
byr = 0
for idx,c in enumerate(chararray):
if idx==recordlen-3-4:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "byr":
idx += 4
valid = True
for i in range(4):
if chararray[idx+i] < '0' or chararray[idx+i] > '9':
valid = False
break
if not valid:
break
if chararray[idx+4] != ' ' and chararray[idx+4] != '\n':
break
byr = int(chararray[idx] + chararray[idx+1] + chararray[idx+2] + chararray[idx+3])
if byr >= 1920 and byr <= 2002:
foundbyr = True
break
else:
continue
return foundbyr
def get_iyr (record):
#iyr (Issue Year) - four digits; at least 2010 and at most 2020.
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
iyr = 0
for idx,c in enumerate(chararray):
if idx==recordlen-3-4:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "iyr":
idx += 4
valid = True
for i in range(4):
if chararray[idx+i] < '0' or chararray[idx+i] > '9':
valid = False
break
if not valid:
break
if chararray[idx+4] != ' ' and chararray[idx+4] != '\n':
break
iyr = int(chararray[idx] + chararray[idx+1] + chararray[idx+2] + chararray[idx+3])
if iyr >= 2010 and iyr <= 2020:
foundbyr = True
break
else:
continue
return foundbyr
def get_eyr (record):
#eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
eyr = 0
for idx,c in enumerate(chararray):
if idx==recordlen-3-4:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "eyr":
idx += 4
valid = True
for i in range(4):
if chararray[idx+i] < '0' or chararray[idx+i] > '9':
valid = False
break
if not valid:
break
if chararray[idx+4] != ' ' and chararray[idx+4] != '\n':
break
eyr = int(chararray[idx] + chararray[idx+1] + chararray[idx+2] + chararray[idx+3])
if eyr >= 2020 and eyr <= 2030:
foundbyr = True
break
else:
continue
return foundbyr
def get_hgt (record):
""""
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
"""
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
unit = ""
value = ""
hgt = 0
for idx,c in enumerate(chararray):
if idx==recordlen-3-4:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "hgt":
idx += 4
i = 0
while chararray[idx+i] != ' ' and chararray[idx+i] != '\n':
value += chararray[idx+i]
i += 1
unit = chararray[idx+i-2]+chararray[idx+i-1]
if unit != "cm" and unit != "in":
break
if chararray[idx+i-2] == ':':
break
hgt = int(value[0:-2])
if unit == "cm" and (hgt >= 150 and hgt<=193):
foundbyr = True
elif unit == "in" and (hgt >= 59 and hgt<=76):
foundbyr = True
break
else:
continue
return foundbyr
def get_hcl (record):
#hcl(HairColor) - a # followed by exactly six characters 0-9 or a-f.
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
for idx,c in enumerate(chararray):
if idx==recordlen-3-7:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "hcl":
idx += 4
if chararray[idx] != '#':
break
idx += 1
valid = True
for i in range(6):
if chararray[idx + i] < '0' or chararray[idx + i] > 'f':
valid = False
break
if chararray[idx + i] > '9' and chararray[idx + i] < 'a':
valid = False
break
if not valid:
break
if chararray[idx+6] != ' ' and chararray[idx+6] != '\n':
break
foundbyr = True
break
else:
continue
return foundbyr
def get_ecl (record):
#ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
ecl = ""
colours = ["amb","blu","brn","gry","grn","hzl","oth"]
for idx,c in enumerate(chararray):
if idx==recordlen-3-3:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "ecl":
idx += 4
ecl = chararray[idx] + chararray[idx + 1] + chararray[idx + 2]
if chararray[idx+3] != ' ' and chararray[idx+3] != '\n':
break
for colour in colours:
if colour == ecl:
foundbyr = True
break
break
else:
continue
return foundbyr
def get_pid (record):
#pid (Passport ID) - a nine-digit number, including leading zeroes.
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
for idx,c in enumerate(chararray):
if idx==recordlen-3-9:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "pid":
idx += 4
valid = True
for i in range(9):
if chararray[idx+i] <'0' or chararray[idx+i] > '9':
valid = False
break
if not valid:
break
if chararray[idx+9] != ' ' and chararray[idx+9] != '\n':
break
foundbyr = True
break
else:
continue
return foundbyr
def get_cid (record):
#cid (Country ID) - ignored, missing or not.
chararray = list(record)
recordlen = len(record)-1
foundbyr = False
for idx,c in enumerate(chararray):
if idx==recordlen-3:
break
elif chararray[idx]+chararray[idx+1]+chararray[idx+2] == "cid":
foundbyr = True
break
else:
continue
return foundbyr
file = open("input.txt","r")
lines = file.readlines()
validcnt = 0
record = ""
for line in lines:
if line != "\n":
record += line
continue
else:
if get_byr(record) == False:
#print("missing byr")
record = ""
continue
if get_iyr(record) == False:
#print("missing iyr")
record = ""
continue
if get_eyr(record) == False:
#print("missing eyr")
record = ""
continue
if get_hgt(record) == False:
#print("missing hgt")
record = ""
continue
if get_hcl(record) == False:
#print("missing hcl")
record = ""
continue
if get_ecl(record) == False:
#print("missing ecl")
record = ""
continue
if get_pid(record) == False:
#print("missing pid")
record = ""
continue
#get_cid(record)
record = ""
validcnt += 1
print(validcnt)
|
[
"75408000+SudheerBabuGitHub@users.noreply.github.com"
] |
75408000+SudheerBabuGitHub@users.noreply.github.com
|
04f5d0325e7a0300279d870a928bcd03205e6b62
|
bcf84ea70de9f49bb8506a34af3fcafa35421f4a
|
/wlb_app_dev.py
|
1997776e7c825b02ddae4b40bc3d4211c8bb6868
|
[] |
no_license
|
awoltman/App_Dev
|
44b2c46e85869471a59641a13e9815b09819ebc3
|
710ca49188c42fdd7920fd96d9fd44a0b304fc07
|
refs/heads/master
| 2020-06-10T21:29:47.103560
| 2019-07-01T19:31:48
| 2019-07-01T19:31:48
| 193,755,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,263
|
py
|
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
import matplotlib.pyplot as plt
import xlwt
from datetime import datetime
import sqlite3
import time
#def init():
conn = sqlite3.connect('test_data.db')
client = ModbusClient(method = 'tcp', host = '10.81.7.195', port = 8899)
UNIT = 0x01
c = conn.cursor()
time_temp = ()
'''
## Initialized the database(.db) file ##
c.execute("CREATE TABLE TEMPS ('Time', 'T1', 'T2', 'T3', 'T4')")
c.execute("CREATE TABLE FREEZE_TIMES ('Time', 'Freeze_Time_1', 'Freeze_Time 2', 'Freeze Time 3', 'Freeze Time 4', 'Freeze Time 5', 'Freeze Time 6', 'Freeze Time 7','Freeze Time 8', 'Freeze Time 9', 'Freeze Time 10',\
'Freeze Time 11', 'Freeze Time 12', 'Freeze Time 13', 'Freeze Time 14', 'Freeze Time 15', 'Freeze Time 16', 'Freeze Time 17','Freeze Time 18', 'Freeze Time 19', 'Freeze Time 20')")
'''
'''
## Setting up styles for Excel ##
style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on',num_format_str='#,##0.00')
style1 = xlwt.easyxf(num_format_str='D-MMM-YY')
wb = xlwt.Workbook()
ws = wb.add_sheet('Temperature Data')
ws.write(0, 1, 'T1', style0)
ws.write(0, 2, 'T2', style0)
ws.write(0, 3, 'T3', style0)
ws.write(0, 4, 'T4', style0)
ws.write(0, 4, 'Time', style0)
'''
i = 0
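# Poll the controller over Modbus TCP in a loop, logging freeze times and temperatures to SQLite until interrupted.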
def record_temps():
try:
while True:
#named_tuple = time.localtime() # get struct_time
time_string = time.strftime("%m/%d/%Y %H:%M.%S")
Freezetime_temp = client.read_holding_registers(574,20,unit = UNIT)
f_one = Freezetime_temp.registers[0]
f_two = Freezetime_temp.registers[1]
f_three = Freezetime_temp.registers[2]
f_four = Freezetime_temp.registers[3]
f_five = Freezetime_temp.registers[4]
f_six = Freezetime_temp.registers[5]
f_seven = Freezetime_temp.registers[6]
f_eight = Freezetime_temp.registers[7]
f_nine = Freezetime_temp.registers[8]
f_ten = Freezetime_temp.registers[9]
f_eleven = Freezetime_temp.registers[10]
f_twelve = Freezetime_temp.registers[11]
f_thirteen = Freezetime_temp.registers[12]
f_fourteen = Freezetime_temp.registers[13]
f_fifteen = Freezetime_temp.registers[14]
f_sixteen = Freezetime_temp.registers[15]
f_seventeen = Freezetime_temp.registers[16]
f_eighteen = Freezetime_temp.registers[17]
f_nineteen = Freezetime_temp.registers[18]
f_twenty = Freezetime_temp.registers[19]
time_temp = [time_string,f_one,f_two,f_three,f_four,f_five,f_six,f_seven,f_eight,f_nine,f_ten,f_eleven,f_twelve,f_thirteen,f_fourteen,f_fifteen,f_sixteen,f_seventeen,f_eighteen,f_nineteen,f_twenty]
c.execute("INSERT INTO FREEZE_TIMES values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", time_temp)
Temps_store = client.read_holding_registers(6,4,unit =UNIT)
temp_temp = (time_string, Temps_store.registers[0],Temps_store.registers[1],Temps_store.registers[2],Temps_store.registers[3])
c.execute("INSERT INTO TEMPS values (?,?,?,?,?)", temp_temp)
conn.commit()
'''
##This section is for writing to Excel##
ws.write(ex, 0, time_string, style1)
ws.write(ex, 1, Temps_temp.registers[0], style0)
ws.write(ex, 2, Temps_temp.registers[1], style0)
ws.write(ex, 3, Temps_temp.registers[2], style0)
ws.write(ex, 4, Temps_temp.registers[3], style0)
'''
except KeyboardInterrupt:
'''
## used to save EXCEL file once done collecting data ##
wb.save('temp.xls')
'''
conn.close()
select()
def reset_default():
client.write_registers(451,1,unit =UNIT)
pass
def select():
print('C for collect')
print('D for done')
print('R for reset defaults')
g = input('Enter what you would like to do:')
if(g == 'C'):
record_temps()
elif(g == 'D'):
client.close()
elif(g == 'R'):
reset_default()
else:
select()
def login():
pass
def new_user():
pass
def forgot_id():
pass
def display_rt():
pass
select()
|
[
"awoltman@hawk.iit.edu"
] |
awoltman@hawk.iit.edu
|
52d086cf4cdc8386facda306ca5a7b87da497e67
|
dfdceb3a1893cc52c33dc31c69170a6a60840d7d
|
/logAnalysis.py
|
2e7bff57a89bb2049b60ce599d0fd18b70c55083
|
[] |
no_license
|
psk84/Python-Practice
|
6bb8aa10da8186610cf83f040f3c72d002424a4a
|
e5f66d7998109759f96a59d5afab66059fb3adec
|
refs/heads/master
| 2021-01-23T03:28:32.301392
| 2015-01-19T09:48:24
| 2015-01-19T09:48:24
| 29,351,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
import sys
import pymongo
# file insert
#f = open("C:/Dev/test.txt", 'w')
#for i in range(1, 11):
# data = "[%d send]success.\n" % i
# f.write(data)
#for j in range(12, 23):
# data = "[%d send]fail.\n" % j
# f.write(data)
#f.close()
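# Count how many log lines report success vs. failure.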
success_count = 0
fail_count = 0
f = open("C:/Dev/test.txt", 'r')
while 1:
line = f.readline()
if not line: break
print(line)
if "success" in line:
success_count = success_count + 1
else :
fail_count = fail_count + 1
f.close()
print("success=%d" % success_count)
print("fail=%d" % fail_count)
# mongo db insert
connection = pymongo.MongoClient("mongodb://localhost")
db = connection.terrydb
users = db.users
# db data delete
users.remove()
doc = {'_id':'myid', 'successCount': success_count,'failCount': fail_count}
try:
users.insert(doc)
except:
print("fail")
|
[
"seungkyupark84@gmail.com"
] |
seungkyupark84@gmail.com
|
6fead26c5691ec527b0a25f5b1bb51407b45423b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03433/s234156745.py
|
8af8f4093c0e131eec273745a1b4cdfd8539bffb
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
n = int(input().rstrip())
a = int(input().rstrip())
if n % 500 <= a:
print('Yes')
else:
print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3ef5e4152c3be2e2571bcf1fd3c78b7e282b5057
|
bf874dd4c151a097616cb48710862d352f82b8ee
|
/learning_logs/migrations/0002_entry.py
|
bfe7f6179815e9fa1f1f9d7def65014e759d771b
|
[] |
no_license
|
whjwhjwhj/PythonLearningOffice
|
0fd2763b17ab8bd35640d794916c566774ce31c2
|
a828a621c7da468507db0185801f92b22e85a531
|
refs/heads/master
| 2020-04-03T13:55:53.547106
| 2018-11-26T03:10:34
| 2018-11-26T03:10:34
| 155,303,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
# Generated by Django 2.1.2 on 2018-10-29 06:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('learning_logs', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('data_added', models.DateTimeField(auto_now_add=True)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='learning_logs.Topic')),
],
options={
'verbose_name_plural': 'entries',
},
),
]
|
[
"wei_hj@aliyun.com"
] |
wei_hj@aliyun.com
|
87416760e8d527e89eda7274e938fa35d0f5862c
|
ec551303265c269bf1855fe1a30fdffe9bc894b6
|
/topic12_backtrack/T37_solveSudoku/interview.py
|
aa39e66a9273588c348549634ece2fa51180ca9a
|
[] |
no_license
|
GongFuXiong/leetcode
|
27dbda7a5ced630ae2ae65e19d418ebbc65ae167
|
f831fd9603592ae5bee3679924f962a3ebce381c
|
refs/heads/master
| 2023-06-25T01:05:45.683510
| 2021-07-26T10:05:25
| 2021-07-26T10:05:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,380
|
py
|
'''
37. Solve Sudoku
Write a program that fills in the empty cells of a partially completed Sudoku board.
A valid solution must follow these rules:
Each digit 1-9 appears exactly once in every row.
Each digit 1-9 appears exactly once in every column.
Each digit 1-9 appears exactly once in every 3x3 sub-box delimited by the bold lines.
Empty cells are denoted by '.'.
'''
class Solution:
def solveSudoku(self, board):
"""
Do not return anything, modify board in-place instead.
"""
# Collect every position that still has no digit filled in
all_points = []
for i in range(9):
for j in range(9):
if board[i][j] == ".":
all_points.append([i, j])
# check() verifies whether digit k can legally be placed at position point
def check(point, k):
row_i = point[0]
col_j = point[1]
for i in range(9):
# check along this column (same column, every row)
if i != row_i and board[i][col_j] == k:
return False
# check along this row (same row, every column)
if i != col_j and board[row_i][i] == k:
return False
# check the 3x3 block
for i in range(row_i//3*3 , row_i//3*3+3):
for j in range(col_j//3*3, col_j//3*3+3):
if i != row_i and j != col_j and board[i][j] == k:
return False
return True
def backtrack(i):
# backtracking termination: every empty cell has been filled
if i == len(all_points):
return True
for j in range(1, 10):
# check whether digit j is valid here
if check(all_points[i],str(j)):
# if it is valid, write it into the board
board[all_points[i][0]][all_points[i][1]] = str(j)
if backtrack(i+1): # recurse on the next empty cell
return True
board[all_points[i][0]][all_points[i][1]] = "."# on failure, restore the cell
return False
backtrack(0)
print(f"board:{board}")
if __name__ == "__main__":
solution = Solution()
while 1:
str1 = input()
if str1 != "":
nums = [[c for c in s.split(",")] for s in str1.split(";")]
print(f"nums:{nums}")
solution.solveSudoku(nums)  # the class defines solveSudoku (in-place), not permute
print(nums)
else:
break
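# Hedged usage sketch (illustrative input, not from the original): each board row
# is entered as comma-separated cells and rows are joined with ';', e.g.
# "5,3,.,.,7,.,.,.,.;6,.,.,1,9,5,.,.,.;...", after which solveSudoku fills nums in place.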
|
[
"958747457@qq.com"
] |
958747457@qq.com
|
96e741b71f0fdae9ef6c8ea3f4c3df4f559e42b5
|
09deb7c2156929e8a65dca55d2142ced87f55bb0
|
/Exam5.py
|
fe9d27fc80599987e3b72355971118978307332f
|
[] |
no_license
|
nyirweb/python-four
|
9495218a01f5ea8e7a3aba7617a2b61db262f2af
|
185172d57760a3ce9f4ef56da626f8269d1d3e09
|
refs/heads/master
| 2020-04-16T11:07:47.173221
| 2019-01-13T15:58:35
| 2019-01-13T15:58:35
| 165,524,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
# Ask the user for a whole-hour value. If the number is not between 0 and 24,
# print an error message; otherwise say goodbye according to the time of day:
# 4-9: Jó reggelt! (Good morning), 10-17: Jó napot! (Good afternoon), 18-21: Jó estét! (Good evening), 22-3: Jó éjszakát! (Good night)
ido = int(input("Add meg az időt!"))
elkoszones = ["Jó reggelt!","Jó napot!","Jó estét!","Jó éjszakát!"]
if(ido >= 0 and ido <=24):
if(ido>=4 and ido<=9):
print(elkoszones[0])
if(ido>=10 and ido<=17):
print(elkoszones[1])
if(ido>=18 and ido<=21):
print(elkoszones[2])
if(ido>=22):
print(elkoszones[3])
if(ido<=3):
print(elkoszones[3])
else:
print("Sajnos nem egész óra értéket adtál meg!")
|
[
"noreply@github.com"
] |
nyirweb.noreply@github.com
|
8d8d34714355ffbcb9dc8397fb7b9605238dd8de
|
bc1c69344fe601eec08a9f411205b436da84a7cf
|
/1_python/0730/project/problem_d.py
|
ffa8f7075dfafb0466982031c30c50dbabdc2174
|
[] |
no_license
|
mooncs/TIL
|
774f01f1ccb2b827060fa0fd8d574bfb560b5443
|
0919d8c894fe8bf9644d72db6a0e474191c9d719
|
refs/heads/main
| 2023-08-15T07:35:05.055894
| 2021-09-16T05:34:51
| 2021-09-16T05:34:51
| 386,134,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
import requests
from tmdb import TMDBHelper
from pprint import pprint
def recommendation(title):
"""
If a movie matching the title exists,
print the list of recommended movies based on that movie's id.
If there are no recommendations, print [].
If the movie id lookup fails, print None.
"""
tmdb_helper = TMDBHelper('83b326269660ac3171fddfc110d21cc7')
# The movie id is needed to build the recommendations URL,
# so fetch it first with get_movie_id.
movie_title = tmdb_helper.get_movie_id(title)
# If the movie id is None, return None
if movie_title == None:
return movie_title
# If the id exists, build the request URL with get_request_url and an f-string.
else:
url = tmdb_helper.get_request_url(f'/movie/{movie_title}/recommendations', language='ko')
data = requests.get(url)
reco_data = data.json()
reco_lst = reco_data.get('results')
# Collect the recommended titles into a list and return it.
reco_title = []
for i in range( len(reco_lst) ):
reco_title.append( reco_lst[i].get('title') )
return reco_title
if __name__ == '__main__':
pprint(recommendation('기생충'))
pprint(recommendation('그래비티'))
pprint(recommendation('검색할 수 없는 영화'))
|
[
"csmoong@naver.com"
] |
csmoong@naver.com
|
283c023713ac72b6528ac5f38534be1c28255c27
|
34f425d7e511fa19220f77f0dbb6c6585451ab14
|
/bin/generate_rules.py
|
3acaeb921652be97e7639346b7a51772c5707f2b
|
[
"CC-BY-4.0"
] |
permissive
|
stamhe/YousList
|
409d2703005a0152d73af2cf0418f2ebb4d17d7e
|
0fbc3cf6db069ea0162babd8a3aed05bf8d0e9be
|
refs/heads/master
| 2021-08-23T03:04:09.268088
| 2017-12-02T17:54:58
| 2017-12-02T20:01:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,808
|
py
|
#!/usr/bin/env python
import os
import sys
import fileinput
import re
import uuid
import six
if sys.version_info[:2] >= (2, 7):
import json
from collections import OrderedDict
else:
import simplejson as json
from ordereddict import OrderedDict
pwd = os.path.dirname(os.path.abspath(__file__))
root = os.path.dirname(pwd)
class FilterParser:
# For scheme, see Appendix A of http://www.ietf.org/rfc/rfc2396.txt
DOMAIN_PREFIX = '^[a-z0-9+_.]+:/+(?:[^/]+\\.)?'
def __init__(self, name='Generated Package', basepkg=None):
self.pkg = OrderedDict()
self.id_dict = {}
self.rules = []
if basepkg:
try:
f = open(basepkg)
obj = json.load(f, object_pairs_hook=OrderedDict)
orig_pkg = obj[0]
self.pkg['id'] = orig_pkg['id']
self.pkg['name'] = orig_pkg['name']
for rule in orig_pkg['rules']:
self.id_dict[rule['name']] = rule['id']
finally:
f.close()
if 'id' not in self.pkg:
self.pkg['id'] = str(uuid.uuid4())
if 'name' not in self.pkg:
self.pkg['name'] = name
def parse(self):
for line in fileinput.input():
self._parse_rule(line)
self.pkg['rules'] = self.rules
if six.PY2:
sys.stdout.write(
json.dumps([self.pkg], ensure_ascii=False,
indent=4, separators=(',', ': ')) \
.encode('utf-8'))
else:
sys.stdout.write(
json.dumps([self.pkg], ensure_ascii=False,
indent=4, separators=(',', ': ')))
def _parse_rule(self, line):
if six.PY2:
line = line.strip().decode('utf-8')
else:
line = line.strip()
if not line or line.startswith('!') or re.match('\[Adblock.*\]', line):
return
if '##' in line:
# Element hiding rule
self._parse_hiding_rule(line)
elif line.startswith('#@#'):
sys.stderr.write('Skipping this rule: ' + line + '\n')
elif '#@#' in line:
# Element hiding exception rule
raise Exception('Cannot handle this rule: ' + line)
else:
# Blocking rule
self._parse_blocking_rule(line)
def _parse_hiding_rule(self, line):
rule = OrderedDict()
name = line
if name in self.id_dict:
rule['id'] = self.id_dict[name]
else:
rule['id'] = str(uuid.uuid4())
rule['name'] = name
urls, css = line.split('##', 2)
if ',' in urls:
url_list = urls.split(',')
for url in url_list:
self._parse_hiding_rule(url + '##' + css)
return
url = urls
trigger = OrderedDict()
if url:
trigger['url-filter'] = self.DOMAIN_PREFIX + url.replace('.', '\\.')
else:
trigger['url-filter'] = '.*'
action = OrderedDict()
action['type'] = 'css-display-none'
action['selector'] = css
content = OrderedDict()
content['trigger'] = trigger
content['action'] = action
rule['content'] = content
self.rules.append(rule)
def _parse_blocking_rule(self, line):
rule = OrderedDict()
splits = line.split('$', 2)
if len(splits) < 2:
splits.append('')
url, options = splits
name = url.lstrip('||').rstrip('^')
url = url.rstrip('^').strip('*')
if options:
name += '$' + options
if name in self.id_dict:
rule['id'] = self.id_dict[name]
else:
rule['id'] = str(uuid.uuid4())
rule['name'] = name
trigger = {}
# * Adblock Plus' filterToRegExp:
# https://github.com/adblockplus/adblockpluscore/blob/master/lib/common.js
# * uBlock Origin's strToRegex:
# https://github.com/gorhill/uBlock/blob/master/src/js/static-net-filtering.js
url_regex = url
for search, replace in [[r'\*+', '*'],
[r'\^\|$', '^'],
[r'[.+?${}()|[\]\\]', r'\\\g<0>'],
['\*', '.*'],
[r'^\\\|\\\|', self.DOMAIN_PREFIX],
[r'^\\\|', '^'],
[r'\\\|$', '$']]:
url_regex = re.sub(search, replace, url_regex)
trigger['url-filter'] = url_regex
opt_dict = self._parse_options(options)
trigger.update(opt_dict)
trigger_ordered_keys = ['url-filter',
'resource-type',
'load-type',
'if-domain',
'unless-domain']
trigger_ordered_dict = OrderedDict()
for key in trigger_ordered_keys:
if key in trigger:
trigger_ordered_dict[key] = trigger[key]
action = OrderedDict()
action['type'] = 'block'
content = OrderedDict()
content['trigger'] = trigger_ordered_dict
content['action'] = action
rule['content'] = content
self.rules.append(rule)
def _parse_options(self, options):
opt_dict = {}
if options:
options = options.split(',')
else:
options = []
for option in options:
splits = option.split('=', 2)
if len(splits) < 2:
splits.append('')
opt_key, opt_val = splits
if opt_key == 'domain':
domains = opt_val.split('|')
if_domain = []
unless_domain = []
for domain in domains:
if domain.startswith('~'):
unless_domain.append(domain.lstrip('~'))
else:
if_domain.append(domain)
if len(if_domain) and len(unless_domain):
raise Exception('Cannot handle these domains: ' + opt_val)
elif len(if_domain):
opt_dict['if-domain'] = if_domain
elif len(unless_domain):
opt_dict['unless-domain'] = unless_domain
elif opt_key == 'script':
opt_dict['resource-type'] = ['script']
elif opt_key == 'third-party':
opt_dict['load-type'] = ['third-party']
else:
raise Exception('Cannot handle this option: ' + opt_key)
return opt_dict
orig_pkg = os.path.join(root, 'Rules.1blockpkg')
parser = FilterParser(basepkg=orig_pkg)
parser.parse()
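# Hedged usage sketch (not part of the original): option parsing in isolation.
# p = FilterParser(name='Demo')            # assumes no base package file
# p._parse_options('third-party,script')
# # -> {'load-type': ['third-party'], 'resource-type': ['script']}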
|
[
"yousbe@gmail.com"
] |
yousbe@gmail.com
|
fc62026ad385c261dc340d5914e1490389de7b69
|
16abd82b9523f0fc7ae6df0aac11fd03e2e3d9f3
|
/boards/tests/test_views.py
|
c6631a2dcbefbde8dc9659cd11ccf5750f89b5e0
|
[] |
no_license
|
msm3858/projektforum
|
cf5255a5781f3536db56cf1b680557ca876f8221
|
c6a0abda9f147d3578e430012780bda3eb4f20b5
|
refs/heads/master
| 2021-09-10T10:03:32.962523
| 2018-03-24T06:26:18
| 2018-03-24T06:26:18
| 124,791,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,560
|
py
|
from django.test import TestCase
from django.urls import reverse, resolve
from ..views import home, board_topics, new_topic
from ..models import Board, Topic, Post, User
from ..forms import NewTopicForm
# Create your tests here.
#########################
# TEST HOME
#########################
class HomeTests(TestCase):
def setUp(self):
self.board = Board.objects.create(
name='Django', description='Django board.')
url = reverse('boards:home')
self.response = self.client.get(url)
def test_home_view_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_home_url_resolves_home_view(self):
view = resolve('/')
self.assertEquals(view.func, home)
def test_home_view_contains_link_to_topics_page(self):
board_topics_url = reverse(
'boards:board_topics', kwargs={'pk': self.board.pk})
self.assertContains(
self.response, 'href="{0}"'.format(board_topics_url))
#########################
# TEST BOARD
#########################
class BoardTopicsTests(TestCase):
def setUp(self):
Board.objects.create(
name='Django', description='Django board.')
def test_board_topics_view_success_status_code(self):
url = reverse('boards:board_topics', kwargs={'pk': 1})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
def test_board_topics_view_not_found_status_code(self):
url = reverse('boards:board_topics', kwargs={'pk': 99})
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
def test_board_topics_url_resolves_board_topics_view(self):
view = resolve('/boards/1/')
self.assertEquals(view.func, board_topics)
def test_board_topics_view_contains_link_back_to_homepage(self):
board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
response = self.client.get(board_topics_url)
homepage_url = reverse('boards:home')
self.assertContains(response, 'href="{0}"'.format(homepage_url))
def test_board_topics_view_contains_navigation_links(self):
board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
homepage_url = reverse('boards:home')
new_topic_url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(board_topics_url)
self.assertContains(response, 'href="{0}"'.format(homepage_url))
self.assertContains(response, 'href="{0}"'.format(new_topic_url))
#########################
# TEST NEW TOPIC
#########################
class NewTopicTests(TestCase):
def setUp(self):
Board.objects.create(name='Django', description='Django board.')
User.objects.create_user(
username='marcin', email='msm@msm.com', password='123')
def test_new_topic_view_success_status_code(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
def test_new_topic_view_not_fount_status_code(self):
url = reverse('boards:new_topic', kwargs={'pk': 99})
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
def test_new_topic_view_reselves_board_topics_view(self):
view = resolve('/boards/1/new/')
self.assertEquals(view.func, new_topic)
def test_new_topic_view_contains_link_back_to_board_topics_view(self):
new_topic_url = reverse('boards:new_topic', kwargs={'pk': 1})
board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
response = self.client.get(new_topic_url)
self.assertContains(response, 'href="{0}"'.format(board_topics_url))
def test_csrf(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(url)
self.assertContains(response, 'csrfmiddlewaretoken')
def test_new_topic_valid_post_data(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
data = {
'subject': 'Test title',
'message': 'Lorem ipsum dolor sit amet'
}
response = self.client.post(url, data)
self.assertTrue(Topic.objects.exists())
self.assertTrue(Post.objects.exists())
def test_new_topic_invalid_post_data(self):
'''
Invalid post data should not redirect
The expected behaviour is to show the form again with validation errors
'''
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.post(url, {})
form = response.context.get('form')
self.assertEquals(response.status_code, 200)
self.assertTrue(form.errors)
def test_new_topic_invalid_post_data_empty_fields(self):
'''
Invalid post data should not redirect
The expected behaviour is to show the form again with validation errors
'''
url = reverse('boards:new_topic', kwargs={'pk': 1})
data = {
'subject': '',
'message': ''
}
response = self.client.post(url, data)
self.assertEquals(response.status_code, 200)
self.assertFalse(Topic.objects.exists())
self.assertFalse(Post.objects.exists())
def test_contains_form(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(url)
form = response.context.get('form')
self.assertIsInstance(form, NewTopicForm)
|
[
"="
] |
=
|
d38c04f002d4092d4b95e17d4581f523d0a76782
|
6de1ea8c6840f714af12edde76395e21854b0214
|
/app.py
|
f3e1b07d7f37d7d5020ced8f70852fa72f315b43
|
[] |
no_license
|
VNgit/Youtube_Downloader
|
97bbff4b3ecc060b2f0456042aa7b83e0717117c
|
0688cd38992c843d194d5eb9bd4d9029c78bbfb6
|
refs/heads/master
| 2020-08-28T17:44:03.517436
| 2019-10-26T11:39:37
| 2019-10-26T11:39:37
| 217,773,126
| 1
| 0
| null | 2019-10-26T21:46:51
| 2019-10-26T21:46:51
| null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
from flask import Flask,redirect,render_template,request,url_for
from flask_sqlalchemy import SQLAlchemy
from pytube import YouTube
import os
app = Flask(__name__)
# creating configs
app.config['SQLALCHEMY_DATABASE_URI']='postgresql://postgres:morgan8514@127.0.0.1:5432/YD'
# dialect+driver://username:password@host:port/database
app.config['SECRET_KEY'] = 'secret_key'
db=SQLAlchemy(app)
from models.download_info import YD
@app.before_first_request
def create():
db.create_all()  # parentheses are required, otherwise the tables are never created
@app.route('/', methods=['POST','GET'])
def home():
if request.method=='POST':
recieved_url= request.form['users_input_url']
print('url recieved')
# python function to download video
def download_yt(url):
print('getting video...')
yt = YouTube(url)
print('getting streams')
streams=yt.streams.first()
print('getting video title...')
# print(yt.title)
print('downloading video...')
streams.download()  # download the stream selected above; the StreamQuery itself has no download()
print('#####Download complete#####')
# calling function to download
download_video = download_yt(recieved_url)
return render_template('home.html')
return render_template('home.html')
if __name__ == "__main__":
app.run(debug=True)
|
[
"morgangicheha4@gmail.com"
] |
morgangicheha4@gmail.com
|
8dbb60941dda40d486d7ee5f240f7fbb4e73da62
|
227654cd915b560b14f49f388d4256a0ce968b16
|
/agent/models.py
|
f8f5cca2af918c3cf49ac20911596ad878b08d19
|
[] |
no_license
|
chitharanjanpraveen/insurance_rdbms_project
|
cb7d976def7ce3b1a962c4703c53518bcacebb9a
|
7a41e9a688efdd216001bf100ae59ac2653a15eb
|
refs/heads/master
| 2020-03-09T15:37:40.403717
| 2018-04-25T05:06:05
| 2018-04-25T05:06:05
| 128,864,014
| 0
| 2
| null | 2018-04-25T05:06:06
| 2018-04-10T02:48:11
|
Python
|
UTF-8
|
Python
| false
| false
| 918
|
py
|
from django.db import models
# Create your models here.
class office(models.Model):
office_name = models.CharField(max_length=50, primary_key=True)
adress = models.CharField(max_length=120)
phone_no = models.IntegerField()
manager_name = models.CharField(max_length=40)
def __str__(self):
return str(self.office_name)
class agent(models.Model):
fname = models.CharField(max_length=80)
lname = models.CharField(max_length=60)
address=models.CharField(max_length=120)
phone_no = models.CharField(max_length=20)
sex = models.CharField(max_length=1)
age = models.IntegerField()
dob = models.DateField()
pass_word = models.CharField(max_length=50)
agentid = models.AutoField(primary_key=True)
agent_office_name = models.ForeignKey(office, on_delete=models.CASCADE)
def __str__(self):
return str(self.agentid)
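# Hedged usage sketch (Django shell, assumes migrations have been applied;
# the field values are illustrative only):
# hq = office.objects.create(office_name='HQ', adress='1 Main St',
#                            phone_no=5551234, manager_name='Alice')
# agent.objects.create(fname='Bob', lname='Lee', address='2 Side St',
#                      phone_no='5555678', sex='M', age=30, dob='1990-01-01',
#                      pass_word='secret', agent_office_name=hq)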
|
[
"chitharanjancharan@gmail.com"
] |
chitharanjancharan@gmail.com
|
d0e06b60a764d60c8b89feca2c614b4bed4c4f35
|
a9d93637a75bf2d074a06897dbf8404657ff2606
|
/app.py
|
37ddb0fc7ebc9e176753fc7f2e0ae527472080e5
|
[] |
no_license
|
Blackscure/flsk-wolf
|
76f82ca190a0656e0403f03bb2c2d7bba490b0ed
|
ef84adcac76f40239848020e0da5dca68b7a5f0f
|
refs/heads/master
| 2020-03-30T12:48:28.492523
| 2018-10-02T13:29:25
| 2018-10-02T13:29:25
| 151,241,709
| 0
| 0
| null | 2018-10-02T13:27:48
| 2018-10-02T11:11:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
movie_names = ['Avator',
'Pirate of Carribbean',
'Spectre',
'The dark Knight Rises',
'John Carter',
'Spiderman',
'Tangled']
movies = {
'Avator': {'critical_review': 732, 'duration': 170, 'indb_score':7.9},
'Pirate of Carribbean': {'critical_review': 303, 'duration': 230, 'indb_score':8.9},
'Spectre': {'critical_review': 702, 'duration': 180, 'indb_score':7.9},
'The dark Knight Rises': {'critical_review': 172, 'duration': 150, 'indb_score':7.3},
'John Carter': {'critical_review': 422, 'duration': 130, 'indb_score':4.9},
'Spiderman': {'critical_review': 832, 'duration': 120, 'indb_score':3.9},
'Tangled': {'critical_review': 392, 'duration': 110, 'indb_score':7.2},
}
return render_template('index.html', movie_names=movie_names, movies=movies)
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'),404
if __name__ == '__main__':
app.run(debug=True)
|
[
"Wekesabuyahi@gmail.com"
] |
Wekesabuyahi@gmail.com
|
e0025fafdb890a62fc827f31bb52fdb01549d239
|
183741e464b55916e042fce7467e1318470d3aca
|
/exercises/rosalind-python/ini3.py
|
988e7d5194ae8bd1b9828275ca0d484922b05973
|
[] |
no_license
|
phss/playground
|
418cda5116a7f47f12dc2d8474e7f8f29be03ab8
|
6e1072d81fc6d06b3ad62eedbde3342ea0044e23
|
refs/heads/master
| 2023-07-19T22:59:04.272038
| 2021-12-31T12:46:08
| 2021-12-31T12:46:08
| 74,849
| 2
| 0
| null | 2023-07-06T21:08:18
| 2008-11-12T12:49:46
|
CSS
|
UTF-8
|
Python
| false
| false
| 244
|
py
|
s = "SP0M7gw0bYZbOQfPTVGPCZ8PlethodondDFgwZc0DqjmMtII8qC0Xu05qnUxqT46YJqh44YRNNPs0ZGlQmHvaXGAEr1dCNayFD47rY5U1YMXqomiiNKnBN7BlEbeYZaHeqHs8L0T8znplagopuslnG0iuTK77I6ex5T."
a, b, c, d = map(int, "23 31 140 146".split())
print s[a:b+1], s[c:d+1]
|
[
"paulo.schneider@gmail.com"
] |
paulo.schneider@gmail.com
|
300c13b7d14b8eeb64fe0620787ba963d4b4a22d
|
3c03ecb8e066f2d4eac73a469a75e5906734c66c
|
/_2019_2020/Classworks/_21_08_02_2020/_4.py
|
bb2dfe36b4e2eba4992e43a24a81bc1310665095
|
[] |
no_license
|
waldisjr/JuniorIT
|
af1648095ec36535cc52770b114539444db4cd0b
|
6a67e713708622ae13db6d17b48e43e3d10611f2
|
refs/heads/master
| 2023-03-26T06:29:06.423163
| 2021-03-27T06:27:34
| 2021-03-27T06:27:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
file = open('1(4.py)', 'w')
for i in range(1000):
file.write(f"{i}\n")
file.close()
|
[
"waldis_jr@outlook.com"
] |
waldis_jr@outlook.com
|
733a2ad7b12dde095d29b9fa83d43bb259c76031
|
dee4bd3227686c6c1e115287b17b879def3c445f
|
/django_us_markets/app.py
|
9324b0f3b50a8288d90036e1b6e433bf8bee6b4f
|
[
"MIT"
] |
permissive
|
TabbedOut/django_us_markets
|
6f24cd8a22f1d1573f949b53f6f871ffee48389e
|
2acb3001a4969fcf5f49217bc7bc677c823301cb
|
refs/heads/master
| 2021-01-10T15:08:18.950544
| 2016-04-08T20:52:08
| 2016-04-08T20:52:08
| 55,809,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
from django.apps import AppConfig
class USMarketsAppConfig(AppConfig):
name = 'django_us_markets'
verbose_name = 'US Markets'
|
[
"nosamanuel@gmail.com"
] |
nosamanuel@gmail.com
|
8bed32b1d24b6e065eedde91cef18790224a81ef
|
f757fc2a0f70e7cb25e390f603b8580eb8fe5cfd
|
/Week_2-Simple_Programs/4.-Functions/Exercise_gcd_recur.py
|
545cf01012a3ae71c3d12fffe52ee3f85532162c
|
[] |
no_license
|
Raj-Yadav/MITx-6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python
|
03a9659f6d6a1234d60a2f6b9d315dc782ba6b2d
|
b394681f478fedf877183eb16be55a883531eea4
|
refs/heads/master
| 2020-09-04T06:41:28.469232
| 2018-04-29T09:42:40
| 2018-04-29T09:42:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
# Exercise: gcd recur
# (5/5 points)
# ESTIMATED TIME TO COMPLETE: 6 minutes
# The greatest common divisor of two positive integers is the largest integer that divides each of them without remainder. For example,
# gcd(2, 12) = 2
# gcd(6, 12) = 6
# gcd(9, 12) = 3
# gcd(17, 12) = 1
# A clever mathematical trick (due to Euclid) makes it easy to find greatest common divisors. Suppose that a and b are two positive
# integers:
# If b = 0, then the answer is a
# Otherwise, gcd(a, b) is the same as gcd(b, a % b)
# Write a function gcdRecur(a, b) that implements this idea recursively. This function takes in two positive integers and returns one
# integer.
def gcdRecur(a, b):
'''
a, b: positive integers
returns: a positive integer, the greatest common divisor of a & b.
'''
if b == 0:
return a
return gcdRecur(b, a % b)
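# Quick self-check (not part of the original solution); the expected values come
# from the examples in the problem statement above.
if __name__ == '__main__':
    assert gcdRecur(2, 12) == 2
    assert gcdRecur(9, 12) == 3
    assert gcdRecur(17, 12) == 1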
|
[
"dlujanschi2@gmail.com"
] |
dlujanschi2@gmail.com
|
11b308168ba28877a5c958de927939f0d6578a0b
|
868d1bd002a66bce3f86054b00a69c49f285126f
|
/books/01.DeepLearningScratch/chapter02/02.ArrayAND/AND.py
|
f9a2f0072f5a98a98a74d7b66abc9f98a71a7f43
|
[] |
no_license
|
doukheeWon-gmail/DeepLearningStudy
|
cf81ac5867373c8028519133a1cca80024f8f0ff
|
d346d0572c45e2f2229bd14e5aadeb077074ffa9
|
refs/heads/master
| 2023-03-16T19:05:49.594092
| 2021-03-08T09:03:46
| 2021-03-08T09:03:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
#coding: utf-8
import numpy as np
def AND(x1, x2):
x = np.array([x1,x2])
w = np.array([0.5,0.5])
b = -0.7
tmp = np.sum(w*x) + b
if tmp <= 0:
return 0
else:
return 1
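# Hedged usage sketch (not in the original file): with the weights and bias above
# the perceptron reproduces the logical AND truth table.
if __name__ == '__main__':
    for x1, x2 in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        print((x1, x2), '->', AND(x1, x2))  # only (1, 1) prints 1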
|
[
"fain9301@yahoo.com"
] |
fain9301@yahoo.com
|
52f8d22f90a6a6870ff064d288a72be4c6ab50de
|
7d78a18fcb8f34cc84e9439bd19cf491e3e0ec49
|
/Code/Particle_Identification/msc-hpc/hpc-mini-1/model8.py
|
7fca90d0b9552dd533fb15cee80aeff0c4a24a33
|
[] |
no_license
|
PsycheShaman/MSc-thesis
|
62767951b67b922ce5a21cad5bdb258998b7d2ea
|
34504499df64c7d6cc7c89af9618cd58d6378e8e
|
refs/heads/master
| 2022-03-12T07:17:57.309357
| 2019-12-10T21:17:39
| 2019-12-10T21:17:39
| 151,471,442
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,794
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 16 18:47:05 2019
@author: gerhard
"""
import glob
import numpy as np
#P_files = glob.glob("C:/Users/gerhard/Documents/msc-thesis-data/P_*.pkl", recursive=True)
x_files = glob.glob("/scratch/vljchr004/1_8_to_2_2_GeV/x_*.pkl")
y_files = glob.glob("/scratch/vljchr004/1_8_to_2_2_GeV/y_*.pkl")
#x_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\x_*.pkl")
#y_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\y_*.pkl")
import pickle
print("loading first x pickle........................................................................................")
with open(x_files[0], 'rb') as x_file0:
x = pickle.load(x_file0)
print("loading first y pickle........................................................................................")
with open(y_files[0], 'rb') as y_file0:
y = pickle.load(y_file0)
#with open(P_files[0], 'rb') as P_file0:
# P = pickle.load(P_file0)
x.shape = (x.shape[1],x.shape[2],x.shape[3])
print("x.shape")
print(x.shape)
print("recursively adding x pickles........................................................................................")
for i in x_files[1:]:
with open(i,'rb') as x_file:
print(i)
xi = pickle.load(x_file)
xi.shape = (xi.shape[1],xi.shape[2],xi.shape[3])
print("xi.shape")
print(xi.shape)
x = np.concatenate((x,xi),axis=0)
print("recursively adding y pickles........................................................................................")
for i in y_files[1:]:
with open(i,'rb') as y_file:
yi = pickle.load(y_file)
y = np.concatenate((y,yi),axis=None)
#for i in P_files[1:]:
# with open(i,'rb') as P_file:
# Pi = pickle.load(P_file)
# P = np.concatenate((P,Pi),axis=None)
#x_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/cnn/x_*.npy")
#y_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/cnn/y_*.npy")
#
#print("recursively adding x numpys........................................................................................")
#
#for i in x_files[0:]:
# with open(i,'rb') as x_file:
# print(i)
# xi = np.load(x_file)
# x = np.concatenate((x,xi),axis=0)
#
#print("recursively adding y numpys........................................................................................")
#
#for i in y_files[0:]:
# with open(i,'rb') as y_file:
# yi = np.load(y_file)
# y = np.concatenate((y,yi),axis=None)
nz = np.array([np.count_nonzero(i) for i in x])
zeros = np.where(nz==0)
x = np.delete(x,zeros,axis=0)
y = np.delete(y,zeros)
#P = np.delete(P,zeros)
x.shape = (x.shape[0],x.shape[1],x.shape[2],1)
#x.shape = (x.shape[0],x.shape[2],x.shape[1])
print("x.shape after reshape for lstm")
print(x.shape)
#GeV_range2 = np.where(P>=1.8 and P<=2.2)
#
#x = x[GeV_range2,:,:,:]
#y = y[GeV_range2]
electrons = np.where(y==1)
electrons = electrons[0]
pions = np.where(y==0)
pions = pions[0]
pions = pions[0:electrons.shape[0]]
x_1 = x[electrons,:,:]
x_2 = x[pions,:,:]
x = np.vstack((x_1,x_2))
y_1 = y[electrons]
y_2 = y[pions]
y = np.concatenate((y_1,y_2),axis=None)
ma = np.max(x)
x = x/ma
#ma = np.amax(x,axis=2)
#
#x = np.divide(x,ma)
#check the division above before running!!!!!!!!!!!1
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,random_state=123456)
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
import tensorflow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, LSTM, Bidirectional, TimeDistributed
model = Sequential()
model.add(Conv2D(32,(6,6),input_shape=(17,24,1),padding="same",activation="relu"))
model.add(Conv2D(64,(6,6),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64,(4,4),padding="same",activation="relu"))
model.add(Conv2D(128,(4,4),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(128,(3,3),padding="same",activation="relu"))
model.add(Conv2D(256,(3,3),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(256,(3,3),padding="same",activation="relu"))
model.add(Conv2D(512,(3,3),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Flatten())
model.add(Dense(1024,activation="relu"))
model.add(Dense(1024,activation="relu"))
model.add(Dense(512,activation="relu"))
model.add(Dense(512,activation="relu"))
model.add(Dense(256,activation="relu"))
model.add(Dense(256,activation="relu"))
model.add(Dense(128,activation="relu"))
model.add(Dense(128,activation="relu"))
model.add(Dense(64,activation="relu"))
model.add(Dense(32,activation="relu"))
model.add(Dense(2,activation="softmax"))
adam = tensorflow.keras.optimizers.Adam()
# Train the model with the Adam optimizer configured above
model.compile(loss='binary_crossentropy',
optimizer=adam,
metrics=['accuracy'])
batch_size=32
epochs=50
history=model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2,
shuffle=True)#,
#class_weight=class_weights)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/hpc-mini/model8_history1.png', bbox_inches='tight')
# summarize history for loss
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/hpc-mini/model8_history2.png', bbox_inches='tight')
model.probs = model.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/hpc-mini/model8_results.csv", np.array(model.probs), fmt="%s")
np.savetxt("/home/vljchr004/hpc-mini/model8_y_test.csv", np.array(y_test), fmt="%s")
model.save('/home/vljchr004/hpc-mini/model8_.h5') # creates a HDF5 file 'my_model.h5'
del model
print("<-----------------------------done------------------------------------------>")
|
[
"christiaan.viljoen@cern.ch"
] |
christiaan.viljoen@cern.ch
|
089e4ed108db3fef009b0d7feab3fd86866630e7
|
61ff23ae86e6a4bc74b0893e7f3b9600416f9dd7
|
/mipt/acm.mipt.ru/042/test1.py
|
1a9c52180c01920df8a0e559b4c3c0fac59c2fca
|
[] |
no_license
|
sergia-ch/inf
|
ee99c560300310cfadac01ff95a2d42869efcf31
|
3c73d6efc9e1c107d720680f6e4865edbb6fb185
|
refs/heads/master
| 2023-01-30T18:08:52.866094
| 2020-12-07T19:13:24
| 2020-12-07T19:13:24
| 15,929,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33
|
py
|
print 'ABCD'*249
print 'ABC'*331
|
[
"etoestja@yandex.ru"
] |
etoestja@yandex.ru
|
0c1bb1c57fb00f0ac5712f22c6993c18079bff76
|
10c3eb5229186bb24b2ed64a7054e36aacd94931
|
/submit_sunnybrook_unet_lstm_multi.py
|
60a485c674940beb056630b92284ee469446f060
|
[] |
no_license
|
alexliyang/cardiac-segmentation-cc
|
d493bfa66ee2802632f04c5f298e35ee510a39a1
|
c78b0a39600467060531c98f4207df0c4240abd4
|
refs/heads/master
| 2021-04-06T11:22:32.746009
| 2017-12-29T02:33:31
| 2017-12-29T02:33:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,413
|
py
|
#!/usr/bin/env python2.7
import re, sys, os
import shutil, cv2
import numpy as np
from train_sunnybrook_unet_lstm_mul import read_contour, map_all_contours, export_all_contours, map_endo_contours
from helpers import reshape, get_SAX_SERIES, draw_result
from unet_lstm_multi_model import unet_lstm_multi_model, dice_coef_endo_each, dice_coef_myo_each
SAX_SERIES = get_SAX_SERIES()
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart2',
'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database DICOMPart2',
'ValidationDataDICOM')
VAL_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database OverlayPart2',
'ValidationDataOverlay')
ONLINE_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart1',
'OnlineDataContours')
ONLINE_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database DICOMPart1',
'OnlineDataDICOM')
ONLINE_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database OverlayPart1',
'OnlineDataOverlay')
SAVE_VAL_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook_val_submission')
SAVE_ONLINE_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook_online_submission')
def create_submission(contours, data_path, output_path, contour_type = 'a'):
if contour_type == 'a':
weights = 'model_logs/temp_weights.hdf5'
else:
sys.exit('\ncontour type "%s" not recognized\n' % contour_type)
num_phases = 5
crop_size = 128
input_shape = (num_phases, crop_size, crop_size, 1)
num_classes = 3
images, masks = export_all_contours(contours, data_path, output_path, crop_size, num_classes=num_classes)
model = unet_lstm_multi_model(input_shape, num_classes, weights=weights, contour_type=contour_type, transfer=True)
pred_masks = model.predict(images, batch_size=32, verbose=1)
print('\nEvaluating dev set ...')
result = model.evaluate(images, masks, batch_size=32)
result = np.round(result, decimals=10)
print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
num = 0
for c_type in ['i', 'm']:
for idx, ctr in enumerate(contours):
img, mask = read_contour(ctr, data_path, num_classes)
h, w, d = img.shape
if c_type == 'i':
tmp = pred_masks[idx,...,2]
elif c_type == 'm':
tmp = pred_masks[idx,...,1]
tmp = tmp[..., np.newaxis]
tmp = reshape(tmp, to_shape=(h, w, d))
tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if not coords:
print('\nNo detection in case: {:s}; image: {:d}'.format(ctr.case, ctr.img_no))
coords = np.ones((1, 1, 1, 2), dtype='int')
if c_type == 'i':
man_filename = ctr.ctr_endo_path[ctr.ctr_endo_path.rfind('\\')+1:]
elif c_type == 'm':
man_filename = ctr.ctr_epi_path[ctr.ctr_epi_path.rfind('\\')+1:]
auto_filename = man_filename.replace('manual', 'auto')
img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual', 'IRCCI-expert')
auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto', 'FCN')
img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
dcm = 'IM-0001-%04d.dcm' % (ctr.img_no)
#dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
dcm_path = os.path.join(data_path, ctr.case, 'DICOM', dcm)
overlay_full_path = os.path.join(save_dir, ctr.case, 'Overlay')
for dirpath in [man_full_path, auto_full_path, img_full_path, overlay_full_path]:
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if 'DICOM' in dirpath:
src = dcm_path
dst = os.path.join(dirpath, img_filename)
shutil.copyfile(src, dst)
elif 'Overlay' in dirpath:
draw_result(ctr, data_path, overlay_full_path, c_type, coords)
else:
dst = os.path.join(auto_full_path, auto_filename)
if not os.path.exists(auto_full_path):
os.makedirs(auto_full_path)
with open(dst, 'wb') as f:
for cd in coords:
cd = np.squeeze(cd)
if cd.ndim == 1:
np.savetxt(f, cd, fmt='%d', delimiter=' ')
else:
for coord in cd:
np.savetxt(f, coord, fmt='%d', delimiter=' ')
print('\nNumber of multiple detections: {:d}'.format(num))
dst_eval = os.path.join(save_dir, 'evaluation_{:s}.txt'.format(c_type))
with open(dst_eval, 'wb') as f:
f.write(('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result))).encode('utf-8'))
f.close()
# Detailed evaluation:
detail_eval = os.path.join(save_dir, 'evaluation_detail_{:s}.csv'.format(c_type))
evalEndoArr = dice_coef_endo_each(masks, pred_masks)
evalMyoArr = dice_coef_myo_each(masks, pred_masks)
caseArr = [ctr.case for ctr in contours]
imgArr = [ctr.img_no for ctr in contours]
resArr = np.transpose([caseArr, imgArr, evalEndoArr, evalMyoArr])
np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
#np.savetxt(f, '\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
def create_endo_submission(endos, data_path, output_path, contour_type = 'a'):
if contour_type == 'a':
weights = 'model_logs/temp_weights.hdf5'
else:
sys.exit('\ncontour type "%s" not recognized\n' % contour_type)
num_phases = 5
crop_size = 128
input_shape = (num_phases, crop_size, crop_size, 1)
num_classes = 3
images, masks = export_all_contours(endos, data_path, output_path, crop_size, num_classes=num_classes)
model = unet_lstm_multi_model(input_shape, num_classes, weights=weights, contour_type=contour_type, transfer=True)
pred_masks = model.predict(images, batch_size=8, verbose=1)
print('\nEvaluating dev set ...')
result = model.evaluate(images, masks, batch_size=8)
result = np.round(result, decimals=10)
print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
num = 0
c_type = 'i'
for idx, ctr in enumerate(endos):
img, mask = read_contour(ctr, data_path, num_classes)
h, w, d = img.shape
if c_type == 'i':
tmp = pred_masks[idx, ..., 2]
elif c_type == 'm':
tmp = pred_masks[idx, ..., 1]
tmp = tmp[..., np.newaxis]
tmp = reshape(tmp, to_shape=(h, w, d))
tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if not coords:
print('\nNo detection in case: {:s}; image: {:d}'.format(ctr.case, ctr.img_no))
coords = np.ones((1, 1, 1, 2), dtype='int')
if c_type == 'i':
man_filename = ctr.ctr_endo_path[ctr.ctr_endo_path.rfind('\\') + 1:]
elif c_type == 'm':
man_filename = ctr.ctr_epi_path[ctr.ctr_epi_path.rfind('\\') + 1:]
auto_filename = man_filename.replace('manual', 'auto')
img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual', 'IRCCI-expert')
auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto', 'FCN')
img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
dcm = 'IM-0001-%04d.dcm' % (ctr.img_no)
# dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
dcm_path = os.path.join(data_path, ctr.case, 'DICOM', dcm)
overlay_full_path = os.path.join(save_dir, ctr.case, 'Overlay')
for dirpath in [man_full_path, auto_full_path, img_full_path, overlay_full_path]:
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if 'DICOM' in dirpath:
src = dcm_path
dst = os.path.join(dirpath, img_filename)
shutil.copyfile(src, dst)
elif 'Overlay' in dirpath:
draw_result(ctr, data_path, overlay_full_path, c_type, coords)
else:
dst = os.path.join(auto_full_path, auto_filename)
if not os.path.exists(auto_full_path):
os.makedirs(auto_full_path)
with open(dst, 'wb') as f:
for cd in coords:
cd = np.squeeze(cd)
if cd.ndim == 1:
np.savetxt(f, cd, fmt='%d', delimiter=' ')
else:
for coord in cd:
np.savetxt(f, coord, fmt='%d', delimiter=' ')
print('\nNumber of multiple detections: {:d}'.format(num))
dst_eval = os.path.join(save_dir, 'evaluation_{:s}.txt'.format(c_type))
with open(dst_eval, 'wb') as f:
f.write(('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result))).encode('utf-8'))
f.close()
# Detailed evaluation:
detail_eval = os.path.join(save_dir, 'evaluation_detail_{:s}.csv'.format(c_type))
evalEndoArr = dice_coef_endo_each(masks, pred_masks)
evalMyoArr = dice_coef_myo_each(masks, pred_masks)
caseArr = [ctr.case for ctr in endos]
imgArr = [ctr.img_no for ctr in endos]
resArr = np.transpose([caseArr, imgArr, evalEndoArr, evalMyoArr])
np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
if __name__== '__main__':
contour_type = 'a'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_online_submission_unet_multi'
print('\nProcessing online '+contour_type+' contours...')
online_ctrs = list(map_all_contours(ONLINE_CONTOUR_PATH))
online_endos = list(map_endo_contours(ONLINE_CONTOUR_PATH))
create_submission(online_ctrs, ONLINE_IMG_PATH, ONLINE_OVERLAY_PATH, contour_type)
create_endo_submission(online_endos, ONLINE_IMG_PATH, ONLINE_OVERLAY_PATH, contour_type)
save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_val_submission_unet_multi'
print('\nProcessing val '+contour_type+' contours...')
val_ctrs = list(map_all_contours(VAL_CONTOUR_PATH))
val_endos = list(map_endo_contours(VAL_CONTOUR_PATH))
create_submission(val_ctrs, VAL_IMG_PATH, VAL_OVERLAY_PATH, contour_type)
create_endo_submission(val_endos, VAL_IMG_PATH, VAL_OVERLAY_PATH, contour_type)
print('\nAll done.')
|
[
"congchao120@163.com"
] |
congchao120@163.com
|
cfb6ce942daaff03042e09cdb5aa421640b2b65e
|
48b449a147004ff9b01228376376b455a17a1905
|
/Hello World/Scripts/easy_install-3.7-script.py
|
93ba8682f27e62c1ce8fd5dce61f78296ff4bb7f
|
[] |
no_license
|
Sanzid-Imran/PyShop
|
1df0b2989114f3e56519857399bb6ae7c5835247
|
74bb53e3bc06e927ff03f8936f1272d1d863aafb
|
refs/heads/master
| 2020-07-11T13:54:09.382190
| 2019-08-26T21:16:31
| 2019-08-26T21:16:31
| 204,559,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
#!"E:\Python\Python Projects\PyShop\Hello World\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"sanzidimran@gmail.com"
] |
sanzidimran@gmail.com
|
e2665c86a8e4a5ab790a047591db59cf721964c1
|
fcd8f4935dac3b0b89fa21ace968cb872663a11f
|
/Python/JewelsAndStones.py
|
a11bcaf9c7ab3609654741d8575cf5eb3068895d
|
[] |
no_license
|
peterer0625/Portfolio
|
48dab8829455ecff19da9532f23fa42c1d3652f7
|
274d52b571b53ada51551fcbf1872642823cf5af
|
refs/heads/master
| 2022-10-28T10:22:06.908252
| 2022-10-23T14:35:53
| 2022-10-23T14:35:53
| 117,188,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
# coding: utf-8
# In[23]:
#You're given strings J representing the types of stones that are jewels, and S representing the stones you have. Each character in S is a type of stone you have. You want to know how many of the stones you have are also jewels.
#The letters in J are guaranteed distinct, and all characters in J and S are letters. Letters are case sensitive, so "a" is considered a different type of stone from "A".
#Example 1:
#Input: J = "aA", S = "aAAbbbb"
#Output: 3
#Example 2:
#Input: J = "z", S = "ZZ"
#Output: 0
#Note:
#S and J will consist of letters and have length at most 50.
#The characters in J are distinct.
class Solution:
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
result = 0
for letter in S:
if letter in J:
result = result + 1
return result
J = "aA"
S = "aAAbbbb"
ret = Solution().numJewelsInStones(J, S)
print(ret)
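# Hedged variant (not part of the original submission): building a set first makes
# each membership test O(1) instead of scanning J for every stone.
# jewels = set(J)
# print(sum(stone in jewels for stone in S))  # also prints 3 for the inputs above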
|
[
"peterer0625@gmail.com"
] |
peterer0625@gmail.com
|
dd54ff3664693406afaf577c7eb07363b7f5de25
|
47c7e2a674c393d4c50766b783e247d01d37fec2
|
/Frontend/src/Server/API/python/offer_writing.py
|
eba824ed3cba86f464189bbe1c5e3a7c1d428eea
|
[] |
no_license
|
WendyMin/LearningSystem
|
cdfc8c9cf151d600640dca5f20fe620565aff456
|
b2bf05ab42472feed989f11672274826ae8f6947
|
refs/heads/master
| 2021-04-03T04:59:11.531568
| 2020-06-08T12:49:06
| 2020-06-08T12:49:06
| 124,355,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
# coding=utf-8
from __future__ import division
import json
import MySQLdb
import time
import datetime
import random
import urllib
import json
from urllib import urlencode
from urllib import quote
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def get_sentence():
conn = MySQLdb.Connect(host = '127.0.0.1',user = 'root', passwd = '123456', db = 'gyc_f_e', \
port = 3306,charset='utf8')
cur = conn.cursor()
select_num = []
while True:
a = random.randint(1,300)
if a not in select_num:
select_num.append(int(a))
if len(select_num) == 3:
break
# print select_num
result_list = []
for j in range(len(select_num)):
re_dict = {}
sql = 'select en_sentence,ch_sentence from writing_sentence WHERE id = "%s"' % (select_num[j])
cur.execute(sql)
result = cur.fetchall()
re_dict['id'] = str(select_num[j])
re_dict['english'] = str(result[0][0]).decode('utf-8')
re_dict['chinese'] = str(result[0][1]).decode('utf-8')
if j == 2:
re_dict['type'] = '1'
else:
re_dict['type'] = '0'
result_list.append(re_dict)
# print result_list
jsondata = json.dumps(result_list,ensure_ascii=False)
print jsondata
if __name__ == '__main__':
get_sentence()
|
[
"min711s9d31@126.com"
] |
min711s9d31@126.com
|
cc6ad7fc575cee1468c6abf40ef02c2fae92bd9d
|
06cde6aef06a8d7a71862a339d2c68716f1ed792
|
/venv/bin/f2py
|
ef04aa82ada83c2c82a232a016bea0c21608f3dc
|
[] |
no_license
|
MagnusAFyhr/CryptoTensor
|
fc6a0942bea443bb0b966a977bd4ab168b5e568e
|
2ab3856f67c87fca9112a05984db43adfdf09fe0
|
refs/heads/master
| 2022-12-20T05:48:45.111232
| 2020-10-06T01:15:25
| 2020-10-06T01:15:25
| 301,040,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
#!/Users/magnusfyhr/PycharmProjects/CryptoTensor/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"magnus.fyhr@marquette.edu"
] |
magnus.fyhr@marquette.edu
|
|
4e1efa5fc68c6cf783d434aebf74d1157be0268f
|
6c1b28fce483c873f627104c8c58c90af54ef22a
|
/approach_3_solution_2.py
|
cbb82862b98488db1ddca6a2e88b15cc2ed1fb8c
|
[] |
no_license
|
rajkan01/hands_on_code_review
|
dc873857a7d73f75c9d2caa5bba3fa93ba56a4a2
|
ac28dabd6eb0d46345714208741ff57345f95149
|
refs/heads/master
| 2023-09-04T00:20:16.741717
| 2021-10-23T15:45:34
| 2021-10-25T10:56:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
# Approach 3 - Solution 2
from string import ascii_lowercase as lowercase_letters
def is_pangram(sentence):
actual_bits = 0
expected_bits = 0b11111111111111111111111111
for i, char in enumerate(sentence):
if char.isalpha():
letter_index = ord(char.lower()) - ord("a")
bit_shift = 1 << letter_index
actual_bits = actual_bits | bit_shift
return expected_bits == actual_bits
# Approach 3 - Solution 2 intentionally doesn't contain any comments.
# As discussed in the course, this is a practice problem for you: apply Approach 3 - study the code of others -- to this solution.
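# Hedged usage sketch (not part of the exercise file):
# is_pangram("The quick brown fox jumps over the lazy dog")  # -> True
# is_pangram("Hello, world")                                 # -> False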
|
[
"amy.m.haddad@gmail.com"
] |
amy.m.haddad@gmail.com
|
bd29a3919e9e554eae311ed596991eb065b7db1f
|
b210903908d418d471e0df3b93c5f290ec1c05a9
|
/gluon2pytorch/gluon2pytorch.py
|
ced44d371fe483100a99ec280b38330ca6939d3d
|
[
"MIT"
] |
permissive
|
chipper1/gluon2pytorch
|
d7bcf71900172484f1e26c46ba6f051aa1e7d773
|
e0fd770a28b1a8bf4d0aa352f360bf5765e8347d
|
refs/heads/master
| 2020-04-19T07:49:38.974250
| 2019-01-22T13:17:23
| 2019-01-22T13:17:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,078
|
py
|
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import mxnet as mx
import numpy as np
# Import converters
from .layers import CONVERTERS
# Import PyTorch model template
from .pytorch_model_template import pytorch_model_template
def eval_model(pytorch_source, pytorch_dict, module_name):
# Tricky code
torch
nn
F
exec(pytorch_source)
globals()[module_name] = locals()[module_name]
pytorch_model = locals()[module_name]()
pytorch_model.load_state_dict(pytorch_dict)
return pytorch_model
def render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name):
"""
Render model.
"""
inits = [i for i in inits if len(i) > 0]
output = pytorch_model_template.format(**{
'module_name': pytorch_module_name,
'module_name_lower': pytorch_module_name.lower(),
'inits': '\n'.join(inits),
'inputs': ', '.join(['x' + str(i) for i in inputs]),
'calls': '\n'.join(calls),
'outputs': ', '.join(['x' + str(i) for i in outputs]),
})
if dst_dir is not None:
import os
import errno
try:
os.makedirs(dst_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(os.path.join(dst_dir, pytorch_module_name.lower() + '.py'), 'w+') as f:
f.write(output)
f.close()
torch.save(pytorch_dict, os.path.join(dst_dir, pytorch_module_name.lower() + '.pt'))
return output
def gluon2pytorch(net, args, dst_dir, pytorch_module_name, debug=True):
"""
Function to convert a model.
"""
x = [mx.nd.array(np.ones(i)) for i in args]
x = net(*x)
# Get network params
params = net.collect_params()
# Create a symbol to trace net
# x = mx.sym.var('data')
x = [mx.sym.var('__input__' + str(i)) for i in range(len(args))]
sym = net(*x)
if len(sym) > 1:
group = mx.sym.Group(sym)
else:
group = sym
# Get JSON-definition of the model
json_model = json.loads(group.tojson())['nodes']
# Create empty accumulators
nodes = []
is_skipped = []
pytorch_dict = {}
inits = []
calls = []
inputs = []
outputs = [i[0] for i in json.loads(group.tojson())['heads']]
last = 0
# Trace model
for i, node in enumerate(json_model):
# If the node's op is 'null', it is not a real op but a parameter
# TODO: convert constants
if node['op'] == 'null':
if node['name'].find('__input__') == 0:
inputs.append(int(node['name'][9:]))
is_skipped.append(1)
continue
# It's not 'null'
is_skipped.append(0)
# Create dict with necessary node parameters
op = {
'name': node['name'][:-4],
'type': node['op'],
}
print(op, node)
if len(node['inputs']) > 0:
orginal_inputs = [i for i in np.array(node['inputs'])[:, 0] if i in inputs]
op['inputs'] = [i for i in np.array(node['inputs'])[:, 0] if is_skipped[i] != 1 or i in orginal_inputs]
else:
print(json_model)
op['inputs'] = []
try:
# Not all nodes have 'attrs'
op['attrs'] = node['attrs']
except KeyError:
op['attrs'] = {}
# Debug output
if debug:
print(op)
print('__')
# Append new node to list
nodes.append(op)
# If operation is in available convertors, convert it
if op['type'] in CONVERTERS:
init_str, call_str = CONVERTERS[op['type']](i, op, nodes, params, pytorch_dict)
inits.append(init_str)
calls.append(call_str)
else:
raise AttributeError('Layer isn\'t supported')
pytorch_source = render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name)
return eval_model(pytorch_source, pytorch_dict, pytorch_module_name)
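# Hedged usage sketch (identifiers and shapes are illustrative, not from this repo):
# net = mx.gluon.model_zoo.vision.resnet18_v1(pretrained=True)
# pytorch_net = gluon2pytorch(net, [(1, 3, 224, 224)], dst_dir=None,
#                             pytorch_module_name='ResNet18', debug=False)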
|
[
"nerox8664@gmail.com"
] |
nerox8664@gmail.com
|
e2c904a328be7a8a19032ae1b3c61f04cdb20fc9
|
e9ce17a00c4fee7db160297fb3691f739dff099b
|
/exproject1/settings.py
|
532f3762255047f2162c2e3e34192454f470e858
|
[] |
no_license
|
srikanthmadhu30/dgex1
|
f214173ef3ae36bb475ceddeb22f62449908ae03
|
6ee7c1ac6704aa61297fb18b754f13ecaf2d3699
|
refs/heads/master
| 2020-05-24T00:00:00.950687
| 2019-05-16T10:33:35
| 2019-05-16T10:33:35
| 187,007,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,113
|
py
|
"""
Django settings for exproject1 project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j@*@p8q12ch8xu1jij1hk@t5a-gf*8bs!@(&-l4gdg-t&bss&s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'exproject1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'exproject1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"srikanthmadhu30@gmail.com"
] |
srikanthmadhu30@gmail.com
|
cb9c434cf3db0a544c87a06b7d7f61c2d219ffc2
|
3862936fe9fb54aabaa53889e9140c80b0e7cca9
|
/Programs/HackerRank/NumPy/Floor, Ceil and Rint.py
|
eaba91f2c1cbffb055e5ad8ebe63a2edc23c3f23
|
[] |
no_license
|
MZen2610/TrainingPython
|
5e7f3a86b31bd1661d5bd4dbc0836704d6052ad1
|
c0c86a56fcdf132c9a0610e32831caa4a9829d14
|
refs/heads/master
| 2020-09-06T02:54:50.919509
| 2019-12-01T11:57:35
| 2019-12-01T11:57:35
| 220,296,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
# floor
# The tool floor returns the floor of the input element-wise.
# The floor of x is the largest integer i such that i <= x.
#
# import numpy
#
# my_array = numpy.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
# print numpy.floor(my_array) #[ 1. 2. 3. 4. 5. 6. 7. 8. 9.]
# ceil
# The tool ceil returns the ceiling of the input element-wise.
# The ceiling of x is the smallest integer i such that i >= x.
#
# import numpy
#
# my_array = numpy.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
# print numpy.ceil(my_array) #[ 2. 3. 4. 5. 6. 7. 8. 9. 10.]
# rint
# The rint tool rounds to the nearest integer of input element-wise.
#
# import numpy
#
# my_array = numpy.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
# print numpy.rint(my_array) #[ 1. 2. 3. 4. 6. 7. 8. 9. 10.]
# Sample Input
#
# 1.1 2.2 3.3 4.4 5.5 6.6 7.7 8.8 9.9
# Sample Output
#
# [ 1. 2. 3. 4. 5. 6. 7. 8. 9.]
# [ 2. 3. 4. 5. 6. 7. 8. 9. 10.]
# [ 1. 2. 3. 4. 6. 7. 8. 9. 10.]
import numpy as np
a = input().strip().split(' ')
# my_array = np.array(a, dtype=np.float64)  # the dtype can be converted like this, or
my_array = np.array(a).astype(float)  # like this
# print(my_array)
print(str(np.floor(my_array)).replace('.', '. ').replace('[', '[ ').replace(' ]', ']'))
print(str(np.ceil(my_array)).replace('.', '. ').replace('[', '[ ').replace(' ]', ']'))
print(str(np.rint(my_array)).replace('.', '. ').replace('[', '[ ').replace(' ]', ']'))
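# Hedged alternative (assumes numpy >= 1.14): the legacy 1.13 print spacing that the
# replace() calls above emulate can be requested directly:
# np.set_printoptions(legacy='1.13')
# print(np.floor(my_array)); print(np.ceil(my_array)); print(np.rint(my_array))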
|
[
"33858149+MZen1980@users.noreply.github.com"
] |
33858149+MZen1980@users.noreply.github.com
|
abfc5ca9be07e7df6784355027cea51be8d6ed5e
|
db24b324b4ae8033250b02987cc4271a40db4e54
|
/student/migrations/0002_auto_20200320_1058.py
|
71adee97d59a07a4463d26aead518761bee29ce1
|
[] |
no_license
|
brightcomputers/brightcomputers
|
1c7634b779ae5f9b6fe16b78e44a5da7013fc12c
|
98637dac87523783cadc0c580d1697df876e0b55
|
refs/heads/master
| 2022-12-11T13:42:07.970062
| 2020-05-01T05:33:01
| 2020-05-01T05:33:01
| 247,194,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 3.0.2 on 2020-03-20 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='studentdetails',
name='phoneno',
field=models.CharField(max_length=12, null=True),
),
]
|
[
"brightcomputersinfo@gmail.com"
] |
brightcomputersinfo@gmail.com
|
2236dda5b28e62a2e8fcef93e101f6d305b84d8a
|
1a8e45bbbcd263473959656d3c35fda724257559
|
/TwitterAnalysis/explore/login.py
|
f5a9e9e78f512e8cab0fe3fbf9b4f03e668aefbe
|
[] |
no_license
|
claudianorscini/Project
|
730e98607fe1cfa5a42923fb7620d13369b46e52
|
0ee8477a1dca95e138133808dff8a0334c34d0e3
|
refs/heads/master
| 2021-01-22T13:47:59.777917
| 2017-08-25T07:34:23
| 2017-08-25T07:34:23
| 100,685,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
import tweepy
#consumer key,access token, access secret
ckey = "U7A6sqsqmeQHvipknVGmdjzwT"
csecret = "zTDbhjrqJcGLsUn3S9V4SYV5BQi61C1XbrxObSLUbFFxJKTnFV"
atoken = "891974438585081857-3Uw4GCUWC7FJGLRQumzYeLnio5COWT9"
asecret = "S0QriqKzg1JFCHehOvwqhf4ICXfNlNVjJr3r5yNd7k9hG"
def authentication():
auth = tweepy.OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
return auth
|
[
"claudia.norscini@gmail.com"
] |
claudia.norscini@gmail.com
|
ff205523f1fbaee9e28ed6e8585cb6a773b6de04
|
850f599b0afb3ad83893e5f3c037c1738c7ebd7e
|
/cryptocurrency/crypto/urls.py
|
9fa347b317ff01a189c3ce9b04da5ced95bad6d9
|
[] |
no_license
|
sparshk/crypto_news
|
f782dfc298bef7d2c65ce558325d1e6c812a7055
|
f1808bbad8a1f8e1477dd02aff7de0abd63560b4
|
refs/heads/master
| 2020-03-25T04:33:20.042227
| 2018-08-03T08:55:21
| 2018-08-03T08:55:21
| 143,401,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
"""cryptocurrency URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
urlpatterns = [
    path('', views.home, name="home"),
    path('prices/', views.prices, name="prices"),
]
|
[
"noreply@github.com"
] |
sparshk.noreply@github.com
|
4267a7d9a4abdcec70ee181694c1027b497830dc
|
f8d026eb1cf6bb114e14b1c0f4c285d4f8175212
|
/polls/urls.py
|
84ce1192c295553bef11553b23119c4bbf0774a6
|
[] |
no_license
|
PanYubug/mysite
|
e5e73cc3fea3fffad1d3c6f2fd060236935ed2cf
|
cc76047dd4ab96cc73162a04a7a0ffd959d481b0
|
refs/heads/master
| 2022-11-11T19:14:08.425513
| 2020-07-09T17:04:26
| 2020-07-09T17:04:26
| 277,275,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('', views.index, name='index'),
# ex: /polls/5/
path('<int:question_id>/', views.detail, name='detail'),
# path('specifics/<int:question_id>/', views.detail, name='detail'),
# ex: /polls/5/results/
path('<int:question_id>/results/', views.results, name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
]
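
# Hedged note (editor's addition): each <int:question_id> converter above is
# passed into the matching view as a keyword argument, so the corresponding
# signatures look like def detail(request, question_id),
# def results(request, question_id) and def vote(request, question_id).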
|
[
"344146473@qq.com"
] |
344146473@qq.com
|
c25400f6d2c5e46a0896fc7f06afbd1cc43b2450
|
6977691187176f78db054b2d6e8565ed0276ad53
|
/common/get_ieng_teacher_pro_headers.py
|
d027b3c4f13e095c9155f659b4ff01ff0f4b4d88
|
[] |
no_license
|
Lee-hsien-sen/python-excel-unittest
|
094ecbe2fcae389bc4dc6857dd59cfdb4706b7c8
|
0baa7a4fa3cd38b90894372d02cc972688dba9bc
|
refs/heads/master
| 2023-02-13T14:33:38.416121
| 2021-01-12T14:53:29
| 2021-01-12T14:53:29
| 329,016,687
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,729
|
py
|
import re
import requests
import json
#
# # Cookie Version 0 specifies that spaces, square brackets, parentheses, equals signs, commas, double quotes, slashes, question marks, @, colons, semicolons and other special characters must not appear in cookie content.
#
def get_ieng_teacher_pro_headers(openid):
    # The data below obtains the MELEDEV session for the "IENG official account"
method = 'POST'
url = 'https://wxteacher.tope365.com/teacher/login'
headers = {}
# headers = {'content-type': 'application/json'}
headers["Content-Type"] = "application/json; charset=UTF-8"
data = json.dumps(openid)
print(data)
if openid == '':
# headers["Content-Type"] = "application/json; charset=UTF-8"
wx_cookie = ""
        print(
            "url_openid is empty------------------------------------------------------------=========================================================================================")
else:
        session = ieng_wx_session(method, url, data, headers)  # helper that dispatches on the request method
wx_cookie = 'JSESSIONID_COOKIE=' + session
print(wx_cookie)
return wx_cookie
def ieng_wx_session(method, url, data, headers):
try:
print("每请求一次getmeiledevsessions接口,则打印一次-----------------------------------------")
if method =='POST':
r = requests.post(url,data=data,headers=headers)
        elif method == 'GET':  # for GET, the data is passed via params
            r = requests.get(url, params=data, headers=headers)  # for GET, the data is passed via params
elif method == 'PUT':
r = requests.put(url,data=data,headers=headers)
elif method == 'DELETE':
r = requests.delete(url)
else:
print("--------------------请求方法"+method+"不支持-----------------")
r = '接口请求出现异常'
except:
        print(u'API request 35345 failed')
        r = 'API request error'
try:
headers4 = dict(r.headers) # 因r.headers返回的不是dict类型,所以dict转化
print(headers4)
if 'Set-Cookie' in str(headers4):
a = headers4['Set-Cookie']
# print(type(a),a)
if 'JSESSIONID_COOKIE=' in a:
b = (re.findall('JSESSIONID_COOKIE=([\w,\-]+);', a))[0]
print("获取JSESSIONID_COOKIE成功")
else:
b=''
                print('failed to get JSESSIONID_COOKIE: no JSESSIONID_COOKIE found in the Set-Cookie of the returned headers')
else:
b = ''
            print('failed: no Set-Cookie holding ieng_wx found in the returned headers')
        print("ieng_wx_cookie value returned by the API: ", b)
except:
b = ''
        print(u'for the current request, the returned header info could not be read directly, or an unexpected error occurred')
return b
# url = 'http://meiledev.soyoung.com/v1/user/testlogin'
# headers = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN'}
# # data = {'openid': 'oESYd1cJy0UIVlFVQ8HnXvt4AMw0'}
# data = {'openid': 'oeUP30MdsPv2xwxyqXZnNXWqhlYU'}
#
# method ='GET'
# getMELEDEV = getMELEDEVsession(method, url, data,headers) # helper that dispatches on the request method
# MELEDEV = getMELEDEV.getMELEDEVsession()
if __name__ == "__main__":
data = {"loginName":"zhangfeng1","password":"000000"}
ieng_token = get_ieng_teacher_pro_headers(data)
print(ieng_token)
|
[
"17519476710@163.com"
] |
17519476710@163.com
|
72ffd878190fe379ff49c75827bdfe5a0f51528a
|
1a7bb0a40d9402f177f3821e3e488e3302944ce2
|
/intent/Loki_query_time.py
|
07b0aa6d3c855622422cd8dbd3234f0285ce37d0
|
[] |
no_license
|
KevinKang1211/Ticket_Bot
|
ffbc8c85a292b3516f8771fc17d3f0d8f54495b4
|
7faaf3d1add82d25e555cfd05ecf034fc81c244f
|
refs/heads/master
| 2023-03-13T17:25:08.600501
| 2021-03-05T17:04:14
| 2021-03-05T17:04:14
| 331,863,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Loki module for query_time
Input:
inputSTR str,
utterance str,
args str[],
resultDICT dict
Output:
resultDICT dict
"""
DEBUG_query_time = True
userDefinedDICT = {"大": ["大人", "成人"], "小": ["小孩", "孩童"]}
# Print the list of arguments for utterances that match the sentence pattern. This is for debugging / development use.
def debugInfo(inputSTR, utterance):
if DEBUG_query_time:
print("[query_time] {} ===> {}".format(inputSTR, utterance))
def getResult(inputSTR, utterance, args, resultDICT):
debugInfo(inputSTR, utterance)
if utterance == "[19]:[47]":
# 待處理
pass
return resultDICT
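
# Hedged usage sketch (editor's addition; the input values are illustrative):
# calling getResult directly just echoes the debug line and returns the
# result dict unchanged, since the "[19]:[47]" branch above is still a stub.
#
# if __name__ == "__main__":
#     print(getResult("sample input", "[19]:[47]", [], {}))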
|
[
"kevink861211@gmail.com"
] |
kevink861211@gmail.com
|
a124749ac00a5a369031b925acde502a8ddc32e0
|
27da9fc141cee7341f3a5b2de6556a121851b754
|
/TOC-Project-2017/fsm2.py
|
be40fa033613d02ccc9f2212d2779f498c219b6b
|
[
"MIT"
] |
permissive
|
C14036227/TOC-Project-2017-ver2
|
6b3ca54bcade78ce5a303439c985abb79aa4cdb3
|
0c958c56edf921d24cd16e1e8a457137f8e88a0c
|
refs/heads/master
| 2021-01-23T10:34:48.404606
| 2017-06-01T15:44:06
| 2017-06-01T15:44:06
| 93,073,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
from transitions.extensions import GraphMachine
class TocMachine(GraphMachine):
def __init__(self, **machine_configs):
self.machine = GraphMachine(
model = self,
**machine_configs
)
def is_going_to_state1(self, update):
text = update.message.text
return text.lower() == 'go to state1'
def is_going_to_state2(self, update):
text = update.message.text
return text.lower() == 'go to state2'
def on_enter_state1(self, update):
update.message.reply_text("I'm entering state1")
self.go_back(update)
def on_exit_state1(self, update):
print('Leaving state1')
def on_enter_state2(self, update):
update.message.reply_text("I'm entering state2")
self.go_back(update)
def on_exit_state2(self, update):
print('Leaving state2')
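
# Hedged sketch (editor's addition; state and trigger names are illustrative,
# not taken from this project): TocMachine expects a transitions-style config,
# with the is_going_to_* methods used as conditions and a go_back trigger used
# by the on_enter_* callbacks above.
#
# machine = TocMachine(
#     states=['user', 'state1', 'state2'],
#     transitions=[
#         {'trigger': 'advance', 'source': 'user', 'dest': 'state1',
#          'conditions': 'is_going_to_state1'},
#         {'trigger': 'advance', 'source': 'user', 'dest': 'state2',
#          'conditions': 'is_going_to_state2'},
#         {'trigger': 'go_back', 'source': ['state1', 'state2'], 'dest': 'user'},
#     ],
#     initial='user',
#     auto_transitions=False,
# )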
|
[
"noreply@github.com"
] |
C14036227.noreply@github.com
|
fa852b15b22790660899f828bd2b36acf41ab473
|
2b477700384af7ceb67f97908f1bd5899d984596
|
/mxonline/second_day/mxonline/mxonline/settings.py
|
0c86916a2d658b263215bc8d182ed18fe7d4a103
|
[] |
no_license
|
ZhiqiKou/django
|
58b743f962e0f7d85b3610e9d09a0e1db32ba9bb
|
e3d35c981e6b91130472114b121b65fd7d5cacf8
|
refs/heads/master
| 2020-03-28T20:44:56.286125
| 2018-09-07T02:21:29
| 2018-09-07T02:21:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,544
|
py
|
"""
Django settings for mxonline project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import sys
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hw$ull9#yd)%((n32%_jx_cy+!kcr@u8-ywc_r4pg6kjmzx(f6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'organization',
'operation',
'courses',
]
# Overridden here so that the custom UserProfile model takes effect
AUTH_USER_MODEL = "users.UserProfile"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mxonline.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mxonline.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mxonline3',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
[
"1816635208@qq.com"
] |
1816635208@qq.com
|
a0343c09cb651f68bbb731f118efdaf292f4c557
|
61cbd965173d4fe7f5c1bcea7ba2fa660460b227
|
/apps/evaluation/apis.py
|
e3ee8f85013a9330b8082da777dc6e21e75aff3b
|
[] |
no_license
|
iLaus/flask_web
|
ffea50954e1e9e9b21e388e1228e5d59e2b2e94b
|
8456880f3db289aa3019bebdac6b43a9f026638d
|
refs/heads/dev
| 2020-04-10T00:45:25.434280
| 2018-12-12T16:50:10
| 2018-12-12T16:50:10
| 160,695,310
| 1
| 1
| null | 2018-12-11T16:40:15
| 2018-12-06T15:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask_restful import Api, Resource
def hello():
return 'hello world!'
class HelloWorld(Resource):
todos = {}
def get(self, todo_id):
return {"result": todo_id}
|
[
"hogon.wang@gmail.com"
] |
hogon.wang@gmail.com
|
52ea82d92a9babeebea7a0dd0d767f59a4c54501
|
11fe80e8eb954f89a0e3b9e3961f6c055d993f9a
|
/receiver.py
|
7e5d535b60c5701bb5fe93d3ae855e85e4985f9d
|
[] |
no_license
|
mehakismail/res-websokt
|
6803ca2f42cf5be5f40bd619d5021361b9f360fb
|
711d1314c9c1a041ed682aff52968ab3d4771c3d
|
refs/heads/main
| 2023-03-24T18:42:48.081992
| 2021-03-18T21:46:41
| 2021-03-18T21:46:41
| 349,225,327
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
import time
import json
import socketio
sio = socketio.Client()
sio.connect('http://192.168.42.105:2494')
@sio.on('connect')
def connect():
print('connection established')
@sio.on('__TEST')
def message(data):
print(data)
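
# Hedged note (editor's addition): as written the script can exit right after
# connecting; python-socketio's client exposes wait(), which blocks until the
# connection drops and is the usual way to keep a pure receiver running.
#
# sio.wait()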
|
[
"noreply@github.com"
] |
mehakismail.noreply@github.com
|
12ee95727eaf7b167e890d616a6ba937a0aed965
|
5865b61a58f3ce20fe84ce8f1eb091434da1456a
|
/backend/migrations/0022_auto_20210103_1409.py
|
93c867f257dafa18296b9f8be217709e1f226e4c
|
[] |
no_license
|
omgorganica/wh_backend
|
6e362bced1021beee5c910e754491300a9474ccc
|
bab97a3970464605e6ac57d9bec193e3843f985e
|
refs/heads/master
| 2023-03-13T13:55:41.096608
| 2021-03-20T08:11:24
| 2021-03-20T08:11:24
| 315,927,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
# Generated by Django 3.1.3 on 2021-01-03 09:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0021_auto_20210102_2034'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'ordering': ['-current_balance']},
),
]
|
[
"omgorganica@yandex.ru"
] |
omgorganica@yandex.ru
|
9e5ddb8b8df7a4a0133cd8e979a21ba7c73dec76
|
a47f8facbd8ee621999ad16736f6aa059440e137
|
/water_film_imaging2.py
|
4e1417a00ae68920184e9ec7bf03b2648702912a
|
[] |
no_license
|
wethug/tES-fmri
|
e14bb207048174e404729196c4036d168317bf1d
|
2f18a050a901a8412f6240a771259e7c8bd3d66f
|
refs/heads/main
| 2023-03-19T21:44:37.916345
| 2021-03-13T12:05:46
| 2021-03-13T12:05:46
| 327,240,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
# Water film imaging
import matplotlib
import matplotlib.pyplot as plt
bar_width = 0.15
y1 = [809.09, 877.94, 867.09, 883.01, 932.71]
y2 = [743.34, 805.86, 808.02, 842.59, 899.36]
y3 = [742.76, 806.66, 806.97, 843.01, 897.21]
y4 = [745.04, 807.94, 806.09, 841.32, 900.14]
y5 = [745.32, 808.32, 809.76, 844.23, 897.98]
y6 = [745.76, 805.89, 804.98, 845.72, 899.34]
rate1 = [8.13, 8.21, 6.81, 4.58, 3.58]
rate2 = [8.20, 8.12, 6.93, 4.53, 3.81]
rate3 = [7.92, 7.97, 7.04, 4.72, 3.49]
rate4 = [7.88, 7.93, 6.61, 4.39, 3.72]
rate5 = [7.83, 8.21, 7.16, 4.22, 3.58]
x_text = ['slice1', 'slice2', 'slice3', 'slice4', 'slice5']
x_list = range(len(x_text))
# Define the Chinese font settings
font = {'family': 'MicroSoft Yahei', 'size': 24}
matplotlib.rc('font', **font)
_, ax1 = plt.subplots(figsize=(15.84, 10.8))
ax2 = ax1.twinx()
b1 = ax1.bar([x - 2.5 * bar_width for x in x_list], y1, bar_width)
b2 = ax1.bar([x - 1.5 * bar_width for x in x_list], y2, bar_width)
b3 = ax1.bar([x - 0.5 * bar_width for x in x_list], y3, bar_width)
b4 = ax1.bar([x + 0.5 * bar_width for x in x_list], y4, bar_width)
b5 = ax1.bar([x + 1.5 * bar_width for x in x_list], y5, bar_width)
b6 = ax1.bar([x + 2.5 * bar_width for x in x_list], y6, bar_width)
l1, = ax2.plot(x_list, rate1)
l2, = ax2.plot(x_list, rate2)
l3, = ax2.plot(x_list, rate3)
l4, = ax2.plot(x_list, rate4)
l5, = ax2.plot(x_list, rate5)
label_font = {'family': 'MicroSoft Yahei', 'size': 26}
plt.xticks(range(len(x_list)), x_text)
plt.xlabel('水膜成像层数', font=label_font)
ax1.set_ylabel('SNR', font=label_font)
ax1.set_ylim(700, 950)
ax2.set_ylabel('相比于对照组下降占比/%', font=label_font)
legend_font = {'family': 'MicroSoft Yahei', 'size': 15}
plt.title('不同刺激电流对水膜成像的影响')
plt.legend(handles=[b1, b2, b3, b4, b5, b6, l1, l2, l3, l4, l5],
labels=['对照组', 'A组', 'B组', 'C组', 'D组', 'E组', 'line1', 'line2', 'line3', 'line4', 'line5'],
prop=legend_font, loc=2, bbox_to_anchor=(1.08, 1.0), borderaxespad=0.)
plt.show()
|
[
"v-runli@microsoft.com"
] |
v-runli@microsoft.com
|
c169e6fcf2692886e4ac0f87de10d9fe39168f51
|
a0e50cec23451a53d16952d4c55ee924f900ef15
|
/Wa_Tor.pyde
|
6aaac901eb7e4a8b5dbc16478e1c77cb05642624
|
[] |
no_license
|
Freidelf/FMFN05-ChaosProject
|
33b8d4c869dcc8bf13291464f9a167fca83148cb
|
d0e8859c23459b6143794e3bc7095da97d0ca48f
|
refs/heads/master
| 2020-03-17T06:29:00.646910
| 2018-05-24T08:13:14
| 2018-05-24T08:13:14
| 133,357,634
| 2
| 1
| null | 2018-05-21T09:37:32
| 2018-05-14T12:31:11
|
Python
|
UTF-8
|
Python
| false
| false
| 11,479
|
pyde
|
import random
import copy
#strogatz nonlinear dynamics and chaos
class Water:
    xpos = 0
    ypos = 0
moved = 0
def __init__(self, X, Y):
self.xpos = X
self.ypos = Y
self.moved = 0
def move(self):
return
def isFish(self):
return False
def isShark(self):
return False
def isWater(self):
return True
cols, rows = 200,200;
Dim = 1000;
SET_INIT_COND = 0;
Fishes = 0
Sharks = 0
SAVEDfishes = 0
SAVEDsharks = 0
PLOT_LENGTH = 500
SHOW_PLOT = 0
FishArray = [0]*PLOT_LENGTH
SharkArray = [0]*PLOT_LENGTH
counter = 0
file = open('nbr_of_animals_run_1.txt', "w")
CURRENTmatrix = [[Water(x,y) for x in range(cols)] for y in range(rows)];
SAVEDmatrix = [[Water(x,y) for x in range(cols)] for y in range(rows)];
def setup():
size(Dim, Dim)
background(255)
def draw():
global Sharks
global Fishes
global counter
global FishArray
global SharkArray
counter = (counter + 1)%PLOT_LENGTH
print("antal hajar: " + str(Sharks) + "antal fiskar: " + str(Fishes))
file.writelines(str(Sharks) + "," + str(Fishes) + "\n" )
if SET_INIT_COND == 0:
initCondition()
else:
frameRate(60)
t = random.randint(1,4)
for i in range(cols):
for j in range(rows):
if t == 1:
i = -i
if t == 2:
j = -j
if t == 3:
i = -i
j = -j
chronon(i,j)
for i in range(cols):
for j in range(rows):
temp = CURRENTmatrix[i][j]
CURRENTmatrix[i][j].moved = 0
if temp.isWater():
fill(0)
elif temp.isFish():
fill(0,0,255)
elif temp.isShark():
fill(0,255,0)
rectMode(CORNER)
rect(i*Dim/cols,j*Dim/rows,Dim/cols,Dim/rows)
FishArray[counter] = Fishes
SharkArray[counter] = Sharks
if SHOW_PLOT == 1:
# fill(255)
# rect(Dim/2,2*Dim/4,Dim/2,Dim/4)
# for i in range(PLOT_LENGTH):
# fill(255,0,0)
# rect(Dim/2 + i, 3*Dim/4 - FishArray[i]/(Dim/5),1,1)
# fill(0)
fill(255)
rect(Dim/2,2*Dim/4,Dim/2,Dim/4)
for i in range(PLOT_LENGTH):
fill(0)
rect(Dim/2 + 10 + FishArray[i]/(Dim/40), 3*Dim/4 - 10 - SharkArray[i]/(Dim/40),1,1)
stroke(0)
line(Dim/2 + 10, 3*Dim/4, Dim/2 + 10, Dim/2)
line(Dim/2, 3*Dim/4 - 10, Dim, 3*Dim/4 - 10)
noStroke()
def initCondition():
noStroke()
for i in range(cols):
for j in range(rows):
temp = CURRENTmatrix[i][j]
if temp.isWater():
fill(0)
elif temp.isFish():
fill(0,0,255)
elif temp.isShark():
fill(0,255,0)
rectMode(CORNER)
rect(i*Dim/cols,j*Dim/rows,Dim/cols,Dim/rows)
def chronon(x,y):
if (CURRENTmatrix[x][y].moved == 0):
CURRENTmatrix[x][y].moved = 1
CURRENTmatrix[x][y].move()
def keyPressed():
counter = 1
global SET_INIT_COND
global SHOW_PLOT
global Fishes
global Sharks
global SAVEDmatrix
global SAVEDfishes
global SAVEDsharks
global file
if key == DELETE:
# SAVEDmatrix = copy.deepcopy(CURRENTmatrix)
SAVEDfishes = Fishes
SAVEDsharks = Sharks
for i in range(cols):
for j in range(rows):
if CURRENTmatrix [i][j].isWater():
SAVEDmatrix[i][j] = Water(i,j)
elif CURRENTmatrix[i][j].isFish():
SAVEDmatrix[i][j] = Fish(i,j)
SAVEDmatrix[i][j].ReprodTimer = CURRENTmatrix[i][j].ReprodTimer
else:
SAVEDmatrix[i][j] = Shark(i,j)
SAVEDmatrix[i][j].ReprodTimer = CURRENTmatrix[i][j].ReprodTimer
SAVEDmatrix[i][j].Energy = CURRENTmatrix[i][j].Energy
if key == BACKSPACE:
counter += 1
file = open("nbr_of_animals_run_" + str(counter) + ".txt", "w")
Fishes = SAVEDfishes
Sharks = SAVEDsharks
for i in range(cols):
for j in range(rows):
if SAVEDmatrix [i][j].isWater():
CURRENTmatrix[i][j] = Water(i,j)
elif SAVEDmatrix[i][j].isFish():
CURRENTmatrix[i][j] = Fish(i,j)
CURRENTmatrix[i][j].ReprodTimer = SAVEDmatrix[i][j].ReprodTimer
else:
CURRENTmatrix[i][j] = Shark(i,j)
CURRENTmatrix[i][j].ReprodTimer = SAVEDmatrix[i][j].ReprodTimer
CURRENTmatrix[i][j].Energy = SAVEDmatrix[i][j].Energy
if key == ENTER:
if SET_INIT_COND == 0:
SET_INIT_COND = 1
else:
SET_INIT_COND = 0
if key == TAB:
Fishes = 0
Sharks = 0
for i in range(cols):
for j in range(rows):
d = random.randint(0,500)
if d < 400:
CURRENTmatrix[i][j] = Water(i,j)
elif d < 494:
CURRENTmatrix[i][j] = Fish(i,j)
else:
CURRENTmatrix[i][j] = Shark(i,j)
def keyReleased():
global SHOW_PLOT
if key == BACKSPACE:
SHOW_PLOT = 0
# def mousePressed():
# global Fishes
# global Sharks
# if mouseButton == LEFT:
# temp = CURRENTmatrix[floor(mouseX/(Dim/rows))][floor(mouseY/(Dim/cols))]
# if temp.isWater():
# CURRENTmatrix[floor(mouseX/(Dim/rows))][floor(mouseY/(Dim/cols))] = Shark(floor(mouseX/(Dim/rows)), floor(mouseY/(Dim/cols)))
# elif temp.isShark():
# Sharks -= 1
# CURRENTmatrix[floor(mouseX/(Dim/rows))][floor(mouseY/(Dim/cols))] = Fish(floor(mouseX/(Dim/rows)), floor(mouseY/(Dim/cols)))
# elif temp.isFish:
# Fishes -=1
# CURRENTmatrix[floor(mouseX/(Dim/rows))][floor(mouseY/(Dim/cols))] = Water(floor(mouseX/(Dim/rows)), floor(mouseY/(Dim/cols)))
class Fish:
    xpos = 0
    ypos = 0
ReprodTimer = 0.0
moved = 0
def __init__(self, X, Y):
self.xpos = X
self.ypos = Y
global Fishes
Fishes += 1
def move(self):
self.xpos = self.xpos%cols
self.ypos = self.ypos%rows
surrounding = [CURRENTmatrix[self.xpos%cols][(self.ypos + 1)%rows], CURRENTmatrix[(self.xpos + 1)%cols][self.ypos%rows], CURRENTmatrix[self.xpos%cols][(self.ypos - 1)%rows], CURRENTmatrix[(self.xpos - 1)%cols][self.ypos%rows]]
p = ["down","right","up","left"]
possibilities = []
oldx = self.xpos%rows
oldy = self.ypos%cols
for i in range(4):
if surrounding[i].isWater():
possibilities.append(p[i])
if possibilities:
decision = random.choice(possibilities)
if decision == "up":
self.ypos -= 1
elif decision == "right":
self.xpos += 1
elif decision == "down":
self.ypos += 1
elif decision == "left":
self.xpos -= 1
global toDraw
# toDraw.append((oldx%rows,oldy%cols))
# toDraw.append((self.xpos%rows,self.ypos%cols))
CURRENTmatrix[self.xpos%rows][self.ypos%cols] = self
if self.ReprodTimer > 1:
CURRENTmatrix[oldx][oldy] = Fish(oldx,oldy)
self.ReprodTimer = 0.0
else:
CURRENTmatrix[oldx][oldy] = Water(oldx,oldy)
self.ReprodTimer += 0.08
def isFish(self):
return True
def isShark(self):
return False
def isWater(self):
return False
class Shark:
moved = 0
    xpos = 0
    ypos = 0
ReprodTimer = 0.0
Energy = 0
global Fishes
global Sharks
def __init__(self, X, Y):
self.xpos = X
self.ypos = Y
global Sharks
Sharks += 1
self.Energy = 0.7
def move(self):
if self.Energy < 0.0:
# toDraw.append((self.xpos%rows,self.ypos%cols))
CURRENTmatrix[self.xpos%cols][self.ypos%rows] = Water(self.xpos%cols, self.ypos%rows)
global Sharks
Sharks -= 1
return
self.xpos = self.xpos%cols
self.ypos = self.ypos%rows
surrounding = [CURRENTmatrix[self.xpos%cols][(self.ypos + 1)%rows], CURRENTmatrix[(self.xpos + 1)%cols][self.ypos%rows], CURRENTmatrix[self.xpos%cols][(self.ypos - 1)%rows], CURRENTmatrix[(self.xpos - 1)%cols][self.ypos%rows]]
p = ["down","right","up","left"]
possibilitiesWater = []
possibilitiesFish = []
oldx = self.xpos
oldy = self.ypos
for i in range(4):
if surrounding[i].isWater():
possibilitiesWater.append(p[i])
if surrounding[i].isFish():
possibilitiesFish.append(p[i])
if not possibilitiesFish:
if possibilitiesWater:
decision = random.choice(possibilitiesWater)
if decision == "up":
self.ypos -= 1
elif decision == "right":
self.xpos += 1
elif decision == "down":
self.ypos += 1
elif decision == "left":
self.xpos -= 1
CURRENTmatrix[self.xpos%cols][self.ypos%rows] = self
# toDraw.append((oldx%rows,oldy%cols))
# toDraw.append((self.xpos%rows,self.ypos%cols))
if self.Energy > 0.95:
CURRENTmatrix[oldx][oldy] = Shark(oldx,oldy)
else:
CURRENTmatrix[oldx][oldy] = Water(oldx,oldy)
else:
decision = random.choice(possibilitiesFish)
global Fishes
Fishes -= 1
if self.Energy < 0.85:
self.Energy +=0.15
if decision == "up":
self.ypos -= 1
elif decision == "right":
self.xpos += 1
elif decision == "down":
self.ypos += 1
elif decision == "left":
self.xpos -= 1
CURRENTmatrix[self.xpos%cols][self.ypos%rows] = self
# toDraw.append((oldx%rows,oldy%cols))
# toDraw.append((self.xpos%rows,self.ypos%cols))
if self.Energy > 0.98:
CURRENTmatrix[oldx][oldy] = Shark(oldx,oldy)
else:
CURRENTmatrix[oldx][oldy] = Water(oldx,oldy)
self.Energy -= 0.04
def isFish(self):
return False
def isShark(self):
return True
def isWater(self):
return False
|
[
"noreply@github.com"
] |
Freidelf.noreply@github.com
|
6afd366e3327f8166973d2de937fb4955ff5dd05
|
e08319a0eee6ef40eb544b1c694233ac26388de0
|
/clue/adafruit-circuitpython-bundle-6.x-mpy-20210209/examples/ssd1608_simpletest.py
|
05880b48d5ca02f48517ceed4535faf61876954e
|
[] |
no_license
|
rwhiffen/circuit-python
|
363b873b2429521296bcab5d5c88271eb5270073
|
23b3d577c9d1ed6cb48aa7f48c77911bf62cd5c3
|
refs/heads/master
| 2021-12-04T11:04:29.368458
| 2021-11-30T00:54:29
| 2021-11-30T00:54:29
| 229,330,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""Simple test script for 1.54" 200x200 monochrome display.
Supported products:
* Adafruit 1.54" Monochrome ePaper Display Breakout
* https://www.adafruit.com/product/4196
"""
import time
import board
import displayio
import adafruit_ssd1608
displayio.release_displays()
# This pinout works on a Feather M4 and may need to be altered for other boards.
spi = board.SPI() # Uses SCK and MOSI
epd_cs = board.D9
epd_dc = board.D10
epd_reset = board.D5
epd_busy = board.D6
display_bus = displayio.FourWire(
spi, command=epd_dc, chip_select=epd_cs, reset=epd_reset, baudrate=1000000
)
time.sleep(1)
display = adafruit_ssd1608.SSD1608(
display_bus, width=200, height=200, busy_pin=epd_busy
)
g = displayio.Group()
f = open("/display-ruler.bmp", "rb")
pic = displayio.OnDiskBitmap(f)
t = displayio.TileGrid(pic, pixel_shader=displayio.ColorConverter())
g.append(t)
display.show(g)
display.refresh()
print("refreshed")
time.sleep(120)
|
[
"rich@whiffen.orgw"
] |
rich@whiffen.orgw
|
baac9827d91c255af7cab59e278e6eb5be8f1fe0
|
e1db584567a8b8cc135d9beec7c4997685494dd8
|
/other/sensor_magnet/tests/test_actuators.py
|
37e8e5ec7f0a3eaa50700f73d204f40922a7d5f7
|
[
"MIT"
] |
permissive
|
gurbain/tigrillo2
|
459d4b8d950736faea7af77256a7ce11f1338fba
|
66ad26c0aff39da74ca76f712b6f01b40d383f34
|
refs/heads/master
| 2021-08-16T12:02:39.863300
| 2018-06-08T22:44:40
| 2018-06-08T22:44:40
| 113,203,747
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import serial
import sys
import time
c = serial.Serial("/dev/ttyACM0", 9600)
if not c.isOpen():
c.open()
i = 0
while i < 1000:
    c.write("A90,90,90,90")
    time.sleep(0.01)
    i += 1
i = 0
while i < 1000:
    for read in c.read():
        sys.stdout.write(read)
    i += 1
|
[
"gabrielurbain@gmail.com"
] |
gabrielurbain@gmail.com
|
6f51ba0a831c70bc2b9a14c70a2d156a6a454c37
|
76c4c6965ca962422408fe3d35b7c0dbd00af68b
|
/Python/search3_non_recursive.py
|
e386b50749881c706ca200bceedb6b858af9a78d
|
[] |
no_license
|
AnandDhanalakota/Vin-Pro
|
1a4bce0acefbff0b5d485a9eeeae3c95ccef80f6
|
4ff62d476ce8bb30ffe89b9735907e128d02b2e9
|
refs/heads/master
| 2020-03-21T10:06:15.553553
| 2017-10-13T21:15:14
| 2017-10-13T21:15:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
import os
"""
__________________d:\___________________
/ / \ \
suneeta softs vin-pro songs_____
/ \ | / \ / \
sunc++,cellsoft .... cs, asm,... sainikudu, newyork, ....
=========================================================================
===!!!=====!!!==!!!==!!!!=====!!!==!!!!!!!!!!==!!!!!!!!!!==!!!===========
===!!!=====!!!==!!!==!!!!!====!!!==!!!!!!!!====!!!!!!!!====!!!===========
===!!!=====!!!==!!!==!!!=!!===!!!==!!!=========!!!=========!!!===========
====!!!===!!!===!!!==!!!==!!==!!!==!!!!!!!=====!!!!!!!=====!!!===========
====!!!===!!!===!!!==!!!==!!==!!!==!!!!!!!=====!!!!!!!=====!!!===========
====!!!===!!!===!!!==!!!===!!=!!!==!!!=========!!!=========!!!===========
=====!!!=!!!====!!!==!!!====!!!!!==!!!!!!!!====!!!!!!!!====!!!!!!!!!=====
======!!!!!=====!!!==!!!=====!!!!==!!!!!!!!!!==!!!!!!!!!!==!!!!!!!!!!!===
=========================================================================
"""
paths=["d:\\"]
temp=[]
while 1:
for xyz in paths:
for dir in os.listdir(xyz):
path = xyz+'\\'+dir
if os.path.isdir(path):
temp+=[path]
else:
print path
if not temp :
break
paths = temp
temp = []
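
# Hedged alternative sketch (editor's addition; same non-recursive
# breadth-first idea using only the standard library): a deque avoids
# rebuilding the per-level path lists.
#
# from collections import deque
# pending = deque(["d:\\"])
# while pending:
#     current = pending.popleft()
#     for name in os.listdir(current):
#         full = os.path.join(current, name)
#         if os.path.isdir(full):
#             pending.append(full)
#         else:
#             print(full)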
|
[
"vineelko@microsoft.com"
] |
vineelko@microsoft.com
|
b9235f24b706833d32ec5e0f7f82633c4ab895c0
|
2aeab3fbd7a8778760a6036557f422fc4356f761
|
/pycrescolib/globalcontroller.py
|
8ed1ac76655eb17bc33366e59a2f8678dfeac624
|
[] |
no_license
|
rynsy/pycrescolib-demo
|
8658cdcb950cbcbad1a5206317d4566af66a1e70
|
3a6091247a691888dadd0038a50fc32d49b67a80
|
refs/heads/master
| 2023-08-22T20:37:27.229250
| 2021-10-20T17:35:15
| 2021-10-20T17:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,008
|
py
|
import json
from pycrescolib.utils import decompress_param, get_jar_info, compress_param, encode_data
class globalcontroller(object):
def __init__(self, messaging):
self.messaging = messaging
def submit_pipeline(self, cadl):
message_event_type = 'CONFIG'
message_payload = dict()
message_payload['action'] = 'gpipelinesubmit'
message_payload['action_gpipeline'] = compress_param(json.dumps(cadl))
message_payload['action_tenantid'] = '0'
retry = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
# returns status and gpipeline_id
return retry
def remove_pipeline(self, pipeline_id):
message_event_type = 'CONFIG'
message_payload = dict()
message_payload['action'] = 'gpipelineremove'
message_payload['action_pipelineid'] = pipeline_id
retry = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
return retry
def get_pipeline_list(self):
message_event_type = 'EXEC'
message_payload = dict()
message_payload['action'] = 'getgpipelinestatus'
reply = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
reply = json.loads(decompress_param(reply['pipelineinfo']))['pipelines']
return reply
def get_pipeline_info(self, pipeline_id):
message_event_type = 'EXEC'
message_payload = dict()
message_payload['action'] = 'getgpipeline'
message_payload['action_pipelineid'] = pipeline_id
reply = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
reply = json.loads(decompress_param(reply['gpipeline']))
return reply
def get_pipeline_status(self, pipeline_id):
reply = self.get_pipeline_info(pipeline_id)
status_code = int(reply['status_code'])
return status_code
def get_agent_list(self, dst_region=None):
message_event_type = 'EXEC'
message_payload = dict()
message_payload['action'] = 'listagents'
if dst_region is not None:
message_payload['action_region'] = dst_region
reply = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
reply = json.loads(decompress_param(reply['agentslist']))['agents']
'''
for agent in reply:
dst_agent = agent['name']
dst_region = agent['region']
r = self.get_agent_resources(dst_region,dst_agent)
print(r)
'''
return reply
def get_agent_resources(self, dst_region, dst_agent):
message_event_type = 'EXEC'
message_payload = dict()
message_payload['action'] = 'resourceinfo'
message_payload['action_region'] = dst_region
message_payload['action_agent'] = dst_agent
reply = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
reply = json.loads(json.loads(decompress_param(reply['resourceinfo']))['agentresourceinfo'][0]['perf'])
return reply
def get_plugin_list(self):
# this code makes use of a global message to find a specific plugin type, then send a message to that plugin
message_event_type = 'EXEC'
message_payload = dict()
message_payload['action'] = 'listplugins'
result = self.messaging.global_controller_msgevent(message_event_type, message_payload)
pluginslist = json.loads(decompress_param(result['pluginslist']))
plugin_name = 'io.cresco.repo'
pluginlist = pluginslist['plugins']
for plugin in pluginlist:
if plugin['pluginname'] == plugin_name:
break;
message_payload['action'] = 'repolist'
for i in range(10):
result = self.messaging.global_plugin_msgevent(True, message_event_type, message_payload, plugin['region'], plugin['agent'], plugin['name'])
print(result)
def upload_plugin_global(self, jar_file_path):
#get data from jar
configparams = get_jar_info(jar_file_path)
# "configparams"
'''
configparams = dict()
configparams['pluginname'] = 'io.cresco.cepdemo'
configparams['version'] = '1.0.0.SNAPSHOT-2020-09-01T203900Z'
configparams['md5'] = '34de550afdac3bcabbbac99ea5a1519c'
'''
#read input file
in_file = open(jar_file_path, "rb") # opening for [r]eading as [b]inary
jar_data = in_file.read() # if you only wanted to read 512 bytes, do .read(512)
in_file.close()
message_event_type = 'CONFIG'
message_payload = dict()
message_payload['action'] = 'savetorepo'
message_payload['configparams'] = compress_param(json.dumps(configparams))
message_payload['jardata'] = encode_data(jar_data)
reply = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
# returns reply with status and pluginid
return reply
def get_region_resources(self, dst_region):
message_event_type = 'EXEC'
message_payload = dict()
message_payload['action'] = 'resourceinfo'
message_payload['action_region'] = dst_region
reply = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
#reply = json.loads(json.loads(decompress_param(reply['resourceinfo']))['agentresourceinfo'][0]['perf'])
reply = json.loads(decompress_param(reply['resourceinfo']))
return reply
def get_region_list(self):
message_event_type = 'EXEC'
message_payload = dict()
message_payload['action'] = 'listregions'
reply = self.messaging.global_controller_msgevent(True, message_event_type, message_payload)
reply = json.loads(decompress_param(reply['regionslist']))['regions']
return reply
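
# Hedged usage sketch (editor's addition; the messaging object and the CADL
# dict come from the surrounding pycrescolib client and are assumed here):
#
# gc = globalcontroller(messaging)
# reply = gc.submit_pipeline(cadl)  # per the comment above, returns status and gpipeline_id
# status = gc.get_pipeline_status(reply['gpipeline_id'])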
|
[
"cbumgardner@gmail.com"
] |
cbumgardner@gmail.com
|
d485cfa23c7f446ebfa1be31d86428513cf3a031
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog/optimized_38775.py
|
aa5ba685bb6ffb0c7e77e41ce4af889ae20a5bd0
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,853
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((576.874, 540.822, 485.573), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((598.091, 565.823, 546.833), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((623.318, 587.057, 622.151), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((611.306, 453.071, 585.762), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((686.143, 680.092, 779.238), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((583.516, 558.339, 528.007), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((582.521, 557.977, 526.767), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((563.861, 574.065, 540.474), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((550.902, 557.517, 559.175), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((542.229, 538.937, 539.879), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((514.423, 537.738, 535.439), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((493.423, 550.371, 549.158), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((588.124, 560.504, 499.948), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((397.241, 545.752, 593.753), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((511.625, 649.589, 724.084), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((511.625, 649.589, 724.084), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((523.133, 647.98, 698.582), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((525.208, 637.211, 672.704), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((532.957, 627.817, 647.087), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((546.267, 619.066, 623.377), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((562.643, 609.598, 601.974), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((572.556, 598.08, 577.717), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((386.539, 560.102, 751.952), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((758.573, 634.065, 399.435), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((605.732, 621.954, 593.966), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((605.732, 621.954, 593.966), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((624.194, 600.594, 591.035), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((642.628, 579.779, 597.448), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((636.927, 556.397, 613.563), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((608.734, 497.014, 507.327), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((661.84, 608.133, 724.55), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((606.707, 539.554, 539.754), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((606.921, 539.122, 539.763), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((602.149, 532.701, 513.098), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((585.556, 539.87, 491.818), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((559.419, 549.009, 495.526), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((544.538, 565.047, 513.163), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((523.277, 578.335, 526.055), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((497.067, 580.776, 536.171), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((565.717, 630.616, 525.687), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((429.722, 525.801, 547.642), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((611.519, 627.195, 530.498), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((614.858, 606.49, 544.807), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((624.743, 560.967, 576.711), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((637.906, 513.521, 608.447), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((663.455, 481.934, 537.566), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((637.242, 473.186, 705.227), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((579.913, 519.906, 554.457), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((602.611, 526.699, 570.855), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((631.482, 529.241, 573.589), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((653.721, 545.788, 584.54), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((672.791, 567.689, 591.808), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((675.915, 594.142, 605.603), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((625.733, 581.142, 545.283), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((726.761, 616.193, 667.856), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
cafd06c3fd03d5ead92c2a9dbd0bb3a1e82d9bb7
|
29585537e2e96c169ae83cd660070ba3af0a43a9
|
/admin_confirm/file_cache.py
|
7baaefffd6d41d2e7817da142ddfd47e3bb6e475
|
[
"Apache-2.0"
] |
permissive
|
ballke-dev/django-admin-confirm
|
4c400e0d6cb3799e7d9901731db99b4a579ec06e
|
21f5a37c5ecf1fee30f95d8a2ce01207916a22f8
|
refs/heads/main
| 2023-06-23T03:54:50.326670
| 2021-07-22T17:04:13
| 2021-07-22T17:04:13
| 386,659,834
| 0
| 0
|
NOASSERTION
| 2021-07-16T14:11:48
| 2021-07-16T14:11:47
| null |
UTF-8
|
Python
| false
| false
| 3,633
|
py
|
""" FileCache - caches files for ModelAdmins with confirmations.
Code modified from: https://github.com/MaistrenkoAnton/filefield-cache/blob/master/filefield_cache/cache.py
Original copy date: April 22, 2021
---
Modified to be compatible with more versions of Django/Python
---
MIT License
Copyright (c) 2020 Maistrenko Anton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django.core.files.uploadedfile import InMemoryUploadedFile
try:
from cStringIO import StringIO as BytesIO # noqa: WPS433
except ImportError:
from io import BytesIO # noqa: WPS433, WPS440
from django.core.cache import cache
from admin_confirm.constants import CACHE_TIMEOUT
from admin_confirm.utils import log
class FileCache(object):
"Cache file data and retain the file upon confirmation."
timeout = CACHE_TIMEOUT
def __init__(self):
self.cache = cache
self.cached_keys = []
def set(self, key, upload):
"""
Set file data to cache for 1000s
:param key: cache key
:param upload: file data
"""
try: # noqa: WPS229
state = {
"name": upload.name,
"size": upload.size,
"content_type": upload.content_type,
"charset": upload.charset,
"content": upload.file.read(),
}
upload.file.seek(0)
self.cache.set(key, state, self.timeout)
log(f"Setting file cache with {key}")
self.cached_keys.append(key)
except AttributeError: # pragma: no cover
pass # noqa: WPS420
def get(self, key):
"""
Get the file data from cache using specific cache key
:param key: cache key
:return: File data
"""
upload = None
state = self.cache.get(key)
if state:
file = BytesIO()
file.write(state["content"])
upload = InMemoryUploadedFile(
file=file,
field_name="file",
name=state["name"],
content_type=state["content_type"],
size=state["size"],
charset=state["charset"],
)
upload.file.seek(0)
log(f"Getting file cache with {key}")
return upload
def delete(self, key):
"""
Delete file data from cache
:param key: cache key
"""
self.cache.delete(key)
self.cached_keys.remove(key)
def delete_all(self):
"Delete all cached file data from cache."
self.cache.delete_many(self.cached_keys)
self.cached_keys = []
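
# Hedged usage sketch (editor's addition; the key and field names are
# illustrative): the cache round-trips an upload by key, so a confirmation
# view can stash request.FILES data and rebuild an equivalent
# InMemoryUploadedFile after the user confirms.
#
# file_cache = FileCache()
# file_cache.set("admin_confirm__file", request.FILES["file"])
# ...
# upload = file_cache.get("admin_confirm__file")
# file_cache.delete_all()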
|
[
"noreply@github.com"
] |
ballke-dev.noreply@github.com
|
a4b4a0a3244cac402cda7f3b4ed5278efc2fa651
|
c4b47ba53d40e861571c82f8a968a989974dc433
|
/fireball/blobs/admin.py
|
454a72b4217a2e674b995a6f5a635ca10bde368e
|
[] |
no_license
|
underlost/fireball
|
4be3e441a82f6a0fbb603b33be8493f03019392e
|
3cf312fa88860e9f2e9f34479b5b1962dae09f55
|
refs/heads/master
| 2016-09-01T18:45:18.059628
| 2013-06-03T16:26:12
| 2013-06-03T16:26:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
from django.contrib import admin
from fireball.blobs.models import Blob
class BlobAdmin(admin.ModelAdmin):
list_filter = ('user',)
search_fields = ['description','url',]
list_display = ('user', 'url',)
admin.site.register(Blob,BlobAdmin)
|
[
"underlost@gmail.com"
] |
underlost@gmail.com
|
f789665473eb4f3fe85e39c56c286c518c116c7a
|
895f5581d12379c507018f36c58b63920190f287
|
/ShoppingCart/urls.py
|
db7a92f20ab5b5ff56278559a6c793a21f336dda
|
[] |
no_license
|
Feras-1998/graduation-project
|
c1a4d65449b573f5c10c4059d78b423f13ad9be8
|
b93e736ecc710d7ec1f31e4db30c3c5288a7bcf5
|
refs/heads/master
| 2023-03-19T12:06:53.199292
| 2021-03-21T17:56:55
| 2021-03-21T17:56:55
| 350,102,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from django.urls import path
from ShoppingCart import ShoppingCartView as view
app_name = "ShoppingCart"
urlpatterns = [
path('viwe', view.view_shopping_cart, name='view_shopping_cart'),
path('add/product/<int:product_id>/quantity/<int:quantity>', view.add_product_to_cart, name='add_product_to_cart'),
path('edit/product/<int:product_id>/quantity/<int:quantity>', view.edit_product_quantity_cart,
name='edit_product_quantity_cart'),
path('add/product/<int:product_id>/quantity/<int:quantity>', view.add_offer_to_cart, name='add_offer_to_cart'),
path('edit/product/<int:CartOffer_id>/quantity/<int:quantity>', view.edit_offer_quantity_cart,
name='edit_offer_quantity_cart'),
path('test/', view.mainUseCase, name='mainUseCase')
]
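
# Hedged note (editor's addition): Django resolves URLs top-down and stops at
# the first match, so the second 'add/product/...' pattern (add_offer_to_cart)
# and the offer-edit pattern that reuses the product-edit shape are shadowed
# by the earlier product routes as written.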
|
[
"feras@LAPTOP-LNAH19LL"
] |
feras@LAPTOP-LNAH19LL
|
9606fc118d763b54512c8278ba1755a594d973cb
|
61e98b0302a43ab685be4c255b4ecf2979db55b6
|
/sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/environment/__init__.py
|
f760583ece5807e4a028e2fb675ec70d4f9836db
|
[
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"Apache-2.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] |
permissive
|
dzenyu/kafka
|
5631c05a6de6e288baeb8955bdddf2ff60ec2a0e
|
d69a24bce8d108f43376271f89ecc3b81c7b6622
|
refs/heads/master
| 2021-07-16T12:31:09.623509
| 2021-06-28T18:22:16
| 2021-06-28T18:22:16
| 198,724,535
| 0
| 0
|
Apache-2.0
| 2019-07-24T23:51:47
| 2019-07-24T23:51:46
| null |
UTF-8
|
Python
| false
| false
| 50,863
|
py
|
# -*- coding: utf-8 -*-
"""
sphinx.environment
~~~~~~~~~~~~~~~~~~
Global creation environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import os
import sys
import time
import types
import codecs
import fnmatch
from os import path
from glob import glob
from six import iteritems, itervalues, class_types, next
from six.moves import cPickle as pickle
from docutils import nodes
from docutils.io import NullOutput
from docutils.core import Publisher
from docutils.utils import Reporter, relative_path, get_source_line
from docutils.parsers.rst import roles
from docutils.parsers.rst.languages import en as english
from docutils.frontend import OptionParser
from sphinx import addnodes
from sphinx.io import SphinxStandaloneReader, SphinxDummyWriter, SphinxFileInput
from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict
from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \
process_only_nodes
from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir
from sphinx.util.images import guess_mimetype
from sphinx.util.i18n import find_catalog_files, get_image_filename_for_language, \
search_image_for_language
from sphinx.util.console import bold, purple
from sphinx.util.docutils import sphinx_domains
from sphinx.util.matching import compile_matchers
from sphinx.util.parallel import ParallelTasks, parallel_available, make_chunks
from sphinx.util.websupport import is_commentable
from sphinx.errors import SphinxError, ExtensionError
from sphinx.versioning import add_uids, merge_doctrees
from sphinx.transforms import SphinxContentsFilter
from sphinx.environment.managers.indexentries import IndexEntries
from sphinx.environment.managers.toctree import Toctree
default_settings = {
'embed_stylesheet': False,
'cloak_email_addresses': True,
'pep_base_url': 'https://www.python.org/dev/peps/',
'rfc_base_url': 'https://tools.ietf.org/html/',
'input_encoding': 'utf-8-sig',
'doctitle_xform': False,
'sectsubtitle_xform': False,
'halt_level': 5,
'file_insertion_enabled': True,
}
# This is increased every time an environment attribute is added
# or changed to properly invalidate pickle files.
#
# NOTE: increase base version by 2 to have distinct numbers for Py2 and 3
ENV_VERSION = 50 + (sys.version_info[0] - 2)
dummy_reporter = Reporter('', 4, 4)
versioning_conditions = {
'none': False,
'text': is_translatable,
'commentable': is_commentable,
}
class NoUri(Exception):
"""Raised by get_relative_uri if there is no URI available."""
pass
class BuildEnvironment(object):
"""
The environment in which the ReST files are translated.
Stores an inventory of cross-file targets and provides doctree
transformations to resolve links to them.
"""
# --------- ENVIRONMENT PERSISTENCE ----------------------------------------
@staticmethod
def frompickle(srcdir, config, filename):
with open(filename, 'rb') as picklefile:
env = pickle.load(picklefile)
if env.version != ENV_VERSION:
raise IOError('build environment version not current')
if env.srcdir != srcdir:
raise IOError('source directory has changed')
env.config.values = config.values
return env
def topickle(self, filename):
# remove unpicklable attributes
warnfunc = self._warnfunc
self.set_warnfunc(None)
values = self.config.values
del self.config.values
domains = self.domains
del self.domains
managers = self.detach_managers()
# remove potentially pickling-problematic values from config
for key, val in list(vars(self.config).items()):
if key.startswith('_') or \
isinstance(val, types.ModuleType) or \
isinstance(val, types.FunctionType) or \
isinstance(val, class_types):
del self.config[key]
with open(filename, 'wb') as picklefile:
pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
# reset attributes
self.attach_managers(managers)
self.domains = domains
self.config.values = values
self.set_warnfunc(warnfunc)
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
def __init__(self, srcdir, doctreedir, config):
self.doctreedir = doctreedir
self.srcdir = srcdir
self.config = config
# the method of doctree versioning; see set_versioning_method
self.versioning_condition = None
self.versioning_compare = None
# the application object; only set while update() runs
self.app = None
# all the registered domains, set by the application
self.domains = {}
# the docutils settings for building
self.settings = default_settings.copy()
self.settings['env'] = self
# the function to write warning messages with
self._warnfunc = None
# this is to invalidate old pickles
self.version = ENV_VERSION
# All "docnames" here are /-separated and relative and exclude
# the source suffix.
self.found_docs = set() # contains all existing docnames
self.all_docs = {} # docname -> mtime at the time of reading
# contains all read docnames
self.dependencies = {} # docname -> set of dependent file
# names, relative to documentation root
self.included = set() # docnames included from other documents
self.reread_always = set() # docnames to re-read unconditionally on
# next build
# File metadata
self.metadata = {} # docname -> dict of metadata items
# TOC inventory
self.titles = {} # docname -> title node
self.longtitles = {} # docname -> title node; only different if
# set differently with title directive
self.tocs = {} # docname -> table of contents nodetree
self.toc_num_entries = {} # docname -> number of real entries
# used to determine when to show the TOC
# in a sidebar (don't show if it's only one item)
self.toc_secnumbers = {} # docname -> dict of sectionid -> number
self.toc_fignumbers = {} # docname -> dict of figtype ->
# dict of figureid -> number
self.toctree_includes = {} # docname -> list of toctree includefiles
self.files_to_rebuild = {} # docname -> set of files
# (containing its TOCs) to rebuild too
self.glob_toctrees = set() # docnames that have :glob: toctrees
self.numbered_toctrees = set() # docnames that have :numbered: toctrees
# domain-specific inventories, here to be pickled
self.domaindata = {} # domainname -> domain-specific dict
# Other inventories
self.indexentries = {} # docname -> list of
# (type, string, target, aliasname)
self.versionchanges = {} # version -> list of (type, docname,
# lineno, module, descname, content)
# these map absolute path -> (docnames, unique filename)
self.images = FilenameUniqDict()
self.dlfiles = FilenameUniqDict()
# temporary data storage while reading a document
self.temp_data = {}
# context for cross-references (e.g. current module or class)
# this is similar to temp_data, but will for example be copied to
# attributes of "any" cross references
self.ref_context = {}
self.managers = {}
self.init_managers()
def init_managers(self):
managers = {}
for manager_class in [IndexEntries, Toctree]:
managers[manager_class.name] = manager_class(self)
self.attach_managers(managers)
def attach_managers(self, managers):
for name, manager in iteritems(managers):
self.managers[name] = manager
manager.attach(self)
def detach_managers(self):
managers = self.managers
self.managers = {}
for _, manager in iteritems(managers):
manager.detach(self)
return managers
def set_warnfunc(self, func):
self._warnfunc = func
self.settings['warning_stream'] = WarningStream(func)
def set_versioning_method(self, method, compare):
"""This sets the doctree versioning method for this environment.
Versioning methods are a builder property; only builders with the same
versioning method can share the same doctree directory. Therefore, we
raise an exception if the user tries to use an environment with an
incompatible versioning method.
"""
if method not in versioning_conditions:
raise ValueError('invalid versioning method: %r' % method)
condition = versioning_conditions[method]
if self.versioning_condition not in (None, condition):
raise SphinxError('This environment is incompatible with the '
'selected builder, please choose another '
'doctree directory.')
self.versioning_condition = condition
self.versioning_compare = compare
def warn(self, docname, msg, lineno=None, **kwargs):
"""Emit a warning.
This differs from using ``app.warn()`` in that the warning may not
be emitted instantly, but collected for emitting all warnings after
the update of the environment.
"""
# strange argument order is due to backwards compatibility
self._warnfunc(msg, (docname, lineno), **kwargs)
def warn_node(self, msg, node, **kwargs):
"""Like :meth:`warn`, but with source information taken from *node*."""
self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs)
def clear_doc(self, docname):
"""Remove all traces of a source file in the inventory."""
if docname in self.all_docs:
self.all_docs.pop(docname, None)
self.reread_always.discard(docname)
self.metadata.pop(docname, None)
self.dependencies.pop(docname, None)
self.titles.pop(docname, None)
self.longtitles.pop(docname, None)
self.images.purge_doc(docname)
self.dlfiles.purge_doc(docname)
for version, changes in self.versionchanges.items():
new = [change for change in changes if change[1] != docname]
changes[:] = new
for manager in itervalues(self.managers):
manager.clear_doc(docname)
for domain in self.domains.values():
domain.clear_doc(docname)
def merge_info_from(self, docnames, other, app):
"""Merge global information gathered about *docnames* while reading them
from the *other* environment.
This possibly comes from a parallel build process.
"""
docnames = set(docnames)
for docname in docnames:
self.all_docs[docname] = other.all_docs[docname]
if docname in other.reread_always:
self.reread_always.add(docname)
self.metadata[docname] = other.metadata[docname]
if docname in other.dependencies:
self.dependencies[docname] = other.dependencies[docname]
self.titles[docname] = other.titles[docname]
self.longtitles[docname] = other.longtitles[docname]
self.images.merge_other(docnames, other.images)
self.dlfiles.merge_other(docnames, other.dlfiles)
for version, changes in other.versionchanges.items():
self.versionchanges.setdefault(version, []).extend(
change for change in changes if change[1] in docnames)
for manager in itervalues(self.managers):
manager.merge_other(docnames, other)
for domainname, domain in self.domains.items():
domain.merge_domaindata(docnames, other.domaindata[domainname])
app.emit('env-merge-info', self, docnames, other)
def path2doc(self, filename):
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
"""
if filename.startswith(self.srcdir):
filename = filename[len(self.srcdir) + 1:]
for suffix in self.config.source_suffix:
if fnmatch.fnmatch(filename, '*' + suffix):
return filename[:-len(suffix)]
else:
# the file does not have docname
return None
def doc2path(self, docname, base=True, suffix=None):
"""Return the filename for the document name.
If *base* is True, return absolute path under self.srcdir.
If *base* is None, return relative path to self.srcdir.
If *base* is a path string, return absolute path under that.
If *suffix* is not None, add it instead of config.source_suffix.
"""
docname = docname.replace(SEP, path.sep)
if suffix is None:
for candidate_suffix in self.config.source_suffix:
if path.isfile(path.join(self.srcdir, docname) +
candidate_suffix):
suffix = candidate_suffix
break
else:
# document does not exist
suffix = self.config.source_suffix[0]
if base is True:
return path.join(self.srcdir, docname) + suffix
elif base is None:
return docname + suffix
else:
return path.join(base, docname) + suffix
def relfn2path(self, filename, docname=None):
"""Return paths to a file referenced from a document, relative to
documentation root and absolute.
In the input "filename", absolute filenames are taken as relative to the
source dir, while relative filenames are relative to the dir of the
containing document.
"""
if filename.startswith('/') or filename.startswith(os.sep):
rel_fn = filename[1:]
else:
docdir = path.dirname(self.doc2path(docname or self.docname,
base=None))
rel_fn = path.join(docdir, filename)
try:
# the path.abspath() might seem redundant, but otherwise artifacts
# such as ".." will remain in the path
return rel_fn, path.abspath(path.join(self.srcdir, rel_fn))
except UnicodeDecodeError:
# the source directory is a bytestring with non-ASCII characters;
# let's try to encode the rel_fn in the file system encoding
enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn))
def find_files(self, config, buildername=None):
"""Find all source files in the source dir and put them in
self.found_docs.
"""
matchers = compile_matchers(
config.exclude_patterns[:] +
config.templates_path +
config.html_extra_path +
['**/_sources', '.#*', '**/.#*', '*.lproj/**']
)
self.found_docs = set()
for docname in get_matching_docs(self.srcdir, config.source_suffix,
exclude_matchers=matchers):
if os.access(self.doc2path(docname), os.R_OK):
self.found_docs.add(docname)
else:
self.warn(docname, "document not readable. Ignored.")
# Current implementation is applying translated messages in the reading
        # phase. Therefore, in order to apply the updated message catalog, it is
# necessary to re-process from the reading phase. Here, if dependency
# is set for the doc source and the mo file, it is processed again from
# the reading phase when mo is updated. In the future, we would like to
# move i18n process into the writing phase, and remove these lines.
if buildername != 'gettext':
# add catalog mo file dependency
for docname in self.found_docs:
catalog_files = find_catalog_files(
docname,
self.srcdir,
self.config.locale_dirs,
self.config.language,
self.config.gettext_compact)
for filename in catalog_files:
self.dependencies.setdefault(docname, set()).add(filename)
def get_outdated_files(self, config_changed):
"""Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
added = set()
changed = set()
if config_changed:
# config values affect e.g. substitutions
added = self.found_docs
else:
for docname in self.found_docs:
if docname not in self.all_docs:
added.add(docname)
continue
# if the doctree file is not there, rebuild
if not path.isfile(self.doc2path(docname, self.doctreedir,
'.doctree')):
changed.add(docname)
continue
# check the "reread always" list
if docname in self.reread_always:
changed.add(docname)
continue
# check the mtime of the document
mtime = self.all_docs[docname]
newmtime = path.getmtime(self.doc2path(docname))
if newmtime > mtime:
changed.add(docname)
continue
# finally, check the mtime of dependencies
for dep in self.dependencies.get(docname, ()):
try:
# this will do the right thing when dep is absolute too
deppath = path.join(self.srcdir, dep)
if not path.isfile(deppath):
changed.add(docname)
break
depmtime = path.getmtime(deppath)
if depmtime > mtime:
changed.add(docname)
break
except EnvironmentError:
# give it another chance
changed.add(docname)
break
return added, changed, removed
def update(self, config, srcdir, doctreedir, app):
"""(Re-)read all files new or changed since last update.
Store all environment docnames in the canonical format (ie using SEP as
a separator in place of os.path.sep).
"""
config_changed = False
if self.config is None:
msg = '[new config] '
config_changed = True
else:
# check if a config value was changed that affects how
# doctrees are read
for key, descr in iteritems(config.values):
if descr[1] != 'env':
continue
if self.config[key] != config[key]:
msg = '[config changed] '
config_changed = True
break
else:
msg = ''
# this value is not covered by the above loop because it is handled
# specially by the config class
if self.config.extensions != config.extensions:
msg = '[extensions changed] '
config_changed = True
# the source and doctree directories may have been relocated
self.srcdir = srcdir
self.doctreedir = doctreedir
self.find_files(config, app.buildername)
self.config = config
# this cache also needs to be updated every time
self._nitpick_ignore = set(self.config.nitpick_ignore)
app.info(bold('updating environment: '), nonl=True)
added, changed, removed = self.get_outdated_files(config_changed)
# allow user intervention as well
for docs in app.emit('env-get-outdated', self, added, changed, removed):
changed.update(set(docs) & self.found_docs)
# if files were added or removed, all documents with globbed toctrees
# must be reread
if added or removed:
# ... but not those that already were removed
changed.update(self.glob_toctrees & self.found_docs)
msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
len(removed))
app.info(msg)
self.app = app
# clear all files no longer present
for docname in removed:
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
# read all new and changed files
docnames = sorted(added | changed)
# allow changing and reordering the list of docs to read
app.emit('env-before-read-docs', self, docnames)
# check if we should do parallel or serial read
par_ok = False
if parallel_available and len(docnames) > 5 and app.parallel > 1:
par_ok = True
for extname, md in app._extension_metadata.items():
ext_ok = md.get('parallel_read_safe')
if ext_ok:
continue
if ext_ok is None:
app.warn('the %s extension does not declare if it '
'is safe for parallel reading, assuming it '
'isn\'t - please ask the extension author to '
'check and make it explicit' % extname)
app.warn('doing serial read')
else:
app.warn('the %s extension is not safe for parallel '
'reading, doing serial read' % extname)
par_ok = False
break
if par_ok:
self._read_parallel(docnames, app, nproc=app.parallel)
else:
self._read_serial(docnames, app)
if config.master_doc not in self.all_docs:
raise SphinxError('master file %s not found' %
self.doc2path(config.master_doc))
self.app = None
for retval in app.emit('env-updated', self):
if retval is not None:
docnames.extend(retval)
return sorted(docnames)
def _read_serial(self, docnames, app):
for docname in app.status_iterator(docnames, 'reading sources... ',
purple, len(docnames)):
# remove all inventory entries for that file
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
self.read_doc(docname, app)
def _read_parallel(self, docnames, app, nproc):
# clear all outdated docs at once
for docname in docnames:
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
def read_process(docs):
self.app = app
self.warnings = []
self.set_warnfunc(lambda *args, **kwargs: self.warnings.append((args, kwargs)))
for docname in docs:
self.read_doc(docname, app)
# allow pickling self to send it back
self.set_warnfunc(None)
del self.app
del self.domains
del self.config.values
del self.config
return self
def merge(docs, otherenv):
warnings.extend(otherenv.warnings)
self.merge_info_from(docs, otherenv, app)
tasks = ParallelTasks(nproc)
chunks = make_chunks(docnames, nproc)
warnings = []
for chunk in app.status_iterator(
chunks, 'reading sources... ', purple, len(chunks)):
tasks.add_task(read_process, chunk, merge)
# make sure all threads have finished
app.info(bold('waiting for workers...'))
tasks.join()
for warning, kwargs in warnings:
self._warnfunc(*warning, **kwargs)
def check_dependents(self, already):
to_rewrite = (self.toctree.assign_section_numbers() +
self.toctree.assign_figure_numbers())
for docname in set(to_rewrite):
if docname not in already:
yield docname
# --------- SINGLE FILE READING --------------------------------------------
def warn_and_replace(self, error):
"""Custom decoding error handler that warns and replaces."""
linestart = error.object.rfind(b'\n', 0, error.start)
lineend = error.object.find(b'\n', error.start)
if lineend == -1:
lineend = len(error.object)
lineno = error.object.count(b'\n', 0, error.start) + 1
self.warn(self.docname, 'undecodable source characters, '
'replacing with "?": %r' %
(error.object[linestart + 1:error.start] + b'>>>' +
error.object[error.start:error.end] + b'<<<' +
error.object[error.end:lineend]), lineno)
return (u'?', error.end)
def read_doc(self, docname, app=None):
"""Parse a file and add/update inventory entries for the doctree."""
self.temp_data['docname'] = docname
# defaults to the global default, but can be re-set in a document
self.temp_data['default_domain'] = \
self.domains.get(self.config.primary_domain)
self.settings['input_encoding'] = self.config.source_encoding
self.settings['trim_footnote_reference_space'] = \
self.config.trim_footnote_reference_space
self.settings['gettext_compact'] = self.config.gettext_compact
docutilsconf = path.join(self.srcdir, 'docutils.conf')
# read docutils.conf from source dir, not from current dir
OptionParser.standard_config_files[1] = docutilsconf
if path.isfile(docutilsconf):
self.note_dependency(docutilsconf)
with sphinx_domains(self):
if self.config.default_role:
role_fn, messages = roles.role(self.config.default_role, english,
0, dummy_reporter)
if role_fn:
roles._roles[''] = role_fn
else:
self.warn(docname, 'default role %s not found' %
self.config.default_role)
codecs.register_error('sphinx', self.warn_and_replace)
# publish manually
reader = SphinxStandaloneReader(self.app, parsers=self.config.source_parsers)
pub = Publisher(reader=reader,
writer=SphinxDummyWriter(),
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, self.settings, None)
src_path = self.doc2path(docname)
source = SphinxFileInput(app, self, source=None, source_path=src_path,
encoding=self.config.source_encoding)
pub.source = source
pub.settings._source = src_path
pub.set_destination(None, None)
pub.publish()
doctree = pub.document
# post-processing
self.process_dependencies(docname, doctree)
self.process_images(docname, doctree)
self.process_downloads(docname, doctree)
self.process_metadata(docname, doctree)
self.create_title_from(docname, doctree)
for manager in itervalues(self.managers):
manager.process_doc(docname, doctree)
for domain in itervalues(self.domains):
domain.process_doc(self, docname, doctree)
# allow extension-specific post-processing
if app:
app.emit('doctree-read', doctree)
# store time of reading, for outdated files detection
# (Some filesystems have coarse timestamp resolution;
# therefore time.time() can be older than filesystem's timestamp.
# For example, FAT32 has 2sec timestamp resolution.)
self.all_docs[docname] = max(
time.time(), path.getmtime(self.doc2path(docname)))
if self.versioning_condition:
old_doctree = None
if self.versioning_compare:
# get old doctree
try:
with open(self.doc2path(docname,
self.doctreedir, '.doctree'), 'rb') as f:
old_doctree = pickle.load(f)
except EnvironmentError:
pass
# add uids for versioning
if not self.versioning_compare or old_doctree is None:
list(add_uids(doctree, self.versioning_condition))
else:
list(merge_doctrees(
old_doctree, doctree, self.versioning_condition))
# make it picklable
doctree.reporter = None
doctree.transformer = None
doctree.settings.warning_stream = None
doctree.settings.env = None
doctree.settings.record_dependencies = None
# cleanup
self.temp_data.clear()
self.ref_context.clear()
roles._roles.pop('', None) # if a document has set a local default role
# save the parsed doctree
doctree_filename = self.doc2path(docname, self.doctreedir,
'.doctree')
ensuredir(path.dirname(doctree_filename))
with open(doctree_filename, 'wb') as f:
pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# utilities to use while reading a document
@property
def docname(self):
"""Returns the docname of the document currently being parsed."""
return self.temp_data['docname']
@property
def currmodule(self):
"""Backwards compatible alias. Will be removed."""
self.warn(self.docname, 'env.currmodule is being referenced by an '
'extension; this API will be removed in the future')
return self.ref_context.get('py:module')
@property
def currclass(self):
"""Backwards compatible alias. Will be removed."""
self.warn(self.docname, 'env.currclass is being referenced by an '
'extension; this API will be removed in the future')
return self.ref_context.get('py:class')
def new_serialno(self, category=''):
"""Return a serial number, e.g. for index entry targets.
The number is guaranteed to be unique in the current document.
"""
key = category + 'serialno'
cur = self.temp_data.get(key, 0)
self.temp_data[key] = cur + 1
return cur
def note_dependency(self, filename):
"""Add *filename* as a dependency of the current document.
This means that the document will be rebuilt if this file changes.
*filename* should be absolute or relative to the source directory.
"""
self.dependencies.setdefault(self.docname, set()).add(filename)
def note_included(self, filename):
"""Add *filename* as a included from other document.
This means the document is not orphaned.
*filename* should be absolute or relative to the source directory.
"""
self.included.add(self.path2doc(filename))
def note_reread(self):
"""Add the current document to the list of documents that will
automatically be re-read at the next build.
"""
self.reread_always.add(self.docname)
def note_versionchange(self, type, version, node, lineno):
self.versionchanges.setdefault(version, []).append(
(type, self.temp_data['docname'], lineno,
self.ref_context.get('py:module'),
self.temp_data.get('object'), node.astext()))
# post-processing of read doctrees
def process_dependencies(self, docname, doctree):
"""Process docutils-generated dependency info."""
cwd = getcwd()
frompath = path.join(path.normpath(self.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
if not deps:
return
for dep in deps.list:
# the dependency path is relative to the working dir, so get
# one relative to the srcdir
if isinstance(dep, bytes):
dep = dep.decode(fs_encoding)
relpath = relative_path(frompath,
path.normpath(path.join(cwd, dep)))
self.dependencies.setdefault(docname, set()).add(relpath)
def process_downloads(self, docname, doctree):
"""Process downloadable file paths. """
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
rel_filename, filename = self.relfn2path(targetname, docname)
self.dependencies.setdefault(docname, set()).add(rel_filename)
if not os.access(filename, os.R_OK):
self.warn_node('download file not readable: %s' % filename,
node)
continue
uniquename = self.dlfiles.add_file(docname, filename)
node['filename'] = uniquename
def process_images(self, docname, doctree):
"""Process and rewrite image URIs."""
def collect_candidates(imgpath, candidates):
globbed = {}
for filename in glob(imgpath):
new_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
filename)
try:
mimetype = guess_mimetype(filename)
if mimetype not in candidates:
globbed.setdefault(mimetype, []).append(new_imgpath)
except (OSError, IOError) as err:
self.warn_node('image file %s not readable: %s' %
(filename, err), node)
for key, files in iteritems(globbed):
candidates[key] = sorted(files, key=len)[0] # select by similarity
for node in doctree.traverse(nodes.image):
# Map the mimetype to the corresponding image. The writer may
# choose the best image from these candidates. The special key * is
# set if there is only single candidate to be used by a writer.
# The special key ? is set for nonlocal URIs.
node['candidates'] = candidates = {}
imguri = node['uri']
if imguri.startswith('data:'):
self.warn_node('image data URI found. some builders might not support', node,
type='image', subtype='data_uri')
candidates['?'] = imguri
continue
elif imguri.find('://') != -1:
self.warn_node('nonlocal image URI found: %s' % imguri, node,
type='image', subtype='nonlocal_uri')
candidates['?'] = imguri
continue
rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
if self.config.language:
# substitute figures (ex. foo.png -> foo.en.png)
i18n_full_imgpath = search_image_for_language(full_imgpath, self)
if i18n_full_imgpath != full_imgpath:
full_imgpath = i18n_full_imgpath
rel_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
i18n_full_imgpath)
# set imgpath as default URI
node['uri'] = rel_imgpath
if rel_imgpath.endswith(os.extsep + '*'):
if self.config.language:
# Search language-specific figures at first
i18n_imguri = get_image_filename_for_language(imguri, self)
_, full_i18n_imgpath = self.relfn2path(i18n_imguri, docname)
collect_candidates(full_i18n_imgpath, candidates)
collect_candidates(full_imgpath, candidates)
else:
candidates['*'] = rel_imgpath
# map image paths to unique image names (so that they can be put
# into a single directory)
for imgpath in itervalues(candidates):
self.dependencies.setdefault(docname, set()).add(imgpath)
if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
self.warn_node('image file not readable: %s' % imgpath,
node)
continue
self.images.add_file(docname, imgpath)
def process_metadata(self, docname, doctree):
"""Process the docinfo part of the doctree as metadata.
Keep processing minimal -- just return what docutils says.
"""
self.metadata[docname] = md = {}
try:
docinfo = doctree[0]
except IndexError:
# probably an empty document
return
if docinfo.__class__ is not nodes.docinfo:
# nothing to see here
return
for node in docinfo:
# nodes are multiply inherited...
if isinstance(node, nodes.authors):
md['authors'] = [author.astext() for author in node]
elif isinstance(node, nodes.TextElement): # e.g. author
md[node.__class__.__name__] = node.astext()
else:
name, body = node
md[name.astext()] = body.astext()
for name, value in md.items():
if name in ('tocdepth',):
try:
value = int(value)
except ValueError:
value = 0
md[name] = value
del doctree[0]
def create_title_from(self, docname, document):
"""Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
titlenode = nodes.title()
longtitlenode = titlenode
# explicit title set with title directive; use this only for
# the <title> tag in HTML output
if 'title' in document:
longtitlenode = nodes.title()
longtitlenode += nodes.Text(document['title'])
# look for first section title and use that as the title
for node in document.traverse(nodes.section):
visitor = SphinxContentsFilter(document)
node[0].walkabout(visitor)
titlenode += visitor.get_entry_text()
break
else:
# document has no title
titlenode += nodes.Text('<no title>')
self.titles[docname] = titlenode
self.longtitles[docname] = longtitlenode
def note_toctree(self, docname, toctreenode):
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
self.toctree.note_toctree(docname, toctreenode)
def get_toc_for(self, docname, builder):
"""Return a TOC nodetree -- for use on the same page only!"""
return self.toctree.get_toc_for(docname, builder)
def get_toctree_for(self, docname, builder, collapse, **kwds):
"""Return the global TOC nodetree."""
return self.toctree.get_toctree_for(docname, builder, collapse, **kwds)
def get_domain(self, domainname):
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
"""
try:
return self.domains[domainname]
except KeyError:
raise ExtensionError('Domain %r is not registered' % domainname)
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
def get_doctree(self, docname):
"""Read the doctree for a file from the pickle and return it."""
doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
with open(doctree_filename, 'rb') as f:
doctree = pickle.load(f)
doctree.settings.env = self
doctree.reporter = Reporter(self.doc2path(docname), 2, 5,
stream=WarningStream(self._warnfunc))
return doctree
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
if doctree is None:
doctree = self.get_doctree(docname)
# resolve all pending cross-references
self.resolve_references(doctree, docname, builder)
# now, resolve all toctree nodes
for toctreenode in doctree.traverse(addnodes.toctree):
result = self.resolve_toctree(docname, builder, toctreenode,
prune=prune_toctrees,
includehidden=includehidden)
if result is None:
toctreenode.replace_self([])
else:
toctreenode.replace_self(result)
return doctree
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
to the value of the *maxdepth* option on the *toctree* node.
If *titles_only* is True, only toplevel document titles will be in the
resulting tree.
If *collapse* is True, all branches not containing docname will
be collapsed.
"""
return self.toctree.resolve_toctree(docname, builder, toctree, prune,
maxdepth, titles_only, collapse,
includehidden)
def resolve_references(self, doctree, fromdocname, builder):
for node in doctree.traverse(addnodes.pending_xref):
contnode = node[0].deepcopy()
newnode = None
typ = node['reftype']
target = node['reftarget']
refdoc = node.get('refdoc', fromdocname)
domain = None
try:
if 'refdomain' in node and node['refdomain']:
# let the domain try to resolve the reference
try:
domain = self.domains[node['refdomain']]
except KeyError:
raise NoUri
newnode = domain.resolve_xref(self, refdoc, builder,
typ, target, node, contnode)
# really hardwired reference types
elif typ == 'any':
newnode = self._resolve_any_reference(builder, refdoc, node, contnode)
elif typ == 'doc':
newnode = self._resolve_doc_reference(builder, refdoc, node, contnode)
# no new node found? try the missing-reference event
if newnode is None:
newnode = builder.app.emit_firstresult(
'missing-reference', self, node, contnode)
# still not found? warn if node wishes to be warned about or
# we are in nit-picky mode
if newnode is None:
self._warn_missing_reference(refdoc, typ, target, node, domain)
except NoUri:
newnode = contnode
node.replace_self(newnode or contnode)
# remove only-nodes that do not belong to our builder
process_only_nodes(doctree, builder.tags, warn_node=self.warn_node)
# allow custom references to be resolved
builder.app.emit('doctree-resolved', doctree, fromdocname)
def _warn_missing_reference(self, refdoc, typ, target, node, domain):
warn = node.get('refwarn')
if self.config.nitpicky:
warn = True
if self._nitpick_ignore:
dtype = domain and '%s:%s' % (domain.name, typ) or typ
if (dtype, target) in self._nitpick_ignore:
warn = False
# for "std" types also try without domain name
if (not domain or domain.name == 'std') and \
(typ, target) in self._nitpick_ignore:
warn = False
if not warn:
return
if domain and typ in domain.dangling_warnings:
msg = domain.dangling_warnings[typ]
elif typ == 'doc':
msg = 'unknown document: %(target)s'
elif node.get('refdomain', 'std') not in ('', 'std'):
msg = '%s:%s reference target not found: %%(target)s' % \
(node['refdomain'], typ)
else:
msg = '%r reference target not found: %%(target)s' % typ
self.warn_node(msg % {'target': target}, node, type='ref', subtype=typ)
def _resolve_doc_reference(self, builder, refdoc, node, contnode):
# directly reference to document by source name;
# can be absolute or relative
docname = docname_join(refdoc, node['reftarget'])
if docname in self.all_docs:
if node['refexplicit']:
# reference with explicit title
caption = node.astext()
else:
caption = clean_astext(self.titles[docname])
innernode = nodes.inline(caption, caption)
innernode['classes'].append('doc')
newnode = nodes.reference('', '', internal=True)
newnode['refuri'] = builder.get_relative_uri(refdoc, docname)
newnode.append(innernode)
return newnode
def _resolve_any_reference(self, builder, refdoc, node, contnode):
"""Resolve reference generated by the "any" role."""
target = node['reftarget']
results = []
# first, try resolving as :doc:
doc_ref = self._resolve_doc_reference(builder, refdoc, node, contnode)
if doc_ref:
results.append(('doc', doc_ref))
# next, do the standard domain (makes this a priority)
results.extend(self.domains['std'].resolve_any_xref(
self, refdoc, builder, target, node, contnode))
for domain in self.domains.values():
if domain.name == 'std':
continue # we did this one already
try:
results.extend(domain.resolve_any_xref(self, refdoc, builder,
target, node, contnode))
except NotImplementedError:
# the domain doesn't yet support the new interface
# we have to manually collect possible references (SLOW)
for role in domain.roles:
res = domain.resolve_xref(self, refdoc, builder, role, target,
node, contnode)
if res and isinstance(res[0], nodes.Element):
results.append(('%s:%s' % (domain.name, role), res))
# now, see how many matches we got...
if not results:
return None
if len(results) > 1:
nice_results = ' or '.join(':%s:' % r[0] for r in results)
self.warn_node('more than one target found for \'any\' cross-'
'reference %r: could be %s' % (target, nice_results),
node)
res_role, newnode = results[0]
# Override "any" class with the actual role type to get the styling
# approximately correct.
res_domain = res_role.split(':')[0]
if newnode and newnode[0].get('classes'):
newnode[0]['classes'].append(res_domain)
newnode[0]['classes'].append(res_role.replace(':', '-'))
return newnode
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
return self.indices.create_index(builder, group_entries=group_entries, _fixre=_fixre)
def collect_relations(self):
traversed = set()
def traverse_toctree(parent, docname):
if parent == docname:
self.warn(docname, 'self referenced toctree found. Ignored.')
return
# traverse toctree by pre-order
yield parent, docname
traversed.add(docname)
for child in (self.toctree_includes.get(docname) or []):
for subparent, subdocname in traverse_toctree(docname, child):
if subdocname not in traversed:
yield subparent, subdocname
traversed.add(subdocname)
relations = {}
docnames = traverse_toctree(None, self.config.master_doc)
prevdoc = None
parent, docname = next(docnames)
for nextparent, nextdoc in docnames:
relations[docname] = [parent, prevdoc, nextdoc]
prevdoc = docname
docname = nextdoc
parent = nextparent
relations[docname] = [parent, prevdoc, None]
return relations
def check_consistency(self):
"""Do consistency checks."""
for docname in sorted(self.all_docs):
if docname not in self.files_to_rebuild:
if docname == self.config.master_doc:
# the master file is not included anywhere ;)
continue
if docname in self.included:
# the document is included from other documents
continue
if 'orphan' in self.metadata[docname]:
continue
self.warn(docname, 'document isn\'t included in any toctree')
|
[
"alex.barreto@databricks.com"
] |
alex.barreto@databricks.com
|
fc67a4b14f117567e54950ce6518e138da60cd34
|
11b7a20881ef60d8de74885ca3de9e549eb9ca41
|
/temp.py
|
7eca941e0b829654d9fcf88cfb71b715a7b640ff
|
[] |
no_license
|
Buvi14/BIlling-app
|
030d6c3f65cd69a3890f5c392e9b6763313f219a
|
c9b0f8ea798ff89a6082f9436b43e0c489a0633c
|
refs/heads/master
| 2020-03-22T11:02:55.381723
| 2018-07-08T11:41:24
| 2018-07-08T11:41:24
| 139,943,011
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#Plotting the graph for given data
import json
from sklearn.externals import joblib
import matplotlib.pyplot as mp #plotting the graph
import pandas as pd #for importing the data
mydata=pd.read_csv('example.csv') #import the data
x=input("Enter the product_id/product_value/price:") #x-axis values
y=input("Enter the product_id/product_value/price:") #y-axis values
mydata.plot.bar(x,y)
mp.show() #showing the bar chart
|
[
"noreply@github.com"
] |
Buvi14.noreply@github.com
|
596411c05f2c94b4b357beb48a6cac370bb39083
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/GL/ARB/seamless_cube_map.py
|
25e63a7d3f689d0ff11cc0c81f81b889b4c44394
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974
| 2021-01-22T10:51:14
| 2021-01-22T10:51:14
| 307,847,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
'''OpenGL extension ARB.seamless_cube_map
This module customises the behaviour of the
OpenGL.raw.GL.ARB.seamless_cube_map to provide a more
Python-friendly API
Overview (from the spec)
When sampling from cube map textures, a three-dimensional texture
coordinate is used to select one of the cube map faces and generate
a two dimensional texture coordinate ( s t ), at which a texel is
sampled from the determined face of the cube map texture. Each face
of the texture is treated as an independent two-dimensional texture,
and the generated ( s t ) coordinate is subjected to the same
clamping and wrapping rules as for any other two dimensional texture
fetch.
Although it is unlikely that the generated ( s t ) coordinate lies
significantly outside the determined cube map face, it is often the
case that the locations of the individual elements required during a
linear sampling do not lie within the determined face, and their
coordinates will therefore be modified by the selected clamping and
wrapping rules. This often has the effect of producing seams or
other discontinuities in the sampled texture.
This extension allows implementations to take samples from adjacent
cube map faces, providing the ability to create seamless cube maps.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/seamless_cube_map.txt
'''
from OpenGL.raw.GL.ARB.seamless_cube_map import _EXTENSION_NAME
def glInitSeamlessCubeMapARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
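# A minimal usage sketch (an assumption for illustration, not part of the autogenerated
# module): after creating a GL context, one would typically check for the extension and
# enable seamless filtering, e.g.
#   from OpenGL.GL import glEnable
#   from OpenGL.raw.GL.ARB.seamless_cube_map import GL_TEXTURE_CUBE_MAP_SEAMLESS
#   if glInitSeamlessCubeMapARB():
#       glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS)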
|
[
"rudnik49@gmail.com"
] |
rudnik49@gmail.com
|
de5077e4e7e2995c4bcfdc1792f7dab4aef7c588
|
d1105ac79b89138a9dd6233b881c541041ee8512
|
/chap3_case2_radiationLineConstraint/heatEq_code_generator.py
|
e5a672d6d9eda59b5b3c797d0ba229bf0ff2c59e
|
[] |
no_license
|
bowenfan96/mpc-rom
|
4db2a165118b07926e5988a92155e740e5d4f18b
|
a5ab132ea58ea5a4f2229f53eaee1bd180cc6e7b
|
refs/heads/master
| 2023-07-27T12:25:28.804007
| 2021-09-07T13:27:03
| 2021-09-07T13:27:03
| 378,615,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
# This file exists to parse A and B matrices into pyomo entries
# While it seems silly, this is necessary because pyomo's simulator (which calls casadi) does not support matrices
# Namely, it cannot simulate variables indexed by more than 1 set (so each variable can only be indexed by time)
# It also doesn't support if statements within the model, so this seems to be the only way
import numpy as np
N = 20
# ----- GENERATE THE MODEL MATRICES -----
# Apply the method of lines on the heat equation to generate the A matrix
# Length of the rod = 1 m
# Number of segments = number of discretization points - 1 (as 2 ends take up 2 points)
length = 1
num_segments = N - 1
# Thermal diffusivity alpha
alpha = 0.1
segment_length = length / num_segments
# Constant
c = alpha / (segment_length ** 2)
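# For reference, the method-of-lines discretization that the A matrix below encodes is
# (a sketch of the underlying ODE system, written here only as a comment):
#   dT_i/dt = c * (T_{i-1} - 2*T_i + T_{i+1}),  with c = alpha / segment_length**2
# i.e. each interior row of A carries the stencil [1, -2, 1] scaled by c.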
# Generate A matrix
A_mat = np.zeros(shape=(N, N))
for row in range(A_mat.shape[0]):
for col in range(A_mat.shape[1]):
if row == col:
A_mat[row][col] = -2
elif abs(row - col) == 1:
A_mat[row][col] = 1
else:
A_mat[row][col] = 0
# Multiply constant to all elements in A
A = c * A_mat
# Generate B matrix
# Two sources of heat at each end of the rod
num_heaters = 2
B_mat = np.zeros(shape=(N, num_heaters))
# First heater on the left
B_mat[0][0] = 1
# Second heater on the right
B_mat[N - 1][num_heaters - 1] = 1
# Multiply constant to all elements in B
B = c * B_mat
for i in range(1, 19):
print("self.model.x{} = Var(self.model.time)".format(i))
print("self.model.x{}_dot = DerivativeVar(self.model.x{}, wrt=self.model.time)".format(i, i))
print("self.model.x{}[0].fix(x_init[{}])".format(i, i))
for i in range(1, 19):
print(
'''
def _ode_x{}(m, _t):
return m.x{}_dot[_t] == self.A[{}][{}-1] * m.x{}[_t] + self.A[{}][{}] * m.x{}[_t] + self.A[{}][{}+1] * m.x{}[_t]
self.model.x{}_ode = Constraint(self.model.time, rule=_ode_x{})\n
'''.format(i, i, i, i, i-1, i, i, i, i, i, i+1, i, i)
)
for i in range(1, 19):
print("temp_x.append(value(self.model.x{}[time]))".format(i))
for i in range(20):
print(" + (m.x{}[_t] ** 4 - env_temp ** 4)".format(i))
|
[
"bowen@users.noreply.github.com"
] |
bowen@users.noreply.github.com
|
9cbc8919bbd13651a03f5b1e0bce41ea11c7531a
|
66b72447770c0848f8f9b89c2d4c2473fb9a0581
|
/0938.range_sum_of_bst_E.py
|
ede7558859e324f7364560fd9a3387c53e70503c
|
[] |
no_license
|
zvant/LeetCodeSolutions
|
5d27a25dbc0ec522bde61d436f88e81c67e1367a
|
ca2c7959c3ea8912abc0be5575272479a5ef3cb0
|
refs/heads/master
| 2022-09-21T06:26:17.520429
| 2022-09-08T02:40:16
| 2022-09-08T02:40:16
| 140,584,965
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
# https://leetcode.com/problems/range-sum-of-bst/
# 2021/10
# 288 ms
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# class Solution:
# def BFS(self, root, L, R):
# if root is None:
# return
# if root.val > L:
# self.BFS(root.left, L, R)
# if root.val < R:
# self.BFS(root.right, L, R)
# if root.val >= L and root.val <= R:
# self.summary += root.val
# def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
# self.summary = 0
# self.BFS(root, L, R)
# return self.summary
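# Note: the iterative version below visits every node with an explicit stack; unlike the
# recursive variant above, it does not use the BST ordering to prune subtrees.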
class Solution:
def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
total = 0
S = [root]
while len(S) > 0:
node = S.pop()
if not node is None:
x = node.val
if x >= L and x <= R:
total += x
S.append(node.left)
S.append(node.right)
return total
|
[
"kzz1994@gmail.com"
] |
kzz1994@gmail.com
|
a7e02fbd7355c82ec1aa61752bec20b5cb11b8ab
|
0d381e2cb2d66f7ef92c3efff5e6f93aa803963b
|
/solved/firstBadVersion.py
|
61e638086b85708f08f2c839bd9e734efb372d0d
|
[] |
no_license
|
shannonmlance/leetcode
|
2242ce3b1b636d89728860e9bfa7b266d6a2fd6b
|
f63e7c1df256b5524f7e891d40620946a618fee2
|
refs/heads/master
| 2021-06-27T22:02:09.346343
| 2021-04-29T00:44:03
| 2021-04-29T00:44:03
| 225,467,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
# You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
# Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
# You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
# Example:
# Given n = 5, and version = 4 is the first bad version.
# call isBadVersion(3) -> false
# call isBadVersion(5) -> true
# call isBadVersion(4) -> true
# Then 4 is the first bad version.
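# The solution below binary-searches the version range, so it needs only O(log n) calls
# to the isBadVersion API instead of checking each version in turn.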
class Solution:
def firstBadVersion(self, n):
# make recursive call, passing in 1 and the given number of versions as the starting parameters
c = self.rFindBadVersion(1, n)
# return the number from the recursive call
return c
# recursive binary search method
def rFindBadVersion(self, s, e):
# if the given start number is equal to the given end number, return the end number, as this is the first bad version
if s == e:
return e
# find the middle by subtracting the start from the end and dividing the difference, then add the start to the quotient
m = (e - s)//2 + s
# make the "api" call
# if the response is false
if not self.isBadVersion(m):
# change the start number to equal the middle number, plus one
s = m + 1
# if the response is true
else:
# change the end number to equal the middle number
e = m
# repeat the recursive call, passing in the updated start and end numbers
return self.rFindBadVersion(s, e)
# boolean "api" call that returns whether the given version is the first bad version
def isBadVersion(self, v):
# define the first bad version's number
firstBadVersion = 46
# if the given version is less than the first bad version, return false
if v < firstBadVersion:
return False
# if the given version is not less than the first bad version, return true
else:
return True
n = 45
s = Solution()
a = s.firstBadVersion(n)
print(a)
|
[
"shannon.lance@sap.com"
] |
shannon.lance@sap.com
|
fcd0b3996dcc8bf3891d3ed563e44c660b62677b
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/D/dmsilv/facebook_fans.py
|
3fc1f0e56bfce614a8af5c9b37936e98b95a0c94
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,020
|
py
|
# Blank Python
import scraperwiki
from BeautifulSoup import BeautifulSoup
#define the order our columns are displayed in the datastore
scraperwiki.metadata.save('data_columns', ['Page Name', 'Fans'])
#scrape the fan section
def scrape_fans(soup):
data_table = soup.find("table",{ "class" : "uiGrid"}) #find the pages with most fans section
rows= data_table.findAll("tr") #find all the table rows
for row in rows: #loop through the rows
cells = row.findAll("td") #find all the cells
for cell in cells: #loop through the cells
#setup the data record
record={}
print cell
            table_cells=cell.findAll("p") #find all the p items
if table_cells: #if the item exists store it
record['Page Name'] = table_cells[0].text
record['Fans'] = table_cells[1].text[:-5]
scraperwiki.datastore.save(["Page Name"], record)
def scrape_page(url):
html = scraperwiki.scrape(url)
soup = BeautifulSoup(html)
#print soup.prettify()
link_table=soup.find("div", {"class" : "alphabet_list clearfix"})
#next_link=soup.findAll("a")
for link in link_table:
next_url=link['href']
#print next_url
html1 = scraperwiki.scrape(next_url)
soup1 = BeautifulSoup(html1)
scrape_fans(soup1)
#setup the base url
base_url = 'http://facebook.com/directory/pages/'
#setup the startup url
#call the scraping function
scrape_page(base_url)
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
165067404ebd0dfb7de8fe0a0a6fe0a74b393243
|
9f90cf2f09729a3f71b0c8308c72f733906536f3
|
/seriesTaylor.py
|
bbb037200df857d4253b9bb9d184ad35c00283b9
|
[] |
no_license
|
bayronortiz/MetodosNumericos
|
5c5305defbf85a26ce5b4b232ad8da2393766a12
|
18ff1c44085e049986c40dd4e9cbe122110b177d
|
refs/heads/master
| 2016-09-06T02:40:23.028216
| 2015-12-13T15:35:44
| 2015-12-13T15:35:44
| 42,413,726
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
# -*- coding: utf-8 -*-
# Date: 08-Sept-2015
from math import *
# Computes the Taylor series of the function fx = cos(x)
# Params: xi: Xi -- xii: Xi+1
def fxN(xi,xii,n):
h= xii-xi
    valor= 0 # Holds the value of the function
    temp= 0 # Holds the temporary value of the current term
    negativo= False
    signo=(1,-1,-1, 1)
    cs= 0 # Sign controller
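    # The loop evaluates the Taylor expansion of cos about xi with step h = xii - xi:
    #   cos(xi + h) = cos(xi) - sin(xi)*h - cos(xi)*h**2/2! + sin(xi)*h**3/3! + ...
    # The derivatives of cos cycle through (cos, -sin, -cos, sin), hence the signo tuple.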
for i in range(n+1):
if i%2==0:
temp= (signo[cs]*cos(xi) * h**i)/factorial(i)
else:
temp= (signo[cs]*sin(xi) * h**i)/factorial(i)
cs+=1
        if cs == 4: # Reset the sign controller
cs= 0
        valor+= temp # Add the term to the Taylor series value
return valor
n= input("Ingrese el Orden (n)--> ")
print "Orden | f(xi+1)"
#Falta hallar el error aproximado
for i in range(n+1):
print "%d | %.9f" % (i, fxN(pi/4,pi/3,i))
|
[
"danilo@linux-vpl0.site"
] |
danilo@linux-vpl0.site
|
07765b8bdaa7390420a76f0619c07aa31769d846
|
d4f9d104479b6f9a64175a3fe8554860bf0d62b2
|
/supply_line2.py
|
22571f70a0b7b1e623ef7cc3276ea9935b7daf3d
|
[] |
no_license
|
pohily/checkio
|
9a09c9c52b4f07438cfe4e00914e8d1cfe844c5d
|
8a0a49126af6e09b9e5e6067f28efbf085cd87f6
|
refs/heads/master
| 2020-05-16T03:18:18.068186
| 2019-07-06T13:22:20
| 2019-07-06T13:22:20
| 182,674,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,842
|
py
|
coord = {'A':0, 'B':1, 'C':2, 'D':3, 'E':4, 'F':5, 'G':6, 'H':7, 'I':8, 'J':9, 'K':10, 'L':11}
back = {0:'A', 1:'B', 2:'C', 3:'D', 4:'E', 5:'F', 6:'G', 7:'H', 8:'I', 9:'J', 10:'K', 11:'L'}
checks_odd = [[-1, 0], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1]]
checks_even = [[-1, 0], [-1, 1], [0, 1], [1, 0], [0, -1], [-1, -1]]
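# Neighbour offsets on the hexagonal board: which six cells are adjacent to a hex depends
# on whether its column index is odd or even, hence the two offset tables above.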
def decode_coord(point):
return (int(point[1]) - 1, coord[point[0]])
def encode(point):
return back[point[1]] + str(point[0]+1)
def supply_line(you, depots, enemies):
#dup of our map
    tmp_map = [['' for _ in range(12)] for _ in range(9)]  # 9 rows x 12 columns, all empty
# find enemy ZOC
ZOC = set()
for enemy in enemies:
row, col = decode_coord(enemy)
ZOC.add((row, col))
tmp_map[row][col] = 'z'
if col % 2 == 0:
checks = checks_even
else:
checks = checks_odd
for check in checks:
if 0 <= row + check[0] <= 8 and 0 <= col + check[1] <= 11:
ZOC.add((row + check[0], col + check[1]))
tmp_map[row + check[0]][col + check[1]] = 'z'
# just to see
tmp = []
for p in ZOC:
tmp.append(encode(p))
print('ZOC', tmp)
# check if depots in/out of ZOC
tmp = []
for depot in depots:
row, col = decode_coord(depot)
if (row, col) not in ZOC:
tmp.append((row, col))
tmp_map[row][col] = 'D'
depots = tmp[:]
if not depots:
return None
# just to see
tmp = []
for p in depots:
tmp.append(encode(p))
#print('depots', tmp)
# find number of steps from start for every cell in map
result = []
for depot in depots:
if depot in ZOC:
continue
row, col = decode_coord(you)
start = (row, col)
visited = [start]
        count = 0 # number of steps from start
tmp_map[row][col] = count
ok = False # flag point = depot
while True:
change = False
for i, r in enumerate(tmp_map):
for j, c in enumerate(r):
if c == count:
if j % 2 == 0:
checks = checks_even
else:
checks = checks_odd
for check in checks:
x = i + check[0]
y = j + check[1]
if 0 <= x <= 8 and 0 <= y <= 11:
point = (x, y)
if point == depot:
visited.append(point)
#tmp_map[x][y] = count + 1
result.append(count + 1)
ok = True
break
if point not in visited and point not in ZOC:
tmp_map[x][y] = count + 1
visited.append(point)
change = True
if not change:
if not result:
return None # depots can't be reached
else:
break # nothing during cycle
if not ok:
count += 1
'''
for i in tmp_map: # print our map step by step
print(i)
print()'''
else:
break
for i in tmp_map: # print our map
print(i)
print()
print(result)
return min(result)
print(supply_line("B7",["C2"],["E3","E4","L1","H2","C3","E8"]))
'''
if __name__ == '__main__':
assert supply_line("B4", {"F4"}, {"D4"}) == 6, 'simple'
assert supply_line("A3", {"A9", "F5", "G8"}, {"B3", "G6"}) == 11, 'multiple'
assert supply_line("C2", {"B9", "F6"}, {"B7", "E8", "E5", "H6"}) is None, 'None'
assert supply_line("E5", {"C2", "B7", "F8"}, set()) == 4, 'no enemies'
assert supply_line("A5", {"A2", "B9"}, {"B3", "B7", "E3", "E7"}) == 13, '2 depots'
print('"Run" is good. How is "Check"?')
'''
|
[
"noreply@github.com"
] |
pohily.noreply@github.com
|
982fb6dfb5536e8d2ea0d6d461feb007703ab20d
|
4cdcd0e06497bdeb793abcd98c870db414700bdd
|
/pyblp/utilities/basics.py
|
bd2060c78e628c77393492763db5863b5c7a861b
|
[
"MIT"
] |
permissive
|
markpham/pyblp
|
bc1d22e6820c0d28905615aec4e8deb79ee03efa
|
59714a0d7c46afa10dcd7575bace21026ebb00c7
|
refs/heads/master
| 2020-07-16T04:14:00.834907
| 2019-08-31T23:05:00
| 2019-08-31T23:05:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,063
|
py
|
"""Basic functionality."""
import contextlib
import functools
import inspect
import multiprocessing.pool
import re
import time
import traceback
from typing import (
Any, Callable, Container, Dict, Hashable, Iterable, Iterator, List, Mapping, Optional, Set, Sequence, Type, Tuple,
Union
)
import numpy as np
from .. import options
# define common types
Array = Any
RecArray = Any
Data = Dict[str, Array]
Options = Dict[str, Any]
Bounds = Tuple[Array, Array]
# define a pool managed by parallel and used by generate_items
pool = None
@contextlib.contextmanager
def parallel(processes: int) -> Iterator[None]:
r"""Context manager used for parallel processing in a ``with`` statement context.
This manager creates a context in which a pool of Python processes will be used by any method that requires
market-by-market computation. These methods will distribute their work among the processes. After the context
created by the ``with`` statement ends, all worker processes in the pool will be terminated. Outside of this
context, such methods will not use multiprocessing.
Importantly, multiprocessing will only improve speed if gains from parallelization outweigh overhead from
serializing and passing data between processes. For example, if computation for a single market is very fast and
there is a lot of data in each market that must be serialized and passed between processes, using multiprocessing
may reduce overall speed.
Arguments
---------
processes : `int`
Number of Python processes that will be created and used by any method that supports parallel processing.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/parallel.ipynb
.. raw:: latex
\end{examplenotebook}
"""
# validate the number of processes
if not isinstance(processes, int):
raise TypeError("processes must be an int.")
if processes < 2:
raise ValueError("processes must be at least 2.")
# start the process pool, wait for work to be done, and then terminate it
output(f"Starting a pool of {processes} processes ...")
start_time = time.time()
global pool
try:
with multiprocessing.pool.Pool(processes) as pool:
output(f"Started the process pool after {format_seconds(time.time() - start_time)}.")
yield
output(f"Terminating the pool of {processes} processes ...")
terminate_time = time.time()
finally:
pool = None
output(f"Terminated the process pool after {format_seconds(time.time() - terminate_time)}.")
def generate_items(keys: Iterable, factory: Callable[[Any], tuple], method: Callable) -> Iterator:
"""Generate (key, method(*factory(key))) tuples for each key. The first element returned by factory is an instance
of the class to which method is attached. If a process pool has been initialized, use multiprocessing; otherwise,
use serial processing.
"""
if pool is None:
return (generate_items_worker((k, factory(k), method)) for k in keys)
return pool.imap_unordered(generate_items_worker, ((k, factory(k), method) for k in keys))
def generate_items_worker(args: Tuple[Any, tuple, Callable]) -> Tuple[Any, Any]:
"""Call the the specified method of a class instance with any additional arguments. Return the associated key along
with the returned object.
"""
key, (instance, *method_args), method = args
return key, method(instance, *method_args)
def structure_matrices(mapping: Mapping) -> RecArray:
"""Structure a mapping of keys to (array or None, type) tuples as a record array in which each sub-array is
guaranteed to be at least two-dimensional.
"""
# determine the number of rows in all matrices
size = next(a.shape[0] for a, _ in mapping.values() if a is not None)
# collect matrices and data types
matrices: List[Array] = []
dtypes: List[Tuple[Union[str, Tuple[Hashable, str]], Any, Tuple[int]]] = []
for key, (array, dtype) in mapping.items():
matrix = np.zeros((size, 0)) if array is None else np.c_[array]
dtypes.append((key, dtype, (matrix.shape[1],)))
matrices.append(matrix)
# build the record array
structured = np.recarray(size, dtypes)
for dtype, matrix in zip(dtypes, matrices):
structured[dtype[0] if isinstance(dtype[0], str) else dtype[0][1]] = matrix
return structured
def update_matrices(matrices: RecArray, update_mapping: Dict) -> RecArray:
"""Update fields in a record array created by structure_matrices by re-structuring the matrices."""
mapping = update_mapping.copy()
for key in matrices.dtype.names:
if key not in mapping:
if len(matrices.dtype.fields[key]) > 2:
mapping[(matrices.dtype.fields[key][2], key)] = (matrices[key], matrices[key].dtype)
else:
mapping[key] = (matrices[key], matrices[key].dtype)
return structure_matrices(mapping)
def extract_matrix(structured_array_like: Mapping, key: Any) -> Optional[Array]:
"""Attempt to extract a field from a structured array-like object or horizontally stack field0, field1, and so on,
into a full matrix. The extracted array will have at least two dimensions.
"""
try:
matrix = np.c_[structured_array_like[key]]
return matrix if matrix.size > 0 else None
except Exception:
index = 0
parts: List[Array] = []
while True:
try:
part = np.c_[structured_array_like[f'{key}{index}']]
except Exception:
break
index += 1
if part.size > 0:
parts.append(part)
return np.hstack(parts) if parts else None
def extract_size(structured_array_like: Mapping) -> int:
"""Attempt to extract the number of rows from a structured array-like object."""
size = 0
getters = [
lambda m: m.shape[0],
lambda m: next(iter(structured_array_like.values())).shape[0],
lambda m: len(next(iter(structured_array_like.values()))),
lambda m: len(m)
]
for get in getters:
try:
size = get(structured_array_like)
break
except Exception:
pass
if size > 0:
return size
raise TypeError(
f"Failed to get the number of rows in the structured array-like object of type {type(structured_array_like)}. "
f"Try using a dictionary, a NumPy structured array, a Pandas DataFrame, or any other standard type."
)
def interact_ids(*columns: Array) -> Array:
"""Create interactions of ID columns."""
interacted = columns[0].flatten().astype(np.object)
if len(columns) > 1:
interacted[:] = list(zip(*columns))
return interacted
def output(message: Any) -> None:
"""Print a message if verbosity is turned on."""
if options.verbose:
if not callable(options.verbose_output):
raise TypeError("options.verbose_output should be callable.")
options.verbose_output(str(message))
def output_progress(iterable: Iterable, length: int, start_time: float) -> Iterator:
"""Yield results from an iterable while outputting progress updates at most every minute."""
elapsed = time.time() - start_time
next_minute = int(elapsed / 60) + 1
for index, iterated in enumerate(iterable):
yield iterated
elapsed = time.time() - start_time
if elapsed > 60 * next_minute:
output(f"Finished {index + 1} out of {length} after {format_seconds(elapsed)}.")
next_minute = int(elapsed / 60) + 1
def format_seconds(seconds: float) -> str:
"""Prepare a number of seconds to be displayed as a string."""
hours, remainder = divmod(int(round(seconds)), 60**2)
minutes, seconds = divmod(remainder, 60)
return f'{hours:02}:{minutes:02}:{seconds:02}'
def format_number(number: Any) -> str:
"""Prepare a number to be displayed as a string."""
if not isinstance(options.digits, int):
raise TypeError("options.digits must be an int.")
template = f"{{:^+{options.digits + 6}.{options.digits - 1}E}}"
formatted = template.format(float(number))
if "NAN" in formatted:
formatted = formatted.replace("+", " ")
return formatted
def format_se(se: Any) -> str:
"""Prepare a standard error to be displayed as a string."""
formatted = format_number(se)
for string in ["NAN", "-INF", "+INF"]:
if string in formatted:
return formatted.replace(string, f"({string})")
return f"({formatted})"
def format_options(mapping: Options) -> str:
"""Prepare a mapping of options to be displayed as a string."""
strings: List[str] = []
for key, value in mapping.items():
if callable(value):
value = f'{value.__module__}.{value.__qualname__}'
elif isinstance(value, float):
value = format_number(value)
strings.append(f'{key}: {value}')
joined = ', '.join(strings)
return f'{{{joined}}}'
def format_table(
header: Sequence[Union[str, Sequence[str]]], *data: Sequence, title: Optional[str] = None,
include_border: bool = True, include_header: bool = True, line_indices: Container[int] = ()) -> str:
"""Format table information as a string, which has fixed widths, vertical lines after any specified indices, and
optionally a title, border, and header.
"""
# construct the header rows
row_index = -1
header_rows: List[List[str]] = []
header = [[c] if isinstance(c, str) else c for c in header]
while True:
header_row = ["" if len(c) < -row_index else c[row_index] for c in header]
if not any(header_row):
break
header_rows.insert(0, header_row)
row_index -= 1
# construct the data rows
data_rows = [[str(c) for c in r] + [""] * (len(header) - len(r)) for r in data]
# compute column widths
widths = []
for column_index in range(len(header)):
widths.append(max(len(r[column_index]) for r in header_rows + data_rows))
# build the template
template = " " .join("{{:^{}}}{}".format(w, " |" if i in line_indices else "") for i, w in enumerate(widths))
# build the table
lines = []
if title is not None:
lines.append(f"{title}:")
if include_border:
lines.append("=" * len(template.format(*[""] * len(widths))))
if include_header:
lines.extend([template.format(*r) for r in header_rows])
lines.append(template.format(*("-" * w for w in widths)))
lines.extend([template.format(*r) for r in data_rows])
if include_border:
lines.append("=" * len(template.format(*[""] * len(widths))))
return "\n".join(lines)
def get_indices(ids: Array) -> Dict[Hashable, Array]:
"""get_indices takes a one-dimensional array input and returns a
dictionary such that the keys are the unique values of the array
and the values are the indices where the key appears in the array.
Examples
--------
>>> ids = np.array([1, 2, 1, 2, 3, 3, 1, 2])
>>> get_indices(ids)
{1: array([0, 2, 6]), 2: array([1, 3, 7]), 3: array([4, 5])}
"""
flat = ids.flatten()
sort_indices = flat.argsort(kind='mergesort')
sorted_ids = flat[sort_indices]
changes = np.ones(flat.shape, np.bool)
changes[1:] = sorted_ids[1:] != sorted_ids[:-1]
reduce_indices = np.nonzero(changes)[0]
return dict(zip(sorted_ids[reduce_indices], np.split(sort_indices, reduce_indices)[1:]))
class SolverStats(object):
"""Structured statistics returned by a generic numerical solver."""
converged: bool
iterations: int
evaluations: int
def __init__(self, converged: bool = True, iterations: int = 0, evaluations: int = 0) -> None:
"""Structure the statistics."""
self.converged = converged
self.iterations = iterations
self.evaluations = evaluations
class StringRepresentation(object):
"""Object that defers to its string representation."""
def __repr__(self) -> str:
"""Defer to the string representation."""
return str(self)
class Groups(object):
"""Computation of grouped statistics."""
sort_indices: Array
reduce_indices: Array
unique: Array
codes: Array
counts: Array
group_count: int
def __init__(self, ids: Array) -> None:
"""Sort and index IDs that define groups."""
# sort the IDs
flat = ids.flatten()
self.sort_indices = flat.argsort()
sorted_ids = flat[self.sort_indices]
# identify groups
changes = np.ones(flat.shape, np.bool)
changes[1:] = sorted_ids[1:] != sorted_ids[:-1]
self.reduce_indices = np.nonzero(changes)[0]
self.unique = sorted_ids[self.reduce_indices]
# encode the groups
sorted_codes = np.cumsum(changes) - 1
self.codes = sorted_codes[self.sort_indices.argsort()]
# compute counts
self.group_count = self.reduce_indices.size
self.counts = np.diff(np.append(self.reduce_indices, self.codes.size))
def sum(self, matrix: Array) -> Array:
"""Compute the sum of each group."""
return np.add.reduceat(matrix[self.sort_indices], self.reduce_indices)
def mean(self, matrix: Array) -> Array:
"""Compute the mean of each group."""
return self.sum(matrix) / self.counts[:, None]
def expand(self, statistics: Array) -> Array:
"""Expand statistics for each group to the size of the original matrix."""
return statistics[self.codes]
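# A minimal usage sketch of Groups (hypothetical data, not from this module):
#
#     ids = np.array([1, 2, 1, 2, 3])
#     x = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
#     groups = Groups(ids)
#     groups.mean(x)                   # within-group means: [[2.0], [3.0], [5.0]]
#     groups.expand(groups.mean(x))    # broadcast the group means back to all five rows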
class Error(Exception):
"""Errors that are indistinguishable from others with the same message, which is parsed from the docstring."""
stack: Optional[str]
def __init__(self) -> None:
"""Optionally store the full current traceback for debugging purposes."""
if options.verbose_tracebacks:
self.stack = ''.join(traceback.format_stack())
else:
self.stack = None
def __eq__(self, other: Any) -> bool:
"""Defer to hashes."""
return hash(self) == hash(other)
def __hash__(self) -> int:
"""Hash this instance such that in collections it is indistinguishable from others with the same message."""
return hash((type(self).__name__, str(self)))
def __repr__(self) -> str:
"""Defer to the string representation."""
return str(self)
def __str__(self) -> str:
"""Replace docstring markdown with simple text."""
doc = inspect.getdoc(self)
# normalize LaTeX
while True:
match = re.search(r':math:`([^`]+)`', doc)
if match is None:
break
start, end = match.span()
doc = doc[:start] + re.sub(r'\s+', ' ', re.sub(r'[\\{}]', ' ', match.group(1))).lower() + doc[end:]
# normalize references
while True:
match = re.search(r':ref:`[a-zA-Z0-9]+:([^`]+)`', doc)
if match is None:
break
start, end = match.span()
doc = doc[:start] + re.sub(r'<[^>]+>', '', match.group(1)) + doc[end:]
# remove all remaining domains and compress whitespace
doc = re.sub(r'[\s\n]+', ' ', re.sub(r':[a-z\-]+:|`', '', doc))
# optionally add the full traceback
if self.stack is not None:
doc = f"{doc} Traceback:\n\n{self.stack}\n"
return doc
class NumericalError(Error):
"""Floating point issues."""
_messages: Set[str]
def __init__(self) -> None:
super().__init__()
self._messages: Set[str] = set()
def __str__(self) -> str:
"""Supplement the error with the messages."""
combined = ", ".join(sorted(self._messages))
return f"{super().__str__()} Errors encountered: {combined}."
class MultipleReversionError(Error):
"""Reversion of problematic elements."""
_bad: int
_total: int
def __init__(self, bad_indices: Array) -> None:
"""Store element counts."""
super().__init__()
self._bad = bad_indices.sum()
self._total = bad_indices.size
def __str__(self) -> str:
"""Supplement the error with the counts."""
return f"{super().__str__()} Number of reverted elements: {self._bad} out of {self._total}."
class InversionError(Error):
"""Problems with inverting a matrix."""
_condition: float
def __init__(self, matrix: Array) -> None:
"""Compute condition number of the matrix."""
super().__init__()
from .algebra import compute_condition_number
self._condition = compute_condition_number(matrix)
def __str__(self) -> str:
"""Supplement the error with the condition number."""
return f"{super().__str__()} Condition number: {format_number(self._condition)}."
class InversionReplacementError(InversionError):
"""Problems with inverting a matrix led to the use of a replacement such as an approximation."""
_replacement: str
def __init__(self, matrix: Array, replacement: str) -> None:
"""Store the replacement description."""
super().__init__(matrix)
self._replacement = replacement
def __str__(self) -> str:
"""Supplement the error with the description."""
return f"{super().__str__()} The inverse was replaced with {self._replacement}."
class NumericalErrorHandler(object):
"""Decorator that appends errors to a function's returned list when numerical errors are encountered."""
error: Type[NumericalError]
def __init__(self, error: Type[NumericalError]) -> None:
"""Store the error class."""
self.error = error
def __call__(self, decorated: Callable) -> Callable:
"""Decorate the function."""
@functools.wraps(decorated)
def wrapper(*args: Any, **kwargs: Any) -> Any:
"""Configure NumPy to detect numerical errors."""
detector = NumericalErrorDetector(self.error)
with np.errstate(divide='call', over='call', under='ignore', invalid='call'):
np.seterrcall(detector)
returned = decorated(*args, **kwargs)
if detector.detected is not None:
returned[-1].append(detector.detected)
return returned
return wrapper
class NumericalErrorDetector(object):
"""Error detector to be passed to NumPy's error call function."""
error: Type[NumericalError]
detected: Optional[NumericalError]
def __init__(self, error: Type[NumericalError]) -> None:
"""By default no error is detected."""
self.error = error
self.detected = None
def __call__(self, message: str, _: int) -> None:
"""Initialize the error and store the error message."""
if self.detected is None:
self.detected = self.error()
self.detected._messages.add(message)
|
[
"jeff@jeffgortmaker.com"
] |
jeff@jeffgortmaker.com
|
7315210d96c6476125cd7db17cb0c0168b6d400e
|
740f54d593e3b808c093ca8da2142c5785393df9
|
/nets/VGG16.py
|
db300b1ef3d9fa0be7022015957fc0b5bc9f2fdb
|
[
"MIT"
] |
permissive
|
sinianyutian/ssd-keras
|
d172ad19fde63a5908d3864e6513ddaf3963b5cd
|
6453d8471800c54ab2d10bb9a4adc7016eec666d
|
refs/heads/master
| 2023-04-27T09:55:55.961775
| 2021-05-09T17:02:26
| 2021-05-09T17:02:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,906
|
py
|
import keras.backend as K
from keras.layers import (Activation, Conv2D, Dense, Flatten,
GlobalAveragePooling2D, Input, MaxPooling2D, Reshape,
ZeroPadding2D, concatenate, merge)
from keras.models import Model
def VGG16(input_tensor):
    #---------------------------- backbone feature extraction network: start ----------------------------#
    # SSD structure, stored in the net dict
net = {}
# Block 1
net['input'] = input_tensor
# 300,300,3 -> 150,150,64
net['conv1_1'] = Conv2D(64, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv1_1')(net['input'])
net['conv1_2'] = Conv2D(64, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv1_2')(net['conv1_1'])
net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool1')(net['conv1_2'])
# Block 2
# 150,150,64 -> 75,75,128
net['conv2_1'] = Conv2D(128, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv2_1')(net['pool1'])
net['conv2_2'] = Conv2D(128, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv2_2')(net['conv2_1'])
net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool2')(net['conv2_2'])
# Block 3
# 75,75,128 -> 38,38,256
net['conv3_1'] = Conv2D(256, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv3_1')(net['pool2'])
net['conv3_2'] = Conv2D(256, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv3_2')(net['conv3_1'])
net['conv3_3'] = Conv2D(256, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv3_3')(net['conv3_2'])
net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool3')(net['conv3_3'])
# Block 4
# 38,38,256 -> 19,19,512
net['conv4_1'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv4_1')(net['pool3'])
net['conv4_2'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv4_2')(net['conv4_1'])
net['conv4_3'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv4_3')(net['conv4_2'])
net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool4')(net['conv4_3'])
# Block 5
# 19,19,512 -> 19,19,512
net['conv5_1'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv5_1')(net['pool4'])
net['conv5_2'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv5_2')(net['conv5_1'])
net['conv5_3'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv5_3')(net['conv5_2'])
net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), padding='same',
name='pool5')(net['conv5_3'])
# FC6
# 19,19,512 -> 19,19,1024
net['fc6'] = Conv2D(1024, kernel_size=(3,3), dilation_rate=(6, 6),
activation='relu', padding='same',
name='fc6')(net['pool5'])
# x = Dropout(0.5, name='drop6')(x)
# FC7
# 19,19,1024 -> 19,19,1024
net['fc7'] = Conv2D(1024, kernel_size=(1,1), activation='relu',
padding='same', name='fc7')(net['fc6'])
# x = Dropout(0.5, name='drop7')(x)
# Block 6
    # 19,19,1024 -> 10,10,512
net['conv6_1'] = Conv2D(256, kernel_size=(1,1), activation='relu',
padding='same',
name='conv6_1')(net['fc7'])
net['conv6_2'] = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv6_padding')(net['conv6_1'])
net['conv6_2'] = Conv2D(512, kernel_size=(3,3), strides=(2, 2),
activation='relu',
name='conv6_2')(net['conv6_2'])
# Block 7
# 10,10,512 -> 5,5,256
net['conv7_1'] = Conv2D(128, kernel_size=(1,1), activation='relu',
padding='same',
name='conv7_1')(net['conv6_2'])
net['conv7_2'] = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv7_padding')(net['conv7_1'])
net['conv7_2'] = Conv2D(256, kernel_size=(3,3), strides=(2, 2),
activation='relu', padding='valid',
name='conv7_2')(net['conv7_2'])
# Block 8
# 5,5,256 -> 3,3,256
net['conv8_1'] = Conv2D(128, kernel_size=(1,1), activation='relu',
padding='same',
name='conv8_1')(net['conv7_2'])
net['conv8_2'] = Conv2D(256, kernel_size=(3,3), strides=(1, 1),
activation='relu', padding='valid',
name='conv8_2')(net['conv8_1'])
# Block 9
# 3,3,256 -> 1,1,256
net['conv9_1'] = Conv2D(128, kernel_size=(1,1), activation='relu',
padding='same',
name='conv9_1')(net['conv8_2'])
net['conv9_2'] = Conv2D(256, kernel_size=(3,3), strides=(1, 1),
activation='relu', padding='valid',
name='conv9_2')(net['conv9_1'])
    #---------------------------- backbone feature extraction network: end ----------------------------#
return net
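# A minimal usage sketch (assumes a 300x300x3 input, as the shape comments above do):
#   input_tensor = Input(shape=(300, 300, 3))
#   net = VGG16(input_tensor)
#   model = Model(net['input'], net['conv9_2'])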
|
[
"noreply@github.com"
] |
sinianyutian.noreply@github.com
|
4cbaeb52b5d1c78337c769a9cf8875e0f15e3700
|
55e5926ce5dad5b0509fa9336d2db5c7e1e65183
|
/startcamp/day02/read_csv.py
|
cf0bbbada3ddc7a46dd2d2136dfa8763ab4705f5
|
[] |
no_license
|
bearsgod/TIL
|
cb6d234138cab38a9d83c4fe1c612270ef58d3ed
|
5c9a268be6687db56587df6616edfe78118ea1fe
|
refs/heads/master
| 2020-04-12T00:40:46.825183
| 2019-01-24T08:52:43
| 2019-01-24T08:52:43
| 162,208,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
import csv
with open('lunch.csv','r',encoding='utf8') as f:
# lines = f.readlines()
items = csv.reader(f)
print(items)
for item in items:
print(item)
|
[
"bearsgod@gmail.com"
] |
bearsgod@gmail.com
|
d17cbb74786b60ae862fbc2befee64faddd8662e
|
8fe6993366229375a1f3978be5fda313476648b9
|
/.eggs/PyScaffold-2.5.11-py2.7.egg/pyscaffold/utils.py
|
8d8ce1079434d3c8d6430535e16778de9ba3b78a
|
[] |
no_license
|
ArkhipovK/NER-report3
|
9b6fe6981abc884dec6e48831dff70257ba0efae
|
150f7543050c73a89dc807fafdf75ded8ace25dd
|
refs/heads/master
| 2020-03-31T12:15:46.727011
| 2019-01-22T11:12:52
| 2019-01-22T11:12:52
| 152,209,204
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,538
|
py
|
# -*- coding: utf-8 -*-
"""
Miscellaneous utilities and tools
"""
from __future__ import absolute_import, print_function
import functools
import keyword
import os
import re
import sys
from contextlib import contextmanager
from operator import itemgetter
from distutils.filelist import FileList
from six import PY2
from .templates import licenses
from .contrib import scm_setuptools_too_old
@contextmanager
def chdir(path):
"""Contextmanager to change into a directory
Args:
path (str): path to change current working directory to
"""
curr_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curr_dir)
def is_valid_identifier(string):
"""Check if string is a valid package name
Args:
string (str): package name
Returns:
bool: True if string is valid package name else False
"""
if not re.match("[_A-Za-z][_a-zA-Z0-9]*$", string):
return False
if keyword.iskeyword(string):
return False
return True
def make_valid_identifier(string):
"""Try to make a valid package name identifier from a string
Args:
string (str): invalid package name
Returns:
str: valid package name as string or :obj:`RuntimeError`
Raises:
:obj:`RuntimeError`: raised if identifier can not be converted
"""
string = string.strip()
string = string.replace("-", "_")
string = string.replace(" ", "_")
string = re.sub('[^_a-zA-Z0-9]', '', string)
string = string.lower()
if is_valid_identifier(string):
return string
else:
raise RuntimeError("String cannot be converted to a valid identifier.")
def list2str(lst, indent=0, brackets=True, quotes=True, sep=','):
"""Generate a Python syntax list string with an indention
Args:
lst ([str]): list of strings
indent (int): indention
brackets (bool): surround the list expression by brackets
quotes (bool): surround each item with quotes
sep (str): separator for each item
Returns:
str: string representation of the list
"""
if quotes:
lst_str = str(lst)
if not brackets:
lst_str = lst_str[1:-1]
else:
lst_str = ', '.join(lst)
if brackets:
lst_str = '[' + lst_str + ']'
lb = '{}\n'.format(sep) + indent*' '
return lst_str.replace(', ', lb)
def exceptions2exit(exception_list):
"""Decorator to convert given exceptions to exit messages
This avoids displaying nasty stack traces to end-users
Args:
exception_list [Exception]: list of exceptions to convert
"""
def exceptions2exit_decorator(func):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except tuple(exception_list) as e:
print("ERROR: {}".format(e))
sys.exit(1)
return func_wrapper
return exceptions2exit_decorator
# from http://en.wikibooks.org/, Creative Commons Attribution-ShareAlike 3.0
def levenshtein(s1, s2):
"""Calculate the Levenshtein distance between two strings
Args:
s1 (str): first string
s2 (str): second string
Returns:
int: distance between s1 and s2
"""
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
def best_fit_license(txt):
"""Finds proper license name for the license defined in txt
Args:
txt (str): license name
Returns:
str: license name
"""
ratings = {lic: levenshtein(txt, lic.lower()) for lic in licenses}
return min(ratings.items(), key=itemgetter(1))[0]
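# A rough usage sketch (assumes the canonical names in templates.licenses include an
# "MIT" entry; the exact keys are not shown in this module):
#
#     best_fit_license("mit license")  # expected to return the closest canonical license name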
def utf8_encode(string):
"""Encode a Python 2 unicode object to str for compatibility with Python 3
Args:
string (str): Python 2 unicode object or Python 3 str object
Returns:
str: Python 2 str object or Python 3 str object
"""
return string.encode(encoding='utf8') if PY2 else string
def utf8_decode(string):
"""Decode a Python 2 str object to unicode for compatibility with Python 3
Args:
string (str): Python 2 str object or Python 3 str object
Returns:
str: Python 2 unicode object or Python 3 str object
"""
return string.decode(encoding='utf8') if PY2 else string
def get_files(pattern):
"""Retrieve all files in the current directory by a pattern.
Use ** as greedy wildcard and * as non-greedy wildcard.
Args:
pattern (str): pattern as used by :obj:`distutils.filelist.Filelist`
Returns:
[str]: list of files
"""
filelist = FileList()
if '**' in pattern:
pattern = pattern.replace('**', '*')
anchor = False
else:
anchor = True
filelist.include_pattern(pattern, anchor)
return filelist.files
def prepare_namespace(namespace_str):
"""Check the validity of namespace_str and split it up into a list
Args:
namespace_str (str): namespace, e.g. "com.blue_yonder"
Returns:
[str]: list of namespaces, e.g. ["com", "com.blue_yonder"]
Raises:
:obj:`RuntimeError` : raised if namespace is not valid
"""
namespaces = namespace_str.split('.') if namespace_str else list()
for namespace in namespaces:
if not is_valid_identifier(namespace):
raise RuntimeError(
"{} is not a valid namespace package.".format(namespace))
return ['.'.join(namespaces[:i+1]) for i in range(len(namespaces))]
def check_setuptools_version():
"""Check that setuptools has all necessary capabilities for setuptools_scm
Raises:
:obj:`RuntimeError` : raised if necessary capabilities are not met
"""
if scm_setuptools_too_old:
raise RuntimeError(
"Your setuptools version is too old (<12). "
"Use `pip install -U setuptools` to upgrade.\n"
"If you have the deprecated `distribute` package installed "
"remove it or update to version 0.7.3.")
|
[
"tehbitardcity@gmail.com"
] |
tehbitardcity@gmail.com
|
ba1ff9e538afbada798f8e1f3ad399a00974fdcc
|
bba5101180a75bc1948b7646582ecca39f0e28c1
|
/xitou.py
|
ed56c5e653774dd35611d2eb993a162695ec1ece
|
[] |
no_license
|
neko2048/wrf_analysis_Xitou
|
18ca4fefc2f2d836950270acd33f84fc67b58dd8
|
3f78f415e499ff00b82d86862912976b8cff7667
|
refs/heads/main
| 2023-09-05T02:03:55.913184
| 2021-11-08T17:46:01
| 2021-11-08T17:46:01
| 425,917,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,900
|
py
|
from matplotlib.pyplot import *
import numpy as np
from netCDF4 import Dataset
import pandas as pd
#from wrf import getvar, interplevel
def findArgmin(data, value):
IdxMinflat = np.argmin(abs(data - value))
idxMin = np.unravel_index(IdxMinflat, data.shape)
return idxMin
class WRFData:
def __init__(self, fileDir):
self.filedir = fileDir
self.wrfdata = Dataset(fileDir)
self.xlon = self.getVar('XLONG')#self.wrfdata["XLONG"]
self.xlat = self.getVar('XLAT')#self.wrfdata["XLAT"]
self.pdTimes = self.constructLocalTime()
def delSpinUpTime(self, var):
"""remove the spin up (first 24 hours)"""
var = var[13:]
return var
def getVar(self, varName):
var = np.array(self.wrfdata[varName])
var = self.delSpinUpTime(var)
return var
def UTCtoLocal(self, UTCTimes):
LocalTimes = UTCTimes + pd.Timedelta(8, 'hour')
return LocalTimes
def constructLocalTime(self):
oriTimes = self.getVar('Times')
newTimes = []
for i in range(len(oriTimes)):
newTimes.append(pd.to_datetime("".join(char.decode("utf-8") for char in oriTimes[i]), format="%Y-%m-%d_%H:%M:%S"))
newTimes = pd.DatetimeIndex(newTimes)
LocalTimes = self.UTCtoLocal(newTimes)
return LocalTimes
class XitouData:
def __init__(self, fileDir):
self.wrfdata = WRFData(fileDir)
self.times = self.wrfdata.pdTimes
self.xitouLon = 120.7838246
self.xitouLat = 23.6759616
self.idxLon, self.idxLat = self.getXitouIdx()
self.T2 = self.getVarValue("T2") # K
self.Q2 = self.getVarValue("Q2") # kg / kg
self.Psrf = self.getVarValue("PSFC") # hPa
self.ev = self.Qv2Ev() # hPa
self.evs = self.getEvs() # hPa
self.RH = self.getRH()
def getXitouIdx(self):
idxGridLon = findArgmin(self.wrfdata.xlon, self.xitouLon)[2]
idxGridLat = np.argmin(abs(self.wrfdata.xlat[:, :, idxGridLon] - self.xitouLat))
return idxGridLon, idxGridLat
def getVarValue(self, varName):
VarField = self.wrfdata.getVar(varName)
VarValue = np.array(VarField)[:, self.idxLat, self.idxLon]
return VarValue
def Qv2Ev(self):
ev = self.Psrf/100 * self.Q2 / (self.Q2 + 0.622) # hPa
return ev
def getEvs(self):
"""
use Goff-Gratch, 1946.
input: T (K)
output: es (hPa)
"""
T = self.T2
Tst = 373.15 # boiling point (K)
ln_es = -7.90298 * (Tst / T - 1) + \
5.02808 * np.log10(Tst / T) - \
1.3816e-7 * (10 ** (11.344 * (1 - T / Tst)) -1) + \
8.1328e-3 * (10 ** (-3.49149 * (Tst / T - 1)) - 1) + \
np.log10(1013.25) # saturated vapor pressure (hPa)
es = 10 ** (ln_es)
return es
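    # sanity check for getEvs (approximate value from standard tables, not computed here):
    # at T = 293.15 K (20 degC) the Goff-Gratch formula gives es close to 23.4 hPa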
def getRH(self):
RH = self.ev / self.evs
return RH
class ModeCollector:
def __init__(self):
self.times = []
self.T2s = []
self.Q2s = []
self.RHs = []
def collectTimes(self, value):
self.times.append(value)
def collectT2(self, value):
self.T2s.append(value)
def collectQ2(self, value):
self.Q2s.append(value)
def collectRH(self, value):
self.RHs.append(value)
def collectData(self, PlaceData):
self.collectTimes(PlaceData.times)
self.collectT2(PlaceData.T2)
self.collectQ2(PlaceData.Q2)
self.collectRH(PlaceData.RH)
def squeezeList(self):
self.times = pd.DatetimeIndex(np.hstack(self.times))
self.T2s = np.hstack(self.T2s)
self.Q2s = np.hstack(self.Q2s)
self.RHs = np.hstack(self.RHs)
class DrawSys:
def __init__(self, ModeName, Mode):
self.ModeName = ModeName
self.Mode = Mode
def drawT2(self):
figure(figsize=(20, 8))
grid(True)
for i, mode in enumerate(self.ModeName):
plot(self.Mode[i].times, self.Mode[i].T2s-273, label=self.ModeName[i])
xticks(self.Mode[0].times[::12], ["{MONTH}-{DAY}-{HOUR}".format(MONTH=x.month, DAY=x.day, HOUR=x.hour) for x in self.Mode[0].times[::12]])
legend()
ylim(10, 25)
ylabel(r"[$\degree C$]")
title("T2 from {T1} to {T2}".format(T1=self.Mode[0].times[0], T2=self.Mode[0].times[-1]))
savefig("{MODE}_T2.jpg".format(MODE=self.ModeName[0][0]), dpi=300)
def drawQ2(self):
figure(figsize=(20, 8))
grid(True)
for i, mode in enumerate(self.ModeName):
plot(self.Mode[i].times, self.Mode[i].Q2s, label=self.ModeName[i])
xticks(self.Mode[0].times[::12], ["{MONTH}-{DAY}-{HOUR}".format(MONTH=x.month, DAY=x.day, HOUR=x.hour) for x in self.Mode[0].times[::12]])
legend()
ylim(0.005, 0.020)
ylabel(r"[$kg / kg$]")
title("Q2 from {T1} to {T2}".format(T1=self.Mode[0].times[0], T2=self.Mode[0].times[-1]))
savefig("{MODE}_Q2.jpg".format(MODE=self.ModeName[0][0]), dpi=300)
def drawRH2(self):
figure(figsize=(20, 8))
grid(True)
for i, mode in enumerate(self.ModeName):
plot(self.Mode[i].times, self.Mode[i].RHs*100, label=self.ModeName[i])
xticks(self.Mode[0].times[::12], ["{MONTH}-{DAY}-{HOUR}".format(MONTH=x.month, DAY=x.day, HOUR=x.hour) for x in self.Mode[0].times[::12]])
legend()
ylim(50, 110)
ylabel("%")
title("T2 from {T1} to {T2}".format(T1=self.Mode[0].times[0], T2=self.Mode[0].times[-1]))
savefig("{MODE}_RH.jpg".format(MODE=self.ModeName[0][0]), dpi=300)
if __name__ == "__main__":
#dateList = [x for x in range(15, 25)]
dateList = pd.date_range("20210415T12", periods=9, freq="D")
NC = ModeCollector()
NM = ModeCollector()
NU = ModeCollector()
WC = ModeCollector()
WM = ModeCollector()
WU = ModeCollector()
NModeName = ['NC', 'NM', 'NU']
WModeName = ['WC', 'WM', 'WU']
NMode = [NC, NM, NU]
WMode = [WC, WM, WU]
for i, mode in enumerate(NModeName):
print(mode)
for j, date in enumerate(dateList):
wrf_dir = "/home/twsand/fskao/wrfOUT43v1/{MODE}202104{DATE}/wrfout_d04_2021-04-{DATE}_12:00:00".format(MODE=mode, DATE=date.day)
NMode[i].collectData(XitouData(wrf_dir))
NMode[i].squeezeList()
Ndraw = DrawSys(NModeName, NMode)
Ndraw.drawT2()
Ndraw.drawQ2()
Ndraw.drawRH2()
for i, mode in enumerate(WModeName):
print(mode)
for j, date in enumerate(dateList):
wrf_dir = "/home/twsand/fskao/wrfOUT43v1/{MODE}202104{DATE}/wrfout_d04_2021-04-{DATE}_12:00:00".format(MODE=mode, DATE=date.day)
WMode[i].collectData(XitouData(wrf_dir))
WMode[i].squeezeList()
Wdraw = DrawSys(WModeName, WMode)
Wdraw.drawT2()
Wdraw.drawQ2()
Wdraw.drawRH2()
|
[
"uc@UCdeiMac.local"
] |
uc@UCdeiMac.local
|
85b96a62ddc3f2a8e2f40d803db0074c61582e2c
|
56ec8b23db253964156ed968dfdd1603c249ab87
|
/04-Hybrid/mezcla.py
|
3ff0990316340ff2476336fab01188bfd17ab932
|
[] |
no_license
|
JFCeron/IBIO4490
|
bf186a6402975569ee71325e03f01eeefe92226d
|
91a84d5aa4323509f42d31d6f3e590ea8bde7fc4
|
refs/heads/master
| 2020-04-20T09:07:00.834293
| 2019-05-06T04:57:55
| 2019-05-06T04:57:55
| 168,757,640
| 0
| 0
| null | 2019-02-01T20:42:10
| 2019-02-01T20:42:10
| null |
UTF-8
|
Python
| false
| false
| 2,810
|
py
|
# image manipulation
from PIL import Image
import cv2
import matplotlib.pyplot as plt
# math
import numpy as np
# images to blend
dante = Image.open("dante.jpg")
pato = Image.open("whiteduck.jpg")
# align the images
pato = pato.rotate(-2).crop((100,80,pato.size[0]-75,pato.size[1]-30))
# match the sizes
tamano = (int(np.mean([dante.size[0],pato.size[0]])),int(np.mean([dante.size[1],pato.size[1]])))
dante = dante.resize(tamano)
pato = pato.resize(tamano)
# save the edited duck photo
plt.imshow(pato).write_png('pato_modificado.jpg')
# convert to arrays
dante = np.array(dante)
pato = np.array(pato)
# filter the images
blurrpato = 100
blurrdante = 10
lowpass = cv2.GaussianBlur(dante, ksize=(51,51), sigmaX=blurrdante, sigmaY=blurrdante).astype('int')
highpass = pato - cv2.GaussianBlur(pato, ksize=(51,51), sigmaX=blurrpato, sigmaY=blurrpato).astype('int')
highpass[highpass < 0] = 0
# hybrid image
hibrida = highpass+lowpass
hibrida[hibrida > 255] = 255
hibrida = hibrida.astype('uint8')
plt.imshow(hibrida).write_png('danteduck.jpg')
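# (a hybrid image keeps the low spatial frequencies of one picture and the high
#  frequencies of the other, so it reads as one subject up close and the other from afar)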
# pyramid
altura = 5
espacio = 10
piramide = np.zeros((2*hibrida.shape[0] + espacio*altura,hibrida.shape[1],3)).astype('uint8')+255
piramide[0:hibrida.shape[0],:,:] = hibrida
zoom_actual = hibrida
y_actual = hibrida.shape[0]+espacio
for i in range(1,altura):
zoom_actual = cv2.pyrDown(zoom_actual)
piramide[y_actual:(y_actual+zoom_actual.shape[0]), 0:zoom_actual.shape[1],:] = zoom_actual
y_actual = y_actual+zoom_actual.shape[0]+espacio
plt.imshow(piramide).write_png('piramide.jpg')
# blended: build the Gaussian and Laplacian pyramids
G_dante = []
L_dante = []
G_pato = []
L_pato = []
dante_actual = dante
pato_actual = pato
for i in range(5):
G_dante.append(dante_actual)
G_pato.append(pato_actual)
dante_actual = cv2.pyrDown(dante_actual)
pato_actual = cv2.pyrDown(pato_actual)
L_i_dante = G_dante[i].astype('int') - cv2.pyrUp(dante_actual)[0:G_dante[i].shape[0],0:G_dante[i].shape[1],:].astype('int')
L_i_pato = G_pato[i].astype('int') - cv2.pyrUp(pato_actual)[0:G_pato[i].shape[0],0:G_pato[i].shape[1],:].astype('int')
L_i_dante[L_i_dante < 0] = 0
L_i_pato[L_i_pato < 0] = 0
L_dante.append(L_i_dante.astype('uint8'))
L_pato.append(L_i_pato.astype('uint8'))
# concatenate the Laplacians
concat = []
for i in range(5):
concat_i = L_dante[i]
concat_i[:,0:int(concat_i.shape[1]/2),:] = L_pato[i][:,0:int(concat_i.shape[1]/2),:]
concat.append(concat_i)
# reconstruct the blended image
blended = concat[4]
for i in range(4):
blended = cv2.pyrUp(blended)
if concat[3-i].shape[1]%2 == 1:
blended = cv2.add(blended[:,0:(blended.shape[1]-1),:],concat[3-i])
else:
blended = cv2.add(blended,concat[3-i])
cv2.imwrite('blended.png',blended)
|
[
"juanfceron@gmail.com"
] |
juanfceron@gmail.com
|
f588bf0d916ba7a047741568bb2946f4fd4c309d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/53/usersdata/89/22195/submittedfiles/matriz2.py
|
cd1384dfef6761b0fbf48eaf1aa1f3eaef0a4bc4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def somaDiagonalPrincipal(a):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,i]
return soma
def somaDiagonalSecundaria(a):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,a.shape[0]-i-1]
return soma
def somaLinhas(a):
s=[]
for i in range(0,a.shape[0],1):
soma=0
for j in range(0,a.shape[1],1):
soma=soma+a[i,j]
s.append(soma)
return s
def somaColunas(a):
r=[]
for j in range(0,a.shape[1],1):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,j]
r.append(soma)
return r
def quadradoMagico(a):
sdP=somaDiagonalPrincipal(a)
sdS=somaDiagonalSecundaria(a)
somaL=somaLinhas(a)
somaC=somaColunas(a)
contador=0
for i in range(0,len(somaL),1):
if sdP==sdS==somaL[i]==somaC[i]:
contador=contador+1
if contador==len(somaL):
return True
else:
return False
# main program
n=input('digite o numero de linhas da matriz:')
#n=input('digite o numero de colunas da matriz:')
matriz=np.zeros((n,n))
for i in range(0,matriz.shape[0],1):
for j in range(0,matriz.shape[1],1):
matriz[i,j]=input('digite um elemento da matriz:')
if quadradoMagico(matriz):
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7b47c1b415e3ad729bdce1cdb26e32be6031bda6
|
ef66e297a49d04098d98a711ca3fda7b8a9a657c
|
/snippets/ziroom/detail.py
|
32e2f1c346b125a3b9d7882a7320a3f98a252f9a
|
[] |
no_license
|
breezy1812/MyCodes
|
34940357954dad35ddcf39aa6c9bc9e5cd1748eb
|
9e3d117d17025b3b587c5a80638cb8b3de754195
|
refs/heads/master
| 2020-07-19T13:36:05.270908
| 2018-12-15T08:54:30
| 2018-12-15T08:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
import os
from datetime import datetime
from time import sleep
from random import choice
import requests
from agents import AGENTS
url = 'http://www.ziroom.com/detail/info'
params = {
'id': '61155405',
'house_id': '60185997',
}
headers = {
'User-Agent': choice(AGENTS),
}
while True:
resp = requests.get(url, params=params, headers=headers)
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if resp.status_code != 200:
print(now, 'Failed')
sleep(5)
continue
try:
data = resp.json()['data']
status = data['status']
price = data['price']
print(now, status, price)
if status != 'tzpzz':
break
except Exception:
print(data)
sleep(10)
cmd = os.system('zsh -c "while true;do;afplay /System/Library/Sounds/Ping.aiff -v 30;done"')
|
[
"youchen.du@gmail.com"
] |
youchen.du@gmail.com
|
3487c8f46dd7e6ab9677d94e898b3c7b4ba04217
|
5107ef1b20b2fbc9305bf5b885fc830679f4cfcc
|
/pms/wfpms/wfpms/middlewares.py
|
398402955a73f3c940170ad0297eb5d326101caa
|
[] |
no_license
|
seselinfeng/pms_scrapy
|
a6540ae1550a520baf2da43cc97e02a37041d3ce
|
cf9b83bff2037d8bb4e3c98dcf94a43534e61bc1
|
refs/heads/master
| 2023-01-31T14:14:35.665679
| 2020-12-18T02:46:12
| 2020-12-18T02:46:12
| 321,839,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,751
|
py
|
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import hashlib
from urllib import parse
from scrapy import signals, Request
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from scrapy.http import HtmlResponse
class WfpmsCouponSpiderMiddleware:
def process_request(self, request, spider):
if spider.name == 'wfpms_coupon':
            # load the target page in the Selenium driver
spider.driver.get(request.url)
            iframe = spider.driver.find_element_by_xpath('')
spider.driver.switch_to.frame(iframe)
body = spider.driver.page_source
return HtmlResponse(url=spider.driver.current_url, body=body, encoding='utf-8', request=request)
else:
return None
#
# class SignSpiderMiddleware:
# def process_start_requests(self, start_requests, spider):
# # Called with the start requests of the spider, and works
# # similarly to the process_spider_output() method, except
# # that it doesn’t have a response associated.
#
# # Must return only requests (not items).
# for r in start_requests:
# yield r
#
# def process_request(self, request, spider):
# if spider.name == 'wfpms_test':
# if "http://test.wfpms.com:9000/api/GetDiscoups" in request.url:
# # 解析url
#                 # parse the url
# print(params)
#                 # sort the parameters
# str_list = Sign.para_filter(params)
#                 # concatenate the request string
# params_str = Sign.create_link_string(str_list) + '&token=d5b9fedec0b3ad976842e83313cb2c75d616cafa'
# # 生成签名
#                 # generate the signature
# "login_chainid=440135&login_shift=a&_=1607680643688&token=d5b9fedec0b3ad976842e83313cb2c75d616cafa")
# url = "http://test.wfpms.com:9000/api/GetMebType?" + 'login_chainid=440135&login_shift=A&_=1607680643688&token=145_4239_d197b0ac6cbafe4b680aa3227ddab0411111' + f'&sign={sign}'
# request.replace(url=url)
# print(f"request.url{request.url}")
# return None
# else:
# return None
class WfpmsSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class WfpmsDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"lifq@m3joy.com"
] |
lifq@m3joy.com
|
6a29865b89c71e800aac4732a7ddf91d86ed3c24
|
f937f65707c7b0d6860475cf51eb8c859338559d
|
/stock-market-lstm/model.py
|
ad555a2d048137690c607f5c412291fd28d755e9
|
[] |
no_license
|
shashankaddagarla/lstm
|
2b7458521891aa908c588c39e34c1b354e066d3d
|
f154bf47b142389d1651c076cd9a15b710228a5c
|
refs/heads/master
| 2020-03-27T12:10:21.674717
| 2018-09-21T20:52:19
| 2018-09-21T20:52:19
| 146,530,763
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
# 10-16-18
#
# currently a two-layer lstm model with dropouts (default .25 dropout) after each layer and a dense layer at the end to produce output
import os
import time
import h5py
import numpy as np
import tensorflow as tf
from keras.layers import Activation, Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.models import Sequential
from keras.models import load_model as keras_load_model  # aliased so the load_model() helper below does not shadow it and recurse
from keras import optimizers
activation_function = 'tanh'
loss = 'mae'
optimizer = optimizers.Adam(clipnorm = 1)
dropout = .25
def build_model(layers, activ_func=activation_function, dropout=dropout, optimizer=optimizer):
model = Sequential()
model.add(LSTM(input_shape = (layers[0], layers[1]), return_sequences = True, units= layers[2])) # first layer so required input_shape
model.add(Dropout(dropout))
model.add(LSTM(layers[3], return_sequences = False, activation = activ_func))
model.add(Dropout(dropout))
model.add(Dense(units=layers[4]))
model.add(Activation(activ_func))
start = time.time()
model.compile(loss = loss, optimizer = optimizer)
print('>>> Model compiled! Took {} seconds.'.format(time.time() - start))
return model
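# A minimal usage sketch (the layer sizes below are hypothetical, not from this repo):
#   model = build_model([50, 1, 32, 32, 1])
#   i.e. sequences of 50 timesteps with 1 feature, two LSTM layers of 32 units each,
#   and a single dense output unit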
def save_model(model, name='my_model'):
    model.save(name+'.h5')
    del model
def load_model(name):
    if(os.path.isfile(name+'.h5')):
        return keras_load_model(name+'.h5')
    else:
        print('>>> The specified model cannot be found.')
        return None
|
[
"shshnktj@stanford.edu"
] |
shshnktj@stanford.edu
|
ee4b4bbe9f8ec2bc0058fd892a1893d49a20fc81
|
2f4e21111234e70ba5dc54979411474d74158c31
|
/newMyDataStructureAlgorithm/algorithm/permutation.py
|
e5d5de5e6d74da63b532c6ccba9fe381b2714a94
|
[] |
no_license
|
happydhKim/MyDataStructureAlgorithm
|
d6db625784d085888ecb2b6cef47022518c1f821
|
b47ceb9244c170b4adb6619db581d173b829625e
|
refs/heads/main
| 2022-09-18T15:16:51.704564
| 2022-09-05T14:51:55
| 2022-09-05T14:51:55
| 132,134,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
# https://www.acmicpc.net/problem/15649
from sys import stdout
from itertools import permutations
print = stdout.write
n, m = map(int, input().split())
for k in permutations([i for i in range(1, n+1)], m):
print(' '.join(map(str, k))+'\n')
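# e.g. for the input "3 2" this prints every ordered selection of 2 distinct numbers from 1..3,
# one per line: 1 2 / 1 3 / 2 1 / 2 3 / 3 1 / 3 2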
|
[
"happytime870616@gmail.com"
] |
happytime870616@gmail.com
|
2af0f12e7354e6f077015d9bbceb59604426c06b
|
976f1c40043972e4236c2699b6cf4212c773156a
|
/tags/models.py
|
c4151b5a589017b323d851e7b8e0fc8b3b217269
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
lidel/mmda
|
b13901cf33bd733ed07ff3b624143e02617f7214
|
d605a8c635167e1bd634eb04f4e78b989a7cfb74
|
refs/heads/master
| 2021-01-01T15:24:00.148173
| 2014-11-22T23:07:40
| 2014-11-22T23:07:40
| 817,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
from couchdbkit.ext.django.schema import Document, DictProperty
from mmda.engine.cache import CachedDocument
class CachedTag(Document, CachedDocument):
"""
Contains tag related meta-data fetched from various sources.
"""
cache_state = DictProperty(default={})
|
[
"m@lidel.org"
] |
m@lidel.org
|
85fe6ee3b3d0c84131b9ebdddf4227cfbc835632
|
42c9fc81ffbdb22bfde3976578337a1fe5f17c2a
|
/visualization/rviz_tools_py/setup.py
|
c2244ce79c9c1d742fa1b5b888996b4d42299e3a
|
[
"MIT"
] |
permissive
|
LiuFG/UpdatingHDmapByMonoCamera
|
988524461287bfa8b663cba756a787e4440268ef
|
dc8f06795a12669da1a8096e38851f78b2e26a62
|
refs/heads/master
| 2022-06-01T08:33:53.034821
| 2020-04-26T05:32:38
| 2020-04-26T05:32:38
| 258,131,871
| 0
| 0
|
MIT
| 2020-04-26T05:32:43
| 2020-04-23T07:49:14
|
Python
|
UTF-8
|
Python
| false
| false
| 237
|
py
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=[
'rviz_tools_py',
],
package_dir={'': 'src'},
)
setup(**d)
|
[
"2022087641@qq.com"
] |
2022087641@qq.com
|
f839cb133cafa4a6f32e624b50f22da03c38c619
|
433fd38347ef7cc8e2d2af3fc5f07b88b4278b12
|
/computeroots.py
|
bf2eeb14fc0ed3f807d5bac9ac661b6c38bf6b8d
|
[] |
no_license
|
PyDataWorkshop/IntroPython
|
72ffc4421488f8a84c143bfdfd974919c65b6e9f
|
078ace77ec3dc00a4a8e1a50060748cd05147acb
|
refs/heads/master
| 2021-06-22T04:57:05.987274
| 2017-08-29T11:50:57
| 2017-08-29T11:50:57
| 16,390,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
from math import sqrt
print("Quadratic function : (a * x^2) + b*x + c")
a = float(input("a: "))
b = float(input("b: "))
c = float(input("c: "))
r = b**2 - 4*a*c
if r > 0:
num_roots = 2
x1 = (((-b) + sqrt(r))/(2*a))
x2 = (((-b) - sqrt(r))/(2*a))
print("There are 2 roots: %f and %f" % (x1, x2))
elif r == 0:
num_roots = 1
    x = (-b) / (2*a)
print("There is one root: ", x)
else:
num_roots = 0
print("No roots, discriminant < 0.")
exit()
# See more at: http://www.w3resource.com/python-exercises/math/python-math-exercise-30.php#sthash.gypCq4M0.dpuf
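# Worked example: a=1, b=-3, c=2 gives r = 9 - 8 = 1 > 0,
# so the two roots are x1 = (3 + 1)/2 = 2 and x2 = (3 - 1)/2 = 1.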
|
[
"noreply@github.com"
] |
PyDataWorkshop.noreply@github.com
|
7fc8ab4fa4ffbe0793f8121162c803a687c45882
|
9a1e181a6e7a7a93f387b9269620f204160eb152
|
/backend/accounts/migrations/0007_auto_20210325_2215.py
|
e138eda697922ff3605e57761ee5d12691e41697
|
[] |
no_license
|
ha9011/django_drf_react_gabom
|
b4eb8b475879669047b24a46c9364d337effc208
|
2b29bef085361f3ce6d600c6eb954488d8bbbbed
|
refs/heads/main
| 2023-04-03T02:37:59.311406
| 2021-04-05T12:56:39
| 2021-04-05T12:56:39
| 354,827,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
# Generated by Django 3.0.11 on 2021-03-25 13:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20210306_0104'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, default='test@naver.com', max_length=254),
),
]
|
[
"ha90111@naver.com"
] |
ha90111@naver.com
|
7848ffce0b6988c96aae1efeb96a21b0d8bb93f4
|
c762ab8c1c25ffa97229a62ff43a33543093f963
|
/venv/bin/easy_install
|
06e9fd8505708c760e9975bfea10710ea86251e7
|
[
"Apache-2.0"
] |
permissive
|
nahyunkwon/multi-ttach
|
e68948d66541e85b764216efc54a82f6fc9ac044
|
971d0d93cc39f295deb23ea71146647f6db50ebc
|
refs/heads/master
| 2023-08-09T18:25:31.658950
| 2023-07-24T17:46:04
| 2023-07-24T17:46:04
| 297,783,964
| 0
| 1
|
Apache-2.0
| 2021-04-07T07:46:24
| 2020-09-22T22:08:53
|
G-code
|
UTF-8
|
Python
| false
| false
| 458
|
#!/Users/kwon/PycharmProjects/digital_fabrication_studio/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"skgus2624@gmail.com"
] |
skgus2624@gmail.com
|
|
43b853c93483bb95c43fdbf54259605d1a1f11a8
|
a9a87b95237b11b8808850110e696aa0ca3553a7
|
/usedadvan.py
|
4b64d7e6d8877f1a1a39707ea01924825f82ff09
|
[] |
no_license
|
tpanthera/testpyblock
|
3713873ab49e6fcce4a896a396e26c5794f4c704
|
d14bef2bb25040d6bff788d5f5f0e7e67ada3b04
|
refs/heads/master
| 2020-03-27T17:10:19.983182
| 2018-08-31T06:07:45
| 2018-08-31T06:07:45
| 146,832,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
# Advanced Python concepts
# While solving problems on HackerEarth, this input-parsing idiom was commonly used:
# the code below takes a line of input from the user, strip() removes surrounding whitespace,
# split() splits it on spaces and returns a list, map() converts every element of that list
# to an integer and returns a map object, and the list constructor converts the map object
# back into a list.
test = list(map(int,input().strip().split()))
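# e.g. entering " 3 5 7 " at the prompt leaves test == [3, 5, 7]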
# The publisher/subscriber design pattern was used when connecting to IoT devices over the MQTT protocol.
|
[
"noreply@github.com"
] |
tpanthera.noreply@github.com
|
1f31c768f9d581da1b7ad412a23ad6d4e24ce3e6
|
ede5e159641ba71a1a25a50a1328c11175459cce
|
/day-3/generate.py
|
d2f956f398b09d0fff8c03fbfb6e5c34d5a4d14d
|
[
"BSD-3-Clause"
] |
permissive
|
sbu-python-summer/python-tutorial
|
1cf23c700b7b2588680ad78a06a3582dfcce873b
|
c93ac1d75188d762df7d17df7045af39dbc1bee8
|
refs/heads/master
| 2021-01-20T18:08:21.393092
| 2017-06-16T18:50:06
| 2017-06-16T18:50:06
| 90,908,339
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# generate data for a histogram example
import numpy as np
N = 100
a = 10*np.random.randn(N)
for i in range(N):
print("{} {}".format(i, a[i]))
|
[
"michael.zingale@stonybrook.edu"
] |
michael.zingale@stonybrook.edu
|
a7a7d34db8b105e67c352abaceb211f4a7e084c3
|
a97f789530412fc1cb83170a11811f294b139ee8
|
/疯狂Python讲义/codes/12/12.6/writebytes_test.py
|
36e61f54a785fdf550bc07aebcbd19870b13ad75
|
[] |
no_license
|
baidongbin/python
|
3cebf2cc342a15b38bf20c23f941e6887dac187a
|
1c1398bff1f1820afdd8ddfa0c95ccebb4ee836f
|
refs/heads/master
| 2021-07-21T19:23:32.860444
| 2020-03-07T11:55:30
| 2020-03-07T11:55:30
| 195,909,272
| 0
| 1
| null | 2020-07-21T00:51:24
| 2019-07-09T01:24:31
|
Python
|
UTF-8
|
Python
| false
| false
| 413
|
py
|
import os
f = open('y.txt', 'wb+')
# os.linesep is the line separator of the current operating system
f.write(('我爱Python' + os.linesep).encode('utf-8'))
f.writelines((('土门壁甚坚,' + os.linesep).encode('utf-8'),
('杏园度亦难。' + os.linesep).encode('utf-8'),
('势异邺城下,' + os.linesep).encode('utf-8'),
('纵死时犹宽。' + os.linesep).encode('utf-8')))
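# note: the file was opened in binary mode ('wb+'), so every string must be encoded to bytes before writing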
|
[
"baidongbin@thunisoft.com"
] |
baidongbin@thunisoft.com
|
b577f0d8e1d931be4eb3ff721911ce9e9b179843
|
d1e95aa28914f4ef4a6906b6a70ae9be79b0544d
|
/Spectrum Based Fault Localization/forbes.py
|
5f303701baf931c10df9bbc595602ec8eb510778
|
[] |
no_license
|
Achal-Gupta/SpectrumBasedFaultLocalisation
|
00d886ea71d6d6131b13be4bdc85089d0c5bc813
|
7611e37085f7027be4738fc6dd5c243e3898bd07
|
refs/heads/master
| 2022-08-01T21:36:17.475108
| 2020-05-29T11:57:38
| 2020-05-29T11:57:38
| 267,845,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,530
|
py
|
import sys
import time
from datetime import datetime
start_time=datetime.now()
import pandas as pd
import numpy as np
import math
import os
import csv
cwd =os.getcwd()
version=cwd.split("/")[-1]
program_name=cwd.split("/")[-2].split("_")[0]
print(cwd)
str_cwd=cwd.replace("/"+program_name+"/"+version,"")
print(str_cwd)
f_l=0
start_time=datetime.now()
with open('faultyLine.txt') as f:
f_l = f.readline()
print("**************")
print(f_l)
print("**************")
f_l=int(f_l)
############Original##############
st1 = datetime.now()
df_train=pd.read_csv('statementResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'],1 , inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
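# For every statement (column of x) we count, over all test cases (rows of y):
#   ef = nfailure = failed tests that execute the statement
#   ep = nsuccess = passed tests that execute the statement
#   f  = total failed tests, p = total passed tests
# and score it as sus_score = (f + p) * ef / (f * (ef + ep)),
# so statements exercised mostly by failing runs get a higher suspiciousness.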
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt1= (datetime.now() -st1)
o1=ct3+1
o2=ct3+ct2
############Original with uniqueness##############
st2 = datetime.now()
df_train=pd.read_csv('uniqueResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'],1 , inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt2= (datetime.now() -st2)
o3=ct3+1
o4=ct3+ct2
############Original with slicing##############
st3=datetime.now()
# code for retrieving the sliced data
sdf=pd.read_csv('slice1.csv')
ys=np.array([sdf['In_Slice']]).T
ys=ys.tolist()
df_train=pd.read_csv('statementResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'], axis=1, inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
if ys[i][0]==0:
sus_score=-999
else:
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt3= (datetime.now() -st3)
o5=ct3+1
o6=ct3+ct2
############Original with slicing and uniqueness##############
st4=datetime.now()
#code for retrieving the sliced data
sdf=pd.read_csv('slice1.csv')
ys=np.array([sdf['In_Slice']]).T
ys=ys.tolist()
df_train=pd.read_csv('uniqueResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'], axis=1, inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
if ys[i][0]==0:
sus_score=-999
else:
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt4= (datetime.now() -st4)
o7=ct3+1
o8=ct3+ct2
end_time=datetime.now()
csvfile=open(str_cwd+"/forbes.csv", "a+")
spamwriter1 = csv.writer(csvfile, delimiter=',')
stmt_complex=[]
stmt_complex.append(program_name);
stmt_complex.append(str(version));
#stmt_complex.append(str(sys.argv[1]));
stmt_complex.append(f_l);
stmt_complex.append(o1);
stmt_complex.append(o2);
stmt_complex.append(nwt1);
stmt_complex.append(o3);
stmt_complex.append(o4);
stmt_complex.append(nwt2);
stmt_complex.append(o5);
stmt_complex.append(o6);
stmt_complex.append(nwt3);
stmt_complex.append(o7);
stmt_complex.append(o8);
stmt_complex.append(nwt4);
spamwriter1.writerow(stmt_complex);
|
[
"noreply@github.com"
] |
Achal-Gupta.noreply@github.com
|
8bd350e303191df5a7e316990bd5b28b2a31e3a6
|
7756a341e0034b30a799ad8ef456c6b02565b188
|
/main.py
|
9099e035874bddade9630ce606277f64d883bfbc
|
[] |
no_license
|
AnishN/integration
|
c26cca7aa5af7626110f144b1e8d6eb3a71c1dec
|
b1cab2c5e5df40a68bcc31049b4bac29e5a79ca5
|
refs/heads/master
| 2020-06-16T13:54:13.758520
| 2019-07-09T14:13:56
| 2019-07-09T14:13:56
| 195,599,598
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,536
|
py
|
import timeit
setup_mktout = """
def mktout(mean_mu_alpha, errors, par_gamma):
mu10 = errors[:, 0] * math.exp(mean_mu_alpha[0])
mu11 = math.exp(par_gamma) * mu10 # mu with gamma
mu20 = errors[:, 1] * math.exp(mean_mu_alpha[1])
mu21 = math.exp(par_gamma) * mu20
alpha1 = errors[:, 2] * math.exp(mean_mu_alpha[2])
alpha2 = errors[:, 3] * math.exp(mean_mu_alpha[3])
j_is_larger = (mu10 > mu20)
threshold2 = (1 + mu10 * alpha1) / (168 + alpha1)
j_is_smaller = ~j_is_larger
threshold3 = (1 + mu20 * alpha2) / (168 + alpha2)
case1 = j_is_larger * (mu10 < 1 / 168)
case2 = j_is_larger * (mu21 >= threshold2)
case3 = j_is_larger ^ (case1 | case2)
case4 = j_is_smaller * (mu20 < 1 / 168)
case5 = j_is_smaller * (mu11 >= threshold3)
case6 = j_is_smaller ^ (case4 | case5)
t0 = ne.evaluate("case1*168+case2 * (168 + alpha1 + alpha2) / (1 + mu11 * alpha1 + mu21 * alpha2) +case3 / threshold2 +case4 * 168 +case5 * (168 + alpha1 + alpha2) / (1 + mu11 * alpha1 + mu21 * alpha2) + case6 / threshold3")
t1 = ne.evaluate("case2 * (t0 * alpha1 * mu11 - alpha1) +case3 * (t0 * alpha1 * mu10 - alpha1) +case5 * (t0 * alpha1 * mu11 - alpha1)")
t2 = 168 - t0 - t1
p12 = case2 + case5
p1 = case3 + p12
p2 = case6 + p12
return t1.sum()/10000, t2.sum()/10000, p1.sum()/10000, p2.sum()/10000
"""
setup_code = """
import integrate
import integrate_alt
import integrate_full
import numpy as np
import numexpr as ne
import math
par_mu_rho = 0.8
par_alpha_rho = 0.7
cov_epsilon = [[1, par_mu_rho], [par_mu_rho, 1]]
cov_nu = [[1, par_alpha_rho], [par_alpha_rho, 1]]
nrows = 10000
np.random.seed(123)
epsilon_sim = np.random.multivariate_normal([0, 0], cov_epsilon, nrows)
nu_sim = np.random.multivariate_normal([0, 0], cov_nu, nrows)
errors = np.concatenate((epsilon_sim, nu_sim), axis=1)
errors = np.exp(errors)
"""
setup_mean_mu_alpha = """
out = np.zeros(5, dtype=np.float64)
mean_mu_alpha = np.array([-6,-6,-1,-1], dtype=np.float64)
"""
n = 10000
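# What follows is a timeit comparison of several variants of the same routine:
# `integrate`, `integrate_alt` and `integrate_full` are local modules not shown
# in this file, so only their call signatures are known here; the pure-Python
# `mktout` defined in setup_mktout is the baseline.  The outer_loop* calls run
# once (number=1) over n internal iterations while the mktout* calls run
# number=n times, so the printed totals are comparable; per-call cost is
# roughly out / n in every case, e.g. per_call_seconds = out / n.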
out = timeit.timeit(
stmt="integrate.outer_loop_if([-6,-6,-1,-1], errors, -0.7, n)",
setup=setup_code + "n = {0}".format(n),
number=1,
)
print("outer_loop_if:", out)
out = timeit.timeit(
stmt="integrate.outer_loop([-6,-6,-1,-1], errors, -0.7, n)",
setup=setup_code + "n = {0}".format(n),
number=1,
)
print("outer_loop:", out)
out = timeit.timeit(
stmt="integrate.mktout_if([-6,-6,-1,-1], errors, -0.7)",
setup=setup_code,
number=n,
)
print("mktout_if:", out)
out = timeit.timeit(
stmt="integrate.mktout([-6,-6,-1,-1], errors, -0.7)",
setup=setup_code,
number=n,
)
print("mktout:", out)
out = timeit.timeit(
stmt="integrate_alt.outer_loop_alt([-6,-6,-1,-1], errors, -0.7, n)",
setup=setup_code + "n = {0}".format(n),
number=1,
)
print("outer_loop_alt(mktout2):", out)
out = timeit.timeit(
stmt="integrate_alt.mktout_alt([-6,-6,-1,-1], errors, -0.7)",
setup=setup_code,
number=n,
)
print("mktout2:", out)
out = timeit.timeit(
stmt="integrate_full.mktout_full(mean_mu_alpha, errors, -0.7)",
setup=setup_code + setup_mean_mu_alpha,
number=n,
)
print("mktout_full:", out)
out = timeit.timeit(
stmt="integrate_full.outer_loop_full(out, mean_mu_alpha, errors, -0.7, n)",
setup=setup_code + setup_mean_mu_alpha + "n = {0}".format(n),
number=1,
)
print("outer_loop_full:", out)
out = timeit.timeit(
stmt="mktout([-6,-6,-1,-1], errors, -0.7)",
setup=setup_code + setup_mktout,
number=n,
)
print("python:", out)
|
[
"anish.narayanan32@gmail.com"
] |
anish.narayanan32@gmail.com
|
b0e675a66588e8634b1b1524e860f5399ed48426
|
051910d10f4597cd1148207b1f5f2030c01d7ddf
|
/py/src/consts.py
|
e63df96ef48d5d3fb483637f645611bd156ae172
|
[
"MIT"
] |
permissive
|
LaplaceKorea/rosettaboy
|
831cd285e1a305690f2ee76861ccff91d77fa4a7
|
fb238cb8b73eb7903ce8b9b298896c549e75fccb
|
refs/heads/master
| 2023-08-15T15:41:36.459790
| 2021-10-12T12:13:55
| 2021-10-12T12:13:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
class Mem:
VBLANK_HANDLER = 0x40
LCD_HANDLER = 0x48
TIMER_HANDLER = 0x50
SERIAL_HANDLER = 0x58
JOYPAD_HANDLER = 0x60
VRAM_BASE = 0x8000
TILE_DATA_TABLE_0 = 0x8800
TILE_DATA_TABLE_1 = 0x8000
BACKGROUND_MAP_0 = 0x9800
BACKGROUND_MAP_1 = 0x9C00
WINDOW_MAP_0 = 0x9800
WINDOW_MAP_1 = 0x9C00
OAM_BASE = 0xFE00
JOYP = 0xFF00
SB = 0xFF01 # Serial Data
SC = 0xFF02 # Serial Control
DIV = 0xFF04
TIMA = 0xFF05
TMA = 0xFF06
TAC = 0xFF07
IF = 0xFF0F
NR10 = 0xFF10
NR11 = 0xFF11
NR12 = 0xFF12
NR13 = 0xFF13
NR14 = 0xFF14
NR20 = 0xFF15
NR21 = 0xFF16
NR22 = 0xFF17
NR23 = 0xFF18
NR24 = 0xFF19
NR30 = 0xFF1A
NR31 = 0xFF1B
NR32 = 0xFF1C
NR33 = 0xFF1D
NR34 = 0xFF1E
NR40 = 0xFF1F
NR41 = 0xFF20
NR42 = 0xFF21
NR43 = 0xFF22
NR44 = 0xFF23
NR50 = 0xFF24
NR51 = 0xFF25
NR52 = 0xFF26
LCDC = 0xFF40
STAT = 0xFF41
SCY = 0xFF42 # SCROLL_Y
SCX = 0xFF43 # SCROLL_X
LY = 0xFF44 # LY aka currently drawn line 0-153 >144 = vblank
LCY = 0xFF45
DMA = 0xFF46
BGP = 0xFF47
OBP0 = 0xFF48
OBP1 = 0xFF49
WY = 0xFF4A
WX = 0xFF4B
BOOT = 0xFF50
IE = 0xFFFF
class Interrupt:
VBLANK = 1 << 0
STAT = 1 << 1
TIMER = 1 << 2
SERIAL = 1 << 3
JOYPAD = 1 << 4
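# Illustrative use of these constants (not part of this file; `ram` is an
# assumed byte-array accessor for the Game Boy address space):
#   pending = ram[Mem.IF] & ram[Mem.IE]
#   if pending & Interrupt.VBLANK:
#       jump_to(Mem.VBLANK_HANDLER)   # hypothetical helper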
|
[
"shish@shishnet.org"
] |
shish@shishnet.org
|
f13e42f723dfc011017c86d5d9d5266a52e16dcf
|
ba694353a3cb1cfd02a6773b40f693386d0dba39
|
/sdk/python/pulumi_google_native/dataform/v1beta1/repository_workspace_iam_member.py
|
a82ed59fcb5af3a81d30a219472de2490307c4b6
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-google-native
|
cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b
|
124d255e5b7f5440d1ef63c9a71e4cc1d661cd10
|
refs/heads/master
| 2023-08-25T00:18:00.300230
| 2023-07-20T04:25:48
| 2023-07-20T04:25:48
| 323,680,373
| 69
| 16
|
Apache-2.0
| 2023-09-13T00:28:04
| 2020-12-22T16:39:01
|
Python
|
UTF-8
|
Python
| false
| false
| 13,541
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import iam as _iam
__all__ = ['RepositoryWorkspaceIamMemberArgs', 'RepositoryWorkspaceIamMember']
@pulumi.input_type
class RepositoryWorkspaceIamMemberArgs:
def __init__(__self__, *,
member: pulumi.Input[str],
name: pulumi.Input[str],
role: pulumi.Input[str],
condition: Optional[pulumi.Input['_iam.v1.ConditionArgs']] = None):
"""
The set of arguments for constructing a RepositoryWorkspaceIamMember resource.
:param pulumi.Input[str] member: Identity that will be granted the privilege in role. The entry can have one of the following values:
* user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
* serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
:param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
:param pulumi.Input[str] role: The role that should be applied.
:param pulumi.Input['_iam.v1.ConditionArgs'] condition: An IAM Condition for a given binding.
"""
pulumi.set(__self__, "member", member)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "role", role)
if condition is not None:
pulumi.set(__self__, "condition", condition)
@property
@pulumi.getter
def member(self) -> pulumi.Input[str]:
"""
Identity that will be granted the privilege in role. The entry can have one of the following values:
* user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
* serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
"""
return pulumi.get(self, "member")
@member.setter
def member(self, value: pulumi.Input[str]):
pulumi.set(self, "member", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the resource to manage IAM policies for.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def role(self) -> pulumi.Input[str]:
"""
The role that should be applied.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input[str]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['_iam.v1.ConditionArgs']]:
"""
An IAM Condition for a given binding.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['_iam.v1.ConditionArgs']]):
pulumi.set(self, "condition", value)
class RepositoryWorkspaceIamMember(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
member: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']] condition: An IAM Condition for a given binding.
:param pulumi.Input[str] member: Identity that will be granted the privilege in role. The entry can have one of the following values:
* user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
* serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
* group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
:param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
:param pulumi.Input[str] role: The role that should be applied.
"""
...
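    # Illustrative construction (resource name, IAM member and workspace path are
    # placeholders, and the import path is assumed from this SDK's layout):
    #   import pulumi_google_native as google_native
    #   member = google_native.dataform.v1beta1.RepositoryWorkspaceIamMember(
    #       "workspace-viewer",
    #       name="projects/p/locations/l/repositories/r/workspaces/w",
    #       role="roles/viewer",
    #       member="user:alice@example.com")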
@overload
def __init__(__self__,
resource_name: str,
args: RepositoryWorkspaceIamMemberArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
:param str resource_name: The name of the resource.
:param RepositoryWorkspaceIamMemberArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RepositoryWorkspaceIamMemberArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
member: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RepositoryWorkspaceIamMemberArgs.__new__(RepositoryWorkspaceIamMemberArgs)
__props__.__dict__["condition"] = condition
if member is None and not opts.urn:
raise TypeError("Missing required property 'member'")
__props__.__dict__["member"] = member
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
if role is None and not opts.urn:
raise TypeError("Missing required property 'role'")
__props__.__dict__["role"] = role
__props__.__dict__["etag"] = None
__props__.__dict__["project"] = None
super(RepositoryWorkspaceIamMember, __self__).__init__(
'google-native:dataform/v1beta1:RepositoryWorkspaceIamMember',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RepositoryWorkspaceIamMember':
"""
Get an existing RepositoryWorkspaceIamMember resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RepositoryWorkspaceIamMemberArgs.__new__(RepositoryWorkspaceIamMemberArgs)
__props__.__dict__["condition"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["member"] = None
__props__.__dict__["name"] = None
__props__.__dict__["project"] = None
__props__.__dict__["role"] = None
return RepositoryWorkspaceIamMember(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def condition(self) -> pulumi.Output[Optional['_iam.v1.outputs.Condition']]:
"""
An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
The etag of the resource's IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def member(self) -> pulumi.Output[str]:
"""
Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.
"""
return pulumi.get(self, "member")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource to manage IAM policies for.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The project in which the resource belongs. If it is not provided, a default will be supplied.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def role(self) -> pulumi.Output[str]:
"""
Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
return pulumi.get(self, "role")
|
[
"noreply@github.com"
] |
pulumi.noreply@github.com
|
2b68130fbff02ea4e15ff6aca8bd248f9a3d3f79
|
a0d0dafc26ebfb84ff72c61cb85c587d93257a6c
|
/KIDs/analyze_single_tone.py
|
09573a4659654e3073eecb6f0bce454336a2c07b
|
[] |
no_license
|
rmcgeehan0/submm_python_routines
|
0486d5f874b169629378374058f0fe666ebbb440
|
d37e676dc1946807edaec7143780307e2894c6c6
|
refs/heads/master
| 2020-03-27T09:32:51.519444
| 2018-08-25T20:23:47
| 2018-08-25T20:23:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,470
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from KIDs import resonance_fitting
from KIDs import calibrate
from scipy import interpolate
import pickle
from scipy.stats import binned_statistic
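# Overview inferred from the code below (this module ships without docstrings):
# calibrate_single_tone() takes a fine sweep, a gain sweep and a noise stream
# for one resonator tone, removes the gain amplitude variation and cable delay,
# fits a circle to the resonance, and interpolates stream phase to fractional
# frequency shift df/f; noise() then turns that df/f timestream into binned
# Sxx / per-axis power spectra and saves them to psd.p and psd.pdf.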
def calibrate_single_tone(fine_f,fine_z,gain_f,gain_z,stream_f,stream_z,plot_period = 1,interp = "quadratic"):
fig = plt.figure(3,figsize = (16,10))
plt.subplot(241,aspect = 'equal')
plt.title("Raw data")
    plt.plot(np.real(stream_z[::plot_period]),np.imag(stream_z[::plot_period]),'.')
plt.plot(np.real(fine_z),np.imag(fine_z),'o')
plt.plot(np.real(gain_z),np.imag(gain_z),'o')
plt.subplot(242)
plt.title("Raw data")
plt.plot(np.real(stream_z[::plot_period]),np.imag(stream_z[::plot_period]),'.')
plt.plot(np.real(fine_z),np.imag(fine_z),'o')
    # normalize amplitude variation in the gain scan
amp_norm_dict = resonance_fitting.amplitude_normalization_sep(gain_f,
gain_z,
fine_f,
fine_z,
stream_f,
stream_z)
plt.subplot(243)
plt.title("Gain amplitude variation fit")
plt.plot(gain_f,10*np.log10(np.abs(gain_z)**2),'o')
plt.plot(gain_f,10*np.log10(np.abs(amp_norm_dict['normalized_gain'])**2),'o')
plt.plot(fine_f,10*np.log10(np.abs(amp_norm_dict['normalized_fine'])**2),'o')
plt.plot(gain_f,10*np.log10(np.abs(amp_norm_dict['poly_data'])**2))
plt.subplot(244)
    plt.title("Data normalized for gain amplitude variation")
plt.plot(np.real(amp_norm_dict['normalized_fine']),np.imag(amp_norm_dict['normalized_fine']),'o')
#plt.plot(gain_dict['freqs'][:,k]*10**6,np.log10(np.abs(amp_norm_dict['poly_data'])**2))
plt.plot(np.real(amp_norm_dict['normalized_stream'][::plot_period]),np.imag(amp_norm_dict['normalized_stream'][::plot_period]),'.')
#fit the gain
gain_phase = np.arctan2(np.real(amp_norm_dict['normalized_gain']),np.imag(amp_norm_dict['normalized_gain']))
tau,fit_data_phase,gain_phase_rot = calibrate.fit_cable_delay(gain_f,gain_phase)
plt.subplot(245)
plt.title("Gain phase fit")
plt.plot(gain_f,gain_phase_rot,'o')
plt.plot(gain_f,fit_data_phase)
plt.xlabel("Frequency (MHz)")
plt.ylabel("Phase")
#remove cable delay
gain_corr = calibrate.remove_cable_delay(gain_f,amp_norm_dict['normalized_gain'],tau)
fine_corr = calibrate.remove_cable_delay(fine_f,amp_norm_dict['normalized_fine'],tau)
stream_corr = calibrate.remove_cable_delay(stream_f,amp_norm_dict['normalized_stream'],tau)
plt.subplot(246)
plt.title("Cable delay removed")
plt.plot(np.real(gain_corr),np.imag(gain_corr),'o')
plt.plot(np.real(fine_corr),np.imag(fine_corr),'o')
plt.plot(np.real(stream_corr)[10:-10][::plot_period],np.imag(stream_corr)[10:-10][::plot_period],'.')
    # fit a circle to the data
xc, yc, R, residu = calibrate.leastsq_circle(np.real(fine_corr),np.imag(fine_corr))
#move the data to the origin
gain_corr = gain_corr - xc -1j*yc
fine_corr = fine_corr - xc -1j*yc
stream_corr = stream_corr - xc -1j*yc
# rotate so streaming data is at 0 pi
phase_stream = np.arctan2(np.imag(stream_corr),np.real(stream_corr))
med_phase = np.median(phase_stream)
gain_corr = gain_corr*np.exp(-1j*med_phase)
fine_corr = fine_corr*np.exp(-1j*med_phase)
stream_corr = stream_corr*np.exp(-1j*med_phase)
plt.subplot(247)
plt.title("Moved to 0,0 and rotated")
plt.plot(np.real(stream_corr)[2:-1][::plot_period],np.imag(stream_corr)[2:-1][::plot_period],'.')
plt.plot(np.real(gain_corr),np.imag(gain_corr),'o')
plt.plot(np.real(fine_corr),np.imag(fine_corr),'o')
calibrate.plot_data_circle(np.real(fine_corr)-xc,np.imag(fine_corr)-yc, 0, 0, R)
phase_fine = np.arctan2(np.imag(fine_corr),np.real(fine_corr))
use_index = np.where((-np.pi/2.<phase_fine) & (phase_fine<np.pi/2))
phase_stream = np.arctan2(np.imag(stream_corr),np.real(stream_corr))
#interp phase to frequency
f_interp = interpolate.interp1d(phase_fine, fine_f,kind = interp,bounds_error = False,fill_value = 0)
phase_small = np.linspace(np.min(phase_fine),np.max(phase_fine),1000)
freqs_stream = f_interp(phase_stream)
stream_df_over_f_all = stream_df_over_f = freqs_stream/np.mean(freqs_stream)-1.
plt.subplot(248)
plt.plot(phase_fine,fine_f,'o')
plt.plot(phase_small,f_interp(phase_small),'--')
plt.plot(phase_stream[::plot_period],freqs_stream[::plot_period],'.')
plt.ylim(np.min(freqs_stream)-(np.max(freqs_stream)-np.min(freqs_stream))*3,np.max(freqs_stream)+(np.max(freqs_stream)-np.min(freqs_stream))*3)
plt.xlim(np.min(phase_stream)-np.pi/4,np.max(phase_stream)+np.pi/4)
plt.xlabel("phase")
plt.ylabel("Frequency")
plt.savefig("calibration.pdf")
cal_dict = {'fine_z': fine_z,
'gain_z': gain_z,
'stream_z': stream_z,
'fine_freqs':fine_f,
'gain_freqs':gain_f,
'stream_corr':stream_corr,
'gain_corr':gain_corr,
'fine_corr':fine_corr,
'stream_df_over_f':stream_df_over_f_all}
pickle.dump( cal_dict, open( "cal.p", "wb" ),2 )
return cal_dict
def noise(cal_dict, sample_rate):
fft_freqs,Sxx,S_per,S_par = calibrate.fft_noise(cal_dict['stream_corr'],cal_dict['stream_df_over_f'],sample_rate)
plot_bins = np.logspace(-3,np.log10(300),100)
    binnedfreq = binned_statistic(fft_freqs, fft_freqs, bins=plot_bins)[0] #bin the frequency against itself
binnedpsd = binned_statistic(fft_freqs, np.abs(Sxx), bins=plot_bins)[0]
binnedper = binned_statistic(fft_freqs, np.abs(S_per), bins=plot_bins)[0]
binnedpar = binned_statistic(fft_freqs, np.abs(S_par), bins=plot_bins)[0]
amp_subtracted = np.abs(binnedpsd)*(binnedpar-binnedper)/binnedpar
fig = plt.figure(4,figsize = (16,6))
plt.subplot(122)
plt.title("Sxx")
#plt.loglog(fft_freqs,np.abs(Sxx))
plt.loglog(binnedfreq,np.abs(binnedpsd),linewidth = 2,label = "Sxx raw")
plt.loglog(binnedfreq,amp_subtracted,linewidth = 2,label = "raw amp subtracted")
#plt.ylim(10**-18,10**-15)
plt.ylabel("Sxx (1/Hz)")
plt.xlabel("Frequency (Hz)")
plt.legend()
plt.subplot(121)
#plt.loglog(fft_freqs,S_per)
#plt.loglog(fft_freqs,S_par)
plt.loglog(binnedfreq,binnedper,label = "amp noise")
plt.loglog(binnedfreq,binnedpar,label = "detect noise")
plt.legend()
#plt.ylim(10**2,10**6)
plt.xlabel("Frequency (Hz)")
plt.savefig("psd.pdf")
psd_dict = {'fft_freqs':fft_freqs,
'Sxx':Sxx,
'S_per':S_per,
'S_par':S_par,
'binned_freqs':binnedfreq,
'Sxx_binned':binnedpsd,
'S_per_binned':binnedper,
'S_par_binned':binnedpar,
'amp_subtracted':amp_subtracted}
#save the psd dictionary
pickle.dump( psd_dict, open("psd.p", "wb" ),2 )
return psd_dict
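# Typical call sequence (sketch; the 488.28 Hz sample rate is only an example):
#   cal = calibrate_single_tone(fine_f, fine_z, gain_f, gain_z, stream_f, stream_z)
#   psd = noise(cal, sample_rate=488.28)
# which writes cal.p/psd.p pickles and calibration.pdf/psd.pdf figures.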
|
[
"Wheeler1711@gmail.com"
] |
Wheeler1711@gmail.com
|
12e36e9537e9bd48715cc87299c6710dcc8d2484
|
4341c1c4fbf30032c50b66ca6ac2d4a2bfc0e83a
|
/translate/models.py
|
6d1a98c568bcfc0c779f1c1ebfd40552bd2fb9a1
|
[] |
no_license
|
a574751346/transfer2nl
|
fec566835a62ebdc5388fcfef7526dbe72bf78d7
|
0251655603e2da0c3ca7cf597b2d7c10060804ba
|
refs/heads/master
| 2021-05-23T17:44:56.836659
| 2020-04-09T05:33:57
| 2020-04-09T05:33:57
| 253,404,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47,574
|
py
|
import tensorflow as tf
import math
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell
from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell
import utils, beam_search
def auto_reuse(fun):
"""
Wrapper that automatically handles the `reuse' parameter.
This is rather risky, as it can lead to reusing variables
by mistake.
"""
def fun_(*args, **kwargs):
try:
return fun(*args, **kwargs)
except ValueError as e:
if 'reuse' in str(e):
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
return fun(*args, **kwargs)
else:
raise e
return fun_
get_variable = auto_reuse(tf.get_variable)
dense = auto_reuse(tf.layers.dense)
class CellWrapper(RNNCell):
"""
Wrapper around LayerNormBasicLSTMCell, BasicLSTMCell and MultiRNNCell, to keep
the state_is_tuple=False behavior (soon to be deprecated).
"""
def __init__(self, cell):
super(CellWrapper, self).__init__()
self.cell = cell
self.num_splits = len(cell.state_size) if isinstance(cell.state_size, tuple) else 1
@property
def state_size(self):
return sum(self.cell.state_size)
@property
def output_size(self):
return self.cell.output_size
def __call__(self, inputs, state, scope=None):
state = tf.split(value=state, num_or_size_splits=self.num_splits, axis=1)
new_h, new_state = self.cell(inputs, state, scope=scope)
return new_h, tf.concat(new_state, 1)
def multi_encoder(encoder_inputs, encoders, encoder_input_length, other_inputs=None, **kwargs):
"""
Build multiple encoders according to the configuration in `encoders`, reading from `encoder_inputs`.
The result is a list of the outputs produced by those encoders (for each time-step), and their final state.
:param encoder_inputs: list of tensors of shape (batch_size, input_length), one tensor for each encoder.
:param encoders: list of encoder configurations
:param encoder_input_length: list of tensors of shape (batch_size,) (one tensor for each encoder)
:return:
encoder outputs: a list of tensors of shape (batch_size, input_length, encoder_cell_size), hidden states of the
encoders.
encoder state: concatenation of the final states of all encoders, tensor of shape (batch_size, sum_of_state_sizes)
new_encoder_input_length: list of tensors of shape (batch_size,) with the true length of the encoder outputs.
May be different than `encoder_input_length` because of maxout strides, and time pooling.
"""
encoder_states = []
encoder_outputs = []
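    # Shape sketch with illustrative values: for two encoders, batch_size=32 and
    # input lengths 50/60, encoder_outputs ends up as two tensors of shapes
    # (32, 50, cell_size) and (32, 60, cell_size) (2*cell_size when bidir without
    # bidir_projection), and encoder_state is (32, sum of final-state sizes).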
# create embeddings in the global scope (allows sharing between encoder and decoder)
embedding_variables = []
for encoder in encoders:
if encoder.binary:
embedding_variables.append(None)
continue
# inputs are token ids, which need to be mapped to vectors (embeddings)
embedding_shape = [encoder.vocab_size, encoder.embedding_size]
if encoder.embedding_initializer == 'sqrt3':
initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
else:
initializer = None
device = '/cpu:0' if encoder.embeddings_on_cpu else None
with tf.device(device): # embeddings can take a very large amount of memory, so
# storing them in GPU memory can be impractical
embedding = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape,
initializer=initializer)
embedding_variables.append(embedding)
new_encoder_input_length = []
for i, encoder in enumerate(encoders):
if encoder.use_lstm is False:
encoder.cell_type = 'GRU'
with tf.variable_scope('encoder_{}'.format(encoder.name)):
encoder_inputs_ = encoder_inputs[i]
encoder_input_length_ = encoder_input_length[i]
def get_cell(input_size=None, reuse=False):
if encoder.cell_type.lower() == 'lstm':
cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse))
elif encoder.cell_type.lower() == 'dropoutgru':
cell = DropoutGRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm,
input_size=input_size, input_keep_prob=encoder.rnn_input_keep_prob,
state_keep_prob=encoder.rnn_state_keep_prob)
elif encoder.cell_type.lower() == 'treelstm':
# TODO
cell = None
return
else:
cell = GRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm)
if encoder.use_dropout and encoder.cell_type.lower() != 'dropoutgru':
cell = DropoutWrapper(cell, input_keep_prob=encoder.rnn_input_keep_prob,
output_keep_prob=encoder.rnn_output_keep_prob,
state_keep_prob=encoder.rnn_state_keep_prob,
variational_recurrent=encoder.pervasive_dropout,
dtype=tf.float32, input_size=input_size)
return cell
embedding = embedding_variables[i]
batch_size = tf.shape(encoder_inputs_)[0]
time_steps = tf.shape(encoder_inputs_)[1]
if embedding is not None:
flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)])
flat_inputs = tf.nn.embedding_lookup(embedding, flat_inputs)
encoder_inputs_ = tf.reshape(flat_inputs,
tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value]))
if other_inputs is not None:
encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2)
if encoder.use_dropout:
noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob,
noise_shape=noise_shape)
size = tf.shape(encoder_inputs_)[2]
noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob,
noise_shape=noise_shape)
if encoder.input_layers:
for j, layer_size in enumerate(encoder.input_layers):
if encoder.input_layer_activation is not None and encoder.input_layer_activation.lower() == 'relu':
activation = tf.nn.relu
else:
activation = tf.tanh
encoder_inputs_ = dense(encoder_inputs_, layer_size, activation=activation, use_bias=True,
name='layer_{}'.format(j))
if encoder.use_dropout:
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob)
# Contrary to Theano's RNN implementation, states after the sequence length are zero
# (while Theano repeats last state)
inter_layer_keep_prob = None if not encoder.use_dropout else encoder.inter_layer_keep_prob
parameters = dict(
inputs=encoder_inputs_, sequence_length=encoder_input_length_,
dtype=tf.float32, parallel_iterations=encoder.parallel_iterations
)
input_size = encoder_inputs_.get_shape()[2].value
state_size = (encoder.cell_size * 2 if encoder.cell_type.lower() == 'lstm' else encoder.cell_size)
def get_initial_state(name='initial_state'):
if encoder.train_initial_states:
initial_state = get_variable(name, initializer=tf.zeros(state_size))
return tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
else:
return None
if encoder.bidir:
rnn = lambda reuse: stack_bidirectional_dynamic_rnn(
cells_fw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
for j in range(encoder.layers)],
cells_bw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
for j in range(encoder.layers)],
initial_states_fw=[get_initial_state('initial_state_fw')] * encoder.layers,
initial_states_bw=[get_initial_state('initial_state_bw')] * encoder.layers,
time_pooling=encoder.time_pooling, pooling_avg=encoder.pooling_avg,
**parameters)
initializer = CellInitializer(encoder.cell_size) if encoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
encoder_outputs_, _, encoder_states_ = rnn(reuse=False)
except ValueError: # Multi-task scenario where we're reusing the same RNN parameters
encoder_outputs_, _, encoder_states_ = rnn(reuse=True)
else:
if encoder.time_pooling or encoder.final_state == 'concat_last':
raise NotImplementedError
if encoder.layers > 1:
cell = MultiRNNCell([get_cell(input_size if j == 0 else encoder.cell_size)
for j in range(encoder.layers)])
initial_state = (get_initial_state(),) * encoder.layers
else:
cell = get_cell(input_size)
initial_state = get_initial_state()
encoder_outputs_, encoder_states_ = auto_reuse(tf.nn.dynamic_rnn)(cell=cell,
initial_state=initial_state,
**parameters)
last_backward = encoder_outputs_[:, 0, encoder.cell_size:]
indices = tf.stack([tf.range(batch_size), encoder_input_length_ - 1], axis=1)
last_forward = tf.gather_nd(encoder_outputs_[:, :, :encoder.cell_size], indices)
last_forward.set_shape([None, encoder.cell_size])
if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states)
encoder_state_ = tf.concat(encoder_states_, axis=1)
elif encoder.final_state == 'average':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.final_state == 'average_inputs':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.bidir and encoder.final_state == 'last_both':
encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state
encoder_state_ = last_backward
else: # last forward hidden state
encoder_state_ = last_forward
if encoder.bidir and encoder.bidir_projection:
encoder_outputs_ = dense(encoder_outputs_, encoder.cell_size, use_bias=False, name='bidir_projection')
encoder_outputs.append(encoder_outputs_)
encoder_states.append(encoder_state_)
new_encoder_input_length.append(encoder_input_length_)
encoder_state = tf.concat(encoder_states, 1)
return encoder_outputs, encoder_state, new_encoder_input_length
def compute_energy(hidden, state, attn_size, attn_keep_prob=None, pervasive_dropout=False, layer_norm=False,
mult_attn=False, **kwargs):
if attn_keep_prob is not None:
state_noise_shape = [1, tf.shape(state)[1]] if pervasive_dropout else None
state = tf.nn.dropout(state, keep_prob=attn_keep_prob, noise_shape=state_noise_shape)
hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if pervasive_dropout else None
hidden = tf.nn.dropout(hidden, keep_prob=attn_keep_prob, noise_shape=hidden_noise_shape)
if mult_attn:
state = dense(state, attn_size, use_bias=False, name='state')
hidden = dense(hidden, attn_size, use_bias=False, name='hidden')
return tf.einsum('ijk,ik->ij', hidden, state)
else:
y = dense(state, attn_size, use_bias=not layer_norm, name='W_a')
y = tf.expand_dims(y, axis=1)
if layer_norm:
y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state')
hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden')
f = dense(hidden, attn_size, use_bias=False, name='U_a')
v = get_variable('v_a', [attn_size])
s = f + y
return tf.reduce_sum(v * tf.tanh(s), axis=2)
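# The else-branch above is additive (Bahdanau-style) attention,
#   e_ij = v_a . tanh(W_a s_i + U_a h_j)   (optionally layer-normalised),
# while the mult_attn branch projects state and hidden to attn_size and takes
# their dot product (multiplicative attention).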
def compute_energy_with_filter(hidden, state, prev_weights, attn_filters, attn_filter_length,
**kwargs):
hidden = tf.expand_dims(hidden, 2)
batch_size = tf.shape(hidden)[0]
time_steps = tf.shape(hidden)[1]
attn_size = hidden.get_shape()[3].value
filter_shape = [attn_filter_length * 2 + 1, 1, 1, attn_filters]
filter_ = get_variable('filter', filter_shape)
u = get_variable('U', [attn_filters, attn_size])
prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1]))
conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME')
shape = tf.stack([tf.multiply(batch_size, time_steps), attn_filters])
conv = tf.reshape(conv, shape)
z = tf.matmul(conv, u)
z = tf.reshape(z, tf.stack([batch_size, time_steps, 1, attn_size]))
y = dense(state, attn_size, use_bias=True, name='y')
y = tf.reshape(y, [-1, 1, 1, attn_size])
k = get_variable('W', [attn_size, attn_size])
# dot product between tensors requires reshaping
hidden = tf.reshape(hidden, tf.stack([tf.multiply(batch_size, time_steps), attn_size]))
f = tf.matmul(hidden, k)
f = tf.reshape(f, tf.stack([batch_size, time_steps, 1, attn_size]))
v = get_variable('V', [attn_size])
s = f + y + z
return tf.reduce_sum(v * tf.tanh(s), [2, 3])
def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs):
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
if encoder.attn_filters:
e = compute_energy_with_filter(hidden_states, state, attn_size=encoder.attn_size,
attn_filters=encoder.attn_filters,
attn_filter_length=encoder.attn_filter_length, **kwargs)
else:
e = compute_energy(hidden_states, state, attn_size=encoder.attn_size,
attn_keep_prob=encoder.attn_keep_prob, pervasive_dropout=encoder.pervasive_dropout,
layer_norm=encoder.layer_norm, mult_attn=encoder.mult_attn, **kwargs)
e -= tf.reduce_max(e, axis=1, keep_dims=True)
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)
T = encoder.attn_temperature or 1.0
exp = tf.exp(e / T) * mask
weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)
return weighted_average, weights
def no_attention(state, hidden_states, *args, **kwargs):
batch_size = tf.shape(state)[0]
weighted_average = tf.zeros(shape=tf.stack([batch_size, 0]))
weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]])
return weighted_average, weights
def average_attention(hidden_states, encoder_input_length, *args, **kwargs):
# attention with fixed weights (average of all hidden states)
lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1])
weights = tf.to_float(mask) / lengths
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1])
weights = tf.to_float(weights)
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None,
context=None, **kwargs):
batch_size = tf.shape(state)[0]
attn_length = tf.shape(hidden_states)[1]
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
state_size = state.get_shape()[1].value
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
if pos is not None:
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
if pos is not None and encoder.attn_window_size > 0:
# `pred_edits` scenario, where we know the aligned pos
# when the windows size is non-zero, we concatenate consecutive encoder states
# and map it to the right attention vector size.
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = []
for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
pos_ = pos + offset
pos_ = tf.minimum(pos_, encoder_input_length - 1)
pos_ = tf.maximum(pos_, 0) # TODO: when pos is < 0, use <S> or </S>
weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
weighted_average.append(weighted_average_)
weighted_average = tf.concat(weighted_average, axis=1)
weighted_average = dense(weighted_average, encoder.attn_size)
elif pos is not None:
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
else:
# Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
wp = get_variable('Wp', [state_size, state_size])
vp = get_variable('vp', [state_size, 1])
pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
pos = tf.floor(encoder_input_length * pos)
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
idx = tf.reshape(idx, [-1, attn_length])
low = pos - encoder.attn_window_size
high = pos + encoder.attn_window_size
mlow = tf.to_float(idx < low)
mhigh = tf.to_float(idx > high)
m = mlow + mhigh
m += tf.to_float(idx >= encoder_input_length)
mask = tf.to_float(tf.equal(m, 0.0))
e = compute_energy(hidden_states, state, attn_size=encoder.attn_size, **kwargs)
weights = softmax(e, mask=mask)
sigma = encoder.attn_window_size / 2
numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
div = tf.truediv(numerator, 2 * sigma ** 2)
weights *= tf.exp(div) # result of the truncated normal distribution
# normalize to keep a probability distribution
# weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
return weighted_average, weights
def attention(encoder, **kwargs):
attention_functions = {
'global': global_attention,
'local': local_attention,
'none': no_attention,
'average': average_attention,
'last_state': last_state_attention
}
attention_function = attention_functions.get(encoder.attention_type, global_attention)
return attention_function(encoder=encoder, **kwargs)
def multi_attention(state, hidden_states, encoders, encoder_input_length, pos=None, aggregation_method='sum',
prev_weights=None, **kwargs):
attns = []
weights = []
context_vector = None
for i, (hidden, encoder, input_length) in enumerate(zip(hidden_states, encoders, encoder_input_length)):
pos_ = pos[i] if pos is not None else None
prev_weights_ = prev_weights[i] if prev_weights is not None else None
hidden = beam_search.resize_like(hidden, state)
input_length = beam_search.resize_like(input_length, state)
context_vector, weights_ = attention(state=state, hidden_states=hidden, encoder=encoder,
encoder_input_length=input_length, pos=pos_, context=context_vector,
prev_weights=prev_weights_, **kwargs)
attns.append(context_vector)
weights.append(weights_)
if aggregation_method == 'sum':
context_vector = tf.reduce_sum(tf.stack(attns, axis=2), axis=2)
else:
context_vector = tf.concat(attns, axis=1)
return context_vector, weights
def attention_decoder(decoder_inputs, initial_state, attention_states, encoders, decoder, encoder_input_length,
feed_previous=0.0, align_encoder_id=0, feed_argmax=True, **kwargs):
"""
:param decoder_inputs: int32 tensor of shape (batch_size, output_length)
:param initial_state: initial state of the decoder (usually the final state of the encoder),
as a float32 tensor of shape (batch_size, initial_state_size). This state is mapped to the
correct state size for the decoder.
:param attention_states: list of tensors of shape (batch_size, input_length, encoder_cell_size),
the hidden states of the encoder(s) (one tensor for each encoder).
:param encoders: configuration of the encoders
:param decoder: configuration of the decoder
:param encoder_input_length: list of int32 tensors of shape (batch_size,), tells for each encoder,
the true length of each sequence in the batch (sequences in the same batch are padded to all have the same
length).
:param feed_previous: scalar tensor corresponding to the probability to use previous decoder output
instead of the ground truth as input for the decoder (1 when decoding, between 0 and 1 when training)
:param feed_argmax: boolean tensor, when True the greedy decoder outputs the word with the highest
probability (argmax). When False, it samples a word from the probability distribution (softmax).
:param align_encoder_id: outputs attention weights for this encoder. Also used when predicting edit operations
    (pred_edits), to specify which encoder reads the sequence to post-edit (MT).
:return:
outputs of the decoder as a tensor of shape (batch_size, output_length, decoder_cell_size)
attention weights as a tensor of shape (output_length, encoders, batch_size, input_length)
"""
assert not decoder.pred_maxout_layer or decoder.cell_size % 2 == 0, 'cell size must be a multiple of 2'
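    # Note on feed_previous (per the docstring above): during training it is a
    # probability, so e.g. feed_previous=0.25 means each decoding step has
    # roughly a 25% chance of being fed the decoder's own prediction instead of
    # the ground-truth token (scheduled-sampling style); at test time it is 1.0.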
if decoder.use_lstm is False:
decoder.cell_type = 'GRU'
embedding_shape = [decoder.vocab_size, decoder.embedding_size]
if decoder.embedding_initializer == 'sqrt3':
initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
else:
initializer = None
device = '/cpu:0' if decoder.embeddings_on_cpu else None
with tf.device(device):
embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer)
input_shape = tf.shape(decoder_inputs)
batch_size = input_shape[0]
time_steps = input_shape[1]
scope_name = 'decoder_{}'.format(decoder.name)
scope_name += '/' + '_'.join(encoder.name for encoder in encoders)
def embed(input_):
embedded_input = tf.nn.embedding_lookup(embedding, input_)
if decoder.use_dropout and decoder.word_keep_prob is not None:
noise_shape = [1, 1] if decoder.pervasive_dropout else [batch_size, 1]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape)
if decoder.use_dropout and decoder.embedding_keep_prob is not None:
size = tf.shape(embedded_input)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else [batch_size, size]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob,
noise_shape=noise_shape)
return embedded_input
def get_cell(input_size=None, reuse=False):
cells = []
for j in range(decoder.layers):
input_size_ = input_size if j == 0 else decoder.cell_size
if decoder.cell_type.lower() == 'lstm':
cell = CellWrapper(BasicLSTMCell(decoder.cell_size, reuse=reuse))
elif decoder.cell_type.lower() == 'dropoutgru':
cell = DropoutGRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm,
input_size=input_size_, input_keep_prob=decoder.rnn_input_keep_prob,
state_keep_prob=decoder.rnn_state_keep_prob)
else:
cell = GRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm)
if decoder.use_dropout and decoder.cell_type.lower() != 'dropoutgru':
cell = DropoutWrapper(cell, input_keep_prob=decoder.rnn_input_keep_prob,
output_keep_prob=decoder.rnn_output_keep_prob,
state_keep_prob=decoder.rnn_state_keep_prob,
variational_recurrent=decoder.pervasive_dropout,
dtype=tf.float32, input_size=input_size_)
cells.append(cell)
if len(cells) == 1:
return cells[0]
else:
return CellWrapper(MultiRNNCell(cells))
def look(state, input_, prev_weights=None, pos=None):
prev_weights_ = [prev_weights if i == align_encoder_id else None for i in range(len(encoders))]
pos_ = None
if decoder.pred_edits:
pos_ = [pos if i == align_encoder_id else None for i in range(len(encoders))]
if decoder.attn_prev_word:
state = tf.concat([state, input_], axis=1)
parameters = dict(hidden_states=attention_states, encoder_input_length=encoder_input_length,
encoders=encoders, aggregation_method=decoder.aggregation_method)
context, new_weights = multi_attention(state, pos=pos_, prev_weights=prev_weights_, **parameters)
if decoder.context_mapping:
with tf.variable_scope(scope_name):
activation = tf.nn.tanh if decoder.context_mapping_activation == 'tanh' else None
use_bias = not decoder.context_mapping_no_bias
context = dense(context, decoder.context_mapping, use_bias=use_bias, activation=activation,
name='context_mapping')
return context, new_weights[align_encoder_id]
def update(state, input_, context=None, symbol=None):
if context is not None and decoder.rnn_feed_attn:
input_ = tf.concat([input_, context], axis=1)
input_size = input_.get_shape()[1].value
initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
output, new_state = get_cell(input_size)(input_, state)
except ValueError: # auto_reuse doesn't work with LSTM cells
output, new_state = get_cell(input_size, reuse=True)(input_, state)
if decoder.skip_update and decoder.pred_edits and symbol is not None:
is_del = tf.equal(symbol, utils.DEL_ID)
new_state = tf.where(is_del, state, new_state)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
output = new_state
return output, new_state
def update_pos(pos, symbol, max_pos=None):
if not decoder.pred_edits:
return pos
is_keep = tf.equal(symbol, utils.KEEP_ID)
is_del = tf.equal(symbol, utils.DEL_ID)
is_not_ins = tf.logical_or(is_keep, is_del)
pos = beam_search.resize_like(pos, symbol)
max_pos = beam_search.resize_like(max_pos, symbol)
pos += tf.to_float(is_not_ins)
if max_pos is not None:
pos = tf.minimum(pos, tf.to_float(max_pos))
return pos
def generate(state, input_, context):
if decoder.pred_use_lstm_state is False: # for back-compatibility
state = state[:,-decoder.cell_size:]
projection_input = [state, context]
if decoder.use_previous_word:
projection_input.insert(1, input_) # for back-compatibility
output_ = tf.concat(projection_input, axis=1)
if decoder.pred_deep_layer:
deep_layer_size = decoder.pred_deep_layer_size or decoder.embedding_size
if decoder.layer_norm:
output_ = dense(output_, deep_layer_size, use_bias=False, name='deep_output')
output_ = tf.contrib.layers.layer_norm(output_, activation_fn=tf.nn.tanh, scope='output_layer_norm')
else:
output_ = dense(output_, deep_layer_size, activation=tf.tanh, use_bias=True, name='deep_output')
if decoder.use_dropout:
size = tf.shape(output_)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else None
output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape)
else:
if decoder.pred_maxout_layer:
maxout_size = decoder.maxout_size or decoder.cell_size
output_ = dense(output_, maxout_size, use_bias=True, name='maxout')
if decoder.old_maxout: # for back-compatibility with old models
output_ = tf.nn.pool(tf.expand_dims(output_, axis=2), window_shape=[2], pooling_type='MAX',
padding='SAME', strides=[2])
output_ = tf.squeeze(output_, axis=2)
else:
output_ = tf.maximum(*tf.split(output_, num_or_size_splits=2, axis=1))
if decoder.pred_embed_proj:
# intermediate projection to embedding size (before projecting to vocabulary size)
# this is useful to reduce the number of parameters, and
# to use the output embeddings for output projection (tie_embeddings parameter)
output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0')
if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer):
bias = get_variable('softmax1/bias', shape=[decoder.vocab_size])
output_ = tf.matmul(output_, tf.transpose(embedding)) + bias
else:
output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')
return output_
state_size = (decoder.cell_size * 2 if decoder.cell_type.lower() == 'lstm' else decoder.cell_size) * decoder.layers
if decoder.use_dropout:
initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob)
with tf.variable_scope(scope_name):
if decoder.layer_norm:
initial_state = dense(initial_state, state_size, use_bias=False, name='initial_state_projection')
initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=tf.nn.tanh,
scope='initial_state_layer_norm')
else:
initial_state = dense(initial_state, state_size, use_bias=True, name='initial_state_projection',
activation=tf.nn.tanh)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
initial_output = initial_state
else:
initial_output = initial_state[:, -decoder.cell_size:]
time = tf.constant(0, dtype=tf.int32, name='time')
outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))
states = tf.TensorArray(dtype=tf.float32, size=time_steps)
weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
attns = tf.TensorArray(dtype=tf.float32, size=time_steps)
initial_symbol = inputs.read(0) # first symbol is BOS
initial_input = embed(initial_symbol)
initial_pos = tf.zeros([batch_size], tf.float32)
initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
initial_context, _ = look(initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights)
initial_data = tf.concat([initial_state, initial_context, tf.expand_dims(initial_pos, axis=1), initial_weights],
axis=1)
context_size = initial_context.shape[1].value
def get_logits(state, ids, time): # for beam-search decoding
with tf.variable_scope('decoder_{}'.format(decoder.name)):
state, context, pos, prev_weights = tf.split(state, [state_size, context_size, 1, -1], axis=1)
input_ = embed(ids)
pos = tf.squeeze(pos, axis=1)
pos = tf.cond(tf.equal(time, 0),
lambda: pos,
lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
output = state
else:
# output is always the right-most part of state. However, this only works at test time,
# because different dropout operations can be used on state and output.
output = state[:, -decoder.cell_size:]
if decoder.conditional_rnn:
with tf.variable_scope('conditional_1'):
output, state = update(state, input_)
elif decoder.update_first:
output, state = update(state, input_, None, ids)
elif decoder.generate_first:
output, state = tf.cond(tf.equal(time, 0),
lambda: (output, state),
lambda: update(state, input_, context, ids))
context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)
if decoder.conditional_rnn:
with tf.variable_scope('conditional_2'):
output, state = update(state, context)
elif not decoder.generate_first:
output, state = update(state, input_, context, ids)
logits = generate(output, input_, context)
pos = tf.expand_dims(pos, axis=1)
state = tf.concat([state, context, pos, new_weights], axis=1)
return state, logits
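    # Body of the decoding loop: attend over the encoder states, update the RNN state, generate the
    # logits for this step, then choose the next input symbol (ground truth, argmax or sampled,
    # depending on feed_previous / feed_argmax).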
def _time_step(time, input_, input_symbol, pos, state, output, outputs, states, weights, attns, prev_weights,
samples):
if decoder.conditional_rnn:
with tf.variable_scope('conditional_1'):
output, state = update(state, input_)
elif decoder.update_first:
output, state = update(state, input_, None, input_symbol)
context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)
if decoder.conditional_rnn:
with tf.variable_scope('conditional_2'):
output, state = update(state, context)
elif not decoder.generate_first:
output, state = update(state, input_, context, input_symbol)
output_ = generate(output, input_, context)
argmax = lambda: tf.argmax(output_, 1)
target = lambda: inputs.read(time + 1)
softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
axis=1)
use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
predicted_symbol = tf.case([
(use_target, target),
(tf.logical_not(feed_argmax), softmax)],
default=argmax) # default case is useful for beam-search
predicted_symbol.set_shape([None])
predicted_symbol = tf.stop_gradient(predicted_symbol)
samples = samples.write(time, predicted_symbol)
input_ = embed(predicted_symbol)
pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])
attns = attns.write(time, context)
weights = weights.write(time, new_weights)
states = states.write(time, state)
outputs = outputs.write(time, output_)
if not decoder.conditional_rnn and not decoder.update_first and decoder.generate_first:
output, state = update(state, input_, context, predicted_symbol)
return (time + 1, input_, predicted_symbol, pos, state, output, outputs, states, weights, attns, new_weights,
samples)
with tf.variable_scope('decoder_{}'.format(decoder.name)):
_, _, _, new_pos, new_state, _, outputs, states, weights, attns, new_weights, samples = tf.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
            loop_vars=(time, initial_input, initial_symbol, initial_pos, initial_state, initial_output, outputs,
                       states, weights, attns, initial_weights, samples),  # same order as _time_step's arguments
parallel_iterations=decoder.parallel_iterations,
swap_memory=decoder.swap_memory)
outputs = outputs.stack()
weights = weights.stack() # batch_size, encoders, output time, input time
states = states.stack()
attns = attns.stack()
samples = samples.stack()
# put batch_size as first dimension
outputs = tf.transpose(outputs, perm=(1, 0, 2))
weights = tf.transpose(weights, perm=(1, 0, 2))
states = tf.transpose(states, perm=(1, 0, 2))
attns = tf.transpose(attns, perm=(1, 0, 2))
samples = tf.transpose(samples)
return outputs, weights, states, attns, samples, get_logits, initial_data
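# Standard single-decoder setup: encode all inputs, decode the targets with attention, and return the
# cross-entropy loss together with the tensors needed for sampling and beam-search decoding.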
def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, align_encoder_id=0,
encoder_input_length=None, feed_argmax=True, **kwargs):
decoder = decoders[0]
targets = targets[0] # single decoder
if encoder_input_length is None:
encoder_input_length = []
for encoder_inputs_ in encoder_inputs:
weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))
parameters = dict(encoders=encoders, decoder=decoder, encoder_inputs=encoder_inputs,
feed_argmax=feed_argmax)
target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
attention_states, encoder_state, encoder_input_length = multi_encoder(
encoder_input_length=encoder_input_length, **parameters)
outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
decoder_inputs=targets[:, :-1], align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length,
**parameters
)
xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=target_weights)
losses = xent_loss
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
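# Two-stage ("chained") model: an intermediate decoder (reusing encoders[0]'s config) reconstructs the
# first input sequence from encoders[1:]; its states / attention vectors are then injected into the
# main decoder according to chaining_strategy, with an auxiliary loss scaled by chaining_loss_ratio.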
def chained_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
chaining_strategy=None, align_encoder_id=0, chaining_non_linearity=False,
chaining_loss_ratio=1.0, chaining_stop_gradient=False, **kwargs):
decoder = decoders[0]
targets = targets[0] # single decoder
assert len(encoders) == 2
encoder_input_length = []
input_weights = []
for encoder_inputs_ in encoder_inputs:
weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
input_weights.append(weights)
encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))
target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
parameters = dict(encoders=encoders[1:], decoder=encoders[0])
attention_states, encoder_state, encoder_input_length[1:] = multi_encoder(
encoder_inputs[1:], encoder_input_length=encoder_input_length[1:], **parameters)
decoder_inputs = encoder_inputs[0][:, :-1]
batch_size = tf.shape(decoder_inputs)[0]
pad = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32) * utils.BOS_ID
decoder_inputs = tf.concat([pad, decoder_inputs], axis=1)
outputs, _, states, attns, _, _, _ = attention_decoder(
attention_states=attention_states, initial_state=encoder_state, decoder_inputs=decoder_inputs,
encoder_input_length=encoder_input_length[1:], **parameters
)
chaining_loss = sequence_loss(logits=outputs, targets=encoder_inputs[0], weights=input_weights[0])
if decoder.cell_type.lower() == 'lstm':
size = states.get_shape()[2].value
decoder_outputs = states[:, :, size // 2:]
else:
decoder_outputs = states
if chaining_strategy == 'share_states':
other_inputs = states
elif chaining_strategy == 'share_outputs':
other_inputs = decoder_outputs
else:
other_inputs = None
if other_inputs is not None and chaining_stop_gradient:
other_inputs = tf.stop_gradient(other_inputs)
parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1],
other_inputs=other_inputs)
attention_states, encoder_state, encoder_input_length[:1] = multi_encoder(
encoder_input_length=encoder_input_length[:1], **parameters)
if chaining_stop_gradient:
attns = tf.stop_gradient(attns)
states = tf.stop_gradient(states)
decoder_outputs = tf.stop_gradient(decoder_outputs)
if chaining_strategy == 'concat_attns':
attention_states[0] = tf.concat([attention_states[0], attns], axis=2)
elif chaining_strategy == 'concat_states':
attention_states[0] = tf.concat([attention_states[0], states], axis=2)
elif chaining_strategy == 'sum_attns':
attention_states[0] += attns
elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'):
if chaining_strategy == 'map_attns':
x = attns
elif chaining_strategy == 'map_outputs':
x = decoder_outputs
else:
x = states
shape = [x.get_shape()[-1], attention_states[0].get_shape()[-1]]
w = tf.get_variable("map_attns/matrix", shape=shape)
b = tf.get_variable("map_attns/bias", shape=shape[-1:])
x = tf.einsum('ijk,kl->ijl', x, w) + b
if chaining_non_linearity:
x = tf.nn.tanh(x)
attention_states[0] += x
outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
attention_states=attention_states, initial_state=encoder_state,
feed_previous=feed_previous, decoder_inputs=targets[:,:-1],
align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length[:1],
**parameters
)
xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:],
weights=target_weights)
if chaining_loss is not None and chaining_loss_ratio:
xent_loss += chaining_loss_ratio * chaining_loss
losses = [xent_loss, None, None]
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
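# Masked softmax: an optional binary mask zeroes out positions before normalizing; the denominator is
# clipped to avoid division by zero when a row is fully masked.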
def softmax(logits, dim=-1, mask=None):
e = tf.exp(logits)
if mask is not None:
e *= mask
return e / tf.clip_by_value(tf.reduce_sum(e, axis=dim, keep_dims=True), 10e-37, 10e+37)
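# Cross-entropy over a batch of sequences; `weights` masks out padding, with optional averaging over
# time steps and/or over the batch.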
def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True):
batch_size = tf.shape(targets)[0]
time_steps = tf.shape(targets)[1]
logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_)
crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps]))
log_perp = tf.reduce_sum(crossent * weights, axis=1)
if average_across_timesteps:
total_size = tf.reduce_sum(weights, axis=1)
total_size += 1e-12 # just to avoid division by 0 for all-0 weights
log_perp /= total_size
cost = tf.reduce_sum(log_perp)
if average_across_batch:
return cost / tf.to_float(batch_size)
else:
return cost
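# Returns a 0/1 mask over the sequence: 1 for every token up to the first EOS (the EOS itself is
# included when include_first_eos=True), 0 afterwards.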
def get_weights(sequence, eos_id, include_first_eos=True):
cumsum = tf.cumsum(tf.to_float(tf.not_equal(sequence, eos_id)), axis=1)
range_ = tf.range(start=1, limit=tf.shape(sequence)[1] + 1)
range_ = tf.tile(tf.expand_dims(range_, axis=0), [tf.shape(sequence)[0], 1])
weights = tf.to_float(tf.equal(cumsum, tf.to_float(range_)))
if include_first_eos:
weights = weights[:,:-1]
shape = [tf.shape(weights)[0], 1]
weights = tf.concat([tf.ones(tf.stack(shape)), weights], axis=1)
return tf.stop_gradient(weights)
|
[
"574751346@qq.com"
] |
574751346@qq.com
|
1e24144a7cf422eb8aed4964ee92309dbd9dafec
|
6b1dd40d16ae6169e7ed780c5062e88d10502c85
|
/Demo/Caffe-demo/demo_train.py
|
1e72565d91468c3f044f049942af2e183e29f6f1
|
[
"MIT"
] |
permissive
|
hehuanlin123/DeepLearning
|
8a59680a341cfc525d50aa5afc3e44202ca4acc4
|
6b7feabbbde9ac9489f76da4c06eeb6703fb165a
|
refs/heads/master
| 2022-07-12T09:26:08.617883
| 2019-06-10T11:31:37
| 2019-06-10T11:31:37
| 183,748,407
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
import sys
sys.path.append('/home/kuan/AM-softmax_caffe/python')
import caffe
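# Load the solver definition (network + training hyperparameters) and run training with Caffe's SGD solver.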
solver = caffe.SGDSolver("/home/kuan/PycharmProjects/demo_cnn_net/cnn_net/alexnet/solver.prototxt")
solver.solve()
|
[
"hehuanlin@13126771609@163.com"
] |
hehuanlin@13126771609@163.com
|
6c8c5c41b634bf773212a73f580e465cefb4528b
|
0f38803a7536cbff35202d68b0eef948bd628a96
|
/common/datasets/asr/librispeech/oggzip.py
|
a17ef77e60c4187d1caa0b6028beaacda4822d57
|
[] |
no_license
|
jotix16/returnn_common
|
b41523ffebac07f54061a2c16336c56d5f826dce
|
686bcb8b1a42002b8ab1c776b5055569c0f20682
|
refs/heads/main
| 2023-05-12T01:26:31.311641
| 2021-06-03T10:08:21
| 2021-06-03T10:08:21
| 373,462,667
| 0
| 0
| null | 2021-06-03T10:08:21
| 2021-06-03T10:06:02
| null |
UTF-8
|
Python
| false
| false
| 3,736
|
py
|
from __future__ import annotations
from pathlib import Path
from typing import Dict, Any
from ..features import make_gt_features_opts
from .vocabs import bpe1k, bpe10k
from ...interface import DatasetConfig, VocabConfig
from ....data import get_common_data_path
_Parts = [
"train-clean-100", "train-clean-360", "train-other-500",
"dev-clean", "dev-other",
"test-clean", "test-other"]
_norm_stats_dir = Path(__file__).absolute().parent / "norm_stats"
class Librispeech(DatasetConfig):
def __init__(self, *,
audio_dim=50,
audio_norm: str = "per_seq",
vocab: VocabConfig = bpe1k,
train_epoch_split=20, train_random_permute=None):
"""
:param audio_norm: "global" or "per_seq". "global" tries to read from standard location in repo
"""
super(Librispeech, self).__init__()
self.audio_dim = audio_dim
self.audio_norm = audio_norm
self.vocab = vocab
self.train_epoch_split = train_epoch_split
self.train_random_permute = train_random_permute
@classmethod
def old_defaults(cls, audio_dim=40, audio_norm="global", vocab: VocabConfig = bpe10k, **kwargs) -> Librispeech:
return Librispeech(audio_dim=audio_dim, audio_norm=audio_norm, vocab=vocab, **kwargs)
def get_extern_data(self) -> Dict[str, Dict[str, Any]]:
return {
"data": {"dim": self.audio_dim},
"classes": {
"sparse": True,
"dim": self.vocab.get_num_classes(),
"vocab": self.vocab.get_opts()},
}
def get_train_dataset(self) -> Dict[str, Any]:
return self.get_dataset("train", train=True, train_partition_epoch=self.train_epoch_split)
def get_eval_datasets(self) -> Dict[str, Dict[str, Any]]:
return {
"dev": self.get_dataset("dev", train=False, subset=3000),
"devtrain": self.get_dataset("train", train=False, subset=2000)}
def get_dataset(self, key: str, *, train: bool, subset=None, train_partition_epoch=None):
files = []
parts = [part for part in _Parts if part.startswith(key)]
assert parts
for part in parts:
files += [
# (History: Changed data/dataset-ogg -> data-common/librispeech/dataset/dataset-ogg)
get_common_data_path("librispeech/dataset/dataset-ogg/%s.zip" % part),
get_common_data_path("librispeech/dataset/dataset-ogg/%s.txt.gz" % part)]
def _make_norm_arg(k: str):
if self.audio_norm == "per_seq":
return "per_seq"
if self.audio_norm == "global":
return str(_norm_stats_dir / f"stats.{self.audio_dim}.{k}.txt")
if not self.audio_norm:
return None
raise TypeError(f"Invalid audio norm {self.audio_norm}.")
d = {
"class": 'OggZipDataset',
"path": files,
"use_cache_manager": True,
"zip_audio_files_have_name_as_prefix": False,
"targets": self.vocab.get_opts(),
"audio": {
"norm_mean": _make_norm_arg("mean"),
"norm_std_dev": _make_norm_arg("std_dev"),
"num_feature_filters": self.audio_dim},
# make_gt_features_opts(dim=self.audio_dim),
} # type: Dict[str, Any]
if train:
d["partition_epoch"] = train_partition_epoch
if key == "train":
d["epoch_wise_filter"] = {
(1, 5): {'max_mean_len': 200},
(6, 10): {'max_mean_len': 500},
}
if self.train_random_permute:
d["audio"]["random_permute"] = self.train_random_permute
d["seq_ordering"] = "laplace:.1000"
else:
d["targets"]['unknown_label'] = '<unk>' # only for non-train. for train, there never should be an unknown
d["fixed_random_seed"] = 1
d["seq_ordering"] = "sorted_reverse"
if subset:
d["fixed_random_subset"] = subset # faster
return d
|
[
"zeyer@i6.informatik.rwth-aachen.de"
] |
zeyer@i6.informatik.rwth-aachen.de
|
52d1966e0b46fbd58e2c48d3edf2a752d4edf8bf
|
e4382c802e3c6d340d9ed9f2ba4e6c4068b5545b
|
/users/urls.py
|
ba84bbc2250b0a98b02c32e1b00cfea2f4c4d249
|
[] |
no_license
|
Trishala13/COC2
|
23965cc30a9f8aeda468773889611595bda6c6b0
|
e3d3efe5f4fbd282e822b26a0d66bf01bc7f6d02
|
refs/heads/master
| 2021-05-07T02:08:05.692281
| 2017-11-13T06:16:59
| 2017-11-13T06:16:59
| 110,505,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
from django.conf.urls import url
from . import views as user_views
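# Route table for the user-facing views (sign-in/up, complaints, feedback, zone/division lookups,
# garbage-collection forms). Note: the regex patterns are unanchored (no ^ or $).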
urlpatterns = [
url(r'user_site', user_views.user_site),
url(r'sign-in', user_views.sign_in),
url(r'sign-up', user_views.sign_up),
url(r'sign_up_form',user_views.sign_up_form),
url(r'sign_in_form',user_views.sign_in_form),
url(r'complaint_form',user_views.complaint_form),
url(r'complaint',user_views.complaint),
url(r'zone_fill', user_views.zone_fill),
url(r'zone', user_views.zone),
url(r'resubmit',user_views.resubmit),
url(r'feedback_form',user_views.feedback_form),
url(r'update_fill',user_views.update_fill),
url(r'update',user_views.update),
url(r'division_fill',user_views.division_fill),
url(r'division', user_views.division),
url(r'official_login_fill', user_views.official_login_form),
url(r'official-login', user_views.official_login),
url(r'employee_site', user_views.emp_site),
url(r'garbage_fill',user_views.garbage_fill),
url(r'garbage_form_fill',user_views.garbage_entries),
url(r'garbage_form',user_views.garbage_form),
url(r'garbage', user_views.garbage),
url(r'reset_passwrd',user_views.reset_passwrd),
url(r'feedback',user_views.feedback),
url(r'signout',user_views.signout),
]
|
[
"noreply@github.com"
] |
Trishala13.noreply@github.com
|