| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 2.91k–686M, ⌀ = null) | star_events_count (int64 0–209k) | fork_events_count (int64 0–110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 2–10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b706ea49f123c74e0b3e5e1658c9ba5ec1669b0
|
938dff8c185ec204d9f445885cea3498c51dc4b1
|
/opencv-image.py
|
1c875efdb4e52d584582f144dc189ce45d5614d6
|
[] |
no_license
|
tavares1/opencv-python
|
34b01f7564720b1fb319c4dc93efaa5ed28b4032
|
2bedf0d56448b7ea8e70725f9772a80769de00b0
|
refs/heads/master
| 2020-12-30T10:50:55.010269
| 2017-07-30T23:40:06
| 2017-07-30T23:40:06
| 98,830,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
import cv2
import numpy as np

# Create a VideoCapture object
cap = cv2.VideoCapture(0)

# Check if the camera opened successfully
if not cap.isOpened():
    print("Unable to read camera feed")
else:
    # Default resolutions of the frame are obtained. The default resolutions are system dependent.
    # We convert the resolutions from float to integer.
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    # Define the codec and create a VideoWriter object. The output is stored in the 'outpy.avi' file.
    out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
    while True:
        ret, frame = cap.read()
        if ret:
            # Write the frame into the file 'outpy.avi'
            out.write(frame)
            # Display the resulting frame
            cv2.imshow('frame', frame)
            # Press Q on the keyboard to stop recording
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Break the loop if no frame could be read
        else:
            break
    # When everything is done, release the video capture and video write objects
    cap.release()
    out.release()
    # Close all the frames
    cv2.destroyAllWindows()
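    # Optional sanity check (a sketch, not in the original file): re-open the
    # written file to confirm the AVI container is readable.
    # playback = cv2.VideoCapture('outpy.avi')
    # print(playback.isOpened())  # expected: True after a successful recording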
|
[
"lucastavaresvasconcelos@gmail.com"
] |
lucastavaresvasconcelos@gmail.com
|
c43e002b85a0c45caf90f46653f69735c792bde2
|
1ee948b20d18b7fdfd9994a9f2128b13af7505a9
|
/netwerk/socket_server.py
|
4203c69492c71e651e03becb9186e4a49ed0eb4a
|
[] |
no_license
|
effevee/IoT3-micropython
|
a88a44b40bf5fe2aae058683e9d3df9f828ba554
|
9ce60b365f2f838fb4d509ccd303cc57367f8234
|
refs/heads/master
| 2020-09-16T00:11:20.498435
| 2020-04-16T08:07:37
| 2020-04-16T08:07:37
| 223,593,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
import simpleWifi
import sys
import usocket
from machine import Pin, PWM

PORT = 7950
LED1 = 21
s = None
pwm = None

# initialise the LED
pwm = PWM(Pin(LED1), freq=100)

# create the wifi object
myWifi = simpleWifi.Wifi()

# connect to the wifi
if not myWifi.open():
    print("Problem with wifi")
    myWifi.get_status()
    sys.exit()
myWifi.get_status()

try:
    addr = usocket.getaddrinfo('0.0.0.0', PORT)[0][-1]
    s = usocket.socket()
    s.bind(addr)
    s.listen(1)
    while True:
        c, caddr = s.accept()
        data = c.recv(4)
        intensity = int(data)  # duty cycle 0-100
        print('Connection with {} - duty cycle {} %'.format(caddr, intensity))
        # adjust the intensity of LED1 with PWM
        pwm.duty(intensity)
        c.close()
except Exception as e:
    print("Socket problem %s" % e)
finally:
    if s is not None:
        s.close()
    if pwm is not None:
        pwm.deinit()
    myWifi.close()
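
# Example client (sketch; the IP address below is a placeholder for the board's
# actual address on your network):
#
#   import socket
#   client = socket.socket()
#   client.connect(('192.168.1.50', 7950))
#   client.send(b'50')  # request a 50 % duty cycle for LED1
#   client.close()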
|
[
"effevee@gmail.com"
] |
effevee@gmail.com
|
844fbef16b65d799bf204c598aa73a34c0359825
|
e64be2f6c3707da5df61a3b5ec056e57f482d3e2
|
/ptt_crawling.py
|
da412285207b164a4ccd789076a7de0b121b36d7
|
[
"MIT"
] |
permissive
|
hlshao/mining-ptt-news
|
72f0ab89cc5d64b73169f6c26b42fe4eb722953a
|
e784a98721c892349148fcd3c33f7b6515632c0c
|
refs/heads/master
| 2020-03-18T20:50:25.080138
| 2017-11-24T01:08:58
| 2017-11-24T01:10:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,920
|
py
|
#!/usr/bin/env python
from pathlib import Path
from urllib.parse import quote_plus
from time import sleep
from random import randint
import requests
import ptt_core
l = ptt_core.l
def _make_fake_browser():
fake_browser = requests.Session()
fake_browser.headers = {
#'user-agent': (
# 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) '
# 'AppleWebKit/537.36 (KHTML, like Gecko) '
# 'Chrome/54.0.2840.98 Safari/537.36'
#),
#'accept': (
# 'text/html,application/xhtml+xml,application/xml;q=0.9,'
# 'image/webp,*/*;q=0.8'
#),
#'accept-encoding': 'gzip, deflate, sdch, br',
#'accept-language': 'en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4',
'cookie': 'over18=1',
}
return fake_browser
_SHARED_FAKE_BROWSER = _make_fake_browser()
_CACHE_DIR_PATH = Path('cache/')
_URL_SET_SKIPPING_CACHE = {'https://www.ptt.cc/bbs/Gossiping/index.html'}
if not _CACHE_DIR_PATH.exists():
_CACHE_DIR_PATH.mkdir()
def read_or_request(url):
# should generate valid fname for most of the systems
fname = quote_plus(url)
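# e.g. quote_plus('https://www.ptt.cc/bbs/Gossiping/index.html')
#      == 'https%3A%2F%2Fwww.ptt.cc%2Fbbs%2FGossiping%2Findex.html'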
path = _CACHE_DIR_PATH / fname
# try cache
if url in _URL_SET_SKIPPING_CACHE:
l.info('Skip cache for {}'.format(url))
else:
try:
with path.open() as f:
l.info('Hit {}'.format(url))
return f.read()
except OSError:
l.info('Missed {}'.format(url))
# request
resp = _SHARED_FAKE_BROWSER.get(url)
text = resp.text
with path.open('w') as f:
f.write(text)
l.info('Wrote {}'.format(url))
return text
def crawl(index_url):
count = 0
prev_url = index_url
while 1:
# crawl and parse the index page
l.info('Crawl the index page {} ...'.format(prev_url))
try:
text = read_or_request(prev_url)
except OSError:
# try again
l.info('Try again ...')
text = read_or_request(prev_url)
l.info('Parse the index page {} ...'.format(prev_url))
parsed_index_d = ptt_core.parse_index_page(text)
prev_url = parsed_index_d['prev_url']
# crawl the article_url
for entry_d in parsed_index_d['entry_ds']:
article_url = entry_d['article_url']
# if deleted article
if not article_url:
continue
# skip non-news
if not entry_d['title'].startswith('[新聞]'):
continue
l.info('Crawl the article page {} ...'.format(article_url))
try:
read_or_request(article_url)
except OSError:
# try again
l.info('Try again ...')
try:
read_or_request(article_url)
except OSError:
# skip if still fail
l.info('Skip')
continue
count += 1
l.info('Sleep')
sleep(randint(0, 10)*0.001)
l.info('Got {:,} articles so far'.format(count))
if __name__ == '__main__':
from pprint import pprint
crawl('https://www.ptt.cc/bbs/Gossiping/index.html')
import sys
sys.exit()
pprint(ptt_core.parse_article_page(read_or_request(
'https://www.ptt.cc/bbs/Gossiping/M.1480355255.A.07C.html'
)))
import sys
sys.exit()
# test the index page
pprint(ptt_core.parse_index_page(read_or_request(
'https://www.ptt.cc/bbs/Gossiping/index20177.html'
)))
# test the article page #1
pprint(ptt_core.parse_article_page(read_or_request(
'https://www.ptt.cc/bbs/Gossiping/M.1480367106.A.A55.html'
)))
# test the article page #2
pprint(ptt_core.parse_article_page(read_or_request(
'https://www.ptt.cc/bbs/Gossiping/M.1480380251.A.9A4.html'
)))
|
[
"mosky.tw@gmail.com"
] |
mosky.tw@gmail.com
|
a5fda4da449af4bf8de23a5f05ba8d0fa3bee79e
|
3d37d1699917e3c2a933422fda42d9425c576935
|
/ivr_payment/settings.py
|
be6a2b98f4c236f2a56ac9f2f04143bf25e0ea61
|
[] |
no_license
|
seongwonhan88/ivr_middleware
|
c5d59f90e25fe7320b2c4ad80c2a9fc5f189f209
|
f9529c6b8b6d80a722802eb6322a6ea97f5e4d03
|
refs/heads/master
| 2022-12-12T13:21:54.583522
| 2020-02-12T00:00:12
| 2020-02-12T00:00:12
| 239,676,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,864
|
py
|
"""
Django settings for ivr_payment project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7vbh(lnq$9@=2z4@p(cl$ok^g7%^kop_%i3558u=7al_x@6ms9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'logs',
'stripe',
'django_mysql',
'celery'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'ivr_payment.middleware.CustomMiddleware',
]
ROOT_URLCONF = 'ivr_payment.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ivr_payment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('IVR_DB', None),
'USER': os.environ.get('IVR_USER', None),
'PASSWORD': os.environ.get('IVR_PASSWORD', None),
'PORT': os.environ.get('IVR_PORT', None),
'HOST': os.environ.get('IVR_HOST', None),
}
}
# REDIS
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', '6379')
REDIS_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/1"
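# With the defaults above, REDIS_URL resolves to redis://localhost:6379/1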
CELERY_BROKER_URL = REDIS_URL
CELERY_RESULT_BACKEND = REDIS_URL
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_TIMEZONE = 'UTC'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Stripe Settings
stripe_key = os.environ.get('STRIPE_API_KEY', None)
|
[
"seongwonhan88@gmail.com"
] |
seongwonhan88@gmail.com
|
03265cb8e094f64adc51ac4401297a26ac633bf9
|
a50e906945260351f43d57e014081bcdef5b65a4
|
/collections/ansible_collections/fortinet/fortios/plugins/modules/fortios_wireless_controller_ap_status.py
|
bd5aaef6d41098992f6960b17ee137d04a334eaa
|
[] |
no_license
|
alhamdubello/evpn-ipsec-dci-ansible
|
210cb31f4710bb55dc6d2443a590f3eb65545cf5
|
2dcc7c915167cd3b25ef3651f2119d54a18efdff
|
refs/heads/main
| 2023-06-08T10:42:35.939341
| 2021-06-28T09:52:45
| 2021-06-28T09:52:45
| 380,860,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,888
|
py
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_ap_status
short_description: Configure access point status (rogue | accepted | suppressed) in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller feature and ap_status category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.4.0
version_added: "2.9"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_ap_status:
description:
- Configure access point status (rogue | accepted | suppressed).
default: null
type: dict
suboptions:
bssid:
description:
- Access Point"s (AP"s) BSSID.
type: str
id:
description:
- AP ID.
required: true
type: int
ssid:
description:
- Access Point"s (AP"s) SSID.
type: str
status:
description:
- 'Access Point"s (AP"s) status: rogue, accepted, or suppressed.'
type: str
choices:
- rogue
- accepted
- suppressed
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure access point status (rogue | accepted | suppressed).
fortios_wireless_controller_ap_status:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
wireless_controller_ap_status:
bssid: "<your_own_value>"
id: "4"
ssid: "<your_own_value>"
status: "rogue"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_wireless_controller_ap_status_data(json):
option_list = ['bssid', 'id', 'ssid',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
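# e.g. underscore_to_hyphen({'ap_status': [{'is_rogue': 1}]})
#      == {'ap-status': [{'is-rogue': 1}]}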
def wireless_controller_ap_status(data, fos):
vdom = data['vdom']
state = data['state']
wireless_controller_ap_status_data = data['wireless_controller_ap_status']
filtered_data = underscore_to_hyphen(filter_wireless_controller_ap_status_data(wireless_controller_ap_status_data))
if state == "present":
return fos.set('wireless-controller',
'ap-status',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller',
'ap-status',
mkey=filtered_data['id'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller(data, fos):
if data['wireless_controller_ap_status']:
resp = wireless_controller_ap_status(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('wireless_controller_ap_status'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
mkeyname = 'id'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_ap_status": {
"required": False, "type": "dict", "default": None,
"options": {
"bssid": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"ssid": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["rogue",
"accepted",
"suppressed"]}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
fos = FortiOSHandler(connection, module, mkeyname)
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
[
"a.u.bello@bham.ac.uk"
] |
a.u.bello@bham.ac.uk
|
14a9a320dcefa38f2eaa0a1bb8681ce760c5cb6e
|
56e8fb7cc16a0cb620ceb4490eb522895a5d773f
|
/web/homaton.py
|
64f9e34b310cab829f55353ddb2e0d13b3628d78
|
[] |
no_license
|
andreasling/homaton
|
522b70c03814514978543549372432b188f5a0de
|
6f2fda41abfd6c988c323989b604f51e7336e697
|
refs/heads/master
| 2021-01-10T19:17:16.854523
| 2013-06-02T20:54:04
| 2013-06-02T20:54:04
| 6,445,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,418
|
py
|
#!/usr/bin/python
import time
import web
import RPi.GPIO as GPIO
import thread
# GPIO pin config
#gpios = [21, 22, 23, 24, 10, 9, 25, 11, 8, 7]
# all 1 2 3 4
gpios = [[ 7, 21, 22, 23, 24 ], # on
[ 8, 10, 9, 25, 11 ]] # off
hightime = 1
# setup GPIOs
GPIO.setmode(GPIO.BCM)
for ps in gpios:
for p in ps:
GPIO.setup(p, GPIO.OUT)
# test GPIOs
#for p in gpios:
# GPIO.output(p, GPIO.HIGH)
# time.sleep(0.2)
# GPIO.output(p, GPIO.LOW)
render = web.template.render("templates/")
urls = (
# '/api/form', 'api_form',
"/api/([0-9a-z]+)", "api",
"/", "index",
"/favicon.ico", "favicon"
)
#class api_form:
# def POST(self):
# print web.data()
# raise web.seeother("/")
class api:
def POST(self, switch):
print "api:"
print switch
state = web.data()
print state
switchToStateBackground(switch, state)
web.header("Cache-Control", "no-cache")
return "ok"
class index:
def GET(self):
#return render.index()
raise web.seeother("/static/index.html")
def POST(self):
input = web.input()
command = input.command
print command
switch, state = command.split(":")
switchToStateBackground(switch, state)
web.header("Cache-Control", "no-cache")
return render.index()
class favicon:
def GET(self):
raise web.seeother("/static/favicon.png")
def switchToStateBackground(switch, state):
thread.start_new_thread(switchToState, (switch, state))
def switchToState(switch, state):
#print "switch: " + switch
#print "state: " + state
#i = ((int("5" if (switch == "all") else switch)-1) * 2) + (1 if (state == "on") else 0)
#p = gpios[i]
ist = 0 if state == "on" else 1
#isw = 0 if switch == "all" else int(switch)
#isws = [1, 2, 3, 4] if switch == "all" else [int(switch)]
isws = [0] if switch == "all" else [int(switch)]
for isw in isws:
print "switch: {0}, {1}".format(switch, isw)
print "state: {0}, {1}".format(state, ist)
p = gpios[ist][isw]
print "pin: {0}".format(p)
GPIO.output(p, GPIO.HIGH)
time.sleep(hightime)
GPIO.output(p, GPIO.LOW)
return
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
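
# Example requests (sketch; web.py serves on port 8080 by default, and the host
# name below is a placeholder):
# curl -d "on" http://raspberrypi:8080/api/1 # switch 1 on
# curl -d "off" http://raspberrypi:8080/api/all # all switches off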
#for i in range(0,10):
# GPIO.output(18, GPIO.HIGH)
# time.sleep(0.1)
# GPIO.output(18, GPIO.LOW)
# GPIO.output(24, GPIO.HIGH)
# time.sleep(0.1)
# GPIO.output(24, GPIO.LOW)
|
[
"andreas.ling@gmail.com"
] |
andreas.ling@gmail.com
|
6284506feb564ca791eb25775d6ca4e3d2ae6f48
|
20aeb3d27359e88463c82a2f7eedb0db3face4f3
|
/ecommapp/migrations/0003_producto_imagen_card.py
|
564a9bbc32b158c0537bc11eb1db8650a3506a80
|
[] |
no_license
|
rpinedaec/pacha4Grupo4
|
2d3adb158836b97147708880ea996572646e3cde
|
b05a30eeb93789c4925df05ad7bd9e8a30acb45b
|
refs/heads/master
| 2022-12-25T11:08:21.451571
| 2020-09-26T19:32:43
| 2020-09-26T19:32:43
| 294,994,684
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# Generated by Django 3.1.1 on 2020-09-21 02:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommapp', '0002_auto_20200918_2241'),
]
operations = [
migrations.AddField(
model_name='producto',
name='imagen_card',
field=models.FileField(blank=True, null=True, upload_to=''),
),
]
|
[
"rpineda@zegelipae.edu.pe"
] |
rpineda@zegelipae.edu.pe
|
6065a22d5d32932affe70feba5ea9ba9d2383715
|
2aa6527e50c278d8e3ff438bab336c97defffb73
|
/PythonETL/search104.py
|
b6fbd456fccafa82db3fedaa4982aa98b7993ecc
|
[] |
no_license
|
CYCEvans/Python_learn_Evans
|
8507828906300acd1b1f9355e3a2bced0a9d5e84
|
c5d945f7f877bd5e0b563b17db7ac99702813ff7
|
refs/heads/master
| 2020-05-25T20:29:00.002375
| 2017-07-10T13:15:54
| 2017-07-10T13:15:54
| 95,064,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
import json
from collections import Counter
from urllib.request import urlopen

# find the total number of pages for the query
def getPageNumber(web):
    html = urlopen(web)
    jsonObj = json.load(html)
    # get the total page count (a string) stored under the key TOTALPAGE
    num = jsonObj.get("TOTALPAGE")
    return num

# read the data (given the number of pages)
def getData(pagenum):
    result = Counter()
    # iterate over all pages
    for n in range(1, pagenum + 1):
        # API URL: cat = job category, page={}, PCSKILL_ALL_DESC = computer skills
        web = "http://www.104.com.tw/i/apis/jobsearch.cfm?cat=2007001004&fmt=8&page={}&pgsz=200&cols=PCSKILL_ALL_DESC".format(n)
        # fetch the JSON-formatted text
        html = urlopen(web)
        # parse it into a JSON object
        jsonObj = json.load(html)
        # get the list stored under the key "data"
        lst = jsonObj.get("data")
        for obj in lst:
            # skip objects that are {} or that list no computer skills
            if not (obj == {} or obj['PCSKILL_ALL_DESC'] == ''):
                # lowercase and split the skills, then count them with the Counter
                lstB = obj['PCSKILL_ALL_DESC'].lower().split(" ")
                for objB in lstB:
                    result[objB] += 1
    return result

if __name__ == "__main__":
    num = int(getPageNumber("http://www.104.com.tw/i/apis/jobsearch.cfm?cat=2007001004&fmt=8&page=1&pgsz=200&cols=PCSKILL_ALL_DESC"))
    print(num)
    print(getData(num).most_common(20))
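    # most_common(20) returns the 20 highest-count (skill, count) pairs,
    # e.g. entries shaped like ('excel', 120); the actual skills and counts
    # depend on the live 104.com.tw API.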
|
[
"evance90@gmail.com"
] |
evance90@gmail.com
|
45d5776f6e03c42d3ba33beaea4e188612008924
|
8d3ed61bacb1ae4999a169d6035f1354a2d3b5a8
|
/simplesite/controllers/account.py
|
dbddf790f3606db45731bdace010c00d78e290e6
|
[] |
no_license
|
wulliam/simple-site
|
6a8d7e84fd4ea98a592dcff18826ac0e608f605d
|
f4b432d74fe02b6ea224cb7455b477dd43104d57
|
refs/heads/master
| 2020-06-04T18:53:41.660348
| 2011-08-16T14:17:47
| 2011-08-16T14:17:47
| 2,128,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
import logging
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from simplesite.lib.base import BaseController, render
import simplesite.lib.helpers as h
log = logging.getLogger(__name__)
class AccountController(BaseController):
def index(self):
# Return a rendered template
#return render('/account.mako')
# or, return a response
return 'Hello World'
def signinagain(self):
request.environ['paste.auth_tkt.logout_user']()
return render('/derived/account/signin.html').replace('FORM_ACTION', h.url_for('signin'))
def signin(self):
#for key, value in request.environ.items():
# log.info("key:%s - value:%s" % (key, value))
if not request.environ.get('REMOTE_USER'):
# This triggers the AuthKit middleware into displaying the sign-in form
abort(401)
else:
return render('/derived/account/signedin.html')
def signout(self):
# The actual removal of the AuthKit cookie occurs when the response passes
# through the AuthKit middleware; we simply need to display a page
# confirming the user is signed out
return render('/derived/account/signedout.html')
|
[
"wulliam@gmail.com"
] |
wulliam@gmail.com
|
4dd50886ff6dce7d84cfb42797222543c8c5e52e
|
f0f06e521407a22cffe578125608930f83677f01
|
/preprocessor.py
|
d405cc25caf3c905ba411b6811a6fc7d0568835e
|
[] |
no_license
|
NikitaTolpikin/MailRuContest
|
a90a644e8f53990aff3164043b2f3dac0b562c83
|
43f2a53eb4a19863f0ae24cffa7860e4d4bc9ce2
|
refs/heads/master
| 2020-08-13T14:17:39.716503
| 2019-10-14T08:02:39
| 2019-10-14T08:02:39
| 214,982,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,078
|
py
|
import numpy as np
from skimage.transform import resize
from skimage import measure
class Preprocessor:
def __init__(self, usr_coords, delta, mask_size=256):
self.usr_coords = usr_coords
self.delta = delta
self.mask_size = mask_size
def find_shape(self, itemId):
xmin = 5000
xmax = 0
ymin = 5000
ymax = 0
for coord in self.usr_coords[itemId]:
if coord['Xmin'] < xmin:
xmin = coord['Xmin']
if coord['Ymin'] < ymin:
ymin = coord['Ymin']
if coord['Xmax'] > xmax:
xmax = coord['Xmax']
if coord['Ymax'] > ymax:
ymax = coord['Ymax']
width = xmax - xmin
height = ymax - ymin
return [width, height], xmin, ymin
def create_usr_mask(self, itemId, shape, xmin, ymin):
msk = np.zeros(shape)
i = 0
constant = 0.5
for coord in self.usr_coords[itemId]:
x1 = coord['Xmin'] - xmin
y1 = coord['Ymin'] - ymin
x2 = coord['Xmax'] - xmin
y2 = coord['Ymax'] - ymin
msk[x1:x2, y1:y2] += 1
i += 1
msk = msk / i*2
msk += constant
(width, height) = shape
dw = int(width * self.delta)
dh = int(height * self.delta)
msk = np.pad(msk, ((dw, dw), (dh, dh)), mode='constant', constant_values=constant)
(width, height) = msk.shape
if width > height:
msk = np.pad(msk, ((0, 0), (0, width - height)), mode='constant', constant_values=constant)
elif width < height:
msk = np.pad(msk, ((0, height - width), (0, 0)), mode='constant', constant_values=constant)
return msk, dw, dh
def get_mask(self, itemId):
constant = 0.5
usr_msk_shape, xmin, ymin = self.find_shape(itemId)
usr_msk, dw, dh = self.create_usr_mask(itemId, usr_msk_shape, xmin, ymin)
width = usr_msk.shape[0]
usr_msk = resize(usr_msk, (self.mask_size, self.mask_size), mode='constant', cval=constant)
koef = float(width / self.mask_size)
usr_msk = np.expand_dims(usr_msk, -1)
return usr_msk, koef, xmin, ymin, dw, dh
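# Typical round trip (sketch; `model` is a hypothetical segmentation model whose
# output has the same (mask_size, mask_size, 1) shape as usr_msk):
#   pre = Preprocessor(usr_coords, delta=0.1)
#   usr_msk, koef, xmin, ymin, dw, dh = pre.get_mask(itemId)
#   pred = model.predict(usr_msk)
#   x, y, x2, y2 = pre.get_coords_from_mask(pred, koef, xmin, ymin, dw, dh)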
def get_coords_from_mask(self, msk, koef, xmin, ymin, dw, dh, trashhold=0.5):
comp = msk[:, :, 0] > trashhold
comp = measure.label(comp)
max_sq = 0
max_reg = {'x': 0, 'y': 0, 'x2': 0, 'y2': 0}
for region in measure.regionprops(comp):
x, y, x2, y2 = region.bbox
if (x2 - x) * (y2 - y) > max_sq:
max_reg['x'] = x
max_reg['y'] = y
max_reg['x2'] = x2
max_reg['y2'] = y2
for coord in max_reg:
max_reg[coord] *= koef
if coord == 'x' or coord == 'x2':
max_reg[coord] = int(max_reg[coord] - dw + xmin)
else:
max_reg[coord] = int(max_reg[coord] - dh + ymin)
return max_reg['x'], max_reg['y'], max_reg['x2'], max_reg['y2']
|
[
"nvt7@tpu.ru"
] |
nvt7@tpu.ru
|
2c78d8d949e6bbf98a0834f3c66a632449c764f3
|
5c7f81633a75e0144e32494dcc6ec7422ff2f745
|
/homebot/__main__.py
|
cb659de43c346f05572265f744e579270209a681
|
[
"MIT"
] |
permissive
|
HazardDede/homebot
|
7370bccfd03ae54552a0f66b417627befe0a39b4
|
7a44f5470bdd84c1e7660cf48955d44a9e4c317a
|
refs/heads/master
| 2020-11-26T15:43:36.907775
| 2020-01-09T18:52:46
| 2020-01-09T18:52:46
| 229,126,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,292
|
py
|
"""Main application entrypoint."""
import asyncio
import importlib
import logging
import os
import pathlib
import py_compile
import sys
import fire # type: ignore
from homebot.assets import AssetManager
from homebot.orchestra import Orchestrator
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def _assert_config_file(config: str) -> None:
if not os.path.isfile(config):
raise FileNotFoundError(f"Configuration '{str(config)}' does not exist.")
class Runner:
"""Homebot app."""
@staticmethod
def _load_orchestrator_from_module(config: str) -> Orchestrator:
_assert_config_file(config)
# Set base path for configuration
AssetManager().base_path = os.path.dirname(config)
module_path = os.path.dirname(config)
sys.path.insert(0, module_path)
file_name = pathlib.Path(config).stem
module = importlib.import_module(file_name)
orchestra = None
for var in dir(module):
val = getattr(module, var)
if isinstance(val, Orchestrator):
orchestra = val
break
if not orchestra:
    raise RuntimeError(f"Configuration '{str(config)}' does not include an "
                       f"Orchestrator.")
return orchestra
@staticmethod
def run(config: str) -> None:
"""
Runs the homebot with the specified configuration.
Args:
config (str): The config to load.
"""
orchestra = Runner._load_orchestrator_from_module(config)
loop = asyncio.get_event_loop()
loop.run_until_complete(orchestra.run())
@staticmethod
def validate(config: str) -> None:
"""
Validates the specified configuration.
If the config is valid the validation will be quiet; if the config is broken it
will complain.
Args:
config (str): The config to validate.
"""
_assert_config_file(config)
# First try to compile...
py_compile.compile(config)
# ... then dummy load it
Runner._load_orchestrator_from_module(config)
if __name__ == '__main__':
fire.Fire(Runner) # pragma: no cover
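
# Usage via the CLI that python-fire generates from Runner (sketch):
#   python -m homebot run /path/to/config.py       # start the bot
#   python -m homebot validate /path/to/config.py  # only check the config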
|
[
"d.muth@gmx.net"
] |
d.muth@gmx.net
|
95551dd428d1245f87593ee0c92ad48f0eda8aeb
|
848bb1846224a368708839ce9de9b96e223261bb
|
/capacitaciones/capacitaciones/doctype/asistencia_usuarias/asistencia_usuarias.py
|
c9044574bc3afd5f68483c401ec5df007b9f63aa
|
[
"MIT"
] |
permissive
|
ErickLopez76/capacitaciones
|
3aa09a2432368dc419b25242823b4f689942eb61
|
0c4f052dfff492b1bb362b32b22b33bc4093acee
|
refs/heads/master
| 2020-05-17T02:16:25.536721
| 2019-04-25T14:19:55
| 2019-04-25T14:19:55
| 165,733,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, ericklopez and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Asistencia_usuarias(Document):
pass
|
[
"administrator@frappeserver.sinclusion.gob.sv"
] |
administrator@frappeserver.sinclusion.gob.sv
|
5ff6148349b208cbc1117241fda232a86966d433
|
e384c9fa00e024a56c9f46b55577ba044185b850
|
/File/SolvingKnapsackByGA/Solver/SolveKnapsack02.py
|
7a170fc8baa7904a5955b8e7043e09913d003688
|
[] |
no_license
|
JTNghia/AI_Subject
|
a19c741b1e7bd54b6e955a94539dde696e194fb2
|
f6b4e5edd0507c39ae276ea8123b73642fa2fea0
|
refs/heads/main
| 2023-06-15T12:05:03.541955
| 2021-07-10T10:11:34
| 2021-07-10T10:11:34
| 361,995,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,382
|
py
|
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
import random
import numpy
import matplotlib.pyplot as plt
import seaborn as sns
# import knapsack
import numpy as np
class Knapsack02Problem:
"""This class encapsulates the Knapsack 0-1 Problem from RosettaCode.org
"""
def __init__(self,i,c):
# initialize instance variables:
self.items = i
self.maxCapacity = c
# initialize the data:
# self.__initData()
def __len__(self):
"""
:return: the total number of items defined in the problem
"""
return len(self.items)
# def __initData(self,i,c):
# """initializes the RosettaCode.org knapsack 0-1 problem data
# """
# self.items = i
# self.maxCapacity = c
def getValue(self, zeroOneList):
"""
Calculates the value of the selected items in the list, while ignoring items that will cause the accumulating weight to exceed the maximum weight
:param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items. '1' means that item was selected.
:return: the calculated value
"""
totalWeight = totalValue = 0
for i in range(len(zeroOneList)):
item, weight, value = self.items[i]
if totalWeight + weight <= self.maxCapacity:
totalWeight += zeroOneList[i] * weight
totalValue += zeroOneList[i] * value
return totalValue
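# e.g. with items [("map", 9, 150), ("compass", 13, 35)] and maxCapacity 400,
# getValue([1, 0]) == 150 (only the map is selected).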
def printItems(self, zeroOneList):
"""
Prints the selected items in the list, while ignoring items that will cause the accumulating weight to exceed the maximum weight
:param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items. '1' means that item was selected.
"""
totalWeight = totalValue = 0
for i in range(len(zeroOneList)):
item, weight, value = self.items[i]
if totalWeight + weight <= self.maxCapacity:
if zeroOneList[i] > 0:
totalWeight += weight
totalValue += value
# print("- Adding {}: weight = {}, value = {}, accumulated weight = {}, accumulated value = {}".format(item, weight, value, totalWeight, totalValue))
# print("- Total weight = {}, Total value = {}".format(totalWeight, totalValue))
return [totalValue, totalWeight]
# Genetic Algorithm flow:
def GAKnapsack(i,c):
# problem constants:
# create the knapsack problem instance to be used:
# global knapsack
knapsack = Knapsack02Problem(i,c)
# Genetic Algorithm constants:
POPULATION_SIZE = 500
P_CROSSOVER = 0.9 # probability for crossover
P_MUTATION = 0.1 # probability for mutating an individual
MAX_GENERATIONS = 500
HALL_OF_FAME_SIZE = 1
# set the random seed:
RANDOM_SEED = 42
random.seed(RANDOM_SEED)
toolbox = base.Toolbox()
# create an operator that randomly returns 0 or 1:
toolbox.register("zeroOrOne", random.randint, 0, 1)
# define a single objective, maximizing fitness strategy:
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
# create the Individual class based on list:
creator.create("Individual", list, fitness=creator.FitnessMax)
# create the individual operator to fill up an Individual instance:
toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, len(knapsack))
# create the population operator to generate a list of individuals:
toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
# fitness calculation
def knapsackValue(individual):
return knapsack.getValue(individual), # return a tuple
toolbox.register("evaluate", knapsackValue)
# genetic operators:
# Tournament selection with tournament size of 3:
toolbox.register("select", tools.selTournament, tournsize=3)
# Single-point crossover:
toolbox.register("mate", tools.cxTwoPoint)
# Flip-bit mutation:
# indpb: Independent probability for each attribute to be flipped
toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/len(knapsack))
# create initial population (generation 0):
population = toolbox.populationCreator(n=POPULATION_SIZE)
# prepare the statistics object:
stats = tools.Statistics(lambda ind: ind.fitness.values)
# stats.register("max", numpy.max)
# stats.register("avg", numpy.mean)
# define the hall-of-fame object:
hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
# perform the Genetic Algorithm flow with hof feature added:
population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
# print best solution found:
best = hof.items[0]
# print("-- Best Ever Individual = ", best)
# print("-- Best Ever Fitness = ", best.fitness.values[0])
# print("-- Knapsack Items = ")
return knapsack.printItems(best)
# extract statistics:
# maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
# plot statistics:
# sns.set_style("whitegrid")
# plt.plot(maxFitnessValues, color='red')
# plt.plot(meanFitnessValues, color='green')
# plt.xlabel('Generation')
# plt.ylabel('Max / Average Fitness')
# plt.title('Max and Average fitness over Generations')
# plt.show()
# if __name__ == "__main__":
# main()
i = [
("map", 9, 150),
("compass", 13, 35),
("water", 153, 200),
("sandwich", 50, 160),
("glucose", 15, 60),
("tin", 68, 45),
("banana", 27, 60),
("apple", 39, 40),
("cheese", 23, 30),
("beer", 52, 10),
("suntan cream", 11, 70),
("camera", 32, 30),
("t-shirt", 24, 15),
("trousers", 48, 10),
("umbrella", 73, 40),
("waterproof trousers", 42, 70),
("waterproof overclothes", 43, 75),
("note-case", 22, 80),
("sunglasses", 7, 20),
("towel", 18, 12),
("socks", 4, 50),
("book", 30, 10)
]
c = 400
# print(GAKnapsack(i,c))
|
[
"19521899@gm.uit.edu.vn"
] |
19521899@gm.uit.edu.vn
|
2c19a86b1ec300188e38fb06e6c0c107747aa070
|
29ebb277f1b10899a9a10a4ce5f8308917ac0243
|
/0.Introduction/300118524.py
|
d03936f3553c2d8058ee72d4622d157641dc401c
|
[] |
no_license
|
CollegeBoreal/INF1042-202-20H-02
|
74ab26bb9ad0c85ee811b7cde7c34473d46bd8c2
|
deb2baa3a0283b9249e9a8a1b31bc05d5dff665c
|
refs/heads/master
| 2020-12-05T15:23:11.891501
| 2020-04-23T18:14:46
| 2020-04-23T18:14:46
| 232,152,653
| 0
| 2
| null | 2020-04-15T01:02:09
| 2020-01-06T17:41:00
|
Python
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 15:13:12 2020
@author: zoureni
"""
print( 10 * "BONNE ANNÉE \n")
for v in range (10):
print("BONNE ANNÉE")
print()
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
print("BONNE ANNÉE")
|
[
"zoureni@outlook.fr"
] |
zoureni@outlook.fr
|
daa092adf43067d706e068dbbc4f89bb5970b03c
|
bcad7a5210aef3cb1c7e8b4f17486a7cbc2a44db
|
/stocksDailyTransactionsDownloader.py
|
449543a7318426bc4da892194c0505a3a07e1f2b
|
[] |
no_license
|
avsolatorio/stocks_daily_data_analysis
|
597b1aac61a08fbafb2c48b47257917c684b1be4
|
bf313ed8a506e99696756d217d4d6b616370931f
|
refs/heads/master
| 2021-01-01T17:57:08.640683
| 2014-06-30T13:50:50
| 2014-06-30T13:50:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
#-------------------------------------------------------------------------------
# Name: stocksDailyTransactionsDownloader.py
# Purpose:
#
# Author: avsolatorio
#
# Created: 10/06/2014
# Copyright: (c) avsolatorio 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
from web_login import getLoginAccess
import bz2
import cPickle
import datetime
import re
import os
import time
def trimNuisancePartFromResponse(response):
pattern = '<form action=[\w\W]*?</form>' #Find a way to complete regex: <form action="transactions.cshtml" method="get">
return re.findall(pattern, response)[0]
def getCurrentDateInString():
date = datetime.datetime.now()
return date.strftime('%m_%d_%Y') #Equivalent to: 06_10_2014 which is June 10, 2014
def compressAndSaveData(data, file_name):
bz2_file = bz2.BZ2File(file_name, 'w')
bz2_file.write(data)
bz2_file.close()
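# The compressed file can be read back with bz2.BZ2File(file_name).read()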
def main():
symbols_names = cPickle.load(open('PSE_LISTED_STOCKS_SYMBOLS_NAMES.dict'))
url = raw_input("Please input the url for the site that you want to access: ")
access_site = url + '/Public/Default.aspx'
current_date = getCurrentDateInString()
htmlOpener = getLoginAccess(access_site)
symbols = sorted(symbols_names.keys())
for symbol in symbols:
print "Processing data for: %s" % symbol.upper()
data = ''
while not data:
try:
response = htmlOpener.open('%s/Infinity/Transactions.cshtml?Symbol=%s' % (url, symbol.upper()))
data = response.read()
except: #urllib2.URLError:
print "urllib2.URLError"
continue
final_data = trimNuisancePartFromResponse(data)
file_name = './%s/%s_%s.htm.bz2' % (symbol.upper(), symbol.upper(), current_date)
if not os.path.isdir(os.path.dirname(file_name)):
os.makedirs(os.path.dirname(file_name))
compressAndSaveData(final_data, file_name)
print "Done getting data for: ", symbol.upper()
time.sleep(1)
if __name__ == '__main__':
main()
|
[
"avsolatorio@gmail.com"
] |
avsolatorio@gmail.com
|
eeb30b55fcfad12fd654d361f80c727cb8459455
|
412b699e0f497ac03d6618fe349f4469646c6f2d
|
/env/lib/python3.8/site-packages/eth_account/account.py
|
b181bfdac99019abc2c0d6adcc2f3db62125eb7b
|
[
"MIT"
] |
permissive
|
EtienneBrJ/Portfolio
|
7c70573f02a5779f9070d6d9df58d460828176e3
|
6b8d8cf9622eadef47bd10690c1bf1e7fd892bfd
|
refs/heads/main
| 2023-09-03T15:03:43.698518
| 2021-11-04T01:02:33
| 2021-11-04T01:02:33
| 411,076,325
| 1
| 0
|
MIT
| 2021-10-31T13:43:09
| 2021-09-27T23:48:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 34,098
|
py
|
from collections.abc import (
Mapping,
)
import json
import os
import warnings
from cytoolz import (
dissoc,
)
from eth_keyfile import (
create_keyfile_json,
decode_keyfile_json,
)
from eth_keys import (
KeyAPI,
keys,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_utils.curried import (
combomethod,
hexstr_if_str,
is_dict,
keccak,
text_if_str,
to_bytes,
to_int,
)
from hexbytes import (
HexBytes,
)
from eth_account._utils.legacy_transactions import (
Transaction,
vrs_from,
)
from eth_account._utils.signing import (
hash_of_signed_transaction,
sign_message_hash,
sign_transaction_dict,
to_standard_signature_bytes,
to_standard_v,
)
from eth_account._utils.typed_transactions import (
TypedTransaction,
)
from eth_account.datastructures import (
SignedMessage,
SignedTransaction,
)
from eth_account.hdaccount import (
ETHEREUM_DEFAULT_PATH,
generate_mnemonic,
key_from_seed,
seed_from_mnemonic,
)
from eth_account.messages import (
SignableMessage,
_hash_eip191_message,
)
from eth_account.signers.local import (
LocalAccount,
)
class Account(object):
"""
The primary entry point for working with Ethereum private keys.
It does **not** require a connection to an Ethereum node.
"""
_keys = keys
_default_kdf = os.getenv('ETH_ACCOUNT_KDF', 'scrypt')
# Enable unaudited features (off by default)
_use_unaudited_hdwallet_features = False
@classmethod
def enable_unaudited_hdwallet_features(cls):
"""
Use this flag to enable unaudited HD Wallet features.
"""
cls._use_unaudited_hdwallet_features = True
@combomethod
def create(self, extra_entropy=''):
r"""
Creates a new private key, and returns it as a :class:`~eth_account.local.LocalAccount`.
:param extra_entropy: Add extra randomness to whatever randomness your OS can provide
:type extra_entropy: str or bytes or int
:returns: an object with private key and convenience methods
.. code-block:: python
>>> from eth_account import Account
>>> acct = Account.create('KEYSMASH FJAFJKLDSKF7JKFDJ 1530')
>>> acct.address
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> acct.key
HexBytes('0x8676e9a8c86c8921e922e61e0bb6e9e9689aad4c99082620610b00140e5f21b8')
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
extra_key_bytes = text_if_str(to_bytes, extra_entropy)
key_bytes = keccak(os.urandom(32) + extra_key_bytes)
return self.from_key(key_bytes)
@staticmethod
def decrypt(keyfile_json, password):
"""
Decrypts a private key.
The key may have been encrypted using an Ethereum client or :meth:`~Account.encrypt`.
:param keyfile_json: The encrypted key
:type keyfile_json: dict or str
:param str password: The password that was used to encrypt the key
:returns: the raw private key
:rtype: ~hexbytes.main.HexBytes
.. doctest:: python
>>> encrypted = {
... 'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
... 'crypto': {'cipher': 'aes-128-ctr',
... 'cipherparams': {'iv': '482ef54775b0cc59f25717711286f5c8'},
... 'ciphertext': 'cb636716a9fd46adbb31832d964df2082536edd5399a3393327dc89b0193a2be',
... 'kdf': 'scrypt',
... 'kdfparams': {},
... 'kdfparams': {'dklen': 32,
... 'n': 262144,
... 'p': 8,
... 'r': 1,
... 'salt': 'd3c9a9945000fcb6c9df0f854266d573'},
... 'mac': '4f626ec5e7fea391b2229348a65bfef532c2a4e8372c0a6a814505a350a7689d'},
... 'id': 'b812f3f9-78cc-462a-9e89-74418aa27cb0',
... 'version': 3}
>>> Account.decrypt(encrypted, 'password')
HexBytes('0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364')
"""
if isinstance(keyfile_json, str):
keyfile = json.loads(keyfile_json)
elif is_dict(keyfile_json):
keyfile = keyfile_json
else:
raise TypeError("The keyfile should be supplied as a JSON string, or a dictionary.")
password_bytes = text_if_str(to_bytes, password)
return HexBytes(decode_keyfile_json(keyfile, password_bytes))
@classmethod
def encrypt(cls, private_key, password, kdf=None, iterations=None):
"""
Creates a dictionary with an encrypted version of your private key.
To import this keyfile into Ethereum clients like geth and parity:
encode this dictionary with :func:`json.dumps` and save it to disk where your
client keeps key files.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:param str password: The password which you will need to unlock the account in your client
:param str kdf: The key derivation function to use when encrypting your private key
:param int iterations: The work factor for the key derivation function
:returns: The data to use in your encrypted file
:rtype: dict
If kdf is not set, the default key derivation function falls back to the
environment variable :envvar:`ETH_ACCOUNT_KDF`. If that is not set, then
'scrypt' will be used as the default.
.. doctest:: python
>>> from pprint import pprint
>>> encrypted = Account.encrypt(
... 0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
... 'password'
... )
>>> pprint(encrypted)
{'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {'cipher': 'aes-128-ctr',
'cipherparams': {'iv': '...'},
'ciphertext': '...',
'kdf': 'scrypt',
'kdfparams': {'dklen': 32,
'n': 262144,
'p': 8,
'r': 1,
'salt': '...'},
'mac': '...'},
'id': '...',
'version': 3}
>>> with open('my-keyfile', 'w') as f: # doctest: +SKIP
... f.write(json.dumps(encrypted))
"""
if isinstance(private_key, keys.PrivateKey):
key_bytes = private_key.to_bytes()
else:
key_bytes = HexBytes(private_key)
if kdf is None:
kdf = cls._default_kdf
password_bytes = text_if_str(to_bytes, password)
assert len(key_bytes) == 32
return create_keyfile_json(key_bytes, password_bytes, kdf=kdf, iterations=iterations)
@combomethod
def privateKeyToAccount(self, private_key):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.from_key`.
This method will be removed in v0.5
"""
warnings.warn(
"privateKeyToAccount is deprecated in favor of from_key",
category=DeprecationWarning,
)
return self.from_key(private_key)
@combomethod
def from_key(self, private_key):
r"""
Returns a convenient object for working with the given private key.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:return: object with methods for signing and encrypting
:rtype: LocalAccount
.. doctest:: python
>>> acct = Account.from_key(
... 0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364)
>>> acct.address
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> acct.key
HexBytes('0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364')
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
key = self._parsePrivateKey(private_key)
return LocalAccount(key, self)
@combomethod
def from_mnemonic(self,
mnemonic: str,
passphrase: str = "",
account_path: str = ETHEREUM_DEFAULT_PATH):
"""
Generate an account from a mnemonic.
.. CAUTION:: This feature is experimental, unaudited, and likely to change soon
:param str mnemonic: space-separated list of BIP39 mnemonic seed words
:param str passphrase: Optional passphrase used to encrypt the mnemonic
:param str account_path: Specify an alternate HD path for deriving the seed using
BIP32 HD wallet key derivation.
:return: object with methods for signing and encrypting
:rtype: LocalAccount
.. doctest:: python
>>> from eth_account import Account
>>> Account.enable_unaudited_hdwallet_features()
>>> acct = Account.from_mnemonic(
... "coral allow abandon recipe top tray caught video climb similar prepare bracket "
... "antenna rubber announce gauge volume hub hood burden skill immense add acid")
>>> acct.address
'0x9AdA5dAD14d925f4df1378409731a9B71Bc8569d'
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
if not self._use_unaudited_hdwallet_features:
raise AttributeError(
"The use of the Mnemonic features of Account is disabled by default until "
"its API stabilizes. To use these features, please enable them by running "
"`Account.enable_unaudited_hdwallet_features()` and try again."
)
seed = seed_from_mnemonic(mnemonic, passphrase)
private_key = key_from_seed(seed, account_path)
key = self._parsePrivateKey(private_key)
return LocalAccount(key, self)
@combomethod
def create_with_mnemonic(self,
passphrase: str = "",
num_words: int = 12,
language: str = "english",
account_path: str = ETHEREUM_DEFAULT_PATH):
r"""
Create a new private key and related mnemonic.
.. CAUTION:: This feature is experimental, unaudited, and likely to change soon
Creates a new private key, and returns it as a :class:`~eth_account.local.LocalAccount`,
alongside the mnemonic that can be used to regenerate it using any BIP39-compatible wallet.
:param str passphrase: Extra passphrase to encrypt the seed phrase
:param int num_words: Number of words to use with seed phrase. Default is 12 words.
Must be one of [12, 15, 18, 21, 24].
:param str language: Language to use for BIP39 mnemonic seed phrase.
:param str account_path: Specify an alternate HD path for deriving the seed using
BIP32 HD wallet key derivation.
:returns: A tuple consisting of an object with private key and convenience methods,
and the mnemonic seed phrase that can be used to restore the account.
:rtype: (LocalAccount, str)
.. doctest:: python
>>> from eth_account import Account
>>> Account.enable_unaudited_hdwallet_features()
>>> acct, mnemonic = Account.create_with_mnemonic()
>>> acct.address # doctest: +SKIP
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> acct == Account.from_mnemonic(mnemonic)
True
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
if not self._use_unaudited_hdwallet_features:
raise AttributeError(
"The use of the Mnemonic features of Account is disabled by default until "
"its API stabilizes. To use these features, please enable them by running "
"`Account.enable_unaudited_hdwallet_features()` and try again."
)
mnemonic = generate_mnemonic(num_words, language)
return self.from_mnemonic(mnemonic, passphrase, account_path), mnemonic
@combomethod
def recover_message(self, signable_message: SignableMessage, vrs=None, signature=None):
r"""
Get the address of the account that signed the given message.
You must specify exactly one of: vrs or signature
:param signable_message: the message that was signed
:param vrs: the three pieces generated by an elliptic curve signature
:type vrs: tuple(v, r, s), each element is hex str, bytes or int
:param signature: signature bytes concatenated as r+s+v
:type signature: hex str or bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
.. doctest:: python
>>> from eth_account.messages import encode_defunct
>>> from eth_account import Account
>>> message = encode_defunct(text="I♥SF")
>>> vrs = (
... 28,
... '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3',
... '0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce')
>>> Account.recover_message(message, vrs=vrs)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
# All of these recover calls are equivalent:
# variations on vrs
>>> vrs = (
... '0x1c',
... '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3',
... '0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce')
>>> Account.recover_message(message, vrs=vrs)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> # Caution about this approach: likely problems if there are leading 0s
>>> vrs = (
... 0x1c,
... 0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3,
... 0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce)
>>> Account.recover_message(message, vrs=vrs)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> vrs = (
... b'\x1c',
... b'\xe6\xca\x9b\xbaX\xc8\x86\x11\xfa\xd6jl\xe8\xf9\x96\x90\x81\x95Y8\x07\xc4\xb3\x8b\xd5(\xd2\xcf\xf0\x9dN\xb3', # noqa: E501
... b'>[\xfb\xbfM>9\xb1\xa2\xfd\x81jv\x80\xc1\x9e\xbe\xba\xf3\xa1A\xb29\x93J\xd4<\xb3?\xce\xc8\xce') # noqa: E501
>>> Account.recover_message(message, vrs=vrs)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
# variations on signature
>>> signature = '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c' # noqa: E501
>>> Account.recover_message(message, signature=signature)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> signature = b'\xe6\xca\x9b\xbaX\xc8\x86\x11\xfa\xd6jl\xe8\xf9\x96\x90\x81\x95Y8\x07\xc4\xb3\x8b\xd5(\xd2\xcf\xf0\x9dN\xb3>[\xfb\xbfM>9\xb1\xa2\xfd\x81jv\x80\xc1\x9e\xbe\xba\xf3\xa1A\xb29\x93J\xd4<\xb3?\xce\xc8\xce\x1c' # noqa: E501
>>> Account.recover_message(message, signature=signature)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> # Caution about this approach: likely problems if there are leading 0s
>>> signature = 0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c # noqa: E501
>>> Account.recover_message(message, signature=signature)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
"""
message_hash = _hash_eip191_message(signable_message)
return self._recover_hash(message_hash, vrs, signature)
@combomethod
def recoverHash(self, message_hash, vrs=None, signature=None):
"""
Get the address of the account that signed the message with the given hash.
You must specify exactly one of: vrs or signature
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.recover_message`.
This method might be removed as early as v0.5
:param message_hash: the hash of the message that you want to verify
:type message_hash: hex str or bytes or int
:param vrs: the three pieces generated by an elliptic curve signature
:type vrs: tuple(v, r, s), each element is hex str, bytes or int
:param signature: signature bytes concatenated as r+s+v
:type signature: hex str or bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
"""
warnings.warn(
"recoverHash is deprecated in favor of recover_message",
category=DeprecationWarning,
)
return self._recover_hash(message_hash, vrs, signature)
@combomethod
def _recover_hash(self, message_hash, vrs=None, signature=None):
hash_bytes = HexBytes(message_hash)
if len(hash_bytes) != 32:
raise ValueError("The message hash must be exactly 32-bytes")
if vrs is not None:
v, r, s = map(hexstr_if_str(to_int), vrs)
v_standard = to_standard_v(v)
signature_obj = self._keys.Signature(vrs=(v_standard, r, s))
elif signature is not None:
signature_bytes = HexBytes(signature)
signature_bytes_standard = to_standard_signature_bytes(signature_bytes)
signature_obj = self._keys.Signature(signature_bytes=signature_bytes_standard)
else:
raise TypeError("You must supply the vrs tuple or the signature bytes")
pubkey = signature_obj.recover_public_key_from_msg_hash(hash_bytes)
return pubkey.to_checksum_address()
@combomethod
def recoverTransaction(self, serialized_transaction):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.recover_transaction`.
This method will be removed in v0.5
"""
warnings.warn(
"recoverTransaction is deprecated in favor of recover_transaction",
category=DeprecationWarning,
)
return self.recover_transaction(serialized_transaction)
@combomethod
def recover_transaction(self, serialized_transaction):
"""
Get the address of the account that signed this transaction.
:param serialized_transaction: the complete signed transaction
:type serialized_transaction: hex str, bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
.. doctest:: python
>>> raw_transaction = '0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428' # noqa: E501
>>> Account.recover_transaction(raw_transaction)
'0x2c7536E3605D9C16a7a3D7b1898e529396a65c23'
"""
txn_bytes = HexBytes(serialized_transaction)
if len(txn_bytes) > 0 and txn_bytes[0] <= 0x7f:
# We are dealing with a typed transaction.
typed_transaction = TypedTransaction.from_bytes(txn_bytes)
msg_hash = typed_transaction.hash()
vrs = typed_transaction.vrs()
return self._recover_hash(msg_hash, vrs=vrs)
txn = Transaction.from_bytes(txn_bytes)
msg_hash = hash_of_signed_transaction(txn)
return self._recover_hash(msg_hash, vrs=vrs_from(txn))
def setKeyBackend(self, backend):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.set_key_backend`.
This method will be removed in v0.5
"""
warnings.warn(
"setKeyBackend is deprecated in favor of set_key_backend",
category=DeprecationWarning,
)
self.set_key_backend(backend)
def set_key_backend(self, backend):
"""
Change the backend used by the underlying eth-keys library.
*(The default is fine for most users)*
:param backend: any backend that works in
`eth_keys.KeyApi(backend) <https://github.com/ethereum/eth-keys/#keyapibackendnone>`_
"""
self._keys = KeyAPI(backend)
@combomethod
def sign_message(self, signable_message: SignableMessage, private_key):
r"""
Sign the provided message.
This API supports any messaging format that will encode to EIP-191_ messages.
If you would like historical compatibility with
:meth:`w3.eth.sign() <web3.eth.Eth.sign>`
you can use :meth:`~eth_account.messages.encode_defunct`.
Other options are the "validator", or "structured data" standards. (Both of these
are in *DRAFT* status currently, so be aware that the implementation is not
guaranteed to be stable). You can import all supported message encoders in
``eth_account.messages``.
:param signable_message: the encoded message for signing
:param private_key: the key to sign the message with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most importantly the fields: v, r, and s
:rtype: ~eth_account.datastructures.SignedMessage
.. doctest:: python
>>> msg = "I♥SF"
>>> from eth_account.messages import encode_defunct
>>> msghash = encode_defunct(text=msg)
>>> msghash
SignableMessage(version=b'E',
header=b'thereum Signed Message:\n6',
body=b'I\xe2\x99\xa5SF')
>>> # If you're curious about the internal fields of SignableMessage, take a look at EIP-191, linked above # noqa: E501
>>> key = "0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364"
>>> Account.sign_message(msghash, key)
SignedMessage(messageHash=HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750'),
r=104389933075820307925104709181714897380569894203213074526835978196648170704563,
s=28205917190874851400050446352651915501321657673772411533993420917949420456142,
v=28,
signature=HexBytes('0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c'))
.. _EIP-191: https://eips.ethereum.org/EIPS/eip-191
"""
message_hash = _hash_eip191_message(signable_message)
return self._sign_hash(message_hash, private_key)
@combomethod
def signHash(self, message_hash, private_key):
"""
Sign the provided hash.
.. WARNING:: *Never* sign a hash that you didn't generate,
it can be an arbitrary transaction. For example, it might
send all of your account's ether to an attacker.
Instead, prefer :meth:`~eth_account.account.Account.sign_message`,
which cannot accidentally sign a transaction.
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.sign_message`.
This method will be removed in v0.6
:param message_hash: the 32-byte message hash to be signed
:type message_hash: hex str, bytes or int
:param private_key: the key to sign the message with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most
importantly the fields: v, r, and s
:rtype: ~eth_account.datastructures.SignedMessage
"""
warnings.warn(
"signHash is deprecated in favor of sign_message",
category=DeprecationWarning,
)
return self._sign_hash(message_hash, private_key)
@combomethod
def _sign_hash(self, message_hash, private_key):
msg_hash_bytes = HexBytes(message_hash)
if len(msg_hash_bytes) != 32:
raise ValueError("The message hash must be exactly 32-bytes")
key = self._parsePrivateKey(private_key)
(v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash_bytes)
return SignedMessage(
messageHash=msg_hash_bytes,
r=r,
s=s,
v=v,
signature=HexBytes(eth_signature_bytes),
)
@combomethod
def signTransaction(self, transaction_dict, private_key):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.sign_transaction`.
This method will be removed in v0.5
"""
warnings.warn(
"signTransaction is deprecated in favor of sign_transaction",
category=DeprecationWarning,
)
return self.sign_transaction(transaction_dict, private_key)
@combomethod
def sign_transaction(self, transaction_dict, private_key):
"""
Sign a transaction using a local private key.
It produces signature details and the hex-encoded transaction suitable for broadcast using
:meth:`w3.eth.sendRawTransaction() <web3.eth.Eth.sendRawTransaction>`.
        To create the transaction dict that calls a contract, use a contract object:
`my_contract.functions.my_function().buildTransaction()
<http://web3py.readthedocs.io/en/latest/contracts.html#methods>`_
:param dict transaction_dict: the transaction with keys:
nonce, chainId, to, data, value, gas, and gasPrice.
:param private_key: the private key to sign the data with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most
importantly the fields: v, r, and s
:rtype: AttributeDict
.. code-block:: python
>>> # EIP-1559 dynamic fee transaction (more efficient and preferred over legacy txn)
>>> dynamic_fee_transaction = {
"type": 2, # Note that the explicit type is necessary for now
"gas": 100000,
"maxFeePerGas": 2000000000,
"maxPriorityFeePerGas": 2000000000,
"data": "0x616263646566",
"nonce": 34,
"to": "0x09616C3d61b3331fc4109a9E41a8BDB7d9776609",
"value": "0x5af3107a4000",
"accessList": (
(
"0x0000000000000000000000000000000000000001",
(
"0x0100000000000000000000000000000000000000000000000000000000000000", # noqa: E501
)
),
),
"chainId": 1900,
}
>>> key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
>>> signed = Account.sign_transaction(dynamic_fee_transaction, key)
{'hash': HexBytes('0x126431f2a7fda003aada7c2ce52b0ce3cbdbb1896230d3333b9eea24f42d15b0'),
'r': 110093478023675319011132687961420618950720745285952062287904334878381994888509,
'rawTransaction': HexBytes('0x02f8b282076c2284773594008477359400830186a09409616c3d61b3331fc4109a9e41a8bdb7d9776609865af3107a400086616263646566f838f7940000000000000000000000000000000000000001e1a0010000000000000000000000000000000000000000000000000000000000000080a0f366b34a5c206859b9778b4c909207e53443cca9e0b82e0b94bc4b47e6434d3da04a731eda413a944d4ea2d2236671e586e57388d0e9d40db53044ae4089f2aec8'), # noqa: E501
's': 33674551144139401179914073499472892825822542092106065756005379322302694600392,
'v': 0}
>>> w3.eth.sendRawTransaction(signed.rawTransaction)
.. code-block:: python
>>> # legacy transaction (less efficient than EIP-1559 dynamic fee txn)
>>> legacy_transaction = {
# Note that the address must be in checksum format or native bytes:
'to': '0xF0109fC8DF283027b6285cc889F5aA624EaC1F55',
'value': 1000000000,
'gas': 2000000,
'gasPrice': 234567897654321,
'nonce': 0,
'chainId': 1
}
>>> key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
>>> signed = Account.sign_transaction(legacy_transaction, key)
{'hash': HexBytes('0x6893a6ee8df79b0f5d64a180cd1ef35d030f3e296a5361cf04d02ce720d32ec5'),
'r': 4487286261793418179817841024889747115779324305375823110249149479905075174044,
'rawTransaction': HexBytes('0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428'), # noqa: E501
's': 30785525769477805655994251009256770582792548537338581640010273753578382951464,
'v': 37}
>>> w3.eth.sendRawTransaction(signed.rawTransaction)
.. code-block:: python
>>> access_list_transaction = {
"gas": 100000,
"gasPrice": 1000000000,
"data": "0x616263646566",
"nonce": 34,
"to": "0x09616C3d61b3331fc4109a9E41a8BDB7d9776609",
"value": "0x5af3107a4000",
"type": 1,
"accessList": (
(
"0x0000000000000000000000000000000000000001",
(
"0x0100000000000000000000000000000000000000000000000000000000000000", # noqa: E501
)
),
),
"chainId": 1900,
}
>>> key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
>>> signed = Account.sign_transaction(access_list_transaction, key)
{'hash': HexBytes('0x2864ca20a74ca5e044067ad4139a22ff5a0853434f5f1dc00108f24ef5f1f783'),
'r': 105940705063391628472351883894091935317142890114440570831409400676736873197702,
'rawTransaction': HexBytes('0x01f8ad82076c22843b9aca00830186a09409616c3d61b3331fc4109a9e41a8bdb7d9776609865af3107a400086616263646566f838f7940000000000000000000000000000000000000001e1a0010000000000000000000000000000000000000000000000000000000000000080a0ea38506c4afe4bb402e030877fbe1011fa1da47aabcf215db8da8fee5d3af086a051e9af653b8eb98e74e894a766cf88904dbdb10b0bc1fbd12f18f661fa2797a4'), # noqa: E501
's': 37050226636175381535892585331727388340134760347943439553552848647212419749796,
'v': 0}
>>> w3.eth.sendRawTransaction(signed.rawTransaction)
"""
if not isinstance(transaction_dict, Mapping):
raise TypeError("transaction_dict must be dict-like, got %r" % transaction_dict)
account = self.from_key(private_key)
# allow from field, *only* if it matches the private key
if 'from' in transaction_dict:
if transaction_dict['from'] == account.address:
sanitized_transaction = dissoc(transaction_dict, 'from')
else:
raise TypeError("from field must match key's %s, but it was %s" % (
account.address,
transaction_dict['from'],
))
else:
sanitized_transaction = transaction_dict
# sign transaction
(
v,
r,
s,
encoded_transaction,
) = sign_transaction_dict(account._key_obj, sanitized_transaction)
transaction_hash = keccak(encoded_transaction)
return SignedTransaction(
rawTransaction=HexBytes(encoded_transaction),
hash=HexBytes(transaction_hash),
r=r,
s=s,
v=v,
)
@combomethod
def _parsePrivateKey(self, key):
"""
Generate a :class:`eth_keys.datatypes.PrivateKey` from the provided key.
If the key is already of type :class:`eth_keys.datatypes.PrivateKey`, return the key.
:param key: the private key from which a :class:`eth_keys.datatypes.PrivateKey`
will be generated
:type key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: the provided key represented as a :class:`eth_keys.datatypes.PrivateKey`
"""
if isinstance(key, self._keys.PrivateKey):
return key
try:
return self._keys.PrivateKey(HexBytes(key))
except ValidationError as original_exception:
raise ValueError(
"The private key must be exactly 32 bytes long, instead of "
"%d bytes." % len(key)
) from original_exception
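# A minimal round-trip sketch (not part of the upstream library) showing how the
# signing and recovery methods above pair up; the key is freshly generated and the
# message text is an arbitrary illustration value.
if __name__ == "__main__":
    from eth_account.messages import encode_defunct
    _acct = Account.create()
    _msg = encode_defunct(text="round-trip test")
    _signed = Account.sign_message(_msg, _acct.key)
    assert Account.recover_message(_msg, signature=_signed.signature) == _acct.address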
|
[
"etiennebrxv@gmail.com"
] |
etiennebrxv@gmail.com
|
114dc63b984b8214083ad9bdce5acb87bc11c5e2
|
006ff11fd8cfd5406c6f4318f1bafa1542095f2a
|
/Validation/RecoEgamma/python/electronPostValidationSequence_cff.py
|
5a25453640a8bb3aa498bf7a182db2aedd4427fd
|
[] |
permissive
|
amkalsi/cmssw
|
8ac5f481c7d7263741b5015381473811c59ac3b1
|
ad0f69098dfbe449ca0570fbcf6fcebd6acc1154
|
refs/heads/CMSSW_7_4_X
| 2021-01-19T16:18:22.857382
| 2016-08-09T16:40:50
| 2016-08-09T16:40:50
| 262,608,661
| 0
| 0
|
Apache-2.0
| 2020-05-09T16:10:07
| 2020-05-09T16:10:07
| null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
import FWCore.ParameterSet.Config as cms
from Validation.RecoEgamma.ElectronMcSignalPostValidator_cfi import *
from Validation.RecoEgamma.ElectronMcFakePostValidator_cfi import *
electronPostValidationSequence = cms.Sequence(electronMcSignalPostValidator+electronMcFakePostValidator)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
17d2c9dbbbf8c00c6d052e5afbb985d6dd5029c5
|
7be87e6e33d96e6bea2a2a926b99dd023dc378fe
|
/Matrix/SumVal.py
|
622b74401c2215c4e7c94c25e5a6fdb906f6796b
|
[] |
no_license
|
7-RED/Numpy
|
f9d6ee87093ff5d29658c8d6f9c8c130ed521fc7
|
b49b824f9f86c6764860370555e9f52b40b0535a
|
refs/heads/master
| 2023-05-28T07:44:44.917675
| 2021-06-19T13:10:54
| 2021-06-19T13:10:54
| 345,438,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
import numpy as np
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
newarr = np.sum([arr1, arr2])
print(newarr)
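# Note: np.sum over a list of arrays flattens by default, which is why the result
# above is 21; passing axis=0 gives the element-wise sum instead.
print(np.sum([arr1, arr2], axis=0))  # -> [5 7 9]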
|
[
"a27032522@gmail.com"
] |
a27032522@gmail.com
|
a90c8a938f8ba7c8595a5ebb62c61273f63a3afd
|
687b45e8da35b45faee3ddc0dfe485608f8ebd2e
|
/myvirta/warehouse.py
|
60da0ee12485052d190c4ea9317e97ce8a05c85e
|
[] |
no_license
|
antonsolomko/virtonomics
|
bd622e6b179969c2f9344f6a79e1cd3272e76e05
|
18c5eaa3172cef29fbf4bf8ffc333c2a90533e63
|
refs/heads/master
| 2020-04-18T12:25:57.375871
| 2019-12-10T19:35:45
| 2019-12-10T19:35:45
| 167,533,045
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
def resize_warehouses(self):
for unit_id in self.units(unit_class_kind='warehouse'):
unit = self.unit_summary(unit_id, refresh=True)
target_size = int(unit['size'] * unit['filling'] / 90) + 1
print(unit_id, unit['name'], unit['size'], '->', target_size)
self.resize_unit(unit_id, size=target_size)
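# For intuition about the sizing rule above: the target keeps each warehouse near
# 90% utilisation. A worked example with made-up numbers:
_size, _filling = 100, 45
assert int(_size * _filling / 90) + 1 == 51  # a 45%-full unit of size 100 shrinks to 51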
|
[
"anton.kharkov@gmail.com"
] |
anton.kharkov@gmail.com
|
c80e6cc2f105bf18387107816240769b66ea91e4
|
3f8dc5d3bbe5faa9507c3941db4da446fb3b0c5c
|
/ch7/7_10_bad_solution_3.py
|
0facc289ca678d2a252ef5403f4059324550beed
|
[
"MIT"
] |
permissive
|
hajin-kim/2020-HighSchool-Python-Tutoring
|
91c626593e097dd703f4e84da6a910192f8eefda
|
352025a954bff37d21cc3d59e7d5e0f0269a1f17
|
refs/heads/main
| 2023-03-12T04:22:57.287110
| 2021-03-01T18:27:28
| 2021-03-01T18:27:28
| 325,773,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
n = int(input("입력 횟수: "))
maximum = int(input("값: "))
for i in range(n-1):
temp = int(input("값: "))
if temp > maximum:
maximum = temp
print("최댓값:", maximum)
|
[
"kimhajin@yonsei.ac.kr"
] |
kimhajin@yonsei.ac.kr
|
74a2ec94b314b57ce66e0501f9db91de0bf18cdc
|
6f6b0d746cb23e5df8d9d06d638ad3d484a7288c
|
/computations/PostTreatments.py
|
600f00d95935c3ad58034b048be9fa3f5fedffae
|
[] |
no_license
|
frosqh/TRAUMAS
|
b6637f1b4dc20f9049b97bf294e4466ef275ab66
|
7646afa6de6302408ee495ec52a5208e950a1e0e
|
refs/heads/master
| 2022-11-15T07:16:31.752698
| 2020-07-06T06:53:05
| 2020-07-06T06:53:05
| 277,134,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,095
|
py
|
from computations.CommCost import commCost
from computations.EarliestTimes import computeDFT, computeEFT
from computations.Priorities import getExitTask
def applyBSA(g, schedule: dict, verbose=False):
""" Apply BSA to an already-computed schedule
:param g: DAG to schedule
:type g: networkx.DiGraph
:param schedule: Schedule computed, to improve using BSA
:type schedule: dict[int, (int, float, float)]
    :param verbose: Print extra diagnostic information?
:type verbose: bool
:return: A *possibly* improved schedule in format {task : [proc, est, eft],..}
:rtype: dict[int, (int, float, float)]
"""
q = g.graph['nbproc']
    procList = sorted(schedule.values(), key=lambda x: x[2], reverse=False)  # reverse=True would put the latest-finishing processor first
procList = list(map(lambda x: x[0], procList))
procList = list(dict.fromkeys(procList))
initMakespan = schedule[getExitTask(g)][2]
for p in range(q):
if p not in procList:
procList.append(p)
for p in procList: # TODO Check which one of those two is the most effective ...
# for p in range(q):
tasks = []
for t in schedule:
if schedule[t][0] == p:
tasks.append(t)
for t in tasks:
allowedProc = []
eft = schedule[t][2]
est = schedule[t][1]
scheduleBis = dict.copy(schedule)
scheduleBis.pop(t)
dft, unused = computeDFT(g, t, p, scheduleBis, verbose, estimate=False)
if est > dft:
for py in range(q):
if py == p:
continue
esty, efty = computeEFT(g, t, py, scheduleBis, verbose, insertion=True, estimate=False)
if efty < eft:
swap = True
for s in g.successors(t):
swap = swap and schedule[s][1] >= efty + commCost(g, t, s, py, schedule[s][0],
verbose=False)
if swap:
allowedProc.append([py, esty, efty])
schedule = scheduleBis
schedule[t] = [py, esty, efty]
endMakespan = schedule[getExitTask(g)][2]
if endMakespan > initMakespan:
raise Exception("BSA increases makespan ...")
return schedule
def verifBSA(rs, r, verbose=False):
""" Check over the schedule if using BSA really improve the performance
:param rs: Sorted heuristics by (makespan, computation time)
:type rs: str[]
:param r: Full results of the extensive testing
:type r: dict[int, (int, float, float)]
    :param verbose: Print extra diagnostic information?
:type verbose: bool
:return: Analysis of BSA-related improvement in performance
:rtype: str
"""
totModif = 0
cntModif = 0
cntTot = 0
totTps = 0
result = ""
for k in rs:
if k.split(';')[-1] != "True":
cntTot += 1
rK = r[k]
rB = r[k[:-len("False")] + "True"]
totTps += (rK[1] - rB[1]) / (rK[1]) * 100
if rK[0] != rB[0]:
modif = (rK[0] - rB[0]) / (rK[0]) * 100
cntModif += 1
totModif += modif
if verbose:
print("Result :", "\n", "-----------------------------", "\n", "Without BSA :", rK[0], '\n',
'With BSA :',
rB[0], '\n', 'Improvement of', modif, "%")
result += "Mean improvement of " + str(round(totModif / (1 if cntModif == 0 else cntModif), 2)) + "% over " + str(
cntModif) + " heuristics concerned.\n"
result += "Mean improvement of " + str(round(totModif / cntTot, 2)) + "% over all " + str(
cntTot) + " heuristics.\n"
result += "Mean slowdown of " + str(-round(totTps / cntTot, 2)) + "% over " + str(cntTot) + " heuristics.\n"
return result
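# The schedule dictionaries above map task -> (processor, earliest start, earliest
# finish). A small hypothetical helper illustrating that format:
def _makespan(schedule):
    """Return the latest finish time across all scheduled tasks (illustrative only)."""
    return max(eft for _, _, eft in schedule.values())
assert _makespan({0: (0, 0.0, 3.0), 1: (1, 3.5, 7.0), 2: (0, 3.0, 9.5)}) == 9.5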
|
[
"gautier.raimondi@gmail.com"
] |
gautier.raimondi@gmail.com
|
acad35bb1e6f3f81a8b58084ccaa54b220f7b1ef
|
652a40e5f4241f692dee296ec292d5e11b803044
|
/src/src.py
|
39991bc82760602f4a15f9ec3488fa57f0b3f2ad
|
[] |
no_license
|
SSilvering/Python-Assignment-3
|
790c138bc94c465022d3446585110088cccbfe59
|
73e1956e6e430cb4f8313c2b2a07768660275ed7
|
refs/heads/master
| 2021-01-11T09:02:40.446492
| 2017-01-09T21:36:49
| 2017-01-09T21:36:49
| 77,625,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,403
|
py
|
#===============================================================================#
#---------------------------Python Assignment 3---------------------------------#
# #
# Student 1: Shai Hod - 304800402 #
# Student 2: Dudu Abutbul - 200913671 #
#===============================================================================#
# Question -1-
def make_date(year = 2000, month = 1, day = 1):
"""
This function returns a functional implementation of a mutable date form.
@type year: Integers.
@type month: Integers.
@type day: Integers.
"""
if not check_date(year, month, day): # Checks if the date is correct.
print(" An incorrect date. \nDefault date has been set.")
year, month, day = 2000, 1, 1
def dispatch(msg):
""" This function returns the requested function based on the received text. """
if msg == 'year':
return year
elif msg == 'month':
return month
elif msg == 'day':
return day
# Dispatch function.
return dispatch
def get_date(dt, val):
""" getter date. """
return dt(val)
def check_date(year, month, day):
""" This boolean function checks if the date is correct. """
if year < 2000:
return False
if month > 12:
return False
if day > 31:
return False
if month == 2:
        if year % 4 == 0 and year % 100 != 0 or year % 400 == 0: # Checks if it is a leap year.
if not day <= 29:
return False
elif not day <= 28:
return False
    if month in (1, 3, 5, 7, 8, 10, 12):
        if not day <= 31:
            return False
    if month in (4, 6, 9, 11):
        if not day <= 30:
            return False
return True
#------------------------------------------------------------------------------
def day(dt):
""" getter for the day of specific date. """
return get_date(dt, 'day')
def month(dt):
    """ Returns the name of the month of a specific date. """
    months = ('January', 'February', 'March', 'April', 'May', 'June',
              'July', 'August', 'September', 'October', 'November', 'December')
    return months[get_date(dt, 'month') - 1]
def year(dt):
""" getter for the year of specific date. """
return get_date(dt, 'year')
#------------------------------------------------------------------------------
def str_date(dt):
""" This function prints the date. """
if day(dt) == 1:
print('{0}st of {1}, {2}'.format(day(dt), month(dt), year(dt)))
elif day(dt) == 2:
print('{0}nd of {1}, {2}'.format(day(dt), month(dt), year(dt)))
elif day(dt) == 3:
print('{0}rd of {1}, {2}'.format(day(dt), month(dt), year(dt)))
else:
print('{0}th of {1}, {2}'.format(day(dt), month(dt), year(dt)))
#------------------------------------------------------------------------------
# Question -2-
def data_preprocessing_tree(data):
"""
    This function gets a semicolon-separated string of file paths. It validates the paths
    and returns a list pairing each root path with a tuple of the file names under it.
    @param data: Semicolon-separated string of file paths.
    @type data: str
"""
# Enumerate paths, filter wrong paths and complete missing file type.
data = set(map(lambda fix_path: fix_path + 'txt' if fix_path[len(fix_path) - 1:] == '.' else fix_path,
filter(lambda path: True if '..' not in path and '//' not in path else False, data.split(';'))))
# Pre-stage of accumulated stage. creates list of paths without file types.
dirs = set(map(lambda path: path[:path.rfind('/')] if '.' in path else path, data))
# Building the tree.
tree = list(map(lambda dir:
(dir, tuple(map(lambda file: file[file.rfind('/') + 1:],
filter(lambda file: True if (True if (dir + file[file.rfind('/'):] in file) else False)
else False, data))))
, dirs))
return tree
def data_preprocessing_file_types(data):
"""
    This function gets a semicolon-separated string of file paths. It validates the paths
    and returns a list of tuples pairing each file type with its number of occurrences.
    @param data: Semicolon-separated string of file paths.
    @type data: str
"""
# Enumerate paths, filter wrong paths and complete missing file type.
data = set(map(lambda fix_path: fix_path + 'txt' if fix_path[len(fix_path) - 1:] == '.' else fix_path,
filter(lambda path: True if '..' not in path and '//' not in path else False, data.split(';'))))
# Pre-stage of accumulated stage. creates list of the types of the files without the path.
file_types = list(filter(lambda remove:True if not remove.find('.') else None,
map(lambda path: path[path.find('.'):] if '.' in path else path, data)))
# Build a list of tuples with file types and number of occurrences of each of them.
tree = list(
set(map(lambda type:
((type[+1:]), file_types.count(type))
,file_types)))
return tree
#------------------------------------------------------------------------------
# Question -3-
def make_currency(amount = 0.0, symbol = ''):
"""
This function stores a currency and its value.
@param amount: Currency value.
@type amount: Float.
@param symbol: Currency sign.
@type symbol: Unicode character.
"""
def dispatch(message):
""" This function returns the requested function based on the received text. """
if message == 'get_value':
return get_value
elif message == 'set_value':
return set_value
elif message == 'str': # Prints a textual representation of this currency.
print('{0}{1}'.format(symbol, amount))
elif message == 'convert':
return convert
def get_value(msg):
""" This function returns a specific element of the currency. """
if msg == 'amount':
return amount
elif msg == 'symbol':
return symbol
def set_value(msg, value):
""" This function sets a new value of a particular element of the currency. """
nonlocal amount, symbol
if msg == 'amount':
amount = value
elif msg == 'symbol':
symbol = value
def convert(func, new_sign):
""" This function converts this specific currency to another currency. """
nonlocal amount, symbol
amount = func(amount)
symbol = new_sign
# Dispatch function.
return dispatch
#------------------------------------------------------------------------------
# Question -4-
def get_reverse_map_iterator(seq, func = None):
"""
    This function gets a sequence and returns an iterator (a dispatch dictionary
    with 'next' and 'has_more') over the sequence in reverse order.
    It can also get a function that will be applied to each element of the reversed sequence.
"""
reverse_map_iterator = [] # Store new reverse sequence in function lexical scope.
index = len(seq)
if func :
for _ in seq:
index -= 1
reverse_map_iterator.append(func(seq[index]))
else:
for _ in seq:
index -= 1
reverse_map_iterator.append(seq[index])
def next():
""" This function returns the next element in that sequence. """
if has_more():
nonlocal index # Gets access for update the original variable.
index += 1
return reverse_map_iterator[index - 1]
else:
return 'No more items.'
def has_more():
""" This function checks whether there are more elements in sequence. """
return index < len(seq)
# Dispatch function.
return {'next':next,'has_more':has_more}
#------------------------------------------------------------------------------
# Question -5-
def make_mutable_rlist(copy=None):
"""Return a functional implementation of a mutable recursive list."""
contents = empty_rlist
def length():
return len_rlist(contents)
def get_item(ind):
return getitem_rlist(contents, ind)
def push_first(value):
nonlocal contents
contents = make_rlist(value, contents)
def pop_first():
nonlocal contents
f = first(contents)
contents = rest(contents)
return f
    def str():
        if contents == empty_rlist:
            print('[]')
        else:
            print('[{0}'.format(print_rlist(contents)))
def extend(list):
""" This function expands sequence that already exist. """
nonlocal contents # Gets access for update the original variable.
temp_list = make_mutable_rlist(list) # Copy the sequence that had received to new sequence.
end = len_rlist(contents)
for _ in range(end): # Copy the rest of the elements from the original sequence.
end -= 1
temp_list['push_first'](getitem_rlist(contents, end))
contents = None # Initialize variable for receiving a new sequence.
end = temp_list['length']()
for _ in range(end): # Makes new recursive list from temporary list.
end -= 1
contents = make_rlist(temp_list['get_item'](end), contents)
def iterator():
""" This function returns an iterator for this recursive list. """
index = 0
def next():
""" This function returns the next element in that sequence. """
if hasNext():
nonlocal index # Gets access for update the original variable.
index += 1
return get_item(index - 1)
else:
return 'No more items.'
def hasNext():
""" This function checks whether there are more elements in sequence. """
return index < length()
# Dispatch Dictionary.
return {'hasNext': hasNext, 'next': next}
def cut_list(start, end):
""" This function simulates the action of cutting of Python.
It cuts the original sequence and returns a new cut sequence."""
cut_list = make_mutable_rlist()
for _ in range(end):
end -= 1
cut_list['push_first'](get_item(end))
return cut_list
if copy: # Copy Constructor.
""" If function gets a sequence, it is operates like
copy constructor and copying the sequence to new one. """
new_list = make_mutable_rlist()
end = copy['length']()
for _ in range(end):
end -= 1
new_list['push_first'](copy['get_item'](end))
return new_list
# Dispatch Dictionary.
return {'length':length, 'get_item':get_item, 'push_first':push_first,
'pop_first': pop_first, 'slice':cut_list, 'extend':extend, 'get_iterator':iterator, 'str':str}
empty_rlist = None
def make_rlist(first, rest):
"""Make a recursive list from its first element and the rest."""
return (first, rest)
def first(s):
"""Return the first element of a recursive list s."""
return s[0]
def rest(s):
"""Return the rest of the elements of a recursive list s."""
return s[1]
def len_rlist(s):
"""Return the length of recursive list s."""
length = 0
while s != empty_rlist:
s, length = rest(s), length + 1
return length
def getitem_rlist(s, i):
"""Return the element at index i of recursive list s."""
while i > 0:
s, i = rest(s), i - 1
return first(s)
def print_rlist(s):
""" This function prints the rest of the recursive list. """
if rest(s) != empty_rlist:
return '{0},{1}'.format(first(s), print_rlist(rest(s)))
return '{0}]'.format(first(s))
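# Usage sketch for the functional forms defined above (the values are illustrative):
_d = make_date(2021, 3, 2)
str_date(_d)                                       # prints: 2nd of March, 2021
_usd = make_currency(10.0, '$')
_usd('str')                                        # prints: $10.0
_usd('convert')(lambda amount: amount * 0.9, '€')  # hypothetical exchange rate
_usd('str')                                        # prints: €9.0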
|
[
"silvering@gmail.com"
] |
silvering@gmail.com
|
952246ef2950d04035fd2446e6e9782cb2702e84
|
26ad5739adb5ab65237dd5a91d836a8486e9be1e
|
/RegEx-PWChecker.py
|
401650ae4a17acc2b3480c7004c27fc2aac10c0b
|
[] |
no_license
|
worricek/ATBS
|
863c2825f459934d987ed0a1b4a5b03e239e24a1
|
e4add0ff9f3863853974b6d6170737d2f3e4a894
|
refs/heads/master
| 2020-05-02T19:02:04.047995
| 2019-06-12T12:28:53
| 2019-06-12T12:28:53
| 178,146,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#! /usr/bin/python3
import re
while True:
    PW = input('Enter Password (must contain upper case, lower case and a number): ')
    # The original pattern only checked length; look-aheads enforce the advertised complexity.
    if re.match(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)[A-Za-z0-9]{8,}$', PW):
break
else:
print('Password does not match complexity requirements')
#Result=PasswordRegEx.findall(PW)
#if Result==None:
# print('Test')
# print(PasswordRegEx)
#print(Result.group())
#If Result == None:
# Print('Not complex')
#else:
# Print('Yep')
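# Quick illustrative checks for the look-ahead pattern used above (made-up passwords):
_pattern = re.compile(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)[A-Za-z0-9]{8,}$')
assert _pattern.match('Abcdef12')      # upper + lower + digit, 8 chars
assert not _pattern.match('abcdefgh')  # no upper case letter or digit
assert not _pattern.match('Ab1')       # too short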
|
[
"dworrall@gmail.com"
] |
dworrall@gmail.com
|
01cad3ef3f6a1013e2abf814d57a81d3339da8da
|
4ec3965ee31407ae4a5fd2d26abf81728d346bf4
|
/exercicio019.py
|
2789b8f487fc146eec81d74e0d656e4334725feb
|
[] |
no_license
|
Gporfs/Python-s-projects
|
fe0fd560ef46846a2be904b7c89e618eecdacbbc
|
2b427eb894284d894483de566fb54cef105afa44
|
refs/heads/main
| 2023-04-12T08:39:27.762486
| 2021-05-17T12:39:52
| 2021-05-17T12:39:52
| 368,177,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
from datetime import datetime
now = datetime.now()
a = now.year
ano = int(input('Digite o seu ano de nascimento: '))
i = a - ano
if i <= 9:
print('Este atleta é classe: MIRIM!')
elif i <= 14:
print('Este atleta é classe: INFANTIL!')
elif i <= 19:
print('Este atleta é classe: JUNIOR!')
elif i <= 25:
    print('Este atleta é classe: SÊNIOR!')
else:
    # Covers i > 25; the original two elif branches left i == 25 unhandled.
    print('Este atleta é classe: MASTER!')
|
[
"noreply@github.com"
] |
Gporfs.noreply@github.com
|
0ff395afc58a2a4e7ea7aa60dc54bda76fb4d71e
|
f1790e298bcbf7b26cacd3c27850f243c446b9eb
|
/courses/python3/ch4-POO/23_lesson/app.py
|
7e938539f6225c2733257e487b4bd8d76133a50f
|
[] |
no_license
|
misa9999/python
|
36001a1bf0eb842d00b010b02e05b01aa4dfac57
|
251c5226db1bfef4a8445b025f232a27a6924930
|
refs/heads/master
| 2023-03-04T16:25:48.610233
| 2021-02-22T21:37:51
| 2021-02-22T21:37:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from calcipv4 import CalcIPv4
calc_ipv4 = CalcIPv4(ip="192.168.0.1", mask="255.255.255.0", prefix=24)
print(f"IP: {calc_ipv4.ip}")
print(f"Mask: {calc_ipv4.mask}")
print(f"Rede: {calc_ipv4.rede}")
print(f"Broadcast: {calc_ipv4.broadcast}")
print(f"Prefix: {calc_ipv4.prefix}")
print(f"Num ips: {calc_ipv4.ips}")
|
[
"yuukixasuna00@gmailcom"
] |
yuukixasuna00@gmailcom
|
ff4d9bcb48501a51e55438aa2bddba1a8b6de716
|
ebd356a967d1fd0f9069c6de56d911f8cef95b4a
|
/day1.py
|
6257af83ea93923c81bd16e8c26cd0c9967b358d
|
[] |
no_license
|
samgonz/100DaysOfCodeDay1
|
31c4be8c51fc33930d03b17621dece11e1ccdf4c
|
36629aafc26f2379e34274a1e68dcb0a7a842c27
|
refs/heads/master
| 2023-09-01T08:17:07.909901
| 2021-10-26T06:19:53
| 2021-10-26T06:19:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
username = input("Hello User, What is your name?\n")
print(f'Wow! {username.capitalize()}! I am from there as well.')
user_born_city = input(f'What city were you born in {username.capitalize()}?\n')
print(f'Amazing, If I had to give you a band name it would be,\n{username.capitalize()} {user_born_city.capitalize()}!')
|
[
"doomteam1@gmail.com"
] |
doomteam1@gmail.com
|
dd480da5f0f64ad9a50812c3f9ea4aad4de0c538
|
888965665f2d7779c0e0bd4a2f924634474df2f9
|
/world-03/exercice-076.py
|
0ee0ed037d0d4c6b429e6ae3575b1d87b2c6c56e
|
[] |
no_license
|
glaubersabino/python-cursoemvideo
|
cfeb6f95caea3fe74b8acb8de94962ecd9b79f43
|
9026fa876a72169b2316e15d0b7fb84359b8e68e
|
refs/heads/master
| 2023-01-14T07:45:50.807910
| 2020-11-28T15:59:48
| 2020-11-28T15:59:48
| 311,781,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
# Exercise 076
# Create a program with a single tuple containing product names and their respective prices, in sequence.
# At the end, show a price list, organizing the data in tabular form.
lista = ('Caderno', 3.45, 'Lápis', 2, 'Caneta', 1.5, 'Mochila', 50, 'Lapizeira', 2.63, 'Borracha', 0.56, 'Transferidor', 0.5, 'Régua', 1.95)
print('=' * 50)
print('{:^50}'.format('LISTAGEM DE PREÇOS'))
print('=' * 50)
for n in range(0, len(lista), 2):
point = 50 - (len(lista[n]) + 11)
print('{0!s:<}'.format(lista[n]), '.' * point, 'R$', '{0:6.2f}'.format(lista[n + 1]))
# print('.' * 30)
|
[
"glauber.sabino19@gmail.com"
] |
glauber.sabino19@gmail.com
|
4d513a08bd11587f5a10275e189ce5f2d237e756
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/battle/HealJokes.py
|
3c081bb8a05c1baef1a731d4e1b05604a9230632
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 84
|
py
|
from toontown.toonbase import TTLocalizer
toonHealJokes = TTLocalizer.ToonHealJokes
|
[
"jwcotejr@gmail.com"
] |
jwcotejr@gmail.com
|
4f8de00b32812ad9a6ed9d3c47d5a971dde027d3
|
48f5062f270001b4f0bccdcb40929f57703e2315
|
/tests/create_temp_data.py
|
88c269485512eb42ae3ec56ccb9f3f0b99bf9dd5
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
alkanc/fastMRI
|
f65858cfd29cea43e463cfdf9dbea80b81edef0c
|
3dc69df8b097ddc180c02a81217de494887658e9
|
refs/heads/master
| 2023-06-19T05:51:06.410108
| 2021-07-10T06:19:20
| 2021-07-10T06:19:20
| 303,914,500
| 0
| 0
|
MIT
| 2020-10-18T01:09:12
| 2020-10-14T05:45:04
| null |
UTF-8
|
Python
| false
| false
| 3,789
|
py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import h5py
import numpy as np
def create_temp_data(path):
rg = np.random.default_rng(seed=1234)
max_num_slices = 15
max_num_coils = 15
data_splits = {
"knee_data": [
"multicoil_train",
"multicoil_val",
"multicoil_test",
"multicoil_challenge",
"singlecoil_train",
"singlecoil_val",
"singlecoil_test",
"singlecoil_challenge",
],
"brain_data": [
"multicoil_train",
"multicoil_val",
"multicoil_test",
"multicoil_challenge",
],
}
enc_sizes = {
"train": [(1, 128, 64), (1, 128, 49), (1, 150, 67)],
"val": [(1, 128, 64), (1, 170, 57)],
"test": [(1, 128, 64), (1, 96, 96)],
"challenge": [(1, 128, 64), (1, 96, 48)],
}
recon_sizes = {
"train": [(1, 64, 64), (1, 49, 49), (1, 67, 67)],
"val": [(1, 64, 64), (1, 57, 47)],
"test": [(1, 64, 64), (1, 96, 96)],
"challenge": [(1, 64, 64), (1, 48, 48)],
}
metadata = {}
for dataset in data_splits:
for split in data_splits[dataset]:
fcount = 0
(path / dataset / split).mkdir(parents=True)
encs = enc_sizes[split.split("_")[-1]]
recs = recon_sizes[split.split("_")[-1]]
for i in range(len(encs)):
fname = path / dataset / split / f"file{fcount}.h5"
num_slices = rg.integers(2, max_num_slices)
if "multicoil" in split:
num_coils = rg.integers(2, max_num_coils)
enc_size = (num_slices, num_coils, encs[i][-2], encs[i][-1])
recon_size = (num_slices, recs[i][-2], recs[i][-1])
else:
enc_size = (num_slices, encs[i][-2], encs[i][-1])
recon_size = (num_slices, recs[i][-2], recs[i][-1])
data = rg.normal(size=enc_size) + 1j * rg.normal(size=enc_size)
if split.split("_")[-1] in ("train", "val"):
recon = np.absolute(rg.normal(size=recon_size)).astype(
np.dtype("<f4")
)
else:
                    mask = rg.integers(0, 2, size=recon_size[-1]).astype(bool)  # np.bool was removed in NumPy 1.24
with h5py.File(fname, "w") as hf:
hf.create_dataset("kspace", data=data.astype(np.complex64))
if split.split("_")[-1] in ("train", "val"):
hf.attrs["max"] = recon.max()
if "singlecoil" in split:
hf.create_dataset("reconstruction_esc", data=recon)
else:
hf.create_dataset("reconstruction_rss", data=recon)
else:
hf.create_dataset("mask", data=mask)
enc_size = encs[i]
enc_limits_center = enc_size[1] // 2 + 1
enc_limits_max = enc_size[1] - 2
padding_left = enc_size[1] // 2 - enc_limits_center
padding_right = padding_left + enc_limits_max
metadata[str(fname)] = (
{
"padding_left": padding_left,
"padding_right": padding_right,
"encoding_size": enc_size,
"recon_size": recon_size,
},
num_slices,
)
fcount += 1
return path / "knee_data", path / "brain_data", metadata
|
[
"noreply@github.com"
] |
alkanc.noreply@github.com
|
9eb0bfcce36f677bfa9c6ff59d3f99ec5c5a8f59
|
8e2124275ab3a1ceecf62a094afe9d41eb94b7b9
|
/back-end/api/product/models.py
|
9261b0e90151de7c5cc462f1b37f3ca42fe4fd84
|
[] |
no_license
|
suzanneloures/project-gs-ciencia
|
b42db60746e1b3165717a5d4c63ec2624324516b
|
14e0b8e5ac9fa0d7d6c6c6e99b7e1c56073ddb44
|
refs/heads/main
| 2023-03-03T00:19:21.715065
| 2021-02-09T00:44:59
| 2021-02-09T00:44:59
| 335,460,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from django.db import models
class Product(models.Model):
name = models.CharField(max_length=200)
description = models.TextField(max_length=100)
value = models.DecimalField(max_digits=8, decimal_places=2)
created_at = models.DateTimeField(auto_now_add=True)
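# Usage sketch, e.g. in a Django shell once migrations are applied
# (the field values are illustrative, not part of the original file):
# p = Product.objects.create(name="Beaker", description="500 ml", value="19.90")
# print(p.name, p.value, p.created_at)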
|
[
"suzanneloures@gmail.com"
] |
suzanneloures@gmail.com
|
694010153addb578786b33bce2f4ce552e241879
|
62226afe584a0d7f8d52fc38ca416b19ffafcb7a
|
/hwtLib/amba/constants.py
|
d40b3f72f6947d3516cc9ef67a9a89babf429909
|
[
"MIT"
] |
permissive
|
Nic30/hwtLib
|
d08a08bdd0bf764971c4aa319ff03d4df8778395
|
4c1d54c7b15929032ad2ba984bf48b45f3549c49
|
refs/heads/master
| 2023-05-25T16:57:25.232026
| 2023-05-12T20:39:01
| 2023-05-12T20:39:01
| 63,018,738
| 36
| 8
|
MIT
| 2021-04-06T17:56:14
| 2016-07-10T21:13:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,957
|
py
|
"""
Constant used for a signals in AXI, AXI-lite interfaces.
https://static.docs.arm.com/ihi0022/d/IHI0022D_amba_axi_protocol_spec.pdf
"""
BURST_FIXED = 0b00
BURST_INCR = 0b01
BURST_WRAP = 0b10
def BYTES_IN_TRANS(n):
n = int(n)
return n.bit_length() - 1
# http://www.gstitt.ece.ufl.edu/courses/fall15/eel4720_5721/labs/refs/AXI4_specification.pdf p.65
# Normal Non-cacheable Bufferable
CACHE_DEFAULT = 3
"""
+--------------+--------------+-----------------------------------------+
| ARCACHE[3:0] | AWCACHE[3:0] | Memory type |
+==============+==============+=========================================+
| 0000 | 0000 | Device Non-bufferable |
+--------------+--------------+-----------------------------------------+
| 0001 | 0001 | Device Bufferable |
+--------------+--------------+-----------------------------------------+
| 0010 | 0010 | Normal Non-cacheable Non-bufferable |
+--------------+--------------+-----------------------------------------+
| 0011 | 0011 | Normal Non-cacheable Bufferable |
+--------------+--------------+-----------------------------------------+
+--------------+--------------+-----------------------------------------+
| 1010 | 0110 | Write-through No-allocate |
+--------------+--------------+-----------------------------------------+
| 1110 (0110) | 0110 | Write-through Read-allocate |
+--------------+--------------+-----------------------------------------+
| 1010 | 1110 (1010) | Write-through Write-allocate |
+--------------+--------------+-----------------------------------------+
| 1110 | 1110 | Write-through Read and Write-allocate |
+--------------+--------------+-----------------------------------------+
+--------------+--------------+-----------------------------------------+
| 1011 | 0111 | Write-back No-allocate |
+--------------+--------------+-----------------------------------------+
| 1111 (0111) | 0111 | Write-back Read-allocate |
+--------------+--------------+-----------------------------------------+
| 1011 | 1111 (1011) | Write-back Write-allocate |
+--------------+--------------+-----------------------------------------+
| 1111 | 1111 | Write-back Read and Write-allocate |
+--------------+--------------+-----------------------------------------+
"""
PROT_DEFAULT = 0
"""
:note: "prot" is an access permissions signals that can be used to protect
against illegal transactions.
+--------+-------+---------------------+
| PROT | Value | Function |
+========+=======+=====================+
| [0] | 0 | Unprivileged access |
+--------+-------+---------------------+
| | 1 | Privileged access |
+--------+-------+---------------------+
| [1] | 0 | Secure access |
+--------+-------+---------------------+
| | 1 | Non-secure access |
+--------+-------+---------------------+
| [2] | 0 | Data access |
+--------+-------+---------------------+
| | 1 | Instruction access |
+--------+-------+---------------------+
"""
QOS_DEFAULT = 0
LOCK_DEFAULT = 0
"""
+-------+----------+--------------------------+
| RESP | Response | Description |
+=======+==========+==========================+
| 0b00 | OKAY | Normal access success |
+-------+----------+--------------------------+
| 0b01 | EXOKAY | Exclusive access success |
+-------+----------+--------------------------+
| 0b10 | SLVERR | Slave error |
+-------+----------+--------------------------+
| 0b11 | DECERR | Decode error |
+-------+----------+--------------------------+
"""
RESP_OKAY = 0
RESP_EXOKAY = 1
RESP_SLVERR = 2
RESP_DECERR = 3
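# BYTES_IN_TRANS maps a power-of-two byte count to the AxSIZE encoding
# (log2 of the transfer width); a few illustrative values:
assert BYTES_IN_TRANS(1) == 0  # 8-bit transfers
assert BYTES_IN_TRANS(4) == 2  # 32-bit transfers
assert BYTES_IN_TRANS(8) == 3  # 64-bit transfers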
|
[
"nic30@seznam.cz"
] |
nic30@seznam.cz
|
32b868367255f0b0f79561f35256fe4eca995dc6
|
4407e8d32d9dd6b6c1a560563732fbe7f01aa38c
|
/transliterate.py
|
2b7242b763c41099e43e802efd0b7006c47ec15b
|
[] |
no_license
|
yorya11/2017-osnov-programm
|
9f41b94e01ea933400478b2e9f867c979b75e083
|
cfbe5d5434d9ec78decbf3ee6c6cd727beb365cc
|
refs/heads/master
| 2021-07-19T04:02:35.340416
| 2017-10-27T18:04:13
| 2017-10-27T18:04:13
| 103,292,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
import sys
lines = sys.stdin.readlines()
sent_id = 1
dict = str.maketrans("abčdefghijklmnoprsšzžtuvyäöABČDEFGHIJKLMNOPRSŠZŽTUVYÄÖ", "абчдэфгхийклмнопрсшзжтувюäöАБЧДЭФГХИЙКЛМНОПРСШЗЖТУВЮÄÖ")
for c in lines:
number = 1
c = c.replace('.', ' .').replace(',', ' ,').replace(':', ' :').replace(';', ' ;').replace('?', ' ?').replace('!', ' !')
c = c.split(' ')
print(sent_id)
for a in c:
print('%d\t%s\t_\t_\t_\t_\t_\t_\t_\ttranslit=%s'%(number,a,a.translate(dict)))
number=number+1
sent_id=sent_id+1
|
[
"yureva-anna@yandex.ru"
] |
yureva-anna@yandex.ru
|
bcb80121aea34561443ee82bfa8b8f9690ea5971
|
eadd6d96102ece2284a30f4271df24104d6a8710
|
/bin/pip3.6
|
624c26e3b1012d93dbda04aff9df7c9e174bb42d
|
[] |
no_license
|
abe4abraham/abranovawebsite
|
16c6cb205da815cfde90073deb2c089f8de539aa
|
0d7b0387c884567725fcd4f5482495f92274679d
|
refs/heads/master
| 2021-01-22T22:16:34.833870
| 2017-05-25T20:16:29
| 2017-05-25T20:16:29
| 92,766,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
6
|
#!/Users/abrahamnghwani/Abranova_website/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==7.1.0','console_scripts','pip3.6'
__requires__ = 'pip==7.1.0'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('pip==7.1.0', 'console_scripts', 'pip3.6')()
)
|
[
"noreply@github.com"
] |
abe4abraham.noreply@github.com
|
3df31c2d6ecda74fa64f916038640d38bb5dbe0c
|
beb4e4c39a78e717230d58dd3223996a19dc0ae6
|
/T-Palet/T-Palet1.py
|
f0ded925378e8f80598d9b4c9a53b5dda31eb34a
|
[] |
no_license
|
KazyD/T-Palet
|
7020074c847757065f95d58427ec2644cd683150
|
16f36a74754bb1b4626d97559e6d183624aadaca
|
refs/heads/main
| 2023-08-19T02:08:38.479919
| 2021-09-13T09:54:34
| 2021-09-13T09:54:34
| 405,924,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
"""
#import sys
#sys.path.append('/Users/tam/prog/T-Palet')
import TObject
dummy = 0
class T_Palet():
    # Methods
def __init__(self):
self.dummy = 5
return
    def pairEvaluation(self):
        return
    def soloEvaluation(self):
        return
    def ladderUp(self):
        return
    def ladderDown(self):
        return
if __name__ == '__main__':
    f = TObject.TObject()  # assuming the TObject module defines a TObject class with a print() method
    f.print()
|
[
"yatsuhashi072020@gmail.com"
] |
yatsuhashi072020@gmail.com
|
7b9d3e6d1d4aa99b311fa20f6bbf007f8456f781
|
59eb1c9a4debeb6a43ebbcfeebbce3d90e94d927
|
/HackThisSite/programTask/t5/test.py
|
826ffc92362de08f6e7797244a7b25a582dd059b
|
[] |
no_license
|
qwee123/CTF-practice
|
455959de28441806ed4e142873ce78a90639387c
|
309490e760d44e538046caa2760dadba66fa45cd
|
refs/heads/master
| 2020-12-27T16:51:24.494668
| 2020-05-22T12:31:55
| 2020-05-22T12:31:55
| 237,977,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
a =[1,2,3,4,5,6,7,8,9]
def recurve(a):
    # Note: a[:-1] creates a new list, so the recursive calls only mutate copies;
    # in the caller's list just the last element is zeroed at each level.
    if(len(a) == 1):
        a[0] = 0
        return
    a[-1] = 0
    recurve(a[:-1])
    print('q', a, len(a))
recurve(a)
print(a)
|
[
"ben13057991@gmail.com"
] |
ben13057991@gmail.com
|
fd57a07589e888958cb91d8d2de84aa1784b14ff
|
51903ff436f6e48e0ade64eac1503d79026f5065
|
/aoc6.py
|
372e045bfad420e2fb991e9502d81414c6e4e52c
|
[] |
no_license
|
dchen327/advent-of-code-2020
|
b9d89555459b812c293e1071fef3063508f1f7ce
|
f414845fdfed2be0d650158d168ccb2bfaaaf5a0
|
refs/heads/master
| 2023-02-01T22:33:58.361334
| 2020-12-21T13:28:20
| 2020-12-21T13:28:20
| 318,808,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
from collections import defaultdict
groups = open('aoc6.txt').read().split('\n\n')
# part 1
groups_no_spaces = [g.replace('\n', '') for g in groups]
t = sum(len(set(group)) for group in groups_no_spaces)
print(t)
# part 2
t = 0
for group in groups:
ppl = group.split('\n')
num_answers = defaultdict(int)
for person in ppl:
for answer in person:
num_answers[answer] += 1
t += list(num_answers.values()).count(len(ppl))
print(t)
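# Part 2 can also be written as a set intersection per group, which is
# equivalent to the counting approach above on well-formed input:
t2 = sum(len(set.intersection(*map(set, g.split('\n')))) for g in groups)
print(t2)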
|
[
"davidc3287@gmail.com"
] |
davidc3287@gmail.com
|
fe46e01e56f17f5042af924762e920fb36afe416
|
57d4ae3a38f9dc459527751cd4b90e818fc192e7
|
/testScrapeDice.py
|
e012eb74310835b936aa08a810f99c833baa2422
|
[] |
no_license
|
shrprgrmr/webscrapingproject
|
8bddddb0aee65916ce14b80daa9b071479c85158
|
afbdeefe28145a824fcec1ed5005c6960785b860
|
refs/heads/master
| 2020-12-25T14:13:16.080940
| 2016-09-06T01:05:16
| 2016-09-06T01:05:16
| 67,460,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
from scrapy.selector import Selector
import re
import string
#filename="14d9010d785bd789.eml"
#A sample of Dice emails about mid-way through the database history
rootpath="C:\\Users\\Jake\\Documents\\Cont. Ed\\gyb\\gyb-0.47-windows\\gyb\\GYB-GMail-Backup-jacobbumpus@gmail.com\\";
filenames=['2016\\4\\24\\1544828cdcf8c6df.eml']#,'2016\\4\\23\\154430353dfb512c.eml','2016\\4\\22\\1543dc1cd369eee6.eml','2016\\4\\21\\15438b3dea8f5afc.eml','2016\\4\\20\\1543389ff9a7c0ef.eml','2016\\4\\19\\1542e6ab338790a4.eml','2016\\4\\18\\154293efa9a145e3.eml','2016\\4\\17\\1542416db1684692.eml','2016\\4\\16\\1541ed951a82e219.eml','2016\\4\\15\\15419cc7978d06cf.eml','2016\\4\\14\\15414a2510222ba0.eml','2016\\4\\13\\1540f7e5d115b250.eml','2016\\4\\12\\1540a577c03e1eb3.eml','2016\\4\\10\\153ffefc04e0c4aa.eml','2016\\4\\9\\153faca27324dab1.eml','2016\\4\\8\\153f5ad5412b014a.eml','2016\\4\\7\\153f07def0dbf967.eml','2016\\4\\6\\153eb57378691597.eml','2016\\4\\5\\153e64ec64c54b2f.eml','2016\\4\\4\\153e12767a62868c.eml','2016\\4\\3\\153dc023c4f15b11.eml','2016\\4\\2\\153d6d9e81ff4480.eml','2016\\4\\1\\153d196abfc2f328.eml','2016\\3\\31\\153cc8aa18af4824.eml','2016\\3\\30\\153c76adbf3bcc19.eml','2016\\3\\29\\153c23b09293bec2.eml','2016\\3\\28\\153bd0c9029f26a2.eml','2016\\3\\26\\153b2c8b90a96bae.eml','2016\\3\\25\\153ad998f0483d8d.eml','2016\\3\\9\\1535b7d3c4ff0205.eml','2016\\3\\8\\1535657a1f704053.eml','2016\\3\\7\\153513461aada424.eml','2016\\3\\5\\15346db577113605.eml','2016\\3\\4\\15341a514769efe8.eml','2016\\3\\3\\1533c76b5d28ab67.eml','2016\\3\\2\\15337766b77ec8ec.eml','2016\\3\\1\\1533241c3f05f132.eml','2016\\2\\29\\1532d1fa39ff58e6.eml','2016\\2\\28\\15327e260bd21da1.eml','2016\\2\\27\\15322ce69e9e882c.eml','2016\\2\\26\\1531d946df25fe1e.eml','2016\\2\\25\\15318eb11e926cde.eml','2016\\2\\24\\15313456ad91f92c.eml','2016\\2\\22\\15308f856cfa4b1c.eml','2016\\2\\21\\15303f05d2147efc.eml','2016\\2\\20\\152fec6cc2666f9a.eml','2016\\2\\19\\152f99e2cd3cd4c7.eml','2016\\2\\18\\152f47fd3f773a35.eml','2016\\2\\17\\152ef6712fd0264c.eml','2016\\2\\16\\152ea28b7a8f7a1d.eml','2016\\2\\15\\152e4e8d5fb97ecd.eml','2016\\2\\14\\152dfc3eee854c21.eml','2016\\2\\13\\152dad9a3dc77942.eml','2016\\2\\12\\152d591c3f264e0a.eml','2016\\2\\11\\152d076b00747470.eml','2016\\2\\10\\152cb5512f655628.eml','2016\\2\\9\\152c628879f084e3.eml','2016\\2\\8\\152c0f612ca8da52.eml','2016\\2\\7\\152bbb8c06772063.eml']
for file in filenames:
f = open(rootpath+file, 'r')
body = f.read()
body=string.replace(body,"=\n","")
print file
#JobTitle & PostingURL
try:
JobTitles=Selector(text=body).xpath("//td[@class='3D\"job-title\"']/a/text()").extract()
PostingURLs=Selector(text=body).xpath("//td[@class='3D\"job-title\"']/a/@href").extract()
except:
print "Job title & URL : Selector failed"
#JobInfo (should include "Company | City, State")
try:
JobInfoTDs=Selector(text=body).xpath("//td[@class='3D\"job-info\"']/text()").extract()
except:
print "Error selecting JobInfoTDs"
try:
for i in range(0, len(JobTitles)):
print "JobTitle: "+JobTitles[i]
print "PostingURL: "+PostingURLs[i][3:-2]
jobInfoTD=JobInfoTDs[i]
CoSplit=jobInfoTD.split(" | ")
print "PostingCompany: "+ CoSplit[0].strip()
locSplit=CoSplit[1].split(", ")
print "City: "+string.capwords(locSplit[0].strip().split("/")[0])
print "State: "+string.capwords(locSplit[1].strip().split("/")[0])
except IndexError:
print "IndexError in JobTitles, PostingURLs or JobInfoTDs"
|
[
"jacobbumpus@gmail.com"
] |
jacobbumpus@gmail.com
|
a9d8a15cf26bdbdaf9a2968a4e53081d886093bb
|
3f85a2b5ebaf040d295bd5d98c49b59e9ea82643
|
/generate_image_lists.py
|
5c5c7f3680244e04927445e7bf9a1c038a43dfcb
|
[
"Apache-2.0"
] |
permissive
|
vcg-uvic/image-matching-benchmark-baselines
|
6b69d0db384c4af90b431f421077aa0f8e1ec04f
|
01510c4d2c07cad89727013241a359bb22689a1b
|
refs/heads/master
| 2021-01-04T00:35:04.375020
| 2020-10-01T17:19:54
| 2020-10-01T17:19:54
| 292,169,250
| 19
| 1
|
Apache-2.0
| 2020-10-01T17:19:56
| 2020-09-02T03:29:45
| null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
import os
from glob import glob
src = os.path.join('..', 'imw-2020')
seqs = [os.path.basename(p) for p in glob(os.path.join(src, '*'))]
print(seqs)
if not os.path.isdir('txt'):
os.makedirs('txt')
for seq in seqs:
ims = glob(os.path.join(src, seq, '*.jpg'))
with open(os.path.join('txt', 'list-{}.txt'.format(seq)), 'w') as f:
for im in ims:
f.write('{}\n'.format(im))
|
[
"ducha.aiki@gmail.com"
] |
ducha.aiki@gmail.com
|
e1ebdd6646bc1e6c2cb80607edc5f71a9df2e53b
|
b085618bd868a47c41d1de075867799ec0c223a9
|
/application/controllers/pali_meaning.py
|
5d8daa5c94617220f13060d81abf3737d701bfa4
|
[
"MIT"
] |
permissive
|
natnaka/pali-dict-v2
|
b991b2812db1397feff5164d3bb91dff555a4a8c
|
eaeaf2170663dd0db82f0331ae7aa714962f2787
|
refs/heads/master
| 2020-03-15T15:11:59.908522
| 2018-05-21T13:48:37
| 2018-05-21T13:48:37
| 132,206,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
# encoding: utf-8
import math
from flask import Blueprint, jsonify, request
from application.models import TwarePaliWord
from application.models import ThePali
from application.models import ThePaliCompact
bp = Blueprint('pali_controller', __name__, url_prefix='/pali')
@bp.route('/meaning/<word>', methods=['GET'])
def meaning(word):
try:
similar = int(request.args.get('similarity', 60))/100.0
limit = int(request.args.get('limit', 5))
    except (TypeError, ValueError):
similar = 0.6
limit = 5
r = TwarePaliWord.query.filter_by(word=word).first()
d = dict(success=False)
if r:
d['success'] = True
meaning = {}
for col in r.__table__.columns:
meaning[col.name] = getattr(r, col.name)
d['tware_meaning'] = meaning
r = ThePaliCompact.query.filter_by(word=word).first()
if r:
d['success'] = True
meaning = {}
for col in r.__table__.columns:
meaning[col.name] = getattr(r, col.name)
d['the_pali_meaning'] = meaning
if not d['success']:
# Find similar word
n = int(math.ceil(len(word) * similar))
prefix = word[:n]
rs = TwarePaliWord.query.filter(TwarePaliWord.word.startswith(prefix)).limit(limit).all()
means = []
for r in rs:
meaning = {}
for col in r.__table__.columns:
meaning[col.name] = getattr(r, col.name)
means.append(meaning)
d['tware_similar_words'] = means
rs = ThePaliCompact.query.filter(ThePaliCompact.word.startswith(prefix)).limit(limit).all()
means = []
for r in rs:
meaning = {}
for col in r.__table__.columns:
meaning[col.name] = getattr(r, col.name)
means.append(meaning)
d['the_pali_similar_words'] = means
return jsonify(d)
@bp.route('/meaning_v2/<word>', methods=['GET'])
def meaning_v2(word):
try:
similar = int(request.args.get('similarity', 60))/100.0
limit = int(request.args.get('limit', 5))
    except (TypeError, ValueError):
similar = 0.6
limit = 5
r = TwarePaliWord.query.filter_by(word=word).first()
d = dict(success=False)
if r:
d['success'] = True
meaning = {}
for col in r.__table__.columns:
meaning[col.name] = getattr(r, col.name)
d['tware_meaning'] = meaning
rs = ThePali.query.filter_by(word=word).order_by(ThePali.freq.desc()).all()
if rs:
d['success'] = True
means = []
for r in rs:
meaning = {}
for col in r.__table__.columns:
meaning[col.name] = getattr(r, col.name)
means.append(meaning)
d['the_pali_meaning'] = means
if not d['success']:
# Find similar word
n = int(math.ceil(len(word) * similar))
prefix = word[:n]
rs = TwarePaliWord.query.filter(TwarePaliWord.word.startswith(prefix)).limit(limit).all()
means = []
for r in rs:
meaning = {}
for col in r.__table__.columns:
meaning[col.name] = getattr(r, col.name)
means.append(meaning)
d['similar_words'] = means
return jsonify(d)
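# Hypothetical smoke test using Flask's test client; assumes the application
# package exposes an app factory that registers this blueprint:
# from application import create_app
# client = create_app().test_client()
# print(client.get('/pali/meaning/dhamma?similarity=70&limit=3').get_json())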
|
[
"natnaka@gmail.com"
] |
natnaka@gmail.com
|
8ee785f3cc8700fdffa849e776b1cefd1a91e051
|
a12f0827133f18a0a08e22af97733754a2841621
|
/mqtt/config.py
|
495843eca7dde1a09667e5571acb9e5a47f13e70
|
[] |
no_license
|
andrewlorien/RPi
|
7909fea8ed5cf0e607b8df086fb4bc4619cf8e01
|
95135eddcad152999e97bfdb9e1f680db8064e91
|
refs/heads/master
| 2020-04-12T17:33:23.176588
| 2018-11-29T12:27:35
| 2018-11-29T12:27:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
import yaml
import sys
#######
# load config (extract to lib)
configFile = "config.yml"
if len(sys.argv) > 1:
configFile = sys.argv[1]
with open(configFile, 'r') as ymlfile:
    cfg = yaml.safe_load(ymlfile)
def getValue(name, default):
if name in cfg and cfg[name] != "":
return cfg[name]
return default
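# Illustration (not part of the original file): the lookup rule above,
# restated for a plain dict. An empty string counts as "not configured",
# so the default wins; 'broker' and 'port' are made-up keys.
def get_value(d, name, default):
    if name in d and d[name] != "":
        return d[name]
    return default

assert get_value({"broker": "localhost", "port": ""}, "broker", "x") == "localhost"
assert get_value({"broker": "localhost", "port": ""}, "port", 1883) == 1883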
|
[
"SvenDowideit@home.org.au"
] |
SvenDowideit@home.org.au
|
e54e2b65aabfa31bae7f893303793ae2aea1422a
|
e27f9f1f8bef8b1f4676df84ee3e753974d21a1c
|
/tests/ignite/engine/test_engine_state_dict.py
|
4ccfb7ea772077b826d7a31f9c39b8a0c5ad98c7
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/ignite
|
8fb275638e94e702762eec932b21dc8df7a54cb0
|
34a707e53785cf8a524589f33a570a7516fe064e
|
refs/heads/master
| 2023-09-02T00:27:22.485479
| 2023-08-31T15:10:14
| 2023-08-31T15:10:14
| 111,835,796
| 4,613
| 788
|
BSD-3-Clause
| 2023-09-13T07:46:41
| 2017-11-23T17:31:21
|
Python
|
UTF-8
|
Python
| false
| false
| 10,004
|
py
|
from collections.abc import Mapping
import pytest
import torch
from ignite.engine import Engine, Events, State
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
def test_state_dict():
engine = Engine(lambda e, b: 1)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == 3
assert "iteration" in sd and sd["iteration"] == 0
assert "max_epochs" in sd and sd["max_epochs"] is None
assert "epoch_length" in sd and sd["epoch_length"] is None
def _test(state):
engine.state = state
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1
assert sd["iteration"] == engine.state.iteration
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
_test(State(iteration=500, epoch_length=1000, max_epochs=100))
_test(State(epoch=5, epoch_length=1000, max_epochs=100))
def test_state_dict_with_user_keys():
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
def _test(state):
engine.state = state
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1 + len(
engine.state_dict_user_keys
)
assert sd["iteration"] == engine.state.iteration
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
assert sd["alpha"] == engine.state.alpha
assert sd["beta"] == engine.state.beta
_test(State(iteration=500, epoch_length=1000, max_epochs=100, alpha=0.01, beta="Good"))
def test_state_dict_integration():
engine = Engine(lambda e, b: 1)
data = range(100)
engine.run(data, max_epochs=10)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1
assert sd["iteration"] == engine.state.iteration == 10 * 100
assert sd["epoch_length"] == engine.state.epoch_length == 100
assert sd["max_epochs"] == engine.state.max_epochs == 10
def test_load_state_dict_asserts():
engine = Engine(lambda e, b: 1)
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary"):
engine.load_state_dict("123")
with pytest.raises(ValueError, match=r"is absent in provided state_dict"):
engine.load_state_dict({})
with pytest.raises(ValueError, match=r"state_dict should contain only one of"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120})
with pytest.raises(ValueError, match=r"state_dict should contain only one of"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120, "iteration": 12, "epoch": 123})
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
with pytest.raises(ValueError, match=r"Required user state attribute"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120, "iteration": 12})
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"If epoch is provided in the state dict, epoch_length should not be None"):
engine.load_state_dict({"max_epochs": 100, "epoch": 2, "epoch_length": None})
def test_load_state_dict():
engine = Engine(lambda e, b: 1)
def _test(sd):
engine.load_state_dict(sd)
if "iteration" in sd:
assert sd["iteration"] == engine.state.iteration
elif "epoch" in sd:
assert sd["epoch"] == engine.state.epoch
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
_test({"max_epochs": 100, "epoch_length": 120, "iteration": 123})
_test({"max_epochs": 100, "epoch_length": 120, "epoch": 5})
def test_load_state_dict_with_user_keys():
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
def _test(sd):
engine.load_state_dict(sd)
if "iteration" in sd:
assert sd["iteration"] == engine.state.iteration
elif "epoch" in sd:
assert sd["epoch"] == engine.state.epoch
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
assert sd["alpha"] == engine.state.alpha
assert sd["beta"] == engine.state.beta
_test({"max_epochs": 100, "epoch_length": 120, "iteration": 123, "alpha": 0.1, "beta": "abc"})
def test_load_state_dict_integration():
engine = Engine(lambda e, b: 1)
state_dict = {"max_epochs": 100, "epoch_length": 120, "epoch": 5}
engine.load_state_dict(state_dict)
engine.add_event_handler(Events.ITERATION_COMPLETED, IterationCounter(5 * 120 + 1))
engine.add_event_handler(Events.EPOCH_COMPLETED, EpochCounter(6))
data = range(120)
engine.run(data)
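# Illustration (not part of the test suite): the checkpoint/resume round trip
# that the integration tests above and below exercise, in one place. The file
# name is a placeholder.
def _checkpoint_resume_sketch(path="engine_ckpt.pt"):
    engine = Engine(lambda e, b: 1)
    engine.run(range(120), max_epochs=5)
    torch.save(engine.state_dict(), path)
    resumed = Engine(lambda e, b: 1)
    resumed.load_state_dict(torch.load(path))
    resumed.run(range(120), max_epochs=10)  # continues from epoch 5
    assert resumed.state.epoch == 10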
def test_load_state_dict_with_params_overriding_integration():
state_dict = {"max_epochs": 100, "epoch_length": 120, "epoch": 5}
data = range(120)
# Override max_epochs
new_max_epochs = 10
engine = Engine(lambda e, b: 1)
engine.load_state_dict(state_dict)
state = engine.run(data, max_epochs=new_max_epochs)
assert state.max_epochs == new_max_epochs
assert state.iteration == state_dict["epoch_length"] * new_max_epochs
assert state.epoch == new_max_epochs
with pytest.raises(ValueError, match=r"Argument max_epochs should be greater than or equal to the start epoch"):
engine.load_state_dict(state_dict)
engine.run(data, max_epochs=3)
# Override epoch_length
with pytest.raises(ValueError, match=r"Argument epoch_length should be same as in the state"):
engine.load_state_dict(state_dict)
engine.run(data, epoch_length=90)
def test_empty_state_dict_load_state_dict():
engine = Engine(lambda e, b: 1)
sd = engine.state_dict()
engine.load_state_dict(sd)
def test_continue_training():
# Tests issue : https://github.com/pytorch/ignite/issues/993
max_epochs = 2
data = range(10)
engine = Engine(lambda e, b: 1)
state = engine.run(data, max_epochs=max_epochs)
assert state.max_epochs == max_epochs
assert state.iteration == len(data) * max_epochs
assert state.epoch == max_epochs
@engine.on(Events.STARTED)
def assert_continue_training():
assert engine.state.epoch == max_epochs
state = engine.run(data, max_epochs=max_epochs * 2)
assert state.max_epochs == max_epochs * 2
assert state.iteration == len(data) * max_epochs * 2
assert state.epoch == max_epochs * 2
def test_state_dict_with_user_keys_integration(dirname):
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
@engine.on(Events.STARTED)
def init_user_values(_):
engine.state.alpha = 0.1
fp = dirname / "engine.pt"
@engine.on(Events.COMPLETED)
def save_engine(_):
state_dict = engine.state_dict()
assert "alpha" in state_dict
torch.save(state_dict, fp)
engine.run([0, 1])
assert fp.exists()
state_dict = torch.load(fp)
assert "alpha" in state_dict and state_dict["alpha"] == 0.1
def test_epoch_length():
def _test(data, max_epochs, num_iters):
batch_checker = BatchChecker(data)
def update_fn(_, batch):
assert batch_checker.check(batch), f"{batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = Engine(update_fn)
engine.run(data, max_epochs=max_epochs, epoch_length=num_iters)
if num_iters is None:
num_iters = len(data)
assert engine.state.iteration == num_iters * max_epochs
assert engine.state.epoch == max_epochs
def _test_as_iter(data, max_epochs, num_iters):
batch_checker = BatchChecker(data)
def update_fn(_, batch):
assert batch_checker.check(batch), f"{batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = Engine(update_fn)
engine.run(iter(data), max_epochs=max_epochs, epoch_length=num_iters)
if num_iters is None:
num_iters = len(data)
assert engine.state.iteration == num_iters * max_epochs
assert engine.state.epoch == max_epochs
max_epochs = 10
num_iters = 21
data = torch.randint(0, 1000, size=(num_iters,))
_test(data, max_epochs, num_iters=None)
_test(data, max_epochs, num_iters)
_test(data, max_epochs, num_iters // 2)
_test(data, max_epochs, num_iters * 2)
_test_as_iter(data, 1, num_iters)
_test_as_iter(data, 2, num_iters // 2)
def test_state_custom_attrs_init():
def _test(with_load_state_dict=False):
engine = Engine(lambda e, b: None)
engine.state.alpha = 0.0
engine.state.beta = 1.0
if with_load_state_dict:
engine.load_state_dict({"iteration": 3, "max_epochs": 5, "epoch_length": 5})
@engine.on(Events.STARTED | Events.EPOCH_STARTED | Events.EPOCH_COMPLETED | Events.COMPLETED)
def check_custom_attr():
assert hasattr(engine.state, "alpha") and engine.state.alpha == 0.0
assert hasattr(engine.state, "beta") and engine.state.beta == 1.0
engine.run([0, 1, 2, 3, 4], max_epochs=5)
_test()
_test(with_load_state_dict=True)
def test_restart_training():
data = range(10)
engine = Engine(lambda e, b: 1)
state = engine.run(data, max_epochs=5)
with pytest.raises(
ValueError,
match=r"Argument max_epochs should be greater than or equal to the start epoch defined in the state: 2 vs 5. "
r"Please, .+ "
r"before calling engine.run\(\) in order to restart the training from the beginning.",
):
state = engine.run(data, max_epochs=2)
state.max_epochs = None
engine.run(data, max_epochs=2)
|
[
"noreply@github.com"
] |
pytorch.noreply@github.com
|
cc301cc6a4d6d327dec9af9c895018ae94564882
|
cae38bb6a8a4ba5adfd151048b5424849aa552a0
|
/blockman/blockman.py
|
84b511c64250dfc4c511846bc6ca689fdc05b2c1
|
[] |
no_license
|
samjar/Suhvaaage
|
3fb43e3334b387de4d453517abd4290167f707d5
|
86b4ed899c376b61e870f2500d24cb30edb9410c
|
refs/heads/master
| 2020-04-05T14:39:23.088046
| 2016-10-09T21:57:21
| 2016-10-09T21:57:21
| 56,803,778
| 0
| 1
| null | 2016-04-21T20:39:52
| 2016-04-21T20:31:12
| null |
UTF-8
|
Python
| false
| false
| 15,148
|
py
|
import pygame
import math
from color import *
# - imports the pygame module into the "pygame" namespace.
from pygame import *
from blockmanlevels import BlockManLevels
WIN_WIDTH = 960
WIN_HEIGHT = 640
HALF_WIDTH = int(WIN_WIDTH / 2)
HALF_HEIGHT = int(WIN_HEIGHT / 2)
DISPLAY = (WIN_WIDTH, WIN_HEIGHT)
# - number of bits to use for color
DEPTH = 32
# - which display modes you want to use
FLAGS = 0
#FLAGS = FULLSCREEN, RESIZEABLE
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.init()
soundJump = mixer.Sound("jump.wav")
soundFall = mixer.Sound("fall.wav")
soundHurt = mixer.Sound("hurt.wav")
soundItem = mixer.Sound("item.wav")
soundJumpBlock = mixer.Sound("jumpblock.wav")
soundStompCharge = mixer.Sound("stompcharge.wav")
soundStomp = mixer.Sound("stomp.wav")
mixer.music.load("mathgrant_Space_Blocks.mp3")
blockLevels = BlockManLevels()
""" starts main function """
def main():
gameDisplay = display.set_mode(DISPLAY, FLAGS, DEPTH)
display.set_caption("The Incredible Block Man!")
clock = time.Clock()
mixer.music.set_volume(0.5)
mixer.music.play(1)
soundJump.set_volume(0.3)
soundFall.set_volume(0.3)
soundHurt.set_volume(0.3)
soundItem.set_volume(0.3)
soundJumpBlock.set_volume(0.3)
soundStompCharge.set_volume(0.3)
soundStomp.set_volume(0.3)
# - sets arrow keys being pressed to OFF
up = down = left = right = False
# - creates the background
bg = Surface((32, 32))
bg.convert()
bg.fill(BLACK)
# - make "entities" a sprite group
entities = pygame.sprite.Group()
# - creates player
player = Player(32, 32)
entities.add(player)
platforms = []
# - defines x, y
x = y = 0
current_level = blockLevels.current_level
level = blockLevels.levels[current_level]
def build_level(x, y):
x = y = 0
entities.add(player)
current_level = blockLevels.current_level
level = blockLevels.levels[current_level]
""" build the level """
# - checks each row and column
for row in level:
for col in row:
# - turn letters into Platforms, add to list and sprite group
if col == "P":
p = Platform(x, y)
platforms.append(p)
entities.add(p)
if col == "E":
e = ExitBlock(x, y)
platforms.append(e)
entities.add(e)
if col == "C":
c = ClearStageBlock(x, y)
platforms.append(c)
entities.add(c)
if col == "D":
d = DeathBlock(x, y)
platforms.append(d)
entities.add(d)
if col == "J":
j = JumpBlock(x, y)
platforms.append(j)
entities.add(j)
if col == "H":
h = HiddenBlock(x, y)
platforms.append(h)
entities.add(h)
if col == "F":
f = FakeBlock(x, y)
platforms.append(f)
entities.add(f)
x += 32
y += 32
x = 0
total_level_width = len(level)*25
total_level_height = len(level)*30
camera = Camera(simple_camera, total_level_width, total_level_height)
build_level(x, y)
# - create the game loop
while 1:
clock.tick(60)
for event in pygame.event.get():
if event.type == QUIT:
raise SystemExit, "QUIT"
if event.type == KEYDOWN and event.key == K_ESCAPE:
raise SystemExit, "ESCAPE"
if event.type == KEYDOWN and event.key == K_UP:
up = True
if event.type == KEYDOWN and event.key == K_DOWN:
down = True
if event.type == KEYDOWN and event.key == K_LEFT:
left = True
if event.type == KEYDOWN and event.key == K_RIGHT:
right = True
if event.type == KEYUP and event.key == K_UP:
up = False
if event.type == KEYUP and event.key == K_DOWN:
down = False
if event.type == KEYUP and event.key == K_LEFT:
left = False
if event.type == KEYUP and event.key == K_RIGHT:
right = False
# - draws background
for y in range(20):
for x in range(50):
gameDisplay.blit(bg, (x * 32, y *32))
camera.update(player)
if player.endStage == True:
entities.empty()
platforms = []
build_level(x, y)
            player.endStage = False
# - updates player, then draws everything
player.update(up, down, left, right, platforms)
for e in entities:
gameDisplay.blit(e.image, camera.apply(e))
#entities.draw(gameDisplay)
pygame.display.update()
class Camera(object):
def __init__(self, camera_func, width, height):
self.camera_func = camera_func
self.state = Rect(0, 0, width, height)
def apply(self, target):
return target.rect.move(self.state.topleft)
def update(self, target):
self.state = self.camera_func(self.state, target.rect)
""" the simple camera follows the player around, with it centered on the screen always """
def simple_camera(camera, target_rect):
l, t, _, _ = target_rect
_, _, w, h = camera
return Rect(-l+HALF_WIDTH, -t+HALF_HEIGHT, w, h)
""" the complex camera is supposed to adjust itself if you're at a wall, ceiling, floor, etc
but doesn't work properly at the moment """
def complex_camera(camera, target_rect):
l, t, _, _ = target_rect
_, _, w, h = camera
l, t, _, _ = -l+HALF_WIDTH, -t+HALF_HEIGHT, w, h
l = min(0, l) # stop scrolling at the left edge
l = max(-(camera.width-WIN_WIDTH), l) # stop scrolling at the right edge
t = max(-(camera.height-WIN_HEIGHT), t) # stop scrolling at the bottom
t = min(0, t) # stop scrolling at the top
return Rect(l, t, w, h)
""" create the Entity Class that all platforms/blocks will inherit from """
class Entity(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
class Player(Entity):
def __init__(self, x, y):
Entity.__init__(self)
self.speed_x = 0
self.speed_y = 0
# - player starts out not on the ground
self.onGround = False
self.image = Surface((32, 32))
# - converts image to same pixel format as gameDisplay
self.image.convert()
self.image.fill(RED)
self.rect = Rect(x, y, 32, 32)
self.endStage = False
self.glideOn = False
self.stompCharge = False
self.stupidOnGround = False
# TODO: create code to easier change gravity, player movement
# and also easier to implement new 'physics'.
def update(self, up, down, left, right, platforms):
if up:
# - only jump if on the ground
print self.onGround
if self.onGround:
self.stupidOnGround = False
mixer.Sound.play(soundJump)
self.jump_func(7, blockLevels.gravityDirection)
if down:
if self.stompCharge is True:
pass
else:
if not self.onGround:
self.stomp_func(10, blockLevels.gravityDirection)
# - pressing down doesn't do anything yet
#pass
if left:
if self.stompCharge is True:
self.speed_x = 0
else:
self.move_left(blockLevels.gravityDirection)
if right:
if self.stompCharge is True:
self.speed_x = 0
else:
self.move_right(blockLevels.gravityDirection)
if not self.onGround:
# function defined at line 292
self.apply_gravity(blockLevels.gravity, blockLevels.gravityDirection)
# # - if player is in air, add gravity
if not(left or right):
self.stop_moving(blockLevels.gravityDirection)
# - increase in x direction
self.rect.left += self.speed_x
# - do x-axis
self.collide(self.speed_x, 0, platforms)
# - increase in y direction
self.rect.top += self.speed_y
# - assuming we're in the air
if self.stupidOnGround is True:
self.onGround = True
else:
self.onGround = False
# - do y-axis collisions
self.collide(0, self.speed_y, platforms)
print("stompCharge = " + str(self.stompCharge))
print("onGround = " + str(self.onGround))
""" the collision function """
def collide(self, speed_x, speed_y, platforms):
for p in platforms:
# - check every collision between player and platforms
if sprite.collide_rect(self, p):
if self.stompCharge is True:
mixer.Sound.play(soundStomp)
time.delay(50)
self.stompCharge = False
                # - I don't really understand isinstance. Yeaaaaah
if isinstance(p, ExitBlock):
event.post(event.Event(QUIT))
elif isinstance(p, ClearStageBlock):
self.endStage = True
blockLevels.current_level += 1
print blockLevels.current_level
self.rect.left = 32
self.rect.top = 32
elif isinstance(p, DeathBlock):
mixer.Sound.play(soundHurt)
time.delay(300)
self.rect.left = 32
self.rect.top = 32
elif isinstance(p, JumpBlock):
if speed_y > 0:
# - calls the jump function (doesn't work properly atm)
# - if you walk on it, it runs twice.
print("weeeeee")
mixer.Sound.play(soundJumpBlock)
self.jump_func(15, blockLevels.gravityDirection)
else:
pass
elif isinstance(p, FakeBlock):
pass
# re-locates player to the outside of platform x, y
# coords if player passes its boundaries
# TODO: add onGround() to elif statements
elif speed_x > 0:
self.rect.right = p.rect.left
# self.onGround = self.isOnGround(
# p, blockLevels.gravityDirection)
elif speed_x < 0:
self.rect.left = p.rect.right
self.onGround = self.isOnGround(
p, blockLevels.gravityDirection)
elif speed_y > 0:
self.rect.bottom = p.rect.top
# self.onGround = self.isOnGround(
# p, blockLevels.gravityDirection)
self.speed_y = 0
elif speed_y < 0:
self.rect.top = p.rect.bottom
# self.onGround = self.isOnGround(
# p, blockLevels.gravityDirection)
self.onGround = self.isOnGround(p, blockLevels.gravityDirection)
# - add the code in the comment below to disable "ceiling gliding"
# - thus making the game much harder.
# self.speed_y = 0
self.glideOn = False
# First checks gravity direction, then applies gravity in that direction.
def apply_gravity(self, gravity, direction):
if direction == 'down':
self.speed_y += gravity
if self.speed_y > 30:
self.speed_y = 0
elif direction == 'up':
self.speed_y -= gravity
if self.speed_y < -30:
self.speed_y = 0
elif direction == 'left':
self.speed_x -= gravity
if self.speed_x < -30:
self.speed_x = 0
# self.speed_y = 0
elif direction == 'right':
self.speed_x += gravity
if self.speed_x > 30:
self.speed_x = 0
# self.speed_y = 0
# Gets called when there's no left/right input
def stop_moving(self, direction):
if direction == 'down' or direction == 'up':
self.speed_x = 0
else:
self.speed_y = 0
# Gets called at left input, acts differently depending on gravity direction
def move_left(self, direction):
if direction == 'down' or direction == 'up':
self.speed_x = -5
elif direction == 'left':
self.speed_y = -5
elif direction == 'right':
self.speed_y = 5
def move_right(self, direction):
if direction == 'down' or direction == 'up':
self.speed_x = 5
elif direction == 'left':
self.speed_y = 5
elif direction == 'right':
self.speed_y = -5
def jump_func(self, jump_height, direction):
# sets the jump height to whatever was passed into the argument
self.jump_height = jump_height
if direction == 'down':
self.speed_y -= jump_height
elif direction == 'up':
self.speed_y += jump_height
elif direction == 'left':
self.speed_x = jump_height
elif direction == 'right':
self.speed_x -= jump_height
def stomp_func(self, fall_speed, direction):
self.stompCharge = True
self.fall_speed = fall_speed
mixer.Sound.play(soundStompCharge)
time.delay(350)
if direction == 'down':
self.speed_y += fall_speed
elif direction == 'up':
self.speed_y -= fall_speed
elif direction == 'left':
self.speed_x -= fall_speed
elif direction == 'right':
self.speed_x += fall_speed
# Check if player is on ground. Ground changes depending on
# current gravity direction.
def isOnGround(self, p, direction):
# If gravity direction is 'down', the function will check
# collision of players bottom border and platforms top border
# and return True if collision is detected
if direction == 'down':
if self.rect.bottom == p.rect.top:
return True
elif direction == 'up':
if self.rect.top == p.rect.bottom:
self.stupidOnGround = True
return True
elif direction == 'left':
if self.rect.left == p.rect.right:
self.stupidOnGround = True
return True
elif direction == 'right':
if self.rect.right == p.rect.left:
self.stupidOnGround = True
return True
return False
""" creates the platform class, inherit the Entity class """
class Platform(Entity):
def __init__(self, x, y):
Entity.__init__(self)
self.image = Surface((32, 32))
self.image.convert()
self.image.fill(WHITE)
self.rect = Rect(x, y, 32, 32)
""" creates the ExitBlock, inherit the platform class """
class ExitBlock(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image.fill(BLUE)
class ClearStageBlock(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image.fill(ORANGE)
""" creates the DeathBlock """
class DeathBlock(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image.fill(GREEN)
class JumpBlock(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image.fill(PINK)
class HiddenBlock(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image.fill(BLACK)
class FakeBlock(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image.fill(WHITE)
# - runs the main function
if(__name__ == "__main__"):
main()
|
[
"ayillon@gmail.com"
] |
ayillon@gmail.com
|
3cf9e1997e226fdf90d65861a968d3b101419b6c
|
1c0b11ffc61eee9ebee7e51ee56dfb7d055070b4
|
/networks/NetFactory.py
|
639f1ca91630c53f4b41c125fc928517248e9f52
|
[
"MIT"
] |
permissive
|
captaint-tao/NinaProNet
|
e8a8197e38324ab8504ac0601bf0dac3587c7e60
|
2882597465474395719fa853feb78489b4257795
|
refs/heads/main
| 2023-02-21T07:56:00.115641
| 2021-01-24T17:07:05
| 2021-01-24T17:07:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
from networks.FullConnectedNet import FCN
from networks.NinaProNet import NinaProNet
from networks.GengNet import GengNet
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
class NetFactory(object):
@staticmethod
def create(name):
if name == 'FCN':
return FCN
if name == 'NinaProNet':
return NinaProNet
if name == 'SVM':
return make_pipeline(StandardScaler(), SVC(gamma='auto'))
if name == 'GengNet':
return GengNet
# add your own networks here
print('unsupported network:', name)
exit()
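# Usage sketch (not part of the original file). Note the asymmetry: 'SVM' is
# the only name that returns a ready-made estimator; the other names return
# the network class itself, which the caller still has to instantiate.
if __name__ == '__main__':
    net_cls = NetFactory.create('FCN')   # a class
    svm = NetFactory.create('SVM')       # an sklearn Pipeline instance
    print(net_cls.__name__, type(svm).__name__)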
|
[
"taited9160@gmail.com"
] |
taited9160@gmail.com
|
4e82b40735ae52985442dff39e9f23eea87c3926
|
915406c549e2da21a2a95f5b96d9a92b8fbd1bb9
|
/layer/functions.py
|
d983b4fdbbafdc8dc0805eb1c2b10b8379503ce6
|
[] |
no_license
|
b-bokma/data-tools
|
155aaf9b0ff9705e52e1804aebfd818cc3d54f10
|
ec965653873fd71eb8a5bfef48f4e1c868b06e73
|
refs/heads/master
| 2023-03-01T00:40:22.663618
| 2021-02-04T12:27:12
| 2021-02-04T12:27:12
| 289,483,747
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,000
|
py
|
from datetime import timedelta, datetime
import requests
import awswrangler as wr
import pandas as pd
import numpy as np
import os
def list_data(endpoint, url, token, last_updated=None):
"""
A function to list all data from a specific endpoint from Chargebee.
Returns object with all results. Based to be processed to a list using comprehension like:
[x['endpoint'] for x in list_data(endpoint,url,token)]
last_updated has the options of:
None: default behavior returns the data updated since yesterday 00:00:00
Timestamp: int (timestamp in seconds)
'All': do not limit on date
"""
output = []
offset = ""
# set last_updated to change the filter date. By default set to yesterday 00.00.00
yesterday = datetime.now() - timedelta(days=1)
    yesterday_timestamp = datetime(
        year=yesterday.year,
        month=yesterday.month,
        day=yesterday.day,
        hour=0,
        minute=0,
        second=0).timestamp()
while offset is not None:
if last_updated is None:
last_updated = int(yesterday_timestamp)
if str(last_updated).lower() == 'all':
params = {
"include_deleted": "true",
"offset": offset,
"sort_by[desc]": "updated_at"
}
else:
params = {
"include_deleted": "true",
"offset": offset,
"sort_by[desc]": "updated_at",
"updated_at[after]": str(last_updated)
}
print(params)
result = requests.get(
url=f"https://{url}.chargebee.com/api/v2/{endpoint}",
auth=(f"{token}", ""),
params=params
)
r = result.json()
if 'list' not in r:
print(r)
for row in r['list']:
output.append(row)
if 'next_offset' in r.keys():
offset = r['next_offset']
else:
offset = None
continue
return output
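# Usage sketch (not part of the original file); the site name and API key are
# placeholders, and the per-row key follows the pattern shown in the
# docstring above:
#   subs = [x['subscription'] for x in
#           list_data('subscriptions', 'my-site', 'my-api-key')]
#   full_history = list_data('subscriptions', 'my-site', 'my-api-key',
#                            last_updated='all')   # no date filter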
def extract_nested_lists(input_list, keyname, keys_to_add=[{"id":"subscription_id"},{"updated_at":"updated_at"}]):
"""
A function to extract nested lists from an input list.
with keys_to_add you can pass a list with dicts with from to to, so you can rename certain column names
"""
if len(input_list) > 0:
output = []
for item in input_list:
if keyname in item.keys():
output_obj = item.pop(keyname)
if isinstance(output_obj, list) is False:
output_obj = [output_obj]
for j in output_obj:
for key_to_add in keys_to_add:
for k, v in key_to_add.items():
j[v] = item[k]
output.append(j)
return output
else:
return "Passed an empty list"
def load_list_to_s3(bucket_name, input_list, list_name, date_columns, instance):
"""
With this function you load a list, create a dataframe from it.
Based on the list of column names in date_columns these are set to timestamp.
The dataframe is set to parquet and sent to s3
"""
if len(input_list) > 0:
df = pd.DataFrame(input_list)
filepath = f"s3://{bucket_name}/chargebee/{list_name}/entity={instance}/"
for column in date_columns:
if column in df.columns:
df[column] = pd.to_datetime(df[column], unit='s')
f = wr.s3.to_parquet(
df=df,
path=filepath,
dataset=True,
mode="append",
compression='snappy'
)
return f
def col_to_str(df,colname):
if colname in df.columns:
df[colname] = df[colname].astype("string")
def col_to_timestamp(df, colname, timezone='UTC'):
"""
shitload of print statements, since this is a place where I found lots of issues.
    Will be removed if this proves itself.
"""
if colname in df.columns:
print("start")
print(colname, df[colname].dtypes)
print(df[colname].head())
if np.issubdtype(df[colname].dtype, np.datetime64) == False: # skips columns that are already datetime64
try:
df[colname] = df[colname].astype('Int64')
df[colname] = pd.to_datetime(
df[colname],
unit='s',
errors='coerce',
utc=True)
print(df[colname].head())
df[colname] = pd.DatetimeIndex(df[colname]).tz_convert(timezone)
print(df[colname].head())
except:
print(colname, df[colname].dtypes)
print(df[colname].head())
# replace all values that are a digit
df[colname] = pd.to_datetime(df[colname])
print(df[colname].head())
df[colname] = df[colname].dt.tz_localize(None)
print(df[colname].head())
print("done")
|
[
"b.bokma@gmail.com"
] |
b.bokma@gmail.com
|
6d5decd4ea5d9b5ef70e681b2f77e8011ca3c72c
|
647b9d0f67f8a39a2c35f00cbaf916ced8487683
|
/src/Emotion Recognition/cnn.py
|
9cf3968cd0c5a11688ccabef7ecbee3cb132d544
|
[] |
no_license
|
jianyuhe/Emotion-recognition-base-audio-and-face
|
818a84a56cb7bba38879160982cf1f38ca3c8c46
|
c806142ea73c7ce4ea301d665519639767e35491
|
refs/heads/master
| 2023-07-14T00:10:44.047905
| 2021-08-25T15:12:31
| 2021-08-25T15:12:31
| 399,857,695
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,097
|
py
|
#Importing Libraries
##Deep Learning
from tensorflow.keras.layers import (Activation, Add, AveragePooling2D,
    BatchNormalization, Concatenate, Conv2D, Convolution2D, Dense,
    DepthwiseConv2D, Dropout, Flatten, GlobalAveragePooling2D, Input,
    MaxPooling2D, Reshape, SeparableConv2D)
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
import keras.backend as K
#Build Tensorflow 2.0 keras MobileNet model
##Depthwise Separable block for MobileNet
def depthwise_separable_block(x, nb_filter, stride=(1, 1), name=None):
x = DepthwiseConv2D((3,3), padding='same', strides=stride, depth_multiplier=1, use_bias=False, name=name+'_dpconv')(x)
x = BatchNormalization(axis=3, name=name+'_bn1')(x)
x = Activation(relu6, name=name+'_relu1')(x)
x = Conv2D(nb_filter, (1,1), padding='same', use_bias=False, strides=(1,1), name=name+'conv_2')(x)
x = BatchNormalization(axis=3, name=name+'_bn2')(x)
x = Activation(relu6, name=name+'_relu2')(x)
return x
##Conv block for Mobilenet, it a standard 3x3 convolution block
def conv_block (x, nb_filter, stride=(1,1), name=None):
x = Conv2D(nb_filter, (3,3), strides=stride, padding='same', use_bias=False, name=name+'_conv1')(x)
x = BatchNormalization(axis=3, name=name+'bn1')(x)
x = Activation(relu6, name=name+'relu')(x)
return x
##The ReLu6 activation function
def relu6(x):
return K.relu(x, max_value=6)
##MobileNet
def mobileNet (num_classes, input_size=(112,112,3), dropout=0.5):
input = Input(shape=input_size)
x = conv_block(input, 32, (2,2), name='conv_block')
x = depthwise_separable_block(x, 64, stride=(1,1), name='dep1')
x = depthwise_separable_block(x, 128, stride=(2,2), name='dep2')
x = depthwise_separable_block(x, 128, stride=(1,1), name='dep3')
x = depthwise_separable_block(x, 256, stride=(2,2), name='dep4')
x = depthwise_separable_block(x, 256, stride=(1,1), name='dep5')
x = depthwise_separable_block(x, 512, stride=(2,2), name='dep6')
x = depthwise_separable_block(x, 512, stride=(1,1), name='dep7')
x = depthwise_separable_block(x, 512, stride=(1,1), name='dep8')
x = depthwise_separable_block(x, 512, stride=(1,1), name='dep9')
x = depthwise_separable_block(x, 512, stride=(1,1), name='dep10')
x = depthwise_separable_block(x, 512, stride=(1,1), name='dep11')
x = depthwise_separable_block(x, 1024, stride=(2,2), name='dep12')
x = depthwise_separable_block(x, 1024, stride=(1,1), name='dep13')
x = GlobalAveragePooling2D()(x)
x = Reshape((1,1,1024), name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(num_classes, (1,1), padding='same', name='conv_preds')(x)
x = Reshape((num_classes,), name='reshape_2')(x)
x = Activation('softmax', name='act_softmax')(x)
model = Model(input, x, name='MobileNet')
return model
#Build ms_model_R and ms_model_M models
##Mish activation function
def mish(x):
return x * K.tanh(K.softplus(x))
##The Original residual cell Bottleneck_R and modified inverted residual cell Bottleneck_M
def bottleneck(input, in_channels, out_channels, strides, channels_expand, activation='Mish'):
feature_m = Conv2D(filters=in_channels*channels_expand, kernel_size=(1,1), strides=strides, padding='same')(input)
feature_m = Activation(mish)(feature_m) if activation == 'Mish' else layers.ReLU(6.)(feature_m)
feature_m = DepthwiseConv2D(kernel_size=(3,3), strides=1, padding='same')(feature_m)
feature_m = Activation(mish)(feature_m) if activation == 'Mish' else layers.ReLU(6.)(feature_m)
feature_m = Conv2D(filters=out_channels, kernel_size=(1,1), strides=1, padding='same')(feature_m)
feature_m_res = Conv2D(filters=out_channels, kernel_size=(1,1), strides=strides, padding='same')(input)
feature_m = Add()([feature_m, feature_m_res])
return feature_m
## Process of feature selection module
def feature_selection_modeule(input, pool_size, strides, out_channels):
avg_pool_output = AveragePooling2D(pool_size=pool_size, strides=strides)(input)
max_pool_output = MaxPooling2D(pool_size=pool_size, strides=strides)(input)
sum_output = Add()([avg_pool_output, max_pool_output])
output = Conv2D(filters= out_channels, kernel_size=(1,1), strides=1, padding='same')(sum_output)
output = GlobalAveragePooling2D()(output)
return output
## ms_model_R model
def ms_model_R(num_classes, input_size=(112,112,3), dropout_rate=0.5):
input = Input(shape=input_size)
# Initial feature extraction
feature_init = Conv2D(filters= 16, kernel_size=(3,3), strides=2, padding='same')(input)
# Bottleneck R1
feature_m1 = bottleneck(feature_init, 16, 16, 1, 1, 'Relu6')
# Bottleneck R2
feature_m2 = bottleneck(feature_m1, 16, 24, 2, 5, 'Relu6')
# Bottleneck R3
feature_m3 = bottleneck(feature_m2, 24, 24, 1, 5, 'Relu6')
# Bottleneck R3_1
feature_m3_1 = bottleneck(feature_m3, 24, 32, 1, 5, 'Relu6')
# Bottleneck R3_2
feature_m3_2 = bottleneck(feature_m3_1, 32, 32, 1, 5, 'Relu6')
# Feature selection module 1
fsm_1 = feature_selection_modeule(feature_m3_2, (4,4), 4, 32)
# Bottleneck R4
feature_m4 = bottleneck(feature_m3, 24, 32, 2, 5, 'Relu6')
# Bottleneck R5
feature_m5 = bottleneck(feature_m4, 32, 32, 1, 5, 'Relu6')
# Feature selection module 2
fsm_2 = feature_selection_modeule(feature_m5, (2,2), 2, 32)
# Bottleneck R6
feature_m6 = bottleneck(feature_m5, 32, 40, 1, 5, 'Relu6')
# Bottleneck R7
feature_m7 = bottleneck(feature_m6, 40, 40, 1, 5, 'Relu6')
# Feature selection module 3
fsm_3 = feature_selection_modeule(feature_m7, (2,2), 2, 40)
# Bottleneck R8
feature_m8 = bottleneck(feature_m7, 40, 40, 1, 5, 'Relu6')
# Bottleneck R9
feature_m9 = bottleneck(feature_m8, 40, 48, 2, 5, 'Relu6')
# Bottleneck R10
feature_m10 = bottleneck(feature_m9, 48, 64, 1, 5, 'Relu6')
fs = Conv2D(filters=64, kernel_size=(1,1), strides=1, padding='same')(feature_m10)
fs = GlobalAveragePooling2D()(fs)
# Concat
output = Concatenate()([fsm_1, fsm_2, fsm_3, fs])
output = Reshape((1,1,-1))(output)
output = Dropout(dropout_rate)(output)
output = Conv2D(filters=num_classes, kernel_size=(1,1), strides=1, padding='same')(output)
output = Activation('softmax')(output)
output = Flatten()(output)
# Model
model = Model(inputs=input, outputs=output, name='ms_model_R')
return model
## ms_model_M model
def ms_model_M(num_classes, input_size=(112,112,3), dropout_rate=0.5):
input = Input(shape=input_size)
# Initial feature extraction
feature_init = Conv2D(filters= 16, kernel_size=(3,3), strides=2, padding='same')(input)
# Bottleneck M1
feature_m1 = bottleneck(feature_init, 16, 16, 1, 1, 'Mish')
# Bottleneck M2
feature_m2 = bottleneck(feature_m1, 16, 24, 2, 5, 'Mish')
# Bottleneck M3
feature_m3 = bottleneck(feature_m2, 24, 24, 1, 5, 'Mish')
# Bottleneck M3_1
feature_m3_1 = bottleneck(feature_m3, 24, 32, 1, 5, 'Mish')
# Bottleneck M3_2
feature_m3_2 = bottleneck(feature_m3_1, 32, 32, 1, 5, 'Mish')
# Feature selection module 1
fsm_1 = feature_selection_modeule(feature_m3_2, (4,4), 4, 32)
# Bottleneck M4
feature_m4 = bottleneck(feature_m3, 24, 32, 2, 5, 'Mish')
# Bottleneck M5
feature_m5 = bottleneck(feature_m4, 32, 32, 1, 5, 'Mish')
# Feature selection module 2
fsm_2 = feature_selection_modeule(feature_m5, (2,2), 2, 32)
# Bottleneck M6
feature_m6 = bottleneck(feature_m5, 32, 40, 1, 5, 'Mish')
# Bottleneck M7
feature_m7 = bottleneck(feature_m6, 40, 40, 1, 5, 'Mish')
# Feature selection module 3
fsm_3 = feature_selection_modeule(feature_m7, (2,2), 2, 40)
# Bottleneck M8
feature_m8 = bottleneck(feature_m7, 40, 40, 1, 5, 'Mish')
# Bottleneck M9
feature_m9 = bottleneck(feature_m8, 40, 48, 2, 5, 'Mish')
# Bottleneck M10
feature_m10 = bottleneck(feature_m9, 48, 64, 1, 5, 'Mish')
fs = Conv2D(filters=64, kernel_size=(1,1), strides=1, padding='same')(feature_m10)
fs = GlobalAveragePooling2D()(fs)
# Concat
output = Concatenate()([fsm_1, fsm_2, fsm_3, fs])
output = Reshape((1,1,-1))(output)
output = Dropout(dropout_rate)(output)
output = Conv2D(filters=num_classes, kernel_size=(1,1), strides=1, padding='same')(output)
output = Activation('softmax')(output)
output = Flatten()(output)
# Model
model = Model(inputs=input, outputs=output, name = 'ms_model_M')
return model
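# Smoke test (not part of the original file): build each model and check that
# it maps a 112x112x3 input to `num_classes` scores; 7 classes is an
# arbitrary stand-in for an emotion label set.
if __name__ == '__main__':
    for build in (mobileNet, ms_model_R, ms_model_M):
        model = build(7)
        print(model.name, model.output_shape)  # expect (None, 7)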
|
[
"jianyu19951225@gmail.com"
] |
jianyu19951225@gmail.com
|
de46a19d07ec4832cc5163e96c00c9f895ec7bd3
|
54c1a2250fda397eabcfaa75a87680722bd3495c
|
/api_cloud/api_api/models.py
|
419e830b31de5d9e36483a45d1a7903fa6d56ca2
|
[] |
no_license
|
yangjing1989/API
|
ffe91acf11136be61e9dc68a2e62900a2d962ff9
|
099b51db461ddbe6b723cd22740be0c37dd117ae
|
refs/heads/master
| 2021-01-10T01:22:34.108563
| 2016-03-07T04:00:13
| 2016-03-07T04:00:13
| 51,734,749
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
from django.db import models
# Create your models here.
class ApiApi(models.Model):
name = models.CharField(max_length=100)
project_id = models.IntegerField(blank=True, null=True)
api_http_type = models.IntegerField(blank=True, null=True)
api_url = models.CharField(max_length=500, blank=True, null=True)
url_list = models.TextField(blank=True, null=True)
api_domain = models.CharField(max_length=100, blank=True, null=True)
api_method = models.IntegerField(blank=True, null=True)
api_headers = models.TextField(blank=True, null=True)
api_body_type = models.IntegerField(blank=True, null=True)
api_body_value = models.TextField(blank=True, null=True)
api_expect_result = models.TextField(blank=True, null=True)
api_real_result = models.TextField(blank=True, null=True)
api_is_success = models.IntegerField(blank=True, null=True)
remarks = models.TextField(blank=True, null=True)
creater = models.IntegerField(blank=True, null=True)
create_time = models.DateTimeField(blank=True, null=True)
update_time = models.DateTimeField(blank=True, null=True)
last_execute_time = models.DateTimeField(blank=True, null=True)
status = models.IntegerField(blank=True, null=True)
|
[
"Yang@100"
] |
Yang@100
|
9abc3d8ba0d3d46542177b5f5ecd164d77a0c121
|
15419b56c7493452d2e69ebfb62e9b211c91ab74
|
/TwitterClient.py
|
83221bf0ad15c8b268ba588d23b93d2d2de12390
|
[] |
no_license
|
DeclanJones/MarkovTwitterClient
|
bcc3167f3d00b3814b1566aa5f0fc927b7d8b8d9
|
899174e13c99aaff2ef759ef9179946425d10bbf
|
refs/heads/master
| 2021-01-10T18:00:59.051343
| 2016-01-07T16:47:45
| 2016-01-07T16:47:45
| 49,213,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,403
|
py
|
import tweepy
import datetime
import re
TWEET_SCRAPE_ARCHIVE_PATH = '/Users/declanjones/Desktop/TweetProj/Tweet_Data/Corpus/'
class TwitterClient:
'Class to GET and POST Tweets generated by Markov Chains'
def __init__(self, CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET):
self.CONSUMER_KEY = CONSUMER_KEY
self.CONSUMER_SECRET = CONSUMER_SECRET
self.ACCESS_TOKEN = ACCESS_TOKEN
self.ACCESS_TOKEN_SECRET = ACCESS_TOKEN_SECRET
self.AUTH = tweepy.OAuthHandler(self.CONSUMER_KEY, self.CONSUMER_SECRET)
self.AUTH.set_access_token(self.ACCESS_TOKEN, self.ACCESS_TOKEN_SECRET)
self.API = tweepy.API(self.AUTH)
self.ME = self.API.me()
# Method for posting tweets
def post(self, string):
try:
self.API.update_status(string)
except tweepy.TweepError:
print 'Posting Error for user: ' + self.ME.screen_name
return False
return True
# Scrape numTweets from a user to populate .txt file
def scrape(self, numTweets=100, userName='TheTweetOfGod'):
# Remove links using regular expression matching
pattern = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
# Attempt scrape
try:
user = self.API.get_user(userName)
tweetLst = self.API.user_timeline(user.id, count=numTweets)
except tweepy.TweepError:
print 'Could not scrape {0} tweets from user: {1}'.format(numTweets, userName)
return False
# Get Tweet Metadata
        date_obj = datetime.datetime.utcnow() - datetime.timedelta(hours=5) # set cutoff 5 hours earlier
        dateCutoff = datetime.datetime.strptime(date_obj.strftime("%Y-%m-%d_%H:%M:%S"), "%Y-%m-%d_%H:%M:%S")
today = datetime.datetime.utcnow()
file = TWEET_SCRAPE_ARCHIVE_PATH + '{0}_{1}.txt'.format(userName, today)
# Record Tweets in file
if len(tweetLst) > 0:
f = open(file, 'wb')
for tweet in tweetLst:
if tweet.created_at > dateCutoff:
tweet = str(tweet.text.encode("utf-8"))
tweet = pattern.sub('', tweet)
f.write(tweet + '\n')
else: #We have already recorded this tweet
pass
f.close()
return True
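# Usage sketch (not part of the original file); the four credentials are
# placeholders from a Twitter app registration:
#   client = TwitterClient(CONSUMER_KEY, CONSUMER_SECRET,
#                          ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
#   client.scrape(numTweets=200, userName='TheTweetOfGod')
#   client.post('hello from the Markov bot')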
|
[
"declan.jones@berkeley.edu"
] |
declan.jones@berkeley.edu
|
2996e7beefd3e3449cf21e97daa7c8fb0a786353
|
f0f5cc75d2f851a0d337d6f1cbd206f0c51da022
|
/oldVersion/RedNeuronal.py
|
cf5f8cad981c14c75e06de25180183421ef613a1
|
[] |
no_license
|
Ark93/RedNeuronal
|
8bbac95afd7d3a92cb9dae59f943bb084251310a
|
7860f20ab59b3bd3d1ceae2f304602fdfe6ae33f
|
refs/heads/master
| 2021-05-02T11:02:42.196499
| 2016-12-02T05:37:48
| 2016-12-02T05:37:48
| 49,172,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
import numpy as np
import pandas
import csv
#
#
#to open the csv file
with open('Consulta_Banxico.csv', 'rb') as csvfile:
    datareader = csv.reader(csvfile)
    rows = 0
    columns = 0
    data = []
    for row in datareader:
        rows += 1
        columns = max(columns, len(row))
        data.append(row)
data = np.array(data)
|
[
"santos.banuelos93@gmail.com"
] |
santos.banuelos93@gmail.com
|
f9cfc4ce6ef2f17a14806cae8dd3d37e73b7128a
|
c36e952acfdf1694b610f4c090ec969d8096977c
|
/20_Spring/AE353/Homeworks/AE353_HW2.3.py
|
9c1cd190adf3afdb9d377ad805876ae548ff12df
|
[] |
no_license
|
topher097/SeniorAE
|
a89f64c4a025819d222dfe106bd96a0fe0760e4d
|
0e566877f023c18344db7ea033e0637a9a5dcc52
|
refs/heads/master
| 2023-03-21T05:57:10.593991
| 2023-03-07T18:20:39
| 2023-03-07T18:20:39
| 206,626,410
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
import numpy as np
from scipy.linalg import expm, eig
def probFive():
A = np.array([[0.2, 0.5, 0.3], [0.4, -0.8, -0.2], [-0.7, 0.7, -0.7]])
B = np.array([[0.1], [0.5], [0.5]])
C = np.array([[-0.6, -0.1, 0.0]])
D = np.array([[0.0]])
K1 = np.array([[-30.2, -16.7, 9.5]])
K2 = np.array([[25.4, 6.3, -0.4]])
K3 = np.array([[57.0, 11.9, -3.7]])
K4 = np.array([[635.3, 432.1, -532.8]])
K5 = np.array([[69.5, 15.5, -10.0]])
Ks = [K1, K2, K3, K4, K5]
counter = 1
    for K in Ks:
        Am = A - np.dot(B, K)
        eigenvalues, _ = eig(Am)  # scipy's eig returns (values, vectors)
        # the closed loop is stable iff every eigenvalue has a negative real part
        if all(w.real < 0 for w in eigenvalues):
            print('K' + str(counter))
        counter += 1
probFive()
|
[
"cmendres400@gmail.com"
] |
cmendres400@gmail.com
|
052b3043527285c8e3ebdfa538669b851f6b78a3
|
edbfa77f2a4ac2dca10e8f56c463f8fe54afbe64
|
/binary_search.py
|
006da95e8d7e1ebe1cc7fc55c47783c104195cc0
|
[] |
no_license
|
viiicky/Problem-Solving
|
4bc1b3ab19ff81a2e6031b34a37f1798accf5a35
|
642e6dd2c3cd65704c90d6e06a392bdae2ddd644
|
refs/heads/master
| 2023-04-15T20:09:39.077724
| 2021-04-24T18:36:30
| 2021-04-24T18:36:30
| 40,954,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
#!/usr/bin/env python3
# Recursive binary search
# If n is the total number of items, then:
# Worst Running Time: O(lgn)
# Best Running Time: O(1)
# An analogous example for binary search is searching
# for the meaning of a word in the dictionary
# or probably a name in a telephone directory
# by Vikas Prasad
def binary_search(A, v, p, q):
mid = (p + q) // 2
if q < p:
return None
if A[mid] == v:
return mid
if A[mid] < v:
return binary_search(A, v, mid+1, q)
else:
return binary_search(A, v, p, mid-1)
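# Quick checks (not part of the original script): a hit, a miss, and the
# empty-range base case.
assert binary_search([1, 3, 5, 7, 9], 7, 0, 4) == 3
assert binary_search([1, 3, 5, 7, 9], 4, 0, 4) is None
assert binary_search([], 1, 0, -1) is None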
import argparse
parser = argparse.ArgumentParser(description='finds the position of v in A')
parser.add_argument('integers', metavar='A', type=int, nargs='+', help='sequence of integers')
parser.add_argument('value', metavar='v', type=int, help='value whose position is to be searched in A')
args = parser.parse_args()
A = args.integers
v = args.value
# A = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# v = int(input())
print('Searching', v)
print('Found at', binary_search(A, v, 0, len(A)-1))
|
[
"vikasprasad.prasad@gmail.com"
] |
vikasprasad.prasad@gmail.com
|
88119844b125f387327c3d90f3b21e7ca9247fd4
|
2ff776ed28f9d465602aabb675b896b39eceaf26
|
/examples/scoop/example_scoop_gamma.py
|
80a427ac6e7a1124202561fbc6cf00fba27332b1
|
[
"MIT"
] |
permissive
|
geochri/pyroSAR
|
c79089f817637534bf5cc558a3617f10c7a548f9
|
6768a5487853c078fa471f5a3a69facb1aa2fab6
|
refs/heads/master
| 2020-04-08T13:18:59.157326
| 2018-11-27T17:58:00
| 2018-11-27T17:58:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,433
|
py
|
import os
import socket
from scoop import futures
from pyroSAR.S1 import OSV
from pyroSAR.gamma import geocode
from pyroSAR import Archive
from spatialist import vector
from spatialist.ancillary import finder, multicore
"""
This script is an example of processing Sentinel-1 scenes with GAMMA
Run this script by calling the 'start_gamma.sh' script.
The following tasks are performed:
- a directory is scanned for valid Sentinel-1 scenes
- the found scenes are ingested into a spatialite database
- orbit state vector (OSV) files are downloaded to a user-defined directory (these are needed for precise orbit information)
- currently this is implemented to update a fixed directory in which all OSV files are stored
- an empty directory will first be filled with all available OSV files on the server
- a cluster job is setup using package 'scoop', which assigns a list of testsites to different cluster nodes
- for each site:
- query the SAR scenes, which overlap with your testsite and match certain criteria (e.g. sensor, acquisition mode etc.)
- filter the selected scenes by those that have already been processed and saved to the defined output directory
- do parallelized processing using package 'pathos'
"""
# the sites to be processed
# this is just an exemplary use case assuming a shapefile with different geometries for the test sites
sites = ['Egypt_Burullus', 'France_Camargue', 'Kenya_Lorian_Olbolossat', 'Sweden_Skogaryd', 'Sweden_Store-Mosse']
# the pyroSAR database file
dbfile = '/.../scenelist.db'
# the main directory for storing the processed results
maindir = '/.../swos_process'
# the directories for Sentinel-1 POE and RES orbit state vector files
# this is intended to be a fixed directory structure similar to that of ESA SNAP
# in the future all auxiliary data files will be stored in a structure defined by pyroSAR
# This is currently only used for Sentinel-1; within the processor two subdirectories will be created named
# POEORB and RESORB which will contain the respective orbit files
osvdir = '/.../.gamma/auxdata/Orbits/Sentinel-1'
def worker(sitename):
#######################################################################################
# setup general processing parameters
resolution = 20
# number of processes for Python pathos framework (multiple scenes in parallel)
parallel1 = 6
# number of parallel OpenMP threads; this is used by GAMMA internally
parallel2 = 6
os.environ['OMP_NUM_THREADS'] = str(parallel2)
#######################################################################################
# get the maximum date of the precise orbit files
# as type also 'RES' can be selected. These files are not as precise as POE and thus geocoding might not be
# quite as accurate
with OSV(osvdir) as osv:
maxdate = osv.maxdate(osvtype='POE', datetype='stop')
#######################################################################################
# define the directories for writing temporary and final results
sitedir = os.path.join(maindir, sitename)
tempdir = os.path.join(sitedir, 'proc_in')
outdir = os.path.join(sitedir, 'proc_out')
#######################################################################################
# load the test site geometry into a vector object
sites = vector.Vector('/.../testsites.shp')
# query the test site by name; a column name 'Site_Name' must be saved in your shapefile
site = sites["Site_Name='{}'".format(sitename)]
#######################################################################################
# query the database for scenes to be processed
with Archive(dbfile) as archive:
selection_proc = archive.select(vectorobject=site,
processdir=outdir,
maxdate=maxdate,
sensor=('S1A', 'S1B'),
product='GRD',
acquisition_mode='IW',
vv=1)
print('{0}: {1} scenes found for site {2}'.format(socket.gethostname(), len(selection_proc), sitename))
#######################################################################################
# define the DEM file
demfile = '{0}/{1}/DEM/{1}_srtm_utm'.format(maindir, sitename)
if not os.path.isfile(demfile):
print('DEM missing for site {}'.format(sitename))
return
#######################################################################################
# call to processing utility
if len(selection_proc) > 1:
print('start processing')
if len(selection_proc) > 1:
if len(selection_proc) < parallel1:
parallel1 = len(selection_proc)
# run the function on multiple cores in parallel
multicore(geocode, cores=parallel1, multiargs={'scene': selection_proc}, dem=demfile,
tempdir=tempdir, outdir=outdir,
targetres=resolution, scaling='db',
func_geoback=2, func_interp=0, sarsimulation=False, osvdir=osvdir, cleanup=True, allow_RES_OSV=False)
elif len(selection_proc) == 1:
scene = selection_proc[0]
# run the function on a single core
geocode(scene, dem=demfile,
tempdir=tempdir, outdir=outdir,
targetres=resolution, scaling='db',
func_geoback=2, func_interp=0, sarSimCC=False, osvdir=osvdir, cleanup=True, allow_RES_OSV=False)
return len(selection_proc)
if __name__ == '__main__':
#######################################################################################
# update Sentinel-1 GRD scene archive database
# define a directory containing zipped scene archives and list all files starting with 'S1A' or 'S1B'
archive_s1 = '/.../sentinel1/GRD'
scenes_s1 = finder(archive_s1, ['^S1[AB]'], regex=True, recursive=False)
with Archive(dbfile) as archive:
archive.insert(scenes_s1)
#######################################################################################
# download the latest orbit state vector files
with OSV(osvdir) as osv:
osv.update()
#######################################################################################
# start the processing
results = list(futures.map(worker, sites))
|
[
"john.truckenbrodt@uni-jena.de"
] |
john.truckenbrodt@uni-jena.de
|
495f003bb3f74c4e0f521c1d35fbf767f6c78ff1
|
135ce37651d617f569cf2b659967b9b16e97a725
|
/Visualizations/viz.py
|
759a665e9cb6d190d8843f96b9e78c8c3ed76251
|
[] |
no_license
|
himz/VMware-bootcamp-vchs
|
cd647b0381805776453c937cfd91c8576cec3714
|
b8b22bd35a3bd961e4059907cbd094d282eb1828
|
refs/heads/master
| 2023-03-30T05:00:10.566075
| 2014-09-05T00:15:09
| 2014-09-05T00:15:09
| 353,913,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
from flask import Flask
from flask import render_template
import utils
import mongo_utils
app = Flask(__name__)
@app.route("/")
def index():
return render_template('tl_index.html')
@app.route("/api-usage")
def get_most_used_apis():
data = utils.get_api_usage_json()
return render_template('tl_apis_most_used.html', data=data)
@app.route("/browser-usage")
def get_browser_distribution():
data = utils.get_browser_usage_json()
return render_template('tl_browser_usage.html', data=data)
@app.route("/requests/response-time-verb")
def get_response_verb():
data = {}
    # assumed to live in utils alongside the other helpers
    data['records'] = utils.filter_records(['http_verb', 'response_time'])
return render_template('tl_requests_response_time_verb.html', data=data)
@app.route("/os-usage")
def get_os_distribution():
data = utils.get_os_usage_json()
return render_template('tl_os_usage.html', data=data)
@app.route("/api-response")
def get_api_response_times():
data = utils.get_api_response_times()
return render_template('tl_api_response_times.html', data=data)
@app.route("/verb-response")
def get_verb_response_times():
data = utils.get_verb_response_times()
return render_template('tl_verb_response_times.html', data=data)
if __name__ == "__main__":
app.run(debug=True, port=8090)
|
[
"warunsl@gmail.com"
] |
warunsl@gmail.com
|
890f2f6f244bdbbe497ce8e098712a26acf94761
|
df936dbcc38d1637c9b4f86846d470dfe422aa11
|
/src/py/mop/model/Person.py
|
9dd9dcb1077e9d2e087caa3b806cc5bb4cfb2bf0
|
[] |
no_license
|
rec/mop
|
60186e68b1dfda2512494830c6f21916399b4ee0
|
27a4c7902a1ced1c547b2d9f9442375faee75f6b
|
refs/heads/master
| 2016-09-06T16:34:48.409438
| 2012-02-27T21:21:21
| 2012-02-27T21:21:21
| 3,433,923
| 1
| 0
| null | 2016-07-20T17:39:26
| 2012-02-13T20:43:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
from google.appengine.ext import ndb
from mop.model import PostalAddress
"""A Person describes a "natural person" who will be taking one side or another
of a Parking Transaction.
A Person only exists as part of an Account, so we don't need to have a unique
index for them.
"""
class Person(ndb.Model):
prefix = ndb.StringProperty()
driversLicenseIssuer = ndb.StructuredProperty(PostalAddress.PostalAddress)
first_names = ndb.StringProperty()
last = ndb.StringProperty()
suffix = ndb.StringProperty()
driversLicense = ndb.StringProperty()
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
862178752d9888ce4509a77fe384662c28953347
|
4a2eac368e3e2216b0cd1dd70224da3ca4ee7c5e
|
/BareMetalSolution/owlbot.py
|
fa2e47441dd7d59e239932e108b67f90e5583b3d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
googleapis/google-cloud-php
|
856a940eee158eafa6f2443f8d61813779216429
|
ad50f749431287e7074279e2b4fa32d6d6c2c952
|
refs/heads/main
| 2023-09-06T05:48:31.609502
| 2023-09-05T20:27:34
| 2023-09-05T20:27:34
| 43,642,389
| 642
| 330
|
Apache-2.0
| 2023-09-13T22:39:27
| 2015-10-04T16:09:46
|
PHP
|
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/BareMetalSolution").resolve()
dest = Path().resolve()
# Added so that we can pass copy_excludes in the owlbot_main() call
_tracked_paths.add(src)
php.owlbot_main(
src=src,
dest=dest,
copy_excludes=[
src / "**/[A-Z]*_*.php"
]
)
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
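# Illustration (not part of the original script): the effect of the rename
# above on a made-up accessor name.
#   public function getDurationUnwrapped  ->  public function getDurationValue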
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
# format generated clients
subprocess.run([
'npm',
'exec',
'--yes',
'--package=@prettier/plugin-php@^0.16',
'--',
'prettier',
'**/Gapic/*',
'--write',
'--parser=php',
'--single-quote',
'--print-width=80'])
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
ca8cca86f721e101cfba934d0f87e2396efcc350
|
4562c0332c4009416328edf6ba24676837c5fc2e
|
/zomatopy.py
|
7f35ce393189cf2cbb3c401b1115ddc317eeab1a
|
[] |
no_license
|
babu3009/Foodie-Chatbot
|
e05778bf091488c2886ffa4784e44746bf27a740
|
eab6cef8487d1fa8567ed6837e61bc9864c00ce8
|
refs/heads/master
| 2021-10-27T22:22:12.535497
| 2019-04-20T09:09:29
| 2019-04-20T09:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,370
|
py
|
import requests
import ast
base_url = "https://developers.zomato.com/api/v2.1/"
def initialize_app(config):
return Zomato(config)
class Zomato:
def __init__(self, config):
self.user_key = config["user_key"]
def get_categories(self):
"""
Takes no input.
Returns a dictionary of IDs and their respective category names.
"""
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "categories", headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
categories = {}
for category in a['categories']:
categories.update({category['categories']['id'] : category['categories']['name']})
return categories
def get_city_ID(self, city_name):
"""
Takes City Name as input.
Returns the ID for the city given as input.
"""
        # Allow multi-word city names; spaces are URL-encoded below, so only
        # reject names with non-alphabetic characters other than spaces.
        if not city_name.replace(' ', '').isalpha():
            raise ValueError('InvalidCityName')
city_name = city_name.split(' ')
city_name = '%20'.join(city_name)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cities?q=" + city_name, headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['location_suggestions']) == 0:
raise Exception('invalid_city_name')
elif 'name' in a['location_suggestions'][0]:
city_name = city_name.replace('%20', ' ')
if str(a['location_suggestions'][0]['name']).lower() == str(city_name).lower():
return a['location_suggestions'][0]['id']
else:
raise ValueError('InvalidCityId')
def get_city_name(self, city_ID):
"""
Takes City ID as input.
Returns the name of the city ID given as input.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cities?city_ids=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if a['location_suggestions'][0]['country_name'] == "":
raise ValueError('InvalidCityId')
else:
temp_city_ID = a['location_suggestions'][0]['id']
if temp_city_ID == str(city_ID):
return a['location_suggestions'][0]['name']
def get_collections(self, city_ID, limit=None):
"""
Takes City ID as input. limit parameter is optional.
Returns dictionary of Zomato restaurant collections in a city and their respective URLs.
"""
#self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
if limit == None:
r = (requests.get(base_url + "collections?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
else:
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
else:
r = (requests.get(base_url + "collections?city_id=" + str(city_ID) + "&count=" + str(limit), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
collections = {}
for collection in a['collections']:
collections.update({collection['collection']['title'] : collection['collection']['url']})
return collections
def get_cuisines(self, city_ID):
"""
Takes City ID as input.
Returns a sorted dictionary of all cuisine IDs and their respective cuisine names.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cuisines?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['cuisines']) == 0:
raise ValueError('InvalidCityId')
temp_cuisines = {}
cuisines = {}
for cuisine in a['cuisines']:
temp_cuisines.update({cuisine['cuisine']['cuisine_id'] : cuisine['cuisine']['cuisine_name']})
for cuisine in sorted(temp_cuisines):
cuisines.update({cuisine : temp_cuisines[cuisine]})
return cuisines
def get_establishment_types(self, city_ID):
"""
Takes City ID as input.
Returns a sorted dictionary of all establishment type IDs and their respective establishment type names.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "establishments?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
temp_establishment_types = {}
establishment_types = {}
if 'establishments' in a:
for establishment_type in a['establishments']:
temp_establishment_types.update({establishment_type['establishment']['id'] : establishment_type['establishment']['name']})
for establishment_type in sorted(temp_establishment_types):
establishment_types.update({establishment_type : temp_establishment_types[establishment_type]})
return establishment_types
else:
raise ValueError('InvalidCityId')
def get_nearby_restaurants(self, latitude, longitude):
"""
Takes the latitude and longitude as inputs.
Returns a dictionary of Restaurant IDs and their corresponding Zomato URLs.
"""
try:
float(latitude)
float(longitude)
except ValueError:
raise ValueError('InvalidLatitudeOrLongitude')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "geocode?lat=" + str(latitude) + "&lon=" + str(longitude), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
nearby_restaurants = {}
for nearby_restaurant in a['nearby_restaurants']:
nearby_restaurants.update({nearby_restaurant['restaurant']['id'] : nearby_restaurant['restaurant']['url']})
return nearby_restaurants
def get_restaurant(self, restaurant_ID):
"""
Takes Restaurant ID as input.
Returns a dictionary of restaurant details.
"""
self.is_valid_restaurant_id(restaurant_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "restaurant?res_id=" + str(restaurant_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
if 'code' in a:
if a['code'] == 404:
                raise ValueError('InvalidRestaurantId')
restaurant_details = {}
restaurant_details.update({"name" : a['name']})
restaurant_details.update({"url" : a['url']})
restaurant_details.update({"location" : a['location']['address']})
restaurant_details.update({"city" : a['location']['city']})
restaurant_details.update({"city_ID" : a['location']['city_id']})
restaurant_details.update({"user_rating" : a['user_rating']['aggregate_rating']})
restaurant_details.update({"average_cost_for_two" : a['average_cost_for_two']})
restaurant_details = DotDict(restaurant_details)
return restaurant_details
def restaurant_search(self, query="", latitude="", longitude="", cuisines="", limit=50000):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
cuisines = "%2C".join(cuisines.split(","))
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "search?q=" + str(query) + "&count=" + str(limit) + "&lat=" + str(latitude) + "&lon=" + str(longitude) + "&cuisines=" + str(cuisines), headers=headers).content).decode("utf-8")
        return r  # a = ast.literal_eval(r)
def get_location(self, query="", limit=5):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "locations?query=" + str(query) + "&count=" + str(limit), headers=headers).content).decode("utf-8")
return r
def restaurant_search_by_keyword(self, query="", cuisines="", limit=5):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
cuisines = "%2C".join(cuisines.split(","))
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "search?q=" + str(query) + "&count=" + str(limit) + "&cuisines=" + str(cuisines), headers=headers).content).decode("utf-8")
return r
def is_valid_restaurant_id(self, restaurant_ID):
"""
Checks if the Restaurant ID is valid or invalid.
If invalid, throws a InvalidRestaurantId Exception.
"""
restaurant_ID = str(restaurant_ID)
if restaurant_ID.isnumeric() == False:
raise ValueError('InvalidRestaurantId')
def is_valid_city_id(self, city_ID):
"""
Checks if the City ID is valid or invalid.
If invalid, throws a InvalidCityId Exception.
"""
city_ID = str(city_ID)
if city_ID.isnumeric() == False:
            return True  # raise ValueError('InvalidCityId')
def is_key_invalid(self, a):
"""
Checks if the API key provided is valid or invalid.
If invalid, throws a InvalidKey Exception.
"""
if 'code' in a:
if a['code'] == 403:
raise ValueError('InvalidKey')
def is_rate_exceeded(self, a):
"""
Checks if the request limit for the API key is exceeded or not.
If exceeded, throws a ApiLimitExceeded Exception.
"""
if 'code' in a:
if a['code'] == 440:
raise Exception('ApiLimitExceeded')
class DotDict(dict):
"""
Dot notation access to dictionary attributes
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
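
# Usage sketch (illustrative only): the API key and restaurant ID below are
# placeholders, and each call performs a live HTTP request.
if __name__ == "__main__":
    zomato = initialize_app({"user_key": "YOUR_ZOMATO_API_KEY"})
    print(zomato.get_categories())             # {id: category_name, ...}
    details = zomato.get_restaurant(16774318)  # hypothetical restaurant ID
    print(details.name, details.user_rating)   # DotDict allows attribute access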
|
[
"noreply@github.com"
] |
babu3009.noreply@github.com
|
4133ddefe30b278ffcd6f7fd8b526d24adb5bea7
|
7aa05ca5b86e800e7884991b4edc773ccf1457c1
|
/retinaNN_training.py
|
2ce0a67511314b7520101ae09f9e9c70d097e93f
|
[] |
no_license
|
Zchhh73/DilatedUnet
|
7c4f66bd12e3a0ca6f59ee2ed69d12033e33ae32
|
e42dc7a28c79fc0b5abbdb415a451ebd0a386c83
|
refs/heads/main
| 2023-01-28T00:14:22.262000
| 2020-11-24T05:25:31
| 2020-11-24T05:25:31
| 315,527,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,803
|
py
|
import numpy as np
import configparser
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from keras.utils.vis_utils import plot_model as plot
from keras.optimizers import SGD
from keras import optimizers
import sys
sys.path.insert(0, './lib/')
from lib.help_functions import *
# function to obtain data for training/testing (validation)
from lib.extract_patches import get_data_training
import src.lovaze_softmax as ls
from keras.callbacks import LearningRateScheduler
import mylr as mlr
from keras.layers import BatchNormalization, PReLU, LeakyReLU, Conv2DTranspose
# Define the neural network
def get_unet(n_ch, patch_height, patch_width):
inputs = Input(shape=(n_ch, patch_height, patch_width))
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(inputs)
conv1 = Dropout(0.2)(conv1)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)
#
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool1)
conv2 = Dropout(0.2)(conv2)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)
#
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool2)
conv3 = Dropout(0.2)(conv3)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv3)
up1 = UpSampling2D(size=(2, 2))(conv3)
up1 = concatenate([conv2, up1], axis=1)
conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(up1)
conv4 = Dropout(0.2)(conv4)
conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv4)
#
up2 = UpSampling2D(size=(2, 2))(conv4)
up2 = concatenate([conv1, up2], axis=1)
conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(up2)
conv5 = Dropout(0.2)(conv5)
conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv5)
#
conv6 = Conv2D(2, (1, 1), activation='relu', padding='same', data_format='channels_first')(conv5)
conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
conv6 = core.Permute((2, 1))(conv6)
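    # Shape bookkeeping (channels_first): conv6 is (batch, 2, H, W); Reshape
    # flattens it to (batch, 2, H*W) and Permute swaps to (batch, H*W, 2), so
    # the softmax below acts over the 2 classes independently for each pixel.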
############
conv7 = core.Activation('softmax')(conv6)
model = Model(inputs=inputs, outputs=conv7)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    # NOTE: 'weighted_bce_loss' is not a built-in Keras loss; it must be a
    # custom loss registered elsewhere in the project, or compile() will fail.
    model.compile(optimizer=sgd, loss='weighted_bce_loss', metrics=['accuracy'])
    # adam=optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
    # model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # 1. Loss functions:
    # (1) mean_squared_error / mse: mean squared error, a common objective: ((y_pred - y_true) ** 2).mean()
    # (2) mean_absolute_error / mae: mean absolute error: (|y_pred - y_true|).mean()
    # (3) mean_absolute_percentage_error / mape: (|(y_true - y_pred) / clip(|y_true|, epsilon, infinity)|).mean(axis=-1) * 100;
    #     like mae, but each difference is divided by the (clipped) true value before averaging.
    # (4) mean_squared_logarithmic_error / msle: ((log(clip(y_pred, epsilon, infinity) + 1) - log(clip(y_true, epsilon, infinity) + 1)) ** 2).mean(axis=-1);
    #     clip predictions and targets, take logs, square the difference, then average.
    # (5) squared_hinge: ((max(1 - y_true * y_pred, 0)) ** 2).mean(axis=-1)
    # (6) hinge: (max(1 - y_true * y_pred, 0)).mean(axis=-1)
    # (7) binary_crossentropy: the usual logistic-regression (cross-entropy) loss
    # (8) categorical_crossentropy: cross-entropy for multi-class problems
    #
    # 2. Metrics:
    # (1) binary_accuracy: mean accuracy over all predictions (binary classification)
    # (2) categorical_accuracy: mean accuracy over all predictions (multi-class)
    # (3) sparse_categorical_accuracy: same as categorical_accuracy, for sparse targets
    # (4) top_k_categorical_accuracy: a prediction counts as correct if the target class is among the top-k predicted classes
    # (5) sparse_top_k_categorical_accuracy: same as top_k_categorical_accuracy, for sparse targets
return model
# Define the neural network gnet
# you need to change the function call "get_unet" to "get_gnet" in line 166 before using this network
def get_gnet(n_ch, patch_height, patch_width):
inputs = Input((n_ch, patch_height, patch_width))
    # Ported from the legacy Keras 1 API (Convolution2D / merge / border_mode)
    # to the Keras 2 API that is already imported at the top of this file.
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv1)
    up1 = UpSampling2D(size=(2, 2))(conv1)
    #
    conv2 = Conv2D(16, (3, 3), activation='relu', padding='same', data_format='channels_first')(up1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(16, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool1)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool2)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv4)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #
    conv5 = Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool3)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv5)
    #
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=1)
    conv6 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(up2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv6)
    #
    up3 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=1)
    conv7 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(up3)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv7)
    #
    up4 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=1)
    conv8 = Conv2D(16, (3, 3), activation='relu', padding='same', data_format='channels_first')(up4)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Conv2D(16, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv8)
    #
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv8)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv9)
    #
    conv10 = Conv2D(2, (1, 1), activation='relu', padding='same', data_format='channels_first')(conv9)
    conv10 = core.Reshape((2, patch_height * patch_width))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)
    model = Model(inputs=inputs, outputs=conv10)
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
return model
# Define the neural network
def get_dilated_unet(n_ch, patch_height, patch_width, dilaterate=3):
inputs = Input(shape=(n_ch, patch_height, patch_width))
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
data_format='channels_first')(inputs)
conv1 = Dropout(0.2)(conv1)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
data_format='channels_first')(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)
#
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
data_format='channels_first')(pool1)
conv2 = Dropout(0.2)(conv2)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
data_format='channels_first')(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)
#
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
data_format='channels_first')(pool2)
conv3 = Dropout(0.2)(conv3)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
data_format='channels_first')(conv3)
up1 = UpSampling2D(size=(2, 2))(conv3)
up1 = concatenate([conv2, up1], axis=1)
conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(up1)
conv4 = Dropout(0.2)(conv4)
conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv4)
#
up2 = UpSampling2D(size=(2, 2))(conv4)
up2 = concatenate([conv1, up2], axis=1)
conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(up2)
conv5 = Dropout(0.2)(conv5)
conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv5)
#
conv6 = Conv2D(2, (1, 1), activation='relu', padding='same', data_format='channels_first')(conv5)
conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
conv6 = core.Permute((2, 1))(conv6)
############
conv7 = core.Activation('softmax')(conv6)
model = Model(inputs=inputs, outputs=conv7)
# scheduler = LearningRateScheduler(mlr.lr_scheduler)
sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
# adam=optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
# model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # (Loss-function and metric reference notes: identical to the translated comments in get_unet above.)
return model
def Conv2d_BN(x, nb_filter, kernel_size, strides='same', padding='same'):
# x = Conv2D(nb_filter, kernel_size, dilation_rate=3,padding='same',data_format='channels_first')(x) #dilate_conv
x = Conv2D(nb_filter, kernel_size, padding='same', data_format='channels_first')(x)
    x = BatchNormalization(axis=1)(x)  # channels_first: the channel axis is 1, not 3
# x = LeakyReLU(alpha=0.1)(x)
x = PReLU()(x)
return x
def Conv2dT_BN(x, filters, kernel_size, strides=(2, 2), padding='same'):
x = Conv2DTranspose(filters, kernel_size, strides=strides, padding='same', data_format='channels_first')(x)
    x = BatchNormalization(axis=1)(x)  # channels_first: the channel axis is 1, not 3
x = LeakyReLU(alpha=0.1)(x)
return x
# Define the neural network
def get_dilated_bn_unet(n_ch, patch_height, patch_width, dilaterate=3):
inputs = Input(shape=(n_ch, patch_height, patch_width))
conv1 = Conv2d_BN(inputs, 32, (3, 3))
conv1 = Dropout(0.2)(conv1)
conv1 = Conv2d_BN(conv1, 32, (3, 3))
pool1 = MaxPooling2D((2, 2))(conv1)
#
conv2 = Conv2d_BN(pool1, 64, (3, 3))
conv2 = Dropout(0.2)(conv2)
conv2 = Conv2d_BN(conv2, 64, (3, 3))
pool2 = MaxPooling2D((2, 2))(conv2)
#
conv3 = Conv2d_BN(pool2, 128, (3, 3))
conv3 = Dropout(0.2)(conv3)
conv3 = Conv2d_BN(conv3, 128, (3, 3))
up1 = UpSampling2D(size=(2, 2))(conv3)
up1 = concatenate([conv2, up1], axis=1)
conv4 = Conv2d_BN(up1, 64, (3, 3))
conv4 = Dropout(0.2)(conv4)
conv4 = Conv2d_BN(conv4, 64, (3, 3))
#
up2 = UpSampling2D(size=(2, 2))(conv4)
up2 = concatenate([conv1, up2], axis=1)
conv5 = Conv2d_BN(up2, 32, (3, 3))
conv5 = Dropout(0.2)(conv5)
conv5 = Conv2d_BN(conv5, 32, (3, 3))
#
conv6 = Conv2d_BN(conv5, 2, (1, 1))
print(conv6)
print(patch_height, patch_width)
conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
conv6 = core.Permute((2, 1))(conv6)
############
conv7 = core.Activation('softmax')(conv6)
model = Model(inputs=inputs, outputs=conv7)
# scheduler = LearningRateScheduler(mlr.lr_scheduler)
sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
# adam=optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
# model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # (Loss-function and metric reference notes: identical to the translated comments in get_unet above.)
return model
def resnet_unit(input, filters, kernel_size=(3, 3)):
    x = Conv2DTranspose(filters, kernel_size, padding='same', data_format='channels_first')(input)
    x = BatchNormalization(axis=1)(x)  # channels_first: normalize over the channel axis
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2DTranspose(filters, kernel_size, padding='same', data_format='channels_first')(x)
    x = BatchNormalization(axis=1)(x)
    x = LeakyReLU(alpha=0.1)(x)
return concatenate([input, x], axis=1)
def get_resnet34_unet(n_ch, patch_height, patch_width, dilaterate=3, filters=[64, 128, 256, 512], blocks=[3, 4, 6, 3]):
inputs = Input(shape=(n_ch, patch_height, patch_width))
x = Conv2d_BN(inputs, 32, (3, 3))
lay = []
for _, block in enumerate(blocks):
for i in range(block):
x = resnet_unit(x, filters[_])
if i == block - 1 and _ == len(filters) - 1:
lay.append(x)
elif i == block - 1:
lay.append(x)
x = MaxPooling2D((2, 2))(x)
print(lay)
up1 = UpSampling2D(size=(2, 2))(lay[3])
up1 = concatenate([lay[2], up1], axis=1)
conv1 = Conv2d_BN(up1, 128, (3, 3))
conv1 = Dropout(0.2)(conv1)
conv1 = Conv2d_BN(conv1, 128, (3, 3))
up2 = UpSampling2D(size=(2, 2))(conv1)
up2 = concatenate([lay[1], up2], axis=1)
conv2 = Conv2d_BN(up2, 64, (3, 3))
conv2 = Dropout(0.2)(conv2)
conv2 = Conv2d_BN(conv2, 64, (3, 3))
up3 = UpSampling2D(size=(2, 2))(conv2)
up3 = concatenate([lay[0], up3], axis=1)
conv3 = Conv2d_BN(up3, 32, (3, 3))
conv3 = Dropout(0.2)(conv3)
conv3 = Conv2d_BN(conv3, 32, (3, 3))
conv4 = Conv2d_BN(conv3, 2, (1, 1))
print(conv4)
conv4 = core.Reshape((2, patch_height * patch_width))(conv4)
conv4 = core.Permute((2, 1))(conv4)
############
conv5 = core.Activation('softmax')(conv4)
model = Model(inputs=inputs, outputs=conv5)
# scheduler = LearningRateScheduler(mlr.lr_scheduler)
sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
return model
def get_resnet18_unet(n_ch, patch_height, patch_width, dilaterate=3, filters=[64, 128, 256, 512], blocks=[2, 2, 2, 2]):
inputs = Input(shape=(n_ch, patch_height, patch_width))
x = Conv2d_BN(inputs, 32, (3, 3))
lay = []
for _, block in enumerate(blocks):
for i in range(block):
x = resnet_unit(x, filters[_])
if i == block - 1 and _ == len(filters) - 1:
lay.append(x)
elif i == block - 1:
lay.append(x)
x = MaxPooling2D((2, 2))(x)
print(lay)
up1 = UpSampling2D(size=(2, 2))(lay[3])
up1 = concatenate([lay[2], up1], axis=1)
conv1 = Conv2d_BN(up1, 128, (3, 3))
conv1 = Dropout(0.2)(conv1)
conv1 = Conv2d_BN(conv1, 128, (3, 3))
up2 = UpSampling2D(size=(2, 2))(conv1)
up2 = concatenate([lay[1], up2], axis=1)
conv2 = Conv2d_BN(up2, 64, (3, 3))
conv2 = Dropout(0.2)(conv2)
conv2 = Conv2d_BN(conv2, 64, (3, 3))
up3 = UpSampling2D(size=(2, 2))(conv2)
up3 = concatenate([lay[0], up3], axis=1)
conv3 = Conv2d_BN(up3, 32, (3, 3))
conv3 = Dropout(0.2)(conv3)
conv3 = Conv2d_BN(conv3, 32, (3, 3))
conv4 = Conv2d_BN(conv3, 2, (1, 1))
print(conv4)
conv4 = core.Reshape((2, patch_height * patch_width))(conv4)
conv4 = core.Permute((2, 1))(conv4)
############
conv5 = core.Activation('softmax')(conv4)
model = Model(inputs=inputs, outputs=conv5)
# scheduler = LearningRateScheduler(mlr.lr_scheduler)
sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
return model
# ========= Load settings from Config file
config = configparser.RawConfigParser()
config.read('configuration.txt')
# patch to the datasets
path_data = config.get('data paths', 'path_local')
# Experiment name
name_experiment = config.get('experiment name', 'name')
# training settings
N_epochs = int(config.get('training settings', 'N_epochs'))
batch_size = int(config.get('training settings', 'batch_size'))
# ============ Load the data and divided in patches
patches_imgs_train, patches_masks_train = get_data_training(
DRIVE_train_imgs_original=path_data + config.get('data paths', 'train_imgs_original'),
DRIVE_train_groudTruth=path_data + config.get('data paths', 'train_groundTruth'), # masks
patch_height=int(config.get('data attributes', 'patch_height')),
patch_width=int(config.get('data attributes', 'patch_width')),
N_subimgs=int(config.get('training settings', 'N_subimgs')),
inside_FOV=config.getboolean('training settings', 'inside_FOV')
# select the patches only inside the FOV (default == True)
)
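# For reference, a hypothetical configuration.txt matching the keys read above
# (the section/option names are what this script expects; the values are only
# illustrative defaults):
#
#   [data paths]
#   path_local = ./DRIVE_datasets_training_testing/
#   train_imgs_original = DRIVE_dataset_imgs_train.hdf5
#   train_groundTruth = DRIVE_dataset_groundTruth_train.hdf5
#
#   [experiment name]
#   name = test
#
#   [data attributes]
#   patch_height = 48
#   patch_width = 48
#
#   [training settings]
#   N_epochs = 150
#   batch_size = 32
#   N_subimgs = 190000
#   inside_FOV = False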
# ========= Save a sample of what you're feeding to the neural network ==========
N_sample = min(patches_imgs_train.shape[0], 40)
visualize(group_images(patches_imgs_train[0:N_sample, :, :, :], 5),
'./' + name_experiment + '/' + "sample_input_imgs") # .show()
visualize(group_images(patches_masks_train[0:N_sample, :, :, :], 5),
'./' + name_experiment + '/' + "sample_input_masks") # .show()
# =========== Construct and save the model architecture =====
n_ch = patches_imgs_train.shape[1]
patch_height = patches_imgs_train.shape[2]
patch_width = patches_imgs_train.shape[3]
model = get_resnet18_unet(n_ch, patch_height, patch_width) # the U-net model
print("Check: final output of the network:")
print(model.output_shape)
import os
# os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# plot(model, to_file='./'+name_experiment+'/'+name_experiment + '_model.png') #check how the model looks like
json_string = model.to_json()
open('./' + name_experiment + '/' + name_experiment + '_architecture.json', 'w').write(json_string)
# ============ Training ==================================
checkpointer = ModelCheckpoint(filepath='./' + name_experiment + '/' + name_experiment + '_best_weights.h5', verbose=1,
monitor='val_loss', mode='auto',
save_best_only=True) # save at each epoch if the validation decreased
# def step_decay(epoch):
# lrate = 0.01 #the initial learning rate (by default in keras)
# if epoch==100:
# return 0.005
# else:
# return lrate
#
# lrate_drop = LearningRateScheduler(step_decay)
patches_masks_train = masks_Unet(patches_masks_train) # reduce memory consumption
model.fit(patches_imgs_train, patches_masks_train, epochs=N_epochs, batch_size=batch_size, verbose=1, shuffle=True,
validation_split=0.1, callbacks=[checkpointer])
# ========== Save and test the last model ===================
model.save_weights('./' + name_experiment + '/' + name_experiment + '_last_weights.h5', overwrite=True)
# test the model
# score = model.evaluate(patches_imgs_test, masks_Unet(patches_masks_test), verbose=0)
# print('Test score:', score[0])
# print('Test accuracy:', score[1])
#
|
[
"zlmy-pm@qq.com"
] |
zlmy-pm@qq.com
|
fbc10a823220a934babdd972482f8b3a034e3a15
|
4206cd260a398ed0423197b2056c7b6fc60e2766
|
/trees.py
|
b911fa57ebdc975da57fd9681725880185d1ead6
|
[] |
no_license
|
angiegyi/coding_interviews
|
85e5a3bc0ba84fa25b1c914a97647c3748d29c9c
|
d8302a75df13d2a5556bc616123882646cf0117e
|
refs/heads/master
| 2022-11-27T01:48:08.100951
| 2020-07-13T06:43:26
| 2020-07-13T06:43:26
| 279,232,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,824
|
py
|
# Definition for a binary tree node.
from collections import deque
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    def height(self, node):
        if node is None:
            return 0
        # Recurse on the child nodes (not on the computed heights, as the
        # original did) and handle the left == right case it missed.
        left = self.height(node.left)
        right = self.height(node.right)
        return max(left, right) + 1
def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
sum = 0
if root is None:
return sum
if L <= root.val <= R:
sum = sum + root.val
if root.val >= L:
sum = sum + self.rangeSumBST(root.left, L, R)
if root.val <= R:
sum = sum + self.rangeSumBST(root.right, L, R)
return sum
def inorderTraversal(self, root: TreeNode):
res = []
if root:
res = self.inorderTraversal(root.left)
res.append(root.val)
res = res + self.inorderTraversal(root.right)
return res
def preorderTraversal(self, root: TreeNode):
res = []
if root:
res.append(root.val)
            res = res + self.preorderTraversal(root.left)
            res = res + self.preorderTraversal(root.right)
return res
def postorderTraversal(self, root: TreeNode):
res = []
if root:
            res = self.postorderTraversal(root.left)
            res = res + self.postorderTraversal(root.right)
res.append(root.val)
return res
def auxD(self, root: TreeNode) -> int:
if not root:
return 0
else:
return max(1 + self.auxD(root.left), 1 + self.auxD(root.right))
def depthBfs(self, root: TreeNode) -> int:
if not root:
return 0
q = deque()
q.append(root)
c = 1
# storing size is a good optimisation
# do this more regularly
size = 1
while q:
for _ in range(size):
popped = q.popleft()
# ensure null checks are good
# these can trip up
if popped.left:
q.append(popped.left)
if popped.right:
q.append(popped.right)
size = len(q)
# Be careful when to add 1 to the size
# Prevent the last one from being added
# Or just subtract 1
if size > 0:
c += 1
# c += 1
#
# return c - 1
return c
def qD(self, root: TreeNode) -> int:
if not root:
return 0
q = deque()
q.append(root)
c, n = 1, 1
while q:
for _ in range(n):
curr = q.popleft()
if curr.left:
q.append(curr.left)
if curr.right:
q.append(curr.right)
n = len(q)
if n != 0:
c += 1
return c
def maxDepth(self, root: TreeNode) -> int:
if not root:
return 0
return self.auxD(root)
def size_of_tree(self,root):
if root is None:
return 0
else:
return self.size_of_tree(root.left) + 1 + self.size_of_tree(root.right)
def isSameTree(self, p, q):
p = self.bst(p)
q = self.bst(q)
print(p)
print(q)
return p == q
def bst(self, root):
output = deque()
output.append(root)
return_list = []
level = 1
while output:
for _ in range(level):
curr = output.pop()
if curr:
return_list.append(curr.val)
output.append(curr.left)
output.append(curr.right)
else:
return_list.append(None)
level = len(output)
return return_list
def isSameTree_efficient(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
# p and q are both None
if not p and not q:
return True
# one of p and q is None
if not q or not p:
return False
if p.val != q.val:
return False
        return self.isSameTree_efficient(p.right, q.right) and \
               self.isSameTree_efficient(p.left, q.left)
    def isPalindrome(self, x):
        # Two-pointer scan; the original loop condition could index past the
        # bounds of an empty string and was hard to follow.
        i = 0
        j = len(x) - 1
        while i < j:
            if x[i] != x[j]:
                return False
            i += 1
            j -= 1
        return True
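
# Quick smoke test, not part of the original file: builds the tree 1 -> (2, 3)
# and exercises a few of the methods above.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    sol = Solution()
    print(sol.inorderTraversal(root))   # [2, 1, 3]
    print(sol.preorderTraversal(root))  # [1, 2, 3]
    print(sol.maxDepth(root))           # 2
    print(sol.isPalindrome("abba"))     # True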
|
[
"angelagyi@hotmail.com"
] |
angelagyi@hotmail.com
|
c5871611d6db00e86dee889a6219f23c62fab65b
|
065abc887e834fbcb2d67b0191cd6733fafc1536
|
/mysite/blog/migrations/0002_auto_20170803_0252.py
|
f0f6aee158f22b3b430af1dc8f93f95a93baff08
|
[] |
no_license
|
vvrabets/bolog_django
|
e0ef21598d986ff0114843cebfa3705d9a638194
|
3379f3f3f7d3dbdf8f0de72c79d2c846a5fd5ef1
|
refs/heads/master
| 2021-06-21T15:38:28.021572
| 2017-08-09T21:10:38
| 2017-08-09T21:10:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-08-02 23:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comments_text', models.TextField()),
],
options={
'db_table': 'comments',
},
),
migrations.AlterField(
model_name='article',
name='article_likes',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='comments',
name='coments_article',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article'),
),
]
|
[
"vldaimir2016@mail.ua"
] |
vldaimir2016@mail.ua
|
bc5d9e68f67edd93d87b5e151da8ebc9065fffb0
|
5018a62b28e6e4c6bd4523bf3fe573cb156bcc12
|
/backend/samples/test.py
|
953bd9b259fe85cefdbe811f00536251135ab7dc
|
[] |
no_license
|
mraahemi/Simon_Says
|
09f7a84740f5953c2bca38102357f8498ab769e0
|
1b02a04b66e3e19e22710e36010f20b79c433bae
|
refs/heads/master
| 2023-03-09T06:29:31.329126
| 2021-02-22T03:07:49
| 2021-02-22T03:07:49
| 330,078,945
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
import azure.cognitiveservices.speech as speechsdk
from azure.cognitiveservices.speech import AudioDataStream, SpeechConfig, SpeechSynthesizer, SpeechSynthesisOutputFormat
from azure.cognitiveservices.speech.audio import AudioOutputConfig
import os
# from myapi.keys import api_keys
# from django.http import JsonResponse
api_keys = {
"microsoft-speech": {
"key": "ec82cab57d764954b9b01ec6ea0d74ee",
"region": "eastus"
}
}
def speech_from_file(file):
if file[-4:] != ".wav":
print("error")
return
# return JsonResponse({"error": "file type error"})
speech_config = speechsdk.SpeechConfig(subscription=api_keys["microsoft-speech"]["key"], region=api_keys["microsoft-speech"]["region"])
audio_input = speechsdk.AudioConfig(filename=file)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_input)
result = speech_recognizer.recognize_once_async().get()
# return JsonResponse(result.text)
print(result.text)
return result.text
def voice_from_text(text, path):
filepath = "{}/speech.wav".format(path)
if os.path.isfile(filepath): os.remove(filepath)
speech_config = speechsdk.SpeechConfig(subscription=api_keys["microsoft-speech"]["key"], region=api_keys["microsoft-speech"]["region"])
audio_config = AudioOutputConfig(filename=filepath)
synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
    # Speak the caller-supplied text (the original hard-coded a sample sentence).
    synthesizer.speak_text_async(text)
return path
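
# Usage sketch (illustrative): requires a valid key/region above; the text and
# output directory are placeholders.
if __name__ == "__main__":
    voice_from_text("Hello from Azure speech.", ".")  # writes ./speech.wav
    print(speech_from_file("./speech.wav"))           # transcribes it back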
|
[
"me@yucanwu.com"
] |
me@yucanwu.com
|
f30ae4e7b07ca0d1987f351c9bee7ab42dc002a4
|
deec3631679b1c14de92742fff9ee5d249fcfc92
|
/city_project/user_info_app/models.py
|
4acd85d04fa49e5ad1c4f63e7884ceeff9e3b496
|
[] |
no_license
|
Pyrigok/citymap
|
ae1c971133f42529b6853295057c8845cac16769
|
554175bd6be2edcc449908432b2a5d5db4957bca
|
refs/heads/master
| 2020-05-14T13:37:04.707650
| 2019-05-19T20:19:25
| 2019-05-19T20:19:25
| 164,826,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class UserProfileInfo(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
profile_pic = models.ImageField(upload_to='images/user_pics', blank=True)
def __str__(self):
return self.user.username
# def get_info(self, request):
# profile_info = User.objects.filter(id=request.user.id)
# return profile_info
class ExtendUser(User):
class Meta():
proxy = True
def get_current_user_info(self, request):
profile_info = User.objects.filter(id=request.user.id)
return profile_info
    @staticmethod
    def get_user_info():
        return User.objects.all()
def clean(self):
if self.username is None or self.username == "":
raise ValidationError('Field with username can not be empty')
if self.password is None or self.password == "":
raise ValidationError('Field with passwords can not be empty')
try:
User.objects.get(email=self.email)
raise ValidationError('User with same email already exists')
except User.DoesNotExist:
if self.email is None or self.email == "":
raise ValidationError('Field with email address can not be empty')
@staticmethod
def username_if_exists(username):
if User.objects.filter(username=username).exists():
return True
return False
class ResetPasswordUserInfo(User):
@staticmethod
def email_if_exists(email):
if User.objects.filter(email=email).exists():
return True
return False
@staticmethod
def set_new_password(email, password):
user = User.objects.get(email=email)
user.set_password(password)
user.save()
return True
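
# Illustrative usage (e.g. from a view; the form values are hypothetical):
#
#   if ExtendUser.username_if_exists(form_username):
#       ...  # reject: duplicate username
#   if ResetPasswordUserInfo.email_if_exists(form_email):
#       ResetPasswordUserInfo.set_new_password(form_email, new_password)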
|
[
"pyrigok@i.ua"
] |
pyrigok@i.ua
|
1038692fed742e2946bb4effc50bada2affccf08
|
e3c2add8826161f29643b05f4e8ff3e6caa4c0e2
|
/CNN/util.py
|
23f9266325f527fb0ae1741205ceeaf5abde1868
|
[] |
no_license
|
KimMatt/Unknown-Image-Classification
|
4be6b32718406c664d7afaaefb1a20339ee94b8f
|
80447f3250c7364c136f78d3c84ac48e53341d9c
|
refs/heads/master
| 2022-04-20T10:56:18.925709
| 2020-04-17T16:41:26
| 2020-04-17T16:41:26
| 256,551,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
def LoadData(fname):
"""Loads data from an NPZ file.
Args:
fname: NPZ filename.
Returns:
data: Tuple {inputs, target}_{train, valid, test}.
        Row-major, with the outer axis indexing the observations.
"""
npzfile = np.load(fname)
inputs_train = npzfile['training_inputs'].T / 255.0
inputs_valid = npzfile['valid_inputs'].T / 255.0
inputs_test = npzfile['test_inputs'].T / 255.0
target_train = npzfile['training_labels'].tolist()
target_valid = npzfile['valid_labels'].tolist()
target_test = npzfile['test_labels'].tolist()
# Add +1 because 0 is a class
num_class = max(target_train + target_valid + target_test) + 1
# Create 1hot encoding output arrays
target_train_1hot = np.zeros([num_class, len(target_train)])
target_valid_1hot = np.zeros([num_class, len(target_valid)])
target_test_1hot = np.zeros([num_class, len(target_test)])
for ii, xx in enumerate(target_train):
target_train_1hot[xx, ii] = 1.0
for ii, xx in enumerate(target_valid):
target_valid_1hot[xx, ii] = 1.0
for ii, xx in enumerate(target_test):
target_test_1hot[xx, ii] = 1.0
inputs_train = inputs_train.T
inputs_valid = inputs_valid.T
inputs_test = inputs_test.T
target_train_1hot = target_train_1hot.T
target_valid_1hot = target_valid_1hot.T
target_test_1hot = target_test_1hot.T
return inputs_train, inputs_valid, inputs_test, target_train_1hot, target_valid_1hot, target_test_1hot
def Save(fname, data):
"""Saves the model to a numpy file."""
print('Writing to ' + fname)
np.savez_compressed(fname, **data)
def Load(fname):
"""Loads model from numpy file."""
print('Loading from ' + fname)
return dict(np.load(fname))
def DisplayPlot(train, valid, ylabel, number=0):
"""Displays training curve.
Args:
train: Training statistics.
valid: Validation statistics.
ylabel: Y-axis label of the plot.
"""
plt.figure(number)
plt.clf()
train = np.array(train)
valid = np.array(valid)
plt.plot(train[:, 0], train[:, 1], 'b', label='Train')
plt.plot(valid[:, 0], valid[:, 1], 'g', label='Validation')
plt.xlabel('Epoch')
plt.ylabel(ylabel)
plt.legend()
plt.draw()
plt.pause(0.0001)
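
def _one_hot_demo():
    """Tiny illustration (not used by the pipeline) of the 1-hot encoding that
    LoadData builds: label k becomes a column with a 1.0 in row k."""
    labels = [0, 2, 1]
    num_class = max(labels) + 1  # +1 because 0 is a class
    one_hot = np.zeros([num_class, len(labels)])
    for ii, xx in enumerate(labels):
        one_hot[xx, ii] = 1.0
    # Transposed to (num_examples, num_class), matching LoadData's return values.
    return one_hot.T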
|
[
"mk1995x@gmail.com"
] |
mk1995x@gmail.com
|
78ff9fd03db2c8527755e665259124f76ca28ea2
|
56edc6e5edce6afdf659bda61dfd373ea102f2c1
|
/software/utils/drc_utils/python/plan_eval_proxi.py
|
39c458c4959d1c8bc902db7d12955a2a9d50384a
|
[
"BSD-3-Clause"
] |
permissive
|
MannyKayy/oh-distro
|
c921a6e1dbe71e0751b455558a84df30a0450cce
|
9aa09eda469c2dc407868c9af1ea155ff0b4a442
|
refs/heads/master
| 2021-01-16T20:30:39.726929
| 2016-02-02T22:43:39
| 2016-02-02T22:43:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,024
|
py
|
#!/usr/bin/python
import os,sys
import lcm
import time
home_dir =os.getenv("HOME")
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/site-packages")
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/dist-packages")
from drc.robot_plan_t import robot_plan_t
from drc.plan_status_t import plan_status_t
from drc.robot_state_t import robot_state_t
from drc.walking_plan_request_t import walking_plan_request_t
def timestamp_now(): return int(time.time() * 1000000)
class State:
def __init__(self):
self.last_utime = 0
self.init_status()
self.manip_until_utime = 0
self.walk_until_utime = 0
self.seconds_per_step = 2.9 # guesstimated time per step
def init_status(self):
self.status = plan_status_t()
self.status.utime = 0
self.status.execution_status = 0
self.status.last_plan_msg_utime = 0
self.status.last_plan_start_utime = 0
self.status.plan_type = 0
self.status.recovery_enabled = False
self.status.bracing_enabled = False
def on_manip_plan(channel, data):
m = robot_plan_t.decode(data)
print "got manip plan"
print m.plan[m.num_states-1].utime
s.status.last_plan_msg_utime = s.last_utime
s.status.last_plan_start_utime = s.last_utime
s.manip_until_utime = s.last_utime + m.plan[m.num_states-1].utime
def on_walking_plan_request(channel, data):
m = walking_plan_request_t.decode(data)
print "got walking plan request: ", m.footstep_plan.num_steps , " steps - 2 initial and ", (m.footstep_plan.num_steps-2), " actual"
s.status.last_plan_msg_utime = s.last_utime
s.status.last_plan_start_utime = s.last_utime
s.walk_until_utime = s.last_utime + (m.footstep_plan.num_steps-2)*s.seconds_per_step*1E6
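    # Worked example of the timing arithmetic: a 10-step plan (2 initial +
    # 8 actual steps) walks for 8 * 2.9 s = 23.2 s, i.e. 23.2e6 usec are
    # added to the current utime.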
def on_est_robot_state(channel, data):
m = robot_state_t.decode(data)
s.last_utime = m.utime
s.status.utime = m.utime
if (s.manip_until_utime > m.utime):
# manip plan still being executed
s.status.execution_status = 0 # EXECUTION_STATUS_EXECUTING
s.status.plan_type = 8 # manip
time_remaining = (s.manip_until_utime - m.utime)*1E-6
print "manip, time remaining: ", time_remaining
elif (s.walk_until_utime > m.utime):
# manip plan still being executed
s.status.execution_status = 0 # EXECUTION_STATUS_EXECUTING
s.status.plan_type = 2 # walking
time_remaining = (s.walk_until_utime - m.utime)*1E-6
print "walking, time remaining: ", time_remaining
else:
# manip or walking plan not being executed
s.status.plan_type = 1 # standing
s.status.execution_status = 2 # NO PLAN
lc.publish("PLAN_EXECUTION_STATUS", s.status.encode())
lc = lcm.LCM()
print "started"
s = State()
sub1 = lc.subscribe("COMMITTED_ROBOT_PLAN", on_manip_plan)
sub2 = lc.subscribe("EST_ROBOT_STATE", on_est_robot_state)
sub3 = lc.subscribe("WALKING_CONTROLLER_PLAN_REQUEST", on_walking_plan_request)
while True:
lc.handle()
|
[
"james.patrick.marion@gmail.com"
] |
james.patrick.marion@gmail.com
|
3b918aa27098cca3e6aac4878c9aba8ed3b4f70c
|
804892fd741a9148091f23e8f2bec6d3ea46b9f4
|
/03-makinjson01.py
|
b45fb25103b6a895d6571bad105fd987a3d28a25
|
[] |
no_license
|
waysman1/pyapi
|
2505a11ddc5dcb384089cae23063f135ffc7ee7e
|
722b417640596cba18a1fdd8215138c36be5228d
|
refs/heads/master
| 2020-06-26T03:55:09.826493
| 2019-08-01T14:31:54
| 2019-08-01T14:31:54
| 199,521,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
#!/usr/bin/python3
import json
def main():
hitchhikers = [{"name": "Zaphod Beeblebrox", "species": "Betelgeusain"}, {"name": "Arthur Dent", "species": "Human"}]
with open("galaxyguide.json", "w") as zfile:
json.dump(hitchhikers, zfile)
main()
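
# Round-trip check (illustrative): the file written above can be read back
# with json.load.
def read_guide():
    with open("galaxyguide.json") as zfile:
        return json.load(zfile)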
|
[
"lovern.m.ways@verizon.com"
] |
lovern.m.ways@verizon.com
|
20ebbca7b3ffa5f410d21772f07b0c1af4325148
|
e208bd998583bbbefbb5ece714597d16b48ff557
|
/webis_cpc/conv_net_classes.py
|
d8bea3a4b7a822330cb7414203ddf254bb44642e
|
[] |
no_license
|
edithal-14/rdv-cnn
|
b321250d64fce43597c97042e37ab3882b9adc22
|
496b5cdf567e49e3a418230a53d39f8e116bc86a
|
refs/heads/master
| 2022-11-07T06:07:54.188575
| 2020-06-22T08:14:41
| 2020-06-22T08:14:41
| 135,123,433
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,492
|
py
|
"""
Sample code for
Convolutional Neural Networks for Sentence Classification
http://arxiv.org/pdf/1408.5882v2.pdf
Much of the code is modified from
- deeplearning.net (for ConvNet classes)
- https://github.com/mdenil/dropout (for dropout)
- https://groups.google.com/forum/#!topic/pylearn-dev/3QbKtCumAW4 (for Adadelta)
"""
import numpy
import theano.tensor.shared_randomstreams
import theano
import theano.tensor as T
from theano.tensor.signal import pool
from theano.tensor.nnet import conv2d
def ReLU(x):
y = T.maximum(0.0, x)
return(y)
def Sigmoid(x):
y = T.nnet.sigmoid(x)
return(y)
def Tanh(x):
y = T.tanh(x)
return(y)
def Iden(x):
y = x
return(y)
class HiddenLayer(object):
"""
Class for HiddenLayer
"""
def __init__(self, rng, input, n_in, n_out, activation, W=None, b=None,
use_bias=False):
self.input = input
self.activation = activation
if W is None:
            # __name__ works on both Python 2 and 3 (func_name is Python 2 only)
            if activation.__name__ == "ReLU":
W_values = numpy.asarray(0.01 * rng.standard_normal(size=(n_in, n_out)), dtype=theano.config.floatX)
else:
W_values = numpy.asarray(rng.uniform(low=-numpy.sqrt(6. / (n_in + n_out)), high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W')
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b')
self.W = W
self.b = b
if use_bias:
lin_output = T.dot(input, self.W) + self.b
else:
lin_output = T.dot(input, self.W)
self.output = (lin_output if activation is None else activation(lin_output))
# parameters of the model
if use_bias:
self.params = [self.W, self.b]
else:
self.params = [self.W]
def _dropout_from_layer(rng, layer, p):
"""p is the probablity of dropping a unit
"""
srng = theano.tensor.shared_randomstreams.RandomStreams(rng.randint(999999))
# p=1-p because 1's indicate keep and p is prob of dropping
mask = srng.binomial(n=1, p=1-p, size=layer.shape)
# The cast is important because
# int * float32 = float64 which pulls things off the gpu
output = layer * T.cast(mask, theano.config.floatX)
return output
class DropoutHiddenLayer(HiddenLayer):
def __init__(self, rng, input, n_in, n_out,
activation, dropout_rate, use_bias, W=None, b=None):
super(DropoutHiddenLayer, self).__init__(
rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
activation=activation, use_bias=use_bias)
self.output = _dropout_from_layer(rng, self.output, p=dropout_rate)
class MLPDropout(object):
"""A multilayer perceptron with dropout"""
def __init__(self,rng,input,layer_sizes,dropout_rates,activations,use_bias=True):
#rectified_linear_activation = lambda x: T.maximum(0.0, x)
# Set up all the hidden layers
self.weight_matrix_sizes = list(zip(layer_sizes, layer_sizes[1:]))
self.layers = []
self.dropout_layers = []
self.activations = activations
next_layer_input = input
#first_layer = True
# dropout the input
next_dropout_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])
layer_counter = 0
for n_in, n_out in self.weight_matrix_sizes[:-1]:
next_dropout_layer = DropoutHiddenLayer(rng=rng,
input=next_dropout_layer_input,
activation=activations[layer_counter],
n_in=n_in, n_out=n_out, use_bias=use_bias,
dropout_rate=dropout_rates[layer_counter])
self.dropout_layers.append(next_dropout_layer)
next_dropout_layer_input = next_dropout_layer.output
# Reuse the parameters from the dropout layer here, in a different
# path through the graph.
next_layer = HiddenLayer(rng=rng,
input=next_layer_input,
activation=activations[layer_counter],
# scale the weight matrix W with (1-p)
W=next_dropout_layer.W * (1 - dropout_rates[layer_counter]),
b=next_dropout_layer.b,
n_in=n_in, n_out=n_out,
use_bias=use_bias)
self.layers.append(next_layer)
next_layer_input = next_layer.output
#first_layer = False
layer_counter += 1
# Set up the output layer
n_in, n_out = self.weight_matrix_sizes[-1]
dropout_output_layer = LogisticRegression(
input=next_dropout_layer_input,
n_in=n_in, n_out=n_out)
self.dropout_layers.append(dropout_output_layer)
# Again, reuse paramters in the dropout output.
output_layer = LogisticRegression(
input=next_layer_input,
# scale the weight matrix W with (1-p)
W=dropout_output_layer.W * (1 - dropout_rates[-1]),
b=dropout_output_layer.b,
n_in=n_in, n_out=n_out)
self.layers.append(output_layer)
# Use the negative log likelihood of the logistic regression layer as
# the objective.
self.dropout_negative_log_likelihood = self.dropout_layers[-1].negative_log_likelihood
self.dropout_errors = self.dropout_layers[-1].errors
self.negative_log_likelihood = self.layers[-1].negative_log_likelihood
self.errors = self.layers[-1].errors
# Grab all the parameters together.
self.params = [ param for layer in self.dropout_layers for param in layer.params ]
def predict(self, new_data):
next_layer_input = new_data
for i,layer in enumerate(self.layers):
if i<len(self.layers)-1:
next_layer_input = self.activations[i](T.dot(next_layer_input,layer.W) + layer.b)
else:
p_y_given_x = T.nnet.softmax(T.dot(next_layer_input, layer.W) + layer.b)
y_pred = T.argmax(p_y_given_x, axis=1)
return [y_pred,p_y_given_x]
def predict_p(self, new_data):
next_layer_input = new_data
for i,layer in enumerate(self.layers):
if i<len(self.layers)-1:
next_layer_input = self.activations[i](T.dot(next_layer_input,layer.W) + layer.b)
else:
p_y_given_x = T.nnet.softmax(T.dot(next_layer_input, layer.W) + layer.b)
return p_y_given_x
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
    top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(rng=rng, input=input,
n_in=n_in, n_out=n_hidden,
activation=T.tanh)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out)
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out, W=None, b=None):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
self.W = theano.shared(
value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
name='W')
else:
self.W = W
# initialize the baises b as a vector of n_out 0s
if b is None:
self.b = theano.shared(
value=numpy.zeros((n_out,), dtype=theano.config.floatX),
name='b')
else:
self.b = b
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch ;
zero one loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
                            ('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), non_linear="tanh"):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height,filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows,#cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
self.filter_shape = filter_shape
self.image_shape = image_shape
self.poolsize = poolsize
self.non_linear = non_linear
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /numpy.prod(poolsize))
# initialize weights with random weights
if self.non_linear=="none" or self.non_linear=="relu":
self.W = theano.shared(numpy.asarray(rng.uniform(low=-0.01,high=0.01,size=filter_shape),
dtype=theano.config.floatX),borrow=True,name="W_conv")
else:
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX),borrow=True,name="W_conv")
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True, name="b_conv")
# convolve input feature maps with filters
conv_out = conv2d(input=input, filters=self.W,filter_shape=self.filter_shape, image_shape=self.image_shape)
if self.non_linear=="tanh":
conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output = pool.pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
elif self.non_linear=="relu":
conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output = pool.pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
else:
pooled_out = pool.pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
self.params = [self.W, self.b]
def predict(self, new_data, batch_size):
"""
predict for new data
"""
img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
conv_out = conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
if self.non_linear=="tanh":
conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
output = pool.pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
if self.non_linear=="relu":
conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
output = pool.pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
else:
pooled_out = pool.pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
return output
|
[
"vigneshedithal11031997v@gmail.com"
] |
vigneshedithal11031997v@gmail.com
|
3a96346811d055e54895d05a8786a069e81c6271
|
e1bd3e69307f37450474dcdc26a15620237f8075
|
/src/Lang/Struct/_weakref.py
|
5ed95b024acb24464af8bf3ee2955a560c10a1ee
|
[] |
no_license
|
fwin-dev/py.Lang
|
3cee40f71a1256852c984ebc94689972980d360c
|
0a31f329083cd3108c6d23e096dd8916fab5ef48
|
refs/heads/master
| 2021-01-10T11:06:23.315900
| 2014-08-22T15:13:27
| 2014-09-02T12:57:59
| 46,517,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
from weakref import *
try:
WeakSet
except NameError:
from weakrefset import WeakSet
|
[
"jcowles@indigital.net"
] |
jcowles@indigital.net
|
4f5fe63bb5539c2ab1cdc71d4e46fbf563676332
|
cb9b7d4021372ef8a94b9fdd33497c3e4beaecfd
|
/myproducts/views.py
|
370f3803a8d106f66ba0ddd9de69577f75c2ece4
|
[] |
no_license
|
Ksingh210/ProductHuntSite
|
ee7b40c2acbf8b184ee7db82af94528b06f2b865
|
9fbc44bfcab4ce5e05ce6a4f23d66b4dd43073fe
|
refs/heads/master
| 2022-02-20T08:47:15.399553
| 2019-07-23T03:12:24
| 2019-07-23T03:12:24
| 198,342,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import Product
from django.utils import timezone
def home(request):
products = Product.objects
return render(request, 'myproducts/home.html', {'products':products})
@login_required(login_url="/accounts/signup")
def create(request):
if request.method == 'POST':
if request.POST['title'] and request.POST['body'] and request.POST['url'] and request.FILES['icon'] and request.FILES['image']:
product = Product()
product.title = request.POST['title']
product.body = request.POST['body']
if request.POST['url'].startswith('http://') or request.POST['url'].startswith('https://'):
product.url = request.POST['url']
else:
product.url = 'http://' + request.POST['url']
product.icon = request.FILES['icon']
product.image = request.FILES['image']
            product.pub_date = timezone.now()
product.hunter = request.user
product.save()
return redirect('/myproducts/' + str(product.id))
else:
return render(request, 'myproducts/create.html',{'error':'All fields are required!'})
else:
return render(request, 'myproducts/create.html')
def detail(request, product_id):
product = get_object_or_404(Product, pk=product_id)
return render(request, 'myproducts/detail.html',{'product':product})
def upvote(request, product_id):
if request.method == 'POST':
product = get_object_or_404(Product, pk=product_id)
product.votes_total += 1
product.save()
return redirect('/myproducts/' + str(product_id))
|
[
"48933892+Ksingh210@users.noreply.github.com"
] |
48933892+Ksingh210@users.noreply.github.com
|
1a45ad4e67f457ab221279e1648af0437f4691e6
|
11aec16011fc8558e129da75f0ffcf22c79478fc
|
/pitchShift.py
|
0d043765d7a730a705d74dd2b69870a8ff29f61d
|
[] |
no_license
|
davhogan/VoiceChanger
|
a16ba91a89fde762700c982c5b9e09505843ac1e
|
266cff3d8082523ba6e6093bd3c63f9599f3be89
|
refs/heads/master
| 2022-07-05T21:00:30.670735
| 2020-05-23T19:07:03
| 2020-05-23T19:07:03
| 265,057,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
from scipy.io import wavfile as sciwv
import numpy as np
import playsound as ps
import wave as wv
wr = wv.open('you-are-acting-so-weird.wav', 'r')
ww = wv.open('new_test.wav', 'w')
par = list(wr.getparams())
#par[3] = 0
#par = tuple(par)
ww.setparams(par)
sz = wr.getframerate()//20
c = int(wr.getnframes()/sz)
shift = 750//20
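# Each chunk's rFFT has a bin spacing of framerate/sz = 20 Hz, so rolling the
# spectrum by shift = 750//20 bins raises every component by roughly 750 Hz.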
for num in range(c):
    da = np.frombuffer(wr.readframes(sz), dtype=np.int16)
left = da[0::2]
right = da[1::2]
#Take DFT
lf = np.fft.rfft(left)
rf = np.fft.rfft(right)
    #Shift the spectrum up by 'shift' bins
lf = np.roll(lf, shift)
rf = np.roll(rf, shift)
lf[0:shift] = 0
rf[0:shift] = 0
#Take inverse DFT
nl = np.fft.irfft(lf)
nr = np.fft.irfft(rf)
#Put it altogether
ns = np.column_stack((nl,nr)).ravel().astype(np.int16)
    ww.writeframes(ns.tobytes())
wr.close()
ww.close()
ps.playsound('you-are-acting-so-weird.wav')
ps.playsound('new_test.wav')
|
[
"davhogan@pdx.edu"
] |
davhogan@pdx.edu
|
7c77dedc4523345a450f433b62b25679157a08b1
|
010bf55a456e8efb1fa01a5a8ecc9935586172b2
|
/word_vector.py
|
29b3c2fe7d24e6dc4f57a23331041eda1843401e
|
[] |
no_license
|
GregoryElliott/TGMA_NLP_Project
|
4b8007b28ccdb78e5ec794683b3d904a1d6c8ce6
|
15bc517287f7d822b815c84528f7fe67aaa4dfb1
|
refs/heads/master
| 2021-01-10T17:41:33.627749
| 2016-02-22T22:58:53
| 2016-02-22T22:58:53
| 51,664,371
| 0
| 1
| null | 2016-02-22T22:58:54
| 2016-02-13T20:28:42
|
Python
|
UTF-8
|
Python
| false
| false
| 980
|
py
|
### Word-Vector
### (Utility)
########################################################################
#### Functions for String-Index-Vectors ####
#### Ex: "This is a sentence -> [0, 5, 8, 10, 19]
def get_string_indicies(string):
'''Returns a String-Index-Vector for a given string'''
indicies_vec = [0] # init with str start
for n in range(0,len(string)):
if (string[n] == " "):
indicies_vec.append(n+1)
indicies_vec.append(len(string)+1) # pushback ending pos
return indicies_vec
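# Usage sketch: get_string_indicies("This is a sentence") -> [0, 5, 8, 10, 19];
# get_word(1, that_vector, the_string) then yields "is".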
def get_len(indicies_vec):
'''Returns the number of words given a String-Index-Vector'''
return len(indicies_vec) - 1
def get_word(i, indicies_vec, string):
'''Returns the word at index i given a String-Index-Vector and a string'''
return string[indicies_vec[i]:(indicies_vec[i+1]-1)]
def find_ind(i, indicies_vec):
for n in range(0,len(indicies_vec)):
if (indicies_vec[n] == i): return n
return -1
|
[
"gregoryelliott34@yahoo.com"
] |
gregoryelliott34@yahoo.com
|
2a1b6c4e693ea1167762441bbe43a0240db17793
|
48967ce8ec0af199f6551ab5e292251c88a2dc6d
|
/coding_bat/s2/count_hi.py
|
8844830ff69b26059ef1f8827ed95895006c1bbf
|
[] |
no_license
|
SteveVallay/coding-bat-py
|
05bef046a1a67d5a56e3d37be3ef52e345ce13c4
|
15a3b8763a2c1c3f885472c090c60674d084ac09
|
refs/heads/master
| 2020-06-02T21:48:08.037853
| 2013-07-29T09:37:58
| 2013-07-29T09:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
def count_hi(str):
l = len(str)
s = 0
for i in range(l-1):
if str[i] == 'h' and str[i+1] == 'i':
s+=1
return s
print count_hi('abc hi hi')
|
[
"zhibinwang.q@gmail.com"
] |
zhibinwang.q@gmail.com
|
de347e329b25a85d23fcff41b19e00cf9c42f632
|
fdf9c968a41af28cc23e8e8953e0add22b6acd0e
|
/lab5/blog/articles/models.py
|
807a56e28b6e8171376b09992100919a878a89fc
|
[] |
no_license
|
Iccravy/KTP_LAB5
|
5147a48c130df5f25a0674dec13176ee65e49fb8
|
5edf330e26d46752a5785d689de6f364e545f32a
|
refs/heads/master
| 2020-05-07T20:50:11.756169
| 2019-04-11T21:25:31
| 2019-04-11T21:25:31
| 180,880,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Article(models.Model):
title = models.CharField(max_length=200)
author = models.ForeignKey(User)
text = models.TextField()
created_date = models.DateField(auto_now_add=True)
def __unicode__(self):
return "%s: %s" % (self.author.username, self.title)
def get_excerpt(self):
return self.text[:140] + "..." if len(self.text) > 140 else self.text
# Create your models here.
|
[
"noreply@github.com"
] |
Iccravy.noreply@github.com
|
7e0459af498a4370000525c2aa536f0fb419b5b1
|
77442878f78815bd54218ab0d43ed2b6ed7285be
|
/pie/type.py
|
a7dcfeb8d79df1af8f929d46f20aa5e7e07bc50c
|
[] |
no_license
|
aisk/pie
|
13dd844df56bcbb9ac7f463d857a4f72358cc4a3
|
03a2d96965542d5decf5325977ed81a9b028bf81
|
refs/heads/master
| 2020-05-30T18:32:52.451435
| 2013-01-29T11:33:22
| 2013-01-29T11:33:22
| 6,761,819
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
class Symbol(str):
pass
sym_define = Symbol('define')
sym_lambda = Symbol('lambda')
sym_if = Symbol('if')
sym_set = Symbol('set!')
sym_quote = Symbol('quote')
sym_begin = Symbol('begin')
sym_import = Symbol('import')
|
[
"aisk1988@gmail.com"
] |
aisk1988@gmail.com
|
06b6262507579c051d9211fe7d8d5f7ba5cca652
|
ac5f4c2fd9e3ef8bf23eaec36b6a4edae9a53aef
|
/lfortran/ast/tests/use_results.py
|
24e6104f78f878f4ffa0a2d383644cadbe222a80
|
[
"BSD-3-Clause"
] |
permissive
|
milancurcic/lfortran
|
893f6e61ad31cc115d4fa1e605d036b5ec732100
|
ee31896bf56fdcaa3aa2e067e8de5536b70f6e38
|
refs/heads/master
| 2022-11-14T21:01:58.895808
| 2020-07-09T20:05:59
| 2020-07-09T20:05:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
results = [
('Use', (1, 1), 'b', []),
('Use', (1, 1), 'a', [('UseSymbol', 'b', None), ('UseSymbol', 'c', None)]),
('Use', (1, 1), 'a', [('UseSymbol', 'x', 'b'), ('UseSymbol', 'c', None), ('UseSymbol', 'd', 'a')]),
]
|
[
"ondrej@certik.us"
] |
ondrej@certik.us
|
1929a1920e777863c342284fd742320c548dafe9
|
86e35d5c5e82626481e81b9a5f98a9f709b622e0
|
/2017-2018/lecture-notes/python/09-dynamic-programming-algorithms_listing_2_fib_dp.py
|
950d0e08067ec895221ec53b91359649d6d62fe8
|
[
"BSD-2-Clause"
] |
permissive
|
HughP/comp-think
|
dc439798a9d02551d92e64779fcebf31997ce692
|
3dac317bda0eb7650adc4a92c1ccb8a4ce87a3a6
|
refs/heads/master
| 2020-03-30T10:38:49.352365
| 2018-05-11T10:17:17
| 2018-05-11T10:17:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
def fib_dp(n, input_dict=dict()):
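    # Note: the mutable default input_dict=dict() is created once at function
    # definition time, so the memo persists across top-level calls; here that
    # is deliberate (it is the dynamic-programming cache), not the usual pitfall.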
if n not in input_dict:
if n == 0 or n == 1:
input_dict[n] = n
else:
input_dict[n] = fib_dp(n-1, input_dict) + fib_dp(n-2, input_dict)
return input_dict[n]
if __name__ == "__main__":
n = 35
print("Input number:", n)
print("Result list:", fib_dp(n))
|
[
"essepuntato@gmail.com"
] |
essepuntato@gmail.com
|
75fd716a3c070aa52e013ba605bac2398ed59f88
|
7e4a4a504c4262da41e4228d004978bc8c4c52fb
|
/problem35.py
|
86dfa2a47e05edf661114598d76c02562e104e90
|
[] |
no_license
|
manionan/project-euler
|
7b1e5e40200d9df043fa46c1938b1dabfa33afb1
|
a87e6615304ac0b61f2bb99757e636aebc0a6dfb
|
refs/heads/master
| 2021-06-08T00:43:12.113738
| 2018-12-10T15:59:39
| 2018-12-10T15:59:39
| 31,432,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
def permutations(number):
perms = []
n_dig = -1
temp_num = number
while temp_num > 0:
temp_num /= 10
n_dig += 1
for i in range(0,n_dig):
perms.append( (number%(10**(i+1)))*(10**(n_dig-i)) + number/(10**(i+1)) )
return perms
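# e.g. permutations(197) -> [719, 971]: the digit rotations needed to test
# whether a prime is circular.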
print "perms 12045: ", permutations(12045)
from problem7 import *
primes = list_primes(2,10000000)
print "got primes"
is_prime = {}
for i in range(0,10000000):
is_prime[i] = False
print "inited dict"
for p in primes:
is_prime[p] = True
print "set prime bools"
n_circ = 0
for p in primes:
is_circ = True
perms = permutations(p)
for i in perms :
if is_prime[i] == False:
is_circ = False
if is_circ:
n_circ += 1
print "n_circ = ", n_circ
|
[
"manionan+git@gmail.com"
] |
manionan+git@gmail.com
|
0851ff0e994b5575c57516bfc1cbd44921ca20fe
|
fdd3dce259fcd5ca5ba81a59670224fc05eada1e
|
/4/dream/__init__.py
|
c7aa94246d77f46d1b2207871eb7e241165ef3a3
|
[
"MIT"
] |
permissive
|
JacobFV/Computatrum-2019-attempt
|
18f624a9b1b81717eb8effb74633964f4e8e7285
|
6b9c324f4e0e73e8d7af79bb7785d0e86d26bc31
|
refs/heads/master
| 2022-11-29T01:09:28.366881
| 2020-08-06T18:37:42
| 2020-08-06T18:37:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,910
|
py
|
import numpy as np
import tensorflow as tf
class Intelligence:
def __init__(self, sensors, actuators):
self.o_vec_len=0
self.d_vec_len=0
self.sensors=[]
self.actuators=[]
self.add_sensors(sensors)
self.add_actuators(actuators)
def __del__(self):
pass#self.session.close()
def think(self):
"""Public Intelligence think
compiles a list of sensor observations o and calls the
subclassed _think(o) -> d and then executes the decision
        tensor on the appropriate actuators while retaining differentiability,
since some of these sensors and actuators may be used
in backpropagation
"""
observation=tf.zeros(shape=(self.o_vec_len,))
o_index=0
for sensor in self.sensors:
new_o_index=o_index+sensor.o_vec_len(new=False)
observation[o_index:new_o_index]=sensor.get_observation()
o_index=new_o_index
decision=self._think(observation)
d_index=0
for actuator in self.actuators:
new_d_index=d_index+actuator.d_vec_len(self, new=False)
actuator.execute(decision[d_index:new_d_index])
d_index=new_d_index
def _think(self, observation):
"""returns a decision tensor given an observation tensor.
This is the foundation from which subclasses layer on logic just as a mighty redwood protrudes from California soil
"""
raise NotImplementedError
def add_sensors(self, sensors):
for sensor in sensors:
self.sensors.append(sensor)
self.o_vec_len+=sensor.o_vec_len(self, new=True)
def add_actuators(self, actuators):
for actuator in actuators:
self.actuators.append(actuator)
self.d_vec_len+=actuator.d_vec_len(self, new=True)
class DREAM(Intelligence):
O_REAL_LEN=40
O_ABS_LEN=10
D_INTERN_LEN=10
D_REAL_LEN=15
D_ABS_LEN=10
PRED_STEPS_PER_CONF_CALC=3
LEARNING_RATE=0.001
NUM_MINIMIZATIONS=3
def __init__(self, observation_abstractor, decision_abstractor, decider, predictor, conscience, sensors, actuators):
super(DREAM, self).__init__(
sensors=sensors,
actuators=actuators
)
self.observation_abstractor=observation_abstractor
self.decision_abstractor=decision_abstractor
self.decider=decider
self.predictor=predictor
self.conscience=conscience
self.min_confidence=0.5
        class Object(object): pass
        self.past = Object()  # plain attribute holder for the recurrent "past" state
def _think(self, observation):
"""internal thinking function
        takes an observation and returns the decision made for that observation while statefully updating self.deciding_internal_state
"""
observation_abstraction=self.observation_abstractor(observation)
error=self.divergence(observation_abstraction, self.past.prediction_abstraction)
self.predictor.fit(
X=(self.past.observation_abstraction, self.past.decision_abstraction),
Y=(observation_abstraction, error),
state=self.past.predictor_state)
self._maximize_mean_predicted_goodness(now=observation_abstraction, past=self.past)
public_decision, private_decision = self.decider((observation_abstraction, self.past.decision_abstraction))
decision_abstraction=self.decision_abstractor((public_decision, private_decision))
self.conscience.fit(
X=(observation_abstraction, decision_abstraction),
Y=1.0)
self.past.predictor_state=self.predictor.state
prediction_abstraction, _ = self.predictor((observation_abstraction, decision_abstraction))
self.past.prediction_abstraction=prediction_abstraction
self.past.observation_abstraction=observation_abstraction
self.past.decision_abstraction=decision_abstraction
return public_decision
def _maximize_mean_predicted_goodness(self, now, past):
old_states = (self.observation_abstractor.state, self.decision_abstractor.state, self.decider.state, self.predictor.state)
        confidence = tf.constant(1.0)  # tf scalar so confidence.eval() in the loop below works
decision_abstraction=past.decision_abstraction
prediction_abstraction=now
mean_predicted_goodness=0.0
while confidence.eval()>=self.min_confidence:
            # don't want to have to run confidence every iteration
for _ in range(self.PRED_STEPS_PER_CONF_CALC):
public_decision, private_decision = self.decider((prediction_abstraction, decision_abstraction))
decision_abstraction=self.decision_abstractor((public_decision, private_decision))
prediction_abstraction, new_confidence = self.predictor((prediction_abstraction, decision_abstraction))
confidence*=new_confidence
mean_predicted_goodness+=confidence*self.conscience((prediction_abstraction, decision_abstraction))
#run minimizer
badness=tf.log(1.0-mean_predicted_goodness)
minimizer=tf.train.AdamOptimizer(learning_rate=self.LEARNING_RATE).minimize(loss=badness, var_list=self.decider.trainable_vars)
for _ in range(self.NUM_MINIMIZATIONS):
minimizer.run()
self.observation_abstractor.state, self.decision_abstractor.state, self.decider.state, self.predictor.state = old_states
@classmethod
def divergence(cls, ideal, actual):
#TODO: implement KL divergence
return tf.reduce_mean(tf.pow(ideal-actual, 2.0))
class leftover_code:
def build_networks(self):
"""builds decider and predictor networks, assigns handles to these networks in self accessible for later consumption by decide and predict, and returns an initial internal state variable to base changes from
note:
This function is called after Intelligence initializes, so o_vec_len and d_vec_len are fully initialized
returns:
initial internal state: irregularly shaped tensorflow variable used to keep track of recurrent data. See readme.md > Code > Variables > internal_state
"""
raise NotImplementedError
def decide(self, observation, prev_decision, internal_state, learn=False):
"""Makes decision based on current observation and previous decision
==========================
Parameters:
        observation: 1 dimensional tf tensor representing current observation
        prev_decision: 1 dimensional tf tensor representing previous decision
internal_state: irregularly shaped tensorflow variable used to keep track of recurrent data. See readme.md > Code > Variables > internal_state
learn: whether or not the observation and decision values presented represent data from a real distribution that internal unsupervised learning (if any) should train itself on
Returns:
2-element tuple containing:
            0: selected decision encoded as a 1 dimensional array tensorflow operation
1: the new internal recurrent state expressed in an irregular data structure of tensorflow operations defined by the implementation subclass
"""
raise NotImplementedError
def predict(self, observation, decision, internal_state, learn=False):
"""Makes decision based on current observation and previous decision
==========================
Parameters:
observation: 1 deminsional tf tensor representing current observation
decision: 1 deminsional tf tensor representing decision
internal_state: irregularly shaped tensorflow variable used to keep track of recurrent data. See readme.md > Code > Variables > internal_state
learn: whether or not the observation and decision values presented represent data from a real distribution that internal unsupervised learning (if any) should train itself on
Returns:
2-element tuple containing:
            0: the observation or abstract prediction expressed as a 1 dimensional array tensorflow operation identical in size to the observation or abstraction tensor
1: new internal recurrent state expressed in an irregular data structure of tensorflow operations defined by the implementation subclass
"""
raise NotImplementedError
def fit_decider(self, observation, prev_decision, ideal_decision, internal_state, learning_rate):
"""fits the decider such that the output of inputing observation and prev_decision into the decider with internal_state tends more towards ideal_decision
This methods minimizes raw mean squared error with the tf Adam optimizer which is naturally slow. If subclassed model can use an custom minimizer or the keras fitting functions, they will be much faster.
==========================
Parameters:
        observation: 1 dimensional tf tensor representing current observation
        prev_decision: 1 dimensional tf tensor representing previous decision
        ideal_decision: 1 dimensional tf tensor representing desired decision given the input parameters
internal_state: irregularly shaped tensorflow variable used to keep track of recurrent data. See readme.md > Code > Variables > internal_state
learning_rate: degree to which DREAM.decider_trainable_vars(self) are adjusted. In pure gradient descent: new_weights += learning_rate * weight_gradients. However, subclass implementations may use their own optimizers.
Returns:
nothing
"""
actual_decision=self.decide(observation, prev_decision, internal_state)
error=tf.pow(tf.reduce_mean(tf.pow(ideal_decision - actual_decision, 2.0)), 0.5)
        minimizer = tf.train.optimizers.Adam(learning_rate).minimize(error, self.decider_trainable_vars)
        with tf.Session() as session:
minimizer.run()
def fit_predictor(self, observation, decision, ideal_prediction, internal_state, learning_rate):
"""fits the predictor such that the output of inputing observation and decision into the predictor with internal_state tends more towards ideal_prediction
This methods minimizes raw mean squared error with the tf Adam optimizer which is naturally slow. If subclassed model can use an custom minimizer or the keras fitting functions, they will be much faster.
==========================
Parameters:
        observation: 1 dimensional tf tensor representing current observation
        decision: 1 dimensional tf tensor representing decision
        ideal_prediction: 1 dimensional tf tensor representing the ideal predictor prediction
internal_state: irregularly shaped tensorflow variable used to keep track of recurrent data. See readme.md > Code > Variables > internal_state
learning_rate: degree to which DREAM.predictor_trainable_vars(self) are adjusted. In pure gradient descent: new_weights += learning_rate * weight_gradients. However, subclass implementations may use their own optimizers.
Returns:
nothing
"""
actual_prediction = self.predict(observation, decision, internal_state)
error = tf.pow(tf.reduce_mean(tf.pow(ideal_prediction - actual_prediction, 2.0)), 0.5)
minimizer = tf.train.optimizers.Adam(learning_rate).minimize(error, self.predictor_trainable_vars)
        with tf.Session() as session:
minimizer.run()
|
[
"jacobfv@msn.com"
] |
jacobfv@msn.com
|
56392c3f5b27404a1f2b2f1c34eaaa7e939a5d97
|
ae101caf03a373e3ad6a2ea0fddd99038574e73f
|
/archivers/_file.py
|
520d1dbd8cba9d093d393d76061f7e91ede8f63f
|
[] |
no_license
|
internetimagery/todo
|
df9be99c34761191c4322eb315681fcd4e9a1cf8
|
ed09c14f231da5613ec86a2a6ff6683d9a663eb1
|
refs/heads/master
| 2020-05-30T22:48:07.645472
| 2015-12-29T10:44:40
| 2015-12-29T10:44:40
| 34,398,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,092
|
py
|
# File archive. Store off info into a folder
# Created 02/10/15 Jason Dixon
# http://internetimagery.com
import archive
import os.path
import zipfile
import time
import os
class File(archive.Archive):
def start(s):
s.settingName = "setting.file"
s.settingFileName = "setting.file.files"
def buildSettings(s, parent):
s.files = set(s.data.get(s.settingFileName, []))
s.section = s.view.CheckSection(
attributes={
"checked" : s.data.get(s.settingName, False),
"label" : "File Archive",
"annotation": "Store a backup of the current scene into the provided folder upon each Todo completion."
},
events={
"change" : lambda x: s.data.set(s.settingName, x.checked)
},
parent=parent
)
s.view.Button(
attributes={
"label" : "Pick archive folder.",
"image" : s.model.Icon["settings.file"],
"annotation": "Pick archive folder."
},
events={
"pressed" : s.setFile
},
parent=s.section
)
s.wrapperOuter = s.view.HorizontalLayout(
parent=s.section
)
s.wrapperInner = None
s.buildFiles()
def setFile(s, element):
path = s.model.File.dialog(True)
if path:
project = s.model.File.project()
path = s.relativePath(path, project)
s.files.add(path)
s.data[s.settingFileName] = list(s.files)
s.buildFiles()
def removeFile(s, path, element):
if path in s.files:
s.files.remove(path)
s.data[s.settingFileName] = list(s.files)
element.delete()
def buildFiles(s):
if s.wrapperInner:
s.wrapperInner.delete()
s.wrapperInner = s.view.HorizontalLayout(
parent=s.wrapperOuter
)
def addFile(f):
s.view.Button(
attributes={
"label" : f,# s.absolutePath(f),
"annotation": "Click to remove",
"image" : s.model.Icon["settings.filepath"]
},
events={
"pressed" : lambda x: s.removeFile(f, x)
},
parent=s.wrapperInner
)
for f in s.files:
addFile(f)
    def absolutePath(s, path, root):
        """
        Take a relative path and return a workable absolute path
        """
        return os.path.realpath(os.path.join(root, path))
def relativePath(s, path, root):
"""
Take an absolute path, return a relative path if in project
"""
try:
rPath = os.path.relpath(path, root)
except ValueError: # On windows, the path is on another drive?
rPath = path
        return s.absolutePath(path, root).replace("\\", "/") if rPath[:2] == ".." else rPath.replace("\\", "/")
def runArchive(s, todo, filename):
active = s.data.get(s.settingName, False)
target = os.path.realpath(filename)
if active and os.path.isfile(target):
paths = s.data.get(s.settingFileName, [])
if paths:
basename = os.path.basename(target)
whitelist = [" ", ".", "_", "@"] # Strip invalid characters
label = "".join(ch for ch in todo.label if ch.isalnum() or ch in whitelist).rstrip()
name = "%s_%s_%s.zip" % (os.path.splitext(basename)[0], int(time.time() * 100), label)
project = s.model.File.project()
for path in paths:
folder = s.absolutePath(path, project)
if os.path.isdir(folder):
dest = os.path.realpath(os.path.join(path, name))
z = zipfile.ZipFile(dest, "w")
z.write(target, basename)
z.close()
|
[
"jason.dixon.email@gmail.com"
] |
jason.dixon.email@gmail.com
|
56dcc1b15ee7166180b01fa29f55d6d123d95a1e
|
f1812503a3293037ac5a1f60012c4e803fe8f358
|
/7-Kyuu/Two_oldest_ages.py
|
e7f7362891dd9ad871a0126b8a4519347717dd09
|
[] |
no_license
|
AgaK1/codewars_python
|
197ef2a8d92ef058d0a4738756bdd79f9349c1bc
|
36e6b9bfaa172b4b2434baa855d59f8839cf5f94
|
refs/heads/master
| 2023-04-04T13:46:09.262690
| 2021-04-24T10:50:15
| 2021-04-24T10:50:15
| 315,000,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# The two oldest ages function/method needs to be completed. It should take an array of numbers as its argument and return the two highest numbers within the array. The returned value should be an array in the format [second oldest age, oldest age].
# The order of the numbers passed in could be any order. The array will always include at least 2 items.
# For example:
# two_oldest_ages([1, 3, 10, 0]) # should return [3, 10]
def two_oldest_ages(ages):
return sorted(ages)[-2:]
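# Usage sketch: two_oldest_ages([1, 5, 87, 45]) -> [45, 87]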
|
[
"AgaK1@users.noreply.github.com"
] |
AgaK1@users.noreply.github.com
|
a7130034bf32eebeba61534e79a27b57963c3afa
|
db802b08020f53874d7be9f3d41b592c48b77ed4
|
/strategy.py
|
020e99cb89ebf9dab94bb4299fc6d499d67c3ce2
|
[
"Apache-2.0"
] |
permissive
|
pfrstg/bgend
|
4b9e7c88084bd2213ad819ad58a78f96b156cac0
|
e659597f8671d9769eff65498f5c3be91b54461f
|
refs/heads/master
| 2020-12-06T13:52:46.061227
| 2020-05-31T22:10:06
| 2020-05-31T22:10:06
| 232,479,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,959
|
py
|
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import numpy as np
import time
import board
class ProgressIndicator(object):
"""Simple print based progress indicator."""
def __init__(self, total_objects, progress_interval):
self.total_objects = total_objects
self.completed_objects = 0
self.progress_interval = progress_interval
self.start_time = time.time()
    def complete_one(self):
        """Mark completion of one object."""
self.completed_objects += 1
if self.progress_interval == 0:
return
if (self.completed_objects != self.total_objects and
self.completed_objects % self.progress_interval != 0):
return
frac_complete = self.completed_objects / self.total_objects
this_time = time.time()
print("%d/%d %.1f%%, %fs elapsed, %fs estimated total" % (
self.completed_objects,
self.total_objects,
frac_complete * 100,
this_time - self.start_time,
(this_time - self.start_time) / frac_complete),
flush=True)
class MoveCountDistribution(object):
"""Stores a distribution over number of moves till end of game.
Attribute dist is an np array of float containing the probabilities of
each number of moves to finish."""
__slots__ = ["dist"]
def __init__(self, dist=np.zeros([1])):
self.dist = np.asarray(dist)
if len(self.dist.shape) != 1:
raise ValueError("Need 1D shape, got %s", self.dist.shape)
def __add__(self, other):
"""Only support adding to another MoveCountDistribution."""
max_len = max(self.dist.shape[0], other.dist.shape[0])
return MoveCountDistribution(
np.pad(self.dist, (0, max_len - self.dist.shape[0]),
mode="constant",
constant_values=0) +
np.pad(other.dist, (0, max_len - other.dist.shape[0]),
mode="constant",
constant_values=0))
def __sub__(self, other):
"""Only support subtracting to another MoveCountDistribution."""
max_len = max(self.dist.shape[0], other.dist.shape[0])
return MoveCountDistribution(
np.pad(self.dist, (0, max_len - self.dist.shape[0]),
mode="constant",
constant_values=0) -
np.pad(other.dist, (0, max_len - other.dist.shape[0]),
mode="constant",
constant_values=0))
def __mul__(self, other):
"""Only support multiplying with a scalar."""
return MoveCountDistribution(self.dist * other)
def __truediv__(self, other):
"""Only support multiplying with a scalar."""
return MoveCountDistribution(self.dist / other)
def __str__(self):
return "MCD(%f, %s)" % (self.expected_value(), self.dist)
def __iter__(self):
return self.dist.__iter__()
def __len__(self):
return self.dist.__len__()
def increase_counts(self, amount):
return MoveCountDistribution(np.insert(self.dist, 0, [0] * amount))
def is_normalized(self):
return np.allclose(np.sum(self.dist), 1)
def expected_value(self):
return np.sum(self.dist * range(self.dist.shape[0]))
def append(self, values):
return MoveCountDistribution(np.append(self.dist, values))
def trim_low_prob(self, threshold):
        modified_dist = self.dist.copy()  # copy so the stored distribution (or shared default) is not mutated
modified_dist[modified_dist < threshold] = 0
return MoveCountDistribution(np.trim_zeros(modified_dist, 'b'))
class DistributionStore(object):
"""Stores MoveCountDistributions for board states.
Attributes:
config: board.GameConfiguration
distribution_map: map from board id to MoveCountDistribution
"""
def __init__(self, config):
self.config = config
self.distribution_map = {}
def compute_best_moves_for_roll(self, this_board, roll):
"""Computes the best moves for the roll.
"best" means the resulting position with the lowest expected
value. "moves" means the move for every die. If there are
multiple groups of moves that return the same next board, one
will be chosen arbitrarily.
Assumes that all next board positions are already computed in
self.distribution_map.
Args:
this_board: board.Board
roll: board.Roll
Return
list of board.Move
"""
# dict from board id to tuple of (expected_value, moves)
possible_next_boards = {}
for moves in this_board.generate_moves(roll):
next_board = this_board.apply_moves(moves)
next_board_id = next_board.get_id()
if next_board_id in possible_next_boards:
continue
possible_next_boards[next_board_id] = (
self.distribution_map[next_board_id].expected_value(),
moves)
best_next_board = min(possible_next_boards.keys(),
key=(lambda k: possible_next_boards[k][0]))
return possible_next_boards[best_next_board][1]
def compute_move_distribution_for_board(self, this_board):
"""Computes the MoveCountDistribution for this_board.
Assumes that all next board position are already computed in
self.distribution_map
Args:
this_board: board.Board
Return
MoveCountDistribution
"""
out = MoveCountDistribution()
for roll in board.ROLLS:
moves = self.compute_best_moves_for_roll(this_board, roll)
next_board = this_board.apply_moves(moves)
out += (self.distribution_map[next_board.get_id()]
.increase_counts(1) * roll.prob)
assert out.is_normalized()
return out
def compute(self, progress_interval=500, limit=-1):
"""Computes and stores MoveCountDistribution for each board.
        Clears any existing data in self.distribution_map.
Args:
limit: if > 0, only computes this many valid boards
"""
self.distribution_map.clear()
progress_indicator = ProgressIndicator(self.config.num_valid_boards,
progress_interval)
if progress_interval:
print("Starting compute on %d boards" %
self.config.num_valid_boards,
flush=True)
# The minimum board id is the game ended state.
progress_indicator.complete_one()
self.distribution_map[self.config.min_board_id] = MoveCountDistribution([1])
id_generator = self.config.generate_valid_ids()
next(id_generator) # skip the solved state
for board_id in id_generator:
if not self.config.is_valid_id(board_id):
continue
progress_indicator.complete_one()
this_board = board.Board.from_id(self.config, board_id)
dist = self.compute_move_distribution_for_board(this_board)
self.distribution_map[board_id] = dist
if limit > 0 and progress_indicator.completed_objects >= limit:
print("Stopping at %d boards, id %d"
% (progress_indicator.completed_objects, board_id))
break
    def pretty_string(self, limit=-1):
        num_printed = 0
        for board_id, dist in self.distribution_map.items():
            this_board = board.Board.from_id(self.config, board_id)
            print("Board %d" % board_id)
            print(dist)
            print(this_board.pretty_string())
            num_printed += 1
            if limit > 0 and num_printed >= limit:
                break
def save_hdf5(self, fileobj):
with h5py.File(fileobj, "w") as f:
dist_map_grp = f.create_group("distribution_map")
for board_id, mcd in self.distribution_map.items():
#print(mcd)
dist_map_grp.create_dataset(str(board_id), data=mcd.dist)
self.config.save_into_hdf5(f.create_group("config"))
def load_hdf5(fileobj):
with h5py.File(fileobj, "r") as f:
store = DistributionStore(
board.GameConfiguration.load_from_hdf5(f["config"]))
for board_id, arr in f["distribution_map"].items():
store.distribution_map[int(board_id)] = (
MoveCountDistribution(arr))
return store
|
[
"patriley@gmail.com"
] |
patriley@gmail.com
|
baf487b29934108f64b1096cd4f6834ae9956240
|
400e869dff5595d5b1f3e2ddc09bb4f5dac54939
|
/逐步代入降维/step.py
|
364b6e45dcec8cc6d444cfb2c1bba7c207524088
|
[] |
no_license
|
ypc8272805/ICU-mortality-forecast
|
45ea09d4df9e279a2d106bbb66e745403c8e8da0
|
72cc9747670f785debb694e5e56158cb3406829f
|
refs/heads/master
| 2021-09-06T01:15:37.641726
| 2018-02-01T08:11:59
| 2018-02-01T08:11:59
| 119,774,558
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 09:10:47 2017
@author: zg
"""
import scipy.io as sio
from sklearn import svm
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
# Load the data
matfna='E:/工作汇报/代亚菲工作汇报45/trainj1.mat'
matfnb='E:/工作汇报/代亚菲工作汇报45/testj1.mat'
dataa=sio.loadmat(matfna)
datab=sio.loadmat(matfnb)
load_matrixa = dataa['trainj1']
load_matrixb = datab['testj1']
# Data preprocessing
B=[]
C=[]
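# Outer loop: grow the candidate variable set one column at a time (stepwise
# inclusion); inner loop: evaluate each set on 10 train/test folds, then
# average the AUROC across folds.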
for j in range(1,29):
result=[]
for i in range(1,11):
x_train=load_matrixa[30006*(i-1):30006*i,0:j]
min_max_scaler = preprocessing.MinMaxScaler()
x_train_minmax = min_max_scaler.fit_transform(x_train)
x_test=load_matrixb[1908*(i-1):1908*i,0:j]
x_test_minmax = min_max_scaler.transform(x_test)
y_train=load_matrixa[30006*(i-1):30006*i,28]
y_test=load_matrixb[1908*(i-1):1908*i,28]
        # Use a support vector machine (SVM) as the prediction model
clf=svm.SVC(C=0.1, kernel='linear', decision_function_shape='ovr',probability=True)
aam=clf.fit(x_train_minmax, y_train)
y_pred=clf.predict(x_test_minmax)
prob=clf.predict_proba(x_test_minmax)
y_scor=prob[:,1:2]
        # Inspect the AUROC to choose the best number of variables
tn, fp, fn, tp = confusion_matrix(y_test,y_pred).ravel()
test_auc = metrics.roc_auc_score(y_test,y_scor)
resul=[test_auc]
result.append(resul)
A=np.array(result)
B=np.mean(A, axis=0)
C.append(B)
|
[
"412358797@qq.com"
] |
412358797@qq.com
|
2ea3eeb5bca725cb8094211a287afdb3f7beee07
|
8c573454a4e64d26109ce30a1f134242a2c0745f
|
/main.py
|
71ea0af47990057fc97bdff9c8845c5edd0aa850
|
[] |
no_license
|
OliviaNocentini/concatenation
|
c8c461a36f9e74a56bd6a113f9ad63b675ab19e2
|
e5d1ed7718f7513cca6071a6c8d63dabcde60830
|
refs/heads/master
| 2023-04-11T07:57:41.640639
| 2021-04-28T16:46:30
| 2021-04-28T16:46:30
| 362,542,855
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,232
|
py
|
from FashionCNN import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import confusion_matrix
from skimage.io import imread, imshow
from skimage.transform import resize
from skimage.feature import hog
from skimage import exposure
import matplotlib.pyplot as plt
if __name__ == '__main__':
batch_size = 2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Load the training and test dataset")
train_set = torchvision.datasets.FashionMNIST("./data", download=True, transform=
transforms.Compose([transforms.ToTensor()]))
test_set = torchvision.datasets.FashionMNIST("./data", download=True, train=False, transform=
transforms.Compose([transforms.ToTensor()]))
train_loader = torch.utils.data.DataLoader(train_set,
batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_set,
batch_size=batch_size)
# Examine a sample
dataiter = iter(train_loader)
    images, labels = next(dataiter)
# print("images.shape",images.shape)
# plt.imshow(images[0].numpy().squeeze(), cmap = 'Greys_r')
# loading CNN
model = FashionCNN()
model.to(device)
# defining error
error = nn.CrossEntropyLoss()
# defining lr and optimizer
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
num_epochs = 2
count = 0
# Lists for visualization of loss and accuracy
loss_list = []
iteration_list = []
accuracy_list = []
# Lists for knowing classwise accuracy
predictions_list = []
labels_list = []
print("Back propagation starting")
for epoch in range(num_epochs):
print("Epoch ", epoch)
running_loss = 0.0
for images, labels in train_loader:
# Transfering images and labels to GPU if available
images, labels = images.to(device), labels.to(device)
# visualize = false because nto interested to visualize the hog image
fd = np.array([hog(image.numpy().squeeze(), orientations=8, pixels_per_cell=(8, 8),
cells_per_block=(2, 2), visualize=False, multichannel=False) for image in images])
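            # For a 28x28 image with 8x8 cells and 2x2 blocks this yields
            # (2*2 blocks) * (2*2 cells per block) * 8 orientations = 128 HOG
            # features per image (the exact length depends on these parameters).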
"""
Uncomment me if you want to check the hog of the first figure
fd, hog_image = hog(images[0].numpy().squeeze(), orientations=8, pixels_per_cell=(8, 8),
cells_per_block=(2, 2), visualize=True, multichannel=False)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.axis('off')
ax1.imshow(images[0].numpy().squeeze(), cmap = 'Greys_r')
ax1.set_title('Input image')
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
plt.show()
break
"""
train = Variable(images.view(batch_size, 1, 28, 28))
fd = fd.astype(np.float32)
data_vector = torch.from_numpy(fd)
labels = Variable(labels)
# Forward pass
outputs = model(train, data_vector)
loss = error(outputs, labels)
# Initializing a gradient as 0 so there is no mixing of gradient among the batches
optimizer.zero_grad()
# Propagating the error backward
loss.backward()
# Optimizing the parameters
optimizer.step()
count += 1
running_loss += loss.item()
print("count ", count)
            if count % 10 == 0:  # print every 10 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, count + 1, running_loss / 10))
running_loss = 0.0
# Testing the model
if not (count % 50): # It's same as "if count % 50 == 0"
total = 0
correct = 0
for images, labels in test_loader:
images, labels = images.to(device), labels.to(device)
labels_list.append(labels)
                    test = Variable(images.view(batch_size, 1, 28, 28))
                    # recompute HOG features for this test batch (not the last training batch's)
                    fd_test = np.array([hog(image.numpy().squeeze(), orientations=8, pixels_per_cell=(8, 8), cells_per_block=(2, 2), visualize=False, multichannel=False) for image in images]).astype(np.float32)
                    outputs = model(test, torch.from_numpy(fd_test))
predictions = torch.max(outputs, 1)[1].to(device)
predictions_list.append(predictions)
correct += (predictions == labels).sum()
total += len(labels)
accuracy = correct * 100 / total
loss_list.append(loss.data)
iteration_list.append(count)
accuracy_list.append(accuracy)
if not (count % 500):
print("Iteration: {}, Loss: {}, Accuracy: {}%".format(count, loss.data, accuracy))
|
[
"o.nocentini@gmail.com"
] |
o.nocentini@gmail.com
|
f86c0c8bd14330b86459ff07a3f118d6d4657a55
|
003ffcf8144565404636f3d74590a8d6b10a90a4
|
/121-best-time-to-buy-and-sell-stock/121-best-time-to-buy-and-sell-stock.py
|
5ac3fb0f7733adcd44336159745074cdf03339e3
|
[] |
no_license
|
congve1/leetcode
|
fb31edf93049e21210d73f7b3e7b9b82057e1d7a
|
ce1e802b5052da2cdb919d6d7e39eed860e0b61b
|
refs/heads/master
| 2020-05-13T19:19:58.835432
| 2019-05-06T00:44:07
| 2019-05-06T00:44:07
| 181,652,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
min_price = prices[0]
max_profit = 0
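        # Single pass: track the cheapest price seen so far and the best
        # profit from selling at the current price.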
for price in prices:
if price < min_price:
min_price = price
elif price - min_price > max_profit:
max_profit = price - min_price
return max_profit
|
[
"congve1@live.com"
] |
congve1@live.com
|
a6179c7b7ab05f45f07bccd48d9ab291ee20a507
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/cors-anywhere-webserver/main.py
|
840714b06997a3a565d33f6c533abccdb1e8d486
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164
| 2021-06-11T22:59:07
| 2021-06-11T22:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,590
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# TODO: append logging
import flask
app = flask.Flask(__name__)
app.debug = True
from urllib.request import urlopen, Request
@app.route('/')
def index():
url = flask.request.args.get('url')
print('URL:', url)
if url is None:
return "Append url, please: {}?url=<your_url>".format(flask.request.host_url)
headers = dict()
headers['Origin'] = flask.request.host_url
request = Request(url, headers=headers)
with urlopen(request) as f:
content = f.read()
print(content)
    # We need to determine the encoding: extract the XML declaration and read the encoding value from it
try:
s_index = content.find(b'<?')
e_index = content.find(b'?>')
if s_index != -1 and e_index != -1:
declaration = content[s_index: e_index + len(b'?>')].decode('utf-8')
import re
match = re.search(r'encoding="(.+)"', declaration)
if match:
print(match.group(1))
content = content.decode(match.group(1))
except Exception as e:
print(e)
headers = dict(f.getheaders())
rs = flask.Response(content)
rs.headers.extend(headers)
rs.headers['Access-Control-Allow-Origin'] = '*'
print(rs.headers)
return rs
if __name__ == "__main__":
# Localhost
app.run()
# # Public IP
# app.run(host='0.0.0.0')
|
[
"gil9red@gmail.com"
] |
gil9red@gmail.com
|
687927b70dba012c7f0fa3ca371d40da97a1c9ba
|
b8e3363a40bc9928ae85c16232c5bf6240597a18
|
/out/production/home-assistant/components/switch/scsgate.py
|
7755168585e310461392d1ea73e8bc15c3313219
|
[
"MIT"
] |
permissive
|
LaurentTrk/home-assistant
|
4cbffd5a71f914e003918542319bc6caa96dbb72
|
5a808d4e7df4d8d0f12cc5b7e6cff0ddf42b1d40
|
refs/heads/dev
| 2021-01-15T23:02:38.147063
| 2016-05-15T12:21:52
| 2016-05-15T12:21:52
| 51,471,180
| 2
| 0
| null | 2016-02-10T20:49:47
| 2016-02-10T20:49:47
| null |
UTF-8
|
Python
| false
| false
| 5,589
|
py
|
"""
homeassistant.components.switch.scsgate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for SCSGate switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.scsgate/
"""
import logging
import homeassistant.components.scsgate as scsgate
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import ATTR_ENTITY_ID
DEPENDENCIES = ['scsgate']
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Add the SCSGate swiches defined inside of the configuration file. """
logger = logging.getLogger(__name__)
_setup_traditional_switches(
logger=logger,
config=config,
add_devices_callback=add_devices_callback)
_setup_scenario_switches(
logger=logger,
config=config,
hass=hass)
def _setup_traditional_switches(logger, config, add_devices_callback):
""" Add traditional SCSGate switches """
traditional = config.get('traditional')
switches = []
if traditional:
for _, entity_info in traditional.items():
if entity_info['scs_id'] in scsgate.SCSGATE.devices:
continue
logger.info(
"Adding %s scsgate.traditional_switch", entity_info['name'])
name = entity_info['name']
scs_id = entity_info['scs_id']
switch = SCSGateSwitch(
name=name,
scs_id=scs_id,
logger=logger)
switches.append(switch)
add_devices_callback(switches)
scsgate.SCSGATE.add_devices_to_register(switches)
def _setup_scenario_switches(logger, config, hass):
""" Add only SCSGate scenario switches """
scenario = config.get("scenario")
if scenario:
for _, entity_info in scenario.items():
if entity_info['scs_id'] in scsgate.SCSGATE.devices:
continue
logger.info(
"Adding %s scsgate.scenario_switch", entity_info['name'])
name = entity_info['name']
scs_id = entity_info['scs_id']
switch = SCSGateScenarioSwitch(
name=name,
scs_id=scs_id,
logger=logger,
hass=hass)
scsgate.SCSGATE.add_device(switch)
class SCSGateSwitch(SwitchDevice):
""" Provides a SCSGate switch. """
def __init__(self, scs_id, name, logger):
self._name = name
self._scs_id = scs_id
self._toggled = False
self._logger = logger
@property
def scs_id(self):
""" SCS ID """
return self._scs_id
@property
def should_poll(self):
""" No polling needed for a SCSGate switch. """
return False
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def is_on(self):
""" True if switch is on. """
return self._toggled
def turn_on(self, **kwargs):
""" Turn the device on. """
from scsgate.tasks import ToggleStatusTask
scsgate.SCSGATE.append_task(
ToggleStatusTask(
target=self._scs_id,
toggled=True))
self._toggled = True
self.update_ha_state()
def turn_off(self, **kwargs):
""" Turn the device off. """
from scsgate.tasks import ToggleStatusTask
scsgate.SCSGATE.append_task(
ToggleStatusTask(
target=self._scs_id,
toggled=False))
self._toggled = False
self.update_ha_state()
def process_event(self, message):
""" Handle a SCSGate message related with this switch"""
if self._toggled == message.toggled:
self._logger.info(
"Switch %s, ignoring message %s because state already active",
self._scs_id, message)
# Nothing changed, ignoring
return
self._toggled = message.toggled
self.update_ha_state()
command = "off"
if self._toggled:
command = "on"
self.hass.bus.fire(
'button_pressed', {
ATTR_ENTITY_ID: self._scs_id,
'state': command
}
)
class SCSGateScenarioSwitch:
""" Provides a SCSGate scenario switch.
This switch is always in a 'off" state, when toggled
it's used to trigger events
"""
def __init__(self, scs_id, name, logger, hass):
self._name = name
self._scs_id = scs_id
self._logger = logger
self._hass = hass
@property
def scs_id(self):
""" SCS ID """
return self._scs_id
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
def process_event(self, message):
""" Handle a SCSGate message related with this switch"""
from scsgate.messages import StateMessage, ScenarioTriggeredMessage
if isinstance(message, StateMessage):
scenario_id = message.bytes[4]
elif isinstance(message, ScenarioTriggeredMessage):
scenario_id = message.scenario
else:
self._logger.warn(
"Scenario switch: received unknown message %s",
message)
return
self._hass.bus.fire(
'scenario_switch_triggered', {
ATTR_ENTITY_ID: int(self._scs_id),
'scenario_id': int(scenario_id, 16)
}
)
|
[
"laurent.turek_github@gadz.org"
] |
laurent.turek_github@gadz.org
|
6ffc5872be65097b6a6998e7b83e67bba34b90e4
|
2fa35c9f536d86e91e78aef4ac482ece6b15c8fe
|
/lct/src/lcToolbox/lcToolbox.py
|
6cdb2c733e0f373d5df74222b348ce2d3b814ed7
|
[
"MIT"
] |
permissive
|
leocov-dev/lct-legacy
|
f566a3b69a15da40ed197d4cf57c25d572924768
|
6fb135ac47d20026ee957a54fcd56baad7c4a893
|
refs/heads/master
| 2020-03-19T19:19:47.424487
| 2019-03-15T01:35:40
| 2019-03-15T01:35:40
| 136,850,162
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,294
|
py
|
import functools
import math
import os
import pymel.core as pm
from pymel import versions
import lct.src.core.lcConfiguration as lcConfiguration
import lct.src.core.lcPath as lcPath
import lct.src.core.lcPrefs as lcPrefs
import lct.src.core.lcShelf as lcShelf
import lct.src.core.lcUI as lcUI
import lct.src.core.lcUpdate as lcUpdate
import lct.src.core.lcUtility as lcUtility
# init global paths
srcPath = lcPath.Path.getSrcPath()
basePath = os.path.abspath(os.path.dirname(__file__))
iconPath = os.path.normpath(os.path.join(basePath, 'icons'))
# set conf values
conf = lcConfiguration.Conf.load_conf_file(os.path.join(os.path.abspath(os.path.dirname(__file__)),
"{}.conf".format(os.path.basename(__file__).split('.')[0])))
# lct conf values
lct_conf = lcConfiguration.Conf.load_conf_file()
# shelf button command
shelfCommand = 'import lct.src.lcToolbox.lcToolbox as lcTb\nreload(lcTb)\nlcTb.lcToolboxUI()'
# set up configuration
global_cfg = lcConfiguration.GlobalSettingsDictionary()
lct_cfg = lcConfiguration.ConfigurationNode(lcPath.Path.get_tools_settings_file(), global_cfg)
lct_cfg.add('lcToolboxRelease', lct_conf['release'])
lct_cfg.add('lcToolboxCurrentTool', '')
lct_cfg.add('lcToolboxHeight', conf['height'])
def lcToolboxUI(dockable=False, *args, **kwargs):
""" """
prefix = conf['prefix']
ci = 0 # color index iterator
toolName = 'lcToolbox'
icon = os.path.join(basePath, 'lcToolbox.png')
winWidth = 231
winHeight = conf['height']
mainWindow = lcUI.lcWindow(prefix=prefix, windowName=toolName, width=winWidth, height=winHeight, icon=icon,
shelfCommand=shelfCommand, annotation=conf['annotation'], dockable=dockable,
menuBar=True, rtf=True)
mainWindow.create()
principalMenus = mainWindow.menuBarLayout.getMenuArray()
optionsMenu = principalMenus[0]
helpMenu = principalMenus[-1]
# add to help menu
pm.menuItem(parent=helpMenu, divider=True, dividerLabel='Misc')
pm.menuItem(parent=helpMenu, l='Reset All Tools', image='smallTrash.png',
command=lambda *args: lct_cfg.reset_all_config())
pm.menuItem(parent=helpMenu, l='Mini Maya Prefs', command=lambda *args: lcPrefs.MiniPrefsWindow().show())
if lct_conf['release'] == 'dev':
pm.menuItem(parent=helpMenu, l='Switch to Pub', image='blendColors.svg',
command=lambda *args: lcUpdate.Update.lct_auto_update(confirmDiag=True, releaseSwitch='pub'))
pm.columnLayout(prefix + '_columnLayout_main')
mainWindow.show()
lcTb_open_tool(mainWindow.windowName, lct_cfg.get('lcToolboxHeight'), lct_cfg.get('lcToolboxCurrentTool'))
# first launch window
lcUI.UI.lcToolbox_first_launch_window()
# check for an update
lcUpdate.Update.update_periodic_check()
def lcTb_open_tool(windowName, heightAdjust, commandString='', *args, **kwargs):
''' '''
prefix = conf['prefix']
if lcUtility.Utility.maya_version_check():
if pm.columnLayout(prefix + '_columLayout_holder', exists=True):
pm.deleteUI(prefix + '_columLayout_holder')
if pm.formLayout('fl_form', exists=True):
pm.deleteUI('fl_form')
if pm.columnLayout('fl_form_shelf', exists=True):
pm.deleteUI('fl_form_shelf')
if pm.columnLayout('fl_form_tool', exists=True):
pm.deleteUI('fl_form_tool')
pm.setParent(prefix + '_columnLayout_main')
pm.columnLayout(prefix + '_columLayout_holder', rowSpacing=0)
pm.formLayout('fl_form', numberOfDivisions=100)
pm.picture('fl_form_header', image=os.path.join(iconPath, 'header_{}.png'.format(lct_conf['release'])))
if lct_conf['release'] == 'dev':
pm.symbolButton('fl_form_reload', image=os.path.join(iconPath, 'reload.png'),
command=functools.partial(lcTb_open_tool_new_window, shelfCommand))
pm.columnLayout('fl_form_shelf')
shelfHeight = 32
fl_flow_layout = pm.flowLayout(width=204, height=shelfHeight + 4, wrap=True, columnSpacing=0)
# list published tools except lcToolbox
toolList = lcUtility.Utility.buildPublishList(inline=False)
toolCount = 0
for item in toolList:
if item[0] != 'lcToolbox':
toolCount = toolCount + 1
toolName = item[0]
toolPrefix = item[1]
toolAnnotation = item[2]
toolHeight = int(item[5])
toolIcon = os.path.normpath(os.path.join(srcPath, toolName, toolName + '.png'))
shelfIcon = os.path.normpath(os.path.join(srcPath, toolName, 'icons', toolName + '_shelf.png'))
toolShelfCommand = "import lct.src.{0}.{0} as {1}\nreload({1})\n{1}.{0}UI()".format(toolName,
toolPrefix)
toolExecString = unicode(
"import lct.src.{0}.{0} as {1}\nreload({1})\n{1}.{0}UI(asChildLayout=True)".format(toolName,
toolPrefix))
toolButton = pm.symbolButton(prefix + '_' + toolName, image=toolIcon, annotation=toolAnnotation,
command=functools.partial(lcTb_open_tool, windowName, toolHeight,
toolExecString))
popup = pm.popupMenu(prefix + '_' + toolName + 'popup', parent=toolButton)
pm.menuItem(l='Open in new window', parent=popup,
command=functools.partial(lcTb_open_tool_new_window, toolShelfCommand))
pm.menuItem(l='Add to shelf', parent=popup,
command=functools.partial(lcShelf.Shelf.makeShelfButton, toolName, toolShelfCommand,
shelfIcon, toolAnnotation))
if pm.window(toolName, ex=True): # if i have the tool window open seperately use the return arrow icon
pm.symbolButton(prefix + '_' + toolName, edit=True, image=os.path.normpath(
os.path.join(srcPath, toolName, 'icons', toolName + '_Return.png')),
command=functools.partial(lcTb_open_tool, windowName, toolHeight, toolExecString))
# if i am loading a specific tool back into the window update its icon to standard
if commandString and toolName in commandString:
pm.symbolButton(toolButton, edit=True, image=os.path.normpath(
os.path.join(srcPath, toolName, 'icons', toolName + '_Release.png')),
command=functools.partial(lcTb_open_tool_new_window, toolShelfCommand))
rowCount = max(1, math.ceil(toolCount / 5.0))
shelfHeight = shelfHeight * rowCount + 4
pm.flowLayout(fl_flow_layout, edit=True, height=shelfHeight)
pm.setParent('fl_form')
fl_form_tool = pm.columnLayout('fl_form_tool', width=224, columnOffset=('left', 10))
pm.separator(style='double', h=5, w=205)
if not commandString:
pm.picture(image=os.path.join(iconPath, 'none.png'))
else:
exec commandString in locals()
lct_cfg.set('lcToolboxCurrentTool', commandString)
lct_cfg.set('lcToolboxHeight', heightAdjust)
if lct_conf['release'] == 'dev':
pm.formLayout('fl_form', edit=True, attachForm=[('fl_form_header', 'top', 0), ('fl_form_shelf', 'top', 54),
('fl_form_shelf', 'left', 25), ('fl_form_reload', 'top', 0),
('fl_form_reload', 'left', 103)],
attachControl=[(fl_form_tool, 'top', 0, 'fl_form_shelf')])
else:
pm.formLayout('fl_form', edit=True, attachForm=[('fl_form_header', 'top', 0), ('fl_form_shelf', 'top', 54),
('fl_form_shelf', 'left', 25)],
attachControl=[(fl_form_tool, 'top', 0, 'fl_form_shelf')])
pm.setParent(prefix + '_columLayout_holder')
pm.picture('fl_form_footer', image=os.path.join(iconPath, 'footer_{}.png'.format(lct_conf['release'])))
pm.window(windowName, edit=True,
height=heightAdjust + shelfHeight + 122) # +conf['height'])#, width=mainWindow.width)
else:
pm.separator(style='none', h=30)
pm.text(l='Your Maya Version:', al='center', w=231, h=25, font='boldLabelFont')
pm.text(l='{}'.format(versions.shortName()), al='center', w=231, h=10, font='boldLabelFont')
pm.separator(style='none', h=10)
pm.text(l='You must have\nMaya 2014 or greater\nto run the\nLEOCOV Toolbox', al='center', w=231, h=60,
font='boldLabelFont')
pm.window(windowName, edit=True, height=231)
def lcTb_open_tool_new_window(commandString='', *args, **kwargs):
    '''Open a tool in its own window by executing its shelf command.'''
if commandString:
exec commandString in locals()
|
[
"leo@leocov.com"
] |
leo@leocov.com
|
fa303d0893189bbad3f9eb0e4c2d7a525733c134
|
43825d5266e717d1e1bb0b6eb07b9c9e26015edc
|
/Images/image_car_count.py
|
1ff39c2396a936cf3bdd6a3aebb927955d2d62fa
|
[] |
no_license
|
navdeepuppal/Smart-Traffic-Lights-Triggering-System
|
b0a9f35bb57f927f92445d23c3897d0d8dbd7c74
|
44d1aa50df0a3fd43906caaf603fad2c6d98a464
|
refs/heads/master
| 2023-08-18T19:34:07.093977
| 2021-09-28T06:46:25
| 2021-09-28T06:46:25
| 408,417,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 01:12:51 2019
@author: Baazigar
"""
import cv2
def counting():
img = "C:\\Users\\Baazigar\\Desktop\\Images for project\\cars front.jpg"
cascade_src = 'C:\\Users\\Baazigar\\Desktop\\Images for project\\opencv-samples-master\\vehicle-detection-haar\\cars3.xml'
    car_cascade = cv2.CascadeClassifier(cascade_src)
    img = cv2.imread(img, 1)
    #img = cv2.resize(img, (16*100, 9*100))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.05 (fine scale steps), minNeighbors=2 (permissive grouping)
    cars = car_cascade.detectMultiScale(gray, 1.05, 2)
    print(len(cars))
    for (x, y, w, h) in cars:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
    cv2.imshow('rectangled', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
counting()
|
[
"navdeepuppal1609@gmail.com"
] |
navdeepuppal1609@gmail.com
|
238c6a60fb442dcb3398993ce5643b368b5eeae4
|
f6241867ebf36cdd1c9f4de8819ab8d58e44c5d2
|
/channels/models.py
|
fe75e50dd8cea76d6908ca2a524dc8cf88c9cc34
|
[] |
no_license
|
bratomes/work-at-olist
|
a96c76efa5220c8bc38a149707862ee503c81f5f
|
85f21fc91532b40466f57b4ff928a4d47b605071
|
refs/heads/master
| 2021-01-19T10:48:21.787628
| 2017-06-28T13:46:34
| 2017-06-28T13:46:34
| 82,227,397
| 0
| 0
| null | 2017-02-16T21:22:17
| 2017-02-16T21:22:17
| null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
from django.db import models
from django.utils.text import slugify
from hashid_field import HashidAutoField
class Channel(models.Model):
"""
Stores a channel and its related parent if exist
"""
id = HashidAutoField(primary_key=True)
name = models.CharField(max_length=60)
slug = models.SlugField(max_length=70, unique=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
# automatically fill the slug field
self.slug = slugify(self.name)
super(Channel, self).save(*args, **kwargs)
class Category(models.Model):
"""
Stores a category with or without its parent.
    A combination of slug, parent and channel must be unique.
"""
id = HashidAutoField(primary_key=True)
name = models.CharField(max_length=60)
slug = models.SlugField(max_length=70)
parent = models.ForeignKey(
'self', on_delete=models.CASCADE,
null=True, related_name='subcategories')
    channel = models.ForeignKey(
        Channel, on_delete=models.CASCADE, related_name='categories')
class Meta:
unique_together = (('slug', 'parent', 'channel'),)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
# automatically fill the slug field
self.slug = slugify(self.name)
super(Category, self).save(*args, **kwargs)
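# Usage sketch (not part of the original file; names are illustrative and
# assume migrations have been applied):
#   channel = Channel.objects.create(name="Marketplace")
#   parent = Category.objects.create(name="Books", channel=channel)
#   child = Category.objects.create(name="Sci-Fi", parent=parent, channel=channel)
#   child.slug  # -> "sci-fi", filled in automatically by save()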
|
[
"brunomsss@gmail.com"
] |
brunomsss@gmail.com
|
47e8758b2d4caf9a68b360328daad3dd0427bd11
|
19f1464599f7731854cc2c8e1f8085db3b2c4ec0
|
/mask-rcnn/mrcnn/config.py
|
878d8273d27333d53e631b7e54c4ca57c2ee458b
|
[] |
no_license
|
sendeb/Instance-Seg-MRCNN-Building
|
f39f90720287be2dd9a8d3f66c4c46529457b957
|
17521e9eeec505c68f6e9377b93038098f49493c
|
refs/heads/master
| 2022-11-22T04:23:36.589423
| 2020-07-22T03:41:22
| 2020-07-22T03:41:22
| 279,156,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,489
|
py
|
"""
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 50
# Backbone network architecture
# Supported values are: resnet50, resnet101.
# You can also provide a callable that should have the signature
# of model.resnet_graph. If you do so, you need to supply a callable
# to COMPUTE_BACKBONE_SHAPE as well
BACKBONE = "resnet101"
# Only useful if you supply a callable to BACKBONE. Should compute
# the shape of each layer of the FPN Pyramid.
# See model.compute_backbone_shapes
COMPUTE_BACKBONE_SHAPE = None
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Size of the fully-connected layers in the classification graph
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
# Size of the top-down layers used to build the feature pyramid
TOP_DOWN_PYRAMID_SIZE = 256
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
# RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256) #CHANGE
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after tf.nn.top_k and before non-maximum suppression
PRE_NMS_LIMIT = 6000
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
# Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further
# up scaling. For example, if set to 2 then images are scaled up to double
# the width and height, or more, even if MIN_IMAGE_DIM doesn't require it.
# However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
# WEIGHT_DECAY = 0.0001 # CHANGE
WEIGHT_DECAY = 0.001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when predicting
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,
self.IMAGE_CHANNEL_COUNT])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,
self.IMAGE_CHANNEL_COUNT])
# Image meta data length
# See compose_image_meta() for details
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
|
[
"noreply@github.com"
] |
sendeb.noreply@github.com
|
937afbe4f4cbb16d08ef660734905be59366a73a
|
658a6fe8aa734e3a3710c10b72067fa791af0a26
|
/Miscellaneous/Next Day.py
|
3e43c188ef0c293a32f17142960ab047652c6f48
|
[] |
no_license
|
Mofvsv/Web-Crawler
|
ddf3997936f37f67f3a605892e4db95e3a56fe6b
|
2f6b9a3c0f77fe6af18b5849a2b4602600bfac5b
|
refs/heads/master
| 2021-01-20T14:04:55.495004
| 2017-05-22T02:51:04
| 2017-05-22T02:51:04
| 90,560,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
def isLeapYear(year):
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    if year % 4 == 0:
        return True
    return False
def daysInMonth(year, month):
    if month in (1, 3, 5, 7, 8, 10, 12):
        return 31
    if month == 2:
        return 29 if isLeapYear(year) else 28
    return 30
def nextDay(year, month, day):
    """
    Returns the year, month, day of the next day,
    using the real number of days in each month.
    """
    if day < daysInMonth(year, month):
        return year, month, day + 1
    if month < 12:
        return year, month + 1, 1
    return year + 1, 1, 1
def dateIsBefore(year1, month1, day1, year2, month2, day2):
    """Returns True if the first date comes strictly before the second."""
    if year1 < year2:
        return True
    if year1 == year2:
        if month1 < month2:
            return True
        if month1 == month2:
            return day1 < day2
    return False
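# The day-count assertions in test() imply a daysBetweenDates() helper that
# is missing from this file; a minimal sketch built on nextDay() (an
# assumption, not part of the original):
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
    """Counts the days from the first date up to (not including) the second."""
    assert not dateIsBefore(year2, month2, day2, year1, month1, day1)
    days = 0
    while dateIsBefore(year1, month1, day1, year2, month2, day2):
        year1, month1, day1 = nextDay(year1, month1, day1)
        days += 1
    return days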
def test():
    assert dateIsBefore(2013, 1, 1, 2013, 1, 1) == False
    assert dateIsBefore(2013, 1, 1, 2013, 1, 2) == True
    assert nextDay(2012, 1, 2) == (2012, 1, 3)
    assert nextDay(2013, 4, 30) == (2013, 5, 1)
    assert nextDay(2013, 12, 31) == (2014, 1, 1)
    # 2012 is a leap year, so this span is 366 days
    assert daysBetweenDates(2012, 1, 1, 2013, 1, 1) == 366
    assert daysBetweenDates(2013, 1, 1, 2014, 1, 1) == 365
    print("Tests finished.")
|
[
"elimustaf@gmail.com"
] |
elimustaf@gmail.com
|
e5fb2903fb14c73e5eba876cd0d8410b5ba6c111
|
3f82d4e8cbb11373151751799be0f246a3557d0b
|
/Service.py
|
5b62739cb042051d2de430c58dc08829516b399a
|
[] |
no_license
|
tejalbannore/todo-List
|
c28505e5a93fbbc5334fd65074e559c596989d83
|
88bea73d62ee383e22458700abee68b1c802c156
|
refs/heads/master
| 2023-02-16T14:21:35.738850
| 2021-01-12T10:16:01
| 2021-01-12T10:16:01
| 328,945,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
# Service.py
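# ToDoModel is used below but never imported or defined in this file; a
# minimal in-memory sketch so the service is runnable (an assumption, not
# the original model):
class ToDoModel:
    def __init__(self):
        self.items = []
    def create(self, text, description):
        # store each to-do entry as a plain dict
        self.items.append({"text": text, "description": description})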
class ToDoService:
def __init__(self):
self.model = ToDoModel()
def create(self, params):
self.model.create(params["text"], params["Description"])
|
[
"tejalbannore@gmail.com"
] |
tejalbannore@gmail.com
|
c014c496ccc68ac4737b193b277ed7a029522fb3
|
8cc30a27835e205a3476783106ca1605a6a85c48
|
/amy/fiscal/tests/test_membership_tasks.py
|
0f1cb34f4fca4c635c961c964dac246cd47d8db5
|
[
"MIT"
] |
permissive
|
gaybro8777/amy
|
d968edc78bbd3f63f3353450334721628dbbc0f4
|
3cf99aed58a0f0acf83d2645a30d8408208ccea9
|
refs/heads/develop
| 2023-03-07T22:08:28.692700
| 2021-02-23T18:06:06
| 2021-02-23T18:06:06
| 341,930,505
| 0
| 0
|
MIT
| 2021-02-24T17:22:08
| 2021-02-24T14:40:43
| null |
UTF-8
|
Python
| false
| false
| 4,329
|
py
|
from django.urls import reverse
from workshops.tests.base import TestBase
from workshops.models import (
Membership,
)
from fiscal.models import MembershipPersonRole, MembershipTask
class TestMembershipTasks(TestBase):
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
self.membership = Membership.objects.create(
public_status="public",
variant="partner",
agreement_start="2021-02-14",
agreement_end="2022-02-14",
contribution_type="financial",
seats_instructor_training=0,
additional_instructor_training_seats=0,
)
self.membership_person_role = MembershipPersonRole.objects.first()
def test_adding_new_tasks(self):
self.assertEqual(self.membership.membershiptask_set.count(), 0)
data = {
"form-TOTAL_FORMS": 2,
"form-INITIAL_FORMS": 0,
"form-MIN_NUM_FORMS": 0,
"form-MAX_NUM_FORMS": 1000,
"form-0-person": self.hermione.pk,
"form-0-role": self.membership_person_role.pk,
"form-0-id": "",
"form-1-person": self.harry.pk,
"form-1-role": self.membership_person_role.pk,
"form-1-id": "",
}
response = self.client.post(
reverse("membership_tasks", args=[self.membership.pk]),
data=data,
follow=True,
)
self.assertRedirects(
response, reverse("membership_details", args=[self.membership.pk])
)
self.assertEqual(self.membership.membershiptask_set.count(), 2)
self.assertEqual(
list(self.membership.persons.all()), [self.hermione, self.harry]
)
def test_removing_tasks(self):
mt1 = MembershipTask.objects.create(
person=self.hermione,
membership=self.membership,
role=self.membership_person_role,
)
mt2 = MembershipTask.objects.create(
person=self.harry,
membership=self.membership,
role=self.membership_person_role,
)
data = {
"form-TOTAL_FORMS": 2,
"form-INITIAL_FORMS": 2,
"form-MIN_NUM_FORMS": 0,
"form-MAX_NUM_FORMS": 1000,
"form-0-person": mt1.person.pk,
"form-0-role": mt1.role.pk,
"form-0-id": mt1.pk,
"form-0-DELETE": "on",
"form-1-person": mt2.person.pk,
"form-1-role": mt2.role.pk,
"form-1-id": mt2.pk,
"form-1-DELETE": "on",
}
response = self.client.post(
reverse("membership_tasks", args=[self.membership.pk]),
data=data,
follow=True,
)
self.assertRedirects(
response, reverse("membership_details", args=[self.membership.pk])
)
self.assertEqual(list(self.membership.persons.all()), [])
def test_mix_adding_removing_tasks(self):
mt1 = MembershipTask.objects.create(
person=self.hermione,
membership=self.membership,
role=self.membership_person_role,
)
mt2 = MembershipTask.objects.create(
person=self.harry,
membership=self.membership,
role=self.membership_person_role,
)
data = {
"form-TOTAL_FORMS": 3,
"form-INITIAL_FORMS": 2,
"form-MIN_NUM_FORMS": 0,
"form-MAX_NUM_FORMS": 1000,
"form-0-person": mt1.person.pk,
"form-0-role": mt1.role.pk,
"form-0-id": mt1.pk,
"form-0-DELETE": "on",
"form-1-person": mt2.person.pk,
"form-1-role": mt2.role.pk,
"form-1-id": mt2.pk,
"form-1-DELETE": "on",
"form-2-person": self.ron.pk,
"form-2-role": self.membership_person_role.pk,
"form-2-id": "",
}
response = self.client.post(
reverse("membership_tasks", args=[self.membership.pk]),
data=data,
follow=True,
)
self.assertRedirects(
response, reverse("membership_details", args=[self.membership.pk])
)
self.assertEqual(list(self.membership.persons.all()), [self.ron])
|
[
"piotr@banaszkiewicz.org"
] |
piotr@banaszkiewicz.org
|
d2e1d5f9f5d401d5359bd1dae4c90e39ff10d3c7
|
3756357852562a6c8fe48f983e0da1610d10ce9c
|
/DeepSecuritySDK/SmartCheckAPI/models/inline_response2006_vulnerabilities1.py
|
c27c1f288479a55921c8d645884dda1356840255
|
[] |
no_license
|
johnsobm/deepsecuritySDK
|
5de8c788d190fa8c5b67fb0bb6b4c545ba74c754
|
d15ef79aea91d0c9a3a2bb4ac71b8060e0b67bfd
|
refs/heads/master
| 2020-04-03T22:53:11.692909
| 2018-10-31T23:40:47
| 2018-10-31T23:40:47
| 155,612,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,671
|
py
|
# coding: utf-8
"""
Deep Security Smart Check
Deep Security Smart Check is a container image scanner from Trend Micro. # noqa: E501
OpenAPI spec version: 2018-05-01
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from SmartCheckAPI.models.inline_response2006_findings import InlineResponse2006Findings # noqa: F401,E501
from SmartCheckAPI.models.inline_response2006_vulnerabilities import InlineResponse2006Vulnerabilities # noqa: F401,E501
class InlineResponse2006Vulnerabilities1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'namespace_name': 'str',
'version': 'str',
'version_format': 'str',
'vulnerabilities': 'list[InlineResponse2006Vulnerabilities]',
'findings': 'InlineResponse2006Findings'
}
attribute_map = {
'name': 'name',
'namespace_name': 'namespaceName',
'version': 'version',
'version_format': 'versionFormat',
'vulnerabilities': 'vulnerabilities',
'findings': 'findings'
}
def __init__(self, name=None, namespace_name=None, version=None, version_format=None, vulnerabilities=None, findings=None): # noqa: E501
"""InlineResponse2006Vulnerabilities1 - a model defined in Swagger""" # noqa: E501
self._name = None
self._namespace_name = None
self._version = None
self._version_format = None
self._vulnerabilities = None
self._findings = None
self.discriminator = None
self.name = name
self.namespace_name = namespace_name
self.version = version
self.version_format = version_format
self.vulnerabilities = vulnerabilities
if findings is not None:
self.findings = findings
@property
def name(self):
"""Gets the name of this InlineResponse2006Vulnerabilities1. # noqa: E501
The name of an installed package with known vulnerabilities. # noqa: E501
:return: The name of this InlineResponse2006Vulnerabilities1. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this InlineResponse2006Vulnerabilities1.
The name of an installed package with known vulnerabilities. # noqa: E501
:param name: The name of this InlineResponse2006Vulnerabilities1. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace_name(self):
"""Gets the namespace_name of this InlineResponse2006Vulnerabilities1. # noqa: E501
The namespace that the package `name` is unique within. # noqa: E501
:return: The namespace_name of this InlineResponse2006Vulnerabilities1. # noqa: E501
:rtype: str
"""
return self._namespace_name
@namespace_name.setter
def namespace_name(self, namespace_name):
"""Sets the namespace_name of this InlineResponse2006Vulnerabilities1.
The namespace that the package `name` is unique within. # noqa: E501
:param namespace_name: The namespace_name of this InlineResponse2006Vulnerabilities1. # noqa: E501
:type: str
"""
if namespace_name is None:
raise ValueError("Invalid value for `namespace_name`, must not be `None`") # noqa: E501
self._namespace_name = namespace_name
@property
def version(self):
"""Gets the version of this InlineResponse2006Vulnerabilities1. # noqa: E501
The package version that was detected. # noqa: E501
:return: The version of this InlineResponse2006Vulnerabilities1. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this InlineResponse2006Vulnerabilities1.
The package version that was detected. # noqa: E501
:param version: The version of this InlineResponse2006Vulnerabilities1. # noqa: E501
:type: str
"""
if version is None:
raise ValueError("Invalid value for `version`, must not be `None`") # noqa: E501
self._version = version
@property
def version_format(self):
"""Gets the version_format of this InlineResponse2006Vulnerabilities1. # noqa: E501
The version format. # noqa: E501
:return: The version_format of this InlineResponse2006Vulnerabilities1. # noqa: E501
:rtype: str
"""
return self._version_format
@version_format.setter
def version_format(self, version_format):
"""Sets the version_format of this InlineResponse2006Vulnerabilities1.
The version format. # noqa: E501
:param version_format: The version_format of this InlineResponse2006Vulnerabilities1. # noqa: E501
:type: str
"""
if version_format is None:
raise ValueError("Invalid value for `version_format`, must not be `None`") # noqa: E501
self._version_format = version_format
@property
def vulnerabilities(self):
"""Gets the vulnerabilities of this InlineResponse2006Vulnerabilities1. # noqa: E501
A list of the known vulnerabilities in this package. # noqa: E501
:return: The vulnerabilities of this InlineResponse2006Vulnerabilities1. # noqa: E501
:rtype: list[InlineResponse2006Vulnerabilities]
"""
return self._vulnerabilities
@vulnerabilities.setter
def vulnerabilities(self, vulnerabilities):
"""Sets the vulnerabilities of this InlineResponse2006Vulnerabilities1.
A list of the known vulnerabilities in this package. # noqa: E501
:param vulnerabilities: The vulnerabilities of this InlineResponse2006Vulnerabilities1. # noqa: E501
:type: list[InlineResponse2006Vulnerabilities]
"""
if vulnerabilities is None:
raise ValueError("Invalid value for `vulnerabilities`, must not be `None`") # noqa: E501
self._vulnerabilities = vulnerabilities
@property
def findings(self):
"""Gets the findings of this InlineResponse2006Vulnerabilities1. # noqa: E501
:return: The findings of this InlineResponse2006Vulnerabilities1. # noqa: E501
:rtype: InlineResponse2006Findings
"""
return self._findings
@findings.setter
def findings(self, findings):
"""Sets the findings of this InlineResponse2006Vulnerabilities1.
:param findings: The findings of this InlineResponse2006Vulnerabilities1. # noqa: E501
:type: InlineResponse2006Findings
"""
self._findings = findings
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2006Vulnerabilities1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
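# Usage sketch (not part of the generated file; field values are
# illustrative):
#   pkg = InlineResponse2006Vulnerabilities1(
#       name="openssl", namespace_name="alpine:3.8", version="1.0.2r-r0",
#       version_format="apk", vulnerabilities=[])
#   pkg.to_dict()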
|
[
"brendanj@US-BRENDANJ-MAC.local"
] |
brendanj@US-BRENDANJ-MAC.local
|
85be99a2356a7e01b178c1fbee74d821dcbf0abc
|
23744596dc030474be496d270352e40275ad2b6b
|
/src/backtest/sma.py
|
4039a78d6318eaa741bc9614c910506cbc35af50
|
[] |
no_license
|
cloudinertia/coin_predict
|
0419116d2a1b6ed41a44fcc2bd24e3a1d197433b
|
469d659c0a0787d99a3f141c86562dd0a4bb2613
|
refs/heads/master
| 2021-08-28T21:45:57.360167
| 2017-12-13T07:00:17
| 2017-12-13T07:00:17
| 112,627,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
import bt
data = bt.get('aapl,msft,c,gs,ge', start='2010-01-01')
# calculate the moving-average DataFrame with pandas' rolling().mean()
import pandas as pd
import matplotlib.pyplot as plt
# a rolling mean is just a moving average
sma = data.rolling(50).mean()
class SelectWhere(bt.Algo):
def __init__(self, signal):
self.signal = signal
def __call__(self, target):
# get signal on target.now
        if target.now in self.signal.index:
            sig = self.signal.loc[target.now]  # .ix was removed from pandas; use .loc
# get indices where true as list
selected = list(sig.index[sig])
# save in temp - this will be used by the weighing algo
target.temp['selected'] = selected
# return True because we want to keep on moving down the stack
return True
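# Note (a sketch, not in the original): recent bt versions ship an
# equivalent built-in, bt.algos.SelectWhere, so the class above is mainly
# illustrative:
#   s = bt.Strategy('above50sma', [bt.algos.SelectWhere(data > sma),
#                                  bt.algos.WeighEqually(),
#                                  bt.algos.Rebalance()])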
s = bt.Strategy('above50sma', [SelectWhere(data > sma),
bt.algos.WeighEqually(),
bt.algos.Rebalance()])
# now we create the Backtest
t = bt.Backtest(s, data)
# and let's run it!
res = bt.run(t)
res.plot()
plt.show()
|
[
"oyt311@hanmail.net"
] |
oyt311@hanmail.net
|
79e211c40926d23198a0a93e3a9b9837c2ac7d8c
|
1ee3306116be466e3fb58ff303798fbf4cdba73c
|
/blueprint/auth/__init__.py
|
fd41d24a3269969a4d281242e28ec6fc00ba9c1c
|
[] |
no_license
|
SyamsulAlterra/project_rest
|
0a38db5e8d02b38af8707d8616001eea0cf64751
|
2a736c9221a5dde0263c6d16495063210e2518e9
|
refs/heads/master
| 2020-06-27T19:44:42.231879
| 2019-08-02T09:36:05
| 2019-08-02T09:36:05
| 200,033,279
| 0
| 0
| null | 2019-08-02T09:36:06
| 2019-08-01T10:53:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
from flask import Blueprint
from flask_restful import Api, Resource, reqparse, marshal
from flask_jwt_extended import create_access_token, get_jwt_claims, jwt_required
from blueprint import db
from ..user.model import User
bp_auth = Blueprint('auth', __name__)
api = Api(bp_auth)
class CreateTokenResource(Resource):
def __init__(self):
pass
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('id', location='args', type=int, nullable=False)
args = parser.parse_args()
qry = User.query.get(args['id'])
if qry is not None:
qry_dict = marshal(qry, User.response_fields)
            token = create_access_token(identity=qry_dict['nama'],
user_claims={
'id': qry_dict['id'],
'status': qry_dict['status'],
}
)
return {'token': token}, 200, {'Content-Type': 'application/json'}
else:
            return {'message': 'user not found'}, 404, {'Content-Type': 'application/json'}
@jwt_required
def get(self):
claims = get_jwt_claims()
return claims, 200, {'Content-Type': 'application/json'}
api.add_resource(CreateTokenResource, '/login')
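# Example requests (a sketch; assumes the blueprint is registered with
# url_prefix='/auth' and a User row with id=1 exists):
#   POST /auth/login?id=1   -> {"token": "<JWT>"}
#   GET  /auth/login        -> needs "Authorization: Bearer <JWT>";
#                              returns the token's user claims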
|
[
"syamsul@alterra.id"
] |
syamsul@alterra.id
|
746407a2bf3f917a2a9651be71bdb6a32dcc8b13
|
1fd8e5db25f8ebc7cc4506cbb07ba98f717b667e
|
/virt/Lib/site-packages/nltk/internals.py
|
ba92adb168562ff46e0b970aaa0d1dce5ae45659
|
[] |
no_license
|
flatplanet/Intro-To-TKinter-Youtube-Course
|
6103410435fc3b977fb44a4b08d045950ba10380
|
cf988099fc358e52ed773273cb2e7ddb9d37d995
|
refs/heads/master
| 2022-10-06T10:02:38.689302
| 2022-07-18T18:11:12
| 2022-07-18T18:11:12
| 174,183,345
| 524
| 426
| null | 2021-10-10T16:16:44
| 2019-03-06T16:44:03
|
Python
|
UTF-8
|
Python
| false
| false
| 38,644
|
py
|
# Natural Language Toolkit: Internal utility functions
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# Nitin Madnani <nmadnani@ets.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import fnmatch
import locale
import os
import re
import stat
import subprocess
import sys
import textwrap
import types
import warnings
from xml.etree import ElementTree
##########################################################################
# Java Via Command-Line
##########################################################################
_java_bin = None
_java_options = []
# [xx] add classpath option to config_java?
def config_java(bin=None, options=None, verbose=False):
"""
Configure nltk's java interface, by letting nltk know where it can
find the Java binary, and what extra options (if any) should be
passed to Java when it is run.
:param bin: The full path to the Java binary. If not specified,
then nltk will search the system for a Java binary; and if
one is not found, it will raise a ``LookupError`` exception.
:type bin: str
:param options: A list of options that should be passed to the
Java binary when it is called. A common value is
``'-Xmx512m'``, which tells Java binary to increase
the maximum heap size to 512 megabytes. If no options are
specified, then do not modify the options list.
:type options: list(str)
"""
global _java_bin, _java_options
_java_bin = find_binary(
"java",
bin,
env_vars=["JAVAHOME", "JAVA_HOME"],
verbose=verbose,
binary_names=["java.exe"],
)
if options is not None:
if isinstance(options, str):
options = options.split()
_java_options = list(options)
def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True):
"""
Execute the given java command, by opening a subprocess that calls
Java. If java has not yet been configured, it will be configured
by calling ``config_java()`` with no arguments.
:param cmd: The java command that should be called, formatted as
a list of strings. Typically, the first string will be the name
of the java class; and the remaining strings will be arguments
for that java class.
:type cmd: list(str)
:param classpath: A ``':'`` separated list of directories, JAR
archives, and ZIP archives to search for class files.
:type classpath: str
:param stdin, stdout, stderr: Specify the executed programs'
standard input, standard output and standard error file
handles, respectively. Valid values are ``subprocess.PIPE``,
an existing file descriptor (a positive integer), an existing
file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a
new pipe to the child should be created. With None, no
redirection will occur; the child's file handles will be
inherited from the parent. Additionally, stderr can be
``subprocess.STDOUT``, which indicates that the stderr data
from the applications should be captured into the same file
handle as for stdout.
:param blocking: If ``false``, then return immediately after
spawning the subprocess. In this case, the return value is
the ``Popen`` object, and not a ``(stdout, stderr)`` tuple.
:return: If ``blocking=True``, then return a tuple ``(stdout,
stderr)``, containing the stdout and stderr outputs generated
by the java command if the ``stdout`` and ``stderr`` parameters
were set to ``subprocess.PIPE``; or None otherwise. If
``blocking=False``, then return a ``subprocess.Popen`` object.
:raise OSError: If the java command returns a nonzero return code.
"""
subprocess_output_dict = {
"pipe": subprocess.PIPE,
"stdout": subprocess.STDOUT,
"devnull": subprocess.DEVNULL,
}
stdin = subprocess_output_dict.get(stdin, stdin)
stdout = subprocess_output_dict.get(stdout, stdout)
stderr = subprocess_output_dict.get(stderr, stderr)
if isinstance(cmd, str):
raise TypeError("cmd should be a list of strings")
# Make sure we know where a java binary is.
if _java_bin is None:
config_java()
# Set up the classpath.
if isinstance(classpath, str):
classpaths = [classpath]
else:
classpaths = list(classpath)
classpath = os.path.pathsep.join(classpaths)
# Construct the full command string.
cmd = list(cmd)
cmd = ["-cp", classpath] + cmd
cmd = [_java_bin] + _java_options + cmd
# Call java via a subprocess
p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
if not blocking:
return p
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print(_decode_stdoutdata(stderr))
raise OSError("Java command failed : " + str(cmd))
return (stdout, stderr)
if 0:
# config_java(options='-Xmx512m')
# Write:
# java('weka.classifiers.bayes.NaiveBayes',
# ['-d', '/tmp/names.model', '-t', '/tmp/train.arff'],
# classpath='/Users/edloper/Desktop/weka/weka.jar')
# Read:
(a, b) = java(
[
"weka.classifiers.bayes.NaiveBayes",
"-l",
"/tmp/names.model",
"-T",
"/tmp/test.arff",
"-p",
"0",
], # , '-distribution'],
classpath="/Users/edloper/Desktop/weka/weka.jar",
)
######################################################################
# Parsing
######################################################################
class ReadError(ValueError):
"""
Exception raised by read_* functions when they fail.
:param position: The index in the input string where an error occurred.
:param expected: What was expected when an error occurred.
"""
def __init__(self, expected, position):
ValueError.__init__(self, expected, position)
self.expected = expected
self.position = position
def __str__(self):
return f"Expected {self.expected} at {self.position}"
_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")
def read_str(s, start_position):
"""
If a Python string literal begins at the specified position in the
given string, then return a tuple ``(val, end_position)``
containing the value of the string literal and the position where
it ends. Otherwise, raise a ``ReadError``.
:param s: A string that will be checked to see if within which a
Python string literal exists.
:type s: str
:param start_position: The specified beginning position of the string ``s``
to begin regex matching.
:type start_position: int
:return: A tuple containing the matched string literal evaluated as a
string and the end position of the string literal.
:rtype: tuple(str, int)
:raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a
match in ``s`` at ``start_position``, i.e., open quote. If the
``_STRING_END_RE`` regex doesn't return a match in ``s`` at the
end of the first match, i.e., close quote.
:raise ValueError: If an invalid string (i.e., contains an invalid
escape sequence) is passed into the ``eval``.
:Example:
>>> from nltk.internals import read_str
>>> read_str('"Hello", World!', 0)
('Hello', 7)
"""
# Read the open quote, and any modifiers.
m = _STRING_START_RE.match(s, start_position)
if not m:
raise ReadError("open quote", start_position)
quotemark = m.group(1)
# Find the close quote.
_STRING_END_RE = re.compile(r"\\|%s" % quotemark)
position = m.end()
while True:
match = _STRING_END_RE.search(s, position)
if not match:
raise ReadError("close quote", position)
if match.group(0) == "\\":
position = match.end() + 1
else:
break
# Process it, using eval. Strings with invalid escape sequences
# might raise ValueError.
try:
return eval(s[start_position : match.end()]), match.end()
except ValueError as e:
raise ReadError("valid escape sequence", start_position) from e
_READ_INT_RE = re.compile(r"-?\d+")
def read_int(s, start_position):
"""
If an integer begins at the specified position in the given
string, then return a tuple ``(val, end_position)`` containing the
value of the integer and the position where it ends. Otherwise,
raise a ``ReadError``.
:param s: A string that will be checked to see if within which a
Python integer exists.
:type s: str
:param start_position: The specified beginning position of the string ``s``
to begin regex matching.
:type start_position: int
:return: A tuple containing the matched integer casted to an int,
and the end position of the int in ``s``.
:rtype: tuple(int, int)
:raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a
match in ``s`` at ``start_position``.
:Example:
>>> from nltk.internals import read_int
>>> read_int('42 is the answer', 0)
(42, 2)
"""
m = _READ_INT_RE.match(s, start_position)
if not m:
raise ReadError("integer", start_position)
return int(m.group()), m.end()
_READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?")
def read_number(s, start_position):
"""
If an integer or float begins at the specified position in the
given string, then return a tuple ``(val, end_position)``
containing the value of the number and the position where it ends.
Otherwise, raise a ``ReadError``.
:param s: A string that will be checked to see if within which a
Python number exists.
:type s: str
:param start_position: The specified beginning position of the string ``s``
to begin regex matching.
:type start_position: int
:return: A tuple containing the matched number casted to a ``float``,
and the end position of the number in ``s``.
:rtype: tuple(float, int)
:raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a
match in ``s`` at ``start_position``.
:Example:
>>> from nltk.internals import read_number
>>> read_number('Pi is 3.14159', 6)
(3.14159, 13)
"""
m = _READ_NUMBER_VALUE.match(s, start_position)
if not m or not (m.group(1) or m.group(2)):
raise ReadError("number", start_position)
if m.group(2):
return float(m.group()), m.end()
else:
return int(m.group()), m.end()
######################################################################
# Check if a method has been overridden
######################################################################
def overridden(method):
"""
:return: True if ``method`` overrides some method with the same
name in a base class. This is typically used when defining
abstract base classes or interfaces, to allow subclasses to define
either of two related methods:
>>> class EaterI:
... '''Subclass must define eat() or batch_eat().'''
... def eat(self, food):
... if overridden(self.batch_eat):
... return self.batch_eat([food])[0]
... else:
... raise NotImplementedError()
... def batch_eat(self, foods):
... return [self.eat(food) for food in foods]
:type method: instance method
"""
if isinstance(method, types.MethodType) and method.__self__.__class__ is not None:
name = method.__name__
funcs = [
cls.__dict__[name]
for cls in _mro(method.__self__.__class__)
if name in cls.__dict__
]
return len(funcs) > 1
else:
raise TypeError("Expected an instance method.")
def _mro(cls):
"""
Return the method resolution order for ``cls`` -- i.e., a list
containing ``cls`` and all its base classes, in the order in which
they would be checked by ``getattr``. For new-style classes, this
is just cls.__mro__. For classic classes, this can be obtained by
a depth-first left-to-right traversal of ``__bases__``.
"""
if isinstance(cls, type):
return cls.__mro__
else:
mro = [cls]
for base in cls.__bases__:
mro.extend(_mro(base))
return mro
######################################################################
# Deprecation decorator & base class
######################################################################
# [xx] dedent msg first if it comes from a docstring.
def _add_epytext_field(obj, field, message):
"""Add an epytext @field to a given object's docstring."""
indent = ""
# If we already have a docstring, then add a blank line to separate
# it from the new field, and check its indentation.
if obj.__doc__:
obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
if indents:
indent = min(indents)
# If we don't have a docstring, add an empty one.
else:
obj.__doc__ = ""
obj.__doc__ += textwrap.fill(
f"@{field}: {message}",
initial_indent=indent,
subsequent_indent=indent + " ",
)
def deprecated(message):
"""
A decorator used to mark functions as deprecated. This will cause
    a warning to be printed when the function is used. Usage:
>>> from nltk.internals import deprecated
>>> @deprecated('Use foo() instead')
... def bar(x):
... print(x/10)
"""
def decorator(func):
msg = f"Function {func.__name__}() has been deprecated. {message}"
msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
def newFunc(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
# Copy the old function's name, docstring, & dict
newFunc.__dict__.update(func.__dict__)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__deprecated__ = True
# Add a @deprecated field to the docstring.
_add_epytext_field(newFunc, "deprecated", message)
return newFunc
return decorator
class Deprecated:
"""
A base class used to mark deprecated classes. A typical usage is to
alert users that the name of a class has changed:
>>> from nltk.internals import Deprecated
>>> class NewClassName:
... pass # All logic goes here.
...
>>> class OldClassName(Deprecated, NewClassName):
... "Use NewClassName instead."
The docstring of the deprecated class will be used in the
deprecation warning message.
"""
def __new__(cls, *args, **kwargs):
# Figure out which class is the deprecated one.
dep_cls = None
for base in _mro(cls):
if Deprecated in base.__bases__:
dep_cls = base
break
assert dep_cls, "Unable to determine which base is deprecated."
# Construct an appropriate warning.
doc = dep_cls.__doc__ or "".strip()
# If there's a @deprecated field, strip off the field marker.
doc = re.sub(r"\A\s*@deprecated:", r"", doc)
# Strip off any indentation.
doc = re.sub(r"(?m)^\s*", "", doc)
# Construct a 'name' string.
name = "Class %s" % dep_cls.__name__
if cls != dep_cls:
name += " (base class for %s)" % cls.__name__
# Put it all together.
msg = f"{name} has been deprecated. {doc}"
# Wrap it.
msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
# Do the actual work of __new__.
return object.__new__(cls)
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
"""
A counter that auto-increments each time its value is read.
"""
def __init__(self, initial_value=0):
self._value = initial_value
def get(self):
self._value += 1
return self._value
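# Example (a sketch): each read auto-increments.
#   c = Counter()
#   c.get()  # -> 1
#   c.get()  # -> 2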
##########################################################################
# Search for files/binaries
##########################################################################
def find_file_iter(
filename,
env_vars=(),
searchpath=(),
file_names=None,
url=None,
verbose=False,
finding_dir=False,
):
"""
Search for a file to be used by nltk.
:param filename: The name or path of the file.
:param env_vars: A list of environment variable names to check.
:param file_names: A list of alternative file names to check.
:param searchpath: List of directories to search.
:param url: URL presented to user for download help.
:param verbose: Whether or not to print path when a file is found.
"""
file_names = [filename] + (file_names or [])
assert isinstance(filename, str)
assert not isinstance(file_names, str)
assert not isinstance(searchpath, str)
if isinstance(env_vars, str):
env_vars = env_vars.split()
yielded = False
# File exists, no magic
for alternative in file_names:
path_to_file = os.path.join(filename, alternative)
if os.path.isfile(path_to_file):
if verbose:
print(f"[Found {filename}: {path_to_file}]")
yielded = True
yield path_to_file
# Check the bare alternatives
if os.path.isfile(alternative):
if verbose:
print(f"[Found {filename}: {alternative}]")
yielded = True
yield alternative
# Check if the alternative is inside a 'file' directory
path_to_file = os.path.join(filename, "file", alternative)
if os.path.isfile(path_to_file):
if verbose:
print(f"[Found {filename}: {path_to_file}]")
yielded = True
yield path_to_file
# Check environment variables
for env_var in env_vars:
if env_var in os.environ:
            if finding_dir:  # find a directory instead of a file
yielded = True
yield os.environ[env_var]
for env_dir in os.environ[env_var].split(os.pathsep):
# Check if the environment variable contains a direct path to the bin
if os.path.isfile(env_dir):
if verbose:
print(f"[Found {filename}: {env_dir}]")
yielded = True
yield env_dir
# Check if the possible bin names exist inside the environment variable directories
for alternative in file_names:
path_to_file = os.path.join(env_dir, alternative)
if os.path.isfile(path_to_file):
if verbose:
print(f"[Found {filename}: {path_to_file}]")
yielded = True
yield path_to_file
# Check if the alternative is inside a 'file' directory
# path_to_file = os.path.join(env_dir, 'file', alternative)
# Check if the alternative is inside a 'bin' directory
path_to_file = os.path.join(env_dir, "bin", alternative)
if os.path.isfile(path_to_file):
if verbose:
print(f"[Found {filename}: {path_to_file}]")
yielded = True
yield path_to_file
# Check the path list.
for directory in searchpath:
for alternative in file_names:
path_to_file = os.path.join(directory, alternative)
if os.path.isfile(path_to_file):
yielded = True
yield path_to_file
# If we're on a POSIX system, then try using the 'which' command
# to find the file.
if os.name == "posix":
for alternative in file_names:
try:
p = subprocess.Popen(
["which", alternative],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
path = _decode_stdoutdata(stdout).strip()
if path.endswith(alternative) and os.path.exists(path):
if verbose:
print(f"[Found {filename}: {path}]")
yielded = True
yield path
except (KeyboardInterrupt, SystemExit, OSError):
raise
finally:
pass
if not yielded:
msg = (
"NLTK was unable to find the %s file!"
"\nUse software specific "
"configuration parameters" % filename
)
if env_vars:
msg += " or set the %s environment variable" % env_vars[0]
msg += "."
if searchpath:
msg += "\n\n Searched in:"
msg += "".join("\n - %s" % d for d in searchpath)
if url:
msg += f"\n\n For more information on {filename}, see:\n <{url}>"
div = "=" * 75
raise LookupError(f"\n\n{div}\n{msg}\n{div}")
def find_file(
filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
):
return next(
find_file_iter(filename, env_vars, searchpath, file_names, url, verbose)
)
def find_dir(
filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
):
return next(
find_file_iter(
filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True
)
)
def find_binary_iter(
name,
path_to_bin=None,
env_vars=(),
searchpath=(),
binary_names=None,
url=None,
verbose=False,
):
"""
Search for a file to be used by nltk.
:param name: The name or path of the file.
:param path_to_bin: The user-supplied binary location (deprecated)
:param env_vars: A list of environment variable names to check.
:param file_names: A list of alternative file names to check.
:param searchpath: List of directories to search.
:param url: URL presented to user for download help.
:param verbose: Whether or not to print path when a file is found.
"""
yield from find_file_iter(
path_to_bin or name, env_vars, searchpath, binary_names, url, verbose
)
def find_binary(
name,
path_to_bin=None,
env_vars=(),
searchpath=(),
binary_names=None,
url=None,
verbose=False,
):
return next(
find_binary_iter(
name, path_to_bin, env_vars, searchpath, binary_names, url, verbose
)
)
def find_jar_iter(
name_pattern,
path_to_jar=None,
env_vars=(),
searchpath=(),
url=None,
verbose=False,
is_regex=False,
):
"""
Search for a jar that is used by nltk.
:param name_pattern: The name of the jar file
:param path_to_jar: The user-supplied jar location, or None.
:param env_vars: A list of environment variable names to check
in addition to the CLASSPATH variable which is
checked by default.
:param searchpath: List of directories to search.
:param is_regex: Whether name is a regular expression.
"""
assert isinstance(name_pattern, str)
assert not isinstance(searchpath, str)
if isinstance(env_vars, str):
env_vars = env_vars.split()
yielded = False
# Make sure we check the CLASSPATH first
env_vars = ["CLASSPATH"] + list(env_vars)
# If an explicit location was given, then check it, and yield it if
# it's present; otherwise, complain.
if path_to_jar is not None:
if os.path.isfile(path_to_jar):
yielded = True
yield path_to_jar
else:
raise LookupError(
f"Could not find {name_pattern} jar file at {path_to_jar}"
)
# Check environment variables
for env_var in env_vars:
if env_var in os.environ:
if env_var == "CLASSPATH":
classpath = os.environ["CLASSPATH"]
for cp in classpath.split(os.path.pathsep):
cp = os.path.expanduser(cp)
if os.path.isfile(cp):
filename = os.path.basename(cp)
if (
is_regex
and re.match(name_pattern, filename)
or (not is_regex and filename == name_pattern)
):
if verbose:
print(f"[Found {name_pattern}: {cp}]")
yielded = True
yield cp
# The case where user put directory containing the jar file in the classpath
if os.path.isdir(cp):
if not is_regex:
if os.path.isfile(os.path.join(cp, name_pattern)):
if verbose:
print(f"[Found {name_pattern}: {cp}]")
yielded = True
yield os.path.join(cp, name_pattern)
else:
# Look for file using regular expression
for file_name in os.listdir(cp):
if re.match(name_pattern, file_name):
if verbose:
print(
"[Found %s: %s]"
% (
name_pattern,
os.path.join(cp, file_name),
)
)
yielded = True
yield os.path.join(cp, file_name)
else:
jar_env = os.path.expanduser(os.environ[env_var])
jar_iter = (
(
os.path.join(jar_env, path_to_jar)
for path_to_jar in os.listdir(jar_env)
)
if os.path.isdir(jar_env)
else (jar_env,)
)
for path_to_jar in jar_iter:
if os.path.isfile(path_to_jar):
filename = os.path.basename(path_to_jar)
if (
is_regex
and re.match(name_pattern, filename)
or (not is_regex and filename == name_pattern)
):
if verbose:
print(f"[Found {name_pattern}: {path_to_jar}]")
yielded = True
yield path_to_jar
# Check the path list.
for directory in searchpath:
if is_regex:
for filename in os.listdir(directory):
path_to_jar = os.path.join(directory, filename)
if os.path.isfile(path_to_jar):
if re.match(name_pattern, filename):
if verbose:
print(f"[Found {filename}: {path_to_jar}]")
yielded = True
yield path_to_jar
else:
path_to_jar = os.path.join(directory, name_pattern)
if os.path.isfile(path_to_jar):
if verbose:
print(f"[Found {name_pattern}: {path_to_jar}]")
yielded = True
yield path_to_jar
if not yielded:
# If nothing was found, raise an error
msg = "NLTK was unable to find %s!" % name_pattern
if env_vars:
msg += " Set the %s environment variable" % env_vars[0]
msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ")
if searchpath:
msg += "\n\n Searched in:"
msg += "".join("\n - %s" % d for d in searchpath)
if url:
msg += "\n\n For more information, on {}, see:\n <{}>".format(
name_pattern,
url,
)
div = "=" * 75
raise LookupError(f"\n\n{div}\n{msg}\n{div}")
def find_jar(
name_pattern,
path_to_jar=None,
env_vars=(),
searchpath=(),
url=None,
verbose=False,
is_regex=False,
):
return next(
find_jar_iter(
name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex
)
)
def find_jars_within_path(path_to_jars):
return [
os.path.join(root, filename)
for root, dirnames, filenames in os.walk(path_to_jars)
for filename in fnmatch.filter(filenames, "*.jar")
]
def _decode_stdoutdata(stdoutdata):
"""Convert data read from stdout/stderr to unicode"""
if not isinstance(stdoutdata, bytes):
return stdoutdata
encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding())
if encoding is None:
return stdoutdata.decode()
return stdoutdata.decode(encoding)
##########################################################################
# Import Stdlib Module
##########################################################################
def import_from_stdlib(module):
"""
When python is run from within the nltk/ directory tree, the
current directory is included at the beginning of the search path.
Unfortunately, that means that modules within nltk can sometimes
shadow standard library modules. As an example, the stdlib
'inspect' module will attempt to import the stdlib 'tokenize'
    module, but will end up importing NLTK's 'tokenize' module
    instead (causing the import to fail).
"""
old_path = sys.path
sys.path = [d for d in sys.path if d not in ("", ".")]
m = __import__(module)
sys.path = old_path
return m
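# Example (a sketch): fetch a stdlib module even when a local one shadows it.
#   inspect = import_from_stdlib('inspect')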
##########################################################################
# Wrapper for ElementTree Elements
##########################################################################
class ElementWrapper:
"""
A wrapper around ElementTree Element objects whose main purpose is
to provide nicer __repr__ and __str__ methods. In addition, any
of the wrapped Element's methods that return other Element objects
are overridden to wrap those values before returning them.
This makes Elements more convenient to work with in
interactive sessions and doctests, at the expense of some
efficiency.
"""
# Prevent double-wrapping:
def __new__(cls, etree):
"""
Create and return a wrapper around a given Element object.
If ``etree`` is an ``ElementWrapper``, then ``etree`` is
returned as-is.
"""
if isinstance(etree, ElementWrapper):
return etree
else:
return object.__new__(ElementWrapper)
def __init__(self, etree):
r"""
Initialize a new Element wrapper for ``etree``.
If ``etree`` is a string, then it will be converted to an
Element object using ``ElementTree.fromstring()`` first:
>>> ElementWrapper("<test></test>")
<Element "<?xml version='1.0' encoding='utf8'?>\n<test />">
"""
if isinstance(etree, str):
etree = ElementTree.fromstring(etree)
self.__dict__["_etree"] = etree
def unwrap(self):
"""
Return the Element object wrapped by this wrapper.
"""
return self._etree
##////////////////////////////////////////////////////////////
# { String Representation
##////////////////////////////////////////////////////////////
def __repr__(self):
s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8")
if len(s) > 60:
e = s.rfind("<")
if (len(s) - e) > 30:
e = -20
s = f"{s[:30]}...{s[e:]}"
return "<Element %r>" % s
def __str__(self):
"""
:return: the result of applying ``ElementTree.tostring()`` to
the wrapped Element object.
"""
return (
ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip()
)
##////////////////////////////////////////////////////////////
# { Element interface Delegation (pass-through)
##////////////////////////////////////////////////////////////
def __getattr__(self, attrib):
return getattr(self._etree, attrib)
def __setattr__(self, attr, value):
return setattr(self._etree, attr, value)
def __delattr__(self, attr):
return delattr(self._etree, attr)
def __setitem__(self, index, element):
self._etree[index] = element
def __delitem__(self, index):
del self._etree[index]
def __setslice__(self, start, stop, elements):
self._etree[start:stop] = elements
def __delslice__(self, start, stop):
del self._etree[start:stop]
def __len__(self):
return len(self._etree)
##////////////////////////////////////////////////////////////
# { Element interface Delegation (wrap result)
##////////////////////////////////////////////////////////////
def __getitem__(self, index):
return ElementWrapper(self._etree[index])
def __getslice__(self, start, stop):
return [ElementWrapper(elt) for elt in self._etree[start:stop]]
def getchildren(self):
return [ElementWrapper(elt) for elt in self._etree]
    def getiterator(self, tag=None):
        # ElementTree's getiterator() was removed in Python 3.9; iter() is
        # the drop-in replacement and behaves the same for this wrapper.
        return (ElementWrapper(elt) for elt in self._etree.iter(tag))
def makeelement(self, tag, attrib):
return ElementWrapper(self._etree.makeelement(tag, attrib))
def find(self, path):
elt = self._etree.find(path)
if elt is None:
return elt
else:
return ElementWrapper(elt)
def findall(self, path):
return [ElementWrapper(elt) for elt in self._etree.findall(path)]
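# Illustrative sketch of the wrapper in use (element names made up for the
# example): results of find()/findall()/indexing are themselves wrapped, so
# reprs stay readable in interactive sessions.
#
#   w = ElementWrapper("<root><child>hi</child></root>")
#   w.find("child")   # -> an ElementWrapper around the <child> element
#   len(w)            # -> 1, delegated to the underlying Element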
######################################################################
# Helper for Handling Slicing
######################################################################
def slice_bounds(sequence, slice_obj, allow_step=False):
"""
Given a slice, return the corresponding (start, stop) bounds,
taking into account None indices and negative indices. The
following guarantees are made for the returned start and stop values:
- 0 <= start <= len(sequence)
- 0 <= stop <= len(sequence)
- start <= stop
:raise ValueError: If ``slice_obj.step`` is not None.
:param allow_step: If true, then the slice object may have a
non-None step. If it does, then return a tuple
(start, stop, step).
"""
start, stop = (slice_obj.start, slice_obj.stop)
# If allow_step is true, then include the step in our return
# value tuple.
if allow_step:
step = slice_obj.step
if step is None:
step = 1
# Use a recursive call without allow_step to find the slice
# bounds. If step is negative, then the roles of start and
# stop (in terms of default values, etc), are swapped.
if step < 0:
start, stop = slice_bounds(sequence, slice(stop, start))
else:
start, stop = slice_bounds(sequence, slice(start, stop))
return start, stop, step
# Otherwise, make sure that no non-default step value is used.
elif slice_obj.step not in (None, 1):
raise ValueError(
"slices with steps are not supported by %s" % sequence.__class__.__name__
)
# Supply default offsets.
if start is None:
start = 0
if stop is None:
stop = len(sequence)
# Handle negative indices.
if start < 0:
start = max(0, len(sequence) + start)
if stop < 0:
stop = max(0, len(sequence) + stop)
# Make sure stop doesn't go past the end of the list. Note that
# we avoid calculating len(sequence) if possible, because for lazy
# sequences, calculating the length of a sequence can be expensive.
if stop > 0:
try:
sequence[stop - 1]
except IndexError:
stop = len(sequence)
# Make sure start isn't past stop.
start = min(start, stop)
# That's all folks!
return start, stop
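# Worked example (assuming a 10-element sequence) of how None and negative
# indices are normalized by the code above:
#
#   slice_bounds(range(10), slice(-3, None))   # -> (7, 10)
#   slice_bounds(range(10), slice(None, 99))   # -> (0, 10)  stop clamped to len
#   slice_bounds(range(10), slice(8, 2))       # -> (2, 2)   start <= stop holds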
######################################################################
# Permission Checking
######################################################################
def is_writable(path):
# Ensure that it exists.
if not os.path.exists(path):
return False
# If we're on a posix system, check its permissions.
if hasattr(os, "getuid"):
statdata = os.stat(path)
perm = stat.S_IMODE(statdata.st_mode)
# is it world-writable?
if perm & 0o002:
return True
# do we own it?
elif statdata.st_uid == os.getuid() and (perm & 0o200):
return True
# are we in a group that can write to it?
elif (statdata.st_gid in [os.getgid()] + os.getgroups()) and (perm & 0o020):
return True
# otherwise, we can't write to it.
else:
return False
# Otherwise, we'll assume it's writable.
# [xx] should we do other checks on other platforms?
return True
######################################################################
# NLTK Error reporting
######################################################################
def raise_unorderable_types(ordering, a, b):
raise TypeError(
"unorderable types: %s() %s %s()"
% (type(a).__name__, ordering, type(b).__name__)
)
|
[
"johne4196@gmail.com"
] |
johne4196@gmail.com
|
7ec4d63dbc282d95ec7d4a10684ab866034ea6ff
|
acc303fc40aeb6bbdf42f8888d3350fba0db6651
|
/src/train.py
|
2871beaab7628c646b1b75fd1d14014adb92f7e3
|
[] |
no_license
|
Plutokekz/Selenium
|
89449039933553bfd3ea5466f19b5d6061d86e49
|
7c83a1744e16e0a4f64521d503b946a54407e749
|
refs/heads/master
| 2022-03-11T19:08:49.025378
| 2019-11-23T16:07:58
| 2019-11-23T16:07:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,799
|
py
|
#!/usr/bin/env python3
# Author: omgimanerd (Alvin Lin)
#
# Executable CLI to run the genetic algorithm.
# Parameterizable via command line options, invoke with the -h flag.
from lib.field import Field
from lib.genetic_algorithm.population import Population
from lib.genetic_algorithm.chromosome import Chromosome
import argparse
import pickle
def main():
parser = argparse.ArgumentParser(description='Runs genetic algorithm.')
parser.add_argument('outfile', type=argparse.FileType('wb'))
parser.add_argument('--seed', type=argparse.FileType('rb'))
parser.add_argument('--generations', type=int, default=25)
parser.add_argument('--population_size', type=int, default=16)
parser.add_argument('--n_simulations', type=int,
default=Chromosome.N_SIMULATIONS)
parser.add_argument('--max_simulation_length', type=int,
default=Chromosome.MAX_SIMULATION_LENGTH)
parser.add_argument('--mutation_chance', type=float,
default=Chromosome.MUTATION_CHANCE)
args = parser.parse_args()
genes = Chromosome.random_genes()
if args.seed:
with args.seed as seed:
chromosome = pickle.load(seed)
genes = chromosome.genes
Chromosome.set_globals(args.n_simulations,
args.max_simulation_length,
args.mutation_chance)
population = Population([
Chromosome(genes) for i in range(args.population_size)])
population.run(args.generations)
fittest = population.get_fittest_member()
with args.outfile as outfile:
pickle.dump(fittest, outfile)
print('Fittest member: {}'.format(fittest))
    print('Result dumped to {}'.format(outfile.name))
if __name__ == '__main__':
main()
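# Example invocations (filenames are hypothetical):
#
#   python train.py fittest.pkl --generations 50 --population_size 32
#   python train.py next.pkl --seed fittest.pkl   # resume from a saved pickle
#
# The fittest Chromosome is pickled to the positional outfile and can be fed
# back in via --seed to continue training from where the last run stopped.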
|
[
"LukasMahr@gmx.de"
] |
LukasMahr@gmx.de
|
e77973612e692fd3b030293e674a0b9778f66a1c
|
623966abb5c12b65d9d138b8861bbdaf423ea029
|
/task/models.py
|
280f004477f9ace46d5d6bf54a5d0a8943478600
|
[] |
no_license
|
DattuBurle/fsf_2019_screening_task1
|
d286b43094ee115155796611339b7cccedf0ce3f
|
9fce3768763f91c74e4467f5b03d56b7782c5402
|
refs/heads/master
| 2020-05-03T16:15:17.449703
| 2019-03-31T17:24:22
| 2019-03-31T17:24:22
| 178,719,111
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.db.models.signals import pre_save
from django.utils.text import slugify
from usercreation.models import Teamcreation
class Taskcreation(models.Model):
taskname=models.CharField(max_length=20,blank=False)
taskleader=models.BooleanField(default=False)
taskuser=models.ForeignKey(User,on_delete=models.SET_NULL,null=True,related_name='taskuser')
taskteamname=models.ForeignKey(Teamcreation,on_delete=models.SET_NULL,null=True,related_name='teamtaskname')
taskslug=models.SlugField(blank=True)
taskdescription=models.TextField(blank=True)
taskassignee=models.CharField(blank=True,max_length=255)
taskstatus=models.CharField(default=None,max_length=40)
    def __str__(self):
        # __str__ must return a string; taskteamname is a ForeignKey instance.
        return str(self.taskteamname)
def create_taskslug(instance,new_slug=None):
taskslug=slugify(instance.taskname)
if new_slug is not None:
taskslug=new_slug
queryset=Taskcreation.objects.filter(taskslug=taskslug).order_by('id')
exists=queryset.exists()
if exists:
new_slug='%s-%s' %(taskslug,queryset.first().id)
return create_taskslug(instance,new_slug=new_slug)
return taskslug
def pre_save_taskdetails_receiver(sender,instance,*args,**kwargs):
if not instance.taskslug:
instance.taskslug=create_taskslug(instance)
pre_save.connect(pre_save_taskdetails_receiver,sender=Taskcreation)
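# Illustrative note (behaviour inferred from create_taskslug above): saving a
# second Taskcreation named "deploy" first yields the slug "deploy", then
# "deploy-<id-of-first>" for the duplicate, recursing until the slug is unique.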
class Comment(models.Model):
commentuser=models.ForeignKey(User,on_delete=models.SET_NULL,null=True,related_name='commentuser')
commentteamname=models.ForeignKey(Taskcreation,on_delete=models.SET_NULL,null=True,related_name='commentteamname')
comment_text=models.TextField(blank=True)
|
[
"noreply@github.com"
] |
DattuBurle.noreply@github.com
|
0e5fac1343217ff4e255358ad6836ca3b5d6d144
|
ad10cd9aecb8e1b277996484a35f54544a3f56c4
|
/New folder (3)/programs/Leap.py
|
2ac7d0b521e18be3130c6142408cb77447070acb
|
[] |
no_license
|
gokul05021992/whole-projects
|
4a4264ec75e1878a3661145a63c99c2b4b7c8350
|
993a8b8db83266e434387730cc5f8b16e4a8b77e
|
refs/heads/master
| 2023-05-22T18:36:38.651925
| 2021-06-13T01:09:11
| 2021-06-13T01:09:11
| 376,460,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
print("leap year program")
A=int(input("enter the year"))
if (A%4)==0 and A%100!=0 or (A%400)== 0:
print("print is an leap year")
else:
print("it is not a leap")
|
[
"bgokul92@gmail.com"
] |
bgokul92@gmail.com
|
61c9b0a2513a642e0bc973a108e7dbfe96ca4190
|
04fd4078e0a2c4424616dca36fd3f9dfbf8a57d6
|
/settings.py
|
a891f124f0259cb175052a99dab30a8b65b81dc1
|
[] |
no_license
|
Vickyarts/Alien-Invasion
|
6d97359460b37600cf33246868dfd3550d637f4d
|
e71173547db1b86312741e8d12705ea142f50753
|
refs/heads/master
| 2022-12-25T11:45:59.356937
| 2020-09-30T10:51:39
| 2020-09-30T10:51:39
| 299,854,985
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
import json
files = 'save.json'
def load_res():
"""Loads the resolution from the save file"""
with open(files) as f:
resolution = json.load(f)
return resolution
class Settings:
"""This class stores all the settings"""
def __init__(self):
self.resolution = load_res()
self.screen_width = self.resolution[0]
self.screen_height = self.resolution[1]
self.bg_color = (0,0,0)
self.ship_limit = 3
#Bullet properties
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = (255,215,0)
self.bullet_limit = 3
#Alien fleet fall speed
self.fleet_drop_speed = 10
        #Increase speed by 10% for each level
self.speed_scale = 1.1
#Increase the points for each alien
self.score_scale = 1.5
self.saved_high_score = self.resolution[2]
self.initialize_level_sets()
def initialize_level_sets(self):
"""Set the initial speed and points"""
self.ship_speed = 1.5
self.bullet_speed = 3
#alien's speed
self.alien_speed = 0.5
        #fleet direction: 1 represents right, -1 represents left
self.fleet_direction = 1
self.alien_points = 50
def speed_up(self):
"""Increase the speed"""
self.ship_speed *= self.speed_scale
self.bullet_speed *= self.speed_scale
self.alien_speed *= self.speed_scale
self.alien_points = int(self.alien_points * self.score_scale)
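# Illustrative check of the compounding (values from the defaults above):
# after one call to speed_up(), ship_speed goes 1.5 -> 1.65 and alien_points
# 50 -> 75; after a second call, ship_speed -> ~1.815 and alien_points -> 112
# (int() truncates 75 * 1.5 = 112.5).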
|
[
"noreply@github.com"
] |
Vickyarts.noreply@github.com
|
02b665951ef992eba2d27a4644e0dbf98ef86bd0
|
7d2f933ed3c54e128ecaec3a771817c4260a8458
|
/venv/Lib/site-packages/pandas/tests/indexes/categorical/test_constructors.py
|
129203c1f9c7f81043d6bcdd4aa9435776f685d1
|
[] |
no_license
|
danielmoreira12/BAProject
|
c61dfb1d0521eb5a28eef9531a00e744bfb0e26a
|
859f588305d826a35cc8f7d64c432f54a0a2e031
|
refs/heads/master
| 2021-01-02T07:17:39.267278
| 2020-02-25T22:27:43
| 2020-02-25T22:27:43
| 239,541,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,822
|
py
|
import numpy as np
import pandas._testing as tm
import pytest
from pandas import Categorical, CategoricalDtype, CategoricalIndex, Index
class TestCategoricalIndexConstructors:
def test_construction(self):
ci = CategoricalIndex(list("aabbca"), categories=list("abcd"), ordered=False)
categories = ci.categories
result = Index(ci)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
result = Index(ci.values)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
# empty
result = CategoricalIndex(categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes, np.array([], dtype="int8"))
assert not result.ordered
# passing categories
result = CategoricalIndex(list("aabbca"), categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
c = Categorical(list("aabbca"))
result = CategoricalIndex(c)
tm.assert_index_equal(result.categories, Index(list("abc")))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
assert not result.ordered
result = CategoricalIndex(c, categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
assert not result.ordered
ci = CategoricalIndex(c, categories=list("abcd"))
result = CategoricalIndex(ci)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")
)
assert not result.ordered
result = CategoricalIndex(ci, categories=list("ab"))
tm.assert_index_equal(result.categories, Index(list("ab")))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8")
)
assert not result.ordered
result = CategoricalIndex(ci, categories=list("ab"), ordered=True)
tm.assert_index_equal(result.categories, Index(list("ab")))
tm.assert_numpy_array_equal(
result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8")
)
assert result.ordered
result = CategoricalIndex(ci, categories=list("ab"), ordered=True)
expected = CategoricalIndex(
ci, categories=list("ab"), ordered=True, dtype="category"
)
tm.assert_index_equal(result, expected, exact=True)
# turn me to an Index
result = Index(np.array(ci))
assert isinstance(result, Index)
assert not isinstance(result, CategoricalIndex)
def test_construction_with_dtype(self):
# specify dtype
ci = CategoricalIndex(list("aabbca"), categories=list("abc"), ordered=False)
result = Index(np.array(ci), dtype="category")
tm.assert_index_equal(result, ci, exact=True)
result = Index(np.array(ci).tolist(), dtype="category")
tm.assert_index_equal(result, ci, exact=True)
# these are generally only equal when the categories are reordered
ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
result = Index(np.array(ci), dtype="category").reorder_categories(ci.categories)
tm.assert_index_equal(result, ci, exact=True)
# make sure indexes are handled
expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2], ordered=True)
idx = Index(range(3))
result = CategoricalIndex(idx, categories=idx, ordered=True)
tm.assert_index_equal(result, expected, exact=True)
def test_construction_empty_with_bool_categories(self):
# see GH#22702
cat = CategoricalIndex([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_construction_with_categorical_dtype(self):
# construction with CategoricalDtype
# GH#18109
data, cats, ordered = "a a b b".split(), "c b a".split(), True
dtype = CategoricalDtype(categories=cats, ordered=ordered)
result = CategoricalIndex(data, dtype=dtype)
expected = CategoricalIndex(data, categories=cats, ordered=ordered)
tm.assert_index_equal(result, expected, exact=True)
# GH#19032
result = Index(data, dtype=dtype)
tm.assert_index_equal(result, expected, exact=True)
# error when combining categories/ordered and dtype kwargs
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalIndex(data, categories=cats, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Index(data, categories=cats, dtype=dtype)
with pytest.raises(ValueError, match=msg):
CategoricalIndex(data, ordered=ordered, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Index(data, ordered=ordered, dtype=dtype)
def test_create_categorical(self):
# GH#17513 The public CI constructor doesn't hit this code path with
# instances of CategoricalIndex, but we still want to test the code
ci = CategoricalIndex(["a", "b", "c"])
# First ci is self, second ci is data.
result = CategoricalIndex._create_categorical(ci, ci)
expected = Categorical(["a", "b", "c"])
tm.assert_categorical_equal(result, expected)
|
[
"danielmoreira12@github.com"
] |
danielmoreira12@github.com
|
1785e1f68c5a312cfcad5c9e9a38d082a0f42cc8
|
6cc8b485c7e07a21ec7d838f9baf777edebaac76
|
/seoman/utils/export_utils.py
|
7ab1187b9c1b0e5d9a5a3994949c8229f503e604
|
[
"Apache-2.0"
] |
permissive
|
rawen54/seoman
|
aff27fc33a62697abb9ac0f24cb443b836a01be8
|
2372b4b330104ef1829a4a1c54669963830f7805
|
refs/heads/main
| 2023-01-05T09:16:42.129254
| 2020-11-01T18:08:31
| 2020-11-01T18:08:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,528
|
py
|
import csv
import json
import sys
from collections import OrderedDict
from time import time
from typing import Any, Dict, List, Optional, Union
import typer # type: ignore
from halo import Halo # type: ignore
from pytablewriter import TsvTableWriter, UnicodeTableWriter # type: ignore
class Export:
def __init__(
self, data: Dict[Any, Any] = {}, keys: List[Any] = [], values: List[Any] = []
) -> None:
self.data = data
self.keys = keys
self.values = values
def _flatten(self, data: Dict[Any, Any], sep="_") -> OrderedDict:
obj = OrderedDict()
def recurse(temp, parent_key=""):
"""
            Recursively walk nested dicts and lists, storing each leaf value
            under a key built from the path segments joined by `sep`.
"""
if isinstance(temp, list):
for i in range(len(temp)):
recurse(
temp[i], parent_key + sep + str(i) if parent_key else str(i)
)
elif isinstance(temp, dict):
for key, value in temp.items():
recurse(value, parent_key + sep + key if parent_key else key)
else:
obj[parent_key] = temp
recurse(data)
return obj
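    # Illustrative sketch of _flatten on a nested payload (sample data made
    # up for the example):
    #
    #   _flatten({"rows": [{"clicks": 12, "keys": ["python"]}]})
    #   -> OrderedDict([("rows_0_clicks", 12), ("rows_0_keys_0", "python")])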
def _split_to_kv(self, data: Dict[Any, Any]) -> None:
"""
Split data to key value pair.
"""
if (
data.keys().__contains__("path")
or data.keys().__contains__("permissionLevel")
or data.keys().__contains__("sitemap_0_path")
):
"""
            If the data contains `path`, it is a sitemap instance.
            If it contains `permissionLevel`, it is a sites instance.
            Otherwise it is a SearchAnalytics report.
"""
# TODO FIX SITEMAPS
for key, value in data.items():
if key.__contains__("_"):
key = key.split("_")
if key[0] == "sitemap":
if len(key) >= 4:
key = " ".join(
[key[2], key[4] if key[4] != "0" else "", key[3]]
)
else:
key = " ".join([key[2]])
else:
key = " ".join([key[0], key[2], key[1]])
if key not in self.keys:
self.keys.append(key)
self.values.append(value)
else:
for key, value in data.items():
key = key.split("_")
if key[0] == "responseAggregationType":
continue
try:
if key[-2] == "keys":
                        # If there are multiple keys, append the key number, e.g. keys0, keys1
key = key[-2] + key[-1]
except IndexError:
pass
key = key[-1] if isinstance(key, list) else key
if key not in self.keys:
self.keys.append(key)
self.values.append(value)
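    # Continuing the sketch above: feeding that flattened dict through
    # _split_to_kv keeps only the last path segment as the column name, so
    #   {"rows_0_clicks": 12, "rows_0_keys_0": "python"}
    # yields keys ["clicks", "keys0"] and values [12, "python"].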
def __preprocess(self) -> None:
"""
Preprocess the data.
"""
self._split_to_kv(self._flatten(self.data))
def export_to_table(self) -> None:
"""
Export in Unicode Table format.
"""
self.__preprocess()
sub = len(self.keys)
writer = UnicodeTableWriter()
writer.table_name = "Analytics"
writer.margin = 2
writer.headers = self.keys
if sub >= 1:
writer.value_matrix = [
self.values[ctr : ctr + sub] for ctr in range(0, len(self.values), sub)
]
else:
typer.secho(
"An error occured please check your query.",
fg=typer.colors.RED,
bold=True,
)
sys.exit()
writer.write_table()
@Halo("Exporting to JSON", spinner="dots")
def export_to_json(self, filename: str) -> None:
"""
Export in JSON format.
"""
with open(filename, "w") as file:
json.dump(self.data, file, indent=4, ensure_ascii=False)
print(f"Analytics successfully created in JSON format ✅")
@Halo("Exporting to CSV", spinner="dots")
def export_to_csv(self, filename: str) -> None:
"""
Export in CSV format.
"""
self.__preprocess()
sub = len(self.keys)
from csv import writer
with open(filename, "w") as file:
csv_writer = writer(file)
csv_writer.writerow(self.keys)
for ctr in range(0, len(self.values), sub):
csv_writer.writerow(self.values[ctr : ctr + sub])
typer.secho(
"\nAnalytics successfully created in CSV format ✅", bold=True,
)
@Halo("Exporting to Excel", spinner="dots")
def export_to_excel(self, filename: str) -> None:
"""
Export in XLSX format.
"""
from pyexcelerate import Workbook # type: ignore
self.__preprocess()
sub = len(self.keys)
if sub >= 1:
data = [
self.values[ctr : ctr + sub] for ctr in range(0, len(self.values), sub)
]
else:
typer.secho(
"An error occured please check your query.",
fg=typer.colors.RED,
bold=True,
)
sys.exit()
data.insert(0, self.keys)
wb = Workbook()
ws = wb.new_sheet("Analytics", data=data)
wb.save(filename)
typer.secho(
"\nAnalytics successfully created in XLSX format ✅", bold=True,
)
@Halo("Exporting to TSV", spinner="dots")
def export_to_tsv(self, filename: str) -> None:
"""
Export in TSV format.
"""
self.__preprocess()
sub = len(self.keys)
writer = TsvTableWriter()
writer.headers = self.keys
if sub >= 1:
writer.value_matrix = [
self.values[ctr : ctr + sub] for ctr in range(0, len(self.values), sub)
]
else:
typer.secho(
"An error occured please check your query.",
fg=typer.colors.RED,
bold=True,
)
sys.exit()
writer.dump(filename)
typer.secho(
"\nAnalytics successfully created in TSV format ✅", bold=True,
)
|
[
"yagizcanilbey1903@gmail.com"
] |
yagizcanilbey1903@gmail.com
|