Schema of the records below (column name, dtype, and value range as given in the original header):

| Column | Dtype | Range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 2–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |
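Each record below follows this schema, with the file text in `content`. As a quick orientation, here is a minimal sketch of filtering such records once they are materialized locally; the file name `sample.parquet` is hypothetical, since this section does not name the dataset or where it is stored:

```python
# Minimal sketch: load records with the schema above and filter them.
# "sample.parquet" is a hypothetical local materialization of the data.
import pyarrow.parquet as pq

df = pq.read_table("sample.parquet").to_pandas()

# Keep permissively licensed, non-vendored, non-generated Python files.
subset = df[
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
]
for _, row in subset.iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
```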
---
**`/Business_Cards/migrations/0005_auto_20210224_1319.py`** · repo_name: `panda1909/beedeeprinting` · branch_name: refs/heads/main
- blob_id 8a13069478ea5cffd0237a926fcfbac9d1099903 · directory_id 5de9668c5b01efb4a218589686fc0861d9e3e94c · content_id 3db1d8e2cb654c314420d7d39742f25172c32f91 · snapshot_id cd3a9828577b9dbda7ebe7ddb3a64b6ad9c668dc · revision_id c3f696d6cbe3990014a8ff53036e69ec62f67f19
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2023-03-24T15:19:45.333594 · revision_date 2021-03-03T16:42:46 · committer_date 2021-03-03T16:42:46 · gha_event_created_at 2021-03-18T23:08:44 · gha_created_at 2020-12-17T12:04:06
- github_id 322,282,438 · star_events_count 0 · fork_events_count 0 · gha_language Python · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 2,146 · extension py
- content:

```python
# Generated by Django 3.1 on 2021-02-24 13:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Business_Cards', '0004_auto_20210224_1315'),
]
operations = [
migrations.RemoveField(
model_name='edge_painted_business_cards_price',
name='Template',
),
migrations.RemoveField(
model_name='foil_business_cards_price',
name='Template',
),
migrations.RemoveField(
model_name='pantone_business_cards_price',
name='Template',
),
migrations.RemoveField(
model_name='plastic_business_cards_price',
name='Template',
),
migrations.RemoveField(
model_name='raised_ink_business_cards_price',
name='Template',
),
migrations.RemoveField(
model_name='raised_spot_uv_business_cards_price',
name='Template',
),
migrations.AddField(
model_name='edge_painted_business_cards_price',
name='Template1',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='foil_business_cards_price',
name='Template1',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='pantone_business_cards_price',
name='Template1',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='plastic_business_cards_price',
name='Template1',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='raised_ink_business_cards_price',
name='Template1',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='raised_spot_uv_business_cards_price',
name='Template1',
field=models.FileField(default='', upload_to=''),
),
]
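# Note (not part of the original migration): removing `Template` and adding a
# fresh `Template1` discards any references to previously uploaded files; a
# RenameField operation would have preserved the existing column data.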
```

- authors: ["umerfarooqb97@gmail.com"] · author_id: umerfarooqb97@gmail.com
---
**`/stars.py`** · repo_name: `reece-carrisosa/Python18` · branch_name: refs/heads/master
- blob_id 82b3a0104576ede4ec3257767503b467a431d6ae · directory_id c4cd066de1f47c4b1f056aca87dee4c259ec3d5f · content_id 0ecce4474f3a6544988f49fef1fc4e61b1a74fc9 · snapshot_id 719863ab5f01d75f6d3c7a8a70c081cd81169f5c · revision_id a65e6239101d01d1444d2409763ac453cd80f416
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2021-05-05T15:31:22.663212 · revision_date 2018-01-23T21:17:43 · committer_date 2018-01-23T21:17:43 · gha_event_created_at null · gha_created_at null
- github_id 117,284,738 · star_events_count 0 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 212 · extension py
- content (Python 2):

```python
######## Stars
b = ['Reece', 2, 'Sam', 7, 9]
def draw_stars(arr):
for x in arr:
if type(x) == int:
print "*"*x
else:
print x[0].lower()*len(x)
draw_stars(b)
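# Expected output of the call above (illustrative note, not part of the
# original file): ints print '*' repeated that many times; strings print the
# lowercased first letter repeated len(string) times.
#   rrrrr
#   **
#   sss
#   *******
#   *********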
```

- authors: ["noreply@github.com"] · author_id: reece-carrisosa.noreply@github.com
---
**`/core/migrations/0001_initial.py`** · repo_name: `TERESIA012/Netflix_Clone` · branch_name: refs/heads/master
- blob_id 5473c9ef6e54ff41b2dff4bc909d4d31209b4761 · directory_id 799e7ad7a71fe9c7f8707d60160fa2d84f2fa719 · content_id 55430b5fb0b04da04727fe01d8a8c946f57d21d7 · snapshot_id 517cf1d9cc11979d52e39b27ea7147b1a911d3b4 · revision_id 4785130ad52ca816bbdcc455cf27e1f6245b14fd
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2023-08-19T10:04:28.850838 · revision_date 2021-10-13T23:25:47 · committer_date 2021-10-13T23:25:47 · gha_event_created_at null · gha_created_at null
- github_id null · star_events_count 0 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 4,681 · extension py
- content:

```python
# Generated by Django 3.2.7 on 2021-10-13 21:42
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=225)),
('age_limit', models.CharField(choices=[('All', 'All'), ('Kids', 'Kids')], max_length=5)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
],
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=225, null=True)),
('file', models.FileField(upload_to='movies')),
],
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=225)),
('description', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('type', models.CharField(choices=[('single', 'Single'), ('seasonal', 'Seasonal')], max_length=10)),
('flyer', models.ImageField(blank=True, null=True, upload_to='flyers')),
('age_limit', models.CharField(blank=True, choices=[('All', 'All'), ('Kids', 'Kids')], max_length=5, null=True)),
('videos', models.ManyToManyField(to='core.Video')),
],
),
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('profiles', models.ManyToManyField(to='core.Profile')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
```

- authors: ["kingoriteresia@gmail.com"] · author_id: kingoriteresia@gmail.com
---
**`/tools/leetcode.055.Jump Game/leetcode.055.Jump Game.submission8.py`** · repo_name: `tedye/leetcode` · branch_name: refs/heads/master
- blob_id 43137b2a59d80228815f1d64b9c6ec0c1b27ddb6 · directory_id 112882b8d6c5071e7d2610c595bfca9210c79a0a · content_id 4c234877c34cd25cb43dda9cbd6c32b54c52d785 · snapshot_id 193b1900d98e35d5c402013cbe3bc993d0235da2 · revision_id 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
- detected_licenses ["MIT"] · license_type permissive · gha_license_id null
- visit_date 2021-01-01T19:06:06.408135 · revision_date 2015-10-24T06:44:40 · committer_date 2015-10-24T06:44:40 · gha_event_created_at null · gha_created_at null
- github_id 41,804,923 · star_events_count 4 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 482 · extension py
- content:

```python
class Solution:
# @param A, a list of integers
# @return a boolean
def canJump(self, A):
if not A: return False
if len(A) == 1: return True
        pos = 0
        l = len(A) - 2
        maxCover = A[0]
while pos <= maxCover:
cover = pos + A[pos]
if cover > l:
return True
if cover > maxCover:
maxCover = cover
pos += 1
return False
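# Illustrative check of the greedy reachability scan above (hypothetical
# driver, not part of the original submission):
if __name__ == '__main__':
    s = Solution()
    assert s.canJump([2, 3, 1, 1, 4])      # index 1 jumps past the end
    assert not s.canJump([3, 2, 1, 0, 4])  # every path strands on the 0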
```

- authors: ["tedye@bu.edu"] · author_id: tedye@bu.edu
---
**`/xuesheng_web/page/main.py`** · repo_name: `Chengyuxiang12/chengyu` · branch_name: refs/heads/master
- blob_id 32329b04f4258b316e9d349705aad4bc6c362edc · directory_id d9115e216eadb68d9d8dae102a7d80390f54a609 · content_id 88818ae8f4cf1276c3ed79ace3b494665e5eca3c · snapshot_id 9e5760e767d48b1fe6e1c59d18545656dcb209fa · revision_id b6c8d976af56c9ec25a84013744ec42d886a1799
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2022-11-24T19:21:57.607653 · revision_date 2020-08-06T00:06:54 · committer_date 2020-08-06T00:06:54 · gha_event_created_at null · gha_created_at null
- github_id 267,125,798 · star_events_count 1 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 373 · extension py
- content:

```python
from selenium.webdriver.common.by import By
from xuesheng_web.page.base_page import BasePage
class Main(BasePage):
_base_url = 'http://localhost:8090/'
def login(self, a, b):
self._params["a"] = a
self._params["b"] = b
self.steps("../page/login.yaml")
from xuesheng_web.page.index import Index
return Index(self._driver)
```

- authors: ["1561933707@qq.com"] · author_id: 1561933707@qq.com
---
**`/lesereggs/wsgi.py`** · repo_name: `tahirawan4/laseregg` · branch_name: refs/heads/master
- blob_id 8240eca8d3e73725e8d897c9f953dfc5f1ad8b63 · directory_id b9c4633be31a8cd15b8fa3b8cf44256678c2bfc5 · content_id b46fb9fa92df339adc795fecb0862c31101c8243 · snapshot_id a53d1508febbf736f924c2af179d3065b559d590 · revision_id f39a10056d54769df974da7577f1f69668a9daac
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2021-01-23T00:39:54.707114 · revision_date 2017-06-12T00:16:47 · committer_date 2017-06-12T00:16:47 · gha_event_created_at null · gha_created_at null
- github_id 92,830,723 · star_events_count 0 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 393 · extension py
- content:

```python
"""
WSGI config for lesereggs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lesereggs.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
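# Illustrative deployment of the WSGI callable above (hypothetical commands,
# not part of the original file):
#   gunicorn lesereggs.wsgi:application
# or, for local development:
#   python manage.py runserver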
```

- authors: ["tahir.fazal@arbisoft.com"] · author_id: tahir.fazal@arbisoft.com
---
**`/python-client/cloudera/director/d6_1/users_api.py`** · repo_name: `daanknoope/director-sdk` · branch_name: refs/heads/master
- blob_id 038ede28994b5f958eb3156da9cfc4af849fc0b4 · directory_id 9f66c839f1c3f11dc9a56922d6f313d911a861c1 · content_id 74f2e732134e87a7c8e0a6e33f2e42436f6c1566 · snapshot_id 11996aa0ccff4cb709d63fc195e5619f2ee218f5 · revision_id a099fedd5afe365aedbb50daa75de048ef6f7ab4
- detected_licenses ["EPL-1.0", "Classpath-exception-2.0", "LGPL-2.0-or-later", "W3C", "GPL-1.0-or-later", "LicenseRef-scancode-proprietary-license", "BSD-3-Clause", "MPL-2.0", "LicenseRef-scancode-json-pd", "LicenseRef-scancode-protobuf", "MPL-2.0-no-copyleft-exception", "CC0-1.0", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-warranty-disclaimer", "ISC", "CDDL-1.1", "GPL-2.0-only", "LicenseRef-scancode-unknown-license-reference", "OFL-1.1", "Apache-2.0", "AGPL-3.0-only", "LicenseRef-scancode-public-domain", "Unlicense", "MPL-1.1", "BSD-2-Clause", "LGPL-2.1-only", "LicenseRef-scancode-oracle-bcl-java-platform-2013", "LGPL-2.1-or-later", "Plexus", "MITNFA", "WTFPL", "CDDL-1.0", "MIT"] · license_type permissive · gha_license_id Apache-2.0
- visit_date 2022-11-27T02:00:20.943586 · revision_date 2019-07-29T11:17:24 · committer_date 2019-07-29T11:17:24 · gha_event_created_at 2020-08-06T13:29:32 · gha_created_at 2020-08-06T13:29:31
- github_id 285,579,086 · star_events_count 0 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 29,608 · extension py
- content:

```python
# coding: utf-8
"""
Licensed to Cloudera, Inc. under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. Cloudera, Inc. licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudera.director.common.client import ApiClient
class UsersApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create(self, user, **kwargs): # noqa: E501
"""Create a new user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create(user, async=True)
>>> result = thread.get()
:param async bool
:param User user: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_with_http_info(user, **kwargs) # noqa: E501
else:
(data) = self.create_with_http_info(user, **kwargs) # noqa: E501
return data
def create_with_http_info(self, user, **kwargs): # noqa: E501
"""Create a new user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_with_http_info(user, async=True)
>>> result = thread.get()
:param async bool
:param User user: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user' is set
if ('user' not in params or
params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'user' in params:
body_params = params['user']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def current_redacted(self, **kwargs): # noqa: E501
"""Get the current user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.current_redacted(async=True)
>>> result = thread.get()
:param async bool
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.current_redacted_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.current_redacted_with_http_info(**kwargs) # noqa: E501
return data
def current_redacted_with_http_info(self, **kwargs): # noqa: E501
"""Get the current user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.current_redacted_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method current_redacted" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users/current', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete(self, username, **kwargs): # noqa: E501
"""Delete a user by username # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete(username, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_with_http_info(username, **kwargs) # noqa: E501
else:
(data) = self.delete_with_http_info(username, **kwargs) # noqa: E501
return data
def delete_with_http_info(self, username, **kwargs): # noqa: E501
"""Delete a user by username # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_with_http_info(username, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `delete`") # noqa: E501
if 'username' in params and not re.search('.+', params['username']): # noqa: E501
raise ValueError("Invalid value for parameter `username` when calling `delete`, must conform to the pattern `/.+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users/{username}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_redacted(self, username, **kwargs): # noqa: E501
"""Get a user by username # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_redacted(username, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_redacted_with_http_info(username, **kwargs) # noqa: E501
else:
(data) = self.get_redacted_with_http_info(username, **kwargs) # noqa: E501
return data
def get_redacted_with_http_info(self, username, **kwargs): # noqa: E501
"""Get a user by username # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_redacted_with_http_info(username, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_redacted" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `get_redacted`") # noqa: E501
if 'username' in params and not re.search('.+', params['username']): # noqa: E501
raise ValueError("Invalid value for parameter `username` when calling `get_redacted`, must conform to the pattern `/.+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users/{username}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_user_management_status(self, **kwargs): # noqa: E501
"""Gets user management status # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_user_management_status(async=True)
>>> result = thread.get()
:param async bool
:return: UserManagementStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_user_management_status_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_user_management_status_with_http_info(**kwargs) # noqa: E501
return data
def get_user_management_status_with_http_info(self, **kwargs): # noqa: E501
"""Gets user management status # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_user_management_status_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: UserManagementStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_management_status" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users/managementStatus', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserManagementStatus', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list(self, **kwargs): # noqa: E501
"""List all users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list(async=True)
>>> result = thread.get()
:param async bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_with_http_info(**kwargs) # noqa: E501
return data
def list_with_http_info(self, **kwargs): # noqa: E501
"""List all users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update(self, username, user, **kwargs): # noqa: E501
"""Update an existing user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update(username, user, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:param User user: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_with_http_info(username, user, **kwargs) # noqa: E501
else:
(data) = self.update_with_http_info(username, user, **kwargs) # noqa: E501
return data
def update_with_http_info(self, username, user, **kwargs): # noqa: E501
"""Update an existing user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_with_http_info(username, user, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:param User user: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username', 'user'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `update`") # noqa: E501
# verify the required parameter 'user' is set
if ('user' not in params or
params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `update`") # noqa: E501
if 'username' in params and not re.search('.+', params['username']): # noqa: E501
raise ValueError("Invalid value for parameter `username` when calling `update`, must conform to the pattern `/.+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'user' in params:
body_params = params['user']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users/{username}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_password(self, username, passwords, **kwargs): # noqa: E501
"""Update the password of an existing user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_password(username, passwords, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:param PasswordChange passwords: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_password_with_http_info(username, passwords, **kwargs) # noqa: E501
else:
(data) = self.update_password_with_http_info(username, passwords, **kwargs) # noqa: E501
return data
def update_password_with_http_info(self, username, passwords, **kwargs): # noqa: E501
"""Update the password of an existing user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_password_with_http_info(username, passwords, async=True)
>>> result = thread.get()
:param async bool
:param str username: (required)
:param PasswordChange passwords: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username', 'passwords'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_password" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `update_password`") # noqa: E501
# verify the required parameter 'passwords' is set
if ('passwords' not in params or
params['passwords'] is None):
raise ValueError("Missing the required parameter `passwords` when calling `update_password`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'passwords' in params:
body_params = params['passwords']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic'] # noqa: E501
return self.api_client.call_api(
'/api/d6.1/users/{username}/password', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
model_package="cloudera.director.d6_1.models",
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
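# Illustrative usage mirroring the docstrings above (assumes a reachable
# Cloudera Director server and valid basic-auth credentials on the default
# ApiClient; not part of the generated file):
#   api = UsersApi()                  # uses a default ApiClient()
#   names = api.list()                # synchronous call
#   thread = api.list(async=True)     # asynchronous call
#   names = thread.get()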
```

- authors: ["jadair@cloudera.com"] · author_id: jadair@cloudera.com
---
**`/week3/Group2/boj10829_ayoung0430.py`** · repo_name: `all1m-algorithm-study/2021-1-Algorithm-Study` · branch_name: refs/heads/main
- blob_id e0e9a4ca7bed230fe0aa3b3d2feca8572337be8d · directory_id 4d49d4d59c9517fe99884cd69ad88644265c6755 · content_id f0d65bb6189fca0cdf0ca82f8c73f601248b0652 · snapshot_id 3f34655dc0a3d8765143f4230adaa96055d13626 · revision_id 73c7cac1824827cb6ed352d49c0ead7003532a35
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2023-06-03T18:45:28.852381 · revision_date 2021-06-11T06:28:44 · committer_date 2021-06-11T06:28:44 · gha_event_created_at 2021-06-11T06:28:45 · gha_created_at 2021-03-16T17:23:37
- github_id 348,433,854 · star_events_count 8 · fork_events_count 16 · gha_language Python · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 424 · extension py
- content:

```python
N = int(input())
answer = []
def change(N):
    # Collect binary digits from least to most significant.
    if N == 1:
        answer.append('1')
        return
    answer.append('1' if N % 2 else '0')
    change(N // 2)
change(N)
answer.reverse()
print(''.join(answer))
# e.g. N = 11 builds ['1', '1', '0', '1'], reversed to print "1011"
# This is unfinished code ㅠㅠ I'm sorry I could not submit proper code by the
# deadline. I will upload a completed version later. ㅠㅠ
```

- authors: ["happycatcindy@gmail.com"] · author_id: happycatcindy@gmail.com
---
**`/tests/test_s3.py`** · repo_name: `reuf/storagelayer` · branch_name: refs/heads/master
- blob_id 0a79292c6e8e11f6d2538dd7ce59fb62a36bd5ad · directory_id 37a456d9e16d11fabe8dc854b84181478b196a66 · content_id ccaf8ce6faa19da61163cde0321a86fb9af0e95b · snapshot_id e1f44f71158d759f1cb1e291ab92c7c2c44de8b2 · revision_id 5fde8602332f811703955ae18e2426474baed89b
- detected_licenses ["MIT"] · license_type permissive · gha_license_id null
- visit_date 2021-06-22T17:59:05.112584 · revision_date 2017-08-30T15:39:27 · committer_date 2017-08-30T15:39:27 · gha_event_created_at 2017-09-26T21:18:19 · gha_created_at 2017-09-26T21:18:19
- github_id 104,938,709 · star_events_count 1 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 1,532 · extension py
- content:

```python
import os
from unittest import TestCase
from moto import mock_s3
import storagelayer
class FileArchiveTest(TestCase):
def setUp(self):
self.mock = mock_s3()
self.mock.start()
self.archive = storagelayer.init('s3', bucket='foo')
self.file = os.path.abspath(__file__)
def tearDown(self):
self.mock.stop()
def test_basic_archive(self):
checksum = storagelayer.checksum(self.file)
assert checksum is not None, checksum
out = self.archive.archive_file(self.file)
assert checksum == out, (checksum, out)
out2 = self.archive.archive_file(self.file)
assert out == out2, (out, out2)
def test_basic_archive_with_checksum(self):
checksum = 'banana'
out = self.archive.archive_file(self.file, checksum)
assert checksum == out, (checksum, out)
def test_generate_url(self):
out = self.archive.archive_file(self.file)
url = self.archive.generate_url(out)
assert url is not None, url
def test_load_file(self):
out = self.archive.archive_file(self.file)
path = self.archive.load_file(out)
assert path is not None, path
assert os.path.isfile(path), path
def test_cleanup_file(self):
out = self.archive.archive_file(self.file)
self.archive.cleanup_file(out)
path = self.archive.load_file(out)
assert os.path.isfile(path), path
self.archive.cleanup_file(out)
assert not os.path.isfile(path), path
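# These tests run against a moto-mocked S3 bucket, so no AWS credentials are
# needed. An illustrative invocation (assumes pytest is installed; not part
# of the original file):
#   python -m pytest tests/test_s3.py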
```

- authors: ["friedrich@pudo.org"] · author_id: friedrich@pudo.org
---
**`/attrdict/dictionary.py`** · repo_name: `FISCO-BCOS/python-sdk` · branch_name: refs/heads/master
- blob_id 97b7ab6c4c3416b70062d954b3752cfaa7b7661c · directory_id 17711096359106c13da5875153eccc308b765003 · content_id 874e4a4ede8b39867c7fb9a05531a56a6a37db66 · snapshot_id 625ae9e6d601237c5a4684e0fe94629dfa6fd9c1 · revision_id 5fa6cc416b604de4bbd0d2407f36ed286d67a792
- detected_licenses ["Python-2.0", "MIT"] · license_type permissive · gha_license_id MIT
- visit_date 2023-07-25T22:35:37.472322 · revision_date 2023-07-13T10:04:56 · committer_date 2023-07-13T10:04:56 · gha_event_created_at 2023-03-08T07:53:04 · gha_created_at 2019-07-03T06:55:05
- github_id 194,997,013 · star_events_count 68 · fork_events_count 71 · gha_language Python · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 1,462 · extension py
- content:

```python
"""
A dict that implements MutableAttr.
"""
from attrdict.mixins import MutableAttr
import six
__all__ = ['AttrDict']
class AttrDict(dict, MutableAttr):
"""
A dict that implements MutableAttr.
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self._setattr('_sequence_type', tuple)
self._setattr('_allow_invalid_attributes', False)
def _configuration(self):
"""
The configuration for an attrmap instance.
"""
return self._sequence_type
def __getstate__(self):
"""
Serialize the object.
"""
return (
self.copy(),
self._sequence_type,
self._allow_invalid_attributes
)
def __setstate__(self, state):
"""
Deserialize the object.
"""
mapping, sequence_type, allow_invalid_attributes = state
self.update(mapping)
self._setattr('_sequence_type', sequence_type)
self._setattr('_allow_invalid_attributes', allow_invalid_attributes)
def __repr__(self):
return six.u('AttrDict({contents})').format(
contents=super(AttrDict, self).__repr__()
)
@classmethod
def _constructor(cls, mapping, configuration):
"""
A standardized constructor.
"""
attr = cls(mapping)
attr._setattr('_sequence_type', configuration)
return attr
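# Illustrative usage of the attribute/key duality above (hypothetical, not
# part of the original module):
#   d = AttrDict({'a': 1})
#   d.b = 2                 # attribute writes go through MutableAttr
#   assert d.a == 1 and d['b'] == 2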
```

- authors: ["20529321+coderkentzhang@users.noreply.github.com"] · author_id: 20529321+coderkentzhang@users.noreply.github.com
---
**`/python 1.py`** · repo_name: `naresh123506/fy` · branch_name: refs/heads/master
- blob_id c31b82ebed4ecfc6905cefbefe7f86a780fbd7cb · directory_id adc8ce0452f38501c400b6336a2d7634f85d8f30 · content_id 8786f2490a72f43eddf35af7bbdecaf899f8c486 · snapshot_id 654f4f06e422f6b0c2417fe7c8d39a6ce4243443 · revision_id 9f78ff308d7d3187dbe2f3669e9ee909ec251c3d
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2020-07-08T20:32:48.085848 · revision_date 2019-08-22T10:44:00 · committer_date 2019-08-22T10:44:00 · gha_event_created_at 2019-08-22T11:09:02 · gha_created_at 2019-08-22T10:10:48
- github_id 203,768,384 · star_events_count 0 · fork_events_count 1 · gha_language Python · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 30 · extension py
- content:

```python
mydict = ["27"]  # despite its name, this is a list holding the string "27"
print(mydict)    # prints: ['27']
```

- authors: ["noreply@github.com"] · author_id: naresh123506.noreply@github.com
---
**`/startup.py`** · repo_name: `lavabyrd/slackedoverflow` · branch_name: refs/heads/master
- blob_id 44aa8f64312a6be8b6373e1a65891323834ded3a · directory_id 68af41aa65acda1a1da4a7096379b2d56ab5b99c · content_id 0df618a57ab5c4b37f942c4df8f87f925a28239c · snapshot_id 6deccced527a22a9c7a53ce5f8d6179b32dc9fbf · revision_id 3aee3cdb5d29fdbc38e0878ffafa52460af14d99
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2022-11-30T03:02:35.237469 · revision_date 2020-08-10T20:14:03 · committer_date 2020-08-10T20:14:03 · gha_event_created_at 2020-08-10T20:14:04 · gha_created_at 2018-06-14T19:29:32
- github_id 137,400,336 · star_events_count 0 · fork_events_count 0 · gha_language Python · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 558 · extension py
- content:

```python
from application import (
app,
db,
models
)
import os
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': models.User, 'Post': models.Post}
# this may not be required as I'm using a top level entry point
# App startup
# if __name__ == '__main__':
# port = int(os.environ.get('PORT', 5000))
# app.run(debug=False,
# host="0.0.0.0",
# port=port
# )
port = int(os.environ.get('PORT', 5000))
app.run(debug=False,
host="0.0.0.0",
port=port
)
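# With the shell context above, `flask shell` starts a REPL with db, User and
# Post pre-imported (assumes the FLASK_APP environment variable points at
# this module; illustrative note, not part of the original file).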
```

- authors: ["mpreston@slack-corp.com"] · author_id: mpreston@slack-corp.com
---
**`/artificial intelligence/Assignment 1/Matin_Noohnezhad_Assignment1/searchAgents.py`** · repo_name: `Matin-Noohnezhad/university_codes` · branch_name: refs/heads/master
- blob_id b82d4974caad00ca47fd74b3dcc08953ab3fde4b · directory_id d0e38a3631ac8350a2467e3834967c548425c94e · content_id b0d5edac24cd443a720fbf5f5e06c250f095286d · snapshot_id 6137dd19aa3dc6afe45f6416898fb5470b93e0e5 · revision_id fd00e35f3983d614ab433d7650ad2a3c7dd5d19b
- detected_licenses [] · license_type no_license · gha_license_id null
- visit_date 2023-08-28T20:02:49.161808 · revision_date 2021-09-17T13:27:24 · committer_date 2021-09-17T13:27:24 · gha_event_created_at null · gha_created_at null
- github_id 407,548,987 · star_events_count 0 · fork_events_count 0 · gha_language null · src_encoding UTF-8 · language Python · is_vendor false · is_generated false · length_bytes 23,494 · extension py
- content (Python 2):

```python
# searchAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
class GoWestAgent(Agent):
"An agent that goes West until it can't."
def getAction(self, state):
"The agent receives a GameState (defined in pacman.py)."
if Directions.WEST in state.getLegalPacmanActions():
return Directions.WEST
else:
return Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
"""
This very general search agent finds a path using a supplied search
algorithm for a supplied search problem, then returns actions to follow that
path.
As a default, this agent runs DFS on a PositionSearchProblem to find
location (1,1)
Options for fn include:
depthFirstSearch or dfs
breadthFirstSearch or bfs
Note: You should NOT change any code in SearchAgent
"""
def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
# Warning: some advanced Python magic is employed below to find the right functions and problems
# Get the search function from the name and heuristic
if fn not in dir(search):
raise AttributeError, fn + ' is not a search function in search.py.'
func = getattr(search, fn)
if 'heuristic' not in func.func_code.co_varnames:
print('[SearchAgent] using function ' + fn)
self.searchFunction = func
else:
if heuristic in globals().keys():
heur = globals()[heuristic]
elif heuristic in dir(search):
heur = getattr(search, heuristic)
else:
raise AttributeError, heuristic + ' is not a function in searchAgents.py or search.py.'
print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
# Note: this bit of Python trickery combines the search algorithm and the heuristic
self.searchFunction = lambda x: func(x, heuristic=heur)
# Get the search problem type from the name
if prob not in globals().keys() or not prob.endswith('Problem'):
raise AttributeError, prob + ' is not a search problem type in SearchAgents.py.'
self.searchType = globals()[prob]
print('[SearchAgent] using problem type ' + prob)
def registerInitialState(self, state):
"""
This is the first time that the agent sees the layout of the game
board. Here, we choose a path to the goal. In this phase, the agent
should compute the path to the goal and store it in a local variable.
All of the work is done in this method!
state: a GameState object (pacman.py)
"""
if self.searchFunction == None: raise Exception, "No search function provided for SearchAgent"
starttime = time.time()
problem = self.searchType(state) # Makes a new search problem
self.actions = self.searchFunction(problem) # Find a path
totalCost = problem.getCostOfActions(self.actions)
print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)
def getAction(self, state):
"""
Returns the next action in the path chosen earlier (in
registerInitialState). Return Directions.STOP if there is no further
action to take.
state: a GameState object (pacman.py)
"""
if 'actionIndex' not in dir(self): self.actionIndex = 0
i = self.actionIndex
self.actionIndex += 1
if i < len(self.actions):
return self.actions[i]
else:
return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
"""
A search problem defines the state space, start state, goal test, successor
function and cost function. This search problem can be used to find paths
to a particular point on the pacman board.
The state space consists of (x,y) positions in a pacman game.
Note: this search problem is fully specified; you should NOT change it.
"""
def __init__(self, gameState, costFn=lambda x: 1, goal=(1, 1), start=None, warn=True, visualize=True):
"""
Stores the start and goal.
gameState: A GameState object (pacman.py)
costFn: A function from a search state (tuple) to a non-negative number
goal: A position in the gameState
"""
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
if start != None: self.startState = start
self.goal = goal
self.costFn = costFn
self.visualize = visualize
if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
print 'Warning: this does not look like a regular search maze'
# For display purposes
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def getStartState(self):
return self.startState
def isGoalState(self, state):
isGoal = state == self.goal
# For display purposes only
if isGoal and self.visualize:
self._visitedlist.append(state)
import __main__
if '_display' in dir(__main__):
if 'drawExpandedCells' in dir(__main__._display): # @UndefinedVariable
__main__._display.drawExpandedCells(self._visitedlist) # @UndefinedVariable
return isGoal
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x, y = state
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextState = (nextx, nexty)
cost = self.costFn(nextState)
successors.append((nextState, action, cost))
# Bookkeeping for display purposes
self._expanded += 1 # DO NOT CHANGE
if state not in self._visited:
self._visited[state] = True
self._visitedlist.append(state)
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999.
"""
if actions == None: return 999999
x, y = self.getStartState()
cost = 0
for action in actions:
# Check figure out the next state and see whether its' legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
cost += self.costFn((x, y))
return cost
class StayEastSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the West side of the board.
The cost function for stepping into a position (x,y) is 1/2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: .5 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn, (1, 1), None, False)
class StayWestSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the East side of the board.
The cost function for stepping into a position (x,y) is 2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: 2 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
"The Manhattan distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
def euclideanHeuristic(position, problem, info={}):
"The Euclidean distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return ((xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
"""
This search problem finds paths through all four corners of a layout.
You must select a suitable state space and successor function
"""
def __init__(self, startingGameState):
"""
Stores the walls, pacman's starting position and corners.
"""
self.walls = startingGameState.getWalls()
self.startingPosition = startingGameState.getPacmanPosition()
top, right = self.walls.height - 2, self.walls.width - 2
self.corners = ((1, 1), (1, top), (right, 1), (right, top))
for corner in self.corners:
if not startingGameState.hasFood(*corner):
print 'Warning: no food in corner ' + str(corner)
self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded
# Please add any code here which you would like to use
# in initializing the problem
"*** YOUR CODE HERE ***"
def getStartState(self):
"""
Returns the start state (in your state space, not the full Pacman state
space)
"""
        # A state is (position, tuple of unvisited corners); a tuple keeps
        # the state hashable for the search code.
        return (self.startingPosition, tuple(self.corners))
def isGoalState(self, state):
"""
Returns whether this search state is a goal state of the problem.
"""
        # The goal is reached once no unvisited corners remain.
        return len(state[1]) == 0
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost'
is the incremental cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
# Add a successor state to the successor list if the action is legal
# Here's a code snippet for figuring out whether a new position hits a wall:
# x,y = currentPosition
# dx, dy = Actions.directionToVector(action)
# nextx, nexty = int(x + dx), int(y + dy)
# hitsWall = self.walls[nextx][nexty]
            x, y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                # Carry the unvisited corners in the successor state,
                # dropping the new position if it is one of them.
                remaining = tuple(c for c in state[1] if c != (nextx, nexty))
                successors.append((((nextx, nexty), remaining), action, 1))
self._expanded += 1 # DO NOT CHANGE
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999. This is implemented for you.
"""
if actions == None: return 999999
x, y = self.startingPosition
for action in actions:
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
return len(actions)
def cornersHeuristic(state, problem):
"""
A heuristic for the CornersProblem that you defined.
state: The current search state
(a data structure you chose in your search problem)
problem: The CornersProblem instance for this layout.
This function should always return a number that is a lower bound on the
shortest path from the state to a goal of the problem; i.e. it should be
admissible (as well as consistent).
"""
corners = problem.corners # These are the corner coordinates
walls = problem.walls # These are the walls of the maze, as a Grid (game.py)
current_position = state[0]
x = current_position[0]
y = current_position[1]
    goals = state[1]
    if not goals:
        return 0  # every corner already visited
    heuristic_value = abs(goals[0][0] - x) + abs(goals[0][1] - y)
for i in range(len(goals)):
x2 = goals[i][0]
y2 = goals[i][1]
if (heuristic_value > abs(x2 - x) + abs(y2 - y)):
heuristic_value = abs(x2 - x) + abs(y2 - y)
return heuristic_value # Default to trivial solution
class AStarCornersAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
self.searchType = CornersProblem
class FoodSearchProblem:
"""
A search problem associated with finding the a path that collects all of the
food (dots) in a Pacman game.
A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
foodGrid: a Grid (see game.py) of either True or False, specifying remaining food
"""
def __init__(self, startingGameState):
self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
self.walls = startingGameState.getWalls()
self.startingGameState = startingGameState
self._expanded = 0 # DO NOT CHANGE
self.heuristicInfo = {} # A dictionary for the heuristic to store information
def getStartState(self):
return self.start
def isGoalState(self, state):
return state[1].count() == 0
def getSuccessors(self, state):
"Returns successor states, the actions they require, and a cost of 1."
successors = []
self._expanded += 1 # DO NOT CHANGE
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x, y = state[0]
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextFood = state[1].copy()
nextFood[nextx][nexty] = False
successors.append((((nextx, nexty), nextFood), direction, 1))
return successors
def getCostOfActions(self, actions):
"""Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999"""
x, y = self.getStartState()[0]
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class AStarFoodSearchAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
"""
Your heuristic for the FoodSearchProblem goes here.
This heuristic must be consistent to ensure correctness. First, try to come
up with an admissible heuristic; almost all admissible heuristics will be
consistent as well.
If using A* ever finds a solution that is worse uniform cost search finds,
your heuristic is *not* consistent, and probably not admissible! On the
other hand, inadmissible or inconsistent heuristics may find optimal
solutions, so be careful.
The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid
(see game.py) of either True or False. You can call foodGrid.asList() to get
a list of food coordinates instead.
If you want access to info like walls, capsules, etc., you can query the
problem. For example, problem.walls gives you a Grid of where the walls
are.
If you want to *store* information to be reused in other calls to the
heuristic, there is a dictionary called problem.heuristicInfo that you can
use. For example, if you only want to count the walls once and store that
value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
Subsequent calls to this heuristic can access
problem.heuristicInfo['wallCount']
"""
    position, foodGrid = state
    food_list = foodGrid.asList()  # list of (x, y) coordinates of remaining dots
    if not food_list:
        return 0  # no food left: goal state, so the heuristic is zero
    # Admissible and consistent: Pacman must at least reach the farthest dot,
    # and the Manhattan distance never overestimates the true maze distance.
    # (The earlier draft indexed the coordinate list as if it were a grid and
    # weighted the food count, which was neither correct nor admissible.)
    return max(abs(position[0] - fx) + abs(position[1] - fy)
               for fx, fy in food_list)
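# A tighter alternative, sketched as an illustration: cache exact maze distances
# in problem.heuristicInfo (as the docstring above suggests) so each
# (position, dot) pair is searched at most once. It relies on mazeDistance,
# defined later in this file; the cache key name 'mazeDist' is an arbitrary
# choice made for this sketch.
def cachedMazeFoodHeuristic(state, problem):
    position, foodGrid = state
    food = foodGrid.asList()
    if not food:
        return 0
    cache = problem.heuristicInfo.setdefault('mazeDist', {})
    best = 0
    for dot in food:
        key = (position, dot)
        if key not in cache:
            cache[key] = mazeDistance(position, dot, problem.startingGameState)
        best = max(best, cache[key])
    return best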
class ClosestDotSearchAgent(SearchAgent):
"Search for all food using a sequence of searches"
def registerInitialState(self, state):
self.actions = []
currentState = state
while (currentState.getFood().count() > 0):
nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
self.actions += nextPathSegment
for action in nextPathSegment:
legal = currentState.getLegalActions()
if action not in legal:
t = (str(action), str(currentState))
raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t
currentState = currentState.generateSuccessor(0, action)
self.actionIndex = 0
print 'Path found with cost %d.' % len(self.actions)
def findPathToClosestDot(self, gameState):
"""
Returns a path (a list of actions) to the closest dot, starting from
gameState.
"""
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        # BFS returns the shortest action sequence to the nearest dot, because
        # AnyFoodSearchProblem's goal test accepts any square containing food.
        return search.bfs(problem)
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but has a
different goal test, which you need to fill in below. The state space and
successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in the findPathToClosestDot
method.
"""
def __init__(self, gameState):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test that will
complete the problem definition.
"""
        x, y = state
        # goal reached when Pacman stands on a square that still contains food
        return self.food[x][y]
def mazeDistance(point1, point2, gameState):
"""
Returns the maze distance between any two points, using the search functions
you have already built. The gameState can be any game state -- Pacman's
position in that state is ignored.
Example usage: mazeDistance( (2,4), (5,6), gameState)
This might be a useful helper function for your ApproximateSearchAgent.
"""
x1, y1 = point1
x2, y2 = point2
walls = gameState.getWalls()
assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
return len(search.bfs(prob))
|
[
"matinnouhnejad@gmail.com"
] |
matinnouhnejad@gmail.com
|
8041cf4b80ab3c8ae6003c2ac36e5b582ed26c97
|
b235424d69530fc78805b89e6d64e3cfea3bffad
|
/employee_app/views.py
|
5f80c433b88785de2fe4237cbab9b8e9d177c2b2
|
[] |
no_license
|
bhargava-kush/employees
|
4201ed0047e14ab053c80456047753302cdd39a6
|
2dcc53b7896ebf6a69ee6c92f5245dcc069b27f6
|
refs/heads/master
| 2021-06-22T10:31:38.204679
| 2019-09-13T13:01:18
| 2019-09-13T13:01:18
| 208,237,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.authentication import TokenAuthentication
from rest_framework.filters import SearchFilter
from rest_framework import viewsets
from django_filters import rest_framework as filters
from employee_app.models import Employee
from employee_app.serializers import EmployeeSerializer
# Create your views here.
class EmployeeFilter(filters.FilterSet):
"""
    Custom FilterSet: case-insensitive partial matching on employee fields.
"""
first_name = filters.CharFilter(field_name='first_name', lookup_expr='icontains')
last_name = filters.CharFilter(field_name='last_name', lookup_expr='icontains')
dept_name = filters.CharFilter(field_name='dept_name', lookup_expr='icontains')
class EmployeeViewSet(viewsets.ModelViewSet):
"""
A simple ViewSet for viewing employees.
"""
permission_classes = (IsAuthenticatedOrReadOnly,)
authentication_classes = (TokenAuthentication,)
filter_backends = (filters.DjangoFilterBackend, SearchFilter)
filterset_class = EmployeeFilter
search_fields = ('first_name','last_name','dept_name')
queryset = Employee.objects.all()
serializer_class = EmployeeSerializer
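# Example requests this configuration supports (a sketch: it assumes the viewset
# is routed at /employees/ via a DRF router, which is not shown in this file):
#   GET /employees/?first_name=jo    -> icontains filter on first_name
#   GET /employees/?search=sales     -> SearchFilter across all three fields
#   POST /employees/ with header "Authorization: Token <key>"
#       -> writes require token auth (reads are open per IsAuthenticatedOrReadOnly)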
|
[
"bhargavakush93@gmail.com"
] |
bhargavakush93@gmail.com
|
258040687eef069d88eaf6602b3b6a908d15621c
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/~inactive/cheerleadersbackup/app.py
|
15ba4c64f46e15230f624ab0f68ec761e311cb53
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262
| 2023-07-30T18:36:19
| 2023-07-30T18:36:19
| 67,726,018
| 258
| 205
|
MIT
| 2023-09-07T02:36:36
| 2016-09-08T17:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
#encoding:utf-8
subreddit = 'cheerleaders'
t_channel = '@cheerleadersbackup'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
[
"git@fillll.ru"
] |
git@fillll.ru
|
1a6417f7d4b4ce956970176e7ab8ff3c830b13ea
|
94d1af1009485e1ae6ffa04ce24034184225a4d1
|
/patter/server/__init__.py
|
ba846d35aba4052b9c09377222256b5079d51c15
|
[
"MIT"
] |
permissive
|
songtaoshi/patter
|
489dc5abb846f74da5a878cd92f7dff75222ae1b
|
92e8fb0d5d98571bd76a0ff90fae1fc1853ba0da
|
refs/heads/master
| 2021-10-23T03:16:29.976872
| 2018-12-19T00:52:00
| 2018-12-19T01:26:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from .server import SpeechServicer
from .speech_pb2_grpc import add_SpeechServicer_to_server, SpeechStub
from .speech_pb2 import (RecognitionConfig, RecognitionAudio, RecognizeRequest, RecognizeResponse,
SpeechRecognitionResult, SpeechRecognitionAlternative, WordInfo, StreamingRecognizeRequest,
StreamingRecognitionConfig)
|
[
"ryanleary@gmail.com"
] |
ryanleary@gmail.com
|
19a2ae7c5cc347c4bf684be6108aafae16a5951a
|
f5e0baa9d5ad8e01ce048cfa81e2a696a7441b3c
|
/analysis_scripts/M287480_170518.py
|
774636cff636e5ed99c1929586de1c44c0d4ca09
|
[] |
no_license
|
mtv2101/cats_mapping
|
d8dcba4f435cec20f04ff5e52a60dd36a2ec81f8
|
b9605abec839b525eea86b1b58da338100ff3d45
|
refs/heads/master
| 2022-01-11T10:00:32.045546
| 2019-02-26T21:19:27
| 2019-02-26T21:19:27
| 104,128,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 25 12:21:57 2017
@author: mattv
"""
from cats_mapping.hemodynamics.hemo_2cam import hemo_2cam
# 170524_M291236
hemo = hemo_2cam(r"Y:\CorticalMapping\IntrinsicImageData\170531-M287480\M287480_170531_cam1_crosstalkJCamF101.dcimg_4_4_1.h5",
r"Y:\CorticalMapping\IntrinsicImageData\170531-M287480\M287480_170531_cam2_crosstalkJCamF108.dcimg_4_4_1.h5",
r"Y:\CorticalMapping\IntrinsicImageData\170531-M287480\M287480_170531_cam1_crosstalkJCamF101.dcimg_16_16_1.h5",
r"Y:\CorticalMapping\IntrinsicImageData\170531-M287480\M287480_170531_cam2_crosstalkJCamF108.dcimg_16_16_1.h5",
r"Y:\CorticalMapping\IntrinsicImageData\170531-M287480\M287480_170531_cam1JCamF103.dcimg_16_16_1.h5",
r"Y:\CorticalMapping\IntrinsicImageData\170531-M287480\M287480_170531_cam2JCamF109.dcimg_16_16_1.h5",
r"Y:\CorticalMapping\IntrinsicImageData\170531-M287480\M287480_170531_cam1JPhys103",
r"G:\170531-M287480\170531_M287480_alldat")
hemo.run_2cam()
|
[
"valley@gmail.com"
] |
valley@gmail.com
|
bf460c74e5aed162bee3001c663f972f0073d3e8
|
e06e2c8080002d9397fab2d186328c372daa4f19
|
/coronavirus-2020/exp/knn_dist_diff.py
|
5cda65d659709ba802ee939378bfd2e92f8c877c
|
[
"MIT"
] |
permissive
|
HRaeder42/CS349_Final
|
3ea2dfc1bbb8470c3633caa7fbedd2379fada269
|
83d8cc70f6ac5f960138525190e25ad7dad13277
|
refs/heads/master
| 2023-09-03T22:49:31.709536
| 2021-09-26T21:34:09
| 2021-09-26T21:34:09
| 270,836,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
"""
Experiment summary
------------------
Treat each province/state in a country cases over time
as a vector, do a simple K-Nearest Neighbor between
countries. Take the difference between cases. Get
the distribution of this data (to make it time-invariant).
Use the distribution as the feature vector.
"""
import sys
sys.path.insert(0, '..')
from utils import data
import os
import sklearn
import numpy as np
from sklearn.neighbors import (
KNeighborsClassifier,
DistanceMetric
)
import json
# ------------ HYPERPARAMETERS -------------
BASE_PATH = '../COVID-19/csse_covid_19_data/'
N_NEIGHBORS = 5
MIN_CASES = 1000
N_BINS = 20
NORMALIZE = True
# ------------------------------------------
confirmed = os.path.join(
BASE_PATH,
'csse_covid_19_time_series',
'time_series_covid19_confirmed_global.csv')
confirmed = data.load_csv_data(confirmed)
features = []
targets = []
for val in np.unique(confirmed["Country/Region"]):
df = data.filter_by_attribute(
confirmed, "Country/Region", val)
cases, labels = data.get_cases_chronologically(df)
features.append(cases)
targets.append(labels)
features = np.concatenate(features, axis=0)
targets = np.concatenate(targets, axis=0)
predictions = {}
for _dist in ['minkowski', 'manhattan']:
for val in np.unique(confirmed["Country/Region"]):
# test data
df = data.filter_by_attribute(
confirmed, "Country/Region", val)
cases, labels = data.get_cases_chronologically(df)
# filter the rest of the data to get rid of the country we are
# trying to predict
mask = targets[:, 1] != val
tr_features = features[mask]
tr_targets = targets[mask][:, 1]
above_min_cases = tr_features.sum(axis=-1) > MIN_CASES
tr_features = np.diff(tr_features[above_min_cases], axis=-1)
if NORMALIZE:
tr_features = tr_features / tr_features.sum(axis=-1, keepdims=True)
tr_features = np.apply_along_axis(
lambda a: np.histogram(a, bins=N_BINS)[0], -1, tr_features)
tr_targets = tr_targets[above_min_cases]
# train knn
knn = KNeighborsClassifier(n_neighbors=N_NEIGHBORS, metric=_dist)
knn.fit(tr_features, tr_targets)
# predict
        # apply the same transform as the training features: sum the provinces,
        # take daily differences, normalize if enabled, then histogram --
        # otherwise the test vector lives in a different feature space
        cases = cases.sum(axis=0, keepdims=True)
        cases = np.diff(cases, axis=-1)
        if NORMALIZE:
            cases = cases / cases.sum(axis=-1, keepdims=True)
        cases = np.apply_along_axis(
            lambda a: np.histogram(a, bins=N_BINS)[0], -1, cases)
# nearest country to this one based on trajectory
label = knn.predict(cases)
if val not in predictions:
predictions[val] = {}
predictions[val][_dist] = label.tolist()
with open('results/knn_dist_diff.json', 'w') as f:
json.dump(predictions, f, indent=4)
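# Toy illustration of the feature transform above (names local to this sketch):
#   raw = np.array([[0, 2, 6, 14, 30]])
#   diffed = np.diff(raw, axis=-1)                        # [[2, 4, 8, 16]]
#   normed = diffed / diffed.sum(axis=-1, keepdims=True)  # rows sum to 1
#   hist = np.apply_along_axis(
#       lambda a: np.histogram(a, bins=N_BINS)[0], -1, normed)
# The histogram makes the vector length independent of how many days of data a
# country has, which is what makes the KNN comparison time-invariant.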
|
[
"noreply@github.com"
] |
HRaeder42.noreply@github.com
|
06dbf8e4e2546909ea1b7b63fb8bd169c4531429
|
193bb37a5e878a5918ce1e181f033770a015f49a
|
/Approach Toward Service/TxService/TxService.py
|
7482c656a1021954db7c9f7df1b48707991d5a80
|
[] |
no_license
|
eKaiCHEN/basebandinCloud
|
15c1d3a94fed8eeb32663887d69196d7951e1a38
|
974690aea2ba095b15de9d35a70a51c084f91f6a
|
refs/heads/master
| 2016-08-11T09:12:52.024363
| 2016-01-15T04:40:50
| 2016-01-15T04:40:50
| 49,696,809
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
#!/usr/bin/env python
import os
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='192.168.56.101'))
channel = connection.channel()
channel.queue_declare(queue='Txjobs')
print ' [*] Waiting for messages. To exit press CTRL+C'
def callback(ch, method, properties, body):
print " [x] Received %r" % (body,)
#excute the TX task script locally
os.system("./Tasktransmiter.sh")
channel.basic_consume(callback,
queue='Txjobs',
no_ack=True)
channel.start_consuming()
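# Producer-side counterpart, sketched for illustration (not part of this
# service; the message body 'start-tx' is an arbitrary example):
#   connection = pika.BlockingConnection(
#       pika.ConnectionParameters(host='192.168.56.101'))
#   channel = connection.channel()
#   channel.queue_declare(queue='Txjobs')
#   channel.basic_publish(exchange='', routing_key='Txjobs', body='start-tx')
#   connection.close()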
|
[
"sendtochenkai@gmail.com"
] |
sendtochenkai@gmail.com
|
6083ad9ee42d5fc183d6a6f29f55f678986445dd
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_DayOfMonth_MLP.py
|
df214c11738b68732f0ec02c5441c9a6023ff1b2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] , ['MLP'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
497c4102c8fd48665cc9efd8cd714a7eb6ee4f99
|
2fced350d1368476db8eb7a1577a8d737f716af3
|
/src/chat/migrations/0004_replace_newline_with_br.py
|
de648b470ba9bffab5b31bdd1bcfcdcaadaf178c
|
[] |
no_license
|
demidov91/game-for-you
|
48ac9c242fb9752f3c20cf43ea87fbaf943b9a08
|
d9226244dd6bf8100f0aaa391ff0cf9a7343c7ef
|
refs/heads/master
| 2021-01-19T09:02:03.807777
| 2016-09-19T14:18:45
| 2016-09-19T14:18:45
| 17,358,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,607
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for message in orm.Message.objects.filter(text__contains='\n'):
message.text = message.text.replace('\n', '<br/>')
message.save()
def backwards(self, orm):
for message in orm.Message.objects.filter(text__contains='<br/>'):
message.text = message.text.replace('<br/>', '\n')
message.save()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'symmetrical': 'False', 'to': "orm['auth.Permission']"})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'symmetrical': 'False', 'related_name': "'user_set'", 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'symmetrical': 'False', 'related_name': "'user_set'", 'to': "orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'chat.chat': {
'Meta': {'object_name': 'Chat'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'chat.message': {
'Meta': {'ordering': "('create_time',)", 'object_name': 'Message'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'chat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['chat.Chat']"}),
'create_time': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True', 'default': 'datetime.datetime(2014, 10, 5, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('ckeditor.fields.RichTextField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['chat']
symmetrical = True
|
[
"demidov91@mail.ru"
] |
demidov91@mail.ru
|
d82a1ed6f7cac59d851481158ea80444fbfcc75b
|
edfb435ee89eec4875d6405e2de7afac3b2bc648
|
/tags/selenium-2.0-alpha-6/chrome/src/py/driver.py
|
c41017930d641e4f4bfcc750d9af99f8941723d0
|
[
"Apache-2.0"
] |
permissive
|
Escobita/selenium
|
6c1c78fcf0fb71604e7b07a3259517048e584037
|
f4173df37a79ab6dd6ae3f1489ae0cd6cc7db6f1
|
refs/heads/master
| 2021-01-23T21:01:17.948880
| 2012-12-06T22:47:50
| 2012-12-06T22:47:50
| 8,271,631
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,016
|
py
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from selenium.common.exceptions import RemoteDriverServerException
from selenium.remote import utils
from subprocess import Popen
import httplib
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from threading import Thread
from Queue import Queue
try:
import json
except ImportError:
import simplejson as json
if not hasattr(json, 'dumps'):
import simplejson as json
from time import sleep, time
from urllib import urlopen
from os.path import expanduser, join, dirname, abspath, isdir, isfile
from sys import platform
from tempfile import mkdtemp
from shutil import copytree, rmtree, copy
from os import environ
INITIAL_HTML = '''
<html>
<head>
<script type='text/javascript'>
if (window.location.search == '') {
setTimeout("window.location = window.location.href + '?reloaded'", 5000);
}
</script>
</head>
<body>
<p>
ChromeDriver server started and connected. Please leave this tab open.
</p>
</body>
</html>'''
class RequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
self.respond(INITIAL_HTML, "text/html")
# From my (Miki Tebeka) understanding, the driver works by sending a POST http
# request to the server, the server replies with the command to execute and the
# next POST reply will have the result. So we hold command and result
# queues on the server. When we get a POST request we see if there is a
# response in there and place it in the result queue, then we pop the next
# command from the command queue (blocking if needed) and post it to the
# client.
# FIXME: Somewhere here there is a race condition I'm still hunting.
# One option to try is http://blog.odegra.com/?p=3
def do_POST(self):
self.process_reply()
command = self.server.command_queue.get()
data = json.dumps(command)
self.respond(data, "application/json")
def process_reply(self):
lines = []
while not self.rfile.closed:
line = self.rfile.readline()
if line.strip() == "EOResponse":
break
lines.append(line)
data = "".join(lines).strip()
if not data:
return
self.server.result_queue.put(json.loads(data))
def respond(self, data, content_type):
self.send_response(httplib.OK)
self.send_header("Content-type", content_type)
self.send_header("Content-Length", len(data))
self.end_headers()
self.wfile.write(data)
# Just to make it quiet
def log_message(self, format, *args):
pass
def _find_chrome_in_registry():
from _winreg import OpenKey, QueryValue, HKEY_CURRENT_USER
path = r"Software\Microsoft\Windows\CurrentVersion\Uninstall\Google Chrome"
try:
key = OpenKey(HKEY_CURRENT_USER, path)
install_dir = QueryValue(key, "InstallLocation")
except WindowsError:
return ""
return join(install_dir, "chrome.exe")
def _is_win7():
import sys
return sys.getwindowsversion()[0] == 6
def _default_windows_location():
if _is_win7():
appdata = environ["LOCALAPPDATA"]
else:
home = expanduser("~")
appdata = join(home, "Local Settings", "Application Data")
return join(appdata, "Google", "Chrome", "Application", "chrome.exe")
def _windows_chrome():
return _find_chrome_in_registry() or _default_windows_location()
_BINARY = {
"win32" : _windows_chrome,
"darwin" : lambda: "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
"linux2" : lambda: "/usr/bin/google-chrome"
}
def chrome_exe():
return _BINARY[platform]()
def touch(filename):
with open(filename, "ab"):
pass
def _copy_zipped_extension(extension_zip):
extension_dir = utils.unzip_to_temp_dir(extension_zip)
return extension_dir
def create_extension_dir():
extension_dir = _copy_zipped_extension("chrome-extension.zip")
if extension_dir:
return extension_dir
extension_dir = join(dirname(abspath(__file__)), "chrome-extension.zip")
extension_dir = _copy_zipped_extension(extension_dir)
if extension_dir:
return extension_dir
path = mkdtemp()
# FIXME: Copied manually
extdir = join(dirname(abspath(__file__)), "extension")
if not isdir(extdir):
extdir = join(dirname(dirname(abspath(__file__))), "extension")
assert isdir(extdir), "can't find extension"
# copytree need to create the directory
rmtree(path)
copytree(extdir, path)
dll = join(dirname(__file__), "npchromedriver.dll")
if not isfile(dll): # In source
dll = r"..\..\prebuilt\Win32\Release\npchromedriver.dll"
assert isfile(dll), "can't find dll"
copy(dll, path)
return path
def create_profile_dir():
path = mkdtemp()
touch(join(path, "First Run"))
touch(join(path, "First Run Dev"))
return path
# FIXME: Find a free one dynamically
PORT = 33292
def run_chrome(extension_dir, profile_dir, port):
command = [
chrome_exe(),
"--load-extension=%s" % extension_dir,
"--user-data-dir=%s" % profile_dir,
"--activate-on-launch",
"--disable-hang-monitor",
"--homepage=about:blank",
"--no-first-run",
"--disable-popup-blocking",
"--disable-prompt-on-repost",
"--no-default-browser-check",
"http://localhost:%s/chromeCommandExecutor" % port]
return Popen(command)
def run_server(timeout=10):
server = HTTPServer(("", 0), RequestHandler)
server.command_queue = Queue()
server.result_queue = Queue()
t = Thread(target=server.serve_forever)
t.daemon = True
t.start()
start = time()
while time() - start < timeout:
try:
urlopen("http://localhost:%s" % server.server_port)
break
except IOError:
sleep(0.1)
else:
raise RemoteDriverServerException("Can't open server after %s seconds" % timeout)
return server
class ChromeDriver:
def __init__(self):
self._server = None
self._profile_dir = None
self._extension_dir = None
self._chrome = None
def start(self):
self._extension_dir = create_extension_dir()
self._profile_dir = create_profile_dir()
self._server = run_server()
self._chrome = run_chrome(self._extension_dir, self._profile_dir,
self._server.server_port)
def stop(self):
if self._chrome:
try:
self._chrome.kill()
self._chrome.wait()
self._chrome = None
except AttributeError:
                # Popen.kill is a Python 2.6 API...
pass
if self._server:
self._server.server_close()
self._server = None
for path in (self._profile_dir, self._extension_dir):
if not path:
continue
try:
rmtree(path)
except IOError:
pass
def execute(self, command, params):
to_send = params.copy()
to_send["request"] = command
self._server.command_queue.put(to_send)
return self._server.result_queue.get()
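# Lifecycle sketch for the class above. The command name and params are
# illustrative only -- the actual vocabulary is defined by the Chrome
# extension's protocol, which this file does not show:
#   driver = ChromeDriver()
#   driver.start()
#   try:
#       result = driver.execute("get", {"url": "http://example.com/"})
#   finally:
#       driver.stop()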
|
[
"simon.m.stewart@07704840-8298-11de-bf8c-fd130f914ac9"
] |
simon.m.stewart@07704840-8298-11de-bf8c-fd130f914ac9
|
f481700072405a9cff065f964a97d883a37dc1c3
|
de1ba6c5f6b3f1af02433d38b2572cc5c11f3855
|
/menu.py
|
8ee31b682246d871455cc634d2ecdee4799353ec
|
[] |
no_license
|
jcoates2/Independ_RBPi_UMW_2017
|
d0bfd15a0ae3e6922b50e01a10d58c22ecaa727c
|
973c5158d9b6131311b157841eab57b1536a4a33
|
refs/heads/master
| 2021-01-19T21:12:12.363170
| 2017-05-05T13:45:45
| 2017-05-05T13:45:45
| 88,623,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
import subprocess
import os
from termios import tcflush, TCIFLUSH
import sys
def main():
readfile = open("games.txt", "r")
options = []
for line in readfile:
options.append(line.split(":"))
for option in options:
option[1] = option[1].rstrip("\n")
while(True):
tcflush(sys.stdin, TCIFLUSH)
choice = None
print("Enter the program to run:")
num = 1
for option in options:
print(str(num) + ": " + option[0])
num += 1
choice = str(input(">"))
if(choice == None or choice == ""):
print("Enter a choice!")
elif(choice[0] == "q" or choice[0] == "Q"):
print("Exiting...")
readfile.close()
return(0)
elif(not choice.isnumeric()):
print("Invalid choice!")
elif(int(choice) >= 1 and int(choice) < num):
            # run the chosen entry's command (games.txt lines are "name:command");
            # the old code reused the leftover loop variable `option`, so it
            # always launched the last menu entry regardless of the choice
            process = subprocess.Popen(options[int(choice) - 1][1], shell=True)
process.wait()
process = subprocess.Popen("clear", shell=True)
process.wait()
else:
print("Invalid choice!")
main()
|
[
"noreply@github.com"
] |
jcoates2.noreply@github.com
|
4abbb819cd6010f59370221e27f232bf4deca730
|
78c2b8d2e4f51c5221a01fd5c37ed57f016f3edc
|
/alfunduqapi/configure/urls.py
|
6fbcd95a255a3ad1e65499f7f1bf42a82b1c6cfc
|
[] |
no_license
|
Aamier/alfunduq
|
300f962cd504a13f91d62194815a259d8a6a888b
|
8f92465bae02a08548951f83023af71e93966ebb
|
refs/heads/master
| 2020-04-07T17:57:54.698437
| 2018-11-20T19:38:02
| 2018-11-20T19:38:02
| 158,591,268
| 0
| 0
| null | 2018-11-21T18:36:10
| 2018-11-21T18:36:09
| null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# rooms/urls.py
from django.urls import path
from django.conf.urls import url
from . import views
from . import create_hotel_base
urlpatterns = [
path('rooms', create_hotel_base.RoomsList.as_view()),
path('floors', create_hotel_base.Floor.as_view()),
path('roomtype', create_hotel_base.RoomType.as_view()),
path('facility', create_hotel_base.FacilityList.as_view()),
url(r'^(?P<pk>[0-9]+)/floor/$', views.Floor.as_view()),
]
|
[
"shakeel.rymec@gmail.com"
] |
shakeel.rymec@gmail.com
|
bc04bfb96825ff3d5a218f6ba8044413b20402df
|
c99c36f40bc1dbca96000ee5525f65aa4537f93b
|
/Python for Beginner/1123 실습1.py
|
02731bea5d6e552f879a1d5380308f9a0f366339
|
[] |
no_license
|
SeungsuHa95/Python
|
73c278ccca349428402cc2328f79718bc1d8d0d2
|
925729fac37279708037def7e9d70309d921267f
|
refs/heads/main
| 2023-01-30T02:24:24.215324
| 2020-12-07T13:10:49
| 2020-12-07T13:10:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
inStr,outStr='',''
count=0
inStr = input("Enter a string: ")
count = len(inStr)
for i in range(0,count):
outStr = outStr + inStr[count-(i+1)]
print('The string printed in reverse is: %s' % outStr)
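# Equivalent one-liner using Python's slice notation: outStr = inStr[::-1]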
|
[
"ht106@naver.com"
] |
ht106@naver.com
|
2699f3562f5f5d601fb2cfc9a82c5b14f346a7c3
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/connect_write_2/approved-origin_disassociate.py
|
da8d023fda61a2b27b63fd0dd83a57bc665bed95
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/connect/disassociate-approved-origin.html
if __name__ == '__main__':
"""
associate-approved-origin : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/connect/associate-approved-origin.html
list-approved-origins : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/connect/list-approved-origins.html
"""
parameter_display_string = """
# instance-id : The identifier of the Amazon Connect instance.
# origin : The domain URL of the integrated application.
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_two_parameter("connect", "disassociate-approved-origin", "instance-id", "origin", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
235f88c9405dd635273f6f7bca82689c42eb1d5b
|
09417c9467e02ee622e423343528e456558f77fd
|
/tensorflow/layers.py
|
6ba4375e0d7f8ed1e3f0646e47d407fbb9775fcd
|
[] |
no_license
|
RizhaoCai/riz_utils
|
d387f37d006fef19881d954259bdb43dfb4d6049
|
9b78bb554f92745bc31893b60f78d52766e3a43c
|
refs/heads/master
| 2020-04-16T07:20:32.481267
| 2019-01-19T02:24:19
| 2019-01-19T02:24:19
| 165,382,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
import numpy as np
import tensorflow as tf
import os
np.set_printoptions(formatter={'float_kind': lambda x: '%.2f' % x})
def print_vars(sess):
print('print vars')
for _var in tf.global_variables():
assert _var.dtype.name == 'float32_ref', _var.name
var = sess.run(_var)
if _var in tf.trainable_variables():
print('T', end='')
else:
print(' ', end='')
if _var in tf.moving_average_variables():
print('A', end='')
else:
print(' ', end='')
if _var in tf.model_variables():
print('M', end='')
else:
print(' ', end='')
if _var in tf.local_variables():
print('L', end='')
else:
print(' ', end='')
print('', _var.name, var.shape, var.ravel()[0])
def w(shape):
return tf.get_variable('w', shape, initializer=tf.variance_scaling_initializer())###
###def w(shape):###
### return tf.get_variable('w', shape, initializer=tf.initializers.random_normal(0.0, 0.02))###
def b(shape):
return tf.get_variable('b', shape, initializer=tf.zeros_initializer())
def conv(x, c_out):
c_in = x.get_shape()[3].value
return tf.nn.conv2d(x, w([3, 3, c_in, c_out]), [1, 1, 1, 1], 'SAME')
def bias(x):
c_in = x.get_shape()[-1].value
return tf.nn.bias_add(x, b([c_in]))
def bn(x, is_training):
"""
Batch-Normalization
"""
### return bias(x)
return tf.contrib.layers.batch_norm(x, is_training=is_training, updates_collections=None)
def max_pooling(x):
    """
    Max-pooling with a 3x3 window and stride 2 ('SAME' padding).
    ksize/strides are in NHWC order, hence the leading and trailing 1s.
    """
    return tf.nn.max_pool(x, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')
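# Example composition of the helpers above (a sketch; x is assumed to be an
# NHWC float tensor and is_training a bool placeholder, neither defined here):
#   with tf.variable_scope('block1'):
#       h = tf.nn.relu(bn(conv(x, 64), is_training))
#       h = max_pooling(h)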
|
[
"rizhao.cai@gmail.com"
] |
rizhao.cai@gmail.com
|
a22e2d7fccf45e3cd3031acd1b409a1def790206
|
ac514cf43f276c054ddc0b1f99086fd0110e4ea8
|
/microblog/bin/sqlformat
|
07506c3e2e355b57f9082405c46783ad26a86278
|
[] |
no_license
|
matheus-manoel/microblog
|
c1b853b3602e179c19ef64cf95d3bf905ead0653
|
68de75e7a0f32738ac91f656c96a0495dbf03a4e
|
refs/heads/master
| 2020-12-02T19:35:09.291815
| 2017-07-11T19:31:04
| 2017-07-11T19:31:04
| 96,361,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
#!/home/matheus/Documents/microblog/microblog/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"matheuscmanoel@gmail.com"
] |
matheuscmanoel@gmail.com
|
|
44cec4dae69b612b97cdc84eb0f9081d818c006e
|
cb848d0c80abb04c080155d1502d22391423c4e8
|
/build_isolated/libphidget21/catkin_generated/pkg.develspace.context.pc.py
|
ec4ce7bbe5813fe0e0ea355c1374827189a79773
|
[] |
no_license
|
MTU-Autobot/catkin_ws
|
d8bc9b0de46befc53282b9b7e6d338a7ff7e3a0c
|
cf104fe048c6101f50be1b87e181d80a4be3e770
|
refs/heads/master
| 2020-03-13T23:14:56.276075
| 2018-04-27T18:28:01
| 2018-04-27T18:28:01
| 131,331,599
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "libphidget21"
PROJECT_SPACE_DIR = "/home/ubuntu/catkin_ws/devel_isolated/libphidget21"
PROJECT_VERSION = "0.7.5"
|
[
"spartanhaden@gmail.com"
] |
spartanhaden@gmail.com
|
ae1006ba2a803ee00b821b8948b24441b00e9591
|
037c7a8bc39146c31449e248f8a9c025ec1c01d0
|
/02.device/RaspberryPi/03_OPENCV-EX/cv_ex03.py
|
592e2ce01d6d4ae2108dfa63526b8e3b11da40b7
|
[] |
no_license
|
cooluks2/iot
|
e62874624bc06acbe5647fed35f6ec62bd582e7f
|
e452b29f0c6967dad9b11207c3b5189cec385497
|
refs/heads/master
| 2023-01-22T17:19:58.456577
| 2020-11-20T11:36:34
| 2020-11-20T11:36:34
| 292,755,019
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
import cv2
import matplotlib.pyplot as plt
imageFile = './data/lena.jpg'
img_bgr = cv2.imread(imageFile)  # OpenCV loads images in BGR channel order
plt.axis('off')
plt.imshow(img_bgr)
plt.show()
plt.axis('off')
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # convert so matplotlib (RGB) renders true colors
plt.imshow(img_rgb)
plt.show()
|
[
"cooluks2@gmail.com"
] |
cooluks2@gmail.com
|
ea6c78b1047f6fccc5d9da937d8f1b6a02df1d2a
|
82d0e47a1ac3b54e3c8f1ea076cbb133cc44d689
|
/proxy.py
|
8db7eb5dc1d07d512ff1a9b457755b724ab8d7e9
|
[] |
no_license
|
godjealous/get_book_score
|
938c456fc717e83c67f2db6094d1bd0a78b3e29e
|
e8cb8880caf5ed9d2385aa61db6ad83ddc54e12f
|
refs/heads/master
| 2020-04-02T21:28:09.688774
| 2018-11-18T02:18:25
| 2018-11-18T02:18:25
| 154,800,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
import os
from fake_useragent import UserAgent
ua = UserAgent()
def visitDirectly(url):
response = urllib.request.urlopen(url)
page=response.read().decode('utf-8')
soup=BeautifulSoup(page,'html.parser')
return soup
def visitUsingHeader(url):
headers={"User-Agent":ua.random}
req=urllib.request.Request(url=url,headers=headers)
response=urllib.request.urlopen(req)
page=response.read().decode('utf-8')
soup=BeautifulSoup(page,'html.parser')
return soup
def visitThroughProxy(url):
    proxy = urllib.request.ProxyHandler({'http': '110.40.13.5:80'})  # find a free proxy IP at www.xicidaili.com
opener=urllib.request.build_opener(proxy,urllib.request.HTTPHandler)
urllib.request.install_opener(opener)
req=urllib.request.Request(url=url)
response = urllib.request.urlopen(req)
page = response.read().decode('utf-8')
soup=BeautifulSoup(page,'html.parser')
return soup
url='https://book.douban.com/'
a=visitDirectly(url)
print(a.title)
b=visitUsingHeader(url)
print(b.title)
c=visitThroughProxy(url)
print(c.title)
|
[
"hanyang@RLKF-001.chinabond.com.cn"
] |
hanyang@RLKF-001.chinabond.com.cn
|
c77b06fa1cdfc4e7d338b30310efcc34bcb1d600
|
5c0c129a61843ca38c6afbe09eb58e46191abfba
|
/test/ComparatorTest.py
|
4962b1b1409c9f289b68f04077a439a0226ae15f
|
[] |
no_license
|
fractalbass/titanic
|
298378f6ad8220171a7795dc4b5146955954bb9d
|
582a6c28e9d7095c02f610da1f33dc8a8231197b
|
refs/heads/master
| 2021-01-01T16:01:18.013345
| 2017-07-21T19:33:02
| 2017-07-21T19:33:02
| 97,755,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
import unittest
from Comparator import Comparator
from FileUtils import FileUtils
class ComparatorTest(unittest.TestCase):
    def test_compute_error_zero(self):  # renamed: was shadowed by the duplicate name below
c = Comparator([[]],1)
err = c.compute_error([1, 2, 3, 4], [1, 2, 3, 4])
print("ERROR: {0}".format(err))
self.assertTrue(err == 0.0)
def test_compute_with_different_start_col(self):
c = Comparator([[]], 2)
err = c.compute_error([1, 2, 3, 4], [50, 60, 3, 4])
print("ERROR: {0}".format(err))
self.assertTrue(err == 0.0)
    def test_compute_error_nonzero(self):
c = Comparator([[]],1)
err = c.compute_error([1, 3, 4, 5], [1, 2, 3, 4])
print("ERROR: {0}".format(err))
self.assertTrue(err == 3.0)
def test_record_size_mismatch(self):
c = Comparator("blah",1)
self.assertRaises(Exception, c.compute_error, [1, 2, 3, 4], [1, 2, 3, 4, 5])
self.assertRaises(Exception, c.compute_error, [1, 2, 3, 4, 5], [1, 2, 3, 4])
def test_get_best_match(self):
fu = FileUtils(filename="test_file.csv", skip_header=True)
reference_rows = fu.get_arrays_from_csv()
comp = Comparator(reference_rows,1)
match = comp.get_closes_match([1, 10.0, 20.0, 30.0])
self.assertTrue(match[0] == 3.0)
def test_best_match_non_trivial(self):
a = [[870, 1, 3, 0.134684136, 1, 1, 0.021730754, 0],
[871, 0, 3, 0.875446884, 0, 0, 0.015411575, 0],
[872, 1, 1, 1.582538598, 1, 1, 0.102578967, 0],
[873, 0, 1, 1.111144122, 0, 0, 0.00975935, 0],
[874, 0, 3, 1.582538598, 0, 0, 0.01756683, 0],
[875, 1, 2, 0.942788952, 1, 0, 0.04684488, 1],
[876, 1, 3, 0.50506551, 0, 0, 0.014102261, 1],
[877, 0, 3, 0.67342068, 0, 0, 0.019217722, 0],
[878, 0, 3, 0.639749646, 0, 0, 0.015411575, 0]]
comp = Comparator(a, 2)
a1 = comp.get_closes_match([0, 0, 1, 1.582538598, 1, 1, 0.102578967, 0])
print("Match ID: {0}".format(int(a1[0])))
self.assertTrue(int(a1[0]) == 872)
a2 = comp.get_closes_match([0, 0, 3, 0.50506559, 0, 0, 0.014102261, 1])
print("Match ID: {0}".format(int(a2[0])))
self.assertTrue(int(a2[0]) == 876)
if __name__ == '__main__':
unittest.main()
|
[
"mporter@paintedharmony.com"
] |
mporter@paintedharmony.com
|
0be4a5c978da4c4e0c4e962e37c135f6cf942e73
|
546de28044c4e83a7f5eccabdebf755af61bf7cd
|
/utils/__init__.py
|
9873687c31aee46cd4fde9e32cb4d8570fcad01c
|
[] |
no_license
|
80000v/tpro
|
f83de7a10586eb5c28dbfc6ea8fa05ff2106914a
|
c8506f7a47175fcc256f3467b76ccb6f94a7c7c0
|
refs/heads/master
| 2021-04-20T10:15:37.520322
| 2019-09-14T14:54:26
| 2019-09-14T14:54:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,966
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
@Author: freemoses
@Since: 2019-08-23 14:07:36
@LastEditTime: 2019-09-10 06:45:40
@Description: the folder for general functions
'''
import json
import os
import traceback
from PyQt5.QtGui import QFont, QIcon
BASIC_FONT = QFont(u'微软雅黑', 11)
# Dictionaries mapping icon / JSON / QSS file names to absolute paths (globals)
Icon_Map = {}
Json_Map = {}
Qss_Map = {}
# Walk the installation directory
for root, subdirs, files in os.walk(os.path.abspath(os.path.dirname(__file__))):
for filename in files:
if '.ico' in filename or '.png' in filename:
Icon_Map[filename] = os.path.join(root, filename)
if '.json' in filename:
Json_Map[filename] = os.path.join(root, filename)
if '.qss' in filename:
Qss_Map[filename] = os.path.join(root, filename)
# Walk the current working directory
for root, subdirs, files in os.walk(os.getcwd()):
for filename in files:
if '.ico' in filename or '.png' in filename:
Icon_Map[filename] = os.path.join(root, filename)
if '.json' in filename:
Json_Map[filename] = os.path.join(root, filename)
if '.qss' in filename:
Qss_Map[filename] = os.path.join(root, filename)
# ----------------------------------------------------------------------
def load_icon(icon_name: str):
"""
Get QIcon object with ico name
"""
return QIcon(Icon_Map.get(icon_name, ''))
# ----------------------------------------------------------------------
def load_json(file_name: str, sub_item: str = None):
"""
Load data from json file, you can select one of the subitems
"""
file_path = Json_Map.get(file_name, '')
try:
with open(file_path, mode='r', encoding='UTF-8') as f:
data = json.load(f)
if sub_item is None:
return data
return data.get(sub_item, {})
except:
traceback.print_exc()
# ----------------------------------------------------------------------
def save_json(file_name: str, data: dict, sub_item: str = None):
"""
Save data into json file, you can specify one of the subitems
"""
if sub_item is None:
full_data = data
else:
full_data = load_json(file_name)
full_data[sub_item] = data
file_path = Json_Map.get(file_name, '')
with open(file_path, mode='w+', encoding='UTF-8') as f:
        json.dump(full_data, f, indent=4, ensure_ascii=False)  # write the merged data, not just the subitem
# ----------------------------------------------------------------------
def load_qss(file_name: str):
"""
Get Qss file absolutely path
"""
return Qss_Map.get(file_name, '')
# ----------------------------------------------------------------------
def get_temp_file(file_name: str):
"""
Get path for temp file with filename
"""
temp_path = os.path.join(os.getcwd(), 'tmp')
if not os.path.exists(temp_path):
os.makedirs(temp_path)
return os.path.join(temp_path, file_name)
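# Usage sketch ('settings.json', 'ui' and 'app.png' are hypothetical names; the
# maps above only contain files actually found under the walked directories):
#   cfg = load_json('settings.json', sub_item='ui')
#   save_json('settings.json', cfg, sub_item='ui')
#   icon = load_icon('app.png')
#   tmp = get_temp_file('cache.db')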
|
[
"freemoses@hotmail.com"
] |
freemoses@hotmail.com
|
94ddc2e7336e19160a3e969f15cd752239324c2e
|
9fbaec3ae4927562c1feab89c2c228d7d193a935
|
/Dean Church Github/inventory.py
|
230dcca4da5f28f24c65afc1a1c0146961cd81bc
|
[] |
no_license
|
DeanChurch1/tic-tac-toe
|
efcab9abc9c770ee8f3bd70a46b2f6ee89685d22
|
8baf3bec17ee444c2fd6b3d93b426a1d030fa2bd
|
refs/heads/master
| 2020-04-12T15:53:39.304308
| 2019-01-08T16:07:44
| 2019-01-08T16:07:44
| 162,595,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
#Hero's inventory
import random
player_health = 100
player_armor = 1250
player_attack = 250
player_money = 0
inventory = ["rusty sword","leather armor","wood shield","small healing potion"]
player_stats = ["health",player_health,"armor",player_armor,
"attack",player_attack,"money",player_money]
print("player stats")
print(player_stats)
print("Your items:")
for item in inventory:
print(item)
input("\nPress the enter key to continue")
print("You have",len(inventory), "items in your possession.")
player_health -= 22
input("\nyou have taken some damage on your journey \n" +
"your health is at " +str(player_health)+"\n"+
"you need to use your healing potion \n Press the enter key to continue")
if "small healing potion" in inventory:
print("You will live to fight another day by using the healing potion")
player_health += 20
print(player_stats)
inventory.remove("small healing potion")
for item in inventory:
print(item)
index = int(input("\nEnter the index number for an item in inventory: "))
while index >len(inventory)-1 or index <0:
print("that number is out of range")
index = int(input("\nEnter the index number for an item in inventory: "))
print("At index",index,"is",inventory[index])
start = int(input("\nEnter the index number to begin a slice: "))
finish = int(input("enter the index number to end the slice: "))
print("inventory[",start, ":",finish,"] is", end="")
print(inventory[start:finish])
input("\nPress the enter key to continue.")
chest_items = ["gold","gems","elven sword","bow","corssbow","boots","hat"]
chest = []
for i in range(3):
item = random.choice(chest_items)
chest.append(item)
print("you find a chest which contains:")
print(chest)
print("You add the contents of the chest to your inventory.")
inventory += chest
print("Your inventory now:")
print(inventory)
input("\nPress the neter key to continue.")
print("You trade your sword for a crossbow.")
inventory[0] = "crossbow"
print("your inventory is now:")
print(inventory)
input("\n Press the enter key to continue.")
print("You use your gold and gems to buy an orb of future telling.")
inventory[4:6] = ["orb of future telling"]
print("Inventory now",inventory)
input("\n Press teh neter key to continue.")
print("in a great battle, your shield is destroyed")
del inventory[2]
print("Your inventory is now:")
print(inventory)
print("Your crossbow and armor are stolen by thieves.")
del inventory[:2]
print("Your inventory is now:")
print(inventory)
|
[
"deanchurch@gmail.com"
] |
deanchurch@gmail.com
|
55f20846bd2edf5cc32b88b4a6c2ff27dca58467
|
f7f66d1327238f34d0b3b85c1e221616a95aae8c
|
/memex_dossier/fc/tests/__init__.py
|
6d15c998ef4c46df029b62b3bf7790956a2b02f4
|
[
"MIT"
] |
permissive
|
biyanisuraj/memex-dossier-open
|
820d5afc8a5cf93afc1364fb2a960ac5ab245217
|
43bab4e42d46ab2cf1890c3c2935658ae9b10a3a
|
refs/heads/master
| 2020-06-07T01:34:51.467907
| 2018-10-09T15:44:58
| 2018-10-09T15:44:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
'''memex_dossier.fc Feature Collections
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
'''
from __future__ import absolute_import, division, print_function
import pytest
from memex_dossier.fc.feature_collection import registry, FeatureTypeRegistry
def is_testable_counter(k):
return 'Counter' in k or k == 'SparseVector'
@pytest.yield_fixture(params=filter(is_testable_counter, registry.types()))
def counter_type(request):
ct = registry.get_constructor(request.param)
old_default = FeatureTypeRegistry.DEFAULT_FEATURE_TYPE_NAME
FeatureTypeRegistry.DEFAULT_FEATURE_TYPE_NAME = request.param
yield ct
FeatureTypeRegistry.DEFAULT_FEATURE_TYPE_NAME = old_default
|
[
"jrf@diffeo.com"
] |
jrf@diffeo.com
|
b662cbd056aeae8d02807bbbbce7025ea31e6451
|
71b139516c013ca14d2416ffe74f31d1948e9168
|
/Mininet/tree5.py
|
551259ab25ea0c0b8fac3fac08cc48e5a37ec67b
|
[] |
no_license
|
cavallo5/Nemo-Mininet
|
0a807b80df026d1b3359914086b59093fb7e12b1
|
8de003541e4f7c7c088f38205d59b632652e015b
|
refs/heads/master
| 2022-12-02T19:01:41.173776
| 2020-08-12T08:05:25
| 2020-08-12T08:05:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
#!/usr/bin/python
# Imported API libraries:
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.link import Intf
from mininet.log import setLogLevel, info
from mininet.link import Link, TCLink
#from mininet.util import irange, dumpNodeConnections
import time
from mininet.topolib import TreeNet
if __name__ == '__main__':
    # Start timing
    info('Initial time')
start_time = time.time()
print("--- %s seconds ---" % (time.time() - start_time))
setLogLevel( 'info' )
    # Create the network (TreeNet with depth=4, fanout=2: 2**4 = 16 hosts)
net = TreeNet( depth=4, fanout=2, controller=RemoteController, link=TCLink)
    # Start the network
    info('*** Starting network\n')
net.start()
    mid_time = time.time()
print("--- %s seconds ---" % (mid_time - start_time))
info('*** Testing network connectivity\n')
#time.sleep(10)
    # Check %CPU and %MEM with the top command
CLI( net )
info('*** Stopping network\n')
net.stop()
|
[
"vincenzocavallo.ing@gmail.com"
] |
vincenzocavallo.ing@gmail.com
|
a2693b767a4f8ed189bcb93c51a1c83b980348a3
|
40186c0efae793c12983a1b22d0241ac615c28fd
|
/student.py
|
8bd9d76eccaa14ec2332bdf213da94428af8a4db
|
[] |
no_license
|
mohitsharma8229/CORRELATION
|
16a76a1a562a7f39a7895073330d906e96d4b35b
|
8715a0cb00d6e661c4a7d3d24135145b82475542
|
refs/heads/main
| 2023-06-26T22:06:48.201831
| 2021-07-25T04:03:39
| 2021-07-25T04:03:39
| 389,255,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
import plotly.express as px
import csv
import numpy as np
def getDataSource(data_path):
    marks = []
    days_present = []
    with open(data_path) as f:
        csv_reader = csv.DictReader(f)
        for row in csv_reader:
            marks.append(float(row["Marks In Percentage"]))
            days_present.append(float(row["Days Present"]))
    return {"x": marks, "y": days_present}
def findCorrelation(data_source):
    # np.corrcoef returns the full 2x2 correlation matrix; the off-diagonal
    # entry [0, 1] is the Pearson correlation between marks and attendance
    correlation = np.corrcoef(data_source["x"], data_source["y"])
    print("Correlation", correlation[0, 1])
def setup():
    data_path = "student.csv"
    data_source = getDataSource(data_path)
    findCorrelation(data_source)
setup()
|
[
"noreply@github.com"
] |
mohitsharma8229.noreply@github.com
|
d963210bd58eb6b67b4838b8f16896818c215858
|
1a4885f4851f9b2c65fc5fbbe04c29f2126edb7d
|
/TravelingSalemanProblem/tsp-solver-master/build/lib/tsp_solver/greedy.py
|
c2ec9bcd75b38294d74ecdf11a7316429625ced6
|
[] |
no_license
|
OWLxx/DiscreteOptimization
|
f271caae31bb3461d0d9e5805a3584a9682551f8
|
0f5a60c9eee6f8e3e1d6479c7f700011b0965099
|
refs/heads/master
| 2021-05-01T06:24:13.377728
| 2018-03-05T23:17:50
| 2018-03-05T23:17:50
| 121,142,562
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,587
|
py
|
from __future__ import print_function, division
from itertools import islice
from array import array as pyarray
################################################################################
# A simple algorithm for solving the Travelling Salesman Problem
# Finds a suboptimal solution
################################################################################
if "xrange" not in globals():
#py3
xrange = range
else:
#py2
pass
def optimize_solution( distances, connections ):
"""Tries to optimize solution, found by the greedy algorithm"""
N = len(connections)
path = restore_path( connections )
def ds(i,j): #distance between ith and jth points of path
return distances[path[i]][path[j]]
d_total = 0.0
optimizations = 0
for a in xrange(N-1):
b = a+1
for c in xrange( b+2, N-1):
d = c+1
delta_d = ds(a,b)+ds(c,d) -( ds(a,c)+ds(b,d))
if delta_d > 0:
d_total += delta_d
optimizations += 1
connections[path[a]].remove(path[b])
connections[path[a]].append(path[c])
connections[path[b]].remove(path[a])
connections[path[b]].append(path[d])
connections[path[c]].remove(path[d])
connections[path[c]].append(path[a])
connections[path[d]].remove(path[c])
connections[path[d]].append(path[b])
path[:] = restore_path( connections )
return optimizations, d_total
def restore_path( connections ):
"""Takes array of connections and returns a path.
Connections is array of lists with 1 or 2 elements.
    These elements are indices of the vertices connected to this vertex.
Guarantees that first index < last index
"""
#there are 2 nodes with valency 1 - start and end. Get them.
start, end = [idx
for idx, conn in enumerate(connections)
if len(conn)==1 ]
path = [start]
prev_point = None
cur_point = start
while True:
next_points = [pnt for pnt in connections[cur_point]
if pnt != prev_point ]
if not next_points: break
next_point = next_points[0]
path.append(next_point)
prev_point, cur_point = cur_point, next_point
return path
def pairs_by_dist(N, distances):
#Sort coordinate pairs by distance
indices = [None] * (N*(N-1)//2)
idx = 0
for i in xrange(N):
for j in xrange(i+1,N):
indices[idx] = (i,j)
idx += 1
indices.sort(key = lambda ij: distances[ij[0]][ij[1]])
return indices
def solve_tsp( distances, optim_steps=3, pairs_by_dist=pairs_by_dist ):
"""Given a distance matrix, finds a solution for the TSP problem.
Returns list of vertex indices.
Guarantees that the first index is lower than the last"""
N = len(distances)
if N == 0: return []
if N == 1: return [0]
for row in distances:
if len(row) != N: raise ValueError( "Matrix is not square")
#State of the TSP solver algorithm.
node_valency = pyarray('i', [2])*N #Initially, each node has 2 sticky ends
#for each node, stores 1 or 2 connected nodes
connections = [[] for i in xrange(N)]
def join_segments(sorted_pairs):
#segments of nodes. Initially, each segment contains only 1 node
segments = [ [i] for i in xrange(N) ]
def filtered_pairs():
            #Generate the sequence of joinable pairs: skip pairs where an
            #endpoint has no free sticky end, or whose endpoints already lie
            #in the same segment (joining those would close a premature cycle)
for ij in sorted_pairs:
i,j = ij
if not node_valency[i] or\
not node_valency[j] or\
(segments[i] is segments[j]):
continue
yield ij
for i,j in islice( filtered_pairs(), N-1 ):
node_valency[i] -= 1
node_valency[j] -= 1
connections[i].append(j)
connections[j].append(i)
#Merge segment J into segment I.
seg_i = segments[i]
seg_j = segments[j]
if len(seg_j) > len(seg_i):
seg_i, seg_j = seg_j, seg_i
i, j = j, i
for node_idx in seg_j:
segments[node_idx] = seg_i
seg_i.extend(seg_j)
join_segments(pairs_by_dist(N, distances))
for passn in range(optim_steps):
nopt, dtotal = optimize_solution( distances, connections )
if nopt == 0:
break
path = restore_path( connections )
return path
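# Minimal usage sketch for solve_tsp (toy symmetric distance matrix):
#   D = [[0, 1, 2],
#        [1, 0, 3],
#        [2, 3, 0]]
#   path = solve_tsp(D)   # e.g. [1, 0, 2]; list of vertex indices,
#                         # guaranteed first index < last index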
|
[
"a472975692@gmail.com"
] |
a472975692@gmail.com
|
dd8121be90f1ecbe5f9bfc3f7df5c31058787d2e
|
9e53cf81f82379e5a7cde0ec88513f0e5ee43537
|
/homestud/findtutors/models.py
|
cdcb5a1129413020d8343b1bf8bc50b5d5136903
|
[] |
no_license
|
stephappiah/hometutors
|
ef14f2ab29948402a8f0373f7b4edf0a7c638ab7
|
e550d14ae105a17ce5a49365ff0d3d14cbb88f53
|
refs/heads/master
| 2023-07-30T22:49:21.335578
| 2021-09-09T16:22:21
| 2021-09-09T16:22:21
| 285,824,012
| 0
| 0
| null | 2021-09-09T16:23:26
| 2020-08-07T12:29:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 9,560
|
py
|
# from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.gis.db import models
from django_q.tasks import async_task, schedule
from multiselectfield import MultiSelectField
from .courses import courses_choices, programmes_choices
from .multi_choices import (class_type_choices, free_lesson_choices,
highest_education_choices, teach_level_choices,
user_type_choices, user_profile_choices)
#User Profile
class TutorProfile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True, related_name='tutor_user_profile')
first_name = models.CharField(max_length=50, blank=True, null=True)
last_name = models.CharField(max_length=50, blank=True, null=True)
location = models.PointField(blank=True, null=True)
address = models.CharField(max_length=100, blank=True, null=True)
avatar = models.ImageField(blank=True, null=True, upload_to='avatar/%Y/%m/%d/')
dob = models.DateField(null=True, blank=True)
# Education
school = models.CharField(max_length=100, blank=True, null=True)
programme = models.CharField(max_length=100, blank=True, null=True)
start_year = models.DateField(null=True, blank=True)
end_year = models.DateField(null=True, blank=True)
# Tutor Profile
bio = models.CharField(max_length=500, blank=True, null=True)
highest_education = models.CharField(max_length=20, choices=highest_education_choices, null=True, blank=False)
class_type = MultiSelectField(choices=class_type_choices, null=True, blank=True)
free_lesson_duration = models.IntegerField(choices=free_lesson_choices, null=True, blank=True)
rate_per_hour = models.CharField(max_length=20, null=True, blank=True)
negotiable = models.BooleanField(default=True, blank=True)
# Subjects and Programmes
teach_levels = MultiSelectField(choices=teach_level_choices, null=True, blank=True)
tutoring_programs = MultiSelectField(choices=programmes_choices, null=True, blank=True)
courses_subjects = MultiSelectField(choices=courses_choices, null=True, blank=True)
slug = models.CharField(unique=True, max_length=50, null=True, blank=True)
show_profile = models.BooleanField(default=True, blank=True)
# admin role edit for reviewing tutor substandard profile
admin_show = models.BooleanField(default=False, blank=True)
admin_comment = models.CharField(max_length=500, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
# below attribute helps track change in admin_profile_status and admin comment
# this is cached and compared with the actual attribute to spot any changes
# it is then used to decide whether to notify users or admin of change
__original_admin_show = None
__original_admin_comment = None
# __original_admin_profile_status to be compared with admin_profile_status in signal
# this helps send an email on change
def __init__(self, *args, **kwargs):
super(TutorProfile, self).__init__(*args, **kwargs)
self.__original_admin_show = self.admin_show
self.__original_admin_comment = self.admin_comment
def __str__(self):
return str(self.user)
def get_full_name(self):
if self.first_name != None and self.last_name != None:
return self.first_name + ' ' + self.last_name
elif self.first_name == None:
return self.last_name
else:
return self.first_name
def get_short_name(self):
if self.first_name == None:
return self.last_name
else:
return self.first_name
def save(self, force_insert=False, force_update=False, *args, **kwargs):
# The block below triggers when admin_show or admin_comment changes:
# the current values are compared against the cached __original_* copies.
# In short: send an email whenever admin_show or admin_comment changes.
if self.admin_show != self.__original_admin_show or self.admin_comment != self.__original_admin_comment:
print('admin changed tutor status!')
tutor_email = self.user.email
tutor_slug = self.slug
first_name = self.first_name
tutor_url = f'https://homestud.co/tutor/{tutor_slug}'
tutor_review_url = f'https://homestud.co/tutor/{tutor_slug}/review'
review_comment = self.admin_comment
print(tutor_email)
# admin_show is true
# email is sent to user if admin sets self.admin_show to true
if self.admin_show is True:
print('tutor is live')
subject = 'Hurray! Your tutor profile is live on Homestud'
context = {
'tutor_url': tutor_url,
'first_name': first_name,
'tutor_review_url': tutor_review_url
}
template = 'findtutors/email/tutor-live-email.html'
# notify tutor
async_task(
'homestud.utils.notify_email',
template,
tutor_email,
subject,
context
)
elif self.admin_show is False or self.admin_comment != self.__original_admin_comment: # self.admin_show is False or self.admin_comment changes
print('tutor is offline')
subject = 'Make changes to your tutor profile'
template = 'findtutors/email/tutor-off-email.html'
context = {
'tutor_url': tutor_url,
'first_name': first_name,
'review_comment': review_comment
}
# send tutor email so they make changes to their profile
async_task(
'homestud.utils.notify_email',
template,
tutor_email,
subject,
context
)
else:
pass
# self.admin_show didn't change
# if save method is updating, self.pk doesn't return None
# then check if self.__original_admin_show == False
# __original_admin_show returns false when tutor is previously disabled and asked to make changes to their profile
# send email notification to admin for review
if self.pk is not None:
print('tutor profile updated!')
# tutor isn't live and is updating
# send admin notification for profile review
# issue!! --fix: notification email is also sent on tutor onboarding as...
# it saves the model on different page loads
if self.__original_admin_show is False and self.admin_show is False:
# variables
tutor_slug = self.slug
first_name = self.first_name
last_name = self.last_name
admin_email = 'ad.homestud@gmail.com'
tutor_url = f'https://homestud.co/tutor/{tutor_slug}'
subject = f'Review changes for {first_name} {last_name}!'
template = 'findtutors/email/notify-admin-tutor-review.html'
context = {
'first_name': first_name,
'tutor_url': tutor_url,
'last_name': last_name,
}
# tutor isn't live and is updating
# send email to admin to review changes made by tutor
async_task(
'homestud.utils.notify_email',
template,
admin_email,
subject,
context
)
else:
pass
super(TutorProfile, self).save(force_insert, force_update, *args, **kwargs)
# update the cached __original_* attributes after saving
self.__original_admin_show = self.admin_show
self.__original_admin_comment = self.admin_comment
class TutorReview(models.Model):
rater = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='rater_review', blank=True, null=True)
tutor = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='tutor_review', on_delete=models.CASCADE)
comment = models.CharField(max_length=200, blank=True, null=True)
def __str__(self):
return 'Review for ' + str(self.tutor) + ' by ' + str(self.rater)
class UserProfile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='user_profile', on_delete=models.CASCADE, blank=True, null=True)
first_name = models.CharField(max_length=50, blank=True, null=True)
last_name = models.CharField(max_length=50, blank=True, null=True)
location = models.PointField(blank=True, null=True)
address = models.CharField(max_length=100, blank=True, null=True)
avatar = models.ImageField(default='no-avatar.png', blank=True, null=True, upload_to='avatar/%Y/%m/%d/')
dob = models.DateField(null=True, blank=True)
user_type = models.CharField(max_length=50, choices=user_profile_choices, null=True, blank=True)
def __str__(self):
return str(self.user)
|
[
"appstephen8@gmail.com"
] |
appstephen8@gmail.com
|
11ade20ef6f9afb1f9205f05d1ba5b6fb7d70395
|
202eb065f4b62429a304e18f648360726846ad26
|
/posenet/model.py
|
cac82f63e0935182ea3092ece1934a7c9a970da7
|
[
"Apache-2.0"
] |
permissive
|
apollojain/PoseNet-Controller
|
21166c550e427aef21847bf23289bf3e827f4522
|
35a06b6406c84fa3de71f6c799469b343ad587f9
|
refs/heads/main
| 2023-01-31T00:50:08.664312
| 2020-12-18T03:19:22
| 2020-12-18T03:19:22
| 322,159,658
| 0
| 0
| null | 2020-12-18T03:19:23
| 2020-12-17T02:40:50
| null |
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import posenet.converter.config
MODEL_DIR = "./_models"
DEBUG_OUTPUT = False
def model_id_to_ord(model_id):
if 0 <= model_id < 4:
return model_id # id is already ordinal
elif model_id == 50:
return 0
elif model_id == 75:
return 1
elif model_id == 100:
return 2
else: # 101
return 3
def load_config(model_ord):
converter_cfg = posenet.converter.config.load_config()
checkpoints = converter_cfg["checkpoints"]
output_stride = converter_cfg["outputStride"]
checkpoint_name = checkpoints[model_ord]
model_cfg = {
"output_stride": output_stride,
"checkpoint_name": checkpoint_name,
}
return model_cfg
def load_model(model_id, sess, model_dir=MODEL_DIR):
model_ord = model_id_to_ord(model_id)
model_cfg = load_config(model_ord)
model_path = os.path.join(model_dir, "model-%s.pb" % model_cfg["checkpoint_name"])
if not os.path.exists(model_path):
print("Cannot find model file %s, converting from tfjs..." % model_path)
from posenet.converter.tfjs2python import convert
convert(model_ord, model_dir, check=False)
assert os.path.exists(model_path)
with tf.gfile.GFile(model_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name="")
if DEBUG_OUTPUT:
graph_nodes = [n for n in graph_def.node]
names = []
for t in graph_nodes:
names.append(t.name)
print("Loaded graph node:", t.name)
offsets = sess.graph.get_tensor_by_name("offset_2:0")
displacement_fwd = sess.graph.get_tensor_by_name("displacement_fwd_2:0")
displacement_bwd = sess.graph.get_tensor_by_name("displacement_bwd_2:0")
heatmaps = sess.graph.get_tensor_by_name("heatmap:0")
return model_cfg, [heatmaps, offsets, displacement_fwd, displacement_bwd]
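# Usage sketch (added for illustration, not part of the original file; assumes
# the ./_models directory and converter config described above; the input
# tensor name is an assumption -- check the converted graph):
#
#   with tf.Session() as sess:
#       model_cfg, model_outputs = load_model(101, sess)  # id 101 -> ordinal 3
#       heatmaps, offsets, disp_fwd, disp_bwd = model_outputs
#       # run inference with sess.run(model_outputs, feed_dict={'image:0': ...})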
|
[
"apollojain@gmail.com"
] |
apollojain@gmail.com
|
b68fc3d35b008cd438892eb9802aa1086d3217e6
|
f2ac32ab5b959cd96611cee08c7dabd1d1e3dc76
|
/asyncio_redis/replies.py
|
a8222c0104a3470925841d9fe450c2850beb24a2
|
[
"BSD-2-Clause-Views"
] |
permissive
|
uservidya/asyncio-redis
|
478a572083788f546ff7b75b1c1c1cfa6b494560
|
057cc5ea883769609418ee79b548e3a911d52951
|
refs/heads/master
| 2020-12-26T21:49:38.017669
| 2013-12-06T09:01:17
| 2013-12-06T09:01:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,048
|
py
|
import asyncio
from asyncio.queues import Queue
from asyncio.tasks import gather
__all__ = (
'BlockingPopReply',
'DictReply',
'ListReply',
'PubSubReply',
'SetReply',
'StatusReply',
'SubscribeReply',
'ZRangeReply',
)
class StatusReply:
"""
Wrapper for Redis status replies.
(for messages like OK, QUEUED, etc...)
"""
def __init__(self, status):
self.status = status
def __repr__(self):
return 'StatusReply(status=%r)' % self.status
def __eq__(self, other):
return self.status == other.status
class DictReply:
""" Container for a dict reply. """
def __init__(self, multibulk_reply):
self._result = multibulk_reply
def _parse(self, key, value):
return key, value
def __iter__(self):
""" Yield a list of futures that yield {key: value } tuples. """
i = iter(self._result)
@asyncio.coroutine
def getter(key_f, value_f):
""" Coroutine which processes one item. """
key, value = yield from gather(key_f, value_f)
key, value = self._parse(key, value)
return { key: value }
while True:
yield asyncio.Task(getter(next(i), next(i)))
@asyncio.coroutine
def get_as_dict(self):
"""
Return the result of a sorted set query as dictionary.
This is a mapping from the elements to their scores.
"""
result = { }
for f in self:
result.update((yield from f))
return result
@asyncio.coroutine
def get_keys_as_list(self):
""" Return the keys as a list. """
result = []
for f in self:
result += (yield from f).keys()
return result
class ZRangeReply(DictReply):
"""
Container for a zrange query result.
"""
def _parse(self, key, value):
# Mapping { key: score_as_float }
return key, float(value)
class SetReply:
"""
Redis set result.
The content can be retrieved by calling ``get_as_set`` or by
iterating over it::
for f in set_reply:
item = yield from f
print(item)
"""
def __init__(self, multibulk_reply):
self._result = multibulk_reply
def __iter__(self):
""" Yield a list of futures. """
return iter(self._result)
@asyncio.coroutine
def get_as_set(self):
""" Return the result as a Python ``set``. """
result = yield from gather(* list(self._result))
return set(result)
class ListReply:
"""
Redis list result.
The content can be retrieved by calling ``get_as_list`` or by
iterating over it::
for f in list_reply:
item = yield from f
print(item)
"""
def __init__(self, multibulk_reply):
self._result = multibulk_reply
def __iter__(self):
""" Yield a list of futures. """
return iter(self._result)
def get_as_list(self):
""" Return the result as a Python ``list``. """
return gather(* list(self._result))
class BlockingPopReply:
""" ``blpop`` or ``brpop`` reply """
def __init__(self, list_name, value):
self._list_name = list_name
self._value = value
@property
def list_name(self):
""" List name. """
return self._list_name
@property
def value(self):
""" Popped value """
return self._value
class SubscribeReply:
""" Reply to subscribe command. """
def __init__(self, channel):
self._channel = channel
@property
def channel(self):
""" Channel name. """
return self._channel
class PubSubReply:
""" Received pubsub message. """
def __init__(self, channel, value):
self._channel = channel
self._value = value
@property
def channel(self):
""" Channel name """
return self._channel
@property
def value(self):
""" Received PubSub value """
return self._value
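# Usage sketch (illustrative, not part of the original module; assumes a
# connected client from this package whose commands return these wrappers):
#
#   @asyncio.coroutine
#   def demo(protocol):
#       reply = yield from protocol.zrange('myzset', 0, -1)   # ZRangeReply
#       scores = yield from reply.get_as_dict()               # {member: score as float}
#       print(scores)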
|
[
"jonathan@slenders.be"
] |
jonathan@slenders.be
|
6f2fd8f7c21f375b0b9dcb142e212817dc597933
|
7505dbb99764fc6e0a73bc98884ecac06f096952
|
/setup.py
|
69d33e5c085a145332ce94981ce316740230ab75
|
[
"BSD-2-Clause"
] |
permissive
|
zeanzhou/django-coconuts
|
a979771c52fee94587b2c17a6d374a43befaaeb6
|
96d70eb672ff6d7500a6a28d3e28cd2e7c0ad25f
|
refs/heads/master
| 2021-05-03T21:46:06.731963
| 2016-03-28T19:09:38
| 2016-03-28T19:09:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
#!/usr/bin/env python
from distutils.core import setup
import coconuts
setup(
name = "coconuts",
version = str(coconuts.__version__),
license = coconuts.__license__,
url = coconuts.__url__,
packages = ['coconuts'],
package_data = {
'coconuts': [
'static/coconuts/css/*.css',
'static/coconuts/img/*.png',
'static/coconuts/index.html',
'static/coconuts/js/*.js',
'templates/coconuts/*.html',
]
})
|
[
"jeremy.laine@m4x.org"
] |
jeremy.laine@m4x.org
|
782e20b8f3a6e8d1a411c2bd7e7609053d62672d
|
c7fc52a0818c6dedfb3d983d04142a4efe89a917
|
/cogs/TypeKeyboard.py
|
0ac986d2bd311ab47ec13c127b1df53b8fd3f272
|
[] |
no_license
|
Mo2024/Automation-for-devices
|
257500b876d99943d509c8205d5a16310b71cee9
|
2f85dfde267ec7fd17b3adfa1ee066da442d64cb
|
refs/heads/main
| 2023-01-02T05:57:34.488128
| 2020-10-29T03:17:06
| 2020-10-29T03:17:06
| 308,185,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
import discord
from discord.ext import commands
import os
import sys
import time
from pynput.keyboard import Key, Controller
import pyautogui as pg
from discord.utils import get
from discord.ext import commands, tasks
import asyncio
keyboard = Controller()
a = "a"
class TypeKeyboard(commands.Cog):
def __init__(self, client):
self.client = client
'''@commands.Cog.listener()
async def on_message(self, message: discord.Message):
msg_dump_channel = 760182149839716423
channel = self.bot.get_channel(msg_dump_channel)
if message.guild is None and not message.author.bot:
if (len(message.attachments)>0):
await channel.send(f"Author: {message.author}\nAuthor ID: {message.author.id}\nAttachment: {message.attachments[0].url} \nContent: {message.content}")
else:
if message.guild is None and not message.author.bot:
await channel.send(f"Author: {message.author}\nAuthor ID: {message.author.id}\nContent: {message.content}")'''
@commands.command()
@commands.has_role('Idiots')
async def dm(self, ctx, *, content):
user = self.client.get_user(229948716868567040)
await user.send(content)
@commands.command()
@commands.has_role('Idiots')
async def send(self, ctx, channel: discord.TextChannel, *, content):
await channel.send(content)
@commands.command()
@commands.has_role('Idiots')
async def typethis(self, ctx, *, content):
await pg.typewrite(content)
def setup(client):
client.add_cog(TypeKeyboard(client))
|
[
"mrkvsbusiness@gmail.com"
] |
mrkvsbusiness@gmail.com
|
e1f2d66d71922425a2e94616dfa2b3a0d4eca426
|
a4d1bd83f013ec19169ff91d46d68dde432625d2
|
/webinar/migrations/0001_initial.py
|
82100c28a4cae6ab2cac8eee801ca12258f6e3c8
|
[] |
no_license
|
ravichandra99/forwebinar
|
2412843ceb88f1403a47ce3993e320dbc9587a08
|
c893e79a07b41d2187db427dbd229d5d3985cd40
|
refs/heads/master
| 2023-08-23T14:36:39.514371
| 2020-05-03T14:31:22
| 2020-05-03T14:31:22
| 260,046,654
| 0
| 0
| null | 2021-09-22T18:56:57
| 2020-04-29T21:10:09
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
# Generated by Django 3.0.5 on 2020-05-01 14:10
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='JustEdit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ref', models.CharField(max_length=4)),
('subject', models.CharField(max_length=100)),
('body', models.TextField()),
],
),
migrations.CreateModel(
name='JustUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fname', models.CharField(max_length=30, verbose_name='First Name')),
('lname', models.CharField(max_length=30, verbose_name='Last Name')),
('email', models.EmailField(max_length=254)),
('mobile', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
('college', models.CharField(max_length=30, verbose_name='College/Organization')),
('profession', models.CharField(max_length=30, verbose_name='Profession')),
],
),
]
|
[
"root@vps.server.local"
] |
root@vps.server.local
|
5d501cd10ac5d5df3387d51d427dac9f46c66dba
|
b4b2cb10ecfbe0f62615e750aad140b2e57c727d
|
/rc_board/recipes/serializers.py
|
c83bc90121be942ba6ff99a24c606f266a02a07b
|
[] |
no_license
|
apustovitin/rb
|
8694a7d5167a1c9180c175d3442222c1bda7691e
|
2150ab15e03f63e321f5acd3f98e3605e8e554bf
|
refs/heads/main
| 2023-08-28T20:03:13.603134
| 2021-11-01T00:23:32
| 2021-11-01T00:23:32
| 417,274,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
from .models import Recipe, Category
from rest_framework_nested.serializers import NestedHyperlinkedModelSerializer
from rest_framework import serializers
class RecipeSerializer(serializers.ModelSerializer):
category = serializers.StringRelatedField(many=False)
class Meta:
model = Recipe
fields = ['id', 'title', 'content', 'category', 'creation_datetime']
class CategorySerializer(serializers.ModelSerializer):
category = serializers.CharField(source='get_category_display')
class Meta:
model = Category
fields = ('id', 'category')
|
[
"apustovitin@gmail.com"
] |
apustovitin@gmail.com
|
b73cb2379dd9934ca68bad7b6138c5b5bcdc7349
|
f7ab5a345003e737f5f9b9dde01f373f1fd7a022
|
/testprintcosts.py
|
b2a77e93cdd28ece47ec564f5f9c466dfdc7d5d0
|
[] |
no_license
|
mthezeng/euler-solutions
|
697dcd2549a5c20da32ebac7fbd08a1a53a4acd4
|
70acad8f0a3e9c5427f88f470b794b3b44948744
|
refs/heads/master
| 2021-10-08T20:01:31.049403
| 2018-12-17T06:19:05
| 2018-12-17T06:19:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
from copy import deepcopy
costsfile = open('triangle.txt')
costs = []
for r in range(15):
current_row = []
for c in range(0,r+1):
seek_position = c * 3
for r_temp in range(0,r+1):
seek_position += 3 * r_temp
costsfile.seek(seek_position)
current_row.append(int(costsfile.read(2)))
costs.append(deepcopy(current_row))
print(costs)
"""
import numpy as np
costs = np.genfromtxt('triangle.txt', dtype='int')
print(costs)
#self.costs = np.genfromtxt('triangle.txt', dtype="int", delimiter=['\n', ' ']).T
return self.nodes[np.where(self.nodes[,0:2] == to_node)][2]
"""
|
[
"mzeng7@berkeley.edu"
] |
mzeng7@berkeley.edu
|
ff37c83bac67778f48ffc0df86e82157c3640b9d
|
9abc3182a7d1ea910ee0df2e7c3730bb0a20f271
|
/level_1/PYTHON/2016년.py
|
ddc0813f4544874b195ad4b7a9666a98a367da96
|
[] |
no_license
|
sw1203/Programmers
|
5449a9400972b07d1e8ca4aba4c10965138dc1eb
|
1e31876091256030a704d98d47e682f1f6139ea4
|
refs/heads/master
| 2021-05-21T14:34:39.984469
| 2020-05-18T07:48:00
| 2020-05-18T07:48:00
| 252,682,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
def solution(a, b):
answer = ''
day= ['SUN','MON','TUE','WED','THU','FRI','SAT']
total=0
month={1:31,2:29,3:31,4:30,5:31,6:30,7:31,8:31,9:30,10:31,11:30,12:31}
for i in range(1,a):
total+=month[i]
sum=(5+total%7+b%7)%7-1
answer=day[sum]
return answer
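# Worked example: solution(5, 24)
#   total = 31 + 29 + 31 + 30 = 121 days before May
#   (5 + 121 % 7 + 24 % 7) % 7 - 1 = (5 + 2 + 3) % 7 - 1 = 2  ->  day[2] == 'TUE'
# (2016-05-24 was indeed a Tuesday.)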
|
[
"tkddnjs1203@gmail.com"
] |
tkddnjs1203@gmail.com
|
900dc6da444d2558dc9604aace9f61a9bb6f7df4
|
8359d0ade9f5338259e473a53f5482307d14d12f
|
/src/core/jupiter/core/use_cases/projects/archive.py
|
c19a1db4c9e80eaee81174c36e5a65429f9db646
|
[
"MIT"
] |
permissive
|
horia141/jupiter
|
0cba46b1aee24bf73e037335709fa0fce120b097
|
5be2f692e851ce18ad65df40a6c00afc00ae86ae
|
refs/heads/master
| 2023-08-15T16:09:10.631254
| 2023-06-27T05:36:02
| 2023-06-27T05:36:02
| 240,745,415
| 16
| 2
|
MIT
| 2023-09-06T20:44:17
| 2020-02-15T16:12:06
|
Python
|
UTF-8
|
Python
| false
| false
| 4,326
|
py
|
"""Use case for archiving a project."""
from dataclasses import dataclass
from typing import Optional
from jupiter.core.domain.projects.service.archive_service import ProjectArchiveService
from jupiter.core.framework.base.entity_id import EntityId
from jupiter.core.framework.event import EventSource
from jupiter.core.framework.use_case import ContextProgressReporter, UseCaseArgsBase
from jupiter.core.use_cases.infra.use_cases import (
AppLoggedInMutationUseCase,
AppLoggedInUseCaseContext,
)
@dataclass
class ProjectArchiveArgs(UseCaseArgsBase):
"""Project archive args."""
ref_id: EntityId
backup_project_ref_id: Optional[EntityId] = None
class ProjectArchiveUseCase(AppLoggedInMutationUseCase[ProjectArchiveArgs, None]):
"""The command for archiving a project."""
async def _execute(
self,
progress_reporter: ContextProgressReporter,
context: AppLoggedInUseCaseContext,
args: ProjectArchiveArgs,
) -> None:
"""Execute the command's action."""
workspace = context.workspace
if args.backup_project_ref_id:
async with self._storage_engine.get_unit_of_work() as uow:
async with progress_reporter.start_updating_entity(
"workspace",
context.workspace.ref_id,
str(context.workspace.name),
) as entity_reporter:
if context.workspace.default_project_ref_id == args.ref_id:
workspace = workspace.change_default_project(
args.backup_project_ref_id,
EventSource.CLI,
self._time_provider.get_current_time(),
)
await uow.workspace_repository.save(workspace)
await entity_reporter.mark_local_change()
else:
await entity_reporter.mark_not_needed()
async with progress_reporter.start_updating_entity("metric collection"):
metric_collection = (
await uow.metric_collection_repository.load_by_parent(
workspace.ref_id
)
)
if metric_collection.collection_project_ref_id == args.ref_id:
await entity_reporter.mark_known_entity_id(
metric_collection.ref_id
)
metric_collection = metric_collection.change_collection_project(
args.backup_project_ref_id,
EventSource.CLI,
self._time_provider.get_current_time(),
)
await uow.metric_collection_repository.save(metric_collection)
await entity_reporter.mark_local_change()
else:
await entity_reporter.mark_not_needed()
async with progress_reporter.start_updating_entity("person collection"):
person_collection = (
await uow.person_collection_repository.load_by_parent(
workspace.ref_id
)
)
if person_collection.catch_up_project_ref_id == args.ref_id:
await entity_reporter.mark_known_entity_id(
person_collection.ref_id
)
person_collection = person_collection.change_catch_up_project(
args.backup_project_ref_id,
EventSource.CLI,
self._time_provider.get_current_time(),
)
await uow.person_collection_repository.save(person_collection)
await entity_reporter.mark_local_change()
else:
await entity_reporter.mark_not_needed()
project_archive_service = ProjectArchiveService(
EventSource.CLI, self._time_provider, self._storage_engine
)
await project_archive_service.do_it(progress_reporter, workspace, args.ref_id)
|
[
"horia141@gmail.com"
] |
horia141@gmail.com
|
b59e8590fc08aaf7a0610e13b50274819fde9257
|
02b1b3543211ebb94938b7816f370b8bd5bf6717
|
/social_handler.py
|
df405026cadf7c2a75f5ec9ae5b553b162deecb9
|
[] |
no_license
|
Jasperrr91/MQTT-PGOMAP
|
aec0233aba3c7cf3c13e8f2a6ee869e2941c5207
|
71554fb876d26fe828ca056e37e38a2d27991aca
|
refs/heads/master
| 2020-09-25T06:12:46.396790
| 2016-08-23T20:58:10
| 2016-08-23T20:58:10
| 66,389,479
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
import paho.mqtt.client as mqtt
import pokemon_list
import mysql.connector
from mysql.connector import IntegrityError
from datetime import datetime, timedelta
import config
cnx = mysql.connector.connect(user=config.db['user'], password=config.db['password'],
host=config.db['host'],
database=config.db['database'])
cursor = cnx.cursor()
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):  # paho-mqtt passes (client, userdata, flags, rc)
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("$SYS/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
if(msg.topic.startswith('pgomapcatch/all/catchable/')):
topic = msg.topic.split('pgomapcatch/all/catchable/')
pid = int(topic[1])
process_pokemon(pid, msg)
elif(msg.topic.startswith('pgomapgeo')):
topic = msg.topic.split('/')
pid = int(topic[2])
process_pokemon(pid, msg)
def process_pokemon(pid, msg):
pokemon_id = pid - 1
pokemon_name = pokemon_list.pokemon[pokemon_id]
data = msg.payload.split(',')
current_time = datetime.now() + timedelta(minutes=15)
time_string = current_time.strftime("%Y-%m-%d %H:%M:%S")
pokemon = (data[2], '', pokemon_id, data[0], data[1], time_string)
insert_pokemon(pokemon)
print("[" + time_string + "] " + pokemon_name + " found at " + data[0] + ", " + data[1])
def insert_pokemon(pokemon):
add_pokemon = ("INSERT INTO pokemon "
"(encounter_id, spawnpoint_id, pokemon_id, latitude, longitude, disappear_time) "
"VALUES (%s, %s, %s, %s, %s, %s)")
try:
cursor.execute(add_pokemon, pokemon)
except IntegrityError as e:
print "Duplicate"
cnx.commit()
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("test.mosca.io", 1883, 60)
client.subscribe("pgomapcatch/#", 0)
client.subscribe("pgomapgeo/#", 0)
client.loop_forever()
|
[
"jasper@vanderstoop.nl"
] |
jasper@vanderstoop.nl
|
66c416586d76d19c4316bbce007159a8d3fe8132
|
6c3cb408402f271ed9b02f19c45f3eee18a639e1
|
/alpheus/specifiedfields (copy).py
|
e8e8a045d31a3034f929bbb166acdc2ea729563a
|
[] |
no_license
|
dabhanddevelopments/Alpheus
|
9fd01bdda153f2609b6b1013c05cd2ab364e2b8f
|
87b7bf3400a4d3982d5a07cf2e8a5b15d141e7f7
|
refs/heads/master
| 2016-09-06T19:32:28.640060
| 2014-08-04T07:00:32
| 2014-08-04T07:00:32
| 22,905,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,153
|
py
|
from tastypie.resources import ModelResource
class SpecifiedFields(ModelResource):
def build_filters(self, filters=None):
self.filters = filters
return super(SpecifiedFields, self).build_filters(filters)
def get_object_list(self, request):
self.specified_fields = []
objects = super(SpecifiedFields, self).get_object_list(request)
distinct = request.GET.get('distinct', False) == 'true'
fields = request.GET.get("fields", False)
if not fields:
return objects
try:
self.specified_fields = fields.split(',')
except:
self.specified_fields.append(fields)
#
has_m2m = False
for field in self.filters:
try:
related = objects.model._meta.get_field_by_name(field)[0]
except:
related = False
if related and related.get_internal_type() == 'ManyToManyField':
has_m2m = True
only_fields = []
select_related = []
self.prefetch_related = []
for specified_field in self.specified_fields:
try:
fields = specified_field.split('__')
except:
continue
# Only adds fields that exist for this model
# excluding model methods
for meta_field in objects.model._meta.fields:
if meta_field.name == fields[0]:
only_fields.append(specified_field)
# Set `select_related` and `prefetch_related` for related fields
if len(fields) > 1:
try:
related = objects.model._meta.get_field_by_name(fields[0])[0]
except:
related = False
if related:
if related.get_internal_type() == 'ManyToManyField':
self.prefetch_related.append(fields[0])
elif related.get_internal_type() == 'ForeignKey':
select_related.append(fields[0])
if len(only_fields):
objects = objects.only(*only_fields)
if len(self._meta.excludes):
objects = objects.defer(*self._meta.excludes)
if len(self.prefetch_related):
objects = objects.prefetch_related(*self.prefetch_related)
if len(select_related):
objects = objects.select_related(*select_related)
if (has_m2m and not distinct) or distinct:
objects = objects.distinct()
#assert False
return objects
def full_dehydrate(self, bundle, for_list=False):
"""
This override disables `full=True` and other things we don't use
"""
if not len(self.specified_fields):
return super(SpecifiedFields, self).full_dehydrate(bundle, for_list)
# Dehydrate each field supplied in the `fields` parameter
for field_name, field_object in self.fields.items():
# A touch leaky but it makes URI resolution work.
if getattr(field_object, 'dehydrated_type', None) == 'related':
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
def dehydrate(self, bundle):
# Dehydrate each field including related ones
for row in self.specified_fields:
f = row.split('__')
if len(f) == 1:
try:
bundle.data[row] = getattr(bundle.obj, f[0])()
except:
bundle.data[row] = getattr(bundle.obj, f[0])
elif len(f) == 2:
try:
for m2m in getattr(bundle.obj, f[0]).all():
if m2m.pk == bundle.obj.id:
if f[0] not in bundle.data:
bundle.data[f[0]] = {}
bundle.data[f[0]][f[1]] = getattr(m2m, f[1])
#bundle.obj.author.get(book=bundle.obj.id).first_name
#bundle.data[row] = getattr(getattr(bundle.obj, fields[0]).get(**kwargs), fields[1])
except:
try:
bundle.data[row] = getattr(getattr(bundle.obj, f[0]), f[1])
#bundle.data[fields[0]] = bundle.obj
except:
pass
elif len(f) == 3:
try:
for m2m in getattr(bundle.obj, f[0]).all():
if m2m.pk == bundle.obj.id:
if f[0] not in bundle.data:
bundle.data[f[0]] = {}
bundle.data[f[0]][f[1]] = getattr(m2m, f[1])
except:
try:
bundle.data[row] = getattr(getattr(getattr(bundle.obj, \
f[0]), f[1]), f[2])
except:
raise Exception("'%s' not found." % row)
#bundle.data[row] = reduce(getattr, fields, bundle.obj)
# display actual values for `choices` fields
method = getattr(bundle.obj, "get_%s_display" % f[0], False)
if method:
bundle.data[f[0]] = method()
return bundle
|
[
"dan@dan.(none)"
] |
dan@dan.(none)
|
3da880d650ed11d210e00953cd7fdd86eb2f5f95
|
fd487f0396c3d56913a67b3e0cc8e716b36b22f3
|
/localized_fields/fields/__init__.py
|
c8f8f18b753d7656b6876b24fa2ae5dea0d09f15
|
[
"MIT"
] |
permissive
|
si14/django-localized-fields
|
01764f345153fc8ce897a6a95b110fb64d1a95cd
|
d507cbfe7551fedcff6e090cac2e36861dec08ba
|
refs/heads/master
| 2021-01-11T17:43:24.307852
| 2017-01-12T14:56:34
| 2017-01-12T14:56:34
| 79,824,497
| 0
| 0
| null | 2017-01-23T16:43:33
| 2017-01-23T16:43:33
| null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
from .localized_field import LocalizedField
from .localized_value import LocalizedValue
from .localized_autoslug_field import LocalizedAutoSlugField
from .localized_bleach_field import LocalizedBleachField
__all__ = [
'LocalizedField',
'LocalizedValue',
'LocalizedAutoSlugField',
'LocalizedBleachField'
]
|
[
"swen@sectorlabs.ro"
] |
swen@sectorlabs.ro
|
ef1217e108b7cc9d0f8e8462cddecfe08abff2f2
|
d134b0503d05955e52c62762f67f0e7add792213
|
/server.py
|
a8c5ef762e78163046fdb8b0aec2defbdc95e593
|
[] |
no_license
|
TinTran710/trie
|
22cd148d938bc3a83f474ff0841abddec87136d7
|
d03e2125f4d8444dba7662718239321cdef3bf26
|
refs/heads/master
| 2020-03-22T20:20:01.900579
| 2018-07-11T18:12:43
| 2018-07-11T18:12:43
| 140,589,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
from flask import Flask, render_template, request
from src.NameCrawler import NameCrawler
from src.TrieSuggester import TrieSuggester
import time
app = Flask(__name__)
nameCrawler = NameCrawler()
names = getattr(nameCrawler, 'names')
namesLower = getattr(nameCrawler, 'namesLower')
root = TrieSuggester()
root.insert(namesLower)
@app.route('/trie/index')
def runCrawler():
return render_template('index.html', namesLower=namesLower)
@app.route('/trie/search', methods = ['POST'])
def search():
if request.method == 'POST':
prefix = request.form['prefix'].lower()
startTime1 = time.time()
lowerCaseMatch1 = root.search(prefix)
elapsedTime1 = time.time() - startTime1
result1 = getCaseSensitiveResult(lowerCaseMatch1)
startTime2 = time.time()
lowerCaseMatch2 = alternateSearch(prefix)
elapsedTime2 = time.time() - startTime2
result2 = getCaseSensitiveResult(lowerCaseMatch2)
return render_template('index.html', result1=result1, elapsedTime1=elapsedTime1, result2=result2, elapsedTime2=elapsedTime2)
def alternateSearch(prefix):
result = []
for string in namesLower:
if string.startswith(prefix):
result.append(string)
return result
def getCaseSensitiveResult(lowerCaseMatch):
result = []
for string in lowerCaseMatch:
for i in range(0, len(names)):
if string == names[i].lower():
result.append(names[i])
return result
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=4600)
|
[
"tranquangtin710@gmail.com"
] |
tranquangtin710@gmail.com
|
a6bb80935308a3756e5b00c5bfb9bea54e12ff4f
|
d2be9079efb05b664cc0ee99f926684d5cb1994b
|
/core/migrations/0006_auto_20200907_1653.py
|
d6f77c35a818852ec7f21d7dc29291acf95d242b
|
[] |
no_license
|
ssamiran472/forumsite
|
01cc05d8644df9f496f1f41869c1dee89508f9dc
|
f16aa3f671a8a060ca36fe3e84bbb54c05435704
|
refs/heads/master
| 2022-12-13T01:48:00.073604
| 2020-09-14T17:10:46
| 2020-09-14T17:10:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# Generated by Django 3.0.9 on 2020-09-07 13:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0005_article_origin_content'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='origin_content',
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parentArticle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.TranslatedArticle')),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parentArticle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.TranslatedArticle')),
],
),
]
|
[
"solomin12q@gmail.com"
] |
solomin12q@gmail.com
|
2299e59e4bcf97ec1a169b467c901784973a81a5
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Sklearn_scipy_numpy/source/scipy/integrate/tests/test_banded_ode_solvers.py
|
01e2fd7abbc3124e2398546cd351b7818ffbfe2c
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,863
|
py
|
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import run_module_suite, assert_allclose
from scipy.integrate import ode
def _band_count(a):
"""Returns ml and mu, the lower and upper band sizes of a."""
nrows, ncols = a.shape
ml = 0
for k in range(-nrows+1, 0):
if np.diag(a, k).any():
ml = -k
break
mu = 0
for k in range(nrows-1, 0, -1):
if np.diag(a, k).any():
mu = k
break
return ml, mu
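# Illustrative example: a tridiagonal matrix has one sub- and one
# super-diagonal, so the band sizes are (ml, mu) == (1, 1):
#   _band_count(np.array([[1., 2., 0.], [3., 4., 5.], [0., 6., 7.]]))  ->  (1, 1)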
def _linear_func(t, y, a):
"""Linear system dy/dt = a * y"""
return a.dot(y)
def _linear_jac(t, y, a):
"""Jacobian of a * y is a."""
return a
def _linear_banded_jac(t, y, a):
"""Banded Jacobian."""
ml, mu = _band_count(a)
bjac = []
for k in range(mu, 0, -1):
bjac.append(np.r_[[0] * k, np.diag(a, k)])
bjac.append(np.diag(a))
for k in range(-1, -ml-1, -1):
bjac.append(np.r_[np.diag(a, k), [0] * (-k)])
return bjac
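# Illustrative example: for the tridiagonal matrix above (ml = 1, mu = 1) this
# returns rows [[0, a01, a12], [a00, a11, a22], [a10, a21, 0]] -- row k holds
# diagonal (mu - k), zero-padded to the matrix width, which is the packed
# banded layout the solvers below consume.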
def _solve_linear_sys(a, y0, tend=1, dt=0.1,
solver=None, method='bdf', use_jac=True,
with_jacobian=False, banded=False):
"""Use scipy.integrate.ode to solve a linear system of ODEs.
a : square ndarray
Matrix of the linear system to be solved.
y0 : ndarray
Initial condition
tend : float
Stop time.
dt : float
Step size of the output.
solver : str
If not None, this must be "vode", "lsoda" or "zvode".
method : str
Either "bdf" or "adams".
use_jac : bool
Determines if the jacobian function is passed to ode().
with_jacobian : bool
Passed to ode.set_integrator().
banded : bool
Determines whether a banded or full jacobian is used.
If `banded` is True, `lband` and `uband` are determined by the
values in `a`.
"""
if banded:
lband, uband = _band_count(a)
else:
lband = None
uband = None
if use_jac:
if banded:
r = ode(_linear_func, _linear_banded_jac)
else:
r = ode(_linear_func, _linear_jac)
else:
r = ode(_linear_func)
if solver is None:
if np.iscomplexobj(a):
solver = "zvode"
else:
solver = "vode"
r.set_integrator(solver,
with_jacobian=with_jacobian,
method=method,
lband=lband, uband=uband,
rtol=1e-9, atol=1e-10,
)
t0 = 0
r.set_initial_value(y0, t0)
r.set_f_params(a)
r.set_jac_params(a)
t = [t0]
y = [y0]
while r.successful() and r.t < tend:
r.integrate(r.t + dt)
t.append(r.t)
y.append(r.y)
t = np.array(t)
y = np.array(y)
return t, y
def _analytical_solution(a, y0, t):
"""
Analytical solution to the linear differential equations dy/dt = a*y.
The solution is only valid if `a` is diagonalizable.
Returns a 2-d array with shape (len(t), len(y0)).
"""
lam, v = np.linalg.eig(a)
c = np.linalg.solve(v, y0)
e = c * np.exp(lam * t.reshape(-1, 1))
sol = e.dot(v.T)
return sol
def test_banded_ode_solvers():
# Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
# with a system that has a banded Jacobian matrix.
t_exact = np.linspace(0, 1.0, 5)
# --- Real arrays for testing the "lsoda" and "vode" solvers ---
# lband = 2, uband = 1:
a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
[0.2, -0.5, 0.9, 0.0, 0.0],
[0.1, 0.1, -0.4, 0.1, 0.0],
[0.0, 0.3, -0.1, -0.9, -0.3],
[0.0, 0.0, 0.1, 0.1, -0.7]])
# lband = 0, uband = 1:
a_real_upper = np.triu(a_real)
# lband = 2, uband = 0:
a_real_lower = np.tril(a_real)
# lband = 0, uband = 0:
a_real_diag = np.triu(a_real_lower)
real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
real_solutions = []
for a in real_matrices:
y0 = np.arange(1, a.shape[0] + 1)
y_exact = _analytical_solution(a, y0, t_exact)
real_solutions.append((y0, t_exact, y_exact))
def check_real(idx, solver, meth, use_jac, with_jac, banded):
a = real_matrices[idx]
y0, t_exact, y_exact = real_solutions[idx]
t, y = _solve_linear_sys(a, y0,
tend=t_exact[-1],
dt=t_exact[1] - t_exact[0],
solver=solver,
method=meth,
use_jac=use_jac,
with_jacobian=with_jac,
banded=banded)
assert_allclose(t, t_exact)
assert_allclose(y, y_exact)
for idx in range(len(real_matrices)):
p = [['vode', 'lsoda'], # solver
['bdf', 'adams'], # method
[False, True], # use_jac
[False, True], # with_jacobian
[False, True]] # banded
for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
yield check_real, idx, solver, meth, use_jac, with_jac, banded
# --- Complex arrays for testing the "zvode" solver ---
# complex, lband = 2, uband = 1:
a_complex = a_real - 0.5j * a_real
# complex, lband = 0, uband = 0:
a_complex_diag = np.diag(np.diag(a_complex))
complex_matrices = [a_complex, a_complex_diag]
complex_solutions = []
for a in complex_matrices:
y0 = np.arange(1, a.shape[0] + 1) + 1j
y_exact = _analytical_solution(a, y0, t_exact)
complex_solutions.append((y0, t_exact, y_exact))
def check_complex(idx, solver, meth, use_jac, with_jac, banded):
a = complex_matrices[idx]
y0, t_exact, y_exact = complex_solutions[idx]
t, y = _solve_linear_sys(a, y0,
tend=t_exact[-1],
dt=t_exact[1] - t_exact[0],
solver=solver,
method=meth,
use_jac=use_jac,
with_jacobian=with_jac,
banded=banded)
assert_allclose(t, t_exact)
assert_allclose(y, y_exact)
for idx in range(len(complex_matrices)):
p = [['bdf', 'adams'], # method
[False, True], # use_jac
[False, True], # with_jacobian
[False, True]] # banded
for meth, use_jac, with_jac, banded in itertools.product(*p):
yield check_complex, idx, "zvode", meth, use_jac, with_jac, banded
if __name__ == "__main__":
run_module_suite()
|
[
"master@MacBook-Pro-admin.local"
] |
master@MacBook-Pro-admin.local
|
92de662edb25f1b512a0338ae8d0fa44e437a5c0
|
425ce80b456b0563ae4c2da3033ec72a5b86be50
|
/game/gamewithGUI.py
|
9b8b9335ec654bd24d2dab272907c2f40f47f34c
|
[] |
no_license
|
bolt41/gitBolt
|
c95aab1ee2e6493274355c11f4a12cb86f5c26bd
|
afd5afe8fc1d4b2b5081faee3bec9edc99b3ea56
|
refs/heads/master
| 2021-07-25T20:07:32.872681
| 2021-01-25T20:05:56
| 2021-01-25T20:05:56
| 242,374,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,350
|
py
|
'''TODO:
ADD A CHECK FOR AN EMPTY INPUT FIELD'''
import random
from tkinter import *
from tkinter import scrolledtext
from tkinter import messagebox
def gen(): # generates a new random number
global gen_number # global value of the secret number
gen_number = random.randint(0,1000) # pick a random number
btn['state'] = 'normal' # re-enable the "check" button
history.delete(1.0, END) # clear the whole history from the scrolledtext
def clicked(): # handler for the "check" button
input_number = int(txt.get()) # read the entered number
status = check_number(input_number, gen_number) # pass it to the check function
history.insert(INSERT, txt.get() + ' - ' + status +'\n') # append a record to the scrolledtext
txt.delete(0,END) # clear the input field
def check_number(input_number, gen_number): # the actual check
if input_number == gen_number:
btn['state'] = 'disabled' # number guessed: disable the "check" button
messagebox.showinfo('Hooray!!!', 'You guessed the number!')
return 'Number guessed, congratulations!'
elif input_number > gen_number:
return 'The entered number is greater than the secret one'
else:
return 'The entered number is less than the secret one'
# Create the program window and set it up
window = Tk()
window.resizable(0,0)
window.title("'Guess the Number' game")
w = window.winfo_screenwidth() # screen width in pixels (winfo_screenmmwidth returns millimetres)
h = window.winfo_screenheight()
window.geometry(f'358x260+{w}+{h}')
lbl = Label(window, text="Enter a number:")
lbl.place(x=10, y=5)
txt = Entry(window,width=10)
txt.place(x=110, y=5)
btn = Button(window, text="Check", command = clicked)
btn.place(x=200, y=2)
btn2 = Button(window, text="Generate a new number", command = gen)
btn2.place(x=10, y=220)
history = scrolledtext.ScrolledText(window, width=40, height=10)
history.place(x=10, y=40)
gen()
window.mainloop()
|
[
"bolt1986@bk.ru"
] |
bolt1986@bk.ru
|
dd83d96cef6fd32fd984cd573aa08a5303e786a7
|
4061fb23140589c74ef67b0345426a9231bc1c81
|
/api/v1/views/places_reviews.py
|
effa5ba8a11b312464f81a74521b44c06c94e390
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
bmuha1/AirBnB_clone_v3
|
940ebe5b70be8cd680366dfe8cad1ddffb7e604d
|
05b0cdd0d626b58e70527a60f5ad7ee0531f49b2
|
refs/heads/master
| 2023-03-16T04:39:11.951006
| 2020-01-31T07:13:53
| 2020-01-31T07:13:53
| 236,544,851
| 1
| 0
| null | 2020-01-29T23:47:32
| 2020-01-27T17:08:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
#!/usr/bin/python3
"""
New view for Review objects that handles default Restful API actions
"""
from flask import Flask, jsonify, abort, request
from api.v1.views import app_views
from models import storage
from models.review import Review
@app_views.route('/api/v1/places/<place_id>/reviews', strict_slashes=False)
def all_reviews(place_id):
""" retrieve list of all Review objects """
all_reviews = []
if not storage.get('Place', place_id):
abort(404)
for review in storage.all('Review').values():
if place_id == review.to_dict()['place_id']:
all_reviews.append(review.to_dict())
return jsonify(all_reviews)
@app_views.route('/api/v1/reviews/<review_id>', strict_slashes=False)
def retrieve_review(review_id):
""" retrieve a particular Review """
review = storage.get('Review', review_id)
if review:
return review.to_dict()
abort(404)
@app_views.route('/api/v1/reviews/<review_id>', methods=['DELETE'],
strict_slashes=False)
def delete_review(review_id):
""" delete a Review """
review = storage.get('Review', review_id)
if review:
storage.delete(review)
storage.save()
return {}
abort(404)
@app_views.route('/api/v1/places/<place_id>/reviews', methods=['POST'],
strict_slashes=False)
def create_review(place_id):
""" create a Review """
review_name = request.get_json()
if not storage.get('Place', place_id):
abort(404)
if not review_name:
abort(400, {'Not a JSON'})
elif 'user_id' not in review_name:
abort(400, {'Missing user_id'})
elif not storage.get('User', review_name['user_id']):
abort(404)
elif 'text' not in review_name:
abort(400, {'Missing text'})
review_name['place_id'] = place_id
new_review = Review(**review_name)
storage.new(new_review)
storage.save()
return new_review.to_dict(), 201
@app_views.route('/api/v1/reviews/<review_id>', methods=['PUT'],
strict_slashes=False)
def update_review(review_id):
""" update a Review """
update_attr = request.get_json()
if not update_attr:
abort(400, {'Not a JSON'})
my_review = storage.get('Review', review_id)
if not my_review:
abort(404)
for key, value in update_attr.items():
if key not in ['id', 'user_id', 'place_id', 'created_at',
'updated_at']:
setattr(my_review, key, value)
storage.save()
return my_review.to_dict()
|
[
"800@holbertonschool.com"
] |
800@holbertonschool.com
|
93ea9150587b48b5870e355d79a91891172ccd7b
|
e90f9eaadcd099f1d4b30504702df59ef0ff63db
|
/build/algorithms/localization/catkin_generated/pkg.installspace.context.pc.py
|
586c5f37eb42b20c59c6d3d9fc67b395bbb24732
|
[] |
no_license
|
SiChiTong/f110_ws
|
ce1e7b8408af645a9d09d8298933253e9810745f
|
a44b77f58527fabd4b2b2905132c6651e102134f
|
refs/heads/master
| 2020-04-02T12:00:55.998831
| 2018-07-24T18:41:31
| 2018-07-24T18:41:31
| 154,416,188
| 1
| 0
| null | 2018-10-24T00:49:29
| 2018-10-24T00:49:29
| null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "localization"
PROJECT_SPACE_DIR = "/home/nvidia/Desktop/f110_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"chriskao@seas.upenn.edu"
] |
chriskao@seas.upenn.edu
|
29f5560d44f15ef8cc4279933e0fcf158485d5ca
|
663dacd96b6d7d126cdde5c3114b8a15d8b305b5
|
/back_up_wiki_db.py
|
7e1ae3035a604dabacdc88b73c18db12381b4895
|
[] |
no_license
|
HugoJBello/python-scripts
|
8ffed08eb04459e9cbd6310eeee1e770ba4ed0c1
|
ee4cf934f670dec33aa03ce5be115576865646eb
|
refs/heads/master
| 2021-01-22T10:46:08.882064
| 2018-09-29T18:23:17
| 2018-09-29T18:23:17
| 92,653,790
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
# you need the mysql python adapter:
# pip install mysqlclient-1.3.12-cp36-cp36m-win32.whl
# https://pypi.python.org/pypi/MySQL-python/
import MySQLdb
import base64
def connect_db():
return MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="", # your password
db="my_wiki") # name of the data base
def extract_pages(db):
cur_pages = db.cursor()
cur_pages.execute('select CONVERT(p.page_title USING utf8), CONVERT(t.old_text USING utf8) FROM text t, revision r, page p WHERE p.page_latest = r.rev_id AND t.old_id =r.rev_text_id')
return cur_pages.fetchall()
def clean_filename(text):
return "".join(x for x in text if (x.isalnum() or x in "._- "))
class WikiPage:
title=''
text=''
date=''
def save_file(wiki_page):
file_name = str(wiki_page.title) + '.txt'
file_name = clean_filename(file_name)
with open(file_name, 'w') as text_file: # close the file once the page is written
text_file.write(str(wiki_page.text))
def main():
db=connect_db()
for row in extract_pages(db):
wiki_page = WikiPage()
wiki_page.title = row[0]
wiki_page.text=row[1]
save_file(wiki_page)
db.close()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
HugoJBello.noreply@github.com
|
352fcb7c9f58a521bb5d26570b6dcfd4ba20d69b
|
d7b68e008c51d92d0e2b955167a8bc8198a36f56
|
/Python/CalculoIDFT.py
|
810f18fa308b739b775e96059f4a151b3e5426e6
|
[] |
no_license
|
BouCode/-PDS-Transformada-Discreta-de-Fourier
|
1f707fce63a57646f146142edbc09f0a706c0409
|
e9b723e2a00b0aa43127109956553ef77ab71b5c
|
refs/heads/master
| 2022-12-09T14:15:06.938178
| 2020-09-16T21:08:49
| 2020-09-16T21:08:49
| 296,142,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
import idft_01
import dft_01
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
x = np.array ([7, 4, 3, 9, 0, 1, 5, 2])
X = dft_01.dft (x)
XReal = np.real (X)
XImag = np.imag (X)
print (f'X: \n{X}')
x_2 = idft_01.idft (X)
x_2Real = np.real (x_2)
x_2Imag = np.imag (x_2)
print (f'IDFT: \n{x_2}')
plt.figure (1)
plt.plot (XReal)
plt.figure (2)
plt.plot (XImag)
plt.figure (3)
plt.plot (x_2Real)
plt.title ('Real part of the IDFT of X')
print (f" x_2 real part: {x_2Real}")
plt.figure (4)
plt.title ('Imaginary part of the IDFT of X')
plt.plot (x_2Imag)
print (f"x_2 imaginary part: {x_2Imag}")
plt.show()
|
[
"joan.boulangger@gmail.com"
] |
joan.boulangger@gmail.com
|
72ffe935ac5c6d6ec8939aee95d84dc64d6e398e
|
bcd495a7b05233435e6c3e2aa6e8c50321feaa72
|
/RNN/TFRecordMaker.py
|
3cfdb8241c5fc3400798e84f1a51a9bc9dcee38a
|
[
"Apache-2.0"
] |
permissive
|
MProtoss/Model4TF2
|
abda4671e0c3461bd4600bdd5f5fad24fa1bc943
|
02b0fd2bf7fc346a347cdf5e04dbe8abd61be743
|
refs/heads/master
| 2020-12-10T11:42:08.537511
| 2020-03-30T09:55:54
| 2020-03-30T09:55:54
| 233,584,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
import os
import tensorflow as tf
import sys
import numpy as np
def generate_tfrecords(data_fileneme, tfrecod_filename):
fin = open(data_fileneme, "r")
line = fin.readline()
with tf.io.TFRecordWriter(tfrecod_filename) as f:
while line:
tokens = line.strip().split(' ')
label = int(tokens[0])
input_x = tokens[1:]
for i in range(len(input_x)):
input_x[i] = int(input_x[i])
feature = {
'raw_data': tf.train.Feature(int64_list=tf.train.Int64List(value=input_x)),
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
}
example = tf.train.Example(
features=tf.train.Features(feature=feature)
)
f.write(example.SerializeToString())
line = fin.readline()
f.close()
fin.close()
def _parse_function(example_string):
feature_description = {
'raw_data': tf.io.FixedLenFeature([10], tf.int64),
'label': tf.io.FixedLenFeature([], tf.int64)
}
# parse_single_example is the idiomatic parser for plain (non-sequence) Examples
feature_dict = tf.io.parse_single_example(example_string, feature_description)
raw_data = feature_dict['raw_data']
label = feature_dict['label']
return raw_data, label
def read_tfrecords(tfrecod_filename):
raw_dataset = tf.data.TFRecordDataset(tfrecod_filename)
parsed_dataset = raw_dataset.map(_parse_function)
return parsed_dataset
if __name__ == "__main__":
generate_tfrecords(sys.argv[1], sys.argv[2])
#read_tfrecords(sys.argv[2])
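# Usage sketch (illustrative; the file names are hypothetical):
#   python TFRecordMaker.py data.txt data.tfrecord
# and to read the records back in TF2 eager mode:
#   for raw_data, label in read_tfrecords("data.tfrecord"):
#       print(raw_data.numpy(), label.numpy())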
|
[
"mprotoss@outlook.com"
] |
mprotoss@outlook.com
|
f8b306141123347a6c62aa14dd2293dbf65787ef
|
6dfb7fe44b6c5bfb7feb5a101656e3d3402a621f
|
/simulator/simp_py/comic24.py
|
2e637423a88beedde004443a919170cec7bf5ef8
|
[
"MIT"
] |
permissive
|
kcfkwok2003/Simp_py
|
11d6813fac83ab6309eb8efc22fcd8edde5b19b8
|
f75e66da01b45dc8688dda602f8b33d4258f0c31
|
refs/heads/master
| 2021-05-11T00:36:36.872754
| 2018-12-19T01:41:15
| 2018-12-19T01:41:15
| 118,306,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,577
|
py
|
# This comes with no warranty, implied or otherwise
# This data structure was designed to support Proportional fonts
# on Arduinos. It can however handle any ttf font that has been converted
# using the conversion program. These could be fixed width or proportional
# fonts. Individual characters do not have to be multiples of 8 bits wide.
# Any width is fine and does not need to be fixed.
# The data bits are packed to minimize data requirements, but the tradeoff
# is that a header is required per character.
# comic.c
# Point Size : 24
# Memory usage : 2814 bytes
# # characters : 95
# Header Format (to make Arduino UTFT Compatible):
# ------------------------------------------------
# Character Width (Used as a marker to indicate use this format. i.e.: = 0x00)
# Character Height
# First Character (Reserved. 0x00)
# Number Of Characters (Reserved. 0x00)
tft_Comic24 = [
0x00, 0x19, 0x00, 0x00,
# Individual Character Format:
# ----------------------------
# Character Code
# Adjusted Y Offset
# Width
# Height
# xOffset
# xDelta (the distance to move the cursor. Effective width of the character.)
# Data[n]
# NOTE: You can remove any of these characters if they are not needed in
# your application. The first character number in each Glyph indicates
# the ASCII character code. Therefore, these do not have to be sequential.
# Just remove all the content for a particular character to save space.
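# Worked example, decoding the '!' glyph below: 0x21 ('!'), yOffset=0x02,
# width=0x02, height=0x14 (20 rows), xOffset=0x01, xDelta=0x06, followed by
# ceil(2*20/8) = 5 packed data bytes (0xFF,0xFF,0xFF,0xFC,0x2D).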
# ' '
0x20,0x15,0x00,0x00,0x00,0x07,
# '!'
0x21,0x02,0x02,0x14,0x01,0x06,
0xFF,0xFF,0xFF,0xFC,0x2D,
# '"'
0x22,0x03,0x06,0x08,0x02,0x0A,
0xCF,0x3C,0xF3,0xCF,0x3C,0xF3,
# '#'
0x23,0x03,0x14,0x12,0x01,0x14,
0x01,0x81,0x80,0x18,0x18,0x01,0x81,0x80,0x30,0x30,0x03,0x03,0x07,0xFF,0xFF,0x7F,0xFF,0xF0,0x60,0x60,0x06,0x06,0x00,0xC0,0xC0,0x0C,0x0C,0x0F,0xFF,0xFE,0xFF,0xFF,0xE1,0x81,0x80,0x18,0x18,0x03,0x83,0x00,0x30,0x30,0x03,0x03,0x00,
# '$'
0x24,0x00,0x0B,0x19,0x02,0x11,
0x0C,0x01,0x80,0x30,0x0F,0x83,0xFC,0xD9,0xBB,0x06,0x60,0xCC,0x19,0x83,0xB0,0x3F,0x83,0xFC,0x1B,0x83,0x18,0x63,0x0C,0x71,0x9F,0x37,0x7F,0xC3,0xF0,0x18,0x03,0x00,0x60,0x0C,0x00,
# '%'
0x25,0x01,0x11,0x14,0x02,0x14,
0x00,0x00,0x00,0x0C,0x0E,0x0E,0x0F,0x86,0x0C,0x67,0x06,0x33,0x03,0x19,0x80,0xF9,0x80,0x38,0xC0,0x00,0xE0,0x00,0x60,0x00,0x70,0x00,0x31,0xE0,0x39,0xF8,0x19,0xCE,0x1C,0xC3,0x0C,0x61,0x86,0x39,0xC6,0x0F,0xC3,0x03,0xC0,
# '&'
0x26,0x03,0x0F,0x13,0x01,0x10,
0x01,0xC0,0x07,0xC0,0x19,0x80,0x33,0x00,0x6E,0x00,0xF8,0x01,0xE0,0x07,0x80,0x1F,0x8C,0x73,0x19,0xC3,0x37,0x07,0xEC,0x07,0xD8,0x07,0x30,0x0E,0x38,0x7E,0x3F,0xEC,0x3F,0x0C,0x00,0x18,
# '''
0x27,0x03,0x02,0x06,0x03,0x09,
0xFF,0xF0,
# '('
0x28,0x02,0x07,0x18,0x01,0x09,
0x06,0x1C,0x71,0xC3,0x0E,0x18,0x30,0xE1,0x83,0x06,0x0C,0x18,0x30,0x60,0xE0,0xC1,0x83,0x83,0x83,0x87,0x83,
# ')'
0x29,0x02,0x06,0x18,0x02,0x09,
0xC3,0x86,0x0C,0x30,0x61,0x86,0x0C,0x30,0xC3,0x0C,0x30,0xC3,0x0C,0x61,0x86,0x31,0xCE,0x30,
# '*'
0x2A,0x03,0x0B,0x09,0x01,0x0D,
0x0C,0x01,0x83,0xBF,0xFF,0xF3,0xFC,0x3C,0x0F,0xC3,0x9C,0x61,0x80,
# '+'
0x2B,0x09,0x0A,0x0A,0x00,0x0C,
0x0C,0x03,0x00,0xC0,0x30,0xFF,0xFF,0xF0,0xC0,0x30,0x0C,0x03,0x00,
# ','
0x2C,0x13,0x04,0x06,0x02,0x07,
0x37,0x66,0xEC,
# '-'
0x2D,0x0E,0x08,0x02,0x01,0x0A,
0xFF,0xFF,
# '.'
0x2E,0x12,0x03,0x03,0x02,0x06,
0xFF,0x80,
# '/'
0x2F,0x01,0x0A,0x15,0x01,0x0C,
0x00,0x00,0x30,0x0C,0x06,0x01,0x80,0x60,0x30,0x0C,0x06,0x01,0x80,0xC0,0x30,0x18,0x06,0x03,0x00,0xC0,0x60,0x18,0x0E,0x03,0x00,0xC0,0x00,
# '0'
0x30,0x03,0x0D,0x12,0x01,0x0F,
0x0F,0x80,0xFF,0x0E,0x18,0xE0,0x66,0x03,0x70,0x0F,0x00,0x78,0x03,0xC0,0x1E,0x00,0xF0,0x07,0x80,0x3C,0x03,0xB0,0x19,0x81,0xC7,0x1C,0x3F,0xC0,0x7C,0x00,
# '1'
0x31,0x03,0x06,0x12,0x03,0x0B,
0x10,0xC7,0x3C,0xB0,0xC3,0x0C,0x30,0xC3,0x0C,0x30,0xC3,0x0C,0xFF,0xF0,
# '2'
0x32,0x03,0x0B,0x12,0x02,0x0F,
0x1F,0x07,0xFB,0xC3,0xE0,0x30,0x06,0x00,0xC0,0x38,0x0E,0x07,0x81,0xE0,0xF8,0x3C,0x07,0x01,0xC0,0x30,0x06,0x00,0xFF,0xDF,0xFC,
# '3'
0x33,0x03,0x0B,0x12,0x02,0x0F,
0x1F,0x0F,0xF9,0xC3,0x80,0x30,0x06,0x00,0xC0,0x78,0x7E,0x0F,0x80,0x78,0x03,0x80,0x30,0x06,0x00,0xF0,0x1F,0x0E,0x7F,0x83,0xE0,
# '4'
0x34,0x03,0x0D,0x12,0x02,0x0F,
0x01,0xC0,0x0E,0x00,0xF0,0x0F,0x80,0x6C,0x07,0x60,0x33,0x03,0x98,0x38,0xC1,0x86,0x1C,0x31,0xFF,0xFF,0xFF,0x80,0x60,0x03,0x00,0x18,0x00,0xC0,0x06,0x00,
# '5'
0x35,0x02,0x0C,0x13,0x02,0x0F,
0x00,0x0F,0xFE,0xFF,0xE6,0x00,0x60,0x0E,0x00,0xEF,0x8F,0xFC,0xF8,0x6E,0x07,0xC0,0x30,0x03,0x00,0x30,0x03,0x00,0x7C,0x06,0xE1,0xE7,0xFC,0x3F,0x00,
# '6'
0x36,0x03,0x0C,0x12,0x01,0x0F,
0x03,0x00,0x70,0x0E,0x01,0xC0,0x38,0x03,0x00,0x60,0x06,0xF8,0xFF,0xEE,0x0E,0xC0,0x3C,0x03,0xC0,0x3C,0x03,0x60,0x77,0x0E,0x3F,0xC1,0xF8,
# '7'
0x37,0x02,0x0D,0x13,0x01,0x0F,
0x00,0x07,0xFF,0xFF,0xFE,0x00,0xE0,0x0E,0x00,0x60,0x06,0x00,0x30,0x03,0x80,0x18,0x01,0xC0,0x0C,0x00,0x60,0x07,0x00,0x30,0x03,0x80,0x18,0x00,0xC0,0x04,0x00,
# '8'
0x38,0x02,0x0C,0x13,0x01,0x0F,
0x00,0x00,0xFC,0x3F,0xE3,0x07,0x60,0x36,0x03,0x60,0x37,0x8F,0x3F,0xE1,0xFE,0x38,0xE7,0x07,0x60,0x36,0x03,0x60,0x36,0x03,0x30,0x63,0xFE,0x0F,0x80,
# '9'
0x39,0x03,0x0D,0x13,0x01,0x0F,
0x0F,0x01,0xFE,0x1C,0x38,0xC0,0xCC,0x07,0x60,0x1B,0x00,0xD8,0x06,0xE0,0x73,0x87,0x8F,0xF8,0x3E,0xC0,0x0E,0x00,0x60,0x07,0x00,0xF0,0x1F,0x03,0xE0,0x1C,0x00,
# ':'
0x3A,0x09,0x03,0x0B,0x02,0x07,
0xFF,0x80,0x00,0xFF,0x80,
# ';'
0x3B,0x09,0x04,0x0E,0x02,0x07,
0xEE,0xE0,0x00,0x00,0x03,0x7E,0xCC,
# '<'
0x3C,0x09,0x07,0x0A,0x01,0x09,
0x06,0x1C,0x71,0xC7,0x1E,0x1E,0x0E,0x0E,0x0C,
# '='
0x3D,0x0A,0x09,0x09,0x01,0x0C,
0xFF,0xFF,0xC0,0x00,0x00,0x00,0x03,0xFF,0xFF,0x00,0x00,
# '>'
0x3E,0x08,0x08,0x0B,0x01,0x0A,
0x60,0x70,0x38,0x3C,0x1E,0x0F,0x06,0x0C,0x38,0x70,0xC0,
# '?'
0x3F,0x04,0x0B,0x12,0x01,0x0D,
0x1E,0x0F,0xE3,0xC6,0x60,0x60,0x06,0x00,0xC0,0x18,0x07,0x01,0xE0,0xF8,0x3E,0x0F,0x01,0x80,0x00,0x00,0x01,0x80,0x30,0x06,0x00,
# '@'
0x40,0x02,0x13,0x14,0x01,0x16,
0x03,0xF8,0x01,0xFF,0xC0,0x78,0x3C,0x1C,0x01,0xC3,0x00,0x1C,0xC1,0xC1,0x98,0xF8,0x1E,0x3C,0x03,0xC6,0x30,0x79,0x8E,0x0F,0x31,0xC1,0xE6,0x78,0x6C,0x7F,0xFC,0xC7,0x3E,0x18,0x00,0x01,0x80,0x00,0x38,0x00,0x03,0xC0,0xE0,0x1F,0xFC,0x00,0xFE,0x00,
# 'A'
0x41,0x03,0x0E,0x12,0x01,0x11,
0x00,0x80,0x07,0x00,0x1C,0x00,0xF0,0x03,0xC0,0x1D,0x80,0x76,0x03,0x98,0x0E,0x20,0x70,0xC1,0xFF,0x0F,0xFC,0x7C,0x19,0xC0,0x67,0x01,0xB8,0x07,0xE0,0x0F,0x00,0x30,
# 'B'
0x42,0x03,0x0B,0x13,0x03,0x0F,
0x7C,0x1F,0xE3,0x0E,0x60,0xEC,0x0D,0x81,0xB0,0x36,0x0E,0xC3,0x9F,0xE3,0xFC,0x61,0xEC,0x0F,0x80,0xF0,0x1E,0x0E,0xC7,0xDF,0xE3,0xF0,0x00,
# 'C'
0x43,0x03,0x0D,0x12,0x01,0x0E,
0x01,0xF8,0x3F,0xC3,0xC6,0x38,0x31,0x80,0x1C,0x01,0xC0,0x0C,0x00,0x60,0x06,0x00,0x30,0x01,0x80,0x0C,0x00,0x60,0x19,0x81,0xCE,0x3C,0x3F,0xC0,0xF8,0x00,
# 'D'
0x44,0x03,0x0D,0x12,0x02,0x11,
0x60,0x07,0xC0,0x37,0x81,0x8F,0x0C,0x1C,0x60,0x73,0x01,0xD8,0x06,0xC0,0x1E,0x00,0xF0,0x07,0x80,0x3C,0x01,0xE0,0x1B,0x01,0xDC,0x1C,0xFF,0xC1,0xF8,0x00,
# 'E'
0x45,0x03,0x0D,0x12,0x02,0x0F,
0xFF,0xF7,0xFF,0xF0,0x01,0x80,0x0C,0x00,0x60,0x03,0x00,0x18,0x7E,0xFF,0xF7,0xE0,0x30,0x01,0x80,0x0C,0x00,0x60,0x03,0x00,0x18,0x00,0x7F,0xF1,0xFF,0x80,
# 'F'
0x46,0x03,0x0C,0x12,0x02,0x0F,
0xFF,0xCF,0xFF,0xC0,0x7C,0x00,0xC0,0x0C,0x00,0xC0,0x0D,0xFE,0xFF,0xEF,0x00,0xC0,0x0C,0x00,0xC0,0x0C,0x00,0xC0,0x0C,0x00,0xC0,0x0C,0x00,
# 'G'
0x47,0x03,0x0F,0x12,0x01,0x10,
0x03,0xE0,0x0F,0xF0,0x38,0xE0,0xE0,0x03,0x80,0x06,0x00,0x18,0x00,0x30,0x00,0x61,0xFF,0x9F,0xFF,0x3C,0x36,0x00,0x6C,0x01,0x98,0x07,0x30,0x0C,0x30,0x70,0x7F,0xC0,0x3E,0x00,
# 'H'
0x48,0x03,0x0F,0x12,0x02,0x12,
0xC0,0x03,0x80,0x0F,0x00,0x1E,0x00,0x3C,0x00,0x78,0x00,0xF0,0x01,0xE0,0x03,0xC0,0xFF,0xFF,0xFF,0xFC,0x1E,0x00,0x3C,0x00,0x78,0x00,0xF0,0x01,0xE0,0x03,0xC0,0x07,0x80,0x0C,
# 'I'
0x49,0x03,0x0C,0x12,0x00,0x0D,
0xFF,0xEF,0xFF,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x00,0x60,0xFF,0xFF,0xFF,
# 'J'
0x4A,0x03,0x0E,0x12,0x01,0x10,
0x1F,0xFC,0x7F,0xF0,0x0C,0x00,0x30,0x00,0xC0,0x03,0x00,0x0C,0x00,0x30,0x00,0xC0,0x03,0x00,0x0C,0x00,0x30,0xC0,0xC3,0x06,0x0E,0x18,0x1C,0x60,0x3F,0x80,0x3C,0x00,
# 'K'
0x4B,0x03,0x0C,0x12,0x03,0x0F,
0xC0,0x6C,0x0E,0xC1,0xCC,0x38,0xC7,0x0C,0xE0,0xDC,0x0F,0x80,0xF0,0x0F,0x00,0xF8,0x0F,0xC0,0xDE,0x0C,0xF0,0xC7,0x8C,0x1E,0xC0,0xFC,0x07,
# 'L'
0x4C,0x03,0x0B,0x12,0x01,0x0D,
0xC0,0x18,0x03,0x00,0x60,0x0C,0x01,0x80,0x30,0x06,0x00,0xC0,0x18,0x03,0x00,0x60,0x0C,0x01,0x80,0x30,0x06,0x00,0xFF,0xEF,0xFC,
# 'M'
0x4D,0x03,0x13,0x13,0x01,0x15,
0x0C,0x06,0x01,0x80,0xC0,0x78,0x3C,0x0F,0x07,0x81,0xE0,0xF0,0x3C,0x1E,0x07,0x83,0xC1,0xD8,0xEC,0x3B,0x1D,0x87,0x63,0xB0,0xCC,0xE6,0x38,0xDC,0x47,0x1B,0x8C,0xE3,0xF1,0xB8,0x3C,0x37,0x07,0x86,0xE0,0xF0,0x7C,0x1E,0x0F,0x01,0x81,0x80,
# 'N'
0x4E,0x03,0x11,0x12,0x01,0x13,
0x60,0x01,0x38,0x00,0xDE,0x00,0x6F,0x00,0x37,0xC0,0x1B,0x70,0x0D,0x9C,0x06,0xCF,0x03,0x63,0x81,0xB0,0xE0,0xD8,0x38,0x6C,0x0E,0x36,0x03,0x9B,0x00,0xED,0x80,0x3E,0xC0,0x0F,0x60,0x03,0xB0,0x00,0xC0,
# 'O'
0x4F,0x03,0x11,0x12,0x01,0x13,
0x01,0xF8,0x03,0xFF,0x07,0x81,0xC3,0x00,0x63,0x00,0x1B,0x80,0x0D,0x80,0x07,0xC0,0x03,0xC0,0x01,0xE0,0x00,0xF0,0x00,0xF8,0x00,0x6C,0x00,0x33,0x00,0x31,0xC0,0x38,0x70,0x78,0x1F,0xF8,0x03,0xF0,0x00,
# 'P'
0x50,0x03,0x0B,0x12,0x01,0x0D,
0xFE,0x1F,0xF3,0x0F,0x60,0x7C,0x07,0x80,0xF0,0x1E,0x06,0xC3,0xDF,0xF3,0xF8,0x60,0x0C,0x01,0x80,0x30,0x06,0x00,0xC0,0x18,0x00,
# 'Q'
0x51,0x03,0x14,0x17,0x01,0x15,
0x01,0xF8,0x00,0x7F,0xE0,0x1E,0x07,0x03,0x80,0x18,0x30,0x01,0xC6,0x00,0x0C,0x60,0x00,0xEC,0x00,0x06,0xC0,0x00,0x6C,0x00,0x06,0xC0,0x00,0x6C,0x00,0x06,0x60,0xE0,0xE7,0x0F,0x0C,0x38,0x79,0xC1,0xC3,0xF8,0x0F,0xFF,0x00,0x3F,0x78,0x00,0x03,0xC0,0x00,0x1E,0x00,0x00,0xF0,0x00,0x07,0x00,0x00,0x20,
# 'R'
0x52,0x02,0x0D,0x13,0x01,0x0F,
0x00,0x03,0xE0,0x3F,0xC1,0x8F,0x0C,0x0E,0x60,0x33,0x00,0xD8,0x06,0xC0,0x36,0x03,0xB0,0x79,0xFF,0x8F,0xF0,0x7F,0x83,0x1F,0x18,0x3C,0xC0,0xF6,0x01,0xF0,0x06,
# 'S'
0x53,0x03,0x0F,0x13,0x01,0x11,
0x01,0xF0,0x07,0xF8,0x18,0x70,0x60,0x01,0x80,0x03,0x00,0x06,0x00,0x0E,0x00,0x0F,0xF0,0x07,0xF0,0x00,0xF0,0x00,0x70,0x00,0x60,0x00,0xD8,0x01,0xB8,0x06,0x78,0x3C,0x7F,0xE0,0x3F,0x00,
# 'T'
0x54,0x02,0x0F,0x13,0x01,0x10,
0x00,0x01,0xFF,0xFD,0xFF,0xF8,0x18,0x00,0x30,0x00,0x60,0x00,0xC0,0x01,0x80,0x03,0x00,0x06,0x00,0x0C,0x00,0x18,0x00,0x30,0x00,0x60,0x00,0xC0,0x01,0x80,0x03,0x00,0x06,0x00,0x0C,0x00,
# 'U'
0x55,0x03,0x11,0x12,0x01,0x12,
0x60,0x03,0x30,0x01,0x98,0x00,0xCC,0x00,0x66,0x00,0x33,0x00,0x19,0x80,0x0C,0xC0,0x06,0x60,0x03,0x30,0x01,0x98,0x01,0xCC,0x00,0xC7,0x00,0x61,0x80,0x70,0xE0,0x30,0x38,0x38,0x0F,0xF8,0x01,0xF0,0x00,
# 'V'
0x56,0x03,0x0E,0x13,0x02,0x10,
0x80,0x0F,0x00,0x3C,0x01,0xB0,0x06,0x60,0x31,0x80,0xC6,0x03,0x0C,0x18,0x30,0x60,0xC1,0x81,0x8C,0x06,0x30,0x0D,0x80,0x36,0x00,0xF8,0x01,0xC0,0x07,0x00,0x08,0x00,0x00,0x00,
# 'W'
0x57,0x03,0x17,0x12,0x01,0x19,
0xC0,0x20,0x0F,0xC0,0x60,0x19,0x81,0xC0,0x23,0x03,0x80,0xC6,0x07,0x01,0x86,0x1E,0x03,0x0C,0x36,0x0C,0x18,0x6C,0x18,0x11,0x98,0x60,0x33,0x30,0xC0,0x66,0x61,0x80,0xD8,0x66,0x01,0xB0,0xCC,0x01,0xC1,0xB0,0x03,0x83,0x60,0x07,0x07,0x80,0x0C,0x07,0x00,0x08,0x0E,0x00,
# 'X'
0x58,0x03,0x10,0x12,0x01,0x11,
0x60,0x03,0x70,0x07,0x38,0x0E,0x1C,0x1C,0x0C,0x1C,0x0E,0x38,0x07,0x70,0x03,0xE0,0x01,0xC0,0x03,0xC0,0x07,0xE0,0x07,0x70,0x0E,0x38,0x1C,0x18,0x38,0x1C,0x70,0x0E,0xE0,0x07,0xC0,0x03,
# 'Y'
0x59,0x03,0x0F,0x13,0x00,0x10,
0x60,0x06,0xE0,0x1D,0xC0,0x31,0xC0,0xE1,0xC1,0x83,0x83,0x03,0x8C,0x07,0x18,0x07,0x70,0x0F,0xC0,0x0F,0x80,0x0F,0x00,0x1C,0x00,0x38,0x00,0x60,0x01,0xC0,0x03,0x00,0x06,0x00,0x08,0x00,
# 'Z'
0x5A,0x03,0x0F,0x12,0x01,0x11,
0xFF,0xFF,0xFF,0xFC,0x00,0xF0,0x03,0x80,0x0E,0x00,0x3C,0x00,0xF0,0x03,0xC0,0x07,0x00,0x1E,0x00,0x38,0x00,0xE0,0x03,0xC0,0x07,0x00,0x1C,0x00,0x70,0x00,0xFF,0xFF,0xFF,0xFC,
# '['
0x5B,0x01,0x07,0x1A,0x01,0x09,
0x00,0xFD,0xFB,0x06,0x0C,0x18,0x30,0x60,0xC1,0x83,0x06,0x0C,0x18,0x30,0x60,0xC1,0x83,0x06,0x0C,0x18,0x3F,0x7E,0x00,
# '\'
0x5C,0x03,0x0B,0x14,0x02,0x0D,
0xC0,0x18,0x01,0x80,0x30,0x03,0x00,0x60,0x06,0x00,0xC0,0x0C,0x01,0x80,0x18,0x03,0x00,0x20,0x06,0x00,0xC0,0x0C,0x01,0x80,0x18,0x03,0x00,0x60,
# ']'
0x5D,0x01,0x07,0x1A,0x02,0x09,
0x01,0xFB,0xF0,0x60,0xC1,0x83,0x06,0x0C,0x18,0x30,0x60,0xC1,0x83,0x06,0x0C,0x18,0x30,0x60,0xC1,0x83,0x7E,0xFC,0x00,
# '^'
0x5E,0x02,0x0A,0x06,0x02,0x0E,
0x0C,0x07,0x83,0xF1,0xCE,0xE1,0xF0,0x30,
# '_'
0x5F,0x16,0x0F,0x04,0x00,0x0F,
0x00,0x01,0xFF,0xFF,0xFF,0xF8,0x00,0x00,
# '`'
0x60,0x02,0x05,0x06,0x02,0x0D,
0xC7,0x1C,0x63,0x8C,
# 'a'
0x61,0x09,0x0B,0x0C,0x01,0x0C,
0x0F,0x87,0xF9,0xE3,0x30,0x6E,0x0D,0x81,0xB0,0x36,0x06,0xC0,0xCC,0x39,0xFF,0x9F,0x30,
# 'b'
0x62,0x02,0x0C,0x13,0x01,0x0E,
0x60,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x78,0x7F,0xC7,0x8E,0x60,0x76,0x03,0x60,0x36,0x03,0x60,0x36,0x06,0x70,0xE7,0xFC,0x7F,0x00,
# 'c'
0x63,0x09,0x0A,0x0C,0x01,0x0C,
0x0F,0x07,0xF3,0x0D,0x80,0x60,0x30,0x0C,0x03,0x00,0xC0,0x1C,0x33,0xFC,0x7C,
# 'd'
0x64,0x02,0x0C,0x13,0x01,0x0E,
0x00,0x20,0x06,0x00,0x60,0x06,0x00,0x60,0x06,0x00,0x61,0xF6,0x3F,0xE7,0x0E,0x60,0x6C,0x06,0xC0,0x6C,0x06,0xC0,0x6E,0x06,0x70,0xE3,0xFE,0x1F,0x60,
# 'e'
0x65,0x09,0x0B,0x0C,0x01,0x0D,
0x1F,0x07,0xF9,0xC7,0x30,0xEC,0x79,0xBE,0x3E,0x07,0x00,0xC0,0x6E,0x1D,0xFF,0x0F,0x80,
# 'f'
0x66,0x02,0x0A,0x14,0x01,0x0C,
0x03,0x83,0xE0,0xE0,0x70,0x18,0x06,0x01,0x83,0xFF,0xFF,0xC6,0x01,0x80,0x60,0x18,0x06,0x01,0x80,0x60,0x18,0x06,0x01,0x80,0x60,
# 'g'
0x67,0x09,0x0A,0x13,0x02,0x0D,
0x0F,0x0F,0xF7,0x0D,0x83,0xC0,0xF0,0x3C,0x1F,0x07,0xC1,0xD8,0xF7,0xEC,0xF3,0x00,0xC0,0x30,0x18,0x06,0x03,0xBF,0xC7,0xE0,
# 'h'
0x68,0x02,0x0B,0x13,0x01,0x0E,
0x60,0x0C,0x01,0x80,0x30,0x06,0x00,0xC0,0x18,0x03,0x1E,0x6F,0xEF,0x8D,0xE1,0xB8,0x36,0x06,0xC0,0xD8,0x1B,0x03,0x60,0x6C,0x0D,0x81,0x80,
# 'i'
0x69,0x04,0x02,0x11,0x03,0x07,
0xF0,0x3F,0xFF,0xFF,0xC0,
# 'j'
0x6A,0x04,0x08,0x18,0x00,0x0A,
0x03,0x03,0x00,0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0xC3,0xE3,0x77,0x7E,0x1C,
# 'k'
0x6B,0x03,0x0B,0x13,0x02,0x0E,
0xC0,0x18,0x03,0x00,0x60,0x0C,0x01,0x80,0x30,0x36,0x0E,0xC7,0x99,0xE3,0x70,0x7E,0x0F,0xE1,0xCE,0x30,0xE6,0x0E,0xC0,0xF8,0x08,0x00,0x00,
# 'l'
0x6C,0x02,0x02,0x13,0x03,0x07,
0xFF,0xFF,0xFF,0xFF,0xFC,
# 'm'
0x6D,0x09,0x10,0x0C,0x01,0x12,
0x67,0x3C,0x6F,0xFE,0x7D,0xEE,0x79,0x86,0x71,0x86,0x61,0x86,0x61,0x86,0x61,0x86,0x61,0x86,0x61,0x86,0x61,0x86,0x61,0x86,
# 'n'
0x6E,0x09,0x0B,0x0C,0x01,0x0D,
0x63,0x8D,0xF9,0xF1,0xBC,0x37,0x06,0xE0,0xD8,0x1B,0x03,0x60,0x6C,0x0D,0x81,0xB0,0x30,
# 'o'
0x6F,0x09,0x0C,0x0C,0x01,0x0D,
0x0F,0x81,0xFC,0x38,0xC3,0x06,0x60,0x66,0x06,0x60,0x66,0x06,0x60,0xE3,0x1C,0x1F,0x80,0xF0,
# 'p'
0x70,0x08,0x0A,0x14,0x02,0x0D,
0xC0,0x33,0xCF,0xFB,0xC6,0xC0,0xF0,0x3C,0x0F,0x03,0xC0,0xF0,0x7C,0x1B,0xFC,0xFE,0x30,0x0C,0x03,0x00,0xC0,0x30,0x0C,0x03,0x00,
# 'q'
0x71,0x08,0x0A,0x14,0x01,0x0C,
0x00,0x03,0xF3,0xFD,0xE3,0x60,0xF8,0x3C,0x0F,0x03,0xC0,0xF0,0x76,0x1D,0xFF,0x1F,0x80,0x60,0x18,0x06,0x01,0x80,0x60,0x18,0x06,
# 'r'
0x72,0x09,0x09,0x0C,0x01,0x0B,
0xCF,0x6F,0xFE,0x7C,0x3C,0x1E,0x03,0x01,0x80,0xC0,0x60,0x30,0x18,0x00,
# 's'
0x73,0x09,0x09,0x0C,0x02,0x0C,
0x03,0x9F,0xDE,0x7C,0x3E,0x07,0xF0,0xFC,0x07,0x01,0xE0,0xFF,0xC7,0xC0,
# 't'
0x74,0x05,0x0A,0x10,0x00,0x0A,
0x0C,0x03,0x00,0xC0,0x30,0xFF,0xFF,0xF0,0xC0,0x30,0x0C,0x03,0x00,0xC0,0x30,0x0C,0x03,0x00,0xC0,0x30,
# 'u'
0x75,0x09,0x0B,0x0C,0x01,0x0C,
0xC0,0xD8,0x1B,0x03,0x60,0x6C,0x0D,0x81,0xB0,0x36,0x06,0xC0,0xD8,0x19,0xFF,0x1F,0x60,
# 'v'
0x76,0x09,0x0B,0x0D,0x01,0x0C,
0xC0,0x78,0x1F,0x83,0x30,0x67,0x1C,0x63,0x0C,0xE0,0xD8,0x1E,0x03,0xC0,0x30,0x06,0x00,0x00,
# 'w'
0x77,0x09,0x0F,0x0D,0x01,0x11,
0xC1,0x87,0x83,0x0F,0x0E,0x1E,0x1C,0x66,0x7C,0xCC,0xD9,0x99,0x36,0x36,0x6C,0x7C,0xD8,0x70,0xE0,0xE1,0xC0,0x83,0x80,0x00,0x00,
# 'x'
0x78,0x09,0x0D,0x0D,0x01,0x0E,
0x60,0x1B,0x81,0xCE,0x1C,0x39,0xC0,0xFC,0x03,0xC0,0x3C,0x03,0xF0,0x39,0xC3,0x87,0x38,0x1D,0x80,0x70,0x01,0x80,
# 'y'
0x79,0x09,0x0C,0x13,0x00,0x0D,
0xC0,0x3E,0x07,0x60,0x67,0x0C,0x30,0xC3,0x98,0x19,0x81,0xD8,0x0F,0x00,0xF0,0x06,0x00,0x60,0x0C,0x00,0xC0,0x18,0x01,0x80,0x30,0x03,0x00,0x30,0x00,
# 'z'
0x7A,0x09,0x0B,0x0C,0x01,0x0D,
0xFF,0xFF,0xFC,0x07,0x00,0xC0,0x30,0x0C,0x03,0x80,0xE0,0x38,0x0E,0x03,0xFF,0xFF,0xF0,
# '{'
0x7B,0x02,0x08,0x18,0x01,0x09,
0x0F,0x1F,0x38,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x60,0xE0,0xE0,0x70,0x30,0x30,0x30,0x30,0x30,0x38,0x18,0x1F,0x07,
# '|'
0x7C,0x01,0x02,0x18,0x04,0x0A,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
# '}'
0x7D,0x02,0x08,0x18,0x01,0x09,
0x70,0xF8,0x1C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x06,0x07,0x07,0x0E,0x0C,0x0C,0x0C,0x0C,0x0C,0x1C,0x18,0xF8,0xE0,
# '~'
0x7E,0x0B,0x0C,0x05,0x01,0x0E,
0x38,0x37,0xE3,0xE7,0x7C,0x3E,0x01,0xC0,
# Terminator
0xFF
]
|
[
"kcfkwok@gmail.com"
] |
kcfkwok@gmail.com
|
b4b6d03a8c962d75d30e5e9116ebaae19c7b2e62
|
24badcc01e77651123b88592dba0c764c61c821d
|
/v1.2.py
|
6b7c578f167bfb21df101040178ddc4086b92e9d
|
[] |
no_license
|
PythonCalismaGrubu/Quizlet
|
b5cb155309b69d5cbc7a6064277dcb0129fcd752
|
72434010ed507e23737e86625aecea23056d3bc1
|
refs/heads/main
| 2023-01-12T15:51:10.401816
| 2020-11-15T22:12:31
| 2020-11-15T22:12:31
| 311,963,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
## presented by ortancabijon :)
print('Quize hoş geldiniz!')
sorular = ['1) Cumhuriyet ne zaman ilan edildi ?',
'2) TBMM ne zaman açıldı ?',
'3) Zafer Bayramı ne zamandır ?',
'4) Öğretmenler günü ne zamandır ?',
'5) Gençlik ve Spor Bayramı ne zamandır ?']
soru_numarası = 0
sonSoru = len(sorular)
cevaplar = ['29 Ekim', '23 Nisan', '30 Ağustos', '24 Kasım', '19 Mayıs']
yanlisCevap = False
print('Cevaplarınızı GG Ay şeklinde yazınız')
while soru_numarası < sonSoru and not yanlisCevap:  # '<' prevents an IndexError after the last question
cevap = input(sorular[soru_numarası])
if cevap == cevaplar[soru_numarası]:
print('Doğru Cevap!')
soru_numarası += 1
else:
print('Yanlış Cevap !')
soru_numarası += 1
print('Quiz Bitti !')
|
[
"noreply@github.com"
] |
PythonCalismaGrubu.noreply@github.com
|
ba82736ee41740e994253cf27c45278555f34f16
|
eaa2b268142a4c382a768300c004f57e214e74ff
|
/Inheritance/Chef.py
|
4b5462acf724bc127122add206fc2884fdf47510
|
[] |
no_license
|
rmdimran/Mastering_Python
|
08292e944f6d1f35a46f360f0ead7f7373896892
|
67435d0bd14e775f0fec23ded9ad2b80fcdac54c
|
refs/heads/master
| 2020-03-30T21:27:28.865610
| 2018-10-12T12:20:15
| 2018-10-12T12:20:15
| 151,629,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
class Chef:
def make_chicken ( self ):
print ( " The Chef makes chicken." )
def make_salad ( self ):
print ( " The Chef makes Salad." )
def make_special_dish ( self ):
print ( " The Chef makes BBQ Kababs." )
|
[
"rmd92imran@gmail.com"
] |
rmd92imran@gmail.com
|
3b0446384c14791a627fc1e2531ab30ff7889cb2
|
3730763f757e1fc2267dfc1a10341715c7ba9bfe
|
/custom_components/sensor/strava.py
|
5202b12b20155533ea8b93409f60226f1ecbbb6e
|
[] |
no_license
|
tylerwal/HASS
|
017dfeac50416915e4afc68fe4cd990dd9aa8aa6
|
f336f8126b52ca8aa2bd9f45da4156c28a6b331b
|
refs/heads/master
| 2021-05-11T13:59:38.329890
| 2018-12-09T03:43:41
| 2018-12-09T03:43:41
| 117,691,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,055
|
py
|
"""
Support for the Strava API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fitbit/
"""
import os
import logging
import datetime
import time
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['stravalib==0.9.0']
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_ACCESS_TOKEN = 'access_token'
ATTR_REFRESH_TOKEN = 'refresh_token'
ATTR_CLIENT_ID = 'client_id'
ATTR_CLIENT_SECRET = 'client_secret'
ATTR_LAST_SAVED_AT = 'last_saved_at'
CONF_MONITORED_RESOURCES = 'monitored_resources'
CONF_CLOCK_FORMAT = 'clock_format'
CONF_ATTRIBUTION = 'Data provided by Strava.com'
DEPENDENCIES = ['http']
FITBIT_AUTH_CALLBACK_PATH = '/api/fitbit/callback'
FITBIT_AUTH_START = '/api/fitbit'
STRAVA_CONFIG_FILE = 'strava.conf'
FITBIT_DEFAULT_RESOURCES = ['activities/steps']
SCAN_INTERVAL = datetime.timedelta(minutes=30)
DEFAULT_CONFIG = {
'client_id': 'CLIENT_ID_HERE',
'client_secret': 'CLIENT_SECRET_HERE'
}
FITBIT_RESOURCES_LIST = {
'activities/activityCalories': ['Activity Calories', 'cal', 'fire'],
'activities/calories': ['Calories', 'cal', 'fire'],
'activities/caloriesBMR': ['Calories BMR', 'cal', 'fire'],
'activities/distance': ['Distance', '', 'map-marker'],
'activities/elevation': ['Elevation', '', 'walk'],
'activities/floors': ['Floors', 'floors', 'walk'],
'activities/heart': ['Resting Heart Rate', 'bpm', 'heart-pulse'],
'activities/minutesFairlyActive':
['Minutes Fairly Active', 'minutes', 'walk'],
'activities/minutesLightlyActive':
['Minutes Lightly Active', 'minutes', 'walk'],
'activities/minutesSedentary':
['Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/minutesVeryActive': ['Minutes Very Active', 'minutes', 'run'],
'activities/steps': ['Steps', 'steps', 'walk'],
'activities/tracker/activityCalories':
['Tracker Activity Calories', 'cal', 'fire'],
'activities/tracker/calories': ['Tracker Calories', 'cal', 'fire'],
'activities/tracker/distance': ['Tracker Distance', '', 'map-marker'],
'activities/tracker/elevation': ['Tracker Elevation', '', 'walk'],
'activities/tracker/floors': ['Tracker Floors', 'floors', 'walk'],
'activities/tracker/minutesFairlyActive':
['Tracker Minutes Fairly Active', 'minutes', 'walk'],
'activities/tracker/minutesLightlyActive':
['Tracker Minutes Lightly Active', 'minutes', 'walk'],
'activities/tracker/minutesSedentary':
['Tracker Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/tracker/minutesVeryActive':
['Tracker Minutes Very Active', 'minutes', 'run'],
'activities/tracker/steps': ['Tracker Steps', 'steps', 'walk'],
'body/bmi': ['BMI', 'BMI', 'human'],
'body/fat': ['Body Fat', '%', 'human'],
'body/weight': ['Weight', '', 'human'],
'devices/battery': ['Battery', None, None],
'sleep/awakeningsCount':
['Awakenings Count', 'times awaken', 'sleep'],
'sleep/efficiency': ['Sleep Efficiency', '%', 'sleep'],
'sleep/minutesAfterWakeup': ['Minutes After Wakeup', 'minutes', 'sleep'],
'sleep/minutesAsleep': ['Sleep Minutes Asleep', 'minutes', 'sleep'],
'sleep/minutesAwake': ['Sleep Minutes Awake', 'minutes', 'sleep'],
'sleep/minutesToFallAsleep':
['Sleep Minutes to Fall Asleep', 'minutes', 'sleep'],
'sleep/startTime': ['Sleep Start Time', None, 'clock'],
'sleep/timeInBed': ['Sleep Time in Bed', 'minutes', 'hotel']
}
FITBIT_MEASUREMENTS = {
'en_US': {
'duration': 'ms',
'distance': 'mi',
'elevation': 'ft',
'height': 'in',
'weight': 'lbs',
'body': 'in',
'liquids': 'fl. oz.',
'blood glucose': 'mg/dL',
'battery': '',
},
'en_GB': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'stone',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
},
'metric': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'kilograms',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
}
}
BATTERY_LEVELS = {
'High': 100,
'Medium': 50,
'Low': 20,
'Empty': 0
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_RESOURCES, default=FITBIT_DEFAULT_RESOURCES):
vol.All(cv.ensure_list, [vol.In(FITBIT_RESOURCES_LIST)]),
vol.Optional(CONF_CLOCK_FORMAT, default='24H'):
vol.In(['12H', '24H'])
})
def request_app_setup(hass, config, add_devices, config_path,
discovery_info=None):
"""Assist user with configuring the Strava dev application."""
configurator = hass.components.configurator
# pylint: disable=unused-argument
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
config_path = hass.config.path(STRAVA_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
error_msg = ("You didn't correctly modify fitbit.conf",
" please try again")
configurator.notify_errors(_CONFIGURING['fitbit'],
error_msg)
else:
setup_platform(hass, config, add_devices, discovery_info)
else:
setup_platform(hass, config, add_devices, discovery_info)
start_url = "{}{}".format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
description = """Please create a Strava developer app at
https://www.strava.com/settings/api.
For the OAuth 2.0 Application Type choose Personal.
Set the Callback URL to {}.
They will provide you a Client ID and secret.
These need to be saved into the file located at: {}.
Then come back here and hit the below button.
""".format(start_url, config_path)
submit = "I have saved my Client ID and Client Secret into fitbit.conf."
_CONFIGURING['fitbit'] = configurator.request_config(
'Strava', fitbit_configuration_callback,
description=description, submit_caption=submit,
description_image="/static/images/config_fitbit_app.png"
)
def request_oauth_completion(hass):
"""Request user complete Strava OAuth2 flow."""
configurator = hass.components.configurator
if "fitbit" in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING['fitbit'], "Failed to register, please try again.")
return
# pylint: disable=unused-argument
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
start_url = '{}{}'.format(hass.config.api.base_url, FITBIT_AUTH_START)
description = "Please authorize Strava by visiting {}".format(start_url)
_CONFIGURING['fitbit'] = configurator.request_config(
'Strava', fitbit_configuration_callback,
description=description,
submit_caption="I have authorized Strava."
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Strava sensor."""
config_path = hass.config.path(STRAVA_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
request_app_setup(
hass, config, add_devices, config_path, discovery_info=None)
return False
else:
config_file = save_json(config_path, DEFAULT_CONFIG)
request_app_setup(
hass, config, add_devices, config_path, discovery_info=None)
return False
if "fitbit" in _CONFIGURING:
hass.components.configurator.request_done(_CONFIGURING.pop("fitbit"))
import fitbit
access_token = config_file.get(ATTR_ACCESS_TOKEN)
refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
expires_at = config_file.get(ATTR_LAST_SAVED_AT)
if None not in (access_token, refresh_token):
authd_client = fitbit.Fitbit(config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET),
access_token=access_token,
refresh_token=refresh_token,
expires_at=expires_at,
refresh_cb=lambda x: None)
if expires_at and int(time.time()) - expires_at > 3600:
authd_client.client.refresh_token()
authd_client.system = authd_client.user_profile_get()["user"]["locale"]
if authd_client.system != 'en_GB':
if hass.config.units.is_metric:
authd_client.system = 'metric'
else:
authd_client.system = 'en_US'
dev = []
registered_devs = authd_client.get_devices()
clock_format = config.get(CONF_CLOCK_FORMAT)
for resource in config.get(CONF_MONITORED_RESOURCES):
# monitor battery for all linked FitBit devices
if resource == 'devices/battery':
for dev_extra in registered_devs:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format, dev_extra))
else:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format))
add_devices(dev, True)
else:
oauth = fitbit.api.FitbitOauth2Client(
config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET))
redirect_uri = '{}{}'.format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
fitbit_auth_start_url, _ = oauth.authorize_token_url(
redirect_uri=redirect_uri,
scope=['activity', 'heartrate', 'nutrition', 'profile',
'settings', 'sleep', 'weight'])
hass.http.register_redirect(FITBIT_AUTH_START, fitbit_auth_start_url)
hass.http.register_view(FitbitAuthCallbackView(
config, add_devices, oauth))
request_oauth_completion(hass)
class FitbitAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
requires_auth = False
url = FITBIT_AUTH_CALLBACK_PATH
name = 'api:fitbit:callback'
def __init__(self, config, add_devices, oauth):
"""Initialize the OAuth callback view."""
self.config = config
self.add_devices = add_devices
self.oauth = oauth
@callback
def get(self, request):
"""Finish OAuth callback request."""
from oauthlib.oauth2.rfc6749.errors import MismatchingStateError
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
hass = request.app['hass']
data = request.query
response_message = """Strava has been successfully authorized!
You can close this window now!"""
result = None
if data.get('code') is not None:
redirect_uri = '{}{}'.format(
hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH)
try:
result = self.oauth.fetch_access_token(data.get('code'),
redirect_uri)
except MissingTokenError as error:
_LOGGER.error("Missing token: %s", error)
response_message = """Something went wrong when
attempting authenticating with Strava. The error
encountered was {}. Please try again!""".format(error)
except MismatchingStateError as error:
_LOGGER.error("Mismatched state, CSRF error: %s", error)
response_message = """Something went wrong when
attempting authenticating with Strava. The error
encountered was {}. Please try again!""".format(error)
else:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Strava.
An unknown error occurred. Please try again!
"""
if result is None:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Strava.
An unknown error occurred. Please try again!
"""
html_response = """<html><head><title>Strava Auth</title></head>
<body><h1>{}</h1></body></html>""".format(response_message)
if result:
config_contents = {
ATTR_ACCESS_TOKEN: result.get('access_token'),
ATTR_REFRESH_TOKEN: result.get('refresh_token'),
ATTR_CLIENT_ID: self.oauth.client_id,
ATTR_CLIENT_SECRET: self.oauth.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(hass.config.path(STRAVA_CONFIG_FILE), config_contents)
hass.async_add_job(setup_platform, hass, self.config, self.add_devices)
return html_response
class FitbitSensor(Entity):
"""Implementation of a Strava sensor."""
def __init__(self, client, config_path, resource_type,
is_metric, clock_format, extra=None):
"""Initialize the Strava sensor."""
self.client = client
self.config_path = config_path
self.resource_type = resource_type
self.is_metric = is_metric
self.clock_format = clock_format
self.extra = extra
self._name = FITBIT_RESOURCES_LIST[self.resource_type][0]
if self.extra:
self._name = '{0} Battery'.format(self.extra.get('deviceVersion'))
unit_type = FITBIT_RESOURCES_LIST[self.resource_type][1]
if unit_type == "":
split_resource = self.resource_type.split('/')
try:
measurement_system = FITBIT_MEASUREMENTS[self.client.system]
except KeyError:
if self.is_metric:
measurement_system = FITBIT_MEASUREMENTS['metric']
else:
measurement_system = FITBIT_MEASUREMENTS['en_US']
unit_type = measurement_system[split_resource[-1]]
self._unit_of_measurement = unit_type
self._state = 0
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self.resource_type == 'devices/battery' and self.extra:
battery_level = BATTERY_LEVELS[self.extra.get('battery')]
return icon_for_battery_level(battery_level=battery_level,
charging=None)
return 'mdi:{}'.format(FITBIT_RESOURCES_LIST[self.resource_type][2])
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = CONF_ATTRIBUTION
if self.extra:
attrs['model'] = self.extra.get('deviceVersion')
attrs['type'] = self.extra.get('type').lower()
return attrs
def update(self):
"""Get the latest data from the Strava API and update the states."""
if self.resource_type == 'devices/battery' and self.extra:
self._state = self.extra.get('battery')
else:
container = self.resource_type.replace("/", "-")
response = self.client.time_series(self.resource_type, period='7d')
raw_state = response[container][-1].get('value')
if self.resource_type == 'activities/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'activities/tracker/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'body/bmi':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/fat':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/weight':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'sleep/startTime':
if raw_state == '':
self._state = '-'
elif self.clock_format == '12H':
hours, minutes = raw_state.split(':')
hours, minutes = int(hours), int(minutes)
setting = 'AM'
if hours > 12:
setting = 'PM'
hours -= 12
elif hours == 0:
hours = 12
self._state = '{}:{} {}'.format(hours, minutes, setting)
else:
self._state = raw_state
else:
if self.is_metric:
self._state = raw_state
else:
try:
self._state = '{0:,}'.format(int(raw_state))
except TypeError:
self._state = raw_state
if self.resource_type == 'activities/heart':
self._state = response[container][-1]. \
get('value').get('restingHeartRate')
token = self.client.client.session.token
config_contents = {
ATTR_ACCESS_TOKEN: token.get('access_token'),
ATTR_REFRESH_TOKEN: token.get('refresh_token'),
ATTR_CLIENT_ID: self.client.client.client_id,
ATTR_CLIENT_SECRET: self.client.client.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(self.config_path, config_contents)
|
[
"tylerwal@gmail.com"
] |
tylerwal@gmail.com
|
88b8fac854159c39322313a56f3037ae435235d7
|
0dcf63aa75145f95ad3fba5e5b1c3dfed9e02241
|
/Proyecto1/views.py
|
963a02589043542208a6f7c5d3dcb6d1b4c96cde
|
[] |
no_license
|
erooster/python_p1
|
3c8385e050bd43b59e160d473bcf6fe074e0c224
|
103197c761e953b16d63c9e2fc08c3906d9c3b8a
|
refs/heads/master
| 2022-12-07T11:55:36.164092
| 2020-09-01T13:47:48
| 2020-09-01T13:47:48
| 271,944,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,130
|
py
|
from django.http import HttpResponse
import datetime
from django.template import Template, Context
from django.template.loader import get_template
from django.shortcuts import render
class Persona(object):
def __init__(self, nombre, apellido):
self.nombre=nombre
self.apellido=apellido
def medios(request):
fecha_actual=datetime.datetime.now()
return render(request, 'politics.html',{"currentDate":fecha_actual})
def music(request):
fecha_actual=datetime.datetime.now()
return render(request, 'music.html',{"currentDate":fecha_actual})
# Load templates with a loader by importing it: from django.template import loader
def saludo(request):
p1=Persona("Emilio", "Rooster")
ahora=datetime.datetime.now()
temasDelCurso=["Plantillas","Modelos","Formularios","Vistas","Despliegue"]
# doc_externo=get_template('saludo.html')
# documento=doc_externo.render({"nombre_persona":p1.nombre,"apellido_persona":p1.apellido,"momento_actual":ahora,"temas":temasDelCurso})
return render(request,"saludo.html",{"nombre_persona":p1.nombre,"apellido_persona":p1.apellido,"momento_actual":ahora,"temas":temasDelCurso})
# Loading a template the wrong way
# def saludo(request):
# p1=Persona("Emilio", "Rooster")
# # nombre="Juan"
# # apellido="Diáz"
# ahora=datetime.datetime.now()
# temasDelCurso=["Plantillas","Modelos","Formularios","Vistas","Despliegue"]
# # temasDelCurso=[]
#
# doc_externo=open("/home/shdaemon/Desktop/Deskontop/django/Proyecto1/Proyecto1/views/saludo.html")
# # Template
# plt=Template(doc_externo.read())
# doc_externo.close()
# # Empty context
# # ctx=Context({"nombre_persona":nombre,"apellido_persona":"Diáz","momento_actual":ahora})
# ctx=Context({"nombre_persona":p1.nombre,"apellido_persona":p1.apellido,"momento_actual":ahora,"temas":temasDelCurso})
# # render the document
# documento=plt.render(ctx)
# return HttpResponse(documento)
# THIS APPROACH SHOULD NOT BE USED; THE PROPER WAY IS TO USE A TEMPLATE (VIEWS)
# def saludo(request): # First view / returns a response
# documento="<html><body><h1>Hi mate, this is my first page with Django</h1></body></html>"
# return HttpResponse(documento)
def despedida(request): # First view / returns a response
documento="""
<html>
<body>
<h1>Goodbye mate</h1>
</body>
</html>
"""
return HttpResponse(documento)
def dameFecha(request):
fecha_actual=datetime.datetime.now()
documento="""
<html>
<body>
<h3>
Fecha y hora actual %s
</h3>
</body>
</html>""" % fecha_actual
return HttpResponse(documento)
def calculaEdad(request, edad,agno):
# edadActual=27
periodo=agno-2020
edadFutura=edad+periodo
documento="<html><body><h3>En el año %s tendrás %s años</h3></body></html>" %(agno, edadFutura)
return HttpResponse(documento)
# def despedida(request): # First view / returns a response
# return HttpResponse("Goodbye mate")
|
[
"emilio@sherpab2b.com"
] |
emilio@sherpab2b.com
|
de581644bd80cc093a03bd65e532d8ba67b9ead8
|
aa89ee068c9b337a2255eec24c113eb8c7ae1be9
|
/client/cryptosocket.py
|
27a628452be7ff5cd5ab01639d614b131eb8e414
|
[
"MIT"
] |
permissive
|
iwalton3/urpc
|
fd18393502808e15eb18fcacd9dd41cbfed363d5
|
b8554b15b34b1109744db9b471243809b794e99f
|
refs/heads/master
| 2023-09-01T12:03:41.839948
| 2021-10-26T23:58:13
| 2021-10-26T23:58:13
| 420,561,252
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,592
|
py
|
import os
import hashlib
from Crypto.Cipher import AES
def hash(*args):
h = hashlib.sha256()
for arg in args:
h.update(arg)
return h.digest()[0:16]
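# (hash() truncates SHA-256 to 16 bytes so its outputs can serve as CBC IVs,
# 16-byte authentication tags, and ratcheted session keys alike.)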
class CryptoMsgSocket:
def __init__(self, sock, key, recv_first=False):
self.secret_key = key
self.session_key = os.urandom(16)
self.r_session_key = None
self.sock = sock
if recv_first:
self._recv_sesskey()
self._send_sesskey()
ack = self.sock.recv(2)
if ack != b'OK':
self.close()
raise BrokenPipeError('No OK')
else:
self._send_sesskey()
self._recv_sesskey()
self.sock.send(b'OK')
def _recv_sesskey(self):
keys = self.sock.recv(32)
if len(keys) != 32:
self.close()
raise BrokenPipeError('Unexpected stream length')
self.r_session_key = keys[:16]
auth = keys[16:]
if auth != hash(self.secret_key, self.r_session_key):
self.close()
raise BrokenPipeError('Authentication failed')
def _send_sesskey(self):
self.sock.send(self.session_key)
self.sock.send(hash(self.secret_key, self.session_key))
def send(self, data):
padding_amt = 16 - len(data) % 16
data += bytes([padding_amt])*padding_amt
aes = AES.new(self.secret_key, AES.MODE_CBC, self.r_session_key)
data = aes.encrypt(data)
length = len(data)//16
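# pack the 16-byte-block count as two big-endian bytes; this caps a single
# message at 65535 blocks (~1 MiB) of ciphertext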
length = bytes([length>>8, length&0xFF])
auth = hash(self.secret_key, self.r_session_key, data, length)
self.r_session_key = hash(self.secret_key, self.r_session_key)
self.sock.send(auth + length + data)
def recv(self):
data = self.sock.recv(18)
if len(data) != 18:
raise BrokenPipeError('Unexpected stream length')
auth = data[:16]
length = data[16:]
block_ct = (length[0] << 8) + length[1]
data_len = block_ct*16
ciphertext = self.sock.recv(data_len)
if len(ciphertext) != data_len:
raise BrokenPipeError('Unexpected stream length')
if auth != hash(self.secret_key, self.session_key, ciphertext, length):
raise BrokenPipeError('Signature is invalid')
aes = AES.new(self.secret_key, AES.MODE_CBC, self.session_key)
ciphertext = aes.decrypt(ciphertext)
self.session_key = hash(self.secret_key, self.session_key)
return ciphertext[:-ciphertext[-1]]
def close(self):
self.sock.close()
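# A minimal usage sketch, not part of the original module (host, port, and key
# below are made-up examples). Both peers must share the same 16-byte key, and
# exactly one side should pass recv_first=True.
#
# import socket
# sock = socket.create_connection(('127.0.0.1', 9000))
# msock = CryptoMsgSocket(sock, key=b'0123456789abcdef')
# msock.send(b'hello')
# print(msock.recv())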
|
[
"ian@iwalton.com"
] |
ian@iwalton.com
|
c79c02516117cbbc551762c62dfc3a6760b6b536
|
6ba2a005c682a5bbd5980ad3bd0645efed337d95
|
/setup.py
|
0a073acc367bef22bd7299b800abce4c6e223f3b
|
[
"MIT"
] |
permissive
|
n1te/drf_orjson
|
f75d4e7607953633c12012ee9e9db7408dfbee31
|
d5a955609ee52eed7d7efb6300f1ef9333592bf7
|
refs/heads/master
| 2020-06-24T04:34:06.344562
| 2019-07-29T11:49:19
| 2019-07-29T11:49:19
| 198,850,532
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='drf_orjson',
version='0.3',
description='DRF ORJSON Renderer',
license='MIT',
author='Stanislav Shershnev',
author_email='shershnev.stas@gmail.com',
url='https://github.com/n1te/drf_orjson',
packages=['drf_orjson'],
install_requires=['django', 'orjson', 'djangorestframework'],
tests_require=['pytest'],
python_requires=">=3.5",
)
|
[
"shershnev.stas@gmail.com"
] |
shershnev.stas@gmail.com
|
1ff0ef24c1649cfd0c9c0958cf7fa0df46f8ed99
|
b506a28ec3c9a5c5c38b2fe1c76d023fe292062f
|
/PH_LIGHTING/PH_MANAGERLIGHTS/OLD/PH_MANAGERLIGHTS_V004.py
|
9beadae62cef4176425f737227bd5475bc1272b6
|
[] |
no_license
|
lefan2016/PipelineTool
|
4fe7f631cce7017e4a81d2c3b08488e4a0e98968
|
8e9b6463222df468878958aa8eb0ed763ef5c52a
|
refs/heads/master
| 2020-12-02T07:14:39.701840
| 2017-09-12T15:27:50
| 2017-09-12T15:27:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,138
|
py
|
import maya.cmds as cmds
import maya.mel as mel
from functools import partial
lights = []
swatches = []
main_layout = ''
light_layout = ''
opt = 0
WIDTH = 800
HEIGHT = 600
# counters for light names
contSpot=0
contDir=0
contPoint=0
contArea=0
contAmb=0
def UI(*args):
"""creates window for UI"""
# check to see if window exists
if (cmds.window('PH_MANAGERLIGHTS', exists=True)):
cmds.deleteUI('PH_MANAGERLIGHTS')
# create window
window = cmds.window('PH_MANAGERLIGHTS', title='PH_MANAGERLIGHTS', w=WIDTH, h=HEIGHT, mxb=False, mnb=False, sizeable=True)
create_layout()
cmds.showWindow(window)
def create_layout():
"""generates the rows/columns/buttons for the UI"""
num_lights = len(lights)
global main_layout
main_layout = cmds.scrollLayout(verticalScrollBarThickness=16, horizontalScrollBarThickness=0)
# create buttons
cmds.rowLayout( numberOfColumns=10, h=40)
cmds.button(label='Spotlight', w=80, command=partial(add_light, 'spot'))
cmds.button(label='Directional', w=80, command=partial(add_light, 'dir'))
cmds.button(label='Point', w=80, command=partial(add_light, 'point'))
cmds.button(label='Ambient', w=80, command=partial(add_light, 'amb'))
cmds.button(label='Area', w=80, command=partial(add_light, 'area'))
cmds.text(label='', w=40)
cmds.button(label='Organize', w=80, command=organize)
cmds.button(label='Basic Lights', w=80, command=basic)
cmds.text(label='', w=40)
cmds.button(label='Refresh', w=80, al='right', command=refresh)
cmds.setParent('..')
# create column labels
cmds.rowColumnLayout(nc=12,
columnWidth=[(1, 60), (2, 150), (3, 100), (4, 100), (5, 60), (6, 60), (7, 60), (8, 60), (9, 60), (10, 100), (11, 60), (12, 60)],
cs=[(1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10), (10, 10), (11, 10), (12, 10)])
cmds.text(label='Enabled', w=60, al='left')
cmds.text(label='Name', w=130, al='left')
cmds.text(label='Type', w=60, al='left')
cmds.text(label='Intensity', al='left')
cmds.text(label='Color', w=60, al='left')
cmds.text(label='Cone', w=60, al='left')
cmds.text(label='Penumbra', w=60, al='left')
cmds.text(label='Diffuse', w=60, al='left')
cmds.text(label='Spec', w=60, al='left')
cmds.text(label='Temperature Color', w=60, al='left')
cmds.text(label='Select', w=60, al='left')
cmds.text(label='Point At', w=60, al='left')
cmds.setParent('..')
create_lights()
def create_lights():
"""populates the UI with a row for each light in the scene"""
global lights
lights = cmds.ls(type='light')
global swatches
swatches = []
global main_layout
cmds.setParent(main_layout)
global light_layout
light_layout = cmds.rowColumnLayout(nc=12,
columnWidth=[(1, 60), (2, 150), (3, 100), (4, 100), (5, 60), (6, 60), (7, 60), (8, 60), (9, 60), (10, 100), (11, 60), (12, 60)],
cs=[(1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10), (10, 10), (11, 10), (12, 10)], rs=(1, 10))
# create rows of individual lights
for i, light in enumerate(lights):
# 1 - enabled
enabled = cmds.getAttr(light + '.visibility')
cmds.checkBox(label='',
v=enabled,
onc=partial(turn_on, light, 'visibility'),
ofc=partial(turn_off, light, 'visibility'),
al='center', w=40)
# 2 - name
cmds.textField(light + 'name',
tx=cmds.listRelatives(light, type='transform', p=True)[0],
w=130,
cc=partial(rename, light),
ec=partial(rename, light))
# 3 - type
cmds.text(label=cmds.nodeType(light),
w=130,
al='left')
# 4 - intensity
cmds.floatField(light + 'intensity',
v=cmds.getAttr(light + '.intensity'),
cc=partial(update_float, light, 'intensity'),
ec=partial(update_float, light, 'intensity'),
w=60)
# 5 - color
swatch = cmds.canvas(rgbValue=cmds.getAttr(light + '.color')[0],
w=40,
h=20,
pressCommand=partial(color_picker, light, i))
swatches.append(swatch)
# 6 - cone angle
if(cmds.nodeType(light) == 'spotLight'):
cmds.floatField(light + 'coneAngle',
v=cmds.getAttr(light + '.coneAngle'),
cc=partial(update_float, light, 'coneAngle'),
ec=partial(update_float, light, 'coneAngle'),
w=60)
else:
cmds.floatField(v=0, w=60, en=0)
# 7 - penumbra angle
if(cmds.nodeType(light) == 'spotLight'):
cmds.floatField(light + 'penumbraAngle',
v=cmds.getAttr(light + '.penumbraAngle'),
cc=partial(update_float, light, 'penumbraAngle'),
ec=partial(update_float, light, 'penumbraAngle'),
w=60)
else:
cmds.floatField(v=0, w=60, en=0)
# 8 - diffuse
if(cmds.nodeType(light) != 'ambientLight'):
cmds.checkBox(label='',
v=cmds.getAttr(light + '.emitDiffuse'),
onc=partial(turn_on, light, 'emitDiffuse'),
ofc=partial(turn_off, light, 'emitDiffuse'),
al='center',
w=40)
else:
cmds.checkBox(label='', en=0)
# 9 - spec
if(cmds.nodeType(light) != 'ambientLight'):
cmds.checkBox(label='',
v=cmds.getAttr(light + '.emitSpecular'),
onc=partial(turn_on, light, 'emitSpecular'),
ofc=partial(turn_off, light, 'emitSpecular'),
al='center',
w=40)
else:
cmds.checkBox(label='', en=0)
# 10 - Temperature Color
if cmds.nodeType(light) == 'directionalLight' or cmds.nodeType(light) == 'areaLight' or cmds.nodeType(light) == 'spotLight':
cmds.floatField(light + 'Temperature',v=cmds.getAttr(light + '.aiColorTemperature'),
cc=partial(update_float, light, 'aiColorTemperature'),
ec=partial(update_float, light, 'aiColorTemperature'),
w=60)
else:
# disabled placeholder keeps the 12-column layout aligned for lights without a color temperature
cmds.floatField(v=0, w=60, en=0)
# 11 - select
cmds.button(label='Select', command=partial(select, light), al='center', w=40)
# 12 - point at
if(cmds.nodeType(light) != 'ambientLight' and cmds.nodeType(light) != 'pointLight'):
cmds.button(label='Point', command=partial(aim, light), al='center', w=40)
else:
cmds.button(label='Point', en=0, w=40)
cmds.setParent('..')
def refresh(*args):
"""deletes the light layout and regenerates"""
global lights
global light_layout
cmds.deleteUI(light_layout)
light_layout = ''
lights = cmds.ls(type='light')
create_lights()
def update_float(light, kind, *args):
"""generic function that updates float values"""
sel_light = cmds.listRelatives(cmds.textField(light + 'name', q=True, tx=True), s=True)[0]
cmds.setAttr(sel_light + '.' + kind, args[0])
def turn_off(light, kind, *args):
"""turns off a light"""
sel_light = cmds.listRelatives(cmds.textField(light + 'name', q=True, tx=True), s=True)[0]
cmds.setAttr(sel_light + '.' + kind, False)
def turn_on(light, kind, *args):
"""turns on a light"""
sel_light = cmds.listRelatives(cmds.textField(light + 'name', q=True, tx=True), s=True)[0]
cmds.setAttr(sel_light + '.' + kind, True)
def select(light, *args):
"""selects a light in the viewport"""
sel_light = cmds.textField(light + 'name', q=True, tx=True)
cmds.select(sel_light)
def aim(light, *args):
"""creates a constraint to aim the light, then removes the constraint"""
sel_light = cmds.textField(light + 'name', q=True, tx=True)
sel_obj = cmds.ls(sl=True)
if (sel_obj and sel_light not in sel_obj):
aim = cmds.aimConstraint(sel_obj, sel_light, aim=[0, 0, -1])
cmds.delete(aim)
def rename(light, *args):
"""renames a light"""
cmds.select(cmds.listRelatives(light, type='transform', p=True))
new_name = cmds.rename(cmds.textField(light + 'name', q=True, tx=True))
cmds.textField(light + 'name', e=True, tx=new_name)
def organize(*args):
"""parents all lights under a top-level 'lights' node"""
if(not cmds.ls('LIGHTS__GRP')):
cmds.group(name='LIGHTS__GRP', em=True, w=True)
cmds.parent(cmds.ls(type='light'), 'LIGHTS__GRP', absolute=True)
def basic(*args):
"""creates a basic 6 light light rig for interiors, probably unnecessary now..."""
if(not cmds.ls('LIGHTS__GRP')):
cmds.group(name='LIGHTS__GRP', em=True, w=True)
cool = [.8, .85, 1]
warm = [1, .88, .8]
north = cmds.directionalLight(n=('lFill_fromSouthOnNorth__LGDIR'), rgb=cool, i=.2)
cmds.setAttr(cmds.listRelatives(north,type='transform',p=True)[0] + '.ry', 180)
south = cmds.directionalLight(n='lFill_fromNorthOnSouth__LGDIR', rgb=cool, i=.2)
east = cmds.directionalLight(n='lFill_fromWestOnEast__LGDIR', rgb=cool, i=.2)
cmds.setAttr(cmds.listRelatives(east,type='transform',p=True)[0] + '.ry', 90)
west = cmds.directionalLight(n='lFill_fromEastOnWest__LGDIR', rgb=cool, i=.2)
cmds.setAttr(cmds.listRelatives(west,type='transform',p=True)[0] + '.ry', -90)
sky = cmds.directionalLight(n='lFill_fromFloorOnSky__LGDIR', rgb=warm, i=.1)
cmds.setAttr(cmds.listRelatives(sky,type='transform',p=True)[0] + '.rx', 90)
floor = cmds.directionalLight(n='lFill_fromSkyOnFloor__LGDIR', rgb=cool, i=.2)
cmds.setAttr(cmds.listRelatives(floor,type='transform',p=True)[0] + '.rx', -90)
amb = cmds.ambientLight(n='lAmb_onSet__LGAMB', i=.01)
cmds.parent(cmds.ls(type='light'), 'LIGHTS__GRP', absolute=True)
refresh()
def change_decay(light, *args):
"""changes the decay type of light"""
sel_light = cmds.listRelatives(cmds.textField(light + 'name', q=True, tx=True), s=True)[0]
global opt
if (args[0] == 'No Decay'):
opt = 0
elif (args[0] == 'Linear'):
opt = 1
elif (args[0] == 'Quadratic'):
opt = 2
elif (args[0] == 'Cubic'):
opt = 3
else:
opt = int(args[0])
cmds.optionMenu('decay' + light, edit=True, sl = opt + 1)
cmds.setAttr(sel_light + '.decayRate', opt)
def color_picker(light, index, *args):
"""brings up the color picker UI to select a color for a light"""
sel_light = cmds.listRelatives(cmds.textField(light + 'name', q=True, tx=True), s=True)[0]
curr_color = cmds.getAttr(sel_light + '.color')
cmds.colorEditor(rgbValue=curr_color[0])
if cmds.colorEditor(query=True, result=True):
values = cmds.colorEditor(query=True, rgb=True)
cmds.setAttr(sel_light + '.color', *values)
cmds.canvas(swatches[index], e=True, rgbValue=cmds.getAttr(sel_light + '.color')[0])
def add_light(kind, *args):
global contSpot
global contDir
global contPoint
global contArea
global contAmb
"""adds a new light, organizes it, and refreshes the UI"""
if(not cmds.ls('LIGHTS__GRP')):
cmds.group(name='LIGHTS__GRP', em=True, w=True)
if kind == 'spot':
nameLight = cmds.spotLight(name='RENAMEMEPLEASE' + str(contSpot)).encode("utf-8")
cmds.select(nameLight)
lightTrf = cmds.listRelatives(nameLight, shapes=True, children= True, allParents=True)[0]
newName = cmds.rename(lightTrf, str(lightTrf) + '__LGSPO')
contSpot=contSpot+1
cmds.parent(newName, 'LIGHTS__GRP')
refresh()
elif kind == 'dir':
nameLight = cmds.directionalLight(name='RENAMEMEPLEASE' + str(contDir)).encode("utf-8")
cmds.select(nameLight)
lightTrf = cmds.listRelatives(nameLight, shapes=True, children= True, allParents=True)[0]
newName = cmds.rename(lightTrf, str(lightTrf) + '__LGDIR')
contDir=contDir+1
cmds.parent(newName, 'LIGHTS__GRP')
refresh()
elif kind == 'point':
nameLight = cmds.pointLight(name='RENAMEMEPLEASE' + str(contPoint)).encode("utf-8")
cmds.select(nameLight)
lightTrf = cmds.listRelatives(nameLight, shapes=True, children= True, allParents=True)[0]
newName = cmds.rename(lightTrf, str(lightTrf) + '__LGPOI')
contPoint=contPoint+1
cmds.parent(newName, 'LIGHTS__GRP')
refresh()
elif kind == 'amb':
nameLight = cmds.ambientLight(name='RENAMEMEPLEASE' + str(contAmb)).encode("utf-8")
cmds.select(nameLight)
lightTrf = cmds.listRelatives(nameLight, shapes=True, children= True, allParents=True)[0]
newName = cmds.rename(lightTrf, str(lightTrf) + '__LGAMB')
contAmb=contAmb+1
cmds.parent(newName, 'LIGHTS__GRP')
refresh()
elif kind == 'area':
nameLight = cmds.shadingNode('areaLight',name='RENAMEMEPLEASE' + str(contArea), asLight=True).encode("utf-8")
cmds.select(nameLight)
nameLight = cmds.rename(nameLight, 'RENAMEMEPLEASE' + str(contArea) + '__LGARE' )
lightTrf = cmds.listRelatives(nameLight)[0]
newName = cmds.rename(lightTrf, str(lightTrf) + '__LGARESH')
contArea=contArea+1
cmds.parent(nameLight, 'LIGHTS__GRP')
refresh()
def main():
"""Loads the Arnold plugin if needed, then calls the UI function to generate the UI."""
if cmds.pluginInfo('mtoa.mll',q=True, l=True ):
print 'Arnold is already ON'
else:
cmds.loadPlugin( 'mtoa.mll' )
print 'Arnold ON'
UI()
main()
|
[
"pabloemmanueldeleo@gmail.com"
] |
pabloemmanueldeleo@gmail.com
|
e42a51b692893c7356c406e9ed9c6a69317687b6
|
01a55f4e4d79c8f3541632dd8754ab7010ccf4d4
|
/vanilla_net_code/__init__.py
|
c5efb8e5f1b051cd9e9d538b6f174114e14e164f
|
[] |
no_license
|
zhao1701/supervised-vae
|
30fe379776af3916b92c8b505a597777818a674b
|
8b30e7a9288a1eada4bafd27a86758872083f040
|
refs/heads/master
| 2023-02-08T21:12:04.229815
| 2018-12-12T22:17:45
| 2018-12-12T22:17:45
| 154,344,195
| 3
| 1
| null | 2023-02-02T04:31:59
| 2018-10-23T14:38:10
|
Python
|
UTF-8
|
Python
| false
| false
| 11,070
|
py
|
import os
import numpy as np
import tensorflow as tf
class ClassicNeuralNetwork:
def __init__(
self, checkpoint_dir, log_dir, img_shape=(128, 128, 3), num_final_hidden_layer=32,
num_classes=2):
self.checkpoint_dir = checkpoint_dir
self.img_shape = img_shape
self.num_final_hidden_layer = num_final_hidden_layer
self.num_classes = num_classes
with tf.variable_scope('ClassicNeuralNetwork', reuse=tf.AUTO_REUSE):
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self._create_network()
self._create_losses()
self._create_optimizers()
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())  # tf.metrics state lives in local variables
self._load_checkpoint()
self.summary_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
def _load_checkpoint(self):
"""
Checks if a model checkpoint exists, and if so, alters 'self.sess' to
reflect the appropriate state of the Tensorflow computation graph.
"""
self.saver = tf.train.Saver(max_to_keep=5)
checkpoint = tf.train.get_checkpoint_state(self.checkpoint_dir)
# If checkpoint exists and is reachable, load checkpoint state into 'sess'
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
print('loaded checkpoint: {}'.format(
checkpoint.model_checkpoint_path))
else:
print(
'Could not find old checkpoint. '
'Creating new checkpoint directory.'
)
if not os.path.exists(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
def _save_checkpoint(self):
self.saver.save(
self.sess,
self.checkpoint_dir,
global_step=self.global_step)
def _create_network(self, reuse=tf.AUTO_REUSE):
"""
Create the computation graph for the convolutional classifier: input
placeholders, a stack of conv + batch-norm layers, and the class logits.
"""
height, width, channels = self.img_shape
self.x_input = tf.placeholder(
tf.float32, shape=[None, height, width, channels], name='x_input')
self.y_input = tf.placeholder(
tf.float32, shape=[None, 2], name='y_input')
self.learning_rate = tf.placeholder(
tf.float32, name='learning_rate')
self.beta = tf.placeholder(
tf.float32, name='beta')
with tf.variable_scope('network', reuse=reuse):
# If input images were reshaped into vectors, reshape them back into
# their original dimensions for convolutional layers.
# height, width, channels = self.img_shape
# x_reshaped = tf.reshape(x, [-1, height, width, channels])
# Ex: filters=32, kernel_size=4, stride=2
x = self.x_input  # feed the input placeholder into the conv stack
x = tf.layers.Conv2D(32, 4, 2, 'same', activation=tf.nn.relu,)(x)
x = tf.layers.BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5)(x)
x = tf.layers.Conv2D(64, 4, 2, 'same', activation=tf.nn.relu,)(x)
x = tf.layers.BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5)(x)
x = tf.layers.Conv2D(128, 4, 2, 'same', activation=tf.nn.relu,)(x)
x = tf.layers.BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5)(x)
x = tf.layers.Conv2D(128, 4, 2, 'same', activation=tf.nn.relu,)(x)
x = tf.layers.BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5)(x)
x = tf.layers.Conv2D(256, 4, 2, 'same', activation=tf.nn.relu,)(x)
x = tf.layers.BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5)(x)
x = tf.layers.Conv2D(512, 4, activation=tf.nn.relu,)(x)
x = tf.layers.BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5)(x)
# Final convolutions downsize each channel's dimensions to a 1x1 patch,
# resulting in a final tensor with shape (batch_size, 1, 1, num_final_hidden_layer)
final_hidden_layer = tf.layers.Conv2D(self.num_final_hidden_layer, 1)(x)
# Flatten to (batch_size, num_final_hidden_layer) so the Dense classifier
# emits logits of shape (batch_size, num_classes)
final_hidden_layer = tf.layers.Flatten()(final_hidden_layer)
self.y_logits = self._create_classification(final_hidden_layer)
def _create_classification(self, z):
with tf.variable_scope('classifier', reuse=tf.AUTO_REUSE):
y_logits = tf.layers.Dense(self.num_classes, name='y_logits')(z)
return y_logits
def _create_losses(self):
summary_ops = list()
# Softmax cross-entropy classification loss
cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self.y_input, logits=self.y_logits)
self.cross_entropy_loss = tf.reduce_mean(
cross_entropy_loss, name='cross_entropy_loss')
summary_ops.append(
tf.summary.scalar('cross_entropy loss', self.cross_entropy_loss))
acc, acc_op = tf.metrics.accuracy(labels=tf.argmax(self.y_input, 1),
predictions=tf.argmax(self.y_logits, 1))
self.acc = acc
self.acc_op = acc_op
summary_ops.append(
tf.summary.scalar('accuracy', self.acc))
summary_ops.append(
tf.summary.scalar('acc_op', self.acc_op))
auc_value, auc_op = tf.metrics.auc(
labels=tf.argmax(self.y_input, 1),
# tf.metrics.auc expects float scores in [0, 1], so use the positive-class probability
predictions=tf.nn.softmax(self.y_logits)[:, 1], curve='ROC')
self.auc_value = auc_value
self.auc_op = auc_op
summary_ops.append(
tf.summary.scalar('auc_value', self.auc_value))
summary_ops.append(
tf.summary.scalar('auc_op', self.auc_op))
# average_precision_at_k returns a (metric, update_op) pair, expects per-class
# float scores as predictions, and k must not exceed the number of classes
average_precision, average_precision_op = tf.metrics.average_precision_at_k(
labels=tf.argmax(self.y_input, 1),
predictions=tf.nn.softmax(self.y_logits), k=self.num_classes)
self.average_precision = average_precision
summary_ops.append(
tf.summary.scalar('average_precision', self.average_precision))
self.summary_ops_merged = tf.summary.merge(
summary_ops)
def _create_optimizers(self):
optimizer = tf.train.AdamOptimizer
classifier_optimizer = optimizer(self.learning_rate)
# passing global_step lets TensorFlow increment it on every update
self.classifier_optimizer = classifier_optimizer.minimize(
self.cross_entropy_loss, global_step=self.global_step)
def _partial_fit_classifier(self, x_batch, y_batch, learning_rate, beta):
"""
Train the classifier network based on a minibatch
of training data.
Parameters
----------
x_batch : array-like, shape = [batch_size, height, width, channels]
A minibatch of input images.
y_batch : array-like, shape = [batch_size, num_classes]
A one-hot encoded matrix of training labels.
"""
feed_dict = {
self.x_input: x_batch,
self.y_input: y_batch,
self.learning_rate: learning_rate,
self.beta: beta
}
_ = self.sess.run(self.classifier_optimizer, feed_dict=feed_dict)
step = self.sess.run(self.global_step)
summary_str = self.sess.run(
self.summary_ops_merged, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, step)
def fit_classifier(
self, x, y, num_epochs=5, batch_size=256,
learning_rate=1e-3, beta=1):
"""
Train the classifier network.
Parameters
----------
x : array-like, shape = [num_samples, height, width, channels]
A set of input images.
y : array-like, shape = [num_samples, num_classes]
A one-hot encoded matrix of training labels.
"""
# Shuffle x and y
num_samples = len(x)
for epoch in range(num_epochs):
random_indices = np.random.permutation(num_samples)
x = x[random_indices]
y = y[random_indices]
# Split x and y into batches
num_batches = num_samples // batch_size
indices = [[k, k+batch_size] for k in range(0, num_samples, batch_size)]
indices[-1][-1] = num_samples
x_batches = [x[start:end] for start, end in indices]
y_batches = [y[start:end] for start, end in indices]
print(f'Training epoch {epoch}...')
# Iteratively train the classifier
for x_batch, y_batch in zip(x_batches, y_batches):
self._partial_fit_classifier(
x_batch, y_batch, learning_rate, beta)
def _partial_fit_decoder(self, x_batch, learning_rate):
"""
Train decoder network based on a minibatch of input images.
Note: relies on ``self.decoder_optimizer`` and
``self.summary_ops_decoder_merged``, which this classifier-only class
does not define (leftover from a VAE variant of this code).
Parameters
----------
x_batch : array-like, shape = [batch_size, height, width, channels]
A minibatch of input images.
"""
feed_dict = {
self.x_input: x_batch,
self.learning_rate: learning_rate,
}
_ = self.sess.run(self.decoder_optimizer, feed_dict=feed_dict)
step = self.sess.run(self.global_step)
summary_str = self.sess.run(
self.summary_ops_decoder_merged, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, step)
def fit_decoder(self, x, num_epochs=5, batch_size=256, learning_rate=1e-3):
"""
Train decoder network.
Parameters
----------
x : array-like, shape = [num_samples, height, width, channels]
A set of input images.
"""
# Shuffle x
num_samples = len(x)
for epoch in range(num_epochs):
random_indices = np.random.permutation(num_samples)
x = x[random_indices]
# Split x into batches
num_batches = num_samples // batch_size
indices = [[k, k+batch_size] for k in range(0, num_samples, batch_size)]
indices[-1][-1] = num_samples
x_batches = [x[start:end] for start, end in indices]
print(f'Training epoch {epoch}...')
# Iteratively train the decoder
for x_batch in x_batches:
self._partial_fit_decoder(x_batch, learning_rate)
def predict(self, x):
"""
Given a minibatch of input images, predict classes.
Parameters
==========
x : array-like, shape = [batch_size, height, width, channels]
A minibatch of input images.
Returns
=======
predictions : array, shape = [batch_size, num_classes]
A matrix of <batch_size> predictive distributions.
"""
feed_dict = {
self.x_input: x,
}
# softmax over the class logits yields the predictive distribution
predictions = self.sess.run(tf.nn.softmax(self.y_logits), feed_dict=feed_dict)
return predictions
def compress(self, x):
"""
Given a minibatch of input images, create a minibatch of
latent means.
Note: relies on ``self.z_mean``, which this classifier-only class does
not define (leftover from a VAE variant of this code).
Parameters
==========
x : array-like, shape = [batch_size, height, width, channels]
A minibatch of input images.
Returns
=======
latents : array, shape = [batch_size, num_latent_dimensions]
A minibatch of latent means.
"""
feed_dict = {
self.x_input: x,
}
latents = self.sess.run(self.z_mean, feed_dict=feed_dict)
return latents
def reconstruct(self, x):
"""
Given a minibatch of input images, create a minibatch of reconstructions.
Note: relies on ``self.x_out_denoised``, which this classifier-only class
does not define (leftover from a VAE variant of this code).
Parameters
==========
x : array-like, shape = [batch_size, height, width, channels]
A minibatch of input images.
Returns
=======
reconstructions : array, shape = [batch_size, height, width, channels]
A minibatch of reconstructions.
"""
feed_dict = {
self.x_input: x,
}
reconstructions = self.sess.run(self.x_out_denoised, feed_dict=feed_dict)
return reconstructions
def reconstruct_latents(self, z):
"""
Given a minibatch of latent means, create a minibatch of reconstructions.
Note: relies on ``self.z_mean`` and ``self.x_out_denoised``, which this
classifier-only class does not define (leftover from a VAE variant).
Parameters
==========
z : array-like, shape = [batch_size, num_latent_dimensions]
A minibatch of latent means.
Returns
=======
reconstructions : array, shape = [batch_size, height, width, channels]
A minibatch of reconstructions.
"""
feed_dict = {
self.z_mean: z,
}
reconstructions = self.sess.run(self.x_out_denoised, feed_dict=feed_dict)
return reconstructions
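# A hypothetical usage sketch, not part of the original module (directory names
# and data shapes are assumptions): x is float32 with shape (N, 128, 128, 3)
# and y is one-hot with shape (N, 2).
#
# model = ClassicNeuralNetwork('checkpoints/', 'logs/')
# model.fit_classifier(x_train, y_train, num_epochs=5, batch_size=64)
# probs = model.predict(x_test)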
|
[
"ramy.fahim.t@gmail.com"
] |
ramy.fahim.t@gmail.com
|
71bb4c2e8b74c2309967e368b6b2f58a594a391a
|
d2e09ff4faf7c9330ac5970b92b1cc8ab918d586
|
/controls.py
|
4b727940196c06e050fd4d424d102af978790df6
|
[] |
no_license
|
zzggbb/waves
|
cc06e6ff48e1e577531dcfb9000a53599f56ef67
|
6f059118812e63d1b3e1bc08666c2e11afe84979
|
refs/heads/master
| 2018-12-04T11:00:53.995135
| 2018-09-06T05:07:26
| 2018-09-06T05:07:26
| 117,961,700
| 5
| 3
| null | 2018-03-12T23:15:48
| 2018-01-18T09:31:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
import pygame
import util
FONT_SIZE = 18
FONT_PATH = './resources/terminus.ttf'
FONT_COLOR = (255,255,255)
FONT_ANTIALIAS = True
CARD_WIDTH = 200
class Controls(object):
def __init__(self, surface):
pygame.font.init()
self.surface = surface
self.font = pygame.font.Font(FONT_PATH, FONT_SIZE)
def draw(self, sample_rate, sample_size, pull, smooth, width, bars, mouse_frequency):
params = {
'sample rate': str(sample_rate),
'sample size': str(sample_size),
'pull ratio (0-1)': '{0:.2f}'.format(pull),
'smoothing ratio (0-1)': '{0:.2f}'.format(smooth),
'window width': str(width),
'bar count': str(bars),
'bar width': str(width // bars),
'mouse frequency': str(int(mouse_frequency))
}
        # find the widest label so values can be aligned past it
        # (use a distinct name so the `width` parameter isn't shadowed)
        max_label_width = 0
        for label in params.keys():
            label_width, _ = self.font.size(label)
            if label_width > max_label_width:
                max_label_width = label_width
i = 0
for label, value in params.items():
item_text = label + ' ' + value
item_width, item_height = self.font.size(item_text)
label_width, _ = self.font.size(label)
item_surface = self.font.render(item_text, FONT_ANTIALIAS, FONT_COLOR)
pos = (max_label_width - label_width, i)
self.surface.blit(item_surface, pos)
i = i + item_height
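# Hedged usage sketch (added): a hypothetical caller that owns the pygame
# surface and feeds live values into Controls.draw; the numbers are made up.
#   surface = pygame.display.set_mode((1280, 720))
#   controls = Controls(surface)
#   controls.draw(sample_rate=44100, sample_size=1024, pull=0.5, smooth=0.7,
#                 width=1280, bars=64, mouse_frequency=440.0)
#   pygame.display.flip()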
|
[
"zanegbradley@gmail.com"
] |
zanegbradley@gmail.com
|
c4c053b4e5fb7caef4a06a5ca2ef9c6651462719
|
aa8ff303018658501b2c03c7f3beaf444c466335
|
/bin/utmlabel.py
|
5c124c2dfbd8406d26ac21b0698bdffdbaa41678
|
[
"MIT"
] |
permissive
|
bitangler/usgs-maptools
|
d5fe6e9c6ad7eb9112a1f400b1f49c2ac95e10e7
|
5a391e7d27b5296d98e029661e1326b2122baa00
|
refs/heads/master
| 2020-03-30T08:13:37.676974
| 2018-09-30T19:32:45
| 2018-09-30T19:32:45
| 150,999,087
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
"""Format UTM grid points."""
from qgis.core import *
from qgis.gui import *
from math import floor
superscript = [u"\u2070", u"\u00B9", u"\u00B2", u"\u00B3", u"\u2074",
u"\u2075", u"\u2076", u"\u2077", u"\u2078", u"\u2079"]
@qgsfunction(args="auto", group="Custom")
def utmlabel(grid_number, feature, parent):
"""Converts grid_number to a formatted UTM label."""
i = int(grid_number)
n = int(floor(i / 100000.0))
m = int(floor((i - 100000*n) / 1000.0))
out = ""
for k in "%d" % n:
out += superscript[int(k)]
out += "%02d" % m
return out
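# Worked example (added, values chosen for illustration): for grid_number
# 357000, n = floor(357000 / 100000) = 3 and m = floor(57000 / 1000) = 57,
# so the label is superscript 3 followed by "57" (rendered as u"\u00B3" + "57").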
|
[
"kris@bitangler.com"
] |
kris@bitangler.com
|
8fa1a366dbce5bdbffd627c0e4d92da526c386e8
|
8095901fb1499464d13ac0c203b787d8260220a3
|
/solution.py
|
ef9c962b022387ab679c3942bb0e10b79027feb7
|
[] |
no_license
|
ks5750/prbml1
|
0bb9b5c32cbcd8729287a7a70001f1f45419016e
|
975edbdf1b9cc58f14ef8ccf0b24e25ea70c8198
|
refs/heads/master
| 2023-08-10T18:09:23.398905
| 2021-09-18T04:55:55
| 2021-09-18T04:55:55
| 407,752,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
# ! /usr/bin/env python3
from nacl.secret import SecretBox
from nacl.exceptions import CryptoError
import sys
import json
## with open(sys.argv[1]) as json_data:
## inputs = json.load(json_data)
##
inputs = json.load(sys.stdin)
outputs = {}
# Problem 1
ints_sum = 0
ints_product = 1
for x in inputs["problem1"]:
ints_sum += x
ints_product *= x
outputs["problem1"] = {
"sum": ints_sum,
"product": ints_product,
}
# Problem 2
input_hexstr = inputs["problem2"]
output_bytes = bytes.fromhex(input_hexstr)
output_str = output_bytes.decode()
outputs["problem2"] = output_str
# Problem 3
input_str = inputs["problem3"]
output_bytes = input_str.encode()
output_hex = output_bytes.hex()
outputs["problem3"] = output_hex
# Problem 4
in_cipher_hex = inputs["problem4"]
ciphertext_bytes = bytes.fromhex(in_cipher_hex)
key = b"A" * 32
nonce = b"B" * 24
plaintext_bytes = SecretBox(key).decrypt(ciphertext_bytes, nonce)
plaintext_string = plaintext_bytes.decode()
outputs["problem4"] = plaintext_string
# Problem 5
ciphertext_list = inputs["problem5"]
key = b"C" * 32
nonce = b"D" * 24
for ciphertext_hex in ciphertext_list:
ciphertext_bytes = bytes.fromhex(ciphertext_hex)
try:
plaintext_bytes = SecretBox(key).decrypt(ciphertext_bytes, nonce)
except CryptoError:
# Bad ciphertext
continue
plaintext_string = plaintext_bytes.decode()
outputs["problem5"] = plaintext_string
break
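# Note (added): SecretBox is XSalsa20-Poly1305 authenticated encryption, so a
# ciphertext that wasn't produced with this key/nonce fails the Poly1305 tag
# check and raises CryptoError -- exactly the failure the loop above skips.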
# Output
#
# In the video I wrote something more like `json.dump(outputs, sys.stdout)`.
# Either way works. This way adds some indentation and a trailing newline,
# which makes things look nicer in the terminal.
print(json.dumps(outputs, indent=" "))
|
[
"kumar.saswat@gmail.com"
] |
kumar.saswat@gmail.com
|
adf4885d393bf0099e401b25aaa6873e9d870213
|
05eb33ed7741542ba59b925c6a294722d17373ff
|
/PythonCrashCourse/ch_7/deli.py
|
6a9e438fee661c9b6d916b769726015e06ae102c
|
[] |
no_license
|
alliejee/100DaysOfCode
|
87bf3c524aef379731b793d4a921fbcec360fae0
|
532f6b39bfc13c19ee200e5a1af71dd8eb59d369
|
refs/heads/master
| 2021-05-18T22:24:35.703504
| 2020-04-30T02:44:40
| 2020-04-30T02:44:40
| 251,451,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
sandwich_order = ['grilled cheese', 'ham and cheese', 'sausage', 'pastrami', 'pastrami', 'pastrami']
finished_sandwiches = []
# no pastrami loop ex7.9
print("We are out of pastrami.. Sorry!")
while 'pastrami' in sandwich_order:
sandwich_order.remove('pastrami')
while sandwich_order:
sandwich = sandwich_order.pop()
print(f"I made your {sandwich} sandwich!")
finished_sandwiches.append(sandwich)
print("\nHere's all the sandwiches I made:")
for sandwich in finished_sandwiches:
print(sandwich.title())
|
[
"grayskyze@protonmail.com"
] |
grayskyze@protonmail.com
|
f67418f9306eb02a5a68894a2fe661d4c07deceb
|
061c7d6b96dd7b2c688a058011c0439072c24c99
|
/home/migrations/0002_load_initial_data.py
|
f0df0b50225f3bd23b8b86f64e0dcdd3941b70d6
|
[] |
no_license
|
crowdbotics-apps/elden-web-app-18198
|
68a842f9d6fdffb94350ced0e9e8642c09ff0b88
|
9df018b854ae17d1424d5f99e26321e17b3e4626
|
refs/heads/master
| 2022-10-25T03:20:35.014126
| 2020-06-18T17:54:57
| 2020-06-18T17:54:57
| 273,304,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Elden Web App"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">Elden Web App</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "elden-web-app-18198.botics.co"
site_params = {
"name": "Elden Web App",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bcf69b0576e77df73e23bfbeb74faec8ed5d12d2
|
ad9ebfb9812ae23186e6de1e2191b95a1faec230
|
/main.py
|
67c825e55ac7fe2168d98d9ac5c3d45977c58965
|
[] |
no_license
|
saritazavala/Proyecto-3-Raycaster
|
75346d1736b3a1b82c633bff7ee7477580c5f8d9
|
03b999f4f4b37add746502f0bad3ea8761b85be6
|
refs/heads/main
| 2023-01-18T23:19:27.072994
| 2020-11-29T16:42:25
| 2020-11-29T16:42:25
| 312,156,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,041
|
py
|
# Universidad del Valle de Guatemala
# Sara Nohemi Zavala Gutierrez
# Computer Graphics
# 18893 - Project 3
import pygame
import sys
from math import pi, cos, sin, atan2
import time
# ---------------------------------------------------------
SPRITE_BACKGROUND = (152, 0, 136, 255)
buttonsColor = (209, 101, 44)
onSelectedButton = (85, 180, 144)
onSelectedTextColor = (250, 250, 250)
textColor = (0, 0, 0)
GRASS = (89, 159, 13)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
# ---------------------------------------------------------
# images, walls and enemies
# ---------------------------------------------------------
back = pygame.image.load('./images/main.jpg')
inst = pygame.image.load('./images/inst.jpg')
pause = pygame.image.load('./images/pause.jpg')
wall1 = pygame.image.load('./images/wall1.png')
wall2 = pygame.image.load('./images/wall2.jpg')
wall3 = pygame.image.load('./images/wall3.jpg')
wall4 = pygame.image.load('./images/wall4.png')
wall5 = pygame.image.load('./images/wall5.png')
win = pygame.image.load('./images/win.jpeg')
lose = pygame.image.load('./images/lose.jpg')
rock = pygame.image.load('./images/rock.png')
pick = pygame.image.load('./images/pick.png')
# Enemies
# ---------------------------------------------------------
enemy1 = pygame.image.load('./images/sprite1.png')
enemy2 = pygame.image.load('./images/sprite2.png')
enemy3 = pygame.image.load('./images/sprite3.png')
enemy4 = pygame.image.load('./images/sprite4.png')
player_hand = pygame.image.load('./images/player.png')
textures = {
"1": wall1,
"2": wall2,
"3": wall3,
"4": wall4,
"5": wall5,
"6": rock
}
enemies = [
{
"x": 100,
"y": 200,
"texture": enemy1
},
{
"x": 280,
"y": 190,
"texture": enemy2
},
{
"x": 225,
"y": 340,
"texture": enemy3
},
{
"x": 220,
"y": 425,
"texture": enemy4
},
{
"x": 320,
"y": 420,
"texture": enemy1
}
]
pygame.init()
screen = pygame.display.set_mode((1000, 500))
class Raycaster:
def __init__(self, screen):
_, _, self.width, self.height = screen.get_rect()
self.screen = screen
self.blocksize = 50
self.map = []
self.zbuffer = [-float('inf') for z in range(0, 1000)]
self.player = {
"x": self.blocksize + 20,
"y": self.blocksize + 20,
"a": 0,
"fov": pi / 3
}
def main_menu_sound(self):
pygame.mixer.music.load('./music/01_Circle_of_Life.mp3')
pygame.mixer.music.set_volume(0.35)
pygame.mixer.music.play(-1)
def step_sound(self):
a = pygame.mixer.Sound('./music/step.wav')
a.play()
def point(self, x, y, c=None):
screen.set_at((x, y), c)
def draw_rectangle(self, x, y, texture, size):
x = int(x)
y = int(y)
size = int(size)
for cx in range(x, x + size):
cx = int(cx)
for cy in range(y, y + size):
cy = int(cy)
tx = int((cx - x) * 12.8)
ty = int((cy - y) * 12.8)
c = texture.get_at((tx, ty))
self.point(cx, cy, c)
def draw_player(self, xi, yi, w=256, h=256):
for x in range(xi, xi + w):
for y in range(yi, yi + h):
tx = int((x - xi) * 32 / w)
ty = int((y - yi) * 32 / h)
c = player_hand.get_at((tx, ty))
if c != (152, 0, 136, 255):
self.point(x, y, c)
def load_map(self, filename):
with open(filename) as f:
for line in f.readlines():
self.map.append(list(line))
def cast_ray(self, a):
d = 0
cosa = cos(a)
sina = sin(a)
while True:
x = int(self.player["x"] + d * cosa)
y = int(self.player["y"] + d * sina)
i = int(x / self.blocksize)
j = int(y / self.blocksize)
if self.map[j][i] != ' ':
hitx = x - i * 50
hity = y - j * 50
if 1 < hitx < 49:
maxhit = hitx
else:
maxhit = hity
tx = int(maxhit * 2.56)
return d, self.map[j][i], tx
self.screen.set_at((int(x / 2), int(y / 2)), WHITE)
d += 1
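    # Note (added): render() below divides the ray distance d by
    # cos(a - player angle) before turning it into a wall height -- the
    # classic fisheye correction for rays cast away from the view direction.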
def draw_stake(self, x, h, tx, texture):
h_half = h / 2
start = int(250 - h_half)
end = int(250 + h_half)
end_start_pro = 128 / (end - start)
for y in range(start, end):
ty = int((y - start) * end_start_pro)
c = texture.get_at((tx, ty))
self.point(x, y, c)
def draw_sprite(self, sprite):
sprite_a = atan2((sprite["y"] - self.player["y"]),
(sprite["x"] - self.player["x"]))
sprite_d = ((self.player["x"] - sprite["x"]) ** 2 +
(self.player["y"] - sprite["y"]) ** 2) ** 0.5
sprite_size_half = int(250 / sprite_d * 70)
sprite_size = sprite_size_half * 2
sprite_x = int(500 + (sprite_a - self.player["a"]) * 477.46 +
250 - sprite_size_half)
sprite_y = int(250 - sprite_size_half)
sprite_size_pro = 128 / sprite_size
for x in range(sprite_x, sprite_x + sprite_size):
for y in range(sprite_y, sprite_y + sprite_size):
if 500 < x < 1000 and self.zbuffer[x - 500] <= sprite_d:
tx = int((x - sprite_x) * sprite_size_pro)
ty = int((y - sprite_y) * sprite_size_pro)
c = sprite["texture"].get_at((tx, ty))
if c != (152, 0, 136, 255):
self.point(x, y, c)
self.zbuffer[x - 500] = sprite_d
def coords(self):
font = pygame.font.SysFont("forte", 25, False)
coords = "X: " + str(r.player["x"]) + " Y: " + str(r.player["y"])
coords_text = font.render(coords, 1, pygame.Color(58, 166, 166))
return coords_text
def render(self):
halfWidth = int(self.width / 2)
halfHeight = int(self.height / 2)
for i in range(0, 1000):
try:
a = self.player["a"] - self.player["fov"] / 2 + (i * self.player["fov"] / 1000)
d, m, tx = self.cast_ray(a)
self.zbuffer[i] = d
x = i
h = (500 / (d * cos(a - self.player["a"]))) * 50
self.draw_stake(x, h, tx, textures[m])
            except Exception:  # the ray left the map (player clipped a wall); reset and show the lose screen
self.player["x"] = 70
self.player["y"] = 70
self.player['a'] = 0
self.lose_action()
for x in range(0, halfWidth, self.blocksize):
for y in range(0, self.height, self.blocksize):
i = int(x / self.blocksize)
j = int(y / self.blocksize)
if self.map[j][i] != ' ':
self.draw_rectangle(x / 2, y / 2, textures[self.map[j][i]], self.blocksize / 2)
self.point(int(self.player["x"] * 0.2) + 900, int(self.player["y"] * 0.2) + 400, (255, 255, 255))
for enemy in enemies:
self.point(enemy["x"], enemy["y"], BLACK)
self.draw_sprite(enemy)
# self.draw_player(1000 - 256 - 128, 500 - 256)
def drawText(self, text, font, color, surface, x, y):
textobj = font.render(text, 1, color)
textrect = textobj.get_rect()
textrect.topleft = (x, y)
surface.blit(textobj, textrect)
def text_objects(self, text, font):
textSurface = font.render(text, True, WHITE)
return textSurface, textSurface.get_rect()
def text_objects2(self, text, font):
textSurface = font.render(text, True, BLACK)
return textSurface, textSurface.get_rect()
def main_menu(self):
tittle_font = pygame.font.SysFont('gabriola', 50, True)
textFont = pygame.font.SysFont("Cambria", 30)
isClicked = False
self.main_menu_sound()
screen_img = back
intro = True
while intro:
screen.blit(screen_img, [-1, 0])
r.drawText("Lion King", tittle_font, WHITE, screen, 415, 100)
x, y = pygame.mouse.get_pos()
startGameBtn = pygame.Rect(410, 200, 200, 50)
if startGameBtn.collidepoint((x, y)):
pygame.draw.rect(screen, onSelectedButton, startGameBtn)
r.drawText("Play", textFont, onSelectedTextColor, screen, 480, 205)
if isClicked:
self.start_game()
else:
pygame.draw.rect(screen, buttonsColor, startGameBtn)
r.drawText("Play", textFont, textColor, screen, 480, 205)
# -------------------------------------------------------------------------------------------------
instBtn = pygame.Rect(410, 300, 200, 50)
if instBtn.collidepoint((x, y)):
pygame.draw.rect(screen, onSelectedButton, instBtn)
r.drawText("Instructions", textFont, onSelectedTextColor, screen, 430, 305)
if isClicked:
self.instructionsPage()
else:
pygame.draw.rect(screen, buttonsColor, instBtn)
r.drawText("Instructions", textFont, textColor, screen, 430, 305)
# -------------------------------------------------------------------------------------------------
exitBtn = pygame.Rect(410, 400, 200, 50)
if exitBtn.collidepoint((x, y)):
pygame.draw.rect(screen, onSelectedButton, exitBtn)
r.drawText("Exit", textFont, onSelectedTextColor, screen, 480, 405)
if isClicked:
pygame.quit()
sys.exit()
else:
pygame.draw.rect(screen, buttonsColor, exitBtn)
r.drawText("Exit", textFont, textColor, screen, 480, 405)
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
pygame.quit()
sys.exit()
if ev.type == pygame.MOUSEBUTTONDOWN:
if ev.button == 1:
isClicked = True
pygame.display.update()
clock.tick(15)
def instructionsPage(self):
screen_img = inst
screen.blit(screen_img, [-1, 0])
while True:
for event in pygame.event.get():
                if event.type == pygame.QUIT or (
                        event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):  # removed bogus `event.type == pygame.K_3` (K_3 is a key code, not an event type)
exit(0)
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
self.main_menu()
# Fonts --------------------
instruction_tittle = pygame.font.SysFont('gabriola', 50, True)
return_key = pygame.font.SysFont('gabriola', 25, True)
message = pygame.font.SysFont('inkfree', 35, False, True)
description = pygame.font.SysFont('couriernew', 20, False, False)
option = pygame.font.SysFont('lucidasanstypewriter', 15, False, False)
TextSurf, TextRect = self.text_objects("Instrucciones", instruction_tittle)
TextRect.center = (int(self.width / 2), int(self.height / 4))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects("Bienvenido! El objetivo del juego es llegar a la roca del Rey!.",
description)
TextRect.center = (int(self.width / 2), int(self.height / 3))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects("Trata de no toparte con paredes o cazadores, de lo contrario",
description)
TextRect.center = (int(self.width / 2), int(self.height / 2.5))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects("Simba morirá y perderá su progreso", description)
TextRect.center = (int(self.width / 2), int(self.height / 2.10))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects("Controles: W A D S ", description)
TextRect.center = (int(self.width / 2), int(self.height / 1.75))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects("o click derecho, izquierdo y scroll ", description)
TextRect.center = (int(self.width / 2), int(self.height / 1.60))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects("Hakuna matata", message)
TextRect.center = (int(self.width / 1.5), int(self.height / 1.25))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects("Presiona R para regresar", return_key)
TextRect.center = (int(self.width / 4), int(self.height / 1.25))
screen.blit(TextSurf, TextRect)
pygame.display.update()
clock.tick(15)
    def lose_action(self):
        while True:
            # draw the background first; this blit previously sat after the loop and never ran
            screen.blit(lose, (0, 0))
            for e in pygame.event.get():
                if e.type == pygame.QUIT or (e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE):
                    exit(0)
                if e.type == pygame.KEYDOWN:
                    if e.key == pygame.K_0:
                        self.start_game()
            message_font = pygame.font.SysFont('erasitc', 50, False)
            TextSurf, TextRect = self.text_objects("Intentalo otra vez!", message_font)
            TextRect.center = (int(self.width / 1.25), int(self.height / 3))
            screen.blit(TextSurf, TextRect)
            pygame.display.update()
            clock.tick(15)
    def win_action(self):
        while True:
            # draw the background first; this blit previously sat after the loop and never ran
            screen.blit(win, (0, 0))
            for e in pygame.event.get():
                if e.type == pygame.QUIT or (e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE):
                    exit(0)
                if e.type == pygame.KEYDOWN:
                    if e.key == pygame.K_0:
                        self.main_menu()
            congrats_message = pygame.font.SysFont('erasitc', 30, True)
            TextSurf, TextRect = self.text_objects("Hemos ganado!", congrats_message)
            TextRect.center = (int(self.width / 1.25), int(self.height / 3))
            screen.blit(TextSurf, TextRect)
            TextSurf, TextRect = self.text_objects("Presiona 0 para regresar", congrats_message)
            TextRect.center = (int(self.width / 1.25), int(self.height / 2.5))
            screen.blit(TextSurf, TextRect)
            pygame.display.update()
            clock.tick(15)
def update_fps(self):
font = pygame.font.SysFont("erasitc", 25, True)
fps = "FPS: " + str(int(clock.get_fps()))
fps_text = font.render(fps, 1, pygame.Color("black"))
return fps_text
def pause(self):
while True:
screen_img = pause
screen.blit(screen_img, [-1, 0])
for e in pygame.event.get():
if e.type == pygame.QUIT or (e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE):
exit(0)
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_p:
self.start_game()
instruction_tittle = pygame.font.SysFont('gabriola', 50, True)
description = pygame.font.SysFont('couriernew', 20, False, False)
TextSurf, TextRect = self.text_objects2("P A U S A", instruction_tittle)
TextRect.center = (int(self.width / 2), int(self.height / 4))
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = self.text_objects2("Vuelve a apachar P para regrsar!.", description)
TextRect.center = (int(self.width / 2), int(self.height / 3))
screen.blit(TextSurf, TextRect)
pygame.display.update()
clock.tick(15)
def start_game(self):
while True:
screen.fill((0, 0, 0))
d = 10
for e in pygame.event.get():
if e.type == pygame.QUIT or (e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE):
exit(0)
if e.type == pygame.MOUSEBUTTONDOWN:
print(e.button)
if e.button == 1:
r.player["a"] -= 0.157
if e.button == 3:
r.player["a"] += 0.157
if e.button == 4:
r.player["x"] += int(d * cos(r.player["a"]))
r.player["y"] += int(d * sin(r.player["a"]))
self.step_sound()
if e.button == 5:
r.player["x"] -= int(d * cos(r.player["a"]))
r.player["y"] -= int(d * sin(r.player["a"]))
self.step_sound()
if (r.player["x"] > 390) and (r.player["y"] > 70):
self.win_action()
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_a:
r.player["a"] -= 0.157
if e.key == pygame.K_d:
r.player["a"] += 0.157
if e.key == pygame.K_w:
r.player["x"] += int(d * cos(r.player["a"]))
r.player["y"] += int(d * sin(r.player["a"]))
self.step_sound()
if e.key == pygame.K_s:
r.player["x"] -= int(d * cos(r.player["a"]))
r.player["y"] -= int(d * sin(r.player["a"]))
self.step_sound()
if e.key == pygame.K_p:
self.pause()
if (r.player["x"] > 390) and (r.player["y"] > 70):
self.win_action()
screen.fill(pygame.Color("LIGHTSKYBLUE"), (0, 0, int(r.width), int(r.height / 2)))
screen.fill(GRASS, (0, int(r.height / 2), int(r.width), int(r.height / 2)))
r.render()
screen.blit(self.update_fps(), (745, 15))
screen.blit(self.coords(), (850, 15))
pygame.display.flip()
screen.set_alpha(None)
r = Raycaster(screen)
r.load_map('./level1.txt')
pygame.display.set_caption('Zaravala')
inst = pygame.transform.scale(inst, (r.width, r.height))
rock = pygame.transform.scale(rock, (r.blocksize, r.blocksize))  # fixed: previously scaled the menu image ('back') instead of the rock texture
back = pygame.transform.scale(back, (r.width, r.height))
win = pygame.transform.scale(win, (r.width, r.height))
lose = pygame.transform.scale(lose, (r.width, r.height))
pause = pygame.transform.scale(pause, (r.width, r.height))
clock = pygame.time.Clock()
r.main_menu()
|
[
"saritazg2010@gmail.om"
] |
saritazg2010@gmail.om
|
4a29bd38d35dea84f88a9400a37163e7576ef7ac
|
0f52b2e16a4918a6aeb72f8c6ff9ae6cf7d3323a
|
/base/pytorch_AF.py
|
4e7744fa4de267b81582390946be06e57eae4ac6
|
[] |
no_license
|
wellstone-cheng/pytorch
|
f7ec5bba4b8b8829382166ec2ba3201286198bde
|
8094f0c7a9a076df64658f4eb5d000ecfafd1e01
|
refs/heads/master
| 2021-04-17T15:23:22.040913
| 2020-04-10T22:20:14
| 2020-04-10T22:20:14
| 249,454,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
x=torch.linspace(-5,5,200) # 200 evenly spaced points from -5 to 5
x_np=x.numpy()
#print(
# '\n x',x,
# '\n x_np',x_np
#)
y_relu=F.relu(x).numpy()
y_sigmoid =torch.sigmoid(x).numpy() # F.sigmoid(x).numpy()
y_tanh =torch.tanh(x).numpy() #F.tanh(x).numpy()
y_softplus =F.softplus(x).numpy()
# y_softmax =F.softmax(x,dim=0).numpy
plt.figure(1,figsize=(8,6)) # figure no. 1, 8x6 inches; reusing the same number draws on the same figure
plt.subplot(221)
plt.plot(x_np,y_relu,c='red',label='relu')
plt.ylim(-1,5) # y-axis range: -1 to 5
plt.legend(loc='best') # add a legend; loc sets its position
plt.subplot(222)
plt.plot(x_np,y_sigmoid,c='red',label='sigmoid')
plt.ylim(-0.1,1.1) # y-axis range: -0.1 to 1.1
plt.legend(loc='best') # add a legend; loc sets its position
plt.subplot(223)
plt.plot(x_np,y_tanh,c='red',label='tanh')
plt.ylim(-1.1,1.1) # y-axis range: -1.1 to 1.1
plt.legend(loc='best') # add a legend; loc sets its position
plt.subplot(224)
plt.plot(x_np,y_softplus,c='red',label='softplus')
plt.ylim(-0.1,6) # y-axis range: -0.1 to 6
plt.legend(loc='upper left') # add a legend; loc sets its position
# plt.subplot(224)
# plt.plot(x_np,y_softmax,c='red',label='softmax')
# #plt.ylim(-0.1,6)
# plt.legend(loc='upper left')
plt.show()
|
[
"wellstone_cheng@qq.com"
] |
wellstone_cheng@qq.com
|
42d9a4c190582ed883701c786643f56d0a13f971
|
2c49383d1d6427ef51d25b92a3bb700f5f6e697f
|
/s3tup/cli.py
|
87b18fb290c46940d949e7fe3fa340feb7e59781
|
[] |
no_license
|
viglesiasce/s3tup
|
d1bc3fec270e8861c99268e6eeaf313ac8c10278
|
97070d78b873d51439a15b6b8dc5ac514c6df0f0
|
refs/heads/master
| 2020-12-31T03:26:03.767224
| 2014-01-21T07:01:57
| 2014-01-21T07:01:57
| 16,094,178
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,956
|
py
|
import argparse
import logging
import textwrap
import sys
import os
from s3tup.parse import load_config, parse_config
log = logging.getLogger('s3tup')
title = (
"\n"
" _____ __ \n"
" ____|__ // /___ ______ \n"
" / ___//_ </ __/ / / / __ \ \n"
" /__ /__/ / /_/ /_/ / /_/ / \n"
"/____/____/\__/\____/ /___/ \n"
" /_/ "
)
parser = argparse.ArgumentParser(
description='s3tup: configuration management and deployment for AmazonS3')
parser.add_argument(
'config_path',
help='path to your configuration file')
parser.add_argument(
'--dryrun',
action='store_true',
help='preview what will happen when this command runs')
parser.add_argument(
'--rsync',
action='store_true',
help='only sync keys that have been modified or removed')
parser.add_argument(
'-c',
type=int,
metavar='CONCURRENCY',
help='number of concurrent requests (default: 5)')
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument(
'-v', '--verbose',
action='store_true',
help='increase output verbosity')
verbosity.add_argument(
'-q', '--quiet',
action='store_true',
help='silence all output')
parser.add_argument(
'--access_key_id',
help='your aws access key id')
parser.add_argument(
'--secret_access_key',
help='your aws secret access key')
class WrappedFormatter(logging.Formatter):
"""Wrap log lines at 78 chars."""
def format(self, record):
formatted = super(WrappedFormatter, self).format(record)
split = formatted.split('\n')
return '\n'.join([textwrap.fill(l, 78) for l in split])
def main():
"""Command line interface entry point."""
args = parser.parse_args()
if not (args.quiet or args.verbose):
handler = logging.StreamHandler()
handler.setFormatter(WrappedFormatter('%(message)s'))
log.addHandler(handler)
log.setLevel(logging.INFO)
elif args.verbose:
logging.basicConfig(format='%(levelname)s: %(message)s',
level=logging.DEBUG)
try:
run(args.config_path, args.dryrun, args.rsync, args.c,
args.access_key_id, args.secret_access_key)
except Exception as e:
if args.verbose:
raise
log.error('{}: {}'.format(sys.exc_info()[0].__name__, e))
sys.exit(1)
def run(config, dryrun=False, rsync=False, concurrency=None,
access_key_id=None, secret_access_key=None,):
if access_key_id is not None:
os.environ['AWS_ACCESS_KEY_ID'] = access_key_id
if secret_access_key is not None:
os.environ['AWS_SECRET_ACCESS_KEY'] = secret_access_key
config = load_config(config)
buckets = parse_config(config)
log.info(title)
for b in buckets:
if concurrency is not None:
b.conn.concurrency = concurrency
b.sync(dryrun=dryrun, rsync=rsync)
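# Example invocation (added; the console-script name depends on how setup.py
# wires main(), so "s3tup" here is an assumption):
#   s3tup config.yml --dryrun --rsync -c 10 -v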
|
[
"alex@heyimalex.com"
] |
alex@heyimalex.com
|
3d9582feae2c0fedf4ccf9382eb49e0b37af4796
|
cf1b129434ae2525e5f9518afa49ed572a13854b
|
/Easy/20. 有效的括号.py
|
7f984a77123a7c28e65c4011f0dc2ece8162da60
|
[] |
no_license
|
1042113343/LeetCode
|
b338ac94af4ca94e9b453d86d6aec43408cd7a22
|
3830e354b6231b3a06c1d4a2fa6eb5b0dae9382c
|
refs/heads/master
| 2020-04-28T23:37:35.672259
| 2019-06-11T10:08:55
| 2019-06-11T10:08:55
| 175,662,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
class Solution:
def isValid(self, s: str) -> bool:
stack = []
mapping = {")":"(","}":"{","]":"["}
for c in s:
if c in mapping:
top = stack.pop() if stack else "#"
if mapping[c] != top:
return False
else:
stack.append(c)
return not stack
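# Quick examples of the stack-based check above (added):
#   Solution().isValid("()[]{}")  # True  - every closer matches the last opener
#   Solution().isValid("([)]")    # False - ']' arrives while '(' is on top
#   Solution().isValid("(")       # False - stack is non-empty at the end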
|
[
"noreply@github.com"
] |
1042113343.noreply@github.com
|
260edbe4c99daa2a80cd61822d51dd85ecb9006c
|
065216339a23f8d22229cb417f20f55f6c53c194
|
/rsna_dataset.py
|
15f2b8ef19860db9d11d3a9373355529c4ed19f7
|
[] |
no_license
|
yishayahu/transfer_avg
|
340eb362f2f642ac684c70a1be6a43647def965c
|
4412fe03bc6db64846e2bc994d28e7ecd0463e70
|
refs/heads/master
| 2023-08-28T20:29:40.466371
| 2021-10-24T16:57:44
| 2021-10-24T16:57:44
| 418,930,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,370
|
py
|
from torch.utils.data import Dataset
import torch
import numpy as np
import os
from torchvision import transforms
import random
import matplotlib
import matplotlib.pyplot as plt
import glob
import cv2
import PIL
from PIL import Image
import pandas as pd
import pydicom
import tqdm
matplotlib.use('Agg')
class RSNADataset(Dataset):
"""
torch dataLoader for masks or images
"""
def __init__(self, list_IDs, image_dir, settings, labels, use_smaller_datasize=False, train=True):
"""
Initialize this dataset class. for train mode
"""
self.image_dir = image_dir
self.list_IDs = list_IDs
self.labels = labels
if use_smaller_datasize:
self.list_IDs = self.list_IDs[:500]
self.window_center_dict = {'1': 50, '2': 40, '3': 300}
self.window_width_dict = {'1': 150, '2': 80, '3': 1500}
# self.filter_noise_images()
self.labels = {key: self.labels[key] for key in self.list_IDs}
self.input_size = settings.input_size
self.batch_size = settings.batch_size
self.n_channels = settings.n_channels
self.crop_x_min = 64
self.crop_x_max = 448
self.crop_y_min = 64
self.crop_y_max = 448
if train:
self.transforms = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.2),
transforms.RandomRotation(degrees=(0, 15)),
transforms.CenterCrop(384),
transforms.ToTensor()]
)
else:
self.transforms = transforms.Compose([
transforms.CenterCrop(384),
transforms.ToTensor()]
)
def __len__(self):
return len(self.list_IDs)
@staticmethod
def get_id(img_dicom):
return str(img_dicom.SOPInstanceUID)
@staticmethod
def get_first_of_dicom_field_as_int(x):
if type(x) == pydicom.multival.MultiValue:
return int(x[0])
return int(x)
@staticmethod
def get_metadata_from_dicom(img_dicom):
metadata = {
"window_center": img_dicom.WindowCenter,
"window_width": img_dicom.WindowWidth,
"intercept": img_dicom.RescaleIntercept,
"slope": img_dicom.RescaleSlope,
}
return {k: RSNADataset.get_first_of_dicom_field_as_int(v) for k, v in metadata.items()}
def window_image(self, img, key, window_center, window_width, intercept, slope):
img = img * slope + intercept
# img_min = window_center - window_width // 2
# img_max = window_center + window_width // 2
img_min = self.window_center_dict[key] - self.window_width_dict[key] // 2
img_max = self.window_center_dict[key] + self.window_width_dict[key] // 2
img[img < img_min] = img_min
img[img > img_max] = img_max
return img
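    # Worked example of the windowing above (added): with key '2' the clip
    # range is [40 - 80 // 2, 40 + 80 // 2] = [0, 80] HU -- a standard brain
    # window -- and pixel values outside it are saturated to the bounds.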
def crop_center(self, img):
img = img[self.crop_x_min:self.crop_x_max, self.crop_y_min:self.crop_y_max]
return img
def resize(self, img):
img = PIL.Image.fromarray(img.astype(np.int8), mode="L")
return img.resize((self.input_size[1], self.input_size[2]), resample=PIL.Image.BICUBIC)
def resize_cv2(self, img):
return cv2.resize(img, (384, 384), interpolation=cv2.INTER_LINEAR)
@staticmethod
def normalize_minmax(img):
mi, ma = img.min(), img.max()
return (img - mi) / (ma - mi)
def prepare_image(self, img_path):
img_dicom = pydicom.read_file(img_path)
img_id = RSNADataset.get_id(img_dicom)
metadata = RSNADataset.get_metadata_from_dicom(img_dicom)
img_1 = self.window_image(img_dicom.pixel_array, key='1', **metadata)
img_2 = self.window_image(img_dicom.pixel_array, key='2', **metadata)
img_3 = self.window_image(img_dicom.pixel_array, key='3', **metadata)
img_1 = np.expand_dims(RSNADataset.normalize_minmax(img_1), axis=-1)
img_2 = np.expand_dims(RSNADataset.normalize_minmax(img_2), axis=-1)
img_3 = np.expand_dims(RSNADataset.normalize_minmax(img_3), axis=-1)
# img = self.crop_center(img)
# img_pil = np.expand_dims(RSNADataset.resize_cv2(self, img), axis=-1)
img_pil = np.concatenate((img_1,img_2,img_3), axis=-1)
img_pil = Image.fromarray(np.uint8(img_pil * 255))
return img_id, img_pil
def __getitem__(self, idx):
tensor_transform = transforms.ToTensor()
img_id = self.list_IDs[idx]
img_path = os.path.join(self.image_dir, 'ID_{}.dcm'.format(img_id))
_, image = self.prepare_image(img_path)
label = self.labels[img_id]
#image = tensor_transform(img_pil)
image = self.transforms(image)
return {'image': image.float(), 'label': label}
def filter_noise_images(self):
filtered_list = []
for img_id in self.list_IDs:
img_path = os.path.join(self.image_dir, 'ID_{}.dcm'.format(img_id))
img_dicom = pydicom.read_file(img_path)
metadata = RSNADataset.get_metadata_from_dicom(img_dicom)
            # window_image requires a key; '1' (the first window setting) is assumed here
            img = self.window_image(img_dicom.pixel_array, key='1', **metadata)
if img.max() > img.min():
filtered_list.append(img_id)
self.list_IDs = filtered_list
print('len id list: {}'.format(len(self.list_IDs)))
|
[
"ishayahu156@gmail.com"
] |
ishayahu156@gmail.com
|
8160a3dfc6157110e0f27732509eac6fd6a48b62
|
b62a0d978f1d3bfbec7af6b3e766ecf70e6a363b
|
/lib/read_hosei.py
|
4b8331c8d66063ebaafa5811b34c1eb88aadfca6
|
[] |
no_license
|
nanten2/necst-ros
|
8b9f515435bb21b91c2716e41d7eeca83dd3259c
|
26342e813506f428ac8d97ebe1e30309357eddad
|
refs/heads/master
| 2022-10-14T23:33:56.881073
| 2022-06-10T05:52:21
| 2022-06-10T05:52:21
| 63,134,661
| 1
| 4
| null | 2022-06-10T05:52:22
| 2016-07-12T07:03:07
|
Python
|
UTF-8
|
Python
| false
| false
| 101
|
py
|
import pandas
def read(path):
    """Read a hosei (correction) table from CSV and return it transposed."""
    d = pandas.read_csv(path, index_col=0, header=0)
    return d.T
|
[
"shiotani@a.phys.nagoya-u.ac.jp"
] |
shiotani@a.phys.nagoya-u.ac.jp
|
8e3b665661854e7b4234d58cdd8c902e6d475e2b
|
f9cabf7aa1130dd28aff661497fcadddb4c0ae98
|
/scripts/pre_process.py
|
43c30e4bb1951cab697e968da191bcb0bcfb9f7b
|
[
"MIT"
] |
permissive
|
NCBI-Hackathons/DiabetesDataViewer
|
5b2ab956d9c209b2b7412ee13b1983dcf8a67458
|
d7aad7f768818191505cdd8b9ffe333c9a7c0fc9
|
refs/heads/master
| 2020-04-02T07:36:35.550408
| 2018-11-10T22:49:32
| 2018-11-10T22:49:32
| 154,205,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,661
|
py
|
import xlrd
import xlwt
from xlutils.copy import copy
import argparse
import sys
from numpy import array,concatenate, savetxt
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
debug = True
columns = [0,2,3,4,5,17,18]
sheets = ['cleaned_data_14-15.xlsx','cleaned_data_15-16.xlsx',"cleaned_data_16-17.xlsx","cleaned_data_17-18.xlsx"]
choice_one = ["Female","Non-Hispanic", "White or Caucasian", "Type 1"]
choice_two = ["Male","Hispanic", "Black or African American", "Type 2"]
choice_three = ["Asian","Unknown","Secondary","Monogenic", "Other", "CFRD"]
choice_four = ["American Indian and Alaska Native","Type 1 vs Type 2", "IGT"]
choice_five = ["Native Hawaiian and Other Pacific Islander","Patient Refused"]
def merge_rows(file, add_columns=[23,24,25,22]):
wb = xlrd.open_workbook(file)
sheet=wb.sheet_by_index(0)
data = []
results = []
row = 1
    while row < sheet.nrows - 1:  # clearer spelling of range(sheet.nrows)[-1]
row_data = []
result = []
result2 = []
for col in columns:
value = sheet.cell_value(row, col)
if value in choice_one:
value = 0
elif value in choice_two :
value =1
elif value in choice_three:
value =2
elif value in choice_four:
value =3
elif value in choice_five:
value = 4
#print("adding value " + str(value))
row_data.append(value)
id = sheet.cell_value(row,0)
x = sheet.cell_value(row,add_columns[0])
y = sheet.cell_value(row,add_columns[1])
z = sheet.cell_value(row,add_columns[2])
n= sheet.cell_value(row,add_columns[3])
        if row+1 < sheet.nrows - 1:
while id == sheet.cell_value(row+1,0):
x = x + sheet.cell_value(row+1,add_columns[0])
y = y + sheet.cell_value(row+1, add_columns[1])
z = z + sheet.cell_value(row+1, add_columns[2])
n = n + sheet.cell_value(row + 1, add_columns[3])
row = row + 1
row = row +1
#print("appending value")
row_data.append(x)
row_data.append(y)
row_data.append(z)
#row_data.append(n)
result.append(x)
result.append(y)
result.append(z)
result2.append(n)
results.append(result2)
data.append(row_data)
if debug:
pass
#for row in data:
# print(row)
return data,results
def load_data():
input_data = []
results = []
for sheet in sheets:
print("processing sheet " + sheet)
d, r = merge_rows(sheet)
input_data.append(d)
results.append(r)
input_data_np = array(input_data)
results_np = array(results)
print("numpy input data ", input_data_np[0].shape)
print("numpy input data2 ", results_np[1].shape)
input_data_np_stack = concatenate((input_data_np[0],input_data_np[1], input_data_np[2]),axis=0)
results_np_stack = concatenate((results_np[1],results_np[2], results_np[3]),axis=0)
return input_data_np_stack, results_np_stack, input_data_np[3]
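# Reading of the pairing above (added inference, not stated in the original):
# features from sheets 0-2 are matched with outcome columns from sheets 1-3,
# i.e. one year's data predicts the next year's counts, and the newest sheet
# is returned separately as the set to predict on.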
def run_regression():
input, result, to_predict = load_data()
print("input shape, ",input.shape)
print("out shape, ", result.shape)
regr = linear_model.Ridge(alpha=.8)
i = 0
X_train, X_test, y_train, y_test = train_test_split(input, result , test_size = 0.2, random_state = 0)
# Train the model using the training sets
regr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred))
prediction = regr.predict(to_predict)
print(prediction.shape)
print(prediction.astype(int))
# Plot outputs
'''
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
'''
    savetxt('prediction_result', prediction, fmt="%d")
if __name__ == "__main__":
    run_regression()
|
[
"noreply@github.com"
] |
NCBI-Hackathons.noreply@github.com
|
499923f92142809ac271284cc63aec01501f3c95
|
c99c037ce146bc77d43ec3fe96f619b08b65b05e
|
/volunteers/migrations/0003_auto_20190326_1306.py
|
24eff425800fe11433f225f020aa568404f895a4
|
[] |
no_license
|
julianl092/cs91r_volunteer_dashboard
|
58309690d89aafdaf71f1180938884356c7b0632
|
0d05bb54b301576a4ac3997bf9cfd0d5bfa1c75c
|
refs/heads/master
| 2020-05-20T09:03:14.258429
| 2020-03-11T15:25:06
| 2020-03-11T15:25:06
| 177,918,198
| 1
| 0
| null | 2019-03-27T04:31:19
| 2019-03-27T04:31:19
| null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
# Generated by Django 2.1.7 on 2019-03-26 17:06
import datetime
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('volunteers', '0002_auto_20190326_1306'),
]
operations = [
migrations.AlterField(
model_name='volunteer',
name='birthday',
field=models.DateTimeField(default=datetime.datetime(2019, 4, 2, 17, 6, 43, 739124, tzinfo=utc)),
),
migrations.AlterField(
model_name='volunteer',
name='date_joined',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
[
"eric_lin@college.harvard.edu"
] |
eric_lin@college.harvard.edu
|
2c3c77674b4df94e548985e61649a7caa7b3d400
|
c4d529c211bfd31b3b55772808d790fdcd90316a
|
/train_bert_on_questions.py
|
301c975e7d76e528b14d28b57e945a4562d8af09
|
[] |
no_license
|
ostamand/google-quest-qa
|
76a2e061027b23087da22d807f9e0f48d44db112
|
a22fefcf7d93dafda9c90804cdcd5f41c24452ed
|
refs/heads/master
| 2022-03-27T02:36:45.960955
| 2020-01-08T02:31:16
| 2020-01-08T02:31:16
| 226,440,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,677
|
py
|
import argparse
import os
import gc
from typing import List
from functools import partial
from multiprocessing import Process
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
import transformers
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
try:
from apex import amp
except:
pass
try:
import wandb
except:
pass
import pdb
from constants import targets
from modeling import BertOnQA_2
from training import Trainer
targets_for_tr = [x for x in targets if x.startswith('question')]
# not used anymore
def apply_tokenizer(tokenizer, texts: List[str], maxlen) -> np.array:
tokens = np.zeros((len(texts), maxlen), dtype=np.long)
for i, text in enumerate(texts):
text = ' '.join(text.strip().split(' ')[:maxlen])
text_tokens = tokenizer.encode(text, max_length=maxlen, add_special_tokens=True)[:maxlen]
tokens[i, :len(text_tokens)] = text_tokens
return tokens
def process_for_questions(tokenizer, row):
t = tokenizer.encode(row['question_title'], max_length=512, add_special_tokens=False)
q = tokenizer.encode(row['question_body'], max_length=512, add_special_tokens=False)
t_len = len(t)
q_len = len(q)
# [CLS] question title [SEP] question body [SEP] [PAD]
tokens = [tokenizer.cls_token_id] + (512-1)*[tokenizer.pad_token_id]
question_title_trunc = t
if t_len + q_len + 3 > 512:
question_body_trunc = q[:512-t_len-3]
else:
question_body_trunc = q
combined = question_title_trunc + [tokenizer.sep_token_id] + question_body_trunc + [tokenizer.sep_token_id]
tokens[1:1+len(combined)] = combined
token_types = [0] * (len(question_title_trunc)+2) + (len(question_body_trunc)+1) * [1] + (512 - len(question_title_trunc) - len(question_body_trunc) - 3) * [0]
return tokens, token_types
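# Layout check for the packing above (added; lengths are illustrative):
# with t_len = 10 and q_len = 20 the token sequence is
#   [CLS] t_1..t_10 [SEP] q_1..q_20 [SEP] [PAD]*479   -> 512 tokens
# and token_types is 12 zeros (CLS + title + SEP), 21 ones (body + SEP),
# then 479 zeros over the padding, which also sums to 512.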
#@email_sender(recipient_emails=["olivier.st.amand.1@gmail.com"], sender_email="yellow.bhaji@gmail.com")
def main(**args):
# data
train_df = pd.read_csv(os.path.join(args['data_dir'], 'train.csv'))
tokenizer = transformers.BertTokenizer.from_pretrained(args['model_dir'])
#targets_for_tr = [x for x in targets if x.startswith('question')]
train_df['all'] = train_df.apply(lambda x: process_for_questions(tokenizer, x), axis=1)
tokens = np.stack(train_df['all'].apply(lambda x: x[0]).values).astype(np.long)
token_types = np.stack(train_df['all'].apply(lambda x: x[1]).values).astype(np.long)
labels = train_df[targets_for_tr].values.astype(np.float32)
if args['fold'] is not None:
tr_ids = pd.read_csv(os.path.join(args['data_dir'], f"train_ids_fold_{args['fold']}.csv"))['ids'].values
val_ids = pd.read_csv(os.path.join(args['data_dir'], f"valid_ids_fold_{args['fold']}.csv"))['ids'].values
else:
# train on almost all the data
tr_ids = np.arange(labels.shape[0])
val_ids = None
train_dataset = torch.utils.data.TensorDataset(
torch.tensor(tokens[tr_ids], dtype=torch.long),
torch.tensor(token_types[tr_ids], dtype=torch.long),
torch.tensor(labels[tr_ids], dtype=torch.float32)
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args['bs'], shuffle=True)
valid_loader = None
if val_ids is not None:
x_valid = tokens[val_ids]
y_valid = labels[val_ids]
valid_dataset = torch.utils.data.TensorDataset(
torch.tensor(tokens[val_ids], dtype=torch.long),
torch.tensor(token_types[val_ids], dtype=torch.long),
torch.tensor(labels[val_ids], dtype=torch.float32)
)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args['bs'], shuffle=False)
loaders = {'train': train_loader, 'valid': valid_loader}
device = torch.device(args['device'])
model = BertOnQA_2(len(targets_for_tr), args['model_dir'], **BertOnQA_2.default_params())
model.to(device)
if args['do_wandb']:
wandb.init(project=args['project'], tags=['questions'])
wandb.watch(model)
optimizer = transformers.AdamW(model.optimizer_grouped_parameters, lr=args['lr1'])
if args['do_apex']:
        # TODO: try opt_level O2
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
# train head
if args['do_head']:
model.train_head_only()
trainer = Trainer(**args)
trainer.train(model, loaders, optimizer, epochs=args['epochs1'])
# train all layers
model.train_all()
for param_group in optimizer.param_groups:
param_group['lr'] = args['lr2']
trainer = Trainer(**args)
trainer.train(model, loaders, optimizer, epochs=args['epochs2'], warmup=0.5, warmdown=0.5)
# save trained model and features
out_dir = args['out_dir']
if not os.path.exists(out_dir):
os.mkdir(out_dir)
suffix = f"_fold_{args['fold']}" if args['fold'] is not None else ""
torch.save(model.state_dict(), os.path.join(out_dir, f"model_state_dict{suffix}.pth"))
torch.save(args, os.path.join(out_dir, f"training_args{suffix}.bin"))
def get_preds(df, ckpt_dir, fold_n, params):
p = Process(target=_run_get_preds, args=[df, ckpt_dir, fold_n, params])
p.start()
p.join()
with open('.tmp/questions_preds.pickle', 'rb') as f:
preds = pickle.load(f)
return preds
def _run_get_preds(df, ckpt_dir, fold_n, params):
tokenizer = transformers.BertTokenizer.from_pretrained(params['model_dir'])
df['all'] = df.apply(lambda x: process_for_questions(tokenizer, x), axis=1)
tokens = np.stack(df['all'].apply(lambda x: x[0]).values).astype(np.long)
token_types = np.stack(df['all'].apply(lambda x: x[1]).values).astype(np.long)
dataset = torch.utils.data.TensorDataset(
torch.tensor(tokens, dtype=torch.long),
torch.tensor(token_types, dtype=torch.long)
)
loader = torch.utils.data.DataLoader(dataset, batch_size=params['bs'], shuffle=False)
device = torch.device(params['device'])
model = BertOnQA_2(len(targets_for_tr), params['model_dir'], **BertOnQA_2.default_params())
model.to(device)
ckpt_path = os.path.join(ckpt_dir, f'model_state_dict_fold_{fold_n}.pth')
model.load_state_dict(torch.load(ckpt_path))
model.eval()
all_preds = []
for batch in loader:
with torch.no_grad():
tokens, token_types = batch
preds = model(tokens.to(device), attention_mask=(tokens > 0).to(device), token_type_ids=token_types.to(device))
all_preds.append(torch.sigmoid(preds).cpu().numpy())
all_preds = np.vstack(all_preds)
    os.makedirs('.tmp', exist_ok=True)  # make sure the scratch dir exists before writing
    with open('.tmp/questions_preds.pickle', 'wb') as f:
        pickle.dump(all_preds, f)
# example: python train_bert_on_questions.py --do_apex --do_wandb --maxlen 256 --bs 8 --dp 0.1 --fold 0 --out_dir test
# trained model will be saved to model/test_fold_0
# python train_bert_on_questions.py --do_apex --do_wandb --maxlen 256 --bs 8 --dp 0.1 --out_dir test
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--epochs1", default=10, type=int)
parser.add_argument("--epochs2", default=5, type=int)
parser.add_argument("--lr1", default=1e-2, type=float)
parser.add_argument("--lr2", default=2e-5, type=float)
parser.add_argument("--model_dir", default="model/bert-base-uncased", type=str)
parser.add_argument("--out_dir", default="outputs/bert_questions", type=str)
parser.add_argument("--data_dir", default="data", type=str)
parser.add_argument("--fold", default=0, type=int)
parser.add_argument("--log_dir", default=".logs", type=str)
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--bs", default=8, type=int)
parser.add_argument("--dp", default=0.4, type=float)
parser.add_argument("--maxlen", default=512, type=int)
parser.add_argument("--device", default="cuda", type=str)
parser.add_argument("--do_apex", action='store_true')
parser.add_argument("--do_wandb", action='store_true')
parser.add_argument("--do_tb", action='store_true')
parser.add_argument("--do_head", action='store_true')
parser.add_argument("--warmup", default=0.5, type=float)
parser.add_argument("--warmdown", default=0.5, type=float)
parser.add_argument("--clip", default=None, type=float)
parser.add_argument("--accumulation_steps", default=2, type=int)
parser.add_argument("--project", default="google-quest-qa", type=str)
parser.add_argument("--head_ckpt", default=None, type=str)
args = parser.parse_args()
main(**args.__dict__)
|
[
"olivier.st.amand.1@gmail.com"
] |
olivier.st.amand.1@gmail.com
|
da598a0e46a26492e6954cc6ae54e451d95f1bfc
|
c2ec27eee039c372ca327ffb7e10e848902ffe77
|
/2.The-Network-Basics/simple-tcp-server.py
|
4b3a9d1f038673669507de6f46f6d1a07840c7c8
|
[] |
no_license
|
JaewoongMoon/black-hat-python
|
3a8b74fbe353d5bf69231c079aa98d291662f85b
|
5e922dbf3a452518707618e7d6a358c2c7610fea
|
refs/heads/master
| 2021-01-12T12:48:36.764581
| 2018-09-27T02:42:54
| 2018-09-27T02:42:54
| 68,992,588
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip, bind_port))
# this is our client-handling thread
def handle_client(client_socket):
    # print out what the client sends
    request = client_socket.recv(1024)
    print("[*] Received: %s" % request)
    # send back a packet (bytes, so this also runs on Python 3)
    client_socket.send(b"ACK!")
    client_socket.close()
while True:
    client, addr = server.accept()
    print("[*] Accepted connection from %s:%d" % (addr[0], addr[1]))
    # spin up our client thread to handle incoming data
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
[
"mjw8585@gmail.com"
] |
mjw8585@gmail.com
|
eacd581ec81cac1ea696592dcce5be2e8ad2632c
|
af5b78621cce14edd6b3d4fd2668cafecb84ea25
|
/mail/migrations/0001_initial.py
|
1ad261da8e95d86f687bb6874b1aa0d1b7d78522
|
[] |
no_license
|
supah0t/mail
|
e76e7a26428deae8edd8d54463af29b26ff2362c
|
9afee09ac69d6c534ea0280f483e3eccf7c0bcee
|
refs/heads/main
| 2023-01-13T04:06:57.480924
| 2020-11-20T11:59:10
| 2020-11-20T11:59:10
| 313,339,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,883
|
py
|
# Generated by Django 3.1.2 on 2020-11-16 11:04
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('body', models.TextField(blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('read', models.BooleanField(default=False)),
('archived', models.BooleanField(default=False)),
('recipients', models.ManyToManyField(related_name='emails_received', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='emails_sent', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"supah0t@users.noreply.github.com"
] |
supah0t@users.noreply.github.com
|
dc7b17db74f253bcf0493dbde80f8e4c084a676e
|
89694213813612e2ac859a25f4a69f4f9b348c4a
|
/phylogeny_prepare_partitionfinder_exons.py
|
c09054eec13752fba063e0cc079c29d6939df01b
|
[] |
no_license
|
singhal/AWT_delimit
|
b953a87f021b96d7c06550c05b9a40faa9d8699b
|
91bc830704a9fcf38e098c9bc0e9488a4c88bd4d
|
refs/heads/master
| 2020-11-29T15:08:30.392916
| 2018-05-22T18:23:12
| 2018-05-22T18:23:12
| 87,488,218
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,551
|
py
|
import argparse
import glob
import os
import pandas as pd
import re
import subprocess
import random
"""
Sonal Singhal
created on 23 June 2016
Written assuming nothing!
"""
def get_args():
parser = argparse.ArgumentParser(
description="This creates the files that then get "
"aligned in the next script.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
# file
parser.add_argument(
'--file',
type=str,
default=None,
help='File with information for phylogeny making.'
)
# coords file
parser.add_argument(
'--cfile',
type=str,
default=None,
help='File with coordinates.'
)
# miss
parser.add_argument(
'--miss',
type=float,
default=None,
help='How much missing data will you tolerate?'
)
# output dir
parser.add_argument(
'--outdir',
type=str,
default=None,
help='Output directory for phylogeny if not '
'running in context of pipeline.'
)
# number of loci
parser.add_argument(
'--nloci',
type=int,
default=None,
help='Number of loci to use'
)
# run number
parser.add_argument(
'--run',
type=int,
default=None,
help='Run number, to allow multiple tests'
)
# subsample
parser.add_argument(
'--subsample',
default=False,
action='store_true',
help='Include flag if you only want to include '
'one haplotype per lineage; useful for '
'downstream concatenated analyses'
)
return parser.parse_args()
def get_sp_loci(args):
d = pd.read_csv(args.file)
d = d.groupby('lineage')
groups = {}
for lineage, group in d:
groups[lineage] = group['sample'].tolist()
if args.subsample:
for lineage, group in groups.items():
groups[lineage] = [random.choice(groups[lineage])]
outdir = args.outdir
loc_file = os.path.join(outdir, 'locus_data.csv')
d1 = pd.read_csv(loc_file)
d2 = pd.read_csv(args.cfile)
d = d1.merge(d2, left_on="locus", right_on="exon")
d = d[d.coords.notnull()]
d = d[d.missingness >= args.miss]
loci = d.locus.tolist()
loci = random.sample(loci, args.nloci)
return outdir, groups, loci, d
def make_concatenated(args, outdir, sps, loci, d):
subdir = os.path.join(outdir, 'partitionfinder')
if not os.path.isdir(subdir):
os.mkdir(subdir)
subdir = os.path.join(subdir, 'run%s' % args.run)
if not os.path.isdir(subdir):
os.mkdir(subdir)
# where the alignments are
seqdir = os.path.join(outdir, 'alignments')
file1 = os.path.join(subdir, 'concatenated%s.phy' %
(args.miss))
file2 = os.path.join(subdir, 'partition_finder.cfg')
seq = {}
for group, inds in sps.items():
for ind in inds:
seq[ind + '_1'] = ''
if not args.subsample:
seq[ind + '_2'] = ''
partitions = {}
cur_pos = 1
for locus in loci:
f1 = os.path.join(seqdir, '%s.fasta.aln' % locus)
        coords = d.loc[d.locus == locus, 'coords'].tolist()[0]  # .loc instead of the removed .ix
coords = [int(x) for x in re.split('_', coords)]
# just keep the first annotated exon
# if there are more than one
# minus one because of indexing for both
# add one to end because of python handling of lists
c = [coords[0] - 1, coords[1]]
f = open(f1, 'r')
id = ''
s = {}
for l in f:
            if re.search('>', l):
                id = re.search(r'>(\S+)', l).group(1)
                # get rid of reverse
                id = re.sub(r'^_R_', '', id)
s[id] = ''
else:
s[id] += l.rstrip()
f.close()
for sp, tmpseq in s.items():
            tmpseq = list(re.sub(r'\s+', '', tmpseq))
tmpseq = tmpseq[c[0]:c[1]]
s[sp] = ''.join(tmpseq)
        loc_length = len(next(iter(s.values())))  # dict.keys()[0] is Python 2 only
partitions[locus] = [cur_pos, (cur_pos + loc_length - 1)]
cur_pos = cur_pos + loc_length
null = '-' * loc_length
for sp in seq:
if sp not in s:
seq[sp] += null
else:
seq[sp] += s[sp]
f = open(file1, 'w')
    f.write(' %s %s\n' % (len(seq), len(next(iter(seq.values())))))
for sp, s in seq.items():
f.write('%s %s\n' % (sp, s))
f.close()
f = open(file2, 'w')
f.write("alignment = concatenated%s.phy;\n" % args.miss)
f.write("branchlengths = linked;\n")
f.write("models = all;\n")
f.write("model_selection = aicc;\n\n")
f.write("[data_blocks]\n")
for locus in loci:
f.write('%s_1 = %s-%s\\3;\n' % (locus, partitions[locus][0],
partitions[locus][1]))
f.write('%s_2 = %s-%s\\3;\n' % (locus, partitions[locus][0] + 1,
partitions[locus][1]))
f.write('%s_3 = %s-%s\\3;\n' % (locus, partitions[locus][0] + 2,
partitions[locus][1]))
f.write("\n")
f.write("[schemes]\n")
if args.nloci > 200:
f.write("search=rcluster;\n")
else:
f.write("search=greedy;\n")
f.close()
def main():
args = get_args()
outdir, sp, loci, d = get_sp_loci(args)
make_concatenated(args, outdir, sp, loci, d)
if __name__ == "__main__":
main()
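# Illustrative partition_finder.cfg data blocks the writer above would emit for
# a hypothetical 300 bp locus placed at alignment positions 1-300 (added):
#   locusA_1 = 1-300\3;
#   locusA_2 = 2-300\3;
#   locusA_3 = 3-300\3;
# i.e. one block per codon position, in PartitionFinder's stride-3 syntax.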
|
[
"sonal.singhal1@gmail.com"
] |
sonal.singhal1@gmail.com
|
4fe454a1f0a7ff87244442e3bb8745be2de594e7
|
77a6d698bd38257d19e2be58395d666970fb1abc
|
/MISC/opt_omega_ip.py
|
1f30e7564e6c5decd42ff9ef937b6271af7e25ce
|
[
"MIT"
] |
permissive
|
chrinide/photoxrepo
|
89775bf95067f5fe78f0bcd75524bf46ae1be09b
|
83ad3813e9c52926e6387afc76813e99d430a5f3
|
refs/heads/master
| 2023-08-16T12:50:06.391936
| 2021-10-11T12:37:22
| 2021-10-11T12:37:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,797
|
py
|
#!/usr/bin/env python
import os
import sys
sys.path.append(os.getcwd())
import abinitio_driver as driver
from abinitio_driver import AUtoEV
import scipy.optimize as opt
from scipy.interpolate import interp1d
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
pass
# This is the driver script for omega tuning of long-range functionals such as BNL or wPBE
# The interface to ab initio programs is in separate file abinitio_driver.py
# and currently supports QCHEM and TeraChem
# Initial input files for ground and ionized state should be in files:
# optomega_gs.inp and optomega_is.inp
# OR
# optomega_scf.inp and optomega_na.inp in case you choose the "QCHEM_IEDC" PROGRAM option
# This file can be directly submitted to the queue: qsub -V -cwd opt_omega_ip.py aq/nq
# For further details see our wiki pages...
####### USER INPUT PARAMETERS ############################
#PROGRAM = "QCHEM"
PROGRAM = "QCHEM_PCM"
#PROGRAM = "QCHEM_IEDC"
#PROGRAM = "QCHEM_IEDC_PCM"
#PROGRAM = "TERACHEM"
METHOD = 1
# 0 - minimization
# 1 - interpolation
# 2 - read omega-deltaIP function from file omegas.dat and interpolate
# Options for interpolation
MIN_OMEGA = 200
BEST_GUESS = 300
MAX_OMEGA = 400
STEP = 20
# for interpolation, one needs at least 2 starting points
# i.e. (MAX_OMEGA-MIN_OMEGA)/STEP >=2
# of course, this inequality should hold as well: MIN_OMEGA < BEST_GUESS < MAX_OMEGA
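# e.g. the defaults above (MIN_OMEGA=200, MAX_OMEGA=400, STEP=20) give the
# 11-point grid 200, 220, ..., 400, which brackets BEST_GUESS=300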
# OPTIONS for minimizer
# accuracy and maximum iterations for the minimizer
THR_OMEGA = 10.000 # absolute accuracy, omega*1000
MAXITER = 20
# These are bounds for the minimizer, can be tighter if you know where to look
MIN_OMEGA_DEF = 10
MAX_OMEGA_DEF = 250
####### END OF USER INPUT #########################################
# Whether to check SCF convergence (implemented only for TC at the moment)
driver.CHECK_SCF = True
if BEST_GUESS <= MIN_OMEGA or BEST_GUESS >= MAX_OMEGA:
print("ERROR:Incorrect input value for BEST_GUESS")
sys.exit(1)
if METHOD == 1 and (MAX_OMEGA-MIN_OMEGA)/STEP < 1:
print("ERROR: Wrong initial interpolation interval. I need at least 2 initial points")
print("Adjust MIN_OMEGA or MAX_OMEGA or STEP")
sys.exit(1)
def minimize(min_omega, max_omega, thr_omega):
"""Minimization of a general univariate function"""
# http://docs.scipy.org/doc/scipy/reference/optimize.html
try:
res = opt.minimize_scalar(f_optomega_ip,method="bounded",bounds=(MIN_OMEGA_DEF, MAX_OMEGA_DEF), \
options={"xatol":thr_omega,"maxiter": MAXITER,"disp": True})
except NameError:
print("Whoops, you probably have old version of SciPy that does not have minimize_scalar!")
print("Use interpolation instead and comment out this code!")
raise
print(res)
if "success" in res:
suc = res.success # older scipy versions do not have this attribute
else:
suc = True
if suc == True:
return res.x
else:
print("Minimization probably did not converge! Check results carefully.")
sys.exit(2)
def f_optomega_ip(omega):
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
elif PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
elif PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
elif PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
elif PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
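# IP_dscf is the delta-SCF ionization potential; IP_koop is the Koopmans
# estimate (presumably -eps_HOMO of the neutral). Minimizing their squared
# gap enforces the IP-tuning condition that defines the optimal omega.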
f = (IP_dscf - IP_koop)**2
return f
def interpolate(min_omega, max_omega, step, best_guess):
"""Interpolate for fixed omega range using cubic spline
Then find the root."""
omega = min_omega
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
elif PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
elif PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
elif PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
elif PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
deltaIP = []
omegas = []
# Initial points for interpolation, determined by the user via MAX_OMEGA, MIN_OMEGA and STEP
while omega <= max_omega:
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.append(IP_dscf-IP_koop)
omegas.append(omega)
omega += step
# Check whether deltaIP crosses zero
# If not, extend the interpolation interval
# This assumes a monotonic dependence of deltaIP on omega
while deltaIP[0] * deltaIP[-1] > 0:
if (deltaIP[-1] < deltaIP[-2] and deltaIP[-1] > 0) \
or (deltaIP[-1] > deltaIP[-2] and deltaIP[-1] < 0):
best_guess = omegas[-1] + step / 2.0
omega = omegas[-1] + step
omegas.append(omega)
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.append(IP_dscf-IP_koop)
else:
best_guess = omegas[0] - step / 2.0
omega = omegas[0] - step
omegas.insert(0,omega)
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.insert(0,IP_dscf-IP_koop)
# Interpolate the computed points
if len(omegas) >=4:
f_omega = interp1d(omegas, deltaIP, kind='cubic')
elif len(omegas) == 3:
f_omega = interp1d(omegas, deltaIP, kind='quadratic')
elif len(omegas) == 2:
f_omega = interp1d(omegas, deltaIP, kind='linear')
else:
print("ERROR: I need at least 2 points for interpolation, and I only got "+str(len(omegas)))
sys.exit(1)
# Plot the interpolated function for later inspection
try:
x = [ x + omegas[0] for x in range((omegas[-1]-omegas[0]))]
plt.plot(omegas, deltaIP, 'o', x, f_omega(x), "-")
plt.savefig("omega-deltaIP.png")
except:
pass
# Find the root of interpolated function deltaIP(omega)
# Brent method should be superior to newton
# It is also guaranteed not to step out of a given interval,
# which is crucial here, since f_omega function throws an exception in that case
res = opt.brentq(f_omega, omegas[0], omegas[-1])
return res
def interpolate_read(min_omega, max_omega, step, best_guess):
"""Interpolate for fixed omega range using cubic spline
Then find the root. Read omegas from a file"""
deltaIP = []
omegas = []
with open("omegas.dat","r") as f:
comm_first = True
for line in f:
l = line.split()
if not len(l):
continue
if l[0][0] == '#':
if comm_first:
comm_first = False
continue
else:
break
else:
omegas.append(float(l[0]))
deltaIP.append(float(l[1]))
# Check whether deltaIP crosses zero. If not, exit
# This assumes a monotonic dependence of deltaIP on omega
if deltaIP[0] * deltaIP[-1] > 0:
print("ERROR:could not find optimal omega for a computed range.")
sys.exit(1)
# Interpolate the computed points
if len(omegas) >=4:
f_omega = interp1d(omegas, deltaIP, kind='cubic')
elif len(omegas) == 3:
f_omega = interp1d(omegas, deltaIP, kind='quadratic')
elif len(omegas) == 2:
f_omega = interp1d(omegas, deltaIP, kind='linear')
else:
print("ERROR: I need at least 2 points for interpolation, and I only got "+str(len(omegas)))
sys.exit(1)
# Plot the interpolated function for later inspection
try:
x = [ x + omegas[0] for x in range((omegas[-1]-omegas[0]))]
plt.plot(omegas, deltaIP, 'o', x, f_omega(x), "-")
plt.savefig("omega-deltaIP.png")
except:
pass
# Find the root of interpolated function deltaIP(omega)
res = opt.brentq(f_omega, omegas[0], omegas[-1])
return res
#### Actual calculation starts here!
if METHOD == 0:
omega = minimize(MIN_OMEGA, MAX_OMEGA, THR_OMEGA)
elif METHOD == 1:
omega = interpolate(MIN_OMEGA, MAX_OMEGA, STEP, BEST_GUESS)
elif METHOD == 2:
omega = interpolate_read(MIN_OMEGA, MAX_OMEGA, STEP, BEST_GUESS)
print("Final tuned omega = ",omega)
if METHOD == 2:
sys.exit(0)
# This can be skipped if you want to save time
print("Recomputing with final omega...")
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
if PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
if PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
if PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
if PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
err = IP_dscf - IP_koop
print("Final IP_dscf:",IP_dscf*AUtoEV)
print("Final IP_exc_na:",IP_koop*AUtoEV)
print("Final deltaIP:",err*AUtoEV)
|
[
"hollasd@vscht.cz"
] |
hollasd@vscht.cz
|
374f8bf39cfeedb5f14513ada5f0d41be8a8a13d
|
eda0e84113b1e7fe2676af7dc8e7e52b12a57f8e
|
/ChatSite/chatsiteapp/models.py
|
7ce0aa5b51b0f777427a9a14cad4f8551f3d152f
|
[] |
no_license
|
Alton1998/finalWatermodule
|
d6e2487d23b0ba842efa026fe66704222b2b9451
|
405fd0427e1529cce5ddc16ed50a0dbcdab75a1f
|
refs/heads/master
| 2020-03-19T03:31:58.131464
| 2018-06-01T15:53:18
| 2018-06-01T15:53:18
| 135,736,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
from django.db import models
# Create your models here.
class Access_Log(models.Model):
User=models.CharField(max_length=255)
time=models.CharField(max_length=25)
def __str__(self):
return '%s %s' % (self.User, self.time)  # __str__ must return a string, not a tuple
class Data_LogA(models.Model):
Sump=models.FloatField()
Tank=models.FloatField()
Reservoir=models.FloatField()
time=models.CharField(max_length=25)
def __str__(self):
return '%s %s %s %s' % (self.Sump, self.Tank, self.Reservoir, self.time)
class Data_LogB(models.Model):
Sump=models.FloatField()
Tank=models.FloatField()
Reservoir=models.FloatField()
time=models.CharField(max_length=25)
def __str__(self):
return '%s %s %s %s' % (self.Sump, self.Tank, self.Reservoir, self.time)
class Data_LogC(models.Model):
Sump=models.FloatField()
Tank=models.FloatField()
Reservoir=models.FloatField()
time=models.CharField(max_length=25)
def __str__(self):
return '%s %s %s %s' % (self.Sump, self.Tank, self.Reservoir, self.time)
class Data_LogD(models.Model):
Sump=models.FloatField()
Tank=models.FloatField()
Reservoir=models.FloatField()
time=models.CharField(max_length=25)
def __str__(self):
return '%s %s %s %s' % (self.Sump, self.Tank, self.Reservoir, self.time)
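# Note: Data_LogA..Data_LogD are structurally identical. A minimal sketch of
# deduplicating them with a Django abstract base model (a suggestion, not part
# of the original schema):
#
# class DataLogBase(models.Model):
#     Sump = models.FloatField()
#     Tank = models.FloatField()
#     Reservoir = models.FloatField()
#     time = models.CharField(max_length=25)
#     class Meta:
#         abstract = True
#
# class Data_LogA(DataLogBase):
#     pass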
|
[
"noreply@github.com"
] |
Alton1998.noreply@github.com
|
87f191441b5b12c8a70b3307b557694e990fad3c
|
99c780837f329c246b0e973c8cd0b05b1e4dd904
|
/main.py
|
899809b3ae630186cf742e57990bcb993f47b2b8
|
[] |
no_license
|
srishti49/snakeGame
|
cdc8a264b2cbc938df8fe9870e28ee25b5fd565a
|
b2988772ce64a09ff5131afb165d8da03e926ada
|
refs/heads/master
| 2023-04-22T06:03:08.317312
| 2021-05-14T10:58:25
| 2021-05-14T10:58:25
| 367,333,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,593
|
py
|
from turtle import *
import time
import random
score=0
execution_delay=0.1
root=Screen()
root.title('Snake Game')
root.setup(width=600,height=600)
root.bgcolor('grey')
root.bgpic('border.gif')
root.tracer(False)
root.addshape('upmouth.gif')
root.addshape('downmouth.gif')
root.addshape('rightmouth.gif')
root.addshape('leftmouth.gif')
root.addshape('food.gif')
root.addshape('body.gif')
head=Turtle()
head.shape('upmouth.gif')
head.penup()
head.goto(0,0)
head.direction='stop'
food=Turtle()
food.shape('food.gif')
food.penup()
food.goto(0,100)
text=Turtle()
text.penup()
text.goto(0,268)
text.hideturtle()
text.color('white')
text.write('Score:0',font=('courier',25,'bold'),align='center')
lost=Turtle()
lost.color('black')
lost.penup()
lost.hideturtle()
def move_snake():
if head.direction == 'up':
y=head.ycor()
y=y+20
head.sety(y)
if head.direction == 'down':
y = head.ycor()
y = y - 20
head.sety(y)
if head.direction == 'right':
x = head.xcor()
x = x + 20
head.setx(x)
if head.direction == 'left':
x = head.xcor()
x = x - 20
head.setx(x)
def go_up():
if head.direction!='down':
head.direction='up'
head.shape('upmouth.gif')
def go_down():
if head.direction!='up':
head.direction='down'
head.shape('downmouth.gif')
def go_left():
if head.direction!='right':
head.direction='left'
head.shape('leftmouth.gif')
def go_right():
if head.direction!='left':
head.direction='right'
head.shape('rightmouth.gif')
root.listen()
root.onkeypress(go_up,'Up')
root.onkeypress(go_down,'Down')
root.onkeypress(go_left,'Left')
root.onkeypress(go_right,'Right')
segments=[]
while True:
root.update()
if(head.xcor()>260 or head.xcor()<-260 or head.ycor()>260 or head.ycor()<-260):
lost.write('Game is Over',align='center',font=('serif',40,'bold'))
time.sleep(1)
lost.clear()
time.sleep(1)
head.goto(0,0)
head.direction='stop'
for bodies in segments:
bodies.goto(1000,1000)
score=0
execution_delay=0.1
segments.clear()
text.clear()
text.write('Score:0', font=('courier', 25, 'bold'), align='center')
if head.distance(food)<20:
x=random.randint(-255,255)
y= random.randint(-255, 255)
food.goto(x,y)
execution_delay = max(0.02, execution_delay - 0.004)  # keep the delay positive; time.sleep rejects negative values
body=Turtle()
body.penup()
body.shape('body.gif')
segments.append(body)
score=score+10
text.clear()
text.write(f'Score:{score}',font=('courier',25,'bold'),align='center')
# move the body segments so each piece follows the one ahead of it
for i in range(len(segments)-1,0,-1):
x=segments[i-1].xcor()
y=segments[i-1].ycor()
segments[i].goto(x,y)
if len(segments)>0:
x=head.xcor()
y=head.ycor()
segments[0].goto(x,y)
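# Note: the loop above updates the body back-to-front before the head moves:
# segment i takes segment i-1's old spot, segment 0 takes the head's old
# spot, so the whole chain trails the head by exactly one step.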
move_snake()
for bodies in segments:
if bodies.distance(head)<20:
time.sleep(1)
head.goto(0,0)
head.direction='stop'
for bodies in segments:
bodies.goto(1000,1000)
segments.clear()
score=0
execution_delay=0.1
lost.write('Game is Over', align='center', font=('serif', 40, 'bold'))
time.sleep(1)
lost.clear()
text.clear()
text.write('Score:0', font=('courier', 25, 'bold'), align='center')
time.sleep(execution_delay)
|
[
"srishtigupta191@gmail.com"
] |
srishtigupta191@gmail.com
|
50e8682696ae42585406f3bfc3c0c86d4e89c605
|
bb88cc84557dd00ac1a10ba9912d4c81a38ca724
|
/repo_aga.py
|
11cc98b645063debbc198493724879d2096ea866
|
[] |
no_license
|
aguszcz/ZaliczeniePython
|
fb4eeccf6a68c733e1d8d1d82336094aa613cf32
|
cbf41186f006067b6f186b02a9e61f0d43e0d642
|
refs/heads/master
| 2016-08-12T07:31:39.272756
| 2016-01-25T23:05:14
| 2016-01-25T23:05:14
| 50,363,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,115
|
py
|
# coding=utf-8
# -*- coding: utf-8 -*-
import sqlite3
import math
from datetime import datetime
#
# Database connection path
#
db_path = 'aga_baza.db'
#
# Wyjątek używany w repozytorium
#
class RepositoryException(Exception):
def __init__(self, message, *errors):
Exception.__init__(self, message)
self.errors = errors
#
# Data model
#
class Film():
"""Model pojedynczego film
"""
def __init__(self, id_film, tytul, rok, aktorzy=[]):
self.id_film = id_film
self.tytul = tytul
self.rok=rok
self.aktorzy=aktorzy
def __repr__(self):
return "<FILM tytul %s, rok=%s, (id=%s), aktorzy = %s)>" % (self.tytul, str(self.rok), self.id_film, self.aktorzy)
class Aktor():
"""Model aktorów występujących w danym filmie
"""
def __init__(self, id_film, imie, nazwisko, wynagrodzenie):
self.id_film=id_film
self.imie=imie
self.nazwisko=nazwisko
self.wynagrodzenie=wynagrodzenie
def __repr__(self):
return "<Aktor %s %s, wynagrodzenie %s (id_filmu=%s)>" %(self.imie, self.nazwisko, self.wynagrodzenie, self.id_film)
class Repository():
def __init__(self):
try:
self.conn = self.get_connection()
except Exception as e:
raise RepositoryException('GET CONNECTION:', *e.args)
self._complete = False
# entering the with ... as ... block
def __enter__(self):
return self
# leaving the with ... as ... block
def __exit__(self, type_, value, traceback):
self.close()
def complete(self):
self._complete = True
def get_connection(self):
return sqlite3.connect(db_path)
def close(self):
if self.conn:
try:
if self._complete:
self.conn.commit()
else:
self.conn.rollback()
except Exception as e:
raise RepositoryException(*e.args)
finally:
try:
self.conn.close()
except Exception as e:
raise RepositoryException(*e.args)
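# Usage sketch, derived from close() above: changes are committed only if
# complete() was called inside the with-block; otherwise __exit__ rolls back.
#   with repoaga() as rpa:
#       rpa.add(...)
#       rpa.complete()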
#
# repository of Film objects
#
class repoaga(Repository):
def add(self, film):
"""Metoda dodaje pojedynczą fakturę do bazy danych,
wraz ze wszystkimi jej pozycjami.
"""
try:
c = self.conn.cursor()
c.execute('INSERT INTO film (id_film, tytul, rok) VALUES(?, ?, ?)',
(film.id_film, film.tytul, film.rok)
)
# save the actors
if film.aktorzy:
for aktorzy in film.aktorzy:
try:
c.execute('INSERT INTO aktor (imie, nazwisko, wynagrodzenie,id_film) VALUES(?,?,?,?)',
(aktorzy.imie, aktorzy.nazwisko, aktorzy.wynagrodzenie, film.id_film)
)
except Exception as e:
#print "item add error:", e
raise RepositoryException('error adding: %s, error message: %s' %
(str(film), e)
)
except Exception as e:
#print "invoice add error:", e
raise RepositoryException('error adding %s' % str(e))
def delete(self, id_film):
"""Metoda usuwa pojedynczy film wraz z przypisanymi aktorami
"""
try:
c = self.conn.cursor()
# delete the film record
c.execute('DELETE FROM film WHERE id_film=?', (id_film,))
# delete the actors
c.execute('DELETE FROM aktor WHERE id_film=?', (id_film,))
except Exception as e:
#print "delete :", e
raise RepositoryException('error deleting %s' % str(e))
def getById(self, id):
"""Get film by id
"""
try:
c = self.conn.cursor()
c.execute("SELECT * FROM film WHERE id_film=?", (id,))
row = c.fetchone()
# film = Film(id_film=id)
a=[]
if row == None:
film=None
else:
film=Film(id_film=id, tytul= row[1], rok=row[2],aktorzy=[])
c.execute("SELECT * FROM aktor WHERE id_film=? order by imie", (id,))
aktor_rows = c.fetchall()
for i_rows in aktor_rows:
a = Aktor(id_film=id, imie=i_rows[1], nazwisko=i_rows[2], wynagrodzenie=i_rows[3])
film.aktorzy.append(a)
except Exception as e:
#print "invoice getById error:", e
raise RepositoryException('error getting by id film: %s' % str(e))
return film
def getMaxAktor(self, id): # id is the film id
"""Get the maximum salary of the actors appearing in a given film
"""
try:
c = self.conn.cursor()
c.execute("SELECT max(wynagrodzenie) FROM aktor WHERE id_film=?", (id,))
row = c.fetchone()
if row == None:
wynagrodzenie_max=None
else:
wynagrodzenie_max=row[0]
except Exception as e:
#print "aktor getMax error:", e
raise RepositoryException('error getting max: %s' % str(e))
return wynagrodzenie_max
def getMinAktor(self, id): # id is the film id
"""Get the minimum salary of the actors appearing in a given film
"""
try:
c = self.conn.cursor()
c.execute("SELECT min(wynagrodzenie) FROM aktor WHERE id_film=?", (id,))
row = c.fetchone()
if row == None:
wynagrodzenie_min=None
else:
wynagrodzenie_min=row[0]
except Exception as e:
#print "aktor getMax error:", e
raise RepositoryException('error getting min: %s' % str(e))
return wynagrodzenie_min
def getSrednieAktor(self):
"""Get the average salary of the actors appearing in all films
"""
try:
c = self.conn.cursor()
c.execute("SELECT avg(wynagrodzenie) FROM aktor")
row = c.fetchone()
if row == None:
wynagrodzenie_srednie=None
else:
wynagrodzenie_srednie=row[0]
except Exception as e:
#print "aktor getMax error:", e
raise RepositoryException('error getting average: %s' % str(e))
return wynagrodzenie_srednie
def update(self, film):
"""Metoda uaktualnia pojedynczy film wraz z przypisanymi aktorami.
"""
try:
# fetch the film from the database
F_oryg = self.getById(film.id_film)
if F_oryg is not None:
# the film is in the database: delete it first
self.delete(film.id_film)
self.add(film)
except Exception as e:
#print "film update error:", e
raise RepositoryException('error updating film %s' % str(e))
if __name__ == '__main__':
try:
with repoaga() as rpa:
rpa.delete(1)
rpa.delete(2)
rpa.complete()
rpa.add(
Film(1, "Zjawa" , 2015 ,
aktorzy = [
Aktor(1, "Leonardo", "DiCaprio", 1000000),
Aktor(1, "Tom", "Hardy", 500000),
Aktor(1, "Domhnall", "Gleeson", 400000),
Aktor(1, "Will", "Poulter", 250000),
]
))
rpa.add(
Film(2, "Dwunastu gniewnych ludzi" , 1957 ,
aktorzy = [
Aktor(2, "Henry", "Fonda", 4500000),
Aktor(2, "Martin", "Balsam", 550000),
Aktor(2, "Jack", "Klugman", 400000),
Aktor(2, "Edward", "Binns", 250000),
]
))
rpa.complete()
#print("***Zastapienie filmu o podanym numerze id innym filmem wraz z aktorami ***")
#rpa.update(Film(1,"JAWA",2000,[Aktor(1,"dd","sss",1000)]))
print("***Film o podanym id wraz z aktorami: ***")
print(rpa.getById(id=1))
print("*******************")
print("*** Maksymalne wynagrodzenie aktora dla filmu o danym id ***")
print(rpa.getMaxAktor(id=1))
print("*******************")
print("*** Minimalne wynagrodzenie aktora dla filmu o danym id ***")
print(rpa.getMinAktor(id=2))
print("*******************")
print("*** Srednie wynagrodzenie wszystkich aktorow ***")
print(rpa.getSrednieAktor())
print("*******************")
except RepositoryException as e:
print(e)
#a1=Aktor(1,'A','B',22)
#a2=Aktor(1,"Jan","Kowalski",1998)
#FA= Film(1,'sdklfjsl',2014, [a1,a2])
#print(FA)
#print(a1)
#print(a2)
|
[
"aguszcz@gmail.com"
] |
aguszcz@gmail.com
|
514a8f5bf7947643d5efe1a4b19b02143b7ccf70
|
b095173b2dbc77c8ad61c42403258c76169b7a63
|
/tests/unit/sagemaker/cli/compatibility/v2/modifiers/test_matching.py
|
9886eba0f512979d69baf935e96cfe87301464d7
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-python-sdk
|
666665e717cfb76698ba3ea7563b45344634264d
|
8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85
|
refs/heads/master
| 2023-09-04T01:00:20.663626
| 2023-08-31T15:29:19
| 2023-08-31T15:29:19
| 110,621,895
| 2,050
| 1,255
|
Apache-2.0
| 2023-09-14T17:37:15
| 2017-11-14T01:03:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,807
|
py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from sagemaker.cli.compatibility.v2.modifiers import matching
from tests.unit.sagemaker.cli.compatibility.v2.modifiers.ast_converter import ast_call
def test_matches_any():
name_to_namespaces_dict = {
"KMeansPredictor": ("sagemaker", "sagemaker.amazon.kmeans"),
"Predictor": ("sagemaker.tensorflow.serving",),
}
matches = (
"KMeansPredictor()",
"sagemaker.KMeansPredictor()",
"sagemaker.amazon.kmeans.KMeansPredictor()",
"Predictor()",
"sagemaker.tensorflow.serving.Predictor()",
)
for call in matches:
assert matching.matches_any(ast_call(call), name_to_namespaces_dict)
non_matches = ("MXNet()", "sagemaker.mxnet.MXNet()")
for call in non_matches:
assert not matching.matches_any(ast_call(call), name_to_namespaces_dict)
def test_matches_name_or_namespaces():
name = "KMeans"
namespaces = ("sagemaker", "sagemaker.amazon.kmeans")
matches = ("KMeans()", "sagemaker.KMeans()")
for call in matches:
assert matching.matches_name_or_namespaces(ast_call(call), name, namespaces)
non_matches = ("MXNet()", "sagemaker.mxnet.MXNet()")
for call in non_matches:
assert not matching.matches_name_or_namespaces(ast_call(call), name, namespaces)
def test_matches_name():
assert matching.matches_name(ast_call("KMeans()"), "KMeans")
assert not matching.matches_name(ast_call("sagemaker.KMeans()"), "KMeans")
assert not matching.matches_name(ast_call("MXNet()"), "KMeans")
def test_matches_attr():
assert matching.matches_attr(ast_call("sagemaker.amazon.kmeans.KMeans()"), "KMeans")
assert not matching.matches_attr(ast_call("KMeans()"), "KMeans")
assert not matching.matches_attr(ast_call("sagemaker.mxnet.MXNet()"), "KMeans")
def test_matches_namespace():
assert matching.matches_namespace(ast_call("sagemaker.mxnet.MXNet()"), "sagemaker.mxnet")
assert not matching.matches_namespace(ast_call("sagemaker.KMeans()"), "sagemaker.mxnet")
def test_has_arg():
assert matching.has_arg(ast_call("MXNet(framework_version=mxnet_version)"), "framework_version")
assert not matching.has_arg(ast_call("MXNet()"), "framework_version")
|
[
"noreply@github.com"
] |
aws.noreply@github.com
|
011a24fe82e89f3224deaf69c1273d7b84c8cc9e
|
cecf366befe2c349d98d153b6ea9af4a2f750de7
|
/forum/migrations/0001_initial.py
|
d692650c6541db3d219d9eb48486740101583f70
|
[] |
no_license
|
VinceKlaus/django-forum
|
acfe603a1f549c96bc42be66ab1e43f8e51af5e6
|
913a8e504fbdd0068288fa0fb7f7be2a3e4bb485
|
refs/heads/master
| 2021-06-14T14:21:34.283813
| 2016-10-21T16:46:05
| 2016-10-21T16:46:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-09 14:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
('date_added', models.DateTimeField(auto_now_add=True)),
('text', models.TextField()),
],
),
]
|
[
"benemeritosam@gmail.com"
] |
benemeritosam@gmail.com
|
f5b5dde599fd732b3dcd51c460406ac5ddd39a3e
|
70bb44a349b8019a1288f634a8489741b3c9390c
|
/show_best_per_level.py
|
b6666f6a46dd37c474da5fb237dd9e51fcce25a4
|
[] |
no_license
|
TopSteely/GenericObjectCounting
|
d6c5d27e0ef1c576cb252f80b6e110029df4680c
|
3807ca0d4fe6fa5ee203cd17216263da29f30e1d
|
refs/heads/master
| 2023-03-05T03:06:55.000702
| 2017-12-05T04:57:32
| 2017-12-05T04:57:32
| 75,942,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,753
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 12:20:18 2015
@author: root
"""
from sklearn import linear_model, preprocessing
import matplotlib
matplotlib.use('agg')
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
import numpy
import math
import sys
import random
import pylab as pl
import networkx as nx
import pyximport; pyximport.install(pyimport = True)
import get_overlap_ratio
import itertools
from get_intersection import get_intersection
from collections import deque
from itertools import chain, islice
from get_intersection_count import get_intersection_count
#from count_per_lvl import iep,sums_of_all_cliques,count_per_level
import matplotlib.colors as colors
from load import get_seperation, get_data,get_image_numbers,get_class_data, get_traineval_seperation, get_data_from_img_nr
import matplotlib.image as mpimg
from utils import create_tree, find_children, sort_boxes, surface_area, extract_coords, get_set_intersection
from ml import tree_level_regression, tree_level_loss, count_per_level, sums_of_all_cliques, constrained_regression, learn_root, loss, iep_single_patch, iep_single_patch_inverse
import time
import matplotlib.cm as cmx
from scipy import optimize
import cProfile
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from matplotlib.patches import Rectangle
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import SGDClassifier
from scipy.misc import imread
#class_ = 'sheep'
baseline = False
add_window_size = False
iterations = 1000
subsampling = False
c = 'partial'
normalize = True
prune = False
delta = math.pow(10,-3)
features_used = 5
less_features = False
learn_intersections = True
squared_hinge_loss = False
prune_fully_covered = True
prune_tree_levels = 2
jans_idea = True
def get_labels(class_,i, criteria, subsamples):
labels = []
if os.path.isfile('/var/node436/local/tstahl/Coords_prop_windows/Labels/Labels/'+(format(i, "06d")) + '_' + class_ + '_' + criteria + '.txt'):
file = open('/var/node436/local/tstahl/Coords_prop_windows/Labels/Labels/'+(format(i, "06d")) + '_' + class_ + '_' + criteria + '.txt', 'r')
else:
print 'warning /var/node436/local/tstahl/Coords_prop_windows/Labels/Labels/'+(format(i, "06d")) + '_' + class_ + '_' + criteria + '.txt does not exist '
return np.zeros(subsamples)
for i_l, line in enumerate(file):
tmp = line.split()[0]
labels.append(float(tmp))
if i_l == subsamples - 1:
break
return labels
def minibatch_(functions, clf,scaler,w, loss__,mse,hinge1,hinge2,full_image,img_nr,alphas,learning_rate,subsamples, mode):
X_p, y_p, inv = get_data_from_img_nr(class_,img_nr, subsamples)
if X_p != []:
boxes = []
ground_truth = inv[0][2]
img_nr = inv[0][0]
print img_nr
if less_features:
X_p = [fts[0:features_used] for fts in X_p]
if os.path.isfile('/var/node436/local/tstahl/Coords_prop_windows/'+ (format(img_nr, "06d")) +'.txt'):
f = open('/var/node436/local/tstahl/Coords_prop_windows/'+ (format(img_nr, "06d")) +'.txt', 'r')
else:
print 'warning, no /var/node436/local/tstahl/Coords_prop_windows/'+ (format(img_nr, "06d")) +'.txt'
for line, y in zip(f, inv):
tmp = line.split(',')
coord = []
for s in tmp:
coord.append(float(s))
boxes.append(coord)
#assert(len(boxes)<500)
boxes, y_p, X_p = sort_boxes(boxes, y_p, X_p, 0,5000)
if os.path.isfile('/var/node436/local/tstahl/GroundTruth/%s/%s.txt'%(class_,format(img_nr, "06d"))):
gr = open('/var/node436/local/tstahl/GroundTruth/%s/%s.txt'%(class_,format(img_nr, "06d")), 'r')
else:
gr = []
ground_truths = []
for line in gr:
tmp = line.split(',')
ground_truth = []
for s in tmp:
ground_truth.append(int(s))
ground_truths.append(ground_truth)
#prune boxes
pruned_x = []
pruned_y = []
pruned_boxes = []
if prune:
for i, y_ in enumerate(y_p):
if y_ > 0:
pruned_x.append(X_p[i])
pruned_y.append(y_p[i])
pruned_boxes.append(boxes[i])
else:
pruned_x = X_p
pruned_y = y_p
pruned_boxes = boxes
if subsampling and len(pruned_boxes) > subsamples:
pruned_x = pruned_x[0:subsamples]
pruned_y = pruned_y[0:subsamples]
pruned_boxes = pruned_boxes[0:subsamples]
# create_tree
G, levels = create_tree(pruned_boxes)
#prune tree to only have levels which fully cover the image, tested
if prune_fully_covered:
nr_levels_covered = 100
total_size = surface_area(pruned_boxes, levels[0])
for level in levels:
sa = surface_area(pruned_boxes, levels[level])
sa_co = sa/total_size
if sa_co != 1.0:
G.remove_nodes_from(levels[level])
else:
nr_levels_covered = level
levels = {k: levels[k] for k in range(0,nr_levels_covered + 1)}
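# e.g. if only the 4 shallowest levels fully tile the image, nr_levels_covered
# ends up as 3, and all deeper levels have already been removed from G above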
# prune levels, speedup + performance
levels_tmp = {k:v for k,v in levels.iteritems() if k<prune_tree_levels}
levels_gone = {k:v for k,v in levels.iteritems() if k>=prune_tree_levels}
levels = levels_tmp
#prune tree as well, for patches training
for trash_level in levels_gone.values():
G.remove_nodes_from(trash_level)
coords = []
features = []
f_c = []
f = []
#either subsampling or prune_fully_covered
#assert(subsampling != prune_fully_covered)
if subsampling:
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples)):
f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'r+')
else:
if mode == 'extract_train' or mode == 'extract_test':
print 'coords for %s with %s samples have to be extracted'%(img_nr,subsamples)
f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'w')
for level in levels:
levl_boxes = extract_coords(levels[level], pruned_boxes)
if levl_boxes != []:
for lvl_box in levl_boxes:
if lvl_box not in coords:
coords.append(lvl_box)
f_c.write('%s,%s,%s,%s'%(lvl_box[0],lvl_box[1],lvl_box[2],lvl_box[3]))
f_c.write('\n')
f_c.close()
print 'features for %s with %s samples have to be extracted'%(img_nr,subsamples)
os.system('export PATH=$PATH:/home/koelma/impala/lib/x86_64-linux-gcc')
os.system('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/koelma/impala/third.13.03/x86_64-linux/lib')
#print "EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_%s.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_%s.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),subsamples,format(img_nr, "06d"),subsamples)
os.system("EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/%s_%s_%s.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt"%(class_,(format(img_nr, "06d")),format(img_nr, "06d"),subsamples,class_,format(img_nr, "06d"),subsamples))
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples)):
f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'r')
else:
f_c = []
coords = []
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/Features_upper/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples)):
f = open('/var/node436/local/tstahl/Features_prop_windows/Features_upper/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'r')
elif prune_fully_covered:
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d"))):
f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'r+')
else:
if mode == 'extract_train' or mode == 'extract_test':
print 'coords for %s with fully_cover_tree samples have to be extracted'%(img_nr)
f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'w')
for level in levels:
levl_boxes = extract_coords(levels[level], pruned_boxes)
if levl_boxes != []:
for lvl_box in levl_boxes:
if lvl_box not in coords:
coords.append(lvl_box)
f_c.write('%s,%s,%s,%s'%(lvl_box[0],lvl_box[1],lvl_box[2],lvl_box[3]))
f_c.write('\n')
f_c.close()
print 'features for %s with fully_cover_tree samples have to be extracted'%(img_nr)
os.system('export PATH=$PATH:/home/koelma/impala/lib/x86_64-linux-gcc')
os.system('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/koelma/impala/third.13.03/x86_64-linux/lib')
#print "EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_%s.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_%s.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),subsamples,format(img_nr, "06d"),subsamples)
print "EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),format(img_nr, "06d"))
os.system("EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),format(img_nr, "06d")))
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d"))):
f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'r')
else:
f_c = []
coords = []
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d"))):
f = open('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'r')
else:
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep%s.txt'%(format(img_nr, "06d"))):
f = open('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep%s.txt'%(format(img_nr, "06d")), 'r')
if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep%s.txt'%(format(img_nr, "06d"))):
f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep%s.txt'%(format(img_nr, "06d")), 'r+')
if f_c != []:
for i,line in enumerate(f_c):
str_ = line.rstrip('\n').split(',')
cc = []
for s in str_:
cc.append(float(s))
coords.append(cc)
if f != []:
for i,line in enumerate(f):
str_ = line.rstrip('\n').split(',')
ff = []
for s in str_:
ff.append(float(s))
features.append(ff)
#assert len(coords) == len(features)
# append x,y of intersections
if learn_intersections:
for inters,coord in zip(features,coords):
# if inters not in pruned_x:
pruned_x.append(inters)
ol = 0.0
ol = get_intersection_count(coord, ground_truths)
pruned_y.append(ol)
if mode == 'mean_variance':
print 'normalizing'
scaler.partial_fit(pruned_x) # Don't cheat - fit only on training data
return scaler
if less_features:
features = [fts[0:features_used] for fts in features]
#normalize
norm_x = []
if normalize and (mode != 'extract_train' and mode != 'extract_test'):
# for p_x in pruned_x:
# norm_x.append((p_x-mean)/variance)
norm_x = scaler.transform(pruned_x)
if features != []:
features = scaler.transform(features)
else:
norm_x = pruned_x
data = (G, levels, pruned_y, norm_x, pruned_boxes, ground_truths, alphas)
sucs = nx.dfs_successors(G)
predecs = nx.dfs_predecessors(G)
#preprocess: node - children
children = {}
last = -1
for node,children_ in zip(sucs.keys(),sucs.values()):
if node != last+1:
for i in range(last+1,node):
children[i] = []
children[node] = children_
elif node == last +1:
children[node] = children_
last = node
if mode == 'training':
if alphas[0] == 0: #if we don't learn the proposals, we learn just the levels: better, because every level has same importance and faster
w_levels_img=np.zeros(4096,np.dtype('float64'))
for level in levels:
if img_nr in functions:
if level in functions[img_nr]:
function = functions[img_nr][level]
else:
function = []
else:
functions[img_nr] = {}
function = []
#print count_per_level([],class_,features,coords,scaler,w, np.dot(w,np.array(norm_x).T), img_nr, pruned_boxes,levels[level], '',function)[0]
w_level, function = tree_level_regression(class_,function,levels,level,features,coords,scaler,w,norm_x,pruned_y,None,predecs,children,pruned_boxes,learning_rate,alphas,img_nr,jans_idea)
#print count_per_level([],class_,features,coords,scaler,w_level, np.dot(w,np.array(norm_x).T), img_nr, pruned_boxes,levels[level], '',function)[0]
w_levels_img += w_level
if level not in functions[img_nr]:
functions[img_nr][level] = function
w_levels_img = w_levels_img / len(levels)
return w_levels_img, len(pruned_y), len(levels), pruned_y[0]
else: #if we learn proposals, levels with more proposals have more significance...., slow - need to change
nodes = list(G.nodes())
for node in nodes:
if node == 0:
w = learn_root(w,norm_x[0],pruned_y[0],learning_rate,alphas)
else:
for num,n in enumerate(levels.values()):
if node in n:
level = num
break
if img_nr in functions:
if level in functions[img_nr]:
function = functions[img_nr][level]
else:
function = []
else:
functions[img_nr] = {}
function = []
#w, function = tree_level_regression(class_,function,levels,level,features,coords,scaler,w,norm_x,pruned_y,node,predecs,children,pruned_boxes,learning_rate,alphas,img_nr)
w, function = constrained_regression(class_,function,features,coords,scaler,w,norm_x,pruned_y,node,predecs,children,pruned_boxes,learning_rate,alphas,img_nr,squared_hinge_loss)
#TODO: train regressor/classifier that predicts/chooses level. Features: level, number of proposals, number of intersections, avg size of proposal, predictions(for regressor), etc.
if level not in functions[img_nr]:
functions[img_nr][level] = function
return w, len(pruned_y), len(G.nodes()), pruned_y[0]
elif mode == 'scikit_train':
clf.partial_fit(norm_x,pruned_y)
return clf
elif mode == 'loss_train':
if alphas[0] == 0: #levels
loss__.append(tree_level_loss(class_,features,coords,scaler, w, data, predecs, children,img_nr,-1,functions))
return loss__
else:
loss__.append(loss(class_,squared_hinge_loss,features,coords,scaler,w, data, predecs, children,img_nr, -1))
elif mode == 'loss_test' or mode == 'loss_eval':
if alphas[0] == 0: #levels
loss__.append(tree_level_loss(class_,features,coords,scaler, w, data, predecs, children,img_nr,-1,functions))
cpl = max(0, np.dot(w,np.array(norm_x[0]).T))
full_image.append([pruned_y[0],cpl])
return loss__,full_image
else:
loss__.append(loss(class_,squared_hinge_loss,features,coords,scaler,w, data, predecs, children,img_nr, -1))
cpl = max(0, np.dot(w,np.array(norm_x[0]).T))
full_image.append([pruned_y[0],cpl])
return loss__,full_image
elif mode == 'loss_scikit_test' or mode == 'loss_scikit_train':
loss__.append(((clf.predict(norm_x) - pruned_y)**2).sum())
return loss__
elif mode == 'levels_train' or mode == 'levels_test':
preds = []
for i,x_ in enumerate(norm_x):
preds.append(np.dot(w, x_))
cpls = []
truelvls = []
used_boxes_ = []
total_size = surface_area(pruned_boxes, levels[0])
fully_covered_score = 0.0
fully_covered_score_lvls = 0.0
covered_levels = []
print mode, len(levels)
best = []
best_iep=[]
all_patches = []
for level in levels:
iep_boxes_levels_inverse,f = iep_single_patch(y,class_,w,preds,levels,level,features,coords,scaler, norm_x,img_nr, boxes, features, [], jans_idea)
best_in_level = preds.index(max([preds[l] for l in levels[level]]))
ind_best_iep_in_level = levels[level][iep_boxes_levels_inverse.index(max(iep_boxes_levels_inverse))]
best_iep_in_level = [max(iep_boxes_levels_inverse), ind_best_iep_in_level]
print len(pruned_boxes), best_in_level
best.append([pruned_boxes[best_in_level], preds[best_in_level]])
best_iep.append([pruned_boxes[best_iep_in_level[1]], best_iep_in_level[0],pruned_boxes[ind_best_iep_in_level]])
print pruned_boxes[best_iep_in_level[1]]
for bb in levels[level]:
all_patches.append([pruned_boxes[bb],preds[bb]])
if img_nr in functions:
if level in functions[img_nr]:
function = functions[img_nr][level]
else:
function = []
else:
functions[img_nr] = {}
function = []
cpl,used_boxes,_ = count_per_level([],class_,features,coords,scaler,w, preds, img_nr, pruned_boxes,levels[level], '',function)
# clipp negative predictions
cpl = max(0,cpl)
tru = y_p[0]
cpls.append(cpl)
sa = surface_area(pruned_boxes, levels[level])
sa_co = sa/total_size
if sa_co == 1.0:
fully_covered_score += cpl
fully_covered_score_lvls += 1
covered_levels.append(cpl)
truelvls.append(tru)
print best_iep
return cpls,truelvls,best, all_patches, best_iep
def main():
test_imgs, train_imgs = get_seperation()
train_imgs = train_imgs
test_imgs = test_imgs
training_imgs, evaluation_imgs = get_traineval_seperation(train_imgs)
# learn
# if os.path.isfile('/var/node436/local/tstahl/Models/'+class_+c+'normalized_constrained.pickle'):
# with open('/var/node436/local/tstahl/Models/'+class_+c+'normalized_constrained.pickle', 'rb') as handle:
# w = pickle.load(handle)
# else:
evaluation_imgs = evaluation_imgs
img_train = training_imgs
img_eval = evaluation_imgs
gamma = 0.005
#subsamples_ = [5,8,12]
if subsampling:
subsamples = 5
else:
subsamples = 100000
learning_rates = [math.pow(10,-4)]
learning_rates_ = {}
if less_features:
weights_sample = random.sample(range(features_used), 2)
else:
weights_sample = random.sample(range(4096), 10)
all_alphas = [1]
regs = [1e-6]
n_samples = 0.0
if less_features:
sum_x = np.zeros(features_used)
sum_sq_x = np.zeros(features_used)
else:
sum_x = np.zeros(4096)
sum_sq_x = np.zeros(4096)
if len(sys.argv) != 2:
print 'wrong arguments'
exit()
mous = 'whole'
global class_
class_ = sys.argv[1]
print 'learning', class_
if mous != 'whole':
train_imgs = get_image_numbers(test_imgs,train_imgs,class_)
plt.figure()
mean = []
variance = []
scaler = []
functions = {}
#want to just learn images with objects present
tr_images = []
te_images = []
for img in img_train:
y = get_labels(class_,img, 'partial', 1)
if y[0] > 0:
tr_images.append(img)
for img in img_eval:
y = get_labels(class_,img, 'partial', 1)
if y[0] > 0:
te_images.append(img)
if normalize:
print 'normalizing scaler'
scaler = MinMaxScaler()
for img_nr in tr_images:
print img_nr
scaler = minibatch_(None,None,scaler,[], [],[],[],[],[],img_nr,[],[],subsamples,'mean_variance')
#normalize
learning_rate0 = learning_rates[0]
learning_rate = learning_rate0
alpha1 = all_alphas[0]
reg = regs[0]
alphas_levels = [0,1,reg]
alphas_patches = [1,reg,1,1]
alphas_just_patches = [1, reg, 0, 0]
alphas_just_parent = [1, reg, 1, 0]
alphas_just_level = [1, reg, 0, 1]
X_ = []
y_ = []
X_test = []
y_test = []
w_all = {}
levels_num = 8
t = 0
for epochs in np.arange(6):
global prune_tree_levels
prune_tree_levels = levels_num
alphas = [1-alpha1,alpha1,reg]
if epochs == 0:
# initialize or reset w , plot_losses
w_all[levels_num] = []
if less_features:
w_levels = 0.01 * np.random.rand(features_used)
w_patches = 0.01 * np.random.rand(features_used)
else:
w_levels = 0.01 * np.random.rand(4096)
w_patches = 0.01 * np.random.rand(4096)
w_just_patches = 0.01 * np.random.rand(4096)
w_just_parent = 0.01 * np.random.rand(4096)
w_just_level = 0.01 * np.random.rand(4096)
w_new_loss = 0.01 * np.random.rand(4096)
plot_training_loss_levels = []
plot_evaluation_loss_levels = []
plot_training_loss_patches = []
plot_evaluation_loss_patches = []
loss_train= []
loss_test = []
full_image_test = []
full_image_train = []
learning_rate_again = []
start = time.time()
mse_train_ = []
mse_test_ = []
mse_mxlvl_train = []
mse_mxlvl_test = []
mse_fllycover_train = []
mse_fllycover_test = []
clte = []
cltr = []
lmte = []
lmtr = []
new = True
print 'training model'
loss_train_levels = []
loss_eval_levels = []
loss_train_patches = []
loss_eval_patches = []
full_image__train = []
print epochs, learning_rate, alphas
#shuffle images, not boxes!
y_train = []
for img_nr in tr_images:
w_temp,le,nr_nodes, y = minibatch_(functions,None,scaler,w_levels, [],[],[],[],[],img_nr,alphas_levels,learning_rate,subsamples,'training')
w_levels -= (learning_rate * w_temp)
t += nr_nodes
y_train.append(y)
learning_rate = learning_rate0 * (1+learning_rate0*gamma*t)**-1
plt.figure()
for name, w, alpha in zip(['levels'],[w_levels],[alphas_levels]):
predictions = {}
over_under = {}
distance = {}
levels_c = {}
levels_error = []
patches_error = []
for img_nr in te_images:
cpls,trew,best,all_patches, best_iep = minibatch_(functions, [],scaler,w, [],[],[],[],[],img_nr,alphas,learning_rate0,subsamples, 'levels_test')
if len(cpls) < 6 or trew[0] < 2:
continue
im = imread('/var/node436/local/tstahl/Images/'+ (format(img_nr, "06d")) +'.jpg')
for p in all_patches:
patches_error.append((p[0][1]-p[1])**2)
for i,(b,c,b_iep) in enumerate(zip(best,cpls,best_iep)):
levels_error.append((c-trew[0])**2)
coord_iep = b_iep[0]
plt.imshow(im)
plt.axis('off')
ax = plt.gca()
#ax.add_patch(Rectangle((int(coord[0]), int(coord[1])), int(coord[2] - coord[0]), int(coord[3] - coord[1]), edgecolor='black', facecolor='none'))
ax.add_patch(Rectangle((int(coord_iep[0]), int(coord_iep[1])), int(coord_iep[2] - coord_iep[0]), int(coord_iep[3] - coord_iep[1]), edgecolor='red', facecolor='none'))
ax.set_title('IEP best: %s\n IEP Level: %s'%(b_iep[1],c))
plt.savefig('/home/tstahl/best/best_preds_%s_%s.png'%(img_nr,i))
plt.clf()
print name, 'levels error: ',np.array(levels_error).sum()/len(levels_error), 'patches error: ',np.array(patches_error).sum()/len(patches_error),
def bool_rect_intersect(A, B):
return not (B[0]>A[2] or B[2]<A[0] or B[3]<A[1] or B[1]>A[3]), 1/((A[2]- A[0] + 1)*(A[3]-A[1] + 1))
#return !(r2.left > r1.right || r2.right < r1.left || r2.top > r1.bottom ||r2.bottom < r1.top);
if __name__ == "__main__":
# cProfile.run('main()')
main()
|
[
"tstahl@node429.cm.cluster"
] |
tstahl@node429.cm.cluster
|
5bdb451f4eafff11c3931838f36a071598586ff1
|
df23ee09fffae3ea6a32925f80996f474aefabb9
|
/src/myclips/TemplatesManager.py
|
4b84efa7e9d07d49ae0de0d6c54d21e6f5505cd1
|
[] |
no_license
|
stefano-bragaglia/myclips
|
9e5e985d4b67250723012da4b2ed720e2bfeac72
|
bb7b8dc6c1446235777c0e4ebf23e641d99ebc03
|
refs/heads/master
| 2022-08-01T10:09:45.019686
| 2020-05-27T19:59:47
| 2020-05-27T19:59:47
| 267,410,326
| 0
| 0
| null | 2020-05-27T19:38:28
| 2020-05-27T19:38:28
| null |
UTF-8
|
Python
| false
| false
| 12,149
|
py
|
'''
Created on 17/Jul/2012
@author: Francesco Capozzo
'''
from myclips.Observable import Observable
from myclips.RestrictedManager import RestrictedManager, RestrictedDefinition
from myclips.facts.Fact import FactInvalidSlotName
class TemplatesManager(RestrictedManager, Observable):
'''
Stores the list of allowed templates definitions for the scope
'''
instance = None
EVENT_NEW_DEFINITION = "EVENT_TemplatesManager_NewDefinition"
"""Event sign used when new definition is added, observer will
be notified with this sign!"""
def __init__(self, scope):
'''
Create a new TemplatesManager for the scope
@param scope: the scope owner of this manager
@type scope: L{Scope}
'''
Observable.__init__(self, [
TemplatesManager.EVENT_NEW_DEFINITION
])
RestrictedManager.__init__(self, scope)
def addDefinition(self, definition):
'''
Add a new definition and notify observers about this
@param definition: a new function definition
@type definition: L{TemplateDefinition}
'''
RestrictedManager.addDefinition(self, definition)
# after i added the definition, i need to fire the event
self.fire(self.__class__.EVENT_NEW_DEFINITION, definition)
class TemplateDefinition(RestrictedDefinition):
'''
Describes a template definition
'''
def __init__(self, moduleName, defName, linkedType, slots=None):
'''
Create a new definition from params
@param moduleName: the owner module's name
@type moduleName: string
@param defName: the template name
@type defName: string
@param linkedType: the DefTemplateConstruct linked to this!
@type linkedType: L{DefTemplateConstruct}
@param slots: a list of slot definitions
@type slots: list of L{SlotDefinition}
'''
RestrictedDefinition.__init__(self, moduleName, defName, "deftemplate", linkedType)
self._slots = {} if slots is None else slots
@property
def slots(self):
'''
Get the list of slot definitions
'''
return self._slots
def getSlot(self, slotName):
'''
Get a slot def by name
@param slotName: the name of slot
@type slotName: string
'''
return self._slots[slotName]
def isValidFact(self, fact):
'''
Check if a template fact is valid for this template,
using the template definition. Valid means:
- the fact's template name matches this definition
- the fact has slot values for all required slots
- the fact has no slots that are not defined in this definition
- the fact's slot types conform to this definition
- the values in the fact's slots satisfy this definition
@param fact: the fact to check
@type fact: L{Fact}
@rtype: boolean
'''
from myclips.facts.TemplateFact import TemplateFact
assert isinstance(fact, TemplateFact)
# fast check first
if not isinstance(fact, TemplateFact) \
or fact.templateName != self.name \
or fact.moduleName != self.moduleName:
return False
# go deeper in slot configurations
for slotName, slotDef in self.slots.items():
try:
# if slot is a single-field and value is a multi-field is an error for sure
if slotDef.getSlotType() == SlotDefinition.TYPE_SINGLE \
and isinstance(fact[slotName], list):
return "DefTemplate {0} slot definition {1} requires a single value. Multifield value found: {2}".format(self.name,
slotName,
fact[slotName])
# if slot is a multi-field and value is single, i can cast it to a multi-field
# to avoid errors
elif slotDef.getSlotType() == SlotDefinition.TYPE_MULTI \
and not isinstance(fact[slotName], list):
fact[slotName] = [fact[slotName]]
# now check slot attributes
for sAttr in slotDef.getSlotAttributes():
if isinstance(sAttr, Attribute_TypeConstraint):
# check vs types
valuesToCheck = []
if slotDef.getSlotType() == SlotDefinition.TYPE_SINGLE:
valuesToCheck.append(fact[slotName])
else:
valuesToCheck = fact[slotName]
for singleValue in valuesToCheck:
if not isinstance(singleValue, sAttr.getAllowedTypes()):
return "A {2} value found doesn't match the allowed types {3} for slot {0} of template {4}::{1}".format(
slotName,
self.name,
singleValue.__class__.__name__,
tuple([t.__name__ for t in sAttr.getAllowedTypes()]),
self.moduleName
)
except FactInvalidSlotName:
# the slotName is not set in the fact
# check if a default attr is available for the slot
# if the default is ?NONE raise error (nil value is not admitted)
# else use the default value or None
if slotDef.hasSlotAttribute(Attribute_DefaultValue.attributeType):
defValue = slotDef.getSlotAttribute(Attribute_DefaultValue.attributeType).getDefaultValue()
#print defValue
if defValue is None:
return "Slot %s requires a value because of its (default ?NONE) attribute"%slotName
else:
fact[slotName] = defValue
else:
import myclips.parser.Types as types
# if no default attribute is used, ?DERIVE is the default (meaning nil for a single-slot, [] for a multi-slot)
if slotDef.getSlotType() == SlotDefinition.TYPE_MULTI:
fact[slotName] = []
else:
fact[slotName] = types.SPECIAL_VALUES['?DERIVE']
# check if some slot in the fact has no definition
for slotInFact in fact.slots():
if not self._slots.has_key(slotInFact):
return "Invalid slot %s not defined in corresponding deftemplate %s"%(slotInFact, self.name)
return True
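# Note: isValidFact returns True when every constraint is satisfied and an
# error *string* otherwise, so callers should test `result is True` rather
# than plain truthiness (non-empty error strings are also truthy).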
class SlotDefinition(object):
'''
Describes a slot definition
'''
TYPE_SINGLE = "single-slot"
TYPE_MULTI = "multi-slot"
def __init__(self, slotName, slotType, slotAttributes=None):
'''
Create the definition from params
@param slotName: the slot key name
@type slotName: string
@param slotType: the slot type (single or multi?)
@type slotType: "single-slot"|"multi-slot"
@param slotAttributes: a list of attributes for the slots
@type slotAttributes: list of L{Attribute}
'''
self._slotName = slotName
'''store the name'''
self._slotType = slotType
'''store the type'''
self._slotAttributes = dict([(attribute.attributeType, attribute) for attribute
in (slotAttributes
if isinstance(slotAttributes, list)
else [])]) if not isinstance(slotAttributes, dict) else slotAttributes
'''store the attributes using a dict'''
def getSlotName(self):
'''
get the slot name
'''
return self._slotName
def getSlotType(self):
'''
get the type
'''
return self._slotType
def getSlotAttributes(self):
'''
Get all attributes
'''
return self._slotAttributes.values()
def addSlotAttribute(self, attribute):
'''
Add a new attribute
@param attribute: an attribute
@type attribute: L{Attribute}
'''
self._slotAttributes[attribute.attributeType] = attribute
def getSlotAttribute(self, attrName):
'''
Get an attribute by attribute type name
@param attrName: the type
@type attrName: string
'''
return self._slotAttributes[attrName]
def hasSlotAttribute(self, attrName):
'''
Check if an attribute is already defined
@param attrName: the name
@type attrName: string
'''
return self._slotAttributes.has_key(attrName)
@staticmethod
def fromParserSlotDefinition(psl):
'''
Helper method: create a slot-definition
from a parsed types.SlotDefinition, setting attributes
and other magic things
@param psl: a parsed types.SlotDefinition
@type psl: L{myclips.parser.types.SlotDefinition}
@rtype: L{SlotDefinition}
'''
import myclips.parser.Types as types
sType = SlotDefinition.TYPE_SINGLE
if isinstance(psl, types.MultiSlotDefinition):
sType = SlotDefinition.TYPE_MULTI
# add Default = Nil for every slot
# if a definition of default value is
# available, it will overwrite this default
attrs = {
Attribute_DefaultValue.attributeType : Attribute_DefaultValue(types.SPECIAL_VALUES['?DERIVE'] if sType == SlotDefinition.TYPE_SINGLE else [])
}
for sattr in psl.attributes:
if isinstance(sattr, types.DefaultAttribute):
attrs[Attribute_DefaultValue.attributeType] = Attribute_DefaultValue(sattr.defaultValue)
elif isinstance(sattr, types.TypeAttribute):
attrs[Attribute_TypeConstraint.attributeType] = Attribute_TypeConstraint(sattr.allowedTypes)
# else unknown slot attribute type
# (like cardinality, range or allowed-constant, which are unsupported)
# so i just ignore it
return SlotDefinition(psl.slotName, sType, attrs)
class Attribute(object):
'''
Base class for attributes
'''
attributeType = ""
'''attribute type key'''
pass
class Attribute_DefaultValue(Attribute):
'''
Describes a default attribute for a slot
'''
attributeType = "default"
def __init__(self, defaultValue):
'''
Set up the default value for a slot
@param defaultValue: the default value
@type defaultValue: L{BaseParsedType}
'''
self.defaultValue = defaultValue
def getDefaultValue(self):
'''
Get the default value
'''
return self.defaultValue
class Attribute_TypeConstraint(Attribute):
'''
Describes a type attribute for a slot
'''
attributeType = "type"
def __init__(self, allowedTypes):
'''
@param allowedTypes: a tuple of valid class types
'''
self.allowedTypes = allowedTypes
def getAllowedTypes(self):
'''
Get valid types
'''
return self.allowedTypes
|
[
"ximarx@gmail.com"
] |
ximarx@gmail.com
|
1114c90bad42e3a38380605f91f476588a622ef2
|
4e792017bbcc286c9b9002a50406f64e5edebe1a
|
/VAE.py
|
091ff98130ea5097f83debf1ca1b2fbcf715c657
|
[] |
no_license
|
thefirstthing/testnew
|
a34daf44b0c0739f05ee65f0a17975445b1ca4e7
|
85db8816359834147d066645f7787c0729fccfa1
|
refs/heads/master
| 2022-05-30T07:02:23.854190
| 2020-05-05T10:15:19
| 2020-05-05T10:15:19
| 260,722,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,637
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 15:34:08 2018
@author: zy
"""
'''
Variational autoencoder (VAE)
'''
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from scipy.stats import norm
#https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10
# train-images-idx3-ubyte.gz: training set images (9912422 bytes)
# train-labels-idx1-ubyte.gz: training set labels (28881 bytes)
# t10k-images-idx3-ubyte.gz: test set images (1648877 bytes)
# t10k-labels-idx1-ubyte.gz: test set labels (4542 bytes)
mnist = input_data.read_data_sets('MNIST-data', one_hot=True)
print(type(mnist)) # <class 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets'>
print('Training data shape:', mnist.train.images.shape) # Training data shape: (55000, 784)
print('Test data shape:', mnist.test.images.shape) # Test data shape: (10000, 784)
print('Validation data shape:', mnist.validation.images.shape) # Validation data shape: (5000, 784)
print('Training label shape:', mnist.train.labels.shape) # Training label shape: (55000, 10)
train_X = mnist.train.images
train_Y = mnist.train.labels
test_X = mnist.test.images
test_Y = mnist.test.labels
'''
Define the network hyperparameters
'''
n_input = 784
n_hidden_1 = 256
n_hidden_2 = 2
learning_rate = 0.001
training_epochs = 20  # number of training epochs
batch_size = 128  # mini-batch size
display_epoch = 3
show_num = 10
x = tf.placeholder(dtype=tf.float32, shape=[None, n_input])
# Latent placeholder: fed with samples from the prior later on to generate synthetic data
zinput = tf.placeholder(dtype=tf.float32, shape=[None, n_hidden_2])
'''
Define the learnable parameters
'''
weights = {
'w1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.001)),
'mean_w1': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.001)),
'log_sigma_w1': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.001)),
'w2': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1], stddev=0.001)),
'w3': tf.Variable(tf.truncated_normal([n_hidden_1, n_input], stddev=0.001))
}
biases = {
'b1': tf.Variable(tf.zeros([n_hidden_1])),
'mean_b1': tf.Variable(tf.zeros([n_hidden_2])),
'log_sigma_b1': tf.Variable(tf.zeros([n_hidden_2])),
'b2': tf.Variable(tf.zeros([n_hidden_1])),
'b3': tf.Variable(tf.zeros([n_input]))
}
'''
Define the network structure
'''
# First fully connected layer: 784-dimensional input -> 256-dimensional output
h1 = tf.nn.relu(tf.add(tf.matmul(x, weights['w1']), biases['b1']))
# Second layer: two parallel output heads, one for the mean and one for the log-variance
z_mean = tf.add(tf.matmul(h1, weights['mean_w1']), biases['mean_b1'])
z_log_sigma_sq = tf.add(tf.matmul(h1, weights['log_sigma_w1']), biases['log_sigma_b1'])
# Combine the two heads via the reparameterization formula to form the 2-node decoder input; eps is a standard Gaussian sample
eps = tf.random_normal(tf.stack([tf.shape(h1)[0], n_hidden_2]), 0, 1, dtype=tf.float32)
z = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), eps))
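# Reparameterization trick: sqrt(exp(log(sigma^2))) = sigma, so the line above
# computes z = mean + sigma * eps with eps ~ N(0, I). Sampling this way keeps z
# differentiable with respect to z_mean and z_log_sigma_sq during training.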
# Decoder: 2-dimensional latent input -> 256-dimensional output
h2 = tf.nn.relu(tf.matmul(z, weights['w2']) + biases['b2'])
# Decoder: 256-dimensional input -> 784-dimensional output, i.e. a reconstruction of the original input
reconstruction = tf.matmul(h2, weights['w3']) + biases['b3']
# These two nodes are not part of the training graph; they are used to generate data from given latent codes
h2out = tf.nn.relu(tf.matmul(zinput, weights['w2']) + biases['b2'])
reconstructionout = tf.matmul(h2out, weights['w3']) + biases['b3']
'''
Build the loss and the backpropagation step
'''
# Reconstruction loss
# Loss between the original and reconstructed data; squared error is used here, but cross-entropy is also an option
reconstr_loss = 0.5 * tf.reduce_sum((reconstruction - x) ** 2)
print(reconstr_loss.shape)  # () scalar
# KL divergence between N(z_mean, exp(z_log_sigma_sq)) and the standard normal N(0, I):
# KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
latent_loss = -0.5 * tf.reduce_sum(1 + z_log_sigma_sq - tf.square(z_mean) - tf.exp(z_log_sigma_sq), 1)
print(latent_loss.shape) # (128,)
cost = tf.reduce_mean(reconstr_loss + latent_loss)
# Define the optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
num_batch = int(np.ceil(mnist.train.num_examples / batch_size))
'''
Start training
'''
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    print('Training started')
for epoch in range(training_epochs):
total_cost = 0.0
for i in range(num_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
_, loss = sess.run([optimizer, cost], feed_dict={x: batch_x})
total_cost += loss
        # Log progress
if epoch % display_epoch == 0:
print('Epoch {}/{} average cost {:.9f}'.format(epoch + 1, training_epochs, total_cost / num_batch))
    print('Training finished')
    # Evaluate on the test set
    print('Result:', cost.eval({x: mnist.test.images}))
    # Visualize reconstructions
reconstruction = sess.run(reconstruction, feed_dict={x: mnist.test.images[:show_num]})
plt.figure(figsize=(1.0 * show_num, 1 * 2))
for i in range(show_num):
        # Original image
plt.subplot(2, show_num, i + 1)
plt.imshow(np.reshape(mnist.test.images[i], (28, 28)), cmap='gray')
plt.axis('off')
        # VAE reconstruction
plt.subplot(2, show_num, i + show_num + 1)
plt.imshow(np.reshape(reconstruction[i], (28, 28)), cmap='gray')
plt.axis('off')
plt.show()
    # Plot the 2-D latent means for the test set, colored by digit label
plt.figure(figsize=(5, 4))
    # Convert one-hot labels to integer class labels
labels = [np.argmax(y) for y in mnist.test.labels]
mean, log_sigma = sess.run([z_mean, z_log_sigma_sq], feed_dict={x: mnist.test.images})
plt.scatter(mean[:, 0], mean[:, 1], c=labels)
plt.colorbar()
plt.show()
'''
plt.figure(figsize=(5,4))
plt.scatter(log_sigma[:,0],log_sigma[:,1],c=labels)
plt.colorbar()
plt.show()
'''
'''
    Sample a grid of latent points from the Gaussian prior to generate synthetic digits
'''
    n = 15  # 15 x 15 grid of digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
x_decoded = sess.run(reconstructionout, feed_dict={zinput: z_sample})
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='gray')
plt.show()
|
[
"noreply@github.com"
] |
thefirstthing.noreply@github.com
|
851f82dcfa058916c1efeb84f69d6a2d9bbed524
|
4f81302e314a8ec89e2d142178d21a766bdb2bd1
|
/snake.py
|
aea8d223986e9e59416e2ce0d657ea7ac73b222e
|
[] |
no_license
|
AlirezaKhadem/snake_game
|
200f7cf73066fbfd7866c32b381148411b9d565c
|
b51bc047273000f926e86b4179fa417db3c92707
|
refs/heads/master
| 2022-12-15T12:01:32.850070
| 2020-08-26T06:54:13
| 2020-08-26T06:54:13
| 290,420,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,179
|
py
|
import consts
class Snake:
dx = {'UP': 0, 'DOWN': 0, 'LEFT': -1, 'RIGHT': 1}
dy = {'UP': -1, 'DOWN': 1, 'LEFT': 0, 'RIGHT': 0}
def __init__(self, keys, game, pos, color, direction):
self.keys = keys
self.cells = [pos]
self.game = game
self.game.add_snake(self)
self.color = color
self.direction = direction
self.pre_direction = ''
game.get_cell(pos).set_color(color)
def get_head(self):
return self.cells[-1]
def val(self, x):
if x < 0:
x += self.game.size
if x >= self.game.size:
x -= self.game.size
return x
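    # Note: the move_* methods below inline their own wrap-around checks rather
    # than calling val(); a consolidated alternative using val() is sketched in
    # the move() method at the end of this class.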
def next_move(self):
new_direction = self.direction
if new_direction == 'DOWN':
self.move_down()
elif new_direction == 'UP':
self.move_up()
elif new_direction == 'LEFT':
self.move_left()
elif new_direction == 'RIGHT':
self.move_right()
def move_right(self):
head = self.get_head()
new_cell = (head[0] + 1, head[1])
if new_cell[0] >= consts.table_size:
new_cell = (0, head[1])
self.check_kill(new_cell)
if not self.check_fruit(new_cell):
self.game.get_cell(self.cells[0]).set_color(consts.back_color)
self.cells.remove(self.cells[0])
self.cells.append(new_cell)
self.game.get_cell(new_cell).set_color(self.color)
def move_left(self):
head = self.get_head()
new_cell = (head[0] - 1, head[1])
if new_cell[0] == -1:
new_cell = (consts.table_size - 1, head[1])
self.check_kill(new_cell)
if not self.check_fruit(new_cell):
self.game.get_cell(self.cells[0]).set_color(consts.back_color)
self.cells.remove(self.cells[0])
self.cells.append(new_cell)
self.game.get_cell(new_cell).set_color(self.color)
def move_up(self):
head = self.get_head()
new_cell = (head[0], head[1] - 1)
if new_cell[1] == -1:
new_cell = (head[0], consts.table_size - 1)
self.check_kill(new_cell)
if not self.check_fruit(new_cell):
self.game.get_cell(self.cells[0]).set_color(consts.back_color)
self.cells.remove(self.cells[0])
self.cells.append(new_cell)
self.game.get_cell(new_cell).set_color(self.color)
def move_down(self):
head = self.get_head()
new_cell = (head[0], head[1] + 1)
if new_cell[1] >= consts.table_size:
            new_cell = (head[0], 0)  # wrap to the top row (the original used 1, skipping row 0)
self.check_kill(new_cell)
if not self.check_fruit(new_cell):
self.game.get_cell(self.cells[0]).set_color(consts.back_color)
self.cells.remove(self.cells[0])
self.cells.append(new_cell)
self.game.get_cell(new_cell).set_color(self.color)
def check_kill(self, new_cell):
if [new_cell[0], new_cell[1]] in consts.block_cells:
self.game.kill(self)
elif new_cell in self.cells:
self.game.kill(self)
for snake in self.game.snakes:
if new_cell in snake.cells:
self.game.kill(self)
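        # Note: self is included in game.snakes, so the loop above re-checks
        # self-collision in addition to the explicit elif branch.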
    def check_fruit(self, new_cell):
        return self.game.get_cell(new_cell).color == consts.fruit_color
def handle(self, keys):
for key in keys:
if key in self.keys:
new_direction = self.keys[key]
pre_direction = self.pre_direction
if new_direction == 'UP' and pre_direction != 'DOWN':
self.change_direction(key)
elif new_direction == 'DOWN' and pre_direction != 'UP':
self.change_direction(key)
elif new_direction == 'LEFT' and pre_direction != 'RIGHT':
self.change_direction(key)
elif new_direction == 'RIGHT' and pre_direction != "LEFT":
self.change_direction(key)
def change_direction(self, key):
self.pre_direction = self.keys[key]
self.direction = self.keys[key]
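    def move(self):
        # A consolidated alternative to the four move_* methods above (added as
        # a sketch for illustration; next_move does not call it). It relies on
        # the dx/dy tables and the val() wrap-around helper, assuming
        # self.game.size equals consts.table_size.
        head = self.get_head()
        new_cell = (self.val(head[0] + Snake.dx[self.direction]),
                    self.val(head[1] + Snake.dy[self.direction]))
        self.check_kill(new_cell)
        if not self.check_fruit(new_cell):
            self.game.get_cell(self.cells[0]).set_color(consts.back_color)
            self.cells.remove(self.cells[0])
        self.cells.append(new_cell)
        self.game.get_cell(new_cell).set_color(self.color)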
|
[
"sar.khadem2000@gmail.com"
] |
sar.khadem2000@gmail.com
|
59bad89b567bce131e1cad571a2d11e9acdf72e4
|
1f34a9acb248bbe585cc3c976f58e2c7733990c8
|
/to_inference_engine.py
|
f3472ae6e7469e6006927b99da9a346a540d019d
|
[] |
no_license
|
minikai/FireHose
|
822edab71448819f5a07c8cf3bbd85e657a64ec3
|
ff0f85f2433ac62b1ffba438e1e20d3c22d7712d
|
refs/heads/master
| 2020-03-11T02:20:00.853226
| 2018-04-20T02:36:53
| 2018-04-20T02:36:53
| 129,716,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
import requests
import json
import pandas as pd
import configparser
import csv
import datetime
import numpy as np
import time
config = configparser.ConfigParser()
config.read('ex_config.ini')
duration_API = config['push_data']['duration_API']
url = config['api']['url']
df1 = pd.read_csv('testing_data.txt', header=None)
a = df1.shape[0]
b = df1.shape[1]
def instructions_data_structure(timeslice_str):
arr = timeslice_str.split(';')
if len(arr) != 14:
return timeslice_str
else:
        instructions = {}
        instructions['STATUS_EQUIPMENT'] = int(arr[0])
        instructions['STATUS_FAN'] = int(arr[1])
        instructions['VOLTAGE_INPUT'] = float(arr[2])
        instructions['CURRENT_INPUT'] = float(arr[3])
        instructions['PRESSURE_OUTPUT'] = float(arr[4])
        instructions['TEMPERATURE_OUTPUT'] = int(arr[5])
        instructions['KW_FAN'] = float(arr[6])
        instructions['KW_EQUIPMENT'] = float(arr[7])
        instructions['FREQ_FAN'] = float(arr[8])
        instructions['KW_SUMMARY'] = float(arr[9])
        instructions['PRESSURE_EQUIPMENT'] = float(arr[10])
        instructions['TEMPERATURE_EQUIPMENT'] = int(arr[11])
        instructions['TEMPERATURE_ENVIRONMENT'] = float(arr[12])
        return instructions
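# Example (hypothetical reading, for illustration only): a well-formed time
# slice has 14 ';'-separated fields; note that the 14th field (arr[13]) is
# required by the length check but never stored.
# sample = "1;0;220.0;5.2;0.8;45;1.1;3.4;50.0;4.5;0.9;60;25.5;0"
# instructions_data_structure(sample)['VOLTAGE_INPUT']  # -> 220.0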
c = 0
d = []
for i in range(a):
for ii in range(0, b - 2):
        if c != 10:
            if type(df1[df1.columns[ii]][i]) == float or type(df1[df1.columns[ii]][i]) == np.float64:
                pass
            else:
                fields = instructions_data_structure(df1[df1.columns[ii]][i])
                d.append(fields)
                c = c + 1
        else:
            # Accumulate 10 readings, then POST them to the inference engine as
            # one batch (note: the reading at the current position is skipped
            # in this flush branch)
            e = {}
            e['data'] = d
            r = requests.post(url, json=e)
            time.sleep(int(duration_API))
            c = 0
            print(e)
            d = []
print("end")
|
[
"noreply@github.com"
] |
minikai.noreply@github.com
|
91454149cf929925c7fb858725a3e439460d1deb
|
d3444cc316b1ac38af84fff0478602c017f5223b
|
/src/models/audio_model.py
|
b4ec6b44b42d340a5b6e297ced353595830e20f8
|
[] |
no_license
|
andricmitrovic/multimodal-emotion-classification
|
57b100ef71eca011bb39cb395950f9602ce654a4
|
098970a568b2544519d41525beed969b93e5cb74
|
refs/heads/main
| 2023-07-04T09:03:15.401933
| 2021-08-11T07:33:15
| 2021-08-11T07:33:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,866
|
py
|
import torch
from torch import nn
import torch.nn.functional as F
class BaselineAudioModel_1(nn.Module):
def __init__(self, input_size):
super(BaselineAudioModel_1, self).__init__()
self.input_size = input_size
self.fc1 = nn.Linear(6280, 2000)
self.bc1 = nn.BatchNorm1d(2000)
self.fc2 = nn.Linear(2000, 200)
self.bc2 = nn.BatchNorm1d(200)
self.fc3 = nn.Linear(200, 100)
        self.bc3 = nn.BatchNorm1d(100)
        self.fc4 = nn.Linear(100, 7)
def forward(self, x):
        # Flatten the input to (batch, 6280)
        h = torch.flatten(x, start_dim=1)
h = self.fc1(h)
h = self.bc1(h)
h = torch.relu(h)
h = F.dropout(h, p=0.5, training=self.training)
h = self.fc2(h)
h = self.bc2(h)
h = torch.relu(h)
h = F.dropout(h, p=0.5, training=self.training)
        h = self.fc3(h)
        h = self.bc3(h)
        h = torch.relu(h)
        h = F.dropout(h, p=0.5, training=self.training)
out = self.fc4(h)
        # We don't apply softmax to the output since nn.CrossEntropyLoss
        # combines LogSoftmax and NLLLoss
return out
class BaselineAudioModel_2(nn.Module):
def __init__(self):
super(BaselineAudioModel_2, self).__init__()
        def conv(ni, nf, ks=3, act=True):
            res = nn.Conv2d(ni, nf, stride=2, kernel_size=ks, padding=ks // 2)
            if act:
                res = nn.Sequential(res, nn.ReLU())
            return res
        self.conv1 = conv(1, 4)
        self.conv2 = conv(4, 8)
        self.conv3 = conv(8, 16)
        self.conv4 = conv(16, 32)
        self.conv5 = conv(32, 64)
        self.conv6 = conv(64, 128, act=False)
        self.flatten = nn.Flatten()
        self.lin1 = nn.Linear(1024, 512)
        self.lin2 = nn.Linear(512, 64)
        self.lin3 = nn.Linear(64, 3)
    def forward(self, x):
        h = self.conv1(x)
        h = self.conv2(h)
        h = self.conv3(h)
        h = self.conv4(h)
        h = self.conv5(h)
        h = self.conv6(h)
        h = self.flatten(h)
        h = self.lin1(h)
        h = self.lin2(h)
        h = self.lin3(h)
        return h
class BaselineAudioModel_3(nn.Module):
def __init__(self, n_input=1, n_output=3, stride=16, n_channel=32):
super().__init__()
self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)
self.bn1 = nn.BatchNorm1d(n_channel)
self.pool1 = nn.MaxPool1d(4)
self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=3)
self.bn2 = nn.BatchNorm1d(n_channel)
self.pool2 = nn.MaxPool1d(4)
self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)
self.bn3 = nn.BatchNorm1d(2 * n_channel)
self.pool3 = nn.MaxPool1d(4)
self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)
self.bn4 = nn.BatchNorm1d(2 * n_channel)
self.pool4 = nn.MaxPool1d(4)
self.fc1 = nn.Linear(2 * n_channel, n_output)
        self.flatten = nn.Flatten()
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.pool1(x)
x = self.conv2(x)
x = F.relu(self.bn2(x))
x = self.pool2(x)
x = self.conv3(x)
x = F.relu(self.bn3(x))
x = self.pool3(x)
x = self.conv4(x)
x = F.relu(self.bn4(x))
x = self.pool4(x)
x = F.avg_pool1d(x, x.shape[-1])
x = x.permute(0, 2, 1)
        x = self.flatten(x)
x = self.fc1(x)
        # We don't apply softmax to the output since nn.CrossEntropyLoss
        # combines LogSoftmax and NLLLoss
return x
if __name__ == '__main__':
    print('UNIT TEST AudioBaselineModel_3:')
    x = torch.zeros(32, 1, 57515)
    model = BaselineAudioModel_3()
    out = model(x)
print('\t out shape: ', out.size())
print('AudioBaselineModel_3 test PASSED')
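    # Additional smoke test for BaselineAudioModel_2 (a sketch; the 256x128
    # input size is an assumption chosen so that six stride-2 convolutions
    # produce a 128 x 4 x 2 feature map, matching the 1024 units expected by lin1).
    print('UNIT TEST AudioBaselineModel_2:')
    x2 = torch.zeros(8, 1, 256, 128)
    model2 = BaselineAudioModel_2()
    out2 = model2(x2)
    print('\t out shape: ', out2.size())
    print('AudioBaselineModel_2 test PASSED')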
|
[
"bkoch4142@users.noreply.github.com"
] |
bkoch4142@users.noreply.github.com
|
0a926fa0ce8a2eecd37752dd43344ad283fc7811
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/MSSM_HiggsToMuMu/fragment_mhmodp_MA225_tb10_bbA.py
|
757549654d6e789d0e2ed800678ef40325916912
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 16,607
|
py
|
COM_ENERGY = 13000.0 # GeV
CROSS_SECTION = 1 # pb
PROCESS = 'HiggsBSM:gg2A3bbbar = on'
SLHA_TABLE = """BLOCK SPINFO
1 FeynHiggs
2 2.12.0
2 built on ott 13, 2016
BLOCK MODSEL
1 0 # Model
2 1 # GridPts
3 0 # Content
4 0 # RPV
5 0 # CPV
6 0 # FV
BLOCK SMINPUTS
1 1.28952828E+02 # invAlfaMZ
2 1.16637000E-05 # GF
3 1.19000000E-01 # AlfasMZ
4 9.11876000E+01 # MZ
5 4.16000000E+00 # Mb
6 1.73200000E+02 # Mt
7 1.77703000E+00 # Mtau
11 5.10998902E-04 # Me
13 1.05658357E-01 # Mmu
21 6.00000000E-03 # Md
22 3.00000000E-03 # Mu
23 9.50000000E-02 # Ms
24 1.28600000E+00 # Mc
BLOCK MINPAR
3 1.00000000E+01 # TB
BLOCK EXTPAR
0 0.00000000E+00 # Q
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
11 1.52000000E+03 # At
12 1.52000000E+03 # Ab
13 1.52000000E+03 # Atau
23 2.00000000E+02 # MUE
25 1.00000000E+01 # TB
26 2.25000000E+02 # MA0
27 2.38932707E+02 # MHp
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK MASS
1000012 4.95907995E+02 # MSf(1,1,1)
1000011 5.02255496E+02 # MSf(1,2,1)
2000011 5.01811396E+02 # MSf(2,2,1)
1000002 1.49904454E+03 # MSf(1,3,1)
2000002 1.49959668E+03 # MSf(2,3,1)
1000001 1.50115636E+03 # MSf(1,4,1)
2000001 1.50020160E+03 # MSf(2,4,1)
1000014 4.95907995E+02 # MSf(1,1,2)
1000013 5.02339352E+02 # MSf(1,2,2)
2000013 5.01727474E+02 # MSf(2,2,2)
1000004 1.49904496E+03 # MSf(1,3,2)
2000004 1.49959737E+03 # MSf(2,3,2)
1000003 1.50116044E+03 # MSf(1,4,2)
2000003 1.50019752E+03 # MSf(2,4,2)
1000016 9.97960289E+02 # MSf(1,1,3)
1000015 1.00057941E+03 # MSf(1,2,3)
2000015 1.00146014E+03 # MSf(2,2,3)
1000006 8.76446970E+02 # MSf(1,3,3)
2000006 1.13479599E+03 # MSf(2,3,3)
1000005 9.99823082E+02 # MSf(1,4,3)
2000005 1.00222830E+03 # MSf(2,4,3)
25 1.23126379E+02 # Mh0
35 2.26430640E+02 # MHH
36 2.25000000E+02 # MA0
37 2.39079993E+02 # MHp
1000022 8.62939893E+01 # MNeu(1)
1000023 1.49424226E+02 # MNeu(2)
1000025 -2.09064388E+02 # MNeu(3)
1000035 2.68817824E+02 # MNeu(4)
1000024 1.44153518E+02 # MCha(1)
1000037 2.68602755E+02 # MCha(2)
1000021 1.50000000E+03 # MGl
BLOCK DMASS
0 1.73200000E+02 # Q
25 7.60054275E-01 # Delta Mh0
35 2.79385398E-02 # Delta MHH
36 0.00000000E+00 # Delta MA0
37 8.66637758E-02 # Delta MHp
BLOCK NMIX
1 1 9.21138506E-01 # ZNeu(1,1)
1 2 -1.38923513E-01 # ZNeu(1,2)
1 3 3.23973955E-01 # ZNeu(1,3)
1 4 -1.65060554E-01 # ZNeu(1,4)
2 1 -3.48437302E-01 # ZNeu(2,1)
2 2 -6.94538675E-01 # ZNeu(2,2)
2 3 4.94343250E-01 # ZNeu(2,3)
2 4 -3.89656549E-01 # ZNeu(2,4)
3 1 9.12601053E-02 # ZNeu(3,1)
3 2 -1.26949251E-01 # ZNeu(3,2)
3 3 -6.79246732E-01 # ZNeu(3,3)
3 4 -7.17063008E-01 # ZNeu(3,4)
4 1 -1.47536069E-01 # ZNeu(4,1)
4 2 6.94406346E-01 # ZNeu(4,2)
4 3 4.35074137E-01 # ZNeu(4,3)
4 4 -5.53844228E-01 # ZNeu(4,4)
BLOCK UMIX
1 1 -6.13722981E-01 # UCha(1,1)
1 2 7.89521439E-01 # UCha(1,2)
2 1 7.89521439E-01 # UCha(2,1)
2 2 6.13722981E-01 # UCha(2,2)
BLOCK VMIX
1 1 -7.89521439E-01 # VCha(1,1)
1 2 6.13722981E-01 # VCha(1,2)
2 1 6.13722981E-01 # VCha(2,1)
2 2 7.89521439E-01 # VCha(2,2)
BLOCK STAUMIX
1 1 6.11193766E-01 # USf(1,1)
1 2 7.91481005E-01 # USf(1,2)
2 1 7.91481005E-01 # USf(2,1)
2 2 -6.11193766E-01 # USf(2,2)
BLOCK STOPMIX
1 1 7.08232465E-01 # USf(1,1)
1 2 -7.05979302E-01 # USf(1,2)
2 1 7.05979302E-01 # USf(2,1)
2 2 7.08232465E-01 # USf(2,2)
BLOCK SBOTMIX
1 1 4.49962257E-01 # USf(1,1)
1 2 8.93047573E-01 # USf(1,2)
2 1 8.93047573E-01 # USf(2,1)
2 2 -4.49962257E-01 # USf(2,2)
BLOCK ALPHA
-1.68790043E-01 # Alpha
BLOCK DALPHA
2.08632599E-03 # Delta Alpha
BLOCK HMIX Q= -0.99900000E+03
1 2.00000000E+02 # MUE
2 1.00000000E+01 # TB
BLOCK MSOFT Q= 0.00000000E+00
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK AE Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.52000000E+03 # Af(3,3)
BLOCK AU Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.52000000E+03 # Af(3,3)
BLOCK AD Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.52000000E+03 # Af(3,3)
BLOCK YE Q= 0.00000000E+00
1 1 2.94965567E-05 # Yf(1,1)
2 2 6.09895188E-03 # Yf(2,2)
3 3 1.02576084E-01 # Yf(3,3)
BLOCK YU Q= 0.00000000E+00
1 1 1.73169981E-05 # Yf(1,1)
2 2 7.42321984E-03 # Yf(2,2)
3 3 9.99768022E-01 # Yf(3,3)
BLOCK YD Q= 0.00000000E+00
1 1 3.42833578E-04 # Yf(1,1)
2 2 5.42808075E-03 # Yf(2,2)
3 3 2.32696980E-01 # Yf(3,3)
BLOCK VCKMIN
1 2.25300000E-01 # lambda
2 8.08000000E-01 # A
3 1.32000000E-01 # rhobar
4 3.41000000E-01 # etabar
BLOCK MSL2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSL2(1,1)
2 2 2.50000000E+05 # MSL2(2,2)
3 3 1.00000000E+06 # MSL2(3,3)
BLOCK MSE2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSE2(1,1)
2 2 2.50000000E+05 # MSE2(2,2)
3 3 1.00000000E+06 # MSE2(3,3)
BLOCK MSQ2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSQ2(1,1)
2 2 2.25000000E+06 # MSQ2(2,2)
3 3 1.00000000E+06 # MSQ2(3,3)
BLOCK MSU2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSU2(1,1)
2 2 2.25000000E+06 # MSU2(2,2)
3 3 1.00000000E+06 # MSU2(3,3)
BLOCK MSD2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSD2(1,1)
2 2 2.25000000E+06 # MSD2(2,2)
3 3 1.00000000E+06 # MSD2(3,3)
BLOCK TE Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 1.55915647E+02 # Tf(3,3)
BLOCK TU Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 1.51964739E+03 # Tf(3,3)
BLOCK TD Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 3.53699410E+02 # Tf(3,3)
BLOCK SELMIX
1 1 9.99997373E-01 # UASf(1,1)
1 4 -2.29196501E-03 # UASf(1,4)
2 2 9.28921279E-01 # UASf(2,2)
2 5 -3.70277270E-01 # UASf(2,5)
3 3 6.11193766E-01 # UASf(3,3)
3 6 7.91481005E-01 # UASf(3,6)
4 1 2.29196501E-03 # UASf(4,1)
4 4 9.99997373E-01 # UASf(4,4)
5 2 3.70277270E-01 # UASf(5,2)
5 5 9.28921279E-01 # UASf(5,5)
6 3 7.91481005E-01 # UASf(6,3)
6 6 -6.11193766E-01 # UASf(6,6)
BLOCK USQMIX
1 1 9.99999999E-01 # UASf(1,1)
1 4 3.62387194E-05 # UASf(1,4)
2 2 9.99879422E-01 # UASf(2,2)
2 5 1.55287116E-02 # UASf(2,5)
3 3 7.08232465E-01 # UASf(3,3)
3 6 -7.05979302E-01 # UASf(3,6)
4 1 -3.62387194E-05 # UASf(4,1)
4 4 9.99999999E-01 # UASf(4,4)
5 2 -1.55287116E-02 # UASf(5,2)
5 5 9.99879422E-01 # UASf(5,5)
6 3 7.05979302E-01 # UASf(6,3)
6 6 7.08232465E-01 # UASf(6,6)
BLOCK DSQMIX
1 1 9.99991408E-01 # UASf(1,1)
1 4 -4.14528410E-03 # UASf(1,4)
2 2 9.97871270E-01 # UASf(2,2)
2 5 -6.52144862E-02 # UASf(2,5)
3 3 4.49962257E-01 # UASf(3,3)
3 6 8.93047573E-01 # UASf(3,6)
4 1 4.14528410E-03 # UASf(4,1)
4 4 9.99991408E-01 # UASf(4,4)
5 2 6.52144862E-02 # UASf(5,2)
5 5 9.97871270E-01 # UASf(5,5)
6 3 8.93047573E-01 # UASf(6,3)
6 6 -4.49962257E-01 # UASf(6,6)
BLOCK CVHMIX
1 1 9.99499987E-01 # UH(1,1)
1 2 3.16192499E-02 # UH(1,2)
1 3 0.00000000E+00 # UH(1,3)
2 1 -3.16192499E-02 # UH(2,1)
2 2 9.99499987E-01 # UH(2,2)
2 3 0.00000000E+00 # UH(2,3)
3 1 0.00000000E+00 # UH(3,1)
3 2 0.00000000E+00 # UH(3,2)
3 3 1.00000000E+00 # UH(3,3)
DECAY 25 7.85400221E-03 # Gamma(h0)
1.13633926E-03 2 22 22 # BR(h0 -> photon photon)
6.40744394E-04 2 22 23 # BR(h0 -> photon Z)
1.07127201E-02 2 23 23 # BR(h0 -> Z Z)
9.18890830E-02 2 -24 24 # BR(h0 -> W W)
3.17562219E-02 2 21 21 # BR(h0 -> gluon gluon)
7.08533425E-09 2 -11 11 # BR(h0 -> Electron electron)
3.15166850E-04 2 -13 13 # BR(h0 -> Muon muon)
9.04310726E-02 2 -15 15 # BR(h0 -> Tau tau)
9.95470767E-08 2 -2 2 # BR(h0 -> Up up)
1.37879170E-02 2 -4 4 # BR(h0 -> Charm charm)
1.15199340E-06 2 -1 1 # BR(h0 -> Down down)
2.89304278E-04 2 -3 3 # BR(h0 -> Strange strange)
7.59040172E-01 2 -5 5 # BR(h0 -> Bottom bottom)
DECAY 35 3.99329682E-01 # Gamma(HH)
2.33552869E-06 2 22 22 # BR(HH -> photon photon)
3.12718630E-06 2 22 23 # BR(HH -> photon Z)
8.93174528E-03 2 23 23 # BR(HH -> Z Z)
2.23212105E-02 2 -24 24 # BR(HH -> W W)
8.77304376E-04 2 21 21 # BR(HH -> gluon gluon)
8.80734239E-09 2 -11 11 # BR(HH -> Electron electron)
3.91836344E-04 2 -13 13 # BR(HH -> Muon muon)
1.11883265E-01 2 -15 15 # BR(HH -> Tau tau)
8.87898855E-11 2 -2 2 # BR(HH -> Up up)
1.22868768E-05 2 -4 4 # BR(HH -> Charm charm)
1.26249958E-06 2 -1 1 # BR(HH -> Down down)
3.17056797E-04 2 -3 3 # BR(HH -> Strange strange)
8.12457155E-01 2 -5 5 # BR(HH -> Bottom bottom)
4.28014061E-02 2 1000022 1000022 # BR(HH -> neutralino1 neutralino1)
1.62642918E-11 2 23 36 # BR(HH -> Z A0)
DECAY 36 4.27573770E-01 # Gamma(A0)
-3.34025054E-06 2 22 22 # BR(A0 -> photon photon)
-5.92281895E-06 2 22 23 # BR(A0 -> photon Z)
-2.26831021E-04 2 21 21 # BR(A0 -> gluon gluon)
-8.28631502E-09 2 -11 11 # BR(A0 -> Electron electron)
3.68655611E-04 2 -13 13 # BR(A0 -> Muon muon)
-1.05277096E-01 2 -15 15 # BR(A0 -> Tau tau)
-2.67530715E-11 2 -2 2 # BR(A0 -> Up up)
-3.72356591E-06 2 -4 4 # BR(A0 -> Charm charm)
-1.18906081E-06 2 -1 1 # BR(A0 -> Down down)
-2.98613908E-04 2 -3 3 # BR(A0 -> Strange strange)
-7.65888287E-01 2 -5 5 # BR(A0 -> Bottom bottom)
-1.26784165E-01 2 1000022 1000022 # BR(A0 -> neutralino1 neutralino1)
-1.14216722E-03 2 23 25 # BR(A0 -> Z h0)
DECAY 37 1.99253848E-01 # Gamma(Hp)
1.98585128E-08 2 -11 12 # BR(Hp -> Electron nu_e)
8.49012970E-04 2 -13 14 # BR(Hp -> Muon nu_mu)
2.40131050E-01 2 -15 16 # BR(Hp -> Tau nu_tau)
2.51613315E-06 2 -1 2 # BR(Hp -> Down up)
2.84048071E-05 2 -3 2 # BR(Hp -> Strange up)
1.73446592E-05 2 -5 2 # BR(Hp -> Bottom up)
5.06262051E-07 2 -1 4 # BR(Hp -> Down charm)
6.38356329E-04 2 -3 4 # BR(Hp -> Strange charm)
2.42874401E-03 2 -5 4 # BR(Hp -> Bottom charm)
9.76665116E-06 2 -1 6 # BR(Hp -> Down top)
2.13132998E-04 2 -3 6 # BR(Hp -> Strange top)
5.15096421E-01 2 -5 6 # BR(Hp -> Bottom top)
2.25806898E-01 2 1000022 1000024 # BR(Hp -> neutralino1 chargino1)
1.47722488E-02 2 24 25 # BR(Hp -> W h0)
2.06042305E-06 2 24 35 # BR(Hp -> W HH)
3.51688173E-06 2 24 36 # BR(Hp -> W A0)
DECAY 6 1.37127534E+00 # Gamma(top)
1.00000000E+00 2 5 24 # BR(top -> bottom W)
"""
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Higgs:useBSM = on',
PROCESS,
'SLHA:allowUserOverride = off',
'SLHA:minMassSM = 100.',
'PhaseSpace:mHatMin = 56.0'
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"pietro.vischia@gmail.com"
] |
pietro.vischia@gmail.com
|
86b995a13a8b8c7b9d9178f53df1180e499b924e
|
cfcf2a2002bf6099ed5bbfcfa215f3c83efb14a2
|
/113bN_proper_event_summary.py
|
758cc58fe6bdf8521e41d21e1a1435b883ed7256
|
[] |
no_license
|
griffada/AQUACAT_UKCEH
|
c07dcbf1ac277cd4759929e3cc2fe121cdc68fb5
|
cee49f0fa5a8b3d1fc7dab7f02da4f64648ffc5a
|
refs/heads/master
| 2023-08-16T08:02:51.831710
| 2021-10-22T13:50:48
| 2021-10-22T13:50:48
| 281,631,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,416
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 2 11:17:32 2021
Author: Adam Griffin, UKCEH
Project: AQUACAT
Script to summarise EC events.
"""
import os
import netCDF4 as nc
import pandas as pd
import numpy as np
import re
import sys
import yaml
import time
import gc
# def season(x):
# return ["DJF","MAM","JJA","SON"][((x // 90) % 4)]
# rcm = "10"
# period = "198012_201011"
rcm = sys.argv[1]
if sys.argv[2] == "present":
period = "198012_201011"
else:
period = "205012_208011"
print(f"Running RCM {rcm} for {period}.")
RCMS = ["01","04","05","06","07","08","09","10","11","12","13","15"]
periods = ["198012_201011","205012_208011"]
# CHANGE THIS TO THE TOP LEVEL OF THE FOLDER THE CSVs ARE IN
toplevel = r"/prj/aquacat/Data"
# CHANGE THIS TO THE TOP LEVEL OF THE FOLDER THE NETCDFs ARE IN
outlevel = toplevel #r'S:/Data' #
# CHANGE THIS TO WHERE THE hasData files are, they should exist in the toplevel folder.
rn = pd.read_csv(f"{toplevel}/hasData_primary.csv")
rnreg = pd.read_csv(f"{toplevel}/hasData_Regions.csv")
method="OBS"
regional = False
if regional:
subfold='/NW'
fileinfix = 'NW_POT2_pc01'
rn = rn[rnreg.REGION=="NW"]
NH = 1437
else:
subfold=''
fileinfix = 'POT2_pc01'
NH = 19914
if len(sys.argv) > 3:
if sys.argv[3] == "FF":
subfold = '_FF'
ncpath = (f"{outlevel}/RCM{rcm}_{period}{subfold}/event{method}_"
f"POT2_pc01_RCM{rcm}_{period}.nc")
ncfile = nc.Dataset(ncpath, mode='r')
param_path = (f"{outlevel}/RCM{rcm}_{period}{subfold}/paramtableG"
f"_POT2_RCM{rcm}_{period}.csv")
param_table = pd.read_csv(param_path)
thresh_path = f"{outlevel}/RCM{rcm}_{period}{subfold}/threshMat_RCM{rcm}_{period}.csv"
threshvec = pd.read_csv(thresh_path).iloc[:,1]
if regional:
threshvec = threshvec[rnreg.REGION == "NW"]
init_path = (f"{outlevel}/RCM{rcm}_{period}{subfold}/initialSummary_RCM{rcm}_{period}.csv")
init_table = pd.read_csv(init_path)
summ_path = (f"{outlevel}/RCM{rcm}_{period}{subfold}/eventSumm"
f"_OBS_POT2_pc01_RCM{rcm}_{period}.csv")
#summtable = pd.read_csv(summ_path) # Done in R afterwards
summtable_out = pd.DataFrame(columns=["eventNumber", "eventDay", "eventLength",
"area","peakA", "peakA_mid", "peakD", "season",
"nclusters","peakyness"])
eventNo = list(ncfile.variables["eventNo"][:])
NE = np.sum([i > 0 for i in eventNo])
avec_all = ncfile.variables['ape'][:,:]
avec_mid = ncfile.variables['ape_mid'][:,:]
print("Setup complete")
start_time = time.time()
for i in range(NE):
if (i < 10) or (i % 1000) == 0:
print(i)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
ni = eventNo[i]
vvec = 0
avec = min(avec_all[i,:])
amid = min(avec_mid[i,:])
dvec = 0
    D = init_table.iloc[ni-1, :]  # Done in R afterwards
    # seas = init_table.iloc[ni-1, 3]  # Done in R afterwards
summtable_out.loc[i] = [ni, D[0], D[1],
vvec, avec, amid, dvec,
D[3], 0, 0]
print("--- %s seconds ---" % (time.time() - start_time))
print("avec amid done")
del avec_all
del avec_mid
gc.collect()
vvec_all = ncfile.variables['flow'][:,:]
dvec_all = ncfile.variables['dpe'][:,:]
for i in range(NE):
if (i < 10) or (i % 1000) == 0:
print(i)
print(">>> %s seconds >>>" % (time.time() - start_time))
start_time = time.time()
ni = eventNo[i]
summtable_out.iloc[i,3] = sum(vvec_all[i,:] > threshvec)
summtable_out.iloc[i,6] = min(dvec_all[i,:])
print(">>> %s seconds >>>" % (time.time() - start_time))
print("vvec dvec done")
ncfile.close()
yaml_path = f"{outlevel}/RCM{rcm}_{period}{subfold}/settings.yaml"
summpath_out = (f"{outlevel}/RCM{rcm}_{period}{subfold}/eventSumm_"
f"{method}_POT2_pc01_RCM{rcm}_{period}.csv")
summtable_out.to_csv(summpath_out, index=False)
with open(yaml_path) as ym:
list_doc = yaml.safe_load(ym)
list_doc['OBSsumm'] = True
list_doc['propsumm'] = "113bN.py"
with open(yaml_path, 'w') as ym:
yaml.dump(list_doc, ym, sort_keys=False)
print(time.strftime("%Y-%m-%d %H:%M:%S"))
print("Files saved and YAML updated. End.")
|
[
"adagri@ceh.ac.uk"
] |
adagri@ceh.ac.uk
|