| Column | Type | Values / Range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
080ade01442e6e832e638a11db4aae091773d1e8
|
47ef648d0d497db58573bfa9ab8964d3600293da
|
/libowei/preprocessing.py
|
782d9c542b23155ad4509d74d55e6433ab0d0ab7
|
[] |
no_license
|
serea/DataMiningProject
|
9fab7ae5df8bf87478ef9b3cf9aa59e2c1cbb12e
|
f11721bb0eddf0a6ee87b4b721ca773ecb7b750b
|
refs/heads/master
| 2020-07-04T05:45:07.586199
| 2016-12-02T16:17:58
| 2016-12-02T16:17:58
| 73,867,212
| 0
| 1
| null | 2016-11-29T07:16:48
| 2016-11-16T00:18:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
# coding=utf-8
import pandas as pd
import numpy as np
# Column names: TCM syndrome-type coefficients
cols = ['肝气郁结证型系数', '热毒蕴结证型系数', '冲任失调证型系数', \
'气血两虚证型系数', '脾胃虚弱证型系数', '肝肾阴虚证型系数']
# Drop rows that contain null values
def dropNull(data):
return data.dropna()
# Fill null values with the column mean
def fillNullWithMean(data):
    # Optionally treat zeros as missing first
    # data = data.replace(0, np.nan)
    # Fill nulls with the column mean
for col in cols:
        mean = data[col].describe().loc['mean']
data[col] = data[col].replace(np.nan, mean)
return data
def deleteOutliers(data):
    # Compute summary statistics
    statistics = data.describe()
    q1 = statistics.loc['25%']
    q3 = statistics.loc['75%']
    # Outlier boundaries: the 1.5 * IQR rule
    high = q3 + 1.5 * (q3 - q1)
    low = q1 - 1.5 * (q3 - q1)
    # Keep only rows that fall inside the boundaries for every column
    index = data[cols[0]] > 0
    for col in cols:
        index &= (data[col] < high[col]) & (data[col] > low[col])
    return data[index]
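# A minimal sanity check for the IQR filter above; the numbers below are
# hypothetical and are not taken from the real survey data.
def _demo_delete_outliers():
    demo = pd.DataFrame({col: [0.5, 0.6, 0.55, 9.9] for col in cols})
    filtered = deleteOutliers(demo)
    # 9.9 lies far above q3 + 1.5 * IQR, so that row should be dropped
    assert len(filtered) == 3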
if __name__ == '__main__':
xls = pd.read_excel("data/hw2data.xls")
    # Split the records by TNM stage
datas = []
for i in range(4):
datas.append(xls[xls['TNM分期'] == 'H' + str(i + 1)])
output = pd.DataFrame()
    # Process the data within each class
for data in datas:
data = dropNull(data)
# data = fillNullWithMean(data)
# data = deleteOutliers(data)
output = output.append(data)
output.to_excel("data/output.xls")
|
[
"libowei123123@qq.com"
] |
libowei123123@qq.com
|
fa69ff9359ea44d255bc0bb5b7360434514e20c3
|
da73af9dacd2e5161dc5843fe9140d00dfa59685
|
/enaml/widgets/widget_component.py
|
309435d7069c60b7ac5e21874750c050bf37f5b7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bgrant/enaml
|
20d7c1e69a47b7ad926afff132d7f1391642d473
|
0bc0b61142d2f77b042b527b2780c8c8810184dd
|
refs/heads/master
| 2021-01-18T05:57:39.583506
| 2012-12-02T17:52:59
| 2012-12-02T17:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,504
|
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from traits.api import Bool, Str, Tuple, Range, Enum
from enaml.core.declarative import Declarative
#: A predefined trait which defines a size tuple. A size value of
#: (-1, -1) represents a default size.
SizeTuple = Tuple(Range(low=-1, value=-1), Range(low=-1, value=-1))
#: The standard attributes to proxy for a widget component.
_WIDGET_ATTRS = [
'enabled', 'visible', 'bgcolor', 'fgcolor', 'font', 'minimum_size',
'maximum_size', 'show_focus_rect'
]
class WidgetComponent(Declarative):
""" A Declarative subclass which represents the base of all widgets
in Enaml.
"""
#: Whether or not the widget is enabled.
enabled = Bool(True)
#: Whether or not the widget is visible.
visible = Bool(True)
#: A flag indicating whether or not to show the focus rectangle for
    #: the given widget. This is not necessarily supported by all widgets
#: on all clients. A value of None indicates to use the default as
#: supplied by the client.
show_focus_rect = Enum(None, True, False)
#: The background color of the widget. Supports CSS3 color strings.
bgcolor = Str
#: The foreground color of the widget. Supports CSS3 color strings.
fgcolor = Str
#: The font used for the widget. Supports CSS font formats.
font = Str
#: The minimum size for the widget. The default means that the
#: client should determine an intelligent minimum size.
minimum_size = SizeTuple
#: The maximum size for the widget. The default means that the
    #: client should determine an intelligent maximum size.
maximum_size = SizeTuple
#--------------------------------------------------------------------------
# Initialization
#--------------------------------------------------------------------------
def snapshot(self):
""" Return the initial properties for a widget component.
"""
snap = super(WidgetComponent, self).snapshot()
get = getattr
attrs = dict((attr, get(self, attr)) for attr in _WIDGET_ATTRS)
snap.update(attrs)
return snap
def bind(self):
""" Bind the change handlers for a widget component.
"""
super(WidgetComponent, self).bind()
self.publish_attributes(*_WIDGET_ATTRS)
|
[
"sccolbert@gmail.com"
] |
sccolbert@gmail.com
|
b4ebd31f1184138ba79dc2f76a2542d3a9b01d92
|
e1cf0e9941e72d06870baa63c792f1123f325762
|
/classify/chinese_classify/StenceDet_FlyAI/transformation.py
|
4aff8007fbf9ff619b3460ebb4b956765766e799
|
[] |
no_license
|
yphacker/flyai_nlp
|
1ab79be510d82fb0e9bc7d5d823c3fbaf9bf2ce5
|
78a8cd8680190dacc053993fe4a00d2391a62408
|
refs/heads/master
| 2020-07-16T02:22:03.338890
| 2020-01-02T12:52:14
| 2020-01-02T12:52:14
| 205,699,001
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# -*- coding: utf-8 -*-
class Transformation:
    '''
    Handles the training data; in some cases the training data needs to be
    processed one more time before use.
    If no extra processing is needed, this method need not be implemented.
    '''
def transformation_data(self, x_train=None, y_train=None, x_test=None, y_test=None):
return x_train, y_train, x_test, y_test
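# A possible override, sketched for illustration (hypothetical; not part of
# the original FlyAI template): lower-case the text inputs before training.
class LowercaseTransformation(Transformation):
    def transformation_data(self, x_train=None, y_train=None, x_test=None, y_test=None):
        if x_train is not None:
            x_train = [s.lower() for s in x_train]
        if x_test is not None:
            x_test = [s.lower() for s in x_test]
        return x_train, y_train, x_test, y_test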
|
[
"yphacker@163.com"
] |
yphacker@163.com
|
40ea9e1bc48ce74446ea3380754e19550eb28111
|
1ed384608eb2624f53e8b6866bd99d842b7d986e
|
/scripts/test/integration/main.py
|
ada39452ce9e881035e7812a26b27e18645d180a
|
[] |
no_license
|
d-becker/oozie-dbd-testing
|
00f20f9ee8419984f1b05c35b7884790ac208b9e
|
84db745a980f231f1062556d0c454086bc52c529
|
refs/heads/master
| 2021-07-11T20:01:11.243269
| 2019-01-22T14:34:45
| 2019-02-05T12:51:54
| 147,356,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,166
|
py
|
#!/usr/bin/env python3
"""
This script is the entry point to running the integration tests.
For more information, run the script with the "--help" switch.
"""
import argparse
import logging
from pathlib import Path
import re
import sys
from typing import Any, Iterable, List, Optional
import unittest
import __main__
import docker_setup
# We add the project root to the path to be able to access the project modules.
sys.path.append(str(Path("../..").resolve()))
def iterate_tests(test_suite_or_case: Iterable[Any]) -> Iterable[Any]:
"""
Iterate through all of the test cases in 'test_suite_or_case'.
Copied from https://stackoverflow.com/questions/15487587/python-unittest-get-testcase-ids-from-nested-testsuite.
"""
try:
suite = iter(test_suite_or_case)
except TypeError:
yield test_suite_or_case
else:
for test in suite:
for subtest in iterate_tests(test):
yield subtest
def get_argument_parser() -> argparse.ArgumentParser:
"""
Builds and returns an argument parser for the script entry point.
Returns:
An argument parser for the script entry point.
"""
parser = argparse.ArgumentParser(description="Run the Oozie-dbd integration tests.")
    parser.add_argument("-t", "--tests", nargs="*", help="Only run tests that match any of the provided regexes.")
return parser
def any_regex_matches(string: str, regexes: List[str]) -> bool:
"""
Checks whether any of the provided regexes matches the given string.
Args:
        string: The string that will be checked against the regexes.
regexes: A list of regular expressions.
Returns:
True if any of `regexes` matches `string`; false otherwise.
"""
return any(map(lambda regex: re.fullmatch(regex, string), regexes))
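def _regex_match_examples() -> None:
    """
    Illustrative only, not part of the original script: re.fullmatch requires
    the pattern to cover the whole test id, so a bare substring does not match.
    """
    assert any_regex_matches("suite.MyTest.test_foo", [r".*test_foo"])
    assert not any_regex_matches("suite.MyTest.test_foo", [r"test_foo"])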
def filter_tests(tests: Iterable[Any], filter_test_regexes: Optional[List[str]]) -> Iterable[Any]:
"""
    Filters the provided tests by the given regular expressions. If `filter_test_regexes`
    is not None, only keeps the tests whose names match any of the given regular expressions.
If `filter_test_regexes` is None, keeps all tests.
Args:
tests: An iterable of tests.
filter_test_regexes: An optional list of regular expressions.
Returns:
The iterable filtered as described above.
"""
if filter_test_regexes is not None:
regexes: List[str] = filter_test_regexes
return filter(lambda test: any_regex_matches(test.id(), regexes), tests)
return tests
def main() -> None:
"""
The entry point of the script.
"""
logging.basicConfig(level=logging.INFO,
filename="test_logs.txt")
args = get_argument_parser().parse_args()
docker_setup.ensure_docker_daemon_running()
this_directory = Path(__main__.__file__).expanduser().resolve().parent
discovered = unittest.TestLoader().discover(str(this_directory))
tests = filter_tests(iterate_tests(discovered), args.tests)
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
main()
|
[
"daniel.becker@cloudera.com"
] |
daniel.becker@cloudera.com
|
5052a557d88620ed5f6f3a3471aede46c367a29a
|
d6d903f9cfb3c0a15824da682ad9b9f91aea6783
|
/larb/migrations/0016_auto_20180325_2203.py
|
2008588038baf0261f0036d5671918f1f88a3703
|
[] |
no_license
|
SlashKing/ctdev-DJANGO
|
3c7e99ca715e6ae1a150f7fe4bd148f84b23c5b9
|
75c8cc7e68bf4a6f6c094a994c7dfa7f2f1de735
|
refs/heads/master
| 2022-12-15T11:18:27.513572
| 2018-07-20T16:09:36
| 2018-07-20T16:09:36
| 139,062,357
| 0
| 0
| null | 2022-12-08T00:44:35
| 2018-06-28T19:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-03-26 05:03
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('larb', '0015_auto_20180314_2206'),
]
operations = [
migrations.AlterField(
model_name='post',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2018, 3, 26, 5, 3, 42, 822896, tzinfo=utc)),
),
migrations.AlterField(
model_name='userprofile',
name='date_of_birth',
field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 3, 26, 5, 3, 42, 826396, tzinfo=utc)),
),
migrations.AlterField(
model_name='userprofile',
name='profile_image',
field=models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile_image', to='cicu.ProfilePicture'),
),
]
|
[
"n.leblanc.cpga@gmail.com"
] |
n.leblanc.cpga@gmail.com
|
d7cf648dd2846833855d11012bf88ea6e796bd33
|
d7ccb4225f623139995a7039f0981e89bf6365a4
|
/.history/mall/settings_20211012030652.py
|
ed9099343f8accda9ab42601efb05cfebbf948f8
|
[] |
no_license
|
tonnymuchui/django-mall
|
64fd4abc3725c1bd0a3dcf20b93b490fe9307b37
|
55c083d8433be3c77adc61939cd197902de4ce76
|
refs/heads/master
| 2023-08-23T04:59:20.418732
| 2021-10-13T15:59:37
| 2021-10-13T15:59:37
| 415,668,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,977
|
py
|
"""
Django settings for mall project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import django_heroku
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#l0ij4e$3v@&xi3i#y$19f#_@z(yv+5yw$kc+02!-)g%ny%oi8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
# 'https://git.heroku.com/housingtonny.git'
# 'localhost',
# '127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'category',
'accounts',
'store',
'carts'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'category.context_processors.menu_links',
'carts.context_processors.counter',
],
},
},
]
WSGI_APPLICATION = 'mall.wsgi.application'
AUTH_USER_MODEL = 'accounts.Account'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# STATIC_ROOT = BASE_DIR / 'staticfiles'
STATIC_ROOT = BASE_DIR /'static'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'mall/static'),
]
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# media files configuration
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR /'media'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Activate Django-Heroku.
django_heroku.settings(locals())
|
[
"tonykanyingah@gmail.com"
] |
tonykanyingah@gmail.com
|
af0b82cd8c4a577365c20ea44999e5b9e3ca91b3
|
6538105c6ba7b0e6ab011e56beb3b1c413188136
|
/img_to_gray_scale_app/test1.py
|
d8cce85f8f1980a759c958e4cb74853b5fd6cd77
|
[] |
no_license
|
dR3m/somecode
|
d3dcfa35fce9788cda93452c218dfe8ac239c4f8
|
4d4c9f3d0324f3b9c3dd0b86ad3ec5df064fc962
|
refs/heads/master
| 2021-05-11T15:13:31.988579
| 2019-08-21T16:13:25
| 2019-08-21T16:13:25
| 117,723,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
import requests
from json import dumps
from random import randint
def test(links, params):
b = []
for i in range(len(links)):
b.append({'img': {'url': links[i], 'param': params[i]}})
u = 'http://localhost:8080'
res = requests.post(u, data=dumps(b))
print(res.content)
def main():
l = [
"https://s3.amazonaws.com/cdn-origin-etr.akc.org/wp-content/uploads/2017/11/12225919/Pembroke-Welsh-Corgi-On-White-01.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/f/fb/Welchcorgipembroke.JPG/1200px-Welchcorgipembroke.JPG",
"https://s3.amazonaws.com/cdn-origin-etr.akc.org/wp-content/uploads/2017/11/12225906/Pembroke-Welsh-Corgi-On-White-05.jpg",
"https://img.buzzfeed.com/buzzfeed-static/static/2014-09/23/12/enhanced/webdr10/longform-original-22600-1411489016-22.jpg?downsize=700:*&output-format=auto&output-quality=auto",
"https://g77v3827gg2notadhhw9pew7-wpengine.netdna-ssl.com/wp-content/uploads/2018/01/corgi-2168005_1920-1024x575.jpg"
]
p = [randint(0, 256) for i in range(len(l))]
test(l, p)
if __name__ == '__main__':
main()
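# Note, added for context: this script assumes an image-processing HTTP service
# is already listening on http://localhost:8080; it POSTs a JSON list of
# {'img': {'url': ..., 'param': ...}} records and prints the raw response body.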
|
[
"noreply@github.com"
] |
dR3m.noreply@github.com
|
11da33bffccf0c24fc03696dd6135d85c28aa71f
|
b77127288b2f1fabe8703abea72eece93f07a178
|
/regp/regapp/admin.py
|
922c69bc3b39b06ed56ca3afdb45e94180264791
|
[] |
no_license
|
dharanisaikumar/djangoex
|
3c7649f752c8e46d54084262c5e5bc11fdcbf697
|
71474bcebb92b16aabbae7ea9574c2d7cbca39cd
|
refs/heads/master
| 2021-01-06T15:01:44.289998
| 2020-02-18T13:47:44
| 2020-02-18T13:47:44
| 241,371,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
from django.contrib import admin
from .models import usermodel, User
# Register your models here.
admin.site.register(usermodel)
|
[
"dharanisaikumar819@gmail.com"
] |
dharanisaikumar819@gmail.com
|
276aa7b5b5d17cd1720f2bf15794ef27dab6986e
|
1d01b716cb3f7a516562e614b452b46444048e1a
|
/Leetcode/algorithms-questions/1249_Minimum_Remove_to_Make_Valid_Parentheses.py
|
a8f3e2c2c1b44378c362201bc143e8e28bbfb4c6
|
[] |
no_license
|
deep0892/Algorithms_Practice
|
49e96178eb7fff6cc93acfb8dcba9f5aa25135bc
|
7b79bca508a520274f26098c9d2b3cf305b1a407
|
refs/heads/master
| 2023-06-19T00:20:51.981482
| 2021-07-13T08:03:09
| 2021-07-13T08:03:09
| 259,134,357
| 0
| 0
| null | 2021-06-11T16:54:46
| 2020-04-26T21:13:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
# https://leetcode.com/problems/minimum-remove-to-make-valid-parentheses/
"""
Description of the question is at the link above.
"""
from typing import List
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
result: str = ''
stringBuilder = ''
cnt_o: int = 0
for i in range(len(s)):
if s[i] == '(':
cnt_o += 1
elif s[i] == ')':
if cnt_o == 0:
continue
cnt_o -= 1
stringBuilder += s[i]
for j in range(len(stringBuilder)-1, -1, -1):
if stringBuilder[j] == "(":
if cnt_o > 0:
cnt_o -= 1
continue
result += stringBuilder[j]
return result[-1::-1]
def main():
s = "lee(t(c)o)de)"
sol = Solution()
print(sol.minRemoveToMakeValid(s))
s = "a)b(c)d"
print(sol.minRemoveToMakeValid(s))
s = "))(("
print(sol.minRemoveToMakeValid(s))
s = "(a(b(c)d)"
print(sol.minRemoveToMakeValid(s))
if __name__ == "__main__":
main()
|
[
"deepankar080892@gmail.com"
] |
deepankar080892@gmail.com
|
09e9508924280b73e43341e921b6f66a853acef0
|
61cebc9f65bbff9125584da51d18b6f1f23e049f
|
/python/src/mqttSubs.py
|
3a6aae3262078c68022eee5dea2b55b9b06ed94b
|
[] |
no_license
|
jacklee032016/rtosLwip
|
db466d3dba830266e0f7facc0f3c03d5804b68c2
|
0c66943809fa87cf808bb5e91ee44cc98f7cc8f5
|
refs/heads/master
| 2022-12-15T11:57:01.219702
| 2022-05-04T22:58:04
| 2022-05-04T22:58:04
| 142,618,793
| 1
| 2
| null | 2022-12-08T02:19:16
| 2018-07-27T20:01:41
|
C
|
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2013 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Distribution License v1.0
# which accompanies this distribution.
#
# The Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial implementation
# Copyright (c) 2010,2011 Roger Light <roger@atchoo.org>
# All rights reserved.
# This shows a simple example of an MQTT subscriber.
#import context # Ensures paho is in PYTHONPATH
import paho.mqtt.client as mqtt
def on_connect(mqttc, obj, flags, rc):
print("rc: " + str(rc))
def on_message(mqttc, obj, msg):
print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
def on_publish(mqttc, obj, mid):
print("mid: " + str(mid))
def on_subscribe(mqttc, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_log(mqttc, obj, level, string):
print(string)
# If you want to use a specific client id, use
# mqttc = mqtt.Client("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Uncomment to enable debug messages
# mqttc.on_log = on_log
mqttc.connect("192.168.168.102", 1883, 60)
#mqttc.subscribe("$SYS/#", 0)
mqttc.subscribe("extMqtt/#", 0)
mqttc.loop_forever()
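# One way to exercise this subscriber from another shell, assuming the
# mosquitto command-line clients are installed (illustrative, not original):
#   mosquitto_pub -h 192.168.168.102 -t extMqtt/test -m "hello"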
|
[
"jacklee032016@gmail.com"
] |
jacklee032016@gmail.com
|
b12b9a4ef48443e63d6e0e312fe3e69e78868ee9
|
8a25ada37271acd5ea96d4a4e4e57f81bec221ac
|
/home/pi/GrovePi/Software/Python/others/temboo/Library/Flickr/Photos/SearchPhotos.py
|
3d8ba19da58779404fb135fdb6062271aef6c52e
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
lupyuen/RaspberryPiImage
|
65cebead6a480c772ed7f0c4d0d4e08572860f08
|
664e8a74b4628d710feab5582ef59b344b9ffddd
|
refs/heads/master
| 2021-01-20T02:12:27.897902
| 2016-11-17T17:32:30
| 2016-11-17T17:32:30
| 42,438,362
| 7
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,817
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SearchPhotos
# Returns a list of photos matching a search criteria.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchPhotos(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchPhotos Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchPhotos, self).__init__(temboo_session, '/Library/Flickr/Photos/SearchPhotos')
def new_input_set(self):
return SearchPhotosInputSet()
def _make_result_set(self, result, path):
return SearchPhotosResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchPhotosChoreographyExecution(session, exec_id, path)
class SearchPhotosInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchPhotos
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key).)
"""
super(SearchPhotosInputSet, self)._set_input('APIKey', value)
def set_Accuracy(self, value):
"""
Set the value of the Accuracy input for this Choreo. ((optional, integer) The accuracy level of the location information. Current range is 1-16. World level is 1, Country is ~3, Region is ~6, City is ~11, Street is ~16.)
"""
super(SearchPhotosInputSet, self)._set_input('Accuracy', value)
def set_BoundingBox(self, value):
"""
Set the value of the BoundingBox input for this Choreo. ((optional, string) A comma-delimited list of 4 values defining the Bounding Box of the area that will be searched. These values represent the coordinates of the bottom-left corner and top-right corner of the box.)
"""
super(SearchPhotosInputSet, self)._set_input('BoundingBox', value)
def set_ContentType(self, value):
"""
Set the value of the ContentType input for this Choreo. ((optional, integer) The content type setting. 1 = photos only, 2 = screenshots only, 3 = other, 4 = photos and screenshots, 5 = screenshots and other, 6 = photos and other, 7 = all.)
"""
super(SearchPhotosInputSet, self)._set_input('ContentType', value)
def set_Extras(self, value):
"""
Set the value of the Extras input for this Choreo. ((optional, string) A comma-delimited list of extra information to fetch for each returned record. See documentation for more details on supported fields.)
"""
super(SearchPhotosInputSet, self)._set_input('Extras', value)
def set_GeoContext(self, value):
"""
Set the value of the GeoContext input for this Choreo. ((optional, integer) A numeric value representing the photo's location info beyond latitude and longitude. 0 = not defined, 1 = indoors, 2 = outdoors.)
"""
super(SearchPhotosInputSet, self)._set_input('GeoContext', value)
def set_GroupID(self, value):
"""
        Set the value of the GroupID input for this Choreo. ((optional, string) The id of a group whose pool to search. If specified, only matching photos posted to the group's pool will be returned.)
"""
super(SearchPhotosInputSet, self)._set_input('GroupID', value)
def set_InGallery(self, value):
"""
Set the value of the InGallery input for this Choreo. ((optional, boolean) Limits the search to only photos that are in a gallery. Default is false.)
"""
super(SearchPhotosInputSet, self)._set_input('InGallery', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((conditional, decimal) A valid latitude, in decimal format, for performing geo queries (not required if providing another limiting search parameter).)
"""
super(SearchPhotosInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((conditional, decimal) A valid longitude, in decimal format, for performing geo queries (not required if providing another limiting search parameter).)
"""
super(SearchPhotosInputSet, self)._set_input('Longitude', value)
def set_MaxTakenDate(self, value):
"""
        Set the value of the MaxTakenDate input for this Choreo. ((optional, date) The maximum taken date. Photos with a taken date less than or equal to this value will be returned. The date can be in the form of a mysql datetime or unix timestamp.)
"""
super(SearchPhotosInputSet, self)._set_input('MaxTakenDate', value)
def set_MaxUploadDate(self, value):
"""
Set the value of the MaxUploadDate input for this Choreo. ((optional, date) The maximum upload date. Photos with an upload date less than or equal to this value will be returned. The date can be in the form of a unix timestamp or mysql datetime.)
"""
super(SearchPhotosInputSet, self)._set_input('MaxUploadDate', value)
def set_Media(self, value):
"""
Set the value of the Media input for this Choreo. ((optional, string) Filter results by media type. Valid values are all (default), photos or videos.)
"""
super(SearchPhotosInputSet, self)._set_input('Media', value)
def set_MinTakenDate(self, value):
"""
Set the value of the MinTakenDate input for this Choreo. ((optional, date) The minimum taken date. Photos with a taken date greater than or equal to this value will be returned. The date can be in the form of a mysql datetime or unix timestamp.)
"""
super(SearchPhotosInputSet, self)._set_input('MinTakenDate', value)
def set_MinUploadDate(self, value):
"""
Set the value of the MinUploadDate input for this Choreo. ((optional, date) The minimum upload date. Photos with an upload date greater than or equal to this value will be returned. The date can be in the form of a unix timestamp or mysql datetime.)
"""
super(SearchPhotosInputSet, self)._set_input('MinUploadDate', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page of results to return. Defaults to 1.)
"""
super(SearchPhotosInputSet, self)._set_input('Page', value)
def set_PerPage(self, value):
"""
Set the value of the PerPage input for this Choreo. ((optional, integer) The number of photos to return per page. Defaults to 100.)
"""
super(SearchPhotosInputSet, self)._set_input('PerPage', value)
def set_PlaceID(self, value):
"""
Set the value of the PlaceID input for this Choreo. ((optional, string) A Flickr place id.)
"""
super(SearchPhotosInputSet, self)._set_input('PlaceID', value)
def set_RadiusUnits(self, value):
"""
Set the value of the RadiusUnits input for this Choreo. ((optional, string) The unit of measure when doing radial geo queries. Valid values are: "mi" (miles) and "km" (kilometers). The default is "km".)
"""
super(SearchPhotosInputSet, self)._set_input('RadiusUnits', value)
def set_Radius(self, value):
"""
Set the value of the Radius input for this Choreo. ((optional, integer) A valid radius used for geo queries, greater than zero and less than 20 miles (or 32 kilometers). Defaults to 5 (km).)
"""
super(SearchPhotosInputSet, self)._set_input('Radius', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml and json. Defaults to json.)
"""
super(SearchPhotosInputSet, self)._set_input('ResponseFormat', value)
def set_Sort(self, value):
"""
Set the value of the Sort input for this Choreo. ((optional, string) Defaults to date-posted-desc unless performing a geo query. Valid values are: date-posted-asc, date-posted-desc, date-taken-asc, date-taken-desc, interestingness-desc, interestingness-asc, relevance.)
"""
super(SearchPhotosInputSet, self)._set_input('Sort', value)
def set_TagMode(self, value):
"""
        Set the value of the TagMode input for this Choreo. ((optional, string) Use the mode 'any' to search using an OR combination of tags. Use 'all' for an AND combination. Defaults to 'any'.)
"""
super(SearchPhotosInputSet, self)._set_input('TagMode', value)
def set_Tags(self, value):
"""
Set the value of the Tags input for this Choreo. ((optional, string) A comma-delimited list of tags. Photos with one or more of the tags listed will be returned. You can exclude results that match a term by prepending it with a hyphen.)
"""
super(SearchPhotosInputSet, self)._set_input('Tags', value)
def set_Text(self, value):
"""
Set the value of the Text input for this Choreo. ((conditional, string) A keyword search against photo titles, descriptions, or tags. Prepend search term with a hyphen to exclude. Not required if providing another limiting search parameter.)
"""
super(SearchPhotosInputSet, self)._set_input('Text', value)
def set_UserID(self, value):
"""
        Set the value of the UserID input for this Choreo. ((optional, string) The ID of the user whose photos to search. If this parameter isn't passed, all public photos will be searched. A value of "me" will search against the authenticated user's photos.)
"""
super(SearchPhotosInputSet, self)._set_input('UserID', value)
def set_WOEID(self, value):
"""
Set the value of the WOEID input for this Choreo. ((optional, string) The unique 'Where on Earth ID' that uniquely represents spatial entities.)
"""
super(SearchPhotosInputSet, self)._set_input('WOEID', value)
class SearchPhotosResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchPhotos Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Flickr.)
"""
return self._output.get('Response', None)
class SearchPhotosChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchPhotosResultSet(response, path)
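# Usage sketch, assembled from the classes above; the session credentials and
# API key are placeholders (this block is illustrative, not from the source):
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = SearchPhotos(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('YOUR_FLICKR_API_KEY')
#   inputs.set_Text('sunset')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())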
|
[
"lupyuen@gmail.com"
] |
lupyuen@gmail.com
|
14b6bc5e5c8792849b11566ea3e04e9a7ee2ce12
|
eaf6481387d3055c9ccd36401e2c04713c214f16
|
/last_this_is_last_final_testing_code-master/registration/views.py
|
bb90b4414ce04571460318f409a2df2e60ccc512
|
[] |
no_license
|
PrabhavLamichhane/chartupdate
|
deb38733b59a476fa4b1e472d4e9e3f29bead3c2
|
de6137b60e741f25ebf4934ba44d776613144c19
|
refs/heads/master
| 2020-11-24T05:29:12.439760
| 2019-12-14T07:38:47
| 2019-12-14T07:38:47
| 227,984,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,605
|
py
|
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.shortcuts import render, redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from django.core.mail import send_mail
from registration.forms import RegistrationForm
from .models import Account
from registration.tokens import account_activation_token
from django.conf import settings
def signup(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
subject = 'Activate Your MySite Account'
message = render_to_string('prabhav/registration/account_activation_email.html', {
'user': user,
'domain': current_site.domain,
# 'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'token': account_activation_token.make_token(user),
})
            from_email = settings.EMAIL_HOST_USER
to_email = [user.email]
send_mail(subject=subject, from_email=from_email,
recipient_list=to_email, message=message, fail_silently=False)
return redirect('account_activation_sent')
else:
form = RegistrationForm()
return render(request, 'prabhav/registration/signup.html', {'form': form})
def account_activation_sent(request):
return render(request, 'prabhav/registration/account_activation_sent.html')
def activate(request, uidb64, token):
try:
# uid: force_text(urlsafe_base64_encode(force_bytes(user.pk)))
uid = force_text(urlsafe_base64_decode(uidb64))
user = Account.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, Account.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.email_confirmed = True
user.save()
login(request, user)
return redirect('/')
else:
return render(request, 'prabhav/registration/account_activation_invalid.html')
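# For context, a sketch of what registration/tokens.py typically contains in
# this activation-email pattern (assumed; that file is not part of this record):
#
#   from django.contrib.auth.tokens import PasswordResetTokenGenerator
#
#   class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
#       def _make_hash_value(self, user, timestamp):
#           return str(user.pk) + str(timestamp) + str(user.is_active)
#
#   account_activation_token = AccountActivationTokenGenerator()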
|
[
"parolaro123@gmail.com"
] |
parolaro123@gmail.com
|
9780a87442ab339acb2d1cf4c6b6bbdad42a2478
|
f9b7930e6f43eca26abf87b39961fc2d022db54a
|
/Python/easy/796. Rotate String.py
|
29bda180d5ebf43de72fd30c3c808a6733bc89b3
|
[] |
no_license
|
LRenascence/LeetCode
|
639452dd3bf65a14d0056c01e203a7082fbdc326
|
1a0e1d1503e0a7bff6917491a964a08c572827fb
|
refs/heads/master
| 2021-05-12T03:41:35.346377
| 2021-01-07T23:39:14
| 2021-01-07T23:39:14
| 117,622,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
"""
We are given two strings, A and B.
A shift on A consists of taking string A and moving the leftmost character to the rightmost position.
For example, if A = 'abcde', then it will be 'bcdea' after one shift on A. Return True if and only if A can become B after some number of shifts on A.
Example 1:
Input: A = 'abcde', B = 'cdeab'
Output: true
Example 2:
Input: A = 'abcde', B = 'abced'
Output: false
Note:
A and B will have length at most 100.
"""
# Simulate the rotation directly
class Solution:
def rotateString(self, A: str, B: str) -> bool:
if A == B:
return True
for i in range(len(A)):
            # rotate left by one: move the first character to the end
            A = A[1:] + A[0]
if A == B:
return True
return False
# Smarter: every rotation of A is a substring of A + A
class Solution:
def rotateString(self, A: str, B: str) -> bool:
if len(A) != len(B):
return False
A = A + A
if B in A:
return True
return False
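# Quick check against the examples in the docstring (added for illustration):
#   Solution().rotateString('abcde', 'cdeab')  -> True
#   Solution().rotateString('abcde', 'abced')  -> False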
|
[
"im.renascence@gmail.com"
] |
im.renascence@gmail.com
|
54194b62f43d7ed37431e16a9d4ffbbc4b1e712f
|
27ce7d1ecb8556b1cce963eb649c92abf27347ab
|
/CodeChef/CRDGAME_Chef_and_Card_Game.py
|
a3f0ec40bd931dc499df1d23633ec66c7b141d66
|
[
"Unlicense"
] |
permissive
|
a3X3k/Competitive-programing-hacktoberfest-2021
|
c005c2413e93e14808c5f1e60cbbc73c0ff04674
|
bc3997997318af4c5eafad7348abdd9bf5067b4f
|
refs/heads/main
| 2023-08-31T17:14:21.584487
| 2021-10-04T04:03:41
| 2021-10-04T04:03:41
| 413,268,126
| 1
| 0
|
Unlicense
| 2021-10-04T04:02:03
| 2021-10-04T04:02:02
| null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
# Problem :: https://www.codechef.com/JULY20B/problems/CRDGAME
# Python
from sys import stdin,stdout
def sumdigits(n):
return 0 if n==0 else (n%10) + sumdigits(n//10)
for _ in range(int(stdin.readline())):
C_W = 0
M_W = 0
for _ in range(int(stdin.readline())):
C , M = map(int,stdin.readline().split())
C_S = sumdigits(C)
M_S = sumdigits(M)
if (C_S == M_S):
C_W = C_W+1
M_W = M_W+1
elif(C_S > M_S):
C_W = C_W+1
else:
M_W = M_W+1
    if (C_W == M_W):
        print("2", C_W)  # draw
    elif (C_W > M_W):
        print("0", C_W)  # player 0 (Chef) wins
    else:
        print("1", M_W)  # player 1 (Morty) wins
|
[
"noreply@github.com"
] |
a3X3k.noreply@github.com
|
bfd9359c7293fc2547b4a7f106c6bff14f477c6a
|
fde693971d3f9671aa1d2efbf8cafa58168f9271
|
/Vendors_project/vendors/migrations/0014_auto_20200806_1152.py
|
6b8b604c7bb841ae0294b8b9b54bb9d9191e8462
|
[] |
no_license
|
HarikaMandava/Django-Projects
|
84e0929c686a3ee2c6379a803250cc8892b126d4
|
371cf0078818273bfabd609a5d69e1ef5356ebe6
|
refs/heads/master
| 2022-12-01T00:06:28.673108
| 2020-08-07T05:39:11
| 2020-08-07T05:39:11
| 285,738,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
# Generated by Django 3.0.7 on 2020-08-06 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendors', '0013_remove_contacts_vendor'),
]
operations = [
migrations.AddField(
model_name='contacts',
name='address1',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='contacts',
name='address2',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='contacts',
name='city',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='contacts',
name='country',
field=models.CharField(default='United States', max_length=200, null=True),
),
migrations.AddField(
model_name='contacts',
name='mobile',
field=models.CharField(blank=True, max_length=17),
),
migrations.AddField(
model_name='contacts',
name='name',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='contacts',
name='state',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='contacts',
name='zipp',
field=models.CharField(max_length=200, null=True),
),
]
|
[
"harika.mand@gmail.com"
] |
harika.mand@gmail.com
|
83d71d33de21186e775b72c9924dcd96f1c005e1
|
5a001c02e36426359bc3f5b5a29a3373af343376
|
/Benchmark-gnn/data/molecules.py
|
972e749859619500206a5f81edd1cec7195b23e5
|
[
"MIT"
] |
permissive
|
Axeln78/Transferability-of-spectral-gnns
|
65376e51b4e3e3e0e2df7a3c9dac89d7609d7e59
|
a89628851c22ceb6361ff6180ab810d4fa923898
|
refs/heads/main
| 2023-02-03T16:37:57.877362
| 2020-12-23T17:38:23
| 2020-12-23T17:38:23
| 306,058,184
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,069
|
py
|
import csv
import pickle
import time
import dgl
import numpy as np
import torch
import torch.utils.data
from scipy import sparse as sp
# *NOTE
# The dataset pickle and index files are in ./zinc_molecules/ dir
# [<split>.pickle and <split>.index; for split 'train', 'val' and 'test']
class MoleculeDGL(torch.utils.data.Dataset):
def __init__(self, data_dir, split, num_graphs=None):
self.data_dir = data_dir
self.split = split
self.num_graphs = num_graphs
with open(data_dir + "/%s.pickle" % self.split, "rb") as f:
self.data = pickle.load(f)
if self.num_graphs in [10000, 1000]:
# loading the sampled indices from file ./zinc_molecules/<split>.index
with open(data_dir + "/%s.index" % self.split, "r") as f:
data_idx = [list(map(int, idx)) for idx in csv.reader(f)]
self.data = [self.data[i] for i in data_idx[0]]
assert len(self.data) == num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k"
"""
data is a list of Molecule dict objects with following attributes
molecule = data[idx]
; molecule['num_atom'] : nb of atoms, an integer (N)
; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type
; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type
; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable
"""
self.graph_lists = []
self.graph_labels = []
self.n_samples = len(self.data)
self._prepare()
def _prepare(self):
print("preparing %d graphs for the %s set..." % (self.num_graphs, self.split.upper()))
for molecule in self.data:
node_features = molecule['atom_type'].long()
adj = molecule['bond_type']
edge_list = (adj != 0).nonzero() # converting adj matrix to edge_list
edge_idxs_in_adj = edge_list.split(1, dim=1)
edge_features = adj[edge_idxs_in_adj].reshape(-1).long()
# Create the DGL Graph
g = dgl.DGLGraph()
g.add_nodes(molecule['num_atom'])
g.ndata['feat'] = node_features
for src, dst in edge_list:
g.add_edges(src.item(), dst.item())
g.edata['feat'] = edge_features
self.graph_lists.append(g)
self.graph_labels.append(molecule['logP_SA_cycle_normalized'])
def __len__(self):
"""Return the number of graphs in the dataset."""
return self.n_samples
def __getitem__(self, idx):
"""
Get the idx^th sample.
Parameters
---------
idx : int
The sample index.
Returns
-------
(dgl.DGLGraph, int)
DGLGraph with node feature stored in `feat` field
And its label.
"""
return self.graph_lists[idx], self.graph_labels[idx]
class MoleculeDatasetDGL(torch.utils.data.Dataset):
def __init__(self, name='Zinc'):
t0 = time.time()
self.name = name
self.num_atom_type = 28 # known meta-info about the zinc dataset; can be calculated as well
self.num_bond_type = 4 # known meta-info about the zinc dataset; can be calculated as well
data_dir = './data/molecules'
if self.name == 'ZINC-full':
data_dir = './data/molecules/zinc_full'
self.train = MoleculeDGL(data_dir, 'train', num_graphs=220011)
self.val = MoleculeDGL(data_dir, 'val', num_graphs=24445)
self.test = MoleculeDGL(data_dir, 'test', num_graphs=5000)
else:
self.train = MoleculeDGL(data_dir, 'train', num_graphs=10000)
self.val = MoleculeDGL(data_dir, 'val', num_graphs=1000)
self.test = MoleculeDGL(data_dir, 'test', num_graphs=1000)
print("Time taken: {:.4f}s".format(time.time() - t0))
def self_loop(g):
"""
Utility function only, to be used only when necessary as per user self_loop flag
: Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
This function is called inside a function in MoleculeDataset class.
"""
new_g = dgl.DGLGraph()
new_g.add_nodes(g.number_of_nodes())
new_g.ndata['feat'] = g.ndata['feat']
src, dst = g.all_edges(order="eid")
src = dgl.backend.zerocopy_to_numpy(src)
dst = dgl.backend.zerocopy_to_numpy(dst)
non_self_edges_idx = src != dst
nodes = np.arange(g.number_of_nodes())
new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
new_g.add_edges(nodes, nodes)
# This new edata is not used since this function gets called only for GCN, GAT
# However, we need this for the generic requirement of ndata and edata
new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
return new_g
def positional_encoding(g, pos_enc_dim):
"""
    Graph positional encoding with Laplacian eigenvectors
"""
    # Symmetrically normalized Laplacian: L = I - D^{-1/2} A D^{-1/2}
A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
L = sp.eye(g.number_of_nodes()) - N * A * N
# Eigenvectors with numpy
EigVal, EigVec = np.linalg.eig(L.toarray())
idx = EigVal.argsort() # increasing order
EigVal, EigVec = EigVal[idx], np.real(EigVec[:, idx])
g.ndata['pos_enc'] = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1]).float()
# # Eigenvectors with scipy
# EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
# EigVec = EigVec[:, EigVal.argsort()] # increasing order
# g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
return g
class MoleculeDataset(torch.utils.data.Dataset):
def __init__(self, name):
"""
Loading SBM datasets
"""
start = time.time()
print("[I] Loading dataset %s..." % (name))
self.name = name
data_dir = 'data/molecules/'
with open(data_dir + name + '.pkl', "rb") as f:
f = pickle.load(f)
self.train = f[0]
self.val = f[1]
self.test = f[2]
self.num_atom_type = f[3]
self.num_bond_type = f[4]
print('train, test, val sizes :', len(self.train), len(self.test), len(self.val))
print("[I] Finished loading.")
print("[I] Data load time: {:.4f}s".format(time.time() - start))
# form a mini batch from a given list of samples = [(graph, label) pairs]
def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels)).unsqueeze(1)
# tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
# tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
# snorm_n = torch.cat(tab_snorm_n).sqrt()
# tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
# tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
# snorm_e = torch.cat(tab_snorm_e).sqrt()
batched_graph = dgl.batch(graphs)
return batched_graph, labels
# prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
def collate_dense_gnn(self, samples, edge_feat):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels)).unsqueeze(1)
# tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
# tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
# snorm_n = tab_snorm_n[0][0].sqrt()
# batched_graph = dgl.batch(graphs)
g = graphs[0]
adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
"""
Adapted from https://github.com/leichen2018/Ring-GNN/
Assigning node and edge feats::
we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
"""
zero_adj = torch.zeros_like(adj)
if edge_feat:
# use edge feats also to prepare adj
adj_with_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type + self.num_bond_type)])
adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)
us, vs = g.edges()
for idx, edge_label in enumerate(g.edata['feat']):
adj_with_edge_feat[edge_label.item() + 1 + self.num_atom_type][us[idx]][vs[idx]] = 1
for node, node_label in enumerate(g.ndata['feat']):
adj_with_edge_feat[node_label.item() + 1][node][node] = 1
x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
return None, x_with_edge_feat, labels
else:
# use only node feats to prepare adj
adj_no_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type)])
adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)
for node, node_label in enumerate(g.ndata['feat']):
adj_no_edge_feat[node_label.item() + 1][node][node] = 1
x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
return x_no_edge_feat, None, labels
def _sym_normalize_adj(self, adj):
deg = torch.sum(adj, dim=0) # .squeeze()
deg_inv = torch.where(deg > 0, 1. / torch.sqrt(deg), torch.zeros(deg.size()))
deg_inv = torch.diag(deg_inv)
return torch.mm(deg_inv, torch.mm(adj, deg_inv))
def _add_self_loops(self):
# function for adding self loops
# this function will be called only if self_loop flag is True
self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]
def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding with Laplacian eigenvectors
self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
|
[
"axel.nilsson@epfl.ch"
] |
axel.nilsson@epfl.ch
|
904bda4e2b052e0b879f460c446f7c066e14621c
|
b6076a3d63ec0591e435b0d763faa02419405fc3
|
/nlpaug/augmenter/word/wordnet.py
|
cec586ed2a672b60f9c87d674faffeb9fbf5fae8
|
[
"MIT"
] |
permissive
|
Allensmile/nlpaug
|
37684850c22c6cc48f518cb63244e8c17cdef34f
|
8d60627cb6412b5ceb263e02e605360b174f0ddf
|
refs/heads/master
| 2020-05-28T08:22:09.270879
| 2019-05-24T01:31:07
| 2019-05-24T01:31:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,625
|
py
|
import nltk
from nltk.corpus import wordnet
from nlpaug.augmenter.word import WordAugmenter
from nlpaug.util import Action, PartOfSpeech
class WordNetAug(WordAugmenter):
def __init__(self, name='WordNet_Aug', aug_min=1, aug_p=0.3, tokenizer=None):
super(WordNetAug, self).__init__(
action=Action.SUBSTITUTE, name=name, aug_p=aug_p, aug_min=aug_min, tokenizer=tokenizer)
self.model = self.get_model()
def skip_aug(self, token_idxes, pos):
results = []
for token_idx in token_idxes:
            # Words unlikely to have synonyms (e.g. determiners) are excluded from the random draw.
if pos[token_idx][1] not in ['DT']:
results.append(token_idx)
return results
def substitute(self, text):
results = []
tokens = self.tokenizer(text)
pos = nltk.pos_tag(tokens)
aug_cnt = self.generate_aug_cnt(len(tokens))
word_idxes = [i for i, t in enumerate(tokens)]
word_idxes = self.skip_aug(word_idxes, pos)
aug_idexes = self.sample(word_idxes, aug_cnt)
for i, token in enumerate(tokens):
# Skip if no augment for word
if i not in aug_idexes:
results.append(token)
continue
word_poses = PartOfSpeech.pos2wn(pos[i][1])
synets = []
if word_poses is None or len(word_poses) == 0:
                # Fall back to every possible synset when the POS mapping is not defined correctly
synets.extend(self.model.synsets(pos[i][0]))
else:
for word_pos in word_poses:
synets.extend(self.model.synsets(pos[i][0], pos=word_pos))
augmented_data = []
for synet in synets:
for candidate in synet.lemma_names():
if candidate.lower() != token.lower():
augmented_data.append(candidate)
if len(augmented_data) == 0:
results.append(token)
else:
candidate = self.sample(augmented_data, 1)[0]
results.append(self.align_capitalization(token, candidate))
return self.reverse_tokenizer(results)
def get_model(self):
try:
# Check whether wordnet package is downloaded
wordnet.synsets('computer')
except Exception:
nltk.download('wordnet')
try:
# Check whether POS package is downloaded
nltk.pos_tag('computer')
except Exception:
nltk.download('averaged_perceptron_tagger')
return wordnet
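# Usage sketch following the nlpaug augmenter interface of this era
# (illustrative; not part of the original module):
#   aug = WordNetAug()
#   print(aug.augment('The quick brown fox jumps over the lazy dog'))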
|
[
"makcedward@gmail.com"
] |
makcedward@gmail.com
|
9f00ea8dc974ab43ffed2a536376dc9c57e25440
|
8b85111e6bd07ff89d48f3fe9f4bc9323e400516
|
/FuzzAnalysis/winappdbgResultAnalysis.py
|
8cabcc94749b46dfb9a81e7d77277ecc0a071e74
|
[] |
no_license
|
JsHuang/fuzz4fun
|
612e74afed379eeaed6fc72f737493a509fa8dd1
|
3ca9e152e97d8890651e885a6025b0188cae0231
|
refs/heads/master
| 2023-04-07T19:33:38.778303
| 2023-03-28T05:35:34
| 2023-03-28T05:35:34
| 146,147,726
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
#coding=utf-8
import os
stack_hashes = {}
def calculate_stack_hash(file_path):
trace_str = ""
traceIndex = -1
with open(file_path, "r") as f:
for line in f.readlines():
if traceIndex < 0:
line = line.strip()
if line == "Frame Origin":
traceIndex = 0
elif traceIndex < 5:
traceIndex += 1
                if len(line) < 10:  # the stack trace has fewer than 5 frames
break
trace_str += line
else:
break
#print trace_str
return (hash(trace_str),trace_str)
def analyse():
result_dir = "D:"
for result_file in os.listdir(result_dir):
file_path = os.path.join(result_dir, result_file)
if os.path.isfile(file_path):
s_hash = calculate_stack_hash(file_path)
            if s_hash[0] not in stack_hashes:
stack_hashes[s_hash[0]] = [s_hash[1], result_file]
#break
print "Total unique crash %d\nResult write to crash_result..." % len(stack_hashes.keys())
# write result to file
with open("./crash_result.txt","w") as f:
for k,v in stack_hashes.iteritems():
f.write("Unique Crash file %s" % v[1])
f.write("StackTrace:\n%s\n" % v[0])
f.close()
if __name__ == "__main__":
analyse()
|
[
"54jin.huang@gmail.com"
] |
54jin.huang@gmail.com
|
d69e444b95067b65c99730c702a1bc782949bbc9
|
91683c3a72383e07543f21c7cf09f7f94dd85f6a
|
/components/level.py
|
e73e920fb09ba62613a2ae9446b20c4ff8e88723
|
[] |
no_license
|
TimTheFiend/roguepy
|
cd1528a7dea685ec826fe01ef2ab784d23cfdf25
|
2c5fe1e58c8693e91e757da8dd7b589a6a9b9db4
|
refs/heads/master
| 2022-12-06T23:34:46.248031
| 2020-08-03T12:36:26
| 2020-08-03T12:36:26
| 282,830,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
from __future__ import annotations
from typing import TYPE_CHECKING
from components.base_component import BaseComponent
if TYPE_CHECKING:
from entity import Actor
class Level(BaseComponent):
parent: Actor
def __init__(
self,
current_level: int = 1,
current_xp: int = 0,
level_up_base: int = 0,
level_up_factor: int = 150,
xp_given: int = 0,
):
self.current_level = current_level
self.current_xp = current_xp
self.level_up_base = level_up_base
self.level_up_factor = level_up_factor
self.xp_given = xp_given
@property
def experience_to_next_level(self):
return self.level_up_base + self.current_level * self.level_up_factor
@property
def requires_level_up(self) -> bool:
return self.current_xp > self.experience_to_next_level
def add_xp(self, xp: int) -> None:
if xp == 0 or self.level_up_base == 0:
return
self.current_xp += xp
self.engine.message_log.add_message(
f"You gain {xp} EXP.",
)
if self.requires_level_up:
self.engine.message_log.add_message(
f"You advance to level {self.current_level}!",
)
def increase_level(self) -> None:
self.current_xp -= self.experience_to_next_level
self.current_level += 1
def increase_max_hp(self, amount: int = 20) -> None:
self.parent.fighter.max_hp += amount
self.parent.fighter.hp += amount
self.engine.message_log.add_message(
"Your health improves! You hunger for more!",
)
self.increase_level()
def increase_power(self, amount: int = 1) -> None:
self.parent.fighter.power += amount
self.engine.message_log.add_message(
"Your muscles swell! A red mist covers your vision!"
)
self.increase_level()
def increase_defense(self, amount: int = 1) -> None:
self.parent.fighter.defense += amount
self.engine.message_log.add_message(
"You think back on past relationships. Your resistance to pain increases."
)
|
[
"33222649+JoakimFKK@users.noreply.github.com"
] |
33222649+JoakimFKK@users.noreply.github.com
|
5d5078fb2eea343a3e1fd0560ff27a70a9ebbfd4
|
300e7208a0b296f291fdba2ccad0617b20d50eba
|
/topics/Matrix/Flood_Fill_733/[Iterative_DFS_getNeighbors_helper]_Flood_Fill_733.py
|
8816507b77d60021c564d4968c2c85ea6d8681d1
|
[
"MIT"
] |
permissive
|
DmitryNaimark/leetcode-solutions-python
|
05b7854f40791e5c5af345293181c7ac4435eb3c
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
refs/heads/master
| 2021-06-25T04:37:00.880255
| 2020-04-12T05:59:51
| 2020-04-12T05:59:51
| 215,267,156
| 1
| 0
|
MIT
| 2021-03-31T03:12:10
| 2019-10-15T10:07:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
# https://leetcode.com/problems/flood-fill/
# ---------------------------------------------------
from collections import deque
from typing import List
# Runtime Complexity: O(N), where N is the number of same-colored neighbors
# Space Complexity: O(N) in the worst case, if the same-colored neighbors are positioned as a continuous line.
class Solution:
    # __init__ is defined so that there is no warning that self.* vars are defined outside of __init__
def __init__(self):
self.rows = self.cols = 0
self.color = None
self.image = None
def floodFill(self, image: List[List[int]], sr: int, sc: int, new_color: int) -> List[List[int]]:
self.rows, self.cols = len(image), len(image[0])
self.color = image[sr][sc]
self.image = image
if self.color == new_color:
return image
stack = deque()
stack.append((sr, sc))
while stack:
(r, c) = stack.pop()
image[r][c] = new_color
stack.extend(self.getSameColorNeighbors(r, c))
return image
def getSameColorNeighbors(self, r, c):
neighbors = []
for dr, dc in ((-1, 0), (0, 1), (1, 0), (0, -1)):
if self.isValidCell(r + dr, c + dc) and self.image[r + dr][c + dc] == self.color:
neighbors.append((r + dr, c + dc))
return neighbors
def isValidCell(self, r, c):
return 0 <= r < self.rows and 0 <= c < self.cols
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# [2,2,2],
# [2,2,0],
# [2,0,1]
print(solution.floodFill([
[1,1,1],
[1,1,0],
[1,0,1]], 1, 1, 2))
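# Early-return sketch (hedged): new_color equals the existing color, so the
# image comes back unchanged without any traversal.
print(solution.floodFill([
    [0, 0],
    [0, 0]], 0, 0, 0))  # [[0, 0], [0, 0]]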
|
[
"dmitrynaimark@gmail.com"
] |
dmitrynaimark@gmail.com
|
8d6c5acfd84ddf1433ab7c4c19901159f35a940b
|
17a3c31d3d7f3d0ee4c76ba0196b9a32b0e3745d
|
/lpthw/ex15.py
|
d26b8c2aa78370b23f0c99b445393de7bd7d24bf
|
[] |
no_license
|
pixlalchemy/lpthw
|
440d3e21347e85133a8042b1da36fa9bcf38e9d0
|
1a93d73c2598de8f63589471a03e2da172bad4ef
|
refs/heads/master
| 2021-01-21T02:01:44.838200
| 2016-06-15T16:56:42
| 2016-06-15T16:56:42
| 60,893,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
# Imports argument variable from system module
from sys import argv
# Unpacks the variables passed from the command line into their own variables
script, filename = argv
# opens the file stored in filename and stores it in txt
txt = open(filename)
# Prints the string "Here's your file %r:", formatting in the file name
# stored in filename
print "Here's your file %r:" % filename
# reads the file and prints it as a string
print txt.read()
# Prints the string "Type the filename again:"
print "Type the filename again:"
# Gets the filename again from the user and stores it in file_again
file_again = raw_input("> ")
# Opens the file stored in file again and stores it in txt_again
txt_again = open(file_again)
# reads the file and prints it as a string again
print txt_again.read()
# Close the file
txt.close()
txt_again.close()
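# Example invocation (hedged; ex15_sample.txt is a hypothetical file):
#   $ python ex15.py ex15_sample.txt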
|
[
"jordan.pixlalchemy@gmail.com"
] |
jordan.pixlalchemy@gmail.com
|
50c64474635be18407de296ad776205d2b5b55c9
|
48ce7ff0bdb8dcd186b923dadc873167177937ae
|
/sentiment-KNN.py
|
19c5fb16611863a849effae37a219fcaf1af1c32
|
[] |
no_license
|
tariqrahmani08/CPSC571project
|
9cce84f1d8ce40825b9abeee448a3ea0a59dec8a
|
c531d7b36927c576c0a4273759c666164d1c20b5
|
refs/heads/master
| 2020-04-21T22:32:00.256812
| 2019-04-04T03:50:22
| 2019-04-04T03:50:22
| 169,914,515
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,969
|
py
|
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from utils import *
df = pd.read_csv('apple_merged.csv')
pos = df.loc[df['value'] == 1, 'text'].copy().reset_index(drop=True)
neg = df.loc[df['value'] == -1, 'text'].copy().reset_index(drop=True)
neg = pd.concat([pd.DataFrame(neg), pd.DataFrame(np.zeros(neg.shape), columns=['class'])], axis=1)
pos = pd.concat([pd.DataFrame(pos), pd.DataFrame(np.ones(pos.shape), columns=['class'])], axis=1)
np.random.seed(42)
rand = np.random.permutation(pos.shape[0])
pos = pos.iloc[rand[:neg.shape[0]]].reset_index(drop=True)
df = pd.concat([pos, neg]).sample(frac=1).reset_index(drop=True)
print(df.head())
X_train, X_test, y_train, y_test = train_test_split(df['text'].values, df['class'].values, test_size=0.2,
random_state=42)
ENGLISH_STOP_WORDS = frozenset([
"a", "about", "above", "across", "after", "afterwards", "again", "against",
"all", "almost", "alone", "along", "already", "also", "although", "always",
"am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
"around", "as", "at", "back", "be", "became", "because", "become",
"becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both",
"bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
"down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
"elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fifty", "fill",
"find", "fire", "first", "five", "for", "former", "formerly", "forty",
"found", "four", "from", "front", "full", "further", "get", "give", "go",
"had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
"hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed",
"interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
"latterly", "least", "less", "ltd", "made", "many", "may", "me",
"meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
"move", "much", "must", "my", "myself", "name", "namely", "neither",
"never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
"nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
"once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
"ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
"please", "put", "rather", "re", "s", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should", "show", "side",
"since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
"something", "sometime", "sometimes", "somewhere", "still", "such",
"system", "take", "ten", "than", "that", "the", "their", "them",
"themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they", "thick", "thin",
"third", "this", "those", "though", "three", "through", "throughout",
"thru", "thus", "to", "together", "too", "top", "toward", "towards",
"twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever", "when",
"whence", "whenever", "where", "whereafter", "whereas", "whereby",
"wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
"who", "whoever", "whole", "whom", "whose", "why", "will", "with",
"within", "without", "would", "yet", "you", "your", "yours", "yourself",
"yourselves"])
vect = CountVectorizer(strip_accents='unicode', stop_words=ENGLISH_STOP_WORDS, binary=True)
tf_train = vect.fit_transform(X_train)
tf_test = vect.transform(X_test)
pipeline_knn = make_pipeline(vect, KNeighborsClassifier())
param_grid = {'kneighborsclassifier__n_neighbors': np.arange(1, 50)}
grid_knn = GridSearchCV(pipeline_knn,
param_grid,
cv=5,
scoring="roc_auc",
verbose=1,
n_jobs=-1)
grid_knn.fit(X_train, y_train)
grid_knn.score(X_test, y_test)
print("Count Vectorizer:")
print(grid_knn.best_params_)
print(grid_knn.best_score_)
def report_results(model, X, y):
pred_proba = model.predict_proba(X)[:, 1]
pred = model.predict(X)
auc = roc_auc_score(y, pred_proba)
acc = accuracy_score(y, pred)
f1 = f1_score(y, pred)
prec = precision_score(y, pred)
rec = recall_score(y, pred)
result = {'auc': auc, 'f1': f1, 'acc': acc, 'precision': prec, 'recall': rec}
return result
print(report_results(grid_knn.best_estimator_, X_test, y_test))
vect = TfidfVectorizer(strip_accents='unicode', stop_words=ENGLISH_STOP_WORDS, binary=True, ngram_range=(1, 2),
max_df=0.9, min_df=3, sublinear_tf=True)
tfidf_train = vect.fit_transform(X_train)
tfidf_test = vect.transform(X_test)
print("TFidf Vectorizer:")
pipeline_knn = make_pipeline(vect, KNeighborsClassifier())
grid_knn.fit(X_train, y_train)
grid_knn.score(X_test, y_test)
print(grid_knn.best_params_)
print(grid_knn.best_score_)
print(report_results(grid_knn.best_estimator_, X_test, y_test))
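# Hedged usage sketch: the fitted best estimator is a full pipeline
# (vectorizer + KNN), so it can score raw text directly. The sample strings
# are illustrative, not taken from the original dataset.
sample_tweets = ["Loving the new iPhone!", "Worst update ever."]
print(grid_knn.best_estimator_.predict(sample_tweets))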
|
[
"noreply@github.com"
] |
tariqrahmani08.noreply@github.com
|
deb7ea13038fc4edbe7aab556974d2650f9dbb0c
|
6c4b9d86590c43e8a74685b0b367e6c5c0cd653e
|
/5-3networkrunning.py
|
a6749f78b3f78f412af226607bfcdcbafc81dd41
|
[] |
no_license
|
weicmjincheng/Tensorflow
|
c24b81995b245bc583bafa107178042799f28043
|
a5c76987a057dee61a6c0408d5d8790b3113d45b
|
refs/heads/master
| 2020-03-24T12:39:46.227716
| 2018-07-29T01:15:31
| 2018-07-29T01:15:31
| 142,719,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
# _*_coding:utf-8 _*_
"""
@Time :2018/6/25 14:29
@Author :weicm
#@Software: PyCharm
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST dataset with one-hot encoded labels
mnist = input_data.read_data_sets("MNIST.data",one_hot=True)
batch_size = 100
# Compute how many batches there are in total
m_batch = mnist.train.num_examples // batch_size
# Parameter summaries: record statistics for a variable
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        # Record each value under a name
        tf.summary.scalar('mean',mean)  # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
            tf.summary.scalar('stddev',stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram
# Name scopes are needed so the TensorBoard visualization stays readable
with tf.name_scope('input'):
    # Define two placeholders
    x = tf.placeholder(tf.float32,[None,784],name="x-input")
    y = tf.placeholder(tf.float32,[None,10],name="y-input")
with tf.name_scope('layer'):
    # Build the network
    with tf.name_scope('weight'):
        W = tf.Variable(tf.zeros([784,10]),name="W")
        # Track how the weights change while the network runs
        variable_summaries(W)
    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]),name='b')
        variable_summaries(b)
    with tf.name_scope('wx_plus_b'):
        wx_plus_b = tf.matmul(x,W)+b
    with tf.name_scope('softmax'):
        prediction = tf.nn.softmax(wx_plus_b)
with tf.name_scope('loss'):
    # Quadratic (mean squared error) cost function
    loss = tf.reduce_mean(tf.square(y-prediction))
    tf.summary.scalar('loss',loss)
with tf.name_scope('train'):
    # Gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# Initialize the variables
init = tf.global_variables_initializer()
with tf.name_scope('accuracy'):
    # Accuracy: compare the positions of the largest values
    with tf.name_scope('score'):
        score = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
    # Cast True to 1 and False to 0
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(score,tf.float32))
        tf.summary.scalar('accuracy',accuracy)
# Merge all of the summaries
merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(init)
    # Logs go under the current path; the directory is created if missing.
    # After training, open a command prompt on the matching drive and run
    # tensorboard --logdir=C:\Users\weicm\PycharmProjects\TensorFlow\logs to get the URL
    writer = tf.summary.FileWriter('logs/',sess.graph)
    for epoch in range(51):
        for batch in range(m_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            # Run merged together with the train step to collect summaries
            summary,_ = sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys})
        # Write the summary and the current epoch to the log file
        writer.add_summary(summary,epoch)
        acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print("Epoch: " + str(epoch) + "  accuracy: " + str(acc))
|
[
"2608383530@qq.com"
] |
2608383530@qq.com
|
49970e17697f8013da979a6dc53e76b32a6ce935
|
3c9a73bcae72487085eabb26c1e48bee6ac747be
|
/DjangoBlog/manage.py
|
9694a81464414424399d1213155099bf19de508b
|
[] |
no_license
|
kevinmarsh/DjangoBlog
|
7c67b681227a92b6c13fad22a9cd93f850010f90
|
da382b6a8fa36c36482a62f78d6254eb2562e19a
|
refs/heads/master
| 2016-09-06T09:13:27.895260
| 2013-07-23T21:16:24
| 2013-07-23T21:16:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoBlog.settings.local")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"kevinmarsh3@gmail.com"
] |
kevinmarsh3@gmail.com
|
ef81c4e21eadc3055006999d6cdfb2c1796a8a7c
|
b8a14abb639e489d67a7d7f80ccdd8a36c6fec6c
|
/Unidad 1/Python/3. Ejercicios/Act03/Ejercicio6.py
|
6b786b9ee848c63d0729e1a74ced8b9915ea1ee7
|
[] |
no_license
|
djgonza/SGEM
|
de9eb5f11b80964a5e7b2a3155d8df7ca3c2f779
|
4f3b029c8cd9889432839cbb01c90c66555538fd
|
refs/heads/master
| 2021-05-16T06:52:01.153640
| 2017-11-17T09:43:28
| 2017-11-17T09:43:28
| 103,507,934
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
password = input("Introduce contraseña: ")
longUser = len(password)
if longUser < 8:
print ("La contraseña debe contener un mínimo de 8 caracteres")
correct = bool(1)
conNoAlfa = 0
for i in password:
if i.isspace():
correct = bool(0)
break
if i.isalpha():
conNoAlfa += 1
if not correct or conNoAlfa <= 0:
print ("Contraseña incorrecta")
else:
print ("Contraseña correcta")
|
[
"davidjimenezgonza@gmail.com"
] |
davidjimenezgonza@gmail.com
|
cc149f8e720cd77bdd3cdb333c2ee5d3a4c830d9
|
75fb9b79ed6db179540176511f35fddb094080dc
|
/template.py
|
e8c435474b941436e852eea5afbf15c24e098579
|
[] |
no_license
|
wty0511/sentiment_analysis
|
47d38555b0b9720e0476c79f33417302ac346e89
|
e68862a40f06f943c1d0a4c07447fa995c3fff81
|
refs/heads/master
| 2023-01-24T13:03:18.036687
| 2020-11-18T16:12:54
| 2020-11-18T16:12:54
| 313,979,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
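# Chinese surface templates for ConceptNet relations; {} is filled with the
# tail concept. The strings stay in Chinese because the downstream sentiment
# model consumes Chinese text. Empty strings mark relations with no template.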
template = {
'/r/RelatedTo': '和{}相关',
'/r/FormOf': '的形式为{}',
'/r/IsA': '是{}',
'/r/PartOf': '是{}的一部分',
'/r/HasA': '具有{}',
'/r/UsedFor': '用来{}',
'/r/CapableOf': '可以{}',
'/r/AtLocation': '在{}',
'/r/Causes': '导致{}',
'/r/HasSubevent': ',接下来,{}',
'/r/HasFirstSubevent': ',紧接着,{}',
'/r/HasLastSubevent': '的最后一步是{}',
'/r/HasPrerequisite': '的前提为{}',
'/r/HasProperty': '具有{}的属性',
'/r/MotivatedByGoal': '受到{}的驱动',
'/r/ObstructedBy': '受到{}的影响',
'/r/Desires': '想要{}',
'/r/CreatedBy': '被{}创造',
'/r/Synonym': '和{}同义',
'/r/Antonym': '和{}反义',
'/r/DistinctFrom': '和{}相区别',
'/r/DerivedFrom': '由{}导致',
'/r/SymbolOf': '象征着{}',
'/r/DefinedAs': '定义为{}',
'/r/MannerOf': '',
'/r/LocatedNear': '和{}相邻',
'/r/HasContext': '的背景是{}',
'/r/SimilarTo': '和{}相似',
'/r/EtymologicallyRelatedTo': '',
'/r/EtymologicallyDerivedFrom': '',
'/r/CausesDesire': '',
'/r/MadeOf': '由{}制成',
'/r/ReceivesAction': '',
'/r/ExternalURL': ''
}
def strip(uri):
    # Return the surface form from a ConceptNet URI such as /c/zh/term
    return uri.split('/')[3]
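# Hedged usage sketch with made-up URIs: strip() extracts the surface form,
# and the relation template renders the triple as a Chinese sentence.
if __name__ == '__main__':
    start, end = '/c/zh/苹果', '/c/zh/水果'
    print(strip(start) + template['/r/IsA'].format(strip(end)))  # 苹果是水果 ("an apple is a fruit")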
|
[
"15652198208@163.com"
] |
15652198208@163.com
|
9505c6e284962d18864326a21ccb0101ba2b4d43
|
f9dbc7136e1f067d3064c44cf37d286abdd94495
|
/blogapp/migrations/0018_delete_aboutsite.py
|
a464184d9f705ae271a8f58db9558c1adb3093cb
|
[] |
no_license
|
Aravind2203/Blog
|
0a58d8dfd4e8c557b48fba728cc8dcc92338d199
|
88e70a286f2e745a66e0978926e1eaa043f2b572
|
refs/heads/master
| 2023-04-30T23:00:46.308556
| 2021-05-10T12:47:07
| 2021-05-10T12:47:07
| 361,818,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# Generated by Django 3.1.1 on 2021-05-02 13:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0017_aboutsite'),
]
operations = [
migrations.DeleteModel(
name='AboutSite',
),
]
|
[
"ndrevanth09@gmail.com"
] |
ndrevanth09@gmail.com
|
7c41de820bc4c321a1797c54ee14abcdc803f8de
|
b187ca51679e28dd533a05f31a9faf891e831dd9
|
/palindrome_partitioning.py
|
d490a91fa7eecaf17415fcdaa0f714b9b9b93800
|
[] |
no_license
|
88sanjay/Backtracking-2
|
85e36240bb3a20fa2a3850e817ae1009d4a61e8d
|
c223eefd3931a6c99be50b8631f22b745a49668b
|
refs/heads/master
| 2020-07-31T17:11:07.220861
| 2019-09-25T13:52:19
| 2019-09-25T13:52:19
| 210,687,158
| 0
| 0
| null | 2019-09-24T19:59:58
| 2019-09-24T19:59:58
| null |
UTF-8
|
Python
| false
| false
| 3,031
|
py
|
class Solution(object):
def partition(self, input_string):
"""
        Partition a string into all lists of palindromic substrings
logic is similar to power set
palindrome_partitioning("abacaba") = { "a" , palindrome_partitioning("bacaba") } +
{ "aba" , palindrome_partitioning("caba") } +
{ "abacaba"}
Time complexity : O(c^n)
Space complexity : O(n)
Run on leet code : yes
Edge Cases : s = ""
:param input_string: string to partition
:return: set of all partitions
"""
if input_string == "":
return [[""]]
return self._palindrome_partitioning(input_string[0], input_string) + (
[[input_string]] if self.is_palindrome(input_string) else [])
def is_palindrome(self, s):
"""
Checks if string is palindrome
:param s: string
:return: boolean indicating whether palindrome or not
"""
return s == s[::-1]
def merge_palindrome_to_partition(self, p, partitions):
"""
merges partition p to an existing list of partitions
:param p: start partition
:param partitions: existing partition list
:return: merged partition list
"""
return [[p] + e for e in partitions] if partitions else [[p]]
def next_palindrome(self, old_start_partition, input_string):
"""
Returns next start partition.
        if input_string is empty it returns None
:param old_start_partition: previous start partition .
:param input_string: input string
:return: next start partition
"""
if input_string == "":
return None
if old_start_partition == input_string:
return input_string
partition_start = len(old_start_partition) + 1
while not self.is_palindrome(input_string[:partition_start]):
partition_start += 1
if len(input_string) < partition_start:
return None
# this should exit because input string is a palindrome
return input_string[:partition_start]
def _palindrome_partitioning(self, start_partition, input_string):
"""
We exit when start_partition == input string
:param start_partition:
:param input_string:
:return:
"""
result = []
if start_partition == input_string:
return []
while (start_partition != input_string) and (start_partition is not None):
result += self.merge_palindrome_to_partition(
start_partition, self.partition(input_string[len(start_partition):]))
start_partition = self.next_palindrome(start_partition, input_string)
return result
if __name__ == "__main__" :
print(Solution().partition("aaaaa"))
|
[
"sanjay.kumar@shopkick.com"
] |
sanjay.kumar@shopkick.com
|
75d13fa48f0b8fec03e88dfd359466b218141f81
|
6323ef814af90c23897d1f697a0bd4effd33d9cc
|
/VideoGame/GBMRAgent.py
|
2549a9a55008568a218ffd638642d3b792f272d1
|
[] |
no_license
|
kangyongxin/GBMRcode
|
dc6c69144c01a04718506ab186d5623274724b01
|
8fec4e1efda67229482e2012e61eef14c418a3ed
|
refs/heads/master
| 2022-12-02T10:08:36.545723
| 2020-08-23T00:41:43
| 2020-08-23T00:41:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,616
|
py
|
# An agent structure that works for both the maze and Atari environments
from functools import reduce
import random
from Memory import Memory
import numpy as np
class Agent():
    def __init__(self,num_actions=None,dim_obs=None,memory_size=100,memory_word_size=32,name="TrainableAgent"):
        '''
        The agent's basic knowledge of the environment:
        the action space and the observation/state space.
        '''
        self.num_actions = num_actions
        # reduce(lambda x, y: x + y, [1, 2, 3]) evaluates to 6
        self._obs_size = dim_obs
        self.memory_size = memory_size
        self.memory_word_size=memory_word_size
        self.StateAttributesDict={}
        self.StateLabelDict= {}
        self.ExternalMemory = Memory(self.memory_size)
def TakeRandomAction(self):
action = random.randint(0,self.num_actions-1)
return action
    '''
    Encoding module: currently three variants.
    1. Maze: the observed cell boundary is converted into a state key stored
       in a dict; the boundary is also written to the node as a feature and
       tagged with an index label that is used later for plotting.
    2. RAM: environment-specific; for MsPacman, fixed dimensions are pulled
       out as position features, and the state is used directly as the node
       feature.
    3. Encode/decode the raw image directly (unfinished).
    '''
    def obs2state(self,observation):
        if observation == 'terminal':
            state = str(list([365.0,365.0,395.0,395.0]))  # the last cell of the 10x10 grid
            # self.StateAttributesDict[state]=list([165.0,165.0,195.0,195.0])
            self.StateAttributesDict[state]=list([365.0,365.0,395.0,395.0])
            self.StateLabelDict[state]= 99
        else:
            state=str(observation)
            self.StateAttributesDict[state]=observation  # pass the value on to the later reconstruction computation
            self.StateLabelDict[state]=int(((observation[1] + 15.0 - 20.0) / 40) *10 + (observation[0] + 15.0 - 20.0) / 40 )
        return state
    def obs_ram(self,observation):
        # MsPacman only
        state = np.array([[observation[10],observation[16]]])
        state = state.astype('float32')
        return state
    # # Encoder/decoder functionality
    # def obs2state(self,observation):
    #     obs = observation.reshape(1,self._obs_size).astype('float32') / 255  # this should be moved into a function
    #     obs_code = self._im2state(obs)
    #     return obs_code
    # def state2obs(self,state):
    #     reconstructed_obs = self._state2im(state)
    #     return reconstructed_obs
    # def state2value(self,state):
    #     value_estimate = self._vdecoder(state)
    #     return value_estimate
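# --- Hedged usage sketch (assumes the repo's Memory module is importable
# and that Memory(size) is its constructor, as the __init__ above implies) ---
if __name__ == '__main__':
    agent = Agent(num_actions=4, dim_obs=4)
    print(agent.TakeRandomAction())  # a random action index in [0, 3]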
|
[
"kangyongxin2018@ia.ac.cn"
] |
kangyongxin2018@ia.ac.cn
|
d8fd013e77df3e0b005e7b56e0c5c2909ae9dce0
|
c2330074689cc1e579fe84f762bc990ef83691f8
|
/wsgi.py
|
988a9bf766a26c4bf1214dea78bd06d20d239638
|
[] |
no_license
|
asafvala/GetMeHome
|
bc1099c3ad4fb539d231f5eb2103dadeca09b7e8
|
dc67c826e5b1799b2194898382ff55a883d24f12
|
refs/heads/master
| 2020-12-31T04:42:43.538582
| 2016-06-01T18:23:24
| 2016-06-01T18:23:24
| 59,219,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
"""
WSGI config for busses project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = get_wsgi_application()
|
[
"mestrebisli@gmail.com"
] |
mestrebisli@gmail.com
|
0c75b53c22419526419582115e847b0ff2e238e0
|
7669bfdd12852111f320f94c1587a951017c05d7
|
/fileOp-WordOcuurance.py
|
e957e37908f5db4c6d3268fd33ec527ad9033992
|
[] |
no_license
|
pathakamaresh86/python_class_prgms
|
177fb1e6137b972369735b02dce8de5027624f87
|
2d671300677f60cce347031ab79610deb2180fae
|
refs/heads/master
| 2020-04-04T13:12:45.106943
| 2018-12-17T09:54:33
| 2018-12-17T09:54:33
| 155,952,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
#!/usr/bin/python
# WAP to accept a file name, a word, and its occurrence count from the user, then print the lines from the file in which that word occurs the specified number of times
import io
def printFileLines(fname,word,wCnt):
fd=io.FileIO(fname)
wordList=[]
    if fd is not None:
while True:
wordList=[]
data=fd.readline()
if data == '':
break
wordList=data.rstrip().lower().split(" ")
if wordList.count(word.lower()) == wCnt:
print data,
fd.close()
def main():
fname=input("Enter file name:")
word=input("Enter the word to be searched:")
wCnt=input("Enter the count of word:")
print
printFileLines(fname,word,wCnt)
if __name__=="__main__":
main()
'''
D:\F DATA\python_class>python fileOp-WordOcuurance.py
Enter file name:"file-wordocc.txt"
Enter the word to be searched:"vijay"
Enter the count of word:2
Mayura vijay pathak vijay
Vijay vijay Devidas pathak
'''
|
[
"noreply@github.com"
] |
pathakamaresh86.noreply@github.com
|
d38df1b8cecbfa6ad0a2c22c75552007ce5b74b3
|
19245ce3b322a8d8dd731f6c14811e11ee826ee8
|
/IBRd.py
|
36db6e2875ca4de3eafe7901597a42c7bc572b7c
|
[] |
no_license
|
akitaki6556/ID_By_Ringtone
|
c8d820145b3a505272076e05d267d9a9354ca18e
|
c93a94a76d71119d35e3962102e245542a3152d4
|
refs/heads/master
| 2020-07-06T01:35:06.116335
| 2019-08-17T07:10:39
| 2019-08-17T07:10:39
| 192,741,398
| 0
| 0
| null | 2019-06-19T13:48:22
| 2019-06-19T13:48:21
| null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
#!/usr/local/bin/pyenv/versions/3.6.5/bin/python
import os
import sys
import discord
import settings
from time import sleep
TOKEN = settings.DIS_TOKEN
NOSTALGIA_TEXT_CHANNEL = settings.NOS_TEXT
NOSTALGIA_VOICE_CHANNEL = settings.NOS_VOICE
TNS_VOICE_CHANNEL = settings.TNS_VOICE
client = discord.Client()
connect_flag = True
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
global voice
if message.author.bot:
return
if message.content == "&join":
if client.user != message.author:
            # Pick this server's voice channel (mirrors on_voice_state_update)
            if message.server.id == "280222178497003521":
                VOICE_CHANNEL = NOSTALGIA_VOICE_CHANNEL
            else:
                VOICE_CHANNEL = TNS_VOICE_CHANNEL
            # Join only when there is no voice client for this server yet
            if client.voice_client_in(message.server) is None:
                voice = await client.join_voice_channel(client.get_channel(VOICE_CHANNEL))
                if voice.is_connected():
                    await client.send_message(message.channel, "success!")
            else:
                await client.send_message(message.channel, "already connected.")
if message.content == "&exit":
if client.user != message.author:
if voice.is_connected():
await voice.disconnect()
else:
m = "Not connected!!"
await client.send_message(message.channel, m)
@client.event
async def on_voice_state_update(before,after):
global connect_flag
global vc
if before.bot:
return
if before.server.id == "280222178497003521":
VOICE_CHANNEL = NOSTALGIA_VOICE_CHANNEL
else:
VOICE_CHANNEL = TNS_VOICE_CHANNEL
if ((before.voice.self_mute is not after.voice.self_mute) or (before.voice.self_deaf is not after.voice.self_deaf)):
print("exist changing mute setting")
return
if (before.voice_channel is not after.voice_channel) and (after.voice_channel is client.get_channel(VOICE_CHANNEL)):
if connect_flag:
await client.join_voice_channel(client.get_channel(VOICE_CHANNEL))
connect_flag = False
        # m = before.name + " entered " + after.voice_channel.name
# await client.send_message(client.get_channel(TEXT_CHANNEL), m)
file_name = before.name + ".mp3"
path = "/home/sshuser/discord_bot/ID_By_Ringtone/" + file_name
vc = client.voice_client_in(before.server)
player = vc.create_ffmpeg_player(path)
player.volume = 0.2
sleep(1)
player.start()
if (before.voice_channel is not after.voice_channel) and (after.voice_channel is None):
mem = client.get_channel(VOICE_CHANNEL).voice_members
len_mem = len(mem)
if (len_mem == 1) and (mem.pop().bot):
await vc.disconnect()
connect_flag = True
client.run(TOKEN)
|
[
"kyuta3579@yahoo.co.jp"
] |
kyuta3579@yahoo.co.jp
|
76f67f825101294e7de8a25e425d1a839a66dc39
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/last_child/give_little_time/first_eye/same_company.py
|
a19d53b348ad122a9629e5741cefab5e63b5db8d
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#! /usr/bin/env python
def call_small_day_from_life(str_arg):
work(str_arg)
print('few_year')
def work(str_arg):
print(str_arg)
if __name__ == '__main__':
call_small_day_from_life('man')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
04cc4c3737f0e1ee3ef280cd8f7501ddae000731
|
480604046fccd4310ab84db4cc768dfa45fd36d3
|
/plant_analysis.py
|
9a4b20c491a0e9eb4a6f3bea43825ab9bcc0cd90
|
[] |
no_license
|
jeffschwane/ProjectPlant
|
ccb4b5f1464b69460c53950d6c36e45b210d1ee1
|
4d245e5f5f7aafe72ed92dbdb420ca8dc62a901e
|
refs/heads/main
| 2023-03-23T12:43:17.199193
| 2021-03-10T20:52:29
| 2021-03-10T20:52:29
| 327,977,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,095
|
py
|
import sqlalchemy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import calendar
import datetime as dt
import sys
import os
from IPython.display import display
from scipy.signal import argrelextrema, lfilter
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, TimeSeriesSplit, learning_curve
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from plotting import plot_learning_curve
def select_plant(plant_id, plant_table, readings_table):
"""Returns the plant name and sensor readings for the particular plant based on the plant id number provided and DataFrames"""
plant_name = plant_table[plant_table.index == plant_id].name_common.item()
plant_readings = readings_table[readings_table.plant_id == plant_id]
return plant_name, plant_readings
def plot_time_series(x, y, title, ylabel, color, figure='None'):
"""Creates time series plots given x and y series data, title and data label for y axis"""
if figure == 'None':
fig = plt.figure()
else:
fig = figure # plot on the same figure
plt.plot(x, y, label=ylabel, c=color)
plt.xlabel('Date')
plt.ylabel(ylabel)
plt.title(title)
return fig
def plot_day(x, y, row, col, axis, title, ylabel):
"""Creates time series plots given x and y series data, title and data label for y axis"""
axis[row, col].bar(x, y, color='orange')
axis[row, col].set_xlabel('Hour in a Day')
axis[row, col].set_ylabel(ylabel)
axis[row, col].set_title(title)
axis[row, col].set_xticks(np.arange(0, 25, 3))
axis[row, col].set_ylim(0, 1000)
return axis
def return_score(y_test, y_pred, y_dumb_test, metric):
"""Returns the regression scores for the regression and dummy model for the metric type used: MSE, MAE, or r2"""
if metric == 'MSE':
reg_score = mean_squared_error(y_test, y_pred)
dummy_score = mean_squared_error(y_test, y_dumb_test)
elif metric == 'MAE':
reg_score = mean_absolute_error(y_test, y_pred)
dummy_score = mean_absolute_error(y_test, y_dumb_test)
elif metric == 'r2':
reg_score = r2_score(y_test, y_pred)
dummy_score = r2_score(y_test, y_dumb_test)
return reg_score, dummy_score
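# Worked example (hedged): return_score([1, 0, 1], [1, 1, 1], [0, 0, 0], 'MAE')
# yields (1/3, 2/3): the regression misses one of three points, while the
# dummy baseline misses two.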
# Connect to local SQL database
sql_pass = os.environ['sql_password']
engine = sqlalchemy.create_engine(
f"mysql+pymysql://root:{sql_pass}@localhost/plant_data")
connection = engine.connect()
# Load SQL data into pandas DataFrames
plant_table = pd.read_sql('plants', connection, index_col='id', columns=[
'name_common', 'name_latin', 'soil_moist_min', 'soil_moist_max', 'light_min', 'light_max'])
readings_table = pd.read_sql('readings', connection, columns=[
'plant_id', 'datetime', 'light', 'soil_moist', 'soil_fert', 'temp'])
# Create new column "month" for monthly analysis later on
readings_table['month'] = readings_table['datetime'].dt.month
# Apply smoothing function to soil_moist data
n = 15 # the larger n is, the smoother curve will be
b = [1.0 / n] * n
a = 1
readings_table.soil_moist = lfilter(b, a, readings_table.soil_moist)
if __name__ == '__main__':  # False when imported, which prevents the code below from running during testing
# Ask for user input about graphs
sns.set_theme() # Apply default seaborn theme
entry = 'null'
num = 'null'
    while entry not in ['a', 'l', 'm', 'g']:
entry = input(
'Graphs: Enter \n(a) for all light data for each plant\n(l) for average light data each month\n(m) for soil moisture and watering\n(g) for monthly global solar radiation for NYC and move comparison\n: ')
if entry == 'a': # Graph of light data in its entirety for each plant
while num not in ['m', 'r']:
num = input(
'\nPlot data showing:\n(m) for before/after move \n(r) light requirement \n: ')
for plant_id in readings_table.plant_id.unique():
plant_name, plant_readings = select_plant(
plant_id, plant_table, readings_table)
x = plant_readings.datetime
y = plant_readings.light
title = f'Full Light Data for {plant_name} Plant'
plot_time_series(x, y,
title, ylabel='Light (mmol)', color='orange')
if num == 'm':
bottom, top = plt.ylim()
plt.vlines(dt.date(2020, 12, 1), bottom, top)
elif num == 'r':
# Calculate % of prime daylight hours (8am-5pm) that plant is getting light above min thresholds
mask = (plant_readings['datetime'].dt.hour >= 8) & (
plant_readings['datetime'].dt.hour <= 17) # Grab values between 8am and 5pm
plant_light_min = plant_table.loc[plant_id, 'light_min']
light_above_threshold_pct = plant_readings.groupby(mask)['light'].apply(
lambda c: (c > plant_light_min).sum() / len(c)
)[True]
xmin = plant_readings.datetime.iloc[0]
xmax = plant_readings.datetime.iloc[-1]
xtext = plant_readings.datetime.iloc[int(
.65*len(plant_readings.datetime))]
plt.hlines(
plant_light_min, xmin, xmax, label='Average watering threshold')
bottom, top = plt.ylim()
plt.text(
xtext, plant_light_min - .1*plant_light_min, f'Min. light threshold')
plt.text(
xmin, .7*top, f'Percentage of prime daylight hours (8am-5pm) \nthat plant receives light above min threshold: {round(light_above_threshold_pct*100)}%')
# Graph of light data over course of an average day each month for each plant
elif entry == 'l':
while num != 'q':
num = input(
'Enter plant number (1-4) to graph avg light for each month or (q) to quit and show plots: ')
try:
plant_name, plant_readings = select_plant(
int(num), plant_table, readings_table)
except ValueError:
break
fig, axs = plt.subplots(2, 6)
fig.suptitle(
f'Light Data for Average Day in Each Month for {plant_name} Plant')
for month in np.arange(1, 13):
# select month of data
plant_data_month = plant_readings.loc[plant_readings.month == month]
# plant_data_month = plant_data_month.reset_index()
readings_avg_day = plant_data_month.groupby(
plant_data_month['datetime'].dt.hour).mean() # group data by hour in day and average for each hour
x = readings_avg_day.index
y = readings_avg_day.light
axs = plot_day(x, y,
title=f'{calendar.month_abbr[month]}', ylabel='Light (mmol)', axis=axs, row=(month - 1) // 6, col=(month - 1) % 6)
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
elif entry == 'g': # Incorporate global solar radiation for NYC to normalize data based on month
# Import data from National Solar Radiation Database API
# https://nsrdb.nrel.gov/data-sets/api-instructions.html
lat, lon = 40.6872854, -73.9757991
api_key = os.environ['nsrdb_api_key']
attributes = 'ghi'
year = '2019'
leap_year = 'false'
# Set time interval in minutes, i.e., '30' is half hour intervals. Valid intervals are 30 & 60.
interval = '60'
# Specify Coordinated Universal Time (UTC), 'true' will use UTC, 'false' will use the local time zone of the data.
# local time zone.
utc = 'false'
your_name = 'Jeff+Schwane'
reason_for_use = 'personal+project'
your_affiliation = 'N/A'
your_email = 'jschwane@gmail.com'
mailing_list = 'false'
# Declare url string
url = f'https://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap_year}&interval={interval}&utc={utc}&full_name={your_name}&email={your_email}&affiliation={your_affiliation}&mailing_list={mailing_list}&reason={reason_for_use}&api_key={api_key}&attributes={attributes}'
# Return just the first 2 lines to get metadata:
info = pd.read_csv(url, nrows=1)
# Return all but first 2 lines of csv to get data:
df = pd.read_csv(
f'https://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap_year}&interval={interval}&utc={utc}&full_name={your_name}&email={your_email}&affiliation={your_affiliation}&mailing_list={mailing_list}&reason={reason_for_use}&api_key={api_key}&attributes={attributes}', skiprows=2)
# Set the time index in the pandas dataframe:
df = df.set_index(pd.date_range(
f'1/1/{year}', freq=interval+'Min', periods=525600/int(interval)))
# plot GHI over average month and sum GHI per month
ghi_month_sum = {}
fig, axs = plt.subplots(2, 6)
fig.suptitle(
'Global Horizontal Irradiance for Average Day in Each Month in NYC')
for month in np.arange(1, 13):
month_data = df.loc[df.Month == month]
readings_avg_day = month_data.groupby(
month_data.Hour).mean() # group data by hour in day and average for each hour
x = readings_avg_day.index
y = readings_avg_day.GHI
axs = plot_day(x, y,
title=f'{calendar.month_abbr[month]}', ylabel='Light ($W/m^2$)', axis=axs, row=(month-1)//6, col=(month-1) % 6)
# Sum GHI for each month
ghi_month_sum[month] = month_data.GHI.sum()
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
        # Comparison of light levels before/after the move on 12/1/20
while num != 'q':
num = input(
'Enter plant number (1-4) to graph soil moisture or (q) to quit and show plots: ')
try:
plant_name, plant_readings = select_plant(
int(num), plant_table, readings_table)
except ValueError:
break
plant_month_mean = {}
for month in np.arange(1, 13):
if month in plant_readings.month.values:
# Grab only data for the month
plant_data_month = plant_readings.loc[plant_readings.month == month]
plant_month_mean[month] = plant_data_month.light.mean()
df_ghi_month = pd.DataFrame.from_dict(
ghi_month_sum, orient='index')
df_plant_month = pd.DataFrame.from_dict(
plant_month_mean, orient='index')
# Normalize light levels based on GHI for the month
df_plant_month_norm = df_plant_month / df_ghi_month
# Percentage difference before/after 12/1/20
before = df_plant_month_norm[7:12].mean()
after = (df_plant_month_norm.loc[12] +
df_plant_month_norm.loc[1]) / 2
pct_diff = round(((after-before)/before*100)[0])
# Graph of normalized light levels and comparison between before/after 12/1/20
months = [calendar.month_abbr[i] for i in range(1, 13)]
df_plant_month_norm.index = months
x = df_plant_month_norm.index
y = df_plant_month_norm[0]
fig = plt.figure()
plt.bar(x, y, color='orange')
plt.xlabel('Month')
plt.ylabel('Normalized Relative Light Levels')
plt.yticks([])
plt.title(
f'Comparison of Light Levels Before & After Move on 12/1/20 for {plant_name}')
bottom, top = plt.ylim()
plt.text(
4, .9*top, f'Difference before/after 12/1/20: {pct_diff}%')
# Train supervised ML model based on target variable
elif entry == 'm': # Graph of soil moisture data in its entirety for each plant
# Find local peaks
ilocs_min = argrelextrema(
readings_table.soil_moist.values, np.less_equal, order=125)[0] # Searches range of 125 hours (5+ days) on both sides for minimum
ilocs_max = argrelextrema(
readings_table.soil_moist.values, np.greater_equal, order=125)[0] # Searches range of 125 hours (5+ days) on both sides for maximum
# Add soil moisture local min and max to table
readings_table['local_max_moist'] = False
readings_table['local_min_moist'] = False
# Assign values to true where they are equal to local maxes
readings_table.loc[readings_table.iloc[ilocs_max].index,
'local_max_moist'] = True
# Assign values to true where they are equal to local mins
readings_table.loc[readings_table.iloc[ilocs_min].index,
'local_min_moist'] = True
# Learning algorithm that determines based on current soil moist, sunlight plant has been receiving and avg. temperature when it is next expected to need water
while num != 'q':
num = input(
'Enter plant number (1-4) to graph soil moisture or (q) to quit and show plots: ')
try:
plant_name, plant_readings = select_plant(
int(num), plant_table, readings_table)
except ValueError:
break
pd.set_option('mode.chained_assignment', None)
# Clean mins of soil moisture readings by removing all but the first max and min when there are duplicate maxs and mins
mask = plant_readings.local_min_moist
# FIXME - Marks duplicate based on ever seeing it again, but this actually needs to be done locally in case there are two actual mins of the same value in the future
duplicates = plant_readings.loc[mask].duplicated(
subset=['soil_moist'], keep='first') # Store duplicates in boolean series
mask2 = mask & duplicates
# Add column to dataframe
plant_readings.loc[:,
'local_min_moist_dropped'] = plant_readings.loc[mask2, 'local_min_moist']
# Fill in 'None' readings with False
plant_readings.fillna(
{'local_min_moist_dropped': False}, inplace=True)
plant_readings.loc[plant_readings['local_min_moist_dropped'],
'local_min_moist'] = False # Set duplicate minimums to False
plant_readings.drop('local_min_moist_dropped',
axis=1, inplace=True) # drop uneeded column
# Determine soil moisture value when each plant is watered
# Mean of the soil moist reading everytime it is detected that that plant was watered (mean of soil_moist_min)
# avg_time_between_watering = plant_readings.loc[plant_readings.local_min_moist, 'soil_moist'].mean(
# )
# print(
# f'The average time between watering for {plant_name} is {avg_time_between_watering}')
# Use values stored in SQL table to determine when each plant should be watered
watering_value = plant_table.loc[int(num), 'soil_moist_min']
# Create "days until next watering" target variable by backfilling
# Dates plant should be watered is anytime soil moisture value is less than or equal to watering value
watering_dates = plant_readings.loc[plant_readings.soil_moist <=
watering_value, 'datetime']
plant_readings.loc[:, 'days_until_watering'] = None
plant_readings.loc[:, 'days_between_waterings'] = None
# Set "days until watering" to zero for index positions in watering_dates
plant_readings.loc[watering_dates.index, 'days_until_watering'] = 0
# Backfill from zeros
# Set first 'days until watering' value to zero
plant_readings.at[plant_readings.first_valid_index(),
'days_until_watering'] = 0
# Update values in 'days_until_watering'
counter = 'zero'
for index_label, row_series in plant_readings.iterrows():
if row_series[9] != 0: # 9th position is 'days_until_watering'
counter = 'nonzero'
if row_series[9] == 0 and counter == 'nonzero':
counter = 'zero'
backsteps = 1
                    # Backfill until the next zero or a local max is reached;
                    # otherwise the model would learn incorrectly, since the plant
                    # may have been watered before soil moisture hit watering_value
i = index_label - backsteps
while plant_readings.loc[i, 'days_until_watering'] != 0 and plant_readings.loc[i, 'local_max_moist'] == False:
plant_readings.at[i,
'days_until_watering'] = backsteps / 24
if plant_readings.loc[i - 1, 'local_max_moist'] == True:
                            # calculate days between waterings for each watering
plant_readings.at[i, 'days_between_waterings'] = plant_readings.loc[i,
'days_until_watering']
backsteps += 1
i = index_label - backsteps
# Plot soil moisture and days until watering
x = plant_readings.datetime
y = plant_readings.soil_moist
title = f'Soil Moisture Data for {plant_name} Plant'
fig_1 = plot_time_series(
x, y, title, 'Soil Moisture (%)', color='blue')
y = plant_readings.days_until_watering
plt.plot(x, y, figure=fig_1,
label='Days until watering', color='orange')
y = plant_readings[plant_readings['local_max_moist']].soil_moist
max_idx = y.index
x = plant_readings.datetime[max_idx]
plt.scatter(x, y, linewidths=1, c='red',
marker="v", figure=fig_1)
y = plant_readings[plant_readings['local_min_moist']].soil_moist
min_idx = y.index
x = plant_readings.datetime[min_idx]
plt.scatter(x, y, linewidths=1, c='green',
marker="^", figure=fig_1)
xmin = plant_readings.datetime.iloc[0]
xmax = plant_readings.datetime.iloc[-1]
plt.hlines(
watering_value, xmin, xmax, label='Average watering threshold')
plt.text(
xmax, watering_value, f'{round(watering_value)}%')
plt.legend()
# # Plot each sensor vs. days until watering to notice trends
# # Plot light vs. days until watering
# fig_2 = plt.figure()
# x = plant_readings.days_until_watering
# y = plant_readings.light
# plt.plot(x, y)
# plt.xlabel('Days Until Watering')
# plt.ylabel('Light (mmol)')
# plt.title(f'Light vs. Days Until Watering for {plant_name} Plant')
# plt.xlim(max(filter(None.__ne__, x)), 0) # reverse x-axis
# # Plot soil moisture vs. days until watering
# fig_3 = plt.figure()
# x = plant_readings.days_until_watering
# y = plant_readings.soil_moist
# plt.plot(x, y)
# plt.xlabel('Days Until Watering')
# plt.ylabel('Soil Moisture (%)')
# plt.title(
# f'Soil Moisture vs. Days Until Watering for {plant_name} Plant')
# plt.xlim(max(filter(None.__ne__, x)), 0) # reverse x-axis
# # Plot temperature vs. days until watering
# fig_4 = plt.figure()
# x = plant_readings.days_until_watering
# y = plant_readings.temp
# plt.plot(x, y)
# plt.xlabel('Days Until Watering')
# plt.ylabel('Temperature (deg C)')
# plt.title(
# f'Temperature vs. Days Until Watering for {plant_name} Plant')
# # Plot soil fertility vs. days until watering
# fig_5 = plt.figure()
# x = plant_readings.days_until_watering
# y = plant_readings.soil_fert
# plt.plot(x, y)
# plt.xlabel('Days Until Watering')
# plt.ylabel('Soil Fertility (μS/cm)')
# plt.title(
# f'Soil Fertility vs. Days Until Watering for {plant_name} Plant')
# plt.xlim(max(filter(None.__ne__, x)), 0) # reverse x-axis
# Create column for rolling sum of light to analyze if that affects days until watering
plant_readings['light_roll'] = plant_readings.light.rolling(
96).sum()
# fig_6 = plt.figure()
# x = plant_readings.days_until_watering
# y = plant_readings.light_roll
# plt.plot(x, y)
# plt.xlabel('Days Until Watering')
# plt.ylabel('Light Received Over Past 6 Days')
# plt.title(
# f'Past Light Received vs. Days Until Watering for {plant_name} Plant')
# plt.xlim(max(filter(None.__ne__, x)), 0) # reverse x-axis
# Create column for rolling sum of temperature to analyze if that affects days until watering
plant_readings['temp_roll'] = plant_readings.temp.rolling(
96).mean()
# fig_7 = plt.figure()
# x = plant_readings.days_until_watering
# y = plant_readings.temp_roll
# plt.plot(x, y)
# plt.xlabel('Days Until Watering')
# plt.ylabel('Average Temperature Over Past 6 Days')
# plt.title(
# f'Past Temperature vs. Days Until Watering for {plant_name} Plant')
# plt.xlim(max(filter(None.__ne__, x)), 0) # reverse x-axis
# Create Dummy Model which uses the average time between waterings to predict days until watering
plant_readings.loc[:, 'dummy_days_until_watering'] = None
avg_days_between_watering = plant_readings.days_between_waterings.mean()
counter = 'false'
for index_label, row_series in plant_readings.iterrows():
if row_series[7] == True: # 7th position true indicates plant was watered
counter = 'true'
plant_readings.at[index_label,
'dummy_days_until_watering'] = 0
if row_series[7] == False and counter == 'true':
counter = 'false'
forwardsteps = 0
i = index_label + forwardsteps
plant_readings.at[i-1,
'dummy_days_until_watering'] = avg_days_between_watering
try:
while plant_readings.loc[i, 'local_max_moist'] == False:
if plant_readings.loc[i-1, 'dummy_days_until_watering'] <= 0:
plant_readings.at[i-1,
'dummy_days_until_watering'] = 0
plant_readings.at[i,
'dummy_days_until_watering'] = 0
elif plant_readings.loc[i-1, 'dummy_days_until_watering'] == 0:
plant_readings.at[i,
'dummy_days_until_watering'] = 0
else:
plant_readings.at[i, 'dummy_days_until_watering'] = avg_days_between_watering - (
forwardsteps/24) # Fill forward with predictions
forwardsteps += 1
i = index_label + forwardsteps
except KeyError:
break
            # Fit the linear regression model
            # Drop rows where days until watering couldn't be calculated because
            # soil moisture never reached the watering threshold before watering
df = plant_readings.dropna(
subset=['days_until_watering', 'light_roll', 'temp_roll'])
df.set_index('datetime', inplace=True)
X = df.loc[:, ['light', 'soil_moist',
'temp', 'soil_fert']] # 4 features
# X = df.loc[:, ['soil_moist', 'soil_fert']] # 2 best features
# # uses rolling sums/avgs for light and temp
# X = df.loc[:, ['light_roll',
# 'soil_moist', 'temp_roll', 'soil_fert']]
y = df.loc[:, 'days_until_watering']
y_dumb = df.loc[:, 'dummy_days_until_watering']
# Arrow of time - everything in test set must occur after training - no shuffling!
X_train, X_test, y_train, y_test, y_dumb_train, y_dumb_test = train_test_split(
X, y, y_dumb, test_size=0.35, shuffle=False)
# Scale the training data with fit transform, and the testing data with transform only, so that each parameter counts equally toward learning
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
reg = LinearRegression()
# reg = Lasso()
# reg = Ridge()
reg.fit(X_train_scaled, y_train)
y_pred = reg.predict(X_test_scaled)
metric = 'r2' # Input MSE, MAE, or r2
reg_score, dummy_score = return_score(
y_test, y_pred, y_dumb_test, metric=metric)
print(
f"The {metric} for {plant_name} for the average days between watering ({round(avg_days_between_watering)} days) is: {round(dummy_score, 2)}")
print(
f"The {metric} for {plant_name} for the regression model is: {round(reg_score, 2)}\n")
            # Plot predicted vs. actual on the same plot
x = y_test.index
title = f'Predicted vs. Actual Days Until Watering for {plant_name} Plant'
fig_1 = plot_time_series(
x, y_pred, title, 'Predicted days Until Watering', color='red')
plt.scatter(x, y_test, figure=fig_1,
label='Actual days until watering', color='black', s=1)
plt.legend()
# Plot learning curve
tscv = TimeSeriesSplit(n_splits=10)
plot_learning_curve(reg, X, y, cv=tscv,
train_sizes=np.linspace(0.1, 1.0, 10), scoring='neg_mean_squared_error')
# TODO - Forward-looking sunlight prediction - Connect to weather prediction API (sunny/cloudy) for light predictions? Correlate to light detected in training data
plt.show()
|
[
"jschwane@gmail.com"
] |
jschwane@gmail.com
|
fced4e0818140de43fec1105cf81e34675f42c09
|
4c054c36e80cca5f79e0c4a9665faf3b838b4ce9
|
/postprocessing/archive/UWater Supply PlanningResEvalMdlgGWMNFSEGV1.15_modelPEST_GIS_Postproccase007h_PO_noRF_finalpest_postprocbin/e2__NFSEG_Zonebudget_Figures.py
|
7b37dcb97d8dc5763b2e84a062f6c792b8fac820
|
[] |
no_license
|
sjrwmd-wmm/nfseg_processing_utilities
|
534ac5070488d566388ce89a8af996e4542b14f1
|
6ede5f493f72df51b5a6d45f4179100e5beef14e
|
refs/heads/master
| 2023-03-20T22:30:19.154544
| 2021-03-11T21:51:09
| 2021-03-11T21:51:09
| 341,916,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,330
|
py
|
import arcpy
import sys
import os
import time
arcpy.env.overwriteOutput = True
start = time.clock()
import numpy as np
import fnmatch
# routines to flip arrows (trd 20171201)
def flipUP(elename):
    for gre in greList:
        if gre.name == (elename + "_arrow"):
            XPOS = gre.elementPositionX
            YPOS = gre.elementPositionY
            gre.delete()
            NEWARROW = UPARROW.clone("_clone")
            NEWARROW.elementPositionX = XPOS
            NEWARROW.elementPositionY = YPOS
            NEWARROW.name = elename + "_arrow"
def flipDOWN(elename):
    for gre in greList:
        if gre.name == (elename + "_arrow"):
            XPOS = gre.elementPositionX
            YPOS = gre.elementPositionY
            gre.delete()
            NEWARROW = DOWNARROW.clone("_clone")
            NEWARROW.elementPositionX = XPOS
            NEWARROW.elementPositionY = YPOS
            NEWARROW.name = elename + "_arrow"
def flipLEFT(elename):
    for gre in greList:
        if gre.name == (elename + "_arrow"):
            XPOS = gre.elementPositionX
            YPOS = gre.elementPositionY
            gre.delete()
            NEWARROW = LEFTARROW.clone("_clone")
            NEWARROW.elementPositionX = XPOS
            NEWARROW.elementPositionY = YPOS
            NEWARROW.name = elename + "_arrow"
def flipRIGHT(elename):
    for gre in greList:
        if gre.name == (elename + "_arrow"):
            XPOS = gre.elementPositionX
            YPOS = gre.elementPositionY
            gre.delete()
            NEWARROW = RIGHTARROW.clone("_clone")
            NEWARROW.elementPositionX = XPOS
            NEWARROW.elementPositionY = YPOS
            NEWARROW.name = elename + "_arrow"
###input derived from current location:
argfnl=sys.argv[1]
### check for more arguments in sysargv. spaces in folder names!
arg_ct=-1
for arg in sys.argv:
#print(arg)
arg_ct=arg_ct+1
if arg_ct>1:
argfnl=argfnl+" "+arg
cpath_py='/'.join(argfnl.split('\\'))
up1=os.path.abspath(os.path.join(cpath_py, os.pardir))
cpath_py_upper='/'.join(up1.split('\\'))
up2=os.path.abspath(os.path.join(up1, os.pardir))
cpath_py_base='/'.join(up2.split('\\'))
##alternative for manual (debugging)
#cpath_py="T:/NFSEGv1_1/Workspace_PEST_case006e_UPD/pest_postproc"
#cpath_py_upper="T:/NFSEGv1_1/Workspace_PEST_case006e_UPD"
#cpath_py_base="T:/NFSEGv1_1"
print("current directory: "+str(cpath_py))
print("parent directory:" + str(cpath_py_upper))
print("grandparent directory:" + str(cpath_py_base))
#find the PEST control file (*.pst) - the rest of the files get named with the pst file name in it
num_simnams=0
for file in os.listdir(cpath_py_upper):
if str(file[-4:])=='.pst':
simnam=file[:-4]
num_simnams=num_simnams+1
if num_simnams==0:
print("looked for a *.pst file - but found none - stopping")
exit()
elif num_simnams>1:
print("multiple *.pst files found in this folder - stopping")
exit()
else:
print("sim name:"+str(simnam))
gdb=cpath_py+"/"+simnam+"_ZB.gdb"
if arcpy.Exists(gdb):
print("geodatabase for this sim exists - continuing ")
#arcpy.Delete_management(gdb)#temp action for debugging
#arcpy.CreateFileGDB_management(dir_sim_proc,simnam,"CURRENT")
#exit()
else:
#arcpy.CreateFileGDB_management(cpath_py,simnam+"_ZB","9.3")
print("geodatabase for this sim does not exist - stopping - run s9 cbb_fc first at least")
###check for a sub directory called /ZB, create it if necessary
dir_ZB=str(cpath_py)+"/ZB"
if os.path.exists(dir_ZB) == False:
os.makedirs(dir_ZB)
else:
print("subdirectory / ZB directory already exists - existing files will be overwritten without any further warning")
print("root directory for ZB output: "+str(dir_ZB))
###basemap input (static)
base_gdb=cpath_py+"/templates/PEST_Baselayers.gdb"
MassBal_poly=base_gdb+"/nfseg_zonebudget_polygons"
###Setup graphics_fieldnames (onetime)
laylist=["L01_","L02_","L03_","L04_","L05_","L06_","L07_"]
graphics_fieldnames=["ZB_NAME","NUMCELLS"] # positions 0 to 1
for layer_prefix in laylist:
graphics_fieldnames.append(str(layer_prefix)+'FLOW_LATERAL_NET') # LATERAL FLOWS; positions 2 to 8
for layer_prefix in laylist:
graphics_fieldnames.append(str(layer_prefix)+'FLOW_LOWER_FACE') # DOWNWARD FLOW; positions 9 to 15
for layer_prefix in laylist:
graphics_fieldnames.append(str(layer_prefix)+'RECHARGE') # RECHARGES; positions 16 to 22
for layer_prefix in laylist:
graphics_fieldnames.append(str(layer_prefix)+'ET') # ET; positions 23 to 29
for layer_prefix in laylist:
graphics_fieldnames.append(str(layer_prefix)+'WELLS') # WELLS; positions 30 to 36
graphics_fieldnames.append('L01_DRAINS') # L1 DRAINS; position 37
graphics_fieldnames.append('L01_RIVER_LEAKAGE') # L1 RIVS(springs); position 38
graphics_fieldnames.append('L02_RIVER_LEAKAGE') # L2 RIVS (springs); position 39
graphics_fieldnames.append('L03_RIVER_LEAKAGE') # L3 RIVS (springs); position 40
for layer_prefix in laylist:
graphics_fieldnames.append(str(layer_prefix)+'HEAD_DEP_BOUNDS') # GHBs; positions 41 to 47
graphics_fieldnames.append('L01_CONSTANT_HEAD') # L1 CH (springs); position 48
#print graphics_fieldnames
for layer_prefix in laylist:
graphics_fieldnames.append(str(layer_prefix)+'MNW2') # MNW2 wells; positions 49 to 55
YEARS=['2001','2009']
for yearval2 in YEARS:
print yearval2
cbb_poly_fc=gdb+'/'+simnam+'_'+str(yearval2)+'_cbb_poly'
if arcpy.Exists(cbb_poly_fc) is False:
print "no simulation cbb polygons files found in:"+gdb
print "Steps C1,C2 must be run - exiting"
exit()
with arcpy.da.SearchCursor(cbb_poly_fc,graphics_fieldnames) as SRC2cursor:
for row3 in SRC2cursor:
#for each figure, we need to re-read the template mxd to make sure the sign changes don't get mismatched...
template_mxd = arcpy.mapping.MapDocument(cpath_py+"/templates/nfseg_zonebudget.mxd")
#template_mxd = arcpy.mapping.MapDocument("CURRENT") # dfor debugging in arcmpa
lyrList = arcpy.mapping.ListLayers(template_mxd)
eleList = arcpy.mapping.ListLayoutElements(template_mxd, "TEXT_ELEMENT")
greList = arcpy.mapping.ListLayoutElements(template_mxd,"GRAPHIC_ELEMENT")
###update the figures
for lyr in lyrList:
if lyr.name=='nfseg_zonebudget_polygons':
arcpy.mapping.Layer.replaceDataSource(lyr,gdb,"FILEGDB_WORKSPACE",simnam+'_'+str(yearval2)+'_cbb_poly')
# assign variables to the flip arrows set outside of the page
for gre in greList:
if gre.name == "UPARROW":
UPARROW = gre
if gre.name == "DOWNARROW":
DOWNARROW = gre
if gre.name == "LEFTARROW":
LEFTARROW = gre
if gre.name == "RIGHTARROW":
RIGHTARROW = gre
ZB_NAME=row3[0]
print(ZB_NAME)
CELLAREA_SQFT=row3[1]*2500*2500
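            # The conversions below turn zone totals (cubic feet per day, from
            # the cell-by-cell budget) into inches per year: divide by the zone
            # area in ft^2, then multiply by 12 in/ft and 365 d/yr; the trailing
            # *-1 flips the sign so outflows display as positive (hedged reading
            # of the original convention).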
L1_Q_LAT__inyr=row3[2]/CELLAREA_SQFT*12*365*-1
L2_Q_LAT__inyr=row3[3]/CELLAREA_SQFT*12*365*-1
L3_Q_LAT__inyr=row3[4]/CELLAREA_SQFT*12*365*-1
L4_Q_LAT__inyr=row3[5]/CELLAREA_SQFT*12*365*-1
#L5_Q_LAT__inyr=row3[6]/CELLAREA_SQFT*12*365*-1
#L6_Q_LAT__inyr=row3[7]/CELLAREA_SQFT*12*365*-1
#L7_Q_LAT__inyr=row3[8]/CELLAREA_SQFT*12*365*-1
L567_Q_LAT__inyr=(row3[6]+row3[7]+row3[8])/CELLAREA_SQFT*12*365*-1
L1_Q_LOWER__inyr=row3[9]/CELLAREA_SQFT*12*365
L2_Q_LOWER__inyr=row3[10]/CELLAREA_SQFT*12*365
L3_Q_LOWER__inyr=row3[11]/CELLAREA_SQFT*12*365
L4_Q_LOWER__inyr=row3[12]/CELLAREA_SQFT*12*365
RCH_ALL__inyr=(row3[16]+row3[17]+row3[18]+row3[19]+row3[20]+row3[21]+row3[22])/CELLAREA_SQFT*12*365
ET_ALL__inyr=(row3[23]+row3[24]+row3[25]+row3[26]+row3[27]+row3[28]+row3[29])/CELLAREA_SQFT*12*365*-1
L1_Q_WEL__inyr=(row3[30]+row3[49])/CELLAREA_SQFT*12*365*-1 #adds regular WEL and MNW2 wells
L2_Q_WEL__inyr=(row3[31]+row3[50])/CELLAREA_SQFT*12*365*-1 #adds regular WEL and MNW2 wells
L3_Q_WEL__inyr=(row3[32]+row3[51])/CELLAREA_SQFT*12*365*-1 #adds regular WEL and MNW2 wells
L4_Q_WEL__inyr=(row3[33]+row3[52])/CELLAREA_SQFT*12*365*-1 #adds regular WEL and MNW2 wells
#L5_Q_WEL__inyr=(row3[34]+row3[53])/CELLAREA_SQFT*12*365*-1 #adds regular WEL and MNW2 wells
#L6_Q_WEL__inyr=(row3[35]+row3[54])/CELLAREA_SQFT*12*365*-1 #adds regular WEL and MNW2 wells
#L7_Q_WEL__inyr=(row3[36]+row3[55])/CELLAREA_SQFT*12*365*-1 #adds regular WEL and MNW2 wells
L567_Q_WEL__inyr=(row3[34]+row3[53]+row3[35]+row3[54]+row3[36]+row3[55])/CELLAREA_SQFT*12*365*-1
L1_Q_DRN__inyr=row3[37]/CELLAREA_SQFT*12*365*-1
L1_Q_RIV__inyr=row3[38]/CELLAREA_SQFT*12*365*-1
L2_Q_RIV__inyr=row3[39]/CELLAREA_SQFT*12*365*-1
L3_Q_RIV__inyr=row3[40]/CELLAREA_SQFT*12*365*-1
L1_Q_GHB__inyr=row3[41]/CELLAREA_SQFT*12*365*-1
L2_Q_GHB__inyr=row3[42]/CELLAREA_SQFT*12*365*-1
L3_Q_GHB__inyr=row3[43]/CELLAREA_SQFT*12*365*-1
L4_Q_GHB__inyr=row3[44]/CELLAREA_SQFT*12*365*-1
#L5_Q_GHB__inyr=row3[45]/CELLAREA_SQFT*12*365*-1
#L6_Q_GHB__inyr=row3[46]/CELLAREA_SQFT*12*365*-1
#L7_Q_GHB__inyr=row3[47]/CELLAREA_SQFT*12*365*-1
L567_Q_GHB__inyr=(row3[45]+row3[46]+row3[47])/CELLAREA_SQFT*12*365*-1
L1_Q_CH__inyr=row3[48]/CELLAREA_SQFT*12*365*-1
for ele in eleList:
#Qlat, default points to the right
if ele.name == "L1 Q_LAT":
ele.text="L1 Q_LAT: "+str('{:5.2f}'.format(abs(L1_Q_LAT__inyr+L1_Q_GHB__inyr))) # plus L1 GHB here...
if (L1_Q_LAT__inyr+L1_Q_GHB__inyr)<0:
ele.text="L1 Q_LAT: "+str('{:5.2f}'.format(abs((L1_Q_LAT__inyr+L1_Q_GHB__inyr)))) # plus L1 GHB here...
flipLEFT(ele.name)
if ele.name == "L2 Q_LAT":
ele.text="L2 Q_LAT: "+str('{:5.2f}'.format(abs(L2_Q_LAT__inyr+L2_Q_GHB__inyr))) # plus L2 GHB here...
if (L2_Q_LAT__inyr+L2_Q_GHB__inyr)<0:
ele.text="L2 Q_LAT: "+str('{:5.2f}'.format(abs((L2_Q_LAT__inyr+L2_Q_GHB__inyr)))) # plus L1 GHB here...
flipLEFT(ele.name)
if ele.name == "L3 Q_LAT":
ele.text="L3 Q_LAT: "+str('{:5.2f}'.format(abs(L3_Q_LAT__inyr))) #
                    if (L3_Q_LAT__inyr)<0:
ele.text="L3 Q_LAT: "+str('{:5.2f}'.format(abs((L3_Q_LAT__inyr)))) #
flipLEFT(ele.name)
if ele.name == "L4 Q_LAT":
ele.text="L4 Q_LAT: "+str('{:5.2f}'.format(abs(L4_Q_LAT__inyr+L4_Q_GHB__inyr))) # plus L4 GHB here...
if (L4_Q_LAT__inyr+L4_Q_GHB__inyr)<0:
ele.text="L4 Q_LAT: "+str('{:5.2f}'.format(abs((L4_Q_LAT__inyr+L4_Q_GHB__inyr)))) # plus L4 GHB here...
flipLEFT(ele.name)
if ele.name == "L567 Q_LAT":
ele.text="L567 Q_LAT: "+str('{:5.2f}'.format(abs(L567_Q_LAT__inyr+L567_Q_GHB__inyr))) # plus L567 GHB here...
if (L567_Q_LAT__inyr+L567_Q_GHB__inyr)<0:
ele.text="L567 Q_LAT: "+str('{:5.2f}'.format(abs((L567_Q_LAT__inyr+L567_Q_GHB__inyr)))) # plus L1 GHB here...
flipLEFT(ele.name)
# vertical flow terms, default arrow is downward
if ele.name == "L1_Q_LOWER":
ele.text="L1 to L2: "+str('{:5.2f}'.format(abs(L1_Q_LOWER__inyr)))
if L1_Q_LOWER__inyr<0:
ele.text="L2 to L1: "+str('{:5.2f}'.format(abs(L1_Q_LOWER__inyr)))
flipUP(ele.name)
if ele.name == "L2_Q_LOWER":
ele.text="L2 to L3: "+str('{:5.2f}'.format(abs(L2_Q_LOWER__inyr)))
if L2_Q_LOWER__inyr<0:
ele.text="L3 to L2: "+str('{:5.2f}'.format(abs(L2_Q_LOWER__inyr)))
flipUP(ele.name)
if ele.name == "L3_Q_LOWER":
ele.text="L3 to L4: "+str('{:5.2f}'.format(abs(L3_Q_LOWER__inyr)))
if L3_Q_LOWER__inyr<0:
ele.text="L4 to L3: "+str('{:5.2f}'.format(abs(L3_Q_LOWER__inyr)))
flipUP(ele.name)
if ele.name == "L4_Q_LOWER":
ele.text="L4 to L5: "+str('{:5.2f}'.format(abs(L4_Q_LOWER__inyr)))
if L4_Q_LOWER__inyr<0:
ele.text="L5 to L4: "+str('{:5.2f}'.format(abs(L4_Q_LOWER__inyr)))
flipUP(ele.name)
#recharge, default down
if ele.name == "RCH":
ele.text="RCH: "+str('{:5.2f}'.format(abs(RCH_ALL__inyr)))
if RCH_ALL__inyr<0:
ele.text="RCH: "+str('{:5.2f}'.format(abs(RCH_ALL__inyr)))
flipUP(ele.name)
                #GW ET, default up; this should always be positive, but flip down just in case
if ele.name == "GW ET":
ele.text="GW ET: "+str('{:5.2f}'.format(abs(ET_ALL__inyr)))
if ET_ALL__inyr<0:
ele.text="GW ET: "+str('{:5.2f}'.format(abs(ET_ALL__inyr)))
flipDOWN(ele.name)
#WEL, default is to the right
if ele.name == "L1 Q_WEL":
ele.text="L1 Q_WEL: "+str('{:5.2f}'.format(abs(L1_Q_WEL__inyr)))
if (L1_Q_WEL__inyr)<0:
ele.text="L1 Q_WEL: "+str('{:5.2f}'.format(abs(L1_Q_WEL__inyr))) # plus L1 GHB here...
flipLEFT(ele.name)
if ele.name == "L2 Q_WEL":
ele.text="L2 Q_WEL: "+str('{:5.2f}'.format(abs(L2_Q_WEL__inyr)))
if (L2_Q_WEL__inyr)<0:
ele.text="L2 Q_WEL: "+str('{:5.2f}'.format(abs(L2_Q_WEL__inyr))) # plus L1 GHB here...
flipLEFT(ele.name)
if ele.name == "L3 Q_WEL":
ele.text="L3 Q_WEL: "+str('{:5.2f}'.format(abs(L3_Q_WEL__inyr)))
if (L3_Q_WEL__inyr)<0:
ele.text="L3 Q_WEL: "+str('{:5.2f}'.format(abs(L3_Q_WEL__inyr))) # plus L1 GHB here...
flipLEFT(ele.name)
if ele.name == "L4 Q_WEL":
ele.text="L4 Q_WEL: "+str('{:5.2f}'.format(abs(L4_Q_WEL__inyr)))
if (L4_Q_WEL__inyr)<0:
ele.text="L4 Q_WEL: "+str('{:5.2f}'.format(abs(L4_Q_WEL__inyr))) # plus L1 GHB here...
flipLEFT(ele.name)
if ele.name == "L567 Q_WEL":
ele.text="L567 Q_WEL: "+str('{:5.2f}'.format(abs(L567_Q_WEL__inyr)))
if (L567_Q_WEL__inyr)<0:
ele.text="L567 Q_WEL: "+str('{:5.2f}'.format(abs(L567_Q_WEL__inyr))) # plus L1 GHB here...
flipLEFT(ele.name)
#well flows in mgd
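                # Reverse of the in/yr conversion: in/yr / 12 * area [ft^2] / 365
                # gives ft^3/d; 7.4805 gal/ft^3 and /1e6 then give millions of
                # gallons per day (mgd).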
if ele.name == "L1_WEL_mgd":
mgdval=L1_Q_WEL__inyr/12*CELLAREA_SQFT/365*7.4805/1000000
mgdval=abs(mgdval)
ele.text="(L1 Q_WEL: "+str('{:5.2f}'.format(mgdval))+ " mgd)"
if ele.name == "L2_WEL_mgd":
mgdval=L2_Q_WEL__inyr/12*CELLAREA_SQFT/365*7.4805/1000000
mgdval=abs(mgdval)
ele.text="(L2 Q_WEL: "+str('{:5.2f}'.format(mgdval))+ " mgd)"
if ele.name == "L3_WEL_mgd":
mgdval=L3_Q_WEL__inyr/12*CELLAREA_SQFT/365*7.4805/1000000
mgdval=abs(mgdval)
ele.text="(L3 Q_WEL: "+str('{:5.2f}'.format(mgdval))+ " mgd)"
if ele.name == "L4_WEL_mgd":
mgdval=L4_Q_WEL__inyr/12*CELLAREA_SQFT/365*7.4805/1000000
mgdval=abs(mgdval)
ele.text="(L4 Q_WEL: "+str('{:5.2f}'.format(mgdval))+ " mgd)"
if ele.name == "L567_WEL_mgd":
mgdval=L567_Q_WEL__inyr/12*CELLAREA_SQFT/365*7.4805/1000000
mgdval=abs(mgdval)
ele.text="(L567 Q_WEL: "+str('{:5.2f}'.format(mgdval))+ " mgd)"
                #BCs, default is to the left
#L1_Q_DRN__inyr = -99999
#L1_Q_RIV__inyr = -99999
#L2_Q_RIV__inyr = -99999
#L3_Q_RIV__inyr = -99999
#L3_Q_GHB__inyr = -99999
#L1_Q_CH__inyr = -99999
if ele.name == "L1 DRN":
ele.text="L1 DRN: "+str('{:5.2f}'.format(abs(L1_Q_DRN__inyr)))
if (L1_Q_DRN__inyr)<0:
ele.text="L1 DRN: "+str('{:5.2f}'.format(abs(L1_Q_DRN__inyr)))
flipRIGHT(ele.name)
if ele.name == "L1 RIV":
ele.text="L1 RIV: "+str('{:5.2f}'.format(abs(L1_Q_RIV__inyr)))
if (L1_Q_RIV__inyr)<0:
ele.text="L1 RIV: "+str('{:5.2f}'.format(abs(L1_Q_RIV__inyr)))
flipRIGHT(ele.name)
if ele.name == "L2 RIV":
ele.text="L2 RIV: "+str('{:5.2f}'.format(abs(L2_Q_RIV__inyr)))
if (L2_Q_RIV__inyr)<0:
ele.text="L2 RIV: "+str('{:5.2f}'.format(abs(L2_Q_RIV__inyr)))
flipRIGHT(ele.name)
if ele.name == "L3 RIV":
ele.text="L3 RIV: "+str('{:5.2f}'.format(abs(L3_Q_RIV__inyr)))
if (L3_Q_RIV__inyr)<0:
ele.text="L3 RIV: "+str('{:5.2f}'.format(abs(L3_Q_RIV__inyr)))
flipRIGHT(ele.name)
#if ele.text[:6] == "L1 GHB":
# ele.text="L1 GHB: "+str('{:5.2f}'.format(L1_Q_GHB__inyr)) #merged with L1 QLAT
#if ele.text[:6] == "L2 GHB":
# ele.text="L2 GHB: "+str('{:5.2f}'.format(L2_Q_GHB__inyr)) #merged with L2 QLAT
if ele.name == "L3 GHB":
ele.text="L3 GHB: "+str('{:5.2f}'.format(abs(L3_Q_GHB__inyr))) # keep separate
if (L3_Q_GHB__inyr)<0:
ele.text="L3 GHB: "+str('{:5.2f}'.format(abs(L3_Q_GHB__inyr)))
flipRIGHT(ele.name)
#if ele.text[:6] == "L4 GHB":
# ele.text="L4 GHB: "+str('{:5.2f}'.format(L4_Q_GHB__inyr)) #merged with L567 QLAT
#if ele.text[:8] == "L567 GHB": #
# ele.text="L567 GHB: "+str('{:5.2f}'.format(L567_Q_GHB__inyr)) # merged with L567 QLAT
if ele.name == "L1 CH":
ele.text="L1 CH: "+str('{:5.2f}'.format(abs(L1_Q_CH__inyr)))
if (L1_Q_CH__inyr)<0:
ele.text="L1 CH: "+str('{:5.2f}'.format(abs(L1_Q_CH__inyr)))
flipRIGHT(ele.name)
if ele.text[:9] == "Sim Name:":
ele.text="Sim Name: "+str(simnam)+" "+str(yearval2)
if ele.text[:8] == "ZB_NAME:":
ele.text="ZB_NAME: "+str(ZB_NAME)+" Number of Cells: " + str(row3[1]) + " Area Per Cell: 6,250,500 SF"
if ele.text[:15] == "MassBal Polygon":
ele.text="MassBal Polygon: "+str(ZB_NAME)
#zoom to extent
sel_exp='ZB_NAME ='+"'"+str(ZB_NAME)+"'"
df = arcpy.mapping.ListDataFrames(template_mxd)[0]
for lyr in lyrList:
if lyr.name == "nfseg_zonebudget_polygons":
arcpy.mapping.Layer.replaceDataSource(lyr,gdb,"FILEGDB_WORKSPACE",simnam+'_'+str(yearval2)+'_cbb_poly')
lyr.definitionQuery = sel_exp
arcpy.SelectLayerByAttribute_management(lyr,"NEW_SELECTION",sel_exp)
cal_exp='"'+dir_ZB+"/"+simnam+"__massbal_"+str(ZB_NAME)+"_"+str(yearval2)+".jpg"+'"'
arcpy.CalculateField_management(lyr,"ZB_file",cal_exp,"VB","")
df.zoomToSelectedFeatures()
arcpy.SelectLayerByAttribute_management(lyr,"CLEAR_SELECTION")
arcpy.RefreshActiveView()
arcpy.mapping.ExportToJPEG(template_mxd,dir_ZB+"/"+simnam+"__massbal_"+str(ZB_NAME)+"_"+str(yearval2)+".jpg",resolution=300)
            #2017.06.09 trd: this was used for the model-wide figure so that a quick manually edited version could be made
#template_mxd.saveACopy(cpath_py+"/"+simnam+"__massbal_"+str(ZB_NAME)+"_"+str(yearval2)+".mxd","10.0")
lyr.definitionQuery = None
#print("quickstop")
#exit()
print("completed the entire process")
stop = time.clock()
if (stop-start)>3600:
    print("process completed in: " + str((stop-start)/3600)+' hours.')
elif (stop-start)>60:
    print("process completed in: " + str((stop-start)/60)+' minutes.')
else:
    print("process completed in: " + str((stop-start))+' seconds.')
|
[
"pbremner99@gmail.com"
] |
pbremner99@gmail.com
|
b6b7d1d5d578a8190c28f91375647fe6f1c28d88
|
5c3b77012d86ee5d0b5746f1c17e6284f28ae667
|
/custom/addons-hugo/Modulos especificos/product_cost/models/product.py
|
130bc990cbfc51e96c0b61d6bebf45993c1b4e43
|
[] |
no_license
|
apesquero/Odoo-10-PyCharm
|
c7e813979cf9bd87d23b5d2b680ba04a784c8f0d
|
e8a17a97aab99d9133e50bbbfd8bfbe689995f44
|
refs/heads/master
| 2018-10-21T05:48:59.770463
| 2018-09-05T18:45:49
| 2018-09-05T18:45:49
| 120,489,311
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,413
|
py
|
# -*- coding: utf-8 -*-
import numbers
from openerp import models, fields, exceptions, api, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
from openerp.addons.product_price_cost_base.models.simpleeval import simple_eval, InvalidExpression
class ProductCostExtra(models.Model):
_name = 'product.cost.extra'
product_template = fields.Many2one(
comodel_name='product.template')
value = fields.Many2one(
comodel_name='product.attribute.value', string='Value')
attribute = fields.Many2one(
comodel_name='product.attribute', related='value.attribute_id',
string='Attribute')
cost_extra = fields.Float(
string='Cost Extra', digits_compute=dp.get_precision('Product Price'),
default=0.0)
cost_percent_extra = fields.Float(
string='Cost Percent Extra', digits_compute=dp.get_precision('Product Price'),
default=0.0)
class ProductTemplate(models.Model):
_inherit = 'product.template'
@api.depends('table_cost_items')
def _compute_table_cost_items_len(self):
for product in self:
product.table_cost_items_len = len(product.table_cost_items)
@api.depends('table_cost_items1d')
def _compute_table_cost_items1d_len(self):
for product in self:
product.table_cost_items1d_len = len(product.table_cost_items1d)
@api.depends('attribute_line_ids')
def _compute_possible_range_num_cost_attribute(self):
for product in self:
product.possible_range_num_cost_attribute = product.attribute_line_ids.mapped('attribute_id'). \
filtered(lambda a: a.attr_type in ['range','numeric']).ids
cost_extras = fields.One2many(
comodel_name='product.cost.extra', inverse_name='product_template')
    #we need standard_price_alias because standard_price cannot be changed under certain circumstances
standard_price_alias = fields.Float(
string='Base Cost', digits_compute=dp.get_precision('Product Price'),
default=0.0, groups="base.group_user",
help="Base cost of the product, used if the variant does not exists.")
cost_mode = fields.Selection(
selection=[
('standard', 'Standard'),
('table1d', 'Table1D'),
('table2d', 'Table2D'),
('area', 'Area'),
('formula', 'Formula')],
string='Cost Mode', required=True,
default='standard')
cost_formula_eval = fields.Char(
string='Cost Formula', default='')
possible_range_num_cost_attribute = fields.Many2many(
comodel_name='product.attribute', compute=_compute_possible_range_num_cost_attribute)
table_cost_attribute_x = fields.Many2one(
comodel_name='product.attribute', string='Attribute X')
table_cost_attribute_y = fields.Many2one(
comodel_name='product.attribute', string='Attribute Y')
table_cost_items = fields.One2many(
comodel_name='template.table.cost.item', inverse_name='template_id')
table_cost_items_len = fields.Integer(
compute=_compute_table_cost_items_len, string="Items loaded")
table_cost_items1d = fields.One2many(
comodel_name='template.table.cost.item.one', inverse_name='template_id') #TODO get rid of this like in product_list_price
table_cost_items1d_len = fields.Integer(
compute=_compute_table_cost_items1d_len, string="Items loaded")
area_cost_attribute_x = fields.Many2one(
comodel_name='product.attribute', string='First attribute')
area_x_factor = fields.Float(
digits_compute=dp.get_precision('Product Price'), default=1.0)
area_x_sum = fields.Float(
digits_compute=dp.get_precision('Product Price'), default=0.0)
area_cost_attribute_y = fields.Many2one(
comodel_name='product.attribute', string='Second attribute')
area_y_factor = fields.Float(
digits_compute=dp.get_precision('Product Price'), default=1.0)
area_y_sum = fields.Float(
digits_compute=dp.get_precision('Product Price'), default=0.0)
area_cost_factor = fields.Float(
string="Factor", digits_compute=dp.get_precision('Product Price'),
default=1.0)
area_min_cost = fields.Float(
string="Minimum cost", digits_compute=dp.get_precision('Product Price'),
default=0.0)
#Extras button
@api.multi
def action_open_cost_extras(self):
self.ensure_one()
extra_ds = self.env['product.cost.extra']
for line in self.attribute_line_ids:
for value in line.value_ids:
extra = extra_ds.search([('product_template', '=', self.id),
('value', '=', value.id)])
if not extra:
extra = extra_ds.create({
'product_template': self.id,
'value': value.id,
})
extra_ds |= extra
all_cost_extra = self.env['product.cost.extra']. \
search([('product_template', '=', self.id)])
        remove_extra = all_cost_extra - extra_ds
remove_extra.unlink()
result = self._get_act_window_dict(
'product_cost.product_cost_extra_action')
return result
@api.multi
def _apply_extra_cost_by_mode(self):
self.ensure_one()
return True
@api.model
def _price_get(self, products, ptype='list_price'):
res = {}
#if 'product_attribute_values' in self._context and ptype == 'standard_price':
if products[0]._name == "product.template" and ptype == 'standard_price':
attr_values = self._context.get('product_attribute_values')
for product in products:
if not attr_values:
attr_values = product.get_minimum_attribute_values_dicts()
cost = product.get_cost_from_attribute_values(attr_values)
if product._apply_extra_cost_by_mode():
cost_extra, cost_percent_extra = product.get_all_cost_extra_from_values(attr_values)
cost += cost * cost_percent_extra / 100
cost += cost_extra
res.update({ product.id: cost })
if 'uom' in self._context:
res[product.id] = self.env['product.uom']._compute_price(self.env.cr, self.env.uid,
product.uom_id.id, res[product.id], self._context['uom'])
# Convert from current user company currency to asked one
if 'currency_id' in self._context:
currency_id = self.env['res.users'].browse(self.env.uid).company_id.currency_id.id
# Take current user company currency.
# This is right cause a field cannot be in more than one currency
res[product.id] = self.env['res.currency'].compute(self.env.cr, self.env.uid, currency_id,
self._context['currency_id'], res[product.id], context=self._context)
else:
res = super(ProductTemplate, self)._price_get(products, ptype)
if ptype == 'standard_price' and products[0]._name == "product.product":
for product in products:
res[product.id] += res[product.id] * product.cost_percent_extra / 100
res[product.id] += product.cost_extra
return res
@api.multi
def write(self, vals):
old_cost_modes = {}
if 'cost_mode' in vals:
for template in self:
old_cost_modes[template.id] = template.cost_mode
res = super(ProductTemplate, self).write(vals)
if 'standard_price_alias' in vals:
for template in self:
for product in template.product_variant_ids:
product.standard_price_alias = template.standard_price_alias
if 'cost_mode' in vals: #TODO check res for failure?
for template in self:
old_cost_mode = old_cost_modes[template.id]
if old_cost_mode != template.cost_mode:
if old_cost_mode == 'table2d':
template.table_cost_items.unlink()
if template.cost_mode != 'table1d':
template.write({'table_cost_attribute_x': False,
'table_cost_attribute_y': False,})
else:
template.table_cost_attribute_y = False
elif old_cost_mode == 'table1d':
template.table_cost_items1d.unlink()
if template.cost_mode != 'table2d':
template.table_cost_attribute_x = False
elif old_cost_mode == 'formula':
template.cost_formula_eval = ''
#elif old_cost_mode == 'standard':
# template.standard_price_alias = 0.0
elif old_cost_mode == 'area':
template.write({'area_cost_attribute_x': False,
'area_x_factor': 1.0,
'area_x_sum': 0.0,
'area_cost_attribute_y': False,
'area_y_factor': 1.0,
'area_y_sum': 0.0,
'area_cost_factor': 1.0,
'area_min_cost': 0.0,})
return res
@api.multi
def get_all_cost_extra_from_values(self, attr_values):
self.ensure_one()
total_cost_extra = 0.0
total_cost_percent_extra = 0.0
if isinstance(attr_values, list):
for value_dict in attr_values:
cost_extra_ds = self.cost_extras.filtered(lambda ce: ce.value.id == value_dict.get('value'))
if cost_extra_ds:
total_cost_extra += cost_extra_ds[0].cost_extra
total_cost_percent_extra += cost_extra_ds[0].cost_percent_extra
else:
for line in attr_values:
cost_extra_ds = self.cost_extras.filtered(lambda ce: ce.value == line.value)
if cost_extra_ds:
total_cost_extra += cost_extra_ds[0].cost_extra
total_cost_percent_extra += cost_extra_ds[0].cost_percent_extra
return total_cost_extra, total_cost_percent_extra
#Cost mode router
@api.multi
def get_cost_from_attribute_values(self, attribute_values):
self.ensure_one()
if self.cost_mode == 'standard':
product = self.env['product.product']._product_find(self, attribute_values)
if not product:
            return self.sudo().standard_price_alias #need sudo because users who are not signed in do not have read permission on product_template
return product.standard_price_alias
elif self.cost_mode == 'table1d':
return self._get_table1d_cost(attribute_values)
elif self.cost_mode == 'table2d':
return self._get_table2d_cost(attribute_values)
elif self.cost_mode == 'area':
return self._get_area_cost(attribute_values)
elif self.cost_mode == 'formula':
if isinstance(attribute_values, list):
return self._get_formula_cost_from_dicts(attribute_values)
return self._get_formula_cost_from_proclines(attribute_values)
else:
raise exceptions.Warning(_("Unknown cost mode"))
#Formula Cost methods
@api.multi
def _get_formula_cost_from_proclines(self, attribute_values):
self.ensure_one()
names, functions = self._get_init_names_and_function()
for attr_line in attribute_values:
if attr_line.attr_type == 'range':
names[self.spaceto_(attr_line.attribute.name)] = attr_line.custom_value
elif attr_line.attr_type == 'numeric':
names[self.spaceto_(attr_line.attribute.name)] = attr_line.value.numeric_value
return simple_eval(self.cost_formula_eval, names=names, functions=functions)
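    # Example (hypothetical attribute names): with numeric attributes "Width" and
    # "Height", cost_formula_eval could be "Width * Height * 0.5". spaceto_(),
    # defined in a parent module, presumably maps spaces in attribute names to
    # underscores so they are valid names for simple_eval.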
@api.multi
def _get_formula_cost_from_dicts(self, attribute_dict_values):
self.ensure_one()
names, functions = self._get_init_names_and_function()
for attr_dict in attribute_dict_values:
attr_line_ds = self.attribute_line_ids.filtered(lambda l: l.attribute_id.id == attr_dict.get('attribute'))
if not attr_line_ds:
raise exceptions.Warning(_("Could not find attribute in product."))
if attr_line_ds[0].attr_type == 'range':
numeric_value = attr_dict.get('r', False) or attr_dict.get('custom_value')
elif attr_line_ds[0].attr_type == 'numeric':
value_ds = attr_line_ds[0].attribute_id.value_ids.filtered(lambda v: v.id == attr_dict.get('value'))
if not value_ds:
raise exceptions.Warning(_("Could not find value in attribute."))
numeric_value = value_ds[0].numeric_value
else:
continue
if numeric_value is None:
raise exceptions.Warning(_("Numeric value is None."))
if not isinstance(numeric_value, numbers.Number):
raise exceptions.Warning(_("Numeric value is not a number"))
names[self.spaceto_(attr_line_ds[0].attribute_id.name)] = numeric_value
return simple_eval(self.cost_formula_eval, names=names, functions=functions)
@api.onchange('cost_formula_eval')
def onchange_cost_formula_eval(self):
if not self.cost_formula_eval or len(self.cost_formula_eval) <= 0:
return
names, functions = self._get_init_names_and_function()
for attr_line in self.attribute_line_ids:
if attr_line.attribute_id.attr_type in ('range', 'numeric'):
names[self.spaceto_(attr_line.attribute_id.name)] = 1
try:
simple_eval(self.cost_formula_eval, names=names, functions=functions)
except SyntaxError, reason:
raise UserError(_('Error in the expression of the quantity formula\nReason: %s') % (reason,))
except InvalidExpression, reason:
raise UserError(_('Error in the quantity formula\nReason: %s') % (reason,))
#Table2d Cost methods
@api.multi
def _get_table2d_cost(self, attribute_values):
self.ensure_one()
x_value = self._obtain_numeric_value(attribute_values, self.table_cost_attribute_x)
y_value = self._obtain_numeric_value(attribute_values, self.table_cost_attribute_y)
table_item = self.table_cost_items.search([('template_id', '=', self.id),
('x_upper', '>=', x_value),
('x_lower', '<', x_value),
('y_upper', '>=', y_value),
('y_lower', '<', y_value)])
if not table_item:
table_item = self.table_cost_items.search([('template_id', '=', self.id),
('x_upper', '>=', x_value),
('x_lower', '<=', x_value),
('y_upper', '>=', y_value),
('y_lower', '<=', y_value)])
if not table_item:
raise exceptions.Warning(_("Could not find cost for those values (out of range)"))
return table_item[0].cost
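    # Note on the double search: the first pass treats x_lower/y_lower as exclusive
    # bounds so a value sitting exactly on a cell boundary matches a single cell;
    # the inclusive retry catches values equal to the table minimum.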
#Table1d Cost methods
@api.multi
def _get_table1d_cost(self, attribute_values):
self.ensure_one()
x_value = self._obtain_numeric_value(attribute_values, self.table_cost_attribute_x)
table_item = self.table_cost_items1d.search([('template_id', '=', self.id),
('x_upper', '>=', x_value),
('x_lower', '<', x_value),])
if not table_item:
table_item = self.table_cost_items1d.search([('template_id', '=', self.id),
('x_upper', '>=', x_value),
('x_lower', '<=', x_value),])
if not table_item:
raise exceptions.Warning(_("Could not find cost for those values (out of range)"))
return table_item[0].cost
#Area cost methods
@api.multi
def _get_area_cost(self, attribute_values):
self.ensure_one()
x_value = self._obtain_numeric_value(attribute_values, self.area_cost_attribute_x)
y_value = self._obtain_numeric_value(attribute_values, self.area_cost_attribute_y)
res_cost = ((x_value * self.area_x_factor) + self.area_x_sum) * \
((y_value * self.area_y_factor) + self.area_y_sum) * self.area_cost_factor
return max(self.area_min_cost, res_cost)
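    # Area mode in one line:
    #   cost = max(area_min_cost, (x*area_x_factor + area_x_sum)
    #                             * (y*area_y_factor + area_y_sum) * area_cost_factor)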
class ProductProduct(models.Model):
_inherit = 'product.product'
def _compute_all_cost_extra(self): #TODO cost extra not in price_ids anymore
cost_extra_env = self.env['product.cost.extra']
for product in self:
cost_extra = 0.0
cost_percent_extra = 0.0
for value in product.attribute_value_ids:
cost_extra_ds = cost_extra_env.search([('product_template', '=', product.product_tmpl_id.id),
('value', '=', value.id)])
if cost_extra_ds:
cost_extra += cost_extra_ds[0].cost_extra
cost_percent_extra += cost_extra_ds[0].cost_percent_extra
product.cost_extra = cost_extra
product.cost_percent_extra = cost_percent_extra
cost_extra = fields.Float(
compute=_compute_all_cost_extra, digits_compute=dp.get_precision('Product Price'))
cost_percent_extra = fields.Float(
compute=_compute_all_cost_extra, digits_compute=dp.get_precision('Product Price'))
    #we need standard_price_alias because standard_price cannot be changed under certain circumstances
standard_price_alias = fields.Float(
related='standard_price', #store=True,
string='Cost', #digits_compute=dp.get_precision('Product Price'),
#default=0.0, groups="base.group_user",
help="Cost of the product, in the default unit of measure of the product..")
@api.model
def create(self, values):
product = super(ProductProduct, self).create(values)
product.standard_price_alias = product.product_tmpl_id.standard_price_alias
return product
|
[
"apesquero@gmail.com"
] |
apesquero@gmail.com
|
7b526764995917422d721659a971462b18bfe82b
|
eca0357843c9d3f99d6a6c42f300f0a1910f4ab4
|
/for-one-linea/main.py
|
42e49b4d0a255a5a48ba6496669f8bfbc06a767a
|
[] |
no_license
|
cokkike88/python-kok
|
8184cd3234e2fe6d281936d96f24b251c4e72266
|
1a994a6f1383b1df3e9f1509e1dedd14c8238642
|
refs/heads/main
| 2023-04-15T18:26:09.830874
| 2021-04-22T07:22:10
| 2021-04-22T07:22:10
| 360,268,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
# exclusive = 'exclusive-value'
exclusive = None
durable = 'durable-value'
auto_delete = None
options = {k: v for k, v in
{'name': name, 'exclusive': exclusive, 'durable': durable, 'auto_delete': auto_delete}.items()
if v is not None}
print(options)
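    # The comprehension keeps only keys whose value is not None, i.e. only the
    # options that were actually supplied.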
required_fields = ['name', 'lname', 'phone', 'email']
data = {
"name": 'oscar',
"phone": '55221458'
}
missing_fields = [f for f in required_fields if f not in data.keys()]
print(missing_fields)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
[
"cok_eight@hotmail.com"
] |
cok_eight@hotmail.com
|
af0c3c142ca16e37586048b4f96ec76da1cca252
|
35010e85657b3a943738ca8b04a317a1de3caf04
|
/produto/migrations/0003_alter_variacao_options.py
|
0f07a949eaaab7b74a91b757d5c7adf8c77cf65c
|
[] |
no_license
|
willgvfranco/django-ecommerce
|
00eb6a318ebb2d31184f4a066f4c81d661d7a86c
|
97890f159fa10d46a35daf89a682132379f8ffba
|
refs/heads/master
| 2023-05-11T09:42:10.283298
| 2021-05-20T02:54:59
| 2021-05-20T02:54:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# Generated by Django 3.2.3 on 2021-05-15 01:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('produto', '0002_variacao'),
]
operations = [
migrations.AlterModelOptions(
name='variacao',
options={'verbose_name': 'Variação', 'verbose_name_plural': 'Variações'},
),
]
|
[
"williamgvfranco@gmail.com"
] |
williamgvfranco@gmail.com
|
876b621d2137ffd49f796cb5050cb9512cff100e
|
5eeaac5d3afe03cb065ad514054c3f62ecbca750
|
/HRMS/migrations/0012_auto_20191222_1651.py
|
c8b9f4cbcd3bfb225e1e8823fb4b37799a13e5e1
|
[] |
no_license
|
InvokerAndrey/graduate_work
|
e8de1b7e83c4a3b609b3746533cd1c871f3765a4
|
62104a9a2209312b6c035eef49bf0b72e62866b8
|
refs/heads/master
| 2023-07-31T10:06:20.492259
| 2021-09-27T09:11:11
| 2021-09-27T09:14:03
| 410,814,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# Generated by Django 3.0 on 2019-12-22 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('HRMS', '0011_auto_20191221_2326'),
]
operations = [
migrations.AlterField(
model_name='position',
name='education_required',
field=models.DecimalField(choices=[(0, 'Образование не требуется'), (1, 'Среднее образование'), (2, 'Высшее образование')], decimal_places=0, max_digits=3, null=True),
),
migrations.AlterField(
model_name='position',
name='experience_required',
field=models.DecimalField(choices=[(0, 'Опыт работы не требуется'), (1, '1 год'), (2, '2 года'), (3, '3 года'), (4, '4 года'), (5, '5 и более лет')], decimal_places=0, max_digits=3, null=True),
),
migrations.AlterField(
model_name='position',
name='language_level',
field=models.DecimalField(blank=True, choices=[(1, 'A1'), (2, 'A2'), (3, 'B1'), (4, 'B2'), (5, 'C1'), (6, 'C2')], decimal_places=0, max_digits=3, null=True),
),
]
|
[
"dydyshko1999@gmail.com"
] |
dydyshko1999@gmail.com
|
bd559329e37823999dbf2b5a51e5cec08284346c
|
f3d7a8aaf2b4871eb0f292be0d03ee9b10cf8803
|
/views.py
|
34333de35b6cdf47efe99aad0176256cb40243bb
|
[] |
no_license
|
dazbash/FlaskBlog
|
f990ba9f814ce7728240ac9ee43f21176a3447ad
|
8d29d89b5528725810c9bc1c77205b2c1561e4f1
|
refs/heads/master
| 2022-10-16T00:12:32.433183
| 2020-01-23T07:59:38
| 2020-01-23T07:59:38
| 230,219,291
| 0
| 0
| null | 2022-09-16T18:16:22
| 2019-12-26T07:43:47
|
Python
|
UTF-8
|
Python
| false
| false
| 127
|
py
|
from flask import render_template
from app import app
@app.route('/')
def index():
return render_template('index.html')
|
[
"javadolaemeh49@gmail.com"
] |
javadolaemeh49@gmail.com
|
e206cde9dcb6e4f5d1415f96160275835c6c3060
|
d17706b3061c93b9f83f2e9c28e58eadcbe058cb
|
/problems/8.py
|
2e897b02fca31cc20f81382321c199a4d5e18149
|
[] |
no_license
|
Luctia/project-euler
|
b89c75a40c175216c9db00288edfcceb19e77ae3
|
56c207b37d67dcb9ae24e2afe61f4b145d69773a
|
refs/heads/master
| 2023-08-25T12:58:49.430470
| 2021-10-30T17:03:20
| 2021-10-30T17:03:20
| 391,651,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
import math
NUMBER = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
def find_largest_product(n):
    digits = str(NUMBER)
    largest = 0
    # slide an n-digit window across the number, including the final window
    for position in range(len(digits) - n + 1):
        window = digits[position:position + n]
        if '0' not in window:  # a zero anywhere makes the product zero
            product = math.prod(int(x) for x in window)
            largest = max(largest, product)
    return largest
print(find_largest_product(13))
|
[
"timmerman.luc1999@gmail.com"
] |
timmerman.luc1999@gmail.com
|
d930808b9a505f0e701be59900a756fcee2fdfc0
|
e68247f79e7bcee4c4ec6293efb3e017fe5a7047
|
/Convolutional/datasets.py
|
456b324865f908cbce7021ce47849fc0c2695bf1
|
[] |
no_license
|
npbcl/icml21
|
3a2dc9e555fdb3eac1f96da25ea1b9ef381901d0
|
fce65915de54c7ad95b9ece0b960bc465fcd08d0
|
refs/heads/master
| 2023-03-01T13:27:06.047074
| 2021-02-10T09:41:56
| 2021-02-10T09:41:56
| 238,905,420
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,320
|
py
|
from __future__ import print_function
import os
import pickle
import numpy as np
import tensorflow as tf
def run_all():
cpath = os.getcwd()
    # create the datasets directory if it does not exist, then work inside it
    if not os.path.isdir('datasets'):
        os.mkdir('datasets')
    os.chdir(os.path.join(os.getcwd(), 'datasets'))
print('\nDownloading the Cifar100 dataset')
data = tf.keras.datasets.cifar100.load_data(
label_mode='fine'
)
data_train, data_test = data
X_train, Y_train = data_train
X_test, Y_test = data_test
indexes = np.arange(100)
# np.random.shuffle(indexes)
all_sets = []
for i in range(20):
labels = indexes[i*5:(i+1)*5]
train_index = []
for l in labels:
train_index += list(np.where(Y_train == l)[0])
test_index = []
for l in labels:
test_index += list(np.where(Y_test == l)[0])
bxtrain, bytrain = X_train[train_index],Y_train[train_index]
bxtest, bytest = X_test[test_index],Y_test[test_index]
cset = [bxtrain,bytrain,bxtest,bytest]
all_sets.append(cset)
pickle.dump(all_sets, open('split_cifar_100.pkl', 'wb'))
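    # Each pickled entry is [X_train, Y_train, X_test, Y_test] for one 5-class
    # task; the 20 tasks cover all 100 fine labels in index order (the shuffle
    # above is commented out).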
print('\nDownloading the Cifar10 dataset')
data = tf.keras.datasets.cifar10.load_data()
data_train, data_test = data
X_train, Y_train = data_train
X_test, Y_test = data_test
indexes = np.arange(10)
# np.random.shuffle(indexes)
all_sets = []
    set_labels = np.arange(10)  # alternative ordering: [2,0,1,5,3,7,6,4,8,9]
for i in range(5):
labels = [set_labels[2*i], set_labels[2*(i)+1]]
train_index = []
for l in labels:
train_index += list(np.where(Y_train == l)[0])
test_index = []
for l in labels:
test_index += list(np.where(Y_test == l)[0])
bxtrain, bytrain = X_train[train_index],Y_train[train_index]
bxtest, bytest = X_test[test_index],Y_test[test_index]
cset = [bxtrain,bytrain,bxtest,bytest]
all_sets.append(cset)
pickle.dump(all_sets, open('split_cifar_10.pkl', 'wb'))
os.chdir(cpath)
|
[
"kumarabh@microsoft.com"
] |
kumarabh@microsoft.com
|
392828c8ff7b1d323f4dda6c0d105bde46b4976c
|
608b5afd3af1b4dbc5c6778f3f3e71ff7652054e
|
/setup.py
|
900a51ed593258c536d295e34b5fb1fdbb43144d
|
[
"MIT"
] |
permissive
|
shresbi/py-web
|
6bf53bc2aef048a2465d7478e7b69ea351de089d
|
50a3899b82057840689fcf2e9e4af070d58a43fe
|
refs/heads/master
| 2020-03-12T02:22:23.922839
| 2017-11-29T18:55:01
| 2017-11-29T18:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
import setuptools
setuptools.setup(
version="0.0.1",
license='mit',
name='py-web',
author='nathan todd-stone',
author_email='me@nathants.com',
url='http://github.com/nathants/py-web',
packages=['web'],
install_requires=['tornado==4.5.1'],
description='a minimal, data centric web library'
)
|
[
"me@nathants.com"
] |
me@nathants.com
|
a9546a02e4ffcc7b91841a3e10a7c6a47b379b6a
|
6c1c4284cfc66216a5f8faf6a9b90c3192f3d61e
|
/com/isa/python/chapter6/Strings.py
|
365aafa821af37d3128285030d8be236b1385d41
|
[] |
no_license
|
isaolmez/core_python_programming
|
2c316f1dbf2a83175d2d4ab5fe01651c99935263
|
6275cfa3701e573c7e5f9e4d422488611786c039
|
refs/heads/master
| 2021-01-17T19:23:15.786220
| 2016-07-09T13:41:34
| 2016-07-09T13:41:34
| 56,500,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
## Strings are immutable, so you cannot modify an existing string object; you can only create a new string object whose contents are taken from the source string and then manipulated.
first = "a"
second = 'a'
print first is second
listStr = str(range(3))
print listStr
text = "isaolmez"
print text[1:]
s1 = "a"
s2 = s1
s1 = "b" ## This is reference assignment so they refer to different string objects, no magic.
print s1 is s2
# If strings had a mutating method, changes made through s1 would also have been visible through s2 while they still referred to the same object. But there is no such method - strings are immutable.
del s1
|
[
"isaolmez@gmail.com"
] |
isaolmez@gmail.com
|
fbc66d973a75b1d01d107cd5cfb5faf9931848fa
|
17f5fac9cd23dd705973e1439db551658f6ceb52
|
/dz5/task1.py
|
4c57d78a647d8ce386e48f4ab097091f37057daf
|
[] |
no_license
|
rozmeloz/PY
|
8d3fae1a386d22b18cad3834140244d64fbe1094
|
70773ae885b27b392ab6006698c36685130015fc
|
refs/heads/master
| 2021-01-06T16:45:18.043957
| 2020-02-10T22:38:54
| 2020-02-10T22:38:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
'''
About the Incas (quipu arithmetic)
'''
s = [c for c in input('Enter: ')]
s.append('.')
i = 0
knot = 0
prepend = ['(', ')', '+', '-', '/', '*']
p = []
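# Encoding assumed by this parser: a run of '@' is one digit (its knot count),
# '~' separates digits, and a doubled '~~' marks a zero digit.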
while i <= len(s)- 2:
if s[i] in prepend :
p.append(s[i])
elif (s[i] == '@') and (s[i+1] == '@'):
knot += 1
elif (s[i] == '@') and (s[i+1] != '@'):
knot += 1
p.append(knot)
knot = 0
elif (s[i] == '~') and (s[i-1] == '~'):
p.append(0)
i += 1
#print (p)
p1 = []
for num in p:
p1.append(str(num))
primer = ''.join(p1)
otvet = eval(primer)
otvet = int(otvet)
print (otvet)
otvet = str(otvet)
#print (type(otvet))
quipu = []
i = 0
while i <= (len (otvet) -1):
if (int(otvet[i]) in range (1, 10)) and (i < (len (otvet) -1)):
q = '@' * int(otvet [i])
quipu.append(q)
quipu.append('~')
elif (int(otvet[i]) in range (1, 10)) and (i == (len (otvet) -1)):
q = '@' * int(otvet [i])
quipu.append(q)
elif int(otvet[i]) == 0:
quipu.append('~')
i+=1
print (''.join(quipu))
|
[
"59009916+chost2004@users.noreply.github.com"
] |
59009916+chost2004@users.noreply.github.com
|
9e85aedc8db94ea25f38968f18e0d90e01322bfe
|
e8f3365ad89c6cf2a264bf0fb934c7ba123757a0
|
/datamonkey/datamonkey/lib/redshift.py
|
3fe27687cd39d86c914008823395717622ae0328
|
[] |
no_license
|
eatseng/insight
|
9313d23c711ecdb456addc5278a589aab8223765
|
afa5bb6aab22df7e8c62e7aa12c4fa09833daa26
|
refs/heads/master
| 2016-09-02T06:15:32.255545
| 2015-04-30T03:59:22
| 2015-04-30T03:59:22
| 34,832,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,957
|
py
|
from __future__ import absolute_import
import psycopg2
import psycopg2.extras
import psycopg2.extensions
from cStringIO import StringIO
from boto.s3.key import Key
from clay import config
from boto.s3.connection import S3Connection
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
class Redshift(object):
ROW_DELIMITER = '\n'
COL_DELIMITER = '\007'
def __init__(self):
self.conn_opts = dict(
host=config.get("redshift_db.host"),
port=config.get("redshift_db.port"),
user=config.get("redshift_db.user"),
password=config.get("redshift_db.password"),
database=config.get("redshift_db.db")
)
def get_conn(self, dict_cursor=False):
if dict_cursor is True:
self.conn_opts['cursor_factory'] = psycopg2.extras.DictCursor
return psycopg2.connect(**self.conn_opts)
def execute(self, select_sql, data=[]):
# print(select_sql)
conn = self.get_conn(False)
cursor = conn.cursor()
cursor.execute(select_sql, data)
cursor.connection.commit()
conn.close()
def select(self, select_sql):
# print(select_sql)
conn = self.get_conn(dict_cursor=True)
cursor = conn.cursor()
cursor.execute(select_sql)
result = cursor.fetchall()
conn.close()
if result is None:
return None
else:
return result
def if_table_exists(self, table_name):
query = """SELECT count(*)
FROM information_schema.tables
WHERE (table_name = %s);"""
cursor = self.get_conn().cursor()
cursor.execute(query, (table_name,))
result = cursor.fetchall()
cursor.connection.close()
if result[0][0] > 0:
return True
else:
return False
def last_value(self, table_name, lv_col, ts_col=None):
cursor = self.get_conn().cursor()
if ts_col is None:
query = """SELECT MAX(%s) FROM %s;""" % (lv_col, table_name)
else:
query = """SELECT
MAX(%s),
MAX(%s)
FROM %s
JOIN
(SELECT MAX(%s) AS ts_col FROM %s) AS sub
ON sub.ts_col = %s.%s;
""" % (lv_col, ts_col, table_name, ts_col, table_name, table_name, ts_col)
cursor.execute(query)
result = cursor.fetchall()
cursor.connection.close()
if result is None:
return None, None
else:
if ts_col is None:
return result[0][0], None
else:
return result[0][0], result[0][1]
def select_s3(self, s3_key, select_sql, batch_size=None):
s3_connection = S3Connection(config.get('S3.access_key'), config.get('S3.secret'))
bucket = s3_connection.get_bucket(config.get('S3.bucket'), validate=False)
fp = StringIO()
conn = self.get_conn()
cursor = conn.cursor()
cursor.execute(select_sql)
ordering = map(lambda c: c[0], cursor.description)
row_count = 0
last_row = []
for row in cursor:
row_arr = []
for val in row:
if val is None:
                    row_arr.append('\\N')  # Redshift COPY NULL marker
else:
row_arr.append(unicode(val))
str_row = (self.COL_DELIMITER.join(row_arr).replace(self.ROW_DELIMITER, '') + self.ROW_DELIMITER).encode('utf-8')
fp.write(str_row)
row_count = row_count + 1
last_row = row
if batch_size is not None and row_count >= batch_size:
break
s3_file = Key(bucket)
s3_file.key = s3_key
fp.seek(0)
s3_file.set_contents_from_file(fp)
conn.close()
if batch_size is None:
return None, None
else:
return row_count, dict(zip(ordering, last_row))
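    # The unload format written above (rows delimited by ROW_DELIMITER '\n',
    # columns by COL_DELIMITER '\007', NULLs as '\N') matches the COPY options
    # used by load_s3 below.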
def load_s3(self, s3_path, table_name):
query = """COPY %s FROM '%s' CREDENTIALS '%s' DELIMITER '%s' MAXERROR 0;
""" % (table_name, s3_path, get_s3_credentials(), self.COL_DELIMITER)
self.execute(query)
def upsert(self, tmp_table_name, table_name, id_column):
column_query = """SELECT column_name FROM information_schema.columns WHERE table_name = %s;"""
try:
cursor = self.get_conn().cursor()
cursor.execute(column_query, (table_name,))
columns = cursor.fetchall()
col_arr = []
for column in columns:
col_arr.append("%s=%s.%s" % (column[0], tmp_table_name, column[0]))
cols = ", ".join(col_arr)
update_query = """UPDATE %(table)s SET %(cols)s FROM %(tmp_table)s WHERE %(table)s.%(id)s=%(tmp_table)s.%(id)s;"""
update_query = update_query % {'table': table_name, 'cols': cols, 'tmp_table': tmp_table_name, 'id': id_column}
insert_query = """INSERT INTO %(table)s
SELECT %(tmp)s.*
FROM %(tmp)s
LEFT JOIN %(table)s ON %(tmp)s.%(id)s=%(table)s.%(id)s
WHERE %(table)s.%(id)s IS NULL;"""
insert_query = insert_query % {'table': table_name, 'tmp': tmp_table_name, 'id': id_column}
cursor.execute(update_query)
cursor.execute(insert_query)
cursor.connection.commit()
cursor.connection.close()
except Exception, e:
print(e)
raise
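    # Upsert pattern: Redshift has no ON CONFLICT clause, so rows present in both
    # tables are UPDATEd in place and only ids missing from the target are INSERTed.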
def swap_and_drop(self, table1, table2):
tmp_name = table1 + "_sdtmp"
query = """ALTER TABLE %s RENAME TO %s;
ALTER TABLE %s RENAME TO %s;
DROP TABLE %s;"""
cursor = self.get_conn().cursor()
cursor.execute(query % (table1, tmp_name, table2, table1, tmp_name))
cursor.connection.commit()
cursor.connection.close()
def drop_table(self, table_name):
query = """DROP TABLE %s;"""
cursor = self.get_conn().cursor()
cursor.execute(query % (table_name))
cursor.connection.commit()
cursor.connection.close()
def readonly_select(self, table_name):
query = """GRANT SELECT ON %s TO readonly;""" % table_name
cursor = self.get_conn().cursor()
cursor.execute(query)
cursor.connection.commit()
cursor.connection.close()
def vacuum_all(self):
query = "VACUUM;"
cursor = self.get_conn().cursor()
cursor.execute(query)
cursor.connection.close()
def get_s3_credentials():
return "aws_access_key_id=%s;aws_secret_access_key=%s" % (config.get('S3.access_key'), config.get('S3.secret'))
|
[
"edwardtseng@gmail.com"
] |
edwardtseng@gmail.com
|
0bf9eea56b83d6474919c148dc6af0b026a65fbf
|
80d46af8bad0da6066cca4c0f9666fc1f8a9bd6a
|
/power.py
|
5ab7e58a936d6ba34215c25653f8539889af6cb2
|
[] |
no_license
|
mayamithra/python-programming-
|
67cee943bfb8f936374cb8c4ef4cddf5f6f50867
|
48c7486f7c2a93b12373a3dfd81dde613a1364de
|
refs/heads/master
| 2021-07-17T00:26:18.276539
| 2019-02-05T09:50:16
| 2019-02-05T09:50:16
| 149,116,166
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
n1 = int(input())  # input() returns a string in Python 3, so convert explicitly
n2 = int(input())
p = n1 * n2  # product of the two inputs
print(p)
|
[
"noreply@github.com"
] |
mayamithra.noreply@github.com
|
8a51710385c631437309fc3c7b1b1bcaa70be064
|
f073292678251f2b022aaa0864a66f9f480e25f6
|
/app/core/management/commands/wait_for_db.py
|
4900227f5c0918f8f6ba04b6dab373e8877153f6
|
[
"MIT"
] |
permissive
|
SammyVome/recipe-app-api
|
412776ab84d126f9f7d7a72152d89a941665a0d3
|
61195e36ff05733d0a981b4b8d285adccc7a9c44
|
refs/heads/master
| 2022-11-07T23:46:52.144765
| 2020-06-13T21:53:57
| 2020-06-13T21:53:57
| 269,751,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
|
[
"sammy.fagen@gmail.com"
] |
sammy.fagen@gmail.com
|
843b40a9cca2383171cf684bd6e4786637a3b6ab
|
21a6b7abace13cef3a6f55f081d52c3cf182f971
|
/ex7.py
|
3431e3e8171c7b247fb2fbb2caff92fecfca5a5c
|
[] |
no_license
|
PWynter/LPTHW
|
75ff58c1be70a7863b89b6e533a933acf978aee8
|
0d80dfad864cdf0ade01d35c7eb9561c5aac3197
|
refs/heads/master
| 2020-05-09T14:27:31.748840
| 2019-05-07T06:47:00
| 2019-05-07T06:47:00
| 181,194,973
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
print("Mary had a little lamb.")
print("It's fleece was white as {} .".format('snow'))
print("And everywhere that Mary went.")
print("." * 10)
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
print(end1 + end2 + end3 + end4 + end5 + end6, end=" ")
print(end7 + end8 + end9 + end10 + end11 + end12)
|
[
"noreply@github.com"
] |
PWynter.noreply@github.com
|
666ae8c346bf4ae8d956123385a762db5f032737
|
6fcdf0a4e0e11d86c20a468e5f663ead8c266bb7
|
/django_admin_api/comentarios/api/serializers.py
|
8a599f9fc55c4a16afc9b4fbc32416a7d6a9d076
|
[] |
no_license
|
carolbarreto/django-admin-api-restful
|
f393ac849a28370c571827ac92f1f148f756868d
|
4481e7689d1ed943e714d3d51dbd7244df33652c
|
refs/heads/main
| 2023-05-13T14:21:25.924552
| 2021-05-31T23:18:19
| 2021-05-31T23:18:19
| 368,322,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
from rest_framework.serializers import ModelSerializer
from comentarios.models import Comentarios
class ComentarioSerializer(ModelSerializer):
class Meta:
model = Comentarios
fields = ['usuario', 'comentario', 'data', 'aprovado']
|
[
"carolbarreto.ce@gmail.com"
] |
carolbarreto.ce@gmail.com
|
6b38b49072355bc0eac2193ea5f7f6e6fab66864
|
998c6bd0d3168c8619b18a478338caa753ea129b
|
/flat_bandpass.py
|
3b3f6d0cd911c6cb987a26252ae144ca60f9163e
|
[] |
no_license
|
louisbondonneau/psrfits_search
|
d87dce3b74bee8dee5c4caccce97fdeaa7e809a3
|
d76bc99da0531df6fc32a36628f14ef87ebe01cf
|
refs/heads/master
| 2023-07-09T12:50:42.794288
| 2023-06-29T14:06:29
| 2023-06-29T14:06:29
| 281,105,118
| 1
| 1
| null | 2023-06-29T14:06:30
| 2020-07-20T12:04:35
|
Python
|
UTF-8
|
Python
| false
| false
| 17,404
|
py
|
# RESCALE TIME
# Resize data array extracted from a FITS file to a new number of rows.
# Used to decrease the time lapse of a block for a involve the rfifind processing.
# MODULES
import numpy as np
import pyfits as fi
import sys
import argparse as arg
import os
# ARGUMENTS LIST
parser = arg.ArgumentParser( description = 'Transform 32-bit data to flat-band 8-bit data without scales and offsets.' )
parser.add_argument( '-f' , dest='fileName' , type=str , help='Name of the FITS file to change.' )
parser.add_argument( '-o' , dest='newFileName' , type=str , help='Name of the new FITS file to write.' )
parser.add_argument( '-ds' , dest='ds' , type=int , default=1, help='Downsample value.' )
parser.add_argument( '-pscrunch', dest='pscrunch', action='store_true', default = False, help="scrunch the polarisations")
parser.add_argument( '-intscales', dest='intscales', action='store_true', default = False, help="use 8-bit scales and offsets (replace 32-bit floats by 8-bit uints)")
parser.add_argument( '-noscale', dest='noscale', action='store_true', default = False, help="force all scales to 1")
parser.add_argument( '-notimevar', dest='notimevar', action='store_true', default = False, help="do not take into account the time dependency of the offset and the scale")
parser.add_argument( '-threshold' , dest='threshold' , type=int , default=6, help='Change the threshold value (default threshold = 6).' )
parser.add_argument( '-plot', dest='plot', action='store_true', default = False, help="plot statistics")
parser.add_argument( '-flat_in_time', dest='flat_in_time', action='store_true', default = False, help="subtract the median per integration")
args = parser.parse_args()
def data_to_offsets_and_scales(old_data):
ds = int(2**(round(np.log(args.ds)/np.log(2))))
SIGMA = args.threshold
SIGMA = SIGMA*(2./3)
    ##### calculate constants
nline, line_lenght, npol, nchan = np.shape(old_data[:, :, :, :, 0])
    ##### compute std and median
print('---------calculate median_array---------')
median_array = np.median(old_data, axis=1) # OFFSET
if not (args.noscale):
print('---------calculate std_array---------')
std_array = np.std(old_data, axis=1) # SCAL
else:
std_array = 0*median_array
if (args.notimevar):
print(np.shape(median_array))
print(np.shape(std_array))
mean_median_array = np.median(median_array, axis=0)
mean_std_array = np.median(std_array, axis=0)
for line in range(nline):
median_array[line, :, :, :] = mean_median_array
std_array[line, :, :, :] = mean_std_array
OFFSET = median_array - 0.5*SIGMA*std_array
#The signal is between median_array-0.5*SIGMA*std and median_array+1.5*SIGMA*std
SCAL = 2.*SIGMA*std_array/256.
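    # Quantization sketch: a sample v is later stored as (v - OFFSET)/SCAL, so the
    # 8-bit range [0, 255] spans [median - 0.5*SIGMA*std, median + 1.5*SIGMA*std].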
if (args.intscales):
saturation = np.where(OFFSET>255)
SCAL[saturation] = (OFFSET[saturation]-255 + 2.*SIGMA*std_array[saturation])/256.
SCAL = np.ceil(SCAL)
OFFSET[np.where(OFFSET>255)] = 255
OFFSET[np.where(OFFSET<0)] = 0
#SCAL = np.ceil(SCAL)
SCAL[np.where(SCAL>255)] = 255
SCAL[np.where(SCAL<1)] = 1
OFFSET = OFFSET.astype( 'uint8' ) #cast OFFSET matrix in a uint8 matrix
SCAL = SCAL.astype( 'uint8' ) #cast SCAL matrix in a uint8 matrix
#####some plots
if (args.plot):
print('---------make plot median-std.png---------')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 8))
plt.subplots_adjust(top=0.98,
bottom=0.07,
left=0.1,
right=0.980,
hspace=0.215,
wspace=0.25)
ax1 = plt.subplot(2, 2, 1)
ax2 = plt.subplot(2, 2, 2)
ax3 = plt.subplot(2, 2, 3)
ax4 = plt.subplot(2, 2, 4)
for i in range(npol):
mean_med = np.mean(median_array[:, i,:], axis=0)
ax1.semilogy(mean_med)
ax1.set_xlabel('channel number')
ax1.set_ylabel('median value')
bins = np.logspace(np.log10(1),np.log10(np.max(mean_med)), 32)
if(np.max(mean_med) < 10):
bins = np.logspace(np.log10(1),np.log10(10), 32)
ax3.hist(mean_med, bins=bins, alpha=0.3, log=True)
ax3.set_xscale("log")
ax3.set_xlabel('median value')
ax3.set_ylabel('number of value')
for i in range(npol):
mean_std = np.mean(std_array[:, i,:], axis=0)
ax2.semilogy(mean_std)
ax2.set_xlabel('channel number')
ax2.set_ylabel('standard deviation value')
bins = np.logspace(np.log10(1),np.log10(np.max(mean_std)), 32)
if(np.max(mean_std) < 10):
bins = np.logspace(np.log10(1),np.log10(10), 32)
ax4.hist(mean_std, bins=bins, alpha=0.3, log=True)
ax4.set_xscale("log")
ax4.set_xlabel('std')
ax4.set_ylabel('number of value')
plt.savefig('flat-std-median.png')
#####some plots
if (args.plot):
print('---------make plot scal-offset.png---------')
plt.clf()
fig = plt.figure(figsize=(8, 8))
plt.subplots_adjust(top=0.98,
bottom=0.07,
left=0.1,
right=0.980,
hspace=0.215,
wspace=0.25)
ax1 = plt.subplot(2, 2, 1)
ax2 = plt.subplot(2, 2, 2)
ax3 = plt.subplot(2, 2, 3)
ax4 = plt.subplot(2, 2, 4)
for i in range(npol):
mean_scal = np.mean(SCAL[:, i, :, 0], axis=0)
ax2.semilogy(mean_scal)
if (args.intscales):
ax2.axhline(256, color="r")
ax2.set_xlabel('channel number')
ax2.set_ylabel('scal')
bins = np.logspace(np.log10(1),np.log10(np.max(mean_scal)), 32)
if(np.max(mean_scal) < 10):
bins = np.logspace(np.log10(1),np.log10(10), 32)
ax4.hist(mean_scal, bins=bins, alpha=0.3, log=True)
ax4.set_xscale("log")
ax4.set_xlabel('scal')
ax4.set_ylabel('number of value')
for i in range(npol):
mean_offset = np.mean(OFFSET[:, i, :, 0], axis=0)
ax1.semilogy(mean_offset)
if (args.intscales):
ax1.axhline(256, color="r")
ax1.set_xlabel('channel number')
ax1.set_ylabel('offset')
bins = np.logspace(np.log10(1),np.log10(np.max(mean_offset)), 32)
if(np.max(mean_offset) < 10):
bins = np.logspace(np.log10(1),np.log10(10), 32)
ax3.hist(mean_offset, bins=bins, alpha=0.3, log=True)
ax3.set_xscale("log")
ax3.set_xlabel('offset')
ax3.set_ylabel('number of value')
plt.savefig('flat-scal-offset.png')
#
#####some plots
if (args.plot):
print('---------make plot data.png---PART1------')
plt.clf()
spectrum = np.mean(median_array, axis=0)
fig = plt.figure(figsize=(8, 8))
plt.subplots_adjust(top=0.98,
bottom=0.07,
left=0.1,
right=0.980,
hspace=0.215,
wspace=0.25)
ax1 = plt.subplot(2, 2, 1)
ax2 = plt.subplot(2, 2, 2)
ax3 = plt.subplot(2, 2, 3)
ax4 = plt.subplot(2, 2, 4)
pol = ['XX', 'YY', 'XY', 'YX']
for ipol in range(npol):
ax1.semilogy(spectrum[ipol, :, 0], label=pol[ipol])
ax1.set_xlabel('channel')
ax1.set_ylabel('OLD Amplitude (AU)')
ax1.legend(loc='upper right')
ax3.hist(np.resize(old_data, len(old_data)), alpha=1, log=True)
ax3.set_xlabel('OLD values')
ax3.set_ylabel('number of value')
print('---------apply offset and scaling---------')
    #####apply offset and scaling
for bin in range(line_lenght) :
old_data[:, bin, :, :, :] = (old_data[:, bin, :, :, :] - OFFSET)/SCAL
if (args.plot):
print('---------make plot data.png---PART2------')
spectrum = np.median(old_data, axis=1)
spectrum = np.mean(spectrum, axis=0)
for ipol in range(npol):
ax2.semilogy(spectrum[ipol, :, 0], label=pol[ipol])
if (args.intscales):
ax2.axhline(256, color="r")
ax2.set_xlabel('channel')
ax2.set_ylabel('NEW Amplitude (AU)')
ax2.legend(loc='upper right')
ax4.hist(np.resize(old_data, len(old_data)), alpha=1, log=True)
ax4.set_xlabel('NEW values')
ax4.set_ylabel('number of value')
plt.savefig('oldDATA_newDATA.png')
OFFSET = np.resize(OFFSET,(nline, npol, nchan))
SCAL = np.resize(SCAL,(nline, npol, nchan))
return (old_data, SCAL, OFFSET)
ds = int(2**(round(np.log(args.ds)/np.log(2))))
# CHECKING INPUT PARAMETERS
if os.path.isfile( args.fileName ) : # Checking file existence
print '\nExtraction of data from {:s}.\n'.format( args.fileName )
else :
print '\n{:s} is not a file.\n'.format( args.fileName )
sys.exit()
if args.newFileName : # Define the name of the new FITS file
    print 'Scaled Integer arrays written in {:s}.\n'.format( args.newFileName )
else :
    args.newFileName = 'new_' + args.fileName
    print 'No new FITS file name defined. Default name used: new_{:s}.\n'.format( args.fileName )
# DATA EXTRACTION OF THE PREVIOUS FITS
headObs = fi.getheader( args.fileName , 0 , do_not_scale_image_data=True , scale_back=True ) # Extraction of the observation header
head = fi.getheader( args.fileName , 1 , do_not_scale_image_data=True , scale_back=True ) # Extraction of the data header
data = fi.getdata( args.fileName , do_not_scale_image_data=True , scale_back=True ) # Extraction of the data arrays
print(data.columns)
old_offset = data.field( 14 ).astype('float32')
old_scale = data.field( 15 ).astype('float32')
old_data = data.field( 16 ).astype('float32') # Copy of the old amplitude data array
print(np.shape(old_offset))
print(np.shape(old_scale))
print(np.shape(old_data))
#print(data.field( 1 ) )
#print(len(data.field( 1 ) ))
# RESIZING ARRAYS
colList = [] # Field list for the new fits file
for i in range( 14 ) :
oldArray = data.field( i ) # Copy of the old amplitude data array
oldCol = data.columns[ i ].copy() # Copy of the old corresponding header
print(i, oldCol.name, oldCol.format, oldCol.unit, oldCol.dim)
newCol = fi.Column(name=oldCol.name, # Creation of the new field
format=oldCol.format,
unit=oldCol.unit,
dim=oldCol.dim,
array=oldArray)
colList.append( newCol ) # Adding to the new field list
oldCol_offset = data.columns[ 14 ].copy() # Copy of the old corresponding header
oldCol_scale = data.columns[ 15 ].copy() # Copy of the old corresponding header
oldCol_data = data.columns[ 16 ].copy() # Copy of the old corresponding header
head[ 'NBITS' ] = 8
npol = int(head['NPOL'])
if(args.pscrunch and npol > 1):
if(args.intscales):
head[ 'TFORM15' ] = str(int(float(head[ 'TFORM15' ][0:-1])/npol))+'B'
head[ 'TFORM16' ] = str(int(float(head[ 'TFORM16' ][0:-1])/npol))+'B'
else:
head[ 'TFORM15' ] = str(int(float(head[ 'TFORM15' ][0:-1])/npol))+'E'
head[ 'TFORM16' ] = str(int(float(head[ 'TFORM16' ][0:-1])/npol))+'E'
head[ 'TFORM17' ] = str(int(float(head[ 'TFORM17' ][0:-1])/npol/ds))+'B'
head['NPOL'] = 1
head['POL_TYPE'] = 'AA+BB'
else:
if(args.intscales):
head[ 'TFORM15' ] = str(int(float(head[ 'TFORM15' ][0:-1])))+'B'
head[ 'TFORM16' ] = str(int(float(head[ 'TFORM16' ][0:-1])))+'B'
else:
head[ 'TFORM15' ] = str(int(float(head[ 'TFORM15' ][0:-1])))+'E'
head[ 'TFORM16' ] = str(int(float(head[ 'TFORM16' ][0:-1])))+'E'
head[ 'TFORM17' ] = str(int(float(head[ 'TFORM17' ][0:-1])/ds))+'B'
newFormat_offset = fi.column._ColumnFormat( head[ 'TFORM15' ] ) # Definition of the new data array format
newFormat_scale = fi.column._ColumnFormat( head[ 'TFORM16' ] ) # Definition of the new data array format
newFormat_data = fi.column._ColumnFormat( head[ 'TFORM17' ] ) # Definition of the new data array format
##### calculate constants
nline, line_lenght, npol, nchan = np.shape(old_data[:, :, :, :, 0])
##### extract values
old_scale = np.resize(old_scale,(nline, npol, nchan))
old_offset = np.resize(old_offset,(nline, npol, nchan))
old_data = np.resize(old_data,(nline, line_lenght, npol, nchan, 1))
for bin in range(line_lenght) :
old_data[:, bin, :, :, 0] = (old_data[:, bin, :, :, 0]*old_scale + old_offset)
if (ds>1):
head[ 'NSBLK' ] = int(head[ 'NSBLK' ])/ds
head['TBIN'] = float(head['TBIN'])*ds
old_data = np.resize(old_data,(nline, line_lenght/ds, ds, npol, nchan, 1))
old_data = np.sum(old_data, axis=2)
old_data = np.resize(old_data,(nline, line_lenght/ds, npol, nchan, 1))
line_lenght = line_lenght/ds
##### compute std and median
if(args.pscrunch and npol > 1):
print('---------pscrunch---------')
old_data = np.sum(old_data[:, :, 0:1, :, :], axis=2)
old_data = np.resize(old_data,(nline, line_lenght, 1, nchan, 1))
npol = 1
print('---------calculate median_array---------')
median_array = np.median(old_data, axis=1) # OFFSET
mean_median_array = np.median(median_array, axis=0)
for line in range(nline):
median_array[line, :, :, :] = mean_median_array
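# The time-median per (polarisation, channel) acts as a bandpass estimate; the
# median of those medians over all sub-integrations gives a single reference
# bandpass that is applied to every line below.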
#####some plots
if (args.plot):
print('---------make plot median.png---------')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 8))
plt.subplots_adjust(top=0.98,
bottom=0.07,
left=0.1,
right=0.980,
hspace=0.215,
wspace=0.25)
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2)
for i in range(npol):
mean_med = np.mean(median_array[:, i,:], axis=0)
ax1.semilogy(mean_med)
ax1.set_xlabel('channel number')
ax1.set_ylabel('median value')
bins = np.logspace(np.log10(1),np.log10(np.max(mean_med)), 32)
if(np.max(mean_med) < 10):
bins = np.logspace(np.log10(1),np.log10(10), 32)
ax2.hist(mean_med, bins=bins, alpha=0.3, log=True)
ax2.set_xscale("log")
ax2.set_xlabel('median value')
ax2.set_ylabel('number of value')
plt.savefig('notflat-median.png')
print('---------flat_in_frequency---------')
#####deduct the median of each channel then divide by the same median
for bin in range(line_lenght) :
old_data[:, bin, :, :, :] = (((old_data[:, bin, :, :, :] - median_array)/np.abs(median_array)))*np.mean(median_array) + 128.
if (args.flat_in_time):
print('---------flat_in_time---------')
median_array = np.median(old_data, axis=1)
median_array = np.median(median_array, axis=2)
for bin in range(line_lenght):
for ichan in range(nchan):
old_data[:, bin, :, ichan, :] = old_data[:, bin, :, ichan, :] - median_array + 128
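# Re-quantise to 8 bits: data_to_offsets_and_scales (presumably defined earlier
# in this script) is expected to return the quantised samples together with new
# per-(subint, pol, chan) offset and scale arrays for the output file.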
(old_data, SCAL, OFFSET) = data_to_offsets_and_scales(old_data)
print(np.shape(OFFSET))
print(np.shape(SCAL))
print(np.shape(old_data))
### replace OFFSET and SCAL '(1,'+str(nchan)+','+str(npol)+')'
newCol = fi.Column( name=oldCol_offset.name , format=newFormat_offset , unit=oldCol_offset.unit , dim='(1,'+str(nchan)+','+str(npol)+')' , array=OFFSET ) # Creation of the new field
colList.append( newCol )
newCol = fi.Column( name=oldCol_scale.name , format=newFormat_scale , unit=oldCol_scale.unit , dim='(1,'+str(nchan)+','+str(npol)+')' , array=SCAL ) # Creation of the new field
colList.append( newCol )
old_data = np.clip(old_data, 0, 255)  # clamp to the uint8 range before casting
newCol = fi.Column( name=oldCol_data.name , format=newFormat_data , unit=oldCol_data.unit , dim='(1,'+str(nchan)+','+str(npol)+','+str(line_lenght)+')' , array=old_data.astype('uint8') ) # Creation of the new field
colList.append( newCol ) # Adding to the new field list
# DEFINITION OF THE NEW FITS
print('---------save data to '+args.newFileName+' ---------')
colDefs = fi.ColDefs( colList ) # Creation of the new fields object
tbhdu = fi.BinTableHDU.from_columns( colDefs , header=head ) # Creation of the new data table object
prihdu = fi.PrimaryHDU( header=headObs ) # Creation of the new observation header (exactly the same that the old fits file)
hdulist = fi.HDUList( [ prihdu , tbhdu ] ) # Creation of the new HDU object
hdulist.writeto( args.newFileName ) #output_verify='exception' ) # Writing the new HDU object on the new fits file
hdulist.close()
|
[
"lbondonneau@nancep3.obs-nancay.fr"
] |
lbondonneau@nancep3.obs-nancay.fr
|
5728540498983482ad98af45aa516f85770d2519
|
bf695d6099e9e76c798cbe7ef93aa50589fb7447
|
/muon-histograms.py
|
8a18ed330abb0335a683f06ba8384e362e712d82
|
[] |
no_license
|
maxastyler/dah-checkpoint6
|
b45a0726d08349feaf7f32e672bd9bed88ac2fa8
|
c6ef84af40f51d2c844d023814f13bdb2377af7b
|
refs/heads/master
| 2021-06-07T19:47:38.317925
| 2016-11-02T18:05:44
| 2016-11-02T18:05:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
data = np.loadtxt("upsilons-mass-pt-xaa.txt").flatten()
#Range to take x from = [8.5 -> 10.99]
#Take away data from 9.17 -> 9.66 and 9.84 -> 10.55
histed=np.histogram(data, bins=1000, range=[8.5, 10.99])
frequencies=histed[0]
xvalues=histed[1]
background_freqs=[]
background_xs=[]
sifted_freqs = []
sifted_xs = []
for i in range(len(frequencies)):  # the edges array has one more entry than the counts
if xvalues[i]>8.5 and xvalues[i]<10.99:
sifted_freqs.append(frequencies[i])
sifted_xs.append(xvalues[i])
if not ((xvalues[i]>9.17 and xvalues[i]<9.66) or (xvalues[i]>9.84 and xvalues[i]<10.55)):
background_freqs.append(frequencies[i])
background_xs.append(xvalues[i])
def exp(x, a, b, c):
return a*np.exp(-b*x)+c
def gaussian(x, a, b, c):
    # amplitude a, centre b, standard deviation c
    return a*np.exp(-(x-b)**2/(2*c**2))
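# Model sketch: the smooth background under the upsilon peaks is fitted with a
# decaying exponential using only the sideband regions, and each resonance is
# then fitted with a Gaussian after the background has been subtracted.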
result = scipy.optimize.curve_fit(exp, background_xs, background_freqs, p0=[0, 1, 0])
new_exp=lambda x: result[0][0]*np.exp(-result[0][1]*x)+result[0][2]
no_background = []
for i in range(len(sifted_xs)):
no_background.append(sifted_freqs[i] - new_exp(sifted_xs[i]))
peaks=[[[], []], [[], []], [[],[]]]
for i in range(len(no_background)):
x=sifted_xs[i]
if x>9.25 and x<9.75:
peaks[0][0].append(x)
peaks[0][1].append(no_background[i])
if x>9.75 and x<10.25:
peaks[1][0].append(x)
peaks[1][1].append(no_background[i])
if x>10.25 and x<10.5:
peaks[2][0].append(x)
peaks[2][1].append(no_background[i])
results = []
for i in range(3):
results.append(scipy.optimize.curve_fit(gaussian, peaks[i][0], peaks[i][1], p0=[10, 10, 10]))
plt.plot(sifted_xs, sifted_freqs)
plt.plot(sifted_xs, no_background)
calculated=[[], [], []]
for i in range(len(peaks)):
for j in range(len(peaks[i][0])):
calculated[i].append(gaussian(peaks[i][0][j], results[i][0][0], results[i][0][1], results[i][0][2]))
for i in range(len(peaks)):
    # overlay the fitted Gaussian for each of the three peaks
    plt.plot(peaks[i][0], calculated[i])
plt.show()
|
[
"mtyler88@gmail.com"
] |
mtyler88@gmail.com
|
cd4ed7ef774e0668bb1bb515c72a1a2ef5de833b
|
a0d4eaa576543f0c5da4c76a23ae5204599f7715
|
/ClinicAPI/ClinicAPI/wsgi.py
|
5b46f18f0d85fd2907b11779ee574c6246377d58
|
[] |
no_license
|
AYHD-youtube/Django-Clinic-Management-API
|
220f8f1a73caf9113a05927110fbf9d47200eba5
|
f2fadf7463f21e3988647e6c79d2a456af81aa32
|
refs/heads/main
| 2023-06-07T00:17:11.475466
| 2021-07-02T18:00:16
| 2021-07-02T18:00:16
| 365,146,936
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for ClinicAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ClinicAPI.settings')
application = get_wsgi_application()
|
[
"abhishek3yadav3@gmail.com"
] |
abhishek3yadav3@gmail.com
|
046554408249456c30a823c8ac25e8ff224ce88e
|
f6d5be69b92b5baf0d5f4ba683ad6210a5c7ff07
|
/backup.py
|
f69ba1eb634df25b82a61f6c80e9ca6cb714dd5a
|
[] |
no_license
|
vedanshvachani/python
|
3b148e4f2628a47ffab5c7a5a1604e5b5bbfbab7
|
167b2105cc2f2243fed4ad111237509cc78cb74c
|
refs/heads/master
| 2021-07-02T18:36:18.862533
| 2020-09-07T15:56:58
| 2020-09-07T15:56:58
| 158,910,349
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
#!/usr/bin/python
import os
import shutil
path = "/root/Downloads/backup/"
def copyPath(path,path1):
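    # Walk the whole source tree and copy every file it contains into the flat
    # backup directory via the shell (paths containing spaces would need quoting).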
for root,dirs,files in os.walk(path1,topdown=True):
for i in files:
ab = os.path.abspath(os.path.join(root,i))
os.system("cp -r "+ab+" "+path)
path1 = raw_input("enter the path : ")
if(os.path.exists(path1)):
if(os.path.exists(path)):
print "yes"
print path
copyPath(path,path1)
os.system("zip -r /root/Downloads/backup.zip /root/Downloads/backup")
else:
os.system("mkdir /root/Downloads/backup")
print "done"
copyPath(path,path1)
os.system("zip -r /root/Downloads/backup.zip /root/Downloads/backup")
|
[
"noreply@github.com"
] |
vedanshvachani.noreply@github.com
|
e6df29d1f8e2bd031d22a4c4a6f8359cf0a2e06c
|
c27d623bc54e3f1d4d27a9dc432ab3f0142be7a7
|
/tech/models.py
|
11350f492e6b866c7daea33b119d500199890236
|
[] |
no_license
|
spconger/TechReviewW2020
|
0faaa63943c64d8721898e41a8e03b0a0b61c124
|
714dff9c68316de09f1bffb52b21dce9700fefae
|
refs/heads/main
| 2023-02-27T18:24:10.813532
| 2021-02-06T21:56:41
| 2021-02-06T21:56:41
| 316,020,832
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
'''
Models for techreviews:
Models become tables in the database.
Each model has an autonumbered id by default, though you can
change that and declare your own primary keys.
I won't do that here.
TechType, which describes the type of tech product, laptop, tablet, software
etc. Product--the actual product,
We are going to use the django built-in User model to store our users
Review to store the reviews
THESE ARE NOT THE MODELS FOR PYTHON CLUB--
LOOK AT THE ASSIGNMENT
'''
class TechType(models.Model):
typename=models.CharField(max_length=255)
typedescription=models.TextField(null=True, blank=True)
def __str__(self):
return self.typename
class Meta:
db_table='techtype'
class Product(models.Model):
productname=models.CharField(max_length=255)
producttype=models.ForeignKey(TechType, on_delete=models.DO_NOTHING)
user=models.ForeignKey(User, on_delete=models.DO_NOTHING)
dateentered=models.DateField()
price=models.DecimalField(max_digits=6, decimal_places=2)
producturl=models.URLField()
description=models.TextField()
def discountAmount(self):
self.discount=self.price * .05
return self.discount
#The problem in test was that this function did not have a return statement
def discountPrice(self):
disc=self.discountAmount()
self.discountedPrice=self.price-disc
return self.discountedPrice
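    # Worked example (hypothetical values): for price = 100.00 the 5%
    # discountAmount() is 5.00, so discountPrice() returns 95.00.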
def __str__(self):
return self.productname
class Meta:
db_table='product'
class Review(models.Model):
title=models.CharField(max_length=255)
user=models.ForeignKey(User, on_delete=models.CASCADE)
product=models.ForeignKey(Product, on_delete=models.CASCADE)
reviewdate=models.DateField()
reviewtext=models.TextField()
def __str__(self):
return self.title
class Meta:
db_table='review'
|
[
"spconger@gmail.com"
] |
spconger@gmail.com
|
d0bf900fa6f07d26d98117838971d665ef9a68b6
|
33d74150807183ca860581fdbcab5d4a6d7bbf8c
|
/dragonfly/string/decode.py
|
a8f115e944638adf4c39c1e8d907b0f47def4991
|
[] |
no_license
|
agoose77/hive2
|
c52623063773683ccd4f091dd411b6ad389c86fa
|
235a2f01dfd922f56d850062f2219bd444d24e7e
|
refs/heads/master
| 2021-01-24T02:47:08.831426
| 2017-06-29T00:41:09
| 2017-06-29T00:41:09
| 32,750,407
| 8
| 0
| null | 2015-08-05T00:33:29
| 2015-03-23T18:18:31
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
import hive
def build_decode(i, ex, args):
"""Decode bytes into a string"""
args.encoding = hive.parameter('str', 'utf-8')
ex.encoding = hive.variable('str', args.encoding)
i.string = hive.variable("str")
i.pull_string = hive.pull_out(i.string)
ex.string = hive.output(i.pull_string)
i.bytes_ = hive.variable('bytes')
i.pull_bytes_ = hive.pull_in(i.bytes_)
ex.bytes_ = hive.antenna(i.pull_bytes_)
    def do_decoding(self):
        # Decode the pulled bytes into a string using the configured encoding
        self._string = self._bytes_.decode(self.encoding)
    i.do_decoding = hive.modifier(do_decoding)
    hive.trigger(i.pull_string, i.pull_bytes_, pretrigger=True)
    hive.trigger(i.pull_bytes_, i.do_decoding)
Decode = hive.hive("Decode", build_decode)
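# Rough usage sketch (assuming the hive runtime API): pulling Decode.string
# first pulls Decode.bytes_ (the pretrigger above), do_decoding then decodes
# them, and the resulting text is emitted on the string output.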
|
[
"goosey15@gmail.com"
] |
goosey15@gmail.com
|
6012ed4f72f4cfd83e9f4e0b0c55f7c39f58b843
|
267298206e19567d2399cd32f7d4ac264f470760
|
/sorbet/feedmanager/migrations/0002_add_field_feed_hash.py
|
f777a667545d066636901fe89b43091e3552588a
|
[
"BSD-3-Clause",
"CC-BY-3.0",
"BSD-2-Clause"
] |
permissive
|
kklimonda/sorbet
|
e340b4d5749ddb06e313f6b17f968b4391072cf8
|
d7d0d04fbd6ba16700a7549cfe1d240ca51693af
|
refs/heads/master
| 2021-01-18T06:30:10.063506
| 2012-07-15T06:20:16
| 2012-07-15T06:20:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,368
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Feed.hash'
db.add_column('feedmanager_feed', 'hash',
self.gf('django.db.models.fields.CharField')(default=0, max_length=32),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Feed.hash'
db.delete_column('feedmanager_feed', 'hash')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feedmanager.feed': {
'Meta': {'object_name': 'Feed'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'feedmanager.item': {
'Meta': {'object_name': 'Item'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedmanager.Feed']"}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '70'})
}
}
complete_apps = ['feedmanager']
|
[
"kklimonda@syntaxhighlighted.com"
] |
kklimonda@syntaxhighlighted.com
|
b4849bbe58f636c0f7b5ca205496c0dac740bc4b
|
7f63e3529b5a15e0f8221d92ca546c4715aae5a0
|
/venv/Scripts/easy_install-script.py
|
26ee29fab89bc688595d5d10d8e2693bed5eac5f
|
[] |
no_license
|
tuzzo77/djangotodo
|
953d73ff979b5915c6caaa5284a212604802e4e7
|
c71e7090a51f5ce1d1218c5111bd0b065a3769bb
|
refs/heads/master
| 2020-07-25T11:12:31.116114
| 2019-09-13T13:47:43
| 2019-09-13T13:47:43
| 208,270,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
#!C:\Users\alex\PycharmProjects\djangotodo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"tuzzo77@yahoo.it"
] |
tuzzo77@yahoo.it
|
ea3f3eaec893a324aea469678def793c22ab8495
|
4ef902205b2ecca863076e245aac1dd56116e93b
|
/newweb/test_templateparser.py
|
d7862e92d293f0dfe9cdae2e13f30b06aa0841be
|
[
"ISC"
] |
permissive
|
edelooff/newWeb
|
f4bce302c4847904be2a65bc12d507b23d4ee900
|
6ee9b512b9a42ef313032e7b79f779b44da3c319
|
refs/heads/master
| 2020-12-25T17:26:46.573023
| 2017-08-23T21:59:51
| 2017-08-23T21:59:51
| 19,253,023
| 0
| 2
| null | 2017-08-23T21:59:52
| 2014-04-28T20:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 37,443
|
py
|
#!/usr/bin/python
"""Tests for the templateparser module."""
# Too many public methods
# pylint: disable=R0904
# Standard modules
import os
import re
import time
import unittest
# Unittest target
from . import templateparser
class Parser(unittest.TestCase):
"""Basic tests for the Parser class and equality of Template objects."""
def setUp(self):
"""Creates a template file and a similar instance attribute."""
self.name = 'tmp_template'
self.raw = 'This is a basic [noun]'
self.template = templateparser.Template(self.raw)
with file(self.name, 'w') as template:
template.write('This is a basic [noun]')
template.flush()
def tearDown(self):
"""Removes the template file from the filesystem."""
os.unlink('tmp_template')
def testAddTemplate(self):
"""[Parser] AddTemplate adds a template to the parser"""
parser = templateparser.Parser()
self.assertEqual(len(parser), 0)
parser.AddTemplate(self.name)
self.assertEqual(len(parser), 1)
self.assertEqual(parser[self.name], self.template)
def testAccessTemplate(self):
"""[Parser] getting a template by key loads it when required"""
parser = templateparser.Parser()
self.assertEqual(len(parser), 0)
self.assertEqual(parser[self.name], self.template)
self.assertEqual(len(parser), 1)
def testOverWriteTemplate(self):
"""[Parser] AddTemplate overrides previously loaded template"""
custom_raw = 'My very own [adj] template'
custom_tmpl = templateparser.Template(custom_raw)
parser = templateparser.Parser()
parser.AddTemplate(self.name)
# Create a new template in place of the existing one, and reload it.
with file(self.name, 'w') as tmpl:
tmpl.write(custom_raw)
tmpl.flush()
# Assert the template has not yet changed, load it, assert that is has.
self.assertNotEqual(custom_tmpl, parser[self.name])
parser.AddTemplate(self.name)
self.assertEqual(parser[self.name], custom_tmpl)
def testPreloadTemplates(self):
"""[Parser] Templates can be preloaded when instantiating the Parser"""
parser = templateparser.Parser(templates=[self.name])
self.assertEqual(len(parser), 1)
self.assertEqual(parser[self.name], self.template)
def testParseVersusParseString(self):
"""[Parser] Parse and ParseString only differ in cached lookup"""
parser = templateparser.Parser()
result_parse = parser[self.name].Parse()
result_parse_string = parser.ParseString(self.raw)
self.assertEqual(result_parse, result_parse_string)
class ParserPerformance(unittest.TestCase):
"""Basic performance test of the Template's initialization and Parsing."""
@staticmethod
def testPerformance():
"""[Parser] Basic performance test for 2 template replacements"""
for _template in range(100):
template = 'This [obj:foo] is just a quick [bar]'
tmpl = templateparser.Template(template)
for _parse in xrange(100):
tmpl.Parse(obj={'foo': 'template'}, bar='hack')
class TemplateTagBasic(unittest.TestCase):
"""Tests validity and parsing of simple tags."""
def setUp(self):
"""Makes the Template class available on the instance."""
self.tmpl = templateparser.Template
def testTaglessTemplate(self):
"""[BasicTag] Templates without tags get returned verbatim as SafeString"""
template = 'Template without any tags'
self.assertEqual(self.tmpl(template).Parse(), template)
def testSafeString(self):
"""[BasicTag] Templates without tags get returned verbatim as SafeString"""
template = 'Template without any tags'
parsed_template = self.tmpl(template).Parse()
self.assertTrue(isinstance(parsed_template, templateparser.SafeString))
def testSingleTagTemplate(self):
"""[BasicTag] Templates with basic tags get returned proper"""
template = 'Template with [single] tag'
result = self.tmpl(template).Parse(single='just one')
self.assertEqual(result, 'Template with just one tag')
def testCasedTag(self):
"""[BasicTag] Tag names are case-sensitive"""
template = 'The parser has no trouble with [cAsE] [case].'
result = self.tmpl(template).Parse(cAsE='mixed')
self.assertEqual(result, 'The parser has no trouble with mixed [case].')
def testUnderscoredTag(self):
"""[BasicTag] Tag names may contain underscores"""
template = 'The template may contain [under_scored] tags.'
result = self.tmpl(template).Parse(under_scored='underscored')
self.assertEqual(result, 'The template may contain underscored tags.')
def testMultiTagTemplate(self):
"""[BasicTag] Multiple instances of a tag will all be replaced"""
template = '[adjective] [noun] are better than other [noun].'
result = self.tmpl(template).Parse(noun='cows', adjective='Beefy')
self.assertEqual(result, 'Beefy cows are better than other cows.')
def testEmptyOrWhitespace(self):
"""[BasicTag] Empty tags or tags containing whitespace aren't actual tags"""
template = 'This [is a] broken [] template, really'
result = self.tmpl(template).Parse(**{'is a': 'HORRIBLY', '': ', NASTY'})
self.assertEqual(result, template)
def testBadCharacterTags(self):
"""[BasicTag] Tags containing bad characters are not considered tags"""
bad_chars = """ :~!@#$%^&*()+-={}\|;':",./<>? """
template = ''.join('[%s] [check]' % char for char in bad_chars)
expected = ''.join('[%s] ..' % char for char in bad_chars)
replaces = dict((char, 'FAIL') for char in bad_chars)
replaces['check'] = '..'
self.assertEqual(self.tmpl(template).Parse(**replaces), expected)
def testUnreplacedTag(self):
"""[BasicTag] Template tags without replacement are returned verbatim"""
template = 'Template with an [undefined] tag.'
self.assertEqual(self.tmpl(template).Parse(), template)
def testBracketsInsideTag(self):
"""[BasicTag] Innermost bracket pair are the tag's delimiters"""
template = 'Template tags may not contain [[spam][eggs]].'
expected = 'Template tags may not contain [opening or closing brackets].'
result = self.tmpl(template).Parse(
**{'[spam': 'EPIC', 'eggs]': 'FAIL', 'spam][eggs': 'EPIC FAIL',
'spam': 'opening or ', 'eggs': 'closing brackets'})
self.assertEqual(result, expected)
def testTemplateInterpolationSyntax(self):
"""[BasicTag] Templates support string interpolation of dicts"""
template = 'Hello [name]'
self.assertEqual(self.tmpl(template) % {'name': 'Bob'}, 'Hello Bob')
class TemplateTagIndexed(unittest.TestCase):
"""Tests the handling of complex tags (those with attributes/keys/indexes)."""
def setUp(self):
"""Sets up a parser instance, as it never changes."""
self.tmpl = templateparser.Template
def testTemplateMappingKey(self):
"""[IndexedTag] Template tags can address mappings properly"""
template = 'This uses a [dictionary:key].'
result = self.tmpl(template).Parse(dictionary={'key': 'spoon'})
self.assertEqual(result, 'This uses a spoon.')
def testTemplateIndexing(self):
"""[IndexedTag] Template tags can access indexed iterables"""
template = 'Template that grabs the [obj:2] key from the given tuple/list.'
expected = 'Template that grabs the third key from the given tuple/list.'
numbers = 'first', 'second', 'third'
self.assertEqual(self.tmpl(template).Parse(obj=numbers), expected)
self.assertEqual(self.tmpl(template).Parse(obj=list(numbers)), expected)
def testTemplateAttributes(self):
"""[IndexedTag] Template tags will do attribute lookups after key-lookups"""
class Mapping(dict):
"""A subclass of a dictionary, so we can define attributes on it."""
NAME = 'attribute'
template = 'Template used [tag:NAME] lookup.'
lookup_attr = 'Template used attribute lookup.'
lookup_dict = 'Template used key (mapping) lookup.'
mapp = Mapping()
self.assertEqual(self.tmpl(template).Parse(tag=mapp), lookup_attr)
mapp['NAME'] = 'key (mapping)'
self.assertEqual(self.tmpl(template).Parse(tag=mapp), lookup_dict)
def testTemplateIndexingCharacters(self):
"""[IndexedTag] Tags indexes may be made of word chars and dashes only"""
good_chars = "aAzZ0123-_"
bad_chars = """ :~!@#$%^&*()+={}\|;':",./<>? """
for index in good_chars:
tag = {index: 'SUCCESS'}
template = '[tag:%s]' % index
self.assertEqual(self.tmpl(template).Parse(tag=tag), 'SUCCESS')
for index in bad_chars:
tag = {index: 'FAIL'}
template = '[tag:%s]' % index
self.assertEqual(self.tmpl(template).Parse(tag=tag), template)
def testTemplateMissingIndexes(self):
"""[IndexedTag] Tags with bad indexes will be returned verbatim"""
class Object(object):
"""A simple object to store an attribute on."""
NAME = 'Freeman'
template = 'Hello [titles:1] [names:NAME], how is [names:other] [date:now]?'
expected = 'Hello [titles:1] Freeman, how is [names:other] [date:now]?'
result = self.tmpl(template).Parse(titles=['Mr'], names=Object(), date={})
self.assertEqual(result, expected)
def testTemplateMultipleIndexing(self):
"""[IndexedTag] Template tags can contain multiple nested indexes"""
template = 'Welcome to the [foo:bar:zoink].'
result = self.tmpl(template).Parse(foo={'bar': {'zoink': 'World'}})
self.assertEqual(result, 'Welcome to the World.')
class TemplateTagFunctions(unittest.TestCase):
"""Tests the functions that are performed on replaced tags."""
def setUp(self):
"""Sets up a parser instance, as it never changes."""
self.parser = templateparser.Parser()
self.parse = self.parser.ParseString
def testBasicFunction(self):
"""[TagFunctions] Raw function does not affect output"""
template = 'This function does [none|raw].'
result = self.parse(template, none='"nothing"')
self.assertEqual(result, 'This function does "nothing".')
def testNonexistentFunction(self):
"""[TagFunctions] An error is raised for functions that don't exist"""
template = 'This tag function is missing [num|zoink].'
self.assertEqual(self.parse(template), template)
# Error is only thrown if we actually pass an argument for the tag:
self.assertRaises(templateparser.TemplateNameError,
self.parse, template, num=1)
def testAlwaysString(self):
"""[TagFunctions] Tag function return is always converted to string."""
template = '[number]'
self.assertEqual(self.parse(template, number=1), '1')
template = '[number|raw]'
self.assertEqual(self.parse(template, number=2), '2')
template = '[number|int]'
self.parser.RegisterFunction('int', int)
self.assertEqual(self.parse(template, number=3), '3')
def testFunctionCharacters(self):
"""[TagFunctions] Tags functions may contain word chars and dashes only"""
good_funcs = "aAzZ0123-_"
good_func = lambda tag: 'SUCCESS'
bad_funcs = """ :~!@#$%^&*+={}\;':"./<>?| """
bad_func = lambda tag: 'FAIL'
for index in good_funcs:
template = '[tag|%s]' % index
self.parser.RegisterFunction(index, good_func)
self.assertEqual(self.parse(template, tag='foo'), 'SUCCESS')
for index in bad_funcs:
template = '[tag|%s]' % index
self.parser.RegisterFunction(index, bad_func)
self.assertEqual(self.parse(template, tag='foo'), template)
self.parser.RegisterFunction('|', bad_func)
def testDefaultHtmlSafe(self):
"""[TagFunctions] The default function escapes HTML entities"""
default = 'This function does [none].'
escaped = 'This function does [none|html].'
expected = 'This function does &quot;nothing&quot;.'
self.assertEqual(self.parse(default, none='"nothing"'), expected)
self.assertEqual(self.parse(escaped, none='"nothing"'), expected)
def testAllHtmlEscapes(self):
"""[TagFunctions] The default function escapes all verboten characters."""
template = '[text]'
template_input = '"Quotes" \'n <brackets> & such'
expected = '&quot;Quotes&quot; &#39;n &lt;brackets&gt; &amp; such'
self.assertEqual(self.parse(template, text=template_input), expected)
def testNoDefaultForSafeString(self):
"""[TagFunctions] The default function does not act upon SafeString parts"""
first_template = 'Hello doctor [name]'
second_template = '<assistant> [quote].'
result = '<assistant> Hello doctor &quot;Who&quot;.'
result_first = self.parse(first_template, name='"Who"')
result_second = self.parse(second_template, quote=result_first)
self.assertEqual(result, result_second)
def testCustomFunction(self):
"""[TagFunctions] Custom functions can be added to the Parser"""
self.parser.RegisterFunction('twice', lambda x: x + ' ' + x)
template = 'The following will be stated [again|twice].'
result = 'The following will be stated twice twice.'
self.assertEqual(result, self.parse(template, again='twice'))
def testFunctionChaining(self):
"""[TagFunctions] Multiple functions can be chained after one another"""
self.parser.RegisterFunction('count', lambda x: '%s characters' % x)
template = 'A replacement processed by two functions: [spam|len|count].'
result = 'A replacement processed by two functions: 8 characters.'
self.assertEqual(result, self.parse(template, spam='ham&eggs'))
def testFunctionUse(self):
"""[TagFunctions] Tag functions are only called when requested by tags"""
fragments_received = []
def CountAndReturn(fragment):
"""Returns the given fragment after adding it to a counter list."""
fragments_received.append(fragment)
return fragment
self.parser.RegisterFunction('count', CountAndReturn)
template = 'Count only has [num|count] call, or it is [noun|raw].'
result = self.parse(template, num='one', noun='broken')
self.assertEqual(result, 'Count only has one call, or it is broken.')
self.assertEqual(len(fragments_received), 1)
def testTagFunctionUrl(self):
"""[TagFunctions] The tag function 'url' is present and works"""
template = 'http://example.com/?breakfast=[query|url]'
result = self.parse(template, query='"ham & eggs"')
self.assertEqual(result, 'http://example.com/?breakfast=%22ham+%26+eggs%22')
def testTagFunctionItems(self):
"""[TagFunctions] The tag function 'items' is present and works"""
template = '[tag|items]'
tag = {'ham': 'eggs'}
result = "[('ham', 'eggs')]"
self.assertEqual(result, self.parse(template, tag=tag))
def testTagFunctionValues(self):
"""[TagFunctions] The tag function 'values' is present and works"""
template = '[tag|values]'
self.assertEqual(self.parse(template, tag={'ham': 'eggs'}), "['eggs']")
def testTagFunctionSorted(self):
"""[TagFunctions] The tag function 'sorted' is present and works"""
template = '[numbers|sorted]'
numbers = [5, 1, 3, 2, 4]
self.assertEqual(self.parse(template, numbers=numbers), "[1, 2, 3, 4, 5]")
def testTagFunctionLen(self):
"""[TagFunctions] The tag function 'len' is present and works"""
template = '[numbers|len]'
self.assertEqual(self.parse(template, numbers=range(12)), "12")
class TemplateTagFunctionClosures(unittest.TestCase):
"""Tests the functions that are performed on replaced tags."""
@staticmethod
def Limit(length=80):
"""Returns a closure that limits input to a number of chars/elements."""
return lambda string: string[:length]
@staticmethod
def LimitString(length=80, endchar='...'):
"""Limits input to `length` chars and appends `endchar` if it was longer."""
def _Limit(string, length=length, endchar=endchar):
if len(string) > length:
return string[:length] + endchar
return string
return _Limit
def setUp(self):
"""Sets up a parser instance, as it never changes."""
self.parser = templateparser.Parser()
self.parser.RegisterFunction('limit', self.Limit)
self.parser.RegisterFunction('strlimit', self.LimitString)
self.parse = self.parser.ParseString
self.tag = 'hello world ' * 10
def testSimpleClosureWithoutArguments(self):
"""[TagClosures] Simple tag closure-functions without arguments succeed"""
template = '[tag|limit()]'
result = self.parse(template, tag=self.tag)
self.assertEqual(result, self.tag[:80])
def testSimpleClosureArgument(self):
"""[TagClosures] Simple tag-closure functions operate on their argument"""
template = '[tag|limit(20)]'
result = self.parse(template, tag=self.tag)
self.assertEqual(result, self.tag[:20])
def testComplexClosureWithoutArguments(self):
"""[TagClosures] Complex tag closure-functions without arguments succeed"""
template = '[tag|strlimit()]'
result = self.parse(template, tag=self.tag)
self.assertEqual(len(result), 83)
self.assertEqual(result[:80], self.tag[:80])
self.assertEqual(result[-3:], '...')
def testComplexClosureArguments(self):
"""[TagClosures] Complex tag closure-functions operate on arguments"""
template = '[tag|strlimit(20, "TOOLONG")]'
result = self.parse(template, tag=self.tag)
self.assertEqual(len(result), 27)
self.assertEqual(result[:20], self.tag[:20])
self.assertEqual(result[-7:], 'TOOLONG')
def testCharactersInClosureArguments(self):
"""[TagClosures] Arguments strings may contain specialchars"""
template = '[tag|strlimit(20, "`-=./<>?`!@#$%^&*_+[]\{}|;\':")]'
result = self.parser.ParseString(template, tag=self.tag)
self.assertTrue(result.endswith('`-=./<>?`!@#$%^&*_+[]\{}|;\':'))
def testCommaInArgument(self):
"""[TagClosures] String arguments may contain commas"""
template = '[tag|strlimit(10, "ham, eggs")]'
result = self.parse(template, tag=self.tag)
self.assertEqual(result[-9:], "ham, eggs")
def testNamedArguments(self):
"""[TagClosures] Named arguments are not allowed"""
template = '[tag|limit(length=20)]'
self.assertRaises(templateparser.TemplateSyntaxError,
self.parse, template, tag=self.tag)
def testTrailingComma(self):
"""[TagClosures] Arguments may not have a trailing comma"""
template = '[tag|limit(20,)]'
self.assertRaises(templateparser.TemplateSyntaxError,
self.parse, template, tag=self.tag)
class TemplateUnicodeSupport(unittest.TestCase):
"""TemplateParser handles Unicode gracefully."""
def setUp(self):
"""Sets up a parser instance, as it never changes."""
self.parser = templateparser.Parser()
self.parse = self.parser.ParseString
def testTemplateUnicode(self):
"""[Unicode] Templates may contain raw Unicode codepoints"""
# And they will be converted to UTF8 eventually
template = u'We \u2665 Python'
self.assertEqual(self.parse(template), template.encode('UTF8'))
def testTemplateUTF8(self):
"""[Unicode] Templates may contain UTF8 encoded text"""
# That is, input bytes will be left untouched
template = u'We \u2665 Python'.encode('UTF8')
self.assertEqual(self.parse(template), template)
def testUnicodeReplacements(self):
"""[Unicode] Unicode in tag replacements is converted to UTF8"""
template = 'Underdark Web framework, also known as [name].'
expected = u'Underdark Web framework, also known as \xb5Web.'.encode('UTF8')
self.assertEqual(self.parse(template, name=u'\xb5Web'), expected)
def testUnicodeTagFunction(self):
"""[Unicode] Template functions returning unicode are converted to UTF8"""
function_result = u'No more \N{BLACK HEART SUIT}'
def StaticReturn(_fragment):
"""Returns a static string, for any input fragment."""
return function_result
self.parser.RegisterFunction('nolove', StaticReturn)
template = '[love|nolove]'
expected = function_result.encode('UTF8')
self.assertEqual(self.parse(template, love='love'), expected)
def testTemplateTagUTF8(self):
"""[Unicode] Template tags may contain UTF8"""
template = u'We \u2665 \xb5Web!'.encode('UTF8')
self.assertEqual(self.parse(template), template)
class TemplateInlining(unittest.TestCase):
"""TemplateParser properly handles the include statement."""
def setUp(self):
"""Sets up a testbed."""
self.parser = templateparser.Parser()
self.parse = self.parser.ParseString
self.tmpl = templateparser.Template
def testInlineExisting(self):
"""{{ inline }} Parser will inline an already existing template reference"""
self.parser['template'] = self.tmpl('This is a subtemplate by [name].')
template = '{{ inline template }}'
expected = 'This is a subtemplate by Elmer.'
self.assertEqual(self.parse(template, name='Elmer'), expected)
def testInlineFile(self):
"""{{ inline }} Parser will load an inlined template from file if needed"""
with file('tmp_template', 'w') as inline_file:
inline_file.write('This is a subtemplate by [name].')
inline_file.flush()
try:
template = '{{ inline tmp_template }}'
expected = 'This is a subtemplate by Elmer.'
self.assertEqual(self.parse(template, name='Elmer'), expected)
finally:
os.unlink('tmp_template')
class TemplateConditionals(unittest.TestCase):
"""TemplateParser properly handles if/elif/else statements."""
def setUp(self):
"""Sets up a testbed."""
self.parse = templateparser.Parser().ParseString
def testBasicConditional(self):
"""{{ if }} Basic boolean check works for relevant data types"""
template = '{{ if [variable] }} ack {{ endif }}'
# Boolean True inputs should return a SafeString object stating 'foo'.
self.assertEqual(self.parse(template, variable=True), ' ack')
self.assertEqual(self.parse(template, variable='truth'), ' ack')
self.assertEqual(self.parse(template, variable=12), ' ack')
self.assertEqual(self.parse(template, variable=[1, 2]), ' ack')
# Boolean False inputs should yield an empty SafeString object.
self.assertFalse(self.parse(template, variable=None))
self.assertFalse(self.parse(template, variable=0))
self.assertFalse(self.parse(template, variable=''))
def testCompareTag(self):
"""{{ if }} Basic tag value comparison"""
template = '{{ if [variable] == 5 }} foo {{ endif }}'
self.assertFalse(self.parse(template, variable=0))
self.assertFalse(self.parse(template, variable=12))
self.assertTrue(self.parse(template, variable=5))
  def testTagIsInstance(self):
    """{{ if }} isinstance() checks work inside conditional expressions"""
template = '{{ if isinstance([variable], int) }} ack {{ endif }}'
self.assertFalse(self.parse(template, variable=[1]))
self.assertFalse(self.parse(template, variable='number'))
self.assertEqual(self.parse(template, variable=5), ' ack')
def testDefaultElse(self):
"""{{ if }} Else block will be parsed when `if` fails"""
template = '{{ if [var] }}foo {{ else }}bar {{ endif }}'
self.assertEqual(self.parse(template, var=True), 'foo')
self.assertEqual(self.parse(template, var=False), 'bar')
def testElif(self):
"""{{ if }} Elif blocks will be parsed until one matches"""
template = """
{{ if [var] == 1 }}a
{{ elif [var] == 2 }}b
{{ elif [var] == 3 }}c
{{ elif [var] == 4 }}d
{{ endif }}"""
self.assertEqual(self.parse(template, var=1), 'a')
self.assertEqual(self.parse(template, var=2), 'b')
self.assertEqual(self.parse(template, var=3), 'c')
self.assertEqual(self.parse(template, var=4), 'd')
self.assertFalse(self.parse(template, var=5))
  def testIfElifElse(self):
    """{{ if }} Full if/elif/else branching works"""
template = """
{{ if [var] == "a" }}1
{{ elif [var] == "b"}}2
{{ else }}3 {{ endif }}"""
self.assertEqual(self.parse(template, var='a'), '1')
self.assertEqual(self.parse(template, var='b'), '2')
self.assertEqual(self.parse(template, var='c'), '3')
def testSyntaxErrorNoEndif(self):
"""{{ if }} Conditional without {{ endif }} raises TemplateSyntaxError"""
template = '{{ if [var] }} foo'
self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)
def testSyntaxErrorElifAfterElse(self):
"""{{ if }} An `elif` clause following `else` raises TemplateSyntaxError"""
template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'
self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)
def testSyntaxErrorDoubleElse(self):
"""{{ if }} Starting a second `else` clause raises TemplateSyntaxError"""
template = '{{ if [var] }} {{ else }} {{ else }} {{ endif }}'
self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)
def testSyntaxErrorClauseWithoutIf(self):
"""{{ if }} elif / else / endif without `if` raises TemplateSyntaxError"""
template = '{{ elif }}'
self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)
template = '{{ else }}'
self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)
template = '{{ endif }}'
self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)
def testTagPresence(self):
"""{{ if }} Clauses require the tag to be present as a replacement"""
template = '{{ if [absent] }} {{ endif }}'
self.assertRaises(templateparser.TemplateNameError, self.parse, template)
def testVariableMustBeTag(self):
"""{{ if }} Clauses must reference variables using a tag, not a name"""
good_template = '{{ if [var] }} x {{ else }} x {{ endif }}'
self.assertTrue(self.parse(good_template, var='foo'))
bad_template = '{{ if var }} x {{ else }} x {{ endif }}'
self.assertRaises(templateparser.TemplateNameError,
self.parse, bad_template, var='foo')
def testLazyEvaluation(self):
"""{{ if }} Variables are retrieved in lazy fashion, not before needed"""
# Tags are looked up lazily
template = '{{ if [present] or [absent] }}~ {{ endif }}'
self.assertEqual(self.parse(template, present=True), '~')
# Indices are looked up lazily
template = '{{ if [var:present] or [var:absent] }}~ {{ endif }}'
self.assertEqual(self.parse(template, var={'present': 1}), '~')
class TemplateLoops(unittest.TestCase):
"""TemplateParser properly handles for-loops."""
def setUp(self):
"""Sets up a testbed."""
self.parser = templateparser.Parser()
self.parse = self.parser.ParseString
self.tmpl = templateparser.Template
def testLoopCount(self):
"""{{ for }} Parser will loop once for each item in the for loop"""
template = '{{ for num in [values] }}x{{ endfor }}'
result = self.parse(template, values=range(5))
self.assertEqual(result, 'xxxxx')
def testLoopReplaceBasic(self):
"""{{ for }} The loop variable is available via tagname"""
template = '{{ for num in [values] }}[num],{{ endfor }}'
result = self.parse(template, values=range(5))
self.assertEqual(result, '0,1,2,3,4,')
def testLoopReplaceScope(self):
"""{{ for }} The loop variable overwrites similar names from outer scope"""
template = '[num], {{ for num in [numbers] }}[num], {{ endfor }}[num]'
result = self.parse(template, numbers=range(5), num='OUTER')
self.assertEqual(result, 'OUTER,0,1,2,3,4,OUTER')
def testLoopOverIndexedTag(self):
"""{{ for }} Loops can be performed over indexed tags"""
template = '{{ for num in [numbers:1] }}x{{ endfor }}'
result = self.parse( template, numbers=[range(10), range(5), range(10)])
self.assertEqual(result, 'xxxxx')
def testLoopVariableIndex(self):
"""{{ for }} Loops variable tags support indexing and functions"""
template = '{{ for bundle in [bundles]}}[bundle:1:name|upper], {{ endfor }}'
bundles = [('1', {'name': 'Spam'}), ('2', {'name': 'Eggs'})]
result = 'SPAM,EGGS,'
self.parser.RegisterFunction('upper', str.upper)
self.assertEqual(self.parse(template, bundles=bundles), result)
def testLoopOnFunctions(self):
"""{{ for }} Loops work on function results if functions are used"""
template = ('{{ for item in [mapping|items|sorted] }} '
'[item:0]=[item:1]{{ endfor }}')
mapping = {'first': 12, 'second': 42}
result = ' first=12 second=42'
self.assertEqual(self.parse(template, mapping=mapping), result)
# Assert that without sorted, this actually fails
unsorted = ('{{ for item in [mapping|items] }} '
'[item:0]=[item:1]{{ endfor }}')
self.assertNotEqual(self.parse(unsorted, mapping=mapping), result)
def testLoopTupleAssignment(self):
"""{{ for }} Loops support tuple unpacking for iterators"""
template = ('{{ for key,val in [mapping|items|sorted] }} '
'[key]=[val] {{ endfor }}')
mapping = {'first': 12, 'second': 42}
result = ' first=12 second=42'
self.assertEqual(self.parse(template, mapping=mapping), result)
def testLoopTupleAssignmentMismatch(self):
"""{{ for }} Loops raise TemplateValueError when tuple unpacking fails"""
template = '{{ for a, b, c in [iterator] }}[a] {{ endfor }}'
self.assertEqual(self.parse(template, iterator=['xyz']), 'x')
self.assertRaises(templateparser.TemplateValueError,
self.parse, template, iterator=['eggs'])
self.assertRaises(templateparser.TemplateValueError,
self.parse, template, iterator=range(10))
def testLoopTagPresence(self):
"""{{ for }} Loops require the loop tag to be present"""
template = '{{ for item in [absent] }} hello {{ endfor }}'
self.assertRaises(templateparser.TemplateNameError, self.parse, template)
def testLoopAbsentIndex(self):
"""{{ for }} Loops over an absent index result in no loops (no error)"""
template = '{{ for item in [tag:absent] }} x {{ endfor }}'
self.assertFalse(self.parse(template, tag='absent'))
class TemplateTagPresenceCheck(unittest.TestCase):
"""Test cases for the `ifpresent` TemplateParser construct."""
def setUp(self):
self.parse = templateparser.Parser().ParseString
def testBasicTagPresence(self):
"""{{ ifpresent }} runs the code block if the tag is present"""
template = '{{ ifpresent [tag] }} hello {{ endif }}'
self.assertEqual(self.parse(template, tag='spam'), ' hello')
def testBasicTagAbsence(self):
"""{{ ifpresent }} does not run the main block if the tag is missing"""
template = '{{ ifpresent [tag] }} hello {{ endif }}'
self.assertFalse(self.parse(template))
def testTagPresenceElse(self):
"""{{ ifpresent }} has a functioning `else` clause"""
template = '{{ ifpresent [tag] }} yes {{ else }} no {{ endif }}'
self.assertEqual(self.parse(template, tag='spam'), ' yes')
self.assertEqual(self.parse(template), ' no')
def testPresenceElif(self):
"""{{ ifpresent }} has functioning `elif` support"""
template = ('{{ ifpresent [one] }} first'
'{{ elif [two] }} second {{ else }} third {{ endif }}')
self.assertEqual(self.parse(template, one='present'), ' first')
self.assertEqual(self.parse(template, two='ready'), ' second', )
self.assertEqual(self.parse(template), ' third')
def testPresenceOfKey(self):
"""{{ ifpresent }} also works on index selectors"""
template = '{{ ifpresent [tag:6] }} yes {{ else }} no {{ endif }}'
self.assertEqual(self.parse(template, tag='longtext'), ' yes')
self.assertEqual(self.parse(template, tag='short'), ' no')
self.assertEqual(self.parse(template), ' no')
def testMultiTagPresence(self):
"""{{ ifpresent }} checks the presence of *all* provided tagnames/indices"""
template = '{{ ifpresent [one] [two] }} good {{ endif }}'
self.assertEqual(self.parse(template, one=1, two=2), ' good')
self.assertFalse(self.parse(template, one=1))
self.assertFalse(self.parse(template, two=2))
def testBadSyntax(self):
"""{{ ifpresent }} requires proper tags to be checked for presence"""
template = '{{ ifpresent var }} {{ endif }}'
self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)
class TemplateStringRepresentations(unittest.TestCase):
"""Test cases for string representation of various TemplateParser parts."""
def setUp(self):
self.strip = lambda string: re.sub('\s', '', string)
self.tmpl = templateparser.Template
self.parser = templateparser.Parser()
def testTemplateTag(self):
"""[Representation] TemplateTags str() echoes its literal"""
template = '[greeting] [title|casing] [person:name|casing] har'
self.assertEqual(self.strip(str(self.tmpl(template))), self.strip(template))
def testTemplateConditional(self):
"""[Representation] TemplateConditional str() echoes its literal"""
template = '{{ if [a] == "foo" }} foo [b] {{ else }} bar [b] {{ endif }}'
self.assertEqual(self.strip(str(self.tmpl(template))), self.strip(template))
def testTemplateInline(self):
"""[Representation] TemplateInline str() shows the inlined template part"""
example = 'Hello [location]'
template = '{{ inline example }}'
self.parser['example'] = self.tmpl(example)
self.assertEqual(self.strip(str(self.tmpl(template, parser=self.parser))),
self.strip(example))
def testTemplateLoop(self):
"""[Representation] TemplateLoop str() echoes its definition"""
template = ('{{ for a, b in [iter|items] }}{{ for c in [a] }}[c]'
'{{ endfor }}{{ endfor }}')
self.assertEqual(self.strip(str(self.tmpl(template))), self.strip(template))
class TemplateNestedScopes(unittest.TestCase):
"""Test cases for nested function scopes."""
def setUp(self):
"""Sets up a testbed."""
self.parser = templateparser.Parser()
self.parse = self.parser.ParseString
self.tmpl = templateparser.Template
def testLoopWithInline(self):
"""{{ nested }} Loops can contain an {{ inline }} section"""
inline = '<li>Hello [name]</li>'
self.parser['name'] = self.tmpl(inline)
template = '{{ for name in [names] }}{{ inline name }}{{ endfor }}'
result = self.parse(template, names=('John', 'Eric'))
self.assertEqual(result, '<li>Hello John</li><li>Hello Eric</li>')
def testLoopWithInlineLoop(self):
"""{{ nested }} Loops can contain {{ inline }} loops"""
inline = '{{ for char in [name] }}[char].{{ endfor }}'
self.parser['name'] = self.tmpl(inline)
template = '{{ for name in [names] }}<li>{{ inline name }}</li>{{ endfor }}'
result = self.parse(template, names=('John', 'Eric'))
self.assertEqual(result, '<li>J.o.h.n.</li><li>E.r.i.c.</li>')
def testInlineLoopsInConditional(self):
"""{{ nested }} Inlined loop in a conditional without problems"""
self.parser['loop'] = self.tmpl('{{ for i in [loops] }}[i]{{ endfor }}')
self.parser['once'] = self.tmpl('value: [value]')
tmpl = '{{ if [x] }}{{ inline loop }}{{ else }}{{ inline once }}{{ endif }}'
result_loop = self.parse(tmpl, loops=range(1, 6), x=True)
result_once = self.parse(tmpl, value='foo', x=False)
self.assertEqual(result_loop, '12345')
self.assertEqual(result_once, 'value: foo')
class TemplateReloading(unittest.TestCase):
"""Tests for FileTemplate automatic reloading upon modification."""
def setUp(self):
self.simple = 'simple.utp'
self.simple_raw = 'simple [noun]'
self.loop = 'loop.utp'
self.loop_raw = '{{ for bit in [blob] }}{{ inline simple.utp }}{{ endfor }}'
with file(self.simple, 'w') as simple:
simple.write(self.simple_raw)
with file(self.loop, 'w') as loop:
loop.write(self.loop_raw)
self.parser = templateparser.Parser()
self.parser.AddTemplate(self.simple)
self.parser.AddTemplate(self.loop)
def tearDown(self):
for tmpfile in (self.loop, self.simple):
if os.path.exists(tmpfile):
if os.path.isdir(tmpfile):
os.rmdir(tmpfile)
else:
os.unlink(tmpfile)
def testFileBasicReload(self):
"""[Reload] Template file is reloaded from disk after updating"""
first = self.parser[self.simple].Parse()
self.assertEqual(first, self.simple_raw)
with file(self.simple, 'w') as new_template:
new_template.write('new content')
time.sleep(.01) # short pause so that mtime will actually be different
second = self.parser[self.simple].Parse()
self.assertEqual(second, 'new content')
def testInlineReload(self):
"""[Reload] Inlined templates are not automatically reloaded"""
first = self.parser[self.loop].Parse(blob='four')
self.assertEqual(first, self.simple_raw * 4)
with file(self.simple, 'w') as new_template:
new_template.write('new content')
time.sleep(.01) # short pause so that mtime will actually be different
second = self.parser[self.loop].Parse(blob='four')
self.assertEqual(second, 'new content' * 4)
def testReloadDeletedTemplate(self):
"""[Reload] Deleted templates are not reloaded and don't trigger errors"""
os.unlink(self.simple)
self.assertEqual(self.parser[self.simple].Parse(), self.simple_raw)
  def testReplaceTemplateWithDirectory(self):
    """[Reload] Templates replaced by a directory are not reloaded and don't error"""
os.unlink(self.simple)
time.sleep(.01) # short pause so that mtime will actually be different
os.mkdir(self.simple)
self.assertEqual(self.parser[self.simple].Parse(), self.simple_raw)
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
[
"elmer.delooff@gmail.com"
] |
elmer.delooff@gmail.com
|
2c83e23cf524ec8761f95cc6e52c17eb52eae1d8
|
ad3c7d3cb743dc625605f4c4323d02ff797e0584
|
/Code/prep_con.py
|
24ea0e329c004d2d134517b4a3368a651d375526
|
[] |
no_license
|
Shaw9575/Mathematical-Pattern-Recognition-
|
19b7af93148a4fd1c657441b0a97dfb0b8abe7e0
|
414da484c731bd8ee1b0f75d154cb4e63fbe3c24
|
refs/heads/master
| 2020-03-15T01:43:30.000732
| 2019-06-24T04:29:00
| 2019-06-24T04:29:00
| 131,900,390
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,947
|
py
|
# Pre-processing
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.preprocessing import Imputer
def prep_conti():
    df = pd.read_csv('bank-additional.csv', delimiter=',')
    n_rows = len(df)  # 4119 samples in bank-additional.csv
    newdata = np.zeros((n_rows, 20))
    # Categorical value -> numeric code. 'unknown' / 'nonexistent' become NaN
    # so the Imputer below can fill them in. This assumes the CSV contains only
    # the category labels listed here (true for this file's column layout).
    job_map = {'unknown': np.nan, 'admin.': 1, 'blue-collar': 2,
               'entrepreneur': 3, 'housemaid': 4, 'management': 5,
               'retired': 6, 'self-employed': 7, 'services': 8,
               'student': 9, 'technician': 10, 'unemployed': 11}
    marital_map = {'unknown': np.nan, 'married': 1, 'single': 2, 'divorced': 3}
    education_map = {'unknown': np.nan, 'basic.4y': 1, 'basic.6y': 2,
                     'basic.9y': 3, 'high.school': 4, 'illiterate': 5,
                     'professional.course': 6, 'university.degree': 7}
    yes_no_map = {'unknown': np.nan, 'no': 1, 'yes': 2}
    contact_map = {'cellular': 1, 'telephone': 2}
    month_map = {'apr': 1, 'aug': 2, 'dec': 3, 'jul': 4, 'jun': 5,
                 'mar': 6, 'may': 7, 'nov': 8, 'oct': 9, 'sep': 10}
    day_map = {'fri': 1, 'mon': 2, 'thu': 3, 'tue': 4, 'wed': 5}
    poutcome_map = {'nonexistent': np.nan, 'failure': 1, 'success': 2}
    for i in range(n_rows):
        newdata[i, 0] = df.iloc[i, 0]                        # age
        newdata[i, 1] = job_map[df.iloc[i, 1]]               # job
        newdata[i, 2] = marital_map[df.iloc[i, 2]]           # marital status
        newdata[i, 3] = education_map[df.iloc[i, 3]]         # education
        newdata[i, 4] = yes_no_map[df.iloc[i, 4]]            # credit in default?
        newdata[i, 5] = yes_no_map[df.iloc[i, 5]]            # housing loan?
        newdata[i, 6] = yes_no_map[df.iloc[i, 6]]            # personal loan?
        newdata[i, 7] = contact_map[df.iloc[i, 7]]           # contact type
        newdata[i, 8] = month_map[df.iloc[i, 8]]             # last-contact month
        newdata[i, 9] = day_map[df.iloc[i, 9]]               # last-contact weekday
        newdata[i, 10:13] = df.iloc[i, 10:13]                # duration, campaign, pdays
        newdata[i, 13] = poutcome_map[df.iloc[i, 13]]        # previous outcome
        newdata[i, 14:19] = df.iloc[i, 14:19]                # economic indicators
        newdata[i, 19] = 0 if df.iloc[i, 19] == 'no' else 1  # label y
    data = newdata[:, 0:19]
    label = newdata[:, 19]
    # Fill the NaNs introduced above with the most frequent value per column.
    imp = Imputer(missing_values='NaN', strategy='most_frequent', axis=0)
    data_full = imp.fit_transform(data, label)
    newdata[:, 0:19] = data_full
    newdata[:, 19] = label
    np.savetxt('new.csv', newdata, delimiter=',')
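# Hypothetical entry point, not part of the original module:
if __name__ == '__main__':
    prep_conti()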
|
[
"noreply@github.com"
] |
Shaw9575.noreply@github.com
|
cde1922f458bcdb9b5bfa265ad908f346cc22976
|
ddb65ebc3e012501839f5effcdee87e990748364
|
/gitlo.py
|
a5ad2abbad1fb92010e58f71c799b5f6eb24c6c9
|
[
"MIT"
] |
permissive
|
quyencao/Gitlo
|
ea224de8e0f8346d4929b0f777d51bc58fb1e1d4
|
022e87e9cd346f8023c720f41ca68d489cf5a5ad
|
refs/heads/master
| 2021-09-22T01:13:38.304304
| 2018-09-04T11:42:39
| 2018-09-04T11:42:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,513
|
py
|
import requests
import click
@click.group()
def cli():
"""Command Line tool to access Github API.
Know user info--> gitlo user <username>
get repository-list by user--> gitlo repos <username>
\b
get language percentage of particular repository-->
gitlo languages <username> <reponame>
"""
pass
@cli.command()
@click.argument('username')
def user(username):
r = requests.get('https://api.github.com/users/{}'.format(username)).json()
name = r['name']
repos = r['public_repos']
bio = r['bio']
# print(f'Name: {name}, Repos: {repos}, Bio: {bio}')
print('Name: {}, Repos: {}, Bio: {}'.format(name, repos, bio))
@cli.command()
@click.argument('username')
def repos(username):
r = requests.get('https://api.github.com/users/{}/repos'.format(username)).json()
for i in range(len(r)):
print(r[i]['name'])
def calculate_percentage(langs, lang, total_bytes):
result = langs[lang] * 100 / total_bytes
return round(result, 2)
def convert_to_percentage(langs):
total_bytes = sum(langs.values())
return {lang: calculate_percentage(langs, lang, total_bytes) for (lang, v) in langs.items()}
@cli.command()
@click.argument('username')
@click.argument('reponame')
def languages(username, reponame):
r = requests.get('https://api.github.com/repos/{}/{}/languages'.format(username, reponame)).json()
change_r = convert_to_percentage(r)
for key, value in change_r.items():
print('{}: {}%'.format(key, value))
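A quick offline sanity check of convert_to_percentage, runnable from the same module without hitting the GitHub API (the byte counts are made up):
langs = {'Python': 7500, 'Shell': 2500}  # byte counts, as the languages endpoint would return
assert convert_to_percentage(langs) == {'Python': 75.0, 'Shell': 25.0}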
|
[
"siddharthshringi@gmail.com"
] |
siddharthshringi@gmail.com
|
e075e39a62758c8a8ed4b51f5533c3b24e335615
|
e753418f5f450ba80bd996e39bf5d71c26293c0f
|
/web/market/users/utils.py
|
4f0e71c52bc6dc3d9291a8df3c9b1d896a3c28f6
|
[] |
no_license
|
chrisfid/flask-phone-market
|
818aec1d341804ce8ff6189429844396366859ec
|
4a8fac88fabc1583aac9f13c0c8094f8738af916
|
refs/heads/main
| 2023-07-01T15:39:14.669343
| 2021-08-13T16:40:15
| 2021-08-13T16:40:15
| 356,641,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
import os
import secrets
from PIL import Image
from flask import url_for, current_app
from flask_mail import Message
from market import mail
def save_picture(form_picture) -> str:
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_file_name = random_hex + f_ext
picture_path = os.path.join(
current_app.root_path, 'static/profile_pics', picture_file_name)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_file_name
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='noreply@demo.com',
recipients=[user.email_address])
msg.body = f'''To reset your password, visit the following link:
{url_for('users.reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
|
[
"krzysztof.fidyka@gmail.com"
] |
krzysztof.fidyka@gmail.com
|
746473740108fc84e035a8fd663d66c9ffd07d05
|
f2575444e57696b83ce6dcec40ad515b56a1b3a9
|
/Algorithms/Strings/TheLoveLetterMystery.py
|
4a91f2fba1af78d5c73d22d0ccffa5384384ee65
|
[] |
no_license
|
abhi10010/Hackerrank-Solutions
|
046487d79fc5bf84b4df5ef2117578d29cb19243
|
da2a57b8ebfcc330d94d104c1755b8c62a9e3e65
|
refs/heads/master
| 2021-07-24T09:41:49.995295
| 2020-07-12T09:31:58
| 2020-07-12T09:31:58
| 195,647,097
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
import math
import os
import random
import re
import sys
def theLoveLetterMystery(s):
x=s[:int(len(s)/2)]
y=s[math.ceil(len(s)/2):]
c1, c2 = [],[]
ans = 0
for i in x:
c1.append(ord(i))
for i in y:
c2.append(ord(i))
c2 = c2[::-1]
for i in range(len(c1)):
if c1[i]!=c2[i]:
ans+=abs(c1[i]-c2[i])
return ans
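For comparison, a sketch of the same logic written more compactly: pair each character of the first half with its mirror in the second half and sum the absolute ordinal differences. This is an equivalent rewrite, not the judge's reference solution:
def the_love_letter_mystery(s):
    half = len(s) // 2
    # zip stops at the shorter operand, so the middle character of an
    # odd-length string is ignored, exactly as in the version above.
    return sum(abs(ord(a) - ord(b)) for a, b in zip(s[:half], reversed(s[half:])))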
|
[
"noreply@github.com"
] |
abhi10010.noreply@github.com
|
4ba9fe3ea1b0c3b6aa2f0c62e3e04fca78567ac9
|
00f83b1737f0b4dd7912838eac4b2f68b7f8f6ff
|
/pa2/__3_Missing_Data_Processing.py
|
880dc420c74cc1545890683fe6c6b04884bb5c12
|
[] |
no_license
|
jmausolf/Machine_Learning
|
dd4f2cf720c5b429d6617058239403f649d3eefd
|
f90bea92347f041a2fb3ab9ba25c349b6dfe2d9f
|
refs/heads/master
| 2020-05-21T01:04:36.718900
| 2015-06-07T02:05:28
| 2015-06-07T02:05:28
| 35,919,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,789
|
py
|
"""
Joshua Mausolf - CAPP 30254 Assignment 2
In this python module I pre-process the data by filling in missing values.
"""
import sys, os
import csv
import pandas as pd
import re
#_____________ PART 3A _______________________________________________ #
# Pre-process values by filling in missing values.
## I made the choice to round to the nearest integer values to match the formatting
## for the existing database.
def camel_to_snake(column_name):
"""
Converts a string that is camelCase into snake_case
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', column_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
#______________Impute Mean_________________#
def impute_mean(variable, data_in, data_out):
#Define Data
data = pd.read_csv(data_in, index_col=0)
data.columns = [camel_to_snake(col) for col in data.columns]
#Get Row Index for Variable
number = (data.columns.get_loc(str(variable)))+1
#Generate Mean
m_var = data[str(variable)].mean()
meanVar = int(round(m_var))
in_file = open(data_in, 'rU')
reader = csv.reader(in_file)
out_file = open(data_out, "w")
writer = csv.writer(out_file)
for row in reader:
#Monthly_income = row[6]
variable_observation = row[number]
if variable_observation == '':
row[number] = meanVar
writer.writerow(row)
elif variable_observation == 'NA':
row[number] = meanVar
writer.writerow(row)
else:
writer.writerow(row)
in_file.close()
out_file.close()
#Unhash to run
impute_mean('monthly_income', 'data/cs-training.csv', 'data/cs-training#3A.csv')
impute_mean('number_of_dependents', 'data/cs-training#3A.csv', 'data/cs-training#3B.csv')
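The same imputation can be done without the csv round-trip; a minimal pandas sketch, assuming it runs in this module (which already imports pandas as pd and defines camel_to_snake) and keeping the rounded-integer convention:
def impute_mean_pandas(variable, data_in, data_out):
    data = pd.read_csv(data_in, index_col=0)  # '' and 'NA' both parse as NaN
    data.columns = [camel_to_snake(col) for col in data.columns]
    mean_var = int(round(data[variable].mean()))  # match the integer formatting
    data[variable] = data[variable].fillna(mean_var)
    data.to_csv(data_out)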
|
[
"Joshua@joshuas-imac.attlocal.net"
] |
Joshua@joshuas-imac.attlocal.net
|
6cc469ea00e31b87cc76779d9e80678a81c4a06f
|
2b7250de4a549692b1dc8c187e7734b99d28d16c
|
/custom_components/volkswagencarnet/__init__.py
|
4e55fdb3393e4e291f4d91def35db73e9d326544
|
[] |
no_license
|
Jazzi0/homeassistant-volkswagencarnet
|
fe0a1d2db437f3427f08ff898ed57d411641d883
|
189b22a21f15374dd12e26a5c33e8ed7d0819a2a
|
refs/heads/master
| 2020-07-10T07:18:58.953774
| 2020-02-01T09:56:01
| 2020-02-01T09:56:01
| 204,202,941
| 0
| 1
| null | 2020-02-01T09:56:02
| 2019-08-24T19:27:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,951
|
py
|
# -*- coding: utf-8 -*-
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from datetime import timedelta
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD, CONF_RESOURCES, CONF_NAME, CONF_SCAN_INTERVAL)
from homeassistant.helpers import discovery
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.icon import icon_for_battery_level
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'volkswagencarnet'
DATA_KEY = DOMAIN
CONF_REGION = 'region'
DEFAULT_REGION = 'SV'
CONF_MUTABLE = 'mutable'
CONF_SPIN = 'spin'
REQUIREMENTS = ['volkswagencarnet==4.1.5']
SIGNAL_STATE_UPDATED = '{}.updated'.format(DOMAIN)
MIN_UPDATE_INTERVAL = timedelta(minutes=1)
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=5)
COMPONENTS = {
'sensor': 'sensor',
'binary_sensor': 'binary_sensor',
'lock': 'lock',
'device_tracker': 'device_tracker',
'switch': 'switch',
'climate': 'climate'
}
RESOURCES = [
'position',
'distance',
'climatisation',
'window_heater',
'combustion_engine_heating',
'charging',
'battery_level',
'fuel_level',
'service_inspection',
'oil_inspection',
'last_connected',
'charging_time_left',
'electric_range',
'combustion_range',
'combined_range',
'charge_max_ampere',
'climatisation_target_temperature',
'external_power',
'parking_light',
'climatisation_without_external_power',
'door_locked',
'trunk_locked',
'request_in_progress'
]
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_REGION, default=DEFAULT_REGION): cv.string,
vol.Optional(CONF_MUTABLE, default=True): cv.boolean,
vol.Optional(CONF_SPIN, default=''): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_UPDATE_INTERVAL): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))),
vol.Optional(CONF_NAME, default={}): vol.Schema(
{cv.slug: cv.string}),
vol.Optional(CONF_RESOURCES): vol.All(
cv.ensure_list, [vol.In(RESOURCES)])
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Setup Volkswagen Carnet component"""
interval = config[DOMAIN].get(CONF_SCAN_INTERVAL)
data = hass.data[DATA_KEY] = VolkswagenData(config)
from volkswagencarnet import Connection
_LOGGER.debug("Creating connection to volkswagen carnet")
connection = Connection(
username = config[DOMAIN].get(CONF_USERNAME),
password = config[DOMAIN].get(CONF_PASSWORD),
)
# login to carnet
_LOGGER.debug("Logging in to volkswagen carnet")
connection._login()
if not connection.logged_in:
_LOGGER.warning('Could not login to volkswagen carnet, please check your credentials')
def is_enabled(attr):
"""Return true if the user has enabled the resource."""
return attr in config[DOMAIN].get(CONF_RESOURCES, [attr])
def discover_vehicle(vehicle):
"""Load relevant platforms."""
data.vehicles.add(vehicle.vin)
data.entities[vehicle.vin] = []
dashboard = vehicle.dashboard(
mutable = config[DOMAIN][CONF_MUTABLE], spin = config[DOMAIN][CONF_SPIN])
for instrument in (
instrument
for instrument in dashboard.instruments
if instrument.component in COMPONENTS and
is_enabled(instrument.slug_attr)):
data.instruments.add(instrument)
discovery.load_platform(hass, COMPONENTS[instrument.component], DOMAIN, (vehicle.vin,instrument.component,instrument.attr), config)
def update(now):
"""Update status from Volkswagen Carnet"""
try:
# check if we can login again
if not connection.logged_in:
connection._login()
if not connection.logged_in:
_LOGGER.warning('Could not login to volkswagen carnet, please check your credentials')
return False
else:
if not connection.update(request_data = False):
_LOGGER.warning("Could not query update from volkswagen carnet")
return False
else:
_LOGGER.debug("Updating data from volkswagen carnet")
for vehicle in connection.vehicles:
if vehicle.vin not in data.vehicles:
_LOGGER.info("Adding data for VIN: %s from carnet" % vehicle.vin.lower())
discover_vehicle(vehicle)
for entity in data.entities[vehicle.vin]:
entity.schedule_update_ha_state()
dispatcher_send(hass, SIGNAL_STATE_UPDATED, vehicle)
return True
finally:
track_point_in_utc_time(hass, update, utcnow() + interval)
_LOGGER.info("Starting volkswagencarnet component")
return update(utcnow())
class VolkswagenData:
"""Hold component state."""
def __init__(self, config):
"""Initialize the component state."""
self.vehicles = set()
self.instruments = set()
self.entities = {}
self.config = config[DOMAIN]
self.names = self.config.get(CONF_NAME)
def instrument(self, vin, component, attr):
"""Return corresponding instrument."""
return next((instrument
for instrument in self.instruments
if instrument.vehicle.vin == vin and
instrument.component == component and
instrument.attr == attr), None)
def vehicle_name(self, vehicle):
"""Provide a friendly name for a vehicle."""
if (vehicle.vin and vehicle.vin.lower() in self.names):
return self.names[vehicle.vin.lower()]
elif vehicle.vin:
return vehicle.vin
else:
return ''
class VolkswagenEntity(Entity):
"""Base class for all Volkswagen entities."""
def __init__(self, data, vin, component, attribute):
"""Initialize the entity."""
self.data = data
self.vin = vin
self.component = component
self.attribute = attribute
self.data.entities[self.vin].append(self)
@property
def instrument(self):
"""Return corresponding instrument."""
return self.data.instrument(self.vin, self.component, self.attribute)
@property
def icon(self):
"""Return the icon."""
if self.instrument.attr in ['battery_level', 'charging']:
return icon_for_battery_level(battery_level = self.instrument.state, charging = self.vehicle.charging)
else:
return self.instrument.icon
@property
def vehicle(self):
"""Return vehicle."""
return self.instrument.vehicle
@property
def _entity_name(self):
return self.instrument.name
@property
def _vehicle_name(self):
return self.data.vehicle_name(self.vehicle)
@property
def name(self):
"""Return full name of the entity."""
return '{} {}'.format(self._vehicle_name,self._entity_name)
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return dict(self.instrument.attributes, model='{}/{}'.format(self.vehicle.model,self.vehicle.model_year))
|
[
"me@robinostlund.name"
] |
me@robinostlund.name
|
88d9496d7bf4cf6e7d5a110cb9f93bcc7f8c312a
|
94dbcfecd9559bfdc7ed37d0224abdc89fdcca89
|
/test_start.py
|
148cf3bf855af4e77a1b13a55a348d705d7a882e
|
[] |
no_license
|
samtrot1524/6axes
|
57fd812a67d9edcda60d64ddf9f69dfe27807a24
|
1f8d0c3f7d131415a79aeffdd8e89162258681d6
|
refs/heads/master
| 2021-08-17T11:17:54.139018
| 2017-11-21T04:30:51
| 2017-11-21T04:30:51
| 111,498,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
bla bla bla
bla bla 2
|
[
"noreply@github.com"
] |
samtrot1524.noreply@github.com
|
f8d2d6ac9548efd47c1d02e712d553948d6ef950
|
dc1855a5d0b56dc11fc624589c3260451be8c81a
|
/store/mainapp/migrations/0006_auto_20180926_1127.py
|
cedc758d4031e8fd29c2e03ac0c3b0816b284dbc
|
[] |
no_license
|
nicolasechen/store
|
d73d93ba3e9532b38a5ccc66e112001ef741924c
|
487bd2df4735d065d43de560e938d2a104e59e92
|
refs/heads/master
| 2020-03-29T08:30:27.357857
| 2018-11-06T02:22:12
| 2018-11-06T02:22:12
| 149,714,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-26 03:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0005_auto_20180926_1036'),
]
operations = [
migrations.AddField(
model_name='item',
name='stock',
field=models.IntegerField(default=1, verbose_name='Stock'),
),
migrations.AddField(
model_name='item',
name='stock_updated_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
[
"nico@mail.eagleskytech.com"
] |
nico@mail.eagleskytech.com
|
076e2c21c54adfbbf6fb490ca0200703100e72eb
|
1b5404b8099de74d4e39e0a41b1d04c61defa8d4
|
/Олимпиады/Timus/1336.py
|
52e412e811226b5d3b987476dbb40a668420779e
|
[] |
no_license
|
ipeterov/random-stuff
|
5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd
|
dbb38d42331f636919fd149b23783e02ee2c9afb
|
refs/heads/master
| 2023-05-14T00:41:51.122251
| 2023-05-04T12:10:26
| 2023-05-04T12:10:26
| 206,028,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
n = int(input())
for y in range(1,n+1):
x = (n * y**3)**(0.5)
if type(x) == float and int(x) == x:
print(int(x))
print(y)
break
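Float equality on a square root can fail for large n; a sketch of an exact integer check using math.isqrt (available since Python 3.8):
import math

n = int(input())
for y in range(1, n + 1):
    t = n * y ** 3
    x = math.isqrt(t)
    if x * x == t:  # t is a perfect square -- no floating point involved
        print(x)
        print(y)
        break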
|
[
"ipeterov1@gmail.com"
] |
ipeterov1@gmail.com
|
2e3c829ab244fe65976b3694300d5c3a2eb342a8
|
1e83829918c7d26585b57c5cd2bed68d3deedf6b
|
/venv/bin/easy_install-3.6
|
74a88718369e1610214ceab836741fa8f317fedf
|
[] |
no_license
|
De4rm/flask-user-info-api
|
4e8a1a7f49e8e25842982f59da0f3fe4b56bc5fe
|
0dd5148e13a8e3feced75ee8e60a23115eba2d85
|
refs/heads/master
| 2020-03-07T20:56:07.405855
| 2018-04-02T05:57:34
| 2018-04-02T05:57:34
| 127,711,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
6
|
#!/home/de4rm/Documents/Flask/whomai/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"graurvadim91@gmail.com"
] |
graurvadim91@gmail.com
|
6cc1e53136510f8cd182137abdaeaa04503e0e81
|
3c1fa04d04cb2ba46bc8c1d0ba97aab3cb1fdfe2
|
/nova/tests/unit/api/openstack/placement/test_deploy.py
|
403df8a7a5dfdd9f53ffa585a534c9c62f22878d
|
[
"Apache-2.0"
] |
permissive
|
arvindn05/nova
|
ddd900d47a6c9a3832c4deda0a60bb28dea13799
|
1d335b333245198dbf39841eedd221377beb58c2
|
refs/heads/master
| 2021-01-25T11:28:19.669580
| 2018-04-26T13:55:56
| 2018-04-26T13:55:56
| 123,398,672
| 0
| 0
|
Apache-2.0
| 2018-03-01T07:36:40
| 2018-03-01T07:28:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the deply function used to build the Placement service."""
from oslo_config import cfg
import webob
from nova.api.openstack.placement import deploy
from nova import test
CONF = cfg.CONF
class DeployTest(test.NoDBTestCase):
def test_auth_middleware_factory(self):
"""Make sure that configuration settings make their way to
the keystone middleware correctly.
"""
auth_uri = 'http://example.com/identity'
authenticate_header_value = "Keystone uri='%s'" % auth_uri
self.flags(auth_uri=auth_uri, group='keystone_authtoken')
# ensure that the auth_token middleware is chosen
self.flags(auth_strategy='keystone', group='api')
app = deploy.deploy(CONF)
req = webob.Request.blank('/resource_providers', method="GET")
response = req.get_response(app)
self.assertEqual(authenticate_header_value,
response.headers['www-authenticate'])
|
[
"cdent@anticdent.org"
] |
cdent@anticdent.org
|
51635e5ed6ebc6f9ce37701cefd64f0a54aff036
|
44715bc474c7136c39c59c0c6fa09a7c654163dd
|
/utils/qiniu.py
|
a236eda89867b2bc34e69a6c31d13ae5f4b10f5f
|
[
"MIT"
] |
permissive
|
ggitta/blog
|
338674c199873dbb8698fe67f4205e1584de4dec
|
bffb1bf9eb43df98eee6c9a4b9f7d1fd06bc3ef1
|
refs/heads/master
| 2021-12-13T23:51:35.837052
| 2021-12-02T02:40:42
| 2021-12-02T02:40:42
| 191,341,921
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
from qiniu import Auth, put_file, etag
from jfsite.settings import ACCESS_KEY,SECRET_KEY,PERURL,BKNAME
q = Auth(ACCESS_KEY, SECRET_KEY)
# File name under which the upload is stored
key = 'my-python-logo.png'
def upload(key,localfile):
# Generate the upload token; an expiration time (in seconds) can be specified
token = q.upload_token(BKNAME, key, 3600)
# Upload the file from its local path
ret, info = put_file(token, key, localfile)
print(ret)
print(info)
assert ret['key'] == key
assert ret['hash'] == etag(localfile)
return PERURL+ret["key"]
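A hypothetical call, assuming ACCESS_KEY, SECRET_KEY, BKNAME and PERURL are configured in jfsite.settings (the paths below are made up for illustration):
url = upload('avatars/user42.png', '/tmp/user42.png')  # made-up object key and local path
print(url)  # PERURL + object key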
|
[
"1392670234@qq.com"
] |
1392670234@qq.com
|
6f70359b893f70859a6f08eac88bddb34a844b23
|
ee3fe2c9167a382aad67cf5108be4edc7e520171
|
/hni/wifid.py
|
0ca93f394820ac10dc4fe02938f1872487f9429c
|
[] |
no_license
|
Rovertholley/WiringPi
|
5bdbd865a4021f0769119e6f0db9892c7de5efc4
|
3559ce4f467e96a81b3a7cd0ce5b2e42cf4f38bd
|
refs/heads/master
| 2020-03-22T14:57:35.270913
| 2018-07-17T02:26:31
| 2018-07-17T02:26:31
| 140,218,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
import os
import time
def setup_conf_files():
dir = os.path.dirname(__file__) + '/conf/'
_copy_file_no_overwriting(os.path.abspath(dir + 'dhcpd.conf'), os.path.abspath('/etc/dhcp/dhcpd.conf'))
_copy_file_no_overwriting(os.path.abspath(dir + 'udhcpd.conf'), os.path.abspath('/etc/udhcpd.conf'))
_copy_file_no_overwriting(os.path.abspath(dir + 'wpa_supplicant.conf'), os.path.abspath('/etc/wpa_supplicant/wpa_supplicant.conf'))
def _copy_file_no_overwriting(src, dst):
import shutil
if not os.path.isfile(dst):
print('copying... ', dst)
shutil.copyfile(src, dst)
def _system_critical(command):
if os.system(command) != 0:
raise ConnectionError('wifi direct failed')
def start_as_go_fedora(str_interface='wls35u1', str_static_ip_addr_for_p2p='192.168.1.2'):
os.system('sudo killall dhcpd') # stop any running dhcpd
os.system('sudo wpa_cli -i ' + str_interface + ' terminate -B')
# os.system('sudo wpa_cli -i p2p-' + str_interface + '-0 terminate -B')
time.sleep(2)
os.system('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward') # enable IPv4 forwarding
# os.system('echo "ctrl_interface=/var/run/wpa_supplicant\nupdate_config=1" | sudo tee /etc/wpa_supplicant.conf')
_system_critical('sudo wpa_supplicant -d -Dnl80211 -c /etc/wpa_supplicant.conf -i' + str_interface + ' -B') #
_system_critical('sudo wpa_cli -i' + str_interface + ' p2p_group_add')
# p2p_group_add: Become an autonomous GO (p2p )
_system_critical('sudo ifconfig p2p-' + str_interface + '-0 ' + str_static_ip_addr_for_p2p) # assign the static IP to the p2p interface
_system_critical('sudo wpa_cli -i p2p-' + str_interface + '-0 p2p_find') # p2p_find: Enables discovery
os.system('sudo wpa_cli -ip2p-' + str_interface + '-0 p2p_peers')
# p2p_peers: Shows list of discovered peers (not necessary)
_system_critical('sudo wpa_cli -ip2p-' + str_interface + '-0 wps_pbc')
# wps_pbc: pushbutton for GO WPS authorization to accept incoming connections (When devices try to connect to GO)
_system_critical('sudo dhcpd')
def start_as_go_ubuntu(str_interface='wlan0', str_static_ip_addr_for_p2p='192.168.1.2'):
os.system('sudo killall udhcpd')
os.system('sudo wpa_cli -i ' + str_interface + ' terminate -B')
print("1")
# os.system('sudo wpa_cli -i p2p-' + str_interface + '-0 terminate -B')
time.sleep(1)
os.system('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward')
print("2")
# os.system('echo "ctrl_interface=/var/run/wpa_supplicant\nupdate_config=1" | sudo tee /etc/wpa_supplicant.conf')
_system_critical('sudo wpa_supplicant -d -Dnl80211 -c /etc/wpa_supplicant/wpa_supplicant.conf -i' + str_interface + ' -B')
print("3")
_system_critical('sudo wpa_cli -i' + str_interface + ' p2p_group_add')
print("4")
_system_critical('sudo ifconfig p2p-' + str_interface + '-0 ' + str_static_ip_addr_for_p2p)
print("5")
_system_critical('sudo wpa_cli -i p2p-' + str_interface + '-0 p2p_find')
print("6")
os.system('sudo wpa_cli -ip2p-' + str_interface + '-0 p2p_peers')
print("7")
_system_critical('sudo wpa_cli -ip2p-' + str_interface + '-0 wps_pbc')
print("8")
_system_critical('sudo udhcpd /etc/udhcpd.conf &')
if __name__ == "__main__":
# example
try:
start_as_go_ubuntu()
except ConnectionError:
print('ConnectionError from wifid')
|
[
"pjmpiop@gmail.com"
] |
pjmpiop@gmail.com
|
dfbff07e71bbb5bb4ed4d4f2d5c8735b40db0626
|
af7f4a92720e31ae8a1d616ec7c727491ad4ac7b
|
/src/roles/sharpshooter.py
|
bf68e934d40694af002381c2f5c2b176245f6184
|
[
"BSD-2-Clause"
] |
permissive
|
MishaCatskill/lykos
|
6a24bdc425125fed28b2d2299dca5057686ce57b
|
e4461416c18988b6195710abb2ce85824102ccd6
|
refs/heads/master
| 2021-07-01T06:53:11.905654
| 2018-10-19T02:15:44
| 2018-10-19T02:15:44
| 153,720,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
import re
import random
import itertools
import math
from collections import defaultdict
from src.utilities import *
from src import channels, users, debuglog, errlog, plog
from src.functions import get_players, get_all_players, get_main_role, get_reveal_role, get_target
from src.decorators import command, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.messages import messages
from src.events import Event
from src.roles.helper.gunners import setup_variables
GUNNERS = setup_variables("sharpshooter")
@event_listener("gun_chances")
def on_gun_chances(evt, var, user, role):
if role == "sharpshooter":
hit, miss, headshot = var.SHARPSHOOTER_GUN_CHANCES
evt.data["hit"] = hit
evt.data["miss"] = miss
evt.data["headshot"] = headshot
@event_listener("new_role")
def on_new_role(evt, var, user, old_role):
if old_role == "sharpshooter":
if evt.data["role"] != "sharpshooter":
del GUNNERS[user]
elif evt.data["role"] == "sharpshooter":
GUNNERS[user] = math.ceil(var.SHARPSHOOTER_MULTIPLIER * len(get_players()))
@event_listener("get_role_metadata")
def on_get_role_metadata(evt, var, kind):
if kind == "role_categories":
evt.data["sharpshooter"] = {"Village", "Safe", "Killer"}
|
[
"vgr255@live.ca"
] |
vgr255@live.ca
|
cf59b23911afcb4569495e1a1045a5c246cc01ce
|
d11d772a73b7fd61fba68def7cd69c8d211490a4
|
/glue/models.py
|
ac46b9b18557c2b12ec5bfbd6d4bb2f9bb6755cf
|
[] |
no_license
|
rharrell729/huddle
|
3ab7e41d1aaad80e1398166bf92bbdbf23e30e09
|
1f54969eaf3bb570d9215a4f74e658f4b7f7dc33
|
refs/heads/master
| 2020-06-04T18:47:21.170695
| 2015-11-04T16:54:06
| 2015-11-04T16:54:06
| 33,046,462
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Vote(models.Model):
user = models.ForeignKey(User)
class Option(models.Model):
title = models.TextField()
votes = models.ManyToManyField(Vote, related_name='votes', blank=True)
class Huddle(models.Model):
title = models.TextField(default='')
creator = models.ForeignKey(User, related_name='creator')
recipients = models.ManyToManyField(User, related_name='recipients', blank=True)
options = models.ManyToManyField(Option, related_name='options')
end = models.TimeField()
|
[
"d_leblond@live.com"
] |
d_leblond@live.com
|
51f6dd7806bf2d752fc5f09f43d5411e37c4d310
|
66e6360325b781ed0791868765f1fd8a6303726f
|
/TB2009/WorkDirectory/5121 Event Fraction Adc Cut/Profile_108537.py
|
cb307cc5fff2c5a591246c53aa8d83ea51af9714
|
[] |
no_license
|
alintulu/FHead2011PhysicsProject
|
c969639b212d569198d8fce2f424ce866dcfa881
|
2568633d349810574354ad61b0abab24a40e510e
|
refs/heads/master
| 2022-04-28T14:19:30.534282
| 2020-04-23T17:17:32
| 2020-04-23T17:17:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,890
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("VlsbInfo")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108537.root"),
streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
#IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(3),
mip = cms.untracked.string("MIPCalibration.txt"),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(False),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(False),
adcMap = cms.untracked.string("FinalAdcMapping_All.txt")
)
process.averagecharge = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_108537.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(False) # interpolate for missing channels by averaging neighboring channels
)
process.averagecharge_interpolated = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_Interpolated_108537.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(True)
)
process.filladc = cms.EDAnalyzer("FillAdcDistributionAnalyzer",
invert = cms.untracked.bool(False),
highdef = cms.untracked.bool(True),
divideMIP = cms.untracked.bool(False),
baselineSubtraction = cms.untracked.bool(True),
output = cms.untracked.string("AdcDistribution_108537.root")
)
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")
process.maxAdcCut = cms.EDFilter("MaxAdcCutFilter",
cut = cms.untracked.double(12600),
motherBoard0_cut = cms.untracked.vdouble(5000, 5725, 5750, 5720, 5600, 5710, 5780, 5610, 5695,
5715, 5700, 65536, 5755, 5650, 5730),
baselineSubtracted = cms.untracked.bool(True),
useRealAdc = cms.untracked.bool(True)
)
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet(
reportEvery = cms.untracked.int32(999)
)
)
process.p = cms.Path(
process.tbunpack *
process.ABCcut *
process.vlsbinfo *
process.maxAdcCut *
process.averagecharge *
process.averagecharge_interpolated *
process.filladc
)
|
[
"yichen@positron01.hep.caltech.edu"
] |
yichen@positron01.hep.caltech.edu
|
b52b5a81401c778303b14c1ce311786c7e3ff9d2
|
94bc9af204919775bd7f228b90c9111996d519cf
|
/linear/test.py
|
18bcc2573641f7e5c34af02828126e6c902edb70
|
[
"Apache-2.0"
] |
permissive
|
dkp-1024/my_machine_learning
|
e1cf3f36b152671875e27edb04c92627f2c18b45
|
11ace64c7ae5c709f20cb6691529768b42d08d22
|
refs/heads/master
| 2020-03-19T01:22:19.583402
| 2018-05-31T06:19:37
| 2018-05-31T06:19:37
| 135,540,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
x = tf.constant(35, name='x')
y = tf.Variable(x + 5, name='y')
# Printing the Variable directly only shows the tensor object, not its value.
print(y)
# In TF 1.x graph mode the value is obtained by initializing the variables
# and evaluating the tensor inside a session.
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
print(session.run(y))
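For reference, the same computation in TensorFlow 2.x eager mode needs no session; a sketch assuming TF 2 is installed:
import tensorflow as tf

x = tf.constant(35, name='x')
y = tf.Variable(x + 5, name='y')
print(y.numpy())  # eager execution prints 40 immediately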
|
[
"noreply@github.com"
] |
dkp-1024.noreply@github.com
|
6bd5e98a294805536427c0f47065d221a069340c
|
1539cae50c650ce77d806277b3ebe9a177dedb07
|
/compiler/compiler.py
|
0c932b303707ccd53ad5cc79e40f03f342ae3845
|
[] |
no_license
|
vudaoanhtuan/Assignment_Management_Server
|
58465c2e86fd0ade1635c538f5e9acd8505933d4
|
07667233b39bf8fd14b210e1536a14d46e07e120
|
refs/heads/master
| 2021-08-30T12:40:13.110662
| 2017-12-18T01:18:09
| 2017-12-18T01:18:09
| 112,940,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
import os
import xml.etree.ElementTree as ET
def compile(compiler_name, source_file_list, exe_file, log_file):
cm = compiler_name + " "
for filename in list(source_file_list):
cm = cm + filename + " "
cm = cm + " -o " + exe_file + " > " + log_file + " 2>&1"
res = os.system(cm)
return res
def getListFile(xmlFile):
tree = ET.parse(xmlFile)
root = tree.getroot()
header = []
source = []
for h in root.findall("./header/*"):
header.append(h.text)
for s in root.findall("./source/*"):
source.append(s.text)
return header, source
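A hypothetical usage sketch, assuming a project.xml laid out with the <header> and <source> sections that getListFile parses; note that compile here shadows the Python builtin of the same name:
headers, sources = getListFile('project.xml')   # made-up file name
rc = compile('g++', sources, 'a.out', 'build.log')
if rc != 0:
    print('build failed, see build.log')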
|
[
"vudaoanhtuan@gmail.com"
] |
vudaoanhtuan@gmail.com
|
e30f6f7fd13e31f856515cee88244d7ef0e1badd
|
95444cf00367b8afb7995f38e805824d0259e4ab
|
/DailyFrsh/apps/goods/views.py
|
64554118cfce81a3171b2be969c220ac47aca0ad
|
[
"MIT"
] |
permissive
|
GSFNE/b-b-b
|
1675fa3a55819afa0fa1aa701a3d3602e0325caf
|
6004306a752cefc35956cc20da1af4e47168666e
|
refs/heads/master
| 2021-08-29T01:47:06.202099
| 2017-12-13T10:17:58
| 2017-12-13T10:17:58
| 112,737,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,146
|
py
|
from django.shortcuts import render, redirect, HttpResponse
from django.views.generic import View
from django_redis import get_redis_connection # connect to the redis database
from apps.goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner, GoodsSKU
from apps.order.models import OrderGoods
from django.core.urlresolvers import reverse
from django.core.cache import cache # set and clear the cache
from django.core.paginator import Paginator # used for pagination
# Create your views here.
class IndexView(View):
# GET: return the home page
def get(self, request):
# Try to read the page data from the cache
context = cache.get('index_page_data')
if context is None:
# Cache miss: build the cache
# Read the data from the database
# Data to read:
# all goods categories (the 6 categories on the left); the scrolling banner images in the middle; the two promotion images on the right
# 1. All goods categories
types = GoodsType.objects.all()
# 2. Scrolling banner images in the middle
index_banner = IndexGoodsBanner.objects.all().order_by('index')
# 3. Two promotion images on the right
promotion_banner = IndexPromotionBanner.objects.all().order_by('index')
# Category display information for the home page
for type in types:
# Query the text-type goods entries displayed on the home page
title_banner = IndexTypeGoodsBanner.objects.filter(type=type, display_type=0).order_by('index')
# Query the image-type goods entries displayed on the home page
image_banner = IndexTypeGoodsBanner.objects.filter(type=type, display_type=1).order_by('index')
type.title_banner = title_banner
type.image_banner = image_banner
context = {
'types': types,
'index_banner': index_banner,
'promotion_banner': promotion_banner
}
# Set the cache; three arguments: the cache key (used to read the cache back), the context, and the cache lifetime
cache.set('index_page_data', context, 3600)
# else: omitted here -- it would mean the cache exists; mind the indentation
# Get the user
user = request.user
# The cart count defaults to 0; if goods have been added to the cart, read it from redis, otherwise it stays 0
cart_count = 0
if user.is_authenticated():
con = get_redis_connection('default') # connect to the redis cache
cart_key = 'cart_%d' % user.id
cart_count = con.hlen(cart_key) # number of keys stored in the hash
# The cart is stored as one redis hash per user
# cart_1(goods_id: num, goods_id: num)/ cart_2(goods_id:num, good_id: num)/...
# hlen is a hash command that returns the number of fields
# Update the context with the cart item count
context.update(cart_count=cart_count)
# Return the home page
return render(request, 'index.html', context)
# Ways the front end passes parameters to the back end:
# 1. in the URL, matched by a regex against the requested path
# 2. GET
# 3. POST
class DetailView(View):
# Show the goods detail page
def get(self, request, sku_id):
# Fetch the goods detail
try:
sku = GoodsSKU.objects.get(id=sku_id)
except GoodsSKU.DoesNotExist:
# The goods does not exist: redirect to the home page
return redirect(reverse('goods:index'))
# else: omitted here
# The goods exists
# 1. Get two new products of the same category as this goods
new_skus = GoodsSKU.objects.filter(type=sku.type).order_by('-create_time')[0:2]
# 2. Get the review information of the goods
order_skus = OrderGoods.objects.filter(sku=sku).order_by('-create_time')
# 3. Get other SKUs under the same SPU (e.g. boxed strawberries vs. 500g strawberries: same goods, different specification)
same_spu_skus = GoodsSKU.objects.filter(goods=sku.goods).exclude(id=sku.id) # exclude() leaves out the goods itself
# 4. Get the number of entries in the cart, i.e. how many goods the cart shows
# <1. Get the user
user = request.user
cart_count = 0
if user.is_authenticated():
con = get_redis_connection('default')
cart_key = 'cart_%d' % user.id
cart_count = con.hlen(cart_key) # length of the hash under this key
# Browsing history only exists for logged-in users, so this must stay inside the if block
# When the user visits the detail page, add it to the browsing history, stored as a redis list
con = get_redis_connection('default')
history_key = 'history_%d' % user.id
# If the goods is already in the history, remove the old entry first, then push its id to the front (left) of the list
# Try to remove the entry; no error is raised if the list does not contain it
con.lrem(history_key, 0, sku_id)
# Push the new entry
con.lpush(history_key, sku_id)
# Keep only the customer's 5 most recently viewed goods
con.ltrim(history_key, 0, 4)
# Build the template context
context = {
'sku': sku,
'new_skus': new_skus,
'order_skus': order_skus,
'same_spu_skus': same_spu_skus,
'cart_count': cart_count
}
return render(request, 'detail.html', context)
'''
List page: ListView
What the front end passes to the back end:
1. list/category_id/page_number/sort_order
2. list/category_id?page=x&sort=x
3. list/category_id/page?sort=x   (the form used here)
list/1/1?sort=default  means: goods category id 1, first page, default sort order
'''
# /list/type_id/page?sort=
class ListView(View):
def get(self, request, type_id, page):
# Try to look up the category in the database via the requested type_id
try:
type = GoodsType.objects.get(id=type_id)
except GoodsType.DoesNotExist:
# The goods category does not exist
return redirect(reverse('goods:index'))
# else: the goods category exists
# Get the requested sort order of the goods
sort = request.GET.get('sort', 'default') # use the sort order passed in if there is one, otherwise the default
# sort = default (default; sorted by id here)
# sort = price (sorted by price, ascending)
# sort = hot (popularity; sorted by sales, descending)
if sort == 'price':
skus = GoodsSKU.objects.filter(type=type).order_by('price')
elif sort == 'hot':
skus = GoodsSKU.objects.filter(type=type).order_by('-sales')
else:
sort = 'default'
skus = GoodsSKU.objects.filter(type=type).order_by('-id')
# There are too many goods, so the list page needs pagination
paginator = Paginator(skus, 1)
# Handle the requested page number: page
page = int(page)
# num_pages is a Paginator attribute: the total number of pages
if page > paginator.num_pages or page <= 0:
page = 1 # fall back to the first page if the requested page number is invalid
# Get the Page instance for the requested page, i.e. the object holding that page's data
# page() is a Paginator method: given a page number, it returns a Page object
skus_page = paginator.page(page)
# Get two new products of this category
new_skus = GoodsSKU.objects.filter(type=type).order_by('-create_time')[0:2]
# Get the number of entries in the cart
# Get the user
cart_count = 0
user = request.user
if user.is_authenticated():
con = get_redis_connection('default')
cart_key = 'cart_%d' % user.id
cart_count = con.hlen(cart_key)
# Context: pass the data for the front end to build the page
context = {
'type': type,
'skus_page': skus_page,
'new_skus': new_skus,
'cart_count': cart_count,
'sort': sort
}
# return HttpResponse('1')
return render(request, 'list.html', context)
|
[
"123@163.com"
] |
123@163.com
|
3e05e78e17b355c640da185e9558ca91381dce42
|
c411c5513ec5d58eb0e0edab0b6a697974d638fb
|
/model/my_model/ResUnet.py
|
2cd80c71c60bc1b404b2f28dc3968b7e15664933
|
[] |
no_license
|
blue88blue/Segmentation
|
ab7f9dec4ab1ab4cdb4b8ca5af0cb9e1a560e20f
|
69c4db1897a550a08a63811ffbb817754c20fbf2
|
refs/heads/master
| 2023-03-01T06:58:49.405779
| 2021-01-27T02:07:56
| 2021-01-27T02:07:56
| 296,049,616
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,669
|
py
|
from model.segbase import SegBaseModel
from model.model_utils import init_weights, _FCNHead
from .blocks import *
from .SPUnet import SPSP
from .ccr import ccr
class ResUnet(SegBaseModel):
def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False, dilated=True, deep_stem=False, **kwargs):
super(ResUnet, self).__init__(backbone, pretrained_base=pretrained_base, dilated=dilated, deep_stem=deep_stem, **kwargs)
self.aux = aux
self.dilated = dilated
channels = self.base_channel
if deep_stem or backbone == 'resnest101':
conv1_channel = 128
else:
conv1_channel = 64
if dilated:
self.donv_up3 = decoder_block(channels[0]+channels[3], channels[0])
self.donv_up4 = decoder_block(channels[0]+conv1_channel, channels[0])
else:
self.donv_up1 = decoder_block(channels[2] + channels[3], channels[2])
self.donv_up2 = decoder_block(channels[1] + channels[2], channels[1])
self.donv_up3 = decoder_block(channels[0] + channels[1], channels[0])
self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
if self.aux:
self.aux_layer = _FCNHead(256, n_class)
self.out_conv = nn.Sequential(
nn.Conv2d(channels[0], channels[0], kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(channels[0]),
nn.ReLU(),
nn.Conv2d(channels[0], n_class, kernel_size=1, bias=False),
)
def forward(self, x):
outputs = dict()
size = x.size()[2:]
x = self.backbone.conv1(x)
x = self.backbone.bn1(x)
c1 = self.backbone.relu(x) # 1/2 64
x = self.backbone.maxpool(c1)
c2 = self.backbone.layer1(x) # 1/4 64
c3 = self.backbone.layer2(c2) # 1/8 128
c4 = self.backbone.layer3(c3) # 1/16 256
c5 = self.backbone.layer4(c4) # 1/32 512
if self.dilated:
x = self.donv_up3(c5, c2)
x = self.donv_up4(x, c1)
else:
x = self.donv_up1(c5, c4)
x = self.donv_up2(x, c3)
x = self.donv_up3(x, c2)
x = self.donv_up4(x, c1)
outputs.update({"feature": x})
x = self.out_conv(x)
x = F.interpolate(x, size, mode='bilinear', align_corners=True) # final upsampling to the input size
outputs.update({"main_out": x})
if self.aux:
auxout = self.aux_layer(c3)
auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
outputs.update({"aux_out": [auxout]})
return outputs
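A quick shape sanity check of the decoder wiring; a sketch that assumes the relative imports (blocks, SegBaseModel) resolve and uses an input whose sides are divisible by 32:
import torch

model = ResUnet(n_class=2, backbone='resnet34', dilated=False)
out = model(torch.randn(1, 3, 224, 224))
print(out['main_out'].shape)  # expected: torch.Size([1, 2, 224, 224])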
|
[
"805207107@qq.com"
] |
805207107@qq.com
|
cb50aa2494245aa5c59ca56f3e2213d3ab7c947f
|
37aa95872615830c481320ab3e5c41e900a87bc8
|
/register/views.py
|
c1511e5c6bd4e23ae8135b7648e10551bb0b54c4
|
[] |
no_license
|
watermelon-nakatake/to_miyazaki
|
097fb4b858c86adffe7bbe7f8f3828a7d76ff6d9
|
36703f7266ee978604678643de877915cc3e43d0
|
refs/heads/master
| 2023-02-24T10:23:07.162120
| 2023-02-14T01:05:40
| 2023-02-14T01:05:40
| 264,803,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,858
|
py
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.views import (
LoginView, LogoutView, PasswordChangeView, PasswordChangeDoneView, PasswordResetView, PasswordResetDoneView,
PasswordResetConfirmView, PasswordResetCompleteView
)
from django.contrib.sites.shortcuts import get_current_site
from django.core.signing import BadSignature, SignatureExpired, loads, dumps
from django.http import HttpResponseBadRequest
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.views import generic
from django.urls import reverse_lazy
from .forms import (
LoginForm, UserCreateForm, MyPasswordChangeForm, MyPasswordResetForm, MySetPasswordForm
)
User = get_user_model()
class Login(LoginView):
"""ログインページ"""
form_class = LoginForm
template_name = 'register/login.html'
class Logout(LogoutView):
"""ログアウトページ"""
template_name = 'register/logout.html'
class UserCreate(generic.CreateView):
"""ユーザー仮登録"""
template_name = 'register/user_create.html'
form_class = UserCreateForm
def form_valid(self, form):
"""仮登録と本登録用メールの発行."""
# 仮登録と本登録の切り替えは、is_active属性を使うと簡単です。
# 退会処理も、is_activeをFalseにするだけにしておくと捗ります。
user = form.save(commit=False)
user.is_active = False
user.save()
# Send the activation URL
current_site = get_current_site(self.request)
domain = current_site.domain
context = {
'protocol': self.request.scheme,
'domain': domain,
'token': dumps(user.pk),
'user': user,
}
subject = render_to_string('register/mail_template/create/subject.txt', context)
message = render_to_string('register/mail_template/create/message.txt', context)
user.email_user(subject, message)
return redirect('register:user_create_done')
class UserCreateDone(generic.TemplateView):
"""ユーザー仮登録したよ"""
template_name = 'register/user_create_done.html'
class UserCreateComplete(generic.TemplateView):
"""メール内URLアクセス後のユーザー本登録"""
template_name = 'register/user_create_complete.html'
timeout_seconds = getattr(settings, 'ACTIVATION_TIMEOUT_SECONDS', 60 * 60 * 24)  # defaults to one day
def get(self, request, **kwargs):
"""tokenが正しければ本登録."""
token = kwargs.get('token')
try:
user_pk = loads(token, max_age=self.timeout_seconds)
# token expired
except SignatureExpired:
return HttpResponseBadRequest()
# token is invalid
except BadSignature:
return HttpResponseBadRequest()
# token is fine
else:
try:
user = User.objects.get(pk=user_pk)
except User.DoesNotExist:
return HttpResponseBadRequest()
else:
if not user.is_active:
# everything checks out: complete the registration
user.is_active = True
user.save()
return super().get(request, **kwargs)
return HttpResponseBadRequest()
class PasswordChange(PasswordChangeView):
"""パスワード変更ビュー"""
form_class = MyPasswordChangeForm
success_url = reverse_lazy('register:password_change_done')
template_name = 'register/password_change.html'
class PasswordChangeDone(PasswordChangeDoneView):
"""パスワード変更しました"""
template_name = 'register/password_change_done.html'
class PasswordReset(PasswordResetView):
"""パスワード変更用URLの送付ページ"""
subject_template_name = 'register/mail_template/password_reset/subject.txt'
email_template_name = 'register/mail_template/password_reset/message.txt'
template_name = 'register/password_reset_form.html'
form_class = MyPasswordResetForm
success_url = reverse_lazy('register:password_reset_done')
class PasswordResetDone(PasswordResetDoneView):
"""パスワード変更用URLを送りましたページ"""
template_name = 'register/password_reset_done.html'
class PasswordResetConfirm(PasswordResetConfirmView):
"""新パスワード入力ページ"""
form_class = MySetPasswordForm
success_url = reverse_lazy('register:password_reset_complete')
template_name = 'register/password_reset_confirm.html'
class PasswordResetComplete(PasswordResetCompleteView):
"""新パスワード設定しましたページ"""
template_name = 'register/password_reset_complete.html'
|
[
"kirishima3260@yahoo.co.jp"
] |
kirishima3260@yahoo.co.jp
|
1d14667b7ecc9cdfbde0a1e4777f83f5d2b51773
|
537e25911adae342c7b2f204ff9eff8b0cf08beb
|
/MyTensorflow/AlexNet.py
|
61f212deda4fd2b0726602336b17c9bd9ef946a3
|
[] |
no_license
|
zhulianhai/DeepLearning
|
e3302b459812402d6ba8ef31fb2030129c2a36b7
|
e4c33ad74189a157997bf92d2eec15763a52858f
|
refs/heads/master
| 2020-06-13T10:10:13.524626
| 2017-07-29T04:46:40
| 2017-07-29T04:46:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,711
|
py
|
# -*- coding: utf-8 -*-
"""
project: AlexNet convolutional neural network implemented with TensorFlow
@Author:Gene
@Github:https://github.com/Gene20/DeepLearning
@Email:GeneWithyou@gamil.com
@Website:www.gene20/top
"""
"""
Network structure: Input-->Conv+ReLu+LRN+Pool-->Conv+ReLu+LRN+Pool-->Conv+ReLu-->Conv+ReLu-->Conv+ReLu
-->Pool-->FC+ReLu-->FC+ReLu-->FC+Softmax-->Output
"""
from datetime import datetime
import time
import math
import tensorflow as tf
batch_size=32
batch_nums=100
"""1.显示神经网络每一层结构"""
def print_activation(t):
print(t.op.name,'',t.get_shape().as_list())
""""2.定义权重和偏置"""
def weight_variables(shape,dtype=tf.float32,stddev,name):
return tf.Variable(tf.truncated_normal(shape=shape,dtype=dtype,stddev=stddev),name=name)
def bias_variables(val,shape,dtype=tf.float32,tb=True,name):
return tf.Variable(tf.constant(val,shape=shape,dtype=dtype),trainable=tb,name=name)
"""3.定义卷积和池化"""
def conv2d(x,W,strides,padding='SAME'):
return tf.conv2d(x,W,strides=strides,padding=padding)
def max_pool_2x2(x,ksize,strides,padding,name):
return tf.max_pool(x,ksize=ksize,strides=strides,padding=padding,name=name)
"""4.定义inference函数"""
def inference(images):
parameters=[]
"""第一个卷积层和池化层"""
with tf.name_scope('conv1') as scope:
W_conv1=weight_variables([11,11,3,64],tf.float32,1e-1,'weights')
b_conv1=bias_variables(0.0,[64],tf.float32,True,'bias')
h_conv1=tf.nn.relu(tf.nn.bias_add(conv2d(images,W_conv1,[1,4,4,1],'SAME'),b_conv1),name=scope)
print_activation(h_conv1)
parameters+=[W_conv1,b_conv1]
"""LRN层和池化层"""
lrn1=tf.nn.lrn(h_conv1,4,bias=1.0,alpha=0.001/9,beta=0.75,name='lrn1')
h_pool1=max_pool_2x2(lrn1,[1,3,3,1],[1,2,2,1],'VALID','pool1')
print_activation(h_pool1)
"""第二个卷积层和池化层"""
with tf.name_scope('conv2')as scope:
W_conv2=weight_variables([5,5,64,192],tf.float32,1e-1,'weights')
b_conv2=bias_variables(0.0,[192],tf.float32,True,'bias')
h_conv2=tf.nn.relu(tf.nn.bias_add(conv2d(h_pool1,W_conv2,[1,1,1,1],'SAME'),b_conv2),name=scope)
print_activation(h_conv2)
parameters+=[W_conv2,b_conv2]
"""LRN层和池化层"""
lrn2=tf.nn.lrn(h_conv2,4,bias=1.0,alpha=0.001/9,beta=0.75,name='lrn2')
h_pool2=max_pool_2x2(lrn2,[1,3,3,1],[1,2,2,1],'VALID','pool2')
print_activation(h_pool2)
"""第三个卷积层和池化层"""
with tf.name_scope('conv3')as scope:
W_conv3=weight_variables([3,3,192,384],tf.float32,1e-1,'weights')
b_conv3=bias_variables(0.0,[384],tf.float32,True,'bias')
h_conv3=tf.nn.relu(tf.nn.bias_add(conv2d(h_pool2,W_conv3,[1,1,1,1],'SAME'),b_conv3),name=scope)
print_activation(h_conv3)
parameters+=[W_conv3,b_conv3]
"""第四个卷积层和池化层"""
with tf.name_scope('conv4')as scope:
W_conv4=weight_variables([3,3,384,256],tf.float32,1e-1,'weights')
b_conv4=bias_variables(0.0,[256],tf.float32,True,'bias')
h_conv4=tf.nn.relu(tf.nn.bias_add(conv2d(h_conv3,W_conv4,[1,1,1,1],'SAME'),b_conv4),name=scope)
print_activation(h_conv4)
parameters+=[W_conv4,b_conv4]
"""第五个卷积层和池化层"""
with tf.name_scope('conv5')as scope:
W_conv5=weight_variables([3,3,256,256],tf.float32,1e-1,'weights')
b_conv5=bias_variables(0.0,[256],tf.float32,True,'bias')
h_conv5=tf.nn.relu(tf.nn.bias_add(conv2d(h_conv4,W_conv5,[1,1,1,1],'SAME'),b_conv5),name=scope)
print_activation(h_conv5)
parameters+=[W_conv5,b_conv5]
"""池化层"""
h_pool5=max_pool_2x2(h_conv5,[1,3,3,1],[1,2,2,1],'VALID','pool5')
print_activation(h_pool5)
return h_pool5,parameters
"""5.评估每轮AlexNet时间"""
def cal_AlexNet_time(session,target,info_string):
first_steps=10
total_time=0.0
total_time_squared=0.0
for i in range(batch_nums+first_steps):
start_time=time.time()
_=session.run([target])
duration=time.time()-start_time
if i>=first_steps and not i%10:
print('%s :step %d, duration= %.3f'%(datetime.now(),(i-first_steps),duration))
total_time+=duration
total_time_squared+=duration*duration
sec_pre_bat=total_time/batch_nums
std_pre_bat=math.sqrt(total_time_squared/batch_nums-math.pow(sec_pre_bat,2))
print('%s: %s across %d steps,%.3f +/- %.3f seconds/bacth'%(datetime.now(),
info_string,batch_nums,sec_pre_bat,std_pre_bat))
"""6.主函数"""
def main_run():
with tf.Graph().as_default():
image_size=224
images=tf.Variable(tf.random_normal([batch_size,image_size,image_size,3],dtype=tf.float32,stddev=1e-1))
pool5,parameters=inference(images)
init=tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
cal_AlexNet_time(sess,pool5,'Forward')
obj=tf.nn.l2_loss(pool5)
# compute gradients for the backward pass
grad=tf.gradients(obj,parameters)
cal_AlexNet_time(sess,grad,'Forward-Backward')
|
[
"noreply@github.com"
] |
zhulianhai.noreply@github.com
|
1e6a902f205269146fc9ced1abef70cd4a716e1f
|
13de23a6ffd3ccc6b2a70003afcc410b5e3f3495
|
/exercises/Exercise 3 - Parking.py
|
10094f89c1042fa2e0765d394d3db75a049d38b1
|
[
"MIT"
] |
permissive
|
RedFantom/practice-sql-tasks
|
e736e11bfe30e12fd44ab61d2d4b8580d4d00a9d
|
2a08144c9e02a32670a2fed1838bd8f5cfed8dfc
|
refs/heads/master
| 2020-12-26T10:36:53.686089
| 2020-01-31T17:44:04
| 2020-01-31T17:44:04
| 237,483,327
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
"""
Author: RedFantom
License: MIT License
Copyright (C) 2018 RedFantom
"""
from datetime import datetime
from database import open_database, execute_query
"""Open database"""
connection = open_database("parking.db")
fmt = "%d-%m-%Y"
"""Determine name of the owner of the car that is parked the longest"""
query1 = """SELECT ..."""
customer, = execute_query(connection, query1)[0]
"""Determine car type and how long for the customer of the last query"""
query2 = """SELECT ...""".format(customer)
start, end, car_type, spot_id = execute_query(connection, query2)[0]
duration = (datetime.strptime(end, fmt) - datetime.strptime(start, fmt)).days
print("{} owns a {} and it is staying for {} days in spot {}.".format(customer, car_type, duration, spot_id))
|
[
"redfantom@outlook.com"
] |
redfantom@outlook.com
|
171d17dc48c12508bbd7b444b7de5fcb2580ec3d
|
4d1d8940ca0372d09c574278d42d1a68c9296f64
|
/app/console/admin/BCModelView.py
|
e63316a9f72e0a15e6f6b4cb524115b8e6cbac94
|
[] |
no_license
|
Eliaswilde/Flask_project
|
cfa5b46ee0a5e6d9002bff72544051513233e02e
|
d2000ef6cc7b422f5b3a17514c8693a8820c6aa6
|
refs/heads/master
| 2020-04-27T23:11:44.921296
| 2015-03-02T08:06:33
| 2015-03-02T08:06:36
| 31,529,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
from flask_admin.contrib import sqla
from flask import Flask, url_for, redirect, render_template, request, flash
from flask.ext import login
class BCModelView(sqla.ModelView):
list_template = 'bc_admin/model/list.html'
edit_template = 'bc_admin/model/edit.html'
create_template = 'bc_admin/model/create.html'
def _handle_view(self, name, **kwargs):
if not self.is_accessible():
return redirect('/%s/'%self.admin.name)
def is_accessible(self):
return login.current_user.is_authenticated()
def render(self, template, **kwargs):
kwargs['user'] = login.current_user
return super(BCModelView, self).render(template, **kwargs)
|
[
"Eliaswilde@mail.com"
] |
Eliaswilde@mail.com
|
7f2962f90f23c12cd02fa2c7b28275a154c8c151
|
10da822e3420c36fd4a05e4440b716bae016a756
|
/app/forms.py
|
d1bb4fa34554124e421c9b304ad25c963c3bbe0a
|
[] |
no_license
|
infinity-milesman/microblog
|
2cfdd1236b6309328e6d57a9545cc5b9d461878f
|
e3d59d8910967fea33cf7889d1ba0f7628198681
|
refs/heads/master
| 2023-01-07T21:35:35.021627
| 2020-11-12T12:18:29
| 2020-11-12T12:18:29
| 311,271,386
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
username = StringField('Username',validators=[DataRequired()])
password = PasswordField('Password',validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
|
[
"amit.ssjhs@gmail.com"
] |
amit.ssjhs@gmail.com
|
83a0d8a02d7463ad092dc11dfc86c5bfc1e90331
|
f7630fd6c829cb306e72472296e3a513844d99af
|
/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_internetservice.py
|
db6da2648d4229af0b45afe9ef7d70e065698a46
|
[] |
no_license
|
baltah666/automation
|
6eccce20c83dbe0d5aa9a82a27937886e3131d32
|
140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52
|
refs/heads/master
| 2023-03-07T10:53:21.187020
| 2023-02-10T08:39:38
| 2023-02-10T08:39:38
| 272,007,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,302
|
py
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_internetservice
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "1.0.0"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: |
only set to True when module schema diffs with FortiManager API structure,
module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: |
the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
firewall_internetservice:
description: the top level parameters set
required: false
type: dict
suboptions:
database:
type: str
description: no description
choices:
- 'isdb'
- 'irdb'
direction:
type: str
description: no description
choices:
- 'src'
- 'dst'
- 'both'
entry:
description: description
type: list
suboptions:
id:
type: int
description: no description
ip-number:
type: int
description: no description
ip-range-number:
type: int
description: no description
port:
description: description
type: int
protocol:
type: int
description: no description
icon-id:
type: int
description: no description
id:
type: int
description: no description
name:
type: str
description: no description
offset:
type: int
description: no description
reputation:
type: int
description: no description
sld-id:
type: int
description: no description
extra-ip-range-number:
type: int
description: no description
ip-number:
type: int
description: no description
ip-range-number:
type: int
description: no description
jitter-threshold:
type: int
description: no description
latency-threshold:
type: int
description: no description
obsolete:
type: int
description: no description
packetloss-threshold:
type: int
description: no description
singularity:
type: int
description: no description
city:
description: no description
type: int
country:
description: no description
type: int
region:
description: no description
type: int
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: no description
fmgr_firewall_internetservice:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
firewall_internetservice:
database: <value in [isdb, irdb]>
direction: <value in [src, dst, both]>
entry:
-
id: <value of integer>
ip-number: <value of integer>
ip-range-number: <value of integer>
port: <value of integer>
protocol: <value of integer>
icon-id: <value of integer>
id: <value of integer>
name: <value of string>
offset: <value of integer>
reputation: <value of integer>
sld-id: <value of integer>
extra-ip-range-number: <value of integer>
ip-number: <value of integer>
ip-range-number: <value of integer>
jitter-threshold: <value of integer>
latency-threshold: <value of integer>
obsolete: <value of integer>
packetloss-threshold: <value of integer>
singularity: <value of integer>
city: <value of integer>
country: <value of integer>
region: <value of integer>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/internet-service',
'/pm/config/global/obj/firewall/internet-service'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/internet-service/{internet-service}',
'/pm/config/global/obj/firewall/internet-service/{internet-service}'
]
url_params = ['adom']
module_primary_key = None
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'adom': {
'required': True,
'type': 'str'
},
'firewall_internetservice': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'options': {
'database': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'choices': [
'isdb',
'irdb'
],
'type': 'str'
},
'direction': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'choices': [
'src',
'dst',
'both'
],
'type': 'str'
},
'entry': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'list',
'options': {
'id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'ip-number': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'ip-range-number': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'port': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'protocol': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
}
}
},
'icon-id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'name': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'str'
},
'offset': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'reputation': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'sld-id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'extra-ip-range-number': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'ip-number': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'ip-range-number': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'jitter-threshold': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'latency-threshold': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'obsolete': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'packetloss-threshold': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'singularity': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'city': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'country': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'region': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_internetservice'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_partial_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
|
[
"baltah666@gmail.com"
] |
baltah666@gmail.com
|
ffb89cb376990b6702c368100b3ab8fba60bca2c
|
9ca8613fd2d1e9fed477d2fa18797cb97924b4ba
|
/DynamicQ/views.py
|
e0cf93729513fcca7ecf31aaf182e6ce151e05fa
|
[] |
no_license
|
Ajinkya237/Dynamic-Questionnaire-Pre-Final-
|
a3c9d992d43fc8541b3061678f1f0c2aea8108a8
|
1d898f591aeb4e743f8a93043fbb25dc6643289a
|
refs/heads/master
| 2020-12-24T18:23:15.364371
| 2016-05-14T05:47:18
| 2016-05-14T05:47:18
| 58,790,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
from django.http import Http404
from django.shortcuts import render
from .models import Question, Choice
def index(request):
all_questions = Question.objects.all()
return render(request, 'DynamicQ/index.html', {'all_questions': all_questions})
def detail(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
        raise Http404("Question does not exist")
return render(request, 'DynamicQ/detail.html', {'question': question})
|
[
"jagtap.as95@gmail.com"
] |
jagtap.as95@gmail.com
|
90b2a1a401f8aedf406f67a0ddf8c1a9d13df150
|
9c3a3a382378f60960efa1e425e531b1111acf82
|
/desafios/desafio 056.py
|
707740690dce4a501acc8194c5bd8a99693c28a0
|
[
"MIT"
] |
permissive
|
juaoantonio/curso_video_python
|
ae68858be07e7303a7c3d4f4bbd622e9ef0cbbd5
|
7520223d8647929530a1cd96f7c7d8c8f264ba1e
|
refs/heads/main
| 2023-08-23T08:00:16.741277
| 2021-11-07T20:50:23
| 2021-11-07T20:50:23
| 425,601,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
# Reading the name, age and sex of 4 people:
idades = []
homem_mais_velho = 0
mulheres_menores_de_idade = 0
maior_idade = 0
for c in range(1, 5):
print(f' {c}ª PESSOA '.center(50, '='))
nome = str(input('Nome: ')).strip().title()
idade = int(input('Idade: '))
idades.append(idade)
sexo = str(input('Sexo [M/F]: ')).lower().strip()
if sexo == 'm':
if idade > maior_idade:
maior_idade = idade
homem_mais_velho = nome
elif sexo == 'f':
if idade < 20:
mulheres_menores_de_idade += 1
media_idades = sum(idades) / len(idades)
print(f'A média de idade do grupo é: {media_idades}')
if homem_mais_velho != 0:
print(f'O nome do homem mais velho é {homem_mais_velho}')
else:
print('Não foi encontrado nenhum homem')
print(f'E foi identificado um total de {mulheres_menores_de_idade} mulheres com menos de 20 anos')
|
[
"joaobarbosadev@gmail.com"
] |
joaobarbosadev@gmail.com
|
8e025573ea240fa84c85305b619a3efdc76b4de9
|
70ca39484feec6dd0a6a707966d546b0da40bbaf
|
/test/mysite/poe/views.py
|
bd1ecded2c8892bacc9a851519724ff44ee008ab
|
[] |
no_license
|
Lapeus/django
|
f184dd1fafd464df85e4d83b66b9ba9e6c21630c
|
ff4875a6a432052dbe4620162d01e9634713e398
|
refs/heads/master
| 2021-06-29T19:13:38.997166
| 2017-09-17T10:57:42
| 2017-09-17T10:57:42
| 103,821,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
import requests
# Create your views here.
def index(request):
r = requests.get('http://www.pathofexile.com/api/public-stash-tabs?id=6715219-6940400-6501061-7710885-6962087')
rJson = r.json()
change_id = rJson['next_change_id']
# get the latest stashes
count_changeIds = 1
for i in range(100):
r = requests.get('http://www.pathofexile.com/api/public-stash-tabs?id=' + change_id)
change_id = r.json()['next_change_id']
count_changeIds += 1
# stashes = rJson['stashes']
return HttpResponse(change_id)
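# A hedged sketch of consuming the stash payload itself, building on the commented-out
# 'stashes' hint above; the 'accountName' field is an assumption about the
# public-stash-tabs response schema.
def summarize_stashes(response_json):
    # collect the account names that published stashes in this chunk
    stashes = response_json.get('stashes', [])
    return [stash.get('accountName') for stash in stashes]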
|
[
"wolfma@uni-hildesheim.de"
] |
wolfma@uni-hildesheim.de
|
9251b2fe1bd84f36119cadde501ff41ed6d9f7c7
|
f6f632bee57875e76e1a2aa713fdbe9f25e18d66
|
/python/_0001_0500/0271_encode-and-decode-strings.py
|
bf50c9d80c7636e4d7816346540340a037e7719b
|
[] |
no_license
|
Wang-Yann/LeetCodeMe
|
b50ee60beeeb3661869bb948bef4fbe21fc6d904
|
44765a7d89423b7ec2c159f70b1a6f6e446523c2
|
refs/heads/master
| 2023-08-07T05:31:23.428240
| 2021-09-30T15:33:53
| 2021-09-30T15:33:53
| 253,497,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-07-22 21:56:24
# @Last Modified : 2020-07-22 21:56:24
# @Mail : lostlorder@gmail.com
# @Version : 1.0.0
"""
# Design an algorithm that encodes a list of strings into a single string. The encoded string can be transmitted efficiently over the network and decoded back into the original list of strings on the receiving end.
#
#
# Machine 1 (the sender) has the following function:
#
# string encode(vector<string> strs) {
#   // ... your code
#   return encoded_string;
# }
#
# Machine 2 (the receiver) has the following function:
#
# vector<string> decode(string s) {
#   //... your code
#   return strs;
# }
#
#
# Machine 1 (the sender) executes:
#
# string encoded_string = encode(strs);
#
#
# Machine 2 (the receiver) executes:
#
# vector<string> strs2 = decode(encoded_string);
#
#
# At this point, strs2 on machine 2 (the receiver) must be identical to strs on machine 1 (the sender).
#
# Implement these encode and decode methods.
#
# Note:
#
#
# Because the strings may contain any of the 256 valid ASCII characters, your algorithm must be able to handle any character that may appear.
# Do not use class members, global variables, or static variables to store state; your encode and decode algorithms should be stateless.
# Do not rely on any library methods such as eval or serialize. The point of this problem is for you to implement the encode and decode algorithms yourself.
#
# Related Topics: String
# 👍 25 👎 0
"""
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Codec:
def encode(self, strs: [str]) -> str:
"""Encodes a list of strings to a single string.
"""
if not strs:
return chr(258)
return chr(257).join(strs)
def decode(self, s: str) -> [str]:
"""Decodes a single string to a list of strings.
"""
if s == chr(258):
return []
return s.split(chr(257))
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs))
# leetcode submit region end(Prohibit modification and deletion)
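# An alternative, delimiter-free sketch (not part of the submitted solution above):
# length-prefix each string so decoding never depends on a sentinel character.
# This also works for payloads that could contain chr(257).
class LengthPrefixCodec:
    def encode(self, strs: [str]) -> str:
        # "<length>#<payload>" for every string, concatenated
        return ''.join('%d#%s' % (len(s), s) for s in strs)

    def decode(self, s: str) -> [str]:
        strs, i = [], 0
        while i < len(s):
            j = s.index('#', i)          # end of the length field
            n = int(s[i:j])              # payload length
            strs.append(s[j + 1:j + 1 + n])
            i = j + 1 + n
        return strs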
def test_solution():
coder = Codec()
s = ["abc"] * 10 + ["e", "fff", "1"]
assert coder.decode(coder.encode(s)) == s
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=tee-sys", __file__])
|
[
"wzy-511@163.com"
] |
wzy-511@163.com
|
b2857831cceb7e8ab2c811718a838e2640b4dc1f
|
9ce03a9d96213f6cb043d2d79a59ad50d37ac7d8
|
/venv/bin/python-config
|
df34b14cacfb2de2aa42e33e54b3a054ecf91e82
|
[] |
no_license
|
tawfung/first_Django_project
|
89d412d233c9fd4af038b50160d74f606c5c2bc4
|
f91142c9947b15e9b96776be3c81b75adabb09ac
|
refs/heads/master
| 2021-09-01T05:52:14.454803
| 2017-12-25T06:31:43
| 2017-12-25T06:31:43
| 113,393,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,375
|
#!/home/enclaveit/PycharmProjects/first_Django_project/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
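# Example invocations, assuming the script is executed directly from the venv
# (the exact output depends on the local Python build):
#
#   ./python-config --includes
#   ./python-config --libs --ldflags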
|
[
"tungvanle@gmail.com"
] |
tungvanle@gmail.com
|
|
f31b3de560e8ca53ca60e44c0619f24741dcbb04
|
f92269f32d7b1df48f7fce606c941315016b81d8
|
/pyredispg/redis_wrapper.py
|
9935da7b8b71389fde073790a3aa9c82933ab7a9
|
[] |
no_license
|
chimpler/pyredispg
|
28d4418eb6e7f50a74be1ef83bfe690d962ab655
|
06d19c4dd3d562997359cbe8fff3bf15078113b6
|
refs/heads/master
| 2023-09-01T01:02:48.324641
| 2017-06-08T04:31:13
| 2017-06-08T04:31:13
| 93,357,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,000
|
py
|
import json
import os
import sys
import time
from pyredispg.exceptions import RedisException
from pyredispg.postgres_dao import KeyValue
class RedisWrapper(object):
COMMAND_FILE = 'command.json'
def __init__(self, dao, redis_info):
with open(os.path.join(sys.path[0], self.COMMAND_FILE)) as fd:
self._command = json.loads(fd.read())
self._redis_info = redis_info
self._dao = dao
self._db = 0
def cluster(self, command, *args):
return '-ERR This instance has cluster support disabled'
def command(self):
return self._command
def delete(self, key):
return self._dao.delete(self._db, key)
def echo(self, value):
return value
def exists(self, key):
return 1 if self._dao.exists(self._db, key) else 0
def flushall(self):
self._dao.delete_all_dbs()
return '+OK'
def flushdb(self):
self._dao.delete_db(self._db)
return '+OK'
def get(self, key):
return self._dao.get(self._db, key)
def keys(self, pattern):
return self._dao.get_keys(self._db, pattern)
def dbsize(self):
return self._dao.dbsize()
def hexists(self, key, hkey):
# convert boolean to 0 or 1
return int(self._dao.hexists(self._db, key, hkey))
def hdel(self, key, hkey):
# convert boolean to 0 or 1
return int(self._dao.hdel(self._db, key, hkey))
def hget(self, key, hkey):
return self._dao.hget(self._db, key, hkey)
def hmget(self, key, *hkeys):
return self._dao.hmget(self._db, key, hkeys)
def hset(self, key, hkey, value):
return self._dao.hset(self._db, key, hkey, value)
def hmset(self, key, *hkey_values):
        # hkey_values alternates key, value; integer-divide to get the pair count
        key_vals = [KeyValue(hkey_values[i * 2], hkey_values[i * 2 + 1]) for i in range(len(hkey_values) // 2)]
return self._dao.hmset(self._db, key, key_vals)
def hgetall(self, key):
return [e for kv in self._dao.hgetall(self._db, key) for e in kv]
def hkeys(self, key):
return self._dao.hkeys(self._db, key)
def hvals(self, key):
return self._dao.hvals(self._db, key)
def hlen(self, key):
return self._dao.hlen(self._db, key)
def info(self):
return '\n\n'.join([
'# {title}\n{section}'.format(
title=title,
section='\n'.join(
'{k}:{v}'.format(k=k, v=v) for k, v in kvdict.items()
)
) for title, kvdict in self._redis_info.get_info().items()
]) + '\n'
def mget(self, *keys):
return self._dao.mget(self._db, keys)
def persist(self, key):
return self._dao.persist(self._db, key)
def ping(self, value='PONG'):
return value
def sadd(self, key, *values):
return self._dao.sadd(self._db, key, values)
def set(self, key, value, ex=None, mx=None, overwrite=True):
self._dao.set(self._db, key, value, ex, mx, overwrite)
return '+OK'
def scard(self, key):
return self._dao.scard(self._db, key)
def select(self, db):
def check_db():
try:
n = int(db)
                if 0 <= n <= 15:
return n
else:
return None
except ValueError:
return None
        n = check_db()  # check_db closes over db; it takes no arguments
if n is None:
raise RedisException('invalid DB index')
else:
self._db = n
return '+OK'
def smembers(self, key):
return self._dao.smembers(self._db, key)
def time(self):
return self._redis_info.get_time()
def type(self, key):
t = self._dao.type_str(self._db, key)
return '+' + (t if t else 'none')
def unlink(self, key):
# Supposedly running the actual removal asynchronously
return self.delete(key)
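# A minimal usage sketch; `dao` and `redis_info` stand in for the project's real
# PostgresDao/RedisInfo objects, so the constructors here are assumptions.
#
# wrapper = RedisWrapper(dao, redis_info)
# wrapper.select('3')              # switch to DB 3, returns '+OK'
# wrapper.set('greeting', 'hi')    # returns '+OK'
# wrapper.get('greeting')          # 'hi'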
|
[
"francois.dangngoc@gmail.com"
] |
francois.dangngoc@gmail.com
|
98b3bd1c4e143064d2663f9c23197d9aec982459
|
95b00f80e2ba9628652472ae315af7aca0febd41
|
/lab3b/InodesInfo.py
|
c075cf09391db7b1253c2cf37d99e07b7341c578
|
[] |
no_license
|
redhairdragon/OS_Project
|
ece96d893f220274a01483663140a5bf0370ac3f
|
ded4b1b1becf61e2e232c135e7b845a2c8978450
|
refs/heads/master
| 2020-04-27T05:24:27.846857
| 2019-03-06T05:49:57
| 2019-03-06T05:49:57
| 174,080,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
from SuperBlock import *
from Inode import *
class InodesInfo():
def __init__(self, report_list):
self.superblock=SuperBlock(report_list)
self.free_inodes_bitmap=[False]*self.superblock.number_inodes
self.inodes=[]
for entry in report_list:
if entry[0]=='IFREE':
self.free_inodes_bitmap[int(entry[1])-1]=True
if entry[0]=='INODE':
self.inodes.append(Inode(entry))
def is_free(self,inode_num):
if(self.in_range(inode_num)):
return self.free_inodes_bitmap[inode_num-1]
        return False
def in_range(self,inode_num):
if inode_num>=1 and inode_num<=self.superblock.number_inodes:
return True
else:
return False
def is_unallocated(self,inode_num):
allocated_nodes=[]
for inode in self.inodes:
if inode.file_type!='0':
allocated_nodes.append(inode.inode_num)
return inode_num not in allocated_nodes
def check_inodes_allocation(self):
allocated_nodes=[]
for inode in self.inodes:
            if self.is_free(inode.inode_num):
print("ALLOCATED INODE "+str(inode.inode_num)+" ON FREELIST")
else:
if inode.file_type=='0':
print("UNALLOCATED INODE "+str(inode.inode_num)+" NOT ON FREELIST")
else: allocated_nodes.append(inode.inode_num)
for i in range(self.superblock.first_non_reserved_inode,len(self.free_inodes_bitmap)):
bit=self.free_inodes_bitmap[i]
if bit == False and i+1 not in allocated_nodes:
print("UNALLOCATED INODE "+str(i+1)+" NOT ON FREELIST")
|
[
"shen_teng@engineering.ucla.edu"
] |
shen_teng@engineering.ucla.edu
|
d01723a024e12afc14be2a71f76f349b556d394c
|
9c3852b49ecda13bcc9a28ec7e48d5077b5a3ddc
|
/specialMath.py
|
e0cc663c8a130e5fdbc3836da7dd66ea4cd95cf9
|
[] |
no_license
|
netskink/evidint
|
4b22bae286978e31d3a1e0e5c0af106345b349ad
|
4b6ad67346aab0e3de70473dbde8a800bb3ca7d0
|
refs/heads/master
| 2016-09-06T19:26:46.688141
| 2015-06-17T21:07:28
| 2015-06-17T21:07:28
| 37,620,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
#!/sw/bin/python2.7
import sys
# the original recursive code
def specialMath0(n):
if(n==0):
return 0
elif(n==1):
return 1
    return n + specialMath0(n-1) + specialMath0(n-2)  # recurse via specialMath0 itself
def specialMath(n):
# zero and one are special cases.
if(n==0):
return 0
elif(n==1):
return 1
    # anything above one is a recurrence. This has an equivalence function of
    # f(n) = (Fn + Ln)/2 where Fn is the nth Fibonacci number and Ln the nth Lucas number.
    # However, it would be simpler to just calculate the result instead of implementing
# those routines for the purposes of a test. See below:
# https://www.wolframalpha.com/input/?i=f(n)%3Df(n-1)%2Bf(n-2)%2C+f(1)%3D1%2C+f(2)%3D2&lk=3
# and
# http://math.stackexchange.com/questions/536350/how-to-solve-recurrence-relation-fn-fn-1-2n-1-when-f1-1
F = range(n)
F[0] = 0
F[1] = 1
the_len = len(F)
for i in xrange(2,the_len):
F[i] = i + F[i-1] + F[i-2]
# do it one more time since the "array" will be from 0-n-1
return (n + F[n-1] + F[n-2])
print specialMath(int(sys.argv[1]))
#print specialMath0(17)
#print specialMath(17)
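# A small self-check (kept commented like the lines above, Python 2 to match the
# shebang): the iterative version should agree with the recursive definition.
#for n in xrange(2, 10):
#    assert specialMath(n) == specialMath0(n), n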
|
[
"davisjf@gmail.com"
] |
davisjf@gmail.com
|
dab3f0ab7a14720b772e04a7ed139ce003b8b5b2
|
b9cd129f18e2cbe3c4315e19018e02b60a641a3a
|
/16-Take comma seperated city names and print in sorted order.py
|
b7908b0e61028ad8cd6f75cc0274576cf1b4ff7d
|
[] |
no_license
|
VaibhaV0212/Sample-Programs
|
d61b294f3b40e05b2d24511eb5cc1a4f0c6a66ba
|
97f56a1b372c639ebd4fa5cfba2f770abd10a709
|
refs/heads/master
| 2022-11-14T23:16:21.762847
| 2020-07-02T04:44:10
| 2020-07-02T04:44:10
| 276,549,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
c = input('Enter city names, comma separated: ')
a = list(c.split(','))
print(sorted(a))
|
[
"noreply@github.com"
] |
VaibhaV0212.noreply@github.com
|
d96be9168efb4db78bb89d670d2b8578cb148ebc
|
bae992f3ac2c24d0a9741bd93ab4a6a7168fd51c
|
/pyprocessBarTest.py
|
46f9f8dbfbaee536f54899139af31e0e1f1780ee
|
[] |
no_license
|
duanchun/NordicMutilDownload
|
a01b00428ad3c23f4b9c5f537f55734a9d1740b6
|
9bb2ff39809dc0ecd32c3b87ee38c0be060702ff
|
refs/heads/master
| 2023-08-02T03:10:23.183631
| 2021-09-27T02:26:18
| 2021-09-27T02:26:18
| 410,434,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
import wx
import wx.lib.agw.pygauge as PG
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "PyGauge Demo")
panel = wx.Panel(self)
gauge1 = PG.PyGauge(panel, -1, size=(100, 25), style=wx.GA_HORIZONTAL)
gauge1.SetValue(0)
gauge1.SetBackgroundColour(wx.WHITE)
gauge1.SetBorderColor(wx.BLACK)
gauge1.Update(80, 2000)
gauge2 = PG.PyGauge(panel, -1, size=(100, 25), style=wx.GA_HORIZONTAL)
gauge2.SetValue([20, 80])
gauge2.SetBarColor([wx.RED, wx.GREEN])
gauge2.SetBackgroundColour(wx.WHITE)
gauge2.SetBorderColor(wx.BLACK)
gauge2.SetBorderPadding(2)
gauge2.Update([50, 20], 2000)
gauge3 = PG.PyGauge(panel, -1, size=(100, 25), style=wx.GA_HORIZONTAL)
gauge3.SetValue(50)
gauge3.SetBarColor(wx.GREEN)
gauge3.SetBackgroundColour(wx.WHITE)
gauge3.SetBorderColor(wx.BLACK)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(gauge1, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 20)
sizer.Add(gauge2, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 20)
sizer.Add(gauge3, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 20)
panel.SetSizer(sizer)
sizer.Layout()
# our normal wxApp-derived class, as usual
if __name__ == '__main__':
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
|
[
"xiaojian.tian@tuya.com"
] |
xiaojian.tian@tuya.com
|
3c32bd573510940440c654cb092bdc06a4d9c89b
|
dc3e24ee5bff9b1357175ff199453ad54efbdb86
|
/content/plugins/4k-vc-icon-shortcode/inc/util-generate-icon-list.py
|
4e10526c2c67fccea1b1231abf0520644731b7df
|
[] |
no_license
|
jruck/wp-boiler
|
298f8d12c0cddad214391ff8bf51f9ef7d34d100
|
4a726d6f8d957a84b70c59fe0ec33edff8501dd6
|
refs/heads/master
| 2021-01-16T22:22:15.137046
| 2014-06-18T09:06:32
| 2014-06-18T09:06:32
| 14,827,185
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 966
|
py
|
#!/usr/bin/python
import sys
# Uncomment if this will be used
sys.exit()
cssdir = 'icons/css/'
from os import walk
# Get all font CSS files
fontCSSFiles = []
for (dirpath, dirnames, filenames) in walk(cssdir):
fontCSSFiles.extend(filenames)
break
# Get all the font class names
fontClasses = []
for file in fontCSSFiles:
for line in open(cssdir + file,'r').readlines():
        if ':before' not in line:
continue
if line[0] != '.':
continue
fontClasses.append(line[1:line.index(':before')])
# Print as a PHP array
print "array("
for fontClass in fontClasses:
print "\t\"" + fontClass + "\","
print ");"
# Print as js array
# sys.stdout.write("[")
# for fontClass in fontClasses:
# sys.stdout.write('"' + fontClass + '",')
# sys.stdout.write("]")
# Form CSS class list
# for fontClass in fontClasses:
# if fontClass.find('ls-') == 0:
# sys.stdout.write(' .' + fontClass + ',')
|
[
"justinruckman@gmail.com"
] |
justinruckman@gmail.com
|
875960fc03310282775e2805f07e80fa525a74ff
|
4ffc58eb4491a2f7826b2dc81d8f41690c08edf1
|
/exts/levelsystem.py
|
2940b764ec957b1e52a480669e4adb04c0a6031d
|
[
"MIT"
] |
permissive
|
AshutoshRajSingh/Zeta
|
4005b5642f6c9ab5d7f8b71e0f37eb81c5759419
|
aa93513c110ee8da24487911eeac9728c4fa9e6e
|
refs/heads/main
| 2023-08-11T01:45:10.130251
| 2021-09-16T07:20:03
| 2021-09-16T07:20:03
| 333,671,525
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,911
|
py
|
import asyncio
import discord
import datetime
from main import Zeta
from typing import Union
from math import floor, sqrt
from discord.ext import commands, tasks
def is_me(ctx):
return ctx.author.id == 501451372147769355
QUERY_INTERVAL_MINUTES = 10
class LevelSystem(commands.Cog, name="Levelling"):
"""
Commands related to levelling, as you send messages, you receive exp points which translate to different levels.
"""
def __init__(self, bot: Zeta):
super().__init__()
self.bot = bot
# Look up implementation inside the add_to_cache function docstring
self._cache = {}
asyncio.get_event_loop().create_task(self.load_cache())
# Start the loop that dumps cache to database every 10 minutes
self.update_level_db.start()
async def cog_check(self, ctx: commands.Context):
try:
return self.bot.guild_prefs[ctx.guild.id]['levelling']
except KeyError:
return False
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
if ctx.guild.id not in self.bot.guild_prefs:
cg = self.bot.get_cog('Configuration')
await cg.create_default_guild_prefs(ctx.guild.id)
if not self.bot.guild_prefs[ctx.guild.id].get('levelling'):
await ctx.send("The `levelling` plugin has been disabled on this server therefore related commands will not work\n"
"Hint: Server admins can enable it using the `plugin enable` command, use the help command to learn more.")
async def load_cache(self):
for guild in self.bot.guilds:
self._cache[guild.id] = {}
async def give_exp(self, guild_id: int, member_id: int, amount=None) -> None:
"""
Function to give exp to a particular member
Parameters:
:param guild_id: The id of the guild in question
:param member_id: The id of the member in the guild
:param amount: The amount of exp to give, default to None in which case the default level up exp is awarded
:return: None
"""
if member_id not in self._cache[guild_id]:
await self.add_to_cache(guild_id, member_id)
if not amount:
amount = 5 * self._cache[guild_id][member_id]['boost']
self._cache[guild_id][member_id]['exp'] += amount
async def add_to_cache(self, guild_id: int, member_id: int) -> Union[dict, None]:
"""
Function that adds a member to the cache
Cache: \n
type = dict \n
format: \n
{
guild_id : {
member_id : {
'id' : "the id of the member",\n
'level' : "the level of the member",\n
'exp' : "the exp of the member", \n
'boost' : "the boost multiplier", \n
}
}
}
:param guild_id: the id of the guild
:param member_id: the id of the member belonging to that guild
:return: data(dict) - The member that was just put inside db, same format as cache.
"""
if guild_id in self._cache and member_id in self._cache[guild_id]:
pass
if type(guild_id) is not int:
raise TypeError("guild id must be int")
data = await self.bot.db.fetch_member(guild_id, member_id)
if data:
self._cache[guild_id][member_id] = {
'id': data.get('id'),
'level': data.get('level'),
'exp': data.get('exp'),
'boost': data.get('boost'),
}
else:
await self.add_to_db(guild_id, member_id)
await self.add_to_cache(guild_id, member_id) # important
return data
async def add_to_db(self, guild_id: int, member_id: int) -> None:
"""
A function that adds a new entry to the database with default values
(and not dump existing cache into database)
:param guild_id: The relevant guild
:param member_id: The id of the member
:return: None
"""
await self.bot.db.make_member_entry(guild_id, member_id)
async def dump_single_guild(self, guildid: int):
"""
Function that dumps all entries from a single guild in the cache to the database.
:param guildid: the id of the guild whose cache entry needs to be dumped
:return: None
"""
data = self._cache[guildid]
for memberid in list(data):
current = data[memberid]
query = f"UPDATE server_members " \
f"SET level = $1, " \
f"exp = $2, " \
f"boost = $3" \
f"WHERE memberid = $4 AND guildid = $5"
await self.bot.pool.execute(query,
current['level'],
current['exp'],
current['boost'],
memberid, guildid)
async def fetch_top_n(self, guild: discord.Guild, limit: int):
"""
Function to fetch top n members of a guild based off exp, works by initially dumping the guild into the database
then using an sql query to fetch the top n members
:param guild: the guild in question
:param limit: the number of members to fetch
:return: None
"""
if guild.id in self._cache:
await self.dump_single_guild(guild.id)
async with self.bot.pool.acquire() as conn:
async with conn.transaction():
top10 = []
rank = 1
async for entry in conn.cursor("SELECT memberid, exp, level FROM server_members WHERE guildid = $1 ORDER BY exp DESC LIMIT $2", guild.id, limit):
top10 += [{'rank': rank, 'id': entry.get('memberid'), 'exp': entry.get('exp'), 'level': entry.get('level')}]
rank += 1
return top10
@tasks.loop(minutes=QUERY_INTERVAL_MINUTES)
async def update_level_db(self):
"""
Loop that dumps the cache into db every 10 minutes
:return: None
"""
for guildId in self._cache:
await self.dump_single_guild(guildId)
self._cache[guildId] = {}
print(f"Level system database updated at {datetime.datetime.utcnow()}")
@update_level_db.before_loop
async def preloop(self) -> None:
"""
using this neat little feature in the library you can make sure the cache is ready before the loop starts
"""
await self.bot.wait_until_ready()
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
"""
The listener that takes care of awarding exp, levelling up the members
:param message: the discord.Message object
:return: None
"""
try:
if self.bot.guild_prefs[message.guild.id] is None:
await self.bot.get_cog('Configuration').create_default_guild_prefs(message.guild.id)
return
elif not self.bot.guild_prefs[message.guild.id].get('levelling'):
return
except KeyError:
await self.bot.get_cog('Configuration').create_default_guild_prefs(message.guild.id)
return
# Bots shouldn't be levelling up
if not message.author.bot:
# This bit awards exp points
if message.guild.id not in self._cache:
self._cache[message.guild.id] = {}
await self.add_to_cache(message.guild.id, message.author.id)
if message.author.id not in self._cache[message.guild.id]:
await self.add_to_cache(message.guild.id, message.author.id)
await self.give_exp(message.guild.id, message.author.id)
# This bit checks if level up happened
OldLevel = self._cache[message.guild.id][message.author.id]['level']
NewLevel = floor((25 + sqrt(625 + 100 * self._cache[message.guild.id][message.author.id]['exp'])) / 50)
if NewLevel > OldLevel:
self._cache[message.guild.id][message.author.id]['level'] = NewLevel
embed = discord.Embed(title=f"{message.author}",
description=f"GZ on level {NewLevel}, {message.author.mention}",
color=discord.Colour.green())
await message.channel.send(embed=embed)
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild):
self._cache[guild.id] = {}
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member):
self._cache[member.guild.id].pop(member.id)
await self.bot.db.hakai_member(member.guild.id, member.id)
@commands.command()
async def level(self, ctx: commands.Context, target: discord.Member = None):
"""
Used to show own or someone else's level
`target` here is the member whose level you wish to know (can be mention, id or username), if no target specified, own level is shown.
"""
if not target:
target = ctx.author
if ctx.guild.id in self._cache and target.id in self._cache[ctx.guild.id]:
data = self._cache[ctx.guild.id][target.id]
else:
data = await self.add_to_cache(ctx.guild.id, target.id)
if not data:
await ctx.send(f"{target} hasn't been ranked yet! tell them to send some messages to start.")
return
embed = discord.Embed(title=f"{target}",
description=f"You are currently on level : {data['level']}\n"
f"With exp : {data['exp']}",
colour=discord.Colour.blue())
await ctx.send(embed=embed)
@commands.command()
async def lb(self, ctx):
"""
Shows the top 10 server members based off their exp.
"""
data = await self.fetch_top_n(ctx.guild, limit=10)
embed = discord.Embed(title="Server leaderboard",
colour=discord.Colour.green())
for entry in data:
m = ctx.guild.get_member(entry.get('id'))
if m is None:
display_name = f"Deleted user (id:{entry.get('id')})"
embed.set_footer(text="Hint: mods can use the reset command to get rid of the \"Deleted user\" in the leaderboard if they have left the server")
else:
display_name = m.display_name
embed.add_field(name=f"{entry.get('rank')}.{display_name}",
value=f"Level: {entry.get('level')} Exp: {entry.get('exp')}",
inline=False)
await ctx.send(embed=embed)
@commands.command()
@commands.has_guild_permissions(manage_messages=True)
async def setmultiplier(self, ctx: commands.Context, target: discord.Member, multiplier: int):
"""
Used to set exp multiplier of a member
        Note that you need to have the server permission "Manage messages" in order to use this command
`target` here is the member whose multiplier you wish to set, can be mention, id or username
`multiplier` here is the exp multiplier you want to set, a value of 2 will indicate twice as fast levelling
"""
if target.id not in self._cache[ctx.guild.id]:
await self.add_to_cache(ctx.guild.id, target.id)
self._cache[ctx.guild.id][target.id]['boost'] = int(multiplier)
await ctx.send(f"{target}'s multiplier has been set to {multiplier}")
@commands.command()
@commands.has_guild_permissions(manage_messages=True)
async def giveexp(self, ctx: commands.Context, target: discord.Member, amount: int):
"""
Used to award a certain amount of exp to a member
Note that you need to have the server permission "manage_messages" to use this command
`target` here is the member who you wish to give exp points to
`amount` is the number of exp points you wish to award that member
"""
await self.give_exp(ctx.guild.id, target.id, amount=int(amount))
e = discord.Embed(title="Success",
description=f"Added {amount} points to {target.mention}",
colour=discord.Colour.green())
await ctx.send(embed=e)
@commands.command()
@commands.has_guild_permissions(manage_messages=True)
async def reset(self, ctx: commands.Context, target: Union[discord.Member, int]):
"""
Resets a member's exp/level and basically removes them from the database.
This is useful in the event that a member leaves the guild when the bot is offline therefore it is not able to automatically delete their entry so it shows up as `deleted user` on the leaderboard
`target` here is the member you'd like to reset, can be id or mention
"""
if type(target) is int:
pass
else:
target = target.id
try:
self._cache[ctx.guild.id].pop(target)
except KeyError:
pass
finally:
await self.bot.db.hakai_member(ctx.guild.id, target)
@commands.command(hidden=True)
@commands.check(is_me)
async def update_db(self, ctx):
"""
Command to update the database manually, mostly used for testing purposes, or when planning to take bot down
for maintenance
"""
await self.update_level_db()
await ctx.send("db updated (hopefully)")
def setup(bot: Zeta):
bot.add_cog(LevelSystem(bot))
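# A hedged helper derived from the levelling formula used in on_message above
# (level = floor((25 + sqrt(625 + 100 * exp)) / 50)); inverting it gives the exp
# threshold for a level. This is an illustrative addition, not part of the original cog.
def exp_required_for_level(level: int) -> int:
    # e.g. level 2 needs 50 exp, level 10 needs 2250 exp
    return 25 * level * (level - 1)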
|
[
"ashutosh200210@outlook.com"
] |
ashutosh200210@outlook.com
|
46bb8b716321dba5c04ec4b2425eb153d1d83097
|
dc3eb0fd04a2c7cfa0c797d169c7ff17f77e232a
|
/tools/select_seqs_by_IDs.py
|
f687561ef34de3c0243468606c5b51d220f81e8d
|
[
"MIT"
] |
permissive
|
alexmsalmeida/virsearch
|
a7d3fda656040e8ef8109b6f9683ad2f6f7ff3b3
|
9807e6a4823170f29bb4e73f964c7f040bc85fa3
|
refs/heads/main
| 2023-08-14T17:13:58.265707
| 2021-10-05T21:37:31
| 2021-10-05T21:37:31
| 368,523,743
| 11
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
#!/usr/bin/env python
"""takes a list of record.ids and returns to you the sequences
from a fasta list that are part of the list"""
from Bio import SeqIO
from optparse import OptionParser
import sys
def test_file(option, opt_str, value, parser):
try:
with open(value): setattr(parser.values, option.dest, value)
except IOError:
        print('%s file cannot be opened' % value)  # value is the path that failed to open
sys.exit()
def main(in_fasta, ids, out_fasta):
    infile = open(in_fasta)
    # use a set for O(1) id lookups; "U" mode is deprecated and removed in Python 3.11
    data = set(open(ids).read().splitlines())
    output_handle = open(out_fasta, "w")
    seqrecords = []
for record in SeqIO.parse(infile, "fasta"):
if record.id in data:
seqrecords.append(record)
SeqIO.write(seqrecords, output_handle, "fasta")
infile.close()
output_handle.close()
if __name__ == "__main__":
usage="usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-i", "--input_fasta", dest="in_fasta",
help="/path/to/input fasta [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-d", "--headers", dest="ids",
help="/path/to/id file [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-o", "--output_fasta", dest="out_fasta",
help="/path/to/output fasta [REQUIRED]",
action="store", type="string")
options, args = parser.parse_args()
mandatories = ["in_fasta", "ids", "out_fasta"]
for m in mandatories:
if not options.__dict__[m]:
print("\nMust provide %s.\n" %m)
parser.print_help()
exit(-1)
main(options.in_fasta, options.ids, options.out_fasta)
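# Example invocation (file names are placeholders); ids.txt holds one record id
# per line, matching the FASTA headers:
#
#   python select_seqs_by_IDs.py -i input.fasta -d ids.txt -o subset.fasta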
|
[
"aalmeida@noah-login-02.ebi.ac.uk"
] |
aalmeida@noah-login-02.ebi.ac.uk
|
3655a0f79f14c18fd00f21d93f064a434f6d7338
|
bc1edfef479c8e3f652a30478c4645f1cee7b729
|
/modules/memory.py
|
c6ec04e2f6aa852031d5c71e73ea5814b45ebd00
|
[] |
no_license
|
maotianni/pj_2020_experiment_1
|
aa568678945a6a6efe30cde7aae321ebdc918763
|
86ea9583a5bcbfe72ac68efad295d9a72e255122
|
refs/heads/main
| 2023-02-10T10:32:29.287542
| 2021-01-06T04:14:33
| 2021-01-06T04:14:33
| 312,764,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,774
|
py
|
import torch
from torch import nn
from collections import defaultdict
class Memory(nn.Module):
def __init__(self, n_nodes, memory_dimension, input_dimension, message_dimension=None,
device="cpu", combination_method='sum'):
super(Memory, self).__init__()
self.n_nodes = n_nodes
#self.num_relations = num_relations
self.memory_dimension = memory_dimension
self.input_dimension = input_dimension
self.message_dimension = message_dimension
self.device = device
self.combination_method = combination_method
self.__init_memory__()
def __init_memory__(self):
"""
Initializes the memory to all zeros. It should be called at the start of each epoch.
"""
# Treat memory as parameter so that it is saved and loaded together with the model
self.memory = nn.Parameter(torch.zeros((self.n_nodes, self.memory_dimension)).to(self.device),
requires_grad=False)
self.last_update = nn.Parameter(torch.zeros(self.n_nodes).to(self.device),
requires_grad=False)
self.messages = defaultdict(list)
def store_raw_messages(self, nodes, node_id_to_messages):
        # if a node has new messages, append them to its stored message list
for node in nodes:
self.messages[node].extend(node_id_to_messages[node])
    # fetch the memory rows for the given nodes
def get_memory(self, node_idxs):
return self.memory[node_idxs, :]
    # overwrite the memory rows for the given nodes
def set_memory(self, node_idxs, values):
self.memory[node_idxs, :] = values
    # get the time the memory was last updated, i.e. the time of the last interaction
def get_last_update(self, node_idxs):
return self.last_update[node_idxs]
def backup_memory(self):
messages_clone = {}
for k, v in self.messages.items():
messages_clone[k] = [(x[0].clone(), x[1].clone()) for x in v]
return self.memory.data.clone(), self.last_update.data.clone(), messages_clone
def restore_memory(self, memory_backup):
self.memory.data, self.last_update.data = memory_backup[0].clone(), memory_backup[1].clone()
self.messages = defaultdict(list)
for k, v in memory_backup[2].items():
self.messages[k] = [(x[0].clone(), x[1].clone()) for x in v]
def detach_memory(self):
self.memory.detach_()
# Detach all stored messages
for k, v in self.messages.items():
new_node_messages = []
for message in v:
new_node_messages.append((message[0].detach(), message[1]))
self.messages[k] = new_node_messages
def clear_messages(self, nodes):
for node in nodes:
self.messages[node] = []
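# A minimal usage sketch (sizes are illustrative assumptions, not from the project):
if __name__ == '__main__':
    mem = Memory(n_nodes=4, memory_dimension=8, input_dimension=8)
    print(mem.get_memory([0, 1]).shape)    # torch.Size([2, 8]), initialised to zeros
    mem.set_memory([0], torch.ones(1, 8))  # overwrite the memory row of node 0
    print(mem.get_last_update([0]))        # tensor([0.])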
|
[
"noreply@github.com"
] |
maotianni.noreply@github.com
|
6658aeb098ba128b738ec8f718872be030a2af32
|
98e6c91a853bc01fcc7036027f3df852d2fa9c98
|
/main.py
|
82915eb2c8ba591945aa118f37a4c2eb2dc0f907
|
[] |
no_license
|
ToyinY/HackMIT2019
|
fb7bee25a79290899dc0ea6fe4ff962ee71e7338
|
321cb56da97b8135d456955230764c648de7bf3b
|
refs/heads/master
| 2020-07-26T04:42:50.343210
| 2019-09-15T03:07:19
| 2019-09-15T03:07:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,927
|
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python37_render_template]
import datetime
import pymysql
from sqlalchemy import create_engine
# from werkzeug.security import check_password_hash, generate_password_hash
from flask import (Flask, render_template, session, flash, request, redirect, url_for)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev'
# Connect to the database
unix_socket = '/cloudsql/{}'.format('hackmit2019-252916:us-central1:hackmit2019')
connection = pymysql.connect(user='root',
password='root',
db='hackmit',
unix_socket=unix_socket,
charset='utf8mb4',
#cursorclass=pymysql.cursors.DictCursor
)
@app.route('/signup', methods=('GET', 'POST'))
def register():
"""Register a new user.
Validates that the username is not already taken. Hashes the
password for security.
"""
if request.method == 'POST':
first = request.form['first']
last = request.form['last']
email = request.form['email']
pwd_not_hash = request.form['password']
error = None
if not first:
error = 'First name is required.'
elif not last:
error = 'Last name is required.'
elif not email:
error = 'Email is required.'
elif not pwd_not_hash:
error = 'Password is required.'
        elif connection.cursor().execute("SELECT `id` FROM `user_table` WHERE `email`=%s", (email,)) > 0:
            error = 'User {0} is already registered.'.format(email)
print(connection.cursor().execute("SELECT `id` FROM `user_table` WHERE `email`=%s", (email,)))
if error is None:
with connection.cursor() as cursor:
sql = "INSERT INTO `user_table` (`first_name`, `last_name`, `email`, `password`) VALUES (%s, %s, %s, %s)"
print('past sql')
cursor.execute(sql, (first, last, email, pwd_not_hash))
print('past execute')
connection.commit()
print("I did things!")
else:
print("error is not none")
print(error)
flash(error)
#return redirect(url_for('index.html'))
print('about to render something')
return render_template('signup.html')
@app.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
error = None
        with connection.cursor(pymysql.cursors.DictCursor) as cursor:
            cursor.execute('SELECT * FROM `user_table` WHERE `email`=%s', (email,))
            user = cursor.fetchone()

        if user is None:
            error = 'Incorrect email.'
        elif user['password'] != password:
            error = 'Incorrect password.'
if error is None:
# store the user id in a new session and return to the index
session.clear()
session['user_id'] = user['id']
            return redirect(url_for('root'))
flash(error)
return render_template('login.html')
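# The werkzeug import is commented out at the top of this file; a hedged sketch of
# how password hashing could slot into signup/login (not what the app currently does):
#
# from werkzeug.security import generate_password_hash, check_password_hash
#
# pwd_hash = generate_password_hash(request.form['password'])   # when registering
# cursor.execute(sql, (first, last, email, pwd_hash))
#
# if user is None or not check_password_hash(user['password'], password):  # when logging in
#     error = 'Incorrect email or password.'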
@app.route('/')
def root():
# For the sake of example, use static information to inflate the template.
# This will be replaced with real information in later steps.
dummy_times = [datetime.datetime(2018, 1, 1, 10, 0, 0),
datetime.datetime(2018, 1, 2, 10, 30, 0),
datetime.datetime(2018, 1, 3, 11, 0, 0),
]
return render_template('index.html', times=dummy_times)
@app.route('/homepage')
def home():
return render_template('homepage.html')
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
# Flask's development server will automatically serve static files in
# the "static" directory. See:
# http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
# App Engine itself will serve those files as configured in app.yaml.
# app.secret_key = 'dev'
# app.config['SESSION_TYPE'] = 'filesystem'
# sess.init_app(app)
app.run(host='127.0.0.1', port=8080, debug=True)
# [END gae_python37_render_template]
|
[
"43282748+kmerrill18@users.noreply.github.com"
] |
43282748+kmerrill18@users.noreply.github.com
|