blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11faeae5f596c6f2c9fb11540a28c64304960907 | 542ef68721859a6b60558b074f2b3a9b6a9535a6 | /NexusStoreBackend/userProfile/migrations/0011_alter_address_user.py | 1d755ab38f3d2c89b3cd8b2bbc748e6991d4df92 | [] | no_license | Raman9937/NexusStore | e0ae96cf0d9e1130eaa3c560808e467ba28dbb01 | ceed4324995dde7623774548e6fad968f93a2ff5 | refs/heads/main | 2023-06-01T04:49:00.832649 | 2021-06-19T04:14:50 | 2021-06-19T04:14:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | # Generated by Django 3.2.4 on 2021-06-12 20:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-point Address.user at the swappable user model.

    Generated by Django; uses DO_NOTHING so no DB-level cascade is
    emitted when a referenced user row is deleted.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('userProfile', '0010_auto_20210613_0125'),
    ]
    operations = [
        migrations.AlterField(
            model_name='address',
            name='user',
            # DO_NOTHING: deleting a user leaves Address rows untouched
            # (integrity is then the database's / caller's responsibility).
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"79391583+RitikVashisth007@users.noreply.github.com"
] | 79391583+RitikVashisth007@users.noreply.github.com |
bd6759f3f1eccc5507bd0d1fdb5dd71df14f6cc9 | e1df12efffc353132b9996899f92d2e9186ded51 | /src/util.py | 12c542992c489e1bf186c4d00d225dd0b90cafeb | [
"MIT"
] | permissive | fracogno/UNet-ResWDCGAN | f67583a43a6b9e1c39ecf73b94360db9a54c1f4b | 000a6e82111ccffe6189ae270c2465b168918d9a | refs/heads/master | 2020-07-16T09:03:52.128729 | 2020-02-03T20:39:50 | 2020-02-03T20:39:50 | 205,760,133 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | import numpy as np
import matplotlib.image as mpimg
import cv2
import pandas as pd
def sample_noise(size, mu=0., sigma=1.):
    """Draw Gaussian noise of shape `size` from N(mu, sigma**2)."""
    noise = np.random.normal(loc=mu, scale=sigma, size=size)
    return noise
def getData(path, size, value="mel"):
    """Load images whose label equals `value` from a pickled DataFrame.

    Each matching image is resized to (size, size) with bicubic
    interpolation and rescaled from [0, 255] to [-1, 1].
    """
    frame = pd.read_pickle(path)
    assert(len(frame["image"]) == len(frame["id"]))
    samples = []
    for idx in range(len(frame["image"])):
        if frame["id"][idx] != value:
            continue
        resized = cv2.resize(frame["image"][idx], (int(size), int(size)),
                             interpolation=cv2.INTER_CUBIC)
        samples.append((resized - 127.5) / 127.5)
    return np.array(samples, dtype=np.float32)
def saveImages(filename, images):
    """Write each image as `<filename>-<index>.png`, mapping [-1, 1] back to uint8 [0, 255]."""
    for index, image in enumerate(images):
        restored = ((image * 127.5) + 127.5).astype(np.uint8)
        mpimg.imsave(filename + "-" + str(index) + ".png", restored)
"francesco.cognolato@hotmail.com"
] | francesco.cognolato@hotmail.com |
3f373e12c77e03b27c6309737dbc8c7e84b3db7d | 5d132708762372e770954532ad8e8f38f3596122 | /schoolsite/backs/backs/migrations/0013_auto_20210118_1350.py | 85a1fa3e48f501dd64da183c9376723eac589e6f | [] | no_license | ether2314/school-back | 95a8a1dac2130930a3b9c8fd2163ce983d0a6ff7 | c9a31a2fda979feaa1e190fcbb2d0538f6604518 | refs/heads/main | 2023-07-04T13:31:15.107774 | 2021-07-29T15:59:12 | 2021-07-29T15:59:12 | 390,728,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | # Generated by Django 3.0.8 on 2021-01-18 12:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop StudClass.Year and turn StudClass.Id into a level-choice primary key."""

    dependencies = [
        ('backs', '0012_auto_20210118_1343'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='studclass',
            name='Year',
        ),
        migrations.AlterField(
            model_name='studclass',
            name='Id',
            # NOTE(review): primary_key='True' is the *string* 'True', not the
            # boolean True. It is truthy so Django treats it as a PK, but the
            # type is wrong — confirm and fix in the model/next migration.
            field=models.CharField(choices=[('100lvl', '100lvl'), ('200lvl', '200lvl'), ('300lvl', '300lvl'), ('400lvl', '400lvl'), ('500lvl', '500lvl')], max_length=6, primary_key='True', serialize=False),
        ),
    ]
| [
"onyii2313@gmail.com"
] | onyii2313@gmail.com |
fde93ece31ff06beaef52315467716f6064342e0 | 83efd4e51e7b8fabaa51cdd096fa7d863e83ab09 | /utils/track_utils.py | f1aaf0fcd7de1141ac18c7dadf2041d32bc12e2a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pbarsic/handtracking | 533bb9f9839e858f775ba10ea6edddbb2c12e132 | 9c5e59b0f706693a9fde7bb858aea1075daa4311 | refs/heads/master | 2020-08-24T21:49:48.730218 | 2019-10-23T17:21:50 | 2019-10-23T17:21:50 | 216,913,014 | 0 | 0 | Apache-2.0 | 2019-10-22T21:29:13 | 2019-10-22T21:29:13 | null | UTF-8 | Python | false | false | 614 | py | def group_detections(detection_box, detection_score, detection_category):
    # Merge the three parallel lists into rows of
    # [box coords..., score, category], one row per detection.
    w=[]
    for det in zip(detection_box, detection_score, detection_category):
        # det = (box, score, category); copy the box coordinates first
        m = [ a for a in det[0] ]
        m.append(det[1])
        m.append(det[2])
        w.append(m)
    return w
def group_detections_threshold(detection_box, detection_score, detection_category, threshold):
    """Merge parallel box/score/category lists into rows, keeping only
    detections whose score is strictly greater than `threshold`.

    Each kept row is [box coords..., score, category].
    """
    rows = []
    for box, score, category in zip(detection_box, detection_score, detection_category):
        if score > threshold:
            row = list(box)
            row.append(score)
            row.append(category)
            rows.append(row)
    return rows
| [
"pbarsic@gmail.com"
] | pbarsic@gmail.com |
2549239c2cb24167a54487c274b0d455622f7692 | 32ef8621468095bf9c6dd912767cb97e9863dc25 | /python/iterables-and-iterators.py | 31978c2aea6a3a158f486b5f938059dabb494a54 | [] | no_license | Seungju182/Hackerrank | 286f1666be5797c1d318788753245696ef52decf | 264533f97bcc8dc771e4e6cbae1937df8ce6bafa | refs/heads/master | 2023-08-17T22:49:58.710410 | 2021-10-25T09:40:46 | 2021-10-25T09:40:46 | 337,652,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import combinations
if __name__ == "__main__":
    # Probability that a random size-K combination of the letters contains 'a'.
    int(input())  # N, the letter count — the value itself is not needed
    letters = input().split()
    k = int(input())
    combos = list(combinations(letters, k))
    favorable = sum(1 for combo in combos if 'a' in combo)
    print(favorable / len(combos))
| [
"tonysj@snu.ac.kr"
] | tonysj@snu.ac.kr |
3eb9faa27601591cf0d6b31b28370c3d97589540 | 61d08e23fbb62e16f7bd9d43673b1cf4e0558c37 | /miraPipeline/pipeline/preflight/preflight_libs/get_context.py | cc6dbb2fd318693a80edb4f861ef0a9019199413 | [] | no_license | jonntd/mira | 1a4b1f17a71cfefd20c96e0384af2d1fdff813e8 | 270f55ef5d4fecca7368887f489310f5e5094a92 | refs/heads/master | 2021-08-31T12:08:14.795480 | 2017-12-21T08:02:06 | 2017-12-21T08:02:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # -*- coding: utf-8 -*-
import get_file_name
import get_engine
def get_context():
    """Return the pipeline step for the current scene.

    First tries to parse the step out of the current file's path via the
    pipeline libraries. If anything goes wrong (no scene open, unparseable
    path, missing pipeline modules), falls back to a default step chosen
    from the running DCC engine.
    """
    try:
        from miraLibs.pipeLibs import pipeFile
        scene_name = get_file_name.get_file_name()
        details = pipeFile.PathDetails.parse_path(scene_name)
        return details.step
    # Was a bare `except:` — that also swallowed SystemExit and
    # KeyboardInterrupt. Exception keeps the intended best-effort fallback
    # without hiding interpreter-level signals.
    except Exception:
        engine = get_engine.get_engine()
        if engine == "maya":
            return "MidMdl"
        elif engine == "nuke":
            return "Comp"
        elif engine == "houdini":
            return "Vfx"
        # Unknown engine: implicitly returns None, as before.
| [
"276575758@qq.com"
] | 276575758@qq.com |
fe918c512b07df47961368267c73f7c7f365e116 | 39875dd75edf170fd8f5ab7841a73ef50eaaca6f | /tests/utils.py | c6206de3606ae956ba3f13b63da84cdebee632fc | [] | no_license | Anmol1696/Thumbnailify | a224a0997051ec77eb2fd3ced26e055ddc63133c | c194d879320483b8ec173d6acf15580b28374c33 | refs/heads/master | 2021-06-20T00:25:29.862974 | 2019-08-28T16:40:44 | 2019-08-28T16:40:44 | 202,716,394 | 1 | 1 | null | 2021-03-25T22:55:52 | 2019-08-16T11:32:22 | Python | UTF-8 | Python | false | false | 351 | py | import configparser
import base64
from pathlib import Path
def load_config():
    """Parse every .ini file that sits next to this module into one ConfigParser."""
    parser = configparser.ConfigParser(allow_no_value=True)
    ini_files = Path(__file__).parent.glob("*.ini")
    parser.read(ini_files)
    return parser
def file_encoder(filename):
    """Return the base64-encoded contents (as bytes) of the file at `filename`."""
    with open(filename, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw)
| [
"anmol.yadav@rakuten.com"
] | anmol.yadav@rakuten.com |
de5121c04015867415044179babf4281786f2d99 | 0c81c371ce9023f3915f2b05c1c4ca7b9f435cf4 | /tools/migrate/rake.py | 577567fe8e2bbd01bc7f896d19a4c5e1de1480ff | [] | no_license | linbirg/om | 831fb7458d1c0176fd76ba4b6f739e95f9593788 | fd1328ebe3fb38893bf88519ca10768507519494 | refs/heads/master | 2021-06-30T16:22:50.327937 | 2020-09-14T07:36:36 | 2020-09-14T07:36:36 | 159,285,543 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,080 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Author: yizr
import os
import sys
__abs_file__ = os.path.abspath(__file__)
migrate_dir = os.path.dirname(__abs_file__)
tool_dir = os.path.dirname(migrate_dir)
code_dir = os.path.dirname(tool_dir)
sys.path.append(code_dir)
import re
import importlib
import importlib.util
# from lib.yom import DDL, Model
# from lib import dbutil as db
from tools.migrate.rake_migrate import RakeMigrate as Migrate
# class Rake(object):
# def __init__(self):
# self.__regx__ = r'(.*)(_)([0-9]+)(\.py)$'
# self.__p__ = re.compile(self.__regx__)
# return super().__init__()
# def rake(self):
# for task in self.tasks:
# task.rake()
def get_current_path():
    """Return the process's current working directory."""
    return os.getcwd()
def dir_file(path):
    """Return the names of all entries inside directory `path`."""
    return os.listdir(path)
# 下划线[num]_.py结尾
def is_name_numberd(name):
    """True when `name` matches the migration pattern '<number>_<rest>.py'."""
    return re.match(r'^([0-9]+)(_)(.*)(\.py)$', name) is not None
def parse_number(name):
    """Return the leading integer of '<number>_<rest>.py', or None when it doesn't match."""
    match = re.match(r'^([0-9]+)(_)(.*)(\.py)$', name)
    return int(match.group(1)) if match else None
def parse_module_name(file_name):
    """Strip a trailing '.py' from `file_name`; return None if there is none."""
    match = re.match(r'^(.*)(\.py)$', file_name)
    if match is None:
        return None
    return match.group(1)
def _sort_(list_names):
    """Sort migration file names by their leading number (ascending)."""
    return sorted(list_names, key=parse_number)
def list_all_migration_files(path):
    """Return every '<number>_<name>.py' migration file inside `path`."""
    return [name for name in dir_file(path) if is_name_numberd(name)]
def check_module(module_name):
    """Look up the import spec of `module_name` without importing it.

    Returns the ModuleSpec when importable, otherwise None. Prints a
    status line either way.
    """
    spec = importlib.util.find_spec(module_name)
    if spec is None:
        print("Module: {} not found".format(module_name))
        return None
    print("Module: {} can be imported".format(module_name))
    return spec
def import_module_from_spec(module_spec):
    """Materialize and execute a module from `module_spec`, returning the module."""
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
def loader(mdl_name):
    """Import `mdl_name` via spec lookup; return the module, or None when missing."""
    spec = check_module(mdl_name)
    if spec is None:
        return None
    return import_module_from_spec(spec)
def list_all_klass(module):
    """Return the names of every class reachable on `module`.

    `module` may be a module object or a module name (loaded on demand).
    """
    if type(module) == str:
        module = loader(module)
    return [name for name in dir(module)
            if isinstance(getattr(module, name), type)]
def is_child_of(obj, cls):
    """Return True when `obj` (a class, or an instance) descends from `cls`.

    Walks ``obj.__bases__`` recursively. When `obj` has no ``__bases__``
    (i.e. it is an instance rather than a class), the check is retried on
    its class.
    """
    try:
        # Direct bases first.
        # NOTE(review): `isinstance(i, cls)` also matches when `cls` is a
        # metaclass of a base — confirm this is intended.
        for i in obj.__bases__:
            if i is cls or isinstance(i, cls):
                return True
        # Then recurse up the inheritance tree.
        for i in obj.__bases__:
            if is_child_of(i, cls):
                return True
    except AttributeError:
        # `obj` is an instance (no __bases__): test its class instead.
        return is_child_of(obj.__class__, cls)
    return False
def get_all_klass_type_in(module, klass=None):
    """Return every attribute of `module` that descends from `klass`.

    `module` may be a module object or a name (loaded on demand); `klass`
    defaults to `type`, i.e. "any class".
    """
    if type(module) == str:
        module = loader(module)
    if not klass:
        klass = type
    return [getattr(module, name) for name in dir(module)
            if is_child_of(getattr(module, name), klass)]
def max_number(path=None):
    """Return the highest migration number found in `path` (cwd by default), or 0."""
    if path is None:
        path = get_current_path()
    migrations = list_all_migration_files(path)
    if not migrations:
        return 0
    return parse_number(_sort_(migrations)[-1])
# def main(path=None):
# if path is None:
# path = get_cuurent_path()
# # print(path)
# childs = list_all_migration_files(path)
# sorted_childs = _sort_(childs)
# for f in sorted_childs:
# mdl = loader(parse_module_name(f))
# klasss = get_all_klass_type_in(mdl, Migrate)
# for k in klasss:
# # print(k)
# k().down()
def change_to_camel(name, sep='_'):
    """Convert a `sep`-separated name to lowerCamelCase.

    'foo_bar_baz' -> 'fooBarBaz'; the first word is lowercased, each
    following word is capitalized, and the pieces are joined directly.
    """
    parts = str(name).split(sep)
    head = parts[0].lower()
    tail = [word.capitalize() for word in parts[1:]]
    return ''.join([head] + tail)
# 包含下划线则认为是
def is_slash_name(name):
    """True when `name` contains an underscore (treated as snake_case)."""
    return '_' in name
def change_to_slash_name(name):
    """Convert a CamelCase `name` to snake_case.

    Names that already contain an underscore are simply lowercased.
    A new word starts at a lower->Upper boundary, or where an acronym
    run ends (UPPER UPPER lower, e.g. 'HTTPServer' -> 'http_server').

    Bug fix: the original loop stopped one character early and silently
    dropped the final character of every CamelCase name longer than one
    character ('CamelCase' -> 'camel_cas').
    """
    if '_' in name:
        return name.lower()
    if not name:
        return name
    chars = [name[0]]
    for i in range(1, len(name)):
        cur_upper = name[i].isupper()
        prev_upper = name[i - 1].isupper()
        # Lookahead is safe: False at the last character.
        next_lower = i + 1 < len(name) and name[i + 1].islower()
        # Word boundary: lower->Upper, or end of an acronym run.
        if cur_upper and (not prev_upper or next_lower):
            chars.append('_')
        chars.append(name[i])
    return ''.join(chars).lower()
def generate_file(name='migrate_task', path=None):
    """Scaffold a new migration file in `path` (cwd by default).

    The file is named '<next_number>_<snake_name>.py' and contains a
    RakeMigrate subclass skeleton named after the camelCase form of
    `name`, with empty up()/down() stubs to fill in.
    """
    if not path:
        path = get_current_path()
    slash_name = change_to_slash_name(name)
    # Next sequence number: one past the highest existing migration.
    numbered_name = '%d_%s.py' % (max_number(path) + 1, slash_name)
    full_path = os.path.sep.join([path, numbered_name])
    with open(full_path, 'w') as f:
        # Template for the generated migration module; the three %s slots
        # are (class name, table name for up(), table name for down()).
        tmps = """#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Author: yizr
import os
import sys
__abs_file__ = os.path.abspath(__file__)
tool_dir = os.path.dirname(os.path.dirname(__abs_file__))
code_dir = os.path.dirname(tool_dir)
sys.path.append(code_dir)
from tools.migrate.rake_migrate import RakeMigrate
from lib import dbutil
import module.dao.base.pg_field_desc as fd
class %s(RakeMigrate):
    def __init__(self):
        super().__init__()
        self.db_conn = dbutil.get_connection('risk_db')
    def up(self):
        self.create_table('%s',*columns, fd.UpdateAtField(), fd.CreateAtField())
    def down(self):
        self.drop('%s')
"""
        class_name = change_to_camel(slash_name)
        tmps = tmps % (class_name, slash_name, slash_name)
        f.write(tmps)
def run_migrate(path=None):
    """Apply every migration under `path` (cwd by default) in ascending order.

    Each Migrate subclass found is first rolled back (down) and then
    applied (up), making the run idempotent.
    """
    if path is None:
        path = get_current_path()
    for file_name in _sort_(list_all_migration_files(path)):
        module = loader(parse_module_name(file_name))
        for migrate_cls in get_all_klass_type_in(module, Migrate):
            task = migrate_cls()
            task.down()
            task.up()
def run_rollback(path=None):
    """Roll back every migration under `path` (cwd by default) in descending order."""
    if path is None:
        path = get_current_path()
    for file_name in reversed(_sort_(list_all_migration_files(path))):
        module = loader(parse_module_name(file_name))
        for migrate_cls in get_all_klass_type_in(module, Migrate):
            migrate_cls().down()
def print_usage():
    """Print the command-line help for this script to stdout."""
    print('usage python rake.py [cmd]')
    print('[cmd]:')
    print('    g: generate content eg: g create_table_risk_order')
    # Fixed typo in user-facing help text: "excute" -> "execute".
    print('    m: execute all migration by order.')
    print('    r: rollback by desc order.')
def console(args):
    """Dispatch the CLI: 'g' generates a migration file, 'm' migrates, 'r' rolls back.

    `args` is sys.argv-shaped: args[1] is the command, later entries are
    its optional arguments (name and/or path).
    """
    if len(args) <= 1:
        print_usage()
        return
    cmd = args[1]
    if cmd == 'g':
        generate_file(name=args[2], path=args[3] if len(args) > 3 else None)
    if cmd == 'm':
        run_migrate(path=args[2] if len(args) > 2 else None)
    if cmd == 'r':
        run_rollback(path=args[2] if len(args) > 2 else None)
if __name__ == '__main__':
    # Entry point: forward the raw argv to the CLI dispatcher.
    console(sys.argv)
| [
"linbirg@gmail.com"
] | linbirg@gmail.com |
fbf87fadcf29cafc4cea4e426184591e1c1e483d | 6d920708bcf248e51734bb87f4032dd070603500 | /cats_library/tests/__init__.py | fe243b030cfec515c65f2a517d0b9d6490f1cb14 | [] | no_license | zhenv5/cats | 63e059ccb1bd84fc5225fce62812cbbc3bc0a724 | ff15338b15b437d56667e45c1bb28bc99b04aed0 | refs/heads/master | 2020-12-02T19:33:23.439128 | 2017-04-18T12:32:05 | 2017-04-18T12:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from __future__ import division, print_function, absolute_import
import numpy
__author__ = 'Alex Rogozhnikov'
def generate_dataset(n_samples=3754, n_features=3, n_categories=10):
    """Build a toy regression dataset.

    Features are random category ids in [0, n_categories); each category
    carries a fixed Gaussian effect, and the target is the sum of the
    effects of a row's categories. Returns (data, y).
    """
    features = numpy.random.randint(0, n_categories,
                                    size=[n_samples, n_features])
    effects = numpy.random.normal(size=n_categories)
    targets = effects[features].sum(axis=1)
    return features, targets
| [
"axelr@man1-ipython01.cern.dev.yandex.net"
] | axelr@man1-ipython01.cern.dev.yandex.net |
86f2fa42be3a378d013ce34ab1665eeb061a4de5 | f07b7f546278c86ec91fe9fdacbe4acc266b5ef0 | /blog/blog/wvenv/Lib/site-packages/spyder/app/tour.py | 08ccd027dbdc5cb3695aea84507318de7ace5723 | [] | no_license | CankayaUniversity/ceng-407-408-2019-2020-Patent-Comparison-System | 0386a6d8651a9ce875a9cf56013c19d8242204c9 | d9c0f2d84d90932b962a0618b01652f3bd560f25 | refs/heads/master | 2020-08-18T09:55:23.676188 | 2020-06-27T21:19:20 | 2020-06-27T21:19:20 | 215,772,427 | 2 | 4 | null | 2020-06-27T21:26:31 | 2019-10-17T11:08:50 | null | UTF-8 | Python | false | false | 47,982 | py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Spyder interactive tours"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
from __future__ import division
import sys
from math import ceil
# Third party imports
from qtpy.QtCore import (QEasingCurve, QPoint, QPropertyAnimation, QRectF, Qt,
Signal)
from qtpy.QtGui import (QBrush, QColor, QIcon, QPainter, QPainterPath, QPen,
QPixmap, QRegion)
from qtpy.QtWidgets import (QAction, QApplication, QComboBox, QDialog,
QGraphicsOpacityEffect, QHBoxLayout, QLabel,
QLayout, QMainWindow, QMenu, QPushButton,
QSpacerItem, QToolButton, QVBoxLayout, QWidget)
# Local imports
from spyder.config.base import _, get_image_path
from spyder.config.gui import is_dark_interface
from spyder.py3compat import to_binary_string
from spyder.utils.qthelpers import add_actions, create_action
from spyder.utils import icon_manager as ima
# Theme-dependent colors for the tour tip box: title-bar color and body color.
if is_dark_interface():
    # Dark theme: one dark blue-gray shared by title bar and body.
    MAIN_TOP_COLOR = MAIN_BG_COLOR = QColor.fromRgb(25, 35, 45)
else:
    # Light theme: light gray title bar over a white body.
    MAIN_TOP_COLOR = QColor.fromRgb(230, 230, 230)
    MAIN_BG_COLOR = QColor.fromRgb(255, 255, 255)
# FIXME: Known issues
# How to handle if an specific dockwidget does not exists/load, like ipython
# on python3.3, should that frame be removed? should it display a warning?
class SpyderWidgets(object):
    """List of supported widgets to highlight/decorate.

    Values are attribute-path strings, presumably evaluated against the
    Spyder main window to resolve the actual widget for a tour step —
    TODO confirm with the tour runner. Empty strings mark widgets that
    are not wired up yet.
    """

    # Panes
    ipython_console = 'ipyconsole'
    editor = 'editor'
    editor_line_number_area = 'editor.get_current_editor().linenumberarea'
    editor_scroll_flag_area = 'editor.get_current_editor().scrollflagarea'
    file_explorer = 'explorer'
    help_plugin = 'help'
    variable_explorer = 'variableexplorer'
    history_log = "historylog"

    # Toolbars (placeholders — no widget paths assigned yet)
    toolbars = ''
    toolbars_active = ''
    toolbar_file = ''
    toolbar_edit = ''
    toolbar_run = ''
    toolbar_debug = ''
    toolbar_main = ''

    # Other UI areas (placeholders)
    status_bar = ''
    menu_bar = ''
    menu_file = ''
    menu_edit = ''
def get_tours(index=None):
    """Return all available tours, or just the one selected by `index`."""
    return get_tour(index)
def get_tour(index):
    """
    This function generates a list of tours.

    The index argument is used to retrieve a particular tour. If None is
    passed, it will return the full list of tours. If instead -1 is given,
    this function will return a test tour

    To add more tours a new variable needs to be created to hold the list of
    dicts and the tours variable at the bottom of this function needs to be
    updated accordingly
    """
    sw = SpyderWidgets

    qtconsole_link = "https://qtconsole.readthedocs.io/en/stable/index.html"

    # This test should serve as example of keys to use in the tour frame dics
    # Recognized keys per step: 'title', 'content' (rich text), 'image',
    # 'widgets' (panes to uncover), 'decoration' (widgets to outline),
    # 'interact' (allow mouse interaction), 'run' (code to execute).
    test = [{'title': "Welcome to Spyder introduction tour",
             'content': "<b>Spyder</b> is an interactive development \
                         environment. This tip panel supports rich text. <br>\
                         <br> it also supports image insertion to the right so\
                         far",
             'image': 'tour-spyder-logo.png'},

            {'title': "Widget display",
             'content': ("This show how a widget is displayed. The tip panel "
                         "is adjusted based on the first widget in the list"),
             'widgets': ['button1'],
             'decoration': ['button2'],
             'interact': True},

            {'title': "Widget display",
             'content': ("This show how a widget is displayed. The tip panel "
                         "is adjusted based on the first widget in the list"),
             'widgets': ['button1'],
             'decoration': ['button1'],
             'interact': True},

            {'title': "Widget display",
             'content': ("This show how a widget is displayed. The tip panel "
                         "is adjusted based on the first widget in the list"),
             'widgets': ['button1'],
             'interact': True},

            {'title': "Widget display and highlight",
             'content': "This shows how a highlighted widget looks",
             'widgets': ['button'],
             'decoration': ['button'],
             'interact': False},
            ]

    # The main introduction tour shown to new users.
    intro = [{'title': _("Welcome to the Introduction tour"),
              'content': _("<b>Spyder</b> is a powerful Interactive "
                           "Development Environment (or IDE) for the Python "
                           "programming language.<br><br>"
                           "Here we are going to guide you through its most "
                           "important features.<br><br>"
                           "Please use the arrow keys or click on the buttons "
                           "below to move along the tour."),
              'image': 'tour-spyder-logo.png'},

             {'title': _("The Editor"),
              'content': _("This is the pane where you write Python code before "
                           "evaluating it. You can get automatic suggestions "
                           "and completions while writing, by pressing the "
                           "<b>Tab</b> key next to a given text.<br><br>"
                           "The Editor comes "
                           "with a line number area (highlighted here in red), "
                           "where Spyder shows warnings and syntax errors. They "
                           "can help you to detect potential problems before "
                           "running the code.<br><br>"
                           "You can also set debug breakpoints in the line "
                           "number area, by doing a double click next to "
                           "a non-empty line."),
              'widgets': [sw.editor],
              'decoration': [sw.editor_line_number_area]},

             {'title': _("The IPython console"),
              'content': _("This is one of panes where you can run or "
                           "execute the code you wrote on the Editor. To do it "
                           "you need to press the <b>F5</b> key.<br><br>"
                           "This console comes with several "
                           "useful features that greatly improve your "
                           "programming workflow (like syntax highlighting and "
                           "inline plots). If you want to know more about them, "
                           "please follow this <a href=\"{0}\">link</a>.<br><br>"
                           "Please click on the button below to run some simple "
                           "code in this console. This will be useful to show "
                           "you other important features.").format(
                                                           qtconsole_link),
              'widgets': [sw.ipython_console],
              'run': ["li = list(range(100))", "d = {'a': 1, 'b': 2}"]
              },

             {'title': _("The Variable Explorer"),
              'content': _("In this pane you can view and edit the variables "
                           "generated during the execution of a program, or "
                           "those entered directly in one of Spyder "
                           "consoles.<br><br>"
                           "As you can see, the Variable Explorer is showing "
                           "the variables generated during the last step of "
                           "this tour. By doing a double-click on any "
                           "of them, a new window will be opened, where you "
                           "can inspect and modify their contents."),
              'widgets': [sw.variable_explorer],
              'interact': True},

             {'title': _("Help"),
              'content': _("This pane displays documentation of the "
                           "functions, classes, methods or modules you are "
                           "currently using in the Editor or the Consoles.<br><br>"
                           "To use it, you need to press <b>Ctrl+I</b> in "
                           "front of an object. If that object has some "
                           "documentation associated with it, it will be "
                           "displayed here."),
              'widgets': [sw.help_plugin],
              'interact': True},

             {'title': _("The File Explorer"),
              'content': _("This pane lets you navigate through the directories "
                           "and files present in your computer.<br><br>"
                           "You can also open any of these files with its "
                           "corresponding application, by doing a double "
                           "click on it.<br><br>"
                           "There is one exception to this rule: plain-text "
                           "files will always be opened in the Spyder Editor."),
              'widgets': [sw.file_explorer],
              'interact': True},

             {'title': _("The History Log"),
              'content': _("This pane records all commands introduced in "
                           "the Python and IPython consoles."),
              'widgets': [sw.history_log],
              'interact': True},
             ]

#                   ['The run toolbar',
#                       'Should be short',
#                       ['self.run_toolbar'], None],
#                   ['The debug toolbar',
#                       '',
#                       ['self.debug_toolbar'], None],
#                   ['The main toolbar',
#                       '',
#                       ['self.main_toolbar'], None],
#                   ['The editor',
#                       'Spyder has differnet bla bla bla',
#                       ['self.editor.dockwidget'], None],
#                   ['The editor',
#                       'Spyder has differnet bla bla bla',
#                       ['self.outlineexplorer.dockwidget'], None],
#
#                   ['The menu bar',
#                       'Spyder has differnet bla bla bla',
#                       ['self.menuBar()'], None],
#
#                   ['The menu bar',
#                       'Spyder has differnet bla bla bla',
#                       ['self.statusBar()'], None],
#
#
#                   ['The toolbars!',
#                       'Spyder has differnet bla bla bla',
#                       ['self.variableexplorer.dockwidget'], None],
#                   ['The toolbars MO!',
#                       'Spyder has differnet bla bla bla',
#                       ['self.extconsole.dockwidget'], None],
#                   ['The whole window?!',
#                       'Spyder has differnet bla bla bla',
#                       ['self'], None],
#                   ['Lets try something!',
#                       'Spyder has differnet bla bla bla',
#                       ['self.extconsole.dockwidget',
#                        'self.variableexplorer.dockwidget'], None]
#
#                  ]

    feat30 = [{'title': "New features in Spyder 3.0",
               'content': _("<b>Spyder</b> is an interactive development "
                            "environment based on bla"),
               'image': 'spyder.png'},

              {'title': _("Welcome to Spyder introduction tour"),
               'content': _("Spyder is an interactive development environment "
                            "based on bla"),
               'widgets': ['variableexplorer']},
              ]

    tours = [{'name': _('Introduction tour'), 'tour': intro},
             {'name': _('New features in version 3.0'), 'tour': feat30}]

    # index=None -> all tours; index=-1 -> the test tour; otherwise one tour.
    if index is None:
        return tours
    elif index == -1:
        return [test]
    else:
        return [tours[index]]
class FadingDialog(QDialog):
    """A general fade in/fade out QDialog with some builtin functions.

    Fading is implemented with a QGraphicsOpacityEffect driven by a
    QPropertyAnimation on its 'opacity' property. Callback lists can be
    registered to run before/after each fade direction.
    """
    sig_key_pressed = Signal()

    def __init__(self, parent, opacity, duration, easing_curve):
        """
        Parameters: `opacity`, `duration` and `easing_curve` are
        two-element sequences holding the (in, out) values for each
        fade direction.
        """
        super(FadingDialog, self).__init__(parent)

        self.parent = parent
        self.opacity_min = min(opacity)
        self.opacity_max = max(opacity)
        self.duration_fadein = duration[0]
        self.duration_fadeout = duration[-1]
        self.easing_curve_in = easing_curve[0]
        self.easing_curve_out = easing_curve[-1]
        self.effect = None
        self.anim = None

        self._fade_running = False
        self._funcs_before_fade_in = []
        self._funcs_after_fade_in = []
        self._funcs_before_fade_out = []
        self._funcs_after_fade_out = []

        self.setModal(False)

    def _run(self, funcs):
        """Call each function in `funcs`, in order."""
        for func in funcs:
            func()

    def _run_before_fade_in(self):
        """Run the callbacks registered for just before fade-in starts."""
        self._run(self._funcs_before_fade_in)

    def _run_after_fade_in(self):
        """Run the callbacks registered for right after fade-in finishes."""
        self._run(self._funcs_after_fade_in)

    def _run_before_fade_out(self):
        """Run the callbacks registered for just before fade-out starts."""
        self._run(self._funcs_before_fade_out)

    def _run_after_fade_out(self):
        """Run the callbacks registered for right after fade-out finishes."""
        self._run(self._funcs_after_fade_out)

    def _set_fade_finished(self):
        """Mark the current fade animation as no longer running."""
        self._fade_running = False

    def _fade_setup(self):
        """Create a fresh opacity effect + animation pair for a fade."""
        self._fade_running = True
        self.effect = QGraphicsOpacityEffect(self)
        self.setGraphicsEffect(self.effect)
        self.anim = QPropertyAnimation(self.effect, to_binary_string("opacity"))

    # --- public api
    def fade_in(self, on_finished_connect):
        """Show the dialog and animate opacity from min to max.

        `on_finished_connect` is connected to the animation's finished
        signal.
        """
        self._run_before_fade_in()
        self._fade_setup()
        self.show()
        self.raise_()
        self.anim.setEasingCurve(self.easing_curve_in)
        self.anim.setStartValue(self.opacity_min)
        self.anim.setEndValue(self.opacity_max)
        self.anim.setDuration(self.duration_fadein)
        self.anim.finished.connect(on_finished_connect)
        self.anim.finished.connect(self._set_fade_finished)
        self.anim.finished.connect(self._run_after_fade_in)
        self.anim.start()

    def fade_out(self, on_finished_connect):
        """Animate opacity from max to min (dialog is left shown).

        `on_finished_connect` is connected to the animation's finished
        signal.
        """
        self._run_before_fade_out()
        self._fade_setup()
        self.anim.setEasingCurve(self.easing_curve_out)
        self.anim.setStartValue(self.opacity_max)
        self.anim.setEndValue(self.opacity_min)
        self.anim.setDuration(self.duration_fadeout)
        self.anim.finished.connect(on_finished_connect)
        self.anim.finished.connect(self._set_fade_finished)
        self.anim.finished.connect(self._run_after_fade_out)
        self.anim.start()

    def is_fade_running(self):
        """Return True while a fade animation is in progress."""
        return self._fade_running

    def set_funcs_before_fade_in(self, funcs):
        """Set the list of callbacks run before fade-in."""
        self._funcs_before_fade_in = funcs

    def set_funcs_after_fade_in(self, funcs):
        """Set the list of callbacks run after fade-in."""
        self._funcs_after_fade_in = funcs

    def set_funcs_before_fade_out(self, funcs):
        """Set the list of callbacks run before fade-out."""
        self._funcs_before_fade_out = funcs

    def set_funcs_after_fade_out(self, funcs):
        """Set the list of callbacks run after fade-out."""
        self._funcs_after_fade_out = funcs
class FadingCanvas(FadingDialog):
    """The black semi transparent canvas that covers the application.

    Holes are cut out of the canvas over the widgets a tour step points
    at, and a red stroke is painted around 'decoration' widgets.
    """
    def __init__(self, parent, opacity, duration, easing_curve, color,
                 tour=None):
        """Create a black semi transparent canvas that covers the app."""
        super(FadingCanvas, self).__init__(parent, opacity, duration,
                                           easing_curve)
        self.parent = parent
        self.tour = tour

        self.color = color              # Canvas color
        self.color_decoration = Qt.red  # Decoration color
        self.stroke_decoration = 2      # width in pixels for decoration

        self.region_mask = None
        self.region_subtract = None
        self.region_decoration = None

        self.widgets = None             # The widget to uncover
        self.decoration = None          # The widget to draw decoration
        self.interaction_on = False

        self.path_current = None
        self.path_subtract = None
        self.path_full = None
        self.path_decoration = None

        # widget setup
        self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setAttribute(Qt.WA_TransparentForMouseEvents)
        self.setModal(False)
        self.setFocusPolicy(Qt.NoFocus)

        self.set_funcs_before_fade_in([self.update_canvas])
        self.set_funcs_after_fade_out([lambda: self.update_widgets(None),
                                       lambda: self.update_decoration(None)])

    def set_interaction(self, value):
        """Enable/disable mouse interaction with the uncovered widgets."""
        self.interaction_on = value

    def update_canvas(self):
        """Rebuild the paint paths and click-through mask for the canvas.

        The full-window path minus the uncovered widgets' rectangles is
        what gets painted dark; decoration widgets get a thin outline
        path. When interaction is on, the mask lets clicks through the
        holes.
        """
        w, h = self.parent.size().width(), self.parent.size().height()

        self.path_full = QPainterPath()
        self.path_subtract = QPainterPath()
        self.path_decoration = QPainterPath()
        self.region_mask = QRegion(0, 0, w, h)

        self.path_full.addRect(0, 0, w, h)
        # Add the path
        if self.widgets is not None:
            for widget in self.widgets:
                temp_path = QPainterPath()
                # if widget is not found... find more general way to handle
                if widget is not None:
                    widget.raise_()
                    widget.show()
                    geo = widget.frameGeometry()
                    width, height = geo.width(), geo.height()
                    point = widget.mapTo(self.parent, QPoint(0, 0))
                    x, y = point.x(), point.y()
                    temp_path.addRect(QRectF(x, y, width, height))

                    temp_region = QRegion(x, y, width, height)

                if self.interaction_on:
                    self.region_mask = self.region_mask.subtracted(temp_region)
                self.path_subtract = self.path_subtract.united(temp_path)

            self.path_current = self.path_full.subtracted(self.path_subtract)
        else:
            self.path_current = self.path_full

        if self.decoration is not None:
            for widget in self.decoration:
                temp_path = QPainterPath()
                widget.raise_()
                widget.show()
                geo = widget.frameGeometry()
                width, height = geo.width(), geo.height()
                point = widget.mapTo(self.parent, QPoint(0, 0))
                x, y = point.x(), point.y()
                temp_path.addRect(QRectF(x, y, width, height))

                # A 1px ring: outer rect minus inner rect forms the
                # clickable decoration border region.
                temp_region_1 = QRegion(x-1, y-1, width+2, height+2)
                temp_region_2 = QRegion(x+1, y+1, width-2, height-2)
                temp_region = temp_region_1.subtracted(temp_region_2)

                if self.interaction_on:
                    self.region_mask = self.region_mask.united(temp_region)

                self.path_decoration = self.path_decoration.united(temp_path)
        else:
            self.path_decoration.addRect(0, 0, 0, 0)

        # Add a decoration stroke around widget
        self.setMask(self.region_mask)
        self.update()
        self.repaint()

    def update_widgets(self, widgets):
        """Set the widgets to uncover on the next canvas update."""
        self.widgets = widgets

    def update_decoration(self, widgets):
        """Set the widgets to outline on the next canvas update."""
        self.decoration = widgets

    def paintEvent(self, event):
        """Override Qt method"""
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # Decoration
        painter.fillPath(self.path_current, QBrush(self.color))
        painter.strokePath(self.path_decoration, QPen(self.color_decoration,
                                                      self.stroke_decoration))
#        decoration_fill = QColor(self.color_decoration)
#        decoration_fill.setAlphaF(0.25)
#        painter.fillPath(self.path_decoration, decoration_fill)

    def reject(self):
        """Override Qt method"""
        # Escape ends the tour (signalled), but never mid-fade.
        if not self.is_fade_running():
            key = Qt.Key_Escape
            self.key_pressed = key
            self.sig_key_pressed.emit()

    def mousePressEvent(self, event):
        """Override Qt method"""
        # Swallow clicks on the dark canvas itself.
        pass

    def focusInEvent(self, event):
        """Override Qt method."""
        # To be used so tips do not appear outside spyder
        if self.hasFocus():
            self.tour.gain_focus()

    def focusOutEvent(self, event):
        """Override Qt method."""
        # To be used so tips do not appear outside spyder
        if self.tour.step_current != 0:
            self.tour.lost_focus()
class FadingTipBox(FadingDialog):
    """Frameless, fading dialog that displays one step of an interface tour.

    Shows the step title (inside a combo box that doubles as a direct
    step selector), the step content and optional image, an optional
    "Run code" button, and home/previous/next/end navigation buttons.
    Navigation key presses are forwarded through ``sig_key_pressed``.
    """

    def __init__(self, parent, opacity, duration, easing_curve, tour=None,
                 color_top=None, color_back=None, combobox_background=None):
        super(FadingTipBox, self).__init__(parent, opacity, duration,
                                           easing_curve)
        self.holder = self.anim  # needed for qt to work
        self.parent = parent
        self.tour = tour

        self.frames = None
        self.offset_shadow = 0
        self.fixed_width = 300
        self.key_pressed = None

        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint |
                            Qt.WindowStaysOnTopHint)
        self.setModal(False)

        # Widgets
        def toolbutton(icon):
            # Small helper: build a flat (auto-raise) tool button.
            bt = QToolButton()
            bt.setAutoRaise(True)
            bt.setIcon(icon)
            return bt

        self.button_close = toolbutton(ima.icon("tour.close"))
        self.button_home = toolbutton(ima.icon("tour.home"))
        self.button_previous = toolbutton(ima.icon("tour.previous"))
        self.button_end = toolbutton(ima.icon("tour.end"))
        self.button_next = toolbutton(ima.icon("tour.next"))
        self.button_run = QPushButton(_('Run code'))
        self.button_disable = None  # 'previous' | 'next' | None, set by the tour
        self.button_current = QToolButton()
        self.label_image = QLabel()

        self.label_title = QLabel()
        self.combo_title = QComboBox()
        self.label_current = QLabel()
        self.label_content = QLabel()

        # Fixed content width keeps every tip the same size.
        self.label_content.setMinimumWidth(self.fixed_width)
        self.label_content.setMaximumWidth(self.fixed_width)
        self.label_current.setAlignment(Qt.AlignCenter)
        self.label_content.setWordWrap(True)

        self.widgets = [self.label_content, self.label_title,
                        self.label_current, self.combo_title,
                        self.button_close, self.button_run, self.button_next,
                        self.button_previous, self.button_end,
                        self.button_home, self.button_current]

        arrow = get_image_path('hide.png')

        self.color_top = color_top
        self.color_back = color_back
        self.combobox_background = combobox_background
        # Bug fix: a ';' was missing after 'background-color: {}'.
        # Without it the Qt stylesheet parser treats the following
        # 'border-width' line as part of the background-color value and
        # drops both declarations.
        self.stylesheet = '''QComboBox {{
                             padding-left: 5px;
                             background-color: {};
                             border-width: 0px;
                             border-radius: 0px;
                             min-height:20px;
                             max-height:20px;
                             }}
                             QComboBox::drop-down  {{
                             subcontrol-origin: padding;
                             subcontrol-position: top left;
                             border-width: 0px;
                             }}
                             QComboBox::down-arrow {{
                             image: url({});
                             }}
                             '''.format(self.combobox_background.name(), arrow)
        # Windows fix, slashes should be always in unix-style
        self.stylesheet = self.stylesheet.replace('\\', '/')

        self.setFocusPolicy(Qt.StrongFocus)
        for widget in self.widgets:
            widget.setFocusPolicy(Qt.NoFocus)
            widget.setStyleSheet(self.stylesheet)

        layout_top = QHBoxLayout()
        layout_top.addWidget(self.combo_title)
        layout_top.addStretch()
        layout_top.addWidget(self.button_close)
        layout_top.addSpacerItem(QSpacerItem(self.offset_shadow,
                                             self.offset_shadow))

        layout_content = QHBoxLayout()
        layout_content.addWidget(self.label_content)
        layout_content.addWidget(self.label_image)
        layout_content.addSpacerItem(QSpacerItem(5, 5))

        layout_run = QHBoxLayout()
        layout_run.addStretch()
        layout_run.addWidget(self.button_run)
        layout_run.addStretch()
        layout_run.addSpacerItem(QSpacerItem(self.offset_shadow,
                                             self.offset_shadow))

        layout_navigation = QHBoxLayout()
        layout_navigation.addWidget(self.button_home)
        layout_navigation.addWidget(self.button_previous)
        layout_navigation.addStretch()
        layout_navigation.addWidget(self.label_current)
        layout_navigation.addStretch()
        layout_navigation.addWidget(self.button_next)
        layout_navigation.addWidget(self.button_end)
        layout_navigation.addSpacerItem(QSpacerItem(self.offset_shadow,
                                                    self.offset_shadow))

        layout = QVBoxLayout()
        layout.addLayout(layout_top)
        layout.addStretch()
        layout.addSpacerItem(QSpacerItem(15, 15))
        layout.addLayout(layout_content)
        layout.addLayout(layout_run)
        layout.addStretch()
        layout.addSpacerItem(QSpacerItem(15, 15))
        layout.addLayout(layout_navigation)
        layout.addSpacerItem(QSpacerItem(self.offset_shadow,
                                         self.offset_shadow))

        layout.setSizeConstraint(QLayout.SetFixedSize)
        self.setLayout(layout)

        self.set_funcs_before_fade_in([self._disable_widgets])
        self.set_funcs_after_fade_in([self._enable_widgets, self.setFocus])
        self.set_funcs_before_fade_out([self._disable_widgets])

        self.setContextMenuPolicy(Qt.CustomContextMenu)

        # signals and slots
        # These are defined every time by the AnimatedTour Class

    def _disable_widgets(self):
        """Disable every child widget (called around fade transitions)."""
        for widget in self.widgets:
            widget.setDisabled(True)

    def _enable_widgets(self):
        """Re-enable child widgets after a fade, honoring ``button_disable``."""
        self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint |
                            Qt.WindowStaysOnTopHint)
        for widget in self.widgets:
            widget.setDisabled(False)

        # First/last steps keep their backward/forward buttons disabled.
        if self.button_disable == 'previous':
            self.button_previous.setDisabled(True)
            self.button_home.setDisabled(True)
        elif self.button_disable == 'next':
            self.button_next.setDisabled(True)
            self.button_end.setDisabled(True)

    def set_data(self, title, content, current, image, run, frames=None,
                 step=None):
        """Fill the tip box with the data of one tour step.

        Parameters
        ----------
        title/content : str
            Step title and body text.
        current : str
            Progress indicator, e.g. ``'2/10'``.
        image : str or None
            Image filename to show next to the content.
        run : list or None
            Code lines for the "Run code" button; ``None`` hides it.
        frames : list of str
            Combo-box entries, one per step.
        step : int
            Index of the current step in *frames*.
        """
        self.label_title.setText(title)
        self.combo_title.clear()
        self.combo_title.addItems(frames)
        self.combo_title.setCurrentIndex(step)
        # min_content_len = max([len(f) for f in frames])
        # self.combo_title.setMinimumContentsLength(min_content_len)

        # Fix and try to see how it looks with a combo box
        self.label_current.setText(current)
        self.button_current.setText(current)
        self.label_content.setText(content)
        self.image = image

        if image is None:
            # Collapse the image label to a 1x1 placeholder.
            self.label_image.setFixedHeight(1)
            self.label_image.setFixedWidth(1)
        else:
            extension = image.split('.')[-1]
            self.image = QPixmap(get_image_path(image), extension)
            self.label_image.setPixmap(self.image)
            self.label_image.setFixedSize(self.image.size())

        if run is None:
            self.button_run.setVisible(False)
        else:
            self.button_run.setDisabled(False)
            self.button_run.setVisible(True)

        # Refresh layout
        self.layout().activate()

    def set_pos(self, x, y):
        """Move the tip box to (x, y), rounding up to whole pixels."""
        self.x = ceil(x)
        self.y = ceil(y)
        self.move(QPoint(self.x, self.y))

    def build_paths(self):
        """Build the painter paths for the rounded body and the header band."""
        geo = self.geometry()
        radius = 0
        shadow = self.offset_shadow
        x0, y0 = geo.x(), geo.y()
        width, height = geo.width() - shadow, geo.height() - shadow

        left, top = 0, 0
        right, bottom = width, height

        self.round_rect_path = QPainterPath()
        self.round_rect_path.moveTo(right, top + radius)
        self.round_rect_path.arcTo(right-radius, top, radius, radius, 0.0,
                                   90.0)
        self.round_rect_path.lineTo(left+radius, top)
        self.round_rect_path.arcTo(left, top, radius, radius, 90.0, 90.0)
        self.round_rect_path.lineTo(left, bottom-radius)
        self.round_rect_path.arcTo(left, bottom-radius, radius, radius, 180.0,
                                   90.0)
        self.round_rect_path.lineTo(right-radius, bottom)
        self.round_rect_path.arcTo(right-radius, bottom-radius, radius, radius,
                                   270.0, 90.0)
        self.round_rect_path.closeSubpath()

        # Top path
        header = 36
        offset = 2
        left, top = offset, offset
        right = width - (offset)
        self.top_rect_path = QPainterPath()
        # NOTE(review): this lineTo precedes the moveTo to the same point,
        # so it draws from the path's implicit (0, 0) start; the segment
        # has zero area and does not affect fillPath, but looks like dead
        # code -- confirm before removing.
        self.top_rect_path.lineTo(right, top + radius)
        self.top_rect_path.moveTo(right, top + radius)
        self.top_rect_path.arcTo(right-radius, top, radius, radius, 0.0, 90.0)
        self.top_rect_path.lineTo(left+radius, top)
        self.top_rect_path.arcTo(left, top, radius, radius, 90.0, 90.0)
        self.top_rect_path.lineTo(left, top + header)
        self.top_rect_path.lineTo(right, top + header)

    def paintEvent(self, event):
        """Override Qt method: paint the body, the header band and outline."""
        self.build_paths()

        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        painter.fillPath(self.round_rect_path, self.color_back)
        painter.fillPath(self.top_rect_path, self.color_top)
        painter.strokePath(self.round_rect_path, QPen(Qt.gray, 1))

        # TODO: Build the pointing arrow?

    def keyReleaseEvent(self, event):
        """Override Qt method: forward navigation keys to the tour."""
        key = event.key()
        self.key_pressed = key

        keys = [Qt.Key_Right, Qt.Key_Left, Qt.Key_Down, Qt.Key_Up,
                Qt.Key_Escape, Qt.Key_PageUp, Qt.Key_PageDown,
                Qt.Key_Home, Qt.Key_End, Qt.Key_Menu]

        if key in keys:
            if not self.is_fade_running():
                self.sig_key_pressed.emit()

    def mousePressEvent(self, event):
        """Override Qt method: raise the app (and the tip box) on click."""
        # Raise the main application window on click
        self.parent.raise_()
        self.raise_()

        if event.button() == Qt.RightButton:
            pass
            # clicked_widget = self.childAt(event.x(), event.y())
            # if clicked_widget == self.label_current:
            # self.context_menu_requested(event)

    def focusOutEvent(self, event):
        """Override Qt method."""
        # To be used so tips do not appear outside spyder
        self.tour.lost_focus()

    def context_menu_requested(self, event):
        """Show a context menu for jumping to a specific tour step."""
        pos = QPoint(event.x(), event.y())
        menu = QMenu(self)

        actions = []
        action_title = create_action(self, _('Go to step: '), icon=QIcon())
        action_title.setDisabled(True)
        actions.append(action_title)
        # actions.append(create_action(self, _(': '), icon=QIcon()))

        add_actions(menu, actions)
        menu.popup(self.mapToGlobal(pos))

    def reject(self):
        """Qt method to handle escape key event.

        Forward Escape through ``sig_key_pressed`` instead of closing.
        """
        if not self.is_fade_running():
            key = Qt.Key_Escape
            self.key_pressed = key
            self.sig_key_pressed.emit()
class AnimatedTour(QWidget):
    """Orchestrate an animated interface tour.

    Drives a FadingCanvas overlay and a FadingTipBox dialog through a
    sequence of steps ("frames"), handling navigation, focus tracking,
    and move/resize events of the main window.
    """

    def __init__(self, parent):
        QWidget.__init__(self, parent)

        self.parent = parent

        # Variables to adjust
        self.duration_canvas = [666, 666]
        self.duration_tips = [333, 333]
        self.opacity_canvas = [0.0, 0.7]
        self.opacity_tips = [0.0, 1.0]
        self.color = Qt.black
        self.easing_curve = [QEasingCurve.Linear]

        self.current_step = 0
        self.step_current = 0
        self.steps = 0
        self.canvas = None
        self.tips = None
        self.frames = None
        self.spy_window = None

        self.widgets = None
        self.dockwidgets = None
        self.decoration = None
        self.run = None

        self.is_tour_set = False
        self.is_running = False

        # Widgets
        self.canvas = FadingCanvas(self.parent, self.opacity_canvas,
                                   self.duration_canvas, self.easing_curve,
                                   self.color, tour=self)
        self.tips = FadingTipBox(self.parent, self.opacity_tips,
                                 self.duration_tips, self.easing_curve,
                                 tour=self, color_top=MAIN_TOP_COLOR,
                                 color_back=MAIN_BG_COLOR,
                                 combobox_background=MAIN_TOP_COLOR)

        # Widgets setup
        # Needed to fix spyder-ide/spyder#2204.
        self.setAttribute(Qt.WA_TransparentForMouseEvents)

        # Signals and slots
        self.tips.button_next.clicked.connect(self.next_step)
        self.tips.button_previous.clicked.connect(self.previous_step)
        self.tips.button_close.clicked.connect(self.close_tour)
        self.tips.button_run.clicked.connect(self.run_code)
        self.tips.button_home.clicked.connect(self.first_step)
        self.tips.button_end.clicked.connect(self.last_step)
        self.tips.button_run.clicked.connect(
            lambda: self.tips.button_run.setDisabled(True))
        self.tips.combo_title.currentIndexChanged.connect(self.go_to_step)

        # Main window move or resize
        self.parent.sig_resized.connect(self._resized)
        self.parent.sig_moved.connect(self._moved)

        # To capture the arrow keys that allow moving the tour
        self.tips.sig_key_pressed.connect(self._key_pressed)

        # To control the focus of tour
        self.setting_data = False
        self.hidden = False

    def _resized(self, event):
        """Keep the canvas sized to the main window and refresh the step."""
        if self.is_running:
            size = event.size()
            self.canvas.setFixedSize(size)
            self.canvas.update_canvas()

            if self.is_tour_set:
                self._set_data()

    def _moved(self, event):
        """Keep the canvas positioned over the main window."""
        if self.is_running:
            pos = event.pos()
            self.canvas.move(QPoint(pos.x(), pos.y()))

            if self.is_tour_set:
                self._set_data()

    def _close_canvas(self):
        """Hide the tips and fade the canvas out, then hide it."""
        self.tips.hide()
        self.canvas.fade_out(self.canvas.hide)

    def _clear_canvas(self):
        """Reset the canvas to a plain overlay (no cut-outs, no decoration)."""
        # TODO: Add option to also make it white... might be usefull?
        # Make canvas black before transitions
        self.canvas.update_widgets(None)
        self.canvas.update_decoration(None)
        self.canvas.update_canvas()

    def _move_step(self):
        """Load the current step's data, place the tip box and fade it in."""
        self._set_data()

        # Show/raise the widget so it is located first!
        widgets = self.dockwidgets
        if widgets is not None:
            widget = widgets[0]
            if widget is not None:
                widget.show()
                widget.raise_()

        self._locate_tip_box()

        # Change in canvas only after fadein finishes, for visual aesthetics
        self.tips.fade_in(self.canvas.update_canvas)
        self.tips.raise_()

    def _set_modal(self, value, widgets):
        """Toggle modality of *widgets* -- only needed on Windows."""
        platform = sys.platform.lower()

        if 'linux' in platform:
            pass
        elif 'win' in platform:
            for widget in widgets:
                widget.setModal(value)
                widget.hide()
                widget.show()
        elif 'darwin' in platform:
            pass
        else:
            pass

    def _process_widgets(self, names, spy_window):
        """Resolve dotted widget *names* on *spy_window*.

        Returns (widgets, dockwidgets); an entry without a ``dockwidget``
        attribute appears unchanged in both lists.
        """
        widgets = []
        dockwidgets = []

        for name in names:
            base = name.split('.')[0]
            temp = getattr(spy_window, base)

            # Check if it is the current editor
            if 'get_current_editor()' in name:
                temp = temp.get_current_editor()
                temp = getattr(temp, name.split('.')[-1])

            widgets.append(temp)

            # Check if it is a dockwidget and make the widget a dockwidget
            # If not return the same widget
            temp = getattr(temp, 'dockwidget', temp)
            dockwidgets.append(temp)

        return widgets, dockwidgets

    def _set_data(self):
        """Set data that is displayed in each step of the tour."""
        self.setting_data = True
        step, steps, frames = self.step_current, self.steps, self.frames
        current = '{0}/{1}'.format(step + 1, steps)
        frame = frames[step]

        combobox_frames = [u"{0}. {1}".format(i+1, f['title'])
                           for i, f in enumerate(frames)]

        title, content, image = '', '', None
        widgets, dockwidgets, decoration = None, None, None
        run = None

        # Check if entry exists in dic and act accordingly
        if 'title' in frame:
            title = frame['title']

        if 'content' in frame:
            content = frame['content']

        if 'widgets' in frame:
            widget_names = frames[step]['widgets']
            # Get the widgets based on their name
            widgets, dockwidgets = self._process_widgets(widget_names,
                                                         self.spy_window)
        self.widgets = widgets
        self.dockwidgets = dockwidgets

        if 'decoration' in frame:
            widget_names = frames[step]['decoration']
            deco, decoration = self._process_widgets(widget_names,
                                                     self.spy_window)
            self.decoration = decoration

        if 'image' in frame:
            image = frames[step]['image']

        if 'interact' in frame:
            self.canvas.set_interaction(frame['interact'])
            if frame['interact']:
                self._set_modal(False, [self.tips])
            else:
                self._set_modal(True, [self.tips])
        else:
            self.canvas.set_interaction(False)
            self._set_modal(True, [self.tips])

        if 'run' in frame:
            # Assume that the first widget is the console
            run = frame['run']
        self.run = run

        self.tips.set_data(title, content, current, image, run,
                           frames=combobox_frames, step=step)
        self._check_buttons()

        # Make canvas black when starting a new place of decoration
        self.canvas.update_widgets(dockwidgets)
        self.canvas.update_decoration(decoration)
        self.setting_data = False

    def _locate_tip_box(self):
        """Choose the best screen position for the tip box.

        Tall highlighted widgets get the tip at their side; otherwise it
        goes above or below; with no highlighted widget it is centered
        on the main window.
        """
        dockwidgets = self.dockwidgets

        # Store the dimensions of the main window
        geo = self.parent.frameGeometry()
        x, y, width, height = geo.x(), geo.y(), geo.width(), geo.height()
        self.width_main = width
        self.height_main = height
        self.x_main = x
        self.y_main = y

        delta = 20

        # Here is the tricky part to define the best position for the
        # tip widget
        if dockwidgets is not None:
            if dockwidgets[0] is not None:
                geo = dockwidgets[0].geometry()
                x, y, width, height = geo.x(), geo.y(), geo.width(), geo.height()

                point = dockwidgets[0].mapToGlobal(QPoint(0, 0))
                x_glob, y_glob = point.x(), point.y()

                # Check if is too tall and put to the side
                y_fac = (height / self.height_main) * 100

                if y_fac > 60:  # FIXME:
                    if x < self.tips.width():
                        x = x_glob + width + delta
                        y = y_glob + height/2 - self.tips.height()/2
                    else:
                        x = x_glob - self.tips.width() - delta
                        y = y_glob + height/2 - self.tips.height()/2
                else:
                    if y < self.tips.height():
                        x = x_glob + width/2 - self.tips.width()/2
                        y = y_glob + height + delta
                    else:
                        x = x_glob + width/2 - self.tips.width()/2
                        y = y_glob - delta - self.tips.height()
        else:
            # Center on parent
            x = self.x_main + self.width_main/2 - self.tips.width()/2
            y = self.y_main + self.height_main/2 - self.tips.height()/2

        self.tips.set_pos(x, y)

    def _check_buttons(self):
        """Disable backward navigation on the first step, forward on the last."""
        step, steps = self.step_current, self.steps
        self.tips.button_disable = None

        if step == 0:
            self.tips.button_disable = 'previous'

        if step == steps - 1:
            self.tips.button_disable = 'next'

    def _key_pressed(self):
        """Dispatch the key recorded by the tip box to a navigation action."""
        key = self.tips.key_pressed

        if ((key == Qt.Key_Right or key == Qt.Key_Down or
             key == Qt.Key_PageDown) and self.step_current != self.steps - 1):
            self.next_step()
        elif ((key == Qt.Key_Left or key == Qt.Key_Up or
               key == Qt.Key_PageUp) and self.step_current != 0):
            self.previous_step()
        elif key == Qt.Key_Escape:
            self.close_tour()
        elif key == Qt.Key_Home and self.step_current != 0:
            self.first_step()
        elif key == Qt.Key_End and self.step_current != self.steps - 1:
            self.last_step()
        elif key == Qt.Key_Menu:
            pos = self.tips.label_current.pos()
            self.tips.context_menu_requested(pos)

    def _hiding(self):
        """Mark the tour as hidden and hide the tip box."""
        self.hidden = True
        self.tips.hide()

    # --- public api
    def run_code(self):
        """Execute the current step's code lines in the first widget
        (assumed to be the console)."""
        codelines = self.run
        console = self.widgets[0]
        for codeline in codelines:
            console.execute_code(codeline)

    def set_tour(self, index, frames, spy_window):
        """Register the tour to run.

        *frames* is a dict with keys ``'tour'`` (the step list) and
        ``'last'`` (last frame shown on a previous run).
        """
        self.spy_window = spy_window
        self.active_tour_index = index
        self.last_frame_active = frames['last']
        self.frames = frames['tour']
        self.steps = len(self.frames)

        self.is_tour_set = True

    def start_tour(self):
        """Fade the canvas in over the main window and show the first step."""
        geo = self.parent.geometry()
        x, y, width, height = geo.x(), geo.y(), geo.width(), geo.height()
        # self.parent_x = x
        # self.parent_y = y
        # self.parent_w = width
        # self.parent_h = height

        # FIXME: reset step to last used value
        # Reset step to beginning
        self.step_current = self.last_frame_active

        # Adjust the canvas size to match the main window size
        self.canvas.setFixedSize(width, height)
        self.canvas.move(QPoint(x, y))
        self.canvas.fade_in(self._move_step)
        self._clear_canvas()

        self.is_running = True

    def close_tour(self):
        """Fade everything out and remember the last frame shown."""
        self.tips.fade_out(self._close_canvas)
        self.canvas.set_interaction(False)
        self._set_modal(True, [self.tips])
        self.canvas.hide()

        try:
            # set the last played frame by updating the available tours in
            # parent. This info will be lost on restart.
            self.parent.tours_available[self.active_tour_index]['last'] = \
                self.step_current
        except Exception:
            # Best effort only: the parent may not track available tours
            # (e.g. the standalone TourTestWindow below has no
            # ``tours_available``). Narrowed from a bare ``except:``.
            pass

        self.is_running = False

    def hide_tips(self):
        """Hide tips dialog when the main window loses focus."""
        self._clear_canvas()
        self.tips.fade_out(self._hiding)

    def unhide_tips(self):
        """Unhide tips dialog when the main window regains focus."""
        self._clear_canvas()
        self._move_step()
        self.hidden = False

    def next_step(self):
        """Advance one step and fade into it."""
        self._clear_canvas()
        self.step_current += 1
        self.tips.fade_out(self._move_step)

    def previous_step(self):
        """Go back one step and fade into it."""
        self._clear_canvas()
        self.step_current -= 1
        self.tips.fade_out(self._move_step)

    def go_to_step(self, number, id_=None):
        """Jump directly to step *number* and fade into it."""
        self._clear_canvas()
        self.step_current = number
        self.tips.fade_out(self._move_step)

    def last_step(self):
        """Jump to the final step."""
        self.go_to_step(self.steps - 1)

    def first_step(self):
        """Jump to the first step."""
        self.go_to_step(0)

    def lost_focus(self):
        """Confirm if the tour loses focus and hides the tips."""
        if (self.is_running and not self.any_has_focus() and
                not self.setting_data and not self.hidden):
            self.hide_tips()

    def gain_focus(self):
        """Confirm if the tour regains focus and unhides the tips."""
        if (self.is_running and self.any_has_focus() and
                not self.setting_data and self.hidden):
            self.unhide_tips()

    def any_has_focus(self):
        """Return True if the tour or any of its components has focus."""
        f = (self.hasFocus() or self.parent.hasFocus() or
             self.tips.hasFocus() or self.canvas.hasFocus())
        return f
# ----------------------------------------------------------------------------
# Used for testing the functionality
class TourTestWindow(QMainWindow):
    """Minimal main window used to exercise the tour manually."""

    # AnimatedTour listens to these to track the main window.
    sig_resized = Signal("QResizeEvent")
    sig_moved = Signal("QMoveEvent")

    def __init__(self):
        super(TourTestWindow, self).__init__()
        self.setGeometry(300, 100, 400, 600)
        self.setWindowTitle('Exploring QMainWindow')

        self.exit = QAction('Exit', self)
        self.exit.setStatusTip('Exit program')

        # create the menu bar
        menubar = self.menuBar()
        file_ = menubar.addMenu('&File')
        file_.addAction(self.exit)

        # create the status bar
        self.statusBar()

        # QWidget or its instance needed for box layout
        self.widget = QWidget(self)

        self.button = QPushButton('test')
        self.button1 = QPushButton('1')
        self.button2 = QPushButton('2')

        # Opacity effect + animation used by action2 to fade button2 in.
        effect = QGraphicsOpacityEffect(self.button2)
        self.button2.setGraphicsEffect(effect)
        self.anim = QPropertyAnimation(effect, to_binary_string("opacity"))
        self.anim.setStartValue(0.01)
        self.anim.setEndValue(1.0)
        self.anim.setDuration(500)

        lay = QVBoxLayout()
        lay.addWidget(self.button)
        lay.addStretch()
        lay.addWidget(self.button1)
        lay.addWidget(self.button2)

        self.widget.setLayout(lay)
        self.setCentralWidget(self.widget)

        self.button.clicked.connect(self.action1)
        self.button1.clicked.connect(self.action2)
        self.tour = AnimatedTour(self)

    def action1(self):
        """Launch the 'test' tour from its first step."""
        frames = get_tour('test')
        index = 0
        dic = {'last': 0, 'tour': frames}
        self.tour.set_tour(index, dic, self)
        self.tour.start_tour()

    def action2(self):
        """Fade button2 in by running the opacity animation."""
        self.anim.start()

    def resizeEvent(self, event):
        """Reimplement Qt method: re-emit resizes for the tour."""
        QMainWindow.resizeEvent(self, event)
        self.sig_resized.emit(event)

    def moveEvent(self, event):
        """Reimplement Qt method: re-emit moves for the tour."""
        QMainWindow.moveEvent(self, event)
        self.sig_moved.emit(event)
def test():
    """Manual test: open a window from which the tour can be launched."""
    app = QApplication([])
    win = TourTestWindow()
    win.show()
    app.exec_()
# Allow running this module directly for a manual test of the tour.
if __name__ == '__main__':
    test()
| [
"33146580+celkansimay@users.noreply.github.com"
] | 33146580+celkansimay@users.noreply.github.com |
b7d684c9b7991cd9e351025c9f0d7d12a0fae18f | ef0765f57796d397f67cb175707aa9eb56624500 | /train.py | 2154fdfff048e2a18e46c7968e53e6ea2b2d64f4 | [
"MIT"
] | permissive | X-CCS/Tacotron2-LPCNet | e3554a74c0c2f6fbd0ad4eb8bd2078998ecf4653 | de2ee5ef313ca109d8ccf4a13bf5adebd4ee42f6 | refs/heads/master | 2022-12-05T20:08:05.491707 | 2020-09-02T09:18:21 | 2020-09-02T09:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,686 | py | import argparse
import tensorflow as tf
from tacotron.train import tacotron_train
from wavenet_vocoder.train import wavenet_train
from tacotron.synthesize import tacotron_synthesize
from infolog import log
from hparams import hparams
import os
import infolog
from time import sleep
log = infolog.log
def save_seq(file, sequence, input_path):
    '''Persist the Tacotron-2 training progress flags to disk.

    Each entry of *sequence* is serialized as ``0``/``1`` and joined with
    ``|``, followed by *input_path*, so future runs can skip completed
    stages (see read_seq).
    '''
    fields = ['%d' % int(flag) for flag in sequence]
    fields.append(input_path)
    with open(file, 'w') as state_file:
        state_file.write('|'.join(fields))
def read_seq(file):
    '''Load Tacotron-2 training state from disk. (To skip if not first run)

    Returns ``([taco_done, gta_done, wavenet_done], input_path)``. When
    no state file exists yet, every flag is False and the path is empty.
    '''
    if os.path.isfile(file):
        with open(file, 'r') as f:
            sequence = f.read().split('|')
        return [bool(int(s)) for s in sequence[:-1]], sequence[-1]
    # First run: nothing completed yet. Real booleans for consistency
    # with the parsed branch above (False == 0, so truthiness-based
    # callers are unaffected).
    return [False, False, False], ''
def prepare_run(args):
    '''Prepare a training run: hparams, TF log level, log dir, logging.

    Returns ``(log_dir, modified_hparams)``.
    '''
    modified_hp = hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    name = args.name if args.name else args.model
    log_dir = os.path.join(args.base_dir, 'logs-{}'.format(name))
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'Terminal_train_log'), name)
    return log_dir, modified_hp
def train(args, log_dir, hparams):
    '''Run the full Tacotron-2 pipeline: Tacotron -> GTA synthesis -> WaveNet.

    Progress is checkpointed to ``state_log`` inside *log_dir* so an
    interrupted run resumes at the first unfinished stage.
    '''
    state_file = os.path.join(log_dir, 'state_log')
    # Get training states
    (taco_state, GTA_state, wave_state), input_path = read_seq(state_file)

    if not taco_state:
        log('\n#############################################################\n')
        log('Tacotron Train\n')
        log('###########################################################\n')
        checkpoint = tacotron_train(args, log_dir, hparams)
        tf.reset_default_graph()
        # Sleep 1 second to let previous graph close and avoid error
        # messages while synthesis
        sleep(1)
        if checkpoint is None:
            # Bug fix: ``raise('msg')`` raises a *str*, which is itself a
            # TypeError in Python 3 -- raise a real exception instead.
            raise RuntimeError('Error occurred while training Tacotron, Exiting!')
        taco_state = 1
        save_seq(state_file, [taco_state, GTA_state, wave_state], input_path)

    if not GTA_state:
        log('\n#############################################################\n')
        log('Tacotron GTA Synthesis\n')
        log('###########################################################\n')
        # NOTE(review): when resuming with Tacotron already done,
        # ``checkpoint`` was never assigned above and this raises a
        # NameError -- the checkpoint path should be recovered from
        # log_dir; confirm intended resume behavior.
        input_path = tacotron_synthesize(args, hparams, checkpoint)
        GTA_state = 1
        save_seq(state_file, [taco_state, GTA_state, wave_state], input_path)

    if input_path == '' or input_path is None:
        raise RuntimeError('input_path has an unpleasant value -> {}'.format(input_path))

    if not wave_state:
        log('\n#############################################################\n')
        log('Wavenet Train\n')
        log('###########################################################\n')
        checkpoint = wavenet_train(args, log_dir, hparams, input_path)
        if checkpoint is None:
            # Same bug fix as above: never raise a plain string.
            raise RuntimeError('Error occurred while training Wavenet, Exiting!')
        wave_state = 1
        save_seq(state_file, [taco_state, GTA_state, wave_state], input_path)

    if wave_state and GTA_state and taco_state:
        log('TRAINING IS ALREADY COMPLETE!!')
def main():
    '''Parse command-line options and dispatch to the requested trainer.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default='')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--tacotron_input', default='training_data/train.txt')
    parser.add_argument('--wavenet_input', default='tacotron_output/gta/map.txt')
    parser.add_argument('--name', help='Name of logging directory.')
    parser.add_argument('--model', default='Tacotron-2')
    parser.add_argument('--input_dir', default='training_data/', help='folder to contain inputs sentences/targets')
    parser.add_argument('--output_dir', default='output/', help='folder to contain synthesized mel spectrograms')
    parser.add_argument('--mode', default='synthesis', help='mode for synthesis of tacotron after training')
    parser.add_argument('--GTA', default='True', help='Ground truth aligned synthesis, defaults to True, only considered in Tacotron synthesis mode')
    # NOTE(review): type=bool parses any non-empty string (even "False")
    # as True -- confirm the intended way to disable restoring.
    parser.add_argument('--restore', type=bool, default=True, help='Set this to False to do a fresh training')
    parser.add_argument('--summary_interval', type=int, default=250,
                        help='Steps between running summary ops')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints')
    parser.add_argument('--eval_interval', type=int, default=1000,
                        help='Steps between eval on test data')
    parser.add_argument('--tacotron_train_steps', type=int, default=500000, help='total number of tacotron training steps')
    parser.add_argument('--wavenet_train_steps', type=int, default=360000, help='total number of wavenet training steps')
    parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
    args = parser.parse_args()

    accepted_models = ['Tacotron', 'WaveNet', 'Both', 'Tacotron-2']

    if args.model not in accepted_models:
        raise ValueError('please enter a valid model to train: {}'.format(accepted_models))

    log_dir, hparams = prepare_run(args)

    if args.model == 'Tacotron':
        tacotron_train(args, log_dir, hparams)
    elif args.model == 'WaveNet':
        wavenet_train(args, log_dir, hparams, args.wavenet_input)
    elif args.model in ('Both', 'Tacotron-2'):
        # Full pipeline: Tacotron, then GTA synthesis, then WaveNet.
        train(args, log_dir, hparams)
    else:
        raise ValueError('Model provided {} unknown! {}'.format(args.model, accepted_models))
# Command-line entry point.
if __name__ == '__main__':
    main()
| [
"wangjiaqi12807@autohome.com.cn"
] | wangjiaqi12807@autohome.com.cn |
48b92115bf430ccfea52c8555cc132c5c87b9921 | 885a0893693db9298916b054cc74dd4aaa3a8e3c | /app.py | a418be0ec9a23201d1e49699fc6e8da53e06e24f | [
"Apache-2.0"
] | permissive | sshah98/NLP-Writing-Tool | 294fe79ea570d39ae4cb51e2267cc733f4ed0d9c | be85b7ad2fd0285d12c5a5348cc135819f7722fb | refs/heads/master | 2021-03-27T19:23:38.838339 | 2018-09-15T19:55:16 | 2018-09-15T19:55:16 | 111,587,608 | 0 | 0 | Apache-2.0 | 2018-09-25T21:45:24 | 2017-11-21T18:42:53 | Python | UTF-8 | Python | false | false | 1,831 | py | import os
from flask import Flask, url_for, render_template, request, redirect, session, Markup, flash
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Point the Google Cloud client libraries at the service-account key file.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "nlp-analysis-key.json"
# NOTE(review): hard-coded secret key makes sessions/flash messages
# forgeable -- load it from the environment in production.
app.secret_key = 'random-key'

# Imported after app creation; these local modules provide the NLP helpers.
from google_npl import GoogleNLP
from email_stats import EmailStats
@app.route('/', methods=['POST', 'GET'])
def index():
    """Render the home page; on POST, analyze the submitted text.

    Computes readability/sentiment statistics for the ``writing`` form
    field and re-renders the page with the results, or flashes an error.
    """
    if request.method == "POST":
        # .get() avoids a KeyError (HTTP 400) when the field is missing.
        user_text = request.form.get('writing')
        if not user_text:
            return render_template('index.html')
        try:
            # One EmailStats instance instead of re-parsing the text for
            # every statistic.
            stats = EmailStats(user_text)
            word_count = stats.word_count()
            get_text_easiness = stats.get_text_easiness()
            sentence_count = stats.sentence_count()
            subjectivity = stats.subjectivity()
            complex_words = stats.complex_words()
            # sentiment_text() returns "score magnitude" as one string.
            sentiment = GoogleNLP(user_text).sentiment_text()
            sentiment_score, sentiment_mag = sentiment.split(' ')
            print(word_count, get_text_easiness, sentence_count, subjectivity,
                  complex_words, sentiment_score, sentiment_mag)
            return render_template('index.html', results=[
                ('Word Count', word_count),
                ('Sentence Count', sentence_count),
                ('Readability', get_text_easiness),
                ('Subjectivity', subjectivity),
                ('Complex Words', complex_words),
                ('Sentiment Score', sentiment_score),
                ('Sentiment Strength', sentiment_mag)])
        except Exception as e:
            # Bug fix: flash(message, category) -- the original passed the
            # exception object as the *category*; put it in the message.
            flash('Error: {}'.format(e))
            return render_template('index.html')
    return render_template('index.html')
# Development entry point.
# NOTE(review): debug=True must not ship to production -- it enables the
# interactive Werkzeug debugger.
if __name__ == '__main__':
    app.run(debug=True)
| [
"shahsuraj261@gmail.com"
] | shahsuraj261@gmail.com |
ecd88c4f72cff8b982179e05bb5a023c1dd7b9a8 | f49a5c06f858d8e9ca1d5485e3d68c722b2839df | /benchmarking_main.py | 8a6de5e17ac0c58e5b8cce6e3fa23282988ca14f | [
"CC-BY-4.0"
] | permissive | asd974512/breast-cancer-sub-types | ec8c9aa35f996aba0b6c095c8e853bc01e6315a0 | 8f6c9af9d66cdcf8260393ae737bfb54678b0032 | refs/heads/master | 2023-09-05T00:32:23.069810 | 2021-11-22T10:58:18 | 2021-11-22T10:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,711 | py |
############################# IMPORT LIBRARY #################################
import timeit
start_time = timeit.default_timer()
seed=75
import psutil
import os
import numpy as np
from tensorflow import set_random_seed
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import interp
from itertools import cycle
from xgboost import XGBClassifier
from collections import Counter
from sklearn.metrics import average_precision_score, precision_recall_curve, matthews_corrcoef, precision_score, recall_score, confusion_matrix, classification_report
from sklearn.metrics import f1_score, roc_auc_score, auc, cohen_kappa_score, precision_recall_curve, log_loss, roc_curve, classification_report, accuracy_score
from sklearn.model_selection import GridSearchCV, LeaveOneOut, cross_val_score, cross_val_predict, StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics.classification import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from sklearn.ensemble import RandomTreesEmbedding, RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.neural_network import MLPClassifier, BernoulliRBM
from sklearn import model_selection
from sklearn.decomposition import PCA, KernelPCA, SparsePCA, MiniBatchSparsePCA, NMF, TruncatedSVD, FastICA, FactorAnalysis, LatentDirichletAllocation
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import make_classification
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.preprocessing import Normalizer, MinMaxScaler, StandardScaler, RobustScaler, LabelEncoder, label_binarize, QuantileTransformer
from sklearn.manifold import TSNE
from sklearn.feature_selection import SelectFromModel, SelectKBest, chi2, RFE, RFECV
from sklearn.pipeline import make_pipeline
from imblearn.over_sampling import RandomOverSampler, ADASYN, SMOTE
from imblearn.combine import SMOTEENN, SMOTETomek
from keras.initializers import RandomNormal
from keras.wrappers.scikit_learn import KerasClassifier
from keras.layers import Input, Dense
from keras.models import Model, load_model
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
from variational_autoencoder import *
from variational_autoencoder_multilayer import *
from aae_architechture import *
from deep_autoencoder import *
from denoising_autoencoder import *
from deep_denoising_autoencoder import *
from shallow_autoencoder import *
matplotlib.use('Agg')  # headless backend: render figures to files, no display required
np.random.seed(seed)  # NOTE(review): `seed` must be defined earlier in the file -- confirm

####################### LOAD BREAST CANCER DATASET #######################
# Expression matrix: rows are genes (20439), columns 2..606 are samples;
# transposed so X is (samples x genes) as scikit-learn expects.
file_1 = pd.read_csv('./data/subtype_molecular_rna_seq.csv')
data = file_1.iloc[0:20439,2:607].values
X=data.T
file_2 = pd.read_csv('./data/subtype_molecular_rna_seq_label.csv', low_memory=False)
label= file_2.iloc[0,2:607].values  # single row holds the per-sample class labels
y=label.T
print('Actual dataset shape {}'.format(Counter(y)))

############################ LOAD UCEC DATA ###########################
# Alternative dataset; enable this block (and disable the one above) to
# run the same pipeline on the UCEC cohort.
'''
file_1 = pd.read_csv('./data/ucec_rna_seq.csv')
data = file_1.iloc[0:20482,2:232].values
X=data.T
file_2 = pd.read_csv('./data/ucec_rna_seq_label.csv', low_memory=False)
label = file_2.iloc[0,2:232].values #First row then column from dataset
y=label.T
print('Actual dataset shape {}'.format(Counter(y)))
'''

count=0
aaecount=0  # fold counter; builds per-fold output directories for the autoencoders
figure=0
# Pool of classifiers evaluated on every CV fold; per-fold metrics are
# appended to `log` and averaged per classifier at the end of the script.
# All stochastic models are pinned to `seed` for reproducibility.
# NOTE(review): XGBClassifier is assumed to be imported earlier in the file -- confirm.
classifiers = [
    KNeighborsClassifier(3),
    DecisionTreeClassifier(random_state=seed),
    RandomForestClassifier(criterion='entropy', n_estimators=100, random_state=seed),
    XGBClassifier(learning_rate=0.001,max_depth=4,n_estimators=100, nthread=1, subsample=0.65),
    GradientBoostingClassifier(random_state=seed),
    GaussianNB(),
    LinearDiscriminantAnalysis(),
    QuadraticDiscriminantAnalysis(),
    SVC(kernel='rbf', probability=True, random_state=seed),
    LogisticRegression(C=0.1, multi_class= 'multinomial', solver='sag', random_state=seed),
    MLPClassifier(hidden_layer_sizes=(500), random_state=seed, verbose=True, activation='tanh', solver='adam', alpha=0.0001, batch_size='auto'),
    # Soft-voting ensemble of the three strongest individual models above.
    VotingClassifier(estimators=[('MLP', MLPClassifier(hidden_layer_sizes=(500), random_state=seed, verbose=True, activation='tanh', solver='adam', alpha=0.0001, batch_size='auto')),
                                 ('LDA', LinearDiscriminantAnalysis()),
                                 ('LR', LogisticRegression(C=0.1, multi_class= 'multinomial', solver='sag', random_state=seed))], voting='soft')
    ]

# One row per (classifier, fold).
log_cols=["Classifier", "Accuracy", "F1-Score", "Recall", "Precision", "AUC", "MCC", "Kappa", "Log-Loss"]
log = pd.DataFrame(columns=log_cols)
def zero_mix(x, n):
    """Return a copy of ``x`` with a random fraction of each row set to zero.

    Used to corrupt the input of the denoising autoencoder ("masking noise").

    Parameters
    ----------
    x : 2-D array-like
        Samples-by-features matrix; it is copied, never modified in place.
    n : str
        Noise spec of the form ``'split-<frac>'``; the historical misspelling
        ``'spilt-<frac>'`` used elsewhere in this script is still accepted
        for backward compatibility.  E.g. ``'split-0.05'`` zeroes 5% of the
        entries of every row (indices chosen without replacement).

    Returns
    -------
    numpy.ndarray
        The corrupted copy.  If the spec contains neither keyword the copy
        is returned unchanged (silent no-op, matching the original behaviour).
    """
    temp = np.copy(x)
    noise = n
    # Accept both the historical typo 'spilt' and the correct 'split'.
    if 'spilt' in noise or 'split' in noise:
        frac = float(noise.split('-')[1])
        for row in temp:
            idx = np.random.choice(len(row), int(round(frac * len(row))), replace=False)
            row[idx] = 0.0
    return (temp)
def gaussian_mix(x):
    """Return ``x`` plus zero-mean Gaussian noise with sigma = 0.1.

    The noise matrix has the same shape as ``x``; the input itself is not
    modified.  Used to corrupt inputs when training a denoising autoencoder.
    """
    noise = np.random.normal(0, 0.1, (len(x), len(x[0])))
    return (x + noise)
# The above two functions are used to add noise in the data
# And used to train denoising autoencoder

# 5-fold stratified CV: class proportions are preserved in every fold.
# NOTE(review): random_state has no effect when shuffle=False (newer
# scikit-learn versions reject this combination) -- confirm intent.
skf = StratifiedKFold(n_splits=5, shuffle=False, random_state=seed)
skf.get_n_splits(X, y)
print(skf)
# Main experiment loop: for every CV fold -- oversample the training split,
# scale, reduce dimensionality (PCA or one of the optional autoencoders),
# then fit and score every classifier in the pool.
for train_index, test_index in skf.split(X, y):
    #print("TRAIN:", train_index, "TEST:", test_index)
    x_train, x_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    #print("TRAIN:", x_train, "TEST:", x_test)
    print('Dataset shape for Train {}'.format(Counter(y_train)))
    print('Dataset shape for Test {}'.format(Counter(y_test)))

    ################################# OVER SAMPLING ###############################
    # Oversample ONLY the training fold so no synthetic samples leak into the
    # test fold.  NOTE(review): `kind='borderline1'` and `fit_sample` belong
    # to the old imbalanced-learn (<0.4) API -- confirm the pinned version.
    sm = SMOTE(sampling_strategy='auto', kind='borderline1', random_state=seed)
    x_train, y_train = sm.fit_sample(x_train, y_train)
    #oversample only traning data
    # Alternative resamplers; enable exactly one block at a time.
    '''
    sm = SMOTEENN(ratio=1, random_state=seed)
    x_train, y_train = sm.fit_sample(x_train, y_train)
    '''
    '''
    sm = SMOTETomek(ratio=1, random_state=seed)
    x_train, y_train = sm.fit_sample(x_train, y_train)
    '''
    '''
    sm=RandomOverSampler(ratio=1, random_state=seed)
    x_train, y_train = sm.fit_sample(x_train, y_train)
    '''
    '''
    sm=ADASYN(ratio=1, random_state=seed, n_neighbors=5, n_jobs=1)
    x_train, y_train = sm.fit_sample(x_train, y_train)
    '''
    print('Resampled dataset shape for Train {}'.format(Counter(y_train)))
    print('Resampled dataset shape for Test {}'.format(Counter(y_test)))
    n_samples, n_features = x_train.shape

    ############################# FEATURE SCALING/NORMALIZATION ##################
    # Quantile transform fitted on the training fold only, then applied to
    # both folds (avoids test-set leakage).
    qt = QuantileTransformer(n_quantiles=10, random_state=seed)
    qt.fit(x_train)
    x_train=qt.transform(x_train)
    x_test=qt.transform(x_test)
    #use this when train denoising autoencoder
    #use either gaussian mix or zero mix
    #x_train_noisy=zero_mix(x_train, 'spilt-0.05')
    #x_test_noisy=zero_mix(x_test, 'spilt-0.05')
    #x_train_noisy=gaussian_mix(x_train)
    #x_test_noisy=gaussian_mix(x_test)
    '''
    # Standart Scaling
    sc = StandardScaler()
    sc.fit(x_train)
    x_train=sc.transform(x_train)
    x_test=sc.transform(x_test)
    '''

    ###############################DIMENSION REDUCTION ############################
    # Default reduction: 50 principal components (y_train is ignored by PCA.fit).
    pca = PCA(n_components=50, random_state=seed)
    pca.fit(x_train, y_train)
    x_train = pca.transform(x_train)
    x_test = pca.transform(x_test)
    print ('After PCA', x_train.shape)

    ################ VARIOUS AUTOENCODERS ###############
    # Per-fold sequence number used to keep each fold's trained autoencoder
    # in its own output directory.
    aaecount= aaecount+1
    aaenum=str(aaecount)

    ######### Shallow Autoencoder ############
    '''
    shallow_autoencoder_fit(x_train, x_test, encoding_dim=50, optimizer="adadelta",
    loss_function="binary_crossentropy", nb_epoch=100,
    batch_size=20, path='./feature_extraction/shallowAE/'+aaenum+'/')
    #do not require fine tuning since this autoencoder does not have any hidden layer
    shallow_autoencoder = load_model('./feature_extraction/shallowAE/'+aaenum+'/shallow_encoder'+'.h5')
    x_train = shallow_autoencoder.predict(x_train)
    print('X_Train Shape after ShallowAE :', x_train.shape)
    x_test = shallow_autoencoder.predict(x_test)
    print('X_Test Shape after ShallowAE :', x_train.shape)
    '''

    ######### Denoising Autoencoder ############
    #deep_denoising_autoencoder_fit
    '''
    deep_denoising_autoencoder_fit(x_train, x_test, x_train_noisy, x_test_noisy, encoding_dim=50, optimizer="adadelta",
    loss_function="binary_crossentropy", nb_epoch=100,
    batch_size=20, path='./feature_extraction/denoisingAE/'+aaenum+'/')
    '''
    '''
    #do not require fine tuning since this autoencoder does not have any hidden layer
    denoising_autoencoder = load_model('./feature_extraction/denoisingAE/'+aaenum+'/denoising_encoder'+'.h5')
    x_train = denoising_autoencoder.predict(x_train)
    print('X_Train Shape after ShallowAE :', x_train.shape)
    x_test = denoising_autoencoder.predict(x_test)
    print('X_Test Shape after ShallowAE :', x_train.shape)
    '''

    ######### Deep Autoencoder ##########
    '''
    deep_autoencoder_fit(x_train, x_test, encoding_dim=50, optimizer="adadelta",
    loss_function="binary_crossentropy", nb_epoch=100,
    batch_size=20, path='./feature_extraction/deepAE/'+aaenum+'/')
    ####### don't need to use the following lines if autoencoder require fine tuning
    deep_encoder = load_model('./feature_extraction/DeepAE/'+aaenum+'/deep_autoencoder'+'.h5')
    x_train = deep_encoder.predict(x_train)
    print('X_Train Shape after DeepAE :', x_train.shape)
    x_test = deep_encoder.predict(x_test)
    print('X_Test Shape after DeepAE :', x_test.shape)
    '''

    ############## AAE ##############
    '''
    aae_model('./feature_extraction/AAE/'+aaenum+'/', AdversarialOptimizerSimultaneous(),
    xtrain=x_train,ytrain=y_train, xtest=x_test, ytest=y_train, encoded_dim=50,img_dim=x_train.shape[1], nb_epoch=100)
    '''
    '''
    ####### don't need to use the following lines if autoencoder require fine tuning
    model = load_model('./feature_extraction/AAE/'+aaenum+'/aae_encoder'+'.h5')
    x_train = model.predict(x_train)
    print('X_Train Shape after AAE :', x_train.shape)
    x_test = model.predict(x_test)
    print('X_Test Shape after AAE :', x_test.shape)
    '''

    ################ Variational Autoencoder ####################
    '''
    vae_model('./feature_extraction/VAE/'+aaenum+'/',x_train.shape[1],
    x_train,x_test,intermediate_dim=1000,batch_size=20,latent_dim=50,epochs=100)
    '''
    '''
    ####### don't need to use the following lines if autoencoder require fine tuning
    model = load_model('./feature_extraction/VAE/'+aaenum+'/vae_encoder'+'.h5')
    x_train = model.predict(x_train)
    print('X_Train Shape after VAE :', x_train.shape)
    x_test = model.predict(x_test)
    print('X_Test Shape after VAE :', x_test.shape)
    '''

    #############################################################################
    ##################### FINE TUNING AUTOENCODER #########################
    #############################################################################
    # Optional supervised fine-tuning: freeze the pre-trained encoder, attach a
    # softmax head, train briefly, then strip the head and reuse the encoder.
    '''
    y_train_binarize = label_binarize(y_train, classes=[0,1,2,3])
    y_test_binarize = label_binarize(y_test, classes=[0,1,2,3])
    model = load_model('./feature_extraction/deepAE/'+aaenum+'/deep_encoder'+'.h5')
    model.summary()
    #pull z_mean for variational autoencoder
    transfer_layer=model.get_layer('encoder_mu')
    aae_prev_model=Model(inputs=model.input,outputs=transfer_layer.output)
    new_model=Sequential()
    new_model.add(aae_prev_model)
    new_model.add(Dropout(p = 0.001))
    new_model.add(Dense(units = 4, activation = 'softmax', name='new_layer_added'))
    def print_layer_trainable():
        for layer in aae_prev_model.layers:
            print("{0}:\t{1}".format(layer.trainable,layer.name))
    for layer in aae_prev_model.layers:
        layer.trainable=False
    print_layer_trainable()
    # aae_prev_model.trainable=True
    # it does not work as expected
    ### Now It is time to Modify the Layer ###
    for layer in aae_prev_model.layers:
        #trainable=('encoder_mu' in layer.name)
        trainable=('encoder_mu' in layer.name)
        #trainable=('encoder_h2' in layer.name or 'encoder_mu' in layer.name)
        layer.trainable=trainable
    print_layer_trainable()
    #Here we choose adadelta as optimizer
    new_model.compile(optimizer = 'adadelta', loss = 'categorical_crossentropy', metrics = ['accuracy'])
    history=new_model.fit(x_train, y_train_binarize, batch_size = 20, epochs = 50) # for ANN keras
    score = new_model.evaluate(x_test, y_test_binarize, verbose=1, batch_size=20)
    print("Test Accuracy: \n%s: %.2f%%" % (new_model.metrics_names[1], score[1]*100))
    path='./feature_extraction/deepAE/fine_tuned/'+aaenum+'/'
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, 'fine_tuned_history.csv'))
    new_model.summary()
    new_model.layers.pop()
    #Remove the last layer
    new_model.summary()
    new_model.layers.pop()
    #Remove the dropout layer
    new_model.summary()
    new_model.save(os.path.join(path,'encoder_fine_tuned.h5'))
    model = load_model('./feature_extraction/deepAE/fine_tuned/'+aaenum+'/encoder_fine_tuned'+'.h5')
    model.summary()
    x_train = model.predict(x_train)
    print('X_Train Shape after AAE :', x_train.shape)
    x_test = model.predict(x_test)
    print('X_Test Shape after AAE :', x_test.shape)
    '''

    ######################## CLASSIFICATION ##########################
    # Fit every classifier on this fold and record all eight metrics.
    # NOTE(review): recall_score, precision_score and matthews_corrcoef are
    # assumed to be imported earlier in the file -- confirm.
    for clf in classifiers:
        clf.fit(x_train, y_train)
        name = clf.__class__.__name__
        print("="*30)
        print(name)
        print('****Results****')
        y_pred = clf.predict(x_test)
        y_pred_proba = clf.predict_proba(x_test)
        # Binarized labels required by roc_auc_score for the multiclass case.
        y_test_binarize = label_binarize(y_test, classes=[0,1,2,3])
        acc = accuracy_score(y_test, y_pred)
        print("Accuracy: {:.4%}".format(acc))
        f1=f1_score(y_test, y_pred,average='weighted')
        print("F1 Score Weighted: {:.4%}".format(f1))
        rs=recall_score(y_test, y_pred, average='weighted')
        print("Recall Score Weighted: {:.4%}".format(rs))
        ps=precision_score(y_test, y_pred, average='weighted')
        print("Precision Weighted: {:.4%}".format(ps))
        # Shadows sklearn's imported `auc` function inside this loop.
        auc=roc_auc_score(y_test_binarize,y_pred_proba, average='macro')
        print("AUC Score: {:.4%}".format(auc))
        mcc=matthews_corrcoef(y_test, y_pred)
        print("MCC Score: {:.4%}".format(mcc))
        kappa=cohen_kappa_score(y_test, y_pred, labels=None, weights=None, sample_weight=None)
        print("Kappa: {:.4%}".format(kappa))
        ll = log_loss(y_test, y_pred_proba)
        print("Log Loss: {:.4%}".format(ll))
        # NOTE(review): DataFrame.append was removed in pandas 2.0 -- confirm
        # the pinned pandas version supports it.
        log_entry = pd.DataFrame([[name, acc, f1, rs, ps, auc, mcc, kappa, ll]], columns=log_cols)
        log = log.append(log_entry)
        print("="*30)
        print (log)
################################################################################
print('###########################################')
# Average each metric over the 5 CV folds, per classifier, and persist as TSV.
result_temp = pd.DataFrame(log)
result_final=result_temp.groupby('Classifier').mean()
result_final.to_csv("./results/result.tsv", sep='\t')
print (result_final)
print('###########################################')
print('Result Saved Successfully')
end_time = timeit.default_timer()  # NOTE(review): start_time must be set near the top of the file -- confirm

###### PRINT TIME ########
print('###############')
print('Wall Clock Time')
print ((end_time - start_time), 'Sec')
# Shadows the stdlib `time` module name if it was imported earlier.
time=(end_time - start_time)
minutes = time // 60
time %= 60
seconds = time
print(minutes, 'Minutes', seconds,'Seconds')

######## CPU USAGE #######
print('###############')
print('CPU Usage')
print(psutil.cpu_percent(), '%')
print('THE END')
| [
"noreply@github.com"
] | noreply@github.com |
e56d8abf68eeabd78679feae85ab12666d37e27e | 3facdefca75155161d8a1a1c7ddfaf10f3f2c6fe | /venv/Lib/site-packages/eikon/streaming_session/streamingprices.py | f143ee7e2d99a21a6897b7324556870478b6e5fa | [
"Apache-2.0"
] | permissive | suppureme/FisherEmbeddingFinal | b0b171c4757e456046224dcdcc3418889dcaccfc | 0d07f09931658c838988c987cd6d8db5376ff715 | refs/heads/master | 2023-07-06T19:47:26.755177 | 2021-08-10T06:04:47 | 2021-08-10T06:04:47 | 394,538,875 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,146 | py | # coding: utf8
__all__ = ["StreamingPrices"]
import sys
import logging
import asyncio
from pandas import DataFrame
from pandas import to_numeric
from .streamingprice import StreamingPrice
from .stream import StreamState
class StreamingPrices:
"""
Open a streaming price subscription.
Parameters
----------
instruments: list[string]
List of RICs to subscribe.
service: string
Specified the service to subscribe on.
Default: None
fields: string or list[string]
Specified the fields to retrieve.
Default: None
on_refresh: callable object (streaming_prices, instrument_name, message)
Called when a stream on instrument_name was opened successfully or when the stream is refreshed by the server.
This callback is called with the reference to the streaming_prices object, the instrument name and the instrument full image.
Default: None
on_update: callable object (streaming_prices, instrument_name, message)
Called when an update is received for a instrument_name.
This callback is called with the reference to the streaming_prices object, the instrument name and the instrument update.
Default: None
on_status: callable object (streaming_prices, instrument_name, status)
Called when a status is received for a instrument_name.
This callback is called with the reference to the streaming_prices object, the instrument name and the instrument status.
Default: None
on_complete: callable object (streaming_prices, instrument_name)
Called when all subscriptions are completed.
This callback is called with the reference to the streaming_prices object.
Default: None
Raises
------
Exception
If request fails.
Examples
--------
>> import eikon as ek
>> fx = ek.StreamingPrices(['EUR=', 'GBP='])
>> fx.open()
>> bid_eur = fx['EUR']['BID']
>> ask_eur = fx['EUR']['ASK']
>>
>> def on_update(streams, instrument, msg):
... print(msg)
>> subscription = ek.StreamingPrices(['VOD.L', 'EUR=', 'PEUP.PA', 'IBM.N'],
... ['DSPLY_NAME', 'BID', 'ASK'],
... on_update=on_update)
>> subscription.open()
{"EUR=":{"DSPLY_NAME":"RBS LON","BID":1.1221,"ASK":1.1224}}
{"PEUP.PA":{"DSPLY_NAME":"PEUGEOT","BID":15.145,"ASK":15.155}}
{"IBM.N":{"DSPLY_NAME":"INTL BUS MACHINE","BID":"","ASK":""}}
...
"""
class Params(object):
def __init__(self, instruments, fields):
self._universe = instruments
self._fields = fields
@property
def instruments(self):
return self._universe
@property
def fields(self):
return self._fields
class StreamingPricesIterator:
""" StreamingPrices Iterator class """
def __init__(self, streaming_prices):
self._streaming_prices = streaming_prices
self._index = 0
def __next__(self):
"""" Return the next streaming item from streaming price list """
if self._index < len(self._streaming_prices.params.instruments):
result = self._streaming_prices[self._streaming_prices.params.instruments[self._index]]
self._index += 1
return result
raise StopIteration()
def __init__(self,
instruments,
session=None,
fields=[],
service=None,
on_refresh=None,
on_status=None,
on_update=None,
on_complete=None):
from eikon.Profile import get_desktop_session
if session is None:
self._session = get_desktop_session()
else:
self._session = session
if isinstance(instruments, str):
instruments = [instruments]
elif isinstance(instruments, list) and all(isinstance(item, str) for item in instruments):
pass
else:
raise EikonError(-1, "StreamingPrices: instruments must be a list of strings")
self._fields = fields
self.params = StreamingPrices.Params(instruments=instruments, fields=fields)
self._service = service
self._streaming_prices = {}
for name in instruments:
self._streaming_prices[name] = StreamingPrice(session=self._session,
name=name,
fields=self._fields,
service=self._service,
on_refresh=self._on_refresh,
on_update=self._on_update,
on_status=self._on_status,
on_complete=self._on_complete
)
self._on_refresh_cb = on_refresh
self._on_status_cb = on_status
self._on_update_cb = on_update
self._on_complete_cb = on_complete
self._state = StreamState.Closed
# set universe of on_complete
self._on_complete_set = None
@property
def state(self):
return self._state
###################################################
# Access to StreamingPrices as a dict #
###################################################
def keys(self):
if self._streaming_prices:
return self._streaming_prices.keys()
return {}.keys()
def values(self):
if self._streaming_prices:
return self._streaming_prices.values()
return {}.values()
def items(self):
if self._streaming_prices:
return self._streaming_prices.items()
return {}.items()
###################################################
# Make StreamingPrices iterable #
###################################################
def __iter__(self):
return StreamingPrices.StreamingPricesIterator(self)
def __getitem__(self, item):
if item in self.params.instruments:
return self._streaming_prices[item]
else:
raise KeyError(f"{item} not in StreamingPrices universe")
def __len__(self):
return len(self.params.instruments)
###################################################
# methods to open synchronously item stream #
###################################################
def open(self, with_updates=True):
"""
Open synchronously the streaming price
"""
return self._session._loop.run_until_complete(self.open_async(with_updates=with_updates))
################################################
# methods to open asynchronously item stream #
################################################
async def open_async(self, with_updates=True):
"""
Open asynchronously the streaming price
"""
self._session.log(1, f'StreamingPrices : open streaming on {self.params.instruments}')
if self._state == StreamState.Open:
return
self._state = StreamState.Pending
self._on_complete_set = set()
task_list = [stream.open_async(with_updates=with_updates) for stream in self._streaming_prices.values()]
await asyncio.wait(task_list, return_when=asyncio.ALL_COMPLETED)
self._state = StreamState.Open
self._session.log(1, f'StreamingPrices : start asynchrously streaming on {self.params.instruments} done')
return self._state
def close(self):
if self._state is not StreamState.Closed:
self._session.log(1, f'StreamingPrices : close streaming on {self.params.instruments}')
for stream in self._streaming_prices.values():
stream.close()
self._state = StreamState.Closed
return self._state
def get_snapshot(self, instruments=None, fields=None, convert=True):
"""
Returns a Dataframe filled with snapshot values for a list of instrument names and a list of fields.
Parameters
----------
instruments: list of strings
List of instruments to request snapshot data on.
fields: list of strings
List of fields to request.
convert: boolean
If True, force numeric conversion for all values.
Returns
-------
pandas.DataFrame
pandas.DataFrame content:
- columns : instrument and fieled names
- rows : instrument name and field values
Raises
------
Exception
If request fails or if server returns an error
ValueError
If a parameter type or value is wrong
Examples
--------
>>> import eikon as ek
>>> ek.set_app_key('set your app key here')
>>> streaming_prices = ek.StreamingPrices(instruments=["MSFT.O", "GOOG.O", "IBM.N"], fields=["BID", "ASK", "OPEN_PRC"])
>>> data = streaming_prices.get_snapshot(["MSFT.O", "GOOG.O"], ["BID", "ASK"])
>>> data
Instrument BID ASK
0 MSFT.O 150.9000 150.9500
1 GOOG.O 1323.9000 1327.7900
2 IBM.N NaN NaN
"""
from eikon.eikonError import EikonError
if instruments:
for name in instruments:
if name not in self.params.instruments:
raise ElektronError(-1, f'Instrument {name} was not requested : {self.params.instruments}')
if fields:
for field in fields:
if field not in self.params.fields:
raise EikonError(-1, f'Field {field} was not requested : {self.params.fields}')
_universe = instruments if instruments else self.params.instruments
_all_fields_value = {name: self._streaming_prices[name].get_fields(fields)
if name in self._streaming_prices else None
for name in _universe}
_fields = []
if not fields:
fields = []
for field_values in _all_fields_value.values():
if field_values:
_fields.extend(field for field in field_values.keys() if field not in _fields)
else:
_fields = fields
_df_source = {f: [_all_fields_value[name][f] if _all_fields_value[name].get(f) else None
for name in _universe] for f in _fields}
_price_dataframe = DataFrame(_df_source, columns=_fields)
if convert:
_price_dataframe = _price_dataframe.apply(to_numeric, errors='ignore')
_price_dataframe.insert(0, 'Instrument', _universe)
if convert and _df_source:
_price_dataframe = _price_dataframe.convert_dtypes()
return _price_dataframe
#########################################
# Messages from stream_cache connection #
#########################################
def _on_refresh(self, stream, message):
if self._on_refresh_cb:
try:
self._session.log(1, 'StreamingPrices : call on_refresh callback')
self._state = StreamState.Open
self._session._loop.call_soon_threadsafe(self._on_refresh_cb, self, stream.name, message)
# self._on_refresh_cb(self, name, message)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_refresh callback raised exception: {e!r}')
self._session.log(1, f'Traceback : {sys.exc_info()[2]}')
def _on_status(self, stream, status):
if self._on_status_cb:
try:
self._session.log(1, 'StreamingPrices : call on_status callback')
self._session._loop.call_soon_threadsafe(self._on_status_cb, self, stream.name, status)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_status callback raised exception: {e!r}')
self._session.log(1, f'Traceback : {sys.exc_info()[2]}')
# check for closed stream when status "Closed", "ClosedRecover", "NonStreaming" or "Redirect"
if stream.state == StreamState.Closed and stream.name not in self._on_complete_set:
# this stream has been closed, so it means completed also
self._on_complete(stream)
def _on_update(self, stream, update):
if self._on_update_cb:
try:
self._session.log(1, 'StreamingPrices : call on_update callback')
self._session._loop.call_soon_threadsafe(self._on_update_cb, self, stream.name, update)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_update callback raised exception: {e!r}')
self._session.log(1, f'Traceback : {sys.exc_info()[2]}')
def _on_complete(self, stream):
assert self._on_complete_set is not None
# check for update completed set
if stream.name not in self._on_complete_set:
# update the stream to be in complete list
self._on_complete_set.update([stream.name, ])
# check for complete for all subscribe universe
if self._on_complete_set == set(self.params.instruments):
if self._on_complete_cb:
try:
self._session.log(1, 'StreamingPrices : call on_complete callback')
self._session._loop.call_soon_threadsafe(self._on_complete_cb, self)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_complete callback raised exception: {e!r}')
self._session.log(1, f'Traceback : {sys.exc_info()[2]}') | [
"asahu99@gmail.com"
] | asahu99@gmail.com |
e4bcdf2e5a6ee879997a68875791a84f8e83bf15 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part009770.py | 6b7721e39926572acd750c7dcc8d9bfd53756e66 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher20347(CommutativeMatcher):
    # Machine-generated many-to-one matcher (matchpy code generator, used by
    # SymPy's Rubi integration rules).  Do not edit by hand; the "State NNNN"
    # comments refer to the generator's internal automaton states.
    _instance = None
    # Pattern 0: one required operand plus a collected remainder bound to the
    # sequence variable 'i3.1.2.2.2.0' under the commutative operation Add.
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i3.1.2.2.2.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily-created process-wide singleton.
        if CommutativeMatcher20347._instance is None:
            CommutativeMatcher20347._instance = CommutativeMatcher20347()
        return CommutativeMatcher20347._instance

    @staticmethod
    def get_match_iter(subject):
        # Generator yielding (pattern_index, substitution) for every way
        # `subject` matches the pattern "x*f" (with an optional coefficient
        # that defaults to 1).
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 20346
        subst1 = Substitution(subst0)
        try:
            # Optional coefficient variable defaults to 1.
            subst1.try_add_variable('i3.1.2.2.2.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 20348
            if len(subjects) >= 1:
                tmp2 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i3.1.2.2.2.1.0', tmp2)
                except ValueError:
                    pass
                else:
                    pass
                    # State 20349
                    if len(subjects) == 0:
                        pass
                        # 0: x*f
                        yield 0, subst2
                subjects.appendleft(tmp2)
        # Decompose a Mul subject and delegate its factors to the nested
        # commutative matcher for the inner pattern.
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp4 = subjects.popleft()
            associative1 = tmp4
            associative_type1 = type(tmp4)
            subjects5 = deque(tmp4._args)
            matcher = CommutativeMatcher20351.get()
            tmp6 = subjects5
            subjects5 = []
            for s in tmp6:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp6, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 20352
                    if len(subjects) == 0:
                        pass
                        # 0: x*f
                        yield 0, subst1
            subjects.appendleft(tmp4)
        return
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy.utils import VariableWithCount
from collections import deque
from .generated_part009771 import *
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
2904e483645aab3aad4727f04b8cb19ab9e1ab65 | f7a1da15ba4941b5c7f13603455bf7e3c25b568e | /ggplot/tests/test_legend.py | a72d8475c032db2cb9c839b2d976b70db432c191 | [
"BSD-2-Clause"
] | permissive | ellisonbg/ggplot | 64b93f172ed729366cda12a1878733d3fc899cb9 | d9028b89c8ae81845b4653deccef897f7ecc8cb8 | refs/heads/master | 2020-05-29T11:57:16.338751 | 2014-05-02T18:14:37 | 2014-05-02T18:14:37 | 19,389,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup, assert_same_elements
assert_same_ggplot = get_assert_same_ggplot(__file__)
from nose.tools import (assert_true, assert_raises, assert_is,
assert_is_not, assert_equal)
from ggplot import *
import six
import pandas as pd
from ggplot.components import assign_visual_mapping
def test_legend_structure():
    """The legend built by assign_visual_mapping must contain one entry per
    mapped aesthetic, remember the source column of each entry, and choose a
    discrete scale for non-numeric columns and a continuous one otherwise."""
    df = pd.DataFrame({
        'xmin': [1, 3, 5],
        'xmax': [2, 3.5, 7],
        'ymin': [1, 4, 6],
        'ymax': [5, 5, 9],
        'fill': ['blue', 'red', 'green'],
        'quality': ['good', 'bad', 'ugly'],
        'alpha': [0.1, 0.5, 0.9],
        'texture': ['hard', 'soft', 'medium']})

    mapping = aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                  colour='quality', fill='fill', alpha='alpha',
                  linetype='texture')
    gg = ggplot(df, mapping)
    new_df, legend = assign_visual_mapping(df, gg.aesthetics, gg)

    # Every mapped aesthetic gets a legend entry ...
    for aesthetic in ('color', 'fill', 'alpha', 'linetype'):
        assert(aesthetic in legend)
    # ... while unmapped aesthetics do not.
    assert('size' not in legend)
    assert('shape' not in legend)

    # Legend entries remember the column they were mapped from.
    expected_columns = (('fill', 'fill'), ('color', 'quality'),
                        ('linetype', 'texture'), ('alpha', 'alpha'))
    for aesthetic, column in expected_columns:
        assert(legend[aesthetic]['column_name'] == column)

    # Non-numeric columns get a discrete scale; numeric alpha is continuous.
    for aesthetic in ('fill', 'color', 'linetype'):
        assert(legend[aesthetic]['scale_type'] == 'discrete')
    assert(legend['alpha']['scale_type'] == 'continuous')

    # With a numeric fill column, the fill scale becomes continuous instead.
    df2 = pd.DataFrame.copy(df)
    df2['fill'] = [90, 3.2, 8.1]
    gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                         colour='quality', fill='fill', alpha='alpha',
                         linetype='texture'))
    new_df, legend = assign_visual_mapping(df2, gg.aesthetics, gg)
    assert(legend['fill']['scale_type'] == 'continuous')
| [
"has2k1@gmail.com"
] | has2k1@gmail.com |
acbceabe2af58b797b1e56d056e10142feda7758 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/googlecron/__init__.py | c16c11b78e2b1864918de3481da96215d94c1f8f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 640 | py | # Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"this file is needed to make this a package"
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
dc0e963aa23abe50e37b51a150717f3e95b98ee4 | e627d47d5102bd68c2012501aa120833b9271da7 | /aws_api/core/admin.py | deadee44fdc7c2eff24954c469f2c470d31764f1 | [] | no_license | aayushgupta97/django-km | 5ba275d1f85eaaf8bc052e47d2b6b6f1a5e4cf90 | d34cd4f8637718044832d9baeecee86df5e821a5 | refs/heads/master | 2023-01-02T18:12:31.384634 | 2020-10-24T09:21:50 | 2020-10-24T09:21:50 | 298,391,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from django.contrib import admin
from .models import AWSCredentials

# Expose the AWSCredentials model in the Django admin site with the
# default ModelAdmin options.
admin.site.register(AWSCredentials)
"aayushgupta2097@gmail.com"
] | aayushgupta2097@gmail.com |
3efc22db73e35b507bc67c250be3584146d5c56e | 9d041cdca12fa685261bbd3d3efc1cb77ee3b701 | /DemConverter/dem_converter.py | 1e2e7a7bfa5cdf85eac28241df83672d04a96125 | [] | no_license | sagost/DemConverter | 45c3c5f3ea7ea9abe558c1d90cd2ba9088e7da6e | 99b193d669d45620e524d5eb3f33b19a3f1bb33d | refs/heads/master | 2021-05-14T12:56:46.303643 | 2018-01-05T20:20:19 | 2018-01-05T20:20:19 | 116,423,295 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,883 | py | # -*- coding: utf-8 -*-
"""
/***************************************************************************
DemConverter
A QGIS plugin
Convert DEM file
-------------------
begin : 2017-01-23
git sha : $Format:%H$
copyright : (C) 2017 by Salvatore Agosta
email : sagost@katamail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt
from PyQt4.QtGui import QAction, QIcon
# Initialize Qt resources from file resources.py
import resources
# Import the code for the DockWidget
from dem_converter_dockwidget import DemConverterDockWidget
import os.path
class DemConverter:
    """QGIS Plugin Implementation."""
    def __init__(self, iface):
        """Constructor.
        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'DemConverter_{}.qm'.format(locale))
        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            if qVersion() > '4.3.3':
                QCoreApplication.installTranslator(self.translator)
        # Declare instance attributes
        self.actions = []
        self.menu = self.tr(u'&Dem Converter')
        # TODO: We are going to let the user set this up in a future iteration
        self.toolbar = self.iface.addToolBar(u'DemConverter')
        self.toolbar.setObjectName(u'DemConverter')
        #print "** INITIALIZING DemConverter"
        # pluginIsActive/dockwidget track whether the dock widget is shown;
        # see run() and onClosePlugin().
        self.pluginIsActive = False
        self.dockwidget = None
    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.
        We implement this ourselves since we do not inherit QObject.
        :param message: String for translation.
        :type message: str, QString
        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('DemConverter', message)
    def add_action(
        self,
        icon_path,
        text,
        callback,
        enabled_flag=True,
        add_to_menu=True,
        add_to_toolbar=True,
        status_tip=None,
        whats_this=None,
        parent=None):
        """Add a toolbar icon to the toolbar.
        :param icon_path: Path to the icon for this action. Can be a resource
            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
        :type icon_path: str
        :param text: Text that should be shown in menu items for this action.
        :type text: str
        :param callback: Function to be called when the action is triggered.
        :type callback: function
        :param enabled_flag: A flag indicating if the action should be enabled
            by default. Defaults to True.
        :type enabled_flag: bool
        :param add_to_menu: Flag indicating whether the action should also
            be added to the menu. Defaults to True.
        :type add_to_menu: bool
        :param add_to_toolbar: Flag indicating whether the action should also
            be added to the toolbar. Defaults to True.
        :type add_to_toolbar: bool
        :param status_tip: Optional text to show in a popup when mouse pointer
            hovers over the action.
        :type status_tip: str
        :param parent: Parent widget for the new action. Defaults None.
        :type parent: QWidget
        :param whats_this: Optional text to show in the status bar when the
            mouse pointer hovers over the action.
        :returns: The action that was created. Note that the action is also
            added to self.actions list.
        :rtype: QAction
        """
        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)
        if status_tip is not None:
            action.setStatusTip(status_tip)
        if whats_this is not None:
            action.setWhatsThis(whats_this)
        if add_to_toolbar:
            self.toolbar.addAction(action)
        if add_to_menu:
            # This plugin registers its entries under the Raster menu.
            self.iface.addPluginToRasterMenu(
                self.menu,
                action)
        self.actions.append(action)
        return action
    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        icon_path = ':/plugins/DemConverter/icon.png'
        self.add_action(
            icon_path,
            text=self.tr(u'Convert DEM files'),
            callback=self.run,
            parent=self.iface.mainWindow())
    #--------------------------------------------------------------------------
    def onClosePlugin(self):
        """Cleanup necessary items here when plugin dockwidget is closed"""
        #print "** CLOSING DemConverter"
        # disconnects
        self.dockwidget.closingPlugin.disconnect(self.onClosePlugin)
        # remove this statement if dockwidget is to remain
        # for reuse if plugin is reopened
        # Commented next statement since it causes QGIS crashe
        # when closing the docked window:
        # self.dockwidget = None
        self.pluginIsActive = False
    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""
        #print "** UNLOAD DemConverter"
        for action in self.actions:
            self.iface.removePluginRasterMenu(
                self.tr(u'&Dem Converter'),
                action)
            self.iface.removeToolBarIcon(action)
        # remove the toolbar
        del self.toolbar
    #--------------------------------------------------------------------------
    def run(self):
        """Run method that loads and starts the plugin"""
        if not self.pluginIsActive:
            self.pluginIsActive = True
            #print "** STARTING DemConverter"
            # dockwidget may not exist if:
            #    first run of plugin
            #    removed on close (see self.onClosePlugin method)
            # NOTE(review): 'self.dockwidget is None' would be the idiomatic test.
            if self.dockwidget == None:
                # Create the dockwidget (after translation) and keep reference
                self.dockwidget = DemConverterDockWidget(self.iface)
            # connect to provide cleanup on closing of dockwidget
            self.dockwidget.closingPlugin.connect(self.onClosePlugin)
            # show the dockwidget
            # TODO: fix to allow choice of dock location
            self.iface.addDockWidget(Qt.LeftDockWidgetArea, self.dockwidget)
            self.dockwidget.show()
| [
"noreply@github.com"
] | noreply@github.com |
3bfe9478a45b3ce6a71d86c7bffb8ca57afba743 | ec1fede858622eb8f18606e562324bf40aab8768 | /fibo.py | d7f55991d92d311b99c9fa5e18da436f365b1201 | [] | no_license | IgorBambach/ac4 | 4c501e21c08103ca87eea53987672aeed5b2cb87 | ecc9a3029ba9ed561761b993303bba247fa10004 | refs/heads/main | 2023-01-03T08:08:04.878007 | 2020-10-23T00:31:59 | 2020-10-23T00:31:59 | 306,482,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | importar os
import os

from flask import Flask, jsonify, request

app = Flask(__name__)


@app.route('/')
def nao_entre_em_panico():
    """Return the first 99 Fibonacci numbers as a comma/newline separated string.

    Restored from a machine-translated copy in which Python keywords and Flask
    API names had been translated ("rota", "resposta de retorno", "executar")
    and the code no longer parsed.
    """
    proximo = 1       # next Fibonacci number to emit
    anterior = 0      # previous Fibonacci number
    limite = 98       # 98 more terms after the initial "1"
    encontrado = 0
    resposta = "1, \n"
    while encontrado < limite:
        # Advance the pair (previous, next) one step.
        proximo, anterior = proximo + anterior, proximo
        encontrado += 1
        resposta += str(proximo) + ", \n"
    return resposta


if __name__ == "__main__":
    # Honor the PORT environment variable (e.g. on a PaaS), defaulting to 5000.
    porta = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=porta)
"noreply@github.com"
] | noreply@github.com |
e2b9395ca306349905fc72c9f94e4caaa1cbe988 | 5d7684b486a23cb8177dbaa29b7540dc4c63a8f8 | /src/ds_project/urls.py | 242dd938317e4015238deefac20a32ded666f974 | [] | no_license | VitalR/DataAnalysis-App | 8cce543a1981e6dfb3b5204e01700a1349466252 | 32785b3d2d0e5e8042b4d34ca9c72c32c77b0816 | refs/heads/master | 2023-01-03T15:40:42.240473 | 2020-10-31T20:22:18 | 2020-10-31T20:22:18 | 304,565,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | """ds_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from .views import home_view, login_view, register_view
from django.contrib.auth import views as auth_views
# Root URL table: Django admin, auth pages served by this project's views,
# and the feature apps mounted under their own prefixes and namespaces.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', home_view, name='home'),
    path('login/', login_view, name='login'),
    path('register/', register_view, name='register'),
    path('performance/', include('products.urls', namespace='products')),
    path('upload/', include('csvs.urls', namespace='csvs')),
    path('customers/', include('customers.urls', namespace='customers')),
    path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
]
# Serve collected static files and uploaded media through Django (development setup).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"vitalii.rodikov@gmail.com"
] | vitalii.rodikov@gmail.com |
6b3660695db6f5815ec10bfa006908c75ed584b3 | ae4316d99cbd4d5f8fe26b5e905c2a14fd3a3575 | /lexer.py | ecfd89bed7fea867173f0d57020ca001548ea430 | [] | no_license | tanmaytirpankar/ForLoopParser | 229fbca10780e84d46faaed97c17d43e1ec1b2de | 20a365af04382a2e3de518d9a284e8b4fc284d24 | refs/heads/main | 2022-12-30T14:14:44.310651 | 2020-10-21T20:29:39 | 2020-10-21T20:29:39 | 305,006,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,938 | py | from sly import Lexer
from gtokens import *
class Flexer(Lexer):
    """sly lexer for the for-loop mini language (grammar documented at the
    bottom of this file)."""

    # The tokens arranged on each line are type of input, arithmetic operators,
    # logical operators, miscellaneous symbols, grammar specific keywords.
    tokens = {INTEGER, DECIMAL,
              PLUS, MINUS, DIV, MUL, MOD, INC, DEC,
              LT, GT, LET, GET, EQ, NEQ, AND, OR,
              LPAREN, RPAREN, CLPAREN, CRPAREN, SLPAREN, SRPAREN, SEMICOLON, COMMA, ASSIGN,
              ID, FOR, BEGIN, END, DO, ENDFOR}

    # String characters to be ignored
    ignore = ' \t'
    ignore_comment = r'\#.*'

    # Regular expression rules for tokens.
    # NOTE: DECIMAL escapes the dot; the original r'\d+.\d+' used '.' as a
    # wildcard and would also match inputs such as "12x34".
    ID = r'[a-zA-Z_][a-zA-Z0-9_]*'
    DECIMAL = r'\d+\.\d+'
    INTEGER = r'\d+'
    DIV = r'/'
    MUL = r'\*'
    INC = r'\+\+'
    DEC = r'--'
    PLUS = r'\+'
    MINUS = r'-'
    MOD = r'\%'
    EQ = r'=='
    ASSIGN = r'='
    LT = r'<'
    GT = r'>'
    LET = r'<='
    GET = r'>='
    NEQ = r'\!='
    AND = r'&&'
    OR = r'\|\|'
    LPAREN = r'\('
    RPAREN = r'\)'
    CLPAREN = r'\{'
    CRPAREN = r'\}'
    SLPAREN = r'\['
    SRPAREN = r'\]'
    SEMICOLON = r';'
    COMMA = r','

    # Keywords are first matched as IDs and then remapped to keyword tokens.
    ID['for'] = FOR
    ID['begin'] = BEGIN
    ID['end'] = END
    ID['do'] = DO
    ID['endfor'] = ENDFOR

    # Parser-facing cursor state.  NOTE(review): these are class attributes, so
    # the mutable token_list is shared by all Flexer instances.
    pos = 0
    token_list = []
    current_token = None
    tok = None

    # Define a rule so we can track line numbers
    @_(r'\n+')
    def ignore_newline(self, t):
        """Keep self.lineno in sync across newlines."""
        self.lineno += len(t.value)

    # Compute column.
    # input is the input text string
    # token is a token instance
    def find_column(self, text, token):
        """Return the 1-based column of *token* within *text*."""
        last_cr = text.rfind('\n', 0, token.index)
        if last_cr < 0:
            last_cr = 0
        column = (token.index - last_cr) + 1
        return column

    def error(self, t):
        """Report a bad character and skip past it.

        Advancing self.index is required: without it sly would retry the same
        character forever and never make progress.
        """
        print('Line %d: Bad character %r' % (self.lineno, t.value[0]))
        self.index += 1

    def create_token_generator(self, text):
        """Prime the token stream consumed by get_next_token()."""
        self.tok = self.tokenize(text)

    def get_current_token(self):
        return self.current_token

    def get_next_token(self):
        """Return the next token, or None when the input is exhausted."""
        try:
            return self.tok.__next__()
        except StopIteration:
            return None

    # DECIMAL is defined before INTEGER so that "3.14" is lexed as a single
    # DECIMAL token instead of INTEGER '3' followed by an error on '.'
    # (function-defined rules are tried in definition order).
    @_(r'\d+\.\d+')
    def DECIMAL(self, tok):
        tok.value = float(tok.value)
        return tok

    @_(r'\d+')
    def INTEGER(self, tok):
        tok.value = int(tok.value)
        return tok

    def show_token(self, tok):
        """Debug helper: print a token's type and value."""
        print('type=%r, value=%r' % (tok.type, tok.value))
if __name__ == '__main__':
    # Smoke test: lex the file named on the command line.
    import sys
    text=open(sys.argv[1], 'r').read()
    lexer = Flexer()
    #for tok in lexer.tokenize(text):
    #    print('type=%r, value=%r' % (tok.type, tok.value))
    # Reference grammar for the language this lexer feeds (kept as a bare string).
    """
    program : BEGIN <stmts> END
    stmts : <stmt> <stmts>*
    stmt : <assign_expr>
        | <forblock>
        | empty
    forblock: FOR LPAREN <loop_assign> (COMMA <loop_assign>)* SEMICOLON <cond_expr> SEMICOLON (<op_expr> | <loop_assign>) (COMMA (<op_expr> | <loop_assign>))* RPAREN DO <stmts> ENDFOR
    loop_assign: ID ASSIGN <loop_expr>
    loop_expr : <loop_term> ((PLUS | MINUS) <loop_term>)*
    loop_term : <loop_factor> ((MUL | DIV | MOD) <loop_factor>)*
    loop_factor : MINUS <loop_factor>
        | PLUS <loop_factor>
        | LPAREN <loop_expr> RPAREN
        | INTEGER
        | ID
    cond_expr : <cond_term> ( (AND | OR) <cond_term>)*
    cond_term : <sym_expr> (LT | GT | LET | GET | EQ | NEQ) <sym_expr>
    sym_expr : <sym_term> ((PLUS | MINUS) <sym_term>)*
    sym_term : <sym_factor> ((MUL | DIV | MOD) <sym_factor>)*
    sym_factor : MINUS <sym_factor>
        | PLUS <sym_factor>
        | LPAREN <sym_expr> RPAREN
        | INTEGER
        | ID
    assign_expr : ID ASSIGN <expr> SEMICOLON
    op_expr : ID (INC | DEC)
    expr : <term> ((PLUS | MINUS) <term>)*
    term : <factor> ((MUL | DIV | MOD) <factor>)*
    factor : MINUS <factor>
        | PLUS <factor>
        | LPAREN <expr> RPAREN
        | INTEGER
        | ID
    """
| [
"tirpankartanmay@gmail.com"
] | tirpankartanmay@gmail.com |
335927b71241544311639bc055c64afdadfc1656 | 5ea7394e606d21a5a5bf84e657b33c580c6d0ec7 | /tablas/admin.py | 2a46cf4520366548238bba410a5e0f445f4c50c5 | [] | no_license | sangenispablo/erp.colegio | 0738e5e33bafecd6548e598147716e824954a14b | 31544e9016e0c8396d1fb63ff9cfc3b6ab6e9bf8 | refs/heads/master | 2023-04-21T19:40:19.538209 | 2021-05-07T17:06:22 | 2021-05-07T17:06:22 | 361,764,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.contrib import admin
from .models import Nivel, Turno, Aula
# Register the school models (level, shift, classroom) with the Django admin.
admin.site.register(Nivel)
admin.site.register(Turno)
admin.site.register(Aula)
| [
"sangenispablo@gmail.com"
] | sangenispablo@gmail.com |
12e50b360422f0d643d536e64911172c29b93a33 | 35fa34a9bfc8e8d87d033ac556a0a08a7a55ce16 | /crawl_frame/models/ocean_models.py | f178bb1e2dbe89585c93e5ce669fbeea1bda692b | [] | no_license | yinglingxianghen/spiders_business | 2b923bf502ec1bd8706951c1dea354345ee96c45 | 7e8e6ec911d30a66dc26d825940c78e43b39de14 | refs/heads/master | 2022-07-11T14:01:13.052718 | 2020-05-11T03:49:18 | 2020-05-11T03:49:18 | 262,930,639 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
# NOTE(review): database credentials are hard-coded in the connection URL;
# move them to configuration/environment variables before sharing this code.
engine = create_engine("mysql+pymysql://root:111111@127.0.0.1:3306/oceans",encoding="utf-8", echo=True, max_overflow=5)
# engine = create_engine("mysql+pymysql://root:111111@127.0.0.1:3306/oceans",encoding="utf-8", echo=True, max_overflow=5)
# Connect to the MySQL database; echo controls whether the emitted SQL is printed.
Base = declarative_base() # declarative ORM base class
class Ocean_Test_Adgs(Base):
    """ORM mapping for the `ocean_test_adgs` table: per-ad-group delivery
    metrics plus group metadata (column comments are stored in Chinese).

    Translations of the DB comments: 组预算=group budget, 展示=impressions,
    点击数=clicks, 花费=spend, 转化数=conversions, 转化成本=cost per
    conversion, 转化率=conversion rate, 点击率=click-through rate,
    广告组名称=ad-group name, 广告组状态=ad-group status, 推广目的=promotion goal.
    """
    __tablename__ = "ocean_test_adgs"
    id = Column(Integer, primary_key=True,autoincrement=True)
    budget = Column(String(64),comment="组预算")
    show_cnt = Column(String(64),comment="展示")
    click_cnt = Column(String(64),comment="点击数")
    stat_cost = Column(String(64),comment="花费")
    convert_cnt = Column(String(64),comment="转化数")
    cpc_platform = Column(String(64),comment="cpc")
    cpm_platform = Column(String(64),comment="cpm")
    conversion_cost = Column(String(64),comment="转化成本")
    conversion_rate = Column(String(64),comment="转化率")
    click_start_rate = Column(String(64),comment="点击率")
    campaign_name = Column(String(64),comment="广告组名称")
    campaign_status = Column(Integer(),comment="广告组状态")
    landing_type_name = Column(String(64),comment="推广目的")
landing_type_name = Column(String(64),comment="推广目的")
# Base.metadata.create_all(engine)
# DBSession = sessionmaker(bind=engine)
# session = DBSession()
# newuser0=Admins111111(username="a",password="aaa")
# session.add(newuser0)
# session.commit()
# print("chengg")
# 父类Base调用所有继承他的子类来创建表结构
# if __name__ == '__main__':
# 创建表结构 | [
"1026719936@qq.com"
] | 1026719936@qq.com |
658637053dd75b704a6f500e279e42fd4a536cf6 | 2231cbaf3b3a4a67b8f25bc7f4150f86b28981d4 | /test/hw4/arithmetic/test7/test7.py | 47d2e11edfbe5ddb450631a472d8a8c4d07fca7c | [] | no_license | TLeben/compilerContruction | 2bd4c17a7682f0a75fb162864856179cf4d1a917 | 90d5ff9b115b066dab7ed98dc7772a8b6f6cab98 | refs/heads/master | 2016-08-04T20:36:03.416893 | 2015-04-09T04:36:39 | 2015-04-09T04:36:39 | 29,304,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | #!/usr/bin/env python
# Compiler-course test fixture (Python 2 print statement): sums the ten
# variables and should print 55.
# NOTE(review): assumes the course lexer accepts '#' comments — confirm before
# committing comment changes to a fixture file.
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
g = 7
h = 8
i = 9
j = 10
print a + b + c + d + e + f + g + h + i + j
| [
"schweikm@gmail.com"
] | schweikm@gmail.com |
de82ab414d200f43559d8f8c2c19e5dce45ff2a1 | b3241816119b6440b50e44a08440bcd22ebc6ad6 | /tests/test_gnn.py | 9a657d2a31870b361c7de9d4f876246c3bcf1e22 | [
"Apache-2.0"
] | permissive | maniospas/pygrank | 8cf7a8c6ca3dc4c1fecdba3994e6d59121d5faba | df1489763c55ecab764671f0d80635d989aa1139 | refs/heads/master | 2023-03-05T14:15:04.763234 | 2023-02-26T21:41:50 | 2023-02-26T21:41:50 | 210,811,921 | 1 | 0 | Apache-2.0 | 2019-09-25T09:54:51 | 2019-09-25T09:54:50 | null | UTF-8 | Python | false | false | 5,679 | py | import pygrank as pg
import tensorflow as tf
import torch
import pytest
def test_gnn_errors():
    """Backend-mismatch paths: gnn_train must fail before the tensorflow
    backend is loaded, and gnn_accuracy must fail on tensorflow predictions
    after switching back to the numpy backend."""
    graph, features, labels = pg.load_feature_dataset('synthfeats')
    training, test = pg.split(list(range(len(graph))), 0.8)
    training, validation = pg.split(training, 1 - 0.2 / 0.8)
    from tensorflow.keras.layers import Dropout, Dense
    from tensorflow.keras.regularizers import L2
    class APPNP(tf.keras.Sequential):
        def __init__(self, num_inputs, num_outputs, hidden=64):
            super().__init__([
                Dropout(0.5, input_shape=(num_inputs,)),
                Dense(hidden, activation="relu", kernel_regularizer=L2(1.E-5)),
                Dropout(0.5),
                Dense(num_outputs, activation="relu")])
            self.ranker = pg.PageRank(0.9, renormalize=True, assume_immutability=True,
                                      use_quotient=False, error_type="iters", max_iters=10)  # 10 iterations
        def call(self, features, graph, training=False):
            predict = super().call(features, training=training)
            propagate = self.ranker.propagate(graph, predict, graph_dropout=0.5 if training else 0)
            return tf.nn.softmax(propagate, axis=1)
    model = APPNP(features.shape[1], labels.shape[1])
    # Note the swapped (graph, features) argument order here — training must raise.
    with pytest.raises(Exception):
        pg.gnn_train(model, graph, features, labels, training, validation, test=test, epochs=2)
    pg.load_backend('tensorflow')
    pg.gnn_train(model, features, graph, labels, training, validation, test=test, epochs=300, patience=2)
    predictions = model(features, graph)
    pg.load_backend('numpy')
    with pytest.raises(Exception):
        pg.gnn_accuracy(labels, predictions, test)
def test_appnp_tf():
    """End-to-end APPNP training in the tensorflow backend with an autotuned
    propagation filter; the synthetic dataset should reach perfect accuracy."""
    from tensorflow.keras.layers import Dropout, Dense
    from tensorflow.keras.regularizers import L2
    class APPNP(tf.keras.Sequential):
        def __init__(self, num_inputs, num_outputs, hidden=64):
            super().__init__([
                Dropout(0.5, input_shape=(num_inputs,)),
                Dense(hidden, activation="relu", kernel_regularizer=L2(1.E-5)),
                Dropout(0.5),
                Dense(num_outputs, activation="relu")])
            self.ranker = pg.ParameterTuner(
                lambda par: pg.GenericGraphFilter([par[0] ** i for i in range(int(10))],
                                                  error_type="iters", max_iters=int(10)),
                max_vals=[0.95], min_vals=[0.5], verbose=False,
                measure=pg.Mabs, deviation_tol=0.1, tuning_backend="numpy")
        def call(self, features, graph, training=False):
            predict = super().call(features, training=training)
            propagate = self.ranker.propagate(graph, predict, graph_dropout=0.5 if training else 0)
            return tf.nn.softmax(propagate, axis=1)
    graph, features, labels = pg.load_feature_dataset('synthfeats')
    training, test = pg.split(list(range(len(graph))), 0.8)
    training, validation = pg.split(training, 1 - 0.2 / 0.8)
    model = APPNP(features.shape[1], labels.shape[1])
    with pg.Backend('tensorflow'):  # pygrank computations in tensorflow backend
        graph = pg.preprocessor(renormalize=True, cors=True)(graph)  # cors = use in many backends
        pg.gnn_train(model, features, graph, labels, training, validation,
                     optimizer=tf.optimizers.Adam(learning_rate=0.01), verbose=True, epochs=50)
    assert float(pg.gnn_accuracy(labels, model(features, graph), test)) == 1.  # dataset is super-easy to predict
def test_appnp_torch():
    """Same APPNP pipeline as the tensorflow test, rebuilt with torch.nn
    modules and trained in the pytorch backend (accuracy assertion disabled)."""
    graph, features, labels = pg.load_feature_dataset('synthfeats')
    training, test = pg.split(list(range(len(graph))), 0.8)
    training, validation = pg.split(training, 1 - 0.2 / 0.8)
    class AutotuneAPPNP(torch.nn.Module):
        def __init__(self, num_inputs, num_outputs, hidden=64):
            super().__init__()
            self.layer1 = torch.nn.Linear(num_inputs, hidden)
            self.layer2 = torch.nn.Linear(hidden, num_outputs)
            self.activation = torch.nn.ReLU()
            self.dropout = torch.nn.Dropout(0.5)
            self.num_outputs = num_outputs
            self.ranker = pg.ParameterTuner(
                lambda par: pg.GenericGraphFilter([par[0] ** i for i in range(int(10))],
                                                  error_type="iters", max_iters=int(10)),
                max_vals=[0.95], min_vals=[0.5], verbose=False,
                measure=pg.Mabs, deviation_tol=0.1, tuning_backend="numpy")
        def forward(self, features, graph, training=False):
            predict = self.dropout(torch.FloatTensor(features))
            predict = self.dropout(self.activation(self.layer1(predict)))
            predict = self.activation(self.layer2(predict))
            predict = self.ranker.propagate(graph, predict, graph_dropout=0.5 if training else 0)
            ret = torch.nn.functional.softmax(predict, dim=1)
            # Manual L2 regularization on the first layer, exposed via self.loss.
            self.loss = 0
            for param in self.layer1.parameters():
                self.loss = self.loss + 1E-5*torch.norm(param)
            return ret
    def init_weights(m):
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)
    model = AutotuneAPPNP(features.shape[1], labels.shape[1])
    graph = pg.preprocessor(renormalize=True, cors=True)(graph)
    model.apply(init_weights)
    with pg.Backend('pytorch'):
        pg.gnn_train(model, features, graph, labels, training, validation, epochs=50)
    # TODO: investigate why this is not working as well as tf
    #assert float(pg.gnn_accuracy(labels, model(features, graph), test)) == 0.5
| [
"maniospas@hotmail.com"
] | maniospas@hotmail.com |
0de59a9aa89e91f8a13f2348a8538cb213bac16f | 4fd581daa0c21ac36c19da710c85204b7608ac08 | /MyPizzas/piz/models.py | c829cddacd48650dd3783744a97e501049dffc20 | [] | no_license | trilok002/myPizza | a3c98ef7d80099bb82fa0b8739cc796768726629 | 1a7dd5636ae77f6b79563dc5ce6af685cd07467a | refs/heads/master | 2023-03-09T18:22:21.785280 | 2021-03-01T12:14:55 | 2021-03-01T12:14:55 | 343,404,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from djongo import models
# Create your models here.
class Pizza(models.Model):
    """Pizza model with free-text type, size and toppings fields (all nullable)."""
    type=models.CharField(max_length=20,null=True)
    size=models.CharField(max_length=50,null=True)
    toppings=models.TextField(null=True)
"61768477+trilok002@users.noreply.github.com"
] | 61768477+trilok002@users.noreply.github.com |
9388ed6505d0881d0e65812e0362e9978ec0feb0 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_conv3d_transpose_13.py | ed625c225a1cb9bf00eec92280375ae7f4468a6a | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 641 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
# Load the operator test-case definitions for paddle.nn from the shared YAML file.
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_conv3d_transpose_13():
    """test conv3d_transpose_13"""
    # Run the jit-translation check for the YAML-defined conv3d_transpose case.
    jit_case = JitTrans(case=yml.get_case_info("conv3d_transpose_13"))
    jit_case.jit_run()
| [
"825276847@qq.com"
] | 825276847@qq.com |
e4c2f3156d33f3befa11455e042d24a1c9ee39bd | f3d3b3ea381f937109a9ef8c230078c35e5c8b65 | /Ether/upload/src/test/LexerSuite.py | e10206c3d5dc2993bd58af6a3c69fb58b84cc4f4 | [] | no_license | TanDung2512/parserSmartContractPython | e268fa8d2971250aab1cd5f87638d19e70c2d8e3 | 6b6a154004e8d0b9957e9c96b99a4b0d28888e37 | refs/heads/master | 2020-04-26T18:07:02.905020 | 2019-03-04T11:46:37 | 2019-03-04T11:46:37 | 173,734,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | import unittest
from TestUtils import TestLexer
class LexerSuite(unittest.TestCase):
    """Lexer regression tests (run through the course's TestLexer harness)."""

    def test_lowercase_identifier(self):
        """test identifiers"""
        # Fixed method name: it previously contained a non-ASCII character
        # ("test_lơercase_identifier"), a typo for "lowercase".
        self.assertTrue(TestLexer.test("abc", "abc,<EOF>", 101))
| [
"kimtoan1998@gmail.com"
] | kimtoan1998@gmail.com |
ef7fa4042be75c55cebe7e526c05a66ca9081831 | 49d67f9b1abec8ddacd30b9a7ff99e036c1d2dfc | /App/Json_Class/TCPdevice_dto.py | 200fe093346990dd623926036a4591ecaf07c70d | [] | no_license | jacobpaul07/Project-BoschMCM-API | 3b9f25011f24d47089df3185e45e810d0f4b327f | 2cb6962ff825919c3f2d054333dcb7cce275379a | refs/heads/main | 2023-08-29T10:22:02.327369 | 2021-09-30T08:45:57 | 2021-09-30T08:45:57 | 390,989,406 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from dataclasses import dataclass
from typing import Any, List, Optional, TypeVar, Type, cast, Callable
from App.Json_Class.IOTag_dto import IOTag
from App.Json_Class.DtoUtilities import *
from App.Json_Class.TCPdeviceProperties_dto import TCPdeviceProperties
@dataclass
class TCPdevice:
    """TCP device configuration: connection properties plus its list of IO tags."""
    properties: TCPdeviceProperties
    IOTags: List[IOTag]
    @staticmethod
    def from_dict(obj: Any) -> 'TCPdevice':
        """Build a TCPdevice from a plain dict (e.g. parsed JSON)."""
        assert isinstance(obj, dict)
        properties = TCPdeviceProperties.from_dict(obj.get("properties"))
        IOTags = from_list(IOTag.from_dict, obj.get("IO Tags"))
        return TCPdevice(properties, IOTags)
    def to_dict(self) -> dict:
        """Serialize back to a plain dict (inverse of from_dict)."""
        result: dict = {"properties": to_class(TCPdeviceProperties, self.properties),
                        "IO Tags": from_list(lambda x: to_class(IOTag, x), self.IOTags)}
        return result
| [
"p.jacobpaul07@gmail.com"
] | p.jacobpaul07@gmail.com |
59bcf1a18ede02687f34e00ecf37f4912b59fe5e | e878866c0e3bea4c8c24da2629e1c8f65135c59c | /ProyectoAutomation/New folder/ejercicio3.py | 81563ae26c5c2150dff8e941aac23932e00709a6 | [] | no_license | KariAltamirano/ProyectoAutomation | 4aaf6b66ddb80d26893354ae4ae596803dfebf67 | 631e5c4c65a21b17e23398792d6939b48aa987da | refs/heads/main | 2023-07-18T07:20:48.756419 | 2021-09-09T03:59:22 | 2021-09-09T03:59:22 | 403,185,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | def calcular_precio(marcas,puertas,color,ventas):
    # NOTE(review): these dicts shadow the 'marcas'/'puertas' parameters, so the
    # list arguments passed by the caller are never used.
    marcas = {'ford':100000, 'chevrolet':120000, 'fiat':80000}
    colores = {'blanco':10000, 'azul':20000, 'negro':30000}
    puertas = {2:50000,4:65000,5:78000}
    # NOTE(review): 'marca' and 'puerta' are read from module globals (the values
    # of the *last* customer entered), not from this sale's data — likely a bug.
    precio = marcas[marca]+ colores [color]+ puertas[puerta]
    # Volume discounts: 6-10 sales -> 10%, 11-50 -> 15%, >50 -> 18%.
    if ventas > 5 and ventas <11:
        precio= precio*0.9
    elif ventas >10 and ventas <51:
        precio = precio *0.85
    # NOTE(review): the branch below duplicates the previous condition and is
    # therefore unreachable dead code.
    elif ventas >10 and ventas <51:
        precio = precio *0.85
    elif ventas >50:
        precio = precio *0.82
    return precio
# Interactive data entry: collect one sale record per customer until the
# operator answers anything other than 'si'.
mas_clientes = 'si'
ventas = []
marcas =['ford', 'chevrolet' , 'fiat']
puertas =[2,4,5]
colores = ['blanco', 'azul' , 'negro']
while mas_clientes == 'si':
    nombre = input('Ingrese nombre: ')
    apellido = input('Ingrese el apellido: ')
    marca= ''
    puerta = 0
    color = ''
    # Re-prompt until a valid option is entered.
    while marca not in marcas:
        marca = input('Ingrese la marca: ')
        marca=marca.lower()
    while puerta not in puertas:
        puerta = int(input('ingrese puertas: '))
    while color not in colores:
        color = input('ingrese el color: ')
        color = color.lower()
    #precio = calcular_precio(marca,puerta,color)
    ventas.append({'nombre':nombre, 'apellido':apellido, 'marca':marca, 'puertas':puerta, 'color':color})
    mas_clientes = input('Hay mas clientes?: ')
largo = len(ventas)
for i in ventas:
    # NOTE(review): calcular_precio prices every sale from the globals left by
    # the last customer, not from i['marca']/i['puertas']/i['color'], so each
    # line below reports the same price.
    precio = calcular_precio(marcas,puertas,color,largo)
    # largo is the total number of sales (drives the volume discount)
    print("La persona: "+ i ['nombre']+" "+ i ['apellido']+
    " compro un auto marca "+ i ['marca'] +" de "+ str(i ['puertas'])+" puertas y color "+ i ['color'] +" con un precio de $"+ str(precio))
| [
"noreply@github.com"
] | noreply@github.com |
b27239657a5741c26fc636ccfde4758a19cdea07 | 4e8e9ed2a8fb69ed8b46066a8d967e4c107013a4 | /main/auth/reddit.py | b4e81c58ba20f36a1a1a70b8a93f407dda2e0712 | [
"MIT"
] | permissive | welovecoding/vote4code | a57b3d155096d362dca47587ad2985b4201ef036 | be265d553af35dc6c5322ecb3f7d5b3cf7691b75 | refs/heads/master | 2021-08-11T22:46:40.884030 | 2019-11-15T16:15:05 | 2019-11-15T16:15:05 | 90,191,931 | 14 | 0 | MIT | 2021-08-10T22:50:49 | 2017-05-03T20:46:02 | Python | UTF-8 | Python | false | false | 2,613 | py | # coding: utf-8
import base64
from flask_oauthlib import client
from werkzeug import urls
import flask
import auth
import config
import model
import util
from main import app
# OAuth2 client settings for Reddit; client id/secret come from the app's
# master config record, and a random 'state' value guards the authorize step.
reddit_config = dict(
    access_token_method='POST',
    access_token_params={'grant_type': 'authorization_code'},
    access_token_url='https://ssl.reddit.com/api/v1/access_token',
    authorize_url='https://ssl.reddit.com/api/v1/authorize',
    base_url='https://oauth.reddit.com/api/v1/',
    consumer_key=model.Config.get_master_db().reddit_client_id,
    consumer_secret=model.Config.get_master_db().reddit_client_secret,
    request_token_params={'scope': 'identity', 'state': util.uuid()},
)
reddit = auth.create_oauth_app(reddit_config, 'reddit')
def reddit_handle_oauth2_response():
    """Exchange the OAuth2 authorization code for a Reddit access token.

    Custom replacement for flask_oauthlib's default handler: the token request
    here sends the client credentials via HTTP Basic auth and adds a custom
    User-Agent header. Raises OAuthException on a non-200/201 response.
    """
    access_args = {
        'code': flask.request.args.get('code'),
        'client_id': reddit.consumer_key,
        'redirect_uri': flask.session.get('%s_oauthredir' % reddit.name),
    }
    access_args.update(reddit.access_token_params)
    # Client id/secret encoded as an HTTP Basic Authorization header.
    auth_header = 'Basic %s' % base64.b64encode(
        ('%s:%s' % (reddit.consumer_key, reddit.consumer_secret)).encode('latin1')
    ).strip().decode('latin1')
    response, content = reddit.http_request(
        reddit.expand_url(reddit.access_token_url),
        method=reddit.access_token_method,
        data=urls.url_encode(access_args),
        headers={
            'Authorization': auth_header,
            'User-Agent': config.USER_AGENT,
        },
    )
    data = client.parse_response(response, content)
    if response.code not in (200, 201):
        raise client.OAuthException(
            'Invalid response from %s' % reddit.name,
            type='invalid_response', data=data,
        )
    return data
# Install the custom token-exchange handler on the OAuth client.
reddit.handle_oauth2_response = reddit_handle_oauth2_response
@app.route('/api/auth/callback/reddit/')
def reddit_authorized():
    """OAuth2 callback: store the access token and sign the Reddit user in."""
    response = reddit.authorized_response()
    # User denied access or the provider reported an error.
    if response is None or flask.request.args.get('error'):
        flask.flash('You denied the request to sign in.')
        return flask.redirect(util.get_next_url())
    flask.session['oauth_token'] = (response['access_token'], '')
    me = reddit.request('me')
    user_db = retrieve_user_from_reddit(me.data)
    return auth.signin_user_db(user_db)
@reddit.tokengetter
def get_reddit_oauth_token():
    """Return the (token, secret) pair stored in the Flask session, if any."""
    return flask.session.get('oauth_token')
@app.route('/signin/reddit/')
def signin_reddit():
    """Start the Reddit OAuth sign-in flow."""
    return auth.signin_oauth(reddit)
def retrieve_user_from_reddit(response):
    """Return the local user for a Reddit profile, creating it on first sign-in."""
    auth_id = 'reddit_%s' % response['id']
    existing_user = model.User.get_by('auth_ids', auth_id)
    if existing_user:
        return existing_user
    # First sign-in with this Reddit account: create a matching local user.
    reddit_name = response['name']
    return auth.create_user_db(
        auth_id=auth_id,
        name=reddit_name,
        username=reddit_name,
    )
| [
"lipiridis@gmail.com"
] | lipiridis@gmail.com |
4aa5cc6c5f3ee72492708473c20d3ece613db6cb | 7a14adcd5150c970f6a5f1ab83180f091dc0f6cc | /items/migrations/0002_item_origin.py | 4fce527f993c1c330608ba087e0f33e0e273f96e | [] | no_license | synnea/the-modern-witcher | 0b4349e845e12e05f7abf6e0688200a6f54dd96d | f85c4de1fb3167b5c595ac6843c33a55495d7259 | refs/heads/master | 2022-12-09T15:09:23.602406 | 2020-03-05T18:17:27 | 2020-03-05T18:17:27 | 230,802,118 | 0 | 1 | null | 2022-11-22T05:13:41 | 2019-12-29T20:52:55 | HTML | UTF-8 | Python | false | false | 380 | py | # Generated by Django 2.2 on 2020-01-07 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the optional `origin` char field to Item."""
    dependencies = [
        ('items', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='item',
            name='origin',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
| [
"carrie.poell@gmail.com"
] | carrie.poell@gmail.com |
6c670e880143af3d3df7f3fa48cd73def4f4535b | 0ee88932af5b6ed088e471abcbd5f40fd9cbd688 | /Other/eraser.py | 4011853bf7baa80b3ee2c2398547b2997ebdd682 | [] | no_license | BjaouiAya/Cours-Python | 48c740966f9814e1045035ffb902d14783d36194 | 14b306447e227ddc5cb04b8819f388ca9f91a1d6 | refs/heads/master | 2021-06-10T22:17:38.731030 | 2016-11-11T16:45:05 | 2016-11-11T16:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | #! /usr/bin/env python
# -*- coding:Utf8 -*-
"""Renaming class constructor"""
########################################
#### Classes and Methods imported : ####
########################################
import os
import re
#####################
#### Constants : ####
#####################
# Regex and folder parmeter for music file before burning
# Regex and folder parameter for music files before burning.
# Matches a leading two-digit track prefix: "NN. ", "NN - ", "NN-" or "NN ".
# Raw strings avoid the invalid "\A" escape warning of the original literals.
REGEX_MP3 = re.compile(r"\A[0-9]{2}\. " r"|\A[0-9]{2} \- " r"|\A[0-9]{2}[ \-]")
FOLDER_MP3 = "/home/pampi/Output/cd_test/"


#######################################
#### Classes, Methods, Functions : ####
#######################################


class RenameMe:
    """
    In all files inside a directory (self.path) delete a part of the name
    according to regex and rename old file.
    To check another folder you only have to set self.path to new directory.
    Can be used to remove numbered songs like "10 song_nb.mp3".
    """
    def __init__(self, path="", regex=REGEX_MP3):
        self.path = path      # directory to scan; must end with a path separator
        self.regex = regex    # compiled pattern whose matches are stripped

    def change_regex(self, source, regex_expr=r'', replacement="", mode="rb"):
        """
        Rename *source* by removing every self.regex match from its name.

        ``regex_expr`` is unused but kept for backward compatibility; ``mode``
        is only used to open the file, which verifies it is readable.
        """
        # Open and immediately close the file: unreadable entries fail here,
        # and the handle is released before the rename (renaming an open file
        # fails on Windows — the original renamed inside the 'with' block).
        with open(source, mode):
            pass
        old = source[len(self.path):]
        new = re.sub(self.regex, replacement, old)
        os.rename(source, self.path + new)
        # Fixed comparison: the original used 'old is not new', an identity
        # test that only worked because CPython's re.sub happens to return the
        # same object when nothing matches.
        if old != new:
            print(old, "------->", new)
        else:
            print(old, " didn't change")

    def regex_loop(self):
        """
        Check all elements inside self.path directory and call
        change if element is a file
        """
        for mp3 in os.listdir(self.path):
            if os.path.isfile(self.path + mp3):
                self.change_regex(self.path + mp3)
########################
#### Main Program : ####
########################
if __name__ == '__main__':
    # Strip track-number prefixes from every file in the CD staging folder.
    renamer = RenameMe(FOLDER_MP3)
    renamer.regex_loop()
| [
"jeremybois@rocketmail.com"
] | jeremybois@rocketmail.com |
2ba40809f529128b9eee73cf5c7dcd8a5ac062cf | 877ee8bbd74479f11fe98b987c77bed0586da0fa | /src/utils/send_event.py | c6a047673e6900e0f3e80bc378129437bd193d07 | [] | no_license | HTD-Health/ai-smart-mirror | e33b219593ef12e1ff37d08f329440e12949616f | 5375ca64ac4b10e3efc0c136418e417c06619801 | refs/heads/main | 2023-07-20T16:01:04.948637 | 2021-08-16T13:32:34 | 2021-08-16T13:38:12 | 327,571,598 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | import argparse
import time
import zmq
import xdrlib
def main(host, port, topic, data):
    """
    Publish one event on a topic of the ZeroMQ event bus.

    Parameters:
        host (str): host name or address of the bus server.
        port (str): port of the bus server.
        topic (int): numeric id of the topic to publish on.
        data (str): payload to send (UTF-8 encoded before packing).
    """
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.connect(f"tcp://{host}:{port}")
    # XDR-encode the message: topic id first, then the raw payload bytes.
    packer = xdrlib.Packer()
    packer.pack_uint(topic)
    packer.pack_bytes(data.encode('utf-8'))
    # give some time to connect
    time.sleep(1)
    payload = packer.get_buffer()
    print(f'sending: {payload}')
    pub.send(payload)
if __name__ == "__main__":
# Creates Argument Parser object named parser
parser = argparse.ArgumentParser(description='Send Event to bus server')
# Set arguments
parser.add_argument(
'--port',
default="5555",
help='Port to which connect to'
)
parser.add_argument(
'topic',
type=int,
help='topic id'
)
parser.add_argument(
'data',
help='data to be send to topic'
)
args = parser.parse_args()
# Run module
main('127.0.0.1', args.port, args.topic, args.data)
| [
"noreply@github.com"
] | noreply@github.com |
67323a6cd63e9a194cb69a9c34a95d333bb40913 | 393f5e709700153dd8a4dbb4de229126a44fff43 | /cage/settings/local.py | 0fa4d63fd04ce97e4b8e9b2d444bc2b30378f6da | [] | no_license | CNcage/SCP | ec142bd536f3abc09a683e97e19a1ee48f4e8494 | 2ee0778774c9258a14e4d64f22d04537ea3dd7a5 | refs/heads/master | 2022-11-21T04:03:35.015907 | 2020-07-21T16:05:43 | 2020-07-21T16:05:43 | 279,333,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from decouple import config
from .base import * # noqa
SECRET_KEY = config("SECRET_KEY")
DEBUG = config("DEBUG", cast=bool)
DATABASES = { "default": { "ENGINE": config("DB_ENGINE"),
"NAME": config("DB_NAME"), "USER": config("DB_USER"),
"PASSWORD": config("DB_PASSWD"),
"HOST": config("DB_HOST"),
"PORT": config("DB_PORT", cast=int), }
} | [
"info@jmkit.com"
] | info@jmkit.com |
56b56b28494fd3e8c37f0d64125888e7aade5cf1 | d91391e7f093709fa90129f2d6bdfdb139213582 | /db/sms_pending.py | cd3a57d9b0bcaec4a40dcba6f4af7f59b8f8ad29 | [] | no_license | nomo-w/sms | f0f678dde12928963c0c341df2152a902d06bd5d | f7e8a0f6cdc8c2deee34a5e403245731e6c1e25c | refs/heads/main | 2023-01-23T19:45:53.855664 | 2020-12-07T09:07:21 | 2020-12-07T09:07:21 | 277,744,520 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,595 | py | # coding: utf-8
# 发送短信历史数据库
from config import Sql
from db.base import DBbase
from pymysql import escape_string
from db.history import SmsHistoryDB
from db.statistics import StatisticsDB
from db.cache_statistics import CacheStatisticsDB
import datetime
import random
import time
"""
CREATE TABLE `sms_pending` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`user` char(11) NOT NULL,
`user_id` int(11) DEFAULT -1,
`channel_id` int(11) NOT NULL,
`plateform_id` int(11) NOT NULL,
`to_number` char(100) DEFAULT NULL,
`text` char(200) DEFAULT NULL,
`time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`price` decimal(15,2) DEFAULT 0.00,
`description` char(50) DEFAULT 'pending',
`message_id` varchar(50) DEFAULT '0' COMMENT '唯一id',
`is_click` char(10) DEFAULT '未设置',
`callback_id` int(11) DEFAULT 0,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
class SmsPendingDB(DBbase):
    """Access layer for the ``sms_pending`` table: messages that have been
    handed to a channel and are still waiting for a delivery result.

    NOTE(review): every statement in this class is assembled with f-string
    interpolation rather than bound parameters, so any externally influenced
    value (text, message_id, to_number, ...) is a SQL-injection vector.
    Parameterizing would need knowledge of DBbase.execute's signature, so the
    risk is flagged here instead of changed.
    """
    def __init__(self):
        # No state of its own -- just the DBbase connection machinery.
        super().__init__()
    def is_have_text(self, text):
        """Return the message_id of one pending row carrying exactly this
        text (excluding the special Sql.kl_message_id rows), or None."""
        sql = f'select message_id from sms_pending where text="{text}" and message_id!="{Sql.kl_message_id}" limit 1;'
        r = self.execute(sql, fetch=True)
        return r[0][0] if r else None
    def search_manual(self, message_id):
        """Look up rows sent through the 'manual' channel.

        With an empty/null message_id: return one (count, text, message_id)
        tuple per distinct pending manual batch.  Otherwise: return the
        destination numbers belonging to that message_id.  Always returns a
        list (empty when nothing matches).
        """
        channel_id_sql = 'channel_id=(select id from sms_channel where channel_type="manual")'
        if message_id in ['null', None, '']:
            sql = f'select distinct message_id from sms_pending where {channel_id_sql} and ' \
                  f'message_id!="{Sql.kl_message_id}";'
            r = []
            for i in self.execute(sql, fetch=True):
                # For each batch, fetch how many recipients it has plus its text.
                _ = self.execute(f'select count(to_number),text from sms_pending where message_id="{i[0]}";', fetch=True)
                r.append(_[0] + (i[0],))
        else:
            # text = escape_string(text)
            sql = f'select to_number from sms_pending where {channel_id_sql} and message_id="{message_id}";'
            r = self.execute(sql, fetch=True)
        return r if r else []
    def update_kl(self, message_id):
        """Finalize every pending row of one batch as a success: charge the
        platform, copy each row to the history table (with a slightly
        randomized timestamp), and delete it from sms_pending."""
        sql = f'select id,user,user_id,channel_id,plateform_id,to_number,text,is_click,callback_id,time from ' \
              f'sms_pending where message_id="{message_id}";'
        r = self.execute(sql, fetch=True)
        for i in r:
            if i:
                i = list(i)
                # Shift the stored time forward by a random 1-50 seconds so
                # history entries of one batch do not share one timestamp.
                i[-1] = i[-1] + datetime.timedelta(seconds=+random.randint(1, 50))
                keys = ['user', 'user_id', 'channel_id', 'plateform_id', 'to', 'text', 'is_click', 'callback', 'time_']
                dic = dict(zip(keys, i[1:]))
                # Deduct the per-message fee from the platform balance.
                rate_sql = f'select rate from sms_rate where plateform_id={dic["plateform_id"]} and channel_id={dic["channel_id"]}'
                price = self.execute(rate_sql + ';', fetch=True)[0][0]
                c_b_sql = f'update sms_plateform set balance=balance-({price}) where id={dic["plateform_id"]};'
                # c_b_sql = f'-- update sms_plateform set balance=balance-({price}) where balance>=({rate_sql}) and id={dic["plateform_id"]};'
                self.execute(c_b_sql)
                with SmsHistoryDB() as db:
                    db.add(**dic, message_id=message_id, price=price, description='success')
                del_sql = f'delete from sms_pending where id={i[0]};'
                self.execute(del_sql, commit=True)
        return True
    def add(self, user_id, message_id, to, text, channel_id, plateform_id=None, callback=0, is_click='未设置', user=None, commit=True):
        """Insert one pending row and bump the day's total_count statistics.

        :param user_id: id of the sending account (used to resolve user /
                        plateform_id when those are not given)
        :param message_id: unique batch id
        :param to: destination number
        :param text: SMS body (escaped before insertion)
        :param channel_id: sending channel id
        :param plateform_id: platform id; looked up from sms_users when None
        :param callback: callback id stored with the row
        :param is_click: click-tracking flag (default: "not set")
        :param user: operator name; resolved via a sub-select when None
        :param commit: whether to commit the insert immediately
        :return: True
        """
        new_text = escape_string(text)
        if user is None:
            user = f'(select user from sms_users where id={user_id})'
        else:
            user = f'"{user}"'
        if plateform_id is None:
            plateform_id = self.execute(f'select plateform_id from sms_users where id={user_id};', fetch=True)[0][0]
        sql = f'insert into sms_pending (user,user_id,channel_id,plateform_id,to_number,text,message_id,' \
              f'callback_id,is_click) values ({user},{user_id},{channel_id},{plateform_id},"{to}","{new_text}",' \
              f'"{message_id}",{callback},"{is_click}");'
        self.execute(sql, commit=commit)
        with StatisticsDB() as db:
            db.update_count(plateform_id, channel_id, "total_count", time.strftime("%Y-%m-%d", time.localtime()))
        with CacheStatisticsDB() as db:
            db.update_count(plateform_id, channel_id, "total_count", time.strftime("%Y-%m-%d", time.localtime()))
        return True
    def update(self, message_id, err, err_text, to=None):
        """Move one pending row to history once its result is known.

        err == 0 means success (the fee is deducted); any other value stores
        err_text as the description.  Returns False when no row matched.
        """
        sql = f'select user,user_id,channel_id,plateform_id,to_number,text,is_click,callback_id,time from sms_pending ' \
              f'where message_id="{message_id}"'
        if to:
            sql += f' and to_number="{to}";'
        else:
            sql += ';'
        r = self.execute(sql, fetch=True)
        if r:
            keys = ['user', 'user_id', 'channel_id', 'plateform_id', 'to', 'text', 'is_click', 'callback', 'time_']
            dic = dict(zip(keys, r[0]))
            price = 0
            if err == 0:
                # Deduct the per-message fee from the platform balance.
                rate_sql = f'select rate from sms_rate where plateform_id={dic["plateform_id"]} and channel_id={dic["channel_id"]}'
                price = self.execute(rate_sql+';', fetch=True)[0][0]
                c_b_sql = f'update sms_plateform set balance=balance-({price}) where id={dic["plateform_id"]};'
                # c_b_sql = f'-- update sms_plateform set balance=balance-({price}) where balance>=({rate_sql}) and id={dic["plateform_id"]};'
                self.execute(c_b_sql)
            with SmsHistoryDB() as db:
                db.add(**dic, message_id=message_id, price=price, description='success' if err == 0 else err_text)
            del_sql = f'delete from sms_pending where message_id="{message_id}"'
            if to:
                del_sql += f' and to_number="{to}" limit 1;'
            else:
                del_sql += ' limit 1;'
            return self.execute(del_sql, commit=True)
        return False
| [
"1085640177@qq.com"
] | 1085640177@qq.com |
018f409190587b0f73f81fbc955a1fe528f94f44 | 1dfe57ecf168d954b38248f47bca080c8bb789d3 | /djangoenv/bin/pip | 34f5d03d3441d7c12d867e340ab3a3b70cb95d13 | [] | no_license | deb133/mysite | ab01cb053f2eac6675bde3eb429aa26def0bb76f | 1d465bbaaa736eecdf85231478f6cf5f05637726 | refs/heads/master | 2016-08-07T11:49:58.517062 | 2015-03-11T02:09:56 | 2015-03-11T02:09:56 | 31,602,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | #!/Users/DarcyElizabeth/Python/mysite/djangoenv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Generated console-script wrapper: strip setuptools' "-script.pyw"/".exe"
    # suffix from argv[0] so pip reports a clean program name, then exit with
    # pip's own status code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"darcy.e.balcarce@gmail.com"
] | darcy.e.balcarce@gmail.com | |
42bce4085193456af583fe4bd69f5b879e5fe92f | a39224fcd17ff2adb77fa643afed63bc3342a3f4 | /setup.py | e8128dd9f0742381369839c237e8c5bf807d6ee0 | [
"MIT"
] | permissive | HemuManju/reaction-time-classification | ef9ddb241803a16b4b9411eaa8375e8b25fcc9e1 | 8d468516c0591359e082fb8bc5850f8e89e5a6e4 | refs/heads/master | 2023-01-14T09:10:04.142946 | 2021-09-22T19:49:32 | 2021-09-22T19:49:32 | 179,614,766 | 0 | 0 | MIT | 2022-12-27T15:36:26 | 2019-04-05T03:39:43 | Python | UTF-8 | Python | false | false | 261 | py | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Classification of reaction time of an \
operator performing tele-operation',
author='Hemanth ',
license='MIT',
)
| [
"hemanthm2277@gmail.com"
] | hemanthm2277@gmail.com |
a5d02be7324103df8b24f058e3b8de41af441989 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02768/s686694566.py | 553aafe96bef5565407dfea61c0ba091a9ef4718 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | n, a, b = list(map(int, input().split(' ')))
# Binomial coefficients under a prime modulus (search: "binomial coefficient mod").
# mmm is the prime 10**9 + 7; the tables below are filled by init():
# fac[k] = k! mod mmm, inv[k] = modular inverse of k, inv_fac[k] = (k!)^{-1} mod mmm.
mmm = 1000000000 + 7
fac = []
inv = []
inv_fac = []
def init(n):
    """Fill the module-level tables fac / inv / inv_fac for indices 0..n-1,
    all taken modulo the prime mmm."""
    # Seed the recurrences for 0 and 1.
    fac.extend([1, 1])
    inv.extend([0, 1])
    inv_fac.extend([1, 1])
    for k in range(2, n):
        fac.append(fac[-1] * k % mmm)
        # Standard identity: inv[k] = -(mmm // k) * inv[mmm % k] (mod mmm).
        inv.append(mmm - inv[mmm % k] * (mmm // k) % mmm)
        inv_fac.append(inv_fac[-1] * inv[-1] % mmm)
def choice(a, b):
    """Binomial coefficient C(a, b) modulo mmm, via the falling factorial
    a*(a-1)*...*(a-b+1) multiplied by the precomputed inverse of b!."""
    if a < b:
        return 0
    prod = 1
    for step in range(b):
        # Factors must be based on a, not n -- that was a past bug here.
        prod = prod * (a - step) % mmm
    return prod * inv_fac[b]
# Build the factorial tables large enough for n up to 2*10**5.
init(int(2e5) + 1)
# Number of non-empty subsets of n items...  pow(value, exponent, modulus)
ans = pow(2, n, mmm) - 1
# ...minus the subsets of the two forbidden sizes a and b.
# (The original also computed an unused "bunshi" falling factorial here --
# dead code, removed.)
ans -= choice(n, a)
ans -= choice(n, b)
print(ans % mmm)
'''
4, 1, 3 => 4c2 + 4c4 -> 6+1 = 7
4 + 6 + 4 + 1 - 4c1 - 4c2
1 1
11 2
121 4
1331 8
14641 16, 0が無いので-1, 大きい combination -> 二項係数 mod [検索]
'''
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1463290d7d0b564ec4fce23796ea7e90f1adea53 | fbb1550dc5437d672ed0137bd7711eba3290dee3 | /students/ganyoa/lessons/lesson01/assignment/inventory_management/electric_appliances_class.py | 5b6a4aebc903f56735730c5623a3178938bba064 | [] | no_license | JavaRod/SP_Python220B_2019 | 2cc379daf5290f366cf92dc317b9cf68e450c1b3 | 5dac60f39e3909ff05b26721d602ed20f14d6be3 | refs/heads/master | 2022-12-27T00:14:03.097659 | 2020-09-27T19:31:12 | 2020-09-27T19:31:12 | 272,602,608 | 1 | 0 | null | 2020-06-16T03:41:14 | 2020-06-16T03:41:13 | null | UTF-8 | Python | false | false | 1,066 | py | '''Electric appliances class'''
from inventory_management.inventory_class import Inventory
#from inventory_class import Inventory
class ElectricAppliances(Inventory):
    '''
    Electric-appliance inventory item: the standard Inventory attributes
    plus a brand and an operating voltage.
    '''
    def __init__(self, product_code, description, market_price, rental_price, brand, voltage):
        '''Initialise the shared Inventory fields, then the appliance-specific ones.'''
        Inventory.__init__(self, product_code, description, market_price, rental_price)
        self.brand = brand
        self.voltage = voltage
    def return_as_dictionary(self):
        '''
        Return every instance attribute in a plain dictionary.
        '''
        return {
            'product_code': self.product_code,
            'description': self.description,
            'market_price': self.market_price,
            'rental_price': self.rental_price,
            'brand': self.brand,
            'voltage': self.voltage,
        }
| [
"ganyoa@gmail.com"
] | ganyoa@gmail.com |
96c06261331dd8e373f7552b6c08266873eeb256 | ebe994a845740ff5b8951a1ad9f0eddf6ebab508 | /pokeSite/pokeSite/settings.py | c329a1888c6b7559e8c9da5cede45fd6c2273c6d | [] | no_license | brando-squire/Hack_Pokedex_app | 997c60cfc66b7a89a518df14b2fadbaae8abc107 | 1116487394e60cb26786ae20948330ab28632bad | refs/heads/master | 2020-04-07T01:06:49.257262 | 2018-11-17T19:42:21 | 2018-11-17T19:42:21 | 157,930,997 | 0 | 0 | null | 2018-11-17T18:46:58 | 2018-11-16T23:04:28 | JavaScript | UTF-8 | Python | false | false | 3,128 | py | """
Django settings for pokeSite project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'kiyjff@b=1+azn!2wjg#2v+_)efu7xvdi^+q=!)m(253459=-='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'pokedex.apps.PokedexConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pokeSite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'pokeSite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Development-only SQLite database stored next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"squiresbrandon@yahoo.com"
] | squiresbrandon@yahoo.com |
352ddf87ea2fdb4df2a799b35d0b72ef0da338c3 | b59a2ad40efead9850c2c9cbff93b2a00c6c47bb | /app/__init__.py | c5833da6e81fbcc2b9193c1bf3248a77a4f678d3 | [] | no_license | khaled-sayed/news_app_python_flask | 67704bb37e0a4dd32d2b7a26d22671d74f59aec1 | 440cc3d0915e79b601b0154d4d13912cd1c2aebd | refs/heads/main | 2023-01-03T21:53:38.227252 | 2020-11-04T15:21:56 | 2020-11-04T15:21:56 | 310,038,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | from flask import Flask, render_template
from dotenv import load_dotenv
import config
import os
from flask_login import LoginManager
from werkzeug.utils import secure_filename
# NOTE(review): abspath(__name__) resolves the module-name *string* against
# the CWD -- probably abspath(__file__) was intended; confirm. `base` also
# appears unused in this module.
base = os.path.dirname(os.path.abspath(__name__))
# Where uploaded news images are stored (relative to the working directory).
UPLOAD_FOLDER = 'app/static/images/upload/news'
# ALLOWED_EXTINSTIONS = set(['jpeg','jpg','png'])
app = Flask(__name__)
# Load environment variables from the .env file one level above the package.
APP_ROOT = os.path.join(os.path.dirname(__file__), "..")
dontenv_path = os.path.join(APP_ROOT, ".env")
load_dotenv(dontenv_path)
# Select the settings class according to FLASK_ENV (e.g. config.settings.dev).
app.config.from_object('config.settings.'+ os.environ.get('FLASK_ENV'))
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# NOTE(review): the next line looks like leftover debug code -- it re-assigns
# the same config key and leaves an unused module-level `test` alias.
test = app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Models are imported only after `app` exists so SQLAlchemy binds correctly.
from app.models import db
from app.models.admins import Admin
from app.models.categories import Categorie
from app.models.posts import Post
from app.models.users import User
db.create_all()
db.session.commit()
# Flask-Login: unauthenticated users are redirected to the admin login view.
login_manger = LoginManager()
login_manger.login_view = 'auth.admin_login'
login_manger.init_app(app)
# Create Page Not Found
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 template, preserving the 404 status code."""
    return render_template('errors/404.html'), 404
@login_manger.user_loader
def load_user(user_id):
    """Flask-Login callback: map the session's user id to an Admin row."""
    return Admin.query.get(int(user_id))
# Register BluePrints
from app.views.auth import auth as auth_blueprint
from app.views.dashboard import dash as dash_blueprint
from app.views.home import home as home_blueprint
# Mount the three feature areas (auth, dashboard, public site) on the app.
app.register_blueprint(auth_blueprint)
app.register_blueprint(dash_blueprint)
app.register_blueprint(home_blueprint)
"62744648+khaled-sayed@users.noreply.github.com"
] | 62744648+khaled-sayed@users.noreply.github.com |
6cd3405367f78e286064a46db46b69d8c5f63689 | 900bd035cc429c05307cc6a757cb3482c9074daf | /venv/Scripts/pip3-script.py | 58e93e211506308ae97457d9bbaf4c500f759828 | [] | no_license | maxshtun999/PythonBot1 | 2eff02138b0fced9b8ad6e46608c449c7be83246 | 19f81538f51fe5d0bef7e733cb16938bf949c0da | refs/heads/master | 2022-10-13T10:15:53.866144 | 2020-01-06T14:48:51 | 2020-01-06T14:48:51 | 232,130,241 | 0 | 1 | null | 2022-10-02T05:43:22 | 2020-01-06T15:32:37 | Python | UTF-8 | Python | false | false | 421 | py | #!"C:\Users\Shtun Max\PycharmProjects\PythonBot\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Generated console-script wrapper: normalize argv[0] (drop the
    # "-script.pyw" / ".exe" wrapper suffix), then run pip 19.0.3's 'pip3'
    # entry point and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"maxshtun99@gmail.com"
] | maxshtun99@gmail.com |
0d49f2b63100c3cb5e7ca83a6451e19ffa5f3676 | ecc17e230795f33d94ed33547b1d0f14c602b9ba | /HW12 (Ch04, HMM problem 1_Forward_Backward)/HW12.py | 4446294c4970e10df44507035199cda2add05e89 | [] | no_license | henk2525/NYUST-speech-recognition | 1e8a2159572c756152234f888b5814ddd7baf1ef | ea75d67d6c1d6a4db51064c00cae455961dd5b32 | refs/heads/master | 2022-05-07T12:50:39.224382 | 2019-08-04T12:38:55 | 2019-08-04T12:53:21 | 200,493,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | # -*- coding: utf-8 -*-
"""
Created at 2019/4/28
@author: henk guo
"""
import numpy as np
import dspBox
def _load_observation(filename):
    """Read one observation file and convert it with the course toolbox.

    Uses `with` so the file handle is closed (the original leaked three
    handles via open(...).read()).
    """
    with open(filename, 'r') as fh:
        return dspBox.str2ndar(fh.read())

obs1 = _load_observation('obser1.txt')
obs2 = _load_observation('obser2.txt')
obs3 = _load_observation('obser3.txt')
# HMM parameters for the three candidate models: transition matrix a,
# emission matrix b and initial state distribution pi (3 states, 3 symbols).
a1 = np.array([[0.2, 0.7, 0.1], [0.1, 0.2, 0.7], [0.7, 0.1, 0.2]])
b1 = np.array([[0.5, 0.4, 0.1], [0.7, 0.2, 0.1], [0.7, 0.1, 0.2]])
pi1 = np.array([0.7, 0.2, 0.1])
a2 = np.array([[0.7, 0.2, 0.1], [0.3, 0.6, 0.1], [0.1, 0.2, 0.7]])
b2 = np.array([[0.1, 0.8, 0.1], [0.2, 0.7, 0.1], [0.4, 0.5, 0.1]])
pi2 = np.array([0.1, 0.7, 0.2])
a3 = np.array([[0.2, 0.7, 0.1], [0.6, 0.3, 0.1], [0.2, 0.7, 0.1]])
b3 = np.array([[0.1, 0.2, 0.7], [0.2, 0.2, 0.6], [0.3, 0.1, 0.6]])
pi3 = np.array([0.2, 0.2, 0.6])
obs, a, b, pi = [obs1, obs2, obs3], [a1, a2, a3], [b1, b2, b3], [pi1, pi2, pi3]
# Score each 50-symbol observation sequence against the three HMMs with both
# the forward and the backward algorithm; the two likelihoods should agree,
# which is the point of the exercise.
for obsi in range(3):  # Index of observation
    print("obser" + str(obsi + 1))
    for mi in range(3):  # Index of model
        # =====forward algorithm=====
        # forwardp[t, s] = P(o_0..o_t, state_t = s | model mi)
        forwardp = np.zeros((50, 3))
        forwardp[0] = [pi[mi][state] * b[mi][state, obs[obsi][0]] for state in range(3)]  # initial state
        for i in range(1, 50):  # start, 1~49
            for state in range(3):
                for from_ in range(3):
                    forwardp[i, state] += forwardp[i-1, from_] * a[mi][from_, state] * b[mi][state, obs[obsi][i]]
        # Total likelihood: sum over the final forward column.
        fp = np.sum(forwardp[-1])
        # =====backward algorithm=====
        # backwardp[t, s] = P(o_{t+1}..o_49 | state_t = s, model mi)
        backwardp = np.zeros((50, 3))
        backwardp[-1, :] = 1  # initial state
        for i in range(48, -1, -1):  # start, 48~0
            for state in range(3):
                for to in range(3):
                    backwardp[i, state] += backwardp[i + 1, to] * a[mi][state, to] * b[mi][to, obs[obsi][i + 1]]
        # Fold in the initial distribution and the first emission.
        bp = sum([pi[mi][state] * backwardp[0, state] * b[mi][state, obs[obsi][0]] for state in range(3)])
        print('model_{:d} forward:{:.6e} backward:{:.6e}'.format(mi, fp, bp))
"a0956072892@gmail.com"
] | a0956072892@gmail.com |
7bdd5fad0213f6c34d28ec35b754a74ac25ce4db | 0d05c5e96e91986bbfeda648b502f585885a596c | /trekbooking/wsgi.py | 6bc6d65ef0a74cc3ade9ff8870050e907c71bdc5 | [] | no_license | callingsandesh/ComeOn | 9507e1eba984dca76a7342845394754b2e4f598d | f3c541a62fa79c81e167a661ec45b8ced2c5b4b2 | refs/heads/master | 2021-01-25T13:05:15.985282 | 2020-01-23T14:39:59 | 2020-01-23T14:39:59 | 123,523,896 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for trekbooking project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django knows which settings module to use before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trekbooking.settings')
# The WSGI callable imported by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"arvik.rai3377@gmail.com"
] | arvik.rai3377@gmail.com |
a3bc969b5283c5f611660bb173b2d3769ae854c3 | 2a68b03c923119cc747c4ffcc244477be35134bb | /interviews/A/VO/wordLadderII.py | a00dffb9d9e8ec178fca30545a4ec9ff564ba284 | [] | no_license | QitaoXu/Lintcode | 0bce9ae15fdd4af1cac376c0bea4465ae5ea6747 | fe411a0590ada6a1a6ae1166c86c585416ac8cda | refs/heads/master | 2020-04-24T20:53:27.258876 | 2019-09-24T23:54:59 | 2019-09-24T23:54:59 | 172,259,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,500 | py | from collections import deque
class Solution:
    """
    @param: start: a string
    @param: end: a string
    @param: dict: a set of string
    @return: a list of lists of string
    """
    def findLadders(self, start, end, dict):
        """Return every shortest transformation ladder from start to end.

        Strategy: BFS from `end` labels each reachable word with its distance
        to the target; a DFS from `start` then only follows edges that
        decrease that distance, which enumerates exactly the shortest paths.
        Note: `dict` (the word set) is mutated -- start and end are added.
        """
        dict.add(start)
        dict.add(end)
        distance = {}
        self.bfs(end, start, dict, distance)
        # Bug fix: if `start` was never reached by the BFS there is no
        # ladder; the old code fell into dfs and raised a KeyError on the
        # missing distance entry.  Return the empty result instead.
        if start not in distance:
            return []
        results = []
        path = [start]
        self.dfs(start, end, path, dict, distance, results)
        return results
    def bfs(self, start, end, wordDict, distance):
        """Breadth-first search from `start`, filling distance[word] with the
        hop count from `start`.  (`end` is unused; kept for compatibility.)"""
        queue = deque()
        queue.append(start)
        distance[start] = 0
        while queue:
            size = len(queue)
            for _ in range(size):
                word = queue.popleft()
                for next_word in self.get_next_words(word):
                    if next_word not in wordDict:
                        continue
                    if next_word in distance:
                        continue
                    queue.append(next_word)
                    distance[next_word] = distance[word] + 1
    def get_next_words(self, word):
        """Return every string that differs from `word` in exactly one
        lowercase letter, in deterministic (position, letter) order."""
        next_words = []
        for i in range(len(word)):
            left, right = word[: i], word[i + 1:]
            for c in "abcdefghijklmnopqrstuvwxyz":
                if c == word[i]:
                    continue
                next_word = left + c + right
                next_words.append(next_word)
        return next_words
    def dfs(self, curt, target, path, wordDict, distance, results):
        """Walk from `curt` toward `target`, only along words whose distance
        label decreases by one, appending each completed ladder to results."""
        if curt == target:
            results.append(path.copy())
            return
        for next_word in self.get_next_words(curt):
            if next_word not in wordDict:
                continue
            # Defensive: skip words the BFS never labeled.
            if next_word not in distance or distance[next_word] != distance[curt] - 1:
                continue
            path.append(next_word)
            self.dfs(next_word, target, path, wordDict, distance, results)
            path.pop()
| [
"jeremyxuqitao@outlook.com"
] | jeremyxuqitao@outlook.com |
2e1785d76fd4532236e24443c86dc1718f1136ed | 0e5bbc4eab562ae85c3e0972e5789dd2c14eb297 | /5.4_server.py | 772387cd4348bf536b82fc5ac4298df49aa43065 | [] | no_license | kint7/lab5 | c84d71a7b00b03a219d5c8fe7cb541d5ee0c29d2 | eb43c926a8d02f593ae7754a3d6e1feb31256833 | refs/heads/master | 2023-01-24T03:49:21.603390 | 2020-12-09T08:38:16 | 2020-12-09T08:38:16 | 317,923,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | # Importing socket library
import socket
# Create a TCP socket (defaults: AF_INET, SOCK_STREAM).
s = socket.socket()
# Choose one port and announce where the server will listen.
host_name = socket.gethostname()
IPADDRESS = socket.gethostbyname(host_name)
PORT = 9898
print(">>>IP address of the server: ", IPADDRESS)
print(">>>Server is listening on port: ", PORT)
print("\n>Waiting for connection from a client...")
# Bind to the chosen port on all interfaces.
s.bind(('', PORT))
# Put the server into listening mode (backlog of 10 pending connections).
s.listen(10)
# Block until a client connects; the loop exists only so we can `break`
# after serving exactly one client.
while True:
    # Establish the connection with the client.
    conn, addr = s.accept()
    # Send a hello message to the client.
    msg = "\n\nHi, Client [IP address: "+ addr[0] + "], \nThank you for using our storage service. \nYour files are safe with us.\n-Server\n"
    conn.send(msg.encode())
    # The client's first message is the destination file name.
    # NOTE(review): the name is passed verbatim to open() -- a malicious
    # client can write anywhere the process can (path traversal); sanitize.
    filename = conn.recv(1024).decode("utf-8")
    file = open(filename, "wb")
    # Receive the file contents until the client closes its side (empty recv).
    RecvData = conn.recv(99999)
    while RecvData:
        file.write(RecvData)
        RecvData = conn.recv(99999)
    # Close the file opened at server side once the copy is completed.
    file.close()
    print("\n>File has been copied successfully \n")
    # Close connection with the client.
    conn.close()
    print(">Server closed the connection \n")
    # Come out of the infinite while loop: the single file has been copied.
    break
| [
"2018276758@isiswa.uitm.edu.my"
] | 2018276758@isiswa.uitm.edu.my |
1e73bcb3091075ebead0ba1e029588dec88fead0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/46/usersdata/98/17294/submittedfiles/funcoes1.py | 94f0f1ec40294a695cc95ea950a44bec636efae5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | # -*- coding: utf-8 -*-
from __future__ import division
def crescente(lista):
    """Return True when `lista` is strictly increasing.

    Matches the original counter-based semantics exactly: an empty list
    yields False, a single-element list yields True.
    """
    if not lista:
        return False
    # Idiomatic form of "every adjacent pair increases".
    return all(lista[i] < lista[i + 1] for i in range(len(lista) - 1))
#escreva as demais funções
def decrescente(lista):
    """Return True when `lista` is strictly decreasing.

    Matches the original counter-based semantics exactly: an empty list
    yields False, a single-element list yields True.
    """
    if not lista:
        return False
    # Idiomatic form of "every adjacent pair decreases".
    return all(lista[i] > lista[i + 1] for i in range(len(lista) - 1))
def iguais(lista):
    """Return True when any two adjacent elements of `lista` are equal
    (False for empty or single-element lists)."""
    # Idiomatic form of the original "count equal neighbours, test > 0".
    return any(lista[i] == lista[i + 1] for i in range(len(lista) - 1))
#escreva o programa principal
# NOTE(review): this script follows Python 2 semantics -- input() is expected
# to evaluate what the user types, so `n` becomes an int.
n = input('Digite a quantidade de intens nas listas: ')
# Read the three lists with exactly the same prompts as before
# (all of `a` first, then `b`, then `c`).
a = []
b = []
c = []
for nome, lista in (('a', a), ('b', b), ('c', c)):
    for _ in range(n):
        lista.append(input('Digite um valor para a lista %s: ' % nome))
# For each list print S/N for: strictly increasing, strictly decreasing,
# and "has equal neighbours" -- same output order as the unrolled original.
for lista in (a, b, c):
    for verifica in (crescente, decrescente, iguais):
        print('S' if verifica(lista) else 'N')
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
376003ae4440bb82d9c769811431381b61dbebbc | 61465dc9aded40737193de67edbbac89b98b2e83 | /leadmanager/accounts/api.py | 0a668ef7aa1a5a35e75aa82c97d3c58197b43a9b | [] | no_license | NganHaVan/PythonPractice | e0000f62f5eebc70695af447babc1e638ef874a9 | 3a7e376ef691458e720ec9bdef1d41b0b1fcc400 | refs/heads/master | 2023-01-08T22:15:37.082747 | 2019-06-05T09:36:51 | 2019-06-05T09:36:51 | 188,797,453 | 0 | 0 | null | 2023-01-07T05:49:32 | 2019-05-27T07:54:14 | Python | UTF-8 | Python | false | false | 1,401 | py | from knox.models import AuthToken
from rest_framework import generics, permissions
from rest_framework.decorators import permission_classes
from rest_framework.response import Response
from .serializers import LoginSerializer, RegisterSerializer, UserSerializer
# Register API
class RegisterAPI(generics.GenericAPIView):
    """Create a user account and answer with the user plus a knox token."""
    serializer_class = RegisterSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        new_user = serializer.save()
        payload = {
            "user": UserSerializer(new_user, context=self.get_serializer_context()).data,
            "token": AuthToken.objects.create(new_user)[1],
        }
        return Response(payload)
# Login API
class LoginAPI(generics.GenericAPIView):
    """Authenticate credentials and answer with the user plus a knox token."""
    serializer_class = LoginSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        logged_in = serializer.validated_data
        payload = {
            "user": UserSerializer(logged_in, context=self.get_serializer_context()).data,
            "token": AuthToken.objects.create(logged_in)[1],
        }
        return Response(payload)
# Get UserAPI
class UserAPI(generics.RetrieveAPIView):
    """Return the authenticated user's own profile."""
    permission_classes = [
        permissions.IsAuthenticated,
    ]
    serializer_class = UserSerializer
    # NOTE: Authorization should be 'Token + token code'
    def get_object(self):
        # RetrieveAPIView normally looks an object up by pk; here the
        # retrieved object is always the requesting user.
        return self.request.user
| [
"vannunganha@gmail.com"
] | vannunganha@gmail.com |
c470665fd971ef55fbcbf2c680c5254eb0e69e51 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/memre/armv7l/obsolete/corp2/system/base/man-pages/actions.py | e17573a3dc5e34c142d651a5d3274ff1b0d7e803 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import crosstools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def install():
crosstools.rawInstall("DESTDIR=%s" % get.installDIR())
crosstools.rawInstall("DESTDIR=%s -C ../man-pages-posix-2003-a" % get.installDIR())
# These come from attr
pisitools.remove("/usr/share/man/man2/flistxattr.2")
pisitools.remove("/usr/share/man/man2/removexattr.2")
pisitools.remove("/usr/share/man/man2/fgetxattr.2")
pisitools.remove("/usr/share/man/man2/fsetxattr.2")
pisitools.remove("/usr/share/man/man2/lsetxattr.2")
pisitools.remove("/usr/share/man/man2/lremovexattr.2")
pisitools.remove("/usr/share/man/man2/listxattr.2")
pisitools.remove("/usr/share/man/man2/getxattr.2")
pisitools.remove("/usr/share/man/man2/setxattr.2")
pisitools.remove("/usr/share/man/man2/llistxattr.2")
pisitools.remove("/usr/share/man/man2/fremovexattr.2")
pisitools.remove("/usr/share/man/man2/lgetxattr.2")
# These come from libcap
pisitools.remove("/usr/share/man/man2/capget.2")
pisitools.remove("/usr/share/man/man2/capset.2")
# Comes from xorg-input
pisitools.remove("/usr/share/man/man4/mouse.4")
pisitools.dodoc("man-pages-*.Announce", "README")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
e09fa967d92023bca99cf54837fa105b810f030b | 4cb1fd4f6f4a8e57ca3d222007c1a5cde3deebfe | /Chapter04/pingmesh.py | 8fb17a404fcdee3635ee36367d652fcfefc59daf | [
"MIT"
] | permissive | PacktPublishing/Python-Network-Programming | 838097e82762fed54fbc2abffe4db71d5acd350b | 416cea10fe56b2312cf25150ed7ba27837cddf18 | refs/heads/master | 2023-02-11T00:34:43.611524 | 2023-01-30T08:58:32 | 2023-01-30T08:58:32 | 166,771,428 | 52 | 51 | null | null | null | null | UTF-8 | Python | false | false | 2,532 | py | import getmeshvalue
from getmeshvalue import getallvalues
getdevinformation={}
devicenamemapping={}
arraydeviceglobal=[]
pingmeshvalues={}
arraydeviceglobal=["192.168.255.240","192.168.255.245","192.168.255.248","192.168.255.249","4.2.2.2"]
devicenamemapping['192.168.255.240']="R1"
devicenamemapping['192.168.255.245']="R2"
devicenamemapping['192.168.255.248']="R3"
devicenamemapping['192.168.255.249']="R4"
devicenamemapping['4.2.2.2']="Random"
def getmeshvalues():
global arraydeviceglobal
global pingmeshvalues
arraydeviceglobal=sorted(set(arraydeviceglobal))
tval=getallvalues(arraydeviceglobal)
pingmeshvalues = dict(tval)
getmeshvalues()
def createhtml():
global arraydeviceglobal
fopen=open("C:\pingmesh\pingmesh.html","w") ### this needs to be changed as web path of the html location
head="""<html><head><meta http-equiv="refresh" content="60" ></head>"""
head=head+"""<script type="text/javascript">
function updatetime() {
var x = new Date(document.lastModified);
document.getElementById("modified").innerHTML = "Last Modified: "+x+" ";
}
</script>"""+"<body onLoad='updatetime();'>"
head=head+"<div style='display: inline-block;float: right;font-size: 80%'><h4><h4><p id='modified'></p></div>"
head=head+"<div style='display: inline-block;float: left;font-size: 90%'></h4><center><h2>Network Health Dashboard<h2></div>"
head=head+"<br><div><table border='1' align='center'><caption><b>Ping Matrix</b></caption>"
head=head+"<center><br><br><br><br><br><br><br><br>"
fopen.write(head)
dval=""
fopen.write("<tr><td>Devices</td>")
for fromdevice in arraydeviceglobal:
fopen.write("<td><b>"+devicenamemapping[fromdevice]+"</b></td>")
fopen.write("</tr>")
for fromdevice in arraydeviceglobal:
fopen.write("<tr>")
fopen.write("<td><b>"+devicenamemapping[fromdevice]+"</b></td>")
for todevice in arraydeviceglobal:
askvalue=fromdevice+":"+todevice
if (askvalue in pingmeshvalues):
getallvalues=pingmeshvalues.get(askvalue)
bgcolor='lime'
if (getallvalues == "False"):
bgcolor='salmon'
fopen.write("<td align='center' font size='2' height='2' width='2' bgcolor='"+bgcolor+"'title='"+askvalue+"'>"+"<font color='white'><b>"+getallvalues+"</b></font></td>")
fopen.write("</tr>\n")
fopen.write("</table></div>")
fopen.close()
createhtml()
print("All done!!!!")
| [
"rutujay@packt.com"
] | rutujay@packt.com |
f87a9d541cd2f410d4566f9b6a619b6f8385df77 | b7319d10078c0fdf5d9ece02f976e64d19ff870e | /Mathematical/Multiply.py | 6a1eca5fdc9c2b25bd4a3683d8376cc2f0685d5a | [] | no_license | parhamrp/Python3 | 5a4e91941a411e52f252819fdcd65924c0466e10 | dba4e7b22c39e10dfee8f49faa24e6705ce894a5 | refs/heads/master | 2020-09-11T01:19:34.543569 | 2019-12-08T17:14:09 | 2019-12-08T17:14:09 | 221,892,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | # Write a Python function to multiply all the numbers in a list.
def multiply(lst):
total = 1
for item in lst:
total *= item
# total = lst[item] * lst[item+1]
return total
# print(multiply([1,2,4,-3,3]))
| [
"noreply@github.com"
] | noreply@github.com |
dafc3e377763e40bd4c4d5e4406d87111ac9744b | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/interactive-physics-editor/operators/setup_phys_drawing.py | 01edd2b8c2993ca95f30bc14ca621432a93ca02a | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,321 | py | # Copyright (C) 2018 Christopher Gearhart
# chris@bblanimation.com
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import bmesh
import math
# Blender imports
import bpy
import bgl
import blf
from bpy_extras.view3d_utils import location_3d_to_region_2d, region_2d_to_location_3d, region_2d_to_origin_3d, region_2d_to_vector_3d
from bpy.types import SpaceView3D
from bpy.props import *
from ..functions import *
class interactive_sim_drawing():
##############################################
# Draw handler function
# from CG Cookie's retopoflow plugin
def ui_start(self):
# # report something useful to user
# bpy.context.area.header_text_set("Click & drag to add bricks (+'ALT' to remove). Press 'RETURN' to commit changes")
# update dpi
prefs = get_preferences(bpy.context)
ui_scale = prefs.view.ui_scale
pixel_size = prefs.system.pixel_size
self.dpi = int(72 * ui_scale * pixel_size)
# add callback handlers
self.cb_pr_handle = SpaceView3D.draw_handler_add(self.draw_callback_preview, (bpy.context, ), 'WINDOW', 'PRE_VIEW')
# self.cb_pv_handle = SpaceView3D.draw_handler_add(self.draw_callback_postview, (bpy.context, ), 'WINDOW', 'POST_VIEW')
# self.cb_pp_handle = SpaceView3D.draw_handler_add(self.draw_callback_postpixel, (bpy.context, ), 'WINDOW', 'POST_PIXEL')
# darken other spaces
self.spaces = [
bpy.types.SpaceClipEditor,
bpy.types.SpaceConsole,
bpy.types.SpaceDopeSheetEditor,
bpy.types.SpaceFileBrowser,
bpy.types.SpaceGraphEditor,
bpy.types.SpaceImageEditor,
bpy.types.SpaceInfo,
bpy.types.SpaceLogicEditor,
bpy.types.SpaceNLA,
bpy.types.SpaceNodeEditor,
bpy.types.SpaceOutliner,
bpy.types.SpaceProperties,
bpy.types.SpaceSequenceEditor,
bpy.types.SpaceTextEditor,
bpy.types.SpaceTimeline,
#bpy.types.SpaceUVEditor, # <- does not exist?
bpy.types.SpaceUserPreferences,
#'SpaceView3D', # <- specially handled
]
self.areas = [ 'WINDOW', 'HEADER' ]
# ('WINDOW', 'HEADER', 'CHANNELS', 'TEMPORARY', 'UI', 'TOOLS', 'TOOL_PROPS', 'PREVIEW')
# self.cb_pp_tools = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'TOOLS', 'POST_PIXEL')
self.cb_pp_props = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'TOOL_PROPS', 'POST_PIXEL')
self.cb_pp_ui = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'UI', 'POST_PIXEL')
self.cb_pp_header = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'HEADER', 'POST_PIXEL')
self.cb_pp_all = [
(s, a, s.draw_handler_add(self.draw_callback_cover, (bpy.context,), a, 'POST_PIXEL'))
for s in self.spaces
for a in self.areas
]
self.draw_preview()
tag_redraw_areas()
def ui_end(self):
# remove callback handlers
if hasattr(self, 'cb_pr_handle'):
SpaceView3D.draw_handler_remove(self.cb_pr_handle, "WINDOW")
del self.cb_pr_handle
if hasattr(self, 'cb_pv_handle'):
SpaceView3D.draw_handler_remove(self.cb_pv_handle, "WINDOW")
del self.cb_pv_handle
if hasattr(self, 'cb_pp_handle'):
SpaceView3D.draw_handler_remove(self.cb_pp_handle, "WINDOW")
del self.cb_pp_handle
if hasattr(self, 'cb_pp_tools'):
SpaceView3D.draw_handler_remove(self.cb_pp_tools, "TOOLS")
del self.cb_pp_tools
if hasattr(self, 'cb_pp_props'):
SpaceView3D.draw_handler_remove(self.cb_pp_props, "TOOL_PROPS")
del self.cb_pp_props
if hasattr(self, 'cb_pp_ui'):
SpaceView3D.draw_handler_remove(self.cb_pp_ui, "UI")
del self.cb_pp_ui
if hasattr(self, 'cb_pp_header'):
SpaceView3D.draw_handler_remove(self.cb_pp_header, "HEADER")
del self.cb_pp_header
if hasattr(self, 'cb_pp_all'):
for s,a,cb in self.cb_pp_all: s.draw_handler_remove(cb, a)
del self.cb_pp_all
tag_redraw_areas()
def draw_callback_preview(self, context):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS) # save OpenGL attributes
try: self.draw_preview()
except: interactive_physics_handle_exception()
bgl.glPopAttrib() # restore OpenGL attributes
# def draw_callback_postview(self, context):
# # self.drawing.update_dpi()
# # self.drawing.set_font_size(12, force=True)
# # self.drawing.point_size(1)
# # self.drawing.line_width(1)
# bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS) # save OpenGL attributes
# try: self.draw_postview()
# except: handle_exception()
# bgl.glPopAttrib() # restore OpenGL attributes
def draw_callback_postpixel(self, context):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS) # save OpenGL attributes
try: self.draw_postpixel()
except: handle_exception()
bgl.glPopAttrib() # restore OpenGL attributes
def draw_callback_cover(self, context):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS)
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glColor4f(0,0,0,0.5) # TODO: use window background color??
bgl.glEnable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glBegin(bgl.GL_QUADS) # TODO: not use immediate mode
bgl.glVertex2f(-1, -1)
bgl.glVertex2f( 1, -1)
bgl.glVertex2f( 1, 1)
bgl.glVertex2f(-1, 1)
bgl.glEnd()
bgl.glPopMatrix()
bgl.glPopAttrib()
def draw_preview(self):
bgl.glEnable(bgl.GL_MULTISAMPLE)
bgl.glEnable(bgl.GL_LINE_SMOOTH)
bgl.glHint(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)
bgl.glEnable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_POINT_SMOOTH)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPushMatrix()
bgl.glLoadIdentity()
# add background gradient
bgl.glBegin(bgl.GL_TRIANGLES)
for i in range(0,360,10):
r0,r1 = i*math.pi/180.0, (i+10)*math.pi/180.0
x0,y0 = math.cos(r0)*2,math.sin(r0)*2
x1,y1 = math.cos(r1)*2,math.sin(r1)*2
bgl.glColor4f(0,0,0.01,0.0)
bgl.glVertex2f(0,0)
bgl.glColor4f(0,0,0.01,0.8)
bgl.glVertex2f(x0,y0)
bgl.glVertex2f(x1,y1)
bgl.glEnd()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPopMatrix()
def draw_postpixel(self):
dtext = " 'D' for Draw/Cut Tool"
mtext = " 'S' for Merge/Split Tool"
ptext = " 'M' for Material Paintbrush Tool"
# draw instructions text
if self.mode == "DRAW":
text = "Click & drag to add bricks"
self.draw_text_2d(text, position=(50, 250))
text = "+'ALT' to remove"
self.draw_text_2d(text, position=(50, 220))
text = "+'SHIFT' to cut"
self.draw_text_2d(text, position=(50, 190))
dtext = "*" + dtext[1:]
elif self.mode == "MERGE/SPLIT":
text = "Click & drag to merge bricks"
self.draw_text_2d(text, position=(50, 250))
text = "+'ALT' to split horizontally"
self.draw_text_2d(text, position=(50, 220))
text = "+'SHIFT' to split vertically"
self.draw_text_2d(text, position=(50, 190))
mtext = "*" + mtext[1:]
elif self.mode == "PAINT":
text = "Click & drag to paint bricks with target material"
self.draw_text_2d(text, position=(50, 190))
ptext = "*" + ptext[1:]
text = "'RETURN' to commit changes"
self.draw_text_2d(text, position=(50, 160))
# ...api_current/bpy.types.Area.html?highlight=bpy.types.area
header_height = bpy.context.area.regions[0].height # 26px
height = bpy.context.area.height + header_height
# draw tool switcher text
text = "Switch Tools:"
self.draw_text_2d(text, position=(40, height - 200))
self.draw_text_2d(dtext, position=(40, height - 230))
self.draw_text_2d(mtext, position=(40, height - 260))
self.draw_text_2d(ptext, position=(40, height - 290))
# if self.mode == "DRAW":
# text = "Click & drag to add bricks (+'ALT' to remove, +'SHIFT' to cut)"
# elif self.mode == "PAINT":
# text = "Click & drag to paint bricks with target material"
# elif self.mode == "MERGE/SPLIT":
# text = "Click & drag to merge bricks (+'ALT' to split horizontally, +'SHIFT' to split vertically)"
# self.draw_text_2d(text, position=(127, 80))
# text = "Press 'RETURN' to commit changes"
# self.draw_text_2d(text, position=(127, 50))
def draw_text_2d(self, text, font_id=0, color=(1, 1, 1, 1), position=(0, 0)):
# draw some text
bgl.glColor4f(*color)
blf.position(font_id, position[0], position[1], 0)
blf.size(font_id, 11, self.dpi)
blf.draw(font_id, text)
bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
# def draw_centerpoint(color, point, width=1):
# bgl.glLineWidth(width)
# bgl.glColor4f(*color)
# bgl.glBegin(bgl.GL_POINTS)
# bgl.glVertex3f(*point)
#
# def Point_to_depth(self, xyz):
# xy = location_3d_to_region_2d(self.region, self.r3d, xyz)
# if xy is None: return None
# oxyz = region_2d_to_origin_3d(self.region, self.r3d, xy)
# return (xyz - oxyz).length
#
# # def Point2D_to_Vec(self, xy:Point2D):
# # if xy is None: return None
# # return Vector(region_2d_to_vector_3d(self.actions.region, self.actions.r3d, xy))
# #
# # def Point2D_to_Origin(self, xy:Point2D):
# # if xy is None: return None
# # return Point(region_2d_to_origin_3d(self.actions.region, self.actions.r3d, xy))
# #
# # def Point2D_to_Ray(self, xy:Point2D):
# # if xy is None: return None
# # return Ray(self.Point2D_to_Origin(xy), self.Point2D_to_Vec(xy))
# #
# # def Point2D_to_Point(self, xy:Point2D, depth:float):
# # r = self.Point2D_to_Ray(xy)
# # if r is None or r.o is None or r.d is None or depth is None:
# # return None
# # return Point(r.o + depth * r.d)
# #
# # def size2D_to_size(self, size2D:float, xy:Point2D, depth:float):
# # # computes size of 3D object at distance (depth) as it projects to 2D size
# # # TODO: there are more efficient methods of computing this!
# # p3d0 = self.Point2D_to_Point(xy, depth)
# # p3d1 = self.Point2D_to_Point(xy + Vector((size2D,0)), depth)
# # return (p3d0 - p3d1).length
#
# def update_ui_mouse_pos(self):
# if self.loc is None or self.normal is None:
# self.clear_ui_mouse_pos()
# return
# depth = self.Point_to_depth(self.loc)
# if depth is None:
# self.clear_ui_mouse_pos()
# return
# rmat = Matrix.Rotation(self.oz.angle(self.normal), 4, self.oz.cross(self.normal))
# self.hit = True
# self.scale = 1 # self.rfcontext.size2D_to_size(1.0, self.mouse, depth)
# self.hit_p = self.loc
# self.hit_x = Vector(rmat * self.ox)
# self.hit_y = Vector(rmat * self.oy)
# self.hit_z = Vector(rmat * self.oz)
# self.hit_rmat = rmat
#
# def clear_ui_mouse_pos(self):
# ''' called when mouse is moved outside View3D '''
# self.hit = False
# self.hit_p = None
# self.hit_x = None
# self.hit_y = None
# self.hit_z = None
# self.hit_rmat = None
#
# @staticmethod
# @blender_version('<','2.79')
# def update_dpi():
# paintbrush._dpi = get_preferences(bpy.context).system.dpi
# if get_preferences(bpy.context).system.virtual_pixel_mode == 'DOUBLE':
# paintbrush._dpi *= 2
# paintbrush._dpi *= get_preferences(bpy.context).system.pixel_size
# paintbrush._dpi = int(paintbrush._dpi)
# paintbrush._dpi_mult = paintbrush._dpi / 72
#
# @staticmethod
# @blender_version('>=','2.79')
# def update_dpi():
# paintbrush._ui_scale = get_preferences(bpy.context).view.ui_scale
# paintbrush._sysdpi = get_preferences(bpy.context).system.dpi
# paintbrush._pixel_size = get_preferences(bpy.context).system.pixel_size
# paintbrush._dpi = 72 # get_preferences(bpy.context).system.dpi
# paintbrush._dpi *= paintbrush._ui_scale
# paintbrush._dpi *= paintbrush._pixel_size
# paintbrush._dpi = int(paintbrush._dpi)
# paintbrush._dpi_mult = paintbrush._ui_scale * paintbrush._pixel_size * paintbrush._sysdpi / 72
# s = 'DPI information: scale:%0.2f, pixel:%0.2f, dpi:%d' % (paintbrush._ui_scale, paintbrush._pixel_size, paintbrush._sysdpi)
# if s != getattr(paintbrush, '_last_dpi_info', None):
# paintbrush._last_dpi_info = s
# print(s)
#
# def draw_postview(self):
# print("HERE")
# if not self.hit: return
# print("HERE2")
#
# cx,cy,cp = self.hit_x,self.hit_y,self.hit_p
# cs_outer = self.scale * self.radius
# cs_inner = self.scale * self.radius * math.pow(0.5, 1.0 / self.falloff)
# cr,cg,cb = self.color
#
# bgl.glDepthRange(0, 0.999) # squeeze depth just a bit
# bgl.glEnable(bgl.GL_BLEND)
# # self.drawing.line_width(2.0)
# # self.drawing.point_size(3.0)
# bgl.glPointSize(max(1, 3.0 * self._dpi_mult))
#
# ######################################
# # draw in front of geometry
#
# bgl.glDepthFunc(bgl.GL_LEQUAL)
# bgl.glDepthMask(bgl.GL_FALSE) # do not overwrite depth
#
# bgl.glColor4f(cr, cg, cb, 0.75 * self.strength)
# bgl.glBegin(bgl.GL_TRIANGLES)
# for p0,p1 in zip(self.points[:-1], self.points[1:]):
# x0,y0 = p0
# x1,y1 = p1
# outer0 = (cs_outer * ((cx * x0) + (cy * y0))) + cp
# outer1 = (cs_outer * ((cx * x1) + (cy * y1))) + cp
# inner0 = (cs_inner * ((cx * x0) + (cy * y0))) + cp
# inner1 = (cs_inner * ((cx * x1) + (cy * y1))) + cp
# bgl.glVertex3f(*outer0)
# bgl.glVertex3f(*outer1)
# bgl.glVertex3f(*inner0)
# bgl.glVertex3f(*outer1)
# bgl.glVertex3f(*inner1)
# bgl.glVertex3f(*inner0)
# bgl.glEnd()
#
# bgl.glColor4f(1, 1, 1, 1) # outer ring
# bgl.glBegin(bgl.GL_LINE_STRIP)
# for x,y in self.points:
# p = (cs_outer * ((cx * x) + (cy * y))) + cp
# bgl.glVertex3f(*p)
# bgl.glEnd()
#
# # bgl.glColor4f(1, 1, 1, 0.5) # inner ring
# # bgl.glBegin(bgl.GL_LINE_STRIP)
# # for x,y in self.points:
# # p = (cs_inner * ((cx * x) + (cy * y))) + cp
# # bgl.glVertex3f(*p)
# # bgl.glEnd()
#
# bgl.glColor4f(1, 1, 1, 0.25) # center point
# bgl.glBegin(bgl.GL_POINTS)
# bgl.glVertex3f(*cp)
# bgl.glEnd()
#
# # ######################################
# # # draw behind geometry (hidden below)
# #
# # bgl.glDepthFunc(bgl.GL_GREATER)
# # bgl.glDepthMask(bgl.GL_FALSE) # do not overwrite depth
# #
# # bgl.glColor4f(cr, cg, cb, 0.10 * self.strength)
# # bgl.glBegin(bgl.GL_TRIANGLES)
# # for p0,p1 in zip(self.points[:-1], self.points[1:]):
# # x0,y0 = p0
# # x1,y1 = p1
# # outer0 = (cs_outer * ((cx * x0) + (cy * y0))) + cp
# # outer1 = (cs_outer * ((cx * x1) + (cy * y1))) + cp
# # inner0 = (cs_inner * ((cx * x0) + (cy * y0))) + cp
# # inner1 = (cs_inner * ((cx * x1) + (cy * y1))) + cp
# # bgl.glVertex3f(*outer0)
# # bgl.glVertex3f(*outer1)
# # bgl.glVertex3f(*inner0)
# # bgl.glVertex3f(*outer1)
# # bgl.glVertex3f(*inner1)
# # bgl.glVertex3f(*inner0)
# # bgl.glEnd()
# #
# # bgl.glColor4f(1, 1, 1, 0.05) # outer ring
# # bgl.glBegin(bgl.GL_LINE_STRIP)
# # for x,y in self.points:
# # p = (cs_outer * ((cx * x) + (cy * y))) + cp
# # bgl.glVertex3f(*p)
# # bgl.glEnd()
# #
# # bgl.glColor4f(1, 1, 1, 0.025) # inner ring
# # bgl.glBegin(bgl.GL_LINE_STRIP)
# # for x,y in self.points:
# # p = (cs_inner * ((cx * x) + (cy * y))) + cp
# # bgl.glVertex3f(*p)
# # bgl.glEnd()
#
# ######################################
# # reset to defaults
#
# bgl.glDepthFunc(bgl.GL_LEQUAL)
# bgl.glDepthMask(bgl.GL_TRUE)
#
# bgl.glDepthRange(0, 1)
#
# return
#############################################
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9adaee7940a1f0dbfd276202c2578faa1ef36ad7 | a97792ea5b48c1d0aacca906bc2d393b59ceb59a | /accounts/migrations/0001_initial.py | 3b079163e2466e9166ba44051a16589ed2ecc286 | [] | no_license | ebrahimasifismail/custom_login_view_django | 0de9e66846d19101506232e67c2a32777f70bdff | c1d8b3aa712b601f2c9f0e8cbeaf9080d329f00d | refs/heads/main | 2023-07-25T17:52:11.821384 | 2021-09-09T14:36:56 | 2021-09-09T14:36:56 | 404,233,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | # Generated by Django 3.2.7 on 2021-09-04 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('user_type', models.CharField(choices=[('PTNT', 'Patient'), ('DOCT', 'Doctor'), ('NURS', 'Nurse'), ('ADMN', 'Admin')], default='PTNT', max_length=4)),
],
options={
'abstract': False,
},
),
]
| [
"ebrahimasifismail@gmail.com"
] | ebrahimasifismail@gmail.com |
5a5afc24b7232e7b0dfad88d7345ed4ac5c2c416 | ee27158cc32b364baa2693bee24c407886bd4d65 | /ecourses/migrations/0004_alter_student_universidad.py | 19677460db20b5a7ab125d35d9215cfb31320305 | [] | no_license | BenjaLepe/django-ecourses | e87cf3231de92509f7e012efced695901a93dd46 | caee50380149df71d114e27c6a2bdba7539b4222 | refs/heads/main | 2023-08-24T18:25:19.750885 | 2021-10-25T04:32:07 | 2021-10-25T04:32:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,797 | py | # Generated by Django 3.2.8 on 2021-10-25 02:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecourses', '0003_course_promedio'),
]
operations = [
migrations.AlterField(
model_name='student',
name='universidad',
field=models.IntegerField(choices=[(0, 'Universidad De Chile'), (1, 'Universidad De Santiago De Chile'), (2, 'Universidad De Valparaíso'), (3, 'Universidad De Antofagasta'), (4, 'Universidad De La Serena'), (5, 'Universidad Del Bío-bío'), (6, 'Universidad De La Frontera'), (7, 'Universidad De Magallanes'), (8, 'Universidad De Talca'), (9, 'Universidad De Atacama'), (10, 'Universidad De Tarapacá'), (11, 'Universidad Arturo Prat'), (12, 'Universidad Metropolitana De Ciencias De La Educación'), (13, 'Universidad De Playa Ancha De Ciencias De La Educación'), (14, 'Universidad De Los Lagos'), (15, 'Universidad Tecnológica Metropolitana'), (16, "Universidad De O'higgins"), (17, 'Universidad De Aysén'), (18, 'Pontificia Universidad Católica De Chile'), (19, 'Universidad De Concepción'), (20, 'Universidad Técnica Federico Santa María'), (21, 'Pontificia Universidad Católica De Valparaíso'), (22, 'Universidad Austral De Chile'), (23, 'Universidad Católica Del Norte'), (24, 'Universidad Católica Del Maule'), (25, 'Universidad Católica De La Santísima Concepción'), (26, 'Universidad Católica De Temuco'), (27, 'Universidad Gabriela Mistral'), (28, 'Universidad Finis Terrae'), (29, 'Universidad Diego Portales'), (30, 'Universidad Central De Chile'), (31, 'Universidad Bolivariana'), (32, 'Universidad Pedro De Valdivia'), (33, 'Universidad Mayor'), (34, 'Universidad Academia De Humanismo Cristiano'), (35, 'Universidad Santo Tomás'), (36, 'Universidad La República'), (37, 'Universidad Sek'), (38, 'Universidad De Las Américas'), (39, 'Universidad Andrés Bello'), (40, 'Universidad De Viña Del Mar'), (41, 'Universidad Adolfo Ibañez'), (42, 'Universidad De Artes, Ciencias Y Comunicación - Uniacc'), (43, 'Universidad Autónoma De Chile'), (44, 'Universidad De Los Andes'), (45, 'Universidad Adventista De Chile'), (46, 'Universidad San Sebastián'), (47, 'Universidad Católica Cardenal Raúl Silva Henríquez'), (48, 'Universidad Del Desarrollo'), (49, 'Universidad De Aconcagua'), (50, 
'Universidad Los Leones'), (51, "Universidad Bernardo O'higgins"), (52, 'Universidad Tecnológica De Chile Inacap'), (53, 'Universidad Miguel De Cervantes'), (54, 'Universidad Alberto Hurtado'), (55, 'Instituto Profesional Agrario Adolfo Matthei'), (56, 'Instituto Profesional Inacap O Instituto Profesional Instituto Nacional De Capacitación Profesional Inacap'), (57, 'Instituto Profesional Libertador De Los Andes'), (58, 'Instituto Profesional Instituto De Estudios Bancarios Guillermo Subercaseaux'), (59, 'Instituto Profesional Escuela De Contadores Auditores De Santiago'), (60, 'Instituto Profesional Providencia'), (61, 'Instituto Profesional Chileno Británico De Cultura'), (62, 'Instituto Profesional Duoc Uc'), (63, 'Instituto Profesional Ipg'), (64, 'Instituto Profesional Santo Tomás'), (65, 'Instituto Profesional Instituto Superior De Artes Y Ciencias De La Comunicación'), (66, 'Instituto Profesional Alemán Wilhelm Von Humboldt'), (67, 'Instituto Profesional Diego Portales'), (68, 'Instituto Profesional De Chile'), (69, 'Instituto Profesional Escuela Moderna De Música'), (70, 'Instituto Profesional Esucomex'), (71, 'Instituto Profesional Eatri Instituto Profesional'), (72, 'Instituto Profesional De Los Angeles'), (73, 'Instituto Profesional Dr. 
Virginio Gómez G.'), (74, 'Instituto Profesional De Ciencias Y Educación Helen Keller'), (75, 'Instituto Profesional Aiep'), (76, 'Instituto Profesional De Arte Y Comunicación Arcos'), (77, 'Instituto Profesional Latinoamericano De Comercio Exterior - Iplacex'), (78, 'Instituto Profesional Los Leones'), (79, 'Instituto Profesional Ciisa'), (80, 'Instituto Profesional Los Lagos'), (81, 'Instituto Profesional De Ciencias De La Computación Acuario Data'), (82, 'Instituto Profesional Del Comercio'), (83, 'Instituto Profesional Del Valle Central'), (84, 'Instituto Profesional Carlos Casanueva'), (85, 'Instituto Profesional Instituto Nacional Del Fútbol'), (86, 'Instituto Profesional Instituto Internacional De Artes Culinarias Y Servicios'), (87, 'Instituto Profesional Projazz'), (88, 'Instituto Profesional Escuela De Cine De Chile'), (89, 'Instituto Profesional De Artes Escénicas Karen Connolly'), (90, 'Instituto Profesional Escuela De Marina Mercante Piloto Pardo'), (91, 'Instituto Profesional Vertical Instituto Profesional'), (92, 'Centro De Formación Técnica Instituto Central De Capacitación Educacional Icce'), (93, 'Centro De Formación Técnica De Enac O Centro De Formación Técnica De Los Establecimientos Nacionales De Educación Cáritas-chile'), (94, 'Centro De Formación Técnica Centro De Enseñanza De Alta Costura Paulina Diard'), (95, 'Centro De Formación Técnica Centro Tecnológico Superior Infomed'), (96, 'Centro De Formación Técnica Instituto Superior Alemán De Comercio Insalco'), (97, 'Centro De Formación Técnica Juan Bohon'), (98, 'Centro De Formación Técnica Barros Arana'), (99, 'Centro De Formación Técnica Santo Tomás'), (100, 'Centro De Formación Técnica Massachusetts'), (101, 'Centro De Formación Técnica Los Lagos'), (102, 'Centro De Formación Técnica Estudio Profesor Valero'), (103, 'Centro De Formación Técnica Cenco'), (104, 'Centro De Formación Técnica Prodata'), (105, 'Centro De Formación Técnica Instituto Superior De Estudios Jurídicos Canon'), (106, 
'Centro De Formación Técnica Iprosec'), (107, 'Centro De Formación Técnica San Agustín De Talca'), (108, 'Centro De Formación Técnica Icel'), (109, 'Centro De Formación Técnica Alpes'), (110, 'Centro De Formación Técnica Instituto Técnológico De Chile - I.t.c.'), (111, 'Centro De Formación Técnica Educap'), (112, 'Centro De Formación Técnica Cámara De Comercio De Santiago'), (113, 'Centro De Formación Técnica Laplace O C.f.t. De Estudios Superiores Y Capacitación Profesional Laplace'), (114, 'Centro De Formación Técnica Inacap'), (115, 'Centro De Formación Técnica Del Medio Ambiente'), (116, 'Centro De Formación Técnica Lota-arauco'), (117, 'Centro De Formación Técnica Ceduc - Ucn'), (118, 'Centro De Formación Técnica Ceitec'), (119, 'Centro De Formación Técnica Proandes'), (120, 'Centro De Formación Técnica Accioma'), (121, 'Centro De Formación Técnica De Tarapacá'), (122, 'Centro De Formación Técnica Protec'), (123, 'Centro De Formación Técnica De La Industria Gráfica O Cft. Ingraf'), (124, 'Centro De Formación Técnica De La Pontificia Universidad Católica De Valparaíso O Cft Pucv'), (125, 'Centro De Formación Técnica Teodoro Wickel Kluwen'), (126, 'Centro De Formación Técnica Profasoc'), (127, 'Centro De Formación Técnica Manpower'), (128, 'Centro De Formación Técnica Escuela Culinaria Francesa'), (129, 'Centro De Formación Técnica De La Región Del Maule'), (130, 'Centro De Formación Técnica De La Región De La Araucanía'), (131, 'Centro De Formación Técnica De La Región De Tarapacá'), (132, 'Centro De Formación Técnica De La Región De Coquimbo'), (133, 'Centro De Formación Técnica De La Región De Los Lagos'), (134, 'Centro De Formación Técnica De La Región De Valparaíso'), (135, 'Centro De Formación Técnica De La Región De Los Ríos'), (136, 'Centro De Formación Técnica De La Región De Antofagasta'), (137, 'Centro De Formación Técnica De La Región Metropolitana'), (138, 'Centro De Formación Técnica De La Región De Magallanes Y La Antártica Chilena')], default=1, 
max_length=100),
),
]
| [
"balepe@uc.cl"
] | balepe@uc.cl |
f3b0975173509a971b078616b1369cd06e6dbd22 | b5db98d382f587857cc56f71dfde17034963fdec | /image_search/signature_database_base.py | 7019b092d2372d185e499c5bbce04dc496472001 | [] | no_license | boluoyu/image_search | abb1563f35675730973bdcff6829da198f6c8d5e | 5675371130eca0849e215c426f51a6fdce66ed45 | refs/heads/master | 2020-07-17T16:12:08.287835 | 2017-04-18T09:51:09 | 2017-04-18T09:51:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,162 | py | from image_search.image_signature import ImageSignature
from operator import itemgetter
import numpy as np
from datetime import datetime
import os.path
class SignatureDatabaseBase(object):
    """Base class for storing and searching image signatures in a database.

    Note:
        You must implement the methods search_single_record and
        insert_single_record in a derived class.
    """

    def search_single_record(self, rec):
        """Search for a matching image record.

        Must be implemented by derived class.

        Args:
            rec (dict): an image record, in the format returned by
                make_record, e.g.:

                {'path': 'https://example.com/mona-lisa.jpg',
                 'signature': [0.123456, 0.234567, ...],
                 'metadata': {...}}

        Returns:
            a list of dicts representing matches.  Each dict must include at
            least the keys 'dist' and 'id'.  Duplicate entries are allowed
            and the list does not need to be sorted, e.g.:

            [{'dist': 0.0691..., 'id': u'AVM37oZq0osmmAxpPvx7',
              'path': u'https://example.com/mona-lisa.jpg'},
             {'dist': 0.2248..., 'id': u'AVM37nMg0osmmAxpPvx6',
              'path': u'https://example.com/mona-lisa-hi-res.jpg'},
             {'dist': 0.4252..., 'id': u'AVM37p530osmmAxpPvx9',
              'metadata': {...},
              'path': u'https://example.com/mona-lisa-flickr.jpg'}]
        """
        raise NotImplementedError

    def insert_single_record(self, rec):
        """Insert an image record.

        Must be implemented by derived class.

        Args:
            rec (dict): an image record, in the format returned by
                make_record, e.g.:

                {'path': 'https://example.com/mona-lisa.jpg',
                 'signature': [0.123456, 0.234567, ...],
                 'metadata': {...}}
        """
        raise NotImplementedError

    def __init__(self, distance_cutoff=0.095, save_path='../thumbnail', imgserver_ip = '127.0.0.1', imgserver_port = 9202,
                 *signature_args, **signature_kwargs):
        """Set up storage scheme for images.

        Args:
            distance_cutoff (Optional[float]): maximum image signature
                distance to be considered a match (default 0.095).  Any
                non-negative real number is accepted and stored as a float.
            save_path (Optional): thumbnail save path
            imgserver_ip (Optional[str]): host of the thumbnail image server
            imgserver_port (Optional[int]): port of the thumbnail image server
            *signature_args: Variable length argument list to pass to ImageSignature
            **signature_kwargs: Arbitrary keyword arguments to pass to ImageSignature

        Raises:
            TypeError: if distance_cutoff is not a real number
            ValueError: if distance_cutoff is negative
        """
        # Accept int as well as float (the old `type(x) is not float` check
        # rejected perfectly valid integer cutoffs).  bool is excluded
        # explicitly because it subclasses int but is never a sane cutoff.
        if isinstance(distance_cutoff, bool) or not isinstance(distance_cutoff, (int, float)):
            raise TypeError('distance_cutoff should be a float')
        if distance_cutoff < 0.:
            # Message now matches the check: zero is allowed, negatives are not.
            raise ValueError('distance_cutoff should be >= 0 (got %r)' % distance_cutoff)
        self.distance_cutoff = float(distance_cutoff)
        self.save_path = save_path
        self.gis = ImageSignature(*signature_args, **signature_kwargs)
        self.imgserver_port = imgserver_port
        self.imgserver_ip = imgserver_ip

    def add_image(self, path, msg_id, pic_id, img=None, bytestream=False, metadata=None, refresh_after=False):
        """Add a single image to the database.

        Args:
            path (string): path or identifier for image. If img=None, then path is assumed to be
                a URL or filesystem path
            msg_id (string): message id
            pic_id (string): picture id
            img (Optional[string]): usually raw image data. In this case, path will still be stored, but
                a signature will be generated from data in img. If bytestream is False, but img is
                not None, then img is assumed to be the URL or filesystem path. Thus, you can store
                image records with a different 'path' than the actual image location (default None)
            bytestream (Optional[boolean]): will the image be passed as raw bytes?
                That is, is the 'path_or_image' argument an in-memory image?
                Ignored when img is None (default False)
            metadata (Optional): any other information you want to include, can be nested (default None)
            refresh_after (Optional[boolean]): passed through to insert_single_record
        """
        rec = make_record(path, self.gis, self.imgserver_ip, self.imgserver_port,
                          msg_id, pic_id, self.save_path,
                          img=img, bytestream=bytestream, metadata=metadata)
        self.insert_single_record(rec, refresh_after=refresh_after)

    def search_image(self, path, bytestream=False):
        """Search for matches.

        Args:
            path (string): path or image data. If bytestream=False, then path is assumed to be
                a URL or filesystem path. Otherwise, it's assumed to be raw image data
            bytestream (Optional[boolean]): will the image be passed as raw bytes?
                That is, is the 'path_or_image' argument an in-memory image?
                (default False)

        Returns:
            a list of dicts representing unique matches, sorted ascending by
            'dist'.  Each dict carries 'msg_id', 'pic_id', 'path' and 'dist'.
        """
        img = self.gis.preprocess_image(path, bytestream)
        # Generate the signature for the query image.
        record = make_record(img, self.gis, self.imgserver_ip, self.imgserver_port)
        result = self.search_single_record(record)
        # Deduplicate by record id, keeping the first occurrence of each.
        seen_ids = set()
        unique = []
        for item in result:
            if item['id'] in seen_ids:
                continue
            seen_ids.add(item['id'])
            # u_item['thumbnail'] = 'http://%s:%s/%s' % (self.imgserver_ip, self.imgserver_port, item['thumbnail'])
            unique.append({
                'msg_id': item['msg_id'],
                'pic_id': item['pic_id'],
                'path': item['path'],
                'dist': item['dist'][0],
            })
        return sorted(unique, key=itemgetter('dist'))
def make_record(path, gis, imgserver_ip, imgserver_port, msg_id=None, pic_id=None, save_path=None, img=None, bytestream=False, metadata=None):
    """Makes a record suitable for database insertion.

    Note:
        This non-class version of make_record is provided for
        CPU pooling. Functions passed to worker processes must
        be picklable.

    Args:
        path (string): path or image data. If bytestream=False, then path is assumed to be
            a URL or filesystem path. Otherwise, it's assumed to be raw image data
        gis (ImageSignature): an instance of ImageSignature for generating the
            signature
        imgserver_ip / imgserver_port: retained for interface compatibility;
            only the commented-out absolute-URL thumbnail variant below uses them
        msg_id (Optional[string]): message id stored on the record
        pic_id (Optional[string]): picture id stored on the record
        save_path (Optional): thumbnail save directory; when None, no thumbnail
            file name is generated and 'thumbnail' is recorded as 'null'
        img (Optional[string]): usually raw image data. In this case, path will still be stored, but
            a signature will be generated from data in img. If bytestream is False, but img is
            not None, then img is assumed to be the URL or filesystem path. Thus, you can store
            image records with a different 'path' than the actual image location (default None)
        bytestream (Optional[boolean]): will the image be passed as raw bytes?
            That is, is the 'path_or_image' argument an in-memory image?
            Ignored when img is None (default False)
        metadata (Optional): any other information you want to include, can be nested (default None)

    Returns:
        An image record dict with keys 'path', 'signature', 'timestamp',
        'thumbnail' and, when supplied, 'msg_id', 'pic_id', 'metadata'.

    Raises:
        TypeError: if the thumbnail directory cannot be created
    """
    cur_time = datetime.now()
    if save_path is not None:  # identity test; was the non-idiomatic `!= None`
        thumbnail_dir = os.path.abspath(save_path)
        try:
            if not os.path.exists(thumbnail_dir):
                os.makedirs(thumbnail_dir)
        except OSError:
            raise TypeError('Make thumbnail path error.')
        # Timestamp-derived name (microsecond precision) keeps thumbnails
        # effectively unique per record.
        thumbnail_name = cur_time.strftime("%Y_%m_%d_%H_%M_%S_%f") + '.jpg'
        thumbnail_path = os.path.join(thumbnail_dir, thumbnail_name)
    else:
        thumbnail_name = None
        thumbnail_path = None
    record = dict()
    record['path'] = path
    if msg_id is not None:
        record['msg_id'] = msg_id
    if pic_id is not None:
        record['pic_id'] = pic_id
    if img is not None:
        signature = gis.generate_signature(img, bytestream=bytestream)
    else:
        signature = gis.generate_signature(path, thumbnail_path=thumbnail_path)
    record['signature'] = signature.tolist()
    if metadata:
        record['metadata'] = metadata
    record['timestamp'] = cur_time
    if thumbnail_path is not None:
        # record['thumbnail'] = 'http://%s:%s/%s'%(imgserver_ip, imgserver_port, thumbnail_name)
        record['thumbnail'] = thumbnail_name  # was a redundant '%s' % (...) format
    else:
        record['thumbnail'] = 'null'
    return record
def normalized_distance(_target_array, _vec):
    """Compute normalized distance to many points.

    Computes 1 - a * b / ( ||a|| * ||b||) for every a in target_array.

    Args:
        _target_array (numpy.ndarray): N x m array
        _vec (numpy.ndarray): array of size m

    Returns:
        an N x 1 array of normalized distances (floats)
    """
    # Dot each row of the target array with the query vector (as a column).
    column_vec = _vec.reshape(-1, 1)
    dots = _target_array.dot(column_vec)
    # Per-row norms, shaped (N, 1) so the division broadcasts row-wise.
    row_norms = np.linalg.norm(_target_array, axis=1).reshape(-1, 1)
    vec_norm = np.linalg.norm(_vec)
    return 1.0 - dots / (row_norms * vec_norm)
| [
"guoxiaolu06@126.com"
] | guoxiaolu06@126.com |
70f011987d0d83896b55fb107ebb146db7604a64 | 94ed2beea5ac8936f555824a6c799a4c5c810937 | /apps/aula01/cadastro_edital/migrations/0002_auto_20190313_1819.py | 02dc4db4fb0a40ca7b0654e0b90886d8feea2591 | [] | no_license | SarahRaq/handson_django | a4fc851022b4a20d314c149cb582aa6c60f95726 | 5277011ec8a25c3490089c4f7ba137bd5959ae9e | refs/heads/master | 2020-04-28T05:02:23.375150 | 2019-03-15T18:01:04 | 2019-03-15T18:01:04 | 175,004,387 | 0 | 0 | null | 2019-03-11T13:19:33 | 2019-03-11T13:19:33 | null | UTF-8 | Python | false | false | 1,494 | py | # Generated by Django 2.1.7 on 2019-03-13 18:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (0002): removes the Pagamento model and
    # the custom Usuario user model, replacing the user FKs on Avaliador and
    # Coordenador with a plain ``nome`` char field.
    dependencies = [
        ('cadastro_edital', '0001_initial'),
    ]
    operations = [
        # Drop Pagamento's FK before the model itself is deleted below.
        migrations.RemoveField(
            model_name='pagamento',
            name='edital',
        ),
        # Strip the auth-related M2M fields so the Usuario model can be deleted.
        migrations.RemoveField(
            model_name='usuario',
            name='groups',
        ),
        migrations.RemoveField(
            model_name='usuario',
            name='user_permissions',
        ),
        # Detach Avaliador/Coordenador from the soon-to-be-deleted user model...
        migrations.RemoveField(
            model_name='avaliador',
            name='usuario',
        ),
        migrations.RemoveField(
            model_name='coordenador',
            name='usuario',
        ),
        # ...and record a name string directly instead.
        migrations.AddField(
            model_name='avaliador',
            name='nome',
            field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Nome'),
        ),
        migrations.AddField(
            model_name='coordenador',
            name='nome',
            field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Nome'),
        ),
        migrations.AlterField(
            model_name='edital',
            name='data_publicacao',
            field=models.DateField(verbose_name='Data de publicação'),
        ),
        migrations.DeleteModel(
            name='Pagamento',
        ),
        migrations.DeleteModel(
            name='Usuario',
        ),
    ]
| [
"sarahraquelrs@gmail.com"
] | sarahraquelrs@gmail.com |
ca152810fc429ad3a3aa2281e6960067671ebd20 | 5f862a5f0116030adb4ce8d1f66c22e52eb5546f | /test/test_automl/test_smbo.py | 7094e9c51ac478e5b9391d662872db4ddc3f1610 | [
"BSD-3-Clause"
] | permissive | IsoLATionzhw/auto-sklearn | 9c1adbffe8f077471cbf9eb1c0a89d4ab9593220 | a263efb49f7b7f597963bc1e787105ea7615ea75 | refs/heads/master | 2021-07-15T05:47:23.268566 | 2017-10-04T10:08:21 | 2017-10-04T10:08:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | import unittest
from autosklearn.smbo import AutoMLSMBO
from autosklearn.metrics import accuracy
from smac.facade.smac_facade import SMAC
from smac.scenario.scenario import Scenario
from smac.tae.execute_ta_run import StatusType
from ConfigSpace import ConfigurationSpace, UniformFloatHyperparameter, Configuration
class TestSMBO(unittest.TestCase):
    # Unit test for AutoMLSMBO.choose_next: it must refuse to propose a new
    # configuration while the SMAC run history is empty, and succeed once at
    # least one evaluated configuration has been recorded.
    def test_choose_next(self):
        # Minimal two-hyperparameter search space.
        configspace = ConfigurationSpace()
        configspace.add_hyperparameter(UniformFloatHyperparameter('a', 0, 1))
        configspace.add_hyperparameter(UniformFloatHyperparameter('b', 0, 1))
        dataset_name = 'foo'
        func_eval_time_limit = 15
        total_walltime_limit = 15
        memory_limit = 3072
        # Build the SMBO wrapper without a backend/watcher; only choose_next
        # is exercised here, so those collaborators are not needed.
        auto = AutoMLSMBO(
            config_space=None,
            dataset_name=dataset_name,
            backend=None,
            func_eval_time_limit=func_eval_time_limit,
            total_walltime_limit=total_walltime_limit,
            memory_limit=memory_limit,
            watcher=None,
            metric=accuracy
        )
        auto.config_space = configspace
        scenario = Scenario({
            'cs': configspace,
            'cutoff_time': func_eval_time_limit,
            'wallclock_limit': total_walltime_limit,
            'memory_limit': memory_limit,
            'run_obj': 'quality',
        })
        smac = SMAC(scenario)
        # With no runs recorded, choose_next must raise.
        self.assertRaisesRegex(
            ValueError,
            'Cannot use SMBO algorithm on empty runhistory',
            auto.choose_next,
            smac
        )
        config = Configuration(configspace, values={'a': 0.1, 'b': 0.2})
        # TODO make sure the incumbent is always set?
        smac.solver.incumbent = config
        runhistory = smac.solver.runhistory
        runhistory.add(config=config, cost=0.5, time=0.5,
                       status=StatusType.SUCCESS)
        # After one successful run, choose_next should complete without error.
        auto.choose_next(smac)
| [
"feurerm@informatik.uni-freiburg.de"
] | feurerm@informatik.uni-freiburg.de |
678b524eb856b015ab6f89278db01e09ee25aeee | c1dbc149888d9e17023c7efea77a1bcba21b5215 | /haarcasc.py | 96dc7a5b740f24b72236d3326c2c232c775b065a | [] | no_license | aneri2807/Opencv_scripts | 8361c0983425cae75dd546ddde6b5d35790564e5 | c842284be5208d83fd78d8576b8b7cbfd7c5c461 | refs/heads/master | 2021-04-12T11:14:15.069227 | 2018-03-22T15:22:14 | 2018-03-22T15:22:14 | 126,353,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | import cv2
import numpy as np
# Load the pre-trained Haar cascade classifiers for frontal faces and eyes.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# Capture frames from the default webcam (device 0).
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    # Cascade detection operates on grayscale images.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.3, minNeighbors=5
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y, w, h) in faces:
        # Outline the detected face in blue on the colour frame.
        cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0), 2)
        roi_gray = gray[y:y+h, x:x+w] #region of image y,x
        roi_color = img[y:y+h, x:x+w]
        # Search for eyes only inside the detected face region.
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex,ey,ew,eh) in eyes:
            # Outline each eye in green, relative to the face ROI.
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0,255,0), 2)
##            font = cv2.FONT_HERSHEY_SIMPLEX
##            cv2.putText(roi_color,'EYE',(ex-ew,ey-eh), font, 0.5, (11,255,255), 2, cv2.LINE_AA)
    cv2.imshow('img',img)
    # Exit when the Esc key (code 27) is pressed.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
| [
"aneri2807@gmail.com"
] | aneri2807@gmail.com |
1bcd4f24598b0f823162ecf23a97fcaee260cc6b | be70c1a9e6600cdfb0691e746b7132d0f7f9890b | /bmi.py | 324aa713ddc12e44de9a142a077b0efe55b7f9c4 | [] | no_license | jasonmahony/python | ec16cf10407c40809f9f667828bbb5c8970a47d6 | 7676ee18425114a9084e74d10eb5d75fda2b9236 | refs/heads/master | 2021-06-14T16:42:49.390585 | 2021-05-30T15:29:12 | 2021-05-30T15:29:12 | 74,153,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | def bmi(weight, height):
bmi = (weight/height**2)
print(bmi)
return ("Underweight" if bmi <= 18.5 else "Normal" if bmi <= 25 else "Overweight" if bmi <= 30 else "Obese")
print(bmi(90, 1.80)) | [
"jason.mahony@autodesk.com"
] | jason.mahony@autodesk.com |
d691c4b54e635a2d515ac8e2806e29ed04c757c3 | c2285444392e8eb1904255b82f62b49f317aca07 | /tagger/db/models/tag.py | 95a487c8861a4b3aa4e29120ff433e8162fdbc16 | [] | no_license | tlskr/tagger | ced0ed36437bb29fe488eb2fae8b03314c5a9558 | 1230a1f36b91bd7ef2d57840dcfa013ca07e5a4a | refs/heads/master | 2022-12-16T00:44:36.798298 | 2018-08-17T13:23:02 | 2018-08-17T13:26:37 | 145,027,493 | 0 | 0 | null | 2022-12-08T02:46:37 | 2018-08-16T18:55:02 | Python | UTF-8 | Python | false | false | 2,353 | py | # coding: utf-8
from sqlalchemy import Column, Text
from sqlalchemy.dialects.postgresql.json import JSONB
from . base import Base
from tagger.db.session import get_session
class Tag(Base):
    """SQLAlchemy model: one row per (vendor, tag) with free-form JSONB metadata."""
    __tablename__ = 'tag'
    # tag_id is the primary key; tag_metadata holds the flattened key/value
    # mapping built from the vendor payload in load_tags().
    tag_id = Column(Text, primary_key=True)
    vendor_id = Column(Text, nullable=False)
    tag_metadata = Column(JSONB(astext_type=Text()), nullable=False)
    @classmethod
    def get_session(cls):
        ''' open and return a brand-new database session (one per call) '''
        Session = get_session() # TODO: clean up, ugly
        return Session()
    @classmethod
    def list_vendors(cls):
        ''' return the distinct vendor_id values (as a list of row tuples) '''
        session = cls.get_session()
        x = session.query(cls.vendor_id).distinct()
        return list(x.all())
    @classmethod
    def list_tags(cls):
        ''' return the distinct tag_id values (as a list of row tuples) '''
        session = cls.get_session()
        x = session.query(Tag.tag_id).distinct()
        return list(x.all())
    @classmethod
    def get_tag_instance(cls, tag_id):
        ''' get Tag instance by tag_id '''
        session = cls.get_session()
        data = session.query(Tag).filter(Tag.tag_id == tag_id)
        # NOTE(review): indexing the query assumes the tag exists; a missing
        # tag_id raises here -- confirm callers guarantee existence.
        return data[0]
    @classmethod
    def get_tag_data(cls, tag_id):
        ''' return Tag tag_metadata '''
        # NOTE(review): despite the docstring/name, this returns ALL column
        # data via all_data, not only the tag_metadata column.
        tag = cls.get_tag_instance(tag_id)
        return tag.all_data
    @property
    def all_data(self):
        ''' return all data in instance as a {column_name: value} dict '''
        # TODO: there must be a better way
        retval = {}
        for col in self.__table__.columns:
            retval[col.name] = getattr(self, col.name)
        return retval
    @classmethod
    def load_tags(cls, data):
        # Bulk-insert from a vendor payload of the shape:
        # {'vendor_id': ..., 'tags': [{'tag_id': ...,
        #                              'metadata': [{'key': ..., 'value': ...}, ...]}]}
        session = cls.get_session()
        vendor_id = data['vendor_id']
        for tag in data['tags']:
            new_tag = {}
            # Flatten the [{'key':..., 'value':...}] list into a plain dict,
            # skipping malformed entries that have no 'key'.
            for item in tag.get('metadata', []):
                if 'key' not in item:
                    continue
                new_tag[item['key']] = item.get('value')
            this_tag = Tag(
                tag_id=tag['tag_id'],
                vendor_id=vendor_id,
                tag_metadata=new_tag,
            )
            session.add(this_tag) # faster to use add_all()?
        session.commit()
    @classmethod
    def query_tag_metadata(cls, qry):
        # Return all_data dicts for every tag whose JSONB metadata contains
        # the single pair {qry['key']: qry['value']}.
        session = cls.get_session()
        qrydct = {qry['key']: qry['value']}
        data = session.query(Tag).filter(Tag.tag_metadata.contains(qrydct))
        return [i.all_data for i in list(data.all())]
| [
"gordon@practicalhorseshoeing.com"
] | gordon@practicalhorseshoeing.com |
ed804e3a0fd86fac05c8659b567fe28785d81b74 | 23004e07eb5923b69063d1a303707245ba185610 | /ADM_Project.py | 0aa88edf533bd86403a217a8cfb554678585e06d | [] | no_license | Vasireddydivya/Santander-Product-Recommendation_DataPrep_Python | 0bba05cbbab745b41bbdb7a0f15482047b0b0f26 | 0bf586237ccd73d796f6ec14e330773e0bdce256 | refs/heads/master | 2021-01-25T09:20:18.714274 | 2017-11-15T02:48:12 | 2017-11-15T02:48:12 | 93,812,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,076 | py | import numpy as np
import pandas as pd
#from tsne import bh_sne
import seaborn as sns
import matplotlib.pyplot as plt
#%matplotlib inline
#pylab.rcParams['figure.figsize'] = (10, 6)
def convert_int(dataframe, colname):
    """In place: coerce column *colname* of *dataframe* to a numeric dtype.

    Values that cannot be parsed become NaN (errors='coerce').
    """
    coerced = pd.to_numeric(dataframe[colname], errors='coerce')
    dataframe[colname] = coerced
# Load a 1M-row sample of the Santander training data, then keep only a
# random subset of ~520 customers to make exploration tractable.
limit_rows=1000000
df=pd.read_csv("C:/Users/vasir/Desktop/ADM/train_ver2.CSV",nrows=limit_rows)
df.head()
unique_ids = pd.Series(df["ncodpers"].unique())
# NOTE(review): limit_people is a float (5.2e2 == 520.0); pd.Series.sample
# expects an integer n -- confirm this works on the pandas version in use.
limit_people = 5.2e2
unique_id = unique_ids.sample(n=limit_people)
df = df[df.ncodpers.isin(unique_id)]
print df.describe()
#print df.dtypes
print df['fecha_dato'].dtype
#converting the fecha_dato and fecha_alta to DateTime
df["fecha_dato"]=pd.to_datetime(df["fecha_dato"],format="%Y-%m-%d")
df["fecha_alta"]=pd.to_datetime(df["fecha_alta"],format="%Y-%m-%d")
print df['fecha_dato'].unique()
#
##age is string value in data so converting the age to numeric value
convert_int(df,'age')
print df.dtypes
# Derive a month-of-snapshot column from the fecha_dato date.
df["month"] = pd.DatetimeIndex(df["fecha_dato"]).month
#
##checking for null values
print df.isnull().any()
#Data Cleaning
with plt.rc_context(dict(sns.axes_style("whitegrid"),
                         **sns.plotting_context("notebook",font_scale=1.5))):
    sns.distplot(df['age'].dropna(),bins=100,kde=False)
    sns.plt.title("Age Distribution")
    plt.ylabel("Count")
#I found few outliers below 20 and above 100, so we need to uniformly distribute the graph and replace the NA's with mean or median
#trying with median
df.loc[df.age<18,"age"] = df.loc[(df.age>=18) & (df.age<=30),"age"].median(skipna=True)
df.loc[df.age>100,"age"] = df.loc[(df.age>=30) & (df.age<=100),"age"].median(skipna=True)
df["age"].fillna(df["age"].median(),inplace=True)
df["age"] = df["age"].astype(int)
# NOTE(review): this second, mean-based pass runs on the already
# median-imputed column, so it may be a no-op -- confirm which imputation
# is actually intended.
df.loc[df.age < 18,"age"] = df.loc[(df.age >= 18) & (df.age <= 30),"age"].mean(skipna=True)
df.loc[df.age > 100,"age"] = df.loc[(df.age >= 30) & (df.age <= 100),"age"].mean(skipna=True)
df["age"].fillna(df["age"].mean(),inplace=True)
df["age"] = df["age"].astype(int)
with plt.rc_context(dict(sns.axes_style("whitegrid"),
                         **sns.plotting_context("notebook",font_scale=1.5))):
    sns.distplot(df['age'].dropna(),bins=80,kde=False)
    sns.plt.title("Age Distribution")
    plt.ylabel("Count")
#Both median and mean plots are almost similar i will use median value because we will not have bias issues.
print df.isnull().any()
#customer seniority and and new customer are correlated because customer seriority is in months and new customer will be '1' if he registered in past 6 months
#check the NA count for both columns
df['ind_nuevo'].isnull().sum()
#convert the customer seniority to int
convert_int(df,'antiguedad')
print df['antiguedad'].dtype
def find_nullVal_Count(dataframe, colname):
    """Return the number of missing (null) values in column *colname*."""
    missing_mask = dataframe[colname].isnull()
    return missing_mask.sum()
val=find_nullVal_Count(df,'ind_nuevo')
print val
df['ind_nuevo'].isnull().sum()
months_active=df.loc[df["ind_nuevo"].isnull(),:].groupby("ncodpers",sort=False).size()
months_active.max()
df.loc[df['ind_nuevo'].isnull(),"ind_nuevo"]=1
print df.isnull().any()
df['antiguedad'].isnull().sum()
print df['antiguedad'].dtype
print df.loc[df['antiguedad'].isnull(),"ind_nuevo"].describe()
df.loc[df['antiguedad'].isnull(),"antiguedad"]=df.antiguedad.min()
df.loc[df['antiguedad']<0,"antiguedad"]=0
print df.isnull().any()
#indrel
df['indrel'].isnull().sum()
pd.Series([i for i in df.indrel]).value_counts()
df.loc[df['indrel'].isnull(),"indrel"]=1
print df.isnull().any()
#tipodom,customer's province
df.drop(['tipodom','cod_prov'],axis=1,inplace=True)
print df.isnull().any()
#fecha_alta
dates=df.loc[:,'fecha_alta'].sort_values().reset_index()
date_value=int(np.median(dates.index.values))
print date_value
df.loc[df['fecha_alta'].isnull(),"fecha_alta"]=dates.loc[date_value,"fecha_alta"]
df["fecha_alta"].describe()
print df.isnull().any()
df['nomprov'].unique()
df.loc[df['nomprov']=='CORU\xc3\x91A, A',"nomprov"]="CORUNA, A"
df.loc[df['nomprov'].isnull(),"nomprov"]="UNKNOWN"
df['nomprov'].unique()
print df.isnull().any()
df['ind_nom_pens_ult1'].isnull().sum()
df.loc[df['ind_nom_pens_ult1'].isnull(),"ind_nom_pens_ult1"]=0
df.loc[df['ind_nomina_ult1'].isnull(),"ind_nomina_ult1"]=0
print df.isnull().any()
pd.Series([i for i in df.indfall]).value_counts()
df.loc[df['indfall'].isnull(),"indfall"]='N'
pd.Series([i for i in df.tiprel_1mes]).value_counts()
df.loc[df['tiprel_1mes'].isnull(),"tiprel_1mes"]='A'
df.tiprel_1mes = df.tiprel_1mes.astype("category")
map_dict = { 1.0 : "1",
"1.0" : "1",
"1" : "1",
"3.0" : "3",
"P" : "P",
3.0 : "3",
2.0 : "2",
"3" : "3",
"2.0" : "2",
"4.0" : "4",
"4" : "4",
"2" : "2"}
df.indrel_1mes.fillna("P",inplace=True)
df.indrel_1mes = df.indrel_1mes.apply(lambda x: map_dict.get(x,x))
df.indrel_1mes = df.indrel_1mes.astype("category")
print df.isnull().any()
pd.Series([i for i in df.canal_entrada]).value_counts()
missing_col=['ind_empleado','pais_residencia','sexo','canal_entrada','conyuemp']
for col in missing_col:
df.loc[df[col].isnull(),col]='UNKNOWN'
print df.isnull().any()
pd.Series([i for i in df.indext]).value_counts()
df.loc[df['indext'].isnull(),"indext"]='N'
print df.isnull().any()
pd.Series([i for i in df.indresi]).value_counts()
df.loc[df['indresi'].isnull(),"indresi"]='S'
print df.isnull().any()
pd.Series([i for i in df.ult_fec_cli_1t]).value_counts()
pd.Series([i for i in df.ind_actividad_cliente]).value_counts()
print df['ult_fec_cli_1t'].isnull().sum()
df.loc[df.ind_actividad_cliente.isnull(),"ind_actividad_cliente"] = df["ind_actividad_cliente"].median()
print df['ult_fec_cli_1t'].dtype
#Data Visualizations
import numpy as np
import pandas as pd
#from tsne import bh_sne
import seaborn as sns
import matplotlib.pyplot as plt
from bokeh.charts import Histogram,Bar
from bokeh.io import gridplot, output_file, show
from bokeh.plotting import figure
from bokeh.layouts import row
#%matplotlib inline
#pylab.rcParams['figure.figsize'] = (10, 6)
def convert_int(dataframe,colname):
    # NOTE(review): duplicate of the identical helper defined earlier in this
    # file (the script repeats its header section); this redefinition
    # harmlessly shadows it.  Coerces the column to numeric in place,
    # turning unparseable values into NaN.
    dataframe[colname]=pd.to_numeric(dataframe[colname],errors='coerce')
# Second pass: reload the data and build Bokeh visualisations of age/sex
# against each product-ownership flag.
limit_rows=1000000
df=pd.read_csv("C:/Users/vasir/Desktop/ADM/train_ver2.CSV",nrows=limit_rows)
convert_int(df,'age')
print df['age'].dtype
df['age']=df['age'].fillna(-1);
cols=['age']
df[cols]=df[cols].applymap(np.int64)
# Plot on a 1% sample for speed.
df_frac=df.sample(frac=0.01)
p_age=Histogram(df_frac,values='age',title="Age Distribution")
#show(p_age)
dffrac1=df_frac.dropna(subset=['sexo'],how='any')
dffrac1.head()
#dffrac1['sexo']=dffrac1['sexo'].astype('category')
p=Bar(dffrac1,'sexo',title="Sex")
#show(p)
dffrac2=df_frac.dropna(subset=['renta'],how='any')
bar_renta=Bar(dffrac2,values='renta',label='nomprov',agg='mean',title="City Vs Renta",legend=False, plot_width=800)
#show(bar_renta)
# Collect the product-flag columns (ind_*_ult1 naming pattern).
features_columns=df.filter(regex='ind_+.*ult.*');
features=features_columns.columns.values;
#print features;
df1=df[features]
feature=features.tolist();
print feature;
df_na=df_frac.dropna(subset=['ind_nom_pens_ult1','ind_nomina_ult1'],how='any')
# One grouped bar chart (age by sex) per product flag.
df_bar=Bar(df_na,label='ind_nom_pens_ult1',values='age',group='sexo',title="Sex vs ind_nom_pens_ult1", plot_width=200)
df_bar1=Bar(df_na,label='ind_nomina_ult1',values='age',group='sexo',title="Sex vs ind_nomina_ult1",legend=False, plot_width=200)
df_bar2=Bar(df_na,label='ind_ahor_fin_ult1',values='age',group='sexo',title="Sex vs ind_ahor_fin_ult1",legend=False, plot_width=200)
df_bar3=Bar(df_na,label='ind_aval_fin_ult1',values='age',group='sexo',title="Sex vs ind_aval_fin_ult1",legend=False, plot_width=200)
df_bar4=Bar(df_na,label='ind_cco_fin_ult1',values='age',group='sexo',title="Sex vs ind_cco_fin_ult1",legend=False, plot_width=200)
df_bar5=Bar(df_na,label='ind_cder_fin_ult1',values='age',group='sexo',title="Sex vs ind_cder_fin_ult1",legend=False, plot_width=200)
df_bar6=Bar(df_na,label='ind_cno_fin_ult1',values='age',group='sexo',title="Sex vs ind_cno_fin_ult1",legend=False, plot_width=200)
df_bar7=Bar(df_na,label='ind_ctju_fin_ult1',values='age',group='sexo',title="Sex vs ind_ctju_fin_ult1",legend=False, plot_width=200)
df_bar8=Bar(df_na,label='ind_ctma_fin_ult1',values='age',group='sexo',title="Sex vs ind_ctma_fin_ult1",legend=False, plot_width=200)
df_bar9=Bar(df_na,label='ind_ctop_fin_ult1',values='age',group='sexo',title="Sex vs ind_ctop_fin_ult1",legend=False, plot_width=200)
df_bar10=Bar(df_na,label='ind_ctpp_fin_ult1',values='age',group='sexo',title="Sex vs ind_ctpp_fin_ult1",legend=False, plot_width=200)
df_bar11=Bar(df_na,label='ind_deco_fin_ult1',values='age',group='sexo',title="Sex vs ind_deco_fin_ult1",legend=False, plot_width=200)
df_bar12=Bar(df_na,label='ind_deme_fin_ult1',values='age',group='sexo',title="Sex vs ind_deme_fin_ult1",legend=False, plot_width=200)
df_bar13=Bar(df_na,label='ind_dela_fin_ult1',values='age',group='sexo',title="Sex vs ind_dela_fin_ult1",legend=False, plot_width=200)
df_bar14=Bar(df_na,label='ind_ecue_fin_ult1',values='age',group='sexo',title="Sex vs ind_ecue_fin_ult1",legend=False, plot_width=200)
df_bar15=Bar(df_na,label='ind_fond_fin_ult1',values='age',group='sexo',title="Sex vs ind_fond_fin_ult1",legend=False, plot_width=200)
df_bar16=Bar(df_na,label='ind_hip_fin_ult1',values='age',group='sexo',title="Sex vs ind_hip_fin_ult1",legend=False, plot_width=200)
df_bar17=Bar(df_na,label='ind_plan_fin_ult1',values='age',group='sexo',title="Sex vs ind_plan_fin_ult1",legend=False, plot_width=200)
df_bar18=Bar(df_na,label='ind_pres_fin_ult1',values='age',group='sexo',title="Sex vs ind_pres_fin_ult1",legend=False, plot_width=200)
df_bar19=Bar(df_na,label='ind_reca_fin_ult1',values='age',group='sexo',title="Sex vs ind_reca_fin_ult1",legend=False, plot_width=200)
df_bar20=Bar(df_na,label='ind_tjcr_fin_ult1',values='age',group='sexo',title="Sex vs ind_tjcr_fin_ult1",legend=False, plot_width=200)
df_bar21=Bar(df_na,label='ind_valo_fin_ult1',values='age',group='sexo',title="Sex vs ind_valo_fin_ult1",legend=False, plot_width=200)
df_bar22=Bar(df_na,label='ind_viv_fin_ult1',values='age',group='sexo',title="Sex vs ind_viv_fin_ult1",legend=False, plot_width=200)
df_bar23=Bar(df_na,label='ind_recibo_ult1',values='age',group='sexo',title="Sex vs ind_recibo_ult1",legend=False, plot_width=200)
show(df_bar)
#fig=figure();
#fig.add_glyphs(df_bar.get_glyphs())
#fig.add_glyphs(bar_renta.get_glyphs())
#fig.add_glyphs(p_age.get_glyphs())
# Write the grid of charts to an HTML file and open it.
output_file('visulize.html')
p=gridplot([df_bar,df_bar1,p_age],[df_bar2,df_bar3,df_bar4],[df_bar5,df_bar6,df_bar7])
show(p)
# Sanity checks on remaining nulls in key product columns.
df['ind_ahor_fin_ult1'].isnull().sum()
df['ind_nom_pens_ult1'].isnull().sum()
df['ind_nomina_ult1'].isnull().sum()
df['ind_recibo_ult1'].isnull().sum()
| [
"noreply@github.com"
] | noreply@github.com |
5bd0502dc889f757281d2e3246e31016f6681c03 | cf9f56f50d947cfb9490a9b3a25aec1e7506066f | /lpthw/ex20.py | 92462c8b1a9c2a23781fcd60cbdb6fff519aa234 | [] | no_license | Nipuncp/lyceaum | ce14758ae4834031e47df731de64a52b965f32db | 4ba26b89915786656bd93e1e62aa3002b59aab2b | refs/heads/master | 2020-03-26T08:24:19.309122 | 2018-08-14T09:28:00 | 2018-08-14T09:28:00 | 144,699,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from sys import argv
# Unpack command-line arguments: the script name and the file to print.
script, input_file = argv
def print_all(f):
    """Print the entire remaining contents of the open file *f*."""
    contents = f.read()
    print(contents)
def rewind(f):
    """Reset the read position of *f* back to the start of the file."""
    f.seek(0, 0)
def print_a_line(line_count, f):
    """Print *line_count* followed by the next line read from *f*."""
    line = f.readline()
    print(line_count, line)
# Open the file named on the command line and demonstrate sequential reads.
current_file = open(input_file)
print("First let's print the whole file: \n")
print_all(current_file)
# The read cursor is now at end-of-file; seek back to the beginning.
print("Now let's rewind. Kind of like a tape.")
rewind(current_file)
print("Let's print three lines:")
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
| [
"nipuncp123@gmail.com"
] | nipuncp123@gmail.com |
67fa6bad0b3fbe89d5c78e8cb9d16e3f6de865da | daef437c4cd7ed6ca4be3989f6c7b990ee1efcc7 | /djangogirls/asgi.py | 71b39e44a12e3493104e2d080c24e3277d71d1c8 | [] | no_license | YY-U/Myblog-web-app | b157624f5cd56b3b0b56d30f40fe5ae62973c78f | fd90497dc0aa7e6b3efb6e6d5a24c165fde6de33 | refs/heads/main | 2023-08-28T05:13:40.068384 | 2021-09-23T14:33:34 | 2021-09-23T14:33:34 | 401,692,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
ASGI config for djangogirls project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Fall back to the project settings module when none is configured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangogirls.settings')
# Module-level ASGI callable that application servers import.
application = get_asgi_application()
| [
"chxgw030@yahoo.co.jp"
] | chxgw030@yahoo.co.jp |
dced842cb0138a15c1c028b64d73971e74c64f60 | 9d37286183243fd2a040466f0df0b3b31f69ef6a | /hunt/wsgi.py | b0853fef3d467722dedf7cf93cef1e236a011cd8 | [] | no_license | ma9shah/Product-Hunt-Django | 4a5b95db2d042718f374d3fd7cd771b4b0d196ed | 19bbfb72889ebce6813a0845df10787f8f7a93f6 | refs/heads/master | 2022-01-25T15:04:42.207144 | 2019-07-27T08:54:29 | 2019-07-27T08:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for hunt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module when none is configured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hunt.settings')
# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
| [
"ma9shah@gmail.com"
] | ma9shah@gmail.com |
c4d8e805c1f6b10bf7e2aa5d93ed4a594334fcc9 | a7fce57f807f35ae9c417f03c8a92a431362a0ce | /app/models/base.py | 588953c21c2f9b63e6bd7f1c170b0b0f6818e57d | [] | no_license | ONSdigital/census-rm-case-processor-prototype-python | 5e9cf1645b63736bde9487508c1009946c41f7be | dfeeeecbae94b8bbedcc85c12b56699488b04f48 | refs/heads/master | 2020-05-14T05:08:33.683697 | 2019-04-16T14:58:37 | 2019-04-16T14:58:37 | 181,700,008 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from sqlalchemy.schema import MetaData
from sqlalchemy.ext.declarative import declarative_base
metadata = MetaData(schema='casesvcv2python')
Base = declarative_base(metadata=metadata)
| [
"neale.dj@gmail.com"
] | neale.dj@gmail.com |
5223e23b482a1bfdb8243afce164d650eb01a992 | 6d1af2b4930d6454ad215a645a6ee2e8900ec45d | /tests/context.py | d05a6b307936e3dc2d9c135d32f2382d7a94e390 | [
"MIT"
] | permissive | orange-erotic-bible/orange-erotic-bible | 7cc8c477118dea8e20e03883029b79cd57e788a9 | cd9e31f44ccdf4b69782c5a21af25407cfe5418b | refs/heads/master | 2023-01-13T04:17:57.275994 | 2020-04-05T12:09:10 | 2020-04-05T12:09:10 | 225,234,684 | 49 | 2 | MIT | 2022-12-26T21:00:58 | 2019-12-01T21:38:03 | Python | UTF-8 | Python | false | false | 142 | py | import os
import sys
# Absolute path of the repository root (one level above this tests/ dir).
module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Prepend it so the local package import below resolves without installation.
sys.path.insert(0, module_path)
import oeb
| [
"57139857+orange-erotic-bible@users.noreply.github.com"
] | 57139857+orange-erotic-bible@users.noreply.github.com |
a4dfcab1929a4549495921aec3b9178da6d67f3e | 7e0ea1a29084f9536e02f6d7dcf9a0fb80babf58 | /core/views.py | e1eeeec7ef5bcd9a087405bb5c4fbdd5e1cfbecf | [] | no_license | ABYARTH/mywallet | a5b0bdbd0d08d22eb55fbc55e61147b92fcc5805 | 8eb0ce84422b55d0211e391269a7716b4f9c90a7 | refs/heads/master | 2021-01-16T20:00:05.470714 | 2016-01-14T15:17:26 | 2016-01-14T15:17:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | import requests
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth import login, authenticate
from django.contrib.auth.models import User, AnonymousUser
from django.template import RequestContext
from django.conf import settings
from django.db.models import Q
from django.template.loader import render_to_string
def main_view(request):
    """Landing page: renders the sign-in/sign-up form and handles both POSTs.

    Account operations are delegated to the internal REST API
    (``api:signin`` / ``api:signup``) over HTTP; on a successful sign-in the
    Django session is authenticated locally and the user is redirected home.
    """
    if request.method == 'POST':
        form_data = request.POST.dict()
        if form_data.get('form') == 'signin':
            # Validate credentials against the internal API first; only
            # authenticate the Django session once the API accepts them.
            # (A redundant authenticate() call whose result was always
            # discarded has been removed here.)
            res = requests.post('%s%s' % (settings.DOMAIN, reverse('api:signin')), data=form_data)
            if res.ok:
                user = authenticate(
                    username=form_data.get('username', ''),
                    password=form_data.get('password', '')
                )
                login(request, user)
                return redirect(reverse('core:home'))
        elif form_data.get('form') == 'signup':
            res = requests.post('%s%s' % (settings.DOMAIN, reverse('api:signup')), data=form_data)
            if res.ok:
                if 'biller' in res.json() or 'user' in res.json():
                    return render(
                        request,
                        'main.html',
                        {'message': 'Ask admin to activate your account you are now registred with MyWallet'}
                    )
                return render(
                    request,
                    'main.html',
                    {'message': 'Kindly login to your account'}
                )
        # any POST that was not handled above is rejected
        return HttpResponse('Invalid data')
    return render(request, 'main.html', {})
@login_required(login_url='/mywallet')
def transaction_view(request):
    """Render the transaction history page for the logged-in user."""
    user_id = request.user.id
    txns_res = requests.get('%s%s' % (settings.DOMAIN, reverse('api:transactions', args=[user_id])))
    user_res = requests.get('%s%s' % (settings.DOMAIN, reverse('api:user', args=[user_id])))
    context = {'txns': txns_res.json(), 'mywallet_user': user_res.json()}
    return render(request, 'txn.html', context)
@login_required(login_url='/mywallet')
def billers_view(request):
    """List all billers on the home page."""
    res = requests.get('%s%s' % (settings.DOMAIN, reverse('api:billers')))
    return render(request, 'home.html', {'billers': res.json()})
@login_required(login_url='/mywallet')
def customers_view(request):
    """List all customers on the home page."""
    res = requests.get('%s%s' % (settings.DOMAIN, reverse('api:customers')))
    return render(request, 'home.html', {'customers': res.json()})
@login_required(login_url='/mywallet')
def users_view(request):
    """List all users on the home page."""
    res = requests.get('%s%s' % (settings.DOMAIN, reverse('api:users')))
    return render(request, 'home.html', {'users': res.json()})
| [
"s.mohanty.006@gmail.com"
] | s.mohanty.006@gmail.com |
9687e2ef03ae47ee8da4844d73ec64b309f7fa7d | 3e95e4399ac1386b6ea21d1ed0841a0da9ed4ed8 | /Code/show_images.py | 16141eca01fd591d7594b76ba86a1acd71b2743b | [] | no_license | sbaio/Restricted-Boltzmann-Machine | 9ddbdef618600c68224ab9f6ffb3d7bced02c00b | e1c304aac444c3a30e29645aa5f9f6e76149f1f4 | refs/heads/master | 2020-01-23T21:41:12.593660 | 2017-01-19T13:42:24 | 2017-01-19T13:42:24 | 74,689,495 | 3 | 0 | null | 2016-12-08T11:13:51 | 2016-11-24T16:29:58 | Python | UTF-8 | Python | false | false | 2,206 | py |
from loadMNIST import load_mnist
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def showImage(image):
    """Display one grayscale image with nearest-neighbour interpolation."""
    figure = plt.figure()
    axis = figure.add_subplot(1, 1, 1)
    rendered = axis.imshow(image, cmap=mpl.cm.Greys)
    rendered.set_interpolation('nearest')
    axis.xaxis.set_ticks_position('top')
    axis.yaxis.set_ticks_position('left')
    plt.show()
def show_10_Images(image):
    # Display the given grayscale image in a 2x5 grid of subplots.
    # NOTE(review): despite the plural name, this draws the *same* `image`
    # in all ten subplots -- the loop index is never used to select a
    # different image. Confirm whether a list argument was intended.
    fig = plt.figure()
    for i in range(10):
        ax = fig.add_subplot(2,5,i+1)
        imgplot = ax.imshow(image,cmap=mpl.cm.Greys)
        imgplot.set_interpolation('nearest')
        ax.xaxis.set_ticks_position('top')
        ax.yaxis.set_ticks_position('left')
    plt.show()
def showImages(images):
    """Display a small list of grayscale images in a single row."""
    # intended for a handful of images only
    figure = plt.figure()
    total = len(images)
    for idx in range(total):
        axis = figure.add_subplot(1, total, idx + 1)
        rendered = axis.imshow(images[idx], cmap=mpl.cm.Greys)
        rendered.set_interpolation('nearest')
        axis.xaxis.set_ticks_position('top')
        axis.yaxis.set_ticks_position('left')
    plt.show()
def plot_10_by_10_images(images):
    """ Plot 100 MNIST images in a 10 by 10 table. Note that we crop
    the images so that they appear reasonably close together. The
    image is post-processed to give the appearance of being continued."""
    # NOTE(review): this function uses Python 2 syntax (`print n,q,r`,
    # `raw_input`) and will not run under Python 3 as written.
    n = images.shape[0]
    q = n // 10
    r = n%10
    print n,q,r
    fig = plt.figure()
    plt.ion()
    for x in range(q):
        print x
        if not x%10:
            # clear the figure before drawing the next batch
            plt.clf()
        for y in range(10):
            ax = fig.add_subplot(10, 10, 10*y+x%10+1)
            ax.matshow(images[10*y+x%10], cmap = mpl.cm.binary)
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
        plt.show()
        # pause so the user can inspect the current batch
        _=raw_input("Press enter to show next 10")
def generate_random_image():
    """Return a random 28x28 uint8 image with pixel values in [0, 255]."""
    flat = np.random.randint(256, size=28 * 28, dtype='uint8')
    return flat.reshape((28, 28))
def image_to_vector(im):
    """Flatten *im* into a 1-D float vector scaled from [0, 255] to [0, 1]."""
    column = im.reshape((-1, 1))
    return np.squeeze(column) / 255.
def vec_to_image(vec):
    """Reshape a length-784 vector back into a 28x28 image (row-major)."""
    grid = np.reshape(vec, (28, 28))
    return grid
# Demo: load the MNIST training split and display the first digit.
# The commented-out lines below are leftover experiments with the
# vector/image conversion helpers above.
images, labels = load_mnist('training', digits=np.arange(10), path = '../Data/')
a = generate_random_image()
#a = images[0]
#b = np.squeeze(a.reshape((-1,1)))/255.
#print b.shape
#print b[:]
showImage(images[0])
#c = vec_to_image(b)
#showImage(c)
#showImages([a,c])
#showImage(d)
#print c.shape
| [
"otossbai@gmail.com"
] | otossbai@gmail.com |
be0d2c0404f5a1d0904d36ae3e5636b9dbfb75f9 | 7fa176ce9ef29258ea3711612b0605ee82afd799 | /python/scikit-learn/svm/plot_dbscan.py | 778af1090ffaa7bbb5c36e76a8d5ecbfbdd491df | [] | no_license | fooyou/Exercise | c5137945821ee7f9f21a86f95c06d8e71941c19f | 1a5438d961f1716953b90921aa1ee9d60a97b23e | refs/heads/master | 2022-07-08T13:15:52.481707 | 2018-07-08T03:21:17 | 2018-07-08T03:21:17 | 37,168,041 | 1 | 1 | null | 2022-07-06T20:05:34 | 2015-06-10T01:18:11 | C++ | UTF-8 | Python | false | false | 2,478 | py | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show() | [
"liuchaozhenyu@gmail.com"
] | liuchaozhenyu@gmail.com |
b70f5aaaa9a23b47d5833e61e54bf71c2eab1650 | cf6fc33bb2a3790ddb5dacfc9246e3c42f4c3812 | /firstTier.py | cf4b3aa531961466b8a9ab9aa1df3f5bd164a881 | [] | no_license | oliverwangyi/Software-Supply-chain-Risk-Analysis- | 6965dc936c8d6a85f593285463cf18487477c816 | 166e5b309c8820a3cf5a64c2544c354f58fc853c | refs/heads/master | 2020-04-01T02:40:24.963984 | 2018-10-12T18:01:37 | 2018-10-12T18:01:37 | 152,789,676 | 0 | 0 | null | 2018-10-12T18:03:42 | 2018-10-12T18:03:41 | null | UTF-8 | Python | false | false | 2,659 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 17:21:09 2018
@author: krishna
"""
import glob2
#Creating a list of path for each python file
pyFilesPath = glob2.glob('/home/krishna/Desktop/fall 2018-19/Research/ipythonProject/**/*.py')
# Reading the libraries from each of the python files
firstTierList=[]
pyFilesPath.remove("/home/krishna/Desktop/fall 2018-19/Research/ipythonProject/ipython/IPython/core/tests/nonascii.py")
for files in pyFilesPath:
with open(files) as fs:
for line in fs:
if len(line)>=2:
match = line.split(' ')[0]
if match=="import" or match=="from":
libMatch = line.split(' ')[1]
cnt=0
for item in firstTierList:
if item ==libMatch:
cnt=1
if cnt==0:
firstTierList.append(libMatch)
#Extract only pakages from the list and exclude classes
checkChar='.'
finalList=[]
for item in firstTierList:
flag=0
for charecter in item:
if charecter==checkChar:
flag=1
break
if flag==0:
finalList.append(item)
else:
wordSplit=item.split('.')[0]
finalList.append(wordSplit)
finalList=list(set(finalList))
# Remove itself packages
temp = []
itselfList = []
start = ('.','%','IPython', 'ipython')
for item in finalList:
if item.startswith(start):
itselfList.append(item)
else:
temp.append(item)
finalList=temp.copy()
temp1=[]
for item in finalList:
if item.endswith('\n') or item.endswith(','):
item = item[:-1]
temp1.append(item)
else:
temp1.append(item)
newfinalList = temp1.copy()
newfinalList = list(set(newfinalList))
# Remove standard libraires to get external libraries
standardList = []
externalList = []
externalList = newfinalList.copy()
externalList.pop(0)
with open('standardLibraries.txt') as fs:
standardList = fs.read().splitlines()
checkChar = '.'
finalStandardList = []
for item in standardList:
flag = 0
for charecter in item:
if charecter == checkChar:
flag = 1
break
if flag == 0:
finalStandardList.append(item)
else:
wordSplit = item.split('.')[0]
finalStandardList.append(wordSplit)
finalStandardList = list(set(finalStandardList))
finalExternalList=[item for item in externalList if item not in finalStandardList]
| [
"noreply@github.com"
] | noreply@github.com |
d5633a2b848b581a3a034619a61450208a8052e8 | da1d21bb8d0760bfba61cd5d9800400f928868aa | /apps/common/utils/iterables.py | 3d4d2470b42a38d43cc00ac6ac9d420b5e00c8f0 | [] | no_license | biznixcn/WR | 28e6a5d10f53a0bfe70abc3a081c0bf5a5457596 | 5650fbe59f8dfef836503b8092080f06dd214c2c | refs/heads/master | 2021-01-20T23:53:52.887225 | 2014-05-13T02:00:33 | 2014-05-13T02:00:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # -*- coding: utf-8 -*-
from itertools import izip_longest
def grouper(n, iterable, padvalue=None):
    """Split *iterable* into n-sized tuples, padding the last with *padvalue*.

    grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')
    """
    chunks = [iter(iterable)] * n
    return izip_longest(*chunks, fillvalue=padvalue)
| [
"mbc@Mathiass-MacBook-Pro.local"
] | mbc@Mathiass-MacBook-Pro.local |
24477cb63cb6f686691370294340502c0cc654b2 | 10d9756f6c711ae20a0bbcb84f2be1adebb9ed5f | /examples/sparse_jacobi_matrix.py | 2fc5789dbff287ffd673d86521d98f0fd08f1579 | [
"BSD-3-Clause"
] | permissive | paulrozdeba/pyadolc | f44fa5589eeefb4a6b8e5e86625798c40aa529f0 | b3ab24a7181346545697a363ab5a3171d4bb218e | refs/heads/master | 2021-01-01T07:59:13.796652 | 2018-01-21T17:32:52 | 2018-01-21T17:32:52 | 97,529,945 | 0 | 0 | null | 2017-07-17T23:06:38 | 2017-07-17T23:06:37 | null | UTF-8 | Python | false | false | 752 | py | import adolc
import numpy
M,N = 4,2
sparsity_pattern_list = [numpy.random.randint(0,4*N,M)//(3*N) for n in range(N)]
def F(x):
    """Sparse componentwise-product test function.

    y starts as ones(M); entry ``row`` is multiplied by x[col] whenever
    sparsity_pattern_list[col][row] == 1 (module-level globals).
    """
    y = numpy.ones(M, dtype=x.dtype)
    for col, pattern in enumerate(sparsity_pattern_list):
        for row, flag in enumerate(pattern):
            if flag == 1:
                y[row] = y[row] * x[col]
    return y
# Record one evaluation of F on ADOL-C tape 0 so it can be differentiated.
x = numpy.random.rand(N)
adolc.trace_on(0)
x = adolc.adouble(x)
adolc.independent(x)
y = F(x)
adolc.dependent(y)
adolc.trace_off()
# Replay the tape at a fresh point and check it reproduces F.
x = numpy.random.rand(N)
y = F(x)
y2 = adolc.function(0,x)
assert numpy.allclose(y,y2)
# Sparsity pattern and compressed sparse Jacobian (via ColPack coloring).
options = numpy.array([0,0,0,0],dtype=int)
pat = adolc.sparse.jac_pat(0,x,options)
result = adolc.colpack.sparse_jac_no_repeat(0,x,options)
# NOTE(review): Python 2 print statements below; port to print() for py3.
print adolc.jacobian(0,x)
print pat
print result
| [
"sebastian.walter@gmail.com"
] | sebastian.walter@gmail.com |
ef36daca0df48a857bb2a548b85be845df066979 | 1b298205e6164314d5d2d41ef1fa4986494b24e2 | /viterbi.py | a87ac08ce85b47b4f4e208f7a5eefab054d4d5a0 | [] | no_license | ywng/pos-tagger | 8c925fb7e07423e1bc16a3c3ed648d350ee6e44f | 620c4fd8a91f2f3e96479bd6d58ad705d4d6095b | refs/heads/master | 2020-04-27T08:46:29.592314 | 2019-03-08T03:00:53 | 2019-03-08T03:00:53 | 174,185,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py | from __future__ import division
from const import START_END_OBS, START_END_TAG
import math
class Viterbi:
    """Viterbi decoder for an HMM (used here for POS tagging).

    All probabilities are combined in log space to avoid numeric underflow
    on long observation sequences; zero probabilities map to -inf.
    """

    def __init__(self, obs_space, states, trans_prob, emit_prob):
        '''
        obs_space: all the possbile value of observations. In POS tagging, it is the vocabulary.
        states: the HMM states. In POS tagging, it is the Part of Speech tag.
        trans_prob: A matrix K by K, trans_prob[i][j] is prob of state i -> state j,
                    where K is the size of the states.
        emit_prob: A matrix K by N, emit_prob[i][j] is the prob of observing obs_j given state i,
                    where K is the size of the states, N is the size of the obs_space.
        '''
        self.obs_space, self.states, self.trans_prob, self.emit_prob = \
            obs_space, states, trans_prob, emit_prob
        self.obs_space_size = len(self.obs_space)
        self.states_size = len(self.states)

        # observation index lookup table
        self.indexOfObs = {}
        for idx, observation in enumerate(self.obs_space):
            self.indexOfObs[observation] = idx

    @staticmethod
    def _log(p):
        """log(p) with log(0) defined as -inf instead of raising ValueError."""
        return float("-inf") if p == 0 else math.log(p)

    def decode(self, obs):
        '''
        obs: a sequence of observations. In POS tagging, it is a sequence of words/tokens.

        Returns (state_seq, max_prob): the most likely state sequence and its
        log probability.
        '''
        seq_len = len(obs)
        viterbi = [[0] * seq_len for _ in range(self.states_size)]
        backptr = [[None] * seq_len for _ in range(self.states_size)]

        # init start probability from the sentence-boundary tag
        start_tag_idx = self.states.index(START_END_TAG)
        first_obs_idx = self.indexOfObs[obs[0]]
        for s in range(self.states_size):
            backptr[s][0] = 0
            # -inf propagates correctly through the additions below
            viterbi[s][0] = self._log(self.trans_prob[start_tag_idx][s]) + \
                            self._log(self.emit_prob[s][first_obs_idx])

        for t in range(1, seq_len):
            obs_t_idx = self.indexOfObs[obs[t]]
            for curr_s in range(self.states_size):
                max_path, max_prob = None, float("-inf")
                for prev_s in range(self.states_size):
                    prob = viterbi[prev_s][t-1] + \
                           self._log(self.trans_prob[prev_s][curr_s]) + \
                           self._log(self.emit_prob[curr_s][obs_t_idx])
                    if prob > max_prob:
                        max_path, max_prob = prev_s, prob
                viterbi[curr_s][t] = max_prob
                backptr[curr_s][t] = max_path

        # trace backward to get the state sequence path
        state_seq = [None] * seq_len
        state_idx_seq = [None] * seq_len

        # Pick the best final state. BUGFIX: the index must start at 0 so
        # that state 0 being the argmax no longer leaves the back-trace
        # entry as None (which crashed self.states[None] in the original).
        best_idx = 0
        max_prob = viterbi[0][seq_len-1]
        for state_idx in range(1, self.states_size):
            if viterbi[state_idx][seq_len-1] > max_prob:
                max_prob = viterbi[state_idx][seq_len-1]
                best_idx = state_idx
        state_idx_seq[seq_len-1] = best_idx
        state_seq[seq_len-1] = self.states[best_idx]  # actual tag for the result

        for t in range(seq_len-1, 0, -1):
            state_idx_seq[t-1] = backptr[state_idx_seq[t]][t]
            state_seq[t-1] = self.states[state_idx_seq[t-1]]

        return state_seq, max_prob
| [
"victor.not.to.yield@gmail.com"
] | victor.not.to.yield@gmail.com |
0bf3e08e5e06852a8c723c159ca73d8b4cab3d49 | 8009c659b53e22f06593d849ee373f327372b49a | /Problems/Implement a function/main.py | ba88587a77a4fe3979c0e3cc06606a1288253caf | [] | no_license | rolmos14/bill_splitter | 6f7824b1d0b5a5c6e525c0fe164074ac94f148bd | 36c3ce9888d1089df6cbef4369f2b94a233df190 | refs/heads/master | 2023-06-27T09:23:50.319746 | 2021-07-31T15:27:15 | 2021-07-31T15:27:15 | 391,341,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | def get_number(num):
"""Return the number one less than the given positive number.
If the number is nonpositive, return a string "Enter a positive number!".
Arguments:
num -- an integer.
Return values:
An integer one less than the input number.
"""
if num <= 0:
return "Enter a positive number!"
else:
return num - 1
| [
"olmos.rafa@gmail.com"
] | olmos.rafa@gmail.com |
a2de1e5017153780b682b865a1dba29d1ab68342 | b1e304b259d0cbca2cb86640237e24be7d6f870f | /plugin/database_prefab.py | 439e22d0057e56ec4f6e491b9225ad1f8eb52a31 | [] | no_license | toshihr/alignbench | 4b30edd42a7dec1a4c36c8fc93e8ec8b3521a3ec | 7000427e3b0b015856893ee43b69dedb63cdf4c3 | refs/heads/master | 2021-07-21T09:10:47.624153 | 2017-10-30T18:33:06 | 2017-10-30T18:33:06 | 108,892,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # -*- coding: utf-8 -*-
# Register the PREFAB benchmark database archives in the global RESOURCE
# table. RESOURCE is presumably injected by the plugin loader before this
# file is executed -- TODO confirm against the loader.
RESOURCE['databases'].update({
    'prefab1':{
        'arcName': 'prefab1.tar.bz2',
    },
    'prefab2':{
        'arcName': 'prefab2.tar.bz2',
    },
    'prefab3':{
        'arcName': 'prefab3.tar.bz2',
    },
    'prefab4':{
        'arcName': 'prefab4.tar.bz2',
    },
})
| [
"toshihr@toshihr-mbp.local"
] | toshihr@toshihr-mbp.local |
b3c2d00bbbcb4af4d0d9fc7842cf3e8e929b270b | 659e6db1ae3488fa15f1df551380d8e1b747cfb9 | /model/model.py | b5e337983d3ef2bea60b88444e7a9f66f78e6282 | [] | no_license | wormys/fracture | 17ee9c7216937603c0f6c97eb23da20244001c2a | 39c62201f82d18a144e0507ad74fc96f3604e7c2 | refs/heads/master | 2023-08-24T15:14:49.062061 | 2021-10-18T12:57:54 | 2021-10-18T12:57:54 | 366,652,767 | 0 | 1 | null | 2021-09-26T08:39:04 | 2021-05-12T08:55:13 | Python | UTF-8 | Python | false | false | 2,208 | py | """
Date: 2021/05/10
Author: worith
"""
import torch
import torch.nn.functional as F
class NetX2Y(torch.nn.Module):
    """Fully connected regressor X -> Y with an optional physical-info branch.

    When ``is_physical_info`` is true, the tensor registered through
    :meth:`add_physical_info` is concatenated to the last hidden activation
    before the output projection.
    """

    def __init__(self, hidden1, hidden2, hidden3, physical_hidden, is_physical_info, n_feature, n_output):
        super(NetX2Y, self).__init__()
        self.is_physical_info = is_physical_info
        # three fully connected hidden layers
        self.hidden1 = torch.nn.Linear(n_feature, hidden1)
        self.hidden2 = torch.nn.Linear(hidden1, hidden2)
        self.hidden3 = torch.nn.Linear(hidden2, hidden3)
        # the output layer optionally also consumes the physical features
        predict_in = hidden3 + physical_hidden if is_physical_info else hidden3
        self.predict = torch.nn.Linear(predict_in, n_output)

    def forward(self, x):
        h = x
        for layer in (self.hidden1, self.hidden2, self.hidden3):
            h = torch.relu(layer(h))
        if self.is_physical_info:
            # assumes add_physical_info() was called with a matching batch size
            h = torch.cat([h, self.physical_info], 1)
        return self.predict(h)

    def add_physical_info(self, physical_info):
        """Register the physical-information tensor used by :meth:`forward`."""
        self.physical_info = physical_info
class NetH2Y(torch.nn.Module):
    """Four-hidden-layer regressor H -> Y.

    In training mode ``forward`` returns only the prediction; in eval mode
    it also returns the last hidden activation (the "physical info").
    """

    def __init__(self, hidden1, hidden2, hidden3, hidden4, n_feature, n_output):
        super(NetH2Y, self).__init__()
        self.hidden1 = torch.nn.Linear(n_feature, hidden1)
        self.hidden2 = torch.nn.Linear(hidden1, hidden2)
        self.hidden3 = torch.nn.Linear(hidden2, hidden3)
        self.hidden4 = torch.nn.Linear(hidden3, hidden4)
        self.predict = torch.nn.Linear(hidden4, n_output)

    def forward(self, x):
        h = x
        for layer in (self.hidden1, self.hidden2, self.hidden3, self.hidden4):
            h = torch.relu(layer(h))
        out = self.predict(h)
        # torch.nn.Module.training is toggled by .train()/.eval()
        return out if self.training else (out, h)
| [
"2589978839@qq.com"
] | 2589978839@qq.com |
a948cb00874c590197218356b4be33325d720557 | 317676b47c60e3a86ef58c93fab14829156a2f27 | /18.1/18.1/_18.1.py | dfd81a7d3b92e9485e3ed4361784def2d932865f | [] | no_license | bellontea/python_lab3 | aa650a6e42997f0b03837bb3850c0590e92a91bc | 756a2a4929a1c3a6a1e26c7dec21f52cf1b44b87 | refs/heads/master | 2023-04-26T20:10:40.406188 | 2021-05-15T20:15:01 | 2021-05-15T20:15:01 | 367,721,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | def print_shrug_smile():
print("¯\_(ツ)_/¯")
def print_ktulhu_smile():
    """Print the Cthulhu emoticon to stdout."""
    face = "{:€"
    print(face)
def print_happy_smile():
    """Print the Lenny-face emoticon to stdout."""
    face = "(͡°ʖ ͡°)"
    print(face)
| [
"71792005+bellontea@users.noreply.github.com"
] | 71792005+bellontea@users.noreply.github.com |
bfb9ba31588b661323abab54f4fd5873f537d7ca | 42e063977c906d351f463d68bbcfbab6baf32488 | /Train1.py | 79b5edc185a7b3d4209cf0fde6c0c0dcb9e7595b | [] | no_license | Yk1n0gEa/Linear-Regression | 01edac563bfcb7f7eb09ff2f19dcd8a1ed6ee0ac | 0a3c07e84bc402bac032500d0b524f73109789f8 | refs/heads/master | 2020-03-19T01:49:44.275480 | 2018-05-29T09:10:49 | 2018-05-29T09:10:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,404 | py |
# Train a single-layer linear regression model with TF1 on either the
# per-user or per-time CSV dataset, then checkpoint the learned weights.
import tensorflow as tf
import numpy as np
import pandas as pd
import random
tf.reset_default_graph()
# dataset selector constants
train_user=0
train_time=1
model_path=""
X_csv_train_file_path=""
y_csv_train_file_path=""
Y_prediction_file_path=""
root_directory = "D:\\jd\\model"
#train_object=train_user
train_object=train_time
if train_object == train_user :
    model_path = root_directory+"\\User\\train.ckpt"
    X_csv_train_file_path = "../data/X_train_user.csv"
    y_csv_train_file_path = "../data/y_train_user.csv"
    Y_prediction_file_path = "../data/y_pred_user.csv"
if train_object == train_time :
    model_path = root_directory+"\\Time\\train.ckpt"
    X_csv_train_file_path = "../data/X_train_time.csv"
    y_csv_train_file_path = "../data/y_train_time.csv"
    Y_prediction_file_path = "../data/y_pred_time.csv"
feature_size=57
dataset_size=0
# load features, zero-fill missing values
X1=pd.read_csv(X_csv_train_file_path,header=0)
X1.fillna(0,inplace=True)
X=np.array(X1)
[dataset_size,feature_size]=X.shape
print(X.shape)
# min-max normalise each column around its mean
# NOTE(review): the loop stops at feature_size - 1, so the last column is
# left unnormalised -- confirm whether that is intentional.
maximums, minimums, avgs = X.max(axis=0), X.min(axis=0), X.sum(
    axis=0) / X.shape[0]
for i in range(feature_size - 1):
    X[:, i] = (X[:, i] - avgs[i]) / (maximums[i] - minimums[i])
# load targets, zero-fill missing values, shape (n, 1)
Y1 = pd.read_csv(y_csv_train_file_path,header=0)
Y1.fillna(0,inplace=True)
Y=np.array(Y1)
Y=Y.reshape(-1,1)
print(Y.shape)
# size of one training mini-batch
batch_size=100
# network parameters: a single linear layer y = x.w + b
w=tf.Variable(tf.random_normal([feature_size,1],stddev=1,seed=1))
b = tf.Variable(0.0, name="biases",dtype=tf.float32)
x=tf.placeholder(tf.float32,shape=(None,feature_size),name='x-input')
y_=tf.placeholder(tf.float32,shape=(None,1),name='y-input')
# forward pass
y=tf.add(tf.matmul( x,w) ,b)
# loss and backpropagation step
#cross_entropy=-tf.reduce_mean(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
#train_step=tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)
# NOTE(review): despite the name, this loss is mean squared error.
cross_entropy = tf.reduce_mean(tf.square(y - y_))
#optimizer = tf.train.GradientDescentOptimizer(0.001)
optimizer = tf.train.AdamOptimizer(0.01)# try different optimizers and compare
train_step = optimizer.minimize(cross_entropy)
with tf.Session() as sess:
    saver = tf.train.Saver()
    init_op=tf.global_variables_initializer()
    sess.run(init_op)
    print('训练前网络参数的值为:')
    print(sess.run(w))
    print(sess.run(b))
    # number of training steps; batches are sampled at random offsets
    STEPS=200000
    for i in range(STEPS):
        # pick batch_size consecutive samples starting at a random offset
        #start=(i*batch_size)%dataset_size
        #end=min(start+batch_size,dataset_size)
        n=random.randint(0, dataset_size-batch_size)
        start=n
        end=n+batch_size
        # run one optimisation step on the sampled batch
        sess.run(train_step,feed_dict={x:X[start:end],y_:Y[start:end]})
        if i % 100==0:
            # periodically evaluate the loss on the full data set
            total_cross_entropy=sess.run(cross_entropy,feed_dict={x:X,y_:Y})
            print("start:{}: After{} training step(s),cross entropy on all data is {}".
                  format(start,i,total_cross_entropy))
    print('训练后网络参数的值为:')
    print(sess.run(w))
    print(sess.run(b))
    save_path = saver.save(sess, model_path)
print("complete") | [
"noreply@github.com"
] | noreply@github.com |
82a0c09f77fd08d7efdf8332cfffa06c22aff9ad | b55e0a122670a6a29b0b5e1581865f784ef200b9 | /chats/migrations/0005_alter_message_user.py | b4f25fa34cbf96c09ffe16b6c103ecfea68972d4 | [] | no_license | kattyeye/django-rest-chat-app | fc6b0a23535adb28efeff1d9f1af18ad982ce7bf | 052eadadcccd1e7f77d10f37424156e32b0210de | refs/heads/main | 2023-08-20T19:55:20.388391 | 2021-10-30T14:23:53 | 2021-10-30T14:23:53 | 416,809,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # Generated by Django 3.2.8 on 2021-10-16 13:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes chats.Message.user to be nullable
    # while keeping CASCADE deletion and related_name='users'.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('chats', '0004_message_user'),
    ]
    operations = [
        migrations.AlterField(
            model_name='message',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='users', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"69052750+kattyeye@users.noreply.github.com"
] | 69052750+kattyeye@users.noreply.github.com |
da9cb52b8cf4baa8f36751f5f276ece4eef2e963 | cfaa6c0c92e3325fa49b6d6fc0079223bdd67f08 | /pre_processing.py | a8b897f81b2725b50ad4618874e535b9c3304ae5 | [] | no_license | ieee820/metal_crack_detector | e08187d503b4d6e71b1e0625a41491b05e477b7d | 4d1fc779f688046eba10ea6babc8e5a8df1bfde6 | refs/heads/master | 2020-06-30T21:31:48.394439 | 2019-07-23T05:16:02 | 2019-07-23T05:16:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,969 | py | import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
from parameters import RESIZE_DIMENSION
HEALTHY_FENDER_APRON_PATH = './data/YE358311_Fender_apron_2/YE358311_Fender_apron/YE358311_Healthy'
DEFECTIVE_FENDER_APRON_PATH ='./data/YE358311_Fender_apron_2/YE358311_Fender_apron/YE358311_defects' \
'/YE358311_Crack_and_Wrinkle_defect'
def get_all_files():
    """Return (healthy_filenames, defective_filenames) from the dataset dirs."""
    healthy = os.listdir(HEALTHY_FENDER_APRON_PATH)
    defective = os.listdir(DEFECTIVE_FENDER_APRON_PATH)
    return healthy, defective
def pre_processing(data_path):
    """Load an image, resize it and return its (grad_x, grad_y) Sobel maps.

    Each gradient is returned with a trailing singleton channel axis,
    shape (H, W, 1), so callers can stack them along a new batch axis.
    """
    raw = cv2.imread(data_path)
    # resize to the fixed input dimension
    resized = cv2.resize(raw, RESIZE_DIMENSION, interpolation=cv2.INTER_AREA)
    # grayscale, then horizontal/vertical Sobel derivatives
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5, scale=2)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5, scale=2)
    return grad_x[..., np.newaxis], grad_y[..., np.newaxis]
def get_preprocessed_data():
    """Build the training set from the healthy/defective image folders.

    Each image contributes two samples (its Sobel x- and y-gradient maps);
    healthy samples are labelled 1, defective samples 0.

    Returns:
        dict with 'data'   -- array of shape (2 * n_images, H, W, 1)
                  'target' -- matching 1/0 label array
    """
    healthy_images, defected_images = get_all_files()
    grads, labels = [], []
    # Collect all per-image gradient channels first and concatenate once at
    # the end; the original called np.concatenate inside the loop, which
    # recopied the growing array every iteration (quadratic cost).
    for image in healthy_images:
        grad_x, grad_y = pre_processing(data_path=os.path.join(HEALTHY_FENDER_APRON_PATH, image))
        grads.extend((grad_x[np.newaxis, ...], grad_y[np.newaxis, ...]))
        labels.extend((1, 1))
    for image in defected_images:
        grad_x, grad_y = pre_processing(data_path=os.path.join(DEFECTIVE_FENDER_APRON_PATH, image))
        grads.extend((grad_x[np.newaxis, ...], grad_y[np.newaxis, ...]))
        labels.extend((0, 0))
    if not grads:
        # mirror the original behaviour when both folders are empty
        return {'data': np.array([]), 'target': np.array([])}
    return {'data': np.concatenate(grads, axis=0), 'target': np.array(labels)}
| [
"john.janmejaya@gmail.com"
] | john.janmejaya@gmail.com |
021841d84dc923f063d7a2f09b346af1e7452d2a | 1d4f4c45da7465f2d39e567de31d79b578002bca | /old/collect_script/depth_crawl.py | 3d9a78ec3a152b89ed30696150fcdb5f702f741b | [] | no_license | lihn1987/CoinCollector | 9f1e7ab45273fea04b809a363b5af2f4fd6f1733 | 3115be42a6bf72e969bbdc245f5bf217b33b25d9 | refs/heads/master | 2022-07-16T07:39:27.269642 | 2021-03-31T16:45:03 | 2021-03-31T16:45:03 | 196,840,232 | 24 | 2 | null | 2022-06-22T04:12:20 | 2019-07-14T13:32:02 | Python | UTF-8 | Python | false | false | 171 | py | import time
import depth_huobi
import depth_ok
import depth_binance
depth_huobi.StartCrwal()
depth_ok.StartCrwal()
depth_binance.StartCrwal()
while True:
time.sleep(1) | [
"lihn1011@163.com"
] | lihn1011@163.com |
15455b46dcf914a8067898dd19c0856955501824 | 9398a9d53c2977df17465cceb500d2568dad311b | /Build a computer/nand2tetris/nand2tetris/projects/11/JackAnalyzer.py | 700bb22f2eaf92ff7d0a779159bdfb7720fca87a | [] | no_license | jianqiangq/coursera | 561650a59ec5768c3687b34788873125ccc8cb1e | 4530d986f33cd2281e43fb6c26fa1af8c302c2c3 | refs/heads/master | 2023-02-27T16:30:30.974446 | 2021-01-28T03:25:23 | 2021-01-28T03:25:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,632 | py | from JackTokenizer import JackTokenizer,TOKEN_TYPE,KEYWORD_TYPE,tokentype,tokendict
import fileinput
import sys, getopt
import os
from enum import Enum, unique
import sys
class JackAnalyzer:
    def __init__(self,infile):
        """Compile *infile* (a .jack source file) to <name>.xml next to it.

        Compilation runs eagerly inside the constructor: the tokenizer is
        built, the whole class is parsed, and the output file is closed.
        """
        # read all source code string to the buffer
        self.parser = JackTokenizer(infile)
        outfile = infile[:infile.find(".jack")] + ".xml"
        self.out = open(outfile,"w")
        # current indentation depth of the emitted XML
        self.depth = 0
        self.compileClass()
        self.out.close()
def lowerLevel(self):
self.depth += 1
def upperLevel(self):
self.depth -= 1
def compileOut(self,str):
self.out.write(" "*self.depth)
self.out.write(str)
def compileOutElement(self,tkType,tkStr):
self.out.write(" "*self.depth)
typeStr = ""
if tkType == TOKEN_TYPE.TOKEN_KEYWORD:
typeStr = "keyword"
elif tkType == TOKEN_TYPE.TOKEN_SYMBOL:
typeStr = "symbol"
elif tkType == TOKEN_TYPE.TOKEN_IDENTIFIER:
typeStr = "identifier"
elif tkType == TOKEN_TYPE.TOKEN_INT_CONST:
typeStr = "integerConstant"
elif tkType == TOKEN_TYPE.TOKEN_STRING_CONST:
typeStr = "stringConstant"
elif tkType == TOKEN_TYPE.TOKEN_INVALID:
typeStr = "invalid"
self.out.write("<" + typeStr + "> " + tkStr + " </" + typeStr + ">\n")
def compileClass(self):
self.compileOut("<class>\n")
self.lowerLevel()
# parse class
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "class":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "valid class define!\n")
exit(1)
# parse class name
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "valid class define!\n")
exit(1)
#parse symbol '{'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "{":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "valid class define!\n")
exit(1)
#parse class val des
while self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
(self.parser.keyWord() == "static" or self.parser.keyWord() == "field"):
self.compileClassVarDec()
#parse class method
while self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
(self.parser.keyWord() == "method" or \
self.parser.keyWord() == "constructor" or \
self.parser.keyWord() == "function"):
self.compileSubroutine()
#parse symbol '{'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "}":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "valid class define!\n")
exit(1)
# end of parse class
self.upperLevel()
self.compileOut("</class>\n")
return True
    def compileClassVarDec(self):
        """Compile one class variable declaration into XML.

        Grammar: ('static' | 'field') type varName (',' varName)* ';'
        On a grammar violation the current source line number is printed and
        the process exits with status 1.
        NOTE(review): the error message "valid val define!" appears to be
        missing the "in" prefix -- confirm and fix alongside compileClass.
        """
        self.compileOut("<classVarDec>\n")
        self.lowerLevel()
        # parse key word: 'static' or 'field'
        if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
            (self.parser.keyWord() == "static" or self.parser.keyWord() == "field"):
            self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
            self.parser.advance()
        else:
            print(str(sys._getframe().f_lineno) + "valid val define!\n")
            exit(1)
        # parse val type: builtin keyword type or a class-name identifier
        if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD or \
            self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
            self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
            self.parser.advance()
        else:
            print(str(sys._getframe().f_lineno) + "valid val define!\n")
            exit(1)
        # parse first val name
        if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
            self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
            self.parser.advance()
        else:
            print(str(sys._getframe().f_lineno) + "valid val define!\n")
            exit(1)
        # parse the remaining comma-separated variable names until ';'
        while not (self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
            self.parser.symbol() == ";"):
            # parse symbol ','
            if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == ",":
                self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
                self.parser.advance()
            else:
                print(str(sys._getframe().f_lineno) + "valid val define!\n")
                exit(1)
            # parse val name
            if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
                self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
                self.parser.advance()
            else:
                print(str(sys._getframe().f_lineno) + "valid val define!\n")
                exit(1)
        # parse the terminating ';'
        if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == ";":
            self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
            self.parser.advance()
        else:
            print(str(sys._getframe().f_lineno) + "valid val define!\n")
            exit(1)
        # end of class var declaration
        self.upperLevel()
        self.compileOut("</classVarDec>\n")
        return True
def compileSubroutine(self):
self.compileOut("<subroutineDec>\n")
self.lowerLevel()
# parse key word
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
(self.parser.keyWord() == "constructor" or \
self.parser.keyWord() == "function" or \
self.parser.keyWord() == "method"):
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "invalid subroutine!\n")
exit(1)
# parse type
if (self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
(self.parser.keyWord() == "int" or \
self.parser.keyWord() == "char" or \
self.parser.keyWord() == "void" or \
self.parser.keyWord() == "boolean")) or \
self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "invalid subroutine!\n")
exit(1)
# parse subroutineName
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "invalid subroutine!\n")
exit(1)
# parse '('
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == "(":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "invalid subroutine!\n")
exit(1)
# parse param list
self.compileParameterList()
# parse ')'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == ")":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print(str(sys._getframe().f_lineno) + "invalid subroutine!\n")
exit(1)
# parse body
self.compileSubroutineBody()
self.upperLevel()
self.compileOut("</subroutineDec>\n")
return True
def compileSubroutineBody(self):
self.compileOut("<subroutineBody>\n")
self.lowerLevel()
# parse '{'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "{":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("inValid sub routine body define!\n")
exit(1)
# parse var
while self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "var":
self.compileVarDec()
# parse statements
self.compileStatements()
# parse '}'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "}":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid sub routine body define!\n")
exit(1)
self.upperLevel()
self.compileOut("</subroutineBody>\n")
return True
def compileParameterList(self):
self.compileOut("<parameterList>\n")
self.lowerLevel()
# parse rest param
while not (self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and\
self.parser.symbol() == ")"):
# parse first element type
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER or \
(self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
(self.parser.keyWord() == "int" or self.parser.keyWord() == "char" or \
self.parser.keyWord() == "boolean")):
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
# parse first element varName
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
# parse ','
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL:
if self.parser.symbol() == ",":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
elif self.parser.symbol() == ")":
break
else:
print(str(sys._getframe().f_lineno) + "valid param list!\n")
exit(1)
else:
print(str(sys._getframe().f_lineno) + "valid param list!\n")
exit(1)
self.upperLevel()
self.compileOut("</parameterList>\n")
return True
def compileVarDec(self):
self.compileOut("<varDec>\n")
self.lowerLevel()
# parse key word
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "var":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid val define!\n")
exit(1)
# parse var type
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD or \
self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid val define!\n")
exit(1)
# parse var name
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid val define!\n")
exit(1)
# parse the rest var name
while not (self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ";"):
# parse ","
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == ",":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid val define!\n")
exit(1)
# parse var name
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid val define!\n")
exit(1)
# parse the end symbol
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ";":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid var define!\n")
exit(1)
# end of class var descrtion
self.upperLevel()
self.compileOut("</varDec>\n")
return True
def compileStatements(self):
self.compileOut("<statements>\n")
self.lowerLevel()
while self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
(self.parser.keyWord() == "do" or \
self.parser.keyWord() == "if" or \
self.parser.keyWord() == "while" or \
self.parser.keyWord() == "let" or \
self.parser.keyWord() == "return"):
if self.parser.keyWord() == "do":
self.compileDo()
elif self.parser.keyWord() == "if":
self.compileIf()
elif self.parser.keyWord() == "while":
self.compileWhile()
elif self.parser.keyWord() == "let":
self.compileLet()
elif self.parser.keyWord() == "return":
self.compileReturn()
else:
print("valid statement define!\n")
exit(1)
self.upperLevel()
self.compileOut("</statements>\n")
return True
def compileDo(self):
self.compileOut("<doStatement>\n")
self.lowerLevel()
# parse do
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "do":
self.compileOutElement(self.parser.tokenType(),self.parser.keyWord())
self.parser.advance()
else:
print("inValid do define!\n")
exit(1)
# parse '('
while not (self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "("):
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
# parse '('
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "(":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid do statement define!\n")
exit(1)
# parse expression list
self.compileExpressionList()
# parse ')'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ")":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid do statement body define!\n")
exit(1)
# parse ';'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ';':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid do statement define!\n")
exit(1)
self.upperLevel()
self.compileOut("</doStatement>\n")
return True
def compileLet(self):
self.compileOut("<letStatement>\n")
self.lowerLevel()
# parse let
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "let":
self.compileOutElement(self.parser.tokenType(),self.parser.keyWord())
self.parser.advance()
else:
print("inValid let define!\n")
exit(1)
# parse varname
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.identifier())
self.parser.advance()
else:
print("inValid let define!\n")
exit(1)
# parse `[expression]`
while self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == '[':
# parse '['
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
# parse expression
self.compileExpression()
# parse ']'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ']':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid let define!\n")
exit(1)
# parse '='
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == '=':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid let define!\n")
exit(1)
# parse expression
self.compileExpression()
# parse ';'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ';':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid let define!\n")
exit(1)
self.upperLevel()
self.compileOut("</letStatement>\n")
return True
def compileWhile(self):
self.compileOut("<whileStatement>\n")
self.lowerLevel()
# parse return
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "while":
self.compileOutElement(self.parser.tokenType(),self.parser.keyWord())
self.parser.advance()
else:
print("inValid while define!\n")
exit(1)
# parse '('
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == '(':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid while define!\n")
exit(1)
# parse expression
self.compileExpression()
# parse ')'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ')':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid while define!\n")
exit(1)
# parse '{'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == '{':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid while define!\n")
exit(1)
# parse statements
self.compileStatements()
# parse '}'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == '}':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("inValid while define!\n")
exit(1)
self.upperLevel()
self.compileOut("</whileStatement>\n")
return True
def compileReturn(self):
self.compileOut("<returnStatement>\n")
self.lowerLevel()
# parse return
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "return":
self.compileOutElement(self.parser.tokenType(),self.parser.keyWord())
self.parser.advance()
else:
print("valid if return statement!\n")
exit(1)
# parse expression list
if not (self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ';'):
self.compileExpression()
# parse ';'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ';':
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid if return statement!\n")
exit(1)
self.upperLevel()
self.compileOut("</returnStatement>\n")
return True
def compileIf(self):
self.compileOut("<ifStatement>\n")
self.lowerLevel()
# parse if
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "if":
self.compileOutElement(self.parser.tokenType(),self.parser.keyWord())
self.parser.advance()
else:
print("valid if define!\n")
exit(1)
# parse '('
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "(":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid if define!\n")
exit(1)
# parse expression
self.compileExpression()
# parse ')'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ")":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid if define!\n")
exit(1)
# parse '{'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "{":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid if define!\n")
exit(1)
# parse statements
self.compileStatements()
# parse '}'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "}":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid if define!\n")
exit(1)
# parse else
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD and \
self.parser.keyWord() == "else":
# parse 'else'
self.compileOutElement(self.parser.tokenType(),self.parser.keyWord())
self.parser.advance()
# parse '{'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "{":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid if define!\n")
exit(1)
# parse statements
self.compileStatements()
# parse '}'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == "}":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid if define!\n")
exit(1)
self.upperLevel()
self.compileOut("</ifStatement>\n")
return
def compileExpression(self):
self.compileOut("<expression>\n")
self.lowerLevel()
# parse term
self.compileTerm()
# parse op
while self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
(self.parser.symbol() == "+" or self.parser.symbol() == "-" or \
self.parser.symbol() == "*" or self.parser.symbol() == "/" or \
self.parser.symbol() == "&" or self.parser.symbol() == "|" or \
self.parser.symbol() == ">" or self.parser.symbol() == "<" or \
self.parser.symbol() == "="):
# parse op
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
# parse term
self.compileTerm()
self.upperLevel()
self.compileOut("</expression>\n")
return
def compileTerm(self):
self.compileOut("<term>\n")
self.lowerLevel()
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_INT_CONST:
# parse int const
self.compileOutElement(self.parser.tokenType(),self.parser.intVal())
self.parser.advance()
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_STRING_CONST:
# parse string const
self.compileOutElement(self.parser.tokenType(),self.parser.stringVal())
self.parser.advance()
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_KEYWORD:
# parse keword const
if self.parser.keyWord() == "true" or self.parser.keyWord() == "false" or \
self.parser.keyWord() == "null" or self.parser.keyWord() == "this":
self.compileOutElement(self.parser.tokenType(),self.parser.keyWord())
self.parser.advance()
else:
print("inValid expression define!\n")
exit(1)
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL:
if self.parser.symbol() == "(":
# parse '('
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
# parse expression
self.compileExpression()
# parse ')'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
self.parser.symbol() == ")":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid term define!\n")
exit(1)
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and \
(self.parser.symbol() == "-" or self.parser.symbol() == "~"):
# parse unaryOp
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
# parse term
self.compileTerm()
else:
print("valid term define!\n")
exit(1)
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
# parse subroutineName or varName
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
# parse expression
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == "[":
# parse '['
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
# parse expression
self.compileExpression()
# parse ']'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == "]":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid term define!\n")
exit(1)
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == ".":
# parse '.'
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
# parse subroutineName
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_IDENTIFIER:
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid term define!\n")
exit(1)
# parse '('
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == "(":
self.compileOutElement(self.parser.tokenType(),self.parser.symbol())
self.parser.advance()
else:
print("valid term define!\n")
exit(1)
# parse expressList
self.compileExpressionList()
# parse ')'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == ")":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid term define!\n")
exit(1)
elif self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == "(":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
# parse expressList
self.compileExpressionList()
# parse ')'
if self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL and self.parser.symbol() == ")":
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
else:
print("valid term define!\n")
exit(1)
self.upperLevel()
self.compileOut("</term>\n")
return True
def compileExpressionList(self):
self.compileOut("<expressionList>\n")
self.lowerLevel()
if self.parser.symbol() == ')' and \
self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL:
self.upperLevel()
self.compileOut("</expressionList>\n")
return True
# parse expression
self.compileExpression()
# parse `, expression`
while self.parser.symbol() == ',' and \
self.parser.tokenType() == TOKEN_TYPE.TOKEN_SYMBOL:
# parse ','
self.compileOutElement(self.parser.tokenType(),self.parser.currToken())
self.parser.advance()
# parse expression
self.compileExpression()
self.upperLevel()
self.compileOut("</expressionList>\n")
return
def main(path):
    """Run JackAnalyzer on a single .jack file, or on every .jack file
    directly inside a directory.

    `path` may name a file or a directory; anything that does not exist
    prints "invalid path". (Parameter renamed from `input`, which shadowed
    the builtin; callers pass it positionally.)
    """
    if os.path.exists(path):
        if os.path.isdir(path):
            for f in os.listdir(path):
                filename = path + "/" + f
                # bug fix: find(".jack") >= 0 also matched names such as
                # "x.jack.bak"; endswith matches true .jack suffixes only
                if filename.endswith(".jack"):
                    JackAnalyzer(filename)
        else:
            JackAnalyzer(path)
    else:
        print("invalid path")
if __name__ == "__main__":
main(sys.argv[1]) | [
"mml1106@126.com"
] | mml1106@126.com |
2fab7f0d353b7e9fa2819146c61ec7b6bc83105a | 0cd096c51dda831ee64c405c7a994d18b0fe783f | /components/joystick.py | 792ba5075cc46b5ada499bf3459d4285c93c6d5d | [] | no_license | Denrur/ecs_turn_based | db1797ed787f644f334e7c08450139febf5ae7ae | 79ff9ea23fe74db7936d81afce8ddca4f0541e6f | refs/heads/master | 2020-07-12T22:47:09.800235 | 2019-11-06T12:47:24 | 2019-11-06T12:47:24 | 204,926,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | from bearlibterminal import terminal as blt
class Joystick:
    """Translates bearlibterminal key codes into game-action dictionaries.

    handle_player_turn_keys returns an action dict such as {'move': (dx, dy)}
    and, for most actions, mirrors it in self.action for later inspection.
    """

    def __init__(self):
        # last action produced; NOTE(review): a few branches below return an
        # action without updating this attribute — confirm that is intended
        self.action = None

    def handle_player_turn_keys(self, key):
        """Map a pressed key to an action dict; unknown keys yield {}."""
        # 8-way movement: arrows / W-A-D-X, diagonals on Q-E-Z-C
        if key == blt.TK_UP or key == blt.TK_W:
            self.action = {'move': (0, -1)}
            return {'move': (0, -1)}
        elif key == blt.TK_DOWN or key == blt.TK_X:
            self.action = {'move': (0, 1)}
            return {'move': (0, 1)}
        elif key == blt.TK_LEFT or key == blt.TK_A:
            self.action = {'move': (-1, 0)}
            return {'move': (-1, 0)}
        elif key == blt.TK_RIGHT or key == blt.TK_D:
            self.action = {'move': (1, 0)}
            return {'move': (1, 0)}
        elif key == blt.TK_Q:
            self.action = {'move': (-1, -1)}
            return {'move': (-1, -1)}
        elif key == blt.TK_E:
            self.action = {'move': (1, -1)}
            return {'move': (1, -1)}
        elif key == blt.TK_Z:
            self.action = {'move': (-1, 1)}
            return {'move': (-1, 1)}
        elif key == blt.TK_C:
            # bug fix: self.action was set to (-1, 1) while (1, 1) was
            # returned; both now agree on the down-right diagonal
            self.action = {'move': (1, 1)}
            return {'move': (1, 1)}
        if key == blt.TK_G:
            self.action = {'pickup': True}
            return {'pickup': True}
        if key == blt.TK_MOUSE_SCROLL:
            return {'scroll': True}
        elif key == blt.TK_I:
            self.action = {'show_inventory': True}
            return {'show_inventory': True}
        elif key == blt.TK_O:
            self.action = {'drop_inventory': True}
            return {'drop_inventory': True}
        elif key == blt.TK_S:
            self.action = {'pass': True}
            return {'pass': True}
        if key == blt.TK_RETURN and blt.TK_ALT:
            # NOTE(review): blt.TK_ALT is a truthy constant, so this tests
            # Enter only; checking Alt state needs blt.state(blt.TK_ALT) —
            # left as-is pending confirmation against the blt API
            return {'fullscreen': True}
        elif key == blt.TK_ESCAPE:
            return {'exit': True}
        if key == 133:
            # presumably a raw mouse-event code — TODO confirm against blt docs
            return {'mouse': True}
        return {}
"denrurak@gmail.com"
] | denrurak@gmail.com |
dd901b37ae78074d1b136ce7ad9d125fb38bfa9b | 1f38af9bae11acbe20dd8f5057b374b9760e6659 | /pyscf/geomopt/geometric_solver.py | 6e63b860d5f970435b404aca3d39f5e5b97bdb6f | [
"Apache-2.0"
] | permissive | highlight0112/pyscf | d36104ef727f593d46fbfd3e5d865c6cd0316d84 | 4afbd42bad3e72db5bb94d8cacf1d5de76537bdd | refs/heads/master | 2020-03-25T01:16:59.927859 | 2019-03-06T01:11:59 | 2019-03-06T01:11:59 | 143,229,588 | 0 | 0 | Apache-2.0 | 2019-03-06T01:12:00 | 2018-08-02T02:05:59 | Python | UTF-8 | Python | false | false | 5,188 | py | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Interface to geomeTRIC library https://github.com/leeping/geomeTRIC
'''
import tempfile
import numpy
import geometric
import geometric.molecule
#from geometric import molecule
from pyscf import lib
from pyscf.geomopt.addons import as_pyscf_method, dump_mol_geometry
from pyscf import __config__
INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True)
ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True)
class PySCFEngine(geometric.engine.Engine):
    # Adapter exposing a PySCF gradient scanner to geomeTRIC's optimizer:
    # geomeTRIC drives the geometry, calc_new supplies energy + gradient.
    def __init__(self, scanner):
        # Mirror the PySCF Mole geometry into a geomeTRIC Molecule object.
        molecule = geometric.molecule.Molecule()
        mol = scanner.mol
        molecule.elem = [mol.atom_symbol(i) for i in range(mol.natm)]
        # Molecule is the geometry parser for a bunch of formats which use
        # Angstrom for Cartesian coordinates by default.
        molecule.xyzs = [mol.atom_coords()*lib.param.BOHR] # In Angstrom
        super(PySCFEngine, self).__init__(molecule)
        self.scanner = scanner
        # step counter, incremented on every calc_new callback
        self.cycle = 0

    def calc_new(self, coords, dirname):
        # geomeTRIC callback: evaluate energy and nuclear gradient at the
        # proposed geometry. `coords` arrive flattened, in Bohr.
        scanner = self.scanner
        mol = scanner.mol
        lib.logger.note(scanner, '\nGeometry optimization step %d', self.cycle)
        self.cycle += 1

        # geomeTRIC handles coords and gradients in atomic unit
        coords = coords.reshape(-1,3)
        if scanner.verbose >= lib.logger.NOTE:
            # echo the geometry in Angstrom for the log
            dump_mol_geometry(self.scanner.mol, coords*lib.param.BOHR)

        mol.set_geom_(coords, unit='Bohr')
        energy, gradient = scanner(mol)
        if scanner.assert_convergence and not scanner.converged:
            raise RuntimeError('Nuclear gradients of %s not converged' % scanner.base)
        return energy, gradient.ravel()
def kernel(method, assert_convergence=ASSERT_CONV,
           include_ghost=INCLUDE_GHOST, constraints=None, **kwargs):
    '''Optimize geometry with geomeTRIC library for the given method.

    To adjust the convergence threshold, parameters can be set in kwargs as
    below:

    .. code-block:: python
        conv_params = {  # They are default settings
            'convergence_energy': 1e-6,  # Eh
            'convergence_grms': 3e-4,    # Eh/Bohr
            'convergence_gmax': 4.5e-4,  # Eh/Bohr
            'convergence_drms': 1.2e-3,  # Angstrom
            'convergence_dmax': 1.8e-3,  # Angstrom
        }
        from pyscf import geometric_solver
        geometric_solver.optimize(method, **conv_params)

    Returns a copy of method.mol carrying the optimized geometry.
    '''
    # Accept either a ready-made gradient scanner or any method object that
    # can produce one via nuc_grad_method().
    if isinstance(method, lib.GradScanner):
        g_scanner = method
    elif getattr(method, 'nuc_grad_method', None):
        g_scanner = method.nuc_grad_method().as_scanner()
    else:
        raise NotImplementedError('Nuclear gradients of %s not available' % method)
    if not include_ghost:
        # Restrict the gradient to real atoms (nonzero nuclear charge).
        g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0]
    g_scanner.assert_convergence = assert_convergence

    # geomeTRIC insists on an input file name even with a custom engine.
    tmpf = tempfile.mktemp(dir=lib.param.TMPDIR)
    m = geometric.optimize.run_optimizer(customengine=PySCFEngine(g_scanner),
                                         input=tmpf, constraints=constraints,
                                         **kwargs)
#FIXME: geomeTRIC library keeps running until converged. We need a function
# to terminate the program even not converged.
    conv = True
    #return conv, method.mol.copy().set_geom_(m.xyzs[-1], unit='Bohr')
    return method.mol.copy().set_geom_(m.xyzs[-1], unit='Angstrom')
optimize = kernel

del(INCLUDE_GHOST, ASSERT_CONV)
if __name__ == '__main__':
    # Smoke test: optimize an ethanol geometry at several levels of theory
    # (HF with loosened thresholds, then PBE, MP2 and CCSD with defaults).
    from pyscf import gto
    from pyscf import scf, dft, cc, mp
    mol = gto.M(atom='''
C  1.1879  -0.3829  0.0000
C  0.0000   0.5526  0.0000
O -1.1867  -0.2472  0.0000
H -1.9237   0.3850  0.0000
H  2.0985   0.2306  0.0000
H  1.1184  -1.0093  0.8869
H  1.1184  -1.0093 -0.8869
H -0.0227   1.1812  0.8852
H -0.0227   1.1812 -0.8852
                ''',
                basis='3-21g')

    mf = scf.RHF(mol)
    # Loosened convergence thresholds keep this check fast.
    conv_params = {
        'convergence_energy': 1e-4,  # Eh
        'convergence_grms': 3e-3,    # Eh/Bohr
        'convergence_gmax': 4.5e-3,  # Eh/Bohr
        'convergence_drms': 1.2e-2,  # Angstrom
        'convergence_dmax': 1.8e-2,  # Angstrom
    }
    mol1 = optimize(mf, **conv_params)
    # Reference energies: initial vs optimized geometry.
    print(mf.kernel() - -153.219208484874)
    print(scf.RHF(mol1).kernel() - -153.222680852335)

    mf = dft.RKS(mol)
    mf.xc = 'pbe,'
    mf.conv_tol = 1e-7
    mol1 = optimize(mf)

    mymp2 = mp.MP2(scf.RHF(mol))
    mol1 = optimize(mymp2)

    mycc = cc.CCSD(scf.RHF(mol))
    mol1 = optimize(mycc)
| [
"osirpt.sun@gmail.com"
] | osirpt.sun@gmail.com |
836924b6936b1ef655900c7fe38af18dfa353232 | 5c58587ebfbf56192b3dc6ed6f43bc002c8e2cff | /payments/api_clients/payeer.py | f71e3b1f3c7e7413e30dbc9dc0f7bf76fb48caaf | [] | no_license | hossamelneily/nexchange | fb9a812cfc72ac00b90cf64d6669a8129c2d2d4b | 6d69274cd3808989abe2f5276feb772d1f0fa8b4 | refs/heads/release | 2022-12-13T09:20:47.297943 | 2019-02-12T08:20:34 | 2019-02-12T08:20:34 | 210,064,740 | 1 | 2 | null | 2022-12-09T00:54:01 | 2019-09-21T23:19:34 | Python | UTF-8 | Python | false | false | 2,487 | py | from payments.api_clients.base import BasePaymentApi
import requests
import json
class PayeerAPIClient(BasePaymentApi):
    """Thin client for the Payeer HTTP API.

    Documentation: http://docs.payeercom.apiary.io/#
    """

    def __init__(self, account='12345', apiId='12345', apiPass='12345',
                 url='https://payeer.com/ajax/api/api.php'):
        self.account = account
        self.apiId = apiId
        self.apiPass = apiPass
        self.url = url

    def _base_payload(self, **extra):
        # Every endpoint authenticates with the same credential triple;
        # endpoint-specific fields are merged on top.
        payload = {
            'account': self.account,
            'apiId': self.apiId,
            'apiPass': self.apiPass,
        }
        payload.update(extra)
        return payload

    def authorization_check(self):
        """Validate the configured credentials; returns the raw response."""
        return requests.post(self.url, self._base_payload())

    def balance_check(self):
        """Query the account balance; returns the raw response."""
        return requests.post(self.url, self._base_payload(action='balance'))

    def get_transaction_history(self, from_date=None, to_date=None,
                                page_size=50, sort='desc',
                                trans_type='incoming'):
        """Return the decoded 'history' payload, or 'errors' on failure."""
        from_date, to_date = self.get_default_ranges(from_date, to_date)
        # 'to' is deliberately not sent because it is not UTC on Payeer side.
        payload = self._base_payload(action='history', sort=sort,
                                     count=page_size, type=trans_type)
        payload['from'] = from_date
        response = requests.post(self.url, payload)
        content = json.loads(response.content.decode('utf-8'))
        try:
            return content['history']
        except KeyError:
            return content['errors']

    def transfer_funds(self, currency_in=None, currency_out=None, amount=None,
                       receiver=None, comment=None):
        """ http://docs.payeercom.apiary.io/#reference/0/transferring-funds """
        payload = self._base_payload(action='transfer', curIn=currency_in,
                                     curOut=currency_out, comment=comment,
                                     to=receiver)
        payload['sum'] = amount
        response = requests.post(self.url, payload)
        return json.loads(response.content.decode('utf-8'))
| [
"oleg@nexchange.co.uk"
] | oleg@nexchange.co.uk |
a1ccf5b2c9bf663dea0c43f1ea78d0c558b05a2c | c744a0c0abb6a09144329deca0cfec542e0a147c | /trainw_keras.py | 08b3283e6097cddef24b8a68e41a2a81c355e20a | [] | no_license | pattywan234/Human-Activity-Recognition-HAR | c627421209910905be04035672e7f5158fd4a9bb | 636e0aa843350e7b61b5667b96ddcfdba009b82b | refs/heads/master | 2023-06-03T02:25:35.675593 | 2021-06-24T02:47:48 | 2021-06-24T02:47:48 | 235,273,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | # from numpy import array
import numpy as np
from LSTM_model05 import vanilla_LSTM, stacked_LSTM, bi_LSTM

# Load the pre-split WESAD (subject S2) training and test tensors.
# X_* have shape (samples, time_step, features); y_* are the labels.
X_train = np.load('wesad/S2/Normalize/label_selected/train_keras/X_train.npy')
X_test = np.load('wesad/S2/Normalize/label_selected/train_keras/X_test.npy')
y_train = np.load('wesad/S2/Normalize/label_selected/train_keras/y_train.npy')
y_test = np.load('wesad/S2/Normalize/label_selected/train_keras/y_test.npy')

# Shared hyper-parameters for all three models.
n_hidden = 128
time_step = 200
n_feature = len(X_train[2][1])
epoch = 100

# vanilla LSTM: train, then predict on the held-out set
vlstm = vanilla_LSTM(n_hidden, time_step, n_feature)
vlstm.fit(X_train, y_train, epochs=epoch, verbose=0)
vhat = vlstm.predict(X_test, verbose=0)
print('predicted result of vanilla LSTM', vhat)

# stacked LSTM (epochs previously hard-coded as 100, which equals `epoch`)
slstm = stacked_LSTM(n_hidden, time_step, n_feature)
slstm.fit(X_train, y_train, epochs=epoch, verbose=0)
shat = slstm.predict(X_test, verbose=0)
print('predicted result of stacked LSTM', shat)

# bidirectional LSTM
blstm = bi_LSTM(n_hidden, time_step, n_feature)
blstm.fit(X_train, y_train, epochs=epoch, verbose=0)
bhat = blstm.predict(X_test, verbose=0)
print('predicted result of Bidireectional LSTM', bhat)

# NOTE(review): the save paths begin with 'wesad/wesad/...' while every load
# above uses 'wesad/S2/...' — looks like a duplicated path segment; confirm
# before changing, as downstream scripts may read from these locations.
np.save('wesad/wesad/S2/Normalize/label_selected/train_keras/v_result.npy', vhat)
np.save('wesad/wesad/S2/Normalize/label_selected/train_keras/s_result.npy', shat)
np.save('wesad/wesad/S2/Normalize/label_selected/train_keras/b_result.npy', bhat)
| [
"noreply@github.com"
] | noreply@github.com |
3f1b92b2238951235a205e6a99611b26200bd26b | a03fad402b5380c290737ea31d2419bad404fe26 | /enfants/urls.py | 51b80d140f9125d428db40257faa56ef29ed9bc0 | [] | no_license | DavidS1106/pfe_backend | 982986ecf52b6c9b73ea1d45f55662841d51de60 | 7a66f8f1573488fa32c643fd1a3697f2fa4a3d65 | refs/heads/master | 2023-04-13T23:04:51.432942 | 2021-05-10T21:02:41 | 2021-05-10T21:02:41 | 276,981,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('enfants', views.EnfantsView, basename='enfant')
router.register('logged_enfant', views.logged_enfant, basename='logged_enfant')
router.register('non_logged_enfant', views.non_logged_enfant, basename='non_logged_enfant')
router.register('info_supplementaire', views.info_supplementaireview, basename='info_supplementaire')
router.register('personne_contact', views.personne_contactview, basename='personne_contact')
router.register('handicaps', views.HandicapsView, basename='handicap')
router.register('handicaps_enfants', views.HandicapsEnfantsView, basename='handicap_enfant')
urlpatterns = [
path('', include(router.urls)),
] | [
"david.sabo@hotmail.be"
] | david.sabo@hotmail.be |
2c60324b3fa048f21d4ddb7e4a4d608d2f4ae9fe | a8fa4a499c44dce9a82e768edc82bdd193797128 | /ScrapePlugins/Crunchyroll/Run.py | 072c151bc74086a6fe1c380808eb0b7785a732e7 | [] | no_license | oliuz/MangaCMS | d8b2e44922955f6b9310fb6e189115f1985f2e93 | 7e2a710a56248261ab01686d3e586c36ce4a857d | refs/heads/master | 2020-12-28T19:46:41.265347 | 2016-08-27T23:37:47 | 2016-08-27T23:37:47 | 67,316,457 | 1 | 0 | null | 2016-09-03T23:36:21 | 2016-09-03T23:36:21 | null | UTF-8 | Python | false | false | 505 | py |
from .DbLoader import DbLoader
from .ContentLoader import ContentLoader
import runStatus
import ScrapePlugins.RunBase
class Runner(ScrapePlugins.RunBase.ScraperBase):
    """Scraper runner for the CrunchyRoll source: first refreshes the
    database listing, then (unless a shutdown was requested) fetches the
    actual content."""

    loggerPath = "Main.Manga.CrunchyRoll.Run"
    pluginName = "CrunchyRoll"

    def _go(self):
        # Phase 1: update the database of known items.
        fl = DbLoader()
        fl.go()
        fl.closeDB()
        # Bail out between phases if the process was asked to stop.
        if not runStatus.run:
            return
        # Phase 2: download the content for the items found.
        cl = ContentLoader()
        cl.go()
        cl.closeDB()

if __name__ == "__main__":
    import utilities.testBase as tb

    with tb.testSetup():
        run = Runner()
        run.go()
| [
"something@fake-url.com"
] | something@fake-url.com |
a8714593d0c65c179984a8185652a1628d449660 | cf398ec3d87f334ecc9aadf23de6813b729b4633 | /src/tests/test_logging.py | 9462dbcd83175d57d3de26cd2373117f6315e65c | [
"MIT",
"MS-PL",
"LicenseRef-scancode-generic-cla"
] | permissive | fuhuifang/confidential-ml-utils | 71a385efb81eb27acd59c6152584313176165092 | 6b530a893fc650a5cf2bb68d560c2833ac1bf014 | refs/heads/main | 2023-05-05T01:46:23.576045 | 2021-03-01T16:19:14 | 2021-03-01T16:19:14 | 344,288,160 | 0 | 0 | MIT | 2021-03-03T23:05:16 | 2021-03-03T23:05:15 | null | UTF-8 | Python | false | false | 3,609 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import confidential_ml_utils
from confidential_ml_utils.constants import DataCategory
import io
import logging
import pytest
import re
import sys
def test_basic_config():
    """Smoke test: logging before and after basicConfig() must not raise."""
    # Emit one record before any configuration exists.
    logging.warning("before basic config")
    # Install the default root handler, then log through root and a child.
    logging.basicConfig()
    logging.warning("warning from test_basic_config")
    foo_logger = logging.getLogger("foo")
    foo_logger.warning("warning from foo logger")
class StreamHandlerContext:
    """Context manager that temporarily attaches a StreamHandler (with the
    given format string) to a logger. str(ctx) returns everything the
    handler captured so far."""

    def __init__(self, log, fmt: str):
        self.logger = log
        self.stream = io.StringIO()
        handler = logging.StreamHandler(self.stream)
        handler.setLevel(log.getEffectiveLevel())
        handler.setFormatter(logging.Formatter(fmt))
        self.handler = handler

    def __enter__(self):
        # Start capturing records emitted through self.logger.
        self.logger.addHandler(self.handler)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Detach and flush so the buffer holds the complete output.
        self.logger.removeHandler(self.handler)
        self.handler.flush()

    def __str__(self):
        return self.stream.getvalue()
@pytest.mark.parametrize("level", ["debug", "info", "warning", "error", "critical"])
def test_data_category_and_log_info_works_as_expected(level):
    # At every level, PUBLIC records must carry the "SystemLog:" prefix and
    # PRIVATE (default-category) records must not appear with it.
    confidential_ml_utils.enable_confidential_logging()
    log = logging.getLogger()
    log.setLevel(level.upper())
    assert isinstance(log, confidential_ml_utils.logging.ConfidentialLogger)

    with StreamHandlerContext(
        log, "%(prefix)s%(levelname)s:%(name)s:%(message)s"
    ) as context:
        func = getattr(log, level)
        func("PRIVATE")
        func("public", category=DataCategory.PUBLIC)

    logs = str(context)
    assert re.search(r"^SystemLog\:.*public$", logs, flags=re.MULTILINE)
    assert not re.search(r"^SystemLog\:.*\:PRIVATE", logs, flags=re.MULTILINE)
@pytest.mark.parametrize("exec_type,message", [(ArithmeticError, "1+1 != 3")])
def test_exception_works_as_expected(exec_type, message):
    # Logging from inside an exception handler must still apply the
    # public-data prefix.
    confidential_ml_utils.enable_confidential_logging()
    log = logging.getLogger()
    assert isinstance(log, confidential_ml_utils.logging.ConfidentialLogger)

    with StreamHandlerContext(
        log, "%(prefix)s%(levelname)s:%(name)s:%(message)s"
    ) as context:
        try:
            raise exec_type(message)
        except exec_type:
            log.error("foo", category=DataCategory.PUBLIC)

    logs = str(context)
    assert re.search(r"^SystemLog\:.*foo$", logs, flags=re.MULTILINE)
def test_all_the_stuff():
    # Smoke test: public, explicit-private and default-category logging
    # must all run without raising.
    confidential_ml_utils.enable_confidential_logging()
    log = logging.getLogger("foo")
    log.info("public", category=DataCategory.PUBLIC)
    log.info("PRIVATE", category=DataCategory.PRIVATE)
    log.info("PRIVATE2")
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Requires Python >= 3.8")
def test_enable_confidential_logging_sets_force():
    # Pytest adds handlers to the root logger by default.
    initial_handlers = list(logging.root.handlers)
    confidential_ml_utils.enable_confidential_logging()
    # basicConfig(force=True) (3.8+) should REPLACE, not append to, the
    # pre-existing root handlers.
    assert len(logging.root.handlers) == 1
    assert all(h not in logging.root.handlers for h in initial_handlers)
def test_warn_if_root_handlers_already_exist(capsys):
    # Pytest adds handlers to the root logger by default, so enabling
    # confidential logging here should emit the "already has handlers"
    # warning on stderr.
    confidential_ml_utils.enable_confidential_logging()

    # https://docs.pytest.org/en/stable/capture.html
    stderr = capsys.readouterr().err
    assert "SystemLog:The root logger already has handlers set!" in stderr
| [
"noreply@github.com"
] | noreply@github.com |
96f689b758e78d0bdd04395f059ac2cacaa4ac36 | 256e664847bf6a24d02b98f755348272f1e73368 | /merra/grid.py | d2a454a75ff7aab1101a7311ca434a67b3c75ecb | [
"BSD-3-Clause"
] | permissive | BRIK-Engenharia/merra | c4c5f9e3be5c6e534248c331bedfb9399d507a0d | 504d167b044a2cf236b727b6d0befcb435612e6a | refs/heads/master | 2021-10-21T14:01:43.391399 | 2019-03-04T10:46:25 | 2019-03-04T10:46:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | # The MIT License (MIT)
#
# Copyright (c) 2019, TU Wien
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The grid module implements the asymmetrical GMAO 0.5 x 0.625 grid
used in MERRA2 as a pygeogrids BasicGrid instance.
"""
import numpy as np
from pygeogrids.grids import BasicGrid
def create_merra_cell_grid():
    """
    Build the asymmetrical GMAO 0.5 x 0.625 grid used in MERRA2 as a
    pygeogrids BasicGrid, split into 5-degree cells.

    Returns
    -------
    BasicGrid instance
    """
    # Grid spacing: 0.625 deg in longitude, 0.5 deg in latitude.
    dlon, dlat = 0.625, 0.5

    # 361 latitude rows (both poles included) x 576 longitude columns.
    lats = np.arange(-90, 90 + dlat / 2, dlat)
    lons = np.arange(-180, 180, dlon)
    lon, lat = np.meshgrid(lons, lats)

    return BasicGrid(lon.flatten(), lat.flatten()).to_cell_grid(cellsize=5.)
| [
"felix.zaussinger@geo.tuwien.ac.at"
] | felix.zaussinger@geo.tuwien.ac.at |
08fa44a4100c48ec759dfc091669bab7cb0353f1 | 0805820a350d51e832e5a5be995a3046b0445a20 | /developer_test/migrations/0005_auto_20210513_1716.py | 9b2295dd42f55bd3f19053b990b44a588b15b724 | [] | no_license | nseetim/patricia_task | a36ffdac5c15c603c1231800815a62fc619b90ec | 85bf0b3e1d2fa2cef3d7450b7485af8efb078db1 | refs/heads/master | 2023-05-02T07:33:24.371578 | 2021-05-14T05:35:19 | 2021-05-14T05:35:19 | 367,241,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | # Generated by Django 3.1.5 on 2021-05-13 17:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (3.1.5): alters Transaction.user and adds the
    # RequestLogs model. Avoid hand-editing recorded operations.

    dependencies = [
        ('developer_test', '0004_auto_20210513_0013'),
    ]

    operations = [
        migrations.AlterField(
            model_name='transaction',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to=settings.AUTH_USER_MODEL),
        ),
        migrations.CreateModel(
            name='RequestLogs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('endpoint', models.CharField(max_length=100, null=True)),
                ('response_code', models.PositiveSmallIntegerField()),
                ('method', models.CharField(max_length=10, null=True)),
                ('remote_address', models.CharField(max_length=20, null=True)),
                ('exec_time', models.IntegerField(null=True)),
                ('date', models.DateTimeField(auto_now=True)),
                ('body_response', models.TextField()),
                ('body_request', models.TextField()),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"etimnseabasi@gmail.com"
] | etimnseabasi@gmail.com |
52bece35aa3f449fd4068d45847eb3aca3b36443 | 411eff94020c192d5e5f657fa6012232ab1d051c | /game/src/coginvasion/ai/AIBaseGlobal.py | e02f38f0e5f171a4dab307e0fed79073eeab559e | [] | no_license | xMakerx/cio-src | 48c9efe7f9a1bbf619a4c95a4198aaace78b8491 | 60b2bdf2c4a24d506101fdab1f51752d0d1861f8 | refs/heads/master | 2023-02-14T03:12:51.042106 | 2021-01-15T14:02:10 | 2021-01-15T14:02:10 | 328,268,776 | 1 | 0 | null | 2021-01-15T15:15:35 | 2021-01-09T23:51:37 | Python | UTF-8 | Python | false | false | 960 | py | from AIBase import AIBase
from direct.directnotify.DirectNotifyGlobal import directNotify
from panda3d.core import RescaleNormalAttrib, NodePath, Notify
# Install the AI-side ShowBase replacement and the usual Panda3D globals
# into builtins so every module can reach them without imports.
__builtins__['base'] = AIBase()
__builtins__['ostream'] = Notify.out()
__builtins__['run'] = base.run
__builtins__['taskMgr'] = base.taskMgr
__builtins__['jobMgr'] = base.jobMgr
__builtins__['eventMgr'] = base.eventMgr
__builtins__['messenger'] = base.messenger
__builtins__['bboard'] = base.bboard
__builtins__['config'] = base.config
__builtins__['directNotify'] = directNotify

# Headless scene-graph root: the AI has no window, but code still expects
# a global `render` NodePath to exist.
render = NodePath('render')
render.setAttrib(RescaleNormalAttrib.makeDefault())
render.setTwoSided(0)
__builtins__['render'] = render

from direct.showbase import Loader
base.loader = Loader.Loader(base)
__builtins__['loader'] = base.loader

directNotify.setDconfigLevels()

def inspect(anObject):
    # Lazy import so Tk panels are only loaded when actually inspecting.
    from direct.tkpanels import Inspector
    Inspector.inspect(anObject)

__builtins__['inspect'] = inspect

taskMgr.finalInit()
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
04521521cd080fa531cf3cecce5a57426136edae | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/w11.py | d34dec34a1f3c4bc9a0709f495d47f09c917783a | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code (print statements).
    # Prints the payload of a quoted argument list, or a blank line when
    # the quotes are empty. Expects the opening and closing quote marks to
    # be standalone whitespace-separated tokens.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the two quote tokens, keep the rest
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # just "" -> emit an empty line
            print
def main(fileName):
    # Python 2 code (print statements).
    # Interpret each line of the file: only lines whose first token is
    # 'w11' are valid; anything else prints ERROR and stops.
    # NOTE(review): a blank line would raise IndexError on data[0].
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'w11':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
e1c0280fa4672db1594118ed124f9ae4c0c5f5c0 | 0acfe39cbbf843e35696c4c5e312192ac7b7159b | /renrenribao/article/migrations/0002_article_created_time.py | e974de0e9f1617a4c14c24ba56fbcc7905085d90 | [] | no_license | tiant167/renrenribao | c2d8bdaa2ecdb60cecfdfb53a17e96c940851920 | 1b0a1fdcd5ce3fa97adc515c3178402ec869f018 | refs/heads/master | 2021-01-25T07:39:45.462791 | 2014-12-17T08:50:58 | 2014-12-17T08:50:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django: adds Article.created_time using a one-off
    # default so existing rows can be backfilled (preserve_default=False).

    dependencies = [
        ('article', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='created_time',
            field=models.DateTimeField(default=datetime.datetime(2014, 12, 14, 8, 40, 24, 800832, tzinfo=utc), auto_now_add=True),
            preserve_default=False,
        ),
    ]
| [
"haotianchai@wandoujia.com"
] | haotianchai@wandoujia.com |
7a0349daadc974318ba22b4c139e3450f7f2d011 | b008f826a1eaccdadaf7cba6d7a757f709d41ee6 | /salt-2016.3.2/salt/modules/data.py | 7263f1316eed85e060a86740fb98a706aace7ed1 | [
"Apache-2.0"
] | permissive | stephane-martin/salt-debian-packaging | 76b3c6f53bf908c230774abc3c35c0c8dc31d5dd | 4ec73750ba67bfe35a5bc0faa110f2bdec5c6a66 | refs/heads/master | 2020-07-21T14:58:31.913198 | 2016-09-04T18:18:57 | 2016-09-04T18:18:57 | 66,794,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,972 | py | # -*- coding: utf-8 -*-
'''
Manage a local persistent data structure that can hold any arbitrary data
specific to the minion
'''
from __future__ import absolute_import
# Import python libs
import os
import ast
import logging
# Import salt libs
import salt.utils
import salt.payload
# Import 3rd-party lib
import salt.ext.six as six
log = logging.getLogger(__name__)
def clear():
    '''
    Clear out all of the data in the minion datastore, this function is
    destructive!

    CLI Example:

    .. code-block:: bash

        salt '*' data.clear
    '''
    # Removing the backing file wipes the whole store; a missing or
    # unremovable file is treated as already cleared.
    datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
    try:
        os.remove(datastore_path)
    except (IOError, OSError):
        pass
    return True
def load():
    '''
    Return all of the data in the minion datastore

    CLI Example:

    .. code-block:: bash

        salt '*' data.load
    '''
    serial = salt.payload.Serial(__opts__)

    try:
        datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
        # Bug fix: use a context manager so the file handle is always
        # closed -- the old code opened it and never closed it, leaking a
        # descriptor on every call (dump() below already uses `with`).
        with salt.utils.fopen(datastore_path, 'rb') as fn_:
            return serial.load(fn_)
    except (IOError, OSError, NameError):
        # No datastore yet (or unreadable): behave as an empty store.
        return {}
def dump(new_data):
    '''
    Replace the entire datastore with a passed data structure

    CLI Example:

    .. code-block:: bash

        salt '*' data.dump '{'eggs': 'spam'}'
    '''
    if not isinstance(new_data, dict):
        # CLI input arrives as a string: evaluate the literal exactly once
        # (the old code ran ast.literal_eval twice) and reject anything
        # that is not a dict instead of raising out of the module.
        try:
            candidate = ast.literal_eval(new_data)
        except (SyntaxError, ValueError, TypeError):
            return False
        if not isinstance(candidate, dict):
            return False
        new_data = candidate

    try:
        datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
        with salt.utils.fopen(datastore_path, 'w+b') as fn_:
            serial = salt.payload.Serial(__opts__)
            serial.dump(new_data, fn_)
        return True
    except (IOError, OSError, NameError):
        return False
def update(key, value):
    '''
    Update a key with a value in the minion datastore

    CLI Example:

    .. code-block:: bash

        salt '*' data.update <key> <value>
    '''
    # Read-modify-write the whole datastore.
    contents = load()
    contents[key] = value
    dump(contents)
    return True
def getval(key):
    '''
    Get a value from the minion datastore

    .. deprecated:: Carbon
        Use ``get`` instead

    CLI Example:

    .. code-block:: bash

        salt '*' data.getval <key>
    '''
    # Deprecation shim: warn, then delegate to get().
    salt.utils.warn_until(
        'Carbon',
        'Support for \'getval\' has been deprecated and will be removed '
        'in Salt Carbon. Please use \'get\' instead.'
    )
    return get(key)
def getvals(*keylist):
    '''
    Get values from the minion datastore

    .. deprecated:: Carbon
        Use ``get`` instead

    CLI Example:

    .. code-block:: bash

        salt '*' data.getvals <key> [<key> ...]
    '''
    # Deprecation shim: warn, then delegate the whole tuple of keys to
    # get(), which accepts an iterable of keys.
    salt.utils.warn_until(
        'Carbon',
        'Support for \'getvals\' has been deprecated and will be removed '
        'in Salt Carbon. Please use \'get\' instead.'
    )
    return get(keylist)
def cas(key, value, old_value):
    '''
    Check and set a value in the minion datastore

    CLI Example:

    .. code-block:: bash

        salt '*' data.cas <key> <value> <old_value>
    '''
    contents = load()
    # Compare-and-swap: only write when the key exists and still holds
    # the expected old value.
    if key not in contents or contents[key] != old_value:
        return False
    contents[key] = value
    dump(contents)
    return True
def pop(key, default=None):
    '''
    Pop (return & delete) a value from the minion datastore

    .. versionadded:: 2015.5.2

    CLI Example:

    .. code-block:: bash

        salt '*' data.pop <key> "there was no val"
    '''
    # Remove the key (if present) and persist the shrunken store.
    contents = load()
    value = contents.pop(key, default)
    dump(contents)
    return value
def get(key, default=None):
    '''
    Get a (list of) value(s) from the minion datastore

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' data.get <key(s)>
    '''
    contents = load()
    # A single string key returns a single value...
    if isinstance(key, six.string_types):
        return contents.get(key, default)
    # ...an iterable of keys returns a list. Without a default, missing
    # keys are silently skipped; with one, they yield the default.
    if default is None:
        return [contents[k] for k in key if k in contents]
    return [contents.get(k, default) for k in key]
def keys():
    '''
    Get all keys from the minion datastore

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' data.keys
    '''
    # Expose the mapping's keys from a fresh snapshot of the store.
    contents = load()
    return contents.keys()
def values():
    '''
    Get values from the minion datastore

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' data.values
    '''
    # Expose the mapping's values from a fresh snapshot of the store.
    contents = load()
    return contents.values()
def items():
    '''
    Get items from the minion datastore

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' data.items
    '''
    # Expose the mapping's (key, value) pairs from a fresh snapshot.
    contents = load()
    return contents.items()
def has_key(key):
    '''
    Check if key is in the minion datastore

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' data.has_key <mykey>
    '''
    # Membership test against a fresh snapshot of the store.
    contents = load()
    return key in contents
| [
"stephane.martin@vesperal.eu"
] | stephane.martin@vesperal.eu |
eee4cab21177b67375dd31d211b70f0587198b8e | 0e014984751a44761864c79546939bc21d699752 | /edx_AI_Week9/search.py | ba613c6121722cbb407f881c34bbefb2ba79bd47 | [] | no_license | miguel-ossa/AI-Columbia-Exercises | 8c76bb0e469f8b55976b22b035867ea68041d4da | 25758f35bc7f93e8e3dece6fed5b6db7a4cf5c81 | refs/heads/master | 2020-03-29T12:14:05.010584 | 2019-06-02T07:32:29 | 2019-06-02T07:32:29 | 149,854,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,167 | py | """Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from __future__ import generators
from utils import *
import agents
import math, random, sys, time, bisect, string
class Problem:
    """The abstract class for a formal problem. You should subclass this and
    implement the method successor, and possibly __init__, goal_test, and
    path_cost. Then you will create instances of your subclass and solve them
    with the various search functions."""

    def __init__(self, initial, goal=None):
        """The constructor specifies the initial state, and possibly a goal
        state, if there is a unique goal. Your subclass's constructor can add
        other arguments."""
        self.initial = initial; self.goal = goal

    def successor(self, state):
        """Given a state, return a sequence of (action, state) pairs reachable
        from this state. If there are many successors, consider an iterator
        that yields the successors one at a time, rather than building them
        all at once. Iterators will work fine within the framework."""
        # abstract -- subclasses must override; the bare return yields None
        return

    def goal_test(self, state):
        """Return True if the state is a goal. The default method compares the
        state to self.goal, as specified in the constructor. Implement this
        method if checking against a single self.goal is not enough."""
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        """Return the cost of a solution path that arrives at state2 from
        state1 via action, assuming cost c to get up to state1. If the problem
        is such that the path doesn't matter, this function will only look at
        state2. If the path does matter, it will consider c and maybe state1
        and action. The default method costs 1 for every step in the path."""
        return c + 1

    def value(self):
        """For optimization problems, each state has a value. Hill-climbing
        and related algorithms try to maximize this value."""
        # abstract -- NOTE(review): takes no state argument, yet the
        # optimization searches below need a per-state score; confirm the
        # intended signature before relying on it.
        return
class Node:
    """A single node in a search tree: wraps a problem state together with
    the parent node it was reached from, the action that produced it, the
    accumulated path_cost (a.k.a. g), and its depth. Two paths to the same
    state yield two distinct Node objects. Search algorithms may stash
    extra attributes (such as f and h) on instances."""

    def __init__(self, state, parent=None, action=None, path_cost=0):
        "Create a search tree Node, derived from a parent by an action."
        self.state = state
        self.parent = parent
        self.action = action
        self.path_cost = path_cost
        # Root nodes sit at depth 0; children are one deeper than their parent.
        self.depth = parent.depth + 1 if parent else 0

    def __repr__(self):
        return "<Node %s>" % (self.state,)

    def path(self):
        "Create a list of nodes from the root to this node."
        node, chain = self, [self]
        while node.parent:
            chain.append(node.parent)
            node = node.parent
        return chain

    def expand(self, problem):
        "Return a list of nodes reachable from this node. [Fig. 3.8]"
        children = []
        for (act, next) in problem.successor(self.state):
            cost = problem.path_cost(self.path_cost, self.state, act, next)
            children.append(Node(next, self, act, cost))
        return children
class SimpleProblemSolvingAgent(agents.Agent):
    """Abstract framework for problem-solving agent. [Fig. 3.1]

    Subclasses must supply update_state, formulate_goal,
    formulate_problem and search.

    Bug fixes: the base-class call used the bare name `Agent` (a
    NameError -- only the `agents` module is imported), and `state`/`seq`
    were locals of program(), so reading them before assignment raised
    UnboundLocalError. Both now live on the instance."""

    def __init__(self):
        agents.Agent.__init__(self)
        self.state = []
        self.seq = []

        def program(percept):
            # Fold the new percept into the agent's model of the world.
            self.state = self.update_state(self.state, percept)
            if not self.seq:
                # No plan left: formulate a goal and search for a new one.
                goal = self.formulate_goal(self.state)
                problem = self.formulate_problem(self.state, goal)
                self.seq = self.search(problem)
            # Execute (and consume) the next planned action.
            return self.seq.pop(0)

        self.program = program
## Uninformed Search algorithms
def tree_search(problem, fringe):
    """Search the successors of a problem for a goal state.
    The queue discipline of `fringe` (which must start out empty)
    determines the search order. Repeated paths to a state are NOT
    detected. Returns a goal Node or None. [Fig. 3.8]"""
    fringe.append(Node(problem.initial))
    while fringe:
        candidate = fringe.pop()
        if problem.goal_test(candidate.state):
            return candidate
        fringe.extend(candidate.expand(problem))
    return None
def breadth_first_tree_search(problem):
    "Search the shallowest nodes in the search tree first. [p 74]"
    # FIFO fringe => nodes are expanded in breadth-first order.
    return tree_search(problem, FIFOQueue())
def depth_first_tree_search(problem):
    "Search the deepest nodes in the search tree first. [p 74]"
    # LIFO fringe (Stack) => nodes are expanded in depth-first order.
    return tree_search(problem, Stack())
def graph_search(problem, fringe):
    """Search the successors of a problem for a goal state, remembering
    every state already expanded so each is expanded at most once. The
    queue discipline of `fringe` (which must start out empty) determines
    the search order. Returns a goal Node or None. [Fig. 3.18]"""
    visited = set()
    fringe.append(Node(problem.initial))
    while fringe:
        node = fringe.pop()
        if problem.goal_test(node.state):
            return node
        if node.state not in visited:
            visited.add(node.state)
            fringe.extend(node.expand(problem))
    return None
def breadth_first_graph_search(problem):
    "Search the shallowest nodes in the search tree first. [p 74]"
    # FIFO fringe => breadth-first order, with repeated-state elimination.
    return graph_search(problem, FIFOQueue())
def depth_first_graph_search(problem):
    "Search the deepest nodes in the search tree first. [p 74]"
    # LIFO fringe (Stack) => depth-first order, with repeated-state elimination.
    return graph_search(problem, Stack())
def depth_limited_search(problem, limit=50):
    """Depth-first search that never expands nodes deeper than `limit`.
    Returns a goal Node, the string 'cutoff' if the depth limit was hit
    somewhere, or None if the limited space contains no solution.
    [Fig. 3.12]"""
    def recurse(node):
        # Goal check happens before the depth test, so a goal exactly at
        # the limit is still found.
        if problem.goal_test(node.state):
            return node
        if node.depth == limit:
            return 'cutoff'
        saw_cutoff = False
        for child in node.expand(problem):
            outcome = recurse(child)
            if outcome == 'cutoff':
                saw_cutoff = True
            elif outcome is not None:
                return outcome
        return 'cutoff' if saw_cutoff else None

    return recurse(Node(problem.initial))
def iterative_deepening_search(problem):
    """Run depth_limited_search with successively larger depth limits
    until something other than 'cutoff' comes back. [Fig. 3.13]
    NOTE(review): xrange/sys.maxint are Python 2 only, like the rest of
    this module."""
    for depth in xrange(sys.maxint):
        result = depth_limited_search(problem, depth)
        # Bug fix: compare by value, not identity -- `is not 'cutoff'`
        # silently relied on CPython string interning.
        if result != 'cutoff':
            return result
# Informed (Heuristic) Search
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have depth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    # memoize and PriorityQueue come from utils (imported via *).
    f = memoize(f, 'f')
    return graph_search(problem, PriorityQueue(min, f))

greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n).
def astar_search(problem, h=None):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search.
    Uses the pathmax trick: f(n) = max(f(n), g(n)+h(n))."""
    # Fall back to the problem's own heuristic (e.g. GraphProblem.h).
    h = h or problem.h
    def f(n):
        # pathmax: f never decreases along a path; getattr covers nodes
        # that have no cached f value yet.
        return max(getattr(n, 'f', -infinity), n.path_cost + h(n))
    return best_first_graph_search(problem, f)
## Other search algorithms
def recursive_best_first_search(problem):
    """Recursive best-first search [Fig. 4.5].
    Returns a goal Node, or None if no solution exists.

    Bug fixes relative to the original draft:
    * RBFS now consistently returns a (result, f-limit) pair -- it used
      to return a bare node on success but a tuple elsewhere.
    * Children come from node.expand(problem), and their heuristic from
      problem.h (nodes have no precomputed .h attribute).
    * The f-limit passed down is bounded by the f-VALUE of the second
      best successor (infinity when there is only one), not the Node.
    * The outer call seeds the root's f value before recursing.
    """
    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0    # the returned f-limit is irrelevant here
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            # pathmax: a child's f can never drop below its parent's.
            s.f = max(s.path_cost + problem.h(s), node.f)
        while True:
            successors.sort(key=lambda s: s.f)   # lowest f first
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    root = Node(problem.initial)
    root.f = problem.h(root)
    result, bestf = RBFS(problem, root, infinity)
    return result
def hill_climbing(problem):
    """From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better. [Fig. 4.11]

    Bug fixes: the loop expanded an undefined name (`node` instead of
    `current`) and scored nodes with the nonexistent Node.value method.
    NOTE(review): assumes concrete problems implement problem.value(state)
    -- the abstract Problem.value above takes no state argument; confirm
    the intended signature."""
    current = Node(problem.initial)
    while True:
        neighbors = current.expand(problem)
        if not neighbors:
            # Nowhere to go: the current state is trivially a local maximum.
            return current.state
        neighbor = argmax(neighbors, lambda n: problem.value(n.state))
        if problem.value(neighbor.state) <= problem.value(current.state):
            return current.state
        current = neighbor
def exp_schedule(k=20, lam=0.005, limit=100):
    "One possible schedule function for simulated annealing"
    def schedule(t):
        # Exponential decay of the temperature until `limit` steps,
        # zero afterwards (which tells the annealer to stop).
        return k * math.exp(-lam * t) if t < limit else 0
    return schedule
def simulated_annealing(problem, schedule=exp_schedule()):
    """Simulated annealing search [Fig. 4.5].
    Returns the Node the search froze on (when the temperature hits 0).

    Bug fixes: the original expanded `expand(node. problem)` -- both an
    undefined name and an attribute-access typo; it also used the
    Python-2-only xrange(sys.maxint). This version expands `current`,
    guards against states with no successors, and counts steps with a
    plain unbounded loop."""
    current = Node(problem.initial)
    t = 0
    while True:
        T = schedule(t)
        if T == 0:
            return current
        successors = current.expand(problem)
        if not successors:
            return current
        next = random.choice(successors)
        # NOTE(review): as in the original, delta_e > 0 (a path-cost
        # INCREASE) counts as an improvement -- confirm the intended sign.
        delta_e = next.path_cost - current.path_cost
        if delta_e > 0 or probability(math.exp(delta_e / T)):
            current = next
        t += 1
def online_dfs_agent(a):
    "[Fig. 4.12]"
    # TODO: unimplemented stub in this release of the code.
    pass #### more
def lrta_star_agent(a):
    "[Fig. 4.12]"
    # TODO: unimplemented stub in this release of the code.
    pass #### more
# Genetic Algorithm
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.0, n=20):
    """Call genetic_algorithm on the appropriate parts of a problem.
    This requires that the problem has a successor function that generates
    reasonable states, and that it has a path_cost function that scores states.
    We use the negative of the path_cost function, because costs are to be
    minimized, while genetic-algorithm expects a fitness_fn to be maximized."""
    # Bug fix: the Problem API stores the start state in `.initial`;
    # there is no `.initial_state` attribute.
    states = [s for (a, s) in problem.successor(problem.initial)[:n]]
    random.shuffle(states)
    # Deliberately replaces the caller's fitness_fn with negated path cost
    # (see docstring).
    fitness_fn = lambda s: - problem.path_cost(0, s, None, s)
    return genetic_algorithm(states, fitness_fn, ngen, pmut)
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.0):
    """Evolve `population` for `ngen` generations and return the fittest
    individual. [Fig. 4.7]

    Bug fixes: the inner loop iterated `for i in len(population)` (a
    TypeError), called a misspelled `random_weighted_selections`, and
    mutated with probability 1-pmut instead of pmut (so the default
    pmut=0.0 mutated EVERY child)."""
    def reproduce(p1, p2):
        # Single-point crossover at a random cut position.
        c = random.randrange(len(p1))
        return p1[:c] + p2[c:]

    for generation in range(ngen):
        new_population = []
        for i in range(len(population)):
            p1, p2 = random_weighted_selection(population, 2, fitness_fn)
            child = reproduce(p1, p2)
            if random.uniform(0, 1) < pmut:
                # NOTE(review): assumes individuals implement .mutate().
                child.mutate()
            new_population.append(child)
        population = new_population
    return argmax(population, fitness_fn)
def random_weighted_selection(seq, n, weight_fn):
    """Pick n elements of seq, weighted according to weight_fn.
    That is, apply weight_fn to each element of seq, add up the total.
    Then choose an element e with probability weight[e]/total.
    Repeat n times, with replacement."""
    # Cumulative weight table: totals[i] = sum of weights of seq[:i+1].
    totals = []
    running = 0
    for item in seq:
        running += weight_fn(item)
        totals.append(running)

    picks = []
    for _ in range(n):
        r = random.uniform(0, totals[-1])
        # First index whose cumulative weight strictly exceeds r.
        idx = bisect.bisect_right(totals, r)
        if idx < len(totals):
            picks.append(seq[idx])
    return picks
# The remainder of this file implements examples for the search algorithms.
# Graphs and Graph Problems
class Graph:
    """A graph maps nodes (vertices) to {neighbor: edge-length} dicts.
    Construct one with something like:

        g = Graph({'A': {'B': 1, 'C': 2})

    (3 nodes; an edge of length 1 from A to B and of length 2 from A to C).
    Passing directed=False also adds every inverse link, and the graph
    stays undirected: later connect() calls add both directions too.
    Use g.nodes() for the node list, g.get('A') for A's outgoing-link
    dict, and g.get('A', 'B') for a single link length. 'Lengths' can be
    any object, and nodes any hashable object."""

    def __init__(self, dict=None, directed=True):
        self.dict = dict or {}
        self.directed = directed
        if not directed:
            self.make_undirected()

    def make_undirected(self):
        "Make a digraph into an undirected graph by adding symmetric edges."
        for node_a in self.dict.keys():
            for (node_b, dist) in self.dict[node_a].items():
                self.connect1(node_b, node_a, dist)

    def connect(self, A, B, distance=1):
        """Add a link from A to B of the given length; for an undirected
        graph the inverse link is added as well."""
        self.connect1(A, B, distance)
        if not self.directed:
            self.connect1(B, A, distance)

    def connect1(self, A, B, distance):
        "Add a one-way link from A to B of the given length."
        self.dict.setdefault(A, {})[B] = distance

    def get(self, a, b=None):
        """With two arguments, return the length of the link a->b (or None).
        With one argument, return a's {node: distance} dict (possibly {})."""
        links = self.dict.setdefault(a, {})
        if b is None:
            return links
        return links.get(b)

    def nodes(self):
        "Return a list of nodes in the graph."
        return self.dict.keys()
def UndirectedGraph(dict=None):
    "Build a Graph where every edge (including future ones) goes both ways."
    # NOTE: the parameter shadows the builtin `dict` to mirror Graph's API.
    return Graph(dict=dict, directed=False)
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300,
                curvature=lambda: random.uniform(1.1, 1.5)):
    """Construct a random graph, with the specified nodes, and random links.
    The nodes are laid out randomly on a (width x height) rectangle.
    Then each node is connected to the min_links nearest neighbors.
    Because inverse links are added, some nodes will have more connections.
    The distance between nodes is the hypotenuse times curvature(),
    where curvature() defaults to a random number between 1.1 and 1.5."""
    g = UndirectedGraph()
    g.locations = {}
    ## Build the cities
    for node in nodes:
        g.locations[node] = (random.randrange(width), random.randrange(height))
    ## Build roads from each city to at least min_links nearest neighbors.
    for i in range(min_links):
        for node in nodes:
            if len(g.get(node)) < min_links:
                here = g.locations[node]
                def distance_to_node(n):
                    # Self and already-linked nodes are never candidates.
                    if n is node or g.get(node,n): return infinity
                    return distance(g.locations[n], here)
                # argmin, distance and infinity come from utils.
                neighbor = argmin(nodes, distance_to_node)
                d = distance(g.locations[neighbor], here) * curvature()
                g.connect(node, neighbor, int(d))
    return g
# Simplified road map of Romania (AIMA Fig. 3.2): one-letter city codes,
# link values are road distances.
romania = UndirectedGraph(Dict(
    A=Dict(Z=75, S=140, T=118),
    B=Dict(U=85, P=101, G=90, F=211),
    C=Dict(D=120, R=146, P=138),
    D=Dict(M=75),
    E=Dict(H=86),
    F=Dict(S=99),
    H=Dict(U=98),
    I=Dict(V=92, N=87),
    L=Dict(T=111, M=70),
    O=Dict(Z=71, S=151),
    P=Dict(R=97),
    R=Dict(S=80),
    U=Dict(V=142)))
# (x, y) coordinates used by GraphProblem.h for straight-line distances.
romania.locations = Dict(
    A=( 91, 492), B=(400, 327), C=(253, 288), D=(165, 299),
    E=(562, 293), F=(305, 449), G=(375, 270), H=(534, 350),
    I=(473, 506), L=(165, 379), M=(168, 339), N=(406, 537),
    O=(131, 571), P=(320, 368), R=(233, 410), S=(207, 457),
    T=( 94, 410), U=(456, 350), V=(509, 444), Z=(108, 531))
# Australia's states/territories (used for map-coloring examples).
australia = UndirectedGraph(Dict(
    T=Dict(),
    SA=Dict(WA=1, NT=1, Q=1, NSW=1, V=1),
    NT=Dict(WA=1, Q=1),
    NSW=Dict(Q=1, V=1)))
australia.locations = Dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
                           Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37))
class GraphProblem(Problem):
    "The problem of searching a graph from one node to another."
    def __init__(self, initial, goal, graph):
        Problem.__init__(self, initial, goal)
        self.graph = graph
    def successor(self, A):
        "Return a list of (action, result) pairs."
        ## In a graph problem the action IS the destination node.
        return [(nbr, nbr) for nbr in self.graph.get(A)]
    def path_cost(self, cost_so_far, A, action, B):
        ## A missing link (None) is treated as an infinite-cost step.
        step = self.graph.get(A, B)
        return cost_so_far + (step or infinity)
    def h(self, node):
        "h function is straight-line distance from a node's state to goal."
        locs = getattr(self.graph, 'locations', None)
        if not locs:
            ## No coordinates available: fall back to an uninformative h.
            return infinity
        return int(distance(locs[node.state], locs[self.goal]))
#### NOTE: NQueensProblem not working properly yet.
class NQueensProblem(Problem):
    """The problem of placing N queens on an NxN board with none attacking
    each other.  A state is represented as an N-element array, where a
    value of r in the c-th entry means there is a queen at column c,
    row r, and a value of None means that the c-th column has not been
    filled in yet.  We fill in columns left to right."""
    def __init__(self, N):
        self.N = N
        self.initial = [None] * N
    def successor(self, state):
        "In the leftmost empty column, try all non-conflicting rows."
        if state[-1] is not None:
            return [] ## All columns filled; no successors
        else:
            def place(col, row):
                "Return a copy of state with a queen added at (row, col)."
                new = state[:]
                new[col] = row
                return new
            col = state.index(None)
            return [(row, place(col, row)) for row in range(self.N)
                    if not self.conflicted(state, row, col)]
    def conflicted(self, state, row, col):
        "Would placing a queen at (row, col) conflict with anything?"
        ## BUG FIX: this used range(col-1), which never checked the queen
        ## in column col-1, so attacking placements could slip through.
        for c in range(col):
            if self.conflict(row, col, state[c], c):
                return True
        return False
    def conflict(self, row1, col1, row2, col2):
        "Would putting two queens in (row1, col1) and (row2, col2) conflict?"
        return (row1 == row2 ## same row
                or col1 == col2 ## same column
                or row1-col1 == row2-col2 ## same \ diagonal
                or row1+col1 == row2+col2) ## same / diagonal
    def goal_test(self, state):
        "Check if all columns filled, no conflicts."
        if state[-1] is None:
            return False
        ## Each queen is checked against all queens to its left.
        for c in range(len(state)):
            if self.conflicted(state, state[c], c):
                return False
        return True
## Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
## iterative-repair and related search techniques, as suggested by Justin Boyan.
## Letters used to index Wordlist.bounds (one entry per initial letter).
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
## The 16 letter cubes of a 4x4 Boggle set; each string lists the six
## faces of one cube (random_boggle picks one face per cube).
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
           'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
           'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
           'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
    """Return a random Boggle board of size n x n.
    We represent a board as a linear list of letters."""
    ## Reuse the 16 standard cubes cyclically for boards larger than 4x4.
    cubes = [cubes16[i % 16] for i in range(n*n)]
    random.shuffle(cubes)
    ## Use a list comprehension rather than map(): on Python 3 map() is a
    ## lazy iterator, which would break len()/indexing in callers such as
    ## print_boggle and BoggleFinder.set_board.  On Python 2 the result
    ## is identical (a list of one random face per cube).
    return [random.choice(cube) for cube in cubes]
## The best 5x5 board found by Boyan, with our word list this board scores
## 2274 words, for a score of 9837
## (stored, like every board here, as a flat row-major list of letters)
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
    "Print the board in a 2-d array."
    ## Python 2 print statements: a bare `print` emits a newline to start
    ## each row; the trailing comma suppresses the newline so the letters
    ## of one row stay on the same line.
    n2 = len(board); n = exact_sqrt(n2)
    for i in range(n2):
        if i % n == 0: print
        ## Boggle's Q cube counts as 'Qu'.
        if board[i] == 'Q': print 'Qu',
        else: print str(board[i]) + ' ',
    print
def boggle_neighbors(n2, cache={}):
    """Return a list of lists, where the i-th element is the list of indexes
    for the neighbors of square i."""
    ## NOTE: the mutable default argument is deliberate here -- it acts as
    ## a cross-call memo table keyed by board size n2.
    if cache.get(n2):
        return cache.get(n2)
    n = exact_sqrt(n2)
    neighbors = [None] * n2
    for i in range(n2):
        neighbors[i] = []
        ## Which edges of the n x n grid does square i touch?
        on_top = i < n
        on_bottom = i >= n2 - n
        on_left = i % n == 0
        on_right = (i+1) % n == 0
        ## Add up to 8 surrounding squares, skipping off-board positions.
        if not on_top:
            neighbors[i].append(i - n)
            if not on_left: neighbors[i].append(i - n - 1)
            if not on_right: neighbors[i].append(i - n + 1)
        if not on_bottom:
            neighbors[i].append(i + n)
            if not on_left: neighbors[i].append(i + n - 1)
            if not on_right: neighbors[i].append(i + n + 1)
        if not on_left: neighbors[i].append(i - 1)
        if not on_right: neighbors[i].append(i + 1)
    cache[n2] = neighbors
    return neighbors
def exact_sqrt(n2):
    "If n2 is a perfect square, return its square root, else raise error."
    root = int(math.sqrt(n2))
    assert root * root == n2
    return root
class Wordlist:
    """This class holds a list of words. You can use (word in wordlist)
    to check if a word is in the list, or wordlist.lookup(prefix)
    to see if prefix starts any of the words in the list."""
    def __init__(self, filename, min_len=3):
        ## Read, uppercase and split the file; close it deterministically
        ## instead of relying on garbage collection.
        with open(filename) as f:
            lines = f.read().upper().split()
        self.words = [word for word in lines if len(word) >= min_len]
        self.words.sort()
        ## bounds[c] = (lo, hi): the half-open slice of self.words whose
        ## entries start with letter c, used to narrow binary searches.
        self.bounds = {}
        for c in ALPHABET:
            c2 = chr(ord(c) + 1)
            self.bounds[c] = (bisect.bisect(self.words, c),
                              bisect.bisect(self.words, c2))
    def lookup(self, prefix, lo=0, hi=None):
        """See if prefix is in dictionary, as a full word or as a prefix.
        Return two values: the first is the lowest i such that
        words[i].startswith(prefix), or is None; the second is
        True iff prefix itself is in the Wordlist."""
        words = self.words
        i = bisect.bisect_left(words, prefix, lo, hi)
        if i < len(words) and words[i].startswith(prefix):
            return i, (words[i] == prefix)
        else:
            return None, False
    def __contains__(self, word):
        ## BUG FIX: the old version indexed words[bisect_left(...)] with
        ## no bounds check, so any word sorting after the last entry
        ## raised IndexError instead of returning False.
        i = bisect.bisect_left(self.words, word)
        return i < len(self.words) and self.words[i] == word
    def __len__(self):
        return len(self.words)
class BoggleFinder:
    """A class that allows you to find all the words in a Boggle board. """
    wordlist = None ## A class variable, holding a wordlist
    def __init__(self, board=None):
        ## Load the shared word list lazily, on first instantiation only.
        if BoggleFinder.wordlist is None:
            BoggleFinder.wordlist = Wordlist("../data/wordlist")
        self.found = {}
        if board:
            self.set_board(board)
    def set_board(self, board=None):
        "Set the board, and find all the words in it."
        if board is None:
            board = random_boggle()
        self.board = board
        self.neighbors = boggle_neighbors(len(board))
        self.found = {}
        ## Start a depth-first word search from every square; bounds[c]
        ## restricts the dictionary range to words starting with letter c.
        for i in range(len(board)):
            lo, hi = self.wordlist.bounds[board[i]]
            self.find(lo, hi, i, [], '')
        return self
    def find(self, lo, hi, i, visited, prefix):
        """Looking in square i, find the words that continue the prefix,
        considering the entries in self.wordlist.words[lo:hi], and not
        revisiting the squares in visited."""
        if i in visited:
            return
        wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
        if wordpos is not None:
            ## Record the prefix itself if it is a complete word.
            if is_word:
                self.found[prefix] = True
            ## Extend the prefix with this square's letter ('Q' counts as
            ## 'QU', per Boggle rules) and recurse into each neighbor.
            visited.append(i)
            c = self.board[i]
            if c == 'Q': c = 'QU'
            prefix += c
            for j in self.neighbors[i]:
                self.find(wordpos, hi, j, visited, prefix)
            ## Undo the in-place mutation so sibling branches of the
            ## search see the same path.
            visited.pop()
    def words(self):
        "The words found."
        return self.found.keys()
    ## Per-length word scores: words shorter than 4 letters score 0,
    ## and every word of 8+ letters scores 11.
    scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100
    def score(self):
        "The total score for the words found, according to the rules."
        return sum([self.scores[len(w)] for w in self.words()])
    def __len__(self):
        "The number of words found."
        return len(self.found)
def boggle_hill_climbing(board=None, ntimes=100, print_it=True):
    """Solve inverse Boggle by hill-climbing: find a high-scoring board by
    starting with a random one and changing it."""
    finder = BoggleFinder()
    if board is None:
        board = random_boggle()
    ## len(finder) == number of words found on the current board; that is
    ## the objective being maximized here (not the point score).
    best = len(finder.set_board(board))
    for _ in range(ntimes):
        ## Mutate one square in place; keep the change only if it helps.
        i, oldc = mutate_boggle(board)
        new = len(finder.set_board(board))
        if new > best:
            best = new
            print best, _, board
        else:
            board[i] = oldc ## Change back
    if print_it:
        print_boggle(board)
    return board, best
def mutate_boggle(board):
    """Replace one randomly chosen square of board (in place) with a random
    face of a random cube; return (index, old letter) so the caller can
    undo the change."""
    pos = random.randrange(len(board))
    previous = board[pos]
    cube = random.choice(cubes16) ##random.choice(boyan_best)
    board[pos] = random.choice(cube)
    return pos, previous
## Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
    """Delegates to a problem, and keeps statistics."""
    def __init__(self, problem):
        self.problem = problem
        ## Counters: successor calls, goal tests, and states generated.
        self.succs = 0
        self.goal_tests = 0
        self.states = 0
        self.found = None
    def successor(self, state):
        "Return a list of (action, state) pairs reachable from this state."
        pairs = self.problem.successor(state)
        self.succs += 1
        self.states += len(pairs)
        return pairs
    def goal_test(self, state):
        "Return true if the state is a goal."
        self.goal_tests += 1
        is_goal = self.problem.goal_test(state)
        if is_goal:
            ## Remember which state satisfied the test, for __repr__.
            self.found = state
        return is_goal
    def __getattr__(self, attr):
        ## The counters live in our own __dict__; every other attribute
        ## is delegated to the wrapped problem.
        if attr in ('succs', 'goal_tests', 'states'):
            return self.__dict__[attr]
        return getattr(self.problem, attr)
    def __repr__(self):
        return '<%4d/%4d/%4d/%s>' % (self.succs, self.goal_tests,
                                     self.states, str(self.found)[0:4])
def compare_searchers(problems, header, searchers=[breadth_first_tree_search,
                      breadth_first_graph_search, depth_first_graph_search,
                      iterative_deepening_search, depth_limited_search,
                      astar_search]):
    """Run every searcher on every problem and print a table of statistics,
    one row per searcher (see InstrumentedProblem.__repr__ for the cells)."""
    def run(searcher, problem):
        "Search an instrumented copy of problem; return the wrapper."
        instrumented = InstrumentedProblem(problem)
        searcher(instrumented)
        return instrumented
    table = [[name(s)] + [run(s, p) for p in problems] for s in searchers]
    print_table(table, header)
def compare_graph_searchers():
    "Compare the standard searchers on three sample route-finding problems."
    problems = [GraphProblem('A', 'B', romania),
                GraphProblem('O', 'N', romania),
                GraphProblem('Q', 'WA', australia)]
    compare_searchers(problems=problems,
                      header=['Searcher', 'Romania(A,B)', 'Romania(O, N)',
                              'Australia'])
"miguel.ossa.abellan@gmail.com"
] | miguel.ossa.abellan@gmail.com |
1c3fe5065e8682d0bc0842ad949a3e5b60eae09c | 9a51a7d5ddd3103e5d6ecce0077d70cf0db81927 | /148A.py | d5794a5dbf698af3b2db9c91475cde6fe6e2e5a4 | [] | no_license | Anikcb/Codeforces | 6eb9627eee07756d04e246e78ec1ad7c7b956c99 | 9a8d620447a23afa519f0aef8df6a8be189ddf12 | refs/heads/main | 2023-06-15T05:12:57.169088 | 2021-07-12T15:21:48 | 2021-07-12T15:21:48 | 380,301,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | k = int(input())
# Codeforces 148A ("Insomnia cure", per the file path /148A.py): the four
# attackers hit every dragon whose number is a multiple of k, l, m or n
# respectively; count how many of dragons 1..d get hit at least once.
# NOTE(review): `k` is read from stdin just above this block -- confirm.
l = int(input())
m = int (input())
n = int (input())
d = int (input())
res=0  # dragons hit so far
for i in range(1,d+1):
    if i%k==0 or i%l==0 or i%m==0 or i%n==0:
        res=res+1
print(res)
| [
"noreply@github.com"
] | noreply@github.com |
b7787491c00166a9f9516646d4c2054fe8fe1245 | 557ca4eae50206ecb8b19639cab249cb2d376f30 | /Chapter12/Ex12_3.py | 96ad465cf0df4d21b32435eb806eb5946bf1eb75 | [] | no_license | philipdongfei/Think-python-2nd | 781846f455155245e7e82900ea002f1cf490c43f | 56e2355b8d5b34ffcee61b38fbfd200fd6d4ffaf | refs/heads/master | 2021-01-09T19:57:49.658680 | 2020-03-13T06:32:11 | 2020-03-13T06:32:11 | 242,441,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from Ex12_2 import *
def metathesis_pairs(d):
    """Print every metathesis pair: two words from the same anagram set
    that differ in exactly two letter positions (one swap apart)."""
    for group in d.values():
        for first in group:
            for second in group:
                ## first < second prints each unordered pair exactly once.
                if first < second and word_distance(first, second) == 2:
                    print(first, second)
def word_distance(word1, word2):
    """Return the number of positions at which two equal-length words
    differ (the Hamming distance)."""
    assert len(word1) == len(word2)
    return sum(1 for a, b in zip(word1, word2) if a != b)
def main():
    # Build the anagram table from words.txt (all_anagrams comes from
    # the Ex12_2 import at the top of this file), then print every
    # metathesis pair it contains.
    sets = all_anagrams('words.txt')
    metathesis_pairs(sets)
if __name__ == '__main__':
    main()
| [
"philip.dongfei@gmail.com"
] | philip.dongfei@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.