| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69 items) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1 item) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f02ce7c29dca4a27b5e73592695c617c66c56a61
|
15f4c8766ef89772d4e1ef9b4b680024681401b6
|
/Alumnos/views.py
|
adf7f453b91a3b6e09326c1835bd0bfa54de5c7b
|
[] |
no_license
|
Marabo98/DatosPersonales
|
445419eb4c5e8c2b4497acaa76dbee760e7a098d
|
c24e68c75940b568539b049313a7d5bfe6c82e8b
|
refs/heads/master
| 2020-05-19T15:43:31.614636
| 2019-05-05T22:47:59
| 2019-05-05T22:47:59
| 185,084,696
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from django.shortcuts import render
from .models import DatosPersonales
def index(request):
    return render(request,"Alumnos/index.html")
def res(request):
    Datos=DatosPersonales.objects.all()
    return render(request, "Alumnos/resultado.html",{"Alumnos":Datos})
|
[
"noreply@github.com"
] |
Marabo98.noreply@github.com
|
d30137c4a1d499e1b0f216205e234323c2e84340
|
288714c273eae2bf13985c0f2927d915b3e84313
|
/version3/version3/urls.py
|
d5ccad7bda0cf8b46ff549b5e8f78fdd4a358bc3
|
[] |
no_license
|
To-Heaven/authority-management-system
|
2ae3fb4708daeba4498ae8dcca24f2fc43bece60
|
33bf101fa73ce5eb2c0b280666979ec89a34788a
|
refs/heads/master
| 2021-08-24T07:18:34.605907
| 2017-12-08T16:00:47
| 2017-12-08T16:00:47
| 113,591,564
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
"""version3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from app import views
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^login/', views.login),
    url(r'^home/', views.home),
]
|
[
"ziawang2017@outlook.com"
] |
ziawang2017@outlook.com
|
36b47d0a16d7e6e8b51e7aaa3dd00334f08d3f44
|
2b690fe997b38b2b491cc8096e60196f2a976b19
|
/ch04modules/src/ds_using_list.py
|
a1979326446885bc482c964e2d88bcd2dfc43179
|
[] |
no_license
|
songxowe/PythonApp
|
9d15ce39aab8dfd62a88b5322d6b647102dc70fb
|
6fc81c69c1550799ac85ea18f79a819158780072
|
refs/heads/master
| 2021-01-23T15:27:46.427387
| 2018-07-02T09:51:34
| 2018-07-02T09:51:34
| 102,711,224
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
# This is my shopping list
shoplist = ['apple', 'mango', 'carrot', 'banana']
print('I have', len(shoplist), 'items to purchase.')
print('These items are:', end=' ')
for item in shoplist:
    print(item, end=' ')
print('\nI also have to buy rice.')
shoplist.append('rice')
print('My shopping list is now', shoplist)
print('I will sort my list now')
shoplist.sort()
print('Sorted shopping list is', shoplist)
print('The first item I will buy is', shoplist[0])
olditem = shoplist[0]
del shoplist[0]
print('I bought the', olditem)
print('My shopping list is now', shoplist)
|
[
"songxowe@hotmail.com"
] |
songxowe@hotmail.com
|
79e6da3b4d658993b4978ba6089d86128dce378a
|
55c3a742168c1a5cf131df01d897b5e750d5c172
|
/hr_holidays_usability/wizard/hr_holidays_post.py
|
b178152e4ee7ae0d9a014221677fa50b1af5d5ea
|
[] |
no_license
|
ursais/odoo-usability
|
cd22e9176e5a4e45c079be45955f415178a86105
|
216369f66c8c2da45d94fefc72276483c00edeef
|
refs/heads/10.0
| 2021-12-10T16:50:23.111985
| 2017-04-05T17:23:18
| 2017-04-05T17:23:18
| 88,552,310
| 0
| 0
| null | 2021-12-02T22:49:23
| 2017-04-17T21:27:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,666
|
py
|
# -*- coding: utf-8 -*-
# © 2015-2017 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class HrHolidaysPost(models.TransientModel):
    _name = 'hr.holidays.post'
    _description = 'Wizard for post holidays'
    before_date = fields.Date(
        string='Select Leave Requests That Ended Before', required=True,
        default=fields.Date.context_today,
        help="The wizard will select the validated holidays "
             "that ended before that date (including holidays that "
             "ended on that date).")
    holidays_to_post_ids = fields.Many2many(
        'hr.holidays', string='Leave Requests to Post',
        domain=[
            ('type', '=', 'remove'),
            ('holiday_type', '=', 'employee'),
            ('state', '=', 'validate'),
            ('posted_date', '=', False),
        ])
    state = fields.Selection([
        ('draft', 'Draft'),
        ('done', 'Done'),
    ], string='State', readonly=True, default='draft')
    @api.multi
    def select_date(self):
        self.ensure_one()
        hols = self.env['hr.holidays'].search([
            ('type', '=', 'remove'),
            ('holiday_type', '=', 'employee'),
            ('state', '=', 'validate'),
            ('posted_date', '=', False),
            ('vacation_date_to', '<=', self.before_date),
        ])
        self.write({
            'state': 'done',
            'holidays_to_post_ids': [(6, 0, hols.ids)],
        })
        action = self.env['ir.actions.act_window'].for_xml_id(
            'hr_holidays_usability', 'hr_holidays_post_action')
        action['res_id'] = self.id
        return action
    @api.multi
    def run(self):
        self.ensure_one()
        # I have to make a copy of self.holidays_to_post_ids in a variable
        # because, after the write, it doesn't have a value any more !!!
        holidays_to_post = self.holidays_to_post_ids
        today = fields.Date.context_today(self)
        if not self.holidays_to_post_ids:
            raise UserError(_('No leave request to post.'))
        self.holidays_to_post_ids.write({'posted_date': today})
        view_id = self.env.ref('hr_holidays_usability.hr_holiday_pivot').id
        action = {
            'name': _('Leave Requests'),
            'res_model': 'hr.holidays',
            'type': 'ir.actions.act_window',
            'domain': [('id', 'in', holidays_to_post.ids)],
            'view_mode': 'pivot',
            'view_id': view_id,
        }
        return action
|
[
"alexis.delattre@akretion.com"
] |
alexis.delattre@akretion.com
|
3be5ebce37acada992508c3ea1fe44a9c7c5231c
|
71e8291ec9658e85989db9c7dcad92ea517b8c89
|
/backend/polls/serializers.py
|
29569e0b1dd2a41316231bcd1fc8470e61087083
|
[
"Apache-2.0"
] |
permissive
|
IINamelessII/YesOrNo
|
b942104e8feadf3c1b08d5f66f85f3fff3e3ccd7
|
0ebbdfbae73f0be7c807a8f6ca0ec7c2040cca19
|
refs/heads/master
| 2023-01-07T06:54:06.384209
| 2019-05-26T11:34:11
| 2019-05-26T11:34:11
| 155,479,502
| 3
| 2
|
Apache-2.0
| 2023-01-04T19:50:11
| 2018-10-31T01:24:06
|
Python
|
UTF-8
|
Python
| false
| false
| 826
|
py
|
from django.contrib.auth.models import User
from rest_framework import serializers
from polls.models import Poll, Flow
class FlowSerializer(serializers.ModelSerializer):
    class Meta:
        model = Flow
        fields = '__all__'
class PollSerializer(serializers.ModelSerializer):
    flow = serializers.ReadOnlyField(source='flow.name')
    class Meta:
        model = Poll
        fields = ('id', 'flow', 'statement', 'agree', 'disagree', 'likes', 'dislikes')
class ShortPollSerializer(serializers.ModelSerializer):
    class Meta:
        model = Poll
        fields = ('agree_rate', 'rate')
class UserSerializer(serializers.ModelSerializer):
    polls = serializers.PrimaryKeyRelatedField(many=True, queryset=Poll.objects.all())
    class Meta:
        model = User
        fields = ('id', 'username', 'polls')
|
[
"justnitro26@gmail.com"
] |
justnitro26@gmail.com
|
0efce5e84b3ecd6417977d55a558b91a32a83e07
|
e420b0cd5f02b088d83327882ae149042c1232ea
|
/build.py
|
47cc18572832eee74fb5d1a2c910032a860869ca
|
[] |
no_license
|
aechaechaech/Jerma-Imposter-Message-Generator
|
e6129bb9b5da6e8af2f87dffe10debef5fab5beb
|
583100fc208186e0f680abec0a81bf35d047b744
|
refs/heads/main
| 2023-04-12T06:23:50.578470
| 2021-05-10T15:45:37
| 2021-05-10T15:45:37
| 327,178,852
| 11
| 3
| null | 2021-04-03T01:28:13
| 2021-01-06T02:36:23
|
Python
|
UTF-8
|
Python
| false
| false
| 77
|
py
|
from distutils.core import setup
import py2exe
setup(console=['main.py'])
|
[
"noreply@github.com"
] |
aechaechaech.noreply@github.com
|
e79d5279d124db882897b667b9e1754ebabd8484
|
2f5033c5d6a4e8d668933a066332aabff1e337d0
|
/docs/conf.py
|
e2910e1b3fe6f266b8983004af140329adda0f16
|
[
"BSD-3-Clause"
] |
permissive
|
linlearn/linlearn
|
fad723b360606b22646b88e0e3718c26a458664e
|
b37a2ce810573a4b603142c8f2902b66c2154647
|
refs/heads/master
| 2022-10-11T00:12:57.326079
| 2022-09-27T17:26:30
| 2022-09-27T17:26:30
| 227,420,583
| 6
| 2
|
BSD-3-Clause
| 2022-09-27T17:26:31
| 2019-12-11T17:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,777
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# import sphinx_readable_theme
# html_theme_path = [sphinx_readable_theme.get_html_theme_path()]
# html_theme = 'readable'
html_theme = 'sphinx_rtd_theme'
# html_theme = 'python_docs_theme'
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("../.."))
sys.path.insert(0, os.path.abspath("sphinx_ext"))
# from linlearn import *
# from github_link import make_linkcode_resolve
# -- Project information -----------------------------------------------------
project = "linlearn"
copyright = "2020, Stéphane Gaïffas"
author = "Stéphane Gaïffas"
# The full version, including alpha/beta/rc tags
release = "0.0.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.napoleon",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
# "sphinx.ext.linkcode",
# "sphinx_gallery.gen_gallery",
]
# autosummary_generate = True
autoclass_content = "class"
autodoc_inherit_docstrings = True
autodoc_default_flags = "inherited-members"
autodoc_default_options = {
# "members": None,
"member-order": "bysource",
# "inherited-members": None,
"autoclass_content": "class",
}
# sphinx_gallery_conf = {
# "examples_dirs": "../examples",
# "doc_module": "linlearn",
# "gallery_dirs": "auto_examples",
# "ignore_pattern": "../run_*|../playground_*",
# "backreferences_dir": os.path.join("modules", "generated"),
# "show_memory": False,
# "reference_url": {"onelearn": None},
# }
# linkcode_resolve = make_linkcode_resolve(
# "linlearn",
# u"https://github.com/linlearn/"
# "linlearn/blob/{revision}/"
# "{package}/{path}#L{lineno}",
# )
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
source_suffix = ".rst"
# Generate the plots for the gallery
plot_gallery = "True"
# The master toctree document.
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "press"
# html_theme = "readable"
# html_sidebars = {
# "**": ["about.html", "navigation.html", "searchbox.html"],
# "auto_examples": ["index.html"],
# }
html_theme_options = {
# "description": "Linear methods in Python",
# "github_user": "linlearn",
# "github_repo": "linlearn",
# "github_button": True,
# "fixed_sidebar": True,
# "travis_button": False,
# "logo_text_align": "center",
# "github_banner": True,
}
# html_logo = "images/logo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# from datetime import datetime
# now = datetime.now()
# html_show_copyright = copyright = (
# str(now.year)
# + ', <a href="https://github.com/linlearn/linlearn/graphs/contributors">linlearn developers</a>. Updated on '
# + now.strftime("%B %d<, %Y")
# )
# intersphinx_mapping = {
# "python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
# "numpy": ("https://docs.scipy.org/doc/numpy/", None),
# "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
# "matplotlib": ("https://matplotlib.org/", None),
# "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
# "joblib": ("https://joblib.readthedocs.io/en/latest/", None),
# "sklearn": ("https://scikit-learn.org/stable/", None),
# }
|
[
"stephane.gaiffas@gmail.com"
] |
stephane.gaiffas@gmail.com
|
b7db04729ee4eed895c34171563555bd33dda7f0
|
61bfee44a4566670f7ed1c6218892d575c73d952
|
/DataManagement/Phase1/CsvDistinctExtractor.py
|
bc269ea2bcfe744d19fadc109b01296d9d60398d
|
[] |
no_license
|
yaoReadingCode/CSCI-620
|
45b5fc840fe7bba93492fe44ea8e6ff48c03289d
|
fe26b99dba3462305cf6a0497da321505bec53c3
|
refs/heads/master
| 2022-06-06T06:08:48.914593
| 2019-12-02T03:53:55
| 2019-12-02T03:53:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
import pandas as pd
import json
import re
def extractValues(qualifiedFilePath, column):
    distinct = set()
    fileData = pd.read_csv(qualifiedFilePath, sep='\t', header=0)
    for index in range(len(fileData)):
        rowValue: str = fileData[column][index]
        match = re.match("^\\[(.*)\\]$", str(rowValue))
        if match != None:
            rowValue = match.group(1)
        distinct.update(str(rowValue).split(','))
    with open(column + '.txt', 'w', encoding="utf-8") as file:
        file.write(",".join( [ "('" + str(v).replace("'", "''") + "')" for v in distinct ] ))
    print("processed " + qualifiedFilePath + " : " + column)
def main():
    with open('distinct_file_config.json') as jsonFile:
        distinctFileConfig = json.load(jsonFile)
    filePath = distinctFileConfig["filePath"]
    for key in distinctFileConfig["files"].keys():
        for column in distinctFileConfig["files"][key]:
            print("processing " + key + " : " + column)
            extractValues(filePath + key, column)
if __name__ == "__main__":
    main()
|
[
"apurav.khare@gmail.com"
] |
apurav.khare@gmail.com
|
ac144cfea22e1272d79d2dedb4eaf49d65fb89b4
|
637c1fb7d20c6f6f0bcfdc398ce663b26a0c5629
|
/marathon/marathon-lb/tests/test_common.py
|
77963524d70802f37fc17658e74fbecb450a1315
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
slpcat/docker-images
|
e00ae70158f540c45aede98874cbfed00d1bf12f
|
cd99c1f375ea153d9d50ab4fa49ceb13583e6bab
|
refs/heads/master
| 2023-08-31T23:57:07.582361
| 2023-08-17T05:34:05
| 2023-08-17T05:34:05
| 107,383,466
| 44
| 31
|
BSD-3-Clause
| 2020-03-17T17:00:45
| 2017-10-18T08:59:12
|
Go
|
UTF-8
|
Python
| false
| false
| 645
|
py
|
import unittest
from mock import Mock
import common
class TestCommon(unittest.TestCase):
    def test_setup_logging_log_level(self):
        logger = Mock()
        common.setup_logging(logger, '/dev/null',
                             '%(name)s: %(message)s', 'info')
        logger.setLevel.assert_called_with(20)
    def test_setup_logging_invalid_log_level(self):
        logger = Mock()
        with self.assertRaises(Exception) as context:
            common.setup_logging(logger, '/dev/null',
                                 '%(name)s: %(message)s', 'banana')
        assert str(context.exception) == 'Invalid log level: BANANA'
|
[
"slpcat@qq.com"
] |
slpcat@qq.com
|
b1b60c4d44cf84dc029df650bb592b547e9ea863
|
debb553bf97dbc550a619f97598e35b2b43fbfa0
|
/ChatBot/Lec 01 聊天机器人的基础模型与综述/Debugs/debug_find_path.py
|
27c35b820133a3b413d72251a1f96248d23f3148
|
[] |
no_license
|
jacky-guo/July-NLP
|
e9c050ae1cab32a6fa534da858544d11c56fdb22
|
f8aa6c01d5e7349b7e4e89ce103dd7acb3f62048
|
refs/heads/master
| 2020-08-06T19:09:38.877281
| 2018-10-19T09:47:20
| 2018-10-19T09:47:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
# -*- coding:utf-8 -*-
graph = {'上海': ['苏州', '常州'],
'苏州': ['常州', '镇江'],
'常州': ['镇江'],
'镇江': ['常州'],
'盐城': ['南通'],
'南通': ['常州']}
# Spell out how to find a path from A to B
def find_path(start, end, path=[]):
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    for node in graph[start]:
        if node not in path:
            newpath = find_path(node, end, path)
            if newpath:
                return newpath
    return None
if __name__=='__main__':
    print(find_path('上海','镇江'))
|
[
"740465595@qq.com"
] |
740465595@qq.com
|
01d6d7f9f190a36964f6276c6d587f83b503bfe0
|
6349c8168b6f4cb57a0370e46f789a566d60dc21
|
/tweet_analyzer_old.py
|
585b5dbc2b10b7faed6e0e5bd0c9f36ea655d391
|
[] |
no_license
|
bksim/tweet-moods
|
daf99f21cefdc30a097fcb96b3f69c0d92731798
|
809778986f9aec724c4fa8263cf111dbc1957e6f
|
refs/heads/master
| 2021-05-04T11:07:51.701789
| 2019-09-18T03:19:26
| 2019-09-18T03:19:26
| 36,324,786
| 0
| 0
| null | 2019-11-02T23:11:49
| 2015-05-26T21:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,527
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from datetime import date
from datetime import timedelta
from sklearn.naive_bayes import BernoulliNB
from TwitterSearch import *
import tweepy
import sys
import pickle
reload(sys)
sys.setdefaultencoding('utf8')
#outputs past numdays days in format yyyy-mm-dd *NOT* including todays date
def last_dates(numdays):
    return [date.today()-timedelta(days=x) for x in range(1, numdays)]
# queries twitter given the query
def grab_tweets(query):
    #consumer_key = 'RXZnGOT0HIMzlvluC49971qnW'
    consumer_key = 'uTAwmsVanLasrKPnPL2vMYWFe'
    #consumer_secret = 'f6ZXMiirMw1b9a9QNSZYxhIXHAV4MoxOA4y16pmWfJV8zG0Gam'
    consumer_secret = '9mZvzzsdbv1Hf1RzrOd1cMRTn2PjivqE4XWgzr9mlScz95rMM7'
    #access_token = '1121237479-li2tQfxLq8eCpgJu5JAG33U0ViLXgZQg2KBLhEm'
    access_token = '2521477255-6FWcso042sTIZ3eSzHofYLnNCHsPRanJmidOzF0'
    #access_token_secret = 'm9Ucvzl6HkqE1yxrK946BsEu6wfMBDI9YoXlnUbpz95Af'
    access_token_secret = 'wWAlsb14aYwp3LwQOimyPdwam5aE3B2x1h4XEyYyvuuy4'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    all_tweets = []
    # get today's tweets
    tweets = api.search(query, lang='en', rpp=100, count=100, result_type='recent')
    for tweet in tweets:
        all_tweets.append(tweet)
        #print tweet.created_at
        tweet.created_at = tweet.created_at.date()
        #print tweet.text
    # get tweets for past week
    for date in last_dates(7):
        #print "**HERE**"
        #tweets = api.search(query, lang='en', rpp=100, count=100, result_type='recent')
        tweets = api.search(query, lang='en', rpp=100, count=100, result_type='mixed', until=date)
        for tweet in tweets:
            all_tweets.append(tweet)
            #print tweet.created_at
            tweet.created_at = tweet.created_at.date()
            #print tweet.text
    return all_tweets
# writes training set to file for the michigan data set.
# currently: NOT IN USE
def write_training_set_mich(filename):
    word_freq = {}
    stopwords = []
    with open("training/stopwords.txt") as f:
        for line in f:
            stopwords.append(line.rstrip('\n').lower())
    labels = []
    texts = []
    with open(filename) as f:
        for line in f:
            temp = line.split('\t')
            labels.append('pos' if temp[0] == '1' else 'neg')
            words = temp[1].rstrip('\n').lower().split(' ')
            texts.append(words)
            for w in words:
                if w not in stopwords:
                    if w in word_freq:
                        word_freq[w] += 1
                    else:
                        word_freq[w] = 1
    features = []
    for t in texts:
        features.append([1 if w in t else 0 for w in word_freq.keys()])
    with open('training/umich/results.txt', 'w') as fout:
        pickle.dump(features, fout)
    with open('training/umich/labels.txt', 'w') as fout:
        pickle.dump(labels, fout)
    with open('training/umich/wordslist.txt', 'w') as fout:
        pickle.dump(word_freq.keys(), fout)
    return
# writes training set for the movie review training set. currently: NOT IN USE
#numwords is the number of keys to consider. most common numwords number of keys will be used as feature vectors
#numsamples number of reviews to use (total 5331)
def write_training_set(numwords=500, numsamples=1000):
    word_freq = {}
    stopwords = []
    with open("training/stopwords.txt") as f:
        for line in f:
            stopwords.append(line.rstrip('\n').lower())
    #words_list = set()
    positive = []
    i = 0
    with open('training/rt-polaritydata/rt-polarity.pos') as f:
        for line in f:
            i += 1
            if i <= numsamples:
                words = line.rstrip('\n').lower().split()
                positive.append(words)
                #words_list |= set(words)
                for w in words:
                    if w not in stopwords:
                        if w in word_freq:
                            word_freq[w] += 1
                        else:
                            word_freq[w] = 1
    negative = []
    i = 0
    with open('training/rt-polaritydata/rt-polarity.neg') as f:
        for line in f:
            i += 1
            if i <= numsamples:
                words = line.rstrip('\n').lower().split()
                negative.append(words)
                #words_list |= set(words)
                for w in words:
                    if w not in stopwords:
                        if w in word_freq:
                            word_freq[w] += 1
                        else:
                            word_freq[w] = 1
    words_list_truncated = zip(*sorted(word_freq.items(), key=lambda x:x[1], reverse=True))[0][0:numwords]
    train_pos = []
    train_neg = []
    for p in positive:
        train_pos.append([1 if w in p else 0 for w in words_list_truncated])
    with open('training/rt-polaritydata/training_pos.txt', 'w') as fout:
        pickle.dump(train_pos, fout)
    for n in negative:
        train_neg.append([1 if w in n else 0 for w in words_list_truncated])
    with open('training/rt-polaritydata/training_neg.txt', 'w') as fout:
        pickle.dump(train_neg, fout)
    with open('training/rt-polaritydata/words_list_truncated.txt', 'w') as fout:
        pickle.dump(words_list_truncated, fout)
    print "done"
def load_data(filename):
    with open(filename) as f:
        data = pickle.load(f)
    return data
def get_sentiment_data(query, training_set):
    tweets = grab_tweets(query)
    #print "****HERE****"
    #train_pos = load_data('training_pos.txt')
    #train_neg = load_data('training_neg.txt')
    #words_list = load_data('words_list_truncated.txt')
    #clf.fit(train_pos+train_neg, ['pos']*len(train_pos) + ['neg']*len(train_neg))
    train = load_data('training/' + training_set + '/results.txt')
    #print "HERE"
    labels = load_data('training/' + training_set + '/labels.txt')
    words_list = load_data('training/' + training_set + '/wordslist.txt')
    #print labels
    clf = BernoulliNB(binarize=None)
    clf.fit(train, labels)
    classified = {}
    for tweet in tweets:
        if tweet.created_at in classified.keys():
            classified[tweet.created_at] = classified[tweet.created_at] + [classify(tweet.text, clf, words_list)[0]]
        else:
            classified[tweet.created_at] = [classify(tweet.text, clf, words_list)[0]]
    #print classified
    returndata = {}
    for key in classified:
        numpos = sum([1 if v=='pos' else 0 for v in classified[key]])
        #returndata[key] = (numpos, len(classified[key]) - numpos) #tuple of positive, negative
        # percent:
        #returndata[key] = float(sum([1 if v == 'pos' else 0 for v in classified[key]]))/len(classified[key])
        returndata[key] = ceil(float(sum([1 if v == 'pos' else 0 for v in classified[key]]))/len(classified[key])*100)/100.0
    return returndata
    #print percents
def classify(query, clf, words_list):
    tokens = query.rstrip('\n').lower().split()
    feature = [1 if w in tokens else 0 for w in words_list]
    return clf.predict(feature)
if __name__ == "__main__":
    #write_training_set_mich('training/umich/training.txt')
    print get_sentiment_data('%23ucsb', 'umich')
|
[
"simpavid@gmail.com"
] |
simpavid@gmail.com
|
9570c49843328e84d51d010763252fc26e8d87e5
|
f04a0ebca2b4ef6348755be8e1aff6d0febd50f1
|
/CAI_p5/GA.py
|
677609851e6f1f9bf6e514270daf9d85a8287c76
|
[] |
no_license
|
kiarash97/CAI_5_Genetic
|
9e3cb0dae1cbc515da02fc345e8a9477c73af236
|
43f8473e8ee4eb5e05e59771ac3cee5a8aa4243c
|
refs/heads/master
| 2020-04-02T00:41:27.155608
| 2018-10-19T17:14:24
| 2018-10-19T17:14:24
| 153,816,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,081
|
py
|
import random
class genome:
    def __init__(self,lst):
        self.gens = []
        for i in lst:
            self.gens.append(i)
        #random gens
        if len(lst) == 0 :
            self.randomGens()
    def randomGens(self):
        while (len(self.gens) < 8):
            x = random.randint(0,7)
            if x not in self.gens:
                self.gens.append(x)
    def isCorrect(self):
        for i in self.gens:
            if self.gens.count(i) != 1 :
                return False
        return True
    def __str__(self):
        return str(self.gens)
def initialize_population(num = 100):
    population = []
    for i in range(num):
        population.append(genome(lst = []))
    return population
def one_point_cross_over(gen1 , gen2):
    cross_point = random.randint(0,6)
    result = []
    result.append(gen1.gens[0:cross_point])
    result = result[0]
    i = cross_point
    while len(result)<8:
        if gen2.gens[i] not in result :
            result.append(gen2.gens[i])
        i+=1
        if i>7:
            i=0
    result = genome(lst = result)
    if result.isCorrect():
        return result
    else :
        return False
def mutation(gen ,p = 0.001):
    for i in range(len(gen.gens)):
        if random.random() < p :
            x = random.randint(0,7)
            s = gen.gens.index(x)
            gen.gens[i] , gen.gens[s] = gen.gens[s] , gen.gens[i]
def fitness(gen):
    threat_count = 0
    max_threat = 28
    for i in range(len(gen.gens)):
        for j in range(i,len(gen.gens)):
            if i != j :
                if i-gen.gens[i] == j-gen.gens[j] or i+gen.gens[i] == j+gen.gens[j]:
                    threat_count +=1
    return max_threat-threat_count
def sort_by_fitness(population):
    result = []
    for i in range(len(population)):
        for j in range(len(population)):
            if fitness(population[i]) > fitness(population[j]):
                population[i] , population[j] = population[j] , population[i]
def genetic(initPopulationSize = 100 , pairsNumber = 100, populationSize = 30 , mutationProbability = 0.001):
    generation = 1
    population = initialize_population(num= initPopulationSize)
    sort_by_fitness(population)
    while True:
        print ("genaration = ",generation , "population number = ", len(population))
        if fitness(population[0]) == 28:
            print("Urekaaa !! ")
            return population[0]
        #each epoch
        for i in range(pairsNumber):
            x= random.randint(0,len(population)-1)
            y= random.randint(0,len(population)-1)
            if x!=y :
                child = one_point_cross_over(population[x],population[y])
                if child:
                    mutation(child , p=mutationProbability)
                    population.append(child)
        sort_by_fitness(population)
        population = population[0:populationSize]
        generation += 1
result = genetic(initPopulationSize = 100 , pairsNumber = 100, populationSize = 30 , mutationProbability = 0.001)
print ("answer = " , result)
|
[
"kasirikiarash@gmail.com"
] |
kasirikiarash@gmail.com
|
8cebf3db3d8ed108a7aa3b6a756866083b2da720
|
bc2c2eb2f74c345e91f2c49b1de37321227a90b8
|
/src/preprocess.py
|
97e234b1a3ddbb7a0ea6cf6369573f5e874252dd
|
[] |
no_license
|
emilebourban/ML-Project-2
|
a9567aad95372adfc75d8117a5a85a7a4005fe23
|
9e443ff5ad7870939616057a58d36b9606718d4b
|
refs/heads/master
| 2021-07-02T22:36:59.492531
| 2020-10-05T14:50:38
| 2020-10-05T14:50:38
| 159,149,665
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,074
|
py
|
# -*- coding: utf-8 -*-
import os
import re
from nltk.stem.porter import *
from nltk.corpus import brown
import itertools
def load_dicts(DICT_PATH):
    """
    """
    dict_typo = {}
    with open(os.path.join(DICT_PATH, "emnlp_dict.txt"), mode='rt') as f:
        for line in f:
            key, value = line.rstrip('\n').split('\t')
            dict_typo.update({key:value})
    with open(os.path.join(DICT_PATH, "Test_Set_3802_Pairs.txt") , mode='r') as f:
        for line in f:
            try:
                key, value = line.rstrip('\n').split('\t')[1].split(' | ')
                dict_typo.update({key:value})
            # Some values have multiple keys affected to them
            except ValueError:
                ls = line.rstrip('\n').split('\t')[1].split(' | ')
                key = ls[0]
                value= ls[1]
                # Update dict with all the keys
                dict_typo.update({key:value})
    with open(os.path.join(DICT_PATH, "typo-corpus-r1.txt"), mode='rt') as f:
        for line in f:
            key, value, _, _, _, _ = line.rstrip('\n').split('\t')
            dict_typo.update({key:value})
    return dict_typo
def remove_repetitions(tweet):
    """
    Functions that remove noisy character repetition like for instance :
    llloooooooovvvvvve ====> love
    This function reduce the number of character repetition to 2 and checks if the word belong the english
    vocabulary by use of pyEnchant and reduce the number of character repetition to 1 otherwise
    Arguments: tweet (the tweet)
    """
    tweet = tweet.split()
    for i in range(len(tweet)):
        # tweet[i]=''.join(''.join(s)[:2] for _, s in itertools.groupby(tweet[i])).replace('#', '')
        # if len(tweet[i])>0:
        # if tweet[i] not in word_list:
        tweet[i] = ''.join(''.join(s)[:1] for _, s in itertools.groupby(tweet[i])).replace('#', '')
    tweet=' '.join(tweet)
    return tweet
def clean_tweets(filename, in_path, out_path, dict_typo, word_list, only_words=False, stemmer=None, min_len=None):
    """
    """
    print("Cleaning with: only words={}, stemmer={}, minimal length={}".format(only_words, stemmer!=None, min_len))
    with open(os.path.join(in_path, filename), mode='rt', encoding='utf-8') as rf:
        with open(os.path.join(out_path, 'cl_'+filename), mode='wt', encoding='utf-8') as wf:
            for line in rf:
                if 'test' in filename:
                    ID = line.strip().split(',')[0]+','
                    tweet = ' '.join(line.strip().split()[1:])
                else:
                    ID = ''
                    tweet = line.strip()
                remove_repetitions(tweet)
                tweet = tweet.strip().split()
                for i, word in enumerate(tweet):
                    if word in dict_typo.keys():
                        tweet[i] = dict_typo[word]
                tweet = ' '.join(tweet)
                tweet = re.sub(r"\'s", " \'s", tweet)
                tweet = re.sub(r"\'ve", " \'ve", tweet)
                tweet = re.sub(r"n\'t", " n\'t", tweet)
                tweet = re.sub(r" ca ", " can ", tweet)
                tweet = re.sub(r"\'re", " \'re", tweet)
                tweet = re.sub(r"\'d", " \'d", tweet)
                tweet = re.sub(r"\'l", " \'ll", tweet)
                tweet = re.sub(r"\'ll", " \'ll", tweet)
                # tweet = re.sub(r",", " , ", tweet)
                # tweet = re.sub(r"!", " ! ", tweet)
                # tweet = re.sub(r"\(", " \( ", tweet)
                # tweet = re.sub(r"\)", " \) ", tweet)
                # tweet = re.sub(r"\?", " \? ", tweet)
                tweet = re.sub(r"\s{2,}", " ", tweet)
                tweet = re.sub(r'<([^>]+)>', ' ',tweet) # Removes usr and url
                tweet = re.sub(r'^#| #', ' ', tweet) # Removes hashtags
                tweet = re.sub(r'\d+(x)\d+', '<img>', tweet) # Removes picture frames
                # tweet = re.sub(r'n\'t$|n\'t ', ' not', tweet) # Converts negation contraction to verb + not
                if only_words:
                    tweet = re.sub(r'[^a-z]', ' ', tweet) # Only keeps words
                tweet = tweet.strip().split()
                if stemmer != None:
                    tweet = [stemmer.stem(word) for word in tweet] # stemming
                # Spell checker for commonly missspeled words
                for i, word in enumerate(tweet):
                    if word in dict_typo.keys():
                        tweet[i] = dict_typo[word]
                if min_len is not None:
                    wf.write(ID+' '.join([word for word in tweet if len(word) >= min_len])+'\n')
                else:
                    wf.write(ID+' '.join(tweet)+'\n')
def main():
    DICT_PATH = "../dict"
    OR_TWITT_PATH = "../data/twitter-datasets-original"
    NEW_TWITT_PATH = "../data/twitter-datasets"
    DATA_PATH = "../data"
    FULL = True
    #
    dict_typo = load_dicts(DICT_PATH)
    word_list = brown.words()
    if FULL:
        files = [i for i in os.listdir(OR_TWITT_PATH) if i.endswith('.txt')]
    else:
        files = [i for i in os.listdir(OR_TWITT_PATH) if not i.endswith('full.txt')]
    stemmer = PorterStemmer()
    for file in files:
        print("Processing {} ...".format(file))
        clean_tweets(file, OR_TWITT_PATH, NEW_TWITT_PATH, dict_typo, word_list, only_words=False, stemmer=None, min_len=None)
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
emilebourban.noreply@github.com
|
b6c46a8f86767ee4d504896126681b76d2154528
|
247dad430fd808f516def928cbcd64ae07da0e51
|
/article/migrations/0001_initial.py
|
90fa2f05ac7cfcb32cc7e0e9de97841abeac08b6
|
[] |
no_license
|
manlife2017/my_blog
|
f8f33083bf9c4763c2f3727d4320a48692c4e4d5
|
9e43fbc79ae3f9c36ed05f47071d33ffcf27131d
|
refs/heads/master
| 2021-01-21T19:01:38.668765
| 2017-05-23T00:33:37
| 2017-05-23T00:33:37
| 92,107,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-19 12:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('category', models.CharField(max_length=50)),
                ('date_time', models.DateTimeField(auto_now_add=True)),
                ('content', models.TextField(blank=True, null=True)),
            ],
            options={
                'ordering': ['-date_time'],
            },
        ),
    ]
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
fdf2f52741bb4e9aed4af0d9607587f3c2a94a64
|
5ad5e0880164736c0ffcebcb631e803f0e44c78e
|
/dumbo/migrations/0013_comment.py
|
6c37a4f5dcc3e3b0cea42424f1eabbd859c55acf
|
[] |
no_license
|
adarshsanjeev/dumbo
|
4f45400430aa7bcc773457529df6c6b46f1b2cdc
|
9a35187df26973cf379ac55c09223a98ab9a66dd
|
refs/heads/master
| 2020-04-15T03:52:20.824305
| 2016-06-06T04:48:00
| 2016-06-06T04:48:00
| 60,191,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-05 00:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('dumbo', '0012_auto_20160604_1835'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField()),
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dumbo.Issue')),
            ],
        ),
    ]
|
[
"adarshsanjeev@gmail.com"
] |
adarshsanjeev@gmail.com
|
04218200d29ae2eb7ca49c722ea5dda1781ec7ae
|
851a1203432c6e3f2a233094cdc756c6775105ad
|
/Ladies/migrations/0003_auto_20160619_1329.py
|
37793e2b63008793b4903febde161d17b0fea7d0
|
[] |
no_license
|
AliSamir730/afportal
|
b859530fcd4c0b1a596f6ce6f3204a7e2417baa3
|
1e1b1c45521dc8a438b4237b235bb495b68b1840
|
refs/heads/master
| 2021-01-17T20:50:00.653370
| 2016-06-20T09:23:26
| 2016-06-20T09:23:26
| 61,533,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-19 08:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('Ladies', '0002_auto_20160619_1326'),
    ]
    operations = [
        migrations.AlterField(
            model_name='lady',
            name='pictures',
            field=models.ImageField(upload_to='Ladies/%Y/%m/%d/'),
        ),
    ]
|
[
"Sayyed Ali Hashimi"
] |
Sayyed Ali Hashimi
|
c2043258d0cb1b2340d9b37092c14dd00108570a
|
ae9e7000a0929c66ca6f37fdcbd6bf1b074f4ce6
|
/prime number.py
|
931db63ba68aa56297f885b55553f43ae7df473c
|
[] |
no_license
|
Rafsun83/Python-Basic-Practice-Code-
|
cd5c9b840c3a39254678ba452c4d152e323e1616
|
9f5b952403420c4f61a967e575e8ac4bd9969c7b
|
refs/heads/main
| 2023-05-31T23:01:15.279984
| 2021-06-13T15:13:10
| 2021-06-13T15:13:10
| 376,364,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import math
n = int(input("enter the number: "))
count = 0
val = int(math.sqrt(n))
for x in range(2, val+1, 1):
    if (n % x) == 0:
        count = 1
        break
if count==0:
    print(n, "prime")
else:
    print(n, "not prime")
|
[
"noreply@github.com"
] |
Rafsun83.noreply@github.com
|
b0e2c4a1e312ad2f25c5eb05bb59537b4ea2d16d
|
465a569faf4c7c08462c19b16ee7a69b799099cf
|
/ImagineCup/Backend/BERT_Model.py
|
135910286c772d8e6a7af1187ea8990fc00181ae
|
[] |
no_license
|
SaiG18/ReadAR
|
9d8387fa3d792a4e5c3ed57cd04fc6f58ffe5a40
|
8ea22ce1812002e56add19d56e2df4d312a3a4df
|
refs/heads/master
| 2023-03-04T23:18:37.443075
| 2021-02-17T20:42:34
| 2021-02-17T20:42:34
| 240,667,249
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,604
|
py
|
import xml.etree.ElementTree as ET
import torch
import pickle
import glob
import argparse
import numpy as np
from pytorch_pretrained_bert import BertTokenizer, BertModel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from nltk.stem import WordNetLemmatizer
from WordNet_Lookup import WN_lookup
from tqdm import tqdm, trange
from copy import deepcopy
import warnings
warnings.filterwarnings('ignore')
class BERT:
def __init__(self, use_cuda=False):
self.use_cuda = use_cuda
self.tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
self.model = BertModel.from_pretrained('bert-large-uncased')
self.model.eval()
self.model.to(torch.device("cpu"))
class Word_Sense_Model:
def __init__(self, use_cuda=False):
self.use_cuda = use_cuda
self.sense_number_map = {'N': 1, 'V': 2, 'J': 3, 'R': 4}
self.Bert_Model = BERT(use_cuda)
self.lemmatizer = WordNetLemmatizer()
def open_xml_file(self, file_name):
tree = ET.parse(file_name)
root = tree.getroot()
return root, tree
def wngt_sent_sense_collect(self, xml_struct):
_sent = []
_sent1 = []
_senses = []
temp_list_pos = []
_back_sent = []
_back_sent1 = ""
_back_senses = []
for idx, j in enumerate(xml_struct.iter('word')):
_temp_dict = j.attrib
if 'lemma' in _temp_dict:
_word = _temp_dict['lemma'].lower()
else:
_word = _temp_dict['surface_form'].lower()
_back_sent.extend([_word])
_back_sent1 += _word + " "
if 'wn30_key' in _temp_dict:
_back_senses.extend([_temp_dict['wn30_key']] * len([_word]))
else:
_back_senses.extend([0] * len([_word]))
_temp_dict = xml_struct.attrib
if 'wn30_key' in _temp_dict:
_senses1 = _temp_dict['wn30_key'].split(';')
for i in _senses1:
_word = [str(i.split('%')[0]), 'is']
_temp_sent = []
_temp_sent1 = ""
_temp_senses = []
_temp_sent.extend(_word)
_temp_sent.extend(_back_sent)
_temp_sent1 += ' '.join(_word) + " " + _back_sent1
_temp_senses.extend([i, 0])
_temp_senses.extend(_back_senses)
_sent.append(_temp_sent)
_sent1.append(_temp_sent1)
_senses.append(_temp_senses)
return _sent, _sent1, _senses, temp_list_pos
def semcor_sent_sense_collect(self, xml_struct):
_sent = []
_sent1 = ""
_senses = []
temp_list_pos = []
for idx, j in enumerate(xml_struct.iter('word')):
_temp_dict = j.attrib
flag = 0
if 'lemma' not in _temp_dict:
words = _temp_dict['surface_form'].lower()
_sent1 += words + " "
words = words.split('_')
words1 = words[0:1]
words2 = words[1:]
else:
_pos = _temp_dict['pos'].lower()[0]
if _pos not in ['a', 'v', 'n']:
_pos = 'n'
w2 = _temp_dict['lemma'].lower().split('_')
words = _temp_dict['surface_form'].lower()
_sent1 += words + " "
words = words.split('_')
l = self.lemmatizer.lemmatize(words[0], pos=_pos)
if str(l).startswith(w2[0]) or str(w2[0]).startswith(l):
words1 = words[0:1]
words2 = words[1:]
else:
flag = 1
_sent.extend(words)
if 'wn30_key' in _temp_dict:
if not flag:
_senses.extend([_temp_dict['wn30_key']] * len(words1))
_senses.extend([0] * len(words2))
else:
_senses.extend([0] * len(words))
else:
_senses.extend([0] * len(words))
return _sent, _sent1, _senses, temp_list_pos
def semeval_sent_sense_collect(self, xml_struct):
_sent = []
_sent1 = ""
_senses = []
pos = []
for idx, j in enumerate(xml_struct.iter('word')):
_temp_dict = j.attrib
if 'lemma' in _temp_dict:
words = _temp_dict['lemma'].lower()
else:
words = _temp_dict['surface_form'].lower()
if '*' not in words:
_sent1 += words + " "
_sent.extend([words])
if 'pos' in _temp_dict:
pos.extend([_temp_dict['pos']] * len([words]))
else:
pos.extend([0] * len([words]))
if 'wn30_key' in _temp_dict:
_senses.extend([_temp_dict['wn30_key']] * len([words]))
else:
_senses.extend([0] * len([words]))
return _sent, _sent1, _senses, pos
def apply_bert_tokenizer(self, word):
return self.Bert_Model.tokenizer.tokenize(word)
def collect_bert_tokens(self, _sent, lemma=False):
_bert_tokens = ['[CLS]', ]
if lemma:
for idx, j in enumerate(_sent):
_sent[idx] = self.lemmatizer.lemmatize(_sent[idx])
_tokens = self.apply_bert_tokenizer(_sent[idx])
_bert_tokens.extend(_tokens)
else:
for idx, j in enumerate(_sent):
_tokens = self.apply_bert_tokenizer(_sent[idx])
_bert_tokens.extend(_tokens)
_bert_tokens.append('[SEP]')
return _bert_tokens
def get_bert_embeddings(self, tokens):
_ib = self.Bert_Model.tokenizer.convert_tokens_to_ids(tokens)
_st = [0] * len(_ib)
_t1, _t2 = torch.tensor([_ib]), torch.tensor([_st])
with torch.no_grad():
_encoded_layers, _ = self.Bert_Model.model(
_t1, _t2, output_all_encoded_layers=True)
_e1 = _encoded_layers[-4:]
_e2 = torch.cat((_e1[0], _e1[1], _e1[2], _e1[3]), 2)
if self.use_cuda:
_final_layer = _e2[0].cpu().numpy()
else:
_final_layer = _e2[0].numpy()
return _final_layer
def create_word_sense_maps(self, _word_sense_emb):
_sense_emb = {}
_sentence_maps = {}
_sense_word_map = {}
_word_sense_map = {}
for i in _word_sense_emb:
if i not in _word_sense_map:
_word_sense_map[i] = []
for j in _word_sense_emb[i]:
if j not in _sense_word_map:
_sense_word_map[j] = []
_sense_word_map[j].append(i)
_word_sense_map[i].append(j)
if j not in _sense_emb:
_sense_emb[j] = []
_sentence_maps[j] = []
_sense_emb[j].extend(_word_sense_emb[i][j]['embs'])
_sentence_maps[j].extend(_word_sense_emb[i][j]['sentences'])
return _sense_emb, _sentence_maps, _sense_word_map, _word_sense_map
def train(self, train_file, training_data_type):
print("Training Embeddings")
_word_sense_emb = {}
_train_root, _train_tree = self.open_xml_file(train_file)
for i in tqdm(_train_root.iter('sentence')):
if training_data_type == "SE":
all_sent, all_sent1, all_senses, _ = self.semeval_sent_sense_collect(
i)
all_sent, all_sent1, all_senses = [
all_sent], [all_sent1], [all_senses]
elif training_data_type == "SEM":
all_sent, all_sent1, all_senses, _ = self.semcor_sent_sense_collect(
i)
all_sent, all_sent1, all_senses = [
all_sent], [all_sent1], [all_senses]
elif training_data_type == "WNGT":
all_sent, all_sent1, all_senses, _ = self.wngt_sent_sense_collect(
i)
else:
print("Argument train_type not specified properly")
quit()
for sent, sent1, senses in zip(all_sent, all_sent1, all_senses):
try:
bert_tokens = self.collect_bert_tokens(sent)
final_layer = self.get_bert_embeddings(bert_tokens)
count = 1
for idx, j in enumerate(zip(senses, sent)):
sense = j[0]
word = j[1]
if sense != 0:
embedding = np.mean(
final_layer[count: count + len(self.apply_bert_tokenizer(word))], 0)
if word not in _word_sense_emb:
_word_sense_emb[word] = {}
for s in sense.split(';'):
if s not in _word_sense_emb[word]:
_word_sense_emb[word][s] = {}
_word_sense_emb[word][s]['embs'] = []
_word_sense_emb[word][s]['sentences'] = []
_word_sense_emb[word][s][
'embs'].append(embedding)
_word_sense_emb[word][s][
'sentences'].append(sent1)
count += len(self.apply_bert_tokenizer(word))
except Exception as e:
print(e)
return _word_sense_emb
def load_embeddings(self, pickle_file_name, train_file, training_data_type):
try:
with open(pickle_file_name, 'rb') as h:
_x = pickle.load(h)
print("EMBEDDINGS FOUND")
return _x
except:
print("Embedding File Not Found \n")
word_sense_emb = self.train(train_file, training_data_type)
with open(pickle_file_name, 'wb') as h:
pickle.dump(word_sense_emb, h)
print("Embeddings Saved to " + pickle_file_name)
return word_sense_emb
def test(self,
train_file,
test_file,
emb_pickle_file,
training_data_type,
save_to,
k=1,
use_euclidean=False,
reduced_search=True):
word_sense_emb = self.load_embeddings(
emb_pickle_file, train_file, training_data_type)
print("Testing")
sense_emb, sentence_maps, sense_word_map, word_sense_map = self.create_word_sense_maps(
word_sense_emb)
_test_root, _test_tree = self.open_xml_file(test_file)
_correct, _wrong = [], []
open(save_to, "w").close()
for i in tqdm(_test_root.iter('sentence')):
sent, sent1, senses, pos = self.semeval_sent_sense_collect(i)
print(senses, sent)
bert_tokens = self.collect_bert_tokens(sent)
final_layer = self.get_bert_embeddings(bert_tokens)
count, tag, nn_sentences = 1, [], []
for idx, j in enumerate(zip(senses, sent, pos)):
word = j[1]
pos_tag = j[2][0]
if j[0] != 0:
_temp_tag = 0
max_score = -99
nearest_sent = 'NONE'
embedding = np.mean(
final_layer[count:count + len(self.apply_bert_tokenizer(word))], 0)
min_span = 10000
if word in word_sense_map:
concat_senses = []
concat_sentences = []
index_maps = {}
_reduced_sense_map = []
if reduced_search:
for sense_id in word_sense_map[word]:
if self.sense_number_map[pos_tag] == int(sense_id.split('%')[1][0]):
_reduced_sense_map.append(sense_id)
if len(_reduced_sense_map) == 0:
_reduced_sense_map = list(word_sense_map[word])
for sense_id in _reduced_sense_map:
index_maps[sense_id] = {}
index_maps[sense_id]['start'] = len(concat_senses)
concat_senses.extend(sense_emb[sense_id])
concat_sentences.extend(sentence_maps[sense_id])
index_maps[sense_id]['end'] = len(
concat_senses) - 1
index_maps[sense_id]['count'] = 0
if min_span > (index_maps[sense_id]['end'] - index_maps[sense_id]['start'] + 1):
min_span = (index_maps[sense_id][
'end'] - index_maps[sense_id]['start'] + 1)
min_nearest = min(min_span, k)
concat_senses = np.array(concat_senses)
if use_euclidean:
simis = euclidean_distances(
embedding.reshape(1, -1), concat_senses)[0]
nearest_indexes = simis.argsort()[:min_nearest]
else:
simis = cosine_similarity(
embedding.reshape(1, -1), concat_senses)[0]
nearest_indexes = simis.argsort(
)[-min_nearest:][::-1]
for idx1 in nearest_indexes:
for sense_id in _reduced_sense_map:
if index_maps[sense_id]['start'] <= idx1 and index_maps[sense_id]['end'] >= idx1:
index_maps[sense_id]['count'] += 1
score = index_maps[sense_id]['count']
if score > max_score:
max_score = score
_temp_tag = sense_id
nearest_sent = concat_sentences[idx1]
tag.append(_temp_tag)
nn_sentences.append(nearest_sent)
print(_temp_tag)
count += len(self.apply_bert_tokenizer(word))
_counter = 0
for j in i.iter('word'):
temp_dict = j.attrib
try:
if 'wn30_key' in temp_dict:
if tag[_counter] == 0:
pass
else:
# lemma_key = tag[_counter].split(";")[0]
# wrd_def = WN_lookup(lemma_key)
# print(wrd_def)
j.attrib['WSD'] = str(tag[_counter])
if j.attrib['WSD'] in str(temp_dict['wn30_key']).split(';'):
_correct.append([temp_dict['wn30_key'], j.attrib[
'WSD'], (sent1), nn_sentences[_counter]])
else:
_wrong.append([temp_dict['wn30_key'], j.attrib[
'WSD'], (sent1), nn_sentences[_counter]])
_counter += 1
except Exception as e:
print(e)
with open(save_to, "w") as f:
_test_tree.write(f, encoding="unicode")
print("OUTPUT STORED TO FILE: " + str(save_to))
return _correct, _wrong
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='WSD using BERT')
parser.add_argument('--device', type=str,
default='cuda:2', help='GPU Device to Use?')
parser.add_argument('--train_corpus', type=str,
required=True, help='Training Corpus')
parser.add_argument('--train_type', type=str,
required=True, help='SEM/WNGT/SE')
parser.add_argument('--trained_pickle', type=str,
help='Pickle file of Trained Bert Embeddings/Save Embeddings to this file')
parser.add_argument('--test_corpus', type=str,
required=True, help='Testing Corpus')
parser.add_argument('--start_k', type=int, default=1,
help='Start value of Nearest Neighbour')
parser.add_argument('--end_k', type=int, default=1,
help='End value of Nearest Neighbour')
parser.add_argument('--save_xml_to', type=str,
help='Save the final output to?')
parser.add_argument('--use_euclidean', type=int, default=0,
help='Use Euclidean Distance to Find NNs?')
parser.add_argument('--reduced_search', type=int,
default=0, help='Apply Reduced POS Search?')
args = parser.parse_args()
print("Training Corpus is: " + args.train_corpus)
print("Testing Corpus is: " + args.test_corpus)
print("Nearest Neighbour start: " + str(args.start_k))
print("Nearest Neighbour end: " + str(args.end_k))
if args.reduced_search:
print("Using Reduced POS Search")
else:
print("Using the Search without POS")
if args.use_euclidean:
print("Using Euclidean Distance")
else:
print("Using Cosine Similarity")
print("Loading WSD Model")
WSD = Word_Sense_Model(False)
print("Loaded WSD Model")
for nn in range(args.start_k, args.end_k + 1):
correct, wrong = WSD.test(train_file=args.train_corpus,
test_file=args.test_corpus,
training_data_type=args.train_type,
emb_pickle_file=args.trained_pickle,
save_to=args.save_xml_to[
:-4] + "_" + str(nn) + args.save_xml_to[-4:],
k=nn,
use_euclidean=args.use_euclidean,
reduced_search=args.reduced_search)
print(float(len(correct)) / (len(wrong) + len(correct)))
|
[
"saigurrapu18@gmail.com"
] |
saigurrapu18@gmail.com
|
0b3ffa2472130f693f78b74241bc9622c723b9ae
|
bfd1401812b40d8d03f3244197602ce881ba8d18
|
/disquaire_project/store/migrations/0001_initial.py
|
e1370a5821780da13df47c088023f0c14b7fb15d
|
[] |
no_license
|
zohraDev/Web_site_Discaire
|
9ead99c93f236a13a61742b89288a0e621afa2a5
|
ff77855790cd68dbffac62a469fa4e7ee5bba12d
|
refs/heads/master
| 2023-04-21T05:41:08.560462
| 2021-05-04T22:03:09
| 2021-05-04T22:03:09
| 362,398,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
# Generated by Django 3.2 on 2021-04-30 10:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reference', models.IntegerField(null=True)),
                ('created_at', models.DateField(auto_now_add=True)),
                ('available', models.BooleanField(default=True)),
                ('title', models.CharField(max_length=200)),
                ('picture', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=100)),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Booking',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateField(auto_now_add=True)),
                ('contacted', models.BooleanField(default=True)),
                ('album', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='store.album')),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.contact')),
            ],
        ),
        migrations.AddField(
            model_name='album',
            name='artists',
            field=models.ManyToManyField(blank=True, related_name='albums', to='store.Artist'),
        ),
    ]
|
[
"zohra.bachi@edu.itescia.fr"
] |
zohra.bachi@edu.itescia.fr
|
ec284b07ef46968568cf8bc5f0c5444b6a483e9c
|
c3e93cba8c95d47c08e04a7d40478a894bb34c75
|
/point_pick_test.py
|
a14f509f5c7c32c71fff2dcaba5c7f0ddbd263de
|
[] |
no_license
|
aransena/home_ws
|
2cef097f2961a1519b078346383b53f9fe67df21
|
b240d257e8b65ba88acc5ad28a017b7f4b9f8ff4
|
refs/heads/master
| 2021-01-12T12:12:54.897028
| 2019-10-11T14:46:36
| 2019-10-11T14:46:36
| 72,365,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
import matplotlib.pyplot as plt
fig = plt.figure()
plt.axis([0, 1, 0, 1])
ax = fig.add_subplot(111)
#ax.plot(np.random.rand(10))
def onclick(event):
    print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
          (event.button, event.x, event.y, event.xdata, event.ydata))
    plt.scatter(event.xdata, event.ydata,c='r',marker='o')
    plt.draw()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
|
[
"Aran Sena"
] |
Aran Sena
|
44fb15e133cabafabb0c6d5f000523ccf9ea8efc
|
c7c37bf0fac9b0fa35def68c3e8e6a6dde26e586
|
/accounts/urls.py
|
3a281c92ba2957f271d598c22e872810fab5ca4c
|
[] |
no_license
|
prakashjha18/Django-app
|
281eb9f490f9ee8db6f0cde38a0eda58f826c179
|
16a7fb015f389031d5cd676a99f98f3df0e2b12b
|
refs/heads/master
| 2020-09-16T14:14:34.130277
| 2019-11-26T10:00:51
| 2019-11-26T10:00:51
| 223,795,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
from django.urls import path
from . import views
urlpatterns = [
    path("register",views.register, name="register"),
    path("login",views.login,name="login"),
    path("logout",views.logout,name="logout")
]
|
[
"prkjha25@gmail.com"
] |
prkjha25@gmail.com
|
77bce1362f9ddb1605900f9b2a128548e9573777
|
6090a53bf400d979e221f4751e9f77a053da1dba
|
/fuzzy.random.py
|
ffa39e2e8b5142563f679652d54856c893b52f81
|
[
"MIT"
] |
permissive
|
ronniechong/python-fuzzyrandom
|
2db73f6b2e248ba674b63fe29d6a99d8339d6e87
|
2dce5e37bf1efbca171d76f7cde701bb23c38537
|
refs/heads/master
| 2020-05-30T13:06:06.651374
| 2014-06-14T13:40:18
| 2014-06-14T13:40:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
class Shape(object):
    locX = 0
    locY = 0
    rgba = [0,0,0,1]
    radius = 1
    def __init__(self, locX, locY,rgba,radius):
        self.locX = locX
        self.locY = locY
        self.rgba = rgba
        self.radius = radius
def fnGetShapes():
    shape = Shape(10,10,[0,0,0,1],1)
if __name__ == "__main__":
    fnGetShapes()
|
[
"ronnie@otakuness.com"
] |
ronnie@otakuness.com
|
fc55ca8cdf98282b6938cf1baeadca9c0a489bc6
|
cc213aac5f386f9384a098913b24e035a5d989d9
|
/customer/urls.py
|
c9f4311675e69c697f6a0a4a25786e1964e23927
|
[
"MIT"
] |
permissive
|
arunkgupta/e-commerce-with-django
|
0d92dff9d3d2d02f379b073a5d756a6893868db3
|
f3ed08f6a1e2de1d787c10aad08a3e13ec25d489
|
refs/heads/master
| 2022-10-27T13:33:55.047909
| 2020-06-07T15:29:43
| 2020-06-07T15:29:43
| 270,604,127
| 1
| 0
|
MIT
| 2020-06-08T09:15:52
| 2020-06-08T09:15:51
| null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
    path("",views.home_page,name="home_page"),
    path("shirts",views.shirts_page,name="Shirt"),
    path("sports-wear",views.sports_wear_page,name="Sport Wear"),
    path("outwear",views.outwear_page,name="Outwear"),
    path("checkout/",views.checkout,name="checkout"),
    path("product-detail/<int:id>/",views.product_detail,name="product_detail"),
    path("cart/",views.cart,name="cart"),
    path("update_item/",views.update_item,name="update_item"),
    path("sign-in/",views.sign_in,name="sign_in"),
    path("sign-out/",views.signOut,name="sign_out"),
    path("register/",views.register,name="register"),
]
|
[
"49620321+UsamaKashif@users.noreply.github.com"
] |
49620321+UsamaKashif@users.noreply.github.com
|
e6be3362e3ca61dd08ffe293157ca23f84ef44ef
|
43b91b923df83cbae79c17fdb56217ffecb4ec61
|
/models/lfesm.py
|
98f2989c181a744a5645ebf6fd4cd723705d8951
|
[
"Apache-2.0"
] |
permissive
|
Thesharing/LFESM
|
c6248552688f71a7cec636a50f7dd72566c4352c
|
e956ed76f5a85259000742db093726d4b4c51751
|
refs/heads/master
| 2020-12-24T04:04:15.257184
| 2020-02-17T19:32:11
| 2020-02-17T19:32:11
| 237,376,207
| 12
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,286
|
py
|
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertModel
from .esim.layers import Seq2SeqEncoder, SoftmaxAttention
from .esim.utils import replace_masked
class LFESM(BertPreTrainedModel):
    """
    Legal Feature Enhanced Semantic Matching Network for Similar Case Matching.
    """
    def __init__(self, config):
        super(LFESM, self).__init__(config)
        self.bert = BertModel(config)
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # self.seq_relationship = nn.Linear(config.hidden_size, 2)
        self.init_weights()
        # dropout = 0.5
        # self._rnn_dropout = RNNDropout(p=dropout)
        feature_size = 28
        self._feature = nn.Linear(feature_size, config.hidden_size)
        self._attention = SoftmaxAttention()
        self._projection = nn.Sequential(nn.Linear(4 * config.hidden_size, config.hidden_size),
                                         nn.ReLU())
        self._composition = Seq2SeqEncoder(nn.LSTM,
                                           config.hidden_size,
                                           config.hidden_size,
                                           bidirectional=True)
        self._classification = nn.Sequential(nn.Dropout(p=config.hidden_dropout_prob),  # p=dropout
                                             nn.Linear(4 * 2 * config.hidden_size, config.hidden_size),
                                             nn.Tanh(),
                                             nn.Dropout(p=config.hidden_dropout_prob),  # p=dropout
                                             nn.Linear(config.hidden_size, 2))
        self.apply(self.init_esim_weights)
    def forward(self, a, b, c, labels=None, mode="prob"):
        # the parameter is: input_ids, attention_mask, token_type_ids
        # which is corresponding to input_ids, input_mask and segment_ids in InputFeatures
        a_output = self.bert(*a[:3])[0]
        b_output = self.bert(*b[:3])[0]
        c_output = self.bert(*c[:3])[0]
        # The return value: sequence_output, pooled_output, (hidden_states), (attentions)
        a_feature = self._feature(a[3].unsqueeze(1))
        b_feature = self._feature(b[3].unsqueeze(1))
        c_feature = self._feature(c[3].unsqueeze(1))
        a_extend = torch.cat([a_feature, a_output], dim=1)
        b_extend = torch.cat([b_feature, b_output], dim=1)
        c_extend = torch.cat([c_feature, c_output], dim=1)
        a_mask = torch.cat([a[4], a[1]], dim=1).float()
        b_mask = torch.cat([b[4], b[1]], dim=1).float()
        c_mask = torch.cat([c[4], c[1]], dim=1).float()
        v_ab = self.siamese(a_extend, b_extend, a_mask, b_mask)
        v_ac = self.siamese(a_extend, c_extend, a_mask, c_mask)
        subtraction = v_ab - v_ac
        # TODO: Try three solutions
        # Solution 1: v_ab - v_ac
        # Solution 2: cat(v_ab, v_ac)
        # Solution 3: margin - sim_a + sim_b
        output = self._classification(subtraction)
        if mode == "prob":
            prob = torch.nn.functional.softmax(Variable(output), dim=1)
            return prob
        elif mode == "logits":
            return output
        elif mode == "loss":
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(output.view(-1, 2), labels.view(-1))
            return loss
        elif mode == "evaluate":
            prob = torch.nn.functional.softmax(Variable(output), dim=1)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(output.view(-1, 2), labels.view(-1))
            return output, prob, loss
    def siamese(self, a_output, b_output, a_mask, b_mask):
        a_length = a_mask.sum(dim=-1).long()
        b_length = b_mask.sum(dim=-1).long()
        attended_a, attended_b = self._attention(a_output, a_mask, b_output, b_mask)
        enhanced_a = torch.cat([a_output,
                                attended_a,
                                a_output - attended_a,
                                a_output * attended_a],
                               dim=-1)
        enhanced_b = torch.cat([b_output,
                                attended_b,
                                b_output - attended_b,
                                b_output * attended_b],
                               dim=-1)
        projected_a = self._projection(enhanced_a)
        projected_b = self._projection(enhanced_b)
        # TODO: Add RNN Dropout
        # projected_a = self._rnn_dropout(projected_a)
        # projected_b = self._rnn_dropout(projected_b)
        v_ai = self._composition(projected_a, a_length)
        v_bj = self._composition(projected_b, b_length)
        v_a_avg = torch.sum(v_ai * a_mask.unsqueeze(1)
                            .transpose(2, 1), dim=1) / torch.sum(a_mask, dim=1, keepdim=True)
        v_b_avg = torch.sum(v_bj * b_mask.unsqueeze(1)
                            .transpose(2, 1), dim=1) / torch.sum(b_mask, dim=1, keepdim=True)
        v_a_max, _ = replace_masked(v_ai, a_mask, -1e7).max(dim=1)
        v_b_max, _ = replace_masked(v_bj, b_mask, -1e7).max(dim=1)
        v = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1)
        return v
    @staticmethod
    def init_esim_weights(module):
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight.data)
            nn.init.constant_(module.bias.data, 0.0)
        elif isinstance(module, nn.LSTM):
            nn.init.xavier_uniform_(module.weight_ih_l0.data)
            nn.init.orthogonal_(module.weight_hh_l0.data)
            nn.init.constant_(module.bias_ih_l0.data, 0.0)
            nn.init.constant_(module.bias_hh_l0.data, 0.0)
            hidden_size = module.bias_hh_l0.data.shape[0] // 4
            module.bias_hh_l0.data[hidden_size:(2 * hidden_size)] = 1.0
            if module.bidirectional:
                nn.init.xavier_uniform_(module.weight_ih_l0_reverse.data)
                nn.init.orthogonal_(module.weight_hh_l0_reverse.data)
                nn.init.constant_(module.bias_ih_l0_reverse.data, 0.0)
                nn.init.constant_(module.bias_hh_l0_reverse.data, 0.0)
                module.bias_hh_l0_reverse.data[hidden_size:(2 * hidden_size)] = 1.0
|
[
"cyprestar@outlook.com"
] |
cyprestar@outlook.com
|
21e15bdd3e1ab4966d5e747d337391f72e4f311a
|
c6a2e58311b2d81429a1c5daadfd77891527cc35
|
/easypr_ng/migrations/0001_initial.py
|
a44403dbc328a8f1427e1757933293eae6570303
|
[] |
no_license
|
EzechukwuJI/easypr
|
5ab1115d84aa843cf9684af1184626cb42f7d8e0
|
32fab5e1236d3098c7b5ea884c243def1075ad96
|
refs/heads/master
| 2020-05-21T22:38:07.872764
| 2016-12-23T10:46:41
| 2016-12-23T10:46:41
| 65,491,758
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,577
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blogs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('blog_url', models.URLField()),
('name_slug', models.CharField(max_length=275)),
('category', models.CharField(max_length=125, choices=[(b'top-blogs', b'Top Blogs'), (b'technology', b'Technology')])),
('price', models.FloatField(default=0.0)),
('active', models.BooleanField(default=True)),
],
options={
'verbose_name_plural': 'Blogs',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_posted', models.DateTimeField(auto_now_add=True)),
('comment', models.TextField(max_length=1000)),
('website', models.CharField(max_length=150, null=True, blank=True)),
],
),
migrations.CreateModel(
name='CommentReply',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_posted', models.DateTimeField(auto_now_add=True)),
('reply', models.TextField(max_length=1000)),
('comment', models.ForeignKey(to='easypr_ng.Comment')),
('posted_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='InterviewRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ticket_number', models.CharField(max_length=14)),
('date_requested', models.DateTimeField(auto_now_add=True)),
('date_closed', models.DateTimeField(null=True, blank=True)),
('status', models.CharField(default=b'new', max_length=25, choices=[(b'new', b'new'), (b'contacted', b'contacted'), (b'in_progress', b'In progress'), (b'closed', b'closed')])),
('request_outcome', models.CharField(default=b'pending', max_length=25, choices=[(b'pending', b'pending'), (b'success', b'success'), (b'declined', b'declined'), (b'deferred', b'deferred'), (b'dropped', b'dropped')])),
('contact_person', models.CharField(max_length=125)),
('contact_email', models.EmailField(max_length=255)),
('phone_number', models.CharField(max_length=15)),
('preferred_interview_date', models.DateTimeField()),
('interview_venue', models.TextField(max_length=300, null=True)),
('interview_date', models.DateTimeField()),
('interview_time', models.DateTimeField()),
('person_to_be_interviewed', models.CharField(max_length=125)),
('closed_by', models.OneToOneField(related_name='interview_closed_by', null=True, blank=True, to=settings.AUTH_USER_MODEL)),
('contacted_by', models.OneToOneField(related_name='interview_contacted_by', null=True, blank=True, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-date_requested',),
'verbose_name_plural': 'Interview request',
},
),
migrations.CreateModel(
name='MediaContact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=125)),
('last_name', models.CharField(max_length=125)),
('date_added', models.DateTimeField(auto_now_add=True)),
('email', models.CharField(max_length=225)),
('phone_number', models.CharField(max_length=15, null=True, blank=True)),
],
),
migrations.CreateModel(
name='MediaHouse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('name_slug', models.CharField(max_length=200)),
('date_added', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='MediaPlatform',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=60)),
('name_slug', models.CharField(max_length=75)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Package',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=75, choices=[(b'basic', b'Basic'), (b'regular', b'Regular'), (b'premium', b'Premium'), (b'premium-plus', b'Premium Plus')])),
('media_outreach_credit', models.CharField(default=1, max_length=25)),
('online', models.CharField(max_length=5, verbose_name=b'online_newspaper_publishing', choices=[(b'yes', b'yes'), (b'no', b'no')])),
('monitoring', models.CharField(max_length=5, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('free_consulting', models.CharField(max_length=5, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('newsroom', models.CharField(max_length=5, verbose_name=b'Newsroom via EasyPR Media Desk', choices=[(b'yes', b'yes'), (b'no', b'no')])),
('google_news_inclusions', models.CharField(max_length=5, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('reuters_news_network', models.CharField(max_length=5, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('hyperlinks', models.CharField(max_length=5, verbose_name=b'hyperlinks in online press release')),
('notification', models.CharField(max_length=5, verbose_name=b'publication notification via email', choices=[(b'yes', b'yes'), (b'no', b'no')])),
('autopost', models.CharField(max_length=5, verbose_name=b'autopost to social media account', choices=[(b'yes', b'yes'), (b'no', b'no')])),
('analytics', models.CharField(max_length=5, verbose_name=b'detailed analytics report', choices=[(b'yes', b'yes'), (b'no', b'no')])),
('expedited', models.CharField(max_length=5, verbose_name=b'expedited release processing', choices=[(b'yes', b'yes'), (b'no', b'no')])),
('available_on_homepage', models.CharField(max_length=5, verbose_name=b'news made available to journalists, bloggers and researchers via EasyPR homepage', choices=[(b'yes', b'yes'), (b'no', b'no')])),
('content_writing', models.CharField(max_length=5, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('content_editing', models.CharField(max_length=5, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('featured_package', models.CharField(max_length=5, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('price_naira', models.FloatField(default=0.0, max_length=25)),
('price_dollar', models.FloatField(default=0.0, max_length=25)),
('active', models.BooleanField(default=False)),
('is_promo', models.BooleanField(default=False)),
('promo_starts', models.DateTimeField(auto_now_add=True)),
('promo_ends', models.DateTimeField(auto_now_add=True)),
('promo_price_dollar', models.FloatField(default=0.0, max_length=25)),
('promo_price_naira', models.FloatField(default=0.0, max_length=25)),
],
options={
'verbose_name_plural': 'Packages',
},
),
migrations.CreateModel(
name='PayDetails',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=25, null=True)),
('payment_method', models.CharField(default=b'', max_length=75, choices=[(b'Bank Deposit', b'Bank Deposit'), (b'Card Payment', b'Card Payment'), (b'Bank Transfer', b'Bank Transfer')])),
('amount_paid', models.FloatField(default=0.0)),
('date_paid', models.CharField(max_length=100, null=True, blank=True)),
('bank_name', models.CharField(blank=True, max_length=100, null=True, choices=[(b'Diamond Bank', b'Diamond Bank'), (b'GTB', b'GTB')])),
('currency', models.CharField(max_length=100, null=True, blank=True)),
('teller_number', models.CharField(max_length=15, null=True, blank=True)),
('pay_status', models.CharField(default=b'pending', max_length=25, choices=[(b'verified', b'verified'), (b'pending', b'pending'), (b'failed', b'failed')])),
('verified_by', models.BooleanField(default=False)),
('date_verified', models.DateTimeField(null=True, blank=True)),
('user', models.ForeignKey(verbose_name=b'Payment By', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PressMaterial',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name_slug', models.CharField(max_length=150)),
('media_type', models.CharField(max_length=150)),
('price_per', models.FloatField(default=0.0)),
('date_added', models.DateTimeField(auto_now_add=True)),
('caption', models.CharField(max_length=125)),
],
),
migrations.CreateModel(
name='PRStrategy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('anon_userID', models.CharField(max_length=75, verbose_name=b'Annonymous user ID')),
('business_type', models.CharField(default=b'Company', max_length=25, choices=[(b'NA', b'NA'), (b'Company', b'Company'), (b'Individual', b'Individual')])),
('company_type', models.CharField(default=b'Private', max_length=75, choices=[(b'NA', b'NA'), (b'Public', b'Public'), (b'Private', b'Private')])),
('is_pr_agent', models.CharField(default=b'No', max_length=75, choices=[(b'Yes', b'Yes'), (b'No', b'No')])),
('size_of_pr_team', models.IntegerField(default=0)),
('target_audience', models.TextField(max_length=1000, null=True)),
('pr_goals', models.TextField(max_length=1000, null=True)),
('frequency_of_pr', models.CharField(default=b'monthly', max_length=100, choices=[(b'NA', b'NA'), (b'weekly', b'Weekly'), (b'monthly', b'Monthly'), (b'several-times-a-month', b'Several Times a Month'), (b'quartely', b'Quartely'), (b'annually', b'Annually'), (b'first-time-user', b'First Time User')])),
('target_audience_location', models.CharField(max_length=250, null=True)),
('currently_use_pr_db', models.BooleanField(default=False)),
('social_media_used', models.TextField(max_length=1000, null=True)),
('pr_db_used', models.TextField(max_length=1000, null=True)),
('require_pr_writing', models.BooleanField(default=False)),
('require_media_pitching', models.BooleanField(default=False)),
('do_you_have_newsroom', models.BooleanField(default=False)),
('name_pr_newsroom_link', models.CharField(max_length=200)),
('date_submitted', models.DateTimeField(auto_now_add=True)),
('action_status', models.CharField(default=b'Contacted', max_length=75, choices=[(b'new', b'new'), (b'contacted', b'contacted'), (b'in_progress', b'In progress'), (b'closed', b'closed')])),
('company_name', models.CharField(max_length=200, null=True)),
('contact_name', models.CharField(max_length=125, null=True)),
('email', models.CharField(max_length=125, null=True)),
('phone_number', models.CharField(max_length=25, null=True)),
('completed', models.BooleanField(default=False)),
],
options={
'ordering': ['date_submitted'],
'verbose_name_plural': 'PR Strategy',
},
),
migrations.CreateModel(
name='Publication',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=15)),
('post_title', models.CharField(max_length=175)),
('title_slug', models.CharField(max_length=200)),
('status', models.CharField(default=b'New', max_length=50, choices=[(b'New', b'New'), (b'sent_to_external_editor', b'sent_to_external_editor'), (b'Processing', b'Processing'), (b'Published', b'Published'), (b'Rejected', b'Rejected')])),
('post_body', models.TextField(max_length=3000, null=True, blank=True)),
('person_to_quote', models.CharField(max_length=125, null=True, blank=True)),
('persons_position', models.CharField(max_length=125, null=True, blank=True)),
('uploaded_text', models.FileField(null=True, upload_to=b'uploads/publication/%Y-%M-%D', blank=True)),
('date_published', models.DateTimeField(null=True, blank=True)),
('date_posted', models.DateTimeField(auto_now_add=True)),
('deleted', models.BooleanField(default=False)),
('publish_online', models.BooleanField(default=False, verbose_name=b'Do you also want online publication of the chosen media? ')),
('completed', models.BooleanField(default=False)),
('assigned_to', models.ForeignKey(related_name='Third_party_Editor', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('media_houses', models.ManyToManyField(to='easypr_ng.MediaHouse')),
('platform', models.ForeignKey(verbose_name=b'Media platform', blank=True, to='easypr_ng.MediaPlatform', null=True)),
('posted_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('press_material', models.ForeignKey(verbose_name=b'Media category', blank=True, to='easypr_ng.PressMaterial', null=True)),
('published_by', models.ForeignKey(related_name='Edited_and_published_by', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='PublicationImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.ImageField(null=True, upload_to=b'uploads/images/publication', blank=True)),
('caption', models.CharField(max_length=200, null=True, blank=True)),
('post', models.ForeignKey(blank=True, to='easypr_ng.Publication', null=True)),
],
),
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=15)),
('deleted', models.BooleanField(default=False)),
('ordered', models.BooleanField(default=False)),
('status', models.CharField(default=b'New', max_length=75, choices=[(b'New', b'New'), (b'Processing', b'Processing'), (b'Pending', b'Pending'), (b'Rejected', b'Rejected'), (b'Closed', b'Closed')])),
('date_purchased', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Redirect_url',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(default=None, max_length=200, null=True, blank=True)),
('post', models.ForeignKey(blank=True, to='easypr_ng.Publication', null=True)),
],
),
migrations.CreateModel(
name='RequestImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.ImageField(null=True, upload_to=b'uploads/images/request', blank=True)),
('caption', models.CharField(max_length=200, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Sector',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=60)),
('name_slug', models.CharField(max_length=75)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='ServiceRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ticket_number', models.CharField(max_length=14)),
('date_requested', models.DateTimeField(auto_now_add=True)),
('date_closed', models.DateTimeField(null=True, blank=True)),
('status', models.CharField(default=b'new', max_length=25, choices=[(b'new', b'new'), (b'contacted', b'contacted'), (b'in_progress', b'In progress'), (b'closed', b'closed')])),
('request_outcome', models.CharField(default=b'pending', max_length=25, choices=[(b'pending', b'pending'), (b'success', b'success'), (b'declined', b'declined'), (b'deferred', b'deferred'), (b'dropped', b'dropped')])),
('contact_person', models.CharField(max_length=125)),
('contact_email', models.EmailField(max_length=255)),
('phone_number', models.CharField(max_length=15)),
('service_type', models.CharField(max_length=100)),
('sector', models.CharField(max_length=100, choices=[(b'Finance', b'Finance')])),
('brief_description', models.TextField(max_length=500, null=True, blank=True)),
('target_media', models.CharField(max_length=125, null=True, choices=[(b'Newspaper', b'Newspaper'), (b'Blog', b'Blog')])),
('time_service_needed', models.CharField(max_length=75, null=True)),
('preferred_call_time', models.CharField(max_length=50, null=True)),
('allow_call', models.BooleanField(default=False)),
('name_of_event', models.CharField(default=b'Not Applicable', max_length=100)),
('event_date', models.DateTimeField(null=True)),
('event_time', models.CharField(max_length=10, null=True)),
('event_venue', models.CharField(max_length=225, null=True)),
('total_price', models.FloatField(default=0.0)),
('post_content', models.TextField(max_length=3000)),
('uploaded_post_content', models.FileField(help_text=b'for blogger distibution submission', upload_to=b'uploads/images/blogger_distribution/contents/')),
('page_size', models.CharField(max_length=125, null=True, blank=True)),
('page_color', models.CharField(blank=True, max_length=125, null=True, choices=[(b'black and white', b'black and white'), (b'color', b'color')])),
('media_house', models.CharField(max_length=125, null=True)),
('region', models.CharField(max_length=125, null=True)),
('adv_duration', models.CharField(max_length=125, null=True)),
('adv_service_type', models.CharField(max_length=125, null=True)),
('audio_file', models.FileField(help_text=b'for Radio advert submission', null=True, upload_to=b'uploads/audio/')),
('video_file', models.FileField(help_text=b'for TV advert submission', null=True, upload_to=b'uploads/video/')),
('advert_image_file', models.FileField(help_text=b'for newpaper advert submission', null=True, upload_to=b'uploads/newspaper/advert')),
('adv_instructions', models.TextField(max_length=250, null=True)),
('allow_content_editing', models.BooleanField(default=False, help_text=b'for newpaper advert submission')),
('blog_list', models.ManyToManyField(to='easypr_ng.Blogs')),
('closed_by', models.OneToOneField(related_name='closed_by', null=True, blank=True, to=settings.AUTH_USER_MODEL)),
('contacted_by', models.OneToOneField(related_name='contacted_by', null=True, blank=True, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-date_requested',),
'verbose_name_plural': 'Service request',
},
),
migrations.CreateModel(
name='Testimonial',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('comment', models.CharField(max_length=165)),
('date_posted', models.DateTimeField(auto_now_add=True)),
('posted_by', models.CharField(max_length=150)),
('persons_position', models.CharField(max_length=75)),
('persons_company', models.CharField(max_length=125)),
('persons_image', models.FileField(upload_to=b'uploads/testimonial/images')),
],
),
migrations.CreateModel(
name='PurchaseInvoice',
fields=[
('purchase_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='easypr_ng.Purchase')),
('receipt_no', models.CharField(max_length=12)),
('invoice', models.FileField(null=True, upload_to=b'Invoices/%Y-%M-%D', blank=True)),
],
bases=('easypr_ng.purchase',),
),
migrations.AddField(
model_name='requestimage',
name='request',
field=models.ForeignKey(blank=True, to='easypr_ng.ServiceRequest', null=True),
),
migrations.AddField(
model_name='purchase',
name='package',
field=models.ForeignKey(to='easypr_ng.Package'),
),
migrations.AddField(
model_name='purchase',
name='payment_details',
field=models.ForeignKey(default=None, verbose_name=b'Payment details', to='easypr_ng.PayDetails'),
),
migrations.AddField(
model_name='purchase',
name='publication',
field=models.OneToOneField(to='easypr_ng.Publication'),
),
migrations.AddField(
model_name='purchase',
name='user',
field=models.ForeignKey(verbose_name=b'Purchased By', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='publication',
name='sector',
field=models.ForeignKey(verbose_name=b'Media sector', blank=True, to='easypr_ng.Sector', null=True),
),
migrations.AddField(
model_name='publication',
name='site',
field=models.ManyToManyField(to='sites.Site'),
),
migrations.AddField(
model_name='package',
name='category',
field=models.ForeignKey(to='easypr_ng.PressMaterial'),
),
migrations.AddField(
model_name='mediahouse',
name='platform',
field=models.ManyToManyField(to='easypr_ng.MediaPlatform'),
),
migrations.AddField(
model_name='mediacontact',
name='media_house',
field=models.ForeignKey(to='easypr_ng.MediaHouse'),
),
migrations.AddField(
model_name='interviewrequest',
name='preferred_media_house',
field=models.ManyToManyField(to='easypr_ng.MediaHouse'),
),
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(to='easypr_ng.Publication'),
),
migrations.AddField(
model_name='comment',
name='posted_by',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
|
[
"ezechukwuji@gmail.com"
] |
ezechukwuji@gmail.com
|
628248bbe6a9431ccf0dcc7c8c86b2be54c57be1
|
4b57d0707a5a9336749cdeab89289d9359b90ade
|
/main/routes.py
|
6ed5b1a1e8ea2cb56964044f1ec498307dd77e4f
|
[] |
no_license
|
rahulsingh03/flask_blog
|
b7a761a6ce650d54e3a99475b3a9b46069a16e54
|
5686a9d3cc328ad39193df425b323a87b5671e37
|
refs/heads/main
| 2023-03-11T12:55:31.741863
| 2021-03-06T17:33:56
| 2021-03-06T17:33:56
| 343,853,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from flask import render_template,request,Blueprint
from flask_blog.models import Post
main = Blueprint('main',__name__)
@main.route("/")
@main.route("/home")
def home():
page = request.args.get('page',1,type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)
return render_template('home.html',posts = posts)
@main.route("/about")
def about():
return render_template('about.html',title='About')
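# --- Usage sketch (illustrative only, not part of the original routes file) ---
# A blueprint only serves these routes once it is registered on the Flask app.
# The import path below assumes this file lives at flask_blog/main/routes.py,
# which may differ from the real project layout.
#
#   from flask import Flask
#   from flask_blog.main.routes import main
#
#   app = Flask(__name__)
#   app.register_blueprint(main)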
|
[
"rahulsingh03"
] |
rahulsingh03
|
7084db07473bea0106fc62aa37080ec18e0b155d
|
f3baf8b850c896231b4c254a22567fd5d7a5035c
|
/Aula 5/Aula5.py
|
9a5f8d8ffe7262bf95b7a16cc7342e14b5900b56
|
[
"MIT"
] |
permissive
|
Katakhan/TrabalhosPython2
|
e1c23119ef582038ceea0004c872c00778fd326e
|
ab47af0ff3c00922857578e58a1a149d9e65e229
|
refs/heads/master
| 2020-09-21T02:15:04.505791
| 2020-03-19T13:23:41
| 2020-03-19T13:23:41
| 224,650,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
# create a program that reads 4 grades
# print the highest grade
# print the lowest grade
# print the average
# print whether the student passed or failed (passing average: 7)
n1 = float(input('Insira sua 1 Nota'))
n2 = float(input('Insira sua 2 Nota'))
n3 = float(input('Insira sua 3 Nota'))
n4 = float(input('Insira sua 4 Nota'))
if n1 > n2 and n1>n3 and n1>n4:
print(f'A maior nota é: {n1}')
elif n2 > n1 and n2>n3 and n2>n4:
print(f'A maior nota é :{n2}')
elif n3 > n1 and n3>n2 and n3>n4:
print(f'A maior nota é :{n3}')
else:
print(f'a maior nota é: {n4}')
if n1 < n2 and n1 < n3 and n1 < n4:
    print(f'A menor nota é: {n1}')
elif n2 < n1 and n2 < n3 and n2 < n4:
    print(f'A menor nota é: {n2}')
elif n3 < n1 and n3 < n2 and n3 < n4:
    print(f'A menor nota é: {n3}')
else:
    print(f'A menor nota é: {n4}')
media = (n1+n2+n3+n4)/4
print(f'A média é: {media}')
if media >= 7:
print('aluno aprovado')
else:
print('aluno reprovado')
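# --- Alternative sketch (added for illustration, not part of the original exercise) ---
# Python's built-ins express the same checks without the chained comparisons:
#
#   notas = [n1, n2, n3, n4]
#   print(f'A maior nota é: {max(notas)}')
#   print(f'A menor nota é: {min(notas)}')
#   media = sum(notas) / len(notas)
#   print(f'A média é: {media}')
#   print('aluno aprovado' if media >= 7 else 'aluno reprovado')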
|
[
"antoniorafaelgastaldi@hotmail.com"
] |
antoniorafaelgastaldi@hotmail.com
|
e6b31beaef8c8f88ec504ad4710f02fb3f5d4da8
|
03e910208d4a1709f976ca985769a1179ca58b36
|
/prueba2.py
|
06114c0d88a35b13e10d5d2b0a2a7e7f476d5751
|
[] |
no_license
|
Aleredfer/kata1_morse
|
2a8aef9cd2c65d8e87521f1f18f76cd2ca5cc0f0
|
a26e029a2106e627aee77f37c26fbc1f275728c9
|
refs/heads/master
| 2020-06-09T23:29:54.351569
| 2019-07-03T16:54:31
| 2019-07-03T16:54:31
| 193,527,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
import morse
from docx import Document
mensaje = input("dime algo: ")
telegrama = morse.toMorse(mensaje)
print(telegrama)
original = morse.toPlain(telegrama)
print(original)
|
[
"aleredfer@hotmail.com"
] |
aleredfer@hotmail.com
|
fff21f3b86b8f64b2a98ff4439c32671983f23a0
|
97ba9d460d60617f43649699e6f58bc27053786e
|
/bankapp/MainClass.py
|
54051541cfaac60e3ea9b8d951e3c67ba852b0bb
|
[] |
no_license
|
prasad417/PythonDataScience
|
259db1c022d030c20cfbbb6e8f6592e7cec9f0a3
|
6f4b28c5b3530fa783fe8efc652f850f43d11abb
|
refs/heads/master
| 2020-06-12T22:39:21.508630
| 2019-07-25T03:20:36
| 2019-07-25T03:20:36
| 194,450,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
from bankapp.bank.Bank import Bank
from bankapp.address.Address import Address
from bankapp.bankaccount.CertificateOfDeposit import CertificateOfDeposit
from bankapp.bankaccount.CheckingAccount import CheckingAccount
from bankapp.bankaccount.SavingsAccount import SavingsAccount
from bankapp.bankcustomer.Customer import Customer
from bankapp.bankcustomer.Identification import Identification
from bankapp.bankcustomer.PersonalInformation import PersonalInformation
import json
class MainClass:
bank = Bank()
bank.get_bank_details()
personal_info = PersonalInformation('Ryan', '', 'Beck', '11/28/1986', 'email@email.com', '1234567890', 'Male')
address = Address('4101 W 98th', '101', 'Minneapolis', 'MN', '55437')
identification = Identification('MN1234567890', '11/22', '123456789')
customer = Customer(personal_info, address, identification)
ca = CheckingAccount()
customer.open_bank_account(ca)
sa = SavingsAccount()
customer.open_bank_account(sa)
customer1 = Customer(personal_info, address, identification)
ca = CheckingAccount()
customer1.open_bank_account(ca)
sa = SavingsAccount()
customer1.open_bank_account(sa)
cd = CertificateOfDeposit()
customer.open_bank_account(cd)
print('Test')
|
[
"417.prasad@gmail.com"
] |
417.prasad@gmail.com
|
491a75c45c5d9ecbdff33f07e22e1616e566865c
|
f395dd29a02de72ee5d07a2bd50fc6548d427838
|
/02-Welcome-to-Python/the-print-function-III-parameters-and-arguments.py
|
c78ab769e71a0fd9f4a5af86d5452f12881f1dd7
|
[] |
no_license
|
susanahidalgo/python_class
|
f629b112d7a3c90dfa9cea2e37be17a0c44c6b4f
|
34a8108e3515e00db1213e9126ada7c9754ba25a
|
refs/heads/main
| 2023-03-02T23:23:36.404238
| 2021-02-03T11:18:08
| 2021-02-03T11:18:08
| 315,392,344
| 0
| 0
| null | 2021-02-03T11:18:10
| 2020-11-23T17:35:42
|
Python
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
print("ABC", "DEF", "GHI")
print("ABC", "DEF", "GHI", sep="!")
print("ABC", "DEF", "GHI", sep="__*__")
print("ABC", "DEF", "GHI", sep=" ")
print ("Hello", "Goodbye", end="!§$")
print ("Hello")
print("A", "B", "C", sep="**", end="#")
print("A", "B", "C", end="#", sep="**")
|
[
"susanahidalgofernandez@MacBook-Air-de-Susana.local"
] |
susanahidalgofernandez@MacBook-Air-de-Susana.local
|
160f25500408285b50b57c713a0c4a7f21c5bad5
|
fddc7a97ec337255247432140121519971121c25
|
/boilerplate/model/Employee.py
|
b0d903e9ee07350b647bfbf430729e568ed4705a
|
[
"MIT"
] |
permissive
|
DitoHI/flask-graphql
|
5eaf6c46b617d008d535dc94b1a75db882fa791d
|
e27924ffe72f61feb8108f63af9245a1a5efa42f
|
refs/heads/master
| 2020-05-18T11:48:37.320122
| 2019-05-01T08:34:54
| 2019-05-01T08:34:54
| 184,389,474
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
from datetime import datetime
from mongoengine import Document
from mongoengine.fields import (
StringField,
DateTimeField,
ReferenceField,
ListField,
EmbeddedDocumentField,
)
from .Department import Department
from .Role import Role
from .Task import Task
class Employee(Document):
meta = {"collection": "employee"}
name = StringField()
    hired_on = DateTimeField(default=datetime.now)  # pass the callable so the timestamp is taken when each document is created, not at import time
department = ReferenceField(Department)
roles = ListField(ReferenceField(Role))
leader = ReferenceField("self")
tasks = ListField(EmbeddedDocumentField(Task))
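# --- Usage sketch (illustrative only, not part of the original model file) ---
# Assumes mongoengine.connect() has already been called with the project's MongoDB
# settings; only fields defined on Employee above are used.
#
#   emp = Employee(name="Ada Lovelace")
#   emp.save()
#   Employee.objects(name="Ada Lovelace").first()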
|
[
"ditohafizh@gmail.com"
] |
ditohafizh@gmail.com
|
ef9fb4dc93e0a719098e904c830a62c0ff4593f6
|
3d4c4c95c5599626ee5fd753261af5474b265a4a
|
/alice/karen.py
|
1eac535056e5be8e7dff132d9c335d846a270901
|
[] |
no_license
|
nya3jp/icfpc2014
|
a605119561be61ad0015c4c6477cbed7cebb25fc
|
0617ce46d619e99db5763d3d25581fe9da842414
|
refs/heads/master
| 2020-05-19T21:44:37.047921
| 2014-07-28T17:36:18
| 2014-07-28T17:36:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,148
|
py
|
###
### How is the progress?
###
def main(world, ghosts):
map = convert_2d_array(get_map(world))
return (map, step)
def step(map, world):
x, y = get_my_pos(world)
map = set_2d_array(map, y, x, 1)
dir = decide(map, world)
print dir
return (map, dir)
def decide(map, world):
cx, cy = get_my_pos(world)
cdir = get_my_dir(world)
best_dir = 0
best_score = -1000000
for dir in xrange(4):
score = eval_move(adj_move(cx, cy, dir), dir, 0, 1, map, world)
if dir == (cdir + 2) % 4:
score -= 1
print dir, score
if score > best_score:
best_dir = dir
best_score = score
return best_dir
def eval_move(cx, cy, cdir, branches, dist, map, world):
cell_score = eval_cell(cx, cy, dist, map, world)
if cell_score < 0:
return cell_score
future_score = -1000000
choices = []
for ndir in range(4):
if ndir == (cdir + 2) % 4:
continue
if get_cell(adj_move(cx, cy, ndir), map) > 0:
choices = (ndir, choices) # prepend
if len(choices) >= 2:
branches += 1
if branches >= 3:
return cell_score
for ndir in choices:
nscore = eval_move(adj_move(cx, cy, ndir), ndir, branches, dist + 1, map, world)
if nscore > future_score:
future_score = nscore
return cell_score + future_score
def eval_cell(x, y, dist, map, world):
star = get_my_vitality(world) / 150
for ghost in get_ghosts(world):
gx, gy = unpair(car(cdr(ghost)))
if x == gx and y == gy:
if dist < star:
return 10000
return -1000000
cell = get_cell(x, y, map)
if cell == 0:
return -1000000
if cell == 2:
return 100
if cell == 3:
return 1000
return 0
def get_cell(x, y, map):
return get_2d_array(map, y, x)
@rank(2)
def adj_move(x, y, dir):
if dir == 0:
y -= 1
elif dir == 1:
x += 1
elif dir == 2:
y += 1
elif dir == 3:
x -= 1
return x, y
###
### 2D-arrays
###
def convert_2d_array(map):
h = len(map)
w = len(map[0])
array = make_array(h, make_array(w, 0))
y = 0
for row in map:
x = 0
for cell in row:
array = set_2d_array(array, y, x, cell)
x += 1
y += 1
return array
@nolocals
def set_2d_array(array, i, j, value):
return set_array(array, i, set_array(get_array(array, i), j, value))
@nolocals
def get_2d_array(array, i, j):
return get_array(get_array(array, i), j)
###
### world accessors
###
@rank(2)
def get_my_pos(world):
return unpair(car(cdr(car(cdr(world)))))
def get_my_dir(world):
return car(cdr(cdr(car(cdr(world)))))
def get_my_vitality(world):
return car(car(cdr(world)))
def get_ghosts(world):
return car(cdr(cdr(world)))
def get_map(world):
return car(world)
###
### immutable arrays
###
def make_array(n, value):
width = 1
while width < n:
width *= 2
return (width, _make_tree(width, value))
@nolocals
def _make_tree(width, value):
if width == 1:
return value
return (_make_tree(width / 2, value), _make_tree(width / 2, value))
@asm
@rank(1)
def get_array(array, index):
"""
; left_size = car(array) / 2
LD 0 0
CAR
LDC 2
DIV
; array = cdr(array)
LD 0 0
CDR
ST 0 0
LDF %_body
AP 1
RTN
%_body:
; while left_size:
LD 0 0
TSEL %_descent %_exit
%_descent:
; if index >= left_size:
LD 1 1
LD 0 0
CGTE
TSEL %_right %_left
%_right:
; array = cdr(array)
LD 1 0
CDR
ST 1 0
; index -= left_size
LD 1 1
LD 0 0
SUB
ST 1 1
; left_size /= 2
LD 0 0
LDC 2
DIV
ST 0 0
LDC 283283283
TSEL %_body %_body
%_left:
; array = car(array)
LD 1 0
CAR
ST 1 0
; left_size /= 2
LD 0 0
LDC 2
DIV
ST 0 0
LDC 283283283
TSEL %_body %_body
%_exit:
; return array
LD 1 0
RTN
"""
def set_array(array, index, value):
width = car(array)
left_size = width / 2
tree = cdr(array)
return (width, _set_tree_impl(tree, index, value, left_size))
@nolocals
def _set_tree_impl(tree, index, value, left_size):
if left_size == 0:
return value
if index >= left_size:
return (car(tree), _set_tree_impl(cdr(tree), index - left_size, value, left_size / 2))
return (_set_tree_impl(car(tree), index, value, left_size / 2), cdr(tree))
|
[
"takahashi.shuhei@gmail.com"
] |
takahashi.shuhei@gmail.com
|
6a0e594abf0e7e7595f7a372a3e2cf905d28aec4
|
a4c8cdda03d435c782ace8e5ef7923e925265a8c
|
/src/web_store_django/asgi.py
|
ff3dedfa7f677d01f1b7a889c0683d5741f4b20a
|
[] |
no_license
|
orbitturner/WebStoreDjango
|
a67102ba803ec0c265779c174563a150f5c5e86d
|
4377b2c915f95aac89f922486a15b067c41eedb4
|
refs/heads/master
| 2023-02-13T09:50:15.892916
| 2021-01-17T20:05:46
| 2021-01-17T20:05:46
| 327,995,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
ASGI config for web_store_django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_store_django.settings')
application = get_asgi_application()
|
[
"orbitturner@gmail.com"
] |
orbitturner@gmail.com
|
5a618f4ae147e389b88cd9be40e2216243232861
|
9c85b53419c785421360bfbbe89a78ac6bc78861
|
/data/df_tools.py
|
1c812f8bebc9a380c2a49345a2c28685b6dc68ad
|
[] |
no_license
|
quant911/gafe_py
|
a5755e8b0a722d30bc691d7efb85abf0172db772
|
2be86ac6c13c131989fdeeb5cc0eab5667b91111
|
refs/heads/master
| 2022-04-07T12:50:32.037532
| 2020-03-15T03:32:43
| 2020-03-15T03:32:43
| 226,588,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
# DataFrame and other Pandas related tools
# below is less than ideal, do we know why?
import pandas as pd
def waterfall_combine(combine_field, *args):
# type: (str, List[pd.DataFrame]) -> pd.DataFrame
df = pd.DataFrame()
for one_df in args:
if not one_df.empty:
if one_df.index.name != combine_field:
one_df = one_df.set_index(combine_field, drop=False)
if not df.empty:
one_df = one_df.drop(df.index, errors='ignore')
df = pd.concat([df, one_df])
df.reset_index(inplace=True, drop=True)
if len(df) == 0:
df[combine_field] = None # minimal requirement: the waterfall col exists
#todo: do we impose condition on Index of output DataFrame
return df
def waterfall_combine_dict(combine_field, df_dic, waterfall_sources):
# type: (str, Dict[str, pd.DataFrame], List[str]) -> pd.DataFrame
    return waterfall_combine(combine_field, *[df_dic[s] for s in waterfall_sources])
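# --- Usage sketch (added for illustration; the key column "id" and the values are made up) ---
# waterfall_combine keeps the first occurrence of each key: rows from later frames
# are only added when their key is not already present in an earlier frame.
if __name__ == "__main__":
    primary = pd.DataFrame({"id": [1, 2], "price": [10.0, 20.0]})
    fallback = pd.DataFrame({"id": [2, 3], "price": [99.0, 30.0]})
    combined = waterfall_combine("id", primary, fallback)
    print(combined)  # id 2 keeps 20.0 from primary; id 3 is filled in from fallback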
|
[
"feng.ning@iaqs.edu"
] |
feng.ning@iaqs.edu
|
445242551b300d4f1f525847dbb3e8b08e0460ff
|
da3d09b4449e311bc16a3c253a99169eacba2f83
|
/muddery/utils/quest_dependency_handler.py
|
03d9310255340b493b14b4eb7508553f4d3bf8b1
|
[
"BSD-3-Clause"
] |
permissive
|
lynnyuue/muddery
|
02848c3d83626edb3eddd8e46d534a95e981c759
|
8211d4e98843b557bd45198474fa8966806f6245
|
refs/heads/master
| 2021-01-19T10:34:33.113253
| 2017-03-13T15:56:49
| 2017-03-13T15:56:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,992
|
py
|
"""
QuestDependencyHandler deals with quest dependencies.
"""
from muddery.utils import defines
from django.conf import settings
from django.apps import apps
from evennia.utils import logger
class QuestDependencyHandler(object):
"""
    This class handles the dependency relations between quests.
"""
def __init__(self):
"""
Initialize handler
"""
self.quest_depencences = {}
def get_quest_dependencies(self, quest):
"""
Get quest's dependencies.
"""
if not quest:
return
self.load_dependencies_cache(quest)
return self.quest_depencences[quest]
def load_dependencies_cache(self, quest):
"""
To reduce database accesses, add a cache.
"""
if not quest:
return
if quest in self.quest_depencences:
# already cached
return
# Add cache of the whole dialogue.
self.quest_depencences[quest] = []
# Get db model
dependencies = []
model_dependencies = apps.get_model(settings.WORLD_DATA_APP, settings.QUEST_DEPENDENCIES)
if model_dependencies:
# Get records.
dependencies = model_dependencies.objects.filter(quest=quest)
# Add db fields to data object.
data = []
for dependency in dependencies:
data.append({"quest": dependency.dependency,
"type": dependency.type})
# Add to cache.
self.quest_depencences[quest] = data
def match_quest_dependencies(self, caller, quest):
"""
If the quest matches its dependencies.
"""
if not caller:
return False
if not quest:
return False
for dependency in self.get_quest_dependencies(quest):
# match each dependency
if not self.match_dependency(caller, dependency["quest"], dependency["type"]):
return False
return True
def match_dependency(self, caller, quest, dependency_type):
"""
check a dependency
"""
if dependency_type == defines.DEPENDENCY_QUEST_CAN_PROVIDE:
if not caller.quest_handler.can_provide(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_IN_PROGRESS:
if not caller.quest_handler.is_in_progress(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_NOT_IN_PROGRESS:
if caller.quest_handler.is_in_progress(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_ACCOMPLISHED:
if not caller.quest_handler.is_accomplished(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_NOT_ACCOMPLISHED:
if not caller.quest_handler.is_in_progress(quest):
return False
if caller.quest_handler.is_accomplished(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_COMPLETED:
if not caller.quest_handler.is_completed(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_NOT_COMPLETED:
if caller.quest_handler.is_completed(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_ACCEPTED:
if not caller.quest_handler.is_completed(quest) and \
not caller.quest_handler.is_in_progress(quest):
return False
elif dependency_type == defines.DEPENDENCY_QUEST_NOT_ACCEPTED:
if caller.quest_handler.is_completed(quest) or \
caller.quest_handler.is_in_progress(quest):
return False
return True
def clear(self):
"""
clear cache
"""
self.quest_depencences = {}
# main quest_dependency_handler
QUEST_DEP_HANDLER = QuestDependencyHandler()
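# --- Usage sketch (illustrative only; `caller` stands for a character object that owns
# a quest_handler, and "some_quest_key" is a hypothetical quest key) ---
#
#   if QUEST_DEP_HANDLER.match_quest_dependencies(caller, "some_quest_key"):
#       ...  # all dependencies of the quest are satisfied for this character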
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
5a3358d07af3369d81ddbf4aabc3a0d348ac7017
|
13e101e5bf17a12f1b2053974c0bff09d9b39e45
|
/commands/api/api_func/func_weather_api.py
|
f8569bf7f09f3edf2f33ca8aff3ba8c720bfd1cc
|
[
"MIT"
] |
permissive
|
WayniD1973/Discord_bot.py
|
6f823c34e3806c32cf451987d111a0685915b084
|
c8cc3c7adcccbe2f1bc8a77cfb5a58eab2f7e10b
|
refs/heads/master
| 2023-03-16T12:36:33.987243
| 2020-04-30T10:43:09
| 2020-04-30T10:43:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
from urllib import request
import json
from commands.api.api_func.api_setting_and_keys.api_keys import weather_api_key
from settings import (
setting_city,
setting_countries_code,
temprature_scale,
)
from list_and_dicts.countries_codes import countries_list
def weather():
    # Weather check command: look up the city configured in settings
    url = f"https://api.openweathermap.org/data/2.5/weather?q={setting_city},{setting_countries_code}&appid={weather_api_key}"
    return weather_dict_reformating(url=url)
def city_weather(city, countries_code):
    # Weather check command: look up an arbitrary city
    url = f"https://api.openweathermap.org/data/2.5/weather?q={city},{countries_code}&appid={weather_api_key}"
    return weather_dict_reformating(url=url)
def weather_dict_reformating(url):
weather_info_dict = request.urlopen(url)
weather_info_dict = json.load(weather_info_dict)
print(weather_info_dict)
    # Reformatting: flatten the nested fields returned by the API
weather_info_dict["humidity"] = weather_info_dict["main"]["humidity"]
weather_info_dict["temp"] = weather_info_dict["main"]["temp"]
weather_info_dict["clouds"] = weather_info_dict["clouds"]["all"]
weather_info_dict["description_weather"] = weather_info_dict["weather"][0][
"description"
]
weather_info_dict["weather_icon"] = weather_info_dict["weather"][0]["icon"]
weather_info_dict["weather"] = weather_info_dict["weather"][0]["main"]
weather_info_dict["wind_speed"] = weather_info_dict["wind"]["speed"]
weather_info_dict["wind_direction"] = weather_info_dict["wind"]["deg"]
weather_info_dict["city"] = weather_info_dict["name"]
# Removing unused information
del_list = (
"coord",
"base",
"main",
"wind",
"visibility",
"dt",
"sys",
"timezone",
"id",
"cod",
"name",
)
for item in del_list:
del weather_info_dict[item]
    # Temperature reformatting to the configured scale, rounded to one decimal
    if temprature_scale.upper() == "C":
        weather_info_dict["temp"] -= 273.15
    elif temprature_scale.upper() == "F":
        weather_info_dict["temp"] = 1.8 * (weather_info_dict["temp"] - 273.15) + 32
    elif temprature_scale.upper() == "K":
        pass
    else:
        # Unknown scale: fall back to Celsius
        weather_info_dict["temp"] -= 273.15
    weather_info_dict["temp"] = round(weather_info_dict["temp"], 1)
wind_dict = {
"NE": {"min_deg": 23, "max_deg": 68},
"E": {"min_deg": 68, "max_deg": 113},
"SE": {"min_deg": 113, "max_deg": 158},
"S": {"min_deg": 158, "max_deg": 203},
"SW": {"min_deg": 203, "max_deg": 248},
"W": {"min_deg": 248, "max_deg": 293},
"NW": {"min_deg": 293, "max_deg": 338},
}
temporary_wind = "nordlig"
for wind_direction in wind_dict:
if (
int(weather_info_dict["wind_direction"])
< wind_dict[wind_direction]["min_deg"]
and int(weather_info_dict["wind_direction"])
< wind_dict[wind_direction]["max_deg"]
):
temporary_wind = "wind_direction"
weather_info_dict["wind_direction"] = temporary_wind
return weather_info_dict
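# --- Usage sketch (illustrative only; a real call needs a valid OpenWeatherMap key in
# api_keys.py, valid settings, and network access) ---
#
#   info = city_weather("Stockholm", "SE")
#   print(info["temp"], info["description_weather"], info["wind_direction"])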
|
[
"Fredrik.Berzins@gmail.com"
] |
Fredrik.Berzins@gmail.com
|
a2b82844efe85b5b365a9021beec559448a80aaa
|
11a883d6016167deffe5cc065fc08d492e727d34
|
/dzisholiday/holidays.py
|
f56fd90a3439f086a56d7a1b884a8ed8ae3a9c75
|
[
"CC0-1.0"
] |
permissive
|
zichd/dzisholiday
|
2a6d759a7ea0f9e6206e05769d55b87c3bf19a98
|
512886557eae5e99a79d03597bf1a8e073add702
|
refs/heads/master
| 2020-05-14T17:57:36.378782
| 2019-04-17T15:37:29
| 2019-04-17T15:37:29
| 181,902,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
#!/usr/bin/env python3
import datetime
def getholidays(year):
"""Return a set public holidays in CZ as (day, month) tuples."""
holidays = {
(1, 1), #Novy rok
(1, 5), #Svatek prace
(8, 5), #Den vitezstvi
(5, 7), #Cyril a Metodej
(6, 7), #Jan Hus
(28, 9), #Den Ceske statnosti
(28, 10), #Vznik CS statu
(17, 11), #Den boje za svobodu a dem.
(24, 12), #Stedry den
(25, 12), #1. svatek Vanocni
(26, 12), #2. svatek Vanocni
}
# Easter holiday LUT source http://www.whyeaster.com/customs/dateofeaster.shtml
easterlut = [(4, 14), (4, 3), (3, 23), (4, 11), (3, 31), (4, 18), (4, 8),
(3, 28), (4, 16), (4, 5), (3, 25), (4, 13), (4, 2), (3, 22),
(4, 10), (3, 30), (4, 17), (4, 7), (3, 27)]
easterday = datetime.date(year, *easterlut[year%19])
easterday += datetime.timedelta(6 - easterday.weekday())
# print("Easter Sunday is on ", easterday)
holidays.update(((d.day, d.month) for d in [easterday - datetime.timedelta(2),
easterday + datetime.timedelta(1)]))
return holidays
def isholiday(date):
return (date.day, date.month) in getholidays(date.year)
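# --- Usage sketch (added for illustration) ---
if __name__ == "__main__":
    # 1 May (Svatek prace) is always a public holiday; an ordinary weekday in February is not
    print(isholiday(datetime.date(2019, 5, 1)))   # True
    print(isholiday(datetime.date(2019, 2, 11)))  # False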
|
[
"dzich@us.ibm.com"
] |
dzich@us.ibm.com
|
cba8906ed12ed46f8421e36895b3d60b14d9ffa7
|
5811411533f0afb074fcdac1b4c4be7a5165ca0b
|
/uva12250/uva12250.py
|
c95dbb0b899412509197e8bd94fbe488800b07d2
|
[] |
no_license
|
juanestebancg2806/uvaProblems
|
f49ef4de9b22106c14a5b60bac7ae84f2f02032b
|
d1ba9dd04705f53d5422651269a421d5066486f9
|
refs/heads/master
| 2022-04-26T04:12:45.906249
| 2020-04-27T00:13:31
| 2020-04-27T00:13:31
| 258,819,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
from sys import stdin
def solve(s):
ans = ""
if s == "HELLO":
ans = "ENGLISH"
elif s == "HOLA":
ans = "SPANISH"
elif s == "HALLO":
ans = "GERMAN"
elif s == "BONJOUR":
ans = "FRENCH"
elif s == "CIAO":
ans = "ITALIAN"
elif s == "ZDRAVSTVUJTE":
ans = "RUSSIAN"
else:
ans = "UNKNOWN"
return ans
def main():
s = stdin.readline().strip()
tc = 1
while s != '#':
ans = solve(s)
print("Case {0}: {1}".format(tc,ans))
tc += 1
s = stdin.readline().strip()
main()
|
[
"juanestebancg_@hotmail.com"
] |
juanestebancg_@hotmail.com
|
bc3d096110ff058ef2e3871a7175bde4321301b6
|
1983f6ee12926998d7f74e7842604374dd90a133
|
/surrogate_models/surrogate_model.py
|
a1fa9ed9f74f1d769b66e6c32f345a6d490ceafc
|
[] |
no_license
|
LMZimmer/temp
|
89548560d9263141f1d8cdecaecb4a2833942db9
|
4a4505657aeb5765631bc2cb46307f797a555f2d
|
refs/heads/main
| 2023-02-27T05:52:53.998222
| 2021-02-04T13:23:54
| 2021-02-04T13:23:54
| 300,592,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,918
|
py
|
import json
import logging
import os
import sys
from abc import ABC, abstractmethod
from IPython import embed
import numpy as np
import pathvalidate
import torch
import torch.backends.cudnn as cudnn
from surrogate_models import utils
class SurrogateModel(ABC):
def __init__(self, data_root, log_dir, seed, model_config, data_config):
self.data_root = data_root
self.log_dir = log_dir
self.model_config = model_config
self.data_config = data_config
self.seed = seed
# Seeding
np.random.seed(seed)
cudnn.benchmark = True
torch.manual_seed(seed)
cudnn.enabled = True
torch.cuda.manual_seed(seed)
# Create config loader
self.config_loader = utils.ConfigLoader('configspace.json')
# Load the data
if log_dir is not None:
os.makedirs(log_dir, exist_ok=True)
# Add logger
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Dump the config of the run to log_dir
self.data_config['seed'] = seed
logging.info('MODEL CONFIG: {}'.format(model_config))
logging.info('DATA CONFIG: {}'.format(data_config))
self._load_data()
logging.info(
'DATA: No. train data {}, No. val data {}, No. test data {}'.format(len(self.train_paths),
len(self.val_paths),
len(self.test_paths)))
with open(os.path.join(log_dir, 'model_config.json'), 'w') as fp:
json.dump(model_config, fp)
with open(os.path.join(log_dir, 'data_config.json'), 'w') as fp:
json.dump(data_config, fp)
#with open(os.path.join(log_dir, 'train_paths.json'), 'w') as fp:
# json.dump(self.train_paths, fp)
#with open(os.path.join(log_dir, 'val_paths.json'), 'w') as fp:
# json.dump(self.val_paths, fp)
#with open(os.path.join(log_dir, 'test_paths.json'), 'w') as fp:
# json.dump(self.test_paths, fp)
def _load_data(self):
# Get the result train/val/test split
train_paths = []
val_paths = []
test_paths = []
for key, data_config in self.data_config.items():
if type(data_config) == dict:
result_loader = utils.ResultLoader(
self.data_root, filepath_regex=data_config['filepath_regex'],
train_val_test_split=data_config, seed=self.seed)
train_val_test_split = result_loader.return_train_val_test()
# Save the paths
for paths, filename in zip(train_val_test_split, ['train_paths', 'val_paths', 'test_paths']):
file_path = os.path.join(self.log_dir,
pathvalidate.sanitize_filename('{}_{}.json'.format(key, filename)))
json.dump(paths, open(file_path, 'w'))
train_paths.extend(train_val_test_split[0])
val_paths.extend(train_val_test_split[1])
test_paths.extend(train_val_test_split[2])
'''
# Add extra paths to test
# Increased ratio of skip-connections.
matching_files = lambda dir: [str(path) for path in Path(os.path.join(self.data_root, dir)).rglob('*.json')]
test_paths.extend(matching_files('groundtruths/low_parameter/'))
# Extreme hyperparameter settings
# Learning rate
test_paths.extend(matching_files('groundtruths/hyperparameters/learning_rate/'))
test_paths.extend(matching_files('groundtruths/hyperparameters/weight_decay/'))
# Load the blacklist to filter out those elements
if self.model_config["model"].endswith("_time"):
blacklist = json.load(open('surrogate_models/configs/data_configs/blacklist_runtimes.json'))
else:
blacklist = json.load(open('surrogate_models/configs/data_configs/blacklist.json'))
filter_out_black_list = lambda paths: list(filter(lambda path: path not in blacklist, paths))
train_paths, val_paths, test_paths = map(filter_out_black_list, [train_paths, val_paths, test_paths])
'''
# Shuffle the total file paths again
rng = np.random.RandomState(6)
rng.shuffle(train_paths)
rng.shuffle(val_paths)
rng.shuffle(test_paths)
self.train_paths = train_paths
self.val_paths = val_paths
self.test_paths = test_paths
def _get_labels_and_preds(self, result_paths):
"""Get labels and predictions from json paths"""
labels = []
preds = []
for result_path in result_paths:
config_space_instance, val_accuracy_true, test_accuracy_true, _ = self.config_loader[result_path]
val_pred = self.query(config_space_instance.get_dictionary())
labels.append(val_accuracy_true)
preds.append(val_pred)
return labels, preds
def _log_predictions(self, result_paths, labels, preds, identifier):
"""Log paths, labels and predictions for one split"""
if not isinstance(preds[0], float):
preds = [p[0] for p in preds]
logdir = os.path.join(self.log_dir, identifier+"_preds.json")
dump_dict = {"paths": result_paths, "labels": labels, "predictions": preds}
with open(logdir, "w") as f:
json.dump(dump_dict, f)
def log_dataset_predictions(self):
"""Log paths, labels and predictions for train, val, test splits"""
data_splits = {"train": self.train_paths, "val": self.val_paths, "test": self.test_paths}
for split_identifier, result_paths in data_splits.items():
print("==> Logging predictions of %s split" %split_identifier)
labels, preds = self._get_labels_and_preds(result_paths)
self._log_predictions(result_paths, labels, preds, split_identifier)
@abstractmethod
def train(self):
raise NotImplementedError()
@abstractmethod
def validate(self):
raise NotImplementedError()
@abstractmethod
def test(self):
raise NotImplementedError()
@abstractmethod
def save(self):
raise NotImplementedError()
@abstractmethod
def load(self, model_path):
raise NotImplementedError()
@abstractmethod
def query(self, config_dict):
raise NotImplementedError()
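# --- Subclass sketch (illustrative only; the real surrogates in this package implement
# proper training, persistence and querying) ---
# SurrogateModel is abstract, so a concrete model must implement all six methods:
#
#   class ConstantSurrogate(SurrogateModel):
#       """Predicts the same validation accuracy for every configuration."""
#       def train(self): pass
#       def validate(self): pass
#       def test(self): pass
#       def save(self): pass
#       def load(self, model_path): pass
#       def query(self, config_dict): return 0.5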
|
[
"zimmerl@informatik.uni-freiburg.de"
] |
zimmerl@informatik.uni-freiburg.de
|
8b24409e35432496ca5297dc316aab3b31fa776c
|
38536d63a23e3ce1df00ff065f5c8a29bdb9dcf8
|
/archivehb/settings.py
|
f144cb9ac61503e9a8fbd81e7d38dfdb89cbde3d
|
[] |
no_license
|
himelbikon/archivehb
|
08b84cff1f98c66a62abd431bddbefb8955b7e23
|
e238b3344ccf4b5d53e5a5f9f8e3743bb903a249
|
refs/heads/master
| 2023-02-20T06:04:36.979158
| 2021-01-12T15:38:55
| 2021-01-12T15:38:55
| 318,464,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,571
|
py
|
"""
Django settings for archivehb project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l)fkimczhvbuycbyacca8u@%&+j9-u$dxfkf38zsaaizvq&yn3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['himelbikon.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'note',
'quiz',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'archivehb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'archivehb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
'''STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]'''
try:
from .local_settings import *
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
print('Setup is completed for local server')
except ImportError:
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
    print('There is no local_settings file; you must be on production.')
|
[
"skhimelhaque@gmail.com"
] |
skhimelhaque@gmail.com
|
a422c47d80ec8012d7987dbd727c5b148a59e05d
|
7d018a6dd9afeb1a1086538991c11cae73462a01
|
/rss/migrations/0003_auto_20200902_1526.py
|
82f14d5ca3bb6356de1a4e6dab9dff11fde78697
|
[] |
no_license
|
dbrian57/rss-reader-api
|
289c82aae9998e2c7bb24533bd5c9b203387ffaf
|
0ae718eee23e05b045739983b14fb28a8ed0198c
|
refs/heads/main
| 2023-08-16T19:22:30.985148
| 2021-09-27T23:12:16
| 2021-09-27T23:12:16
| 400,309,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
# Generated by Django 3.1.1 on 2020-09-02 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rss', '0002_auto_20200902_1424'),
]
operations = [
migrations.AlterField(
model_name='rssfeed',
name='categories',
field=models.ManyToManyField(blank=True, to='rss.Category'),
),
]
|
[
"mason@masonegger.com"
] |
mason@masonegger.com
|
637bc4fa6b7e8999fe8509be90307bb89d4a1521
|
580113f827c013c2248d8ed4410cc5bb3cf9f112
|
/oscar/lib/python3.6/site-packages/easy_thumbnails/tests/test_widgets.py
|
6f7259e83c75b5efa3b9e5f5488edab55cbc7f44
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
aiegoo/oscar
|
2019a04f13920d0c0451e22fd20753bef01480cc
|
ab9b8a278cbc92a76ef2a92347cc44ced5a4e1d3
|
refs/heads/master
| 2022-12-23T14:07:29.747431
| 2019-10-03T05:06:02
| 2019-10-03T05:06:02
| 212,476,972
| 0
| 1
|
BSD-3-Clause
| 2022-12-10T04:26:14
| 2019-10-03T01:42:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,464
|
py
|
from easy_thumbnails import widgets
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms.widgets import ClearableFileInput
from easy_thumbnails.tests import utils as test
class ImageClearableFileInput(test.BaseTest):
def setUp(self):
super(ImageClearableFileInput, self).setUp()
self.storage = test.TemporaryStorage()
def tearDown(self):
self.storage.delete_temporary_storage()
super(ImageClearableFileInput, self).tearDown()
def test_options_default(self):
"""
If thumbnail options are not passed, default options will be used.
"""
widget = widgets.ImageClearableFileInput()
self.assertEqual(widget.thumbnail_options, {'size': (80, 80)})
def test_options_custom(self):
"""
A dictionary can be passed as the thumbnail options. The dictionary is
copied so it isn't just a mutable reference of the original.
"""
options = {'size': (300, 100), 'crop': True}
widget = widgets.ImageClearableFileInput(thumbnail_options=options)
# Changing the options won't change the thumbnail options in the widget
# now.
options['crop'] = False
self.assertEqual(
widget.thumbnail_options, {'size': (300, 100), 'crop': True})
def test_render(self):
"""
The output contains a link to both the source image and the thumbnail.
"""
source_filename = self.create_image(self.storage, 'test.jpg')
widget = widgets.ImageClearableFileInput()
source_file = self.storage.open(source_filename)
source_file.storage = self.storage
source_file.thumbnail_storage = self.storage
html = widget.render('photo', source_file)
self.assertIn(source_filename, html)
self.assertIn('.80x80_', html)
def test_render_custom(self):
"""
The thumbnail is generated using the options provided to the widget.
"""
source_filename = self.create_image(self.storage, 'test.jpg')
options = {'size': (100, 500), 'quality': 90, 'crop': True}
widget = widgets.ImageClearableFileInput(thumbnail_options=options)
source_file = self.storage.open(source_filename)
source_file.storage = self.storage
source_file.thumbnail_storage = self.storage
html = widget.render('photo', source_file)
self.assertIn(source_filename, html)
self.assertIn('.100x500_q90_crop.jpg', html)
def test_custom_template(self):
"""
The template used to render the thumbnail and the standard
``ClearableFileInput`` output can be customized.
"""
source_filename = self.create_image(self.storage, 'test.jpg')
widget = widgets.ImageClearableFileInput()
widget.template_with_thumbnail = (
u'%(template)s<br />'
u'<a href="%(source_url)s">%(thumb)s</a> FOO'
)
source_file = self.storage.open(source_filename)
source_file.storage = self.storage
source_file.thumbnail_storage = self.storage
html = widget.render('photo', source_file)
self.assertIn(source_filename, html)
self.assertIn('.80x80_', html)
self.assertIn('FOO', html)
def test_render_without_value(self):
"""
If value not passed, use super widget.
"""
widget = widgets.ImageClearableFileInput()
base_widget = ClearableFileInput()
html = widget.render('photo', None)
base_html = base_widget.render('photo', None)
self.assertEqual(base_html, html)
def test_render_uploaded(self):
"""
The widget treats UploadedFile as no input.
Rationale:
When widget is used in ModelForm and the form (submitted with upload)
is not valid, widget should discard the value (just like standard
Django ClearableFileInput does).
"""
widget = widgets.ImageClearableFileInput()
base_widget = ClearableFileInput()
file_name = 'test.jpg'
# storage=None to get raw content.
image = self.create_image(None, file_name)
upload_file = SimpleUploadedFile(file_name, image.getvalue())
html = widget.render('photo', upload_file)
base_html = base_widget.render('photo', upload_file)
self.assertEqual(base_html, html)
self.assertNotIn(file_name, html) # Widget is empty.
|
[
"eozz21@gmail.com"
] |
eozz21@gmail.com
|
808f8d67e107a2468bd12ea900d3b6bcad763710
|
c0f56e9d408dcb636a553e1b124a904feed88a6a
|
/python/test-dask.py
|
690a8b76ead85a1967ccc003cb2a6c8bcda79f2f
|
[] |
no_license
|
andrejsim/test-analytic
|
7be77153768f2860cee52a4f41546f11edc38987
|
b0300ccb415f0fae0b51d0e95cb8c7c7f0e74068
|
refs/heads/master
| 2020-04-30T00:23:25.988682
| 2019-05-14T08:31:33
| 2019-05-14T08:31:33
| 176,501,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
from dask.distributed import Client, progress
# pip install dask[complete]
client = Client(processes=False,
threads_per_worker=4,
n_workers=1,
memory_limit='2GB')
print(client)
import dask.array as da
x = da.random.random((10000, 10000), chunks=(1000, 1000))
y = x + x.T
z = y[::2, 5000:].mean(axis=1)
print(z.compute())
y.sum().compute()
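# The expressions above only build a lazy dask task graph; nothing is executed until
# .compute() (or progress/persist) is called. Another illustrative reduction:
#   z.std().compute()   # triggers one more chunked computation on the local workers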
|
[
"andrej.mihajlovski@meteogroup.com"
] |
andrej.mihajlovski@meteogroup.com
|
63233c2df1086b23480471755aed96e3819bcd65
|
1cd48a9c3e558e05e0ceeb9ab4b689a2d2be5cf8
|
/sebs/gcp/__init__.py
|
f76e7c75d61c025df110e81852d4e582b46949f6
|
[
"BSD-3-Clause"
] |
permissive
|
spcl/serverless-benchmarks
|
e4d51042ab0c658e65a828d74e6123da8f4f97f6
|
c9f0e25455ae9e0ebf8dc86fdc88bf5e6720db48
|
refs/heads/master
| 2023-08-31T08:57:25.961768
| 2023-08-23T11:50:35
| 2023-08-23T11:50:35
| 227,820,874
| 92
| 43
|
BSD-3-Clause
| 2023-08-23T11:27:10
| 2019-12-13T11:04:54
|
Python
|
UTF-8
|
Python
| false
| false
| 149
|
py
|
from .gcp import GCP # noqa
from .config import GCPConfig # noqa
from .storage import GCPStorage # noqa
from .function import GCPFunction # noqa
|
[
"mcopik@gmail.com"
] |
mcopik@gmail.com
|
297aedb083d337383a8b7258cd2fc3d034ba1dd1
|
85584daa79b6c43fd07c49a977a129e04315d8ce
|
/enemy.py
|
0bafe9d181d7987359e0d4239d0336748f59cca8
|
[] |
no_license
|
VoX/mazerun
|
7a23df4bc8b2ccccbb4bcf9d999fead92424b7a2
|
a6c5691104db59f70dd5002fb8d7dc9610bf98de
|
refs/heads/master
| 2021-01-17T22:45:19.633251
| 2015-02-24T04:29:13
| 2015-02-24T04:29:13
| 31,239,967
| 1
| 0
| null | 2015-02-24T01:43:54
| 2015-02-24T01:43:54
| null |
UTF-8
|
Python
| false
| false
| 2,216
|
py
|
import pygame
from constants import *
import random
from util.perlin import SimplexNoise
perlin = SimplexNoise(period=500)
class Enemy(pygame.sprite.Sprite):
# base enemy class
# set speed
change_x = 0
change_y = 0
def __init__(self):
# call parent
super(Enemy, self).__init__()
# set height/width
self.image = pygame.image.load(IMG_DIR + 'slime.png')
# set location
self.rect = self.image.get_rect()
self.health = 30
self.counter = 30
@property
def damage(self):
return 5
@property
def EXP(self):
return 25
def change_speed(self, x, y):
# change enemy speed, called with keypress
self.change_x += x
self.change_y += y
def move(self, walls):
# find new enemy position
if self.counter < 20:
self.counter += 1
else:
self.change_x = random.randint(-1,1)
self.change_y = random.randint(-1,1)
# move left/right
self.rect.x += self.change_x
# horizontal collision?
block_hit_list = pygame.sprite.spritecollide(self, walls, False)
for block in block_hit_list:
# if moving right, set our right side to the left side of
# collided object
if self.change_x > 0:
self.rect.right = block.rect.left
else:
# if moving left, do opposite
self.rect.left = block.rect.right
# move up/down
self.rect.y += self.change_y
# vertical collision?
block_hit_list = pygame.sprite.spritecollide(self, walls, False)
for block in block_hit_list:
# if moving down, set our bottom side to the top side of
# collided object
if self.change_y > 0:
self.rect.bottom = block.rect.top
else:
# if moving up, do opposite
self.rect.top = block.rect.bottom
def take_damage(self, damage, incoming_x, incoming_y):
self.damage_taken = damage
self.health -= self.damage_taken
self.incoming_x = incoming_x
self.incoming_y = incoming_y
self.counter = 0
rand = 2*(perlin.noise2(self.incoming_x, self.incoming_y))
if (self.rect.x - self.incoming_x) < 0:
self.change_x = (-0.5)
elif (self.rect.x - self.incoming_x) > 0:
self.change_x = (1.5+rand)
if (self.rect.y - self.incoming_y) < 0:
self.change_y = (-0.5)
elif (self.rect.y - self.incoming_y) > 0:
self.change_y = (1.5+rand)
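# Typical per-frame usage -- an illustrative sketch; `walls` is a pygame sprite Group
# as used in move() above, and `player` is a hypothetical attacking sprite:
#
#   enemy = Enemy()
#   enemy.move(walls)                                    # once per game-loop frame
#   enemy.take_damage(10, player.rect.x, player.rect.y)  # on a successful hit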
|
[
"nlax76@gmail.com"
] |
nlax76@gmail.com
|
3fdda1fb75e41e5c677d25dc130e7d22dcd5c2cc
|
2b9d16318d5d38a72e96e31b4e568b4fde5c9cee
|
/venv/bin/pip3
|
bc57ca82760da914df822a4e7e1328d12e1f7959
|
[] |
no_license
|
shubhaminstazen/twitter_rest
|
b500d3e26ae6cfffd32ff4933a28b20aad419640
|
d3b0c815bd29f86524d9e3e642ae7e6845a07fa1
|
refs/heads/master
| 2020-07-08T03:48:30.897080
| 2019-08-24T11:57:10
| 2019-08-24T11:57:10
| 203,555,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
#!/home/shubhamkathe/PycharmProjects/twitter/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"shubham.kathe@instazen.com"
] |
shubham.kathe@instazen.com
|
|
bb44827457f093c2de5ae63b100be439f73457ad
|
db12d6ac87844e8a3d6e1c2e4e9f01743c35e63d
|
/mygroup.py
|
1b2d9a07c3d0c516965bb89e5e44f74ebdd0d32d
|
[] |
no_license
|
Kisuyamii/Iamcountingonlove
|
a67da9decb729ec87578cd97857e6c72ed9fa446
|
38244e218ff036926b7da68bc3ebe92b2d7fd181
|
refs/heads/main
| 2023-01-23T08:18:28.766345
| 2020-11-25T22:09:46
| 2020-11-25T22:09:46
| 316,063,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
groupmates = [
{
"name": "Василий ",
"surname": "Петрушкин",
"exams": ["Информатика", "Физика", "Web"],
"marks": [5, 5, 5]
},
{
"name": "Савелий",
"surname": "Петров",
"exams": ["Физика", "Web", "КТП"],
"marks": [5, 3, 4]
},
{
"name": "Эльдар",
"surname": "Смирнов",
"exams": ["Философия", "ИС", "КТП"],
"marks": [5, 4, 5]
},
{
"name": "Фёдор",
"surname": "Жизин",
"exams": ["Философия", "ИС", "КТП"],
"marks": [2, 3, 2]
},
{
"name": "Гомер",
"surname": "Симпсон",
"exams": ["", "ИС", "Web"],
"marks": [5, 4, 3]
},
{
"name": "Анастасия",
"surname": "Васина",
"exams": ["Философия", "ИС", "КТП"],
"marks": [5, 5, 5]
},
]
def count_mark(students, need):
print ("surname".ljust(15), "marks".ljust(20))
for student in students:
marks_list = student["marks"]
result = (sum(marks_list)/len(marks_list))
if result >= need:
print(student["surname"].ljust(15), str(student["marks"]).ljust(20))
need = int(input('Input :'))
count_mark(groupmates,need)
|
[
"noreply@github.com"
] |
Kisuyamii.noreply@github.com
|
b9ed4c62008e28d2bf0ade31b99bc33a80d4800b
|
18d11ee454b05f0344b9f5542916d18685473ba5
|
/chipy_org/libs/slack_utils.py
|
37489aaf26a481deafb8e4734377c3d8dd39982f
|
[
"MIT"
] |
permissive
|
chicagopython/chipy.org
|
d117040761d35bb106d6e45e092c96c073827277
|
c14182b7ad3d9d889c0127d20d32a8c78c05924d
|
refs/heads/main
| 2023-07-12T22:00:54.381936
| 2023-06-20T04:48:02
| 2023-06-20T04:48:02
| 4,348,048
| 94
| 86
|
MIT
| 2023-07-06T21:29:09
| 2012-05-16T14:47:15
|
Python
|
UTF-8
|
Python
| false
| false
| 897
|
py
|
import json
import requests
def post_message_to_slack(channel_key: str, channel_name: str, message: str):
"""
This function is used to post messages from chipy.org to the chipy slack
channel_key: secret key for slack channel
channel_name: human readable description of the slack channel
message: string formatted text to post to the slack channel
"""
webhook_url = f"https://hooks.slack.com/services/{channel_key}"
slack_data = {"text": message}
response = requests.post(
webhook_url,
data=json.dumps(slack_data),
headers={"Content-Type": "application/json"},
allow_redirects=False,
)
if response.status_code != 200:
raise requests.HTTPError(
f"Failed to post message '{message[:25]}...' to slack channel '{channel_name}'. \
Status code {response.status_code} != 200."
)
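# Example usage -- an illustrative sketch only; the channel key below is a
# placeholder, not a real Slack webhook path:
#
#   post_message_to_slack(
#       channel_key="T000/B000/XXXXXXXX",
#       channel_name="#announcements",
#       message="New meeting posted on chipy.org",
#   )
#
# A non-200 response from the webhook raises requests.HTTPError, as above.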
|
[
"noreply@github.com"
] |
chicagopython.noreply@github.com
|
95aec0d065df00a7668e67fcb6e19b3cd23b539b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/XYYdtkhGPXXJ3QQNB_24.py
|
997a52f1323cf1df1a0b52572d1f0f25f1fb8cdf
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
"""
Create a function that takes a list of strings and returns a list with only
the strings that have numbers in them. If there are no strings containing
numbers, return an empty list.
### Examples
num_in_str(["1a", "a", "2b", "b"]) ➞ ["1a", "2b"]
num_in_str(["abc", "abc10"]) ➞ ["abc10"]
num_in_str(["abc", "ab10c", "a10bc", "bcd"]) ➞ ["ab10c", "a10bc"]
num_in_str(["this is a test", "test1"]) ➞ ["test1"]
### Notes
* The strings can contain white spaces or any type of characters.
* **Bonus:** Try solving this without regex.
"""
def contains_num(s):
nums = [str(x) for x in range(10)]
for x in nums:
if x in s:
return True
return False
def num_in_str(lst):
return [s for s in lst if contains_num(s)]
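# Quick check against the examples given in the prompt above (illustrative only):
if __name__ == "__main__":
    assert num_in_str(["1a", "a", "2b", "b"]) == ["1a", "2b"]
    assert num_in_str(["abc", "abc10"]) == ["abc10"]
    assert num_in_str(["abc", "ab10c", "a10bc", "bcd"]) == ["ab10c", "a10bc"]
    assert num_in_str(["this is a test", "test1"]) == ["test1"]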
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f50106ca09c41774778c8e2b795cdcdfef3b92a2
|
f49530a0c567260bb4fda613a19f37b95c9ec91f
|
/Website/data_omzetten_bruikbare_table.py
|
5e03d9c24873b5fbb36deb23ae0cf7026e82376a
|
[] |
no_license
|
brandonhillert/GroupProjectAI
|
8be70503b5a8387cc9a079efe73e29f58d79d204
|
e201807c6a6344ebf547aa9128626ef39836c328
|
refs/heads/master
| 2021-04-20T11:21:36.325546
| 2020-04-08T20:16:03
| 2020-04-08T20:16:03
| 249,678,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
import psycopg2
import re
def insert_into_table(newrecs):
    try:
        if len(newrecs) >= 6:
            c.execute(
                "INSERT INTO categoryrecommendations (id, prod1, prod2, prod3, prod4, prod5) VALUES (%s, %s, %s, %s, %s, %s)",
                (newrecs[0], newrecs[1], newrecs[2], newrecs[3], newrecs[4], newrecs[5]))
        elif len(newrecs) == 5:
            c.execute(
                "INSERT INTO categoryrecommendations (id, prod1, prod2, prod3, prod4) VALUES (%s, %s, %s, %s, %s)",
                (newrecs[0], newrecs[1], newrecs[2], newrecs[3], newrecs[4]))
        elif len(newrecs) == 4:
            c.execute("INSERT INTO categoryrecommendations (id, prod1, prod2, prod3) VALUES (%s, %s, %s, %s)",
                      (newrecs[0], newrecs[1], newrecs[2], newrecs[3]))
        elif len(newrecs) == 3:
            c.execute("INSERT INTO categoryrecommendations (id, prod1, prod2) VALUES (%s, %s, %s)",
                      (newrecs[0], newrecs[1], newrecs[2]))
        elif len(newrecs) == 2:
            c.execute("INSERT INTO categoryrecommendations (id, prod1) VALUES (%s, %s)", (newrecs[0], newrecs[1]))
        elif len(newrecs) == 1:
            c.execute("INSERT INTO categoryrecommendations (id) VALUES (%s)", (newrecs[0],))
    except:
        pass
def only_numerics(seq):  # keep only the digits of the string
seq_type = type(seq)
return seq_type().join(filter(seq_type.isdigit, seq))
def formatting_item(product):
product = product[1:-1]
product = product.split(",")
product = product[0]
product = only_numerics(product)
if product != '':
product = int(product)
return product
connect = psycopg2.connect("user=postgres password=pgadminJTgeest dbname=voordeelshopgpx")
c = connect.cursor()
c.execute("DROP TABLE IF EXISTS categoryrecommendations CASCADE")
c.execute("CREATE TABLE categoryrecommendations (id VARCHAR PRIMARY KEY, "
"prod1 VARCHAR, prod2 VARCHAR, prod3 VARCHAR, prod4 VARCHAR, prod5 VARCHAR);")
c.execute("select id, recommendations from profile where recommendations != '{}'")
idprevrec = c.fetchall()
counter = 0
for item in idprevrec:
product = item[1]
product = formatting_item(product)
try:
c.execute("select id, product1, product2, product3, product4, product5 from categorie_algoritme "
"where id = '{}';".format(product))
newrecs = c.fetchall()
if newrecs != []:
newrecs = newrecs[0]
newrecs = list(newrecs)
if len(newrecs) > 6:
newrecs = newrecs[0:6]
newrecs.remove(newrecs[0])
copy = newrecs.copy()
for rec in copy:
                if rec is None:
newrecs.remove(rec)
newrecs.insert(0, item[0])
else:
continue
insert_into_table(newrecs)
except:
print("foutmelding")
counter += 1
if counter % 10000 == 0:
print(counter)
connect.commit()
c.close()
|
[
"brandon.hillert@student.hu.nl"
] |
brandon.hillert@student.hu.nl
|
556bd1d5550ba4a7b8c38f6abbe20587e61facf1
|
abcd9c54883eef187a8314988431945a1a0f8676
|
/old/tests/test_zebra2/test_simulation/test_controller.py
|
817519986b9e28135811bb57d4812489d0459c1a
|
[
"Apache-2.0"
] |
permissive
|
PandABlocks/PandABlocks-FPGA
|
834f6eba17ffc0372e82495e1a456858b20b55c5
|
9ad5512556c94d38f817b0c02a38d660c8777f43
|
refs/heads/master
| 2023-08-16T23:40:38.067162
| 2023-08-01T16:36:39
| 2023-08-02T10:08:47
| 86,712,341
| 17
| 11
|
Apache-2.0
| 2023-09-08T13:19:55
| 2017-03-30T14:29:10
|
VHDL
|
UTF-8
|
Python
| false
| false
| 6,591
|
py
|
#!/usr/bin/env python
import unittest
import sys
import os
import time
from pkg_resources import require
require("numpy")
# Module import
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..",
"python"))
from zebra2.simulation.controller import Controller, Block, CLOCK_TICK
class ControllerTest(unittest.TestCase):
def setUp(self):
config_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"config_d")
self.c = Controller(config_dir)
def test_init(self):
for i, val in enumerate(Block.bit_bus):
if i == Block.parser.bit_bus["BITS.ONE"]:
self.assertEqual(val, 1, "Bit bus index %d != 1" % i)
else:
self.assertEqual(val, 0, "Bit bus index %d != 0" % i)
for i, val in enumerate(Block.pos_bus):
self.assertEqual(val, 0)
def do_set_reg(self, blockname, blocknum, regname, val):
config = Block.parser.blocks[blockname]
reg, _ = config.registers[regname]
self.c.do_write_register(config.base, blocknum, reg, val)
def get_instance(self, blockname, blocknum=0):
config = Block.parser.blocks[blockname]
for (base, num, reg), (block, attr) in self.c.lookup.items():
if base == config.base and num == blocknum:
return block
def test_clocks_set(self):
clocks = self.get_instance("CLOCKS")
clocks_a_idx = Block.parser.bit_bus["CLOCKS.OUTA"]
self.assertEqual(clocks.A_PERIOD, 0)
self.assertEqual(self.c.wakeups, [])
self.assertEqual(clocks.OUTA, 0)
# set 1s period
s = int(1.0 / CLOCK_TICK + 0.5)
self.do_set_reg("CLOCKS", 0, "A_PERIOD", s)
# number of clockticks to reg set
reg_ticks = (time.time() - self.c.start_time) / CLOCK_TICK
self.assertEqual(clocks.A_PERIOD, s)
self.assertEqual(len(self.c.wakeups), 1)
# check wakeup scheduled for 0.5s from now
self.assertAlmostEqual(
self.c.wakeups[0][0], reg_ticks + s / 2, delta=10000)
self.assertEqual(self.c.wakeups[0][1], clocks)
self.assertEqual(self.c.wakeups[0][2], {})
self.assertEqual(clocks.OUTA, 0)
self.assertEqual(Block.bit_bus[clocks_a_idx], 0)
# handle another event
timeout = self.c.calc_timeout()
self.assertAlmostEqual(timeout, 0.5, delta=0.001)
time.sleep(0.5)
self.c.do_tick()
self.assertEqual(clocks.A_PERIOD, s)
self.assertEqual(len(self.c.wakeups), 1)
self.assertAlmostEqual(
self.c.wakeups[0][0], reg_ticks + s, delta=10000)
self.assertEqual(self.c.wakeups[0][1], clocks)
self.assertEqual(clocks.OUTA, 1)
self.assertEqual(Block.bit_bus[clocks_a_idx], 1)
def test_changing_inp(self):
# get the div block, and set it enabled
bits_one_idx = Block.parser.bit_bus["BITS.ONE"]
div = self.get_instance("DIV", 0)
self.do_set_reg("DIV", 0, "ENABLE", bits_one_idx)
self.assertEqual(div.ENABLE, 1)
# get the bits block
bits = self.get_instance("BITS", 0)
bits_outa_idx = Block.parser.bit_bus["BITS.OUTA"]
# check disconnected
self.assertEqual(div.INP, 0)
# connect to BITS.OUTA
self.do_set_reg("DIV", 0, "INP", bits_outa_idx)
self.assertEqual(div.INP, 0)
self.assertEqual(self.c.listeners[(bits, "OUTA")], [(div, "INP")])
self.assertEqual(bits.OUTA, 0)
self.assertEqual(div.OUTD, 0)
# toggle
self.do_set_reg("BITS", 0, "A", 1)
self.assertEqual(bits.OUTA, 1)
self.assertEqual(Block.bit_bus[bits_outa_idx], 1)
self.assertEqual(div.INP, 0)
self.assertEqual(div.OUTN, 0)
self.assertEqual(div.OUTD, 0)
# Check that there is a wakeup queued
self.assertEqual(len(self.c.wakeups), 1)
        # do another tick and check it has propagated
self.c.do_tick()
self.assertEqual(div.INP, 1)
self.assertEqual(div.OUTN, 0)
self.assertEqual(div.OUTD, 1)
def test_delay(self):
# get the div blocks and set them enabled
bits_one_idx = Block.parser.bit_bus["BITS.ONE"]
div1 = self.get_instance("DIV", 0)
div2 = self.get_instance("DIV", 1)
self.do_set_reg("DIV", 0, "ENABLE", bits_one_idx)
self.do_set_reg("DIV", 1, "ENABLE", bits_one_idx)
self.assertEqual(div1.ENABLE, 1)
self.assertEqual(div2.ENABLE, 1)
# get the bits block
bits = self.get_instance("BITS", 0)
bits_outa_idx = Block.parser.bit_bus["BITS.OUTA"]
# check disconnected
self.assertEqual(div1.INP, 0)
# connect to BITS.OUTA
self.do_set_reg("DIV", 0, "INP", bits_outa_idx)
self.do_set_reg("DIV", 1, "INP", bits_outa_idx)
self.assertEqual(div1.INP, 0)
self.assertEqual(div2.INP, 0)
self.assertEqual(self.c.listeners[(bits, "OUTA")],
[(div1, "INP"), (div2, "INP")])
# Add a delay on the second
self.do_set_reg("DIV", 1, "INP_DLY", 4)
self.assertEqual(self.c.delays, {(div2, "INP"): 4})
self.assertEqual(bits.OUTA, 0)
self.assertEqual(div1.OUTD, 0)
self.assertEqual(div2.OUTD, 0)
# toggle
self.do_set_reg("BITS", 0, "A", 1)
self.assertEqual(bits.OUTA, 1)
self.assertEqual(Block.bit_bus[bits_outa_idx], 1)
self.assertEqual(div1.INP, 0)
self.assertEqual(div1.OUTN, 0)
self.assertEqual(div1.OUTD, 0)
# check that there are two wakeups queued
self.assertEqual(len(self.c.wakeups), 2)
self.assertEqual(self.c.wakeups[0][1], div1)
self.assertEqual(self.c.wakeups[1][1], div2)
self.assertEqual(self.c.wakeups[1][0] - self.c.wakeups[0][0], 4)
        # do another tick and check it has propagated
self.c.do_tick()
self.assertEqual(div1.INP, 1)
self.assertEqual(div1.OUTN, 0)
self.assertEqual(div1.OUTD, 1)
self.assertEqual(div2.INP, 0)
self.assertEqual(div2.OUTN, 0)
self.assertEqual(div2.OUTD, 0)
# And again for the delayed
self.c.do_tick()
self.assertEqual(div1.INP, 1)
self.assertEqual(div1.OUTN, 0)
self.assertEqual(div1.OUTD, 1)
self.assertEqual(div2.INP, 1)
self.assertEqual(div2.OUTN, 0)
self.assertEqual(div2.OUTD, 1)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"tom.cobb@diamond.ac.uk"
] |
tom.cobb@diamond.ac.uk
|
5bdfe72f4ae8eae50237cbd9ac83a08e78210719
|
3821faabdf6d2cf8577ebcceea78986018766a80
|
/project/configuration.py
|
3624cd4408a5d0c3c28396b1898c9237f24aefc9
|
[] |
no_license
|
andrei4b/Music-Transcription-with-Semantic-Segmantation
|
b40ac200ff5389ed190f29b4214b6658bbb1166b
|
5c5f1b5ee1af90e37079a6b0f18c15e31e1ba2b6
|
refs/heads/master
| 2020-04-16T07:43:11.908510
| 2019-06-21T11:31:45
| 2019-06-21T11:31:45
| 165,396,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
MusicNet_Instruments = ["Piano", "Harpsichord", "Violin", "Viola", "Cello", "Contrabass", "Horn", "Oboe", "Bassoon", "Clarinet", "Flute"]
Harmonic_Num = 6
def get_MusicNet_label_num_mapping(offset=1, spec_inst=None):
ori_num = [1, 7, 41, 42, 43, 44, 61, 69, 71, 72, 74]
mapping = {}
if spec_inst is None:
return mapping
if "All" in spec_inst:
spec_inst = MusicNet_Instruments
for idx, name in enumerate(spec_inst):
ii = MusicNet_Instruments.index(name)
n = ori_num[ii]
mapping[n] = idx + offset
return mapping
def get_instruments_num(insts):
if insts is None:
instruments = 1
elif "All" in insts:
instruments = 11 # For MusicNet
else:
instruments = len(insts)
return instruments
def epsilon():
return 0.000000001
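# Example of what the helpers above return (a sketch worked out from the mapping
# logic; the instrument names come from MusicNet_Instruments and the instrument
# numbers from ori_num):
#
#   get_MusicNet_label_num_mapping(offset=1, spec_inst=["Piano", "Violin"])
#   # -> {1: 1, 41: 2}   (Piano, program 1 -> label 1; Violin, program 41 -> label 2)
#
#   get_instruments_num(["All"])   # -> 11
#   get_instruments_num(None)      # -> 1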
|
[
"whitebreeze@MCTLAb-Tim.iis.sinica.edu.tw"
] |
whitebreeze@MCTLAb-Tim.iis.sinica.edu.tw
|
35c7a6434893bd97a06d7e1efc7402d9569d27e1
|
ff05d487fa7a79e3045c139421ce9031b2aa7360
|
/effdet/__init__.py
|
e18f261c1e0db380dc4c33563dd29d204da34d34
|
[
"Apache-2.0"
] |
permissive
|
phager90/efficientdet-pytorch
|
eb92e6d5d4eb6c367d23081ce6abd0b2d1fa0cf2
|
bbd84c0e7ec2a23c6ae7447c437789524ba141dd
|
refs/heads/master
| 2023-07-17T13:33:25.835335
| 2021-08-30T18:23:37
| 2021-08-30T18:23:37
| 291,003,268
| 0
| 0
|
Apache-2.0
| 2020-08-28T09:14:34
| 2020-08-28T09:14:34
| null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from .efficientdet import EfficientDet
from .bench import DetBenchPredict, DetBenchTrain, unwrap_bench
from .evaluator import COCOEvaluator, FastMapEvalluator
from .config import get_efficientdet_config, default_detection_model_configs
from .factory import create_model, create_model_from_config
from .helpers import load_checkpoint, load_pretrained
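# Hypothetical usage sketch -- the keyword arguments are assumptions and may differ
# between effdet versions; see factory.create_model for the actual signature:
#
#   config = get_efficientdet_config('tf_efficientdet_d0')
#   model = create_model('tf_efficientdet_d0', bench_task='predict', pretrained=True)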
|
[
"rwightman@gmail.com"
] |
rwightman@gmail.com
|
dfb72750f57261a472645ea9dcd6437e8b99810f
|
5a7a0a268e281dd985eb9889a152114da249f7ea
|
/0x01-python-if_else_loops_functions/1-last_digit.py
|
b1ec9ca9294af9e4471fc3cd19cf83d6ce7b3db4
|
[] |
no_license
|
Gt-gih/holbertonschool-higher_level_programming
|
3b17b910c3496f482e203fca26d72b71be3fe471
|
fb765e2d1f2a4a6fba1a24bbb1c2a0b2a7ca269a
|
refs/heads/master
| 2023-03-17T01:53:12.367576
| 2020-11-04T20:39:32
| 2020-11-04T20:39:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
#!/usr/bin/python3
import random
number = random.randint(-10000, 10000)
if number < 0:
sign = number * -1
lastdigit = sign % 10
lastdigit = lastdigit * -1
else:
lastdigit = number % 10
print("Last digit of {} is {} and is ".format(number, lastdigit), end="")
if lastdigit > 5:
print("greater than 5")
elif lastdigit == 0:
print("0")
else:
print("less than 6 and not 0")
|
[
"juan.bueno01@usc.edu.co"
] |
juan.bueno01@usc.edu.co
|
b539d96fb9dfac175343b0bfb1eb0c9df1eaa14c
|
d2fe4daf1d56448da419c4f821d33480571d614d
|
/polls/migrations/0001_initial.py
|
34084c4d0ef07b0c6d2e8c9bf61653af689b0713
|
[] |
no_license
|
ahmadRagheb/-Python-Django-Polls
|
94c6299d56f11624619d8cc0783a03897ad60fb3
|
59ed11cbf171f05ce3d8f484a9d389ba797c74c1
|
refs/heads/master
| 2021-01-18T15:46:07.404407
| 2017-04-05T06:33:46
| 2017-04-05T06:33:46
| 86,681,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-27 11:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"pq4@hotmail.co.uk"
] |
pq4@hotmail.co.uk
|
fc85e3f36afff9d60b70d5c926133f8d8913420a
|
e22e03d9761f5c6d581b5af2e77343e8ee4b201d
|
/edk2/UefiCpuPkg/ResetVector/Vtf0/Tools/FixupForRawSection.py
|
f88c8a4cd7d55b1a6d81d116255186447fc8022a
|
[
"OpenSSL",
"BSD-2-Clause"
] |
permissive
|
SamuelTulach/SecureFakePkg
|
759975fcc84d62f05ac577da48353752e5334878
|
f34080a6c0efb6ca3dd755365778d0bcdca6b991
|
refs/heads/main
| 2023-08-17T07:51:22.175924
| 2021-10-01T10:46:14
| 2021-10-01T10:46:14
| 410,938,306
| 94
| 14
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
## @file
# Apply fixup to VTF binary image for FFS Raw section
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import sys
filename = sys.argv[1]
d = open(sys.argv[1], 'rb').read()
c = ((len(d) + 4 + 7) & ~7) - 4
if c > len(d):
c -= len(d)
f = open(sys.argv[1], 'wb')
f.write('\x90' * c)
f.write(d)
f.close()
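# Worked example of the padding arithmetic above (illustrative):
#   if len(d) == 13, then ((13 + 4 + 7) & ~7) - 4 == 24 - 4 == 20, so c == 20;
#   20 > 13, so c becomes 20 - 13 == 7 pad bytes ('\x90', the x86 NOP).
#   A 4-byte FFS raw-section header + 7 pad bytes + 13 data bytes == 24 bytes,
#   i.e. the section ends up 8-byte aligned, which is the point of the fixup.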
|
[
"samtulach@gmail.com"
] |
samtulach@gmail.com
|
d3edf2087f2d3f1fd55bfdc256ecf4c7e731bc80
|
220535ba153c2fca757c264cab56d2ff835f4fd4
|
/06_支持向量机SVM/SVM.py
|
ccd576a8492ed9058fb35c7649529824503ccbdf
|
[] |
no_license
|
cp4011/Machine-Learning
|
d41b5b8533b8d2210d1e69b944f8ea997c2488b4
|
7f4576278990f8a2de302e69eb6373d169f9fbc8
|
refs/heads/master
| 2020-04-04T15:41:53.912450
| 2019-03-26T15:37:35
| 2019-03-26T15:37:35
| 156,048,459
| 2
| 0
| null | 2018-11-04T06:14:57
| 2018-11-04T04:16:03
|
Python
|
UTF-8
|
Python
| false
| false
| 34,559
|
py
|
from numpy import *
import matplotlib.pyplot as plt
"""SMO算法工作原理:每次循环中选择两个alpha进行优化处理。一旦找到一对合适的alpha,那么就增大其中一个同时减小另一个。
这里所谓的“合适”就是指两个alpha必须要符合一定的条件,条件之一就是这两个alpha必须要在间隔边界之外,另一个条件则是这两个
alpha还没有进行过区间化处理或者不在边界上
"""
# SMO算法相关辅助中的辅助函数
# 1.解析文本数据函数,提取每个样本的特征组成向量,添加到数据矩阵; 添加样本标签到标签向量
def loadDataSet(filename):
data_matrix = []
label_matrix = []
with open(filename, 'r') as fr:
for line in fr.readlines():
line_array = line.strip().split('\t') # strip删除头尾空白字符(空格+回车),再进行分割
data_matrix.append([float(line_array[0]), float(line_array[1])]) # 填充数据集
label_matrix.append(float(line_array[2])) # 填充类别标签
return data_matrix, label_matrix # 返回数据集和标签列表
# 测试
# data_matrix, label_matrix = loadDataSet("testSet.txt")
# print(label_matrix)
# 2.在样本集中采取随机选择的方法选取第二个不等于第一个alphai的优化向量alphaj
def selectJrand(i, m):
"""
Function: 随机选择
Input: i:alpha下标
m:alpha数目
Output: j:随机选择的alpha下标
"""
# 初始化下标j
j = i
# 随机化产生j,直到不等于i
while (j == i):
j = int(random.uniform(0,m))
# 返回j的值
return j
def clipAlpha(aj, H, L):
"""
Function: 设定alpha阈值
Input: aj:alpha的值
H:alpha的最大值
L:alpha的最小值
Output: aj:处理后的alpha的值
"""
# 如果输入alpha的值大于最大值,则令aj=H
if aj > H:
aj = H
# 如果输入alpha的值小于最小值,则令aj=L
if L > aj:
aj = L
# 返回处理后的alpha的值
return aj
"""简化版SMO伪代码:
创建一个alpha向量并将其初始化为0向量
当迭代次数小于最大迭代次数时(外循环):
对数据集中的每个数据向量(内循环):
如果该数据向量可以被优化:
随机选择另外一个数据向量
同时优化这两个向量
如果两个向量都不能被优化,退出内循环
如果所有向量都没被优化,则增加迭代数目,继续下一次循环
"""
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
"""
Function: 简化版SMO算法
Input: dataMatIn:数据集
classLabels:类别标签
C:常数C
toler:容错率
maxIter:最大的循环次数
Output: b:常数项
alphas:数据向量
"""
# 将输入的数据集和类别标签转换为NumPy矩阵
dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
# 初始化常数项b,初始化行列数m、n
b = 0; m, n = shape(dataMatrix)
# 初始化alphas数据向量为0向量
alphas = mat(zeros((m, 1)))
# 初始化iter变量,存储的是在没有任何alpha改变的情况下遍历数据集的次数
iter = 0
# 外循环,当迭代次数小于maxIter时执行
while (iter < maxIter):
# alpha改变标志位每次都要初始化为0
alphaPairsChanged = 0
# 内循环,遍历所有数据集
for i in range(m):
# multiply将alphas和labelMat进行矩阵相乘,求出法向量w(m,1),w`(1,m)
# dataMatrix * dataMatrix[i,:].T,求出输入向量x(m,1)
# 整个对应输出公式f(x)=w`x + b
fXi = float(multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[i, :].T)) + b
# 计算误差
Ei = fXi - float(labelMat[i])
# 如果标签与误差相乘之后在容错范围之外,且超过各自对应的常数值,则进行优化
if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or ((labelMat[i] * Ei > toler) and (alphas[i] > 0)):
# 随机化选择另一个数据向量
j = selectJrand(i, m)
# 对此向量进行同样的计算
fXj = float(multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[j,:].T)) + b
# 计算误差
Ej = fXj - float(labelMat[j])
# 利用copy存储刚才的计算值,便于后期比较
alphaIold = alphas[i].copy(); alpahJold = alphas[j].copy()
# 保证alpha在0和C之间
if (labelMat[i] != labelMat[j]):
L = max(0, alphas[j] - alphas[i])
H = min(C, C + alphas[j] - alphas[i])
else:
L = max(0, alphas[j] + alphas[i] - C)
H = min(C, alphas[j] + alphas[i])
# 如果界限值相同,则不做处理直接跳出本次循环
if L == H: print("L == H"); continue
# 最优修改量,求两个向量的内积(核函数)
eta = 2.0 * dataMatrix[i, :] * dataMatrix[j,:].T - dataMatrix[i,:] * dataMatrix[i,:].T - dataMatrix[j,:] * dataMatrix[j,:].T
# 如果最优修改量大于0,则不做处理直接跳出本次循环,这里对真实SMO做了简化处理
if eta >= 0: print("eta >= 0"); continue
# 计算新的alphas[j]的值
alphas[j] -= labelMat[j] * (Ei - Ej) / eta
# 对新的alphas[j]进行阈值处理
alphas[j] = clipAlpha(alphas[j], H, L)
# 如果新旧值差很小,则不做处理跳出本次循环
if (abs(alphas[j] - alpahJold) < 0.00001): print("j not moving enough"); continue
# 对i进行修改,修改量相同,但是方向相反
alphas[i] += labelMat[j] * labelMat[i] * (alpahJold - alphas[j])
# 新的常数项
b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i,:] * dataMatrix[i,:].T - labelMat[i] * (alphas[j] - alpahJold) * dataMatrix[i,:] * dataMatrix[j,:].T
b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i,:] * dataMatrix[j,:].T - labelMat[j] * (alphas[j] - alpahJold) * dataMatrix[j,:] * dataMatrix[j,:].T
# 谁在0到C之间,就听谁的,否则就取平均值
if (0 < alphas[i]) and (C > alphas[i]): b = b1
elif (0 < alphas[j]) and (C > alphas[j]): b = b2
else: b = (b1 + b2) / 2.0
# 标志位加1
alphaPairsChanged += 1
# 输出迭代次数,alphas的标号以及标志位的值
print("iter: %d i: %d, pairs changed %d" % (iter, i, alphaPairsChanged))
# 如果标志位没变,即没有进行优化,那么迭代值加1
if (alphaPairsChanged == 0): iter += 1
# 否则迭代值为0
else: iter = 0
# 打印迭代次数
print("iteration number: %d" % iter)
# 返回常数项和alphas的数据向量
return b, alphas
"""和简化版一样,完整版也需要一些支持函数。
首要的事情就是建立一个数据结构来保存所有的重要值,而这个过程可以通过一个对象来完成;
对于给定的alpha值,第一个辅助函数calcEk()能够计算E值并返回(因为调用频繁,所以必须要单独拎出来);
selectJ()用于选择第二个alpha或者说内循环的alpha值,选择合适的值以保证在每次优化中采用最大步长;
updateEk()用于计算误差值并将其存入缓存中。
"""
# SMO完整版(Non-Kernel VErsions below)
class optStruct:
"""
Function: 存放运算中重要的值
Input: dataMatIn:数据集
classLabels:类别标签
C:常数C
toler:容错率
Output: X:数据集
labelMat:类别标签
C:常数C
tol:容错率
m:数据集行数
b:常数项
alphas:alphas矩阵
eCache:误差缓存
"""
def __init__(self, dataMatIn, classLabels, C, toler):
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
self.alphas = mat(zeros((self.m, 1)))
self.b = 0
self.eCache = mat(zeros((self.m, 2)))
def calcEk(oS, k):
"""
Function: 计算误差值E
Input: oS:数据结构
k:下标
Output: Ek:计算的E值
"""
# 计算fXk,整个对应输出公式f(x)=w`x + b
fXk = float(multiply(oS.alphas, oS.labelMat).T * (oS.X * oS.X[k,:].T)) + oS.b
# 计算E值
Ek = fXk - float(oS.labelMat[k])
# 返回计算的误差值E
return Ek
def selectJ(i, oS, Ei):
"""
Function: 选择第二个alpha的值
Input: i:第一个alpha的下标
oS:数据结构
Ei:计算出的第一个alpha的误差值
Output: j:第二个alpha的下标
Ej:计算出的第二个alpha的误差值
"""
# 初始化参数值
maxK = -1; maxDeltaE = 0; Ej = 0
# 构建误差缓存
oS.eCache[i] = [1, Ei]
# 构建一个非零列表,返回值是第一个非零E所对应的alpha值,而不是E本身
validEcacheList = nonzero(oS.eCache[:, 0].A)[0]
# 如果列表长度大于1,说明不是第一次循环
if (len(validEcacheList)) > 1:
# 遍历列表中所有元素
for k in validEcacheList:
# 如果是第一个alpha的下标,就跳出本次循环
if k == i: continue
# 计算k下标对应的误差值
Ek = calcEk(oS, k)
# 取两个alpha误差值的差值的绝对值
deltaE = abs(Ei - Ek)
# 最大值更新
if (deltaE > maxDeltaE):
maxK = k; maxDeltaE = deltaE; Ej = Ek
# 返回最大差值的下标maxK和误差值Ej
return maxK, Ej
# 如果是第一次循环,则随机选择alpha,然后计算误差
else:
j = selectJrand(i, oS.m)
Ej = calcEk(oS, j)
# 返回下标j和其对应的误差Ej
return j, Ej
def updateEk(oS, k):
"""
Function: 更新误差缓存
Input: oS:数据结构
j:alpha的下标
Output: 无
"""
# 计算下表为k的参数的误差
Ek = calcEk(oS, k)
# 将误差放入缓存
oS.eCache[k] = [1, Ek]
"""优化例程
接下来简单介绍一下用于寻找决策边界的优化例程。大部分代码和之前的smoSimple()是一样的,区别在于:
使用了自己的数据结构,该结构在oS中传递;
使用selectJ()而不是selectJrand()来选择第二个alpha的值;
在alpha值改变时更新Ecache。
"""
def innerL(i, oS):
"""
Function: 完整SMO算法中的优化例程
Input: oS:数据结构
i:alpha的下标
Output: 无
"""
# 计算误差
Ei = calcEk(oS, i)
# 如果标签与误差相乘之后在容错范围之外,且超过各自对应的常数值,则进行优化
if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
# 启发式选择第二个alpha值
j, Ej = selectJ(i, oS, Ei)
# 利用copy存储刚才的计算值,便于后期比较
alphaIold = oS.alphas[i].copy(); alpahJold = oS.alphas[j].copy();
# 保证alpha在0和C之间
if (oS.labelMat[i] != oS.labelMat[j]):
L = max(0, oS.alphas[j] - oS. alphas[i])
H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
else:
L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
H = min(oS.C, oS.alphas[j] + oS.alphas[i])
# 如果界限值相同,则不做处理直接跳出本次循环
if L == H: print("L==H"); return 0
# 最优修改量,求两个向量的内积(核函数)
eta = 2.0 * oS.X[i, :]*oS.X[j, :].T - oS.X[i, :]*oS.X[i, :].T - oS.X[j, :]*oS.X[j, :].T
# 如果最优修改量大于0,则不做处理直接跳出本次循环,这里对真实SMO做了简化处理
if eta >= 0: print("eta>=0"); return 0
# 计算新的alphas[j]的值
oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
# 对新的alphas[j]进行阈值处理
oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
# 更新误差缓存
updateEk(oS, j)
# 如果新旧值差很小,则不做处理跳出本次循环
if (abs(oS.alphas[j] - alpahJold) < 0.00001): print("j not moving enough"); return 0
# 对i进行修改,修改量相同,但是方向相反
oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alpahJold - oS.alphas[j])
# 更新误差缓存
updateEk(oS, i)
# 更新常数项
b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :]*oS.X[i, :].T - oS.labelMat[j] * (oS.alphas[j] - alpahJold) * oS.X[i, :]*oS.X[j, :].T
b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :]*oS.X[j, :].T - oS.labelMat[j] * (oS.alphas[j] - alpahJold) * oS.X[j, :]*oS.X[j, :].T
# 谁在0到C之间,就听谁的,否则就取平均值
if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
else: oS.b = (b1 + b2) / 2.0
# 成功返回1
return 1
# 失败返回0
else: return 0
"""外循环代码
外循环代码的输入和函数smoSimple()完全一样。整个代码的主体是while循环,终止条件:当迭代次数超过指定的最大值,或者遍历
整个集合都未对任意alpha对进行修改时,就退出循环。while循环内部与smoSimple()中有所不同,一开始的for循环在数据集上遍历任意
可能的alpha。通过innerL()来选择第二个alpha,并在可能时对其进行优化处理。如果有任意一对alpha值发生改变,就会返回1.第二个
for循环遍历所有的非边界alpha值,也就是不在边界0或C上的值。
"""
def smoP(dataMatIn, classLabels, C, toler, maxIter):
"""
Function: 完整SMO算法
Input: dataMatIn:数据集
classLabels:类别标签
C:常数C
toler:容错率
maxIter:最大的循环次数
Output: b:常数项
alphas:数据向量
"""
# 新建数据结构对象
oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler)
# 初始化迭代次数
iter = 0
# 初始化标志位
entireSet = True; alphaPairsChanged = 0
# 终止条件:迭代次数超限、遍历整个集合都未对alpha进行修改
while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
alphaPairsChanged = 0
# 根据标志位选择不同的遍历方式
if entireSet:
# 遍历任意可能的alpha值
for i in range(oS.m):
# 选择第二个alpha值,并在可能时对其进行优化处理
alphaPairsChanged += innerL(i, oS)
print("fullSet, iter: %d i: %d, pairs changed %d" % (iter, i, alphaPairsChanged))
# 迭代次数累加
iter += 1
else:
# 得出所有的非边界alpha值
nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
# 遍历所有的非边界alpha值
for i in nonBoundIs:
# 选择第二个alpha值,并在可能时对其进行优化处理
alphaPairsChanged += innerL(i, oS)
print("non-bound, iter: %d i: %d, pairs changed %d" % (iter, i, alphaPairsChanged))
# 迭代次数累加
iter += 1
# 在非边界循环和完整遍历之间进行切换
if entireSet: entireSet = False
elif (alphaPairsChanged == 0): entireSet =True
print("iteration number: %d" % iter)
# 返回常数项和数据向量
return oS.b, oS.alphas
"""分类测试
可以拿我们计算出来的alpha值进行分类了。首先必须基于alpha值得到超平面,这也包括了w的计算
"""
def calcWs(alphas, dataArr, classLabels):
"""
Function: 计算W
Input: alphas:数据向量
dataArr:数据集
classLabels:类别标签
Output: w:w*x+b中的w
"""
# 初始化参数
X = mat(dataArr); labelMat = mat(classLabels).transpose()
# 获取数据行列值
m, n = shape(X)
# 初始化w
w = zeros((n, 1))
# 遍历alpha,更新w
for i in range(m):
w += multiply(alphas[i]*labelMat[i],X[i,:].T)
# 返回w值
return w
# 测试test
# dataArr, labelArr = loadDataSet('testSet.txt')
# b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40)
# ws = calcWs(alphas, dataArr, labelArr)
# # print(ws)
# datMat = mat(dataArr)
# print(datMat[0] * mat(ws)+b, labelArr[0])
# print(datMat[1] * mat(ws)+b, labelArr[1])
# print(datMat[2] * mat(ws)+b, labelArr[2])
"""至此,线性分类器介绍完了,如果数据集非线性可分,那么我们就需要引入核函数的概念了"""
# Kernel function implementation
"""By adding one function to svmMLiA.py and making a few small changes, the existing
code can use kernel functions (every function related to the kernel implementation has
a name ending in K). The main differences are in innerLK() and calcEkK(); an
illustrative example of kernelTrans is given right after its definition below.
"""
# newly added kernel-transform function; used to fill the data structure and in later computations
def kernelTrans(X, A, kTup):
"""
Function: 核转换函数
Input: X:数据集
A:某一行数据
kTup:核函数信息
Output: K:计算出的核向量
"""
# 获取数据集行列数
m, n = shape(X)
# 初始化列向量
K = mat(zeros((m, 1)))
# 根据键值选择相应核函数
# lin表示的是线性核函数
if kTup[0] == 'lin': K = X * A.T
# rbf表示径向基核函数
elif kTup[0] == 'rbf':
for j in range(m):
deltaRow = X[j, :] - A
K[j] = deltaRow * deltaRow.T
# 对矩阵元素展开计算,而不像在MATLAB中一样计算矩阵的逆
K = exp(K/(-1*kTup[1]**2))
# 如果无法识别,就报错
else: raise NameError('Houston We Have a Problem -- That Kernel is not recognized')
# 返回计算出的核向量
return K
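# Illustrative example of kernelTrans (not part of the original book code):
#   X = mat([[0.0, 0.0], [1.0, 1.0]])
#   K = kernelTrans(X, X[0, :], ('rbf', 1.3))
# For 'rbf' each entry is K[j] = exp(-||X[j] - A||**2 / k1**2), so here
# K[0] = exp(0) = 1.0 and K[1] = exp(-2 / 1.69) ≈ 0.306.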
# 其他函数
class optStructK:
"""
Function: 存放运算中重要的值
Input: dataMatIn:数据集
classLabels:类别标签
C:常数C
toler:容错率
kTup:速度参数
Output: X:数据集
labelMat:类别标签
C:常数C
tol:容错率
m:数据集行数
b:常数项
alphas:alphas矩阵
eCache:误差缓存
K:核函数矩阵
"""
def __init__(self, dataMatIn, classLabels, C, toler, kTup):
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
self.alphas = mat(zeros((self.m, 1)))
self.b = 0
self.eCache = mat(zeros((self.m, 2)))
""" 主要区分 """
self.K = mat(zeros((self.m, self.m)))
for i in range(self.m):
self.K[:,i] = kernelTrans(self.X, self.X[i,:], kTup)
""" 主要区分 """
def calcEkK(oS, k):
"""
Function: 计算误差值E
Input: oS:数据结构
k:下标
Output: Ek:计算的E值
"""
""" 主要区分 """
# 计算fXk,整个对应输出公式f(x)=w`x + b
# fXk = float(multiply(oS.alphas, oS.labelMat).T * (oS.X * oS.X[k,:].T)) + oS.b
fXk = float(multiply(oS.alphas, oS.labelMat).T*oS.K[:, k] + oS.b)
""" 主要区分 """
# 计算E值
Ek = fXk - float(oS.labelMat[k])
# 返回计算的误差值E
return Ek
def selectJK(i, oS, Ei):
"""
Function: 选择第二个alpha的值
Input: i:第一个alpha的下标
oS:数据结构
Ei:计算出的第一个alpha的误差值
Output: j:第二个alpha的下标
Ej:计算出的第二个alpha的误差值
"""
# 初始化参数值
maxK = -1; maxDeltaE = 0; Ej = 0
# 构建误差缓存
oS.eCache[i] = [1, Ei]
# 构建一个非零列表,返回值是第一个非零E所对应的alpha值,而不是E本身
validEcacheList = nonzero(oS.eCache[:, 0].A)[0]
# 如果列表长度大于1,说明不是第一次循环
if (len(validEcacheList)) > 1:
# 遍历列表中所有元素
for k in validEcacheList:
# 如果是第一个alpha的下标,就跳出本次循环
if k == i: continue
# 计算k下标对应的误差值
Ek = calcEkK(oS, k)
# 取两个alpha误差值的差值的绝对值
deltaE = abs(Ei - Ek)
# 最大值更新
if (deltaE > maxDeltaE):
maxK = k; maxDeltaE = deltaE; Ej = Ek
# 返回最大差值的下标maxK和误差值Ej
return maxK, Ej
# 如果是第一次循环,则随机选择alpha,然后计算误差
else:
j = selectJrand(i, oS.m)
Ej = calcEkK(oS, j)
# 返回下标j和其对应的误差Ej
return j, Ej
def updateEkK(oS, k):
"""
Function: 更新误差缓存
Input: oS:数据结构
j:alpha的下标
Output: 无
"""
# 计算下表为k的参数的误差
Ek = calcEkK(oS, k)
# 将误差放入缓存
oS.eCache[k] = [1, Ek]
def innerLK(i, oS):
"""
Function: 完整SMO算法中的优化例程
Input: oS:数据结构
i:alpha的下标
Output: 无
"""
# 计算误差
Ei = calcEkK(oS, i)
# 如果标签与误差相乘之后在容错范围之外,且超过各自对应的常数值,则进行优化
if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
# 启发式选择第二个alpha值
j, Ej = selectJK(i, oS, Ei)
# 利用copy存储刚才的计算值,便于后期比较
alphaIold = oS.alphas[i].copy(); alpahJold = oS.alphas[j].copy();
# 保证alpha在0和C之间
if (oS.labelMat[i] != oS.labelMat[j]):
L = max(0, oS.alphas[j] - oS. alphas[i])
H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
else:
L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
H = min(oS.C, oS.alphas[j] + oS.alphas[i])
# 如果界限值相同,则不做处理直接跳出本次循环
if L == H: print("L==H"); return 0
""" 主要区分 """
# 最优修改量,求两个向量的内积(核函数)
# eta = 2.0 * oS.X[i, :]*oS.X[j, :].T - oS.X[i, :]*oS.X[i, :].T - oS.X[j, :]*oS.X[j, :].T
eta = 2.0 * oS.K[i, j] - oS.K[i, i] - oS.K[j, j]
""" 主要区分 """
# 如果最优修改量大于0,则不做处理直接跳出本次循环,这里对真实SMO做了简化处理
if eta >= 0: print("eta>=0"); return 0
# 计算新的alphas[j]的值
oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
# 对新的alphas[j]进行阈值处理
oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
# 更新误差缓存
updateEkK(oS, j)
# 如果新旧值差很小,则不做处理跳出本次循环
if (abs(oS.alphas[j] - alpahJold) < 0.00001): print("j not moving enough"); return 0
# 对i进行修改,修改量相同,但是方向相反
oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alpahJold - oS.alphas[j])
# 更新误差缓存
updateEkK(oS, i)
""" 主要区分 """
# 更新常数项
# b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :]*oS.X[i, :].T - oS.labelMat[j] * (oS.alphas[j] - alpahJold) * oS.X[i, :]*oS.X[j, :].T
# b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :]*oS.X[j, :].T - oS.labelMat[j] * (oS.alphas[j] - alpahJold) * oS.X[j, :]*oS.X[j, :].T
b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, i] - oS.labelMat[j] * (oS.alphas[j] - alpahJold) * oS.K[i, j]
b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, j] - oS.labelMat[j] * (oS.alphas[j] - alpahJold) * oS.K[j, j]
""" 主要区分 """
# 谁在0到C之间,就听谁的,否则就取平均值
if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
else: oS.b = (b1 + b2) / 2.0
# 成功返回1
return 1
# 失败返回0
else: return 0
def smoPK(dataMatIn, classLabels, C, toler, maxIter, kTup = ('lin', 0)):
"""
Function: 完整SMO算法
Input: dataMatIn:数据集
classLabels:类别标签
C:常数C
toler:容错率
maxIter:最大的循环次数
kTup:速度参数
Output: b:常数项
alphas:数据向量
"""
# 新建数据结构对象
oS = optStructK(mat(dataMatIn), mat(classLabels).transpose(), C, toler, kTup)
# 初始化迭代次数
iter = 0
# 初始化标志位
entireSet = True; alphaPairsChanged = 0
# 终止条件:迭代次数超限、遍历整个集合都未对alpha进行修改
while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
alphaPairsChanged = 0
# 根据标志位选择不同的遍历方式
if entireSet:
# 遍历任意可能的alpha值
for i in range(oS.m):
# 选择第二个alpha值,并在可能时对其进行优化处理
alphaPairsChanged += innerLK(i, oS)
print("fullSet, iter: %d i: %d, pairs changed %d" % (iter, i, alphaPairsChanged))
# 迭代次数累加
iter += 1
else:
# 得出所有的非边界alpha值
nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
# 遍历所有的非边界alpha值
for i in nonBoundIs:
# 选择第二个alpha值,并在可能时对其进行优化处理
alphaPairsChanged += innerLK(i, oS)
print("non-bound, iter: %d i: %d, pairs changed %d" % (iter, i, alphaPairsChanged))
# 迭代次数累加
iter += 1
# 在非边界循环和完整遍历之间进行切换
if entireSet: entireSet = False
elif (alphaPairsChanged == 0): entireSet =True
print("iteration number: %d" % iter)
# 返回常数项和数据向量
return oS.b, oS.alphas
"""测试函数。整个代码中最重要的是for循环开始的那两行,他们给出了如何利用核函数进行分类。首先利用结构初始化方法中使用过的
kernelTrans()函数,得到转换后的数据。然后,再用其与前面的alpha及类别标签值求积。特别需要注意观察的是,我们是如何做到只需
要支持向量数据就可以进行分类的
"""
def testRbf(k1=1.3):
"""
Function: 利用核函数进行分类的径向基测试函数
Input: k1:径向基函数的速度参数
Output: 输出打印信息
"""
# 导入数据集
dataArr, labelArr = loadDataSet('testSetRBF.txt')
# 调用Platt SMO算法
b, alphas = smoPK(dataArr, labelArr, 200, 0.00001, 10000, ('rbf', k1))
# 初始化数据矩阵和标签向量
datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
# 记录支持向量序号
svInd = nonzero(alphas.A > 0)[0]
# 读取支持向量
sVs = datMat[svInd]
# 读取支持向量对应标签
labelSV = labelMat[svInd]
# 输出打印信息
print("there are %d Support Vectors" % shape(sVs)[0])
# 获取数据集行列值
m, n = shape(datMat)
# 初始化误差计数
errorCount = 0
# 遍历每一行,利用核函数对训练集进行分类
for i in range(m):
# 利用核函数转换数据
kernelEval = kernelTrans(sVs, datMat[i,:], ('rbf', k1))
# 仅用支持向量预测分类
predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
# 预测分类结果与标签不符则错误计数加一
if sign(predict) != sign(labelArr[i]): errorCount += 1
# 打印输出分类错误率
print("the training error rate is: %f" % (float(errorCount)/m))
#导入测试数据集
dataArr, labelArr = loadDataSet('testSetRBF2.txt')
# 初始化误差计数
errorCount = 0
#初始化数据矩阵和标签向量
datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
# 获取数据集行列值
m, n = shape(datMat)
# 遍历每一行,利用核函数对测试集进行分类
for i in range(m):
#利用核函数转换数据
kernelEval = kernelTrans(sVs, datMat[i,:], ('rbf', k1))
# 仅用支持向量预测分类
predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
# 预测分类结果与标签不符则错误计数加一
if sign(predict) != sign(labelArr[i]): errorCount += 1
# 打印输出分类错误率
print("the test error rate is: %f" % (float(errorCount)/m))
# test
# testRbf()
# SVM识别手写数据集
def img2vector(filename):
"""
Function: 32*32图像转换为1*1024向量
Input: filename:文件名称字符串
Output: returnVect:转换之后的1*1024向量
"""
# 初始化要返回的1*1024向量
returnVect = zeros((1, 1024))
# 打开文件
fr = open(filename)
# 读取文件信息
for i in range(32):
# 循环读取文件的前32行
lineStr = fr.readline()
for j in range(32):
# 将每行的头32个字符存储到要返回的向量中
returnVect[0, 32*i+j] = int(lineStr[j])
# 返回要输出的1*1024向量
return returnVect
def loadImages(dirName):
"""
Function: 加载图片
Input: dirName:文件路径
Output: trainingMat:训练数据集
hwLabels:数据标签
"""
from os import listdir
# 初始化数据标签
hwLabels = []
# 读取文件列表
trainingFileList = listdir(dirName)
# 读取文件个数
m = len(trainingFileList)
# 初始化训练数据集
trainingMat = zeros((m, 1024))
# 填充数据集
for i in range(m):
# 遍历所有文件
fileNameStr = trainingFileList[i]
# 提取文件名称
fileStr = fileNameStr.split('.')[0]
# 提取数字标识
classNumStr = int(fileStr.split('_')[0])
# 数字9记为-1类
if classNumStr == 9: hwLabels.append(-1)
# 其他数字记为+1类
else: hwLabels.append(1)
# 提取图像向量,填充数据集
trainingMat[i, :] = img2vector('%s/%s' % (dirName, fileNameStr))
# 返回数据集和数据标签
return trainingMat, hwLabels
def testDigits(kTup = ('rbf', 10)):
"""
Function: 手写数字分类函数
Input: kTup:核函数采用径向基函数
Output: 输出打印信息
"""
# 导入数据集
dataArr, labelArr = loadImages('trainingDigits')
# 调用Platt SMO算法
b, alphas = smoPK(dataArr, labelArr, 200, 0.0001, 10000, kTup)
# 初始化数据矩阵和标签向量
datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
# 记录支持向量序号
svInd = nonzero(alphas.A > 0)[0]
# 读取支持向量
sVs = datMat[svInd]
# 读取支持向量对应标签
labelSV = labelMat[svInd]
# 输出打印信息
print("there are %d Support Vectors" % shape(sVs)[0])
# 获取数据集行列值
m, n = shape(datMat)
# 初始化误差计数
errorCount = 0
# 遍历每一行,利用核函数对训练集进行分类
for i in range(m):
# 利用核函数转换数据
kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
# 仅用支持向量预测分类
predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
# 预测分类结果与标签不符则错误计数加一
if sign(predict) != sign(labelArr[i]): errorCount += 1
# 打印输出分类错误率
print("the training error rate is: %f" % (float(errorCount)/m))
# 导入测试数据集
dataArr, labelArr = loadImages('testDigits')
# 初始化误差计数
errorCount = 0
# 初始化数据矩阵和标签向量
datMat = mat(dataArr); labelMat = mat(labelArr).transpose()
# 获取数据集行列值
m, n = shape(datMat)
# 遍历每一行,利用核函数对测试集进行分类
for i in range(m):
# 利用核函数转换数据
kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
# 仅用支持向量预测分类
predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
# 预测分类结果与标签不符则错误计数加一
if sign(predict) != sign(labelArr[i]): errorCount += 1
# 打印输出分类错误率
print("the test error rate is: %f" % (float(errorCount)/m))
testDigits(('rbf', 20))
"""支持向量机是一种分类器。之所以称为“机”是因为它会产生一个二值决策结果,即它是一种决策“机”。支持向量机的泛化错误率
较低,具有良好的学习能力,且学到的结果具有很好的推广性。这些优点使得支持向量机十分流行,有些人认为他是监督学习中
最好的定式算法
"""
|
[
"957628963@qq.com"
] |
957628963@qq.com
|
7cb2c5805eecc9c8faee87a4930868df40309f57
|
d18a109c3edea57ffada8b7e8aa52e6923986ca2
|
/farmacia/Farmaceutico.py
|
7c2022c8e44fac6a8eead73383cc2dea472377f0
|
[] |
no_license
|
LuisNataan/sistema-farmacia
|
ac9bbd71a6d3e72e2b93e8e740731dd1adb3a2fa
|
174b106044b1a68c3cce5f2ac2441ba50196de4d
|
refs/heads/master
| 2023-04-09T19:20:20.044014
| 2021-04-15T20:13:55
| 2021-04-15T20:13:55
| 358,262,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
import Receita
import Paciente
from datetime import date
class Farmaceutico:
def __init__(self, nome):
self.nome = nome
def receber_receita(self, paciente: Paciente, receita: Receita):
self.paciente = paciente
        self.receita = receita
def validar_receita(self):
validade = self.receita.validade.split("/")
if len(validade) == 2:
print(validade.reverse())
ano = int(validade[0])
mes = int(validade[1])
dia = int(validade[2])
validacoes = list()
validacoes.append(self.paciente == self.receita.nome)
validacoes.append(date(dia, mes, ano) >= date.today())
validacoes.append(self.paciente.idade >= 18)
return all(validacoes)
def verificar_estoque(self):
if not self.validar_receita():
return False
nome = ''
quantidade = 0
with open("Estoque.txt", "r") as file:
for line in file:
medicamento = line.split(';')
                # check whether this stock line matches the prescribed medication
if medicamento[1] == self.receita.medicamento:
nome = medicamento[1]
                    quantidade = int(medicamento[3].strip())
                    break
        # report whether the prescribed item is available in stock
        return nome != '' and quantidade > 0
def entregar_medicamento(self):
pass
def retirar_item_estoque(self):
pass
|
[
"luisnataan556@gmail.com"
] |
luisnataan556@gmail.com
|
b84badf5453b9da77d6585eaf0168e546e5134ae
|
bd79fcab4cc4914acca2b0c8b13c0917124ef22b
|
/gwmemory_install/waveforms/__init__.py
|
7c2b03b41f0c1e5c49cc360d7efc990437b4f212
|
[] |
no_license
|
TheArctarax/Mumma-project-data
|
14b9cfe667999e5b5f072d6d23edd575aa0e0d65
|
1f7ed64be8cf5dfbf20ea4b16c4b245f2c50ee27
|
refs/heads/master
| 2023-01-01T00:23:46.302217
| 2020-10-17T00:31:58
| 2020-10-17T00:31:58
| 272,562,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
from .base import MemoryGenerator
from . import approximant, mwm, nr, surrogate
|
[
"alvin.li@ligo.org"
] |
alvin.li@ligo.org
|
46aee3666a0496d5b3452ceceaeb8da128d70334
|
6ec91b363b077bffd33f15300a0935124e9fb915
|
/Cracking_the_Code_Interview/Leetcode/1.Array/83.Remove_Duplicates_from_Sorted_List.py
|
a9f8e6c2b167393289ada32678731d6b428df83f
|
[] |
no_license
|
lzxyzq/Cracking_the_Coding_Interview
|
03232515ae8eb50394d46322d36b230d1a626fcf
|
79dee7dab41830c4ff9e38858dad229815c719a0
|
refs/heads/master
| 2023-06-05T19:52:15.595289
| 2021-06-23T22:46:02
| 2021-06-23T22:46:02
| 238,068,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
'''
@Author: your name
@Date: 2020-05-31 21:12:01
@LastEditTime: 2020-05-31 21:13:10
@LastEditors: your name
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/Array/83.Remove_Duplicates_from_Sorted_List.py
'''
# Given a sorted linked list, delete all duplicates such that each element appear only once.
''' Example 1:
Input: 1->1->2
Output: 1->2
Example 2:
Input: 1->1->2->3->3
Output: 1->2->3
'''
class Solution:
def deleteDuplicates(self, head):
current = head
while current and current.next:
if current.next.val == current.val:
current.next = current.next.next
else:
current = current.next
return head
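# Minimal demo. ListNode is assumed to follow the usual LeetCode definition; it is
# not part of this file:
if __name__ == "__main__":
    class ListNode:
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next

    head = ListNode(1, ListNode(1, ListNode(2, ListNode(3, ListNode(3)))))
    node = Solution().deleteDuplicates(head)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected output: [1, 2, 3]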
|
[
"lzxyzq@gmail.com"
] |
lzxyzq@gmail.com
|
d86206d066645cd8abe5f89fee940a84256ca9f5
|
d9c4257225ec78fa89f5fffdb831078baa7a6831
|
/generate_googleip.py
|
43417a5e18c6a51d36d578990b4a9a8051f5a13f
|
[] |
no_license
|
bygloam/checkiptools
|
e892be612748411db762029447f0e46b50fabbcf
|
0a0b9e6511f7b718142beabb3096b5e363917307
|
refs/heads/master
| 2021-01-18T03:50:23.526187
| 2015-09-28T06:38:09
| 2015-09-28T06:38:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,947
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import netaddr
import ip_utils
# generate_ip_range   merge the IP ranges, drop blacklisted ranges and sort the result
# test_load           count how many IPs the result contains
# ip_range_to_cidrs   convert IP ranges to CIDR notation
# Supported input format:
# # comment (everything after # is a comment)
#
# range separator:
# ranges may be separated by a comma (,), a pipe (|) or a newline
#
# single range formats:
# "xxx.xxx.xxx-xxx.xxx-xxx"  (range)
# "xxx.xxx.xxx."             (prefix)
# "xxx.xxx.xxx.xxx/xx"       (netmask / CIDR)
# "xxx.xxx.xxx.xxx"          (single ip)
# (see the illustrative example after parse_range_string below)
# file containing the original list of IP ranges
f = open("ip_original_list.txt")
input_good_range_lines = f.read()
# blacklist of IP ranges
input_bad_ip_range_lines = """
45.64.20.0/24 #中国澳门
58.205.224.0/24 #湖北省武汉市华中科技大学教育网无线校园项目
58.205.224.0/24 #中国 湖北 武汉 华中科技大学
58.240.77.0/24 #中国 江苏 南京
58.240.77.0/24 #江苏省南京市联通
59.78.209.0/24 #中国 上海 上海
59.78.209.0/24 #上海市腾讯公司教育网节点
101.198.128.0/19 #北京市奇虎360科技有限公司
103.7.28.0/22 #香港腾讯公司数据中心
110.75.151.0/24 #中国 浙江 杭州
111.30.128.0/24 #中国 天津 天津
111.30.136.0/24 #中国 天津 天津
111.30.139.0/24 #中国 天津 天津
111.30.140.0/24 #中国 天津 天津
115.159.0.0/24 #中国 上海 上海
119.28.0.0/16 #香港北京康盛新创科技有限责任公司
119.29.0.0/16 #广东省广州市海珠区腾讯云服务器(广州市海珠区新港中路397号TIT创意园)
119.29.0.0/24 #中国 广东 广州
119.29.17.0/24 #中国 广东 广州
119.57.55.0/24 #中国 北京 北京
119.57.55.0/24 #北京市东四IDC机房
119.147.146.0/24 #中国 广东 东莞
121.51.0.0/24 #中国 广东
121.194.0.0/24 #中国 北京 北京
121.195.178.0/24 #中国 北京 北京
124.160.89.0/24 #中国 浙江 杭州
130.211.0.0/16 #用了会出现错误
180.93.32.0/24 #越南 CZ88.NET, 用了会出现错误
180.96.70.0/23 #江苏省南京市电信
180.149.61.0/24 #印度NKN Research Institutes, 用了会出现错误
180.150.1.0/24 #澳大利亚 CZ88.NET, 用了会出现错误
180.188.250.0/24 #印度 CZ88.NET, 用了会出现错误
182.254.0.0/24 #中国 广东 广州
202.69.26.0/24 #中国 广东 深圳
202.86.162.0/24 #中国 澳门
202.106.93.0/24 #中国 北京 北京
203.195.128.0/24 #中国 广东 广州
203.208.32.0/24 #中国 北京 北京 GOOGLE
203.208.40.0/24 #中国 北京 北京 GOOGLE
203.208.41.0/24 #中国 北京 北京 GOOGLE
203.208.48.0/24 #中国 北京 北京 GOOGLE
203.208.49.0/24 #中国 北京 北京 GOOGLE
203.208.50.0/24 #中国 北京 北京 GOOGLE
203.208.52.0/24 #中国 北京 北京 GOOGLE
203.205.128.0/19 #香港腾讯公司数据中心
216.58.196.0/24 #有问题216段
216.58.208.0/20 #有问题216段
255.255.255.255/32 #for algorithm
"""
def print_range_list(ip_range_list):
for ip_range in ip_range_list:
begin = ip_range[0]
end = ip_range[1]
print ip_utils.ip_num_to_string(begin), ip_utils.ip_num_to_string(end)
def parse_range_string(input_lines):
ip_range_list = []
ip_lines_list = re.split("\r|\n", input_lines)
for raw_line in ip_lines_list:
raw_s = raw_line.split("#")
context_line = raw_s[0]
context_line = context_line.replace(' ', '')
ips = re.split(",|\|", context_line)
for line in ips:
if len(line) == 0:
#print "non line:", line
continue
begin, end = ip_utils.split_ip(line)
if ip_utils.check_ip_valid(begin) == 0 or ip_utils.check_ip_valid(end) == 0:
print("ip format is error,line:%s, begin: %s,end: %s" % (line, begin, end))
continue
nbegin = ip_utils.ip_string_to_num(begin)
nend = ip_utils.ip_string_to_num(end)
ip_range_list.append([nbegin,nend])
#print begin, end
ip_range_list.sort()
return ip_range_list
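# Illustrative example (assuming ip_utils.split_ip expands "a.b.c.d/24" to the first
# and last address of the block, as the supported-format comment at the top describes):
#   parse_range_string("1.9.22.0/24 # some comment")
#   would return a single [begin, end] pair of numeric IPs covering
#   1.9.22.0 - 1.9.22.255, already sorted and ready for merge_range() / filter_ip_range().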
def merge_range(input_ip_range_list):
output_ip_range_list = []
range_num = len(input_ip_range_list)
last_begin = input_ip_range_list[0][0]
last_end = input_ip_range_list[0][1]
for i in range(1,range_num):
ip_range = input_ip_range_list[i]
begin = ip_range[0]
end = ip_range[1]
#print "now:",ip_utils.ip_num_to_string(begin), ip_utils.ip_num_to_string(end)
if begin > last_end + 2:
#print "add:",ip_utils.ip_num_to_string(begin), ip_utils.ip_num_to_string(end)
output_ip_range_list.append([last_begin, last_end])
last_begin = begin
last_end = end
else:
print "merge:", ip_utils.ip_num_to_string(last_begin), ip_utils.ip_num_to_string(last_end), ip_utils.ip_num_to_string(begin), ip_utils.ip_num_to_string(end)
if end > last_end:
last_end = end
output_ip_range_list.append([last_begin, last_end])
return output_ip_range_list
def filter_ip_range(good_range, bad_range):
out_good_range = []
bad_i = 0
bad_range_num = len(bad_range)
bad_begin, bad_end = bad_range[bad_i]
for good_begin, good_end in good_range:
while True:
if good_begin > good_end:
print("bad good ip range when filter:%s-%s" % (ip_utils.ip_num_to_string(good_begin), ip_utils.ip_num_to_string(good_end)))
if good_end < bad_begin:
# case:
# [ good ]
# [ bad ]
out_good_range.append([good_begin, good_end])
break
elif bad_end < good_begin:
# case:
# [ good ]
# [ bad ]
bad_i += 1
bad_begin, bad_end = bad_range[bad_i]
continue
elif good_begin <= bad_begin and good_end <= bad_end:
# case:
# [ good ]
# [ bad ]
print("cut bad ip case 1:%s - %s" % (ip_utils.ip_num_to_string(bad_begin), ip_utils.ip_num_to_string(good_end)))
if bad_begin - 1 > good_begin:
out_good_range.append([good_begin, bad_begin - 1])
break
elif good_begin >= bad_begin and good_end >= bad_end:
# case:
# [ good ]
# [ bad ]
print("cut bad ip case 2:%s - %s" % (ip_utils.ip_num_to_string(good_begin), ip_utils.ip_num_to_string(bad_end)))
good_begin = bad_end + 1
bad_i += 1
bad_begin, bad_end = bad_range[bad_i]
continue
elif good_begin <= bad_begin and good_end >= bad_end:
# case:
# [ good ]
# [ bad ]
out_good_range.append([good_begin, bad_begin - 1])
print("cut bad ip case 3:%s - %s" % (ip_utils.ip_num_to_string(bad_begin), ip_utils.ip_num_to_string(bad_end)))
good_begin = bad_end + 1
bad_i += 1
bad_begin, bad_end = bad_range[bad_i]
continue
elif good_begin >= bad_begin and good_end <= bad_end:
# case:
# [good]
# [ bad ]
print("cut bad ip case 4:%s - %s" % (ip_utils.ip_num_to_string(good_begin), ip_utils.ip_num_to_string(good_end)))
break
else:
print("any case?")
return out_good_range
def generate_ip_range():
print("\nGood ip range:")
ip_range_list = parse_range_string(input_good_range_lines)
ip_range_list = merge_range(ip_range_list)
print("\nBad ip range:")
bad_range_list = parse_range_string(input_bad_ip_range_lines)
bad_range_list = merge_range(bad_range_list)
print("\nCut Bad ip range:")
ip_range_list = filter_ip_range(ip_range_list, bad_range_list)
print("\nOutput ip range")
print_range_list(ip_range_list)
# write out
fd = open("googleip.txt", "w")
for ip_range in ip_range_list:
begin = ip_range[0]
end = ip_range[1]
#print ip_utils.ip_num_to_string(begin), ip_utils.ip_num_to_string(end)
fd.write(ip_utils.ip_num_to_string(begin)+ "-" + ip_utils.ip_num_to_string(end)+"\n")
fd.close()
generate_ip_range()
# Count the number of IPs; source code taken from XX-Net
def test_load():
print("\nBegin test load googleip.txt")
fd = open("googleip.txt", "r")
if not fd:
print "open googleip.txt fail."
exit()
amount = 0
for line in fd.readlines():
if len(line) == 0 or line[0] == '#':
continue
begin, end = ip_utils.split_ip(line)
nbegin = ip_utils.ip_string_to_num(begin)
nend = ip_utils.ip_string_to_num(end)
num = nend - nbegin
amount += num
print ip_utils.ip_num_to_string(nbegin), ip_utils.ip_num_to_string(nend), num
fd.close()
print "amount ip:", amount
test_load()
# Convert IP ranges (requires netaddr): turn xxx.xxx.xxx.xxx-xxx.xxx.xxx.xxx into xxx.xxx.xxx.xxx/xx
def ip_range_to_cidrs():
ip_lists = []
ip_lists_2 = []
ip_range = open('googleip.txt')
for x in ip_range:
sline = x.strip().split('-')
ip_lists.append(sline)
for ip_line in ip_lists:
cidrs = netaddr.iprange_to_cidrs(ip_line[0], ip_line[1])
for k, v in enumerate(cidrs):
iplist = v
ip_lists_2.append(iplist)
#print ip_lists_2
fd = open('googleip.ip.txt', 'w')
for ip_cidr in ip_lists_2:
#print ip_cidr
fd.write(str(ip_cidr) + "\n")
fd.close()
ip_range_to_cidrs()
|
[
"x19960620"
] |
x19960620
|
8e8d199f66b95bc37ea3a43acad4e07b5f58432a
|
b4b232b43fe6e78958fd7acc89bf4ad3cf952e14
|
/hw1bp6.py
|
946ca62e1e6ac458207c2385cdd345ff1ecbfb43
|
[] |
no_license
|
tsbertalan-homework/holmes323hw1b
|
0e174fa218d89a4a617e5b7fb1f666c5ec203cf6
|
222d90f58668bd2f110032ceec48e620290551de
|
refs/heads/master
| 2020-05-30T23:10:44.152141
| 2014-02-25T18:18:51
| 2014-02-25T18:18:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,970
|
py
|
'''
Created on Feb 24, 2014, 3:58:19 PM
@author: bertalan
'''
import numpy as np
import matplotlib.pyplot as plt
from Integrators import integrate, logging
def FitzhughNagumo(Iapp):
'''This closure gives the RHS function for a particular applied current value.'''
def dXdt(X, t):
'''t is not used (the ODE is autonomous)'''
v = X[0]
r = X[1]
tv = .1
tr = 1
dvdt = (v - v**3 / 3 - r + Iapp) / tv
drdt = (-r + 1.25 * v + 1.5) / tr
return np.array([dvdt, drdt])
return dXdt
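# For reference, dXdt above is the FitzHugh-Nagumo system
#     tau_v * dv/dt = v - v**3/3 - r + Iapp
#     tau_r * dr/dt = -r + 1.25*v + 1.5
# with tau_v = 0.1 and tau_r = 1 hard-coded inside the closure.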
fig = plt.figure()
Iapps = 0, 0.9, 1.01, 1.5, 2.05, 2.1
# for Iapp = 0, the fixed point is (-3/2, -3/8)
axes = []
for i in range(6):
axes.append(fig.add_subplot(2, 3, i+1))
for ax, Iapp in zip(axes, Iapps):
logging.info('Iapp=%f' % Iapp)
dXdt = FitzhughNagumo(Iapp)
ax.axhline(0, color='red')
ax.axvline(0, color='red')
ax.scatter(-3./2, -3./8, color='blue')
for v0 in np.linspace(-2, 2.5, 8):
# logging.info('v0=%s' % str(v0))
for r0 in np.linspace(-1, 3.5, 8):
X0 = v0, r0
logging.info('X0=%s' % str(X0))
# Uncomment these two lines to verify the Euler solution with RK4:
#X, T = integrate(dXdt, .01, 0, 10.0, X0, method='rungekutta')
#ax.plot(X[0,:], X[1,:], 'r.')
X, T = integrate(dXdt, .01, 0, 10.0, X0, method='euler')
ax.plot(X[0,:], X[1,:], 'k')
ax.scatter(X[0,0], X[1,0], color='green') # the initial condition...
ax.scatter(X[0,-1], X[1,-1], color='red') # ...and the final point
ax.set_title('$I_\mathrm{app} = %.2f$' % Iapp)
for i in 0, 1, 2:
axes[i].set_xticks([])
for i in 1, 2, 4, 5:
axes[i].set_yticks([])
for i in 0, 3:
axes[i].set_ylabel('$r(t)$')
for i in 3, 4, 5:
axes[i].set_xlabel('$v(t)$')
for ax in axes:
ax.set_xlim(-3, 3)
ax.set_ylim(-2, 4)
ax.legend()
fig.savefig('hw1bp6-flows.pdf')
plt.show()
|
[
"tom@tombertalan.com"
] |
tom@tombertalan.com
|
908ca3d0e98cfce504182b72c1cda8b7034148f1
|
ce972e94fcdf19d6809d94c2a73595233d1f741d
|
/catkin_ws/devel/lib/python3/dist-packages/tf2_msgs/msg/_TFMessage.py
|
7c572a7bef8e93d5b177fa3298b0e13c1f0b6527
|
[] |
no_license
|
WilliamZipanHe/reward_shaping_ttr
|
cfa0e26579f31837c61af3e09621b4dad7eaaba2
|
df56cc0153147bb067bc3a0eee0e1e4e1044407f
|
refs/heads/master
| 2022-02-23T05:02:00.120626
| 2019-08-07T21:52:50
| 2019-08-07T21:52:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
/local-scratch/xlv/catkin_ws/devel/.private/tf2_msgs/lib/python3/dist-packages/tf2_msgs/msg/_TFMessage.py
|
[
"xlv@cs-mars-01.cmpt.sfu.ca"
] |
xlv@cs-mars-01.cmpt.sfu.ca
|
79045d0e08c92e2a3c5caab247a184ca753ba7e7
|
10f047c7631b3aad90c7410c567c588993bfa647
|
/PythonDispersal/src/testingTheequations/testingOneandMxRnk.py
|
fd3836064bed75df86f5c4fa20d5f02e4d5e77cc
|
[] |
no_license
|
ruthubc/ruthubc
|
ee5bc4aa2b3509986e8471f049b320e1b93ce1d5
|
efa8a29fcff863a2419319b3d156b293a398c3a9
|
refs/heads/master
| 2021-01-24T08:05:40.590243
| 2017-08-30T01:37:56
| 2017-08-30T01:37:56
| 34,295,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
'''
Created on Mar 18, 2015
@author: Ruth
'''
med_rnk = 50.0
numJuvs = 100.0
inputSlope = 2.0
xbr = 0.5
slp = inputSlope/numJuvs
oneRnk = ((-1 + (med_rnk * slp) + xbr) /slp) # The max rank where everyone gets 1 (max) food
mxRnk = (((med_rnk * slp) + xbr) / slp) # the max rank that receives food
print "one rank", oneRnk
print "max rank", mxRnk
|
[
"rvsharpe.ubc@gmail.com@69186f74-b5a6-b451-f1d6-44d06303a42d"
] |
rvsharpe.ubc@gmail.com@69186f74-b5a6-b451-f1d6-44d06303a42d
|
5bd256c007d5edd5a6b9ae55770467ce2b973d6e
|
99c04a62991acb6149c719baed073d64999c8275
|
/test/pos.py
|
be997fd354ea8a1dae104af2546b4c3de1d9741c
|
[] |
no_license
|
SaraBazargan/SpellChecker
|
924e6dddd456cba57df5318c4de247e57deffb1b
|
22866d806647973a12738fb8177efe44d3021339
|
refs/heads/master
| 2020-07-07T16:59:15.974046
| 2019-08-20T16:30:11
| 2019-08-20T16:30:11
| 203,414,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,601
|
py
|
import codecs
import lxmls.sequences.hmm as hmmc
import simple_sequence as sq
simple=sq.SimpleSequence()
hmm = hmmc.HMM(simple.x_dict, simple.y_dict)
hmm.train_supervised(simple.train,smoothing=0.0000001)
'''
print "Initial Probabilities:" , hmm.initial_probs
print "Transition Probabilities:" , hmm.transition_probs
print "Final Probabilities:" , hmm.final_probs
print "Emission Probabilities" , hmm.emission_probs
initial_scores, transition_scores, final_scores, emission_scores = hmm.compute_scores(simple.train.seq_list[0])
print 'initial_scores=' , initial_scores
print 'transition_scores' , transition_scores
print 'final_scores' , final_scores
print 'emission_scores' ,emission_scores
import numpy as np
#from lxmls.sequences.log_domain import *
a = np.random.rand(10)
print np.log(sum(np.exp(a)))
log_likelihood, forward = hmm.decoder.run_forward(initial_scores, transition_scores,final_scores, emission_scores)
print 'Log-Likelihood =', log_likelihood
state_posteriors, _, _ = hmm.compute_posteriors(initial_scores,transition_scores,final_scores,emission_scores)
print state_posteriors[0]
print len(state_posteriors)
y_pred = hmm.posterior_decode(simple.test.seq_list[0])
print len(simple.test.seq_list)
print "Prediction test 0:", y_pred
print "Truth test 0:", simple.train.seq_list[0]
log_likelihood, backward = hmm.decoder.run_backward(initial_scores, transition_scores, final_scores, emission_scores)
#print 'Log-Likelihood =', log_likelihood
'''
Err=[]
a=float(len(simple.test.seq_list))
print 'Number Of Characters For Test :',int(a)
output = ''
#h=0
#q=0
#count=0
for e in range(0,len(simple.test.seq_list)):
y_pred, score = hmm.viterbi_decode(simple.test.seq_list[e])
#print y_pred
#print "Viterbi decoding Prediction test 0 with smoothing:", y_pred, score
#h=h+1
#x=str(simple.test.seq_list[e])
y=str(y_pred)
#count =count +1
#x1=x.rstrip().split(' ')
y1=y.rstrip().split(' ')
#for u in range(0,len(x1)):
# tmp1=x1[u].split('/')
# tmp1=str(tmp1[1]).rstrip().split('@')
tmp2=y1[0].split('/')
tmp2=str(tmp2[1]).rstrip().split('@')
if tmp2[0] == '_':
output = output + ' '
else:
output = output + tmp2[0]
fp = codecs.open('output.txt', 'w', 'utf8')  # codecs.open takes the encoding as its third argument
fp.write(output)
fp.close()
# if tmp1[1]==tmp2[1]:
# q=q+1
#print x1[u].split('/'),y1[u].split('/')
#print 'q=',q
#print 'count=',count
#print y
#print x[0],x[6]
#print 'Mean_Err_Correcting typos10 without a dictionary=',1-(float(q)/count)
#print Err
|
[
"sara.bazargan@gmail.com"
] |
sara.bazargan@gmail.com
|
768845d0f820cd84af0e1e5bef463ee48cb0ca5c
|
f441552f25387a2d6be4052fa54fb83d1ef0de68
|
/make_test_valid.py
|
8a66b57f3e5c5e54258051623141d892c3f2e5a3
|
[] |
no_license
|
knkumar/find_mentions
|
e7d3138c47dc420204a4740fb359d5864b726c9d
|
4fddcdae0621a078864f70f4f98f9d906017af70
|
refs/heads/master
| 2021-01-21T12:39:28.209813
| 2012-03-27T21:56:21
| 2012-03-27T21:56:21
| 2,906,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,851
|
py
|
#L645 Final Project
#Dec 24 2011
#This file writes the "test file" with the mention/coref information as input to the scorer
# The way we do this is to mark every start with (0 and every end with 0)
# if there are more than 2 coref-spans starting on the same word, then the longer span start first followed by | - (0|(0 and end with 0) or 0)|0)
import re
import pickle
import numpy as np
out = open("../conll_st_Data/test/final_test.txt","wb")
class mention_frame:
def __init__(self, number, sent, pos, bracket, nps_finder):
self.sent_number = number
self.sent = sent
self.pos_tags = pos
self.bracket = bracket
self.nps = nps_finder
def get_nps(self):
ret_np = {}
def insert_nps(key, val):
ret_np[key] = val
map(lambda key: insert_nps(key,map(lambda x,y,z: [x,x+y,z], self.nps[key][0],self.nps[key][1],self.nps[key][2])), self.nps.keys())
return ret_np
def get_sent(self,start,end):
return ' '.join(self.sent[start:end])
def get_sent_full(self):
return ' '.join(self.sent)
#insert the tokens into the dictionary
def insert_tokens(sent_feat,token, num):
if num in sent_feat.keys():
temp = sent_feat[num]
temp.append(token)
sent_feat[num] = temp
else:
sent_feat[num] = [token]
return sent_feat
#extract interesting features from the data
def extract_features(sent_features, tokens):
for i in range(len(tokens)):
sent_features = insert_tokens(sent_features, tokens[i], i)
return sent_features
def flatten_spans(mention_spans,num):
spans = []
for key in mention_spans[num].keys():
for item in mention_spans[num][key]:
if item[0] == item[1]:
spans.append([ item[0], item[1]])
else:
spans.append([ item[0], item[1] ])
return spans
def mark_spans(sent_features, span, coref):
start = sent_features[coref][span[0]]
try:
end = sent_features[coref][span[1]]
except:
span[1] = span[1]-1
end = sent_features[coref][span[1]]
if start==end:
if start == '-':
sent_features[coref][span[0]] = '(0)'
elif re.match(".*0\)",start):
sent_features[coref][span[0]] = start+'(0)'
else:
sent_features[coref][span[0]] = start+'|(0)'
else:
if start == '-':
sent_features[coref][span[0]] = '(0'
elif re.match(".*0\)",start):
sent_features[coref][span[0]] = start+'(0'
else:
sent_features[coref][span[0]] = start+'|(0'
if end == '-':
sent_features[coref][span[1]] = '0)'
elif re.match("\(0.*",end):
sent_features[coref][span[1]] = '0)'+end
else:
sent_features[coref][span[1]] = '0)|'+end
return sent_features[coref]
def mark_all_spans(sent_features, mention_spans, singleton_span, num):
coref = max(sent_features.keys())
spans = flatten_spans(mention_spans,num)
for span in spans:
sent_features[coref] = mark_spans(sent_features, span, coref)
return sent_features
def write_back(sent_features):
#print sent_features
line = {}
values = np.array(sent_features.values())
for i in range(len(sent_features.values()[0])):
out.write(' '.join(values[:,i])+"\n")
out.write("\n")
def mark_coref(lines, mention_span, singleton_span):
# need a dict for every line in the input, enter the coref_column according to the spans for the sent_num in mention_span
sent_features = {}
sent_num = 0
for line in lines:
#for every line in input
tokens = line.strip().split()
#get all the columns for that word
if len(tokens) !=0:
if len(tokens) < 12:
# if this is a new sentence
## write this as it is :)
out.write(line)
sent_features = {}
continue
sent_features = extract_features(sent_features,tokens)
# first mark all endings
# if coref_column has - insert (0 --- if coref_column has (0 insert |(0
else:
sent_features = mark_all_spans(sent_features, mention_span, singleton_span, sent_num)
write_back(sent_features)
sent_num += 1
sent_features = {}
def main():
#"""
data = open("../conll_st_Data/test/en.finalCoNLL_test.txt","rb")
mention_span = pickle.load(open("result.pkl","rb"))
singleton_span = pickle.load(open("singleton.pkl","rb"))
lines = iter(data.readlines())
sent_features = ['','','']
lines.next()
mark_coref(lines,mention_span,singleton_span) # contains parses for all the sentences
#sent_num : {column_no : [[words],[spans],[nps|args]]}
#pickle.dump(sent_dict,sent_out)
data.close()
"""
test = "(NP(NP**)**)(VP*(ADJP*(PP*(NP***))))*"
ret_np = get_nps(test,"NP")
print ret_np
#"""
if __name__ == "__main__":
main()
|
[
"krankumar@gmail.com"
] |
krankumar@gmail.com
|
ed47896ec418bbdf18a3fc7db9e939b7324fa5ca
|
5cf44be60a9accc2aa8b0cea1c8790f023b10553
|
/build/lib.linux-x86_64-2.7/npmc/atom_class.py
|
e8140144adf3d3aa48c68cbd9a1137b351c65eed
|
[] |
no_license
|
smerz1989/np-mc
|
7ecc36864a1975bde42d0ddf579e8a1bb9666de9
|
3d13104edf1b36f0817928f262a0b9ae40f6bfb1
|
refs/heads/master
| 2023-07-20T03:25:48.891576
| 2023-03-07T20:55:58
| 2023-03-07T20:55:58
| 37,389,780
| 0
| 2
| null | 2023-07-06T21:15:55
| 2015-06-13T22:14:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,224
|
py
|
"""This module contains the Atom class and all the helper functions associated with atoms.
"""
import read_lmp_rev6 as rdlmp
class Atom(object):
"""The Atom class represents an atom described by the LAMMPS full atom style.
Parameters
----------
atomID : int
The unique integer identifier of the atom as specified in the LAMMPS input file.
molID : int
The unique integer identifier of the molecule associated with this atom as specified in the LAMMPS input file.
atomType : int
The integer that identifies the atom type as specified in the LAMMPS input file.
charge : float, optional
The charge on the atom defaults to 0.
position : float vector, optional
A three element vector which represent the X,Y,Z coordinates of the atom. It defaults to a vector of [X=0,Y=0,Z=0].
"""
def __init__(self,atomID,molID,atomType,charge=0.,position=[0.,0.,0.]):
self.atomID = int(atomID)
self.molID = int(molID)
self.atomType = int(atomType)
self.charge = float(charge)
self.position = position
def __eq__(self,other):
if isinstance(other, self.__class__):
return self.atomID == other.atomID
else:
return False
def __neq__(self,other):
return not self.__eq__(other)
def get_pos(self):
return self.position
def get_charge(self):
return self.charge
def get_type(self):
return self.atomType
def get_mol_ID(self):
return self.molID
def get_atom_ID(self):
return self.atomID
def loadAtoms(filename):
"""Loads the atoms from a LAMMPS input file and returns a list of Atom object which represent those atoms.
Parameters
----------
filename : str
The name of the LAMMPS input file which contain the atoms
Returns
-------
Atom List
A list of Atom objects which contains the atom info in the given LAMMPS input file.
"""
atoms = rdlmp.readAtoms(filename)
atom_list = [Atom(atom[0],atom[1],atom[2],atom[3],atom[4:7]) for atom in atoms]
return atom_list
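# Hedged usage sketch (not part of the original module); "nanoparticle.data" is a
# hypothetical LAMMPS full-style data file read by loadAtoms() defined above.
#
# if __name__ == "__main__":
#     atoms = loadAtoms("nanoparticle.data")
#     for atom in atoms[:5]:
#         print(atom.get_atom_ID(), atom.get_type(), atom.get_pos())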
|
[
"stevennmerz@gmail.com"
] |
stevennmerz@gmail.com
|
07669307b0bba93e4d0932b52de1644fa2f27b4f
|
5e24036ad211e48d3563e894706f9ac4e061cb6b
|
/flatdata-py/generator/tree/nodes/trivial/enumeration_value.py
|
25cbf82d8e24defe0542ab18359903f5848dcdc1
|
[
"Apache-2.0"
] |
permissive
|
tianchengli/flatdata
|
b7a02d23cf8bb5e84e8e1b01d2f94874c832876b
|
03ccf3669254ddef171d23a287c643dcd28650d2
|
refs/heads/master
| 2020-04-29T15:49:02.229669
| 2019-03-17T21:34:51
| 2019-03-17T21:34:51
| 176,240,604
| 0
| 0
|
Apache-2.0
| 2019-03-18T08:50:36
| 2019-03-18T08:50:35
| null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
from generator.tree.nodes.node import Node
class EnumerationValue(Node):
def __init__(self, name, value, properties=None):
super(EnumerationValue, self).__init__(name=name, properties=properties)
self._value = value
@staticmethod
def create(properties, value):
result = EnumerationValue(name=properties.name, properties=properties, value=value)
return result
@property
def doc(self):
return self._properties.doc
@property
def value(self):
return self._value
|
[
"noreply@github.com"
] |
tianchengli.noreply@github.com
|
66953d5904633965862c7f85713173bc73e0f6f7
|
0a5a8a2a5ca8156abd04b9ec01f43dac15d6d396
|
/lists.py
|
0fcc574ae9f8424992b72194fc43fed57d77941d
|
[] |
no_license
|
AlexandrTyurikov/rock_paper_scissors_bot
|
1343a095bd877e218b1746991cb3d3304848a548
|
ae0f547fb23cb08dfea4988d50b7b73734b52988
|
refs/heads/master
| 2020-08-23T20:46:11.678830
| 2020-03-12T01:01:19
| 2020-03-12T01:01:19
| 216,704,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,802
|
py
|
from random import choice
def rand_rps():
list_rps = [
"👊🏻",
"✌🏻",
"✋🏻"
]
ran_rps = choice(list_rps)
return ran_rps
def rand_win():
list_win = [
"👍 Ты победил, давай еще❗️",
"👍 Ты справился❗️",
"👍 Я проиграл❗️",
"👍 Я повержен, сыграем вновь❓",
"👍 Ты одержал победу❗️",
"👍 Красавчик, может еще разок❓",
"👍 Твоя взяла, еще?",
"👍 Победа осталась за табой❗️",
"👍 Уделал меня, повторим?",
"👍 Разбил меня в пух и прах❗️",
"👍 Разгромил меня❗️",
"👍 Переиграл меня, может еще❓"
]
ran_win = choice(list_win)
return ran_win
def rand_lose():
list_lose = [
"👎 Я победил, повторим❓",
"👎 Я справился❗️",
"👎 Ты проиграл, давай еще❗️",
"👎 Ты повержен, сыграем вновь❓",
"👎 Я одержал победу❗️",
"👎 Тебе не повезло, может еще разок❓",
"👎 Моя взяла, еще❓",
"👎 Победа осталась за мной❗️",
"👎 Уделал тебя❗️",
"👎 Разгромил тебя, может еще❓",
"👎 Я переиграл тебя❗️",
"👎 Ты продул❗️"
]
ran_lose = choice(list_lose)
return ran_lose
def rand_draw():
list_draw = [
"🤝 Ничья, ходи еще раз❗️",
"🤝 Победила дружба, еще❓",
"🤝 Мир! Может еще разок❓"
]
ran_draw = choice(list_draw)
return ran_draw
def rand_emoji():
list_emoji = [
'😀',
'😃',
'😄',
'😁',
'😆',
'😅',
'🤣',
'😂',
'🙂',
'🙃',
'😉',
'😊',
'😇',
'🥰',
'😍',
'🤩',
'😘',
'😗',
'☺️',
'☺',
'😚',
'😙',
'😋',
'😛',
'😜',
'🤪',
'😝',
'🤑',
'🤗',
'🤭',
'🤫',
'🤔',
'🤐',
'🤨',
'😐',
'😑',
'😶',
'😏',
'😒',
'🙄',
'😬',
'🤥',
'😌',
'😔',
'😪',
'🤤',
'😴',
'😷',
'🤒',
'🤕',
'🤢',
'🤮',
'🤧',
'🥵',
'🥶',
'🥴',
'😵',
'🤯',
'🤠',
'🥳',
'😎',
'🤓',
'🧐',
'😕',
'😟',
'🙁',
'☹️',
'☹',
'😮',
'😯',
'😲',
'😳',
'🥺',
'😦',
'😧',
'😨',
'😰',
'😥',
'😢',
'😭',
'😱',
'😖',
'😣',
'😞',
'😓',
'😩',
'😫',
'🥱',
'😤',
'😡',
'😠',
'🤬',
'😈',
'👿',
'💀',
'☠️',
'☠',
'💩',
'🤡',
'👹',
'👺',
'👻',
'👽',
'👾',
'🤖',
'😺',
'😸',
'😹',
'😻',
'😼',
'😽',
'🙀',
'😿',
'😾',
'🙈',
'🙉',
'🙊',
'🐵',
'🐒',
'🦍',
'🦧',
'🐶',
'🐕',
'🦮',
'🐕🦺',
'🐩',
'🐺',
'🦊',
'🦝',
'🐱',
'🐈',
'🦁',
'🐯',
'🐅',
'🐆',
'🐴',
'🐎',
'🦄',
'🦓',
'🦌',
'🐮',
'🐂',
'🐃',
'🐄',
'🐷',
'🐖',
'🐗',
'🐽',
'🐏',
'🐑',
'🐐',
'🐪',
'🐫',
'🦙',
'🦒',
'🐘',
'🦏',
'🦛',
'🐭',
'🐁',
'🐀',
'🐹',
'🐰',
'🐇',
'🐿️',
'🐿',
'🦔',
'🦇',
'🐻',
'🐨',
'🐼',
'🦥',
'🦦',
'🦨',
'🦘',
'🦡',
'🐾',
'🦃',
'🐔',
'🐓',
'🐣',
'🐤',
'🐥',
'🐦',
'🐧',
'🕊️',
'🕊',
'🦅',
'🦆',
'🦢',
'🦉',
'🦩',
'🦚',
'🦜',
'🐸',
'🐊',
'🐢',
'🦎',
'🐍',
'🐲',
'🐉',
'🦕',
'🦖',
'🐳',
'🐋',
'🐬',
'🐟',
'🐠',
'🐡',
'🦈',
'🐙',
'🐚',
'🐌',
'🦋',
'🐛',
'🐜',
'🐝',
'🐞',
'🦗',
'🕷️',
'🕷',
'🕸️',
'🕸',
'🦂',
'🦟',
'🦠',
'🦀',
'🦞',
'🦐',
'🦑',
'🦪',
]
ran_em = choice(list_emoji)
return ran_em
|
[
"tyur.sh@gmail.com"
] |
tyur.sh@gmail.com
|
10fe56ba44affdbad6d967b5a30aa266284a8baf
|
15c1f0560c3f910fb7c332b2cd0cd9ccdcb230f5
|
/auto-grader/testCode/testhomework2.py
|
2e7d967f958b3791e0f1f19bbd26bf2652d2a5a0
|
[] |
no_license
|
IzayoiNamida/JHU2020_WebSecurity
|
3356ef578dab48981ca601c8c302947047d721d1
|
7ce466eace4f9725124e4db43c56790b3f73fea3
|
refs/heads/master
| 2023-01-29T15:57:48.556969
| 2020-12-14T03:46:18
| 2020-12-14T03:46:18
| 320,150,131
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_manager.chrome import ChromeDriverManager
url = "http://127.0.0.1/sampletest.html"
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get(url)
total = 10
passed = 0
def test(a,b,c):
global passed
driver.find_element_by_id("a").clear()
driver.find_element_by_id("a").send_keys(a)
driver.find_element_by_id("b").clear()
driver.find_element_by_id("b").send_keys(b)
driver.find_element_by_id("bt").click()
val = driver.find_element_by_id("c").text
if int(val) == int(c):
passed = passed + 1
if __name__ == "__main__":
test(1,2,3)
test(3,4,7)
test(6,7,13)
test(10,20,30)
print(passed/total)
|
[
"cuigang@MacBook-Pro.local"
] |
cuigang@MacBook-Pro.local
|
8c0d4280ef7b0ca550d5ae718533d5ce2a45db18
|
2500528e85c5629432c8e16120891eb75c722475
|
/dataset_specific.py
|
8028ba99c37603e4267329763d4e5dce98c3fa1e
|
[
"BSD-3-Clause"
] |
permissive
|
sunnyhardasani/caleydo_server
|
443c1dbdcaafcb48ac5ad356a72c16b26911848e
|
58414e5088ddb668d0e209f24445c7aea7090a1a
|
refs/heads/master
| 2020-09-05T12:22:18.676944
| 2015-09-17T11:02:19
| 2015-09-17T11:02:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,333
|
py
|
__author__ = 'Samuel Gratzl'
import flask
import range as ranges
from caleydo_server.util import jsonify
import caleydo_server.plugin
def asrange(r):
if r is None:
return None
return ranges.parse(r)
def format_json(dataset, range, args):
d = dataset.asjson(range)
if bool(args.get('f_pretty_print', False)):
return jsonify(d, indent=' ')
return jsonify(d)
def format_csv(dataset, range, args):
include_rows = bool(args.get('f_rows', False))
include_cols = bool(args.get('f_cols', False))
delimiter = args.get('f_delimiter',';')
import itertools
def gen():
if include_cols and dataset.type == 'matrix':
cols = dataset.cols(range[1] if range is not None else None)
header = delimiter.join(cols)
elif dataset.type == 'table':
header = delimiter.join([d.name for d in dataset.columns])
else:
header = ''
d = dataset.asnumpy(range)
if include_rows:
rows = dataset.rows(range[0] if range is not None else None)
yield dataset.idtype if dataset.type == 'table' else dataset.rowtype
yield delimiter
yield header
yield '\n'
if include_rows:
#extend with the row ids
for row, line in itertools.izip(rows, d):
yield row
yield delimiter
yield delimiter.join(map(str, line))
yield '\n'
else:
for line in d:
yield delimiter.join(map(str, line))
yield '\n'
return flask.Response(gen(), mimetype='text/csv', headers={'Content-Disposition': 'attachment;filename='+dataset.name+'.csv'})
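# Example request (assumed deployment path, for illustration only):
#   GET /matrix/<dataset_id>/data?format=csv&f_rows=1&f_cols=1&f_delimiter=;
# is routed through data_gen() below to this formatter and streams the matrix
# as CSV with row ids in the first column and column ids in the header line.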
def format_image(dataset, range, args):
format = args.get('format','png')
import scipy.misc
import io
#TODO set a palette to specify colors instead of gray scales
#how to interpolate / sample colors - which space?
minmax = dataset.range
img = scipy.misc.toimage(dataset.asnumpy(range), cmin=minmax[0], cmax=minmax[1])
b = io.BytesIO()
img.save(b, format=format)
b.seek(0)
return flask.send_file(b, mimetype='image/'+format.replace('jpg','jpeg'))
def resolve_formatter(type, format):
for p in caleydo_server.plugin.list(type+'-formatter'):
if p.format == format:
return p.load()
flask.abort(400,'unknown format "{0}" possible formats are: {1}'.format(format, ','.join((p.format for p in caleydo_server.plugin.list(type+'-formatter')))))
def _add_handler(app, dataset_getter, type):
def desc_gen(dataset_id):
d = dataset_getter(dataset_id, type)
return jsonify(d.to_description())
app.add_url_rule('/'+type+'/<dataset_id>','desc_'+type, desc_gen)
def rows_gen(dataset_id):
d = dataset_getter(dataset_id, type)
r = asrange(flask.request.args.get('range',None))
return jsonify(d.rows(r[0] if r is not None else None))
app.add_url_rule('/'+type+'/<dataset_id>/rows','rows_'+type, rows_gen)
def rowids_gen(dataset_id):
d = dataset_getter(dataset_id, type)
r = asrange(flask.request.args.get('range',None))
return jsonify(d.rowids(r[0] if r is not None else None))
app.add_url_rule('/'+type+'/<dataset_id>/rowIds','rowids_'+type, rowids_gen)
def data_gen(dataset_id):
d = dataset_getter(dataset_id, type)
r = asrange(flask.request.args.get('range',None))
formatter = resolve_formatter(type, flask.request.args.get('format','json'))
return formatter(d, r, args=flask.request.args)
app.add_url_rule('/'+type+'/<dataset_id>/data','data_'+type, data_gen)
def add_table_handler(app, dataset_getter):
_add_handler(app, dataset_getter, 'table')
def add_vector_handler(app, dataset_getter):
_add_handler(app, dataset_getter, 'vector')
def add_matrix_handler(app, dataset_getter):
"""
add the handlers for handling a matrix
:param app:
:param dataset_getter:
:return:
"""
_add_handler(app, dataset_getter, 'matrix')
def cols_matrix(dataset_id):
d = dataset_getter(dataset_id, 'matrix')
r = asrange(flask.request.args.get('range',None))
return jsonify(d.cols(r[0] if r is not None else None))
app.add_url_rule('/matrix/<dataset_id>/cols','cols_matrix', cols_matrix)
def colids_matrix(dataset_id):
d = dataset_getter(dataset_id, 'matrix')
r = asrange(flask.request.args.get('range',None))
return jsonify(d.colids(r[0] if r is not None else None))
app.add_url_rule('/matrix/<dataset_id>/colIds','colids_matrix', colids_matrix)
|
[
"samuel_gratzl@gmx.at"
] |
samuel_gratzl@gmx.at
|
4788aecda97b3c91c49bc69c3976cb26b649805f
|
e5d8bf505f6988be4a6c9e65b1d8e6a424bad033
|
/app/main_test.py
|
88ac1bf328d638a7c8b8eaf0d33a65fa086ff260
|
[] |
no_license
|
nyctrn/fastapi-postgres-example
|
0c274728912fd9988d9f4b7db04db32ff15311d0
|
0d1497021bff5beb6c37d9022742e73e91c7f365
|
refs/heads/orm
| 2023-04-28T14:06:27.782104
| 2021-05-15T14:49:14
| 2021-05-15T14:49:14
| 366,810,296
| 0
| 0
| null | 2021-05-12T18:14:23
| 2021-05-12T18:14:23
| null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from data.database import Base
from main import app, get_db
import pytest
SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base.metadata.create_all(bind=engine)
client = TestClient(app)
def override_get_db():
try:
db = TestingSessionLocal()
yield db
finally:
db.close()
app.dependency_overrides[get_db] = override_get_db
def test_read_main():
response = client.get("/items")
assert response.status_code == 200
def test_create_user():
response = client.post(
"/users/",
headers={'Content-Type': 'application/json'},
json={"email": "user@test.com", "password": "pass123"},
)
assert response.status_code == 200
assert response.json() == {
"email": "user@test.com",
"id": 1,
"is_active": True,
"items": []
}
|
[
"argiris@norbloc.com"
] |
argiris@norbloc.com
|
0566f90892410f98c94c99c1f00e2466aee8578f
|
659d44eb7549cfa346a0224edc7559520d776529
|
/ColumnDeriver/__init__.py
|
7c1f598d85fccf352b12ac63e534c78cc5f6ce5f
|
[] |
no_license
|
willsmithorg/gridtools
|
862db6a3573785128e7348dd1332b309fbb17cbb
|
cfe83b0a9b33992c68fd885ae10c5c954314c912
|
refs/heads/master
| 2023-04-18T19:38:34.536593
| 2021-05-03T00:57:48
| 2021-05-03T00:57:48
| 336,426,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50
|
py
|
__all__ = ['Base',
'Len',
]
|
[
"will@willsmith.org"
] |
will@willsmith.org
|
5dcd673fb7d0be9ebfc26aecdd35d14589069ac0
|
8021e122ba6bad9caf3b7ceed25fd3aab93dc2a2
|
/bookexchanger/asgi.py
|
ba5c9ec98c4285ff7add0d6581e887766b66ff35
|
[] |
no_license
|
shubhnagar/bookexchanger
|
55c94b311662e25161f638a0024d1ff8065e34cc
|
354cc75e9f101d99773f110344f89bd5892f2e1a
|
refs/heads/master
| 2022-04-06T14:26:14.407920
| 2020-03-07T19:33:51
| 2020-03-07T19:33:51
| 245,693,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
ASGI config for bookexchanger project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookexchanger.settings')
application = get_asgi_application()
|
[
"nagarshubh2000@gmail.com"
] |
nagarshubh2000@gmail.com
|
f55dc596a671b7e0de6e99d22584ed490c2d2b65
|
f1d48c192ea2fac7736c6d24476dff3ddb34f9be
|
/company/forms.py
|
ce0fbee091135dd8f22bcb5c6b02ac1a6f5e159f
|
[] |
no_license
|
amrithrajvb/EmployeeDjango
|
99e58bca60a14bc99951492df3443908ca384c9e
|
5fd00aca92ef78876a8c8e94bdf605acff497882
|
refs/heads/master
| 2023-07-14T21:34:02.786231
| 2021-09-02T08:01:36
| 2021-09-02T08:01:36
| 401,602,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,155
|
py
|
from django import forms
from django.forms import ModelForm
from company.models import Employee
import re
class EmployeeAddForm(ModelForm):
class Meta:
model=Employee
fields="__all__"
widgets={
"emp_name":forms.TextInput(attrs={"class":"form-control"}),
"department":forms.TextInput(attrs={"class":"form-control"}),
"salary":forms.TextInput(attrs={"class":"form-control"}),
"experience":forms.TextInput(attrs={"class":"form-control"})
}
labels={
"emp_name":"Employee Name"
}
# emp_name=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control"}))
# department=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control"}))
# salary=forms.CharField(widget=forms.NumberInput(attrs={"class":"form-control"}))
# experience=forms.CharField(widget=forms.NumberInput(attrs={"class":"form-control"}))
def clean(self):
cleaned_data=super().clean()
emp_name=cleaned_data["emp_name"]
salary=cleaned_data["salary"]
department=cleaned_data["department"]
experience=cleaned_data["experience"]
x="[a-zA-Z]*"
if int(salary)<0:
msg="invalid salary"
self.add_error("salary",msg)
if int(experience)<0:
msg="invalid experience"
self.add_error("experience",msg)
matcher=re.fullmatch(x,emp_name)
if matcher is not None:
pass
else:
msg="please enter valid employee name"
self.add_error("emp_name",msg)
depmatcher = re.fullmatch(x, department)
if depmatcher is not None:
pass
else:
msg = "please enter valid department name"
self.add_error("department", msg)
class EmployeeChange(ModelForm):
class Meta:
model=Employee
fields="__all__"
widgets={
"emp_name":forms.TextInput(attrs={"class":"form-control"}),
"department":forms.TextInput(attrs={"class":"form-control"}),
"salary":forms.TextInput(attrs={"class":"form-control"}),
"experience":forms.TextInput(attrs={"class":"form-control"})
}
labels={
"emp_name":"Employee Name"
}
# emp_name = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control"}))
# department = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control"}))
# salary = forms.CharField(widget=forms.NumberInput(attrs={"class": "form-control"}))
# experience = forms.CharField(widget=forms.NumberInput(attrs={"class": "form-control"}))
def clean(self):
cleaned_data=super().clean()
salary = cleaned_data["salary"]
experience = cleaned_data["experience"]
if int(salary) < 0:
msg="invalid price"
self.add_error("salary",msg)
if int(experience) < 0:
msg="invalid experience"
self.add_error("experience",msg)
class SearchEmpForm(forms.Form):
emp_name=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control"}))
|
[
"amrithrajvb012@gmail.com"
] |
amrithrajvb012@gmail.com
|
4934988b2db6dae86e095bc58f45321b12ccc344
|
ac10533762ea45494b7f17f9dcb5ed8e83e81aaa
|
/InvoiceSchedule.py
|
6720f4255ddcdb2887ee8ed704fbb20bee962b43
|
[] |
no_license
|
mdabdulkhaliq/workspace
|
9b1916a8863f0ad83331768c039670abbed57b39
|
4bc434e29f3e0b67c8ee5a1b342dd97397737e97
|
refs/heads/master
| 2022-12-06T04:24:52.216773
| 2020-08-20T13:02:59
| 2020-08-20T13:02:59
| 288,977,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,225
|
py
|
from datetime import datetime
from datetime import timedelta
Order_Term_Start_Date = datetime(Quote.EffectiveDate).date()
Order_Term_End_Date_String = Quote.GetCustomField('Order Term End Date').Content
Order_Term_End_Date = datetime.strptime(Order_Term_End_Date_String, '%d/%m/%y').date()
No_Of_Years = (Order_Term_End_Date.year - Order_Term_Start_Date.year)
No_Of_Months = Order_Term_End_Date.month - Order_Term_Start_Date.month
if No_Of_Months < 0:
globals()['No_Of_Years'] -= 1
globals()['No_Of_Months'] = 12 - abs(No_Of_Months)
No_Of_Days = Order_Term_End_Date.day - Order_Term_Start_Date.day
if No_Of_Days < 0:
globals()['No_Of_Months'] = globals()['No_Of_Months'] - 1
globals()['No_Of_Days'] = Order_Term_End_Date.day + 30 - Order_Term_Start_Date.day
Total_No_Of_Months = No_Of_Years * 12 + No_Of_Months
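# Worked example of the date arithmetic above (hypothetical dates): an order term
# from 2020-03-01 to "15/09/22" gives No_Of_Years = 2, No_Of_Months = 6,
# No_Of_Days = 14 and Total_No_Of_Months = 2 * 12 + 6 = 30.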
Invoice_Date = Order_Term_Start_Date
Quote_Items = Quote.Items
Total_Amounts = {"Subscriptions": 0, "Events": 0, "Education Services": 0}
for Item in Quote_Items:
Total_Amounts[Item.CategoryName] = Total_Amounts[Item.CategoryName] + Item.ExtendedAmount
quoteTable = Quote.QuoteTables['Invoice_Details']
quoteTable.Rows.Clear()
if Total_Amounts['Events'] > 0:
invRow = quoteTable.AddNewRow()
invRow['Invoice_Schedule'] = 'Knowledge & Other'
invRow['Invoice_Date'] = "Upon Signature"
invRow['Amount'] = Total_Amounts['Events']
invRow['Estimated_Tax'] = 0
invRow['Grand_Total'] = Total_Amounts['Events']
if Total_Amounts['Education Services'] > 0:
invRow = quoteTable.AddNewRow()
invRow['Invoice_Schedule'] = 'Training Fees'
invRow['Invoice_Date'] = "Upon Signature"
invRow['Amount'] = Total_Amounts['Education Services']
invRow['Estimated_Tax'] = 0
invRow['Grand_Total'] = Total_Amounts['Education Services']
Invoice_Schedule = ''
if No_Of_Months > 0 and No_Of_Days > 0:
globals()['Invoice_Schedule'] = str(No_Of_Months) + ' Months and ' + str(No_Of_Days) + " days Subscription Fee"
elif No_Of_Months > 0:
globals()['Invoice_Schedule'] = str(No_Of_Months) + " Months Subscription Fee"
else:
globals()['Invoice_Schedule'] = str(No_Of_Days) + " Days Subscription Fee"
if Total_Amounts['Subscriptions'] > 0:
if Invoice_Schedule != '':
invRow = quoteTable.AddNewRow()
invRow['Invoice_Schedule'] = Invoice_Schedule
invRow['Invoice_Date'] = "Upon Signature"
invRow['Amount'] = (Total_Amounts['Subscriptions'] / Total_No_Of_Months) * No_Of_Months
invRow['Estimated_Tax'] = 0
invRow['Grand_Total'] = (Total_Amounts['Subscriptions'] / Total_No_Of_Months) * No_Of_Months
for count in range(No_Of_Years):
invRow = quoteTable.AddNewRow()
invRow['Invoice_Schedule'] = 'Annual Subscription Fee'
if count == 0:
invRow['Invoice_Date'] = "Upon Signature"
else:
Invoice_Date = Invoice_Date + timedelta(days=365)
invRow['Invoice_Date'] = Invoice_Date.strftime("%B %d,%Y").ToString()
invRow['Amount'] = (Total_Amounts['Subscriptions'] / Total_No_Of_Months) * 12
invRow['Estimated_Tax'] = 0
invRow['Grand_Total'] = (Total_Amounts['Subscriptions'] / Total_No_Of_Months) * 12
quoteTable.Save()
|
[
"noreply@github.com"
] |
mdabdulkhaliq.noreply@github.com
|
2925395fc52a4049109545beef4c2ae0eb988174
|
ef9ff20d1983b018cb768320f1cf01af0376fe0e
|
/ppmessage/api/handlers/ppaddpredefinedscriptgroup.py
|
743916545473f0af76710312de4fd03c2f92f821
|
[
"Apache-2.0"
] |
permissive
|
Vegisau/ppmessage
|
f3f802903f3fb50b2efed319ca7da46674f97cae
|
d63337bf03f47570a9dd722b1f4c76464bffbc85
|
refs/heads/master
| 2020-04-08T11:01:28.744508
| 2018-11-27T07:01:12
| 2018-11-27T07:01:12
| 159,290,481
| 0
| 0
|
NOASSERTION
| 2018-11-27T07:00:32
| 2018-11-27T07:00:31
| null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.core.constant import API_LEVEL
from ppmessage.db.models import PredefinedScriptGroup
import copy
import uuid
import json
import logging
class PPAddPredefinedScriptGroup(BaseHandler):
def _add(self):
_request = json.loads(self.request.body)
_app_uuid = _request.get("app_uuid")
_group_name = _request.get("group_name")
if _group_name == None or len(_group_name) == 0 or \
len(_group_name) > PredefinedScriptGroup.group_name.property.columns[0].type.length:
self.setErrorCode(API_ERR.NO_PARA)
return
_uuid = str(uuid.uuid1())
_row = PredefinedScriptGroup(uuid=_uuid, app_uuid=_app_uuid, group_name=_group_name)
_row.async_add(self.application.redis)
_row.create_redis_keys(self.application.redis)
_ret = self.getReturnData()
_ret = copy.deepcopy(_request)
_ret.update({"uuid": _uuid})
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPAddPredefinedScriptGroup, self)._Task()
self._add()
return
|
[
"dingguijin@gmail.com"
] |
dingguijin@gmail.com
|
1481ce54c560bf109cb0a252866aa68141fad3cf
|
a49b59bc6e24f8f0e4967e6ad42568c0b4225f01
|
/HotelBedsSimulation.py
|
48f0387e6f9e7efb1fa8a583ffc0492760538165
|
[
"Apache-2.0"
] |
permissive
|
matabares/NaxcaServer
|
5738fa137df76b743832906fbd86bc7a783a85a7
|
be3fd1df1d015f5099e1684d6b5b6309e3aeb45d
|
refs/heads/master
| 2020-04-03T23:15:02.654103
| 2019-07-12T22:48:30
| 2019-07-12T22:48:30
| 155,624,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,166
|
py
|
class HotelBedsSimulation:
def HotelBedsResponse(self, info):
info.send_response(200)
info.send_header('Content-Type', 'text/xml;charset=UTF-8')
info.end_headers()
contentLen = int(info.headers['Content-Length'])
postBody = info.rfile.read(contentLen)
body = str(postBody, "utf-8")
if "HotelValuedAvailRQ" in body:
if "CTG" in body:
file = open(
"providersimulation/hotelBeds/Busqueda_HotelValuedAvailRS.xml",
"r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
else:
file = open("providersimulation/hotelBeds/Search 113715943-808B5D-DFROM_20190301_DTO_20190305_CTO_MDE_HotelValuedAvailRS.xml","r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
if "ServiceAddRQ" in body:
if "MIA" in body:
file = open("providersimulation/hotelBeds/Tarifa mas alta 113933827-F87ED6-DFROM_20190301_DTO_20190305_CTO_MDE_ServiceAddRS.xml", "r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
if "PAR" in body:
file = open("providersimulation/hotelBeds/Cambio de tarifa y descuento 113933827-F87ED6-DFROM_20190301_DTO_20190305_CTO_MDE_ServiceAddRS.xml", "r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
if "BOG" in body:
file = open("providersimulation/hotelBeds/Cambio de tarifa 113933827-F87ED6-DFROM_20190301_DTO_20190305_CTO_MDE_ServiceAddRS.xml", "r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
if "MZL" in body:
file = open("providersimulation/hotelBeds/Cambio de políticas 113933827-F87ED6-DFROM_20190301_DTO_20190305_CTO_MDE_ServiceAddRS.xml", "r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
if "ADZ" in body:
file = open("providersimulation/hotelBeds/Cambio de descuento 113933827-F87ED6-DFROM_20190301_DTO_20190305_CTO_MDE_ServiceAddRS.xml", "r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
if "CTG" in body:
file = open(
"providersimulation/hotelBeds/Habitaciones-ServiceAddRS.xml",
"r", encoding='utf8')
data = file.read()
file.close()
info.wfile.write(bytes(data, 'UTF-8'))
return info
|
[
"cdaniel94@gmail.com"
] |
cdaniel94@gmail.com
|
a3710acc85b2f157b0c4e2eb34b2128a47cdb2d7
|
cf1c2e7420c4432f3b51e421d56a48fd58bbf6cf
|
/main.py
|
d6edbf6bb45648d91ded43319c458785780d777f
|
[
"MIT"
] |
permissive
|
jaggiJ/alpharena
|
aac443c93c84196fdbe820bc60de9e94e8d37591
|
bd5a359fc11be0d9a5f2f0a711caa10ddc3c984d
|
refs/heads/master
| 2020-04-02T21:38:42.013247
| 2018-11-21T13:00:14
| 2018-11-21T13:00:14
| 154,806,451
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,714
|
py
|
import sys
from classes.base_class import Glad
from modules.base_mods import yes_or_no
magic = [{'name': 'Fire', 'cost': 40, 'dmg': 180},
{'name': 'Water', 'cost': 30, 'dmg': 150},
{'name': 'Earth', 'cost': 20, 'dmg': 110},
{'name': 'Air', 'cost': 10, 'dmg': 60},
{'name': 'Heal', 'cost': 30, 'dmg': 100}]
round_number = 1
level = 1
print('You are sitting in the first row of a big arena.\n The crowd is crazy for blood and show, but '
'no one seems to come onto the arena.\n WHOOP.\n You have been pushed down right into the fray'
' by a member of your own family. Soon the gate is opened and a tough peasant charges at you.')
input('Press (Enter)')
""" MAIN LOOP
Player chooses option of attack, then based on actions he took resolution is applied to his
HP and MP and enemy HP until one reaches 0 HP
"""
while True:
""" FIGHT ROUND
Player chooses action and resolution of that action is applied. Round ends with HP summary.
"""
if round_number == 1: # New game generation
player = Glad(460, 60, 60, magic)
enemy = Glad(545 + 5*level, 0, 60, [])
print('=' * 79)
print('Player HP ', player.hp, '| MP', player.mp) # Prints HP MP stats of player & enemy
print('Enemy HP ', enemy.hp, '\n' + '=' * 79)
user_input = player.print_actions() # Print actions and returns correct user input (index of action as integer)
if user_input == 0: # attacking
print('You strike the enemy for ', enemy.reduce_hp(player.attack_damage()),
'he retaliates with ', player.reduce_hp(enemy.attack_damage()))
elif user_input == 1: # casting spell
while True: # user cast spell resolution
print('='*79)
print('Choose your spell:') # Player spells
player.print_magic()
user_input = input()
if user_input not in [str(item) for item in range(len(player.magic))]: # input err handler
continue
user_input = int(user_input)
if player.mp < player.magic[user_input]['cost']:
print('-'*34+'Not enough MP !!!'+'-'*(79-17-35))
break
player.mp -= player.magic[user_input]['cost'] # Decrease player MP by amount cast
if player.magic[user_input]['name'] == 'Heal': # Casting heal spell
print('test OK')
amount_healed = player.spell_damage(user_input)
player.hp += amount_healed
print('Player heals himself for ' + str(amount_healed))
break
print('Player casts', player.magic[user_input]['name'], 'for',
enemy.reduce_hp(player.spell_damage(user_input)),
'damage. Enemy retaliates with',
player.reduce_hp(enemy.attack_damage()))
break
while player.hp == 0 or enemy.hp == 0: # End game conditions
print('The fight lasted %d rounds' % round_number)
if player.hp > 0:
print('The enemy is dead. You have won')
elif enemy.hp > 0:
print('You are dead. You have lost.')
else:
print('The enemy is dead. Who is left to be happy? That is a draw.')
print('=' * 79)
print('Player HP ', player.hp, '| MP', player.mp) # Prints HP MP stats of player & enemy
print('Enemy HP ', enemy.hp, '\n' + '=' * 79)
print('New game? (y)es or (n)o')
new_game = yes_or_no()
if new_game == 'yes':
level += 1
print('Soon the gate is opened and a tougher one charges at you.')
round_number = 0
break
else:
sys.exit()
round_number += 1
|
[
"jagged93@gmail.com"
] |
jagged93@gmail.com
|
c019c47ddd586097167ac1adad4d580e98f8f0b3
|
6fde327fb62be930204d97105453323cb51009fd
|
/list3.py
|
14d4d82bea9e44a5c638e28bb52cb2b9ecc89116
|
[
"MIT"
] |
permissive
|
Kantheesh/Learning-Python
|
ff7551c77972fd788651ad95026786eb1c897bb6
|
d2dc9f1b9f652e6a6d84028e86a1daf77551eb5f
|
refs/heads/master
| 2020-06-04T21:36:24.266039
| 2019-08-13T19:11:53
| 2019-08-13T19:11:53
| 192,200,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
names = ['ajay', 'kiran', 'ramesh']
print(names[1])
# update -> change ajay to vijay
names[0] = 'vijay'
print(names)
# names[3] = "new" # IndexError
|
[
"noreply@github.com"
] |
Kantheesh.noreply@github.com
|
5b7d6b08d473ac8bdbbca0d226f93664b99bb724
|
668fe42b774e7bac84722aae166346436946f6fc
|
/test/test_modify_group.py
|
9fefc2c8959b63b39f45693888a044da651d3f19
|
[
"Apache-2.0"
] |
permissive
|
olgadmitrishena/python_training2
|
38ab84a7245c486057d0e13e0d632d24090179e3
|
0b618f8aab2603c3fa42fdcb4d4bcc3f0a84a1c3
|
refs/heads/master
| 2020-03-21T02:31:17.363059
| 2018-08-20T05:59:17
| 2018-08-20T05:59:17
| 138,002,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
from model.group import Group
from random import randrange
def test_modify_group_name(app):
if app.group.count() == 0:
app.group.create(Group(name="test"))
old_groups = app.group.get_group_list()
index = randrange(len(old_groups))
group = Group(name="New group")
group.id = old_groups[index].id
app.group.modify_group_by_index(index, group)
assert len(old_groups) == app.group.count()
new_groups = app.group.get_group_list()
old_groups[index] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
#def test_modify_group_header(app):
# old_groups = app.group.get_group_list()
# app.group.modify_first_group(Group(header="New header"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
|
[
"olga.dmitrishena@gmail.com"
] |
olga.dmitrishena@gmail.com
|
3e9ab12e3f05521915f005600258b1671d2a2c34
|
2b807998aefb868f0c64c69a8dc9be1fd11a3380
|
/WiCRM/settings/views.py
|
17cfa1320b0e7eaf67e69ec8c0e901d33e730549
|
[] |
no_license
|
WiWhite/WiCRM
|
a7d1b12531f235948645f1dbb2c69a1cb284ab38
|
d85de1112b3cfc6b26b89450551682ea88c0a4fb
|
refs/heads/master
| 2023-04-05T17:20:18.890662
| 2021-04-13T12:27:46
| 2021-04-13T12:27:46
| 333,537,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,767
|
py
|
from smtplib import SMTPException
from django.shortcuts import render, redirect
from django.views.generic import CreateView, View
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.contrib import messages
from django.conf import settings
from django.core.mail import send_mail, get_connection
from .models import *
from .forms import *
from .mixins import CreateDelObjectMixin, CheckConnSaveMixin
from .utils import *
from referral.models import Referrals
from accounts.models import User
class SettingsPersonnel(LoginRequiredMixin, CreateView):
"""
View for displaying, creating, updating and deleting employees.
"""
model = Personnel
form_class = PersonnelForm
template_name = 'settings/settings_staff.html'
success_url = reverse_lazy('settings_staff')
raise_exception = True
def get(self, requests, *args, **kwargs):
owner = User.objects.get(username=self.request.user)
self.object = owner
form = self.form_class(owner=owner)
context = super().get_context_data(**kwargs)
context['staff'] = Personnel.objects.filter(
owner=self.request.user,
dismissal=None
)
context['fields'] = Personnel._meta.fields
context['form'] = form
return render(requests, self.template_name, context)
def post(self, request, *args, **kwargs):
owner = User.objects.get(username=self.request.user)
self.object = owner
self.request.POST = self.request.POST.copy()
self.request.POST['owner'] = f'{owner.pk}'
# staff delete request
delete_pk = self.request.POST.get('delete_pk')
if delete_pk:
staff = self.model.objects.get(pk=delete_pk)
staff.delete()
messages.success(self.request, f'{staff} successfully deleted!')
return redirect('settings_staff')
# staff update request
update_pk = self.request.POST.get('update_pk')
if update_pk:
staff = self.model.objects.get(pk=update_pk)
form = self.form_class(owner.pk, self.request.POST, instance=staff)
if form.is_valid():
form.save()
messages.success(
self.request,
f'{staff} successfully updated!'
)
return redirect('settings_staff')
else:
messages.error(
self.request,
f'{staff} hasn\'t been changed. The data isn\'t correct!'
)
return redirect('settings_staff')
# staff created
form = self.form_class(owner.pk, self.request.POST)
if form.is_valid():
try:
service = EmailService.objects.get(pk=owner.pk)
check_connection(service.__dict__)
form.save()
messages.success(
self.request,
f'{form.cleaned_data["first_name"]} '
f'{form.cleaned_data["last_name"]} successfully created!'
)
staff = Personnel.objects.get(email=form.cleaned_data['email'])
connection = create_connection(service)
body = f'{settings.ALLOWED_HOSTS[0]}:8000/' \
f'registration-referral={staff.referral}'
send_invite(
service.email_login,
[form.cleaned_data['email']],
connection,
body,
)
return redirect(self.request.path)
except SMTPException:
messages.error(
self.request,
'Something is wrong with the connection to the mailing '
'server! Check if the settings are correct on the Email'
' Service tab.'
)
return redirect(self.request.path)
else:
messages.error(
self.request,
f'{form.errors}'
)
return self.form_invalid(form)
class SettingsPositions(LoginRequiredMixin, CreateDelObjectMixin):
"""
View to display, create and delete positions of my employees.
"""
model = Positions
form_class = PositionsForm
template_name = 'settings/settings_positions.html'
success_url = reverse_lazy('settings_positions')
raise_exception = True
class SettingsService(LoginRequiredMixin, CreateDelObjectMixin):
"""
View to display, create and delete the services provided.
"""
model = Services
form_class = ServicesForm
template_name = 'settings/settings_services.html'
success_url = reverse_lazy('settings_services')
raise_exception = True
class SettingsEmailService(CheckConnSaveMixin):
def get(self, request):
try:
obj = EmailService.objects.get(owner=request.user)
form = EmailServiceForm(instance=obj)
context = {
'form': form,
'obj': obj,
}
return render(
request,
'settings/settings_email_service.html',
context
)
except models.ObjectDoesNotExist:
form = EmailServiceForm()
context = {
'form': form,
}
return render(
request,
'settings/settings_email_service.html',
context
)
def post(self, request):
data = self.request.POST.copy()
data['owner'] = self.request.user
form = EmailServiceForm(data)
if self.request.POST.get('update'):
obj = EmailService.objects.get(owner=request.user)
form = EmailServiceForm(data, instance=obj)
if form.is_valid():
self.check_save(request, form)
return redirect(self.request.path)
else:
messages.error(request, f'{form.errors}')
return render(
request,
'settings/settings_email_service.html',
{'form': form}
)
elif form.is_valid():
self.check_save(request, form)
context = {'form': form}
return render(
request,
'settings/settings_email_service.html',
context,
)
else:
messages.error(request, f'{form.errors}')
return render(
request,
'settings/settings_email_service.html',
{'form': form}
)
|
[
"bigsplash1990@gmail.com"
] |
bigsplash1990@gmail.com
|
db23dd07ed6c386f68b59e8625caf8620674e3d0
|
44e8cea8e2c995e82c6e829d724cd74c2c48a3be
|
/prob2.py
|
a564470589063ef90f6bd2d2342c5e65c0fc8030
|
[] |
no_license
|
paulneves77/8.942-HW5
|
84a7f2fa851e77ea91c84f0a45869cf72d1f3954
|
d4c5ca98500788c85ea4db8e7f922fe738929fe1
|
refs/heads/main
| 2022-12-20T02:03:28.744007
| 2020-10-02T07:16:29
| 2020-10-02T07:16:29
| 300,437,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
# -*- coding: utf-8 -*-
import getpass
username = getpass.getuser()
from pathlib import Path
from astropy import constants as const
from astropy import units as u
import numpy as np
from scipy.special import zeta, gamma
# formatting for matplotlib
import matplotlib.pyplot as plt
plt.style.use(Path(f'C:\\Users\\{username}\\Dropbox (MIT)\\Research\\useful_code\\python\\paul_style.mplstyle'))
import matplotlib.ticker as ticker
import addcopyfighandler
plt.close(fig = 'all')
# calc number densities
photon_density = (zeta(3) * gamma(3) * (2.725 * u.K)**3 / np.pi**2)
photon_density = photon_density * const.k_B**3 * const.hbar**-3 * const.c**-3
print(f"photon density = {photon_density.to(u.cm**-3)}")
neutrino_density = 3 / 11* photon_density
print(f"neutrino density = {neutrino_density.to(u.cm**-3)}")
# calc temps
print(f"min temp = {(0.06 * u.eV / const.k_B).to(u.K)}")
print(f"max temp = {(0.12 * u.eV / const.k_B).to(u.K)}")
# neutrino temperature, a, z when became non-relativistic
T_nu_final = (4/11)**(1/3) * 2.725 * u.K
print(f"final neutrino temp = {T_nu_final}")
a_max = T_nu_final/(1390*u.K)
a_min = T_nu_final/(700*u.K)
print(f"max a = {a_max}")
print(f"min a = {a_min}")
z_max = 1/a_min-1
z_min = 1/a_max-1
print(f"max z = {z_max}")
print(f"min z = {z_min}")
print(f"avg z = {np.mean([z_max,z_min])}")
print(f"z unc = {np.ptp([z_max,z_min])}")
# calc min and max omega_nu*h^2
rho_crit = 3*(100 * u.km/u.s/u.Mpc)**2/(8*np.pi*const.G)
print(f"rho crit = {rho_crit.to(u.g*u.cm**-3)}")
prefactor = 112*u.cm**-3/(rho_crit*const.c**2)
print(f"inverse prefactor = {(prefactor**-1).to(u.eV)}")
print(f"neutrino fraction max = {(prefactor*0.06*u.eV).decompose()}")
print(f"neutrino fraction max = {(prefactor*0.12*u.eV).decompose()}")
|
[
"paulneves77@gmail.com"
] |
paulneves77@gmail.com
|
473071e28a906231ae8b197469deb81a91806af8
|
52e32bccdbc85d41d97b81d7ebaf48cea0d783b7
|
/constants.py
|
018c78aa9ef112a3fa1d35c5c276fad84c6b3ca1
|
[] |
no_license
|
Casidi/ParallelPBT
|
996203dee1bdb1f18c847530c3da6e4208f8b2ff
|
82fdaf0954a17d29800881fae6d4293f0920d2da
|
refs/heads/master
| 2020-04-04T10:30:14.843043
| 2018-12-08T05:37:42
| 2018-12-08T05:37:42
| 155,857,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,541
|
py
|
from enum import Enum
from hyperopt import hp
import hyperopt.pyll.stochastic
class WorkerInstruction(Enum):
ADD_GRAPHS = 0
EXIT = 1
TRAIN = 2
GET = 3
SET = 4
EXPLORE = 5
def get_hp_range_definition():
range_def_dict = {
'h_0': [0.0, 1.0], 'h_1': [0.0, 1.0],
'optimizer_list': ['Adadelta', 'Adagrad', 'Momentum', \
'Adam', 'RMSProp', 'gd'],
'lr': {
'Adadelta': [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0],
'Adagrad': [1e-3, 1e-2, 1e-1, 0.5, 1.0],
'Momentum': [1e-3, 1e-2, 1e-1, 0.5, 1.0],
'Adam': [1e-4, 1e-3, 1e-2, 1e-1],
'RMSProp': [1e-5, 1e-4, 1e-3],
'gd': [1e-2, 1e-1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
},
'momentum': [0.00, 0.9],
'grad_decay': [0.00, 0.9],
'decay_steps': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
'decay_rate': [0.1, 1.0],
'weight_decay': [1e-8, 1e-2],
'regularizer': ['l1_regularizer', \
'l2_regularizer', \
'l1_l2_regularizer', \
'None'],
'initializer': ['glorot_normal', \
'orthogonal', \
'he_init',
'None'],
'batch_size': [191]
}
return range_def_dict
def load_hp_space():
range_def = get_hp_range_definition()
space = {
'opt_case':hp.choice('opt_case',
[
{
'optimizer': 'Adadelta',
'lr': hp.choice('lr', range_def['lr']['Adadelta'])
},
{
'optimizer': 'Adagrad',
'lr': hp.choice('lr', range_def['lr']['Adagrad'])
},
{
'optimizer': 'Momentum',
'lr': hp.choice('lr', range_def['lr']['Momentum']),
'momentum': hp.uniform('momentum', \
range_def['momentum'][0], range_def['momentum'][1])
},
{
'optimizer': 'Adam',
'lr': hp.choice('lr', range_def['lr']['Adam'])
},
{
'optimizer': 'RMSProp',
'lr': hp.choice('lr', range_def['lr']['RMSProp']),
'grad_decay': hp.uniform('grad_decay', \
range_def['grad_decay'][0], range_def['grad_decay'][1]),
'momentum': hp.uniform('momentum', \
range_def['momentum'][0], range_def['momentum'][1])
},
{
'optimizer': 'gd',
'lr': hp.choice('lr', range_def['lr']['gd'])
}
]),
'decay_steps': hp.choice('decay_steps', \
range_def['decay_steps']),
'decay_rate': hp.uniform('decay_rate', \
range_def['decay_rate'][0], range_def['decay_rate'][1]),
'weight_decay': hp.uniform('weight_decay', \
range_def['weight_decay'][0], range_def['weight_decay'][1]),
'regularizer': hp.choice('regularizer', \
range_def['regularizer']),
'initializer': hp.choice('initializer', \
range_def['initializer']),
'batch_size': hp.randint('batch_size', range_def['batch_size'][0])
}
space['batch_size'] += 65
return space
def generate_random_hparam():
hp_space = load_hp_space()
return hyperopt.pyll.stochastic.sample(hp_space)
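# --- added usage sketch (not part of the original module): draw a few random configurations
# from the space defined above, e.g. to seed the initial population for parallel PBT.
if __name__ == '__main__':
    for _ in range(3):
        print(generate_random_hparam())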
|
[
"tcfsh22215@gmail.com"
] |
tcfsh22215@gmail.com
|
100e43951fa34d533d26b0d85b60addf2a22611b
|
0a06fe98a9c5938e7f4befbf1d256a19e00adf6c
|
/projects/mdb/rsf/su/rsflab5/SConstruct
|
df7b143604b2d7c43b5a296712765efdcb6dd7c4
|
[] |
no_license
|
chenyk1990/useful-utilities
|
44259cc7ceb8804b9da86133ec81fa9b65096cea
|
55d4a220f8467d666358b2ebff4b5cee1ade33c3
|
refs/heads/master
| 2023-08-15T18:42:55.899015
| 2023-08-10T03:00:13
| 2023-08-10T03:00:13
| 49,804,476
| 17
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,568
|
from rsf.proj import *
# Velocity model
Flow('syncline.asc',None,
'''
echo
0 1100
0 1100
500 1100
1000 1300
2000 1000
2700 1100
3200 1000
4000 1050
n1=2 n2=8 in=$TARGET
data_format=ascii_float
''')
Flow('syncline','syncline.asc',
'''
dd form=native |
spline n1=400 d1=10 o1=0 |
unif2 n1=200 d1=10 v00=2000,3000 |
put label1=Depth unit1=m label2=Distance unit2=m
label=Velocity unit=m/s
''')
Result('syncline',
'''
grey color=j scalebar=y barreverse=y mean=y
screenht=7 screenwd=14 title="Syncline Model"
''')
# Source
Flow('sou',None,'spike n1=2 nsp=2 k1=1,2 mag=1000,20')
# Receivers
Flow('recx','syncline','window n1=1 squeeze=n | math output=x2')
Flow('recz','recx','math output=20')
Flow('rec','recx recz','cat axis=1 ${SOURCES[1]}')
# Wavelet
Flow('wav',None,
'''
spike n1=2200 d1=0.001 k1=200 |
ricker1 frequency=20
''')
# Density
Flow('den','syncline','math output=1')
# Finite-difference modeling
Flow('hseis seis','wav syncline sou rec den',
'''
transp |
awefd2d verb=y free=n snap=y dabc=y jdata=2 jsnap=10
vel=${SOURCES[1]} sou=${SOURCES[2]} rec=${SOURCES[3]} den=${SOURCES[4]}
wfl=${TARGETS[1]} |
window f2=100 | put o2=0 | transp |
put label1=Time label2=Distance unit1=s unit2=m
''')
Plot('seis',
'''
grey gainpanel=a title=Wave
label1=Depth label2=Distance unit1=m unit2=m
''',view=1)
Result('hseis','grey title=Data')
End()
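# Not part of the original SConstruct: with Madagascar installed, running "scons" in this
# directory builds the model, data and figures, and "scons view" displays the Result plots.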
|
[
"chenyk1990@gmail.com"
] |
chenyk1990@gmail.com
|
|
8fb31fd438861607ff4d545d20935ab962ad9aa6
|
7afdb701907c6e1ed983cec7b6fbabb5db4948a7
|
/cmake-build-debug/catkin_generated/pkg.develspace.context.pc.py
|
5c799291511e6728d79cd4f1640930f774563027
|
[] |
no_license
|
kmpchk/hector_quadrotor_swarm
|
09addf613f430a94dc47a8e40feb628ae70c808a
|
e96bdce7c3c94d623f5e320aba34eb58a783d6c0
|
refs/heads/master
| 2020-04-07T06:24:22.025484
| 2018-11-18T23:31:42
| 2018-11-18T23:31:42
| 158,133,693
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "quadrotor_flight"
PROJECT_SPACE_DIR = "/home/optio32/catkin_ws/src/quadrotor_flight/cmake-build-debug/devel"
PROJECT_VERSION = "0.0.0"
|
[
"iregin50"
] |
iregin50
|
382f559a6b0e883aaa56c6502e8f06fe7b0e9a2c
|
29ea573a3cdde125282292948b5868dc7d02d49d
|
/Educated_cinema/child/src/cinema/models.py
|
bf922797e42aadc2a1e488be8afde97546e26e33
|
[] |
no_license
|
EngCoder-Hawraa/CHILD-CINEMA
|
dff3a2c4e679b333982159aec3be3b21e2790040
|
2ca3314a3fb8e67c66a2ee687dfdc19060051651
|
refs/heads/master
| 2022-02-07T03:57:57.055761
| 2019-08-16T03:17:51
| 2019-08-16T03:17:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=150)
def __str__(self):
return self.name
class Cinema(models.Model):
name = models.CharField(max_length=150)
    date = models.DateTimeField(auto_now=True)  # timestamp kept up to date automatically on save
    content = models.TextField(null=False)
    pic = models.ImageField(default='default.jpg', upload_to='profile_pics')
cat = models.ForeignKey(Category, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Meta:
ordering = ('content',)
# python manage.py makemigrations
# python manage.py migrate
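# --- added usage sketch (hypothetical data, not part of the original app): once migrated,
# the models above can be exercised from "python manage.py shell" like this:
# >>> cat = Category.objects.create(name='Animation')
# >>> Cinema.objects.filter(cat__name='Animation').order_by('content')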
|
[
"hawraa arkan"
] |
hawraa arkan
|
643ce15c8585b0ffba4cc46cf62532e5c3935ec6
|
1b211954e6056644f4c2485fed2a87529c272cf6
|
/db.py
|
0404738a1bf40225b3a320875525f5a3fe5e3dc7
|
[] |
no_license
|
shushanfx/quick-stock
|
d75fe1a5f8b6f31263a3950b0877c42de26b32b8
|
0ba6a48135b461d1993d688ba3c8d093a0e09064
|
refs/heads/master
| 2020-03-21T18:05:51.255136
| 2019-02-01T11:22:11
| 2019-02-01T11:22:11
| 138,872,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
import os.path
import codecs
import demjson
def save(stock_id, a_list):
file_path = os.path.join('./data', stock_id + '.json')
return demjson.encode_to_file(file_path, a_list, encoding ='utf-8', overwrite=True, compactly = False)
def fetch(stock_id):
file_path = os.path.join('./data', stock_id + '.json')
return demjson.decode_file(file_path, encoding='utf-8')
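# --- added usage sketch (hypothetical stock id, not part of the original module):
if __name__ == '__main__':
    os.makedirs('./data', exist_ok=True)  # save()/fetch() assume a ./data directory exists
    save('600000', [{'date': '2019-01-31', 'close': 11.32}])
    print(fetch('600000'))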
|
[
"dengjianxin@qiyi.com"
] |
dengjianxin@qiyi.com
|
af5ae2b698dc0367aac55dff558ad448399ff2c2
|
e6b4985e2c492c6cc329ff8490ead0f88642e250
|
/Amazon/array/ Implement strStr().py
|
0e78039d2f5ab3518bb0e28fd8994821496ddf72
|
[] |
no_license
|
rohith788/Leetcode
|
6ded5e80728159391069b688d47eb74e0315b165
|
a86f569fd752fcc4ca15d8fdf0b0c29dd00d614e
|
refs/heads/master
| 2021-07-08T02:49:14.746255
| 2021-03-07T22:08:51
| 2021-03-07T22:08:51
| 228,939,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
if len(needle) == 0: return 0 #if needle is blank
if len(haystack) != 0:
if len(needle) > len(haystack): #if haystack is smaller than needle
return -1
n_len = len(needle)
h_len = len(haystack)
for i in range(h_len):#iterate through haystack
j = 0
if(i + n_len <= h_len): #check if needle is bigger than the rest of haystack
if(haystack[i:(i+n_len)] == needle): return i #check match and return index
return -1
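# --- added quick checks (not part of the original solution):
if __name__ == '__main__':
    s = Solution()
    assert s.strStr("hello", "ll") == 2
    assert s.strStr("aaaaa", "bba") == -1
    assert s.strStr("", "") == 0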
|
[
"srohith788@gmail.com"
] |
srohith788@gmail.com
|
386602ebed8e9d086f216a0cc688279869226701
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/errors/types/invoice_error.py
|
431d0930370e00ac85c01434493a999174a378c9
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"InvoiceErrorEnum",},
)
class InvoiceErrorEnum(proto.Message):
r"""Container for enum describing possible invoice errors.
"""
class InvoiceError(proto.Enum):
r"""Enum describing possible invoice errors."""
UNSPECIFIED = 0
UNKNOWN = 1
YEAR_MONTH_TOO_OLD = 2
NOT_INVOICED_CUSTOMER = 3
BILLING_SETUP_NOT_APPROVED = 4
BILLING_SETUP_NOT_ON_MONTHLY_INVOICING = 5
NON_SERVING_CUSTOMER = 6
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
f80e20e6ce7eef5b226bd559cf2d35b7f2a0700c
|
3b4c766ed91d0b7b5244df0b9cf2b66215e7f280
|
/hello/urls.py
|
272c2b7ea0bfc90962bbcd2253ace604591c16cd
|
[] |
no_license
|
shake551/SNS_wannabe
|
d36c0e0907a7c53476e441cb096b103c15c2c787
|
562adafdf4157fc58d5b10a2a71bfe33baa0918d
|
refs/heads/main
| 2023-02-21T06:55:43.569986
| 2021-01-05T07:00:33
| 2021-01-05T07:00:33
| 324,520,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
from django.urls import path
from . import views
from .views import FriendList
from .views import FriendDetail
urlpatterns = [
path('', views.index, name='index'),
path('<int:num>', views.index, name='index'),
path('create', views.create, name='create'),
path('edit/<int:num>', views.edit, name='edit'),
path('delete/<int:num>', views.delete, name='delete'),
path('list', FriendList.as_view()),
path('detail/<int:pk>', FriendDetail.as_view()),
path('find', views.find, name='find'),
path('check', views.check, name='check'),
path('message/', views.message, name='message'),
path('message/<int:page>', views.message, name='message'),
]
|
[
"hiroki.yamako@ezweb.ne.jp"
] |
hiroki.yamako@ezweb.ne.jp
|
720b0285c77d6be38d36e2c71c5c67d9c734648d
|
4b529326a5ada3a655c38301be781c33669839f5
|
/tsunami_tools.py
|
9233ce158eee338ae81a0591bbdf21f02e276d66
|
[] |
no_license
|
harshita-kaushal/TsunamiGlobe
|
0ab816ee84065dfca73b97e6d3de0652f05b7997
|
d2267fb0e79466fbe5ed6c086d83446be09c169b
|
refs/heads/master
| 2020-03-19T01:24:04.675889
| 2018-05-09T17:30:05
| 2018-05-09T17:30:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,048
|
py
|
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
from matplotlib import pyplot as plt
import matplotlib.colors as mcolor
import matplotlib.animation as manimation
import matplotlib.colorbar as mcolorbar
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.basemap import Basemap
import matplotlib.font_manager as mfont
import scipy as sp
import scipy.integrate   # needed so sp.integrate.quad is available below
import scipy.special     # needed so sp.special.jv (Bessel function) is available below
import argparse
from geographiclib.geodesic import Geodesic as geo
from netCDF4 import Dataset
import os
import read_ETOPO1
import pandas as pd
import json
import itertools
#import quakelib
# --------------------------------------------------------------------------------
class simAnalyzer:
def __init__(self, sim_file_path):
self.save_file_prefix = os.path.splitext(sim_file_path)[0]
#sim_data = np.genfromtxt(sim_file, dtype=[('time','f8'),('lat','f8'),('lon','f8'), ('z','f8'), ('alt','f8')])
self.sim_data = Dataset(sim_file_path, 'r', format='NETCDF4')
# These arrays shouldn't be too big, so go ahead and load them into memory as numpy arrays
self.times = np.array(self.sim_data.variables['time'])
self.lons = np.array(self.sim_data.variables['longitude'])
self.lats = np.array(self.sim_data.variables['latitude'])
#TODO: check for simulation that wraps around int date line.
self.numlons, self.minlon, self.maxlon, self.meanlon = len(self.lons), self.lons.min(), self.lons.max(), self.lons.mean()
self.numlats, self.minlat, self.maxlat, self.meanlat = len(self.lats), self.lats.min(), self.lats.max(), self.lats.mean()
self.dlon = abs(self.lons[1]-self.lons[0])
self.dlat = abs(self.lats[1]-self.lats[0])
height_ncVar = self.sim_data.variables['height']
alt_ncVar = self.sim_data.variables['altitude']
# Calculate locations where we simulated ocean runup
ever_has_water = np.any(height_ncVar, axis=0)
self.sim_inundation_array = np.ma.masked_where(alt_ncVar[0] < 0, ever_has_water)
def make_grid_animation(self, FPS, DPI, zminmax=None, doBasemap=False):
save_file = self.save_file_prefix+"_grid.mp4"
# Keep the data from each time step in netCDF variable form, and slice into it as needed
level_ncVar = self.sim_data.variables['level']
height_ncVar = self.sim_data.variables['height']
alt_ncVar = self.sim_data.variables['altitude']
# Get ranges
N_STEP = len(self.times)
z_min = np.inf
z_max = -np.inf
z_avs = []
for i, levelstep in enumerate(level_ncVar):
masked_data = np.ma.masked_where(height_ncVar[i] == 0.0000, levelstep)
z_min = min(masked_data.min(), z_min)
z_max = max(masked_data.max(), z_max)
z_avs.append(masked_data.mean())
z_max = np.max(np.ma.masked_where(height_ncVar[0] == 0.0000, level_ncVar[0]))
print("min: {}, max: {}, av: {}".format(z_min, z_max, np.array(z_avs).mean()))
if(zminmax != None): z_min,z_max = zminmax
# Initialize movie writing stuff
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='TsunamiSquares', artist='Matplotlib', comment='Animation')
writer = FFMpegWriter(fps=FPS, metadata=metadata, bitrate=1000)
# Initialize the frame and axes
fig = plt.figure()
if not doBasemap:
ax = fig.add_subplot(111)
plt.xlim(self.minlon, self.maxlon)
plt.ylim(self.minlat, self.maxlat)
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
#ax.get_yaxis().get_major_formatter().set_useOffset(False)
else:
m = Basemap(projection='cyl', llcrnrlat=self.minlat, urcrnrlat=self.maxlat,
llcrnrlon=self.minlon, urcrnrlon=self.maxlon, lat_0=self.meanlat, lon_0=self.meanlon, resolution='h')
m.drawmeridians(np.linspace(self.minlon, self.maxlon, num=5.0), labels=[0,0,0,1], linewidth=0)
m.drawparallels(np.linspace(self.minlat, self.maxlat, num=5.0), labels=[1,0,0,0], linewidth=0)
m.drawcoastlines(linewidth=0.5)
m.ax = fig.add_subplot(111)
ax = m.ax
# Colorbar
cmap = plt.get_cmap('Blues_r')
landcolor = 'orange'#'black'#'#FFFFCC'
cmap.set_bad(landcolor, 1.0)
norm = mcolor.Normalize(vmin=z_min, vmax=z_max)
divider = make_axes_locatable(ax)
cbar_ax = divider.append_axes("right", size="5%",pad=0.05)
cb = mcolorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=norm)
framelabelfont = mfont.FontProperties(style='normal', variant='normal', size=14)
plt.figtext(0.95, 0.7, r'water altitude $[m]$', rotation='vertical', fontproperties=framelabelfont)
surface = None
with writer.saving(fig, save_file, DPI):
for index in range(int(N_STEP)):
# Get the subset of data corresponding to current time
this_level = level_ncVar[index]
this_height = height_ncVar[index]
this_alt = alt_ncVar[index]
time = self.times[index]
# Masked array via conditional, don't color the land unless it has water on it
masked_data = np.ma.masked_where(this_height == 0.0000, this_level)
print("step: {} time: {}".format(index, time))
# Plot the surface for this time step
                if surface is None:
                    surface = ax.imshow(masked_data, cmap=cmap, origin='lower', norm=norm, extent=[self.minlon, self.maxlon, self.minlat, self.maxlat], interpolation='none')
else:
surface.set_data(masked_data)
# Text box with the time
plt.figtext(0.02, 0.5, 'Time: {:02d}:{:02d}'.format(int(time/60), int(time%60)), bbox={'facecolor':'yellow', 'pad':5})
writer.grab_frame()
def make_crosssection_animation(self, FPS, DPI):
save_file = self.save_file_prefix+"_crosssection.mp4"
#sim_data is expected to be a netcdf dataset
lons = np.array(self.sim_data.variables['longitude'])
lats = np.array(self.sim_data.variables['latitude'])
# But keep the data from each time step in netCDF variable form, and slice into it as needed
level_ncVar = self.sim_data.variables['level']
height_ncVar = self.sim_data.variables['height']
alt_ncVar = self.sim_data.variables['altitude']
# Get ranges
N_STEP = len(self.times)
z_min = np.inf
z_max = -np.inf
z_avs = []
for i, levelstep in enumerate(level_ncVar):
masked_data = np.ma.masked_where(height_ncVar[i] == 0.0000, levelstep)
z_min = min(masked_data.min(), z_min)
z_max = max(masked_data.max(), z_max)
z_avs.append(masked_data.mean())
print("min: {}, max: {}, av: {}".format(z_min, z_max, np.array(z_avs).mean()))
# Initialize movie writing stuff
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='TsunamiSquares', artist='Matplotlib', comment='Animation')
writer = FFMpegWriter(fps=FPS, metadata=metadata, bitrate=1000)
# Initialize the frame and axes
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlim(self.minlon, self.maxlon)
plt.ylim(z_min, z_max)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
divider = make_axes_locatable(ax)
midlat_index = int(len(self.lats)/2)
# Make array of distances from mean lat/lon
dist_array = []
geod = geo(6371000, 0)
for lon in self.lons:
distance = geod.Inverse(self.lats[midlat_index], lon, self.meanlat, self.meanlon)['s12']
dist_array.append(abs(distance))
dist_array = np.array(dist_array)
sim_plot = ax.plot([], [], 'b')[0]
analytic_plot = ax.plot([], [], 'r')[0]
with writer.saving(fig, save_file, DPI):
for index in range(int(N_STEP)):
time = self.times[index]
print("step: {} time: {}".format(index, time))
analytic_Z = []
for dist in dist_array:
analytic_Z.append(analyticGaussPile(dist, time, 10, 5000, 1000))
# Plot the cross section for this time step
sim_plot.set_data(self.lons, level_ncVar[index][midlat_index])
analytic_plot.set_data(self.lons, analytic_Z)
# Text box with the time
                plt.figtext(0.02, 0.5, 'Time: {:02d}:{:02d}'.format(int(time/60), int(time%60)), bbox={'facecolor':'yellow', 'pad':5})
writer.grab_frame()
def load_historical_runup_CSV(self, csv_file_path, date, fill_bool):
year = date[0]
month = date[1]
day = date[2]
self.fill_bool = fill_bool
print("Loading historical runup for simulation region...")
inund_df = pd.read_csv(csv_file_path, sep='\t')
inund_df = inund_df[inund_df.LATITUDE.notnull()]
inund_df = inund_df[inund_df.LONGITUDE.notnull()]
inund_df = inund_df[inund_df.HORIZONTAL_INUNDATION.notnull()]
self.inund_df = inund_df[((inund_df.YEAR == year) & (inund_df.MONTH == month) & (inund_df.DAY == day) &
(inund_df.LATITUDE >= self.minlat) & (inund_df.LATITUDE <= self.maxlat) &
(inund_df.LONGITUDE >= self.minlon) & (inund_df.LONGITUDE <= self.maxlon) &
(inund_df.HORIZONTAL_INUNDATION > 0))]
# Now create an array to match the simulated inundation array showing observed inundations
obs_histogram, xedge, yedge = np.histogram2d(inund_df.LONGITUDE, inund_df.LATITUDE, bins=[len(self.lons), len(self.lats)],
range=[[self.minlon-self.dlon/2.0, self.maxlon+self.dlon/2.0], [self.minlat-self.dlat/2.0, self.maxlat+self.dlat/2.0]])
#TODO: figure out right orientation of array obs_histogram
obs_histogram = np.flipud(obs_histogram.T)
if self.fill_bool:
# Fill the map with unobserved inundation by "flowing" observed points into nearby squares
obs_array = self._obs_array_filler(obs_histogram)
else:
obs_array = obs_histogram
alt_ncVar = self.sim_data.variables['altitude']
self.obs_inundation_array = np.ma.masked_where(alt_ncVar[0] < 0, (obs_array>0))
def _obs_array_filler(self, obs_histogram):
alt_array = self.sim_data.variables['altitude'][0]
filled_array = np.copy(obs_histogram)
filling_pass_count = 0
did_some_filling = True
print("Filling pass: ")
while did_some_filling:
did_some_filling = False
filling_pass_count += 1
print(filling_pass_count)
for j, i in itertools.product(range(self.numlats), range(self.numlons)):
#I find a square with water:
if filled_array[j][i] > 0:
this_height = alt_array[j][i]
# Look to neighboring squares
                    for vert_ind_mod, horiz_ind_mod in itertools.product([-1, 0, 1], repeat=2):  # every neighbouring offset, including diagonals
# Avoid indexing out of the arrays
if i==0 and horiz_ind_mod == -1: continue
if i==self.numlons-1 and horiz_ind_mod == 1: continue
if j==0 and vert_ind_mod == -1: continue
if j==self.numlats-1 and vert_ind_mod == 1: continue
                        if vert_ind_mod == 0 and horiz_ind_mod == 0: continue  # skip the square itself
# If the neighbor is lower altitude and empty, fill it
if filled_array[j+vert_ind_mod][i+horiz_ind_mod] == 0 and alt_array[j+vert_ind_mod][i+horiz_ind_mod] < this_height:
did_some_filling = True
filled_array[j+vert_ind_mod][i+horiz_ind_mod] = 1
return filled_array
def compare_sim_and_obs_runup(self):
print("Comparing simulation inundation and observation...")
all_inundation_results = np.zeros_like(self.sim_inundation_array.astype(int))
all_inundation_results[np.logical_and(self.sim_inundation_array==0, self.obs_inundation_array==1)] = 1
all_inundation_results[np.logical_and(self.sim_inundation_array==1, self.obs_inundation_array==0)] = 2
all_inundation_results[np.logical_and(self.sim_inundation_array==1, self.obs_inundation_array==1)] = 3
alt_ncVar = self.sim_data.variables['altitude']
self.all_inundation_results = np.ma.masked_where(alt_ncVar[0]<0, all_inundation_results)
plt.close(1)
fig, ax = plt.subplots(num=1)
m = Basemap(projection='cyl',llcrnrlat=self.minlat, urcrnrlat=self.maxlat,
llcrnrlon=self.minlon, urcrnrlon=self.maxlon, lat_0=self.meanlat, lon_0=self.meanlon, resolution='i')
m.drawmeridians(np.linspace(self.minlon, self.maxlon, num=5.0), labels=[0,0,0,1], linewidth=0)
m.drawparallels(np.linspace(self.minlat, self.maxlat, num=5.0), labels=[1,0,0,0], linewidth=0)
#m.drawcoastlines(linewidth=0.5)
cm = mcolor.LinearSegmentedColormap.from_list('custom_cmap', ['gray', 'maroon', 'blue', 'lime'], N=4)
map_ax = m.imshow(self.all_inundation_results, origin='upper', extent=[self.minlon, self.maxlon, self.maxlat, self.minlat], interpolation='nearest', cmap=cm)
cbar = fig.colorbar(map_ax, ticks=[3/8., 9/8., 15/8., 21/8.])
cbar.ax.set_yticklabels(['Dry', 'Miss', 'False\nAlarm', 'Success'])
fill_suffix = ''
if self.fill_bool: fill_suffix = '_filled'
plt.savefig(self.save_file_prefix+'_inundation{}.png'.format(fill_suffix), dpi=100)
def analyticGaussPileIntegrand(k, r, t, Dc, Rc, depth):
dispersion = np.sqrt(9.80665*k*np.tanh(k*depth))
#dispersion = k*np.sqrt(9.80665*depth)
return Dc*Rc**2*k/2*np.cos(dispersion*t)*sp.special.jv(0, k*r)*np.exp(-(k*Rc/2)**2)
def analyticGaussPile(r, t, Dc, Rc, depth):
return sp.integrate.quad(analyticGaussPileIntegrand, 0, 1e3, args=(r, t, Dc, Rc, depth), points=[0, 2e-3])[0]
# k = np.linspace(0, 2e-3, 1e4)
# sumd = np.sum(analyticGaussIntegrand(k, r, t, Dc, Rc, depth))
# return np.diff(k)[0]*sumd
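# --- added note (not in the original source): analyticGaussPile evaluates the linear water-wave
# response to an initial Gaussian hump of height Dc and radius Rc over uniform depth, i.e. the
# Hankel-transform integral
#     eta(r, t) = integral_0^inf (Dc*Rc^2*k/2) * cos(omega(k)*t) * J0(k*r) * exp(-(k*Rc/2)^2) dk,
#     omega(k)  = sqrt(g * k * tanh(k * depth)),
# which is exactly what the quadrature above computes; make_crosssection_animation uses it to
# verify the simulated cross-section against the analytic solution.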
def plot_eq_displacements(disp_file):
save_file = os.path.splitext(disp_file)[0] + "_disp_z.png"
# Read displacement data
    disp_data = np.genfromtxt(disp_file, dtype=[('lat','f8'),('lon','f8'), ('z','f8')], skip_header=3)
# Data ranges
lon_min,lon_max = disp_data['lon'].min(),disp_data['lon'].max()
lat_min,lat_max = disp_data['lat'].min(),disp_data['lat'].max()
mean_lat = 0.5*(lat_min + lat_max)
mean_lon = 0.5*(lon_min + lon_max)
lon_range = lon_max - lon_min
lat_range = lat_max - lat_min
z_min,z_max = disp_data['z'].min(),disp_data['z'].max()
z_lim = max(np.abs(z_min),np.abs(z_max))
cmap = plt.get_cmap('seismic')
LEVELS = np.concatenate((-1*np.linspace(0.01, z_lim, 6)[::-1], np.linspace(0.01, z_lim, 6)))
norm = mcolor.Normalize(vmin=-z_lim, vmax=z_lim)
interp = 'cubic'
landcolor = '#FFFFCC'
framelabelfont = mfont.FontProperties(family='Arial', style='normal', variant='normal', size=14)
# Initialize the frame and axes
fig = plt.figure()
m = Basemap(projection='cyl',llcrnrlat=lat_min, urcrnrlat=lat_max,
llcrnrlon=lon_min, urcrnrlon=lon_max, lat_0=mean_lat, lon_0=mean_lon, resolution='h')
m.ax = fig.add_subplot(111)
m.drawmeridians(np.linspace(lon_min,lon_max,num=5.0),labels=[0,0,0,1], linewidth=0)
m.drawparallels(np.linspace(lat_min,lat_max,num=5.0),labels=[1,0,0,0], linewidth=0)
m.drawcoastlines(linewidth=0.5)
m.fillcontinents(color=landcolor, zorder=0)
# Colorbar
divider = make_axes_locatable(m.ax)
cbar_ax = divider.append_axes("right", size="5%",pad=0.05)
plt.figtext(0.96, 0.7, r'displacement $[m]$', rotation='vertical', fontproperties=framelabelfont)
cb = mcolorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=norm)
# Reshape into matrices
Ncols = len(np.unique(disp_data['lon']))
Nrows = len(np.unique(disp_data['lat']))
X = disp_data['lon'].reshape(Nrows, Ncols)
Y = disp_data['lat'].reshape(Nrows, Ncols)
Z = disp_data['z'].reshape(Nrows, Ncols)
# Masked array via conditional, don't color the land unless it has water on it
zero_below = int(len(LEVELS)/2)-1
zero_above = zero_below+1
masked_data = np.ma.masked_where(np.logical_and(np.array(Z <= LEVELS[zero_above]),np.array(Z >= LEVELS[zero_below])), Z)
# Set masked pixels to the land color
cmap.set_bad(landcolor, 0.0) # set alpha=0.0 for transparent
# Plot the contours
m.contourf(X, Y, masked_data, LEVELS, cmap=cmap, norm=norm, extend='both', zorder=1)
plt.savefig(save_file,dpi=100)
print("Saved to "+save_file)
def plot_eq_disps_horiz(disp_file):
# Read displacement data
disp_data = np.genfromtxt(disp_file, dtype=[('lon','f8'), ('lat','f8'), ('z','f8'), ('eU','f8'), ('nV','f8')])
save_file_prefix = os.path.splitext(disp_file)[0]+"_disp"
# Data ranges
lon_min,lon_max = disp_data['lon'].min(),disp_data['lon'].max()
lat_min,lat_max = disp_data['lat'].min(),disp_data['lat'].max()
mean_lat = 0.5*(lat_min + lat_max)
mean_lon = 0.5*(lon_min + lon_max)
lon_range = lon_max - lon_min
lat_range = lat_max - lat_min
# Reshape into matrices
Ncols = len(np.unique(disp_data['lon']))
Nrows = len(np.unique(disp_data['lat']))
X = disp_data['lon'].reshape(Nrows, Ncols)
Y = disp_data['lat'].reshape(Nrows, Ncols)
Z = disp_data['z'].reshape(Nrows, Ncols)
eU = disp_data['eU'].reshape(Nrows, Ncols)
nV = disp_data['nV'].reshape(Nrows, Ncols)
cmap = plt.get_cmap('seismic')
z_min,z_max = disp_data['z'].min(),disp_data['z'].max()
z_lim = max(np.abs(z_min),np.abs(z_max))
normz = mcolor.Normalize(vmin=-z_lim, vmax=z_lim)
#LEVELSz = np.concatenate((-1*np.logspace(-3, np.log10(z_lim), 6)[::-1], np.logspace(-3, np.log10(z_lim), 6)))
LEVELSz = np.concatenate((-1*np.linspace(0.01, z_lim, 6)[::-1], np.linspace(0.01, z_lim, 6)))
e_min,e_max = disp_data['eU'].min(),disp_data['eU'].max()
e_lim = max(np.abs(e_min),np.abs(e_max))
norme = mcolor.Normalize(vmin=-e_lim, vmax=e_lim)
#LEVELSe = np.concatenate((-1*np.logspace(-3, np.log10(e_lim), 6)[::-1], np.logspace(-3, np.log10(e_lim), 6)))
LEVELSe = np.concatenate((-1*np.linspace(0.01, e_lim, 6)[::-1], np.linspace(0.01, e_lim, 6)))
n_min,n_max = disp_data['nV'].min(),disp_data['nV'].max()
n_lim = max(np.abs(n_min),np.abs(n_max))
normn = mcolor.Normalize(vmin=-n_lim, vmax=n_lim)
#LEVELSn = np.concatenate((-1*np.logspace(-3, np.log10(n_lim), 6)[::-1], np.logspace(-3, np.log10(n_lim), 6)))
LEVELSn = np.concatenate((-1*np.linspace(0.01, n_lim, 6)[::-1], np.linspace(0.01, n_lim, 6)))
interp = 'cubic'
landcolor = '#FFFFCC'
framelabelfont = mfont.FontProperties(family='Arial', style='normal', variant='normal', size=14)
# Initialize the frame and axes
fig = plt.figure(0)
m = Basemap(projection='cyl',llcrnrlat=lat_min, urcrnrlat=lat_max,
llcrnrlon=lon_min, urcrnrlon=lon_max, lat_0=mean_lat, lon_0=mean_lon, resolution='h')
m.ax = fig.add_subplot(111)
m.drawmeridians(np.linspace(lon_min,lon_max,num=5.0),labels=[0,0,0,1], linewidth=0)
m.drawparallels(np.linspace(lat_min,lat_max,num=5.0),labels=[1,0,0,0], linewidth=0)
m.drawcoastlines(linewidth=0.5)
m.fillcontinents(color=landcolor, zorder=0)
# Colorbar
divider = make_axes_locatable(m.ax)
cbar_ax = divider.append_axes("right", size="5%",pad=0.05)
plt.figtext(0.96, 0.7, r'Vertical disp $[m]$', rotation='vertical', fontproperties=framelabelfont)
cbz = mcolorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=normz)
# Masked array via conditional, don't color the land unless it has water on it
zero_below = int(len(LEVELSz)/2)-1
zero_above = zero_below+1
masked_data = np.ma.masked_where(np.logical_and(np.array(Z <= LEVELSz[zero_above]),np.array(Z >= LEVELSz[zero_below])), Z)
# Set masked pixels to the land color
cmap.set_bad(landcolor, 0.0) # set alpha=0.0 for transparent
# Plot the contours
m.contourf(X, Y, masked_data, LEVELSz, cmap=cmap, norm=normz, extend='both', zorder=1)
plt.savefig(save_file_prefix+'_z.png',dpi=100)
# Initialize the frame and axes
fig = plt.figure(1)
m = Basemap(projection='cyl',llcrnrlat=lat_min, urcrnrlat=lat_max,
llcrnrlon=lon_min, urcrnrlon=lon_max, lat_0=mean_lat, lon_0=mean_lon, resolution='h')
m.ax = fig.add_subplot(111)
m.drawmeridians(np.linspace(lon_min,lon_max,num=5.0),labels=[0,0,0,1], linewidth=0)
m.drawparallels(np.linspace(lat_min,lat_max,num=5.0),labels=[1,0,0,0], linewidth=0)
m.drawcoastlines(linewidth=0.5)
m.fillcontinents(color=landcolor, zorder=0)
# Colorbar
divider = make_axes_locatable(m.ax)
cbar_ax = divider.append_axes("right", size="5%",pad=0.05)
plt.figtext(0.96, 0.7, r'East disp $[m]$', rotation='vertical', fontproperties=framelabelfont)
cbe = mcolorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=norme)
# Masked array via conditional, don't color the land unless it has water on it
zero_below = int(len(LEVELSe)/2)-1
zero_above = zero_below+1
masked_data = np.ma.masked_where(np.logical_and(np.array(eU <= LEVELSe[zero_above]),np.array(eU >= LEVELSe[zero_below])), eU)
# Set masked pixels to the land color
cmap.set_bad(landcolor, 0.0) # set alpha=0.0 for transparent
# Plot the contours
m.contourf(X, Y, masked_data, LEVELSe, cmap=cmap, norm=norme, extend='both', zorder=1)
plt.savefig(save_file_prefix+'_e.png',dpi=100)
# Initialize the frame and axes
fig = plt.figure(2)
m = Basemap(projection='cyl',llcrnrlat=lat_min, urcrnrlat=lat_max,
llcrnrlon=lon_min, urcrnrlon=lon_max, lat_0=mean_lat, lon_0=mean_lon, resolution='h')
m.ax = fig.add_subplot(111)
m.drawmeridians(np.linspace(lon_min,lon_max,num=5.0),labels=[0,0,0,1], linewidth=0)
m.drawparallels(np.linspace(lat_min,lat_max,num=5.0),labels=[1,0,0,0], linewidth=0)
m.drawcoastlines(linewidth=0.5)
m.fillcontinents(color=landcolor, zorder=0)
# Colorbar
divider = make_axes_locatable(m.ax)
cbar_ax = divider.append_axes("right", size="5%",pad=0.05)
plt.figtext(0.96, 0.7, r'North disp $[m]$', rotation='vertical', fontproperties=framelabelfont)
cbn = mcolorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=normn)
# Masked array via conditional, don't color the land unless it has water on it
zero_below = int(len(LEVELSn)/2)-1
zero_above = zero_below+1
masked_data = np.ma.masked_where(np.logical_and(np.array(nV <= LEVELSn[zero_above]),np.array(nV >= LEVELSn[zero_below])), nV)
# Set masked pixels to the land color
cmap.set_bad(landcolor, 0.0) # set alpha=0.0 for transparent
# Plot the contours
m.contourf(X, Y, masked_data, LEVELSn, cmap=cmap, norm=normn, extend='both', zorder=1)
plt.savefig(save_file_prefix+'_n.png',dpi=100)
print("Saved to "+save_file)
def bathy_topo_map(LLD_FILE):
save_file = os.path.splitext(LLD_FILE)[0]+'_bathymap.png'
extension = os.path.splitext(LLD_FILE)[1]
# Read bathymetry/topography data
if extension == ".txt":
data = np.genfromtxt(LLD_FILE, dtype=[('lat','f8'),('lon','f8'), ('z','f8')], skip_header=6)
# Reshape into matrices
Ncols = len(np.unique(data['lon']))
Nrows = len(np.unique(data['lat']))
X = data['lon'].reshape(Nrows, Ncols)
Y = data['lat'].reshape(Nrows, Ncols)
Z = data['z'].reshape(Nrows, Ncols)
# Data ranges
lon_min,lon_max = data['lon'].min(),data['lon'].max()
lat_min,lat_max = data['lat'].min(),data['lat'].max()
elif extension == ".nc":
data = Dataset(LLD_FILE, 'r')
# Reshape into matrices
Ncols = len(data['longitude'][:])
Nrows = len(data['latitude'][:])
X, Y = np.meshgrid(data['longitude'][:], data['latitude'][:])
Z = data['altitude'][::]
# Data ranges
lon_min, lon_max = data['longitude'][:].min(), data['longitude'][:].max()
lat_min, lat_max = data['latitude'][:].min(), data['latitude'][:].max()
else:
raise BaseException("Bathymetry file is an unsupported file type")
mean_lat = 0.5*(lat_min + lat_max)
mean_lon = 0.5*(lon_min + lon_max)
lon_range = lon_max - lon_min
lat_range = lat_max - lat_min
cmap = plt.get_cmap('terrain')
interp = 'nearest'
framelabelfont = mfont.FontProperties(family='Arial', style='normal', variant='normal', size=14)
# catch any nan values
masked_data = np.ma.masked_invalid(Z)
cmap.set_bad('red')
# Color limits
z_min,z_max = masked_data.min(),masked_data.max()
z_lim = max(np.abs(z_min),np.abs(z_max))
norm = mcolor.Normalize(vmin=-z_lim, vmax=z_lim)
# Initialize the frame and axes
fig = plt.figure()
m = Basemap(projection='cyl',llcrnrlat=lat_min, urcrnrlat=lat_max,
llcrnrlon=lon_min, urcrnrlon=lon_max, lat_0=mean_lat, lon_0=mean_lon, resolution='h')
m.ax = fig.add_subplot(111)
m.drawmeridians(np.linspace(lon_min,lon_max,num=5.0),labels=[0,0,0,1], linewidth=0)
m.drawparallels(np.linspace(lat_min,lat_max,num=5.0),labels=[1,0,0,0], linewidth=0)
m.drawcoastlines(linewidth=0.5)
# Colorbar
divider = make_axes_locatable(m.ax)
cbar_ax = divider.append_axes("right", size="5%",pad=0.05)
plt.figtext(0.96, 0.7, r'elevation $[m]$', rotation='vertical', fontproperties=framelabelfont)
cb = mcolorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=norm)
# Plot the contours
#m.contourf(X, Y, masked_data, 100, cmap=cmap, norm=norm, extend='both', zorder=1)
m.ax.imshow(masked_data, cmap=cmap, origin='lower', norm=norm, extent=[lon_min, lon_max, lat_min, lat_max], interpolation=interp)
plt.savefig(save_file,dpi=100)
print("Saved to "+save_file)
# --------------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='mode', description='valid modes of usage', dest='mode')
# Arguments for generating bathymetry LLD files
parser_gen = subparsers.add_parser('generate_bathy', help='generate interpolated bathymetry subset from NOAA\'s ETOPO1 dataset')
parser_gen.add_argument('--info_file', required=True, help='json containing regional and earthquake information')
parser_gen.add_argument('--etopo1_file', required=False, default="~/Tsunami/ETOPO1/ETOPO1_Ice_g_gmt4.grd",
help="NOAA ETOPO1 combined topography and bathymetry file path")
parser_gen.add_argument('--resolution', type=int, required=False, default=1,
help="Resolution interpolation multiplier for NOAA topography")
parser_gen.add_argument('--text', action="store_true", required=False,
help="Store bathymetry as a text lld file")
# Arguments for plotting bathymetry
parser_plot_bathy = subparsers.add_parser('plot_bathy', help='Plot previously-generated bathymetry LLD file')
parser_plot_bathy.add_argument('--lld_file', required=True,
help="Path of bathymetry file")
# Arguments for generating EQ surface displacement fields
parser_field_eval = subparsers.add_parser('eq_field_eval', help='Generate surface displacement for a VQ fault model')
parser_field_eval.add_argument('--info_file', required=True, help='json containing regional and earthquake information')
parser_field_eval.add_argument('--lld_file', required=True,
help="Path of bathymetry file")
parser_field_eval.add_argument('--slip_from', choices=['vq_sim', 'uniform', 'slipmap'], required=True,
help="Sources of displacements")
# Arguments for plotting eq displacement fields
parser_field_plot = subparsers.add_parser('eq_field_plot', help='Plot a surface displacement field')
parser_field_plot.add_argument('--field_file', required=True,
help="Path of surface displacement file")
parser_field_plot.add_argument('--plot_horizontal', action="store_true", required=False,
help="Whether to plot the horizontal displacements in addition to vertical")
# Arguments for plotting simulation results
parser_animate = subparsers.add_parser('animate', help='Make animations from simulation output')
parser_animate.add_argument('--type', choices=['grid', 'xsection'], required=True,
help="Type of animation to make; birds-eye grid or cross-sectional verification against analytic solution")
parser_animate.add_argument('--sim_file', required=True,
help="Name of simulation file to analyze.")
parser_animate.add_argument('--zminmax', type=float, nargs=2, required=False,
help="Bounds for water height color bar")
parser_animate.add_argument('--use_basemap', action="store_true", required=False,
help="Whether to plot a basemap coastal outline over the grid animation")
parser_animate.add_argument('--fps', type=int, required=False, default=20,
help="Frames per second for animations")
parser_animate.add_argument('--dpi', type=int, required=False, default=100,
help="Bounds for water height color bar")
# Arguments for verifying simualtion against observed historical runups
parser_verify = subparsers.add_parser('verify', help='Verify simulation results against NOAA historical runup data')
parser_verify.add_argument('--sim_file', required=True,
help="Name of simulation file to analyze.")
parser_verify.add_argument('--obs_file', required=True,
help="Observed historical tsunami runup CSV file")
parser_verify.add_argument('--info_file', required=True, help='json containing regional and earthquake information')
parser_verify.add_argument('--fill', required=False, action="store_true", help='json containing regional and earthquake information')
# args = parser.parse_args(['verify', '--sim_file', 'outputs/Tohoku/TohokuSmall_x2_contRealisticX1_output_000-1600.nc',
# '--obs_file', '../historical_runups/tsrunup.csv', '--ymd', '2011', '3', '11'])
args = parser.parse_args()
if args.mode == 'generate_bathy':
with open(args.info_file, "r") as open_info_file:
region_info = json.load(open_info_file)
# ====== PARSE ETOPO1 FILE, SAVE SUBSET =====
ETOPO1_FILE = args.etopo1_file
FACTOR = args.resolution
SAVE_NAME = os.path.join(os.path.split(args.info_file)[0], 'bathymetry', region_info['name']+'_x'+str(FACTOR)+'_lld.txt')
save_dir = os.path.join(os.path.split(args.info_file)[0], 'bathymetry')
if args.text:
save_name = region_info['name']+'_x'+str(FACTOR)+'_lld.txt'
else:
save_name = region_info['name']+'_x'+str(FACTOR)+'_lld.nc'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
SAVE_PATH = os.path.join(save_dir, save_name)
MIN_LAT = region_info['lat_bounds'][0]
MAX_LAT = region_info['lat_bounds'][1]
MIN_LON = region_info['lon_bounds'][0]
MAX_LON = region_info['lon_bounds'][1]
# --- write grid ------
# TODO: transition from txt files to netCDF files containing bathymetry data
lats, lons, bathy = read_ETOPO1.grab_ETOPO1_subset_interpolated(ETOPO1_FILE, min_lat=MIN_LAT, max_lat=MAX_LAT, min_lon=MIN_LON, max_lon=MAX_LON, factor=FACTOR)
if args.text:
read_ETOPO1.write_grid(SAVE_PATH, lats, lons, bathy)
else:
read_ETOPO1.write_grid_netCDF(SAVE_PATH, lats, lons, bathy)
if args.mode == 'plot_bathy':
bathy_topo_map(args.lld_file)
if args.mode == 'eq_field_eval':
with open(args.info_file, "r") as open_info_file:
region_info = json.load(open_info_file)
VQ_DIR = "~/VirtQuake/"
system_command = "python "+VQ_DIR+"vq/PyVQ/pyvq/pyvq.py --field_eval --netCDF --horizontal --model_file {} --lld_file {}".format(region_info['model_file'], args.lld_file)
if args.slip_from == 'vq_sim':
if not region_info['event_file'] or not region_info['event_id']:
raise BaseException("Must specify both an event file and an event ID in info file")
system_command += " --event_file {} --event_id {}".format(region_info['event_file'], region_info['event_id'])
elif args.slip_from == 'uniform':
system_command += " --uniform_slip 10"
elif args.slip_from == 'slipmap':
if not region_info['slip_map']:
raise BaseException("Must specify slipmap file")
system_command += " --slipmap_file {}".format(region_info['slip_map'])
os.system(system_command)
if args.mode == 'eq_field_plot':
if not args.plot_horizontal:
plot_eq_displacements(args.field_file)
else:
            if not os.path.splitext(args.field_file)[1] == '.xyuen':
raise BaseException("Must have .xyuen file format for horizontal field plotting")
plot_eq_disps_horiz(args.field_file)
if args.mode in ['animate', 'verify']:
this_sim = simAnalyzer(args.sim_file)
if args.mode == 'verify':
with open(args.info_file, "r") as open_info_file:
region_info = json.load(open_info_file)
# Load historical inundation data and compare it to simulated results
this_sim.load_historical_runup_CSV(args.obs_file, region_info["date"], args.fill)
this_sim.compare_sim_and_obs_runup()
if args.mode == 'animate':
if args.type == 'grid':
#zminmax = (-1,1)#(-sim_data['z'].std(), sim_data['z'].std())
# Makes animation
this_sim.make_grid_animation(args.fps, args.dpi, zminmax = args.zminmax, doBasemap = args.use_basemap)
if args.type == 'xsection':
this_sim.make_crosssection_animation(args.fps, args.dpi)
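# --- added usage notes (hypothetical file paths, not part of the original script):
#   python tsunami_tools.py generate_bathy --info_file regions/tohoku.json --resolution 2
#   python tsunami_tools.py animate --type grid --sim_file outputs/run.nc --use_basemap
#   python tsunami_tools.py verify --sim_file outputs/run.nc --obs_file tsrunup.csv --info_file regions/tohoku.json --fill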
|
[
"johnmaxwilson@gmail.com"
] |
johnmaxwilson@gmail.com
|
3c500db05684cd0c0482c6ba1043cb8711687527
|
739696cb824ea23405173c5051f08534c6ae27d3
|
/day2-homework/exercise4.py
|
9c0428e01ac97b9a3b19e6ee41d8ba21360985af
|
[] |
no_license
|
hgalagali/qbb2015-homework
|
025285d9dd0dbf2faaa3921f8e76aa2ce30654ea
|
1d78925658acbed8ef990e5fec6e0ebc8ddcba00
|
refs/heads/master
| 2020-05-31T12:13:37.868899
| 2015-08-31T15:25:39
| 2015-08-31T15:25:39
| 41,310,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
#!/usr/bin/env python
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_table("/Users/cmdb/qbb2015/stringtie/SRR072893/t_data.ctab")
df2=df["FPKM"]
top=df2[0:11572]
middle=df2[11572:23144]
bottom=df2[23144:34718]
plt.figure()
plt.title("FPKM values")
plt.boxplot([top, middle, bottom])
plt.savefig("boxplot.png")
|
[
"hgalaga1@jhu.edu"
] |
hgalaga1@jhu.edu
|
513488fb677a28b865c94788e32575b2a4c788da
|
1c965e975218b6478e50413fb87b75b7d5e9dadd
|
/blog/urls.py
|
bb0e157d0d0f4a7436063f7bee1b4dfef2c68f15
|
[] |
no_license
|
latishasumardy/my-first-blog
|
6f97371091fac1af9afaf915694c422f131a2e31
|
51ae0593ee693c497fbb9d5104f3b89ccb6f7e20
|
refs/heads/master
| 2021-01-01T16:02:15.781327
| 2017-07-20T23:00:20
| 2017-07-20T23:00:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>\d+)/edit/$', views.post_edit, name='post_edit'),
url(r'^aboutme$', views.aboutme, name='aboutme'),
]
|
[
"lsmarty23@gmail.com"
] |
lsmarty23@gmail.com
|
ac97c7b842716eb74b97408e214b725c9617484a
|
b59c2f4d476dae4e37acb30bb9df71daf589c708
|
/singleton.py
|
733d8fd7f5317813e5a9058b91c6ce7d5573fa75
|
[] |
no_license
|
catVSdog/design_patterns
|
3cd5ab583b37e4149e55c59fa08d1c6e0b1ffcfd
|
b9384ef5b40259f9c855f4d60bf02b0e2d3bd933
|
refs/heads/master
| 2021-07-09T10:43:23.437706
| 2021-03-19T01:38:10
| 2021-03-19T01:38:10
| 231,211,122
| 2
| 0
| null | 2021-03-03T09:17:25
| 2020-01-01T12:02:49
|
Python
|
UTF-8
|
Python
| false
| false
| 6,358
|
py
|
"""
单例模式
一般情况下,一个类每次实例化都会产生一个单独的对象。而单例模式的要求是:多次实例化,只产生一个对象。
这就要求对类的实例化过程进行干预,使其只有在第一次实例化时产生一个对象,以后的多次实例化,都是返回第一次创建的对象。
这就需要两个步骤:
1.是否已经有了实例了。
2.1 如果没有,那么创建实例,存储实例,将实例返回
2.2 如果已经有了,那么取出实例,将其返回
首先说,如何存储实例,一般利用具有唯一性的东西, 如 类变量
在说,如何干预实例化:
1.将类视作类,类实际通过 __new__ 方法,创建新对象,那么就重写 __new__方法。
2.有一个对象object, 当 object()时,其实调用的是 object的类的 __call__ 方法.python中一切皆对象,类也是其他类(又叫元类, 默认是type) 的对象。
因此可以重写类的元类的 __call__ 方法
> 以上两种都是从类创建对象的角度,进行干预, 还有一种方法是将类创建对象的过程,转换为一个函数调用的过程,在函数调用的过程中,创建/查找 返回类.
即:使用装饰器,将 cls() 操作转换为函数调用过程. 具体又分:
3.函数装饰器
4.类装饰器
还有一种是利用 python 包导入特性——每个包只导入一次,在一个模块中实例化一个类,然后在其他模块中直接导入该实例。
"""
### Approach 1: override __new__
class Sun:
    __instance__ = None  # a class variable holds the single instance
def __new__(cls, *args, **kwargs):
if cls.__instance__ is None:
cls.__instance__ = super().__new__(cls)
return cls.__instance__
sun_1 = Sun()
sun_2 = Sun()
assert sun_1 == sun_2
class Changjiang:
    __instance__ = {}  # a class variable stores the instances, keyed by class
def __new__(cls, *args, **kwargs):
if cls not in cls.__instance__:
cls.__instance__[cls] = super().__new__(cls)
return cls.__instance__[cls]
changjiang_1 = Changjiang()
changjiang_2 = Changjiang()
assert changjiang_1 == changjiang_2
# Approach 2: override __call__ on a metaclass
class Person(type):
    __instance__ = None  # a class variable holds the single instance
    def __call__(self, *args, **kwargs):  # here self is actually the ChairMan class
if self.__instance__ is None:
self.__instance__ = super().__call__(*args, **kwargs)
return self.__instance__
class ChairMan(metaclass=Person):
pass
chairman_1 = ChairMan()
chairman_2 = ChairMan()
assert chairman_1 == chairman_2
class River(type):  # note: the metaclass inherits from type
    __instance__ = {}  # a class variable stores the instances, keyed by class
    def __call__(cls, *args, **kwargs):  # renamed self to cls to make this more obvious
if cls not in cls.__instance__:
cls.__instance__[cls] = super().__call__(*args, **kwargs)
return cls.__instance__[cls]
class Huanghe(metaclass=River):  # switch the metaclass to River
pass
huanghe_1 = Huanghe()
huanghe_2 = Huanghe()
assert huanghe_1 == huanghe_2
# Approach 3: a function decorator
def singletone(cls):
__instance__ = {}
def wrapper(*args, **kwargs):
if cls not in __instance__:
__instance__[cls] = cls(*args, **kwargs)
return __instance__[cls]
return wrapper
@singletone
class Beijing:  # after decoration, globals() shows Beijing is no longer a class but a function
pass
beijing_1 = Beijing() # actually calls wrapper()
beijing_2 = Beijing()
assert beijing_1 == beijing_2
# print(globals()) # {'Beijing': <function singletone.<locals>.wrapper at 0x7f5df7d88f70> }
# def singletone(cls):
# __instance__ None
#
# def wrapper(*args, **kwargs):
#         if __instance__ is None: # this does NOT work -- nested scoping and variable shadowing make __instance__ local to wrapper
# __instance__ = cls(*args, **kwargs)
# return __instance__[cls]
# return wrapper
# Approach 4: a class decorator. Whatever the class decorator wraps (a class or function) is passed as the
# decorator class's init argument, producing an instance of the decorator class. Overriding that decorator
# class's __call__ method then intercepts the wrapped class's instantiation.
class Singletone1:
    __instance__ = None  # a class variable points to the instance
def __init__(self, cls):
self.cls = cls
def __call__(self, *args, **kwargs):
if self.__instance__ is None:
self.__instance__ = self.cls(*args, **kwargs)
return self.__instance__
@Singletone1
class Guangzhou:  # Guangzhou is no longer a class here but an instance of Singletone1: 'Guangzhou': <__main__.Singletone1 object at 0x7fcc0cdfd430>
pass
guangzhou_1 = Guangzhou()
guangzhou_2 = Guangzhou()
assert guangzhou_1 == guangzhou_2
class Singletone2:
    __instance__ = {}  # a class variable stores the instances
def __init__(self, cls):
self.cls = cls
def __call__(self, *args, **kwargs):
if self.cls not in self.__instance__:
self.__instance__[self.cls] = self.cls(*args, **kwargs)
return self.__instance__[self.cls]
@Singletone2
class Shanghai:  # Shanghai is no longer a class here but an instance of Singletone2: 'Shanghai': <__main__.Singletone2 object at 0x7fcc0cdfd520>
pass
shanghai_1 = Shanghai()
shanghai_2 = Shanghai()
assert shanghai_1 == shanghai_2
class Singletone3:
def __init__(self, cls):
self.cls = cls
        self.__instance = None  # an instance variable points to the instance
def __call__(self, *args, **kwargs):
if self.__instance is None:
self.__instance = self.cls(*args, **kwargs)
return self.__instance
@Singletone3
class Shenzhen:
pass
shenzhen_1 = Shenzhen()
shenzhen_2 = Shenzhen()
assert shenzhen_1 == shenzhen_2
class Singletone4:
def __init__(self, cls):
self.cls = cls
        self.__instance = {}  # an instance variable stores the instances
def __call__(self, *args, **kwargs):
if self.cls not in self.__instance:
self.__instance[self.cls] = self.cls(*args, **kwargs)
return self.__instance[self.cls]
@Singletone4
class Hangzhou:
pass
hangzhou_1 = Hangzhou()
hangzhou_2 = Hangzhou()
assert hangzhou_1 == hangzhou_2
# Approach 5: module import (each module is imported only once)
# a.py
class A:
pass
aa = A()
# b.py
# from a import aa
# aa.xxxx
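# --- added sketch (not in the original file): none of the variants above is thread-safe; if two
# threads hit the first instantiation at the same time, both can create an object. A common fix,
# using only the standard library threading module, is to guard __new__ with a lock:
import threading
class ThreadSafeSun:
    __instance__ = None
    __lock__ = threading.Lock()
    def __new__(cls, *args, **kwargs):
        if cls.__instance__ is None:        # fast path without taking the lock
            with cls.__lock__:              # double-checked locking
                if cls.__instance__ is None:
                    cls.__instance__ = super().__new__(cls)
        return cls.__instance__
assert ThreadSafeSun() is ThreadSafeSun()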
|
[
"windorbird@gmail.com"
] |
windorbird@gmail.com
|
821a2b4b9edb37d15409ef8739f0428c789f0cff
|
7b95d4f78b6bd1e6eb089b57ac38f89c48685ee8
|
/migrations/versions/2b1dbac24b45_new_fields_in_user_model.py
|
98f3f2ea72fb664f37ade67431735d833dbe0f3d
|
[] |
no_license
|
pandreyn/microblog-python-flex
|
77e822b6f564f39180f50aa294d2708a2a67df18
|
f02759fd98264c2efeae5bf828c1ccfd8b3332d1
|
refs/heads/master
| 2021-04-29T23:12:12.930862
| 2018-03-06T05:33:07
| 2018-03-06T05:33:07
| 121,550,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
"""new fields in user model
Revision ID: 2b1dbac24b45
Revises: c414e32cfcd9
Create Date: 2018-02-14 18:55:15.665604
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2b1dbac24b45'
down_revision = 'c414e32cfcd9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen')
op.drop_column('user', 'about_me')
# ### end Alembic commands ###
|
[
"v-anpetk@microsoft.com"
] |
v-anpetk@microsoft.com
|
7329b36832858ee7f92c5421d1a10f57c7733671
|
f1e203b154f309dc0025f88acc5d0f369c031a70
|
/reddit_api.py
|
230feead556e84f490815697b941201d231b8af8
|
[] |
no_license
|
radiochickenwax/algo_challenge
|
eb8382d5713dce4f1f2880cf36e6451907eca7a7
|
4992ed7ba2d11bf80a0f64c8c90f4e7d370d7f9d
|
refs/heads/master
| 2021-08-15T12:25:45.034102
| 2017-11-17T21:12:36
| 2017-11-17T21:12:36
| 111,150,761
| 0
| 0
| null | 2017-11-17T21:08:20
| 2017-11-17T21:08:20
| null |
UTF-8
|
Python
| false
| false
| 4,808
|
py
|
import requests
import json
import re
from bson import json_util
import os
from models import User, Team, Game, Challenge
from datetime import datetime
from random import shuffle
from slackclient import SlackClient
num_teams = int(os.environ['NUM_TEAMS'])
team_members = int(os.environ['NUM_MEMBERS'])
token = os.environ["SLACK_TOKEN"]
sc = SlackClient(token)
def get_random_post(requested_difficulty='Easy', any_difficulty=False):
r = requests.get('http://reddit.com/r/dailyprogrammer/random.json', headers = {'User-agent': 'reddit_daily_algo_bot 0.1'})
data = r.json()
post = data[0]['data']['children'][0]['data']
title = post['title']
regex = r"\[[^)]+\].*?\[([^)]+)\]"
try:
difficulty = re.search(regex, title).group(1)
except:
return get_random_post(requested_difficulty)
# if difficulty != requested_difficulty and not any_difficulty:
# print("wrong difficulty", difficulty)
# return get_random_post(requested_difficulty)
description = post['selftext']
description = description.replace('\n', '\\n')
regex = r"^#(.*?)\\n#"
try:
description = re.search(regex, description).group(1)
except:
# print("regex failed for desc",post['selftext'])
return get_random_post(requested_difficulty)
description = description.replace('\\n', '\n')
try:
Challenge.objects.get(description=description)
print("challenge already exists")
return get_random_post(requested_difficulty)
except:
pass
url = post['url']
data = {
'title': title,
'description': description,
'url': url,
'difficulty': difficulty
}
return data
def diff_color(diff):
if diff == "Hard":
return "#CB3535"
elif diff == "Intermediate":
return "#E7AB17"
elif diff == "Easy":
return "#54D600"
else:
return "#2E5DFF"
def background_worker(response_url, channel):
data = []
for i in range(3):
challenge = get_random_post(any_difficulty=True)
new_challenge = Challenge(
title=challenge['title'],
description=challenge['description'],
difficulty=challenge['difficulty'],
url=challenge['url'])
data.append(new_challenge.save())
game = Game(choices=data)
game = game.save()
game_id = json.loads(json_util.dumps(game.id))
message = {
"response_type": "in_channel",
"text": "Here are three random Algorithm challenges!",
"attachments": []}
choices = {
"title": "Choose which Algo you'd like to solve!",
"callback_id": game_id['$oid'],
"attachment_type": "default",
"actions": []
}
for i, chall in enumerate(data):
challenge_attachment = {}
challenge_attachment["title"] = "<" + chall.url + "|" + chall.title + ">"
challenge_attachment["text"] = chall.description
challenge_attachment["color"] = diff_color(chall.difficulty)
message["attachments"].append(challenge_attachment)
choice = {}
choice["name"] = "choice"
choice["text"] = "Challenge #" + str(i+1)
choice["type"] = "button"
choice["value"] = json.loads(json_util.dumps(chall.id))["$oid"]
choices["actions"].append(choice)
message["attachments"].append(choices)
sc.api_call(
"chat.postMessage",
channel=channel,
text=message["text"],
response_type=message["response_type"],
attachments=message["attachments"]
)
def randomize_teams(names, no_teams, game):
    original_names = list(names)  # keep an untouched copy; names is emptied by pop() below
    teams = []
shuffle(names)
for _ in range(no_teams):
teams.append([])
last_game = Game.objects.first()
last = last_game.teams
while names:
for team in teams:
if names:
team.append(names.pop())
for team in teams:
if team in last:
            return randomize_teams(original_names, no_teams, game)
teams_object = []
for team in teams:
max_driver_time = datetime.now()
current_driver = ""
for (idx, member) in enumerate(team):
if 'last_lead' in member:
if member.last_lead < max_driver_time:
max_driver_time = member.last_lead
current_driver = idx
else:
current_driver = idx
break
temp = team[0]
team[0] = team[current_driver]
team[current_driver] = temp
team[0].last_lead = datetime.now()
team[0].save()
team = Team(members=team)
teams_object.append(team)
print(teams,teams_object)
game.teams = teams_object
game.save()
return teams
if __name__ == "__main__":
get_random_post()
|
[
"kyle.bible@gmail.com"
] |
kyle.bible@gmail.com
|
2d73ae346589bc5cd766d0a44c3a57acc84e8252
|
48350e98d638656d8cac51b539c98a000400f8da
|
/ex5.py
|
dda6bab367d22b99294f885352766cfbd9a80d64
|
[] |
no_license
|
WenHuber/Learn-Python3-the-Hard-Way-ExampleCodes
|
25a09ea5c95bd8a3a644e8da33f813f7562dce31
|
1cfdb1f7be08906ba18abc6408f8b24f7acb2094
|
refs/heads/master
| 2020-04-18T13:18:13.251863
| 2019-01-25T15:10:31
| 2019-01-25T15:10:31
| 167,558,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#Exercise5. More Variables and Printing
my_name = 'Zed A. Shaw'
my_age = 35 #not a lie
my_height = 74 #inches
my_weight = 180 #lbs
my_eyes = 'Blue'
my_teeth = 'White'
my_hair = 'Brown'
print(f"Let's talk about {my_name}.")
print(f"He's {my_height} inches tall.")
print(f"He's {my_weight} pounds heavy.")
print("Actually that's not too heavy.")
print(f"He's got {my_eyes} eyes and {my_hair} hair.")
print(f"His teeth are usually {my_teeth} depending on the coffee.")
#this line is tricky, try to get it exactly right
total = my_age + my_height + my_weight
print(f"If I add {my_age}, {my_height}, and {my_weight} I get {total}.")
|
[
"noreply@github.com"
] |
WenHuber.noreply@github.com
|
7a5391e4a8541d1248b470d05a858b472d81f6cb
|
713028844e118751c0bcf79627b8ebfe7599c608
|
/RealTimeFaceDetectionOnLiveVideoFeed/src/GoogleNet.py
|
c3ff2046b015f6b6ff98b41e53de87e2a37ea4a4
|
[] |
no_license
|
mahima14/FaceDetectionRealTimeVideo
|
c103e2d1e7a3df2b8c4ca99e118196acd97d6ea0
|
5d6561c709c4b7144a677b28b5b88e8f781c7ced
|
refs/heads/master
| 2021-04-06T01:23:28.220467
| 2018-07-25T16:08:51
| 2018-07-25T16:08:51
| 124,431,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,181
|
py
|
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Dropout, Flatten, Dense
import numpy as np
import pandas as pd
from keras.utils import np_utils
from keras.layers.containers import Graph
from keras.models import Sequential
#DATA PREPROCESSING
df = pd.read_csv("fer2013.csv")
df.head()
df.shape
df["Usage"].value_counts()
train = df[["emotion", "pixels"]][df["Usage"] == "Training"]
train.isnull().sum()
train['pixels'] = train['pixels'].apply(lambda im: np.fromstring(im, sep=' '))
x_train = np.vstack(train['pixels'].values)
y_train = np.array(train["emotion"])
x_train.shape, y_train.shape
public_test_df = df[["emotion", "pixels"]][df["Usage"]=="PublicTest"]
public_test_df["pixels"] = public_test_df["pixels"].apply(lambda im: np.fromstring(im, sep=' '))
x_test = np.vstack(public_test_df["pixels"].values)
y_test = np.array(public_test_df["emotion"])
x_train = x_train.reshape(-1, 48, 48, 1)
x_test = x_test.reshape(-1, 48, 48, 1)
x_train.shape, x_test.shape
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
y_train.shape, y_test.shape
batch_size = 128
nb_epoch = 50
print 'X_train shape: ', x_train.shape
print 'y_train shape: ', y_train.shape
print 'img size: ', x_train.shape[2], x_train.shape[3]
print 'batch size: ', batch_size
print 'nb_epoch: ', nb_epoch
# model architecture:
model = Graph()
model.add_input(name='n00', input_shape=(1,48,48))
# layer 1
model.add_node(Convolution2D(64,1,1, activation='relu'), name='n11', input='n00')
model.add_node(Flatten(), name='n11_f', input='n11')
model.add_node(Convolution2D(96,1,1, activation='relu'), name='n12', input='n00')
model.add_node(Convolution2D(16,1,1, activation='relu'), name='n13', input='n00')
model.add_node(MaxPooling2D((3,3),strides=(2,2)), name='n14', input='n00')
# layer 2
model.add_node(Convolution2D(128,3,3, activation='relu'), name='n22', input='n12')
model.add_node(Flatten(), name='n22_f', input='n22')
model.add_node(Convolution2D(32,5,5, activation='relu'), name='n23', input='n13')
model.add_node(Flatten(), name='n23_f', input='n23')
model.add_node(Convolution2D(32,1,1, activation='relu'), name='n24', input='n14')
model.add_node(Flatten(), name='n24_f', input='n24')
# output layer
model.add_node(Dense(1024, activation='relu'), name='layer4',
inputs=['n11_f', 'n22_f', 'n23_f', 'n24_f'], merge_mode='concat')
model.add_node(Dense(7, activation='softmax'), name='layer5', input='layer4')  # FER2013 has 7 emotion classes, matching the one-hot targets
model.add_output(name='output1',input='layer5')
print('Training....')
model.compile(loss={'output1':'categorical_crossentropy'}, optimizer='adam',metrics=['accuracy'])
model.fit({'n00':x_train, 'output1':y_train}, nb_epoch=nb_epoch, batch_size=batch_size,validation_split=0.3, shuffle=True, verbose=1)
#Model result:
# Graph models take a dict of named inputs/outputs, matching the fit() call above
loss_and_metrics = model.evaluate({'n00': x_train, 'output1': y_train}, batch_size=batch_size, verbose=1)
print('Done!')
print('Loss: ', loss_and_metrics[0])
print(' Acc: ', loss_and_metrics[1])
#Saving Model
json_string = model.to_json()
open('emotion_model_googleNet.json', 'w').write(json_string)
model.save_weights('emotion_weights_googleNet.h5')  # save once; the original duplicated this call
|
[
"mahima.agarwal14@gmail.com"
] |
mahima.agarwal14@gmail.com
|
c0789c0f07c9a60555475f1427905abf58911feb
|
591018f4f72b57028f65ac840d81f19a6ad9368b
|
/zimu_ml_sys/core/graph_embedding/graph_embedding_base_task.py
|
c66255bc4f4cc1dd070a681949068fdad23683d6
|
[] |
no_license
|
nicaibutou1993/ZimuMLSys
|
4bea11be191218061d13c8ca816db5aa2ea42f3f
|
2ff8f7e4eff5ad30f1d52c75348cf1e93adce52e
|
refs/heads/main
| 2023-05-08T09:46:29.674308
| 2021-06-04T06:45:31
| 2021-06-04T06:45:31
| 365,101,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,905
|
py
|
# -*- coding: utf-8 -*-
import os
import time
import numpy as np
import pandas as pd
from zimu_ml_sys.constant import PREPROCESS_DATA_PATH, FEATURE_DATA_PATH
from zimu_ml_sys.core.graph_embedding.deep_walk import DeepWalk
from zimu_ml_sys.core.graph_embedding.graph import build_graph
from zimu_ml_sys.core.graph_embedding.node2vector import Node2Vec
from zimu_ml_sys.utils.data_util import df_group_parallel
from gensim.models import KeyedVectors
class GraphEmbeddingBaseTask():
"""
根据用户历史点击行为,将 item 向量化处理
1. node2vector: 有向图
边与边的权重 即为:item 与 item 权重:
使用用户点击 序列,两两相邻点击的item 作为边与边的关系。
权重: 取决于 相邻两个item:
1. 点击时间差: 两个item 点击时间越相近,表示两个item 关系越重要,权重值 越大
2. item1的热度 与item2 的热度: 这个主要是:
1. 如果你想要优先 热门item,那么冷门item 到热门item 的权重 你就设置 大一点
2. 如果你想要近可能 降低热门item,那么热门item 到冷门item 的权重 设置大一点
max(3, np.log(1, item1_click / item2_click)) ## 降低热门,使得在node2vector 随机游走的时候 能尽量不要总是选择热门item
/ (1 + 时间差) ## 时间差
2. deepwalker:无向图
仅仅根据 用户的点击序列:,item与item 连接边的权重都是1,
如果item1 与item2 共线次数多,item1 与item3 仅出现一次,他们在被item1 选择的概率是一样的。
"""
"""原始数据"""
preprocess_root_data_path = ''
"""item embedding 存放根路径"""
feature_root_data_path = ''
"""句子的长度"""
walk_length = 20
"""循环步数 总的样本数: 80 * len(items)"""
num_walks = 80
"""当前节点 返回上一次节点 惩罚项,值 越大,表示当前节点越不会返回上一个节点
w * 1 / p
"""
p = 2
"""当前节点 选择邻居节点, 且 该邻居节点 与 上一个节点 不是邻居,值越小,表示 当前节点 会去比较深的地方去
w * 1 / q
"""
q = 0.5
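# With p = 2 and q = 0.5 the walk is discouraged from stepping back to the previous
# node and biased toward moving outward, i.e. a more depth-exploring (DFS-like)
# traversal in node2vec terms.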
"""item向量维度"""
embed_size = 128
"""窗口大小"""
window_size = 10
"""word2vector 训练 并发数"""
workers = 6
"""word2vector 模型 迭代次数"""
epochs = 3
user_field_name = 'user_id'
item_field_name = 'item_id'
time_field_name = 'time'
train_df = None
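# Subclasses are expected to set preprocess_root_data_path / feature_root_data_path
# and to implement read_data(); running this base class directly raises
# NotImplementedError.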
def read_data(self):
"""读取数据集"""
# train_df = pd.read_csv(os.path.join(self.preprocess_root_data_path, 'train.csv'))
# self.train_df = train_df.sort_values('time')
raise NotImplementedError
def train_node2vector(self):
"""
构建有向图
训练node2vector 模型
边与边的权重 即为:item 与 item 权重:
使用用户点击 序列,两两相邻点击的item 作为边与边的关系。
权重: 取决于 相邻两个item:
1. 点击时间差: 两个item 点击时间越相近,表示两个item 关系越重要,权重值 越大
2. item1的热度 与item2 的热度: 这个主要是:
1. 如果你想要优先 热门item,那么冷门item 到热门item 的权重 你就设置 大一点
2. 如果你想要近可能 降低热门item,那么热门item 到冷门item 的权重 设置大一点
max(3, np.log(1, item1_click / item2_click)) ## 降低热门,使得在node2vector 随机游走的时候 能尽量不要总是选择热门item
/ (1 + 时间差) ## 时间差
:return:
"""
if self.train_df is None:
self.train_df = self.read_data()
self.train_df = self.train_df.sort_values(self.time_field_name)
edges = self.build_edges()
graph = build_graph(edges)
print('load node2vector')
node_model = Node2Vec(graph, walk_length=self.walk_length, num_walks=self.num_walks, p=self.p, q=self.q,
workers=1)
print('train node2vector')
node_model.train(embed_size=self.embed_size, window_size=self.window_size, workers=self.workers,
epochs=self.epochs)
node_model.w2v_model.wv.save_word2vec_format(
os.path.join(self.feature_root_data_path, "node2vec_embedding.bin"), binary=True)
def train_deepwalker(self):
"""
构建无向图
训练deepwalker
仅仅根据 用户的点击序列:,item与item 连接边的权重都是1,
如果item1 与item2 共线次数多,item1 与item3 仅出现一次,他们在被item1 选择的概率是一样的。
:return:
"""
direction = False
if self.train_df is None:
self.train_df = self.read_data()
self.train_df = self.train_df.sort_values(self.time_field_name)
edges = self.build_edges(direction=direction)
graph = build_graph(edges, direction=direction)
deep_model = DeepWalk(graph, walk_length=self.walk_length, num_walks=self.num_walks, workers=1)
deep_model.train(embed_size=self.embed_size, window_size=self.window_size, workers=self.workers,
epochs=self.epochs)
deep_model.w2v_model.wv.save_word2vec_format(
os.path.join(self.feature_root_data_path, "deepwalk_embedding.bin"), binary=True)
def run(self):
"""
执行入口,训练node2vector 和 deepwalker 两个模型
:return:
"""
time_time = time.time()
self.train_df = self.read_data()
self.train_df = self.train_df.sort_values(self.time_field_name)
self.train_node2vector()
print(time.time() - time_time)
self.train_deepwalker()
print(time.time() - time_time)
def build_edges(self, direction=True):
"""
构建边与边
:param direction: 是否有向图
:return: 边的集合
"""
item_value_counts = dict(self.train_df[self.item_field_name].value_counts())
edgelist = []
for _, user_group_data in self.train_df.groupby(self.user_field_name):
items = user_group_data[self.item_field_name].values
times = user_group_data[self.time_field_name].values
for i in range(len(items) - 1):
delta = abs(times[i + 1] - times[i]) / (60 * 60 * 12)  # time gap between the two clicks (the original mistakenly mixed a timestamp with an item id)
ai, aj = item_value_counts[items[i]], item_value_counts[items[i + 1]]
"""热门与非热门:之比 在 20倍以上,体现当前3"""
"""1. delta_t: 时间差 时间差越大,表示两个节点 相关性越低
2. np.log(1 + ai / aj) 前者相比后者热度越高,则权重越大,使得node2vector 加大对冷门item 进行采样
3. 0.8:表示逆序情况下,需要降低 两item 之间的权重值
有向有权图,热门商品-->冷门商品权重=热门商品个数/冷门商品个数
"""
if direction:
edgelist.append([items[i], items[i + 1], max(3, np.log(1 + ai / aj)) * 1 / (1 + delta)])
edgelist.append([items[i + 1], items[i], max(3, np.log(1 + aj / ai)) * 0.8 * 1 / (1 + delta)])
else:
edgelist.append([items[i], items[i + 1], 1.0])
print('load edges success')
return edgelist
def load_node2vector_model(self):
node_model = KeyedVectors.load_word2vec_format(
os.path.join(self.feature_root_data_path, "node2vec_embedding.bin"), binary=True)
return node_model
def load_deepwalker_model(self):
deep_model = KeyedVectors.load_word2vec_format(
os.path.join(self.feature_root_data_path, "deepwalk_embedding.bin"), binary=True)
return deep_model
def explore_time_importance(self):
"""
探索 用户相邻点击的item,时间相关性,一般来说 上一次点击 如果与 当前点击的 商品 点击距离越近 则与之 关系越近
相邻item: 时间关系权重,时间越大,相邻item 权重越小:
delta = abs( time(item1) - time(item2) ) / (60 * 60 * 12) ## 60 * 60 * 12 时间 约束小一点
1 / ( 1 + delta )
min 0.000000
1% 4.955338
5% 10.901743
10% 19.821351
25% 62.437254
50% 291.373855
75% 6101.011731
90% 46305.648330
95% 77996.023756
99% 152713.001474
max 327660.801257
:param train_df:
:return:
"""
self.train_df = self.read_data()
train_df = self.train_df.sort_values(self.time_field_name)
def compute_interval(name, group_df):
group_df['time_shift'] = group_df[self.time_field_name].shift(-1)
group_df['time_interval'] = group_df['time_shift'] - group_df[self.time_field_name]
return group_df
train_df = df_group_parallel(train_df.groupby(self.user_field_name), compute_interval, n_jobs=1)
train_df = train_df[train_df['time_interval'].notnull()]
print(train_df['time_interval'].describe([.01, .05, .10, .25, .5, .75, .90, .95, .99]))
if __name__ == '__main__':
GraphEmbeddingBaseTask().run()
|
[
"nijian@jiangduoduo.com"
] |
nijian@jiangduoduo.com
|