blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ba97794c6bdae848bc13d24f3bde25199dd1f016 | 3b65a1502d0e2847079f2fcd8e81b644e752072b | /venv/Lib/site-packages/doc/conf.py | c7beb206c93e200ae770294205bc0d686fc021d4 | [] | no_license | triplew86-cl/ontology_demo | 109436558e76a731b2cf6741f6e25cf9b4ca83ee | e780052c79e717d1259b6849d964d80680bf7261 | refs/heads/master | 2022-10-21T03:46:32.838250 | 2019-03-21T05:41:30 | 2019-03-21T05:41:30 | 176,867,270 | 0 | 1 | null | 2022-10-17T22:49:19 | 2019-03-21T04:02:19 | Python | UTF-8 | Python | false | false | 7,608 | py | # -*- coding: utf-8 -*-
#
# azure-storage-python documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 27 15:42:45 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# NOTE(review): pip is imported at module scope but is only referenced by the
# commented-out theme installation further down
# ("#pip.main(['install', 'sphinx_bootstrap_theme'])"); it can be removed
# together with that line.
import pip
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../azure-cosmosdb-table'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Azure CosmosDB SDK for Python'
copyright = u'2015, Microsoft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.5'
# The full version, including alpha/beta/rc tags.
release = '1.0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for extensions ----------------------------------------------------
autoclass_content = 'both'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
#html_theme_options = {'collapsiblesidebar': True}
# Activate the theme.
#pip.main(['install', 'sphinx_bootstrap_theme'])
#import sphinx_bootstrap_theme
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'azure-cosmosdb-python-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'azure-cosmosdb-python.tex', u'Azure SDK for Python Documentation',
u'Microsoft', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
| [
"wenwenwang@crimsonlogic.com"
] | wenwenwang@crimsonlogic.com |
b2b5ff0bd1ab1d7aed0a02913cc5eba5962ed9e5 | af4edde514d5e4b1823ebd77b4df3d18c4165047 | /parsl/tests/test_sites/test_CC-IN2P3/cc_in2p3.py | 568b9db60c01cf60e4caf43c2efa02f3f8b1e5f4 | [
"Apache-2.0"
] | permissive | kylechard/parsl | 5f48d51f06631248b7885cee165c1b11a5881cff | 1080cc17e0c42f877cab18d1452bb6a2bc4c40b7 | refs/heads/master | 2020-12-30T09:26:39.786962 | 2018-03-29T18:08:30 | 2018-03-29T18:08:30 | 100,407,921 | 0 | 0 | null | 2018-03-29T01:27:15 | 2017-08-15T18:40:47 | Python | UTF-8 | Python | false | false | 22 | py | ../configs/cc_in2p3.py | [
"ybabuji@cca009.in2p3.fr"
] | ybabuji@cca009.in2p3.fr |
fb93ec9ad172b75595a7d1dbfe69925d43ebec3b | 6b4346ab5d8e3cc5d1857ecb5fbc7160b9002566 | /backend/src/logbug.py | f4c49d4d8c705191ec2e8af6e99c4c45cc4cdedb | [] | no_license | theballkyo/SDN-handmade | eb582eb0aa3e23e40107dfea8d9e30e03bf4ded5 | bba04cf24220cef378a458b87b62af8fe09c2411 | refs/heads/master | 2021-01-22T13:22:28.830473 | 2018-01-28T02:58:16 | 2018-01-28T02:58:16 | 100,669,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,721 | py | # import pprint
import logging
import logging.handlers
import struct
# import readline
from datetime import datetime
# Prefer the stdlib GNU readline; on platforms without it (e.g. Windows)
# fall back to the pyreadline port, bound under the same name so the rest
# of the module can use `readline` unconditionally.
try:
    import readline
except ImportError:
    import pyreadline as readline
class LogBugHandler(logging.Handler):
    """Logging handler that deliberately swallows every record.

    LogBug.listener_configurer() attaches an instance to the root logger;
    the actual rendering of records is done by LogBug's listener thread,
    so this handler's emit() intentionally does nothing.
    """

    def __init__(self):
        super().__init__()

    def emit(self, record):
        """Discard *record* without producing any output."""
        pass
def debug(msg):
    """Pretty-print *msg* for debugging purposes.

    Bug fix: the module-level ``import pprint`` is commented out at the top
    of this file, so calling this function raised NameError.  The import is
    done locally here so debug() works without touching the module header.
    """
    import pprint
    pprint.pprint(msg)
class LogBug:
    """Console logger that coexists with interactive input() prompts.

    Producers push LogRecords onto *queue* (wired up by worker_config);
    a dedicated listener thread (listener_thread) pops them and prints
    them without corrupting the line the user is currently typing, using
    readline's line buffer plus VT100 escape sequences.
    """
    def __init__(self, queue):
        """Store the shared record queue and reset prompt/shutdown state."""
        self.queue = queue
        self.prompt = ''
        # True while read_input() is blocked inside input(); the listener
        # uses it to decide whether to re-draw the prompt after a log line.
        self.is_wait_input = False
        self.shutdown = False
        # Records with levelno below this threshold are dropped.
        self.log_level = 0
        # self.sys = sys
    def pre_shutdown(self):
        """Clear the prompt so the listener stops re-printing it."""
        self.prompt = ""
    def post_shutdown(self):
        """Flag shutdown and wake the listener with a None sentinel."""
        self.shutdown = True
        self.queue.put(None)
    def read_input(self, prompt=None):
        """Prompt the user and return the typed line.

        While blocked, is_wait_input tells the listener it must restore
        the prompt and any partially-typed input after printing a record.
        """
        if prompt is not None:
            self.prompt = prompt
        self.is_wait_input = True
        data = input(self.prompt)
        self.is_wait_input = False
        return data
    def worker_config(self):
        """Route all root-logger records (DEBUG and up) into the queue."""
        h = logging.handlers.QueueHandler(self.queue)
        root = logging.getLogger()
        root.addHandler(h)
        root.setLevel(logging.DEBUG)
    def listener_thread(self):
        """Consume records from the queue and print them to stdout.

        Runs until a None sentinel arrives while self.shutdown is set.
        Before printing, the current console line is erased with VT100
        escapes so log output never interleaves with user typing.
        """
        self.listener_configurer()
        # import fcntl
        # import termios
        import sys
        while True:
            try:
                # time.sleep(1)
                record = self.queue.get()
                if record is None:
                    if self.shutdown: # We send this as a sentinel to tell the listener to quit.
                        print("LogBug -> shutdown listener thread")
                        break
                    continue
                # Per-instance level filter, independent of logging's own.
                if record.levelno < self.log_level:
                    continue
                # # logger = logging.getLogger(record.name)
                buff = readline.get_line_buffer()
                # print("Buff2: " + str(len(buff)))
                # _, cols = struct.unpack('hh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234'))
                # # print(readline.get_)
                # NOTE(review): text_len is computed but never used -- the
                # multi-row clear that needed it is commented out below.
                text_len = len(buff) + 2
                text_len += len(self.prompt)
                #
                # ANSI escape sequences (All VT100 except ESC[0G)
                # Clear current line
                sys.stdout.write('\x1b[2K')
                # sys.stdout.write('\x1b[1A\x1b[2K'*(text_len//cols)) # Move cursor up and clear line
                # Move to start of line
                sys.stdout.write('\x1b[1000D')
                # print(record.__dict__)
                data = {
                    'created': datetime.fromtimestamp(record.created),
                    'levelno': record.levelno,
                    'levelname': record.levelname,
                    'message': record.message
                }
                sys.stdout.write(
                    "{created} [{levelname}({levelno})] {message}\n".format(**data))
                # sys.stdout.write(record)
                # Restore the prompt and any half-typed user input.
                if self.is_wait_input:
                    sys.stdout.write(self.prompt + buff)
                sys.stdout.flush()
                # # logger.handle(record) # No level or filter logic applied - just do it!
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                import sys
                import traceback
                # print >> sys.stderr, 'Whoops! Problem:'
                traceback.print_exc(file=sys.stderr)
    def listener_configurer(self):
        """Attach the no-op LogBugHandler to the root logger; records are
        rendered by listener_thread itself, not by the logging machinery."""
        root = logging.getLogger()
        # h = logging.StreamHandler()
        h = LogBugHandler()
        root.addHandler(h)
| [
"theball_kyo@hotmail.com"
] | theball_kyo@hotmail.com |
3ea14480eea3e779545c922160ad1ab5bcf348fd | aa0c7a133f4aae575e526a9e0dd392626f217fff | /build_bipartites_train.py | 6c5ce23ccc8d6fc9ddc2e0ff94b300c7b181d1cb | [
"Apache-2.0"
] | permissive | jodsche/BachPropagate | 526472f8649c33d7f2a61f1ed102dfbdd4b4efbb | 0bb0054d182c954bad6f002189285991fa62daa2 | refs/heads/master | 2023-05-15T06:10:39.227227 | 2018-07-04T15:22:06 | 2018-07-04T15:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,591 | py | import itertools as it
import sys
import json
import re
import collections
import os
from utils import write_to_pickle
path = 'data/raw'
bipartite_path = 'data/bipartite_challenge'
# Generate BiPartites and save as Objects.
filenames = os.listdir(path)
count = 0
# Lookup tables built from every playlist slice file:
#   pid -> title, pid -> ordered track list, album -> tracks,
#   artist -> tracks, track -> artist/album/name, album -> name,
#   artist -> name, pid -> description (when present).
# NOTE(review): "Aritst" is a typo kept throughout (variable AND pickle
# file name below); renaming would break consumers of the pickle.
AllDataPidTitleBipartite = {}
AllDataPidTrackListBipartite = {}
AllDataAlbumTrackSetBipartite = {}
AllDataArtistTrackSetBipartite = {}
AllDataTrackArtistBipartite = {}
AllDataTrackAlbumBipartite = {}
AllDataTrackNameBipartite = {}
AllDataAlbumNameBipartite = {}
AllDataAritstNameBipartite = {}
AllDataPidDescriptionBipartite = {}
# read data in
# First pass: walk every "mpd.slice.*.json" file (sorted for determinism)
# and fill the AllData* lookup tables above.
# NOTE: Python 2 syntax (print statement) -- this script is not Python 3.
for filename in sorted(filenames):
    if filename.startswith("mpd.slice.") and filename.endswith(".json"):
        fullpath = os.sep.join((path, filename))
        f = open(fullpath)
        js = f.read()
        f.close()
        mpd_slice = json.loads(js)
        for playlist in mpd_slice['playlists']:
            playlistId = str(playlist['pid'])
            playlistTracks = []
            playlistTitle = playlist['name']
            for track in playlist['tracks']:
                trackId = track['track_uri']
                trackName = track['track_name']
                trackArtistId = track['artist_uri']
                trackArtistName = track['artist_name']
                trackAlbumId = track['album_uri']
                trackAlbumName = track['album_name']
                playlistTracks.append(trackId)
                # setdefault keeps appending, so album/artist track lists
                # may contain duplicates when a track recurs in playlists.
                AllDataAlbumTrackSetBipartite.setdefault(
                    trackAlbumId, []).append(trackId)
                AllDataArtistTrackSetBipartite.setdefault(
                    trackArtistId, []).append(trackId)
                AllDataTrackArtistBipartite[trackId] = trackArtistId
                AllDataTrackAlbumBipartite[trackId] = trackAlbumId
                AllDataTrackNameBipartite[trackId] = trackName
                AllDataAlbumNameBipartite[trackAlbumId] = trackAlbumName
                AllDataAritstNameBipartite[trackArtistId] = trackArtistName
            AllDataPidTitleBipartite[playlistId] = playlistTitle
            AllDataPidTrackListBipartite[playlistId] = playlistTracks
            # 'description' is optional in the MPD schema.
            if 'description' in playlist:
                AllDataPidDescriptionBipartite[playlistId] = playlist['description']
            count = count + 1
            if count % 10000 == 0:
                print 'processed' + str(count)
# Persist the full-dataset lookup tables under data/bipartite_challenge.
write_to_pickle(bipartite_path, 'AllDataPidTitleBipartite.pkl',
                AllDataPidTitleBipartite)
write_to_pickle(bipartite_path, 'AllDataPidTrackListBipartite.pkl',
                AllDataPidTrackListBipartite)
# todo: check if used.. or delete
write_to_pickle(bipartite_path, 'AllDataTrackAlbumBipartite.pkl',
                AllDataTrackAlbumBipartite)
write_to_pickle(bipartite_path, 'AllDataTrackNameBipartite.pkl',
                AllDataTrackNameBipartite)
write_to_pickle(bipartite_path, 'AllDataAlbumNameBipartite.pkl',
                AllDataAlbumNameBipartite)
write_to_pickle(bipartite_path, 'AllDataAritstNameBipartite.pkl',
                AllDataAritstNameBipartite)
write_to_pickle(bipartite_path, 'AllDataPidDescriptionBipartite.pkl',
                AllDataPidDescriptionBipartite)
# NOTE(review): AllDataTrackArtistBipartite, AllDataAlbumTrackSetBipartite
# and AllDataArtistTrackSetBipartite are built above but never pickled here.
# Second pass: album->tracks and artist->tracks maps restricted to the
# first 750 slice files only (the training split), saved under
# data/bipartite_train.
bipartite_path = 'data/bipartite_train'
filenames = os.listdir(path)
count = 0
pid750 = []
AlbumTrackSetBipartite750 = {}
ArtistTrackSetBipartite750 = {}
for filename in sorted(filenames[:750]):
    if filename.startswith("mpd.slice.") and filename.endswith(".json"):
        fullpath = os.sep.join((path, filename))
        f = open(fullpath)
        js = f.read()
        f.close()
        mpd_slice = json.loads(js)
        for playlist in mpd_slice['playlists']:
            playlistId = str(playlist['pid'])
            pid750.append(playlistId)
            for track in playlist['tracks']:
                trackId = track['track_uri']
                trackName = track['track_name']
                trackArtistId = track['artist_uri']
                trackArtistName = track['artist_name']
                trackAlbumId = track['album_uri']
                trackAlbumName = track['album_name']
                AlbumTrackSetBipartite750.setdefault(
                    trackAlbumId, []).append(trackId)
                ArtistTrackSetBipartite750.setdefault(
                    trackArtistId, []).append(trackId)
            count = count + 1
            if count % 10000 == 0:
                print 'processed' + str(count)
write_to_pickle(bipartite_path, 'AlbumTrackSetBipartite750.pkl',
                AlbumTrackSetBipartite750)
write_to_pickle(bipartite_path, 'ArtistTrackSetBipartite750.pkl',
                ArtistTrackSetBipartite750)
# with normalized text
# Third pass: rebuild the per-track maps plus normalized-title lookups.
# NOTE(review): normalize_nameTitle() is neither defined nor imported in
# this file -- this loop raises NameError unless it is provided elsewhere.
# NOTE(review): despite the *750 variable names, this pass iterates over
# ALL slice files (unsorted), unlike the 750-slice pass above -- likely a
# bug; also, none of the dicts built here are pickled in the visible code.
filenames = os.listdir(path)
count = 0
pid750 = []
TrackIdTitle750 = {}
TitleTrackId750 = {}
TrackIdArtistName750 = {}
TrackIdAbumName750 = {}
TrackIdTrackName750 = {}
AlbumTrackSetBipartite750 = {}
ArtistTrackSetBipartite750 = {}
for filename in filenames:
    if filename.startswith("mpd.slice.") and filename.endswith(".json"):
        fullpath = os.sep.join((path, filename))
        f = open(fullpath)
        js = f.read()
        f.close()
        mpd_slice = json.loads(js)
        for playlist in mpd_slice['playlists']:
            playlistId = str(playlist['pid'])
            pid750.append(playlistId)
            pname = playlist['name']
            # Empty titles after normalization get a sentinel value.
            normpName = normalize_nameTitle(pname).strip()
            if normpName == '':
                normpName = 'emptyTitle'
            for track in playlist['tracks']:
                trackId = track['track_uri']
                trackName = track['track_name']
                trackArtistId = track['artist_uri']
                trackArtistName = track['artist_name']
                trackAlbumId = track['album_uri']
                trackAlbumName = track['album_name']
                TrackIdTitle750.setdefault(
                    trackId, []).append(normpName) # --Done
                TitleTrackId750.setdefault(
                    normpName, []).append(trackId) # --Done
                TrackIdArtistName750[trackId] = trackArtistName # --meta2
                TrackIdAbumName750[trackId] = trackAlbumName # --meta2
                TrackIdTrackName750[trackId] = trackName # --meta2
                AlbumTrackSetBipartite750.setdefault(
                    trackAlbumId, []).append(trackId) # done
                ArtistTrackSetBipartite750.setdefault(
                    trackArtistId, []).append(trackId) # done
            count = count + 1
            if count % 10000 == 0:
                print 'processed' + str(count)
| [
"t.iofciu@mytaxi.com"
] | t.iofciu@mytaxi.com |
c2ee78250d0f3860d8ec164c11ab88e734704bed | 8efd8bcd3945d88370f6203e92b0376ca6b41c87 | /problems100_200/151_Reverse_Words_in_a_String.py | 11b5357b6300152e2debfd6b3f1328822ffebdd4 | [] | no_license | Provinm/leetcode_archive | 732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5 | 3e72dcaa579f4ae6f587898dd316fce8189b3d6a | refs/heads/master | 2021-09-21T08:03:31.427465 | 2018-08-22T15:58:30 | 2018-08-22T15:58:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | #coding=utf-8
'''
151. Reverse Words in a String
Given an input string, reverse the string word by word.
Example:
Input: "the sky is blue",
Output: "blue is sky the".
Note:
A word is defined as a sequence of non-space characters.
Input string may contain leading or trailing spaces. However, your reversed string should not contain leading or trailing spaces.
You need to reduce multiple spaces between two words to a single space in the reversed string.
Follow up: For C programmers, try to solve it in-place in O(1) space.
'''
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
lst = [i for i in s.split(" ") if i]
return ' '.join(reversed(lst))
s = " the sky is blue"
ss = Solution()
r = ss.reverseWords(s)
print(r)
| [
"zhouxin@gmail.com"
] | zhouxin@gmail.com |
e348b3e0dfab26e0cc1f9c6a114ae59be50476c4 | 4c8755443320f0e8fde2718aec40c49ef27ab6fe | /{{cookiecutter.repo_name}}/cookiecutter_repo/utils/loaders.py | 0d90448aa34fd2244e0f3ef816996b8e56608d99 | [
"MIT"
] | permissive | ethman/cookiecutter-nussl | 28266f2b714607493016aa554794617e1cb431aa | 302df1bee74b13ff0e2c6725997f7b7fa26b32d5 | refs/heads/master | 2020-12-09T23:50:09.844838 | 2020-01-12T17:19:06 | 2020-01-12T17:19:06 | 233,449,725 | 0 | 0 | null | 2020-01-12T19:54:48 | 2020-01-12T19:54:47 | null | UTF-8 | Python | false | false | 586 | py | from .. import dataset, model
def load_dataset(dataset_class, dataset_folder, dataset_config):
DatasetClass = getattr(dataset, dataset_class)
dataset_instance = DatasetClass(dataset_folder, dataset_config)
return dataset_instance
def load_model(model_config):
model_class = model_config.pop('class', 'SeparationModel')
ModelClass = getattr(model, model_class)
if model_class == 'SeparationModel':
model_instance = ModelClass(model_config, extra_modules=model.extras)
else:
model_instance = ModelClass(model_config)
return model_instance | [
"prem@u.northwestern.edu"
] | prem@u.northwestern.edu |
e9efde3650a1b2f848407adf06cf593657f835a2 | 01fa58693f3cca73d37837bae6792b7c4c1c32f6 | /urls.py | 87c9ebc979e591309f7fd8dfb19f3034538bfcc1 | [] | no_license | Penchal9959/PayTM_Paymentgateway_Integration | 80af615504f021f90930313d1fb4f1be93d12d20 | 3bf4e9c88f4d52b6758d325afc41866a38b61b5e | refs/heads/master | 2023-01-10T10:52:19.949707 | 2020-11-14T07:48:55 | 2020-11-14T07:48:55 | 288,612,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | """paytm_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from payments import views
# Route table for the Paytm payment-gateway demo: each path maps to a view
# in payments/views.py (route purposes are inferred from the view names --
# confirm against views.py).
urlpatterns = [
    path('', views.home),
    path('payment/', views.payment),
    path('response/', views.response),
    path('admin/', admin.site.urls),
    path('failure/', views.failure),
    path('success/', views.success),
]
"noreply@github.com"
] | noreply@github.com |
e971626e0f9e825a4073e567b737aeff836e3c06 | 946fa06e50cf680f25af3560e84969b123c9b43b | /CopyTranslator.py | 963a3c7ae8ad080a6328f71bb372912b1bf0503f | [] | no_license | getnewday/copyTranslate-sample-pyqt | f0bc05f7f79f08f145003a42730d0b8ca25f7ea0 | f0ce44ec72238e23f1ee07f1b2404f1518e49a48 | refs/heads/master | 2023-02-09T05:20:21.518029 | 2020-12-27T04:41:48 | 2020-12-27T04:41:48 | 324,686,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,730 | py | import sys,math,time
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from translate import Translate
from settings import Settings
from utils import Utils
class CopyTranslator(QMainWindow):
    """Main window of a clipboard-watching translation tool.

    Left column: source text box (plainEdit) above the translated text box
    (translatedEdit).  Right column: behaviour checkboxes mirrored into a
    Settings object on every change, language selectors and action buttons.
    A 1-second QTimer polls the system clipboard and auto-translates new
    text when monitoring is enabled.
    """
    def __init__(self):
        """Build the UI, grab the clipboard and start the polling timer."""
        super(CopyTranslator, self).__init__()
        self.settings = Settings()
        self.initUI()
        self.clipboard = QApplication.clipboard()
        # Last clipboard content already handled, used to detect changes.
        self.temp_clipboard = self.clipboard.text()
        self.clipboard_timer = QTimer()
        self.clipboard_timer.timeout.connect(self.monitorClipboard)
        self.clipboard_timer.start(1000)
        self.utils = Utils()
    def initUI(self):
        """Create all widgets, lay them out and connect their signals."""
        self.setWindowTitle('CopyTranslator')
        self.outerLayout = QHBoxLayout()
        self.innerLayout1 = QVBoxLayout()
        self.innerLayout2 = QVBoxLayout()
        self.plainEdit = QTextEdit()
        self.translatedEdit = QTextEdit()
        self.innerLayout1.addWidget(self.plainEdit)
        self.innerLayout1.addWidget(self.translatedEdit)
        # Behaviour toggles; labels are Chinese, English given in comments.
        self.autoCopyCheckBox = QCheckBox('自动复制')  # "auto copy"
        self.incrementCopyCheckBox = QCheckBox('增量复制')  # "incremental copy"
        self.autoHideCheckBox = QCheckBox('自动隐藏')  # "auto hide"
        self.autoShowCheckBox = QCheckBox('自动显示')  # "auto show"
        self.autoFormatCheckBox = QCheckBox('自动格式化')  # "auto format"
        self.enableNotificationCheckBox = QCheckBox('启用通知')  # "enable notifications"
        self.dragCopyCheckBox = QCheckBox('拖拽复制')  # "drag copy"
        self.alwaysTopCheckBox = QCheckBox('总是置顶')  # "always on top"
        self.monitorClipboardCheckBox = QCheckBox('监听剪切板')  # "monitor clipboard"
        self.label1 = QLabel('源语言')  # "source language"
        self.label2 = QLabel('目标语言')  # "target language"
        # NOTE(review): the two comboboxes are never populated or read;
        # translate() hard-codes the target language instead.
        self.sourceCombobox = QComboBox()
        self.aimCombobox = QComboBox()
        # self.focusModeButton = QPushButton('专注模式')
        self.translateButton = QPushButton('翻译')  # "translate"
        self.settingButton = QPushButton('设置')  # "settings"
        self.innerLayout2.addWidget(self.autoCopyCheckBox)
        self.innerLayout2.addWidget(self.incrementCopyCheckBox)
        self.innerLayout2.addWidget(self.autoHideCheckBox)
        self.innerLayout2.addWidget(self.autoShowCheckBox)
        self.innerLayout2.addWidget(self.autoFormatCheckBox)
        self.innerLayout2.addWidget(self.enableNotificationCheckBox)
        self.innerLayout2.addWidget(self.dragCopyCheckBox)
        self.innerLayout2.addWidget(self.alwaysTopCheckBox)
        self.innerLayout2.addWidget(self.monitorClipboardCheckBox)
        self.innerLayout2.addWidget(self.label1)
        self.innerLayout2.addWidget(self.sourceCombobox)
        self.innerLayout2.addWidget(self.label2)
        self.innerLayout2.addWidget(self.aimCombobox)
        # self.innerLayout2.addWidget(self.focusModeButton)
        self.innerLayout2.addWidget(self.translateButton)
        self.innerLayout2.addWidget(self.settingButton)
        self.innerLayout2.addStretch(1)
        self.outerLayout.addLayout(self.innerLayout1)
        self.outerLayout.addLayout(self.innerLayout2)
        # Any checkbox change re-syncs the whole settings list.
        self.autoCopyCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.autoCopyCheckBox))
        self.incrementCopyCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.incrementCopyCheckBox))
        self.autoHideCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.autoHideCheckBox))
        self.autoShowCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.autoShowCheckBox))
        self.autoFormatCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.autoFormatCheckBox))
        self.enableNotificationCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.enableNotificationCheckBox))
        self.dragCopyCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.dragCopyCheckBox))
        self.alwaysTopCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.alwaysTopCheckBox))
        self.monitorClipboardCheckBox.stateChanged.connect(lambda: self.processCheckBox(self.monitorClipboardCheckBox))
        self.translateButton.clicked.connect(self.translate)
        mainWidget = QWidget()
        self.setCentralWidget(mainWidget)
        mainWidget.setLayout(self.outerLayout)
    def processCheckBox(self, e):
        """Sync every checkbox state into Settings, then re-apply flags.

        checkState() is 0 (unchecked) or 2 (checked); the partially-checked
        state 1 is deliberately skipped.  The boolean list order must match
        what Settings.setSettings() expects.
        """
        checkboxList = [self.autoCopyCheckBox,
                        self.incrementCopyCheckBox,
                        self.autoHideCheckBox,
                        self.autoShowCheckBox,
                        self.autoFormatCheckBox,
                        self.enableNotificationCheckBox,
                        self.dragCopyCheckBox,
                        self.alwaysTopCheckBox,
                        self.monitorClipboardCheckBox]
        settingsList = []
        for i in checkboxList:
            if i.checkState() == 0:
                settingsList.append(False)
            elif i.checkState() ==2:
                settingsList.append(True)
            else:
                pass
        self.settings.setSettings(settingsList)
        # self.settings.showSettings()
        # Check whether the "always on top" option is selected.
        if self.settings.alwaysTop:
            self.setWindowFlag(Qt.WindowStaysOnTopHint,True)
            self.show()
        else:
            self.setWindowFlag(Qt.WindowStaysOnTopHint,False)
            self.show()
    def monitorClipboard(self):
        """Timer slot: translate new clipboard text when monitoring is on.

        Skips text equal to the current translation output (i.e. text this
        tool itself copied).  With "incremental copy" enabled, new text is
        appended to the source pane instead of replacing it.
        """
        if self.clipboard.text() != self.temp_clipboard and self.settings.monitorClipboard:
            self.temp_clipboard = self.clipboard.text()
            if self.clipboard.text() == self.translatedEdit.toPlainText():
                return
            else:
                if self.settings.increment:
                    if self.plainEdit.toPlainText() != '':
                        temp_string = self.plainEdit.toPlainText() + ' ' + self.clipboard.text()
                    else:
                        temp_string = self.clipboard.text()
                    self.plainEdit.setText(temp_string)
                else:
                    self.plainEdit.setText(self.clipboard.text())
                self.translate()
        else:
            pass
    def translate(self):
        """Translate the source pane and show the result.

        NOTE(review): the target language is hard-coded to 'zh' and the
        language comboboxes are ignored -- presumably unfinished.
        """
        t = Translate()
        target = 'zh'
        plainText = self.plainEdit.toPlainText()
        if len(plainText) == 0:
            return
        else:
            # TODO: pre-process the raw source text.
            plainText = self.utils.processPlainText(plainText)
            translatedText = t.translated_content(plainText, target)
            self.translatedEdit.setText(translatedText)
            # Optionally put the translation back onto the clipboard.
            if self.settings.autoCopy:
                self.clipboard.setText(translatedText)
            else:
                pass
if __name__ == '__main__':
    # Application entry point: build the Qt application, show the main
    # window, and hand control to the Qt event loop until the user quits.
    application = QApplication(sys.argv)
    window = CopyTranslator()
    window.show()
    sys.exit(application.exec_())
| [
"756459364@qq.com"
] | 756459364@qq.com |
08f5bd7c3e007ee5cc2828e16065a3548eafdfe4 | a0194ea7a08913847c2cfc3a9c5262816b188c5d | /TVGL/exampleTVGL.py | 0c825edbcd9c70961845ea16edd6a7497c5e9f28 | [] | no_license | aboomer07/HighDimensional_TVGL | 39f93c832c22589c807e535903d0ab58deda9bea | 1db3ab011fb8a61bb861c8663ca37f74067cb9e6 | refs/heads/master | 2023-08-16T00:28:02.529881 | 2021-10-08T22:14:12 | 2021-10-08T22:14:12 | 411,791,968 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from TVGL import *
import numpy as np
# Synthetic 2-D Gaussian sample (50 draws) -- note it is immediately
# overwritten by the real dataset below, so these two lines are
# effectively dead code.
Cov = np.array([[5, 1], [1, 7]])
data = np.random.multivariate_normal(np.zeros(2), Cov, 50)
# Load the finance dataset and keep the first 30 rows / 10 columns.
data = np.genfromtxt('PaperCode/Datasets/finance.csv', delimiter=',')
data = data[0:30,0:10]
# TVGL hyper-parameters; exact semantics are defined by the TVGL package
# (presumably lamb = sparsity penalty, beta = temporal-consistency
# penalty -- confirm against the TVGL implementation/paper).
lamb = 2.5
beta = 12
lengthOfSlice = 10
thetaSet = TVGL(data, lengthOfSlice, lamb, beta, indexOfPenalty = 2, verbose=True)
print(thetaSet)
| [
"aboomer07@gmail.com"
] | aboomer07@gmail.com |
8b13f1478a12075ef8ff3736fa85e9679d78bfa6 | 85594f78a0bb3d790e08f3805add72d9bb2a6ab8 | /Codes/keyboard_teleop.py | 7b7ac8fc989140c87ad4a79e2e8fde86c02987c5 | [] | no_license | PatilVrush/TAL-BRABO-ROS-CONTROL | b5c8e89210a6eb7cbb746744e11a06cadedf4377 | 9e8a573c52d9b4454ace9b47d4d2af9501146b73 | refs/heads/master | 2020-09-04T01:53:28.765498 | 2019-12-13T23:24:22 | 2019-12-13T23:24:22 | 219,633,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,004 | py | #!/usr/bin/env python
# license removed for brevity
import rospy
import serial
import struct
import math
import time
import pygame, time
from pygame.locals import *
# A visible pygame window is required so the process receives keyboard
# focus/events; key repeat (1 ms delay, 1 ms interval) makes held keys
# stream events for continuous jogging.
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption('Pygame Keyboard Test')
pygame.mouse.set_visible(0)
pygame.key.set_repeat(1,1)
# NOTE(review): `global` at module level is a no-op in Python; run_program
# is simply a module attribute, toggled by key_pressed_listener() to exit.
global run_program
run_program=True
def write_Int16(data, ser):
    """Send a 16-bit value over *ser* as two bytes, most significant first."""
    high_byte = (data >> 8) & 0xFF
    low_byte = data & 0xFF
    ser.write(struct.pack('>B', high_byte))
    ser.write(struct.pack('>B', low_byte))
def write_Byte(data,ser):
ser.write(struct.pack('>B',(data)&0xFF))
def move_x_right():
    """Announce an X-axis jog to the right on stdout."""
    print("Moving X to Right")
def move_x_left():
    """Announce an X-axis jog to the left on stdout."""
    print("Moving X to Left")
def move_y_back():
    """Announce a Y-axis jog backwards on stdout."""
    print("Moving Y Back")
def move_y_front():
    """Announce a Y-axis jog forwards on stdout."""
    print("Moving Y Front")
def move_z_up():
    """Announce a Z-axis jog upwards on stdout."""
    print("Moving Z Up")
def move_z_down():
    """Announce a Z-axis jog downwards on stdout."""
    print("Moving Z Down")
def move_u_up():
    """Announce a U-axis jog upwards on stdout."""
    print("Moving U Up")
def move_u_down():
    """Announce a U-axis jog downwards on stdout."""
    print("Moving U Down")
def move_v_right():
    """Announce a V-axis jog to the right on stdout."""
    print("Moving V to Right")
def move_v_left():
    """Announce a V-axis jog to the left on stdout."""
    print("Moving V to Left")
#Declaring global variables
# NOTE(review): module-level `global` statements are no-ops; pulses and
# dir_ena are actually created inside key_pressed_listener().
global pulses
global dir_ena
def key_pressed_listener():
	"""Poll the keyboard via pygame and stream jog commands to an Arduino.

	Every ~10 ms the loop sends five 16-bit pulse counts (X, Y, Z, U, V
	axes) followed by one direction/enable byte over /dev/ttyACM0, then
	polls pygame for key presses.  Holding an axis key (x/y/z/u/v) plus an
	arrow key sets the pulse count and direction bits for the next frame.
	Press 'q' to leave the loop.
	"""
	#Initializing global variables
	global pulses
	global dir_ena
	global run_program
	pulses = [0,0,0,0,0]
	dir_ena = 0
	enable = 1  # NOTE(review): unused
	#Starting Serial Interface
	print ('Starting connection to arduino')
	ser = serial.Serial('/dev/ttyACM0', 115200, timeout=0.005)
	rospy.sleep(3.0)
	print('Connected to Arduino')
	print('You can now control Robot using KeyBoard')
	# NOTE(review): time.clock() was removed in Python 3.8; this script
	# targets an older (ROS-era) interpreter.
	while (run_program==True):
		tm = time.clock()
		# Frame: 5 x 16-bit pulse counts, then the direction/enable byte.
		write_Int16(pulses[0],ser)
		write_Int16(pulses[1],ser)
		write_Int16(pulses[2],ser)
		write_Int16(pulses[3],ser)
		write_Int16(pulses[4],ser)
		write_Byte(dir_ena,ser)
		pulses = [0,0,0,0,0]
		#dir_ena = 0
		n = ser.inWaiting()
		if(n>0):
			if(n==1):
				sensor_val = ord(ser.read(n))
				#print(sensor_val)
			else:
				#Clearing buffer in case of wrong data from arduino
				ser.read(n)
		# Key handling: each combination sets 5 pulses on one axis and a
		# direction/enable bit pattern (meaning of the 0b0010xxxx patterns
		# inferred from usage -- confirm against the Arduino firmware).
		for event in pygame.event.get():
			if (pygame.key.get_pressed()[pygame.K_q]):
				run_program=False
				print('Existing')  # (string typo: should read "Exiting")
			if (pygame.key.get_pressed()[pygame.K_LEFT] and pygame.key.get_pressed()[pygame.K_x]):
				#move_x_left()
				print('Moving X to Left')
				pulses[0]=5
				dir_ena = 0b00100001
			if (pygame.key.get_pressed()[pygame.K_RIGHT] and pygame.key.get_pressed()[pygame.K_x]):
				#move_x_right()
				print('Moving X to Right')
				pulses[0]=5
				dir_ena = 0b00100000
			if (pygame.key.get_pressed()[pygame.K_UP] and pygame.key.get_pressed()[pygame.K_y]):
				#move_y_back()
				print('Moving Y Back')
				pulses[1]=5
				dir_ena = 0b00100000
			if (pygame.key.get_pressed()[pygame.K_DOWN] and pygame.key.get_pressed()[pygame.K_y]):
				#move_y_front()
				print('Moving Y Front')
				pulses[1]=5
				dir_ena = 0b00100010
			if (pygame.key.get_pressed()[pygame.K_UP] and pygame.key.get_pressed()[pygame.K_z]):
				#move_z_up()
				print('Moving Z Up')
				pulses[2]=5
				dir_ena = 0b00100000
			if (pygame.key.get_pressed()[pygame.K_DOWN] and pygame.key.get_pressed()[pygame.K_z]):
				#move_z_down()
				print('Moving Z Down')
				pulses[2]=5
				dir_ena = 0b00100100
			if (pygame.key.get_pressed()[pygame.K_UP] and pygame.key.get_pressed()[pygame.K_u]):
				move_u_up()
				pulses[3]=5
				dir_ena = 0b00100000
			if (pygame.key.get_pressed()[pygame.K_DOWN] and pygame.key.get_pressed()[pygame.K_u]):
				move_u_down()
				pulses[3]=5
				dir_ena = 0b00101000
			if (pygame.key.get_pressed()[pygame.K_LEFT] and pygame.key.get_pressed()[pygame.K_v]):
				move_v_left()
				pulses[4]=5
				dir_ena = 0b00100000
			if (pygame.key.get_pressed()[pygame.K_RIGHT] and pygame.key.get_pressed()[pygame.K_v]):
				move_v_right()
				pulses[4]=5
				dir_ena = 0b00110000
		# Keep the frame period at roughly 10 ms.
		tm = time.clock()-tm
		time.sleep(0.01-tm)
if __name__ == '__main__':
try:
key_pressed_listener()
except rospy.ROSInterruptException:
pass
| [
"noreply@github.com"
] | noreply@github.com |
c0d4bd4492c238dfe6bf330055dd6f46d240b564 | c043029afde70a73e55f181ca2a86607b69f847b | /1_fundamentals/statistics_python/covid-19.py | 024ca2151c211e3d3be1341897ad7430f512ef29 | [] | no_license | viasmo1/edem-mda | 9918a2e1178ef661e34a0eb002342b994ea12e1b | 4bba5b1252bec405ead2d45f4eb6b49652a99584 | refs/heads/master | 2023-06-14T16:20:45.725054 | 2021-07-10T08:18:48 | 2021-07-10T08:18:48 | 296,832,549 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 14:42:01 2020
@author: vicent
EDEM
Master Data Analytics
Statistics with Python
Analysis Covid-19 in Spain
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %%
# Get working directory
print("Current Working directory: ", os.getcwd())
# Change working directory to where the Covid-19 CSV lives.
os.chdir("/Users/vicent/repos-github/edem-mda/statistics_python/data/covid-19_spain")
print("Data directory: ", os.getcwd())
print("\n")
# %%
# Read csv file with the national Covid-19 cases broken down by age range.
covid = pd.read_csv("nacional_covid19_rango_edad.csv", sep=",", decimal=".")
print("Covid-19 Dataframe info:")
print("Shape: ", covid.shape)
print("Columns: ", covid.columns)
print(covid.head())
print(covid.tail())
print("\n")
# QC OK
#%%
# Describe confirmed cases aggregated per day (numerical/quantitative variable).
covid_per_day = covid.groupby(["fecha"]).sum()
covid_per_day_desc = covid_per_day.casos_confirmados.describe()
print(covid_per_day_desc)
print(covid_per_day_desc[["mean", "std"]])
# Plot histogram
# Select variable to plot
x = covid["casos_confirmados"]
# Plot
plt.hist(x, bins=12, edgecolor="black", color="skyblue")
ticks = np.arange(0, 10000, 1000)
plt.xticks(ticks)
# BUG FIX: the title and axis labels were copy-pasted from a bicycle-rental
# analysis; relabel for the Covid-19 data actually plotted.
plt.title(
    "Figure 1. Daily confirmed Covid-19 cases in Spain\nby age range"
)
plt.xlabel("Number of confirmed cases")
plt.ylabel("Frequency (days)")
# Add text with main statistics to the plot: count, mean, std.
# BUG FIX: the original referenced the undefined name `wbr_desc` (NameError);
# the statistics computed above live in `covid_per_day_desc`.
n = covid_per_day_desc["count"]
m = covid_per_day_desc["mean"].round(1)
std = covid_per_day_desc["std"].round(1)
textstr = "$\mathrm{n}=%.0f$\n$\mathrm{mean}=%.1f$\n$\mathrm{std}=%.1f$" % (n, m, std)
props = dict(boxstyle="round", facecolor="white", lw=0.5)
# NOTE(review): the text position (0, 95) was tuned for the original
# bike-rental axes — confirm it still lands inside this plot.
plt.text(0, 95, textstr, bbox=props)
# Add vertical line in mean, -1std & +1std
plt.axvline(x=m, linewidth=1, linestyle="solid", color="red", label="Mean")
plt.axvline(x=m - std, linewidth=1, linestyle="dashed", color="green", label="-1std")
plt.axvline(x=m + std, linewidth=1, linestyle="dashed", color="green", label="+1std")
# Add legend
plt.legend(loc="upper left", bbox_to_anchor=(0.73, 0.98))
plt.show()
# %%
| [
"vicent.asenmol@gmail.com"
] | vicent.asenmol@gmail.com |
49dc43df0df8a4ad400a6dcaf0937a7ba4713c34 | 3c1c9d89ef6a2324fbd439d88a79fa45fca58b43 | /VAEs/refer-multi-vaes/main_fonts_betavae.py | bfc3927b375871430156711dd905a94337d09044 | [
"MIT",
"Python-2.0"
] | permissive | charlesxin97/fonts_interpolation | 746e4802c229267a246a0163016cfe395cc2e2bc | e4b9cea37778c4fb370a2849cbe31fe058920681 | refs/heads/master | 2022-12-15T20:08:46.347162 | 2020-09-21T22:04:41 | 2020-09-21T22:04:41 | 297,469,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,305 | py | import argparse
import logging
import sys
import os
from configparser import ConfigParser
from torch import optim
from disvae import init_specific_model, Trainer, Evaluator
from disvae.utils.modelIO import save_model, load_model, load_metadata
from disvae.models.losses import LOSSES, RECON_DIST, get_loss_f
from disvae.models.vae import MODELS
from utils.datasets import get_dataloaders, get_img_size, DATASETS
from utils.helpers import (create_safe_directory, get_device, set_seed, get_n_param,
get_config_section, update_namespace_, FormatterNoDuplicate)
from utils.visualize import GifTraversalsTraining
import torch
CONFIG_FILE = "hyperparam.ini"  # default hyper-parameter configuration file
RES_DIR = "results"             # root directory for experiment outputs
LOG_LEVELS = list(logging._levelToName.values())  # valid --log-level choices
# Experiment names that are not a simple <loss>_<dataset> combination.
ADDITIONAL_EXP = ['custom', "debug", "best_celeba", "best_dsprites"]
# All predefined experiments: the extras above plus every loss/dataset pair.
EXPERIMENTS = ADDITIONAL_EXP + ["{}_{}".format(loss, data)
                                for loss in LOSSES
                                for data in DATASETS]
def parse_arguments(args_to_parse):
    """Parse the command line arguments.

    Defaults come from the "Custom" section of CONFIG_FILE, except for a few
    values hard-coded below (checkpoint-every, epochs, batch-size) which
    override the config file for this fonts experiment.

    Parameters
    ----------
    args_to_parse: list of str
        Arguments to parse (splitted on whitespaces).

    Returns
    -------
    argparse.Namespace
        Parsed arguments, possibly overwritten by the predefined experiment
        sections of CONFIG_FILE when --experiment is not 'custom'.
    """
    default_config = get_config_section([CONFIG_FILE], "Custom")

    description = "PyTorch implementation and evaluation of disentangled Variational AutoEncoders and metrics."
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=FormatterNoDuplicate)

    # General options
    general = parser.add_argument_group('General options')
    general.add_argument('name', type=str,
                         help="Name of the model for storing and loading purposes.")
    general.add_argument('-L', '--log-level', help="Logging levels.",
                         default=default_config['log_level'], choices=LOG_LEVELS)
    general.add_argument('--no-progress-bar', action='store_true',
                         default=default_config['no_progress_bar'],
                         help='Disables progress bar.')
    general.add_argument('--no-cuda', action='store_true',
                         default=default_config['no_cuda'],
                         help='Disables CUDA training, even when have one.')
    general.add_argument('-s', '--seed', type=int, default=default_config['seed'],
                         help='Random seed. Can be `None` for stochastic behavior.')

    # Learning options
    training = parser.add_argument_group('Training specific options')
    # training.add_argument('--checkpoint-every',
    #                       type=int, default=default_config['checkpoint_every'],
    #                       help='Save a checkpoint of the trained model every n epoch.')
    # NOTE: checkpoint interval hard-coded to 5 (overrides the config default above).
    training.add_argument('--checkpoint-every',
                          type=int, default=5,
                          help='Save a checkpoint of the trained model every n epoch.')
    training.add_argument('-d', '--dataset', help="Path to training data.",
                          default=default_config['dataset'], choices=DATASETS)
    training.add_argument('-x', '--experiment',
                          default=default_config['experiment'], choices=EXPERIMENTS,
                          help='Predefined experiments to run. If not `custom` this will overwrite some other arguments.')
    # training.add_argument('-e', '--epochs', type=int,
    #                       default=default_config['epochs'],
    #                       help='Maximum number of epochs to run for.')
    # NOTE: epochs hard-coded to 30 (overrides the config default above).
    training.add_argument('-e', '--epochs', type=int,
                          default=30,
                          help='Maximum number of epochs to run for.')
    # training.add_argument('-b', '--batch-size', type=int,
    #                       default=default_config['batch_size'],
    #                       help='Batch size for training.')
    # NOTE: batch size hard-coded to 256 (overrides the config default above).
    training.add_argument('-b', '--batch-size', type=int,
                          default=256,
                          help='Batch size for training.')
    training.add_argument('--lr', type=float, default=default_config['lr'],
                          help='Learning rate.')

    # Model Options
    model = parser.add_argument_group('Model specfic options')
    model.add_argument('-m', '--model-type',
                       default=default_config['model'], choices=MODELS,
                       help='Type of encoder and decoder to use.')
    model.add_argument('-z', '--latent-dim', type=int,
                       default=default_config['latent_dim'],
                       help='Dimension of the latent variable.')
    model.add_argument('-l', '--loss',
                       default=default_config['loss'], choices=LOSSES,
                       help="Type of VAE loss function to use.")
    model.add_argument('-r', '--rec-dist', default=default_config['rec_dist'],
                       choices=RECON_DIST,
                       help="Form of the likelihood ot use for each pixel.")
    model.add_argument('-a', '--reg-anneal', type=float,
                       default=default_config['reg_anneal'],
                       help="Number of annealing steps where gradually adding the regularisation. What is annealed is specific to each loss.")
    model.add_argument('--which_epoch', default='latest',
                       help="which_epoch you will load as pretrain model.")

    # Loss Specific Options
    betaH = parser.add_argument_group('BetaH specific parameters')
    betaH.add_argument('--betaH-B', type=float,
                       default=default_config['betaH_B'],
                       help="Weight of the KL (beta in the paper).")

    betaB = parser.add_argument_group('BetaB specific parameters')
    betaB.add_argument('--betaB-initC', type=float,
                       default=default_config['betaB_initC'],
                       help="Starting annealed capacity.")
    betaB.add_argument('--betaB-finC', type=float,
                       default=default_config['betaB_finC'],
                       help="Final annealed capacity.")
    betaB.add_argument('--betaB-G', type=float,
                       default=default_config['betaB_G'],
                       help="Weight of the KL divergence term (gamma in the paper).")

    factor = parser.add_argument_group('factor VAE specific parameters')
    factor.add_argument('--factor-G', type=float,
                        default=default_config['factor_G'],
                        help="Weight of the TC term (gamma in the paper).")
    factor.add_argument('--lr-disc', type=float,
                        default=default_config['lr_disc'],
                        help='Learning rate of the discriminator.')

    btcvae = parser.add_argument_group('beta-tcvae specific parameters')
    btcvae.add_argument('--btcvae-A', type=float,
                        default=default_config['btcvae_A'],
                        help="Weight of the MI term (alpha in the paper).")
    btcvae.add_argument('--btcvae-G', type=float,
                        default=default_config['btcvae_G'],
                        help="Weight of the dim-wise KL term (gamma in the paper).")
    btcvae.add_argument('--btcvae-B', type=float,
                        default=default_config['btcvae_B'],
                        help="Weight of the TC term (beta in the paper).")

    # Learning options
    evaluation = parser.add_argument_group('Evaluation specific options')
    evaluation.add_argument('--is-eval-only', action='store_true',
                            default=default_config['is_eval_only'],
                            help='Whether to only evaluate using precomputed model `name`.')
    evaluation.add_argument('--is-metrics', action='store_true',
                            default=default_config['is_metrics'],
                            help="Whether to compute the disentangled metrcics. Currently only possible with `dsprites` as it is the only dataset with known true factors of variations.")
    evaluation.add_argument('--no-test', action='store_true',
                            default=default_config['no_test'],
                            help="Whether not to compute the test losses.`")
    evaluation.add_argument('--eval-batchsize', type=int,
                            default=default_config['eval_batchsize'],
                            help='Batch size for evaluation.')

    args = parser.parse_args(args_to_parse)

    # Predefined experiments overwrite the parsed defaults with the matching
    # config-file sections (first the common loss/dataset sections, then the
    # experiment-specific one when it exists).
    if args.experiment != 'custom':
        if args.experiment not in ADDITIONAL_EXP:
            # update all common sections first
            model, dataset = args.experiment.split("_")
            common_data = get_config_section([CONFIG_FILE], "Common_{}".format(dataset))
            update_namespace_(args, common_data)
            common_model = get_config_section([CONFIG_FILE], "Common_{}".format(model))
            update_namespace_(args, common_model)
        try:
            experiments_config = get_config_section([CONFIG_FILE], args.experiment)
            update_namespace_(args, experiments_config)
        except KeyError as e:
            if args.experiment in ADDITIONAL_EXP:
                raise e  # only reraise if didn't use common section

    return args
def main(args):
    """Main train and evaluation function.

    Trains the selected VAE (unless --is-eval-only), saves the model and its
    metadata under results/<name>, then optionally evaluates test losses
    and disentanglement metrics.

    Parameters
    ----------
    args: argparse.Namespace
        Arguments
    """
    # Console logging at the requested level.
    formatter = logging.Formatter('%(asctime)s %(levelname)s - %(funcName)s: %(message)s',
                                  "%H:%M:%S")
    logger = logging.getLogger(__name__)
    logger.setLevel(args.log_level.upper())
    stream = logging.StreamHandler()
    stream.setLevel(args.log_level.upper())
    stream.setFormatter(formatter)
    logger.addHandler(stream)

    set_seed(args.seed)
    device = get_device(is_gpu=not args.no_cuda)
    exp_dir = os.path.join(RES_DIR, args.name)
    logger.info("Root directory for saving and loading experiments: {}".format(exp_dir))

    if not args.is_eval_only:

        create_safe_directory(exp_dir, logger=logger)

        if args.loss == "factor":
            logger.info("FactorVae needs 2 batches per iteration. To replicate this behavior while being consistent, we double the batch size and the the number of epochs.")
            args.batch_size *= 2
            args.epochs *= 2

        # PREPARES DATA
        # NOTE: data root is hard-coded to a cluster path for the fonts dataset.
        train_loader = get_dataloaders(args.dataset,
                                       batch_size=args.batch_size,
                                       logger=logger, root='/lab/tmpig23b/u/andy/VAE_data_fonts/')
        # train_loader = get_dataloaders(args.dataset,
        #                                batch_size=args.batch_size,
        #                                logger=logger)
        logger.info("Train {} with {} samples".format(args.dataset, len(train_loader.dataset)))

        # PREPARES MODEL
        args.img_size = get_img_size(args.dataset)  # stores for metadata
        model = init_specific_model(args.model_type, args.img_size, args.latent_dim)
        # NOTE(review): with the default --which_epoch 'latest' this always
        # warm-starts from the hard-coded 5-epoch checkpoint below — confirm
        # that path exists before training.
        if args.which_epoch=='latest':
            model_dict = torch.load(os.path.join('./results/btcvae_fonts_betaVae_5epoch/', 'model.pt'))
            model.load_state_dict(model_dict)
            # pretrain_path = os.path.join(exp_dir, 'model.pt')
        logger.info('Num parameters in model: {}'.format(get_n_param(model)))

        # TRAINS
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
        model = model.to(device)  # make sure trainer and viz on same device
        gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)
        loss_f = get_loss_f(args.loss,
                            n_data=len(train_loader.dataset),
                            device=device,
                            **vars(args))
        trainer = Trainer(model, optimizer, loss_f,
                          device=device,
                          logger=logger,
                          save_dir=exp_dir,
                          is_progress_bar=not args.no_progress_bar,
                          gif_visualizer=gif_visualizer)
        trainer(train_loader,
                epochs=args.epochs,
                checkpoint_every=args.checkpoint_every,)

        # SAVE MODEL AND EXPERIMENT INFORMATION
        save_model(trainer.model, exp_dir, metadata=vars(args))

    if args.is_metrics or not args.no_test:
        model = load_model(exp_dir, is_gpu=not args.no_cuda)
        metadata = load_metadata(exp_dir)
        # TO-DO: currently uses train datatset
        test_loader = get_dataloaders(metadata["dataset"],
                                      batch_size=args.eval_batchsize,
                                      shuffle=False,
                                      logger=logger)
        loss_f = get_loss_f(args.loss,
                            n_data=len(test_loader.dataset),
                            device=device,
                            **vars(args))
        evaluator = Evaluator(model, loss_f,
                              device=device,
                              logger=logger,
                              save_dir=exp_dir,
                              is_progress_bar=not args.no_progress_bar)
        evaluator(test_loader, is_metrics=args.is_metrics, is_losses=not args.no_test)


if __name__ == '__main__':
    args = parse_arguments(sys.argv[1:])
    main(args)
| [
"845992541@qq.com"
] | 845992541@qq.com |
2ca0d2411629882a527b6fd48b184d6699705bea | 55bed1d892c8eecc91d2c49711f08871c11fdbe4 | /web_app/views.py | a49ab48c2fa095a21d1bfa273243cff4eda18606 | [] | no_license | RydhaValenda/Website-Using-Flask-Admin1 | 7c2a32c42ec918a10d2e389ef84f6a6d678f3ca3 | 2076769bd1ac0288e199f7079011ba129fe36af9 | refs/heads/master | 2022-06-20T21:55:59.236219 | 2020-05-03T09:24:20 | 2020-05-03T09:24:20 | 259,565,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | from flask import request, url_for
from flask_admin import AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
from werkzeug.utils import redirect
from wtforms import TextAreaField
from wtforms.widgets import TextArea
class CKEditorWidget(TextArea):
    """TextArea widget that tags the rendered field with the ``ckeditor``
    CSS class so the CKEditor JavaScript can attach to it."""

    def __call__(self, field, **kwargs):
        existing = kwargs.get('class')
        if existing:
            kwargs['class'] = existing + " ckeditor"
        else:
            # setdefault keeps an explicitly-set (but falsy) 'class' untouched.
            kwargs.setdefault('class', 'ckeditor')
        return super(CKEditorWidget, self).__call__(field, **kwargs)
class CKEditorField(TextAreaField):
    """Text-area form field rendered with the CKEditor widget."""
    widget = CKEditorWidget()
class SecureAdminIndexView(AdminIndexView):
    """Admin index page restricted to users holding the ``admin`` role."""

    def is_accessible(self):
        # Only users with the 'admin' role may see the admin index.
        return current_user.has_role('admin')

    def inaccessible_callback(self, name, **kwargs):
        # Anonymous visitors go to the login form (returning here afterwards);
        # authenticated non-admins are simply bounced back.
        if not current_user.is_authenticated:
            return redirect(url_for('security.login', next=request.full_path))
        return redirect(request.full_path)
class AdminOnlyView(ModelView):
    """Base model view that only the ``admin`` role can access."""

    def is_accessible(self):
        # Gate every CRUD page behind the 'admin' role.
        return current_user.has_role('admin')

    def inaccessible_callback(self, name, **kwargs):
        # Send anonymous users to login; redirect authenticated non-admins back.
        if not current_user.is_authenticated:
            return redirect(url_for('security.login', next=request.full_path))
        return redirect(request.full_path)
class PageModelView(AdminOnlyView):
    """Admin CRUD view for Page records, editing `contents` with CKEditor."""
    # Render the `contents` field with the rich-text CKEditor widget.
    form_overrides = dict(contents=CKEditorField)
    # Custom templates that load the CKEditor assets.
    create_template = 'admin/ckeditor.html'
    edit_template = 'admin/ckeditor.html'
    # Columns shown in the list view.
    column_list = ('title','url')
    # form_columns = ('title', 'contents', 'url')

# NOTE (translated from Indonesian): cannot extend from the existing template.
class MenuModelView(AdminOnlyView):
    """Admin CRUD view for Menu records; default Flask-Admin behaviour."""
    pass
"ryval.study@gmail.com"
] | ryval.study@gmail.com |
c7d28a4fe1f480a771b61ed94b499e75064b41c0 | abd5d19050e1c6eaf9fb0a760533177be6bdf8ad | /Chp 15. Webcam Face Detection/cam.py | f7009acd342a1f9addfaed7a416c246671767371 | [] | no_license | Dodant/practical-opencv | 9184899b7a0484bb8ca258de6e4906839c27861c | 12d2dbe80791a1b8fec684640fe3975c58522b7c | refs/heads/master | 2021-02-18T01:49:50.935088 | 2020-04-02T06:29:09 | 2020-04-02T06:29:09 | 245,146,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | from pyimagesearch.facedetector import FaceDetector
import imutils
import argparse
import cv2
ap = argparse.ArgumentParser()
# ap.add_argument("-f", "--face", required=True, help="Path to where the face cascade resides")
# ap.add_argument("-v", "--video", required=True, help="Path to the (optional) video file")
args = vars(ap.parse_args())

fd = FaceDetector()

# No video path supplied -> read from the default webcam.
if not args.get("video", False):
    cam = cv2.VideoCapture(0)
else:
    cam = cv2.VideoCapture(args["video"])

while True:
    (grabbed, frame) = cam.read()
    # A failed grab on a video file means we reached its end.
    if args.get("video") and not grabbed:
        break
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    frameClone = frame.copy()
    # Draw a green box around every detected face.
    for (x, y, w, h) in faceRects:
        cv2.rectangle(frameClone, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Faces", frameClone)
    # BUG FIX: waitKey(0) blocked forever on every frame, freezing the
    # stream until a key was pressed; waitKey(1) polls and keeps playing.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cam.release()
cv2.destroyAllWindows()
| [
"ohho0728@naver.com"
] | ohho0728@naver.com |
e4ef6f4daa034338122443da3d297af7f4609fc5 | 0a79ec5939314e206f6b34744927711278eca712 | /author_publication.py | d4e1a3acf3f5dc083c29cf1baa56da5fb89cccce | [] | no_license | jbragg/cscw16-bootstrapping | 9c6d69b8f5c6e54b190d29298447165e97c62f6e | bef2dcf5be09a1da4dafae262dc772b96f40bcd4 | refs/heads/master | 2021-03-24T10:19:11.952218 | 2018-08-15T04:59:34 | 2018-08-15T04:59:34 | 71,861,467 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py |
import sys
import urllib
import urllib2
import mechanize
from bs4 import BeautifulSoup
import csv
import json
import os
import re
#http://academic.research.microsoft.com/RankList?entitytype=2&topDomainID=2&subDomainID=5&last=0&start=1&end=100
def main():
    """Scrape Microsoft Academic's CS ranking pages and append each author's
    name, publication count and citation count to authors_publication.csv.

    Walks the ranking in pages of 100 authors for 2661 pages.
    NOTE: this is Python 2 code (print statements, urllib.urlopen).
    """
    # Append mode so repeated runs keep extending the CSV.
    fout = open('authors_publication.csv','a')
    writer = csv.writer(fout)
    start = 1
    end = 100
    for i in range(2661):
        print "-----" + str(i) + "-----"
        authors_url = "http://academic.research.microsoft.com/RankList?entitytype=2&topDomainID=2&subDomainID=5&last=0&start="+str(start)+"&end="+str(end)
        soup = BeautifulSoup(urllib.urlopen(authors_url).read())
        #print soup
        authors = soup.findAll('div',{'class':'content-narrow'})
        for author in authors:
            try:
                publications = 0
                citations = 0
                # 'Publications: N' / 'Citations: N' spans hold the counts.
                spans = author.findAll('span')
                for span in spans:
                    if 'Publications' in span.text:
                        publications = span.text.split(': ')[1]
                    elif 'Citations' in span.text:
                        citations = span.text.split(': ')[1]
                name = author.find('div',{'class':'title'}).text.strip().encode('ascii','ignore')
                print name, publications, citations
                writer.writerow([name,publications,citations])
            except:
                # NOTE(review): bare except silently skips authors whose markup
                # doesn't match; consider `except Exception` plus logging.
                print "something wrong in parsing"
        # Advance to the next page of 100 authors.
        start+=100
        end+=100

if __name__ == '__main__':
    main()
"wenhuang98@gmail.com"
] | wenhuang98@gmail.com |
e85cfa7c32176fd928719363cbe5642a5916c867 | d59fce89fd7e7e93e0f8ebfe677e8060e5c2144a | /loops/prime_check.py | 2dda2f9c13cf248522c5276f435ea40a3840555b | [
"MIT"
] | permissive | Rhoynar/pysel | a7cdb91063270c1f41fb88661702b0127c009931 | 7a283cfdcaea3b1e33e615d1e655b6bd1f23f09f | refs/heads/master | 2021-05-07T03:38:22.376371 | 2017-12-01T22:19:02 | 2017-12-01T22:19:02 | 110,787,353 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | # Check if a number is prime
num = int(raw_input('Enter number: '))
prime = True
for i in range(2, int(num/2)):
if num % i == 0:
prime = False
break
if prime:
print('Number {} is a prime number!'.format(num))
else:
print('Number {} is NOT a prime number!'.format(num))
| [
"harsh@rhoynar.com"
] | harsh@rhoynar.com |
0d6abab4f087b0d960991fcb7127878d6ac60af7 | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /neoOkpara/Phase-1/Day7/environmentVariables.py | 65ec4f88a10f613d3f23446bd640b865aa59094e | [
"MIT"
] | permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 110 | py | import os
print(os.environ['JAVA_HOME'])
for k, v in os.environ.items():
print('{0}={1}'.format(k, v))
| [
"emmox55@gmail.com"
] | emmox55@gmail.com |
84c2e76f5f17862565434bb732ecf476cd09b662 | 44ee04c2447612c00691bebf3bc9b2f4742bc4f2 | /contact/migrations/0002_delete_contact_page.py | fcfd44612b9315c5db7c2b4b706260393bd2e266 | [] | no_license | mohamedabouseif/Resume | 1bb52522b74ac358e9c71dc4df09a48a3af15a3d | 7a2abc4c1fd55b593842fdffa7301556e8717c3d | refs/heads/master | 2020-05-16T06:23:39.547341 | 2019-04-22T20:21:31 | 2019-04-22T20:21:31 | 175,219,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2019-03-14 13:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete `contact_page` model from the contact app."""

    dependencies = [
        ('contact', '0001_initial'),
    ]

    operations = [
        # Removes the contact_page table; its data is lost on migrate.
        migrations.DeleteModel(
            name='contact_page',
        ),
    ]
| [
"mohammedfaried364@gmail"
] | mohammedfaried364@gmail |
7c2df051bd8cf8322dd447539b9d462156b9db26 | a52ff0573d248b13d40c605f6d70b14be3fc696b | /cienciaDaComputacaoPython/lista3/fatorial.py | 315006f68ac44ab4c8e8ea3add307246a6e7f553 | [] | no_license | jlucasldm/coursera | 29007f21c3d0f4aae8652bde37748c3b083c7875 | 5a5b974c5ba296f96786c1124f6d03cd695a9a75 | refs/heads/master | 2022-12-05T23:41:45.036283 | 2020-08-10T01:27:44 | 2020-08-10T01:27:44 | 275,711,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # Escreva um programa que receba um número natural nn na
# entrada e imprima n!n! (fatorial) na saída. Exemplo:
# Digite o valor de n: 5
# 120
valor = int(input('Digite o valor de n: '))
produto = 1
while valor!=0:
produto*=valor
valor-=1
print(produto) | [
"49415623+jlucasldm@users.noreply.github.com"
] | 49415623+jlucasldm@users.noreply.github.com |
977abdd38f889b2f3c099b3ed2094d0a74a80e64 | d5f34a573256944cffd9f89e646f38cd3c4a8207 | /bin/module/Name.py | 90832e8376c5c9f4da430940bd1930045bcd7b95 | [
"MIT"
] | permissive | Skadisson/phoenix | 03df6064ec6215472721753c5d09040382966ab6 | 5026088a224b71a6c3d51b5f921cf45ef4ff7327 | refs/heads/master | 2021-12-20T05:35:06.295600 | 2021-12-07T14:50:28 | 2021-12-07T14:50:28 | 217,016,873 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | from bin.service import Logger, UserStorage
class Name:
def __init__(self):
self.logger = Logger.Logger()
def run(self, name):
result = {
'message': '',
'success': True,
'error': None
}
try:
user_storage = UserStorage.UserStorage()
try:
user_storage.rename_user(name)
result['message'] = 'Name geändert'
except Exception as e:
result['message'] = str(e)
result['success'] = False
except Exception as e:
result['error'] = str(e)
result['success'] = False
self.logger.add_entry(self.__class__.__name__, e)
return result
| [
"skadisson@mailbox.org"
] | skadisson@mailbox.org |
289ffbd3b38c89023985e5dd2c6fb39330486eeb | d215aa9d6a07a93c5d0f62181e19812cd5724599 | /onMoped/control/state.py | e630e49aedf08b91fa86d88c753795dd18cb4d7e | [] | no_license | petrosdeb/Group-Stierna | 4f471276c37b59dd14b960a7f0f64a11fd7d5146 | 0789ef4aba54cf8d1da6a2ed501977311678a7a9 | refs/heads/master | 2021-01-21T12:11:26.214735 | 2017-10-27T14:57:43 | 2017-10-27T14:57:43 | 102,047,524 | 4 | 0 | null | 2017-10-26T09:45:52 | 2017-08-31T21:36:15 | Python | UTF-8 | Python | false | false | 510 | py | """
Simple enum-class representing the 3 driving
states of the MOPED.
Used in core to determine what output values
are written
"""
from enum import Enum
class State(Enum):
    """Driving states of the MOPED; UNDEF marks an unrecognised input."""
    UNDEF = -1
    MANUAL = 0
    ACC = 1
    PLATOONING = 2


def char_to_state(char):
    """Map a command character ('m', 'a', 'p') to its State.

    Any other value yields State.UNDEF.
    """
    mapping = {
        'm': State.MANUAL,
        'a': State.ACC,
        'p': State.PLATOONING,
    }
    return mapping.get(char, State.UNDEF)
| [
"wamby@student.chalmers.se"
] | wamby@student.chalmers.se |
6659b4d8145e55d900dcabb7398db42929c560f4 | d75560d9acde4f1f6457898d8862b06ba5f8dd7b | /backend/msm_sgsjhsjh4803_de_13561/wsgi.py | 3cd1976c5b367e8dc49ef8d3516ab2cc510980f7 | [] | no_license | crowdbotics-apps/msm-sgsjhsjh4803-de-13561 | af6563f775832664041dbd8abc5d05af9d8d4a4f | 9364d828ffee0edfe68d263fce2b0a7cb2949039 | refs/heads/master | 2022-12-29T15:25:29.870944 | 2020-10-19T08:18:12 | 2020-10-19T08:18:12 | 305,263,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for msm_sgsjhsjh4803_de_13561 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'msm_sgsjhsjh4803_de_13561.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4642c3af95c961a08c5a33c1113ba484420e0887 | 5f51709bb1a8fd1e120e1aaaaaf655379d44e0e7 | /venv/bin/flask | 9f21f568c130daf42c7c3b7e8935124c0b5ed910 | [] | no_license | amruthasanthosh0/todomanager | f58d026fd300db759104a01a2a55837f56f05082 | b37c241718ef98bfe19fc6a49d91b8a8f55f121f | refs/heads/main | 2023-06-26T06:07:00.515518 | 2021-07-31T07:35:13 | 2021-07-31T07:35:13 | 391,285,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | #!/home/amrutha/todo/api/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Normalise argv[0]: strip the "-script.pyw"/".exe" suffix that
    # setuptools-generated Windows launchers append to the script name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"amrutha123santhosh@gmail.com"
] | amrutha123santhosh@gmail.com | |
423634247b347229c7434b61802502f83d82c1ff | b61163a79208557226f1868478693c314972d436 | /Project_MNIST_Miniflow/OR.py | f58b3e0119db55d28b81dcc783d31b8f207acbc1 | [] | no_license | nonlining/DeepLearningFoundation | e2fc5e5690e29b3ead68ca64d47e9fd8aa293edf | 5614c4fdad194661e7837a4bf109551bf9e8551a | refs/heads/master | 2022-11-12T09:46:33.886504 | 2017-12-12T14:37:59 | 2017-12-12T14:37:59 | 81,570,144 | 0 | 1 | null | 2022-10-26T13:56:56 | 2017-02-10T13:57:18 | HTML | UTF-8 | Python | false | false | 1,473 | py | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: MJWang
#
# Created: 02/11/2017
# Copyright: (c) MJWang 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
import numpy as np
from miniflow import *
def main():
    """Train a single sigmoid unit (logistic regression) on the OR truth
    table with the miniflow computation graph, then print predictions.

    NOTE: Python 2 code (print statements).
    """
    # OR truth table: four input pairs and their expected outputs.
    X_data = [[0.,0.], [1.,0.] ,[0.,1.] ,[1.,1.]]
    X_data = np.array(X_data)
    y_label = [[0., 1., 1., 1.]]
    y_label = np.array(y_label)
    # Random initial weights (2 inputs -> 1 output) and bias.
    W_layer1 = np.random.randn(2,1)
    b_layer1 = np.random.randn(1)
    # Build the graph: Linear -> Sigmoid, scored with MSE.
    X, y = Input(), Input()
    W1, b1 = Input(), Input()
    L1 = Linear(X, W1, b1)
    S1 = Sigmoid(L1)
    cost = MSE(y, S1)
    feed_dict = {
        X: X_data,
        y: y_label,
        W1: W_layer1,
        b1: b_layer1
    }
    graph = topological_sort(feed_dict)
    trainables = [W1, b1]
    epochs = 50000
    learning_rate=0.1
    # Full-batch gradient descent; report MSE every 10000 epochs.
    for i in range(epochs):
        forward_and_backward(graph)
        for t in trainables:
            partial = t.gradients[t]
            t.value -= learning_rate * partial
        if i%10000 == 0:
            print "epoch",i,"MSE",graph[-1].value
    print "W", W1.value
    print "B",b1.value[0]
    # Run the trained graph on all four input combinations.
    X.value = np.array([[1,1], [1,0], [0,1],[0,0]])
    res = predict(graph)
    for i in range(X.value.shape[0]):
        print X.value[i], "result is ",
        print '{:3.1f}'.format(res[i][0])

if __name__ == '__main__':
    main()
main() | [
"nonlining@gmail.com"
] | nonlining@gmail.com |
0051e201f83b7918036521db7a336b590e2f1a20 | e7291fab42533b944bf7b97df5f0f57428784c43 | /web2attack/w2a/modules/info/reverse_ip.py | f4a348db5dd907bdba5a2b638795342a1d06f4a5 | [] | no_license | carson0321/Py-Web-vul | 10a60a393b212ba243146af30d852fb7c656dc03 | 4a66a66096b7393aa048b18cf6c1a4737c60c4cd | refs/heads/master | 2022-11-23T01:25:00.958775 | 2017-03-12T13:45:19 | 2017-03-12T13:45:19 | 84,727,875 | 1 | 2 | null | 2020-07-24T07:22:43 | 2017-03-12T13:31:43 | Python | UTF-8 | Python | false | false | 10,563 | py | # modules/reverse_ip.py
#
# Copyright 2012 Kid :">
from w2a.core.templates import Templates
from w2a.lib.net.http import HTTP
from w2a.config import CONFIG
from w2a.lib.thread import Thread
from w2a.lib.dbconnect import IPInSerter, getDomain, getIP
from w2a.lib.file import FullPath, ReadFromFile, AppendFile
from re import findall,search
from urllib.parse import urlencode
from socket import gethostbyname, timeout
class Module(Templates):
    def __init__(self, *args, **kwargs):
        """Set up module metadata, user options and the table of
        reverse-IP lookup services used by worker()."""
        super().__init__(*args, **kwargs)
        ############################
        self.version = 1
        self.author = [ 'Kid' ]
        self.description = 'Get all domain in IP'
        # Detailed description is in Vietnamese (kept as-is: user-facing text).
        self.detailed_description = 'Module dùng để reverse ip từ 1 domain/ip\n'+\
        '- Có thể set nhiều domain/ip ngăn cách bằng dấu phẩy\n'+\
        '- Option CHECK sẽ kiểm tra kết quả cùng ip với domain/ip nhập vào\n'+\
        '- Option THREADS là thread để dùng Option CHECK\n'+\
        '- Option RHOSTLIST là reverse ip từ file chứa list domain/ip\n'+\
        'nếu không set RHOST thì sẽ get domain/list tù RHOSTLIST\n'
        ############################
        # User-tunable options read in run()/worker().
        self.options.addString('RHOST', 'IP/Domain to reverse(support : ip1,ip2...)', False)
        self.options.addBoolean('CHECK', 'check domain is in this IP ', default = True)
        self.options.addInteger('THREADS', 'thread check domain', default = 10)
        ############################
        self.advanced_options.addPath('RHOSTLIST', 'Path to domain list', default = CONFIG.DATA_PATH + '/victim.lst')
        self.advanced_options.addPath('OUTPUT', 'Output directory', default = CONFIG.TMP_PATH + '/reverseip/')
        ############################
        self.fmt_string = "Site: {0:<30} {1}"
        # One entry per lookup service: 'URL' is a template filled with the
        # target, 'REGEX' extracts domains from the response, optional 'DATA'
        # is a POST body template, and 'SP' points at a special handler
        # method instead of a regex.
        # NOTE(review): the 'SP' handlers (BingApi, eWhois, ...) are assumed
        # to be defined later in this class — confirm in the full file.
        self.SEARCHERS = [
            {
                'SITE' : "My-ip-neighbors.com",
                'URL' : "http://www.my-ip-neighbors.com/?domain=%s",
                'REGEX' : r'<td class="action"\starget="\_blank"><a\shref="http\:\/\/whois\.domaintools\.com\/(.*?)"\starget="\_blank"\sclass="external">Whois<\/a><\/td>',
            },
            {
                'SITE' : "Yougetsignal.com",
                'DATA' : 'remoteAddress=%s',
                'URL' : "http://www.yougetsignal.com/tools/web-sites-on-web-server/php/get-web-sites-on-web-server-json-data.php",
                'REGEX' : r'\["(.*?)",\s"?"\]',
            },
            # {
            #     'SITE' : "Whois.WebHosting.info",
            #     'URL' : "http://whois.webhosting.info/%s?pi=%s&ob=SLD&oo=DESC",
            #     'SP' : self.Whoiswebhosting,
            # },
            {
                'SITE' : "Ip-adress.com",
                'URL' : "http://www.ip-adress.com/reverse_ip/%s",
                'REGEX' : r'<td style\=\"font\-size\:8pt\">.\n\[<a href="\/whois\/(.*?)">Whois<\/a>\]',
            },
            {
                'SITE' : "Bing.com",
                'URL' : "http://api.search.live.net/xml.aspx?Appid=%s&query=ip:%s&Sources=Web&Version=2.0&Web.Count=50&Web.Offset=%s",
                'SP' : self.BingApi,
            },
            {
                'SITE' : "Ewhois.com",
                'URL' : "http://www.ewhois.com/",
                'SP' : self.eWhois,
            },
            {
                'SITE' : "Sameip.org",
                'URL' : "http://sameip.org/ip/%s/",
                'REGEX' : r'<a href="http:\/\/.*?" rel=\'nofollow\' title="visit .*?" target="_blank">(.*?)<\/a>',
            },
            {
                'SITE' : "Robtex.com",
                'URL' : "http://www.robtex.com/ajax/dns/%s.html",
                'REGEX' : r'[host|dns]\.robtex\.com\/(.*?)\.html',
            },
            {
                'SITE' : "Tools.web-max.ca",
                'URL' : "http://ip2web.web-max.ca/?byip=1&ip=%s",
                'REGEX' : r'<a href="http:\/\/.*?" target="_blank">(.*?)<\/a>',
            },
            {
                'SITE' : "DNStrails.com",
                'URL' : "http://www.DNStrails.com/tools/lookup.htm?ip=%s&date=recent",
                'REGEX' : r'<a\shref="lookup\.htm\?.*?=(.*?)&date=recent">',
            },
            {
                'SITE' : "Pagesinventory.com",
                'URL' : "http://www.pagesinventory.com/ip/%s.html",
                'REGEX' : r'<td><a\shref="/domain/.*?\.html">(.*?)</a></td>'
            },
            {
                'SITE' : "ViewDNS.info",
                'URL' : "http://viewdns.info/reverseip/?host=%s",
                'REGEX' : r'<tr><td>([a-zA-Z0-9\.\-_]{1,50}?\.[a-zA-Z0-9\.\-_]{1,50}?)</td>'
            }
        ]
def run(self, frmwk, args):
self.frmwk = frmwk
hosts = []
hosts = self.options['RHOST'].split(',') if self.options['RHOST'] else ReadFromFile(FullPath(self.advanced_options['HOSTLIST']))
for host in hosts:
if self.worker(host.strip()) and self.advanced_options['OUTPUT']:
output = FullPath(self.advanced_options['OUTPUT'] + '/' + self.ip + '.txt')
AppendFile(output, self.domains)
self.frmwk.print_line()
self.frmwk.print_success('Saved: ' + output)
def worker(self, rhost):
self.domains = []
self.victim = rhost
try:
self.ip = gethostbyname(self.victim)
except:
self.frmwk.print_error('Cann\' get IP Address')
return False
self.domains.append(self.victim)
if self.ip in CONFIG.IP_WHITE_LIST:
self.frmwk.print_error('Site down!')
return False
self.threadlist = []
self.frmwk.print_status("IP : %s" % self.ip)
self.frmwk.print_line("-------------------------------------------")
for searcher in self.SEARCHERS:
thread = Thread(target = self.reverseip, args = (searcher,))
self.threadlist.append(thread)
thread.start()
for thread in self.threadlist:
try:
thread.join(CONFIG.TIME_OUT)
if thread.isAlive():
thread.terminate()
except timeout:
self.frmwk.print_error('Exception Timeout')
pass
self.frmwk.print_line("-------------------------------------------\n")
#import from db
if self.frmwk.dbconnect:
self.frmwk.print_status('Getting subdomain in database')
cursor = self.frmwk.dbconnect.db.cursor()
iprow = getIP(cursor, self.ip)
if iprow:
dmrow = getDomain(cursor, ['domain_name'], {'ip_id_list': '%%!%s|%%' % iprow[0]})
for dm in dmrow:
self.domains.append(dm[0])
cursor.close()
self.domains = sortlistdomain(self.domains)
if self.options['CHECK']:
self.frmwk.print_status('Checking domain\'s in this IP')
checker = checkdomains(self.frmwk, self.ip, self.domains)
checker.checklistdomain(self.options['THREADS'])
self.domains = sorted(list(set(checker.response)))
if self.frmwk.dbconnect and self.options['CHECK']:
self.frmwk.print_status('Saving database!')
self.Saver()
self.frmwk.print_success('List domain:')
self.frmwk.print_line("----------------")
self.frmwk.print_line("\n".join(self.domains))
return True
def reverseip(self, searcher):
try:
if 'SP' not in searcher:
req = HTTP(searcher['URL'])
if 'DATA' in searcher:
data = req.Request(searcher['URL'], 'POST', searcher['DATA'] % self.ip)
else:
data = req.Request(searcher['URL'] % self.ip)
urls = findall(searcher['REGEX'],data)
self.frmwk.print_status(self.fmt_string.format(searcher['SITE'],urls.__len__()))
self.domains += urls
else:
searcher['SP'](searcher)
except Exception as e:
pass
def BingApi(self, searcher):
KEY = "49EB4B94127F7C7836C96DEB3F2CD8A6D12BDB71"
req = HTTP(searcher['URL'])
data = req.Request(searcher['URL'] % (KEY, self.ip, 0))
total = search('<web:Total>([0-9]+)<\/web:Total>',data).group(1)
page = int(int(total)/50 + 1)
for i in range(1, page):
data += req.Request(searcher['URL'] % (KEY, self.ip, i))
result = findall(r'<web:Url>(.+?)<\/web:Url>',data)
urls = []
for url in result:
urls.append(url.split('/',3)[2])
self.frmwk.print_status(self.fmt_string.format(searcher['SITE'],urls.__len__()))
self.domains += urls
def eWhois(self, searcher):
params = urlencode({'_method':'POST','data[User][email]':'r12xr00tu@gmail.com','data[User][password]':'RitX:::R1tX','data[User][remember_me]':'0'})
req = HTTP("http://www.ewhois.com/")
req.storecookie = True
req.rand_useragent = False
data = req.Request('http://www.ewhois.com/login/', 'POST', params)
data = req.Request("http://www.ewhois.com/export/ip-address/%s/" % self.ip)
urls = findall(r'"(.*?)","","","[UA\-[0-9]+\-[0-9]+|]",""',data)
self.frmwk.print_status(self.fmt_string.format(searcher['SITE'],urls.__len__()))
self.domains += urls
def Whoiswebhosting(self, searcher):
req = HTTP(searcher['URL'])
urls = []
data = req.Request(searcher['URL'] % (self.ip,1))
last = search(r'\?pi=([0-9]+)\&ob=SLD\&oo=DESC">\ \;\ \;Last\ \;>\>\;<\/a>', data)
url = findall(r'<td><a href="http:\/\/whois\.webhosting\.info\/.*?\.">(.*?)\.<\/a><\/td>', data)
urls += url
if last:
page = last.group(1)
for i in range(2,int(page)):
data = req.Request(searcher['URL'] % (self.ip,i))
if search('The security key helps us prevent automated searches', data):
break
url = findall(r'<td><a href="http:\/\/whois\.webhosting\.info\/.*?\.">(.*?)\.<\/a><\/td>', data)
urls += url
self.frmwk.print_status(self.fmt_string.format(searcher['SITE'],urls.__len__()))
self.domains += urls
else:
self.frmwk.print_status(self.fmt_string.format(searcher['SITE'],urls.__len__()))
self.domains += urls
def Saver(self):
listip = {self.ip : self.domains}
info = []
for ip in listip.keys():
ipinfo = {}
ipinfo['ip'] = ip
dminfo = []
for dm in listip[ip]:
dmi = {}
dmi['domain_name'] = dm
dminfo.append(dmi)
ipinfo['domains'] = dminfo
info.append(ipinfo)
IPInSerter(self.frmwk.dbconnect.db, info)
##################################
def realdomain(d):
if search('([0-9]*?)\.([0-9]*?)\.([0-9]*?)\.([0-9]*?)', d):
return False
return d.lower().replace('www.', '')
def sortlistdomain(domains):
result = []
for domain in domains:
domain = realdomain(domain)
if domain:
result.append(domain)
return sorted(list(set(result)))
##################################
class checkdomains:
def __init__(self,frmwk , ip, domains):
self.frmwk = frmwk
self.ip = ip
self.domains = domains
self.dmslen = len(domains)
self.response = []
self.threadlist = []
def checklistdomain(self,threads): # threading to check all domain
for i in range(threads):
thread = Thread(target = self.checkdomain, args = ())
self.threadlist.append(thread)
thread.start()
for thread in self.threadlist:
thread.join()
self.frmwk.print_line('')
def checkdomain(self): #check domain is true
while len(self.domains) > 0: #loop check if have domain in list checking
domain = self.domains.pop(0)
dip = ''
try:
dip = gethostbyname(domain)
except:
try:
dip = gethostbyname('www.' + domain)
except:
pass
if dip == self.ip:
self.response.append(domain)
percent = 100 - int((len(self.domains)*100)/self.dmslen)
self.frmwk.print_process(percent) | [
"carson.wang@droi.com"
] | carson.wang@droi.com |
63bd2a6cd2928deb0aa5f613631c38f4e76fbd4f | 5be5d40aef67d9aac7dd4705e537af64d2d9ef40 | /Ch7/restaurant_seating.py | 4b61d89b10545f180e23b024e27b43b18079ae7d | [] | no_license | ajfm88/PythonCrashCourse | 389ed422943d5caded10300b13fae5e4778f477c | e78a139edddd687257afc5ae7fd2b606d9568674 | refs/heads/master | 2023-06-16T16:13:46.296824 | 2021-07-07T06:59:36 | 2021-07-07T06:59:36 | 333,620,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | #Write a program that asks the user how many people are in their dinner group.
#If the answer is more than eight, print a message saying they'll have to wait for a table.
#Otherwise, report that their table is ready.
guests = input("How many people are there in your dinner group?: ")
guests = int(guests)
if guests > 8:
print(f"You have {guests} people in your party. You'll have to wait for a table.")
else:
print(f"You have {guests} people in your party. Your table is ready.") | [
"ajfoucaultmo@mail.usf.edu"
] | ajfoucaultmo@mail.usf.edu |
670480ff5dfa0c5afbda89a5114d85d8cdcdb17d | 998e595768b290c06823278bd9fd9f33f569bcef | /multilang/resources/test.py | abc72a44b7a3354d4a03b4dfbc0ead4ae7c46dfd | [] | no_license | devbas/realtime-analytics-public-transport | bfdd8e5531a1ca8d15cf9029f511901998eeab77 | 1bb05f9243ced39ca4e6a798758b2d441a339424 | refs/heads/master | 2021-02-26T11:33:28.620428 | 2020-03-06T21:44:05 | 2020-03-06T21:44:05 | 245,521,710 | 0 | 0 | null | 2020-03-06T21:44:06 | 2020-03-06T21:42:04 | Python | UTF-8 | Python | false | false | 89 | py | #!/usr/bin/env python
import sys
while True:
line = sys.stdin.readline()
print(line) | [
"bastiangeneugelijk@me.com"
] | bastiangeneugelijk@me.com |
f4f0aeaaed3e82afde0e0b60f90c9241c94be569 | ec66135cd72a7a7dc115a48402e71f31fde54330 | /database.py | 9208a9f4e5efe41f403e8568f4d8eeebad55edd7 | [] | no_license | ghiffaryr/Scraper-Berita | 1efa8c231e2dc2843f96835166d84a886b383d99 | f222c9f1bc30d89b2f36bb905a0c56fa56a73b8b | refs/heads/main | 2023-05-22T00:29:29.378378 | 2021-06-09T15:52:25 | 2021-06-09T15:52:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | import sqlalchemy
import os
def init_connection_engine():
db_config = {
# [START cloud_sql_mysql_sqlalchemy_limit]
# Pool size is the maximum number of permanent connections to keep.
"pool_size": 5,
# Temporarily exceeds the set pool_size if no connections are available.
"max_overflow": 2,
# The total number of concurrent connections for your application will be
# a total of pool_size and max_overflow.
# [END cloud_sql_mysql_sqlalchemy_limit]
# [START cloud_sql_mysql_sqlalchemy_backoff]
# SQLAlchemy automatically uses delays between failed connection attempts,
# but provides no arguments for configuration.
# [END cloud_sql_mysql_sqlalchemy_backoff]
# [START cloud_sql_mysql_sqlalchemy_timeout]
# 'pool_timeout' is the maximum number of seconds to wait when retrieving a
# new connection from the pool. After the specified amount of time, an
# exception will be thrown.
"pool_timeout": 30, # 30 seconds
# [END cloud_sql_mysql_sqlalchemy_timeout]
# [START cloud_sql_mysql_sqlalchemy_lifetime]
# 'pool_recycle' is the maximum number of seconds a connection can persist.
# Connections that live longer than the specified amount of time will be
# reestablished
"pool_recycle": 1800, # 30 minutes
# [END cloud_sql_mysql_sqlalchemy_lifetime]
}
if os.environ.get("DB_HOST"):
return init_tcp_connection_engine(db_config)
else:
return init_unix_connection_engine(db_config)
def init_unix_connection_engine(db_config):
# [START cloud_sql_mysql_sqlalchemy_create_socket]
# Remember - storing secrets in plaintext is potentially unsafe. Consider using
# something like https://cloud.google.com/secret-manager/docs/overview to help keep
# secrets secret.
db_user = os.environ["DB_USER"]
db_pass = os.environ["DB_PASS"]
db_name = os.environ["DB_NAME"]
db_socket_dir = os.environ.get("DB_SOCKET_DIR", "/cloudsql")
cloud_sql_connection_name = os.environ["CLOUD_SQL_CONNECTION_NAME"]
pool = sqlalchemy.create_engine(
# Equivalent URL:
# mysql+pymysql://<db_user>:<db_pass>@/<db_name>?unix_socket=<socket_path>/<cloud_sql_instance_name>
sqlalchemy.engine.url.URL.create(
drivername="mysql+pymysql",
username=db_user, # e.g. "my-database-user"
password=db_pass, # e.g. "my-database-password"
database=db_name, # e.g. "my-database-name"
query={
"unix_socket": "{}/{}".format(
db_socket_dir, # e.g. "/cloudsql"
cloud_sql_connection_name) # i.e "<PROJECT-NAME>:<INSTANCE-REGION>:<INSTANCE-NAME>"
}
),
**db_config
)
# [END cloud_sql_mysql_sqlalchemy_create_socket]
return pool
| [
"isamujahid.im@gmail.com"
] | isamujahid.im@gmail.com |
a663a571c791506a5bbea2e874df529dbed68ebb | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1907/py02/day01/cut_log.py | cceaf977d83d75e82696a61778603e0948c24313 | [] | no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 525 | py | import time
t9 = time.strptime('2019-05-15 09:00:00', '%Y-%m-%d %H:%M:%S')
t12 = time.strptime('2019-05-15 12:00:00', '%Y-%m-%d %H:%M:%S')
with open('mylog.txt') as fobj:
for line in fobj:
t = time.strptime(line[:19], '%Y-%m-%d %H:%M:%S')
if t > t12:
break
if t >= t9:
print(line, end='')
# with open('mylog.txt') as fobj:
# for line in fobj:
# t = time.strptime(line[:19], '%Y-%m-%d %H:%M:%S')
# if t9 <= t <= t12:
# print(line, end='')
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
1586616caf1191874f3dfdf0a908af9d390cbd3e | 54eeab2befaa4bf0d96a7bd18110900f8f32c766 | /other/sql/sqlite.py | cc06497fe5586ae73d672cbedf67aa19174a1c04 | [] | no_license | w8833531/mypython | 40239ada90426db73444ee54e6e79decc6c9fc9b | 45ed12a611efd33838766e7bd73840e6d8b73e28 | refs/heads/master | 2021-01-19T06:59:09.790525 | 2017-10-18T06:20:43 | 2017-10-18T06:20:43 | 87,513,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#由于SQLite的驱动内置在Python标准库中,所以我们可以直接来操作SQLite数据库。
#要操作关系数据库,首先需要连接到数据库,一个数据库连接称为Connection;
#连接到数据库后,需要打开游标,称之为Cursor,通过Cursor执行SQL语句,然后,获得执行结果。
#导入SQLite 驱动
import sqlite3
try:
# 连接到SQLite数据库
# 数据库文件是test.db
# 如果文件不存在,会自动在当前目录创建:
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
# cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
cursor.execute('insert into user (id, name) values(\'3\', \'Wu\')')
print cursor.rowcount
except sqlite3.Error as e:
print e
finally:
cursor.close()
conn.commit()
conn.close()
#在Python中操作数据库时,要先导入数据库对应的驱动,然后,通过Connection对象和Cursor对象操作数据。
#要确保打开的Connection对象和Cursor对象都正确地被关闭,否则,资源就会泄露。
try:
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
cursor.execute('select * from user')
values = cursor.fetchall()
print values
except sqlite3.Error as e:
print e
finally:
cursor.close()
conn.close()
| [
"w8833531@hotmail.com"
] | w8833531@hotmail.com |
7394010400225008bcf0ebefdea0242ca3765d3e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_96/1509.py | 985390ba9c8945569ce4096912a9a40962d7ecaf | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | from string import split
f1=open('B-large.in','r')
f2=open('out.txt','w')
t=int(f1.readline())
for i in range (t):
k=0
s=f1.readline()
data=list(map(int,s.split(' ')))
u=data[1]+0
for j in range(data[0]):
if data[j+3]==0 or data[j+3]==1:
if data[j+3]>=data[2]:
k+=1
elif data[1]==0:
if data[j+3] % 3==0 and data[j+3]//3>=data[2]:
k+=1
elif data[j+3]%3!=0 and data[j+3]//3+1>=data[2]:
k+=1
else:
if data[j+3]%3==1 and data[j+3]//3+1>=data[2]:
k+=1
elif data[j+3]%3==0 and data[j+3]//3+1==data[2] and u!=0:
u-=1
k+=1
elif data[j+3]%3==0 and data[j+3]//3>=data[2]:
k+=1
elif data[j+3]%3==2 and data[j+3]//3+2==data[2] and u!=0:
u-=1
k+=1
elif data[j+3]%3==2 and data[j+3]//3+1>=data[2]:
k+=1
f2.write ("Case #"+str(i+1)+": "+str(k)+"\n")
f1.close()
f2.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
873e739ab0eea0d3486e0e87c7319848bca001ad | 9cd923d5c0c3c8252f27ff21b84433aa7e09b60e | /user/tests.py | 6e1933ff6dee3f92ba34ba6cd8c91f6afb254329 | [] | no_license | Muxazuxa/freelance | 45d66708da44a2c20385502f9f0f00b0f830bfcf | b18e8052f818a1c05734116186b5724a4d7d6b25 | refs/heads/master | 2022-06-17T02:09:42.717739 | 2019-07-28T16:44:59 | 2019-07-28T16:44:59 | 190,109,963 | 0 | 0 | null | 2022-05-25T02:27:57 | 2019-06-04T01:49:19 | Python | UTF-8 | Python | false | false | 1,062 | py | from django.test import TestCase
from django.shortcuts import reverse
from django.test import Client
class UserProfileTest(TestCase):
def setUp(self):
self.client = Client()
self.client.post(reverse('users:list'), {'username': 'test', 'email': 'test@mail.ru', 'password': '12345'})
self.client.login(username='test', password='12345')
def test_user_login(self):
response = self.client.post('/api-auth/login/?next=/tasks/', {'username': 'test', 'password': '12345'})
self.assertEqual(response.status_code, 302)
def test_user_list(self):
response = self.client.get(reverse('users:list'))
self.assertEqual(response.status_code, 200)
def test_user_not_exist(self):
response = self.client.get('/users/10000')
self.assertEqual(response.status_code, 404)
def test_user_username_unique(self):
response = self.client.post(reverse('users:list'), {'username': 'test', 'email': 'test@mail.ru', 'password': '12345'})
self.assertEqual(response.status_code, 400)
| [
"muxazuxa@mail.ru"
] | muxazuxa@mail.ru |
837d8f52574c6bab972f540869f2bca52b2bf000 | 94c8dd4126da6e9fe9acb2d1769e1c24abe195d3 | /qiskit/circuit/library/boolean_logic/quantum_or.py | 0864affb7958edffe5050f3c8d54af82bdc515be | [
"Apache-2.0"
] | permissive | levbishop/qiskit-terra | a75c2f96586768c12b51a117f9ccb7398b52843d | 98130dd6158d1f1474e44dd5aeacbc619174ad63 | refs/heads/master | 2023-07-19T19:00:53.483204 | 2021-04-20T16:30:16 | 2021-04-20T16:30:16 | 181,052,828 | 1 | 0 | Apache-2.0 | 2019-06-05T15:32:13 | 2019-04-12T17:20:54 | Python | UTF-8 | Python | false | false | 3,664 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Implementations of boolean logic quantum circuits."""
from typing import List, Optional
from qiskit.circuit import QuantumRegister, QuantumCircuit
from qiskit.circuit.library.standard_gates import MCXGate
class OR(QuantumCircuit):
r"""A circuit implementing the logical OR operation on a number of qubits.
For the OR operation the state :math:`|1\rangle` is interpreted as ``True``. The result
qubit is flipped, if the state of any variable qubit is ``True``. The OR is implemented using
a multi-open-controlled X gate (i.e. flips if the state is :math:`|0\rangle`) and
applying an X gate on the result qubit.
Using a list of flags, qubits can be skipped or negated.
The OR gate without special flags:
.. jupyter-execute::
:hide-code:
from qiskit.circuit.library import OR
import qiskit.tools.jupyter
circuit = OR(5)
%circuit_library_info circuit
Using flags we can negate qubits or skip them. For instance, if we have 5 qubits and want to
return ``True`` if the first qubit is ``False`` or one of the last two are ``True`` we use the
flags ``[-1, 0, 0, 1, 1]``.
.. jupyter-execute::
:hide-code:
from qiskit.circuit.library import OR
import qiskit.tools.jupyter
circuit = OR(5, flags=[-1, 0, 0, 1, 1])
%circuit_library_info circuit
"""
def __init__(self, num_variable_qubits: int, flags: Optional[List[int]] = None,
mcx_mode: str = 'noancilla') -> None:
"""Create a new logical OR circuit.
Args:
num_variable_qubits: The qubits of which the OR is computed. The result will be written
into an additional result qubit.
flags: A list of +1/0/-1 marking negations or omissions of qubits.
mcx_mode: The mode to be used to implement the multi-controlled X gate.
"""
# store num_variables_qubits and flags
self.num_variable_qubits = num_variable_qubits
self.flags = flags
# add registers
qr_variable = QuantumRegister(num_variable_qubits, name='variable')
qr_result = QuantumRegister(1, name='result')
super().__init__(qr_variable, qr_result, name='or')
# determine the control qubits: all that have a nonzero flag
flags = flags or [1] * num_variable_qubits
control_qubits = [q for q, flag in zip(qr_variable, flags) if flag != 0]
# determine the qubits that need to be flipped (if a flag is > 0)
flip_qubits = [q for q, flag in zip(qr_variable, flags) if flag > 0]
# determine the number of ancillas
self.num_ancilla_qubits = MCXGate.get_num_ancilla_qubits(len(control_qubits), mode=mcx_mode)
if self.num_ancilla_qubits > 0:
qr_ancilla = QuantumRegister(self.num_ancilla_qubits, 'ancilla')
self.add_register(qr_ancilla)
else:
qr_ancilla = []
self.x(qr_result)
if len(flip_qubits) > 0:
self.x(flip_qubits)
self.mcx(control_qubits, qr_result[:], qr_ancilla[:], mode=mcx_mode)
if len(flip_qubits) > 0:
self.x(flip_qubits)
| [
"noreply@github.com"
] | noreply@github.com |
92863297ffa311b51dc9f97b666ff2df19874bba | 62f10bb996b14777001da4bd3f27428e0d0010d8 | /Flask_blog/flaskblog/__init__.py | 9f61ec9055774dc52d7552dfa913e7a02cd79949 | [] | no_license | Hardik-Ghori/Blog-using-Flask | d3ba0022ad79bd16d93af2cff665a086b62dbe3d | 9f0b87beffcd09c8e1a65ae1917f5f1c432bbb4c | refs/heads/main | 2023-03-11T02:51:46.440657 | 2021-03-02T17:05:26 | 2021-03-02T17:05:26 | 343,848,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
app = Flask(__name__)
app.config['SECRET_KEY'] = 'fe65090ff8a3280dec4e4f0010526fa6'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
from flaskblog import routes | [
"noreply@github.com"
] | noreply@github.com |
fb2c1991f3e1461bc18cd5e916c0b0efc55a91b4 | b50a760fa751b03afc9fcf7a938fc91a90b229c7 | /sensor/bin/surfids-check.py | e92b2ab8178c476bd3a66a5091c56d5811fc58ff | [] | no_license | cyberintelframework/sensor | 9e81fc7dea378c4be978689a758845c1191a5024 | b6ef69fe2bf4f4a26af505cdaba579539f2dffee | refs/heads/master | 2021-01-01T19:33:42.845958 | 2012-10-18T14:46:18 | 2012-10-18T14:46:18 | 38,752,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | #!/usr/bin/env python
import logging
import os
from sensor import altlog
from sensor import functions
from sensor import locations
cmd = "ifconfig -a | grep ^br | wc -l"
chk = os.popen(cmd).readline().rstrip()
if chk == "0":
logging.debug("Tunnel status: disabled")
else:
logging.debug("Tunnel status: enabled")
if os.path.exists(locations.OPENVPNPID):
pid = open(locations.OPENVPNPID).read().rstrip()
if pid.isdigit():
pid = int(pid)
if functions.checkPid(pid):
logging.debug("Tunnel (%s) status OK" % str(pid))
else:
# kill manager
if os.path.exists(locations.MANAGERPID):
mpid = open(locations.MANAGERPID).read().rstrip()
if mpid.isdigit():
functions.sensorDown()
mpid = int(mpid)
logging.info("Tunnel down, killing manager %s (1)" % str(mpid))
os.kill(mpid, 15)
else:
logging.debug("No manager pid file found")
else:
# kill manager
if os.path.exists(locations.MANAGERPID):
mpid = open(locations.MANAGERPID).read().rstrip().int()
if mpid.isdigit():
functions.sensorDown()
mpid = int(mpid)
logging.info("Tunnel down, killing manager %s (2)" % str(mpid))
os.kill(mpid, 15)
else:
logging.debug("No manager pid file found")
| [
"github@nabber.nl"
] | github@nabber.nl |
3a7a4e0fc74d98a3d4bb90e7220f2bca91eaa4d0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/97/usersdata/239/54617/submittedfiles/lecker.py | e3ba12e1ff6dc558621a8f2f17a217e1787cc426 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # -*- coding: utf-8 -*-
from __future__ import division
def lecker (lista):
cont=0
for i in range (0,len(lista),1
if i==0:
if lista[i]>lista[i+1]:
cont=cont+1
elif i==(len(lista)-1):
if lista[i]>lista[i-1]:
cont=cont+1
else:
if lista[i]>lista[i-1]:
if lista[i]>lista[i+1]:
cont=cont+1
if cont==1:
return True
else:
return False
n=int(input("Digite a quantidade de elementos da lista:"))
a=[]
for i in range (0,n,1):
valor=int(input("Digite o valor:"))
a.append(valor)
b=[]
for i in range (0,n,1):
valor=int(input("Digite o valor:"))
b.append(valor)
if lecker (a):
print("S")
else:
print("N")
if lecker (b):
print("S")
else:
print("N")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c4f8406bcf2bcb16312ae99e077dfae8f7b74118 | c16fb74fd2fd69d65cd813a3d23d5e7b61f9808f | /xueqiu/scrape_callback2_p3.py | 68dc6122a14157b3fe762554b9739ee400a06198 | [] | no_license | nightqiuhua/selenium_crawler_xueqiu | 9d3f9d10b2fdb5a479269e6d14cc52c97945ec31 | 0c68eeed7033c28f81def5f94351f2fbb42ca079 | refs/heads/master | 2020-03-20T17:54:53.943464 | 2018-06-16T13:36:45 | 2018-06-16T13:36:45 | 137,568,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import re
import pymongo
from pymongo import MongoClient,errors
class Scrape_Callback:
def __init__(self):
self.db = pymongo.MongoClient("localhost", 27017).cache
def __call__(self,html):
#html = Downloader(url).decode('utf-8')
data_regx = re.compile('<pre style="word-wrap: break-word; white-space: pre-wrap;">(.*?)</pre>',re.IGNORECASE)
data = data_regx.findall(html)
data_dict = eval(data[0])
for item in data_dict['stocks']:
item['_id'] = item['symbol']
try:
self.db.stocks.insert(item)
except errors.DuplicateKeyError as e:
pass
| [
"208408@whut.edu.cn"
] | 208408@whut.edu.cn |
b041c4045da2e483c82f6f9891353b1cf13e7757 | 344d025661d615a7740adf0482df5b66545302d2 | /NLG/table/IO.py | 1d757ca452586182c366042d454725c898f2b718 | [] | no_license | Lyz1213/msc_dissertation | 641806eda03cc0b943553c5709ced57a238b7c0d | 7f109bcf3cc17d2c669e18fb2ad3357aa4e0fc2e | refs/heads/main | 2023-07-03T19:59:19.893076 | 2021-08-16T10:22:16 | 2021-08-16T10:22:16 | 396,736,327 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,153 | py | import torch
import torchtext.data
import torchtext.vocab
from collections import Counter, defaultdict
from itertools import chain
from table.Tokenize import SrcVocab
# Special vocabulary tokens, each paired with its fixed integer index.
PAD_WORD = '[PAD]'
PAD = 0
BOS_WORD = '<s>'
BOS = 1
EOS_WORD = '</s>'
EOS = 2
# Layout "skip" marker: a decoder position whose token comes from the target
# stream rather than the layout stream (see get_lay_skip / get_tgt_mask).
SKP_WORD = '<sk>'
SKP = 3
UNK_WORD = '[UNK]'
UNK = 4
special_token_list = [PAD_WORD, BOS_WORD,
                      EOS_WORD, SKP_WORD, UNK_WORD]
# Structural/operator tokens of the alist language; these are the only target
# tokens allowed to NOT be copied from the source question (get_tgt_not_copy).
tgt_not_copy_list = ['{hlist', '{hvalue', '{hcount', '{hgreater', '{hequal', '{hless', '{his','{hmax','{hmin', 'o', 'p', 'v', 't', 's', '?x0', '?x1', '$y0', '$y1', '}', 'type', 'location', 'contains', 'starts_with', 'ov', 'sv','label']
def load_template(path):
    """Read a template file and return one whitespace-stripped template per line."""
    with open(path) as handle:
        return [row.strip() for row in handle.readlines()]
def get_lay_skip(src):
    """Split a target token sequence into a layout stream and a content stream.

    Structural tokens (alist operators starting with '{h' and the role keys
    below) stay in the layout; every other token becomes a '<sk>' placeholder
    in the layout and is moved to the content stream (with PAD_WORD filling
    the structural positions).  A structural token immediately following a
    'starts_with <role>' pair is treated as content, since there it is a
    literal value rather than a role key.

    Returns:
        (layout, content): two lists, each the same length as `src`.
    """
    role_keys = ('s', 'p', 'o', 't', '}', 'v', 'ov', 'sv')
    layout, content = [], []
    for pos, token in enumerate(src):
        is_structural = token.startswith('{h') or token in role_keys
        # literal value position: "... starts_with <role> <token>"
        follows_starts_with = (
            pos > 1 and src[pos - 1] in role_keys and src[pos - 2] == 'starts_with'
        )
        if not is_structural or follows_starts_with:
            layout.append('<sk>')
            content.append(token)
        else:
            layout.append(token)
            content.append(PAD_WORD)
    return layout, content
def list2str(alist, type):
    """Flatten a (lay or full) alist dict into a list of surface tokens.

    The dict is first serialized via `str(...)` and then re-split on the
    "', " separators of its repr; every '}' gets its own entry.  The two
    branches then tokenize each "key: value" term:

      * type == 'lay': nested 'sv: {...' / 'ov: {...' terms are split into a
        role token plus a fused head token; other terms have the ': '
        separator removed so key and value fuse into one token.
      * otherwise (full target): '{h...' heads keep their operator name,
        'ov'/'sv' terms are split into role + fused head, and plain terms are
        split into key followed by the individual value words ('?x0,?x1'
        expands to two variable tokens).  Returns None for a malformed term
        (no ': ' separator found).

    NOTE(review): this relies on the exact `str(dict)` repr format (quote
    placement, ", " separators); it assumes keys/values contain no
    single quotes beyond those added by repr — confirm against the
    alist producer before changing.
    """
    alist_list = []
    # Serialize the dict and cut it back into "key: value" terms; the
    # replace("}", ", }'") forces every closing brace onto its own term.
    alist = str(alist).strip().replace("}",", }'").replace('"','').split("', ")
    if type == 'lay':
        for term in alist:
            term = term.replace("'","")
            if term.startswith("sv: {") or term.startswith("ov: {"):
                # nested sub-alist: emit the role key, then fuse head key+value
                term = term.split(': ')
                alist_list.append(term[0].strip())
                alist_list.append(term[1].strip()+ term[2].strip())
            elif term == "}":
                alist_list.append(term)
            else:
                # fuse "key: value" into a single "keyvalue" layout token
                alist_list.append(term.replace(': ', ''))
    else:
        for term in alist:
            term = term.replace("'","")
            if term.strip().startswith("{h"):
                # operator head, e.g. "{hvalue: ..." -> "{hvalue..."
                alist_list.append(term.replace(": ",""))
            elif term.startswith("ov") or term.startswith("sv"):
                newterm = term.strip().split(': ')
                if len(newterm) == 3:
                    # "sv: {hX: v" -> role token + fused head token
                    alist_list.append(newterm[0].strip())
                    alist_list.append(newterm[1].strip() + newterm[2].strip())
                elif len(newterm) == 2:
                    alist_list.append(newterm[0])
                    alist_list.append(newterm[1])
                else:
                    print("something went wrong")
            elif term == "}":
                alist_list.append(term)
            else:
                term = term.strip().split(": ")
                if len(term) < 2:
                    # malformed term without a key/value separator
                    return None
                alist_list.append(term[0].strip())
                if term[1] == '?x0,?x1':
                    # projection over both variables -> two tokens
                    alist_list.append("?x0")
                    alist_list.append("?x1")
                else:
                    # multi-word values become one token per word
                    words = term[1].strip().split(" ")
                    for word in words:
                        alist_list.append(word)
    return alist_list
def get_parent_index(tk_list):
    """For each token, return the 1-based position of its enclosing '{...'
    opener (0 = top level), tracked with a stack of open braces.

    A trailing 0 is appended as the parent slot for the EOS (</s>) token, so
    the result has len(tk_list) + 1 entries.
    """
    parents = []
    open_positions = [0]  # sentinel 0: parent of the top level
    for pos, token in enumerate(tk_list):
        parents.append(open_positions[-1])
        if token.startswith('{'):
            # +1 because parent indices are 1-based (0 is reserved for "root")
            open_positions.append(pos + 1)
        elif token == '}':
            open_positions.pop()
    parents.append(0)  # slot for EOS
    return parents
def get_lay_index(lay_skip, data):
    """Map each decoder position onto the index of its layout token.

    A leading 0 covers the <s> start token.  '<sk>' (skip) positions map to
    0; every other position consumes the next layout index in order.  If the
    layout in `data['lay']` runs out before `lay_skip` does, the pair is
    inconsistent and None is returned.
    """
    lay_len = len(data['lay'])
    positions = [0]  # slot for the <s> start token
    next_lay = 0
    for token in lay_skip:
        if token == SKP_WORD:
            positions.append(0)
        else:
            positions.append(next_lay)
            if next_lay == lay_len:
                # more structural tokens than layout entries -> malformed pair
                return None
            next_lay += 1
    return positions
def get_lay_index_(lay_skip):
    """Variant of get_lay_index without the layout-length consistency check.

    Returns a list with a leading 0 (for <s>), 0 for each '<sk>' position,
    and consecutive layout indices for the structural positions.
    """
    positions = [0]
    structural_seen = 0
    for token in lay_skip:
        if token != SKP_WORD:
            positions.append(structural_seen)
            structural_seen += 1
        else:
            positions.append(0)
    return positions
def get_tgt_loss(line, mask_target_loss):
    """Build the target sequence used for the loss.

    Tokens at '<sk>' layout positions are always kept; tokens at structural
    positions are replaced by PAD_WORD when `mask_target_loss` is set (so
    they are excluded from the loss), otherwise kept as-is.
    """
    return [
        tgt_tok if lay_tok == SKP_WORD or not mask_target_loss else PAD_WORD
        for tgt_tok, lay_tok in zip(line['tgt_'], line['lay_skip'])
    ]
def get_tgt_mask(lay_skip):
    """Per-position embedding selector for the decoder.

    0: use the layout encoding vector; 1: use the target word embedding.
    A leading 1 covers the <s> start token.
    """
    mask = [1]
    for token in lay_skip:
        mask.append(1 if token == SKP_WORD else 0)
    return mask
def get_tgt_not_copy(src, tgt):
    """Mark which target tokens must be generated rather than copied.

    Tokens also present in `src` are copyable and are replaced by UNK_WORD;
    tokens absent from `src` are kept, but only if they belong to the
    structural vocabulary `tgt_not_copy_list` — otherwise the example is
    unusable and None is returned.
    """
    copyable = set(src)
    result = []
    for token in tgt:
        if token in copyable:
            result.append(UNK_WORD)
        elif token in tgt_not_copy_list:
            result.append(token)
        else:
            # a non-structural token that cannot be copied -> reject example
            return None
    return result
def get_copy_ext_wordpiece(src, wordpiece_index):
    """Align whole-word source tokens with a word-piece continuation mask.

    For each entry in `wordpiece_index`, a truthy flag (continuation piece)
    yields PAD_WORD; a falsy flag (word start) consumes the next token from
    `src`.  Removed a dead `pass` statement and leftover debug comments from
    the original.

    Raises:
        IndexError: if `src` has fewer tokens than word-start flags.
    """
    padded = []
    src_pos = 0
    for is_continuation in wordpiece_index:
        if is_continuation:
            padded.append(PAD_WORD)
        else:
            padded.append(src[src_pos])
            src_pos += 1
    return padded
def create_lay_alist(alist):
    """Derive a layout alist from a full alist, recursively.

    Returns (lay_alist, kept_alist):
      * 'h' keys keep their operator value in both outputs;
      * 'v' keys record the comma-separated value count in the layout;
      * variable-named string keys ('?x...'/'$y...') record only their
        space-separated word count and are dropped from kept_alist;
      * other string keys record their word count; when the value contains a
        variable, the value is remembered so a later dict-valued entry under
        that variable is recursed into under the key '<owner>v'.  Dict-valued
        entries whose key was never seen as a variable value are dropped.
    """
    variable_owner = {}
    lay_alist = {}
    kept = {}
    for key, value in alist.items():
        if isinstance(value, str):
            if key == 'h':
                lay_alist[key] = value
                kept[key] = value
            elif key == 'v':
                lay_alist[key] = str(len(value.strip().split(',')))
                kept[key] = value
            elif key.startswith('?') or key.startswith('$'):
                # variable-named key: only the word count survives, and the
                # entry is intentionally omitted from kept
                lay_alist[key] = str(len(value.strip().split(' ')))
            else:
                if '?' in value or '$' in value:
                    # remember which role owns this variable for recursion
                    variable_owner[value] = key
                lay_alist[key] = str(len(value.strip().split(' ')))
                kept[key] = value
        elif key in variable_owner:
            nested_key = variable_owner[key] + 'v'
            lay_alist[nested_key], kept[nested_key] = create_lay_alist(value)
    return lay_alist, kept
def get_bart(vocab, src, tgt_list):
    """Tokenize the example with the BART tokenizer.

    Note the inversion in naming (kept from the call sites): the returned
    "src" ids are built from the joined target tokens, while the decoder
    input ids are built from the source question string.

    Returns:
        (bart_src_ids, bart_dec_inp_ids)
    """
    joined_target = ' '.join(tgt_list)
    encoder_ids = vocab.tokenizer(joined_target)['input_ids']
    decoder_ids = vocab.tokenizer(src)['input_ids']
    return encoder_ids, decoder_ids
def modify_data(data, bart_vocab):
    """Normalize one raw example dict in place and attach derived fields.

    Builds src/tgt/layout streams, parent indices, copy masks, and BART
    tokenizations.  Returns the enriched dict, or None when the example is
    unusable (empty/list-valued alist, malformed alist term, or a target
    token that can neither be copied nor generated).
    """
    # Reject alists containing lists or the empty alist.
    if data['alist'].__contains__('[') or data['alist'] == '{}':
        return None
    else:
        if data['sparql'] != 'lol':
            # Strip punctuation/operators from the natural-language question.
            data['src_'] = data.pop('query').lower().replace("{","").replace("?", "").replace("}","").replace("(max","").replace("(","").replace("(min","").replace(",", "").replace(")","").replace("<", "").replace(">","").replace("'s","").replace('"','').replace("'","").strip()
            data['alist'] = data['alist'].lower()
        else:
            data['src_'] = data.pop('query').strip()
        data['src'] = data['src_'].split(' ')
        data['src'] = [token for token in data['src'] if token != ""]
        # SECURITY NOTE(review): eval() on the serialized alist — safe only if
        # the data files are trusted; consider ast.literal_eval instead.
        lay_alist, alist = create_lay_alist(eval(data['alist']))
        data['tgt_'] = list2str(alist, 'tgt')
        data['lay'] = list2str(lay_alist, 'lay')
        if data['tgt_'] == None or data['lay'] == None:
            return None
        data['lay_skip'], data['tgt'] = get_lay_skip(data['tgt_'])
        data['lay_parent_index'] = get_parent_index(data['lay'])
        data['tgt_parent_index'] = get_parent_index(data['tgt_'])
        data['tgt_not_copy'] = get_tgt_not_copy(data['src'],data['tgt_'])
        data['bart_src'],data['bart_dec_inp'] = get_bart(bart_vocab,data['src_'], data['tgt_'])
        # Decoder-side token strings recovered from the BART ids (overwrites
        # the placeholder 'tgt' produced by get_lay_skip above).
        data['tgt'] = bart_vocab.tokenizer.convert_ids_to_tokens(data['bart_dec_inp'])
        data['attention_mask'] = [1 for token in data['bart_src']]
        data['tgt_mask'] = [1 for token in data['bart_dec_inp']]
        if data['tgt_not_copy'] == None:
            return None
        return data
def read_txt(path):
    """Parse the annotated question file at *path* into processed records.

    The file holds records as consecutive 'Q:', 'SPARQL:', 'ALIST:' and
    'TEMPLATE:' lines; once all four fields are seen the record is run
    through modify_data() and kept only if that succeeds.

    Returns a list of processed record dicts.
    """
    datas = []
    bart_vocab = SrcVocab()
    data = {}
    # Stream the file line by line instead of readlines() (no need to load
    # the whole file); the old `if line is not None` guard was dead code —
    # file iteration never yields None.
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line.startswith('Q'):
                data['query'] = line.split('Q:')[1].strip()
            elif line.startswith('SPARQL'):
                data['sparql'] = line.split('SPARQL:')[1].strip().lower()
            elif line.startswith('ALIST'):
                data['alist'] = line.split('ALIST:')[1].strip()
            elif line.startswith('TEMPLATE:'):
                data['template'] = line.split('TEMPLATE: ')[1].strip()
            if all(k in data for k in ('query', 'sparql', 'alist', 'template')):
                data = modify_data(data, bart_vocab)
                if data is not None:
                    datas.append(data)
                data = {}
    return datas
def __getstate__(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def __setstate__(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
# Monkey-patch torchtext's Vocab so it survives pickling: the stock class
# keeps `stoi` as a defaultdict whose lambda default breaks pickle, so we
# install the plain-dict snapshot/restore helpers defined above.
torchtext.vocab.Vocab.__getstate__ = __getstate__
torchtext.vocab.Vocab.__setstate__ = __setstate__
def filter_counter(freqs, min_freq):
    """Return a Counter keeping only the entries of *freqs* whose count is
    at least *min_freq*; a min_freq of None disables the filtering."""
    return Counter({token: count
                    for token, count in freqs.items()
                    if min_freq is None or count >= min_freq})
def merge_vocabs(vocabs, min_freq=0, vocab_size=None):
    """
    Merge individual vocabularies (assumed to be generated from disjoint
    documents) into a larger vocabulary.
    Args:
        vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
        vocab_size: `int` the final vocabulary size. `None` for no limit.
    Return:
        `torchtext.vocab.Vocab`
    """
    # Sum the per-vocab frequency counters after dropping rare tokens.
    merged = sum((filter_counter(vocab.freqs, min_freq) for vocab in vocabs),
                 Counter())
    return torchtext.vocab.Vocab(merged,
                                 specials=list(special_token_list),
                                 max_size=vocab_size, min_freq=min_freq)
def _tgt_copy_ext(line):
    """For each target token, keep it when it also appears in the source
    (i.e. it is copyable); substitute UNK_WORD otherwise.

    Fix: the original also built an unused `mask_list`, removed here.
    """
    src_set = set(line['src'])
    return [tk_tgt if tk_tgt in src_set else UNK_WORD
            for tk_tgt in line['tgt_']]
def _tgt_not_copy(line):
    """Inverse of _tgt_copy_ext: mask copyable target tokens with UNK_WORD
    and keep only the generated (non-copy) ones."""
    src_set = set(line['src'])
    masked = []
    for tk_tgt in line['tgt_']:
        if tk_tgt in src_set:
            masked.append(UNK_WORD)
            continue
        if tk_tgt not in tgt_not_copy_list:
            # Diagnostic: a generated token missing from the known
            # non-copy vocabulary.
            print('tk_tgt is {}, src is {}, tgt is {}'.format(tk_tgt, line['src'], line['tgt_']))
        masked.append(tk_tgt)
    return masked
def join_dicts(*args):
    """
    args: dictionaries with disjoint keys
    returns: a single dictionary that has the union of these keys
    """
    merged = {}
    for d in args:
        merged.update(d)
    return merged
class OrderedIterator(torchtext.data.Iterator):
    """Iterator whose batches are pooled (train) or sorted (eval) by the
    dataset's sort key to minimize padding."""
    def create_batches(self):
        if self.train:
            # Bucket similarly-sized examples together, shuffling buckets.
            self.batches = torchtext.data.pool(
                self.data(), self.batch_size,
                self.sort_key, self.batch_size_fn,
                random_shuffler=self.random_shuffler)
        else:
            # Deterministic evaluation: sort each batch by the sort key.
            self.batches = [sorted(batch, key=self.sort_key)
                            for batch in torchtext.data.batch(
                                self.data(), self.batch_size,
                                self.batch_size_fn)]
class TableDataset(torchtext.data.Dataset):
    """Defines a dataset for machine translation."""
    @staticmethod
    def sort_key(ex):
        "Sort in reverse size order"
        # Longest BART source sequences first.
        return -len(ex.bart_src)
    def __init__(self, path, fields, opt, **kwargs):
        """
        Create a TranslationDataset given paths and fields.
        anno: location of annotated data / js_list
        filter_ex: False - keep all the examples for evaluation (should not have filtered examples); True - filter examples with unmatched spans;
        """
        # *path* is either a file path (parse it) or an already-parsed
        # list of record dicts.
        if isinstance(path, str):
            datas = read_txt(path)
        else:
            datas = path
        # Build one {field_name: value} example stream per field.
        bart_src_data = self._read_annotated_file(datas, 'bart_src')
        bart_src_examples = self._construct_examples(bart_src_data, 'bart_src')
        bart_dec_inp_data = self._read_annotated_file(datas, 'bart_dec_inp')
        bart_dec_inp_examples = self._construct_examples(bart_dec_inp_data, 'bart_dec_inp')
        attention_mask_data = self._read_annotated_file(datas, 'attention_mask')
        attention_mask_examples = self._construct_examples(attention_mask_data, 'attention_mask')
        # tgt_loss_data = self._read_annotated_file(
        #     opt, datas, 'tgt_loss', filter_ex)
        # tgt_loss_examples = self._construct_examples(tgt_loss_data, 'tgt_loss')
        #
        # tgt_loss_masked_data = self._read_annotated_file(
        #     opt, js_list, 'tgt_loss_masked', filter_ex)
        # tgt_loss_masked_examples = self._construct_examples(
        #     tgt_loss_masked_data, 'tgt_loss_masked')
        # examples: one for each src line or (src, tgt) line pair.
        # Merge the per-field dicts into one dict per example.
        examples = [join_dicts(*it) for it in
                    zip(bart_src_examples, bart_dec_inp_examples, attention_mask_examples)]
        # the examples should not contain None
        len_before_filter = len(examples)
        examples = list(filter(lambda x: all(
            (v is not None for k, v in x.items())), examples))
        len_after_filter = len(examples)
        # num_filter: how many examples were dropped (kept for debugging).
        num_filter = len_before_filter - len_after_filter
        #print("examples is ", [[example['bart_src'], example['bart_dec_inp']]for example in examples])
        #print('exmaples is ', examples)
        # Peek at the first to see which fields are used.
        ex = examples[0]
        keys = ex.keys()
        # Pair each used key (plus the synthetic "indices" field) with its
        # torchtext Field object.
        fields = [(k, fields[k])
                  for k in (list(keys) + ["indices"])]
        def construct_final(examples):
            # Lazily convert each merged dict into a torchtext Example,
            # appending the example index as the "indices" value.
            for i, ex in enumerate(examples):
                # NOTE(review): this first fromlist() result `s` is unused —
                # the Example is built twice per item; confirm it can be
                # dropped.
                s = torchtext.data.Example.fromlist(
                    [ex[k] for k in keys] + [i],
                    fields)
                #print("exmaple is {}, preprocessed is {}".format(examples[i]['tgt_loss'], s.tgt_loss))
                yield torchtext.data.Example.fromlist(
                    [ex[k] for k in keys] + [i],
                    fields)
        def filter_pred(example):
            # Keep every example (filtering already happened above).
            return True
        super(TableDataset, self).__init__(
            construct_final(examples), fields, filter_pred)
    def _read_annotated_file(self, data_list, field):
        """
        path: location of a src or tgt file
        truncate: maximum sequence length (0 for unlimited)
        """
        # Yield the raw per-record value for *field*, with a few fields
        # derived from other record entries.
        if field in ('copy_to_tgt','copy_to_ext'):
            lines = (line['src'] for line in data_list)
        elif field in ('tgt_copy_ext',):
            lines = (_tgt_copy_ext(line) for line in data_list)
        elif field in ('tgt_not_copy',):
            #lines = (_tgt_not_copy(line) for line in data_list)
            lines = (line['tgt_not_copy'] for line in data_list)
        elif field in ('tgt_loss',):
            lines = (get_tgt_loss(line, False) for line in data_list)
        else:
            lines = (line[field] for line in data_list)
        for line in lines:
            yield line
    def _construct_examples(self, lines, side):
        # Wrap each raw value in a single-key dict {side: value}.
        for words in lines:
            #print('side is {}, words is {}'.format(side, words))
            example_dict = {side: words}
            yield example_dict
    def __getstate__(self):
        return self.__dict__
    def __setstate__(self, d):
        self.__dict__.update(d)
    def __reduce_ex__(self, proto):
        "This is a hack. Something is broken with torch pickle."
        # NOTE(review): super().__reduce_ex__() is called without the
        # required protocol argument — likely a latent bug; confirm the
        # pickling path actually exercises this.
        return super(TableDataset, self).__reduce_ex__()
    @staticmethod
    def load_fields(vocab):
        # Rebuild Field objects from a saved (name, vocab) list.
        vocab = dict(vocab)
        fields = TableDataset.get_fields()
        for k, v in vocab.items():
            # Hack. Can't pickle defaultdict :(
            v.stoi = defaultdict(lambda: 0, v.stoi)
            fields[k].vocab = v
        return fields
    @staticmethod
    def save_vocab(fields):
        # Collect (name, vocab) pairs, converting stoi to a plain dict so
        # the result can be pickled.
        vocab = []
        for k, f in fields.items():
            if 'vocab' in f.__dict__:
                f.vocab.stoi = dict(f.vocab.stoi)
                vocab.append((k, f.vocab))
        return vocab
    @staticmethod
    def get_fields():
        # Field declarations: BART id sequences use numeric pad tokens and
        # skip vocab building; "indices" is a plain per-example integer.
        fields = {}
        fields['bart_src'] = torchtext.data.Field(use_vocab = False, pad_token=1)
        fields['bart_dec_inp'] = torchtext.data.Field(use_vocab = False, pad_token=1)
        fields["attention_mask"] = torchtext.data.Field(use_vocab=False, pad_token=0)
        # fields["lay_bpe"] = torchtext.data.Field(
        #     init_token=BOS_WORD, include_lengths=True, eos_token=EOS_WORD, pad_token=PAD_WORD)
        # fields["tgt_loss"] = torchtext.data.Field(
        #     init_token=BOS_WORD, eos_token=EOS_WORD, pad_token=PAD_WORD)
        # fields["tgt_loss_masked"] = torchtext.data.Field(
        #     init_token=BOS_WORD, eos_token=EOS_WORD, pad_token=PAD_WORD)
        fields["indices"] = torchtext.data.Field(
            use_vocab=False, sequential=False)
        return fields
    @staticmethod
    def build_vocab(train, dev, test, opt):
        # Placeholder: vocab building is not implemented for this dataset.
        print('vocab')
if __name__ == "__main__":
print(load_template('/Users/liyanzhou/Desktop/Edinburgh/Dissertation/semantic_parsing/data_model/templates.txt')) | [
"liyanzhou97@outlook.com"
] | liyanzhou97@outlook.com |
a06dff71a67cdd28e24856b0673ae851213d50df | 6b6361645b19cf0aa97b7372c19ff9f72ca45041 | /Open Sources/code-for-blog-master/pygame-3D/5_Using_matrices/displayWireframe.py | a026594df2c541a1e24b59c37ff578ec6d11375b | [] | no_license | ssy05468/2018-OOP-Python-lightbulb | 43e1c72fd9ec00f85cf1f0fdaca98a61a0cb603a | 3dd59e2f37173d92f5f1870729ef32e25a5b33f7 | refs/heads/master | 2020-04-06T08:26:59.805006 | 2019-06-05T01:08:42 | 2019-06-05T01:08:42 | 157,305,036 | 2 | 1 | null | 2018-12-07T20:13:33 | 2018-11-13T02:00:54 | Python | UTF-8 | Python | false | false | 3,722 | py | import wireframe as wf
import pygame
import numpy as np
# Keyboard bindings for the viewer: arrows translate the scene, '='/'-'
# scale it, and q/w, a/s, z/x rotate about the X, Y and Z axes.
key_to_function = {
    pygame.K_LEFT: (lambda x: x.translateAll('x', -10)),
    pygame.K_RIGHT: (lambda x: x.translateAll('x', 10)),
    pygame.K_DOWN: (lambda x: x.translateAll('y', 10)),
    pygame.K_UP: (lambda x: x.translateAll('y', -10)),
    pygame.K_EQUALS: (lambda x: x.scaleAll(1.25)),
    pygame.K_MINUS: (lambda x: x.scaleAll( 0.8)),
    pygame.K_q: (lambda x: x.rotateAll('X', 0.1)),
    pygame.K_w: (lambda x: x.rotateAll('X', -0.1)),
    pygame.K_a: (lambda x: x.rotateAll('Y', 0.1)),
    pygame.K_s: (lambda x: x.rotateAll('Y', -0.1)),
    pygame.K_z: (lambda x: x.rotateAll('Z', 0.1)),
    pygame.K_x: (lambda x: x.rotateAll('Z', -0.1))}
class ProjectionViewer:
    """ Displays 3D objects on a Pygame screen """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption('Wireframe Display')
        self.background = (10,10,50)
        # Named wireframe objects to render.
        self.wireframes = {}
        self.displayNodes = True
        self.displayEdges = True
        self.nodeColour = (255,255,255)
        self.edgeColour = (200,200,200)
        self.nodeRadius = 4
    def addWireframe(self, name, wireframe):
        """ Add a named wireframe object. """
        self.wireframes[name] = wireframe
    def run(self):
        """ Create a pygame screen until it is closed. """
        running = True
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                elif event.type == pygame.KEYDOWN:
                    if event.key in key_to_function:
                        key_to_function[event.key](self)
            self.display()
            pygame.display.flip()
    def display(self):
        """ Draw the wireframes on the screen. """
        self.screen.fill(self.background)
        for wireframe in self.wireframes.values():
            if self.displayEdges:
                for n1, n2 in wireframe.edges:
                    # Project by simply dropping the z coordinate.
                    pygame.draw.aaline(self.screen, self.edgeColour, wireframe.nodes[n1][:2], wireframe.nodes[n2][:2], 1)
            if self.displayNodes:
                for node in wireframe.nodes:
                    pygame.draw.circle(self.screen, self.nodeColour, (int(node[0]), int(node[1])), self.nodeRadius, 0)
    def translateAll(self, axis, d):
        """ Translate all wireframes along a given axis by d units. """
        # BUG FIX: dict.itervalues() is Python 2 only and raised
        # AttributeError on Python 3; use values() (as display() already does).
        for wireframe in self.wireframes.values():
            wireframe.translate(axis, d)
    def scaleAll(self, scale):
        """ Scale all wireframes by a given scale, centred on the centre of the screen. """
        centre_x = self.width/2
        centre_y = self.height/2
        # BUG FIX: itervalues() -> values() (Python 3).
        for wireframe in self.wireframes.values():
            wireframe.scale((centre_x, centre_y), scale)
    def rotateAll(self, axis, theta):
        """ Rotate all wireframe about their centre, along a given axis by a given angle. """
        rotateFunction = 'rotate' + axis
        # BUG FIX: itervalues() -> values() (Python 3).
        for wireframe in self.wireframes.values():
            centre = wireframe.findCentre()
            getattr(wireframe, rotateFunction)(centre, theta)
if __name__ == '__main__':
    # Demo: build a cube wireframe (corners at 50 and 250 on each axis)
    # and start the interactive viewer.
    pv = ProjectionViewer(400, 300)
    cube = wf.Wireframe()
    cube_nodes = [(x,y,z) for x in (50,250) for y in (50,250) for z in (50,250)]
    cube.addNodes(np.array(cube_nodes))
    # Edges: the 12 cube edges expressed via the node index ordering above.
    cube.addEdges([(n,n+4) for n in range(0,4)]+[(n,n+1) for n in range(0,8,2)]+[(n,n+2) for n in (0,1,4,5)])
    pv.addWireframe('cube', cube)
    pv.run()
| [
"34187131+magicjunha@users.noreply.github.com"
] | 34187131+magicjunha@users.noreply.github.com |
4ae9d4cd17ad18027fa1dffe901e6463804b40c4 | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/python/typecheck/mypy/skip_field.py | 672a681eeba2e506b35d3c2f51bbadb683934354 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 897 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import (
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
PythonTestUtilsGeneratorTarget,
)
from pants.engine.target import BoolField
class SkipMyPyField(BoolField):
    # Target field `skip_mypy`: defaults to False so MyPy runs unless a
    # target explicitly opts out.
    alias = "skip_mypy"
    default = False
    help = "If true, don't run MyPy on this target's code."
def rules():
    """Register the `skip_mypy` field on every Python target type."""
    python_target_types = (
        PythonSourcesGeneratorTarget,
        PythonSourceTarget,
        PythonTestsGeneratorTarget,
        PythonTestTarget,
        PythonTestUtilsGeneratorTarget,
    )
    return [target_type.register_plugin_field(SkipMyPyField)
            for target_type in python_target_types]
| [
"noreply@github.com"
] | noreply@github.com |
cab1ad315dd1080096ab4bf7cb6ea70f947a2eac | 2d57a05c549be8cc43cef84f7959193db7f8ead8 | /5_Peer_Graded_Assignment_Questions (1).py | 3e5615753fe47546c34b433ea9451c787ab6b28b | [] | no_license | Gagan8533/pandas-dash | b526a49851495694e1e427619e6210d7dc947b01 | e6cfb2988375d9906af573ce8bf447dfcde33685 | refs/heads/main | 2023-07-18T00:36:40.135049 | 2021-08-20T19:15:15 | 2021-08-20T19:15:15 | 398,377,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,133 | py | # Import required libraries
import pandas as pd
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
import plotly.express as px
from dash import no_update
# Create a dash application
app = dash.Dash(__name__)
# REVIEW1: Clear the layout and do not display exception till callback gets executed
app.config.suppress_callback_exceptions = True
# Read the airline data into pandas dataframe
airline_data = pd.read_csv('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/airline_data.csv',
encoding = "ISO-8859-1",
dtype={'Div1Airport': str, 'Div1TailNum': str,
'Div2Airport': str, 'Div2TailNum': str})
# List of years
year_list = [i for i in range(2005, 2021, 1)]
"""Compute graph data for creating yearly airline performance report
Function that takes airline data as input and create 5 dataframes based on the grouping condition to be used for plottling charts and grphs.
Argument:
df: Filtered dataframe
Returns:
Dataframes to create graph.
"""
def compute_data_choice_1(df):
    """Build the five aggregate frames behind the yearly performance report.

    Returns, in order: monthly cancellation counts, mean air time per
    airline per month, the diverted-landing rows, flights per origin
    state, and flights per destination state and airline.
    """
    month_by_code = df.groupby(['Month', 'CancellationCode'])
    bar_data = month_by_code['Flights'].sum().reset_index()
    month_by_airline = df.groupby(['Month', 'Reporting_Airline'])
    line_data = month_by_airline['AirTime'].mean().reset_index()
    # Keep only flights that actually diverted at least once.
    div_data = df[df['DivAirportLandings'] != 0.0]
    map_data = df.groupby(['OriginState'])['Flights'].sum().reset_index()
    tree_data = df.groupby(['DestState', 'Reporting_Airline'])['Flights'].sum().reset_index()
    return bar_data, line_data, div_data, map_data, tree_data
"""Compute graph data for creating yearly airline delay report
This function takes in airline data and selected year as an input and performs computation for creating charts and plots.
Arguments:
df: Input airline data.
Returns:
Computed average dataframes for carrier delay, weather delay, NAS delay, security delay, and late aircraft delay.
"""
def compute_data_choice_2(df):
    """Monthly average of each delay type per reporting airline.

    Returns five frames in order: carrier, weather, NAS, security and
    late-aircraft delay averages.
    """
    def monthly_mean(column):
        # Average the given delay column per (month, airline) pair.
        return df.groupby(['Month', 'Reporting_Airline'])[column].mean().reset_index()
    return (monthly_mean('CarrierDelay'),
            monthly_mean('WeatherDelay'),
            monthly_mean('NASDelay'),
            monthly_mean('SecurityDelay'),
            monthly_mean('LateAircraftDelay'))
# Application layout
# Fixes applied:
#  * removed a stray `]),` that closed the outer html.Div too early and left
#    the expression unbalanced (SyntaxError as written);
#  * `Placeholder=` -> `placeholder=` — dash components raise TypeError on
#    unknown keyword arguments;
#  * `'font size'` style keys changed to `'font-size'`, matching the key
#    already used for the year dropdown.
app.layout = html.Div(children=[
    # TASK1: dashboard title.
    html.H1('US Domestic Airline Flights Performance',
            style={'textAlign': 'center', 'color': '#503D36', 'font-size': 24}),
    # REVIEW2: Dropdown creation
    html.Div([
        # Report-type selector row.
        html.Div([
            html.Div(
                [
                    html.H2('Report Type:', style={'margin-right': '2em'}),
                ]
            ),
            # TASK2: report-type dropdown.
            dcc.Dropdown(id='input-type',
                         options=[{'label': 'Yearly Airline Performance Report', 'value': 'OPT1'},
                                  {'label': 'Yearly Airline Delay Report', 'value': 'OPT2'}],
                         placeholder='Select a report type',
                         style={'width': '80%', 'padding': '3px', 'font-size': '20px', 'text-align-last': 'center'})
        ], style={'display': 'flex'}),
        # Year selector row.
        html.Div([
            html.Div(
                [
                    html.H2('Choose Year:', style={'margin-right': '2em'})
                ]
            ),
            dcc.Dropdown(id='input-year',
                         # Update dropdown values using list comphrehension
                         options=[{'label': i, 'value': i} for i in year_list],
                         placeholder="Select a year",
                         style={'width': '80%', 'padding': '3px', 'font-size': '20px', 'text-align-last': 'center'}),
        ], style={'display': 'flex'}),
    ]),
    # Add Computed graphs
    # REVIEW3: empty divisions with ids that the callback fills in.
    html.Div([], id='plot1'),
    html.Div([
        html.Div([], id='plot2'),
        html.Div([], id='plot3')
    ], style={'display': 'flex'}),
    # TASK3: second row of graph placeholders.
    html.Div([
        html.Div([], id='plot4'),
        html.Div([], id='plot5')
    ], style={'display': 'flex'}),
])
# Callback function definition
# TASK4: Add 5 ouput components
# One Output per plot placeholder; inputs are the two dropdowns.
@app.callback( [Output(component_id='plot1', component_property='children'),
                Output(component_id='plot2', component_property='children'),
                Output(component_id='plot3', component_property='children'),
                Output(component_id='plot4', component_property='children'),
                Output(component_id='plot5', component_property='children')],
               [Input(component_id='input-type', component_property='value'),
                Input(component_id='input-year', component_property='value')],
               # REVIEW4: Holding output state till user enters all the form information. In this case, it will be chart type and year
               [State("plot1", 'children'), State("plot2", "children"),
                State("plot3", "children"), State("plot4", "children"),
                State("plot5", "children")
               ])
# Add computation to callback function and return graph
def get_graph(chart, year, children1, children2, c3, c4, c5):
    # Select data
    # NOTE(review): `year` is None until the user picks a value, so
    # int(year) would raise on the initial callback — consider guarding
    # with dash.exceptions.PreventUpdate.
    df = airline_data[airline_data['Year']==int(year)]
    if chart == 'OPT1':
        # Compute required information for creating graph from the data
        bar_data, line_data, div_data, map_data, tree_data = compute_data_choice_1(df)
        # Number of flights under different cancellation categories
        bar_fig = px.bar(bar_data, x='Month', y='Flights', color='CancellationCode', title='Monthly Flight Cancellation')
        # TASK5: Average flight time by reporting airline
        line_fig= px.line(line_data, x='Month', y='AirTime', color='Reporting_Airline', title='Average monthly flight time (minute) by airline' )
        # Percentage of diverted airport landings per reporting airline
        pie_fig = px.pie(div_data, values='Flights', names='Reporting_Airline', title='% of flights by reporting airline')
        # REVIEW5: Number of flights flying from each state using choropleth
        map_fig = px.choropleth(map_data,  # Input data
                locations='OriginState',
                color='Flights',
                hover_data=['OriginState', 'Flights'],
                locationmode = 'USA-states', # Set to plot as US States
                color_continuous_scale='GnBu',
                range_color=[0, map_data['Flights'].max()])
        map_fig.update_layout(
                title_text = 'Number of flights from origin state',
                geo_scope='usa') # Plot only the USA instead of globe
        # TASK6: Number of flights flying to each state from each reporting airline
        tree_fig = px.treemap(tree_data, path=['DestState','Reporting_Airline'], values='Flights', color='Flights', color_continuous_scale='RdBu', title='Flight count by airline to distination state')
        # REVIEW6: Return dcc.Graph component to the empty division
        # NOTE(review): this order maps tree->plot1, pie->plot2, map->plot3,
        # bar->plot4, line->plot5 — verify it matches the intended layout.
        return [dcc.Graph(figure=tree_fig),
                dcc.Graph(figure=pie_fig),
                dcc.Graph(figure=map_fig),
                dcc.Graph(figure=bar_fig),
                dcc.Graph(figure=line_fig)
               ]
    else:
        # REVIEW7: This covers chart type 2 and we have completed this exercise under Flight Delay Time Statistics Dashboard section
        # Compute required information for creating graph from the data
        avg_car, avg_weather, avg_NAS, avg_sec, avg_late = compute_data_choice_2(df)
        # Create graph
        carrier_fig = px.line(avg_car, x='Month', y='CarrierDelay', color='Reporting_Airline', title='Average carrrier delay time (minutes) by airline')
        weather_fig = px.line(avg_weather, x='Month', y='WeatherDelay', color='Reporting_Airline', title='Average weather delay time (minutes) by airline')
        nas_fig = px.line(avg_NAS, x='Month', y='NASDelay', color='Reporting_Airline', title='Average NAS delay time (minutes) by airline')
        sec_fig = px.line(avg_sec, x='Month', y='SecurityDelay', color='Reporting_Airline', title='Average security delay time (minutes) by airline')
        late_fig = px.line(avg_late, x='Month', y='LateAircraftDelay', color='Reporting_Airline', title='Average late aircraft delay time (minutes) by airline')
        return[dcc.Graph(figure=carrier_fig),
               dcc.Graph(figure=weather_fig),
               dcc.Graph(figure=nas_fig),
               dcc.Graph(figure=sec_fig),
               dcc.Graph(figure=late_fig)]
# Run the app (development server; use a production WSGI server otherwise).
if __name__ == '__main__':
    app.run_server()
"noreply@github.com"
] | noreply@github.com |
37493f770591ef660a442812b1c6032e2a27f92b | 01be310880d1df1d4207daa2e2c7bfaa9c065952 | /calC.py | 368076ea7cad2da9f18028d3b434c6105444f9eb | [] | no_license | pathakdhruv/GalDynPsr_0 | 62cb4e84e38ca305c38c5f1797ad8dedc97ea4e1 | a36753767c7cc9940fbea4de11bc4fbe552c8095 | refs/heads/master | 2021-08-30T09:47:40.524768 | 2017-12-17T09:52:00 | 2017-12-17T09:52:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | from __future__ import print_function
import math
import numpy as np
import parameters as par
from ExcessZ import g
from Excesspl import aplmod
from Excesspl import Rpkpcfunc
from Excesspl import Vprat
from err_NT import errNT
from err_excesspl_Reid import err_Reid14
def calc(bdeg, sigb, ldeg, sigl, dkpc, sigd, Har):
    """Print the Galactic excess acceleration terms for a pulsar.

    bdeg/ldeg: Galactic latitude/longitude in degrees (sigb/sigl their
    uncertainties); dkpc: distance in kpc (sigd its uncertainty);
    Har: when 1, the uncertainties are suppressed in the printout.
    """
    b_rad = bdeg * par.degtorad
    l_rad = ldeg * par.degtorad
    z_kpc = dkpc * math.sin(b_rad)
    # Planar excess term (Reid 2014 model) and its uncertainty, in s^-1.
    excess_pl = aplmod(dkpc, b_rad, l_rad) * math.cos(b_rad)
    err_pl = err_Reid14(bdeg, sigb, ldeg, sigl, dkpc, sigd)
    # Vertical excess term (NT95 model) and its uncertainty, in s^-1.
    excess_z = g(z_kpc) * math.sin(b_rad)
    err_z = errNT(bdeg, sigb, dkpc, sigd)
    if Har == 1:
        print ("Excess_parallel_Reid2014, Excess_z_NT95 = ", excess_pl, ", ", excess_z)
    else:
        print ("Excess_parallel_Reid2014, Excess_z_NT95 = ", excess_pl, "+/-", err_pl, ", ", excess_z, "+/-", err_z)
    return None
| [
"noreply@github.com"
] | noreply@github.com |
01abb829ed3171dd9ab444b05d782bc3a26e2d9b | 334e7e8b9162cd74e1c9dd115a6e293f01051454 | /src/videos/views.py | 0215dd18336157f194698c4c49d03bb9bff38c8a | [
"MIT"
] | permissive | contactr2m/remote_repo | dec0dff9c299ab665cd36642a757ae9fa35950c3 | 5665c55b794929fd40645264c5c149e64d172097 | refs/heads/master | 2021-01-10T13:13:47.359357 | 2016-04-26T14:23:49 | 2016-04-26T14:23:49 | 53,814,820 | 0 | 0 | null | 2016-03-20T19:37:37 | 2016-03-14T00:13:06 | Python | UTF-8 | Python | false | false | 170 | py | from django.shortcuts import render
from django.views import generic
# Create your views here.
class VideoPage(generic.TemplateView):
    # Static page: renders the videos template with no extra context.
    template_name = "videos.html"
| [
"contactr2m@gmail.com"
] | contactr2m@gmail.com |
e3338b3c6f281013c90ce45537f49a37b900d982 | b2f03f8bf5974baef7f56ebeb9a5f4bc00bb3a04 | /aristaflow/remote_iterator_handler.py | 9755d766cd379f8342099d3ba9ecc31b0d8d289e | [
"MIT"
] | permissive | riuns/aristaflowpy | 14d79d9a03c5dea1e3b49425aeb28b0a592fe448 | 58ad48310366484379fc519d649c564c762f89f9 | refs/heads/main | 2023-06-18T22:24:10.561605 | 2021-06-17T09:34:34 | 2021-06-17T09:34:34 | 385,566,380 | 0 | 0 | MIT | 2021-07-13T10:35:36 | 2021-07-13T10:35:35 | null | UTF-8 | Python | false | false | 1,408 | py | # coding: utf-8
# Default Python Libraries
from typing import List, Type
# AristaFlow REST Libraries
from aristaflow.service_provider import ServiceProvider
class RemoteIteratorHandler(object):
    """
    Utilities for handling remote iterators
    """

    # Service provider used to look up iterator API instances.
    # Annotation quoted (forward reference) so the class can be created
    # even where ServiceProvider is only needed for type checking.
    _service_provider: "ServiceProvider" = None

    def __init__(self, service_provider: "ServiceProvider"):
        """
        Constructor
        """
        self._service_provider = service_provider

    def _consume(self, target: List, iterator_data, attrib_name: str, iter_api, iter_api_method):
        """Recursively drain *iterator_data* into *target*.

        Appends getattr(iterator_data, attrib_name) to *target*, then
        fetches the next page via iter_api_method(iter_api, iterator_id)
        until the iterator returns no data or reports itself dropped.
        """
        if iterator_data is None:
            return
        if getattr(iterator_data, attrib_name):
            target += getattr(iterator_data, attrib_name)
        else:
            # no data left, end recursion
            return
        if iterator_data.dropped:
            # The server already closed the iterator; nothing more to fetch.
            return
        next_iter = iter_api_method(iter_api, iterator_data.iterator_id)
        # BUG FIX: the recursive call previously omitted `iter_api`, so any
        # iterator that needed a second fetch raised a TypeError.
        self._consume(target, next_iter, attrib_name, iter_api, iter_api_method)

    def consume(
        self, iterator_data, attrib_name: str, iter_api_type: Type, iter_api_method=None
    ) -> List:
        """Drain every page of *attrib_name* from *iterator_data* into a list.

        iter_api_type: service type used to obtain the iterator API;
        iter_api_method: page-fetch method, defaulting to the type's
        `get_next`.
        """
        iter_api = self._service_provider.get_service(iter_api_type)
        if iter_api_method is None:
            iter_api_method = getattr(iter_api_type, "get_next")
        target = []
        self._consume(target, iterator_data, attrib_name, iter_api, iter_api_method)
        return target
| [
"kevin.goeser@aristaflow.com"
] | kevin.goeser@aristaflow.com |
ab14c4d4a9d8c432ae24647c18b9e98e4968ece0 | 90be755a741d6c93dd59d4acef8b27b4cf93ff54 | /src/elsia/scripts/get_abs_ori.py | 8decc0c43e2f8716f8f28629c4b7ed417de7cc24 | [] | no_license | karry3775/Elsia_ws | 05aa5786a6f3f64b70c7ceafead6d72d4ca18bab | 031f8006e9a439d9947be5ed288a666f20fca3a7 | refs/heads/master | 2023-02-21T05:21:10.842475 | 2021-01-23T14:58:57 | 2021-01-23T15:21:46 | 326,032,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from cv_bridge import CvBridge, CvBridgeError
from nav_msgs.msg import Odometry
import cv2
import numpy as np
import math as m
# initialize the node
rospy.init_node("get_abs_ori_node")
# global variables
# best_ori_estimate: latest absolute yaw estimate in radians, updated by
# abs_ori_cb on every processed frame.
best_ori_estimate = 0.0
# ini_angle_offset: initial yaw offset (radians) subtracted from each
# candidate orientation.
ini_angle_offset = 0.0
# create publishers
odom_pub = rospy.Publisher("/abs_orientation_odom", Odometry, queue_size=10)
image_pub = rospy.Publisher("/considered_image", Image, queue_size=10)
# global variable for whether to DEBUG or not (extra console output)
DEBUG = False
def wrap2Pi(theta):
    """Wrap an angle in radians into the (-pi, pi] interval."""
    return m.atan2(m.sin(theta), m.cos(theta))
def abs_ori_cb(msg):
    """Image callback: estimate absolute yaw from Hough lines in an
    upward-facing camera frame and publish it as an Odometry message."""
    global best_ori_estimate
    try:
        cv_image = CvBridge().imgmsg_to_cv2(msg, "bgr8")
        # crop out the excess image
        cv_image = cv_image[100:300, 100:300, :]
    except CvBridgeError as e:
        print("[INFO]: Error in obtaining image from CvBridge! Skipping frame!")
    else:
        # convert to gray
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        # convert to edges
        edges = cv2.Canny(gray, 50, 150)
        cv2.imshow("edges", edges)
        cv2.waitKey(1)
        # convert to thresholded image (computed but unused below)
        ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
        # extract hough lines (probabilistic transform on the edge image)
        lines = cv2.HoughLinesP(edges, 1, m.pi/180, 2, None, 20, 1)
        # list of [count, angle] pairs
        cnt_ang_pair = []
        # draw lines and record the angle of each detected segment
        for i in range(lines.shape[0]):
            for line in lines[i]:
                pt1 = (line[0], line[1])
                pt2 = (line[2], line[3])
                cv2.line(cv_image, pt1, pt2, (255, 0, 0), 3)
                # calculate angle
                ang = m.atan2(pt2[1]-pt1[1], pt2[0]-pt1[0])
                cnt_ang_pair.append([1, m.degrees(ang)])
        ###################### show the detected lines ########################
        cv2.imshow("frame", cv_image)
        cv2.waitKey(1)
        #######################################################################
        if len(cnt_ang_pair) != 0:
            # sort the cnt_ang_pair by angle
            cnt_ang_pair.sort(key=lambda x: x[1])
            # bunch up the pairs based on predetermined threshold:
            # angles within 1 degree are merged into one [count, mean] bin
            ang_thresh_deg = 1
            bunch = [cnt_ang_pair[0]]
            for i in range(1, len(cnt_ang_pair)):
                pairs = cnt_ang_pair[i]
                if abs(pairs[1] - bunch[-1][1]) < ang_thresh_deg:
                    # update the value and the count (running mean)
                    new_count = bunch[-1][0] + 1
                    new_value = (
                        (bunch[-1][1] * (new_count - 1) * 1.0) / new_count) + (pairs[1]*1.0) / new_count
                    bunch[-1] = [new_count, new_value]
                else:
                    # time to append a new bin
                    bunch.append(pairs)
            # sort bunch based on first value i.e. count (dominant direction first)
            bunch.sort(key=lambda x: x[0], reverse=True)
            if DEBUG:
                print("The cnt_ang_pair list is: \n {} \n".format(cnt_ang_pair))
                print("The bunched up list is: \n {} \n".format(bunch))
            # use the first value of bunch (the most common line angle)
            f_ori = m.radians(bunch[0][1])
            # The dominant direction is ambiguous modulo 90 degrees; build
            # the three alternative candidates relative to the offset.
            f_ori1 = wrap2Pi(f_ori + m.radians(90) - ini_angle_offset)
            f_ori2 = wrap2Pi(f_ori + m.radians(-90) - ini_angle_offset)
            f_ori3 = wrap2Pi(f_ori + m.radians(180) - ini_angle_offset)
            # pick the candidate closest to the previous estimate
            if(abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori1)) and abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori2)) and abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
                best_ori_estimate_temp = f_ori
            elif(abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori)) and abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori2)) and abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
                best_ori_estimate_temp = f_ori1
            elif(abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori)) and abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori1)) and abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
                best_ori_estimate_temp = f_ori2
            else:
                best_ori_estimate_temp = f_ori3
            # will get the best_ori_estimate in degrees , the choice is made so that any difference will be amplified more than radians
            best_ori_estimate = best_ori_estimate_temp
            if DEBUG:
                print("best ori estimate: {} deg".format(
                    m.degrees(best_ori_estimate)))
            # to debug lets plot the best_ori_estimate in the image
            pt1 = [200, 200]
            pt2 = [200, 200]
            line_angle = best_ori_estimate
            pt2[0] = int(pt2[0] + 200*m.cos(line_angle))
            pt2[1] = int(pt2[1] + 200*m.sin(line_angle))
            cv2.line(cv_image, (pt1[0], pt1[1]),
                     (pt2[0], pt2[1]), (0, 0, 255), 3)
            # publish abs odometry for yaw
            # create euler angles
            roll = 0
            pitch = 0
            yaw = -best_ori_estimate
            # convert to quaternion
            q = quaternion_from_euler(roll, pitch, yaw)
            # create a odom message
            odom_msg = Odometry()
            odom_msg.pose.pose.orientation.x = q[0]
            odom_msg.pose.pose.orientation.y = q[1]
            odom_msg.pose.pose.orientation.z = q[2]
            odom_msg.pose.pose.orientation.w = q[3]
            odom_msg.header.frame_id = "odom"
            odom_msg.header.stamp = rospy.Time().now()
            odom_pub.publish(odom_msg)
        # Publish the annotated frame for inspection.
        # NOTE(review): original indentation is ambiguous in this copy;
        # publishing once per processed frame (outside the if) assumed.
        rosimg = CvBridge().cv2_to_imgmsg(cv_image, "bgr8")
        image_pub.publish(rosimg)
if __name__ == "__main__":
try:
abs_ori_sub = rospy.Subscriber(
"/stereo/left_upward/image_rect", Image, abs_ori_cb)
rospy.spin()
except rospy.ROSInterruptException:
pass
| [
"kartikprakash3775@gmail.com"
] | kartikprakash3775@gmail.com |
4e752d4ff716073c6fb85e0f1f876a69a596e0af | 634f86d2e9a534566b4e120c986c079ffb246804 | /relevate_web_app/apps/profiles/models/user_models.py | f0a7aa99c6659f50c12aa50697a6032381edca4b | [] | no_license | jhock/Relevate | dcbb32a11c44766a55291dec1ed8b1f68fb32236 | 8296c49dfa8771b47965c24b6b49a2b6e8ace6cf | refs/heads/master | 2023-01-19T14:13:56.756661 | 2019-08-12T22:19:02 | 2019-08-12T22:19:02 | 105,825,724 | 1 | 0 | null | 2023-01-13T22:30:25 | 2017-10-04T22:31:12 | JavaScript | UTF-8 | Python | false | false | 1,625 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from .contributor_model import ContributorProfile
from ...contribution.models import Topics
class UserProfile(models.Model):
	"""Site-level profile attached one-to-one to Django's auth User.

	Tracks account state flags (confirmation, adviser/contributor status)
	and the user's topic preferences.
	"""
	user = models.OneToOneField(User)
	confirmed = models.BooleanField(default=False)  # account/email confirmed
	is_adviser = models.BooleanField(default=False)
	is_contributor = models.BooleanField(default=False)
	is_pending_contributor = models.BooleanField(default=False)  # applied, not yet approved
	topics_preferences = models.ManyToManyField('contribution.Topics', blank=True)
	#user_avatar = models.ImageField(upload_to='user_profiles/user_avatar', null=True, blank=True)

	def get_associated_contributor(self):
		"""Return the ContributorProfile associated with this user.

		Mainly used for getting the avatar without having to pass the
		user's contributor profile to every single page.

		Raises:
			ContributorProfile.DoesNotExist: if no profile is linked.
		"""
		return ContributorProfile.objects.get(user_profile=self)

	def __unicode__(self):
		# Python 2 string representation: the login username.
		return self.user.username

	def full_name(self):
		"""Return the user's full name as 'first last'."""
		return self.user.first_name + " " + self.user.last_name

	def __str__(self):
		# Delegate to full_name() so the display format is defined in one place
		# (previously the concatenation was duplicated here).
		return self.full_name()

	class Meta:
		db_table = "myrelevate_userprofile"
| [
"joshua.a.hock@gmail.com"
] | joshua.a.hock@gmail.com |
6b723c4ecce3eb9878dfd0f11f0ff48ecb26c830 | f04d1f900583708f54ff889e0cfce4c5aeb83030 | /clustering/prepare.py | a4501944b0a1773fbca16017cdd29cb4f3596c44 | [] | no_license | chemiczny/neural_networks_neupy | 9facd09bc21407e7238c27dde7dafe5419b16d5a | bbf234b20140a4b24a2b897a35471bf1703461af | refs/heads/master | 2020-05-06T12:14:19.646643 | 2019-04-11T13:27:31 | 2019-04-11T13:27:31 | 180,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 16:34:42 2019
@author: michal
"""
from os.path import isdir, join
from os import mkdir, remove
from glob import glob
from SOFMgenerator import SOFMgenerator
from dataPreprocessor import DataPreprocessor
# Parameter-grid generator: writes SOFM hyper-parameter combinations into
# small chunked "*.inp" files under inputDir, dataPerFile lines per file.
inputDir = "inputs"
if not isdir(inputDir):
    mkdir(inputDir)
# Start from a clean slate: drop any chunk files from a previous run.
for inputFile in glob(join( inputDir, "*.inp" )):
    remove(inputFile)
dataObj = DataPreprocessor()
sofm = SOFMgenerator(dataObj)
dataPerFile = 4        # parameter lines written per .inp chunk file
nRows = range(5, 11)   # candidate SOFM grid row counts
nCols = range(5, 11)   # candidate SOFM grid column counts
repeatNetwork = 3      # each configuration is emitted this many times
nnCounter = 0          # total number of configurations emitted
actualI = 0            # index of the chunk file currently open
actualFile = open( join( inputDir, str(actualI)+".inp" ),'w' )
dataInActualFile = 0   # lines written so far to the current chunk file
Rthreshold = [ 0.9, 0.875, 0.85]
for repeat in range(repeatNetwork):
    for rt in Rthreshold:
        for nr in nRows:
            for nc in nCols:
                for lr in sofm.learningRadius:
                    # skip radii not smaller than the larger map dimension
                    if lr >= max(nr, nc):
                        continue
                    for wi in sofm.weightInit:
                        for gt in sofm.gridType:
                            # this combination is excluded (presumably PCA init
                            # is unsupported on hexagonal grids — confirm in
                            # SOFMgenerator)
                            if gt == "hexagon" and wi == "init_pca":
                                continue
                            # one space-separated parameter line per config
                            data = [ str(nr) , str(nc), str(lr), wi, gt, str(rt) ]
                            data = " ".join(data) + "\n"
                            actualFile.write(data)
                            dataInActualFile += 1
                            nnCounter += 1
                            # rotate to a fresh chunk file once this one is full
                            if dataInActualFile >= dataPerFile:
                                dataInActualFile = 0
                                actualFile.close()
                                actualI += 1
                                actualFile = open( join( inputDir, str(actualI)+".inp" ),'w' )
actualFile.close()
print(nnCounter, actualI)
"glanowskimichal@gmail.com"
] | glanowskimichal@gmail.com |
42bcc717daa52c76b623b77adb64ac1e50d8fe60 | b57d337ddbe946c113b2228a0c167db787fd69a1 | /scr/py00033SpiderDeath.py | 6fd5b9134c2358a8544c5ef441100d8e4da50196 | [] | no_license | aademchenko/ToEE | ebf6432a75538ae95803b61c6624e65b5cdc53a1 | dcfd5d2de48b9d9031021d9e04819b309d71c59e | refs/heads/master | 2020-04-06T13:56:27.443772 | 2018-11-14T09:35:57 | 2018-11-14T09:35:57 | 157,520,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from toee import *
from utilities import *
from combat_standard_routines import *
def san_dying( attachee, triggerer ):
    # Death handler: adjust CR accounting and, on certain maps, destroy the
    # creature's ring (proto 3000) depending on the party's alignment.
    if should_modify_CR( attachee ):
        modify_CR( attachee, get_av_level() )
    # Alignment set for which the ring is destroyed on the current map.
    # This replaces three near-identical copies of the find/destroy logic.
    destroy_alignments = ()
    if (attachee.map == 5069):
        # Bump the map-5069 global counter on every death here.
        game.global_vars[3] = game.global_vars[3] + 1
        # Non-good parties lose the ring on this map.
        destroy_alignments = (LAWFUL_NEUTRAL, CHAOTIC_NEUTRAL, TRUE_NEUTRAL, LAWFUL_EVIL, CHAOTIC_EVIL, NEUTRAL_EVIL)
    elif (attachee.map == 5002):
        # Non-neutral parties lose the ring on this map.
        destroy_alignments = (LAWFUL_GOOD, CHAOTIC_GOOD, NEUTRAL_GOOD, LAWFUL_EVIL, CHAOTIC_EVIL, NEUTRAL_EVIL)
    elif (attachee.map == 5003):
        # Non-evil parties lose the ring on this map.
        destroy_alignments = (LAWFUL_GOOD, CHAOTIC_GOOD, NEUTRAL_GOOD, LAWFUL_NEUTRAL, CHAOTIC_NEUTRAL, TRUE_NEUTRAL)
    if game.party_alignment in destroy_alignments:
        ring = attachee.item_find( 3000 )
        ring.destroy()
    return RUN_DEFAULT
| [
"demchenko.recruitment@gmail.com"
] | demchenko.recruitment@gmail.com |
24f4ad0bc75271d08496072c0885072c734d3990 | 5b1ff6054c4f60e4ae7315db9f20a334bc0b7634 | /Launchkey_MK2/Colors.py | 6f5028d35ea48a5ef4fb11c613cb1206a59fc846 | [] | no_license | maratbakirov/AbletonLive9_RemoteScripts | 2869122174634c75405a965401aa97a2dae924a1 | 4a1517c206353409542e8276ebab7f36f9bbd4ef | refs/heads/master | 2021-06-05T14:38:27.959025 | 2021-05-09T11:42:10 | 2021-05-09T11:42:10 | 13,348,327 | 3 | 4 | null | 2016-10-16T13:51:11 | 2013-10-05T16:27:04 | Python | UTF-8 | Python | false | false | 4,566 | py | #Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Launchkey_MK2/Colors.py
from _Framework.ButtonElement import Color
from .consts import BLINK_LED_CHANNEL, PULSE_LED_CHANNEL
class Blink(Color):
    """Color that redraws by clearing the LED and re-sending its value on
    the dedicated blink MIDI channel (BLINK_LED_CHANNEL)."""
    def __init__(self, midi_value = 0, *a, **k):
        super(Blink, self).__init__(midi_value, *a, **k)
    def draw(self, interface):
        # Reset the LED first, then send the value on the blink channel.
        interface.send_value(0)
        interface.send_value(self.midi_value, channel=BLINK_LED_CHANNEL)
class Pulse(Color):
    """Color that redraws by clearing the LED and re-sending its value on
    the dedicated pulse MIDI channel (PULSE_LED_CHANNEL)."""
    def __init__(self, midi_value = 0, *a, **k):
        super(Pulse, self).__init__(midi_value, *a, **k)
    def draw(self, interface):
        # Reset the LED first, then send the value on the pulse channel.
        interface.send_value(0)
        interface.send_value(self.midi_value, channel=PULSE_LED_CHANNEL)
class Rgb:
    """Named palette constants.

    Each value is a device color index wrapped in Color (static), Blink or
    Pulse (animated variants on their respective MIDI channels).
    """
    BLACK = Color(0)
    DARK_GREY = Color(1)
    GREY = Color(2)
    WHITE = Color(3)
    RED = Color(5)
    RED_BLINK = Blink(5)
    RED_PULSE = Pulse(5)
    RED_HALF = Color(7)
    ORANGE = Color(9)
    ORANGE_HALF = Color(11)
    AMBER = Color(96)
    AMBER_HALF = Color(14)
    YELLOW = Color(13)
    YELLOW_HALF = Color(15)
    DARK_YELLOW = Color(17)
    DARK_YELLOW_HALF = Color(19)
    GREEN = Color(21)
    GREEN_BLINK = Blink(21)
    GREEN_PULSE = Pulse(21)
    GREEN_HALF = Color(27)
    MINT = Color(29)
    MINT_HALF = Color(31)
    LIGHT_BLUE = Color(37)
    LIGHT_BLUE_HALF = Color(39)
    BLUE = Color(45)
    BLUE_HALF = Color(47)
    DARK_BLUE = Color(49)
    DARK_BLUE_HALF = Color(51)
    PURPLE = Color(53)
    PURPLE_HALF = Color(55)
    DARK_PURPLE = Color(59)
    BRIGHT_PURPLE = Color(81)
    DARK_ORANGE = Color(84)
CLIP_COLOR_TABLE = {15549221: 60,
12411136: 61,
11569920: 62,
8754719: 63,
5480241: 64,
695438: 65,
31421: 66,
197631: 67,
3101346: 68,
6441901: 69,
8092539: 70,
3947580: 71,
16712965: 72,
12565097: 73,
10927616: 74,
8046132: 75,
4047616: 76,
49071: 77,
1090798: 78,
5538020: 79,
8940772: 80,
10701741: 81,
12008809: 82,
9852725: 83,
16149507: 84,
12581632: 85,
8912743: 86,
1769263: 87,
2490280: 88,
6094824: 89,
1698303: 90,
9160191: 91,
9611263: 92,
12094975: 93,
14183652: 94,
16726484: 95,
16753961: 96,
16773172: 97,
14939139: 98,
14402304: 99,
12492131: 100,
9024637: 101,
8962746: 102,
10204100: 103,
8758722: 104,
13011836: 105,
15810688: 106,
16749734: 107,
16753524: 108,
16772767: 109,
13821080: 110,
12243060: 111,
11119017: 112,
13958625: 113,
13496824: 114,
12173795: 115,
13482980: 116,
13684944: 117,
14673637: 118,
16777215: 119}
RGB_COLOR_TABLE = ((0, 0),
(1, 1973790),
(2, 8355711),
(3, 16777215),
(4, 16731212),
(5, 16711680),
(6, 5832704),
(7, 1638400),
(8, 16760172),
(9, 16733184),
(10, 5840128),
(11, 2562816),
(12, 16777036),
(13, 16776960),
(14, 5855488),
(15, 1644800),
(16, 8978252),
(17, 5570304),
(18, 1923328),
(19, 1321728),
(20, 5046092),
(21, 65280),
(22, 22784),
(23, 6400),
(24, 5046110),
(25, 65305),
(26, 22797),
(27, 6402),
(28, 5046152),
(29, 65365),
(30, 22813),
(31, 7954),
(32, 5046199),
(33, 65433),
(34, 22837),
(35, 6418),
(36, 5030911),
(37, 43519),
(38, 16722),
(39, 4121),
(40, 5015807),
(41, 22015),
(42, 7513),
(43, 2073),
(44, 5000447),
(45, 255),
(46, 89),
(47, 25),
(48, 8867071),
(49, 5505279),
(50, 1638500),
(51, 983088),
(52, 16731391),
(53, 16711935),
(54, 5832793),
(55, 1638425),
(56, 16731271),
(57, 16711764),
(58, 5832733),
(59, 2228243),
(60, 16717056),
(61, 10040576),
(62, 7950592),
(63, 4416512),
(64, 211200),
(65, 22325),
(66, 21631),
(67, 255),
(68, 17743),
(69, 2425036),
(70, 8355711),
(71, 2105376),
(72, 16711680),
(73, 12451629),
(74, 11529478),
(75, 6618889),
(76, 1084160),
(77, 65415),
(78, 43519),
(79, 11007),
(80, 4129023),
(81, 7995647),
(82, 11672189),
(83, 4202752),
(84, 16730624),
(85, 8970502),
(86, 7536405),
(87, 65280),
(88, 3931942),
(89, 5898097),
(90, 3735500),
(91, 5999359),
(92, 3232198),
(93, 8880105),
(94, 13835775),
(95, 16711773),
(96, 16744192),
(97, 12169216),
(98, 9502464),
(99, 8609031),
(100, 3746560),
(101, 1330192),
(102, 872504),
(103, 1381674),
(104, 1450074),
(105, 6896668),
(106, 11010058),
(107, 14569789),
(108, 14182940),
(109, 16769318),
(110, 10412335),
(111, 6796559),
(112, 1973808),
(113, 14483307),
(114, 8454077),
(115, 10131967),
(116, 9332479),
(117, 4210752),
(118, 7697781),
(119, 14745599),
(120, 10485760),
(121, 3473408),
(122, 1757184),
(123, 475648),
(124, 12169216),
(125, 4141312),
(126, 11755264),
(127, 4920578)) | [
"julien@julienbayle.net"
] | julien@julienbayle.net |
29643c3046fd8c594d777bdbc9bb75858a56e9ef | a7211f3f0ef6cbb96a796e502062656681dcdf9b | /run_rle.py | 744e4c9379f0be2a0affd1c521af8540da053fae | [
"MIT"
] | permissive | chplushsieh/carvana-challenge | fdc3e78966a37f95f3e60a179a511705cc0da55f | cba536657714df7c1c33150b92e3e152195b68db | refs/heads/master | 2021-01-01T19:40:08.442482 | 2017-10-05T17:53:29 | 2017-10-05T17:53:29 | 98,639,822 | 24 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | import time
import argparse
import util.ensemble as ensemble
import util.submit as submit
import util.const as const
import rle_loader
def apply_rle(pred_dir, rle_loader):
    """Collect run-length encodings from *rle_loader* and save a submission.

    Each loader element is a (img_name, rle) pair of single-element batches;
    the batch dimension is unwrapped before accumulating the encodings and
    handing them to ``submit.save_predictions``.
    """
    encodings = {}
    for idx, (name_batch, rle_batch) in enumerate(rle_loader):
        tick = time.time()
        # batch size is expected to be exactly one
        assert len(name_batch) == 1
        assert len(rle_batch) == 1
        encodings[name_batch[0]] = rle_batch[0]
        if (idx % 1000) == 0:
            print('Iter {} / {}, time spent: {} sec'.format(idx, len(rle_loader), time.time() - tick))
    # save into submission.csv
    submit.save_predictions(pred_dir, encodings)
    return
if __name__ == "__main__":
    program_start = time.time()
    # Single optional positional argument: the prediction directory name.
    parser = argparse.ArgumentParser()
    parser.add_argument('pred_dir', nargs='?', default='0922-03:34:53')
    args = parser.parse_args()
    pred_dir = args.pred_dir
    # Report which model / test-time-augmentation pairs formed the ensemble.
    exp_names, test_time_aug_names = ensemble.get_models_ensembled(pred_dir)
    print('The predictions are ensemble from {}. '.format(list(zip(exp_names, test_time_aug_names))))
    # NOTE(review): this rebinds the imported 'rle_loader' module name to a
    # loader instance, shadowing the module for the rest of the block.
    rle_loader = rle_loader.get_rle_loader(pred_dir)
    apply_rle(pred_dir, rle_loader)
    print('Total time spent: {} sec = {} hours'.format(time.time() - program_start, (time.time() - program_start) / 3600))
"chplushsieh@gmail.com"
] | chplushsieh@gmail.com |
df62e1d9eca4e326139aca3e09b2b263a8f172c2 | 0e2484fd7660c0a21e4f2ac199b0cc6737f19cfa | /api_tag/api_tag/settings.py | b613bbf56b628b4e6e77f5b120cd475c61008640 | [] | no_license | codesree/tag-build | 63bfa32c6036f78f7aa7659920369a2e51c3f892 | 192f305df88f08606b62f111152dfebf7f6f4063 | refs/heads/master | 2020-03-22T20:34:10.474808 | 2018-09-16T16:05:40 | 2018-09-16T16:05:40 | 140,610,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | py | """
Django settings for api_tag project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import mongoengine
import djongo
import logging
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,"templates")
STATIC_DIR = os.path.join(BASE_DIR,"static")
mongoengine.connect()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b(_=8mkl+hpoq#2)18uoeb+fzb17@qr#_mzmp4)x-$t@_*p0zp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['tag-env.k7wbxx2hzf.us-east-1.elasticbeanstalk.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'testapi',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api_tag.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api_tag.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Database configuration: the 'djongo' engine maps Django's ORM onto MongoDB
# (database name 'tag_users').  Note that mongoengine.connect() above also
# opens a separate direct MongoDB connection.
DATABASES = {
    'default': {
        'ENGINE': 'djongo',
        'NAME': 'tag_users',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
# Ordered hasher list: per Django docs the first entry is used to hash new
# passwords, the rest allow verifying passwords stored with older schemes.
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),]
LOGIN_URL = '/test_api/login'
| [
"srekanth.me@gmail.com"
] | srekanth.me@gmail.com |
66b654358f12a58b653f3dd74fb992717fe0bcc6 | a6b46a37bb2fc9e27ed000cb5d2e1fcef6e7527c | /python/common/expressions.py | fac15ec0ea42f9bfa0e78d51628dfc525e99d9a9 | [
"MIT"
] | permissive | data-man-34/ad_examples | 3c4b522b64a8387aed922a6fd062114a3e96c26f | f0d5d95c443cf1cfaf293a2e76b9bff3cbfd85b7 | refs/heads/master | 2020-04-09T15:12:48.677931 | 2018-12-04T18:58:22 | 2018-12-04T18:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,371 | py | import tokenize
import re
import os
import numpy as np
# from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support
"""
General Rule-parsing functions. We might use only a subset of the features available.
For some rule illustrations/examples, see test_rule_apis()
To test:
pythonw -m common.expressions
"""
# Supported datatypes
DTYPE_CATEGORICAL = 0 # categorical
DTYPE_CONTINUOUS = 1 # numerical float values
DTYPE_BINARY = 2 # categorical {'0','1'}
LABEL_VAR_INDEX = -1
UNINITIALIZED_VAR_INDEX = -2
ILLEGAL_VAR_INDEX = -3
UNKNOWN_CATEGORICAL_VALUE_INDEX = -1
DEFAULT_PREDICATE_WEIGHT = 1.0
class stack(list):
    """A minimal LIFO stack built on the built-in list."""

    def push(self, item):
        """Place *item* on top of the stack."""
        list.append(self, item)

    def is_empty(self):
        """Return True when the stack holds no items."""
        return len(self) == 0
class DType(object):
    """Abstract base for feature/label datatype definitions.

    Subclasses (e.g. Factor, NumericContinuous) override is_numeric() and
    is_continuous(); the base implementations simply return None.
    """
    def __init__(self):
        self.name = None  # optional display name; not set by subclasses here
    def is_numeric(self):
        """Whether the type holds numeric values (abstract; returns None here)."""
        pass
    def is_continuous(self):
        """Whether the type is continuous (abstract; returns None here)."""
        pass
class Factor(DType):
    """Categorical variable definition: level<->index maps plus one-hot codes.

    Maintains three lookup tables: string level -> integer code, integer
    code -> string level, and integer code -> one-hot vector.  An all-NaN
    vector is returned for unknown codes so downstream numeric code can
    detect missing values.
    """

    def __init__(self, vals, sort=True):
        """Build the level/index/one-hot tables from the allowed values.

        Args:
            vals: iterable of allowed level names; empty strings are dropped.
            sort: when True (default), levels are indexed in sorted order;
                otherwise the input order is preserved (useful when the
                values are e.g. column names in a fixed order).
        """
        DType.__init__(self)
        levels = [v for v in vals if v != '']
        if sort:
            levels = sorted(levels)
        n = len(levels)
        # Returned by encode_to_one_hot() for unknown codes.
        self.__onehotNA = np.full(n, np.nan)
        self.__values = {}
        self.__fromindex = {}
        self.__onehot = {}
        for code, level in enumerate(levels):
            self.__values[level] = code
            self.__fromindex[code] = level
            hot = np.zeros(n, dtype=float)
            hot[code] = 1
            self.__onehot[code] = hot

    def is_numeric(self):
        return False

    def is_continuous(self):
        return False

    def all_values(self):
        """Return the level-name -> integer-code dict."""
        return self.__values

    def index_of(self, value):
        """Return the integer code for *value*, or None when unknown."""
        return self.__values.get(value)

    def encode_to_one_hot(self, index):
        """Return the one-hot vector for code *index* (all-NaN when unknown).

        Some algorithms (e.g. scikit-learn trees) require purely numeric
        inputs, hence the one-hot encoding of categorical features.
        """
        hot = self.__onehot.get(index)
        if hot is None:
            return self.__onehotNA
        return hot

    def __getitem__(self, index):
        return self.__fromindex[index]

    def num_values(self):
        return len(self.__values)

    def __repr__(self):
        return 'Factor ' + repr(self.__values)

    def __str__(self):
        return 'Factor ' + repr(self.__values)
class NumericContinuous(DType):
    """Continuous (Gaussian) variable definition: stores mean and variance."""

    def __init__(self, vals):
        """Estimate mean and variance from *vals*, ignoring NaN entries."""
        DType.__init__(self)
        valid = np.count_nonzero(~np.isnan(vals))
        if valid > 0:
            self.mean = np.nanmean(vals)
            self.variance = np.nanvar(vals)
        else:
            # No usable observations: fall back to a degenerate distribution.
            self.mean = 0
            self.variance = 0

    def is_numeric(self):
        return True

    def is_continuous(self):
        return True

    def __repr__(self):
        return 'Continuous(mean=' + str(self.mean) + ", var=" + str(self.variance) + ")"

    def __str__(self):
        return repr(self)
class FeatureMetadata(object):
    """Bundles label and feature metadata for a dataset.

    Attributes:
        lblname: column name of the label column.
        lbldef: Factor of permissible label values.
        featurenames: Factor of feature names (same order as the features).
        featuredefs: list of per-feature type definitions.
    """

    def __init__(self, lblname=None, lbldef=None,
                 featurenames=None, featuredefs=None):
        self.lblname = lblname
        self.lbldef = lbldef
        self.featurenames = featurenames
        self.featuredefs = featuredefs

    def num_features(self):
        """Return the number of features; 0 when no definitions are present."""
        if self.featuredefs is None:
            return 0
        return len(self.featuredefs)

    def _tostr(self):
        # One "key: value" line per attribute, under a fixed header.
        lines = ["[FeatureMetadata]",
                 "lblname: " + str(self.lblname),
                 "lbldef: " + str(self.lbldef),
                 "featurenames: " + str(self.featurenames),
                 "featuredefs: " + str(self.featuredefs)]
        return "\n".join(lines)

    def __repr__(self):
        return self._tostr()

    def __str__(self):
        return self._tostr()
class Expression(object):
    """Base class for any expression that needs to be parsed or evaluated."""
    def evaluate(self, inst, lbl, meta):
        """Given an instance, will evaluate the expression.
        The expression might return a literal or a True/False value.
        """
        pass
    def compile(self, meta):
        """Resolves the variable and literal bindings such that
        the expression can be evaluated efficiently later.
        """
        pass
    def ground(self, inst, lbl, meta):
        """Grounds the expression with values from
        the instance and returns a string representation.
        This is useful for debugging since it makes the
        evaluation transparent.
        """
        pass
    def expr(self, meta):
        """Returns a readable string representation of the rule that can be parsed."""
        # NOTE: no body — returns None unless overridden by a subclass.
    def get_variables(self):
        """Returns the set of variables bound to the expression"""
        # Base class deliberately rejects; Literal/Var/predicates override.
        raise TypeError("Unsupported operation: get_variables()")
    def __repr__(self):
        return str(self)
class Term(Expression):
    """Marker base class for non-boolean expression terms (Literal, Var)."""
    pass
class Literal(Term):
    """A constant value appearing in a rule expression.

    The value may be numeric or categorical (string).  For categorical
    literals the integer code of the value is resolved during compilation of
    the enclosing comparison and stored in ``valindex``.  When
    ``removequotes`` is set, surrounding single/double quotes are stripped
    from the raw token.
    """
    # Matches a (possibly space-padded) quoted token; group 2 is the content.
    rexp = re.compile(r" *(['\"])(.*)\1 *")

    def __init__(self, val, removequotes=False):
        self.val = val
        self.valindex = UNKNOWN_CATEGORICAL_VALUE_INDEX
        self.categorical = None  # resolved later by Cmp.compile()
        if removequotes:
            match = Literal.rexp.match(val)
            if match is not None:
                self.val = match.group(2)

    def evaluate(self, inst, lbl, meta):
        """Return the literal's value: integer code if categorical, raw otherwise."""
        if self.categorical is None:
            raise ValueError("Undetermined whether literal '" + str(self.val) + "' is categorical or not.")
        if self.categorical:
            return self.valindex
        return self.val

    def ground(self, inst, lbl, meta):
        return repr(self.val)

    def expr(self, meta):
        """Parsable string form; categorical values are single-quoted."""
        if meta is None:
            raise ValueError("Invalid metadata")
        if self.categorical is not None and self.categorical:
            return "'" + str(self.val) + "'"
        return str(self.val)

    def get_variables(self):
        # A literal binds no variables.
        return set()

    def __str__(self):
        return "Lit({}<{}>)".format(self.val, self.valindex)
class Var(Term):
    """Variable/Feature.

    This represents the feature/label variable. Initially, the
    name of the feature is stored. This gets compiled later into
    the feature index such that evaluation can be faster.

    Label variable is indicated by the feature index '-1'.
    Before compilation, the feature index is initialized to '-2'.
    After compilation, the feature index corresponding to the
    variable name is looked up using the metadata.

    Attributes:
        name: string
            The name of the feature. Usually corresponds to a
            columnname on the data.
        varindex: int
            The index of the variable -- computed during compilation.
        vartype: int (default DTYPE_CATEGORICAL)
            The datatype for the variable.
    """

    def __init__(self, name):
        self.name = name
        self.varindex = UNINITIALIZED_VAR_INDEX  # resolved by compile()
        self.vartype = DTYPE_CATEGORICAL

    def evaluate(self, inst, lbl, meta):
        """Return the variable's value for (inst, lbl); None when missing/unbound."""
        ret = None
        if self.varindex == LABEL_VAR_INDEX:
            ret = lbl
        elif self.vartype == DTYPE_CATEGORICAL and self.varindex >= 0:
            # categorical features are stored as integer codes
            ret = int(inst[self.varindex])
        elif self.vartype == DTYPE_CONTINUOUS and self.varindex >= 0:
            ret = inst[self.varindex]
        if ret is None:
            # Variable not bound (compile() not run, or illegal index).
            # Explicit guard: the previous 'ret < 0' comparison against None
            # raises TypeError on Python 3.
            return None
        # Negative categorical codes represent missing/unknown values.
        return None if self.vartype == DTYPE_CATEGORICAL and ret < 0 else ret

    def compile(self, meta):
        """Resolve the variable name to a feature/label index using *meta*.

        Raises:
            ValueError: if the name is neither a known feature nor the label.
        """
        self.varindex = UNINITIALIZED_VAR_INDEX  # set to uninitialized first
        if self.name == meta.lblname:
            self.varindex = LABEL_VAR_INDEX  # label column
        else:
            idx = meta.featurenames.index_of(self.name)
            self.varindex = ILLEGAL_VAR_INDEX if idx is None else idx
        if self.varindex == ILLEGAL_VAR_INDEX:
            raise ValueError("Unknown variable: '%s' in expression. Allowed variables: %s or '%s'" %
                             (self.name, str(meta.featurenames.all_values().keys()), meta.lblname))
        if self.varindex >= 0 and meta.featuredefs is not None \
                and meta.featuredefs[self.varindex].is_continuous():
            self.vartype = DTYPE_CONTINUOUS
        else:
            self.vartype = DTYPE_CATEGORICAL

    def ground(self, inst, lbl, meta):
        """Return 'name(value)' with the value taken from the instance/label."""
        val = "?"
        if self.varindex == LABEL_VAR_INDEX:  # label
            val = "'" + meta.lbldef[lbl] + "'"
        elif self.varindex >= 0:
            # assume that all features are continuous
            val = inst[self.varindex]
        return str(self.name) + "(" + repr(val) + ")"

    def expr(self, meta):
        """Return the (compiled) variable's display name."""
        if self.varindex == LABEL_VAR_INDEX:  # label
            return meta.lblname
        elif self.varindex >= 0:
            return meta.featurenames[self.varindex]
        raise ValueError("Uncompiled Rule: %s" % (str(self),))

    def get_variables(self):
        return {self.varindex}

    def __str__(self):
        return "Var(" + str(self.name) + "<" + str(self.varindex) + ">)"
class Predicate(Expression):
    """Marker base class for boolean-valued expressions."""
    pass
class Atom(Predicate):
    """Marker subclass of Predicate (presumably for atomic predicates)."""
    pass
class BinaryPredicate(Predicate):
    """Base for predicates combining two sub-expressions p1 and p2."""

    def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
        self.p1 = p1
        self.p2 = p2
        self.weight = weight

    def compile(self, meta):
        # Resolve bindings in both operands.
        self.p1.compile(meta)
        self.p2.compile(meta)

    def get_variables(self):
        """Return the union of the variables bound in the two operands."""
        return self.p1.get_variables() | self.p2.get_variables()

    def get_str_weight(self, suppress_default_weight=True):
        """String form of the predicate weight; '' for the default weight."""
        if suppress_default_weight and self.weight == DEFAULT_PREDICATE_WEIGHT:
            return ""
        return "[" + str(self.weight) + "]"
class UnaryPredicate(Predicate):
    """Base for predicates wrapping a single sub-expression p."""

    def __init__(self, p=None, weight=DEFAULT_PREDICATE_WEIGHT):
        self.p = p
        self.weight = weight

    def compile(self, meta):
        # Resolve bindings in the wrapped expression.
        self.p.compile(meta)

    def get_variables(self):
        """Return the variables bound in the wrapped expression."""
        return set(self.p.get_variables())

    def get_str_weight(self, suppress_default_weight=True):
        """String form of the predicate weight; '' for the default weight."""
        if suppress_default_weight and self.weight == DEFAULT_PREDICATE_WEIGHT:
            return ""
        return "[" + str(self.weight) + "]"
class Cmp(BinaryPredicate):
    """Base class for evaluating comparison operators."""
    def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
        Cmp.__init__ = BinaryPredicate.__init__ if False else Cmp.__init__  # no-op; see NOTE below
        BinaryPredicate.__init__(self, p1=p1, p2=p2, weight=weight)
    def evaluate(self, inst, lbl, meta):
        # Evaluate both operands, then delegate to the subclass comparator.
        e1 = self.p1.evaluate(inst, lbl, meta)
        e2 = self.p2.evaluate(inst, lbl, meta)
        ret = None if e1 is None or e2 is None else self.evaluateCmp(e1, e2)
        # print(str(self) + ': ' + str(ret))
        if ret is None:
            raise ValueError('predicate value for %s unbound \n inst: %s' \
                             % (str(self), str(inst)))
        return ret
    def evaluateCmp(self, e1, e2):
        # Subclasses (CmpEq, CmpLr, ...) implement the actual comparison.
        raise NotImplementedError('Comparison operator not implemented.')
    def compile(self, meta):
        self.p1.compile(meta)
        self.p2.compile(meta)
        # Comparisons must be between a variable and a literal.
        tvar = self.p1 if isinstance(self.p1, Var) else self.p2 if isinstance(self.p2, Var) else None
        tlit = self.p1 if isinstance(self.p1, Literal) else self.p2 if isinstance(self.p2, Literal) else None
        if tvar is not None and tlit is not None:
            if tvar.varindex == LABEL_VAR_INDEX: # label column
                tlit.categorical = True # class variables are always categorical
                tlit.valindex = meta.lbldef.index_of(tlit.val)
            elif tvar.varindex >= 0: # feature column
                # assume that all features are continuous
                tlit.categorical = False
            else:
                raise ValueError('Comparisons must be between a variable and a literal.')
    def ground(self, inst, lbl, meta):
        raise NotImplementedError('ground() not implemented.')
    def __str__(self):
        return "Cmp(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpEq(Cmp):
    """Compares if values of two expressions are equal"""
    def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
        Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
    def evaluateCmp(self, e1, e2):
        # Equality test used by Cmp.evaluate() on the operand values.
        return e1 == e2
    def ground(self, inst, lbl, meta):
        return "" + self.p1.ground(inst, lbl, meta) + " = " + self.p2.ground(inst, lbl, meta) + ""
    def expr(self, meta):
        return "(" + self.p1.expr(meta) + " = " + self.p2.expr(meta) + ")" + self.get_str_weight()
    def __str__(self):
        return "CmpEq(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpLr(Cmp):
    """Compares if e1 < e2"""
    def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
        Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
    def evaluateCmp(self, e1, e2):
        # Strict less-than used by Cmp.evaluate() on the operand values.
        return e1 < e2
    def ground(self, inst, lbl, meta):
        return "" + self.p1.ground(inst, lbl, meta) + " < " + self.p2.ground(inst, lbl, meta) + ""
    def expr(self, meta):
        return "(" + self.p1.expr(meta) + " < " + self.p2.expr(meta) + ")" + self.get_str_weight()
    def __str__(self):
        return "CmpLr(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpLE(Cmp):
    """Compares if e1 <= e2"""
    def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
        Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
    def evaluateCmp(self, e1, e2):
        # Less-than-or-equal used by Cmp.evaluate() on the operand values.
        return e1 <= e2
    def ground(self, inst, lbl, meta):
        return "" + self.p1.ground(inst, lbl, meta) + " <= " + self.p2.ground(inst, lbl, meta) + ""
    def expr(self, meta):
        return "(" + self.p1.expr(meta) + " <= " + self.p2.expr(meta) + ")" + self.get_str_weight()
    def __str__(self):
        return "CmpLE(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpGr(Cmp):
    """Compares if e1 > e2"""
    def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
        Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
    def evaluateCmp(self, e1, e2):
        # Strict greater-than used by Cmp.evaluate() on the operand values.
        return e1 > e2
    def ground(self, inst, lbl, meta):
        return "" + self.p1.ground(inst, lbl, meta) + " > " + self.p2.ground(inst, lbl, meta) + ""
    def expr(self, meta):
        return "(" + self.p1.expr(meta) + " > " + self.p2.expr(meta) + ")" + self.get_str_weight()
    def __str__(self):
        return "CmpGr(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpGE(Cmp):
    """Compares if e1 >= e2"""
    def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
        Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
    def evaluateCmp(self, e1, e2):
        # Greater-than-or-equal used by Cmp.evaluate() on the operand values.
        return e1 >= e2
    def ground(self, inst, lbl, meta):
        return "" + self.p1.ground(inst, lbl, meta) + " >= " + self.p2.ground(inst, lbl, meta) + ""
    def expr(self, meta):
        return "(" + self.p1.expr(meta) + " >= " + self.p2.expr(meta) + ")" + self.get_str_weight()
    def __str__(self):
        return "CmpGE(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class Or(BinaryPredicate):
    """Logical disjunction; short-circuits when the left operand is True."""
    def __init__(self, p1, p2, weight=DEFAULT_PREDICATE_WEIGHT):
        BinaryPredicate.__init__(self, p1=p1, p2=p2, weight=weight)
    def evaluate(self, inst, lbl, meta):
        e1 = self.p1.evaluate(inst, lbl, meta)
        if e1 is None:
            raise ValueError('predicate value unbound for e1')
        elif e1:
            # Short-circuit: the right operand is never evaluated.
            return True
        e2 = self.p2.evaluate(inst, lbl, meta)
        ret = None if e1 is None or e2 is None else e1 or e2
        # print(str(self) + ': ' + str(ret))
        if ret is None:
            raise ValueError('predicate value unbound for e2')
        return ret
    def ground(self, inst, lbl, meta):
        return "(" + self.p1.ground(inst, lbl, meta) + " | " + self.p2.ground(inst, lbl, meta) + ")"
    def expr(self, meta):
        return "(" + self.p1.expr(meta) + " | " + self.p2.expr(meta) + ")" + self.get_str_weight()
    def __str__(self):
        return "Or(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class And(BinaryPredicate):
    """Logical conjunction of two predicates with short-circuit evaluation."""

    def __init__(self, p1, p2, weight=DEFAULT_PREDICATE_WEIGHT):
        BinaryPredicate.__init__(self, p1=p1, p2=p2, weight=weight)

    def evaluate(self, inst, lbl, meta):
        """Evaluates p1 & p2 on one instance; short-circuits when p1 is falsy.

        Raises ValueError if either evaluated operand is unbound (None).
        (Cleanup: the original re-tested `e1 is None` after that case had
        already raised, which was unreachable; since e1 is known truthy when
        p2 is evaluated, `e1 and e2` reduces to `e2`.)
        """
        e1 = self.p1.evaluate(inst, lbl, meta)
        if e1 is None:
            raise ValueError('predicate value unbound for e1')
        elif not e1:
            # Short-circuit: p2 is not evaluated when p1 is already false.
            return False
        e2 = self.p2.evaluate(inst, lbl, meta)
        if e2 is None:
            raise ValueError('predicate value unbound for e2')
        return e2

    def ground(self, inst, lbl, meta):
        return "(" + self.p1.ground(inst, lbl, meta) + " & " + self.p2.ground(inst, lbl, meta) + ")"

    def expr(self, meta):
        return "(" + self.p1.expr(meta) + " & " + self.p2.expr(meta) + ")" + self.get_str_weight()

    def __str__(self):
        return "And(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class Not(UnaryPredicate):
    """Logical negation of a single predicate."""

    def __init__(self, p, weight=DEFAULT_PREDICATE_WEIGHT):
        UnaryPredicate.__init__(self, p=p, weight=weight)

    def evaluate(self, inst, lbl, meta):
        # Negates the inner predicate; an unbound (None) inner value is an error.
        inner = self.p.evaluate(inst, lbl, meta)
        if inner is None:
            raise ValueError('predicate value unbound')
        return not inner

    def ground(self, inst, lbl, meta):
        return "~({})".format(self.p.ground(inst, lbl, meta))

    def expr(self, meta):
        return "~({}){}".format(self.p.expr(meta), self.get_str_weight())

    def __str__(self):
        return "Not({}){}".format(self.p, self.get_str_weight())
class RuleParser(object):
    """Methods to parse strings as Expression objects."""

    # noinspection PyMethodMayBeStatic
    def parse(self, s):
        """Parses string 's' and returns an Expression object.

        Implements a shunting-yard style parse: operands (Var/Literal and
        already-reduced sub-predicates) live on `astk`, pending operators on
        `ostk`, and consume_operator() reduces by precedence. A trailing
        '[w]' attaches the numeric weight w to the preceding predicate.

        :param s: str
        :return: Predicate
        """
        # A kludgy way to make it work for both Python 2.7 and 3.5+
        # (fix: catch ImportError specifically instead of a bare `except:`,
        # which would also have hidden unrelated failures).
        try:
            import StringIO
            rdr = StringIO.StringIO(s).readline
        except ImportError:
            from io import StringIO
            rdr = StringIO(s).readline

        def precedence(op):
            """Higher value means higher precedence"""
            if op == "|":
                return 1
            elif op == "&":
                return 2
            elif op == "~":
                return 3
            elif op == "=" or op == "<=" or op == "<" or op == ">" or op == ">=":
                return 4
            elif op == "":  # usually as endmarker
                return 0
            else:
                return 0

        def consume_operator(astk, ostk, op):
            # Reduce the stacks until `op` may be pushed, or until its
            # matching opening bracket has been consumed.
            while not ostk.is_empty():
                top = ostk[len(ostk) - 1]
                # print("top: %s op: %s precedence(top): %d precedence(op): %d" % (top, op, precedence(top), precedence(op)))
                if op == ")" and top == "(":
                    ostk.pop()
                    break
                elif op == "]" and top == "[":
                    # populate predicate weight
                    ostk.pop()
                    # There must be a predicate and a numeric literal on stack
                    if len(astk) < 2:
                        raise ValueError("invalid weight found")
                    wtlit = astk.pop()
                    pred = astk.pop()
                    if not isinstance(wtlit, Literal) or not isinstance(pred, Predicate):
                        raise ValueError("invalid weight format")
                    pred.weight = wtlit.val
                    astk.push(pred)
                    break
                elif op == "]" and not top == "[":
                    raise ValueError("invalid ']' found")
                if precedence(op) <= precedence(top):
                    # Reduce the operator on top of the stack into a predicate.
                    if top == "=":
                        ostk.pop()
                        t2 = astk.pop()
                        t1 = astk.pop()
                        astk.push(CmpEq(t1, t2))
                    elif top == "<":
                        ostk.pop()
                        t2 = astk.pop()
                        t1 = astk.pop()
                        astk.push(CmpLr(t1, t2))
                    elif top == "<=":
                        ostk.pop()
                        t2 = astk.pop()
                        t1 = astk.pop()
                        astk.push(CmpLE(t1, t2))
                    elif top == ">":
                        ostk.pop()
                        t2 = astk.pop()
                        t1 = astk.pop()
                        astk.push(CmpGr(t1, t2))
                    elif top == ">=":
                        ostk.pop()
                        t2 = astk.pop()
                        t1 = astk.pop()
                        astk.push(CmpGE(t1, t2))
                    elif top == "~":
                        ostk.pop()
                        t1 = astk.pop()
                        astk.push(Not(t1))
                    elif top == "&":
                        ostk.pop()
                        t2 = astk.pop()
                        t1 = astk.pop()
                        astk.push(And(t1, t2))
                    elif top == "|":
                        ostk.pop()
                        t2 = astk.pop()
                        t1 = astk.pop()
                        astk.push(Or(t1, t2))
                    else:
                        break
                else:
                    break

        astk = stack()
        ostk = stack()  # operator stack
        g = tokenize.generate_tokens(rdr)  # tokenize the string
        ret = None
        for toknum, tokval, _, _, _ in g:
            if toknum == tokenize.OP:
                # print('OP ' + tokval + ' ' + str(toknum))
                if tokval == "(":  # nested predicate
                    ostk.push(tokval)
                elif tokval == ")":
                    consume_operator(astk, ostk, tokval)
                elif tokval == "[":  # predicate weight
                    ostk.push(tokval)
                elif tokval == "]":
                    consume_operator(astk, ostk, tokval)
                elif tokval == "-":  # handle negative numbers
                    ostk.push(tokval)
                elif tokval in ["=", "&", "|", "~", "<=", "<", ">", ">="]:
                    consume_operator(astk, ostk, tokval)
                    ostk.push(tokval)
                else:
                    raise SyntaxError("Illegal operator '" + tokval + "' found in rule expression")
            elif toknum == tokenize.NAME:
                # print('NAME ' + tokval + ' ' + str(toknum))
                astk.push(Var(tokval))
            elif toknum == tokenize.STRING:
                # print('STR/NUM ' + tokval + ' ' + str(toknum))
                astk.push(Literal(tokval, removequotes=True))
            elif toknum == tokenize.NUMBER:
                # print('STR/NUM ' + tokval + ' ' + str(toknum))
                sign = 1
                if len(ostk) > 0 and ostk[len(ostk) - 1] == "-":
                    # A '-' pushed immediately before a number negates the literal.
                    sign = -1
                    ostk.pop()
                astk.push(Literal(sign * float(tokval)))
            elif toknum == tokenize.INDENT or toknum == tokenize.DEDENT:
                pass
            elif toknum == tokenize.ENDMARKER:
                # Flush all remaining operators; exactly one operand must remain.
                consume_operator(astk, ostk, "")
                ret = None if astk.is_empty() else astk.pop()
                # print(ret)
                if not astk.is_empty():
                    print(astk)
                    print(ostk)
                    raise SyntaxError("Invalid rule syntax in " + str(s))
            else:
                print('UNK ' + tokval + ' ' + str(toknum))
        # print("astk: %s" % (str(astk),))
        # print("ostk: %s" % (str(ostk),))
        return ret
def string_to_predicate(str_predicate, meta=None, parser=None):
    """Converts a string representation of rule to a Predicate object"""
    if parser is None:
        parser = RuleParser()
    predicate = parser.parse(str_predicate)
    # Binding feature indexes requires metadata; skip when none is given.
    if meta is not None:
        predicate.compile(meta)
    return predicate
class PredicateContext(object):
    """Holds predicate traversal context.

    Attributes:
        neg: bool
            Negation flag for the traversal (starts False).
        features: list of tuples
            Accumulated (Var, Literal, Predicate) triples collected while
            traversing a conjunctive predicate.  (Doc fix: this is a list,
            not a set, as the code appends to it.)
    """
    def __init__(self):
        self.neg = False
        self.features = []
def traverse_predicate_conjunctions(predicate, context):
    """ Updates traversal context recursively

    Collects all the And/Cmp predicate expressions into the context's
    feature list. Expressions other than {Cmp, And} will raise error.

    :param predicate: Expression
    :param context: PredicateContext
    :return: PredicateContext
    """
    if isinstance(predicate, And):
        # Recurse into both conjuncts.
        traverse_predicate_conjunctions(predicate.p1, context)
        traverse_predicate_conjunctions(predicate.p2, context)
        return context
    if not isinstance(predicate, Cmp):
        raise ValueError("Expected conjunctive form, but found Or()")
    # A comparison leaf must bind one Var and one Literal (either side).
    var_part = None
    lit_part = None
    if isinstance(predicate.p1, Var):
        var_part = predicate.p1
    elif isinstance(predicate.p2, Var):
        var_part = predicate.p2
    if isinstance(predicate.p1, Literal):
        lit_part = predicate.p1
    elif isinstance(predicate.p2, Literal):
        lit_part = predicate.p2
    if var_part is None or lit_part is None:
        raise ValueError("Unbound Var or Literal. Expected Comparison of Var and Literal.")
    context.features.append((var_part, lit_part, predicate))
    return context
def conjunctive_predicate_to_list(predicate):
    """Flattens a conjunctive predicate into its (Var, Literal, Cmp) triples."""
    ctx = PredicateContext()
    return traverse_predicate_conjunctions(predicate, ctx).features
class ConjunctiveRule(object):
    """ Represents a conjunction (And) of simple one-feature-value comparison predicates """

    def __init__(self, predicates, meta, id=None):
        self.predicates = predicates
        self.meta = meta
        # id might be required e.g., when we have to remember the node
        # of the tree that this corresponds to.
        self.id = id
        self.support = None
        self.confusion_matrix = None

    def set_confusion_matrix(self, positive_indexes, y):
        """Computes the confusion matrix [[tp, fp], [fn, tn]] and support.

        `positive_indexes` are the instances predicted positive by this rule;
        y holds the 0/1 ground-truth labels.

        Bug fix: fn and tn were previously swapped -- np.sum(y[negative_indexes])
        counts the actual positives among predicted negatives, which is the
        number of FALSE negatives, not true negatives.
        """
        mask = np.array([True] * len(y))
        mask[positive_indexes] = False
        negative_indexes = np.where(mask)[0]
        tp = np.sum(y[positive_indexes])
        fp = len(positive_indexes) - tp
        fn = np.sum(y[negative_indexes])   # actual positives predicted negative
        tn = len(negative_indexes) - fn    # actual negatives predicted negative
        self.confusion_matrix = np.array([[tp, fp], [fn, tn]], dtype=np.float32)
        # Support is precision w.r.t. the positive label.
        # NOTE(review): if the rule matches nothing (tp + fp == 0) this
        # divides by zero -- confirm callers always pass a non-empty match set.
        self.support = tp * 1.0 / (tp + fp)

    @staticmethod
    def parse(str_rule, meta):
        """Parses a conjunctive rule string such as 'F1 > 0 & F2 <= 3'."""
        rule = string_to_predicate(str_rule, meta)
        conjunctions = conjunctive_predicate_to_list(rule)
        predicates = []
        for p1, p2, predicate in conjunctions:
            if not (isinstance(predicate.p1, Var) and isinstance(predicate.p2, Literal)):
                raise ValueError("Conjunctive predicates must be of format: Variable = Literal")
            predicates.append(predicate)
        return ConjunctiveRule(predicates, meta)

    def evaluate_inst(self, inst, label):
        """ Checks if the instance satisfies all the predicates (i.e., 'And') """
        result = True
        i = 0
        # Short-circuits on the first predicate that fails.
        while result and i < len(self.predicates):
            result = result and self.predicates[i].evaluate(inst, label, self.meta)
            i += 1
        return result

    def where_satisfied(self, insts, labels):
        """ Returns all indexes of insts which satisfy the rule

        :param insts: np.ndarray
        :param labels: np.array
        :return: np.array
        """
        satisfied = []
        for i in range(insts.shape[0]):
            if self.evaluate_inst(insts[i, :], labels[i]):
                satisfied.append(i)
        return np.array(satisfied, dtype=np.int32)

    def _str_confusion_mat(self):
        # Row-wise string form of the confusion matrix, or 'None' when unset.
        if self.confusion_matrix is None:
            return 'None'
        else:
            return "[%s, %s]" % \
                   (str(list(self.confusion_matrix[0, :])), str(list(self.confusion_matrix[1, :])))

    def __str__(self):
        predicate_strs = []
        for predicate in self.predicates:
            predicate_strs.append(predicate.expr(self.meta))
        return " & ".join(predicate_strs)

    def __len__(self):
        # Number of predicates in the conjunction (0 when unset).
        if self.predicates is not None:
            return len(self.predicates)
        return 0

    def __repr__(self):
        predicate_strs = []
        for predicate in self.predicates:
            predicate_strs.append(predicate.expr(self.meta))
        return "%s%s%s" % \
               ("" if self.support is None else "support: %0.4f; " % self.support,
                " & ".join(predicate_strs),
                "" if self.confusion_matrix is None else "; %s" % self._str_confusion_mat())
def convert_feature_ranges_to_rules(ranges, meta):
    """ Converts list of maps of feature-ranges to Rule objects.

    Each range map in the input list will be converted to a separate Rule.
    The leaf nodes of a tree-based model usually partition the feature
    space into subspaces defined by corresponding feature ranges. These
    feature-ranges can be represented by the ConjunctiveRule data structure.

    :param ranges: list of dict
        [{feature_index: (min_val, max_val), ...}, ...]
    :param meta: FeatureMetadata
    :return: list of ConjunctiveRule, list of strings
    """
    rules = []
    str_rules = []
    for range_map in ranges:
        predicates = []
        # Fix: loop variable renamed from `range`, which shadowed the builtin.
        for feature, f_range in range_map.items():
            # Only finite bounds contribute a predicate; +/-inf means unbounded.
            if np.isfinite(f_range[0]):
                predicates.append("%s > %f" % (meta.featurenames[feature], f_range[0]))
            if np.isfinite(f_range[1]):
                predicates.append("%s <= %f" % (meta.featurenames[feature], f_range[1]))
        str_rule = " & ".join(predicates)
        rules.append(ConjunctiveRule.parse(str_rule, meta))
        str_rules.append(str_rule)
    return rules, str_rules
def convert_conjunctive_rule_to_feature_ranges(rule, meta):
    """Intersects a ConjunctiveRule's predicates into per-feature (lo, hi) bounds.

    Only continuous features get an entry; features without a constraining
    predicate keep the unbounded range (-inf, inf).
    """
    extents = {}
    for idx, fdef in enumerate(meta.featuredefs):
        if fdef.is_continuous():
            extents[idx] = (-np.inf, np.inf)
    for pred in rule.predicates:
        idx = pred.p1.varindex
        if not meta.featuredefs[idx].is_continuous():
            continue
        val = pred.p2.val
        lo, hi = extents[idx]
        # '>' / '>=' raise the lower bound; '<' / '<=' lower the upper bound.
        if isinstance(pred, (CmpGr, CmpGE)):
            lo = max(lo, val)
        elif isinstance(pred, (CmpLr, CmpLE)):
            hi = min(hi, val)
        extents[idx] = (lo, hi)
    return extents
def convert_conjunctive_rules_to_strings(rules):
    """Renders each rule via str()."""
    return list(map(str, rules))
def convert_conjunctive_rules_to_feature_ranges(rules, meta):
    """Computes the feature-range map for every rule in `rules`."""
    return [convert_conjunctive_rule_to_feature_ranges(r, meta) for r in rules]
def convert_strings_to_conjunctive_rules(str_rules, meta):
    """Parses each rule string into a ConjunctiveRule."""
    return [ConjunctiveRule.parse(s, meta) for s in str_rules]
def get_max_len_in_rules(rules):
    """Length (number of predicates) of the longest rule in `rules`."""
    return max(len(rule) for rule in rules)
def get_rule_satisfaction_matrix(x, y, rules):
    """ Returns a matrix that shows which instances satisfy which rules

    Each column of the returned matrix corresponds to a rule and each row
    to an instance; entry (i, j) is 1 iff instance i satisfies rule j.

    :param x: np.ndarray
    :param y: np.array
    :param rules: list
    :return: np.ndarray
        matrix with x.shape[0] rows and len(rules) columns
    """
    sat = np.zeros((x.shape[0], len(rules)), dtype=np.int32)
    for col, rule in enumerate(rules):
        sat[rule.where_satisfied(x, y), col] = 1
    return sat
def check_if_at_least_one_rule_satisfied(x, y, rules):
    """ For each input instance, check if it satisfies at least one rule

    Basically performs a disjunction of rules; can be applied to rules in
    DNF format.

    :param x: np.ndarray
    :param y: np.array
        This could be None if unsupervised and if it is not required to
        evaluate any rule
    :param rules: list of rules
    :return: np.array
        Binary indicator for each instance
    """
    indicator = np.zeros(x.shape[0], dtype=np.int32)
    for rule in rules:
        indicator[rule.where_satisfied(x, y)] += 1
    # Clamp the per-instance match count down to a 0/1 indicator.
    return np.minimum(indicator, 1)
def evaluate_ruleset(x, y, rules, average="binary"):
    """ For each input instance, check if it satisfies at least one rule and computes F1 """
    predictions = check_if_at_least_one_rule_satisfied(x, y, rules)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true=y, y_pred=predictions, average=average)
    return precision, recall, f1
def save_strings_to_file(strs, file_path):
    """Writes one string per line to `file_path`.

    Fix: the original appended `os.linesep` through a text-mode handle,
    which yields '\\r\\r\\n' line endings on Windows because text mode
    already translates '\\n'; writing '\\n' is the portable form.

    :param strs: iterable of str
    :param file_path: str, destination path
    :raises ValueError: if file_path is None or empty
    """
    if file_path is None or file_path == '':
        raise ValueError("file_path must be a non-empty string")
    with open(file_path, 'w') as f:
        for s in strs:
            f.write(s + "\n")
def load_strings_from_file(file_path):
    """Reads `file_path` and returns its stripped, non-empty lines as a list."""
    with open(file_path) as f:
        return [line.strip() for line in f if line.strip() != ""]
def get_feature_meta_default(x, y, feature_names=None,
                             label_name='label', labels=None, featuredefs=None):
    """ A simple convenience method that creates a default FeatureMetadata

    Defaults applied when arguments are omitted:
      1. Columns of x are named F1, F2, ...
      2. The class label is referred to as 'label'
      3. All columns are treated as continuous numeric
    Create the metadata yourself in the same manner for dataset-specific names.
    """
    if feature_names is None:
        feature_names = ["F%d" % (i + 1) for i in range(x.shape[1])]
    elif x.shape[1] != len(feature_names):
        raise ValueError("feature_names should have same length as columns in x")
    f_names = Factor(feature_names, sort=False)
    if featuredefs is None:
        featuredefs = [NumericContinuous(x[:, i]) for i in range(x.shape[1])]
    if labels is None:
        labels = np.unique(y)
    return FeatureMetadata(lblname=label_name, lbldef=Factor(labels),
                           featurenames=f_names, featuredefs=featuredefs)
def evaluate_instances_for_predicate(predicate, insts, labels, meta):
    """Indexes of rows in `insts` for which `predicate` evaluates truthy."""
    matched = [i for i in range(insts.shape[0])
               if predicate.evaluate(insts[i, :], labels[i], meta)]
    return np.array(matched, dtype=np.int32)
def test_rule_apis():
    """Smoke-tests the rule-parsing/evaluation APIs on the 'toy2' dataset."""
    from .gen_samples import read_anomaly_dataset
    x, y = read_anomaly_dataset("toy2")
    y = np.asarray(y, dtype=np.int32)
    # Build default metadata: features F1, F2, ... and label column 'label'.
    meta = get_feature_meta_default(x, y)
    print(meta)
    parser = RuleParser()
    """
    Since the default metadata names the features as F1, F2, ... and
    the class label as 'label', we will refer to these by the same names
    in the predicate rules.
    RuleParser can parse any well-formed logical predicates such as
    those in predicate_strs below.
    """
    predicate_strs = [
        # internal representation:
        #   CmpEq(Var(label<-1>), Lit(0.0<0>))
        #   Var(label<-1>) : <-1> means that the variable 'label' is not a regular feature
        #   Lit(0.0<0>)    : the label '0' got changed to 0.0 because it was numeric.
        #                    To make label '0', change the label to string.
        #                    <0> means that '0' is at the 0-th position of the label Factor
        "label = 0",  # all 0 labeled
        # internal representation:
        #   CmpEq(Var(label<-1>), Lit(1.0<1>))
        "label = 1",  # all 1 labeled
        # internal representation:
        #   And(Or(Or(CmpGE(Var(F1<0>), Lit(0.0<-1>)), CmpLr(Var(F2<1>), Lit(2.0<-1>))), CmpLr(Var(F1<0>), Lit(-5.0<-1>))), CmpGr(Var(F2<1>), Lit(0.0<-1>)))
        #   Var(F1<0>)   : feature 'F1' is the 0-th feature
        #   Var(F2<1>)   : feature 'F2' is the 1-st feature
        #   Lit(0.0<-1>) : <-1> here means 0.0 is numeric, and not categorical
        #   ... and so on ...
        "(F1 >= 0 | F2 < 2 | F1 < -5) & F2 > 0",  # just an arbitrary predicate
        # internal representation:
        #   Or(Not(CmpGE(Var(F2<1>), Lit(2.0<-1>))), CmpEq(Var(label<-1>), Lit(1.0<1>)))
        "(~(F2 >= 2) | (label = 1))",  # a Horn clause: (F2 >= 2) => (label = 1)
        # internal representation:
        #   And(And(And(CmpGE(Var(F1<0>), Lit(1.0<-1>)), CmpLr(Var(F1<0>), Lit(5.0<-1>))), CmpGE(Var(F2<1>), Lit(0.0<-1>))), CmpLr(Var(F2<1>), Lit(6.0<-1>)))
        "F1 >= 1 & F1 < 5 & (F2 >= 0) & (F2 < 6)",  # conjunctive predicate
    ]
    # Parse, compile and evaluate each predicate over the full dataset.
    for predicate_str in predicate_strs:
        predicate = parser.parse(predicate_str)
        predicate.compile(meta)  # bind feature indexes to feature names
        matches = evaluate_instances_for_predicate(predicate, x, y, meta)
        print("%s matched: %d\n repr: %s" % (predicate.expr(meta), len(matches), str(predicate)))
    # the rule(s) below are conjunctive
    # conjunctive_str = "(F1 >= 1) & (F1 < 5) & (F2 >= 0) & (F2 < 6)"
    conjunctive_str = "F1 >= 1 & F1 < 5 & (F2 >= 0) & (F2 < 6)"
    # conjunctive rules can be used with the convenience class ConjunctiveRule
    rule = ConjunctiveRule.parse(conjunctive_str, meta)
    idxs = rule.where_satisfied(x, y)
    rule.set_confusion_matrix(idxs, y)
    print(str(rule))
if __name__ == "__main__":
    # Run the API smoke test when this module is executed directly.
    test_rule_apis()
| [
"smd.shubhomoydas@gmail.com"
] | smd.shubhomoydas@gmail.com |
b352068896dbae835d20da90ab54de2d4f34fec9 | d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25 | /contests_atcoder/arc017/arc017_c.py | 80b806e3389e7dfd81e012229a4a9723cc08f1d5 | [
"BSD-2-Clause"
] | permissive | stdiorion/competitive-programming | 5020a12b85f1e691ceb0cacd021606a9dc58b72c | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | refs/heads/main | 2023-03-27T01:13:42.691586 | 2021-03-08T08:05:53 | 2021-03-08T08:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from bisect import bisect_left, bisect_right
# Meet-in-the-middle subset-sum counting: count the subsets of the n
# weights whose total is exactly x. Enumerating all 2^n subsets would be
# too slow, but 2^16 per half is feasible.
n, x = map(int, input().split())
w = [int(input()) for _ in range(n)]
# Split the weights into two halves of at most 16 elements each.
pt1 = w[:16]
pt2 = w[16:]
# Enumerate every subset sum of the first half (bitmask over pt1).
w1 = []
for bit in range(1 << len(pt1)):
    weight = 0
    for i in range(len(pt1)):
        if (bit >> i) & 1:
            weight += pt1[i]
    w1.append(weight)
# With no second half, the answer is simply the number of first-half
# subsets summing to exactly x.
if not len(pt2):
    print(w1.count(x))
    exit()
# Enumerate every subset sum of the second half.
w2 = []
for bit in range(1 << len(pt2)):
    weight = 0
    for i in range(len(pt2)):
        if (bit >> i) & 1:
            weight += pt2[i]
    w2.append(weight)
# For each first-half sum, binary-search the sorted second-half sums for
# values equal to x - sum and add their count.
ans = 0
w1.sort()  # sorting w1 is not needed for correctness; only w2 is searched
w2.sort()
i2 = 0  # appears unused; possibly a leftover from a two-pointer approach
for weight1 in w1:
    ans += bisect_right(w2, x - weight1) - bisect_left(w2, x - weight1)
print(ans)
"itkn1900@gmail.com"
] | itkn1900@gmail.com |
b1dde0477b45dffe82a9f680f72b5dc5f910eee9 | 3eb4d64a8bb0bc240a2ef189724f4d51b5275eac | /heltour/tournament/migrations/0106_auto_20161031_0546.py | 059d9943ff0cb31240b7a8a561df84ba822d9f3b | [
"MIT"
] | permissive | brucemubayiwa/heltour | c01cc88be7f86dce8246f619d7aa2da37e0e0ac2 | fa4e9b06343acaf6a8a99337860e1ad433e68f6b | refs/heads/master | 2021-01-23T19:59:04.099215 | 2017-09-06T03:34:31 | 2017-09-06T03:34:31 | 102,840,526 | 1 | 0 | null | 2017-09-08T08:53:30 | 2017-09-08T08:53:30 | null | UTF-8 | Python | false | false | 2,839 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-31 05:46
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import select2.fields
class Migration(migrations.Migration):
    """Auto-generated Django 1.9.7 migration.

    Alters the `player` (and related) foreign-key fields on eleven
    tournament models to use `select2.fields.ForeignKey` instead of the
    stock Django ForeignKey; targets and on_delete behavior are unchanged.
    NOTE(review): this appears to be a widget/admin-level change rather
    than a schema change -- confirm before squashing.
    """

    dependencies = [
        ('tournament', '0105_seasonplayer_final_rating'),
    ]

    operations = [
        migrations.AlterField(
            model_name='alternateassignment',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='availabletime',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='gamenomination',
            name='nominating_player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='leaguemoderator',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='playeravailability',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='playerbye',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='playerlateregistration',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='playerwithdrawl',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='seasonplayer',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='seasonprizewinner',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
        migrations.AlterField(
            model_name='teammember',
            name='player',
            field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
        ),
    ]
| [
"ben.cyanfish@gmail.com"
] | ben.cyanfish@gmail.com |
9262d9b3881e896a97b190c2ea16eeea43d24d9c | 958c19436632b41b43c9462337d13e836935a9da | /E01_python_for_data_analysis/04_NumPy/0403_numpy_cal.py | 24af0cf6e7c79bf551a52bc51df3c822da19b676 | [] | no_license | Vincent105/ML | 4752b2a99c124e01e40e383a0177fb5d82115cb6 | fa926caabf83628b3fb7d74cee02a3e923a917f7 | refs/heads/master | 2020-12-29T18:21:50.144711 | 2020-10-12T09:56:41 | 2020-10-12T09:56:41 | 238,697,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | import numpy as np
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr)
# Elementwise array arithmetic.
print(arr * arr)
print(arr * arr - arr)
# Arithmetic between an array and a scalar broadcasts the scalar to
# every element:
print(1 / arr)
print(arr * 0.5)
# Comparing arrays of the same shape yields a boolean array:
arr2 = np.array([[0., 4., 1.], [7., 2., 12.]])
print(arr2)
print(arr2 > arr)
| [
"vincent1050917@gmail.com"
] | vincent1050917@gmail.com |
34eae03af1fab17696208689483285aabf0088d4 | 8c82443ac6f24f21509a73392a8bba6b49f12f88 | /timepass.py | ddd73684ffda985025898a64ce6c4931b37dcc3b | [] | no_license | meetparmar002/Optimization-Lab | 2b7ec211d2f7bfbbd4a482c0e64c14b53e870ecb | 95ba34f7e3ced33e2b5961143738174d65ab49cc | refs/heads/master | 2023-01-19T03:22:42.642312 | 2020-11-25T19:15:51 | 2020-11-25T19:15:51 | 306,827,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | import numpy as np
import matplotlib.pyplot as plt
# Demonstrates that copying each inner list decouples `b` from later
# mutations of `a`'s elements (a shallow `b = a` alias would not).
a = [[1, 2], [3, 4]]
print(a)
b = []
for i in a:
    b.append(i.copy())
a[0][0] = 5
print(a)  # [[5, 2], [3, 4]]
print(b)  # [[1, 2], [3, 4]] -- unchanged
| [
"201801195@daiict.ac.in"
] | 201801195@daiict.ac.in |
3ec6bfaea601759fd9ce090e2468cd49049e454d | 88cfeb8f7076450e7a38d31ab2d11883c1818c8d | /net/dpn92.py | bee4297159590c50e4ca40b1570569426a17eb3b | [] | no_license | ZQPei/Alibaba_Cloud_German_AI_Challenge_for_Earth_Observation | 4e5a127c12e0c02ed1914ab000a131e1a7f7d844 | c2efb32763af0a56a3a7ecb9d83c0744f71d5c14 | refs/heads/master | 2020-04-26T04:31:57.731178 | 2019-02-17T01:10:55 | 2019-02-17T01:10:55 | 173,305,034 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,563 | py | '''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """Dual Path Network bottleneck: a residual path (first `out_planes`
    channels, summed) plus a densely-connected path (the remaining
    `dense_depth` channels, concatenated)."""

    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth
        total = out_planes + dense_depth
        # 1x1 reduce -> grouped 3x3 -> 1x1 expand to (residual + dense) channels.
        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
                               padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, total, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(total)
        # Identity shortcut, except for the first block of a stage where the
        # channel count / spatial size changes and a projection is needed.
        self.shortcut = nn.Sequential()
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, total, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(total)
            )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        sc = self.shortcut(x)
        d = self.out_planes
        # Residual part is summed, dense parts of both branches are concatenated.
        merged = torch.cat([sc[:, :d, :, :] + y[:, :d, :, :],
                            sc[:, d:, :, :],
                            y[:, d:, :, :]], 1)
        return F.relu(merged)
class DPN(nn.Module):
    """Dual Path Network assembled from Bottleneck blocks per `cfg`.

    cfg keys (each a 4-tuple, one entry per stage): 'in_planes',
    'out_planes', 'num_blocks', 'dense_depth'. The stem conv expects a
    10-channel input and the classifier emits 17 logits.
    """

    def __init__(self, cfg):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
        self.conv1 = nn.Conv2d(10, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.last_planes = 64
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
        # Final width = residual channels + one extra dense_depth per block plus one.
        self.linear = nn.Linear(out_planes[3] + (num_blocks[3] + 1) * dense_depth[3], 17)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        # Only the first block of a stage carries the stage stride.
        blocks = []
        for i, s in enumerate([stride] + [1] * (num_blocks - 1)):
            blocks.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, s, i == 0))
            self.last_planes = out_planes + (i + 2) * dense_depth
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def DPN26():
    """Small DPN variant (2 blocks per stage)."""
    return DPN({
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (2, 2, 2, 2),
        'dense_depth': (16, 32, 24, 128),
    })
def DPN92():
    """DPN-92 variant (3/4/20/3 blocks per stage)."""
    return DPN({
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (3, 4, 20, 3),
        'dense_depth': (16, 32, 24, 128),
    })
def test():
    """Builds DPN92 and runs one random input through it.

    Fix: this variant's stem conv (DPN.conv1) takes 10 input channels,
    but the old smoke test fed a 3-channel CIFAR-style tensor, which
    would fail with a channel-mismatch error; the dummy input now has
    10 channels.
    """
    net = DPN92()
    x = torch.randn(1, 10, 32, 32)
    y = net(x)
    print(y)
# test()
| [
"dfzspzq@163.com"
] | dfzspzq@163.com |
341121855f7de7105622a140ad0326dd3ae1e54c | b139a0c76700aa6ef3c972bdbbefc862d44fe7cb | /string/aa.cpp | 1ee4ddfb04ae9adc6cc45f35b5e2a9759ff77755 | [] | no_license | sk10salman/dynamic-programming | d1b7d694f376f3972181a2887317b9f213a36d20 | 31fd22b120f785a2a1070ea0d33dc3e6eda8a235 | refs/heads/master | 2021-04-18T01:31:27.797931 | 2020-06-09T21:09:31 | 2020-06-09T21:09:31 | 249,493,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | cpp | // C++ program to find maximum dot product of two array
#include<bits/stdc++.h>
using namespace std;
// Function compute Maximum Dot Product and
// return it
long long int MaxDotProduct(int A[], int B[],
int m, int n)
{
// Create 2D Matrix that stores dot product
// dp[i+1][j+1] stores product considering B[0..i]
// and A[0...j]. Note that since all m > n, we fill
// values in upper diagonal of dp[][]
long long int dp[n+1][m+1];
memset(dp, 0, sizeof(dp));
// Traverse through all elements of B[]
for (int i=1; i<=n; i++)
// Consider all values of A[] with indexes greater
// than or equal to i and compute dp[i][j]
for (int j=i; j<=m; j++)
// Two cases arise
// 1) Include A[j]
// 2) Exclude A[j] (insert 0 in B[])
dp[i][j] = max((dp[i-1][j-1] + (A[j-1]*B[i-1])) ,
dp[i][j-1]);
// return Maximum Dot Product
return dp[n][m] ;
}
// Driver program to test above function
int main()
{
int A[] = { 2, 1,-2,5} ;
int B[] = { 3, 0, -6 } ;
int m = sizeof(A)/sizeof(A[0]);
int n = sizeof(B)/sizeof(B[0]);
cout << MaxDotProduct(A, B, m, n);
return 0;
}
| [
"noreply@github.com"
] | noreply@github.com |
331b189e0343621baccb34a048bf215ef7de4929 | bd00b3ada3097671771b2b37e49f3459023cae5d | /models/CNNs/p3d/p3d.py | b43169e5635de448229a9eb889492c7a21bd8b60 | [] | no_license | jasonkli/cs224n-project | 92d3f97625c6e5b7754ef471ed08a54835359983 | 05d5607bf5b9011e43496a30098a6643232a5467 | refs/heads/master | 2020-04-26T01:21:58.494887 | 2019-03-29T04:51:47 | 2019-03-29T04:51:47 | 173,201,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,561 | py | from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
__all__ = ['P3D', 'P3D63', 'P3D131','P3D199']
def conv_S(in_planes, out_planes, stride=1, padding=1):
    """1x3x3 spatial convolution (the 'S' factor of a P3D block).

    NOTE(review): the `stride` argument is accepted but not forwarded; the
    convolution is always built with stride 1, matching the original code.
    """
    # as is descriped, conv S is 1x3x3
    return nn.Conv3d(in_planes, out_planes, kernel_size=(1, 3, 3),
                     stride=1, padding=padding, bias=False)
def conv_T(in_planes, out_planes, stride=1, padding=1):
    """3x1x1 temporal convolution (the 'T' factor of a P3D block).

    NOTE(review): the `stride` argument is accepted but not forwarded; the
    convolution is always built with stride 1, matching the original code.
    """
    # conv T is 3x1x1
    return nn.Conv3d(in_planes, out_planes, kernel_size=(3, 1, 1),
                     stride=1, padding=padding, bias=False)
def downsample_basic_block(x, planes, stride):
    """Type-A shortcut: average-pool by `stride` and zero-pad the channel
    dimension up to `planes` channels."""
    pooled = F.avg_pool3d(x, kernel_size=1, stride=stride)
    pad_channels = planes - pooled.size(1)
    zero_pads = torch.Tensor(pooled.size(0), pad_channels, pooled.size(2),
                             pooled.size(3), pooled.size(4)).zero_()
    # Keep the padding on the same device as the pooled activations.
    if isinstance(pooled.data, torch.cuda.FloatTensor):
        zero_pads = zero_pads.cuda()
    return Variable(torch.cat([pooled.data, zero_pads], dim=1))
class Bottleneck(nn.Module):
    """P3D bottleneck block.

    For blocks with index n_s < depth_3d the 3x3 convolution of a standard
    ResNet bottleneck is factorized into a 1x3x3 spatial conv (conv2, 'S')
    and a 3x1x1 temporal conv (conv3, 'T'), combined according to one of
    three variants chosen round-robin from ST_struc:
      - 'A': serial        T(S(x))
      - 'B': parallel      S(x) + T(x)
      - 'C': serial+skip   S(x) + T(S(x))
    Blocks at or beyond depth_3d (the res5 stage) use plain 2D convolutions.
    """
    # Channel expansion factor of the final 1x1 conv.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, n_s=0, depth_3d=47, ST_struc=('A', 'B', 'C')):
        super(Bottleneck, self).__init__()
        self.downsample = downsample
        self.depth_3d = depth_3d
        self.ST_struc = ST_struc
        self.len_ST = len(self.ST_struc)
        stride_p = stride
        # With a downsample projection, stride only spatially (keep time dim).
        if not self.downsample == None:
            stride_p = (1, 2, 2)
        if n_s < self.depth_3d:
            # 3D portion of the network (res2..res4); the very first block
            # keeps stride 1.
            if n_s == 0:
                stride_p = 1
            self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False, stride=stride_p)
            self.bn1 = nn.BatchNorm3d(planes)
        else:
            # 2D portion (res5): the first 2D block downsamples with stride 2.
            if n_s == self.depth_3d:
                stride_p = 2
            else:
                stride_p = 1
            self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride_p)
            self.bn1 = nn.BatchNorm2d(planes)
        # self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride,
        #                        padding=1, bias=False)
        self.id = n_s
        # Pick the P3D variant ('A'/'B'/'C') cyclically by block index.
        self.ST = list(self.ST_struc)[self.id % self.len_ST]
        if self.id < self.depth_3d:
            # Factorized S (1x3x3) and T (3x1x1) convolutions.
            self.conv2 = conv_S(planes, planes, stride=1, padding=(0, 1, 1))
            self.bn2 = nn.BatchNorm3d(planes)
            #
            self.conv3 = conv_T(planes, planes, stride=1, padding=(1, 0, 0))
            self.bn3 = nn.BatchNorm3d(planes)
        else:
            # Plain 2D 3x3 conv for the res5 (C2D) stage.
            self.conv_normal = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn_normal = nn.BatchNorm2d(planes)
        if n_s < self.depth_3d:
            self.conv4 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
            self.bn4 = nn.BatchNorm3d(planes * 4)
        else:
            self.conv4 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
            self.bn4 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def ST_A(self, x):
        # P3D-A: spatial conv followed by temporal conv (serial).
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        return x

    def ST_B(self, x):
        # P3D-B: spatial and temporal convs applied in parallel, summed.
        tmp_x = self.conv2(x)
        tmp_x = self.bn2(tmp_x)
        tmp_x = self.relu(tmp_x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        return x + tmp_x

    def ST_C(self, x):
        # P3D-C: spatial conv, then temporal conv with a skip over it.
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        tmp_x = self.conv3(x)
        tmp_x = self.bn3(tmp_x)
        tmp_x = self.relu(tmp_x)
        return x + tmp_x

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # out = self.conv2(out)
        # out = self.bn2(out)
        # out = self.relu(out)
        if self.id < self.depth_3d:  # C3D parts:
            if self.ST == 'A':
                out = self.ST_A(out)
            elif self.ST == 'B':
                out = self.ST_B(out)
            elif self.ST == 'C':
                out = self.ST_C(out)
        else:
            out = self.conv_normal(out)  # normal is res5 part, C2D all.
            out = self.bn_normal(out)
            out = self.relu(out)
        out = self.conv4(out)
        out = self.bn4(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class P3D(nn.Module):
    """Pseudo-3D ResNet for video clips.

    Stages res2-res4 use pseudo-3D bottlenecks that cycle through the
    ``ST_struc`` branch variants; res5 falls back to 2D convolutions.
    ``self.cnt`` counts blocks as they are built so each Bottleneck knows
    whether it falls in the 3D or 2D part of the network.
    """
    def __init__(self, block, layers, modality='RGB',
                 shortcut_type='B', num_classes=400,dropout=0.5,ST_struc=('A','B','C')):
        self.inplanes = 64
        super(P3D, self).__init__()
        # self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2),
        #                        padding=(3, 3, 3), bias=False)
        self.input_channel = 3 if modality=='RGB' else 2  # 2 is for flow
        self.ST_struc=ST_struc
        # Stem: 2D-style (1,7,7) conv applied per frame.
        self.conv1_custom = nn.Conv3d(self.input_channel, 64, kernel_size=(1,7,7), stride=(1,2,2),
                                      padding=(0,3,3), bias=False)
        self.depth_3d=sum(layers[:3])# C3D layers are only (res2,res3,res4), res5 is C2D
        self.bn1 = nn.BatchNorm3d(64) # bn1 is followed by conv1
        self.cnt=0
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(2, 3, 3), stride=2, padding=(0,1,1))       # pooling layer for conv1.
        self.maxpool_2 = nn.MaxPool3d(kernel_size=(2,1,1),padding=0,stride=(2,1,1))   # pooling layer for res2, 3, 4.
        self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)
        self.avgpool = nn.AvgPool2d(kernel_size=(5, 5), stride=1)                              # pooling layer for res5.
        self.dropout=nn.Dropout(p=dropout)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Kaiming-style init for 3D convs; constant init for 3D batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # some private attributes describing the expected input
        self.input_size=(self.input_channel,16,160,160)       # input of the network
        self.input_mean = [0.485, 0.456, 0.406] if modality=='RGB' else [0.5]
        self.input_std = [0.229, 0.224, 0.225] if modality=='RGB' else [np.mean([0.229, 0.224, 0.225])]
    @property
    def scale_size(self):
        # Assumes raw images are resized to (340, 256).
        return self.input_size[2] * 256 // 160
    @property
    def temporal_length(self):
        # Number of frames the network consumes per clip.
        return self.input_size[1]
    @property
    def crop_size(self):
        # Spatial crop size fed to the network.
        return self.input_size[2]
    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
        """Build one residual stage; picks 3D vs 2D downsample based on whether
        the running block counter is still inside the 3D part."""
        downsample = None
        stride_p=stride #especially for downsample branch.
        if self.cnt<self.depth_3d:
            if self.cnt==0:
                stride_p=1
            else:
                stride_p=(1,2,2)
            if stride != 1 or self.inplanes != planes * block.expansion:
                if shortcut_type == 'A':
                    downsample = partial(downsample_basic_block,
                                         planes=planes * block.expansion,
                                         stride=stride)
                else:
                    downsample = nn.Sequential(
                        nn.Conv3d(self.inplanes, planes * block.expansion,
                                  kernel_size=1, stride=stride_p, bias=False),
                        nn.BatchNorm3d(planes * block.expansion)
                    )
        else:
            if stride != 1 or self.inplanes != planes * block.expansion:
                if shortcut_type == 'A':
                    downsample = partial(downsample_basic_block,
                                         planes=planes * block.expansion,
                                         stride=stride)
                else:
                    downsample = nn.Sequential(
                        nn.Conv2d(self.inplanes, planes * block.expansion,
                                  kernel_size=1, stride=2, bias=False),
                        nn.BatchNorm2d(planes * block.expansion)
                    )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample,n_s=self.cnt,depth_3d=self.depth_3d,ST_struc=self.ST_struc))
        self.cnt+=1
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,n_s=self.cnt,depth_3d=self.depth_3d,ST_struc=self.ST_struc))
            self.cnt+=1
        return nn.Sequential(*layers)
    def forward(self, x):
        """Run a clip through the network; returns pooled features
        (the final fc layer is commented out below)."""
        x = self.conv1_custom(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.maxpool_2(self.layer1(x))  #  Part Res2
        x = self.maxpool_2(self.layer2(x))  #  Part Res3
        x = self.maxpool_2(self.layer3(x))  #  Part Res4
        # Collapse the (now length-1) temporal axis so res5 runs in 2D.
        sizes=x.size()
        x = x.view(-1,sizes[1],sizes[3],sizes[4])  #  Part Res5
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(-1,self.fc.in_features)
        #x = self.fc(self.dropout(x))
        return x
def P3D63(**kwargs):
    """Build a P3D-63 network (ResNet-50-3D layer layout).

    All keyword arguments are forwarded to the P3D constructor.
    """
    return P3D(Bottleneck, [3, 4, 6, 3], **kwargs)
def P3D131(**kwargs):
    """Build a P3D-131 network (ResNet-101-3D layer layout).

    All keyword arguments are forwarded to the P3D constructor.
    """
    return P3D(Bottleneck, [3, 4, 23, 3], **kwargs)
def P3D199(pretrained=False,modality='RGB',**kwargs):
    """Construct a P3D-199 model based on a ResNet-152-3D layer layout.

    Args:
        pretrained: if True, load checkpoint weights for the given modality.
        modality: 'RGB' or 'Flow'; selects the checkpoint file.
        **kwargs: forwarded to the P3D constructor.

    Returns:
        The constructed P3D network.

    Raises:
        ValueError: if ``pretrained`` is True and ``modality`` is not one of
            'RGB'/'Flow'.  (The original code crashed with an unrelated
            NameError on ``pretrained_file`` in that case.)
    """
    model = P3D(Bottleneck, [3, 8, 36, 3], modality=modality, **kwargs)
    if pretrained:
        if modality == 'RGB':
            pretrained_file = '/Users/Jason/Desktop/cs224n-project/models/CNNs/p3d/p3d_rgb_199.checkpoint.pth.tar'
        elif modality == 'Flow':
            pretrained_file = 'p3d_flow_199.checkpoint.pth.tar'
        else:
            raise ValueError(
                "no pretrained weights for modality {!r}".format(modality))
        weights = torch.load(pretrained_file)['state_dict']
        model.load_state_dict(weights)
    return model
# custom operation
def get_optim_policies(model=None,modality='RGB',enable_pbn=True):
    '''
    Split ``model`` parameters into per-group optimizer policies.

    Groups (each returned dict carries ``lr_mult``/``decay_mult`` hints for
    the optimizer):
        first conv:    weight --> conv weight
                       bias --> conv bias
        normal action: weight --> non-first conv + fc weight
                       bias --> non-first conv + fc bias
        bn: the first BatchNorm3d (later ones are frozen when partial-BN is
            enabled) plus all BatchNorm2d layers.

    The "normal" weights/biases are further split 70/30 into a slowly
    fine-tuned front part (``slow_feat``/``slow_bias``) and a normally
    trained back part.  Flow models get boosted first-conv learning rates.
    '''
    first_conv_weight = []
    first_conv_bias = []
    normal_weight = []
    normal_bias = []
    bn = []

    if model is None:  # `== None` replaced with the identity check
        log.l.info('no model!')
        exit()

    conv_cnt = 0
    bn_cnt = 0
    for m in model.modules():
        if isinstance(m, (torch.nn.Conv3d, torch.nn.Conv2d)):
            ps = list(m.parameters())
            conv_cnt += 1
            if conv_cnt == 1:
                first_conv_weight.append(ps[0])
                if len(ps) == 2:
                    first_conv_bias.append(ps[1])
            else:
                normal_weight.append(ps[0])
                if len(ps) == 2:
                    normal_bias.append(ps[1])
        elif isinstance(m, torch.nn.Linear):
            ps = list(m.parameters())
            normal_weight.append(ps[0])
            if len(ps) == 2:
                normal_bias.append(ps[1])
        elif isinstance(m, torch.nn.BatchNorm3d):
            bn_cnt += 1
            # later BN's are frozen (only the first 3D BN trains under
            # partial-BN; disabling partial-BN trains them all)
            if not enable_pbn or bn_cnt == 1:
                bn.extend(list(m.parameters()))
        elif isinstance(m, torch.nn.BatchNorm2d):
            bn.extend(list(m.parameters()))
        elif len(m._modules) == 0:
            # Leaf module of an unknown type with trainable parameters:
            # refuse rather than silently skip it.
            if len(list(m.parameters())) > 0:
                raise ValueError("New atomic module type: {}. Need to give it a learning policy".format(type(m)))

    # Front 70% of the "normal" weights are fine-tuned slowly.
    slow_rate = 0.7
    n_fore = int(len(normal_weight) * slow_rate)
    slow_feat = normal_weight[:n_fore]  # finetune slowly.
    slow_bias = normal_bias[:n_fore]
    normal_feat = normal_weight[n_fore:]
    normal_bias = normal_bias[n_fore:]

    return [
        {'params': first_conv_weight, 'lr_mult': 5 if modality == 'Flow' else 1, 'decay_mult': 1,
         'name': "first_conv_weight"},
        {'params': first_conv_bias, 'lr_mult': 10 if modality == 'Flow' else 2, 'decay_mult': 0,
         'name': "first_conv_bias"},
        {'params': slow_feat, 'lr_mult': 1, 'decay_mult': 1,
         'name': "slow_feat"},
        {'params': slow_bias, 'lr_mult': 2, 'decay_mult': 0,
         'name': "slow_bias"},
        {'params': normal_feat, 'lr_mult': 1 , 'decay_mult': 1,
         'name': "normal_feat"},
        {'params': normal_bias, 'lr_mult': 2, 'decay_mult':0,
         'name': "normal_bias"},
        {'params': bn, 'lr_mult': 1, 'decay_mult': 0,
         'name': "BN scale/shift"},
    ]
if __name__ == '__main__':
    # Smoke test: build the pretrained RGB P3D-199 and push one random clip
    # batch (N, C, T, H, W) through it on the GPU.  Requires CUDA and the
    # hard-coded checkpoint path above.
    model = P3D199(pretrained=True,num_classes=400)
    model = model.cuda()
    data=torch.autograd.Variable(torch.rand(10,3,16,160,160)).cuda()   # if modality=='Flow', please change the 2nd dimension 3==>2
    out=model(data)
"jasonkli@stanford.edu"
] | jasonkli@stanford.edu |
2819548a0f234a61815765acfeb1d9764740781f | 24ea510c6f5356a8b55dfbcb5f01420130c8c67a | /src/predict.py | fda2bc1e644a8c87ad8e7c4c402c91d40fa0e2f2 | [] | no_license | maestro73/OilandGasAssetEvaluation | eee4c37400f60d1f8c41792fda5b859435f7b86e | d882ba571264864f0f4ffc9a438ef88e161bcf73 | refs/heads/master | 2021-09-08T13:45:47.087030 | 2018-03-10T01:15:01 | 2018-03-10T01:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,611 | py | from sklearn.model_selection import train_test_split,cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score
from catboost import CatBoostRegressor
import numpy as np
import pandas as pd
import seaborn as sns
import pickle
import matplotlib.pyplot as plt
def final_model(feature_matrix, target):
    """Train and evaluate a CatBoost regressor on a holdout split.

    Args:
        feature_matrix: DataFrame of well features (one row per well).
        target: regression target aligned with ``feature_matrix``.

    Returns:
        Tuple ``(y_test, y_pred, RMSE, r2, feature_importance)``.
    """
    # Train/test split the data
    X_train, X_test, y_train, y_test = train_test_split(feature_matrix,target,test_size=.2)

    # Instantiate scaler and model
    ss = StandardScaler()
    model = CatBoostRegressor()

    # Fit the scaler on the training split only, then apply the *same*
    # transform to the test split.  (The original called fit_transform on
    # the test data too, which re-fits the scaler on test statistics --
    # data leakage that distorts the evaluation metrics.)
    X_train_scaled = ss.fit_transform(X_train)
    X_test_scaled = ss.transform(X_test)

    # Fit model and predict
    model.fit(X_train_scaled,y_train)
    y_pred = pd.Series(model.predict(X_test_scaled))

    # Validate model
    RMSE = np.sqrt(mean_squared_error(y_test,y_pred))
    r2 = r2_score(y_test,y_pred)
    print("RMSE (test data):{:.2f}".format(RMSE))
    print("r2 (test data):{:.2f}".format(r2))

    # Feature importances; the flags preserve the original switch between
    # CatBoost-native importances and sklearn-tree style importances.
    feature_importance = set()
    catboost = True
    tree = False
    if catboost:
        feature_importance = model.get_feature_importance(X_train_scaled,y=y_train)
    elif tree:
        feature_importance = model.feature_importances_

    return y_test, y_pred, RMSE,r2, feature_importance
def make_plots(y_test, y_pred,metric,timeframe):
    """Save two diagnostic plots under Plots/<timeframe>Year/:

    1. overlaid distributions of actual vs predicted production, and
    2. a test-vs-prediction scatter with a perfect-correlation line.

    Args:
        y_test: actual production values for the holdout set.
        y_pred: model predictions aligned with ``y_test``.
        metric: 'Average', 'Cumulative' or 'Peak'; used in labels/filenames.
        timeframe: production window in years; used in titles/paths.
    """
    if metric == 'Cumulative':
        label = 'Oil Production, bbls'
    else:
        label = 'Oil Production, bbl/month'
    # Distribution comparison
    # NOTE(review): `metric + label` concatenates without a space, and the
    # final plt.xlabel below always says bbl/month even for Cumulative —
    # looks unintended, confirm before relying on axis labels.
    limit = int(max(y_pred))
    plt.figure(figsize=(10,10))
    sns.set_style('darkgrid')
    plt.xlim((0,limit))
    sns.distplot(y_test,bins=35, axlabel = metric + label,label='y_test', color='purple').set_title(str(timeframe)+ ' Year Production', fontsize=12)
    sns.distplot(y_pred,bins=35, axlabel = metric + label,label='y_pred',color='black').set_title(str(timeframe) + ' Year '+metric+ ' Production', fontsize=12)
    plt.yticks([])
    plt.ylabel('Frequency',fontsize=12)
    plt.legend(fontsize=12)
    plt.xlabel(metric + ' Oil Production, bbl/month',fontsize=12)
    plt.savefig('Plots/'+str(timeframe)+'Year/'+'DistributionComparison'+metric+'.png')
    # Test/prediction correlation scatter
    plt.figure(figsize=(10,10))
    sns.set_style('darkgrid')
    sns.regplot(y_test,y_pred, color='r',label='test vs prediction')
    plt.xlabel('y_test, ' + label)
    plt.ylabel('y_predict, ' + label)
    plt.xlim((0,limit))
    plt.ylim((0,limit))
    plt.plot(list(range(limit)),list(range(limit)), color='green',label='Perfect correlation')
    plt.legend()
    plt.title('Correlation for ' + str(timeframe) + ' Year ' + metric+ ' Oil Production')
    plt.savefig('Plots/'+str(timeframe)+'Year/'+'Correlation'+metric+'.png')
def feature_importance_plot(columns, feature_importance, top_amount, metric, timeframe):
    """Save a horizontal bar chart of the ``top_amount`` most important features.

    Args:
        columns: feature names, aligned with ``feature_importance``.
        feature_importance: importance score per feature.
        top_amount: number of top features to display.
        metric: metric name used in the output filename.
        timeframe: production window in years, used in the output path.
    """
    # Rank features by importance (descending), keep the top slice, then
    # reverse so the most important feature ends up at the top of the chart.
    ranked = sorted(zip(columns, feature_importance), key=lambda pair: pair[1], reverse=True)
    top = ranked[:top_amount][::-1]
    features = [name for name, _ in top]
    importances = [score for _, score in top]
    indices = np.argsort(importances)
    plt.figure(figsize=(10, 10))
    plt.title('Feature Importances')
    plt.barh(range(len(indices)), importances, color='g', align='center')
    plt.yticks(range(len(indices)), features)
    plt.xlabel('Relative Importance')
    plt.savefig('Plots/' + str(timeframe) + 'Year/' + 'feature_importance' + metric + '.png')
if __name__ == '__main__':
    # Fixed seed so the train/test split in final_model is reproducible.
    np.random.seed(50)
    # Load pickled feature matrices, targets, and (timeframe, well-count) info.
    with open('Data/features.pkl','rb') as p:
        features= pickle.load(p)
    with open('Data/targets.pkl','rb') as p:
        targets = pickle.load(p)
    with open('Data/metricinfo.pkl','rb') as p:
        metric_info = pickle.load(p)
    # Fit and validate one model per production metric.
    API = []
    models = ['Average','Cumulative','Peak']
    results = {}
    for i in range(3):
        # API10 is an identifier, not a feature — pull it out before training.
        API.append(features[i].pop('API10'))
        y_test,y_pred,RMSE,r2,feature_importance = final_model(features[i],targets[i][0])
        results[models[i]] = {'RMSE':RMSE,'r2':r2}
        # Create distribution comparisons and top-features plots.
        make_plots(y_test, y_pred, metric=models[i], timeframe=metric_info[0])
        feature_importance_plot(features[i].columns, feature_importance, top_amount=10, metric=models[i], timeframe=metric_info[0])
    results["Number of Wells"] = metric_info[1]
    # Persist per-metric scores to CSV and echo them to stdout.
    output = pd.DataFrame(results)
    output.to_csv('Data/'+str(metric_info[0])+'YearResults.csv')
    print (results.items())
    print ("Number of Wells:" + str(metric_info[1]))
| [
"jccarrigan@gmail.com"
] | jccarrigan@gmail.com |
3e4424a55a0ac8f36a6a0779ff5a7059b5f1c270 | a2da441808e9db76234cd50d8cae10a276cbb534 | /virtualenv/bin/pip3 | 4208998c842ed10a629c16b30febaf7ac5f62206 | [] | no_license | yakul-crossml/django_project | cad8a8f9ddb45d780cbc587cbea1e81e8766f474 | 7c8520aac6e667054e4d2443454fdb5b08920fd0 | refs/heads/master | 2023-07-25T10:59:01.759959 | 2021-09-02T05:56:24 | 2021-09-02T05:56:24 | 397,568,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/home/yakul/Desktop/django_project/virtualenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"yakul@crossml.com"
] | yakul@crossml.com | |
fa428df271c1a095589ea4dda94bbd27ca4f7705 | 06870667821f26b0c8c96b52321938df58fd91f6 | /parking_scrapers/scrapers/new_haven.py | 85e9236cddfb6c481a2d0bfc60ccfb3c43b84610 | [] | no_license | jmcarp/open-parking-spaces | 69244962a316fe6bd3273ba6837bfe8d0f1f4b8e | 5f855a1b25c9109f15af26e1fb3b4ecbd3ef5845 | refs/heads/master | 2023-01-24T11:43:53.641262 | 2020-11-30T19:00:46 | 2020-11-30T19:00:46 | 312,906,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import re
from typing import Iterator
import lxml.html
import requests
from base import LotSpaces, Scraper
class NewHavenScraper(Scraper):
    """Scraper for the Park New Haven availability ticker.

    https://parknewhaven.com
    """

    HTML_URL = "https://parknewhaven.com"
    TIMEOUT = 5
    SPACES_PATTERN = re.compile(r"(.*?):\s+(\d+)% \((\d+) available\)", re.IGNORECASE)

    name = "new_haven"

    def fetch_spaces(self) -> Iterator[LotSpaces]:
        """Yield one LotSpaces record per lot advertised on the homepage ticker."""
        page = requests.get(
            self.HTML_URL,
            headers={"User-Agent": "open-parking-spaces"},
            timeout=self.TIMEOUT,
        )
        page.raise_for_status()

        tree = lxml.html.fromstring(page.content)
        anchors = tree.xpath(
            '//div[contains(@class, "tickr")]//a[contains(@class, "tickrlink")]'
        )
        for anchor in anchors:
            # Each ticker link reads "<lot>: N% (M available)".
            parsed = self.SPACES_PATTERN.search(anchor.text_content())
            assert parsed is not None
            lot_name, _percent, available = parsed.groups()
            yield LotSpaces(
                lot=lot_name,
                spaces=int(available),
                url=anchor.attrib["href"],
            )
| [
"jm.carp@gmail.com"
] | jm.carp@gmail.com |
f36d7b851dd1f3875bf4450554e4297e5133dd62 | 3dee7a51ab2f214f6aa69d737625e5dad73db352 | /djragon/transcode/admin.py | ddd026a0fea7017e6acb0bcbcb7c40bd8147b6f9 | [] | no_license | skyl/djragon-cms | 533e33f94ab0ea56adfd47c9003dda8f6f97b7f4 | 45be3cde23193ca66405eb6a9b3eac0a0a53dc7b | refs/heads/master | 2020-04-04T14:46:48.017588 | 2010-08-03T03:16:04 | 2010-08-03T03:16:04 | 629,046 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from django.contrib import admin
from transcode.models import LogResult
class LogResultAdmin(admin.ModelAdmin):
    """Default (unconfigured) admin page for LogResult entries."""
    pass
admin.site.register(LogResult, LogResultAdmin)
| [
"skylar.saveland@gmail.com"
] | skylar.saveland@gmail.com |
eedea8ce429df0e2e5de299e797c1912dd4bd962 | 5d7e3b62244cf01c8e58e16e8ec50bed01cac43c | /matix-label cnn/test.py | b7d9099db41320c69c0052b0f13d46b91ea697d0 | [] | no_license | yuguoqi-learner/NHD-and-method-of-paper- | 9a64e6297335c2712e28aef4bbebe80ba08267d4 | 1bfb01a1ea59920650fca2eb29cbf1c9b99b1ee5 | refs/heads/main | 2023-06-04T01:24:08.795335 | 2021-07-08T03:18:01 | 2021-07-08T03:18:01 | 383,482,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,870 | py |
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
import torch.utils.data as data
import torchvision
from torch.autograd import Variable
import matplotlib.pyplot as plt
# from functions import *
from sklearn.model_selection import train_test_split,cross_val_score
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pickle
from torch.utils.data import DataLoader, TensorDataset, Dataset
from itertools import groupby
import heapq
import matplotlib.pyplot as plt
import random
from torch.backends import cudnn
import seaborn as sn
import pandas as pd
from torch.optim import lr_scheduler
from random_seed import seed
from dataset import get_X_Y,random_dataloader,split_dataloader,encoder_trans
from model import LeNet5,Classifier,Resnet
from device import device
def max_index_pred(data):
    """Locate the argmax cell of each matrix in a batch.

    Args:
        data: array-like of shape (batch, rows, cols).  The original code
            hard-coded 10 columns (``j // 10`` / ``j % 10``); the column
            count is now read from the data, so any width works while
            behavior for 10-column label matrices is unchanged.

    Returns:
        List of ``(batch_index, row_index, col_index)`` tuples, one per
        matrix, pointing at the (first) maximum entry.
    """
    data = np.asarray(data)
    n_cols = data.shape[-1]
    list_index = []
    for i, matrix in enumerate(data):
        # np.argmax on the flattened matrix returns the first maximum,
        # matching the old heapq.nlargest(1, ...) tie-breaking.
        flat_index = int(np.argmax(matrix))
        row_index, col_index = divmod(flat_index, n_cols)
        list_index.append((i, row_index, col_index))
    return list_index
def acc(y, pred):
    """Return 1 if ``y`` and ``pred`` are element-wise identical, else 0.

    Equivalent to the original "Euclidean distance equals sqrt(0)" check,
    but compares the sum of squared differences against zero directly —
    avoiding the fragile float equality on sqrt results.
    """
    y, pred = np.array(y), np.array(pred)
    return 1 if np.sum(np.power(y - pred, 2)) == 0 else 0
batchs_acc = []
list_y = []
list_pred = []
batchs_acc_e = []
batchs_acc_h = []
list_pred_e = []
list_pred_h = []
def test(data,y):
    """Run one sample through the saved CNN + classifier and score it.

    Loads hard-coded checkpoints, predicts the argmax cell of the output
    label matrix, and compares its row ("e") and column ("h") indices with
    the ground truth.  NOTE: appends into the module-level accumulator
    lists (list_pred, list_pred_e, list_pred_h, batchs_acc_e, batchs_acc_h)
    on every call, and returns those same lists.
    """
    lenet5, classifier =LeNet5(),Classifier()
    # lstm.eval()
    lenet5.eval()
    classifier.eval()
    # Load checkpointed parameters (paths are machine-specific).
    # lstm.load_state_dict(torch.load("/home/thinkstation/YU/yuguoqi/model_save/lstm_epoch130.pth"))
    lenet5.load_state_dict(torch.load("/home/thinkstation/YU/yuguoqi/model_save/cnn72.pth"))
    classifier.load_state_dict(torch.load("/home/thinkstation/YU/yuguoqi/model_save/classifier_epoch72.pth"))
    # optimizer.load_state_dict(torch.load("model_save/optimizer_epoch200.pth"))
    use_cuda = torch.cuda.is_available()  # check if GPU exists
    device = torch.device("cuda" if use_cuda else "cpu")
    lenet5.to(device)
    # lstm.to(device)
    classifier.to(device)
    data = data.float().to(device)
    # data= data.transpose(2,3)
    output_cnn = lenet5(data)
    output = classifier(output_cnn)
    output=output.cpu().detach().numpy()
    # Give the label the same leading batch axis as the prediction.
    y = y[np.newaxis,:,:]
    y_pred = max_index_pred(output)
    y = max_index_pred(y)
    # Row index ("e") and column index ("h") of the argmax cell.
    y_e = y[0][1]
    y_h = y[0][2]
    y_pred_e = y_pred[0][1]
    y_pred_h = y_pred[0][2]
    # batch_acc = acc(y_pred, y)
    # batchs_acc.append(batch_acc)
    batch_acc_e = acc(y_e, y_pred_e)
    batch_acc_h = acc(y_h, y_pred_h)
    batchs_acc_e.append(batch_acc_e)
    batchs_acc_h.append(batch_acc_h)
    #
    # list_y.append(np.array(y))
    list_pred.append(np.array(y_pred))
    list_pred_e.append(y_pred_e)
    list_pred_h.append(y_pred_h)
    return list_pred,list_pred_e,list_pred_h,batchs_acc_e,batchs_acc_h
def get_data_label():
    """Load test samples and their label matrices from disk.

    Samples are .npz files under ``test_datatas/``; each filename's leading
    alphabetic prefix names the label file (a .txt matrix) under
    ``test_label/``.  Each sample is standardized per (channel, row) using
    its own mean and std across the 300-sample axis.

    Returns:
        Tuple ``(X, Y, name_list)``: standardized samples of shape
        (N, 2, 23, 300), label matrices, and matched label filenames.
    """
    path = '/home/thinkstation/YU/yuguoqi/test_datatas/'
    path_label = '/home/thinkstation/YU/yuguoqi/test_label/'
    files =os.listdir(path)
    files.sort()
    files_label = os.listdir(path_label)
    files_label.sort()
    list_X = []
    list_Y = []
    name_list = []
    for file in files:
        if not os.path.isdir(path + file):
            f_name = str(file)
            filename = path + f_name
            data = np.load(filename, allow_pickle=True)
            datas = data['arr_0']
            list_X.append(datas)
            # Split the filename into alternating alpha/digit runs; the first
            # run (the alphabetic prefix) identifies the label file.
            y = [''.join(list(g)) for k, g in groupby(f_name, key=lambda x: x.isdigit())]
            Y = y[0]
            for file_label in files_label:
                labels = os.path.splitext(file_label)[0]
                if labels == Y:
                    Label = np.loadtxt(path_label + str(file_label))
                    name = file_label
                    name_list.append(name)
                    # Labels = Label.transpose()
                    list_Y.append(Label)
                    break;
    X = np.array(list_X)
    # Per (channel, row) standardization: subtract the mean, then divide by
    # the std, element by element.
    # NOTE(review): these nested loops are equivalent to broadcasting
    # X = (X - X_mean[..., None]) / X_std[..., None]; verify dtype behavior
    # before vectorizing (in-place assignment casts to X's dtype).
    X_mean = np.mean(X, axis=3)
    X_std = np.std(X, axis=3)
    for j in range(2):
        for i in range(23):
            for k in range(300):
                X[:, j, i, k] = (X[:, j, i, k] - X_mean[:, j, i])
    for l in range(2):
        for m in range(23):
            for n in range(300):
                X[:, l, m, n] = X[:, l, m, n] / X_std[:, l, m]
    Y = np.array(list_Y)
    # X = list_X
    # Y = list_Y
    return X, Y,name_list
# Evaluate every test sample, then aggregate per-class predictions and
# accuracies.  The test() call accumulates into module-level lists, so the
# final list_pred_* / batchs_acc_* hold one entry per sample, grouped in
# blocks of 50 per object class.
datas,label,name_list = get_data_label()
for i in range(len(datas)):
    datass = datas[i]
    labels = label[i]
    datass = torch.from_numpy(datass)
    datasss = datass.unsqueeze(0)  # add batch dimension
    list_pred,list_pred_e,list_pred_h,batchs_acc_e,batchs_acc_h = test(datasss,labels)
# class_acc = sum(class_num)/len(class_num)
# Per-class slices (50 samples each), sorted for inspection/export.
a = list_pred_e[0:50]
b = list_pred_h[50:100]
c = list_pred_h[100:150]
d = list_pred_h[150:200]
e = list_pred_h[200:250]
f = list_pred_h[250:300]
a.sort()
b.sort()
c.sort()
d.sort()
e.sort()
f.sort()
# np.savetxt('/home/thinkstation/YU/Badminton_h',a)
# np.savetxt('/home/thinkstation/YU/BigDrawPaper_h.txt',b)
# np.savetxt('/home/thinkstation/YU/Earmuffs_h.txt',c)
# np.savetxt('/home/thinkstation/YU/HDD_h.txt',d)
# np.savetxt('/home/thinkstation/YU/InkpadBox_h.txt',e)
# np.savetxt('/home/thinkstation/YU/NailBox_h.txt',f)
# Mean predicted row index ("e") per class.
Badminton_e = sum(list_pred_e[0:50])/50
BigDrawPaper_e = sum(list_pred_e[50:100])/50
Earmuffs_e = sum(list_pred_e[100:150])/50
HDD_e =sum(list_pred_e[150:200])/50
InkpadBox_e = sum(list_pred_e[200:250])/50
NailBox_e = sum(list_pred_e[250:300])/50
# Mean predicted column index ("h") per class.
Badminton_h = sum(list_pred_h[0:50])/50
BigDrawPaper_h = sum(list_pred_h[50:100])/50
Earmuffs_h = sum(list_pred_h[100:150])/50
HDD_h =sum(list_pred_h[150:200])/50
InkpadBox_h = sum(list_pred_h[200:250])/50
NailBox_h = sum(list_pred_h[250:300])/50
# Per-class accuracy of the row ("e") prediction.
Badminton_acc_e = sum(batchs_acc_e[0:50])/50
BigDrawPaper_acc_e = sum(batchs_acc_e[50:100])/50
DoublelayerFoamBoard_acc_e = sum(batchs_acc_e[100:150])/50
Earmuffs_acc_e =sum(batchs_acc_e[150:200])/50
InkpadBox_acc_e = sum(batchs_acc_e[200:250])/50
NailBox_acc_e = sum(batchs_acc_e[250:300])/50
# Per-class accuracy of the column ("h") prediction.
Badminton_acc_h = sum(batchs_acc_h[0:50])/50
BigDrawPaper_acc_h = sum(batchs_acc_h[50:100])/50
DoublelayerFoamBoard_acc_h = sum(batchs_acc_h[100:150])/50
Earmuffs_acc_h =sum(batchs_acc_h[150:200])/50
InkpadBox_acc_h = sum(batchs_acc_h[200:250])/50
NailBox_acc_h = sum(batchs_acc_h[250:300])/50
# NOTE(review): these divide the summed *predicted indices* by 300, not an
# accuracy in [0, 1] — the variable names look misleading; confirm intent.
e_acc = sum(list_pred_e)/300
h_acc = sum(list_pred_h)/300
print(h_acc)
# print('len',len(class_acc))
# print('clas',class_acc)
# BlackBandage_listpred = sum(listpred[0:50])/50
# InkpadBox_listpred = sum(listpred[0:50])/50 #[7,6]
# RoundSponge_listpred = sum(listpred[50:100])/50 #[6,0]
# SoapBox_listpred = sum(listpred[100:150])/50#[5,5]
# # Tissue_listpred = sum(listpred[200:250])/50#
# WhiteThread_listpred = sum(listpred[150:-1])/50
#
# test_accu = (clas[-1]+1)/len(clas)
# print('pre_out acc rate',test_accu)
# x1, y1 = zip(*list_clas[0:50])
# x2,y2 = zip(*list_clas[50:100])
# x3,y3 = zip(*list_clas[100:150])
# x4,y4 = zip(*list_clas[150:-1])
# plt.figure()
# # plt.scatter(x1,y1,color = "#808080")
# # plt.scatter(x2,y2,color = "#666666")
# plt.scatter(x3,y3,color = "#CCCCCC")
# # plt.scatter(x4,y4,color = "#000000")
#
# plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
2245001287f01d37fdb2558b53963059c16d91d1 | 02818cb241a117f7bfda5462744b0fb8e2f69666 | /core/models.py | c3cd8c5ec3ab20863777a7fe9e6dbcea82007b43 | [] | no_license | Oswaldgerald/clients | 4127b1440da650b91149d44d286e46b83c869202 | 5ffeafe5d7562471384fc14cec7c651fa1748ac3 | refs/heads/master | 2020-03-23T08:41:08.352313 | 2018-11-16T07:25:18 | 2018-11-16T07:25:18 | 141,340,298 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | from django.db import models
class ID(models.Model):
    """An identity document, stored as its raw number string."""
    number = models.CharField(max_length=50)
    def __str__(self):
        return self.number
class Client(models.Model):
    """A client with personal details and an optional identity document."""
    name = models.CharField(max_length=70)
    last_name = models.CharField(max_length=72)
    age = models.IntegerField()
    salary = models.DecimalField(max_digits=9, decimal_places=2)
    email = models.EmailField(null=True, blank=True)
    date_of_birth = models.DateTimeField(null=True, blank=True)
    photo = models.ImageField(null=True, blank=True, upload_to='clients')
    # Optional one-to-one link to the client's identity document.
    doc_id = models.OneToOneField(ID, null=True, on_delete=models.CASCADE)
    def getfullname(self):
        """Return the client's full name as "<name> <last_name>"."""
        return self.name + ' ' + self.last_name
    def __str__(self):
        return self.getfullname()
class Product(models.Model):
    """A sellable product with its base price and tax amount."""
    description = models.CharField(max_length=100)
    price = models.DecimalField(max_digits=7, decimal_places=2)
    taxes = models.DecimalField(max_digits=7, decimal_places=2)
    def __str__(self):
        return self.description
class Sale(models.Model):
    """A numbered sale by a client over a set of products."""
    sale_number = models.CharField(max_length=10)
    # on_delete must be a deletion handler such as models.CASCADE; the
    # original passed ``on_delete=False``, which is not callable and raises
    # TypeError when the related Client is deleted.  (Schema change — run
    # makemigrations after applying.)
    client = models.ForeignKey(Client, on_delete=models.CASCADE)
    date = models.DateTimeField(auto_now_add=True)
    total = models.DecimalField(max_digits=9, decimal_places=2)
    products = models.ManyToManyField(Product, blank=True)
    def __str__(self):
        return self.sale_number
| [
"moswaldgerald@gmail.com"
] | moswaldgerald@gmail.com |
b290f6c4c523dba303d7efb6b9edbfc26d01ce6b | 4d0bbeb8ab52f7e450aff20056f7509e12751258 | /lists/migrations/0003_list.py | da0266eb470c2bba6c9bd9b11f8ba74b47076401 | [] | no_license | chicocheco/tdd_book | f7c9246dcb4eb5327704c72f655bf6e187b28849 | 574b1082aa523c7434f50e0c4cbdf5777ddf50ef | refs/heads/master | 2022-05-02T17:44:27.217329 | 2020-03-13T18:57:22 | 2020-03-13T18:57:22 | 197,633,503 | 0 | 0 | null | 2022-04-22T22:19:12 | 2019-07-18T17:56:43 | JavaScript | UTF-8 | Python | false | false | 441 | py | # Generated by Django 2.2.3 on 2019-08-08 07:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the empty ``List`` model (items reference it in later migrations)."""
    dependencies = [
        ('lists', '0002_item_text'),
    ]
    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| [
"stanislav.matas@gmail.com"
] | stanislav.matas@gmail.com |
02be7232b3923ceecd22b8fa265b5624213d162c | 7a3d8975148146b63154e0922f6e97ce46b2fe95 | /app/analyzers/brainless.py | b9a95b67a93bf3ebf787c473484ee31ee6212e8f | [] | no_license | mgorsk1/video-stream-analysis | 20b9e6ef50811ac5843f1de0bde22d69edf54821 | ccc99d958402fe1190901b768c8f30c3d92c2c8d | refs/heads/master | 2022-12-12T00:08:18.387080 | 2019-11-16T19:15:58 | 2019-11-16T19:15:58 | 170,911,472 | 0 | 0 | null | 2022-11-22T04:30:39 | 2019-02-15T18:38:44 | Python | UTF-8 | Python | false | false | 928 | py | from app.analyzers.base import BaseAnalyzer
from app.config import log
from app.tools import format_key_active
__all__ = ['Brainless']
class Brainless(BaseAnalyzer):
    """
    Brainless class should be used when there is no grace period.

    Every detection is acted on immediately, unless the same value is
    already filed as active in the tracking database.
    """
    def __init__(self, *args, **kwargs):
        # Pure pass-through constructor; Python 3 zero-argument super()
        # replaces the redundant super(Brainless, self) form.
        super().__init__(*args, **kwargs)

    def _analyze(self, value, confidence, image, **kwargs):
        """Take action for ``value`` unless it is already filed as active."""
        log.info("#starting #analysis", extra=dict(value=value, confidence=confidence))

        already_filed = self.tdb.get_val(format_key_active(value))

        if not already_filed:
            log.info("detection has not been filed yet", extra=dict(value=value))
            # kwargs is already a fresh dict — no need to copy via dict(kwargs).
            self.take_action(value, confidence, image, **kwargs)
        else:
            log.info("detection already filed", extra=dict(value=value))

        log.info("#finished #analysis", extra=dict(value=value, confidence=confidence))
| [
"gorskimariusz13@gmail.com"
] | gorskimariusz13@gmail.com |
23241518e94ae0d5c41c03ff56152a117f302c17 | d7ec67a5ba315103fa6a6bae6dc045f1fecf7add | /docs_master_tensorflow/keras/tf_dqn_simple_master/dqn_agent.py | d0dc2cccfa0c1fbf14d21175a9b41c3605ff96e2 | [] | no_license | munezou/PycharmProject | cc62f5e4278ced387233a50647e8197e009cc7b4 | 26126c02cfa0dc4c0db726f2f2cabb162511a5b5 | refs/heads/master | 2023-03-07T23:44:29.106624 | 2023-01-23T16:16:08 | 2023-01-23T16:16:08 | 218,804,126 | 2 | 1 | null | 2023-02-28T23:58:22 | 2019-10-31T15:57:22 | Jupyter Notebook | UTF-8 | Python | false | false | 4,247 | py | from collections import deque
import os
import numpy as np
import tensorflow as tf
class DQNAgent:
    """
    Multi Layer Perceptron with Experience Replay

    TF1-style DQN: a single hidden-layer network maps an 8x8 board state to
    per-action Q-values; transitions are stored in a bounded deque and
    replayed in random minibatches.
    """
    def __init__(self, enable_actions, environment_name):
        # parameters
        self.name = os.path.splitext(os.path.basename(__file__))[0]
        self.environment_name = environment_name
        self.enable_actions = enable_actions
        self.n_actions = len(self.enable_actions)
        self.minibatch_size = 32
        self.replay_memory_size = 1000
        self.learning_rate = 0.001
        self.discount_factor = 0.9
        self.exploration = 0.1  # default epsilon for epsilon-greedy
        self.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
        self.model_name = "{}.ckpt".format(self.environment_name)
        # replay memory (oldest transitions are evicted automatically)
        self.D = deque(maxlen=self.replay_memory_size)
        # model
        self.init_model()
        # variables
        self.current_loss = 0.0
    def init_model(self):
        """Build the Q-network graph, loss, optimizer, saver and session."""
        # input layer (8 x 8)
        self.x = tf.placeholder(tf.float32, [None, 8, 8])
        # flatten (64)
        x_flat = tf.reshape(self.x, [-1, 64])
        # fully connected layer (64 units, ReLU)
        W_fc1 = tf.Variable(tf.truncated_normal([64, 64], stddev=0.01))
        b_fc1 = tf.Variable(tf.zeros([64]))
        h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1)
        # output layer (n_actions), linear Q-values
        W_out = tf.Variable(tf.truncated_normal([64, self.n_actions], stddev=0.01))
        b_out = tf.Variable(tf.zeros([self.n_actions]))
        self.y = tf.matmul(h_fc1, W_out) + b_out
        # loss function: MSE against the replayed targets
        self.y_ = tf.placeholder(tf.float32, [None, self.n_actions])
        self.loss = tf.reduce_mean(tf.square(self.y_ - self.y))
        # train operation
        optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        self.training = optimizer.minimize(self.loss)
        # saver
        self.saver = tf.train.Saver()
        # session
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def Q_values(self, state):
        """Return Q(state, action) for all actions as a 1-D array."""
        return self.sess.run(self.y, feed_dict={self.x: [state]})[0]
    def select_action(self, state, epsilon):
        """Epsilon-greedy action selection over the enabled actions."""
        if np.random.rand() <= epsilon:
            # random exploration
            return np.random.choice(self.enable_actions)
        else:
            # greedy: max_action Q(state, action)
            return self.enable_actions[np.argmax(self.Q_values(state))]
    def store_experience(self, state, action, reward, state_1, terminal):
        """Append one (s, a, r, s', done) transition to replay memory."""
        self.D.append((state, action, reward, state_1, terminal))
    def experience_replay(self):
        """Sample a random minibatch from memory and run one training step."""
        state_minibatch = []
        y_minibatch = []
        # sample random minibatch (with replacement)
        minibatch_size = min(len(self.D), self.minibatch_size)
        minibatch_indexes = np.random.randint(0, len(self.D), minibatch_size)
        for j in minibatch_indexes:
            state_j, action_j, reward_j, state_j_1, terminal = self.D[j]
            action_j_index = self.enable_actions.index(action_j)
            # Start from current predictions, overwrite only the taken action.
            y_j = self.Q_values(state_j)
            if terminal:
                y_j[action_j_index] = reward_j
            else:
                # Bellman target: reward_j + gamma * max_action' Q(state', action')
                y_j[action_j_index] = reward_j + self.discount_factor * np.max(self.Q_values(state_j_1))  # NOQA
            state_minibatch.append(state_j)
            y_minibatch.append(y_j)
        # training
        self.sess.run(self.training, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
        # for log
        self.current_loss = self.sess.run(self.loss, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
    def load_model(self, model_path=None):
        """Restore weights from ``model_path`` or the latest checkpoint."""
        if model_path:
            # load from model_path
            self.saver.restore(self.sess, model_path)
        else:
            # load from checkpoint
            checkpoint = tf.train.get_checkpoint_state(self.model_dir)
            if checkpoint and checkpoint.model_checkpoint_path:
                self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
    def save_model(self):
        """Write the current weights to <model_dir>/<model_name>."""
        self.saver.save(self.sess, os.path.join(self.model_dir, self.model_name))
| [
"kazumikm0119@pi5.fiberbit.net"
] | kazumikm0119@pi5.fiberbit.net |
a31b322b32555a927b3a63f5092900042142b843 | 27398b2a8ed409354d6a36c5e1d2089dad45b4ac | /backend/common/decapod_common/models/properties.py | 2a7dbbf75e03a2cf644b94bf0f4bf491dda45988 | [
"Apache-2.0"
] | permissive | amar266/ceph-lcm | e0d6c1f825f5ac07d2926bfbe6871e760b904340 | 6b23ffd5b581d2a1743c0d430f135261b7459e38 | refs/heads/master | 2021-04-15T04:41:55.950583 | 2018-03-23T12:51:26 | 2018-03-23T12:51:26 | 126,484,605 | 0 | 0 | null | 2018-03-23T12:50:28 | 2018-03-23T12:50:27 | null | UTF-8 | Python | false | false | 3,449 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains special property descriptors."""
import enum
import importlib
class Property:
    """Base class for model property descriptors.

    ``SENTINEL`` is a unique marker used to distinguish "attribute missing"
    from any legitimately stored value (including None).
    """

    SENTINEL = object()


class ChoicesProperty(Property):
    """Descriptor that only accepts values from a fixed set of choices.

    ``choices`` may be a container, an Enum class, or a zero-argument
    callable returning a container (evaluated lazily on every assignment).
    """

    def __init__(self, attr_name, choices):
        self.choices = choices
        self.attr_name = attr_name

    def __get__(self, instance, owner):
        value = getattr(instance, self.attr_name, self.SENTINEL)
        if value is self.SENTINEL:
            raise AttributeError()

        return value

    def __set__(self, instance, value):
        choices = self.choices
        # Enum classes are callable but must be used directly as the
        # container; any other callable is a lazy choices factory.
        if callable(choices) and type(choices) is not enum.EnumMeta:
            choices = choices()

        try:
            if value in choices:
                setattr(instance, self.attr_name, value)
                return
        except TypeError:
            # Unhashable/incomparable value: fall through to the rejection.
            pass

        # The original raised ValueError("Unknown error"), which hides what
        # actually went wrong; report the rejected value instead.
        raise ValueError(
            "Value {0!r} is not in the allowed choices".format(value))
class ModelProperty(Property):
    """Descriptor that stores a model reference by ID and lazily resolves the
    full model instance on first access, caching it per owner instance."""

    @classmethod
    def get_value_id(cls, value):
        # Accept a model object, a raw DB document (dict), None, or a plain ID.
        if hasattr(value, "model_id"):
            return value.model_id
        if isinstance(value, dict):
            # presumably a MongoDB document; prefer "_id", fall back to "id"
            return value.get("_id", value.get("id"))
        if value is None:
            return None
        return str(value)

    @classmethod
    def get_model(cls, klass, model_id):
        # Resolve a single model instance from its ID.
        return klass.find_by_model_id(model_id)

    def __init__(self, model_class_name, id_attribute):
        # model_class_name is a dotted path ("pkg.module.Class"); it is resolved
        # lazily in get_class() (avoids circular imports at module load time).
        self.model_class_name = model_class_name
        self.id_attribute = id_attribute
        # Per-instance cache slot for the resolved model object.
        self.instance_attribute = id_attribute + "_instance"

    def __get__(self, instance, owner):
        # Return the cached instance if present; otherwise resolve from the
        # stored ID and cache the result.
        value = instance.__dict__.get(self.instance_attribute, self.SENTINEL)
        if value is not self.SENTINEL:
            return value
        model_id = instance.__dict__.get(self.id_attribute)
        model = self.get_model(self.get_class(), model_id)
        instance.__dict__[self.instance_attribute] = model
        return model

    def __set__(self, instance, value):
        # Store only the ID and invalidate the cached instance so the next
        # read re-resolves it.
        value_id = self.get_value_id(value)
        instance.__dict__[self.id_attribute] = value_id
        instance.__dict__[self.instance_attribute] = self.SENTINEL

    def get_class(self):
        """Import and return the model class named by ``model_class_name``."""
        module, obj_name = self.model_class_name.rsplit(".", 1)
        module = importlib.import_module(module)
        klass = getattr(module, obj_name)
        return klass
class ModelListProperty(ModelProperty):
    """List-valued variant of ModelProperty: stores a list of model IDs and
    resolves them to model instances in a single query."""

    @classmethod
    def get_value_id(cls, value):
        # Map every item through the scalar conversion of the parent class.
        return [super(ModelListProperty, cls).get_value_id(item)
                for item in value]

    @classmethod
    def get_model(cls, klass, model_id):
        # Here model_id is a *list* of IDs; fetch only the latest revision of
        # each referenced model.
        query = {
            "model_id": {"$in": model_id},
            "is_latest": True
        }
        models = []
        for item in klass.list_raw(query):
            model = klass()
            model.update_from_db_document(item)
            models.append(model)
        return models
| [
"sarkhipov@mirantis.com"
] | sarkhipov@mirantis.com |
215a011898e29aea78aa8531f6aadbd936358259 | d68c9105c03bef9dce2e438b5b91c2bdd0d856e2 | /[9095] 1, 2, 3 더하기.py | 308b6ff62e407f5494c58d595b9839b3addcf2e6 | [] | no_license | newfull5/Baekjoon-Online-Judge | 2a2dd1080af234551ecab6277968fedeb170a1f4 | 00d04f6c21080e3ad7c0fb06ca311f2324a591c0 | refs/heads/master | 2023-06-29T21:05:07.539911 | 2021-07-16T09:23:46 | 2021-07-16T09:23:46 | 267,557,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | def Reculsive(n):
global answer
if n >=3:
Reculsive(n-3)
if n >=2:
Reculsive(n-2)
if n >=1:
Reculsive(n-1)
if n == 0:
answer += 1
return
# One test case per line: read n and count the ordered ways to write n as a
# sum of 1, 2 and 3 (accumulated by Reculsive into the global ``answer``).
for _ in range(int(input())):
    answer = 0
    Reculsive(int(input()))
    print(answer)
| [
"noreply@github.com"
] | noreply@github.com |
4bb3df61f7e8707d0f5b6dc0a372e300a836a1f0 | d5e4d88e4124ab2387bac64e7d7b76ff37793bf6 | /011/problem11.py | 072127ab96c86257506ca23cee758a4aa9743be4 | [] | no_license | grawinkel/ProjectEuler | 1ae5572eec92e4307183e8b30222ffa39ef4bbce | b470dd4219c769587769c9a70ec3bae5d3ca1166 | refs/heads/master | 2021-05-26T20:01:03.410567 | 2012-10-05T16:58:48 | 2012-10-05T16:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | # To change this template, choose Tools | Templates
# and open the template in the editor.
__author__="meatz"
__date__ ="$01.08.2010 14:10:38$"
m = []  # 20x20 grid of number strings, loaded from data.txt in __main__
max = 0  # best 4-cell product found so far (NOTE: shadows the builtin ``max``)
maxa,maxb = 0,0  # grid coordinates where the best product starts
def nw(a,b):
    # Product of the 4 cells running north-west (up-left) from (a, b);
    # records it in the global best trackers when it improves on ``max``.
    global max,maxa,maxb
    prod = int(m[a][b]) * int(m[a-1][b-1]) * int(m[a-2][b-2]) * int(m[a-3][b-3])
    if (prod > max):
        max = prod
        maxa = a
        maxb = b
def n(a,b):
    # Product of the 4 cells running north (straight up) from (a, b);
    # records it in the global best trackers when it improves on ``max``.
    global max,maxa,maxb
    prod = int(m[a][b]) * int(m[a-1][b]) * int(m[a-2][b]) * int(m[a-3][b])
    if (prod > max):
        max = prod
        maxa = a
        maxb = b
def sw(a,b):
    # Product of the 4 cells running south-west (down-left) from (a, b);
    # records it in the global best trackers when it improves on ``max``.
    global max,maxa,maxb
    prod = int(m[a][b]) * int(m[a+1][b-1]) * int(m[a+2][b-2]) * int(m[a+3][b-3])
    if (prod > max):
        max = prod
        maxa = a
        maxb = b
def w(a,b):
    # Product of the 4 cells running west (left) from (a, b);
    # records it in the global best trackers when it improves on ``max``.
    global max,maxa,maxb
    prod = int(m[a][b]) * int(m[a][b-1]) * int(m[a][b-2]) * int(m[a][b-3])
    if (prod > max):
        max = prod
        maxa = a
        maxb = b
def s(a,b):
    """Score the vertical run of four grid cells heading south (down) from (a, b).

    Compares the product of the run against the module-level best ``max`` and,
    when larger, records it together with its starting coordinates.
    """
    global max,maxa,maxb
    run = (m[a][b], m[a+1][b], m[a+2][b], m[a+3][b])
    candidate = 1
    for cell in run:
        candidate *= int(cell)
    if candidate > max:
        max = candidate
        maxa = a
        maxb = b
def se(a,b):
    # Product of the 4 cells running south-east (down-right) from (a, b);
    # records it in the global best trackers when it improves on ``max``.
    global max,maxa,maxb
    prod = int(m[a][b]) * int(m[a+1][b+1]) * int(m[a+2][b+2]) * int(m[a+3][b+3])
    if (prod > max):
        max = prod
        maxa = a
        maxb = b
def ne(a,b):
    # Product of the 4 cells running north-east (up-right) from (a, b);
    # records it in the global best trackers when it improves on ``max``.
    global max,maxa,maxb
    prod = int(m[a][b]) * int(m[a-1][b+1]) * int(m[a-2][b+2]) * int(m[a-3][b+3])
    if (prod > max):
        max = prod
        maxa = a
        maxb = b
def e(a,b):
    """Score the horizontal run of four grid cells heading east (right) from (a, b).

    Compares the product of the run against the module-level best ``max`` and,
    when larger, records it together with its starting coordinates.
    """
    global max,maxa,maxb
    run = (m[a][b], m[a][b+1], m[a][b+2], m[a][b+3])
    candidate = 1
    for cell in run:
        candidate *= int(cell)
    if candidate > max:
        max = candidate
        maxa = a
        maxb = b
def run(m):
    # Scan every cell of the 20x20 grid and test each direction whose 4-cell
    # run stays inside the grid; diagonals are nested under the horizontal
    # bound checks because they need room in both axes.
    for a in range(20):
        for b in range(20):
            if (a-3>=0):
                n(a,b)
            if (a+3<=19):
                s(a,b)
            if (b-3>=0): #check the west
                w(a,b)
                if (a-3>=0):
                    nw(a,b)
                if (a+3<=19):
                    sw(a,b)
            if (b+3<20): #check the east
                e(a,b)
                if (a-3>=0):
                    ne(a,b)
                if (a+3<20):
                    se(a,b)
if __name__ == "__main__":
    # Python 2 script: load the space-separated 20x20 grid from data.txt,
    # scan every direction, then print the greatest 4-cell product.
    f = open("data.txt","r")
    for x in f.readlines():
        m.append(x.split(" "))
    run(m)
    print max
| [
"matthias@grawinkel.com"
] | matthias@grawinkel.com |
fcf3fe369d825fc8f70166e86d6154d98a1eccfa | 23bc3e2bc6b2b9e3fd19f738d4767d09bec590b5 | /CourseWork/Labs/lab3/vivek_pygame_base_template.py | e880efac680ed5ff5a5856816fdf28423d8e2bb4 | [] | no_license | vivekVells/GameDesignProgramming | 4e683114bf487d2ea4c5c1c4a2b7a3375e8be8e7 | bee0fbc4d0a8d0e4001d6c9c9b35fea6b74da1f9 | refs/heads/master | 2020-03-27T13:49:52.159394 | 2018-12-12T09:37:01 | 2018-12-12T09:37:01 | 146,630,596 | 0 | 0 | null | 2018-12-12T08:32:11 | 2018-08-29T16:49:28 | Python | UTF-8 | Python | false | false | 2,516 | py | """
Show how to use a sprite backed by a graphic.
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/vRB_983kUMc
"""
import pygame
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
pygame.init()
# Set the width and height of the screen [width, height]
size = (700, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Vivek's 1st House via PyGame")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
# --- Game logic should go here
# --- Drawing code should go here
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
# rect(screen, GREEN, [x,y,breadth, length], 0)
# polygon(screen, BLACK, [[midx, midy], [leftx, lefty], [rightx, righty]], 5)
# drawing house
pygame.draw.rect(screen, RED, [100, 200, 200, 200], 0)
# drawing chimney
pygame.draw.rect(screen, BLACK, [125, 140, 20, 60], 0)
# drawing roof
pygame.draw.polygon(screen, WHITE, [[200, 100], [100, 200], [300, 200]], 0)
pygame.draw.polygon(screen, BLACK, [[200, 100], [100, 200], [300, 200]], 3)
# drawing window
pygame.draw.rect(screen, GREEN, [125, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [175, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [225, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [275, 250, 10, 30], 0)
# drawing the door
pygame.draw.rect(screen, BLACK, [190, 350, 20, 50], 0)
BLUE = (0, 0, 255)
BOARD_X = 50
BOARD_Y = 350
BOARD_LENGTH = 150
BOARD_WIDTH = 70
BOARD_COLOR_FILL = 0
pygame.draw.rect(screen, BLUE, [BOARD_X, BOARD_Y, BOARD_LENGTH, BOARD_WIDTH], BOARD_COLOR_FILL)
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
clock.tick(60)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
| [
"techengineervivek@gmail.com"
] | techengineervivek@gmail.com |
96b272fb814a0622763c4fe523506715910ee90b | 59555ea98bedd9e8670898818474a84ac6e01461 | /fiba_inbounder/settings.py | 728124568a90d421a58c42cbdf99f67341c8cef7 | [
"MIT"
] | permissive | nypgand1/fiba-inbounder | 11a25ab45230c7dcff2b5a5d3d91655e01462061 | 6f4c3268c24a6e67f92cb918261ddeb7a320a96a | refs/heads/master | 2023-05-25T23:12:48.274436 | 2022-11-12T15:27:34 | 2022-11-12T15:27:34 | 199,791,534 | 1 | 0 | MIT | 2023-05-22T22:30:20 | 2019-07-31T06:15:03 | Python | UTF-8 | Python | false | false | 1,287 | py | # -*- coding: utf-8 -*-
import simplejson
import logging
from fiba_inbounder.shot_chart_zone import SHOT_CHART_ZONE_GEO
# Module-wide logging setup for the fiba_inbounder package.
LOGGER_FORMAT = '%(levelname)s: %(asctime)-15s: %(message)s'
logging.basicConfig(format=LOGGER_FORMAT, level=logging.INFO)
LOGGER = logging.getLogger('FibaInbounder')

# FIBA livestats endpoints (v5 and v7 API generations).
FIBA_DATA_URL_V5 = 'https://www.fibalivestats.com/data/{match_id}/data.json'
FIBA_GAME_STATS_URL_V7 = 'https://livecache.sportresult.com/node/db/FIBASTATS_PROD/{event_id}_GAME_{game_unit}_JSON.json'
FIBA_PLAY_BY_PLAY_URL_V7 = 'https://livecache.sportresult.com/node/db/FIBASTATS_PROD/{event_id}_GAMEACTIONS_{game_unit}_{period_id}_JSON.json'
FIBA_DETAIL_URL_V7 = 'https://livecache.sportresult.com/node/db/FIBASTATS_PROD/{event_id}_COMPDETAILS_{game_unit}_JSON.json'

# P.League+ endpoints.
PLEAGUE_GAME_STATS_URL = 'http://api.pleagueplus.meetagile.com/rest/game/{game_id}'
PLEAGUE_SUB_URL = 'http://api.pleagueplus.meetagile.com/rest/gameplayerplaytime/{game_id}/{team_id}'
PLEAGUE_PLAY_BY_PLAY_URL = 'http://api.pleagueplus.meetagile.com/rest/gamerecord/{game_id}/{team_id}'

REG_FULL_GAME_MINS = 40  # regulation game length in minutes
SHOT_CHART_BACKGROUND = 'shotchart_background_zone_652.png'
# Per-zone FG% threshold above which a zone is rendered "red" (hot).
SHOT_CHART_PERC_RED = [
    0.5, #At Rim
    0.5, 0.5, 0.5, #Mid Two
    0.5, 0.5, 0.5, 0.5, 0.5, #Long Two
    0.333, 0.333, 0.333, 0.333, 0.333 #Three
]
| [
"nypgand1@gmail.com"
] | nypgand1@gmail.com |
74500f2dd0b8c53a83c336ef4540ba2e49d79f58 | 5ca1893df92150683d386ba61f849a8a20e80f0a | /RSRvenv/lib/python3.5/site-packages/polcart/__init__.py | e72aa4dbaf2557af7d2da9b78431fe7fe7b21272 | [] | no_license | JaredJRoss/RSR | a5340a087b7e19f5c9c8a47d8b322e2384ae8152 | 6601afbab963f095f939ba4ca07cc07c7257e271 | refs/heads/master | 2021-07-10T18:26:28.128522 | 2018-01-08T20:24:20 | 2018-01-08T20:24:20 | 104,166,288 | 0 | 6 | null | 2017-11-27T23:35:25 | 2017-09-20T04:38:26 | Python | UTF-8 | Python | false | false | 23 | py | from .polcart import *
| [
"jaredrossj@gmail.com"
] | jaredrossj@gmail.com |
c95ca03fbc0a46c72b98adbd06c05f447d748b46 | 64d7fd332075b1eedacf6e2324b998b03aa1ac92 | /source/trigger_stackset_sm.py | 0cd3ad4e3050d16034eca8a4fd70108a98098aae | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] | permissive | inderjeetrao1/aws-control-tower-customizations | 8ec674facfba3c1a33eeb00427130f8a4e69f715 | e4752bf19a1c8f0a597195982d63a1a2c2dd799a | refs/heads/master | 2021-02-16T14:09:39.808765 | 2020-01-21T17:59:06 | 2020-01-21T17:59:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,880 | py | ######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
from lib.logger import Logger
from lib.state_machine import StateMachine
from lib.ssm import SSM
from lib.helper import sanitize, convert_s3_url_to_http_url, trim_length, download_remote_file
from lib.helper import transform_params, convert_http_url_to_s3_url, reverse_transform_params
from lib.manifest import Manifest
from lib.cloudformation import StackSet
from lib.organizations import Organizations
from lib.params import ParamsHandler
from lib.metrics import Metrics
from lib.sts import STS
from lib.s3 import S3
import inspect
import sys
import time
import os
import json
import tempfile
import filecmp
from uuid import uuid4
# S3 key prefix under which local templates are staged before stack set deploys.
TEMPLATE_KEY_PREFIX = '_custom_control_tower_templates_staging'
MANIFEST_FILE_NAME = 'manifest.yaml'
# IAM capability passed with every stack set operation.
CAPABILITIES = 'CAPABILITY_NAMED_IAM'
class DeployStackSetStateMachine(object):
    """Parses the Custom Control Tower manifest and deploys each CloudFormation
    resource as a stack set by triggering (and optionally monitoring) the
    stack set Step Functions state machine.

    NOTE(review): several methods reference the module-level ``logger`` and
    ``wait_time`` globals (bound only under ``__main__``) instead of
    ``self.logger`` / ``self.wait_time`` — confirm this is intentional.
    """

    def __init__(self, logger, wait_time, manifest_file_path, sm_arn_stackset, staging_bucket, execution_mode):
        # AWS/service helpers
        self.state_machine = StateMachine(logger)
        self.ssm = SSM(logger)
        self.s3 = S3(logger)
        self.send = Metrics(logger)
        self.param_handler = ParamsHandler(logger)
        self.logger = logger
        self.manifest_file_path = manifest_file_path
        # Manifest folder = manifest path minus the trailing file name.
        self.manifest_folder = manifest_file_path[:-len(MANIFEST_FILE_NAME)]
        self.wait_time = wait_time
        self.sm_arn_stackset = sm_arn_stackset
        self.manifest = None
        # ARNs of all state machine executions started by this run.
        self.list_sm_exec_arns = []
        self.staging_bucket = staging_bucket
        self.root_id = None
        # Unique run ID used to namespace staged template keys in S3.
        self.uuid = uuid4()
        # Input event built up for each state machine execution.
        self.state_machine_event = {}
        if execution_mode.lower() == 'sequential':
            self.logger.info("Running {} mode".format(execution_mode))
            self.sequential_flag = True
        else:
            self.logger.info("Running {} mode".format(execution_mode))
            self.sequential_flag = False

    def _stage_template(self, relative_template_path):
        """Return an HTTPS S3 URL for the template, uploading local files to
        the staging bucket first."""
        try:
            if relative_template_path.lower().startswith('s3'):
                # Convert the S3 URL s3://bucket-name/object to HTTP URL https://s3.amazonaws.com/bucket-name/object
                s3_url = convert_s3_url_to_http_url(relative_template_path)
            else:
                local_file = os.path.join(self.manifest_folder, relative_template_path)
                remote_file = "{}/{}_{}".format(TEMPLATE_KEY_PREFIX, self.uuid, relative_template_path)
                logger.info("Uploading the template file: {} to S3 bucket: {} and key: {}".format(local_file,
                                                                                                 self.staging_bucket,
                                                                                                 remote_file))
                self.s3.upload_file(self.staging_bucket, local_file, remote_file)
                s3_url = "{}{}{}{}".format('https://s3.amazonaws.com/', self.staging_bucket, '/', remote_file)
            return s3_url
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _load_params(self, relative_parameter_path, account=None, region=None):
        """Load a CFN parameter JSON file (local or s3://) and resolve values."""
        try:
            if relative_parameter_path.lower().startswith('s3'):
                parameter_file = download_remote_file(self.logger, relative_parameter_path)
            else:
                parameter_file = os.path.join(self.manifest_folder, relative_parameter_path)
            logger.info("Parsing the parameter file: {}".format(parameter_file))
            with open(parameter_file, 'r') as content_file:
                parameter_file_content = content_file.read()
            params = json.loads(parameter_file_content)
            if account is not None:
                # Deploying Core resource Stack Set
                # The last parameter is set to False, because we do not want to replace the SSM parameter values yet.
                sm_params = self.param_handler.update_params(params, account, region, False)
            else:
                # Deploying Baseline resource Stack Set
                sm_params = self.param_handler.update_params(params)
            logger.info("Input Parameters for State Machine: {}".format(sm_params))
            return sm_params
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _create_ssm_input_map(self, ssm_parameters):
        """Build the SSMParameters map for the SM input, resolving values."""
        try:
            ssm_input_map = {}
            for ssm_parameter in ssm_parameters:
                key = ssm_parameter.name
                value = ssm_parameter.value
                ssm_value = self.param_handler.update_params(transform_params({key: value}))
                ssm_input_map.update(ssm_value)
            return ssm_input_map
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _create_state_machine_input_map(self, input_params, request_type='Create'):
        """Wrap the resource properties into the CFN-custom-resource-shaped
        event stored in self.state_machine_event."""
        try:
            self.state_machine_event.update({'RequestType': request_type})
            self.state_machine_event.update({'ResourceProperties': input_params})
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _create_stack_set_state_machine_input_map(self, stack_set_name, template_url, parameters,
                                                  account_list, regions_list, ssm_map):
        """Assemble the stack set SM input (name, template, params, targets)."""
        input_params = {}
        input_params.update({'StackSetName': sanitize(stack_set_name)})
        input_params.update({'TemplateURL': template_url})
        input_params.update({'Parameters': parameters})
        input_params.update({'Capabilities': CAPABILITIES})
        if len(account_list) > 0:
            input_params.update({'AccountList': account_list})
            if len(regions_list) > 0:
                input_params.update({'RegionList': regions_list})
            else:
                # No region given: default to the manifest's home region.
                input_params.update({'RegionList': [self.manifest.region]})
        else:
            input_params.update({'AccountList': ''})
            input_params.update({'RegionList': ''})
        if ssm_map is not None:
            input_params.update({'SSMParameters': ssm_map})
        self._create_state_machine_input_map(input_params)

    def _populate_ssm_params(self):
        """Replace SSM parameter references in the pending SM input with their
        current values (read just-in-time)."""
        try:
            # The scenario is if you have one core resource that exports output from CFN stack to SSM parameter
            # and then the next core resource reads the SSM parameter as input,
            # then it has to wait for the first core resource to
            # finish; read the SSM parameters and use its value as input for second core resource's input for SM
            # Get the parameters for CFN template from self.state_machine_event
            logger.debug("Populating SSM parameter values for SM input: {}".format(self.state_machine_event))
            params = self.state_machine_event.get('ResourceProperties').get('Parameters', {})
            # First transform it from {name: value} to [{'ParameterKey': name}, {'ParameterValue': value}]
            # then replace the SSM parameter names with its values
            sm_params = self.param_handler.update_params(transform_params(params))
            # Put it back into the self.state_machine_event
            self.state_machine_event.get('ResourceProperties').update({'Parameters': sm_params})
            logger.debug("Done populating SSM parameter values for SM input: {}".format(self.state_machine_event))
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _compare_template_and_params(self):
        """Return True when the deployed stack set already matches the local
        template, parameters and target accounts/regions (i.e. the state
        machine execution can be skipped); False when an update is needed."""
        try:
            stack_name = self.state_machine_event.get('ResourceProperties').get('StackSetName', '')
            flag = False
            if stack_name:
                stack_set = StackSet(self.logger)
                describe_response = stack_set.describe_stack_set(stack_name)
                if describe_response is not None:
                    self.logger.info("Found existing stack set.")
                    self.logger.info("Checking the status of last stack set operation on {}".format(stack_name))
                    response = stack_set.list_stack_set_operations(StackSetName=stack_name,
                                                                   MaxResults=1)
                    if response:
                        if response.get('Summaries'):
                            for instance in response.get('Summaries'):
                                self.logger.info("Status of last stack set operation : {}"
                                                 .format(instance.get('Status')))
                                if instance.get('Status') != 'SUCCEEDED':
                                    # Last operation failed or is in-flight: force an update.
                                    self.logger.info("The last stack operation did not succeed. "
                                                     "Triggering Update StackSet for {}".format(stack_name))
                                    return False
                    self.logger.info("Comparing the template of the StackSet: {} with local copy of template"
                                     .format(stack_name))
                    template_http_url = self.state_machine_event.get('ResourceProperties').get('TemplateURL', '')
                    if template_http_url:
                        template_s3_url = convert_http_url_to_s3_url(template_http_url)
                        local_template_file = download_remote_file(self.logger, template_s3_url)
                    else:
                        self.logger.error("TemplateURL in state machine input is empty. Check state_machine_event:{}"
                                          .format(self.state_machine_event))
                        return False
                    # Write the deployed template body to a temp file for filecmp.
                    cfn_template_file = tempfile.mkstemp()[1]
                    with open(cfn_template_file, "w") as f:
                        f.write(describe_response.get('StackSet').get('TemplateBody'))
                    template_compare = filecmp.cmp(local_template_file, cfn_template_file, False)
                    self.logger.info("Comparing the parameters of the StackSet: {} "
                                     "with local copy of JSON parameters file".format(stack_name))
                    params_compare = True
                    params = self.state_machine_event.get('ResourceProperties').get('Parameters', {})
                    if template_compare:
                        # Only bother comparing parameters when the templates match.
                        cfn_params = reverse_transform_params(describe_response.get('StackSet').get('Parameters'))
                        for key, value in params.items():
                            if cfn_params.get(key, '') == value:
                                pass
                            else:
                                params_compare = False
                                break
                    self.logger.info("template_compare={}".format(template_compare))
                    self.logger.info("params_compare={}".format(params_compare))
                    if template_compare and params_compare:
                        account_list = self.state_machine_event.get('ResourceProperties').get("AccountList", [])
                        if account_list:
                            self.logger.info("Comparing the Stack Instances Account & Regions for StackSet: {}"
                                             .format(stack_name))
                            expected_region_list = set(self.state_machine_event.get('ResourceProperties').get("RegionList", []))
                            # iterator over accounts in event account list
                            for account in account_list:
                                actual_region_list = set()
                                self.logger.info("### Listing the Stack Instances for StackSet: {} and Account: {} ###"
                                                 .format(stack_name, account))
                                stack_instance_list = stack_set.list_stack_instances_per_account(stack_name, account)
                                self.logger.info(stack_instance_list)
                                if stack_instance_list:
                                    for instance in stack_instance_list:
                                        if instance.get('Status').upper() == 'CURRENT':
                                            actual_region_list.add(instance.get('Region'))
                                        else:
                                            # Any non-CURRENT instance forces an update.
                                            self.logger.info("Found at least one of the Stack Instances in {} state."
                                                             " Triggering Update StackSet for {}"
                                                             .format(instance.get('Status'),
                                                                     stack_name))
                                            return False
                                else:
                                    self.logger.info("Found no stack instances in account: {}, "
                                                     "Updating StackSet: {}".format(account, stack_name))
                                    # # move the account id to index 0
                                    # newindex = 0
                                    # oldindex = self.state_machine_event.get('ResourceProperties').get("AccountList").index(account)
                                    # self.state_machine_event.get('ResourceProperties').get("AccountList").insert(newindex, self.state_machine_event.get('ResourceProperties').get("AccountList").pop(oldindex))
                                    return False
                                if expected_region_list.issubset(actual_region_list):
                                    self.logger.info("Found expected regions : {} in deployed stack instances : {},"
                                                     " so skipping Update StackSet for {}"
                                                     .format(expected_region_list,
                                                             actual_region_list,
                                                             stack_name))
                                    flag = True
                        else:
                            self.logger.info("Found no changes in template & parameters, "
                                             "so skipping Update StackSet for {}".format(stack_name))
                            flag = True
            return flag
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def state_machine_failed(self, status, failed_execution_list):
        """Log the failed execution ARNs and exit non-zero when status is FAILED."""
        error = " StackSet State Machine Execution(s) Failed. Navigate to the AWS Step Functions console and" \
                " review the following State Machine Executions. ARN List: {}".format(failed_execution_list)
        if status == 'FAILED':
            logger.error(100 * '*')
            logger.error(error)
            logger.error(100 * '*')
            sys.exit(1)

    def _run_or_queue_state_machine(self, stackset_name):
        """Start the stack set state machine for the prepared event; in
        sequential mode block until it finishes, in parallel mode just queue it."""
        try:
            logger.info("State machine Input: {}".format(self.state_machine_event))
            # Execution name: <RequestType>-<stackset (trimmed)>-<timestamp>
            exec_name = "%s-%s-%s" % (self.state_machine_event.get('RequestType'), trim_length(stackset_name.replace(" ", ""), 50),
                                      time.strftime("%Y-%m-%dT%H-%M-%S"))
            # If Sequential, wait for the SM to be executed before kicking of the next one
            if self.sequential_flag:
                self.logger.info(" > > > > > > Running Sequential Mode. > > > > > >")
                self._populate_ssm_params()
                if self._compare_template_and_params():
                    # No changes detected: skip this execution entirely.
                    return
                else:
                    sm_exec_arn = self.state_machine.trigger_state_machine(self.sm_arn_stackset, self.state_machine_event, exec_name)
                    self.list_sm_exec_arns.append(sm_exec_arn)
                    status, failed_execution_list = self.monitor_state_machines_execution_status()
                    if status == 'FAILED':
                        self.state_machine_failed(status, failed_execution_list)
                    else:
                        self.logger.info("State Machine execution completed. Starting next execution...")
            # Else if Parallel, execute all SM at regular interval of wait_time
            else:
                self.logger.info(" | | | | | | Running Parallel Mode. | | | | | |")
                # RUNS Parallel, execute all SM at regular interval of wait_time
                self._populate_ssm_params()
                # if the stackset comparision is matches - skip SM execution
                if self._compare_template_and_params():
                    return
                else:  # if False execution SM
                    sm_exec_arn = self.state_machine.trigger_state_machine(self.sm_arn_stackset, self.state_machine_event, exec_name)
                    time.sleep(int(wait_time))  # Sleeping for sometime
                    self.list_sm_exec_arns.append(sm_exec_arn)
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _deploy_resource(self, resource, account_list):
        """Stage the resource's template/parameters, build the SM input and
        run (or queue) the stack set state machine for it."""
        try:
            template_full_path = self._stage_template(resource.template_file)
            params = {}
            if resource.parameter_file:
                if len(resource.regions) > 0:
                    params = self._load_params(resource.parameter_file, account_list, resource.regions[0])
                else:
                    params = self._load_params(resource.parameter_file, account_list, self.manifest.region)
            ssm_map = self._create_ssm_input_map(resource.ssm_parameters)
            # Deploying Core resource Stack Set
            stack_name = "CustomControlTower-{}".format(resource.name)
            self._create_stack_set_state_machine_input_map(stack_name, template_full_path,
                                                           params, account_list, resource.regions, ssm_map)
            self.logger.info(" >>> State Machine Input >>>")
            self.logger.info(self.state_machine_event)
            self._run_or_queue_state_machine(stack_name)
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _get_root_id(self, org):
        """Return the Organizations root ID."""
        response = org.list_roots()
        self.logger.info("Response: List Roots")
        self.logger.info(response)
        return response['Roots'][0].get('Id')

    def _list_ou_for_parent(self, org, parent_id):
        """Return the OUs directly under the given parent."""
        _ou_list = org.list_organizational_units_for_parent(parent_id)
        self.logger.info("Print Organizational Units List under {}".format(parent_id))
        self.logger.info(_ou_list)
        return _ou_list

    def _get_accounts_in_ou(self, org, ou_id_list):
        """Return (all ACTIVE account IDs across the OUs, {ou_id: [account_ids]})."""
        _accounts_in_ou = []
        accounts_in_all_ous = []
        ou_id_to_account_map = {}
        for _ou_id in ou_id_list:
            _account_list = org.list_accounts_for_parent(_ou_id)
            for _account in _account_list:
                # filter ACTIVE and CREATED accounts
                if _account.get('Status') == "ACTIVE":
                    # create a list of accounts in OU
                    accounts_in_all_ous.append(_account.get('Id'))
                    _accounts_in_ou.append(_account.get('Id'))
            # create a map of accounts for each ou
            self.logger.info("Creating Key:Value Mapping - OU ID: {} ; Account List: {}"
                             .format(_ou_id, _accounts_in_ou))
            ou_id_to_account_map.update({_ou_id: _accounts_in_ou})
            self.logger.info(ou_id_to_account_map)
            # reset list of accounts in the OU
            _accounts_in_ou = []
        self.logger.info("All accounts in OU List: {}".format(accounts_in_all_ous))
        self.logger.info("OU to Account ID mapping")
        self.logger.info(ou_id_to_account_map)
        return accounts_in_all_ous, ou_id_to_account_map

    def _get_ou_ids(self, org):
        """Return (all root-level OU IDs, {OU name: OU ID})."""
        # for each OU get list of account
        # get root id
        root_id = self._get_root_id(org)
        # get OUs under the Org root
        ou_list_at_root_level = self._list_ou_for_parent(org, root_id)
        ou_id_list = []  # NOTE(review): unused; kept for byte-compatibility
        _ou_name_to_id_map = {}
        _all_ou_ids = []
        for ou_at_root_level in ou_list_at_root_level:
            # build list of all the OU IDs under Org root
            _all_ou_ids.append(ou_at_root_level.get('Id'))
            # build a list of ou id
            _ou_name_to_id_map.update({ou_at_root_level.get('Name'): ou_at_root_level.get('Id')})
        self.logger.info("Print OU Name to OU ID Map")
        self.logger.info(_ou_name_to_id_map)
        # return:
        # 1. OU IDs of the OUs in the manifest
        # 2. Account IDs in OUs in the manifest
        # 3. Account IDs in all the OUs in the manifest
        return _all_ou_ids, _ou_name_to_id_map

    def get_account_for_name(self, org):
        """Return {account name: account ID} for all ACTIVE org accounts."""
        # get all accounts in the organization
        account_list = org.get_accounts_in_org()
        # self.logger.info("Print Account List: {}".format(account_list))
        _name_to_account_map = {}
        for account in account_list:
            if account.get("Status") == "ACTIVE":
                _name_to_account_map.update({account.get("Name"): account.get("Id")})
        self.logger.info("Print Account Name > Account Mapping")
        self.logger.info(_name_to_account_map)
        return _name_to_account_map

    def get_organization_details(self):
        """Collect the org lookups used to resolve manifest targets."""
        # > build dict
        #   KEY: OU Name (in the manifest)
        #   VALUE: OU ID (at root level)
        # > build list
        #   all OU IDs under root
        org = Organizations(self.logger)
        all_ou_ids, ou_name_to_id_map = self._get_ou_ids(org)
        # > build list of all active accounts
        #   use case: use to validate accounts in the manifest file.
        # > build dict
        #   KEY: OU ID (for each OU at root level)
        #   VALUE: get list of all active accounts
        #   use case: map OU Name to account IDs
        accounts_in_all_ous, ou_id_to_account_map = self._get_accounts_in_ou(org, all_ou_ids)
        # build dict
        #   KEY: account name
        #   VALUE: account id
        #   use case: convert name in manifest to account ID for SM event
        name_to_account_map = self.get_account_for_name(org)
        return accounts_in_all_ous, ou_id_to_account_map, ou_name_to_id_map, name_to_account_map

    def start_stackset_sm(self):
        """Resolve each manifest resource's target accounts and deploy it."""
        try:
            logger.info("Parsing Core Resources from {} file".format(self.manifest_file_path))
            count = 0
            accounts_in_all_ous, ou_id_to_account_map, ou_name_to_id_map, name_to_account_map = self.get_organization_details()
            for resource in self.manifest.cloudformation_resources:
                self.logger.info(">>>>>>>>> START : {} >>>>>>>>>".format(resource.name))
                # Handle scenario if 'deploy_to_ou' key does not exist in the resource
                try:
                    self.logger.info(resource.deploy_to_ou)
                except:
                    resource.deploy_to_ou = []
                # Handle scenario if 'deploy_to_account' key does not exist in the resource
                try:
                    self.logger.info(resource.deploy_to_account)
                except:
                    resource.deploy_to_account = []
                # find accounts for given ou name
                accounts_in_ou = []
                ou_ids_manifest = []
                # check if OU name list is empty
                if resource.deploy_to_ou:
                    # convert OU Name to OU IDs (case-insensitive substring match)
                    for ou_name in resource.deploy_to_ou:
                        ou_id = [value for key, value in ou_name_to_id_map.items() if ou_name.lower() in key.lower()]
                        ou_ids_manifest.extend(ou_id)
                    # convert OU IDs to accounts
                    for ou_id, accounts in ou_id_to_account_map.items():
                        if ou_id in ou_ids_manifest:
                            accounts_in_ou.extend(accounts)
                    self.logger.info(">>> Accounts: {} in OUs: {}".format(accounts_in_ou, resource.deploy_to_ou))
                # convert account numbers to string type
                account_list = self._convert_list_values_to_string(resource.deploy_to_account)
                self.logger.info(">>>>>> ACCOUNT LIST")
                self.logger.info(account_list)
                # separate account id and names
                name_list = []
                new_account_list = []
                self.logger.info(account_list)
                for item in account_list:
                    if item.isdigit() and len(item) == 12:  # if an actual account ID
                        new_account_list.append(item)
                        self.logger.info(new_account_list)
                    else:
                        name_list.append(item)
                        self.logger.info(name_list)
                # check if name list is empty
                if name_list:
                    # convert account names to account IDs (case-insensitive substring match)
                    for name in name_list:
                        name_account = [value for key, value in name_to_account_map.items() if
                                        name.lower() in key.lower()]
                        self.logger.info("%%%%%%% Name {} - Account {}".format(name, name_account))
                        new_account_list.extend(name_account)
                # Remove account ids from the manifest that is not in the organization or not active
                sanitized_account_list = list(set(new_account_list).intersection(set(accounts_in_all_ous)))
                self.logger.info("Print Updated Manifest Account List")
                self.logger.info(sanitized_account_list)
                # merge account lists manifest account list and accounts under OUs in the manifest
                sanitized_account_list.extend(accounts_in_ou)
                sanitized_account_list = list(set(sanitized_account_list))  # remove duplicate accounts
                self.logger.info("Print merged account list - accounts in manifest + account under OU in manifest")
                self.logger.info(sanitized_account_list)
                if resource.deploy_method.lower() == 'stack_set':
                    self._deploy_resource(resource, sanitized_account_list)
                else:
                    raise Exception("Unsupported deploy_method: {} found for resource {} and Account: {} in Manifest"
                                    .format(resource.deploy_method, resource.name, sanitized_account_list))
                self.logger.info("<<<<<<<<< FINISH : {} <<<<<<<<<".format(resource.name))
                # Count number of stack sets deployed
                count += 1
            # Report the total number of stack sets deployed in this run.
            data = {"StackSetCount": str(count)}
            self.send.metrics(data)
            return
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    # return list of strings
    def _convert_list_values_to_string(self, _list):
        return list(map(str, _list))

    # monitor list of state machine executions
    def monitor_state_machines_execution_status(self):
        """Poll all queued executions until none is RUNNING; return
        ('SUCCEEDED', '') or ('FAILED', [failed ARNs]) or (None, [])."""
        try:
            if self.list_sm_exec_arns:
                self.logger.info("Starting to monitor the SM Executions: {}".format(self.list_sm_exec_arns))
                final_status = 'RUNNING'
                while final_status == 'RUNNING':
                    for sm_exec_arn in self.list_sm_exec_arns:
                        status = self.state_machine.check_state_machine_status(sm_exec_arn)
                        if status == 'RUNNING':
                            final_status = 'RUNNING'
                            time.sleep(int(wait_time))
                            break
                        else:
                            final_status = 'COMPLETED'
                # All executions finished: classify each one.
                err_flag = False
                failed_sm_execution_list = []
                for sm_exec_arn in self.list_sm_exec_arns:
                    status = self.state_machine.check_state_machine_status(sm_exec_arn)
                    if status == 'SUCCEEDED':
                        continue
                    else:
                        failed_sm_execution_list.append(sm_exec_arn)
                        err_flag = True
                        continue
                if err_flag:
                    return 'FAILED', failed_sm_execution_list
                else:
                    return 'SUCCEEDED', ''
            else:
                self.logger.info("SM Execution List {} is empty, nothing to monitor.".format(self.list_sm_exec_arns))
                return None, []
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def trigger_stackset_state_machine(self):
        """Entry point: parse the manifest, then deploy every stack set resource."""
        try:
            self.manifest = Manifest(self.manifest_file_path)
            self.start_stackset_sm()
            return
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise
if __name__ == '__main__':
if len(sys.argv) > 6:
log_level = sys.argv[1]
wait_time = sys.argv[2]
manifest_file_path = sys.argv[3]
sm_arn_stackset = sys.argv[4]
staging_bucket = sys.argv[5]
exec_mode = sys.argv[6]
logger = Logger(loglevel=log_level)
deploy_stackset = DeployStackSetStateMachine(logger, wait_time, manifest_file_path,
sm_arn_stackset, staging_bucket, exec_mode)
deploy_stackset.trigger_stackset_state_machine()
if exec_mode.lower() != 'sequential':
status, failed_execution_list = deploy_stackset.monitor_state_machines_execution_status()
deploy_stackset.state_machine_failed(status, failed_execution_list)
else:
print('No arguments provided. ')
print('Example: trigger_stackset_sm.py <LOG-LEVEL> <WAIT_TIME> '
'<MANIFEST-FILE-PATH> <SM_ARN_STACKSET> <STAGING_BUCKET> <EXECUTION-MODE>')
sys.exit(2)
| [
"georgebearden@gmail.com"
] | georgebearden@gmail.com |
ab16a287fd304c7b1b4ef1d73c9d1b094b4dd508 | 571361aa406dc135df5b47ac0fafea3e9f6713d5 | /utils.py | 4b3ebb1fd6a0ccb189e3e85e2535551a8376f26c | [] | no_license | ghsaheb/Genetic-Scheduling | 6129c99dee61f8379beff2098b96c0ff2d053d9e | 07db5cdf4a09820d921d423ffc12437b9021b910 | refs/heads/master | 2020-04-08T10:42:10.012518 | 2018-04-19T16:25:34 | 2018-04-19T16:25:34 | 159,279,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | import random
from math import floor
def get_random_professor(course_index, courses_profs):
prof_num = int(floor(random.random() * len(courses_profs[course_index])))
return courses_profs[course_index][prof_num]
| [
"bornaarzi@gmail.com"
] | bornaarzi@gmail.com |
77a0588a1e9e67284d23379d6796e3d1c059dc1c | 7e1c78c1a1b486e211c2a38ac60dbda46e7f99f7 | /orders/backend/models.py | 8b0e2e3d8b8e0a0920130e5ad17f120266edb59d | [] | no_license | andyAG24/pd-diploma | 7c3dca8f326f44e3d8c5d9f554dcadf54c249ed0 | 64b10c24679fea25b617c39f37004a463e30f6d3 | refs/heads/master | 2023-01-31T00:57:04.717490 | 2020-12-09T21:32:02 | 2020-12-09T21:32:02 | 300,041,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,665 | py | from django.db import models
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.utils.translation import ugettext
from django_rest_passwordreset.tokens import get_token_generator
STATE_CHOICES = (
('basket', 'Статус корзины'),
('new', 'Новый'),
('confirmed', 'Подтвержден'),
('assembled', 'Собран'),
('sent', 'Отправлен'),
('delivered', 'Доставлен'),
('canceled', 'Отменен'),
)
USER_TYPE_CHOICES = (
('shop', 'Магазин'),
('buyer', 'Покупатель'),
('staff', 'Сотрудник')
)
# Create your models here.
class UserManager(BaseUserManager):
"""
Миксин для управления пользователями
"""
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
if not email:
raise ValueError('Email required')
email = self.normalize_email(email)
user = self.model(
email=email,
is_active=True,
**extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True')
else:
extra_fields.setdefault('type', 'staff')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True')
return self._create_user(email, password, **extra_fields)
class User(AbstractUser):
"""
Стандартная модель пользователей
"""
REQUIRED_FIELDS = ['email']
objects = UserManager()
USERNAME_FIELD = 'username'
email = models.EmailField(ugettext('Email'), unique=True)
company = models.CharField(max_length=100, verbose_name='Компания', blank=True)
position = models.CharField(max_length=40, verbose_name='Должность', blank=True)
username_validator = UnicodeUsernameValidator()
username = models.CharField(
ugettext('username'),
max_length=150,
help_text=ugettext('Требуется имя пользователя. Буквы, цифры и @/./+/-/_.'),
validators=[username_validator],
error_messages={
'unique': ugettext('Пользователь с таким именем уже существует'),
},
unique=True,
)
is_active = models.BooleanField(
ugettext('active'),
default=False,
help_text=ugettext(
'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'
),
)
type = models.CharField(verbose_name='Тип пользователя', choices=USER_TYPE_CHOICES, max_length=5, default='buyer')
def __str__(self):
return f'{self.username}'
class Meta:
verbose_name = 'Пользователь'
verbose_name_plural = 'Пользователи'
ordering = ('email', )
class Shop(models.Model):
name = models.CharField(max_length=50, verbose_name='Название')
url = models.URLField(verbose_name='Ссылка', null=True, blank=True)
filename = models.CharField(max_length=100)
state = models.BooleanField(verbose_name='Принимает заказы?', default=True)
class Meta:
verbose_name = 'Магазин'
verbose_name_plural = 'Магазины'
ordering = ('-name',)
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=50, verbose_name='Название')
shop = models.ManyToManyField(Shop, verbose_name='Магазины', related_name='categories', blank=True)
class Meta:
verbose_name = 'Категория'
verbose_name_plural = 'Категории'
def __str__(self):
return self.name
class Product(models.Model):
category = models.ForeignKey(Category, verbose_name='Категория', related_name='products', blank=True,
on_delete=models.CASCADE)
name = models.CharField(max_length=50, verbose_name='Название', default=None)
class Meta:
verbose_name = 'Продукт'
verbose_name_plural = 'Продукты'
# ordering = ('-name',)
# def __str__(self):
# return self.name
class ProductInfo(models.Model):
product = models.ForeignKey(Product, verbose_name='Продукт', related_name='product_infos', blank=True,
on_delete=models.CASCADE)
shop = models.ForeignKey(Shop, verbose_name='', related_name='product_infos', blank=True,
on_delete=models.CASCADE)
name = models.CharField(max_length=50, verbose_name='Название')
quantity = models.PositiveIntegerField(verbose_name='Количество')
price = models.PositiveIntegerField(verbose_name='Цена')
price_rrc = models.PositiveIntegerField(verbose_name='Рекомендуемая цена')
external_id = models.PositiveIntegerField(verbose_name='External id', default=None)
model = models.CharField(max_length=80, verbose_name='Модель', blank=True)
class Meta:
verbose_name = 'Информация о продукте'
verbose_name_plural = 'Информация о продуктах'
# ordering = ('-name',)
def __str__(self):
return self.name
class Parameter(models.Model):
name = models.CharField(max_length=50, verbose_name='Название')
class Meta:
verbose_name = 'Параметр'
verbose_name_plural = 'Параметры'
# ordering = ('-name',)
def __str__(self):
return self.name
class ProductParameter(models.Model):
product_info = models.ForeignKey(ProductInfo, verbose_name='Продукт', related_name='product_parameters', blank=True,
on_delete=models.CASCADE)
parameter = models.ForeignKey(Parameter, verbose_name='Параметр', related_name='product_parameters', blank=True,
on_delete=models.CASCADE)
value = models.CharField(max_length=50, verbose_name='Значение')
class Meta:
verbose_name = 'Параметр продукта'
verbose_name_plural = 'Информация о параметрах продуктов'
# ordering = ('-name',)
# def __str__(self):
# return self.name
class Order(models.Model):
user = models.ForeignKey(User, verbose_name='Пользователь', related_name='orders', blank=True, null=True,
on_delete=models.CASCADE)
datetime = models.DateTimeField(auto_now_add=True)
status = models.CharField(verbose_name='Статус', choices=STATE_CHOICES, max_length=15)
class Meta:
verbose_name = 'Заказ'
verbose_name_plural = 'Заказы'
# ordering = ('-name',)
# def __str__(self):
# return self.name
class OrderItem(models.Model):
order = models.ForeignKey(Order, verbose_name='Заказ', related_name='ordered_items', blank=True,
on_delete=models.CASCADE)
product = models.ForeignKey(Product, verbose_name='Продукт', related_name='ordered_items', blank=True,
on_delete=models.CASCADE)
shop = models.ForeignKey(Shop, verbose_name='Магазин', related_name='ordered_items', blank=True,
on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(verbose_name='Количество')
class Meta:
verbose_name = 'Продукт в заказе'
verbose_name_plural = 'Список продуктов заказа'
# ordering = ('-name',)
# def __str__(self):
# return self.name
class Contact(models.Model):
user = models.ForeignKey(User, verbose_name='Пользователь',
related_name='contacts', blank=True, null=True,
on_delete=models.CASCADE)
city = models.CharField(max_length=50, verbose_name='Город')
street = models.CharField(max_length=100, verbose_name='Улица')
house = models.CharField(max_length=15, verbose_name='Дом', blank=True)
structure = models.CharField(max_length=15, verbose_name='Корпус', blank=True)
building = models.CharField(max_length=15, verbose_name='Строение', blank=True)
apartment = models.CharField(max_length=15, verbose_name='Квартира', blank=True)
phone = models.CharField(max_length=20, verbose_name='Телефон')
class Meta:
verbose_name = 'Контакты пользователя'
verbose_name_plural = "Список контактов пользователя"
def __str__(self):
return f'{self.city} {self.street} {self.house}'
class ConfirmEmailToken(models.Model):
class Meta:
verbose_name: 'Токен для подтверждения Email'
verbose_name_plural: 'Токены для подтверждения Email'
@staticmethod
def generate_key():
return get_token_generator().generate_token()
user = models.ForeignKey(
User,
related_name='confirm_email_tokens',
on_delete=models.CASCADE,
verbose_name=ugettext('The User which is associated to this password reset token')
)
created_at = models.DateTimeField(
auto_now_add=True,
verbose_name=ugettext('Time when token was generated')
)
key = models.CharField(
ugettext('Key'),
max_length=64,
db_index=True,
unique=True
)
def save(self, *args, **kwargs):
if not self.key:
self.key = self.generate_key()
return super(ConfirmEmailToken, self).save(*args, **kwargs)
def __str__(self):
return "Password reset token for user {user}".format(user=self.user) | [
"andy.ag.24@gmail.com"
] | andy.ag.24@gmail.com |
8244ee0f70783c9f1817748f49a3a00d505bb0fa | 5683e0be45bbd4c0eff0bc143e1fabb39e3dd2d1 | /facilities/migrations/0011_facility_approved_national_level.py | d5c4978e3bf4e2a7d4d7d154806e8dee847ad9ef | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | permissive | SteveWaweru/mfl_api | be1e6b24a039553447dc6fdc23b588c4a6756a8f | 695001fb48cb1b15661cd480831ae33fe6374532 | refs/heads/master | 2023-05-29T04:27:35.421574 | 2021-10-27T06:07:10 | 2021-10-27T06:07:10 | 205,343,934 | 0 | 5 | MIT | 2023-05-15T00:38:30 | 2019-08-30T08:54:14 | Python | UTF-8 | Python | false | false | 539 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-09-08 14:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facilities', '0010_auto_20190828_2112'),
]
operations = [
migrations.AddField(
model_name='facility',
name='approved_national_level',
field=models.BooleanField(default=False, help_text=b'Has the facility been approved at the national level'),
),
]
| [
"mwaura.marika@gmail.com"
] | mwaura.marika@gmail.com |
144fc7f9ff84f6e9a9f9ae2caa52b32ac79f98ec | 51bcb50645153934604da620fecd5169ba80ba67 | /tiparep2/pupils_large.py | a40494d8df12dd5001733cd4dcb18c9d32131c9d | [] | no_license | newusrlll/tiparep | abfb5bfd63aa9a60a64f22bfd5d7847066568362 | bc7b245a08953363cf4bdd024af952d459a8460b | refs/heads/master | 2023-03-22T03:50:04.041038 | 2021-03-14T12:10:46 | 2021-03-14T12:10:46 | 336,764,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import time
class Pupil():
def __init__(self, f, n, p):
self.f = f
self.n = n
self.p = p
pupils = 0
otlichniki = []
s_a = 0
start_time = time.time()
with open("pupils_large_txt.txt", "r", encoding='utf-8') as file:
for line in file:
data = line.split(' ')
data_pupil = Pupil(data[0], data[1], int(data[2]))
if data_pupil.p == 5:
otlichniki.append(data_pupil.f)
pupils += 1
s_a += data_pupil.p
print('Средняя оценка класса:', (s_a/pupils), '\n\n', 'Лучшие ученики:')
for pupil in otlichniki:
print(pupil)
print('Время выполнения', (time.time()-start_time), 'секунд.') | [
"lebochkin2@yandex.ru"
] | lebochkin2@yandex.ru |
d3e1cb323db751ac2050493151ddde48bb868a90 | 566638e179b0add891e1d5c8900d35ae531af6dc | /alembic_simplelis/versions/6487bfd4c8aa_renamed_columns.py | 6cd0406fe6bfe944447113a2432ef47fb6ff8af3 | [] | no_license | likit/querystud | 9b023a45adfdbf6dc8a3a2f97fefb82b765c8690 | 1702c09ff6931b2cd94d0b55ef42f244c503a68a | refs/heads/master | 2020-03-25T19:25:40.412824 | 2018-08-09T18:08:48 | 2018-08-09T18:08:48 | 144,082,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | """renamed columns
Revision ID: 6487bfd4c8aa
Revises: 8c08809abb09
Create Date: 2018-08-09 15:54:28.683879
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '6487bfd4c8aa'
down_revision = '8c08809abb09'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('labs', sa.Column('reportDate', sa.Date(), nullable=True))
op.add_column('labs', sa.Column('reportTime', sa.Time(), nullable=True))
op.alter_column('labs', 'recvDate',
existing_type=sa.DATE(),
nullable=False)
op.alter_column('labs', 'recvTime',
existing_type=postgresql.TIME(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('labs', 'recvTime',
existing_type=postgresql.TIME(),
nullable=True)
op.alter_column('labs', 'recvDate',
existing_type=sa.DATE(),
nullable=True)
op.drop_column('labs', 'reportTime')
op.drop_column('labs', 'reportDate')
# ### end Alembic commands ###
| [
"likit.pre@mahidol.edu"
] | likit.pre@mahidol.edu |
207502ae33c51cacfd21b43da1ac824d09d91a43 | 1395ee264400c14a30115cdfbe374db0ca979a3d | /agent_DDQN_MA.py | 05e3e64ee016613d98ef882029fd92ac4fea6892 | [] | no_license | FabioTarocco/MultiAgent_ia | 3b08e8c20e478ee7681da5dc998288c37d83f333 | 688cf7b0f3d0117685227e990d45398abfcf41c4 | refs/heads/main | 2023-09-01T06:24:30.694310 | 2021-10-25T15:02:17 | 2021-10-25T15:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,331 | py | """DDQN agent script
This manages the training phase of the off-policy DDQN.
"""
#Simple tag, simple spread, simple push/simple adv
import random
from collections import deque
import time
import yaml
import numpy as np
with open('config.yml', 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
seed = cfg['setup']['seed']
ymlfile.close()
random.seed(seed)
np.random.seed(seed)
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
tf.random.set_seed(seed)
from utils.deepnetwork import DeepNetwork
from utils.memorybuffer import Buffer
class DDQN:
"""
Class for the DQN agent
"""
def __init__(self, env, params):
"""Initialize the agent, its network, optimizer and buffer
Args:
env (gym): gym environment
params (dict): agent parameters (e.g.,dnn structure)
Returns:
None
"""
"""
self.env = env
self.model = DeepNetwork.build(env, params['dnn'])
self.model_tg = DeepNetwork.build(env, params['dnn'])
self.model_tg.set_weights(self.model.get_weights())
self.model_opt = Adam()
self.buffer = Buffer(params['buffer']['size'])
"""
self.env = env
self.model=[]
self.model_tg = []
self.buffer = []
self.model_opt = []
state = self.env.reset()
for i in range (self.env.n):
self.model.append(DeepNetwork.build(env, params['dnn'], len(state[i])))
self.model_tg.append(DeepNetwork.build(env, params['dnn'], len(state[i])))
self.buffer.append(Buffer(params['buffer']['size']))
self.model_tg[i].set_weights(self.model[i].get_weights())
self.model_opt.append(Adam())
def get_action(self, state, i,eps):
"""Get the action to perform
Args:
state (list): agent current state
eps (float): random action probability
Returns:
action (float): sampled actions to perform
"""
"""
One hot encoding
Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
Agente 1 : [0, 0, 0, 0, 1]
Agente 2 : [0, 0, 0, 1, 0]
...
Agente env.n: [0, 0, 0, 0, 1]
il get_action deve tornare una roba simile [[0,0,0,0,1]
[0,0,0,1,0]
...
[0,0,0,0,1]]
Matrice nx5 00001
00010
...
00001
self.discrete_action_space = True
if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
"""
if np.random.uniform() <= eps:
return np.random.randint(0, self.env.action_space[0].n)
#provare con : q_values = self.model[i](np.array([state])).numpy()[0]
q_values = self.model[i](np.array([state])).numpy()[0]
return np.argmax(q_values)
def to_encode(self, action):
"""
Return the OneHotEncoding action of a particular agent
Args:
action (int): Number of action obtained by get_action()
Returns:
encoded (list): Encoded action in the form [0,0,0,0,0] where there will be a 1 in the i-th column corresponding to the number of action.
"""
encoded = np.zeros((self.env.action_space[0].n), dtype=int)
encoded[action] = 1
return encoded
def update(self, gamma, batch_size):
"""Prepare the samples to update the network
Args:
gamma (float): discount factor
batch_size (int): batch size for the off-policy A2C
Returns:
None
"""
for i in range (self.env.n ):
batch_size = min(self.buffer[i].size, batch_size)
states, actions, rewards, obs_states, dones = self.buffer[i].sample(batch_size)
# The updates require shape (n° samples, len(metric))
rewards = rewards.reshape(-1, 1)
dones = dones.reshape(-1, 1)
self.fit(gamma, states, actions, rewards, obs_states, dones, self.model[i], self.model_tg[i], self.model_opt[i])
def fit(self, gamma, states, actions, rewards, obs_states, dones, model, model_tg, model_opt ):
"""We want to minimizing mse of the temporal difference error given by Q(s,a|θ) and the target y = r + γ max_a' Q_tg(s', a'|θ). It addresses the non-stationary target of DQN
Args:
gamma (float): discount factor
states (list): episode's states for the update
actions (list): episode's actions for the update
rewards (list): episode's rewards for the update
obs_states (list): episode's obs_states for the update
dones (list): episode's dones for the update
Returns:
None
"""
with tf.GradientTape() as tape:
# Compute the target y = r + γ max_a' Q_tg(s', a'|θ), where a' is computed with model
obs_qvalues_tg = model_tg(obs_states).numpy()
obs_qvalues = model(obs_states)
obs_actions = tf.math.argmax(obs_qvalues, axis=-1).numpy()
idxs = np.array([[int(i), int(action)] for i, action in enumerate(obs_actions)])
max_obs_qvalues = tf.expand_dims(tf.gather_nd(obs_qvalues_tg, idxs), axis=-1)
y = rewards + gamma * max_obs_qvalues * dones
# Compute values Q(s,a|θ)
qvalues = model(states)
idxs = np.array([[int(i), int(action)] for i, action in enumerate(actions)])
qvalues = tf.expand_dims(tf.gather_nd(qvalues, idxs), axis=-1)
# Compute the loss as mse of Q(s, a) - y
td_errors = tf.math.subtract(qvalues, y)
td_errors = 0.5 * tf.math.square(td_errors)
loss = tf.math.reduce_mean(td_errors)
# Compute the model gradient and update the network
grad = tape.gradient(loss, model.trainable_variables)
model_opt.apply_gradients(zip(grad,model.trainable_variables))
@tf.function
def polyak_update(self, weights, target_weights, tau):
"""Polyak update for the target networks
Args:
weights (list): network weights
target_weights (list): target network weights
tau (float): controls the update rate
Returns:
None
"""
for (w, tw) in zip(weights, target_weights):
tw.assign(w * tau + tw * (1 - tau))
def train(self, tracker, n_episodes, verbose, params, hyperp):
"""Main loop for the agent's training phase
Args:
tracker (object): used to store and save the training stats
n_episodes (int): n° of episodes to perform
verbose (int): how frequent we save the training stats
params (dict): agent parameters (e.g., the critic's gamma)
hyperp (dict): algorithmic specific values (e.g., tau)
Returns:
None
"""
mean_good_reward = deque(maxlen=100)
mean_adv_reward = deque(maxlen=100)
eps, eps_min = params['eps'], params['eps_min']
eps_decay = hyperp['eps_d']
tau, use_polyak, tg_update = hyperp['tau'], hyperp['use_polyak'], hyperp['tg_update']
for e in range(n_episodes):
ep_good_reward,ep_adv_reward, steps = 0, 0, 0
state = self.env.reset()
badTH = 1000000
for s in state:
badTH = min(badTH, s.size)
while steps < 250:
actions = []
index_actions = []
for i in range(self.env.n):
action = self.get_action(state[i],i,eps)
index_actions.append(action)
actions.append(self.to_encode(action))
actions = np.array(actions)
obs_state, obs_reward, done, _ = self.env.step(actions)
for i in range (self.env.n):
self.buffer[i].store(state[i],
index_actions[i],
obs_reward[i],
obs_state[i],
1 - int(done[i])
)
for i in range (self.env.n):
if obs_state[i].size>badTH:
ep_good_reward+=obs_reward[i]
else:
ep_adv_reward+=obs_reward[i]
steps += 1
state = obs_state
if e > params['update_start']:
self.update(
params['gamma'],
params['buffer']['batch']
)
for i in range (self.env.n):
if use_polyak:
# DDPG Polyak update improve stability over the periodical full copy
self.polyak_update(self.model[i].variables, self.model_tg[i].variables, tau)
elif steps % tg_update == 0:
self.model_tg[i].set_weights(self.model[i].get_weights())
if done: break
eps = max(eps_min, eps * eps_decay)
mean_good_reward.append(ep_good_reward)
mean_adv_reward.append(ep_adv_reward)
tracker.update([e, ep_good_reward, ep_adv_reward, np.mean(mean_good_reward), np.mean(mean_adv_reward)])
if e % verbose == 0:
tracker.save_metrics()
#tracker.save_model(self.model, e, mean_good_reward[len(mean_good_reward) - 1], mean_adv_reward[len(mean_adv_reward) - 1])
#if mean_reward[len(mean_reward)-1] < -20 : tracker.save_model(self.model,e,mean_reward[len(mean_reward)-1])
print(f'Ep: {e}, Ep_Rew: {ep_good_reward}, Ep_Adv_Rew: {ep_adv_reward}, Mean_Rew: {np.mean(mean_good_reward)}, Mean_Adv_Rew: {np.mean(mean_adv_reward)}')
| [
"fabio.tarocco.vr@gmail.com"
] | fabio.tarocco.vr@gmail.com |
c65f10f40c7746b6a0f8b226efa07085cf5a26f6 | 3634703ad8685c9bc5d73edf148b7b8722356c0e | /Algorithm/programmers/pg_2016년.py | 872b394b834701e55c74ca2098cf27d1a25d7d18 | [] | no_license | chj3748/TIL | 23d88f97ebc8b1e3a06bb93752dfd2d331d01fd8 | 40a4e524c28945c95f059b0dee598abb686abe04 | refs/heads/master | 2022-02-26T16:43:56.964719 | 2022-02-14T04:43:20 | 2022-02-14T04:43:20 | 235,233,054 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # math | programmers 2016년
# github.com/chj3748
import sys
def input():
return sys.stdin.readline().rstrip()
def solution(a, b):
months = [0, 0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for i in range(1, 14):
months[i] += months[i - 1]
weeks = [ 'THU', 'FRI', 'SAT', 'SUN', 'MON', 'TUE', 'WED']
return weeks[(months[a] + b) % 7] | [
"redsmile123@naver.com"
] | redsmile123@naver.com |
8e54379c9e0e2512323873740a307b5ac6552d0b | de79ece8981f0fd241bcea578e4a534a1213397e | /spirl/configs/few_shot_imitation_learning/kitchen/hierarchical_cl_gc_demo_slide_demo_trained_vae/conf.py | 1f2fcbafcc3b7ab14bb8c70bf240ee9d69987572 | [
"BSD-3-Clause"
] | permissive | ahmeda14960/fist | 3ee684cd7da0bb531d791321f1af09adad386ab4 | baf2b0bfed12a9bc0db9a099abeefad1ef618d1c | refs/heads/master | 2023-08-02T01:35:29.983633 | 2021-09-13T20:07:28 | 2021-09-13T20:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
current_dir = os.path.dirname(os.path.realpath(__file__))
fewshot_dataset = KitchenStateSeqDataset(
data_path='data/kitchen/kitchen-demo-microwave_kettle_hinge_slide.hdf5',
subseq_len=10,
)
env = AttrDict(
task_list = ['microwave', 'kettle', 'slide cabinet', 'hinge cabinet']
)
contra_model_cf = AttrDict(
state_dimension=data_spec.state_dim,
hidden_size=128,
feature_size=32,
)
configuration = {
'model': GoalClSPiRLMdl,
'logger': Logger,
'data_dir': '.',
'epoch_cycles_train': 1,
'evaluator': TopOfNSequenceEvaluator,
'top_of_n_eval': 100,
'top_comp_metric': 'mse',
'batch_size': 128,
'num_epochs': 50,
'fewshot_data': fewshot_dataset,
'fewshot_batch_size': 128,
'contra_config': contra_model_cf,
'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
'finetune_vae': True
}
configuration = AttrDict(configuration)
model_config = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
n_rollout_steps=10,
kl_div_weight=5e-4,
nz_enc=128,
nz_mid=128,
n_processing_layers=5,
cond_decode=True,
# checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_no_slide'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-no-slide.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1 # flat last action from seq gets cropped
| [
"kourosh_hakhamaneshi@berkeley.edu"
] | kourosh_hakhamaneshi@berkeley.edu |
63e68cd682ae71bda971f13061eb08f9b94186e0 | b839791d8d52186b13e8d4f734ad8dc50f514927 | /Exercises/4_5_State_and_Motion/car.py | 95954c43a074620fc3e5e2d0cfbd087a7231a8b7 | [
"MIT"
] | permissive | giuseppe-testa/Computer-Vision-ND | d5eb0d01863212d1da981755384109d4fda7e7e7 | bbb939d6a654953037ad4e9dd4a20ee5d98cf10d | refs/heads/master | 2022-11-28T19:05:42.338805 | 2020-02-17T16:43:00 | 2020-02-17T16:43:00 | 207,527,569 | 0 | 0 | MIT | 2022-11-22T02:24:31 | 2019-09-10T10:14:52 | Jupyter Notebook | UTF-8 | Python | false | false | 4,903 | py | import matplotlib.pyplot as plt
""" The Car class defines a car's movement and keeps track of its state.
The class includes init, move, and display functions.
This class assumes a constant velocity motion model and the state
of the car includes the car's position, and it's velocity.
Attributes:
state: A list of the car's current position [y, x] and velocity [vy, vx]
world: The world that the car is moving within (a 2D list)
"""
class Car(object):
# Car constructor
# Called when you write car.Car(_, _, _)
def __init__(self, position, velocity, world):
"""Initializes Car with some position, velocity, and a world to traverse."""
# Initialize the state
# Position is a list [y, x] and so is velocity [vy, vx]
self.state = [position, velocity]
self.world = world # world is a 2D list of values that range from 0-1
# Set the default color
self.color = 'r'
# Initalize the path
self.path = []
self.path.append(position)
# Move function
def move(self, dt=1):
""" The move function moves the car in the direction of the velocity and
updates the state.
It assumes a circular world and a default dt = 1 (though dt can be any
non-negative integer).
"""
height = len(self.world)
width = len(self.world[0])
position = self.state[0]
velocity = self.state[1]
# Predict the new position [y, x] based on velocity [vx, vy] and time, dt
predicted_position = [
(position[0] + velocity[0]*dt) % height, # default dt = 1
(position[1] + velocity[1]*dt) % width
]
# Update the state
self.state = [predicted_position, velocity]
# Every time the robot moves, add the new position to the path
self.path.append(predicted_position)
# Turn left function
def turn_left(self):
""" Turning left "rotates" the velocity values, so vy = -vx, and vx = vy.
For example, if a car is going right at 1 world cell/sec this means
vy = 0, vx = 1,
and if it turns left, then it should be moving upwards on the world grid
at the same speed!
And up is vy = -1 and vx = 0
"""
# Change the velocity
velocity = self.state[1]
predicted_velocity = [
-velocity[1],
velocity[0]
]
# Update the state velocity
self.state[1] = predicted_velocity
## TODO: Write the turn_right function
## Hint: Use turn_left for inspiration!
def turn_right(self):
""" Turning right "rotates" the velocity values, so vy = vx, and vx = -vy.
For example, if a car is going right at 1 world cell/sec this means
vy = 0, vx = 1,
and if it turns right, then it should be moving upwards on the world grid
at the same speed!
And up is vy = 1 and vx = 0
"""
# Change the velocity
velocity = self.state[1]
predicted_velocity = [
velocity[1],
-velocity[0]
]
# Update the state velocity
self.state[1] = predicted_velocity
# Helper function for displaying the world + robot position
# Assumes the world in a 2D numpy array and position is in the form [y, x]
# path is a list of positions, and it's an optional argument
def display_world(self):
# Store the current position of the car
position = self.state[0]
# Plot grid of values + initial ticks
plt.matshow(self.world, cmap='gray')
# Set minor axes in between the labels
ax=plt.gca()
rows = len(self.world)
cols = len(self.world[0])
ax.set_xticks([x-0.5 for x in range(1,cols)],minor=True )
ax.set_yticks([y-0.5 for y in range(1,rows)],minor=True)
# Plot grid on minor axes in gray (width = 2)
plt.grid(which='minor',ls='-',lw=2, color='gray')
# Create a 'x' character that represents the car
# ha = horizontal alignment, va = verical
ax.text(position[1], position[0], 'x', ha='center', va='center', color=self.color, fontsize=30)
# Draw path if it exists
if(len(self.path) > 1):
# loop through all path indices and draw a dot (unless it's at the car's location)
for pos in self.path:
if(pos != position):
ax.text(pos[1], pos[0], '.', ha='center', va='baseline', color=self.color, fontsize=30)
# Display final result
plt.show()
| [
"giuseppe1.testa@gmail.com"
] | giuseppe1.testa@gmail.com |
764c9237e3797d3e9ab48d5e9665a89d57b9c495 | d7d5af499142d9313a24ac711757a8ad7d6c2ef2 | /dataPreparation/parseReaxsysFile_v2.py | 6f74471536cdbf450ee5edee4136a3fbc4f5bce5 | [
"MIT"
] | permissive | rmrmg/SuzukiConditions | f8fa2bb07c37c4a8d148d6f1c2a1a495c635bb50 | 7ec3cbab269f2e769b4451571775d1fc6e547c45 | refs/heads/master | 2023-04-13T20:47:06.883856 | 2022-02-07T10:53:02 | 2022-02-07T10:53:02 | 352,658,712 | 4 | 2 | MIT | 2021-11-08T10:34:16 | 2021-03-29T13:45:06 | Roff | UTF-8 | Python | false | false | 48,601 | py | import itertools, re
from rdkit import Chem
from rdkit.Chem import AllChem
from collections import defaultdict
from rdkit import RDLogger
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
def parseArgs(argv=None):
    """Parse command-line options for the Suzuki-reaction CSV parser.

    argv: optional list of argument strings; defaults to sys.argv[1:]
        (passing a list makes the function testable without touching sys.argv).
    Returns the argparse.Namespace with the parsed options.
    Exits with an error message when none of the three coupling-type
    flags (--heterohetero / --arylhetero / --arylaryl) is given.
    """
    import argparse
    parser = argparse.ArgumentParser(description='parser of Suzuki reaction in csv from reaxsys')
    parser.add_argument('--heterohetero', action="store_true", default=False, help='include heteroaryl heteroaryl Suzuki coupling data')
    parser.add_argument('--arylhetero', action="store_true", default=False, help='include aryl heteroaryl Suzuki coupling data')
    parser.add_argument('--arylaryl', action="store_true", default=False, help='include aryl aryl Suzuki coupling data')
    parser.add_argument('--withpatents', action='store_true', default=False, help='include also data from patents')
    parser.add_argument('--withtemponly', action='store_true', default=False, help='include only reaction with given temperature')
    parser.add_argument('--withbaseonly', action='store_true', default=False, help='include only reaction with given base')
    parser.add_argument('--withsolvonly', action='store_true', default=False, help='include only reaction with given solvent')
    parser.add_argument('--withyieldonly', action='store_true', default=False, help='include only reaction with given yield')
    parser.add_argument('--withligandonly', action='store_true', default=False, help='include only reaction with given ligand')
    parser.add_argument('--withpdonly', action='store_true', default=False, help='include only reaction with given Pd-source')
    parser.add_argument('--output', default='plikzestatamisuzukiego_5', help='output file name')
    args = parser.parse_args(argv)
    # Bug fix: the original error message mentioned only two of the three
    # required flags although all three were checked.
    if not (args.heterohetero or args.arylhetero or args.arylaryl):
        parser.error("at least one of --heterohetero, --arylhetero or --arylaryl is required")
    return args
def parseFile(fn, separator='\t', includePatents=True):
    """Read one Reaxys export file.

    fn: path to the separator-delimited export file; the first line is the header.
    separator: column separator (tab by default).
    includePatents: when False, data lines containing 'Patent;' are dropped.
    Returns {'header': [column names], 'data': [list of raw column values per line]}.
    """
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open(fn) as fh:
        lines = fh.readlines()
    head = [x.strip() for x in lines[0].split(separator) if x.strip()]
    if includePatents:
        data = [line.split(separator) for line in lines[1:]]
    else:
        data = [line.split(separator) for line in lines[1:] if 'Patent;' not in line]
    return {'header': head, 'data': data}
def _getIdxOfAromNeiOfAtm(atomObj):
    """Return the atom index of the single aromatic (or SP2) neighbour of *atomObj*.

    atomObj: an RDKit Atom (typically a halogen or boron substituent atom).
    Returns the neighbour's index directly when the atom has exactly one
    neighbour; otherwise the unique aromatic neighbour (falling back to SP2
    hybridised neighbours when no aromatic one exists).
    Raises ValueError when the aromatic/SP2 neighbour is not unique.
    Note: the original used a bare ``raise`` outside an except block, which
    produced ``RuntimeError: No active exception to reraise``; callers catch
    broad ``except`` so raising ValueError here stays compatible.
    """
    neis = atomObj.GetNeighbors()
    if len(neis) == 1:
        return neis[0].GetIdx()
    arom = [n for n in neis if n.GetIsAromatic()]
    if len(arom) == 0:
        # No aromatic neighbour: accept SP2 carbons (e.g. vinylic positions)
        arom = [n for n in neis if str(n.GetHybridization()) == 'SP2']
    if len(arom) != 1:
        raise ValueError(
            "expected exactly one aromatic/SP2 neighbour, got %d in molecule %s"
            % (len(arom), Chem.MolToSmiles(atomObj.GetOwningMol())))
    return arom[0].GetIdx()
def _getRingSystem(idx, inRingIdxes):
ri = ()
added=set()
front=[]
##1st ring
for ring in inRingIdxes:
if idx in ring:
ri = ring
added.add(ring)
front = ring
break
##2nd rings
front2=[]
ri2=[]
for ring in inRingIdxes:
if ring in added: continue
if any([idx in front for idx in ring]):
front2.extend( ring)
added.add(ring)
ri2.append(ring)
if not ri2:
return [ ri, ]
#3rd ring
ri3=[]
for ring in inRingIdxes:
if ring in added: continue
if any([idx in front2 for idx in ring]):
added.add(ring)
ri3.append(ring)
if not ri3:
return [ri, ri2]
return [ri, ri2, ri3]
def getRingBenzo(mol, expectedAtoms):
    """Fallback classifier for benzo-fused heteroaromatic rings.

    Called (by getRingNames) when none of the simple ring SMARTS matched.
    mol: RDKit Mol of the substrate.
    expectedAtoms: dict {aromatic-neighbour idx: substituent (halogen/B) idx}.
    Returns {'<ring class>:::<fragment SMILES>': set of atom indices} for each
    benzo-fused ring system that contains one of the expected atoms.
    """
    # Keys are ring classes ("benzo-fused 6-membered ring with 1 N", etc.);
    # inner dict keys are SMARTS patterns, values are replaced by compiled
    # query molecules below.
    allTypes={'benzoin6m1N':{'c1ccc2ncccc2c1':'', 'c1ccc2cnccc2c1':'',},
              'benzoIn6m2N':{'c1ccc2ncncc2c1':'', 'c1ccc2cnncc2c1':'', 'c1ccc2nccnc2c1':'', 'c1ccc2nnccc2c1':'',},
              'benzoIn6m3N':{'c1ccc2nnncc2c1':'', 'c1ccc2nncnc2c1':'',},
              'benzoIn6m4N':{'c1ccc2nnnnc2c1':''},
              'benzoIn5m1het':{'c1cccc2c1[nX3]cc2':'', 'c1cccc2c1occ2':'', 'c1cccc2c1scc2':'', 'c1cccc2c1[se]cc2':'', 'c1cccc2c1c[nX3]c2':'', 'c1cccc2c1coc2':'', 'c1cccc2c1csc2':'', 'c1cccc2c1c[se]c2':''},
              'benzoIn5m2het':{'c1cccc2c1noc2':'', 'c1cccc2c1nsc2':'', 'c1cccc2c1nnc2':'', 'c1cccc2c1nco2':'', 'c1cccc2c1ncs2':'', 'c1cccc2c1ncn2':'',
                      'c1cccc2c1onc2':'', 'c1cccc2c1snc2':'', 'c1cccc2c1[se]cn2':'', 'c1cccc2c1[se]nc2':'',},
              'benzoIn5m3het':{'c1cccc2c1nnn2':'', 'c1cccc2c1non2':'', 'c1cccc2c1onn2':'', 'c1cccc2c1nsn2':'', 'c1cccc2c1snn2':'', 'c1cccc2c1[se]nn2':'', 'c1cccc2c1n[se]n2':''},
             }
    # Compile every SMARTS in-place
    for rclass in allTypes:
        for rsmarts in allTypes[rclass]:
            smamol = Chem.MolFromSmarts( rsmarts)
            if not smamol:
                print("CANNOT PARSE", smamol, rsmarts)
            allTypes[rclass][rsmarts] = smamol
    # Keep only matches that contain one of the expected (substituted) atoms
    foundRings={}
    for rclass in allTypes:
        for rsmarts in allTypes[rclass]:
            allFound = mol.GetSubstructMatches( allTypes[rclass][rsmarts] )
            if allFound:
                #print("RIMOL", mol, allTypes[rclass][rsmarts], "ALLFOUND", allFound,)
                found =[ring for ring in allFound if any(expAtom in ring for expAtom in expectedAtoms)]
                if found:
                    foundRings[rclass+":::"+rsmarts]= list(found)
    # Extract each matched ring (plus its substituent atom) as a sub-molecule
    # by deleting every other atom from an editable copy of mol.
    submols=dict()
    for fRingType in foundRings:
        numAtomInMol=mol.GetNumAtoms()
        fRingClass = fRingType.split(':::')[0]
        for fRingIdx in foundRings[fRingType]:
            editMol = Chem.EditableMol(mol)
            allIdxes= sorted(fRingIdx)
            for atm in expectedAtoms:
                if atm in fRingIdx:
                    allIdxes.append( expectedAtoms[atm] )
            allIdxes=set(allIdxes)
            # Delete from the highest index down so indices stay valid
            for idx in range(numAtomInMol-1, -1, -1):
                if not idx in allIdxes:
                    editMol.RemoveAtom(idx)
            submols[ fRingClass+':::'+Chem.MolToSmiles( editMol.GetMol() ) ]=allIdxes
    return submols
    #return foundRings
def getRingNames(mol, which, neiOfWhich ):
    """
    Classify the (hetero)aromatic ring bearing the reacting substituent.

    mol - mol Object (RDKit)
    which - 'Br', 'I', 'Cl' (halogen partner) or 'B' (boronic-acid partner);
            selects which SMARTS library below is searched
    neiOfWhich - dict {neiOfWhichIdx:idxOfWhich, } mapping the aromatic
            neighbour's atom index to the substituent atom index
    Returns {'<ring family>:::<matched SMARTS>': [atom-index tuples]}.
    When nothing matches, falls back to getRingBenzo for benzo-fused systems.
    """
    #mm=Chem.MolFromSmarts('c1cccc2c1nnc2')
    #if (mol.HasSubstructMatch(mm)):print("DZIALA")
    # SMARTS library for boronic acids: ring family -> {SMARTS: human name}
    allTypesB={
       'FURANS AND BENZOFURANS': { 'OB(O)c1ccco1':'2-iodo-furan', 'OB(O)c1cc2aaaac2o1':'2-iodobenzofuran', 'c1c(B(O)O)cco1':'3-iodobenzofuran', 'c1c(B(O)O)c2:a:a:a:a:c2o1':'3-iodo-furan', },
       'THIOPHENE AND BENZOTHIOPHENE': {'OB(O)c1cccs1':'2-iodo-thiazole', 'OB(O)c1cc2aaaac2s1':'2-iodobenzothiazole', 'c1c(B(O)O)ccs1':'3-iodo-thiazole', 'c1c(B(O)O)c2:a:a:a:a:c2s1':'3-iodobenzothiazole'},
       'PYRROLE AND INDOLE': {'OB(O)c1cccn1': '', 'OB(O)c1cc2aaaac2n1':'', 'c1c(B(O)O)ccn1':'','c1c(B(O)O)c2:a:a:a:a:c2n1':''},
       'ISOOXAZOLE': {'OB(O)c1ccno1':'', 'OB(O)c1onc2:a:a:a:a:c12':'', 'c1c(B(O)O)cno1':'', 'c1cc(B(O)O)no1':'', 'OB(O)c1noc2:a:a:a:a:c12':''},
       'OXAZOLE AND BENZOXAZOLE': { 'OB(O)c1cnco1':'', 'OB(O)c1cocn1':'', 'OB(O)c1ncco1':'', 'OB(O)c1nc2aaaac2o1':''},
       'THIAZOLE AND BENZOTHIAZOLE': { 'OB(O)c1cncs1':'', 'OB(O)c1cscn1':'', 'OB(O)c1nccs1':'', 'OB(O)c1nc2aaaac2s1':'', },
       'ISOTHIAZOLE AND ISOBENZOTHIAZOLE':{'OB(O)c1ccns1':'', 'OB(O)c1snc2:a:a:a:a:c12':'', 'c1c(B(O)O)cns1':'', 'c1cc(B(O)O)ns1':'', 'OB(O)c1nsc2:a:a:a:a:c12':'',},
       'PYRAZOLE AND BENZOPYRAZOLE': { 'OB(O)c1nncc1':'', 'OB(O)c1nnc2aaaac12':'', 'c1c(B(O)O)cnn1':'', 'OB(O)c1cnn2aaaac12':'', 'OB(O)c1cc2aaaan2n1':'', 'OB(O)c1ccnn1':'','OB(O)c1c2aaaac2nn1':'', },
       'IMIDAZOLE AND BENZIMIDAZOLE': { 'OB(O)c1cncn1':'', 'OB(O)c1c2aaaan2cn1':'', 'OB(O)c1cn2aaaac2n1':'', 'OB(O)c1cncn1':'', 'OB(O)c1cnc2aaaan12':'', 'OB(O)c1nccn1':'','OB(O)c1nc2aaaac2n1':'', 'OB(O)c1ncc2aaaan12':'',},
       '1,2,5-OXADIAZOLE': { 'n1oncc1B(O)O':''},
       '1,2,4-OXADIAZOLE': {'OB(O)c1ncno1':'', 'OB(O)c1ncon1':''},
       '1,3,4-OXADIAZOLE': {'n1ncoc1B(O)O':'', },
       '1,2,5-THIADIAZOLE': {'n1sncc1B(O)O':''},
       '1,2,4-THIADIAZOLE':{'OB(O)c1ncns1':'', 'OB(O)c1ncsn1':''},
       '1,3,4-THIADIAZOLE':{'n1ncsc1B(O)O':'' , },
       '1H-1,2,3-TRIAZOLE':{'OB(O)c1cnnn1':'', 'c1c(B(O)O)nnn1':'', 'OB(O)c1c2a=aa=an2nn1':'', },
       '2H-1,2,3-TRIAZOLE':{'OB(O)c1nnnc1':'',},
       '1H-1,2,4-TRIAZOLE':{'OB(O)c1ncnn1':'', 'c1nc(B(O)O)nn1':'', 'OB(O)c1nn2aaaac2n1':'', },
       '4H-1,2,4-TRIAZOLE':{'OB(O)c1nncn1':'', 'OB(O)c1nnc2aaaan12':'', },
       'TETRAZOLE': { 'OB(O)c1nnnn1':'', 'OB(O)c1nnnn1':'', },
       'PYRIDINES': {'OB(O)c1ccccn1':'6mn','OB(O)c1cnccc1':'3-pyridine','OB(O)c1ccncc1':'4-pyridine'},
       'PYRIDAZINE':{'OB(O)c1cccnn1':'6mnn','OB(O)c1cnncc1':'4-pyridazine',},
       'PYRIMIDINE':{'OB(O)c1ncccn1':'2-iodopyrimidine','OB(O)c1ccncn1':'4-iodopyrimidine', 'OB(O)c1cncnc1':'5-iodopyrimidine'},
       'PYRAZINE':{'OB(O)c1cnccn1':'2-iodopyrazine',},
       '1,2,3-triazine': {'OB(O)c1ccnnn1':'4-iodo-1,2,3-triazine', 'OB(O)c1cnnnc1':'5-iodo-1,2,3-triazine',},
       '1,2,4-triazine':{'OB(O)c1nnccn1':'3-iodo-1,2,4-triazine', 'OB(O)c1nncnc1':'6-iodo-1,2,4-triazine', 'OB(O)c1ncnnc1':'5-iodo-1,2,4-triazine'},
       '1,3,5-triazine': {'OB(O)c1ncncn1':'2-iodo-1,3,5-triazine', },
       '6-membrede with 4-heteroatoms': {'OB(O)c1nncnn1':'3-iodo-1,2,4,5-tetrazine'},
       '5-membrede with selenide': {'OB(O)c1ccc[Se]1':''},
    }
    # SMARTS library for halides (I/Br/Cl variants of each ring family)
    allTypesBr={
       'FURANS AND BENZOFURANS': { 'Ic1ccco1':'2-iodo-furan', 'Ic1cc2aaaac2o1':'2-iodobenzofuran', 'c1c(I)cco1':'3-iodobenzofuran', 'c1c(I)c2:a:a:a:a:c2o1':'3-iodo-furan',
            'Brc1ccco1':'2-bromo-furan', 'Brc1cc2aaaac2o1': '2-bromobenzofuran', 'c1c(Br)cco1':'3-bromo-furan', 'c1c(Br)c2:a:a:a:a:c2o1':'3-bromobenzofuran',
            'Clc1ccco1':'2-bromo-furan', 'Clc1cc2aaaac2o1': '2-bromobenzofuran', 'c1c(Cl)cco1':'3-bromo-furan', 'c1c(Cl)c2:a:a:a:a:c2o1':'3-bromobenzofuran'},
       'THIOPHENE AND BENZOTHIOPHENE': {'Ic1cccs1':'2-iodo-thiazole', 'Ic1cc2aaaac2s1':'2-iodobenzothiazole', 'c1c(I)c=cs1':'3-iodo-thiazole','c1c(I)c2:a:a:a:a:c2s1':'3-iodobenzothiazole',
            'Brc1cccs1':'2-bromo-thiazole', 'Brc1cc2aaaac2s1':'2-bromobenzothiazole', 'c1c(Br)ccs1':'3-bromo-thiazole', 'c1c(Br)c2:a:a:a:a:c2s1':'3-bromobenzothiazole',
            'Clc1cccs1':'2-bromo-thiazole', 'Clc1cc2aaaac2s1':'2-bromobenzothiazole', 'c1c(Cl)ccs1':'3-bromo-thiazole', 'c1c(Cl)c2:a:a:a:a:c2s1':'3-bromobenzothiazole',},
       'PYRROLE AND INDOLE': {'Ic1cccn1': '', 'Ic1cc2aaaac2n1':'', 'c1c(I)ccn1':'','c1c(I)c2:a:a:a:a:c2n1':'', 'Brc1cccn1':'','Brc1cc2aaaac2n1':'', 'c1c(Br)ccn1':'', 'c1c(Br)c2:a:a:a:a:c2n1':'',
            'Clc1cccn1':'','Clc1cc2aaaac2n1':'', 'c1c(Cl)ccn1':'', 'c1c(Cl)c2:a:a:a:a:c2n1':''},
       'ISOOXAZOLE': {'Ic1ccno1':'', 'Ic1onc2:a:a:a:a:c12':'', 'c1c(I)cno1':'', 'c1cc(I)no1':'', 'Ic1noc2:a:a:a:a:c12':'', 'Brc1ccno1':'','Brc1onc2:a:a:a:a:c12':'', 'c1c(Br)cno1':'', 'c1cc(Br)no1':'', 'Brc1noc2:a:a:a:a:c12':'',
            'Clc1ccno1':'','Clc1onc2:a:a:a:a:c12':'', 'c1c(Cl)cno1':'', 'c1cc(Cl)no1':'', 'Clc1noc2:a:a:a:a:c12':''},
       'OXAZOLE AND BENZOXAZOLE': { 'Ic1cnco1':'', 'Ic1cocn1':'', 'Ic1ncco1':'', 'Ic1nc2aaaac2o1':'', 'Brc1cnco1':'', 'Brc1cocn1':'', 'Brc1ncco1':'', 'Brc1nc2aaaac2o1':'',
            'Clc1cnco1':'', 'Clc1cocn1':'', 'Clc1ncco1':'', 'Clc1nc2aaaac2o1':'',},
       'THIAZOLE AND BENZOTHIAZOLE': { 'Ic1cncs1':'', 'Ic1cscn1':'', 'Ic1nccs1':'', 'Ic1nc2aaaac2s1':'', 'Brc1cncs1':'', 'Brc1cscn1':'', 'Brc1nccs1':'', 'Brc1nc2aaaac2s1':'',
            'Clc1cncs1':'', 'Clc1cscn1':'', 'Clc1nccs1':'', 'Clc1nc2aaaac2s1':'',},
       'ISOTHIAZOLE AND ISOBENZOTHIAZOLE':{'Ic1ccns1':'', 'Ic1snc2:a:a:a:a:c12':'', 'c1c(I)cns1':'', 'c1cc(I)ns1':'', 'Ic1nsc2:a:a:a:a:c12':'',
            'Brc1ccns1':'', 'Brc1snc2:a:a:a:a:c12':'', 'c1c(Br)cns1':'', 'c1cc(Br)ns1':'', 'Brc1nsc2:a:a:a:a:c12':'',
            'Clc1ccns1':'', 'Clc1snc2:a:a:a:a:c12':'', 'c1c(Cl)cns1':'', 'c1cc(Cl)ns1':'', 'Clc1nsc2:a:a:a:a:c12':'',},
       'PYRAZOLE AND BENZOPYRAZOLE': { 'Ic1nncc1':'', 'Ic1nnc2aaaac12':'', 'c1c(I)cnn1':'', 'Ic1cnn2aaaac12':'', 'Ic1cc2aaaan2n1':'', 'Ic1ccnn1':'','Ic1c2aaaac2nn1':'',
            'BrC1=NNC=C1':'', 'BrC1=NNc2aaaac12':'', 'c1c(Br)cnn1':'', 'Brc1cnn2aaaac12':'', 'Brc1cc2aaaan2n1':'', 'Brc1ccnn1':'', 'Brc1c2aaaac2nn1':'',
            'ClC1=NNC=C1':'', 'ClC1=NNc2aaaac12':'', 'c1c(Cl)cnn1':'', 'Clc1cnn2aaaac12':'', 'Clc1cc2aaaan2n1':'', 'Clc1ccnn1':'', 'Clc1c2aaaac2nn1':'', },
       'IMIDAZOLE AND BENZIMIDAZOLE': { 'Ic1cncn1':'', 'Ic1c2aaaan2cn1':'', 'Ic1cn2aaaac2n1':'', 'Ic1cncn1':'', 'Ic1cnc2aaaan12':'', 'Ic1nccn1':'','Ic1nc2aaaac2n1':'', 'Ic1ncc2aaaan12':'',
            'Brc1cncn1':'', 'Brc1c2aaaan2cn1':'', 'Brc1cn2aaaac2n1':'', 'Brc1cncn1':'', 'Brc1cnc2aaaan12':'','Brc1nccn1':'', 'Brc1nc2aaaac2n1':'', 'Brc1ncc2aaaan12':'',
            'Clc1cncn1':'', 'Clc1c2aaaan2cn1':'', 'Clc1cn2aaaac2n1':'', 'Clc1cncn1':'', 'Clc1cnc2aaaan12':'','Clc1nccn1':'', 'Clc1nc2aaaac2n1':'', 'Clc1ncc2aaaan12':'',},
       '1,2,5-OXADIAZOLE': { 'n1oncc1I':'', 'n1oncc1Br':'', 'n1oncc1Cl':'',},
       '1,2,4-OXADIAZOLE': {'Ic1ncno1':'', 'Ic1ncon1':'', 'Brc1ncno1':'', 'Brc1ncon1':'', 'Clc1ncno1':'', 'Clc1ncon1':'',},
       '1,3,4-OXADIAZOLE': {'n1ncoc1I':'', 'n1ncoc1Br':'', 'n1ncoc1Cl':'',},
       '1,2,5-THIADIAZOLE': {'n1sncc1I':'', 'n1sncc1Br':'', 'n1sncc1Cl':'',},
       '1,2,4-THIADIAZOLE':{'Ic1ncns1':'', 'Ic1ncsn1':'', 'Brc1ncns1':'', 'Brc1ncsn1':'', 'Clc1ncns1':'', 'Clc1ncsn1':'',},
       '1,3,4-THIADIAZOLE':{'n1ncsc1I':'', 'n1ncsc1Br':'', 'n1ncsc1Cl':'',},
       '1H-1,2,3-TRIAZOLE':{'Ic1cnnn1':'', 'c1c(I)nnn1':'', 'Ic1c2a=aa=an2nn1':'', 'Brc1cnnn1':'', 'c1c(Br)nnn1':'', 'Brc1c2aaaan2nn1':'', 'Clc1cnnn1':'', 'c1c(Cl)nnn1':'', 'Clc1c2aaaan2nn1':'',},
       '2H-1,2,3-TRIAZOLE':{'Ic1nnnc1':'','Brc1nnnc1':'', 'Clc1nnnc1':'',},
       '1H-1,2,4-TRIAZOLE':{'Ic1ncnn1':'', 'c1nc(I)nn1':'', 'Ic1nn2aaaac2n1':'', 'Brc1ncnn1':'', 'c1nc(Br)nn1':'', 'Brc1nn2aaaac2n1':'', 'Clc1ncnn1':'', 'c1nc(Cl)nn1':'', 'Clc1nn2aaaac2n1':'',},
       '4H-1,2,4-TRIAZOLE':{'Ic1nncn1':'', 'Ic1nnc2aaaan12':'', 'Brc1nncn1':'', 'Brc1nnc2aaaan12':'', 'Clc1nncn1':'', 'Clc1nnc2aaaan12':'',},
       'TETRAZOLE': { 'Ic1nnnn1':'', 'Ic1nnnn1':'', 'Brc1nnnn1':'', 'Brc1nnnn1':'', },
       '1,2,3,4-THIATRIAZOLE': {'Ic1nnns1':'', 'Brc1nnns1':'',},
       '1,2,3,4-OXATRIAZOLE':{'Ic1nnno1':'', 'Brc1nnno1':'',},
       'with selenide': { 'Brc1ccc[Se]1':'', 'IC1=CC=C[Se]1':'', 'BrC1=C[Se]C=C1':'', 'IC1=C[Se]C=C1':'', 'BrC1=NC=C[Se]1':'', 'IC1=NC=C[Se]1':'', 'BrC1=CN=C[Se]1':'',
            'IC1=CN=C[Se]1':'', 'BrC1=CC=N[Se]1':'', 'IC1=CC=N[Se]1':'', 'BrC1=C[Se]N=C1':'', 'IC1=C[Se]N=C1':'', 'BrC1=C[Se]C=N1':'', 'IC1=C[Se]C=N1':'', 'BrC1=N[Se]C=C1':'',
            'IC1=N[Se]C=C1':'',},
       'PYRIDINES': {'Ic1ccccn1':'2-iodopyridine', 'Brc1ccccn1':'2-bromopyridine', 'Ic1cnccc1':'3-iodopyridine', 'Brc1cnccc1':'3-bromopyridine','Ic1ccncc1':'4-iodopyridine', 'Brc1ccncc1':'4-bromopyridine',
            'Clc1ccccn1':'2-bromopyridine', 'Clc1cnccc1':'3-iodopyridine', 'Clc1ccncc1':'4-bromopyridine',},
       'PYRIDAZINE': {'Ic1cccnn1':'3-iodopyridazine', 'Brc1cccnn1':'3-bromopyridazine', 'Ic1cnncc1':'4-iodopyridazine', 'Brc1cnncc1':'4-bromopyridazine'},
       'PYRIMIDINE':{'Ic1ncccn1':'2-iodopyrimidine','Brc1ncccn1':'2-bromopyrimidine', 'Ic1ccncn1':'4-iodopyrimidine', 'Brc1ccncn1':'4-bromopyrimidine', 'Ic1cncnc1':'5-iodopyrimidine', 'Brc1cncnc1':'5-bromopyrimidine',},
       'PYRAZINE':{'Ic1cnccn1':'2-iodopyrazine', 'Brc1cnccn1':'2-bromopyrazine',},
       '1,2,3-triazine': {'Ic1ccnnn1':'4-iodo-1,2,3-triazine', 'Brc1ccnnn1':'4-bromo-1,2,3-triazine', 'Ic1cnnnc1':'5-iodo-1,2,3-triazine','Brc1cnnnc1':'5-bromo-1,2,3-triazine'},
       '1,2,4-triazine': {'Ic1nnccn1':'3-iodo-1,2,4-triazine', 'Brc1nnccn1':'3-bromo-1,2,4-triazine', 'Ic1cncnn1':'6-iodo-1,2,4-triazine', 'Brc1nncnc1':'6-bromo-1,2,4-triazine',
            'Ic1cnncn1':'5-iodo-1,2,4-triazine', 'Brc1cnncn1':'5-bromo-1,2,4-triazine',},
       '1,3,5-triazine': {'Ic1ncncn1':'2-iodo-1,3,5-triazine', 'Brc1ncncn1':'2-bromo-1,3,5-triazine',},
       '6-membrede with 4-heteroatoms': {'Ic1nncnn1':'3-iodo-1,2,4,5-tetrazine', 'Brc1nncnn1':'3-bromo-1,2,4,5-tetrazine'}
    }
    # Compile all SMARTS in-place (values become query Mol objects)
    for rclass in allTypesBr:
        for rsmarts in allTypesBr[rclass]:
            allTypesBr[rclass][rsmarts] = Chem.MolFromSmarts( rsmarts)
    for rclass in allTypesB:
        for rsmarts in allTypesB[rclass]:
            allTypesB[rclass][rsmarts] = Chem.MolFromSmarts( rsmarts)
    ####
    # Select the library matching the substituent type
    if which == 'Br' or which=='I' or which=='Cl':
        allTypes = allTypesBr
    elif which =='B':
        allTypes = allTypesB
    foundRings={}
    for rclass in allTypes:
        for rsmarts in allTypes[rclass]:
            found = mol.GetSubstructMatches( allTypes[rclass][rsmarts] )
            if found:
                foundRings[rclass+':::'+rsmarts]= list(found)
    ###
    #reduce
    #RN {'Brc1nccn1': ((0, 1, 2, 3, 8, 9),), 'Brc1nc2aaaac2n1': ((0, 1, 2, 3, 4, 5, 6, 7, 8, 9
    # When several patterns hit, drop smaller matches fully contained in a
    # bigger one (e.g. imidazole inside benzimidazole).
    if len(foundRings) > 1: #doreduce
        ringLens=dict()
        for rn in foundRings:
            for ring in foundRings[rn]:
                size=len(ring)
                if not size in ringLens:
                    ringLens[size]=[]
                ringLens[size].append(ring)
        ringsizes= sorted( ringLens.keys(), reverse=True)
        ringToRemove=set()
        for rsizeBig in ringsizes:
            for oneRingBig in ringLens[ rsizeBig]:
                for rsize in ringLens:
                    if rsize >= rsizeBig: continue
                    for idxSmallRing in range( len(ringLens[rsize])-1, -1,-1):
                        if all([a in oneRingBig for a in ringLens[rsize][idxSmallRing]]):
                            rem= ringLens[rsize].pop(idxSmallRing)
                            ringToRemove.add( rem)
        for ringToRm in ringToRemove:
            keyToRm=[]
            for rtype in foundRings:
                if ringToRm in foundRings[rtype]:
                    foundRings[rtype].remove(ringToRm)
                    if not foundRings[rtype]:
                        keyToRm.append(rtype)
            for k in keyToRm:
                foundRings.pop(k)
    # Nothing matched: try the benzo-fused fallback classifier
    if not foundRings:
        benzoRing=getRingBenzo(mol, neiOfWhich)
        #print("BENZO", benzoRing, Chem.MolToSmiles(mol), "N", neiOfWhich)
        return benzoRing
    return foundRings
def getRingType(smi, wantedAtmType='Br' ):
    """Describe the ring system(s) carrying atoms of *wantedAtmType* in a SMILES.

    smi: substrate SMILES string.
    wantedAtmType: element symbol of the substituent ('Br', 'I', 'Cl' or 'B').
    Returns a 2-tuple:
      - list of ring-system descriptions (one per substituent atom), each
        produced by _ringIdxToAtmSymbols over _getRingSystem;
      - dict {aromatic-neighbour idx: substituent idx} for the substituents
        whose aromatic neighbour could be determined.
    """
    #thiophene, furan, benzothiophene, benzofuran, pyrrole, pyrazole, thiazole, quinoline, isoquinoline, pyridine, triazole, benzooxadiazole)
    mol = Chem.MolFromSmiles(smi)
    atoms = [a for a in mol.GetAtoms()]
    symbols = [ a.GetSymbol() for a in atoms]
    idxOfWantedAtmType = [ i for i,s in enumerate(symbols) if s == wantedAtmType]
    #print("SMI", smi, "WANTED", wantedAtmType)
    idxOfNeiOfWantedAtmType = dict()
    toRm=[]
    for idx in idxOfWantedAtmType:
        #{ _getIdxOfAromNeiOfAtm(atoms[i] ):i for i in idxOfWantedAtmType}
        try:
            idxOfAromNei = _getIdxOfAromNeiOfAtm(atoms[idx] )
            idxOfNeiOfWantedAtmType[ idxOfAromNei] = idx
        except:
            ##toremove
            # substituent without a unique aromatic neighbour: drop it
            toRm.append(idx)
    for i in toRm:
        idxOfWantedAtmType.remove(i)
    ri = mol.GetRingInfo()
    riAtoms = ri.AtomRings()
    return [ _ringIdxToAtmSymbols(_getRingSystem(idx, riAtoms), symbols ) for idx in idxOfNeiOfWantedAtmType], idxOfNeiOfWantedAtmType
def _ringIdxToAtmSymbols(ringSystem, symbols, asStr=True ):
#ringSystem = [list, [list, ...], ....]
r1=[symbols[x] for x in ringSystem[0]]
if asStr:
r1=[ x+str(r1.count(x)) for x in sorted(set(r1))]
res= [r1, ]
if len(ringSystem)>1:
for ringLevel in ringSystem[1:]:
thisLevel = [ [symbols[idx] for idx in ring] for ring in ringLevel]
if asStr:
thisLevel = [ [ x+str(ring.count(x)) for x in sorted(set(ring))] for ring in thisLevel]
res.append(thisLevel)
return res
def getRxClass(halos, borons, fullDane, printStat=False):
    """Classify every reaction's halide and boron partners by ring type.

    halos: list (one per reaction) of lists of halide SMILES.
    borons: list (same length) of lists of boronic-acid SMILES.
    fullDane: dict of parallel per-reaction lists ('solvent', 'base', 'temp',
        'ligand', 'Pd', 'special', 'yield', 'litSource', 'raw', ...).
    printStat: when True, print accumulated ring-type statistics at the end.
    Returns a list of JSON strings, one per reaction, bundling ring
    classifications with the reaction conditions.
    NOTE(review): json.dumps is used below but no 'import json' is visible in
    this file's import block -- confirm it is imported elsewhere in the file.
    NOTE(review): rxNameStat and sbsNameStat are called but not defined in the
    visible part of the file -- presumably defined further down.
    """
    lh=len(halos)
    lb=len(borons)
    # The two lists must be parallel (one halide set + one boron set per rx)
    if lh != lb: raise
    statB=dict()
    statBr=dict()
    statRingBr=dict()
    statRingB=dict()
    allRxNames=[]
    onerx=[]
    bRingTypeName=[]
    xRingTypeName=[]
    for i in range(lh):
        # --- halide partner(s) of reaction i ---
        brRing = []
        brRingName=[]
        for s in halos[i]:
            try:
                # Pick the heaviest halogen present; 'I+' alone is not treated
                # as iodide (e.g. hypervalent iodine reagents)
                usedHalogen='Cl'
                if 'Br' in s:
                    usedHalogen='Br'
                if 'I' in s:
                    if 'I+' in s and s.count('I+') == s.count('I'):
                        pass
                    else:
                        usedHalogen='I'
                ri, neiOfHalogen =getRingType(s, wantedAtmType=usedHalogen)
                brRing.append(ri)
                #print("RI", ri[0][0])
                rname = getRingNames(Chem.MolFromSmiles(s), usedHalogen, neiOfHalogen)
                #print("HALOGEN", s, usedHalogen, neiOfHalogen, rname)
                brRingName.extend(rname.keys())
                key='other'
                if len(rname)>1:
                    print("RNhalo", rname)
                    key=frozenset( list(rname.keys()) )
                elif len(rname) == 1:
                    key=list(rname.keys())[0]
                if not key in statRingBr:
                    statRingBr[key]=0
                statRingBr[key]+=1
                #print("RI ng type", ri, s , usedHalogen)
                rid = tuple( ri[0][0])
                if not rid in statBr:
                    statBr[rid]=0
                statBr[rid]+=1
                if key=='other': print("OTHERBORON", s)
            except:
                raise
                print("halo problem", s )
        # --- boron partner(s) of reaction i ---
        bRing = []
        bRingName=[]
        for s in borons[i]:
            try:
                ri, neiOfBoron = getRingType(s, wantedAtmType='B')
                bRing.append(ri)
                rid = tuple(ri[0][0])
                #print("RI", ri, "RID", rid)
                #if not rid in statB:
                #    statB[rid]=0
                #statB[rid]+=1
                rname = getRingNames(Chem.MolFromSmiles(s), 'B', neiOfBoron)
                bRingName.extend(rname.keys())
                #print("BORON", s, rname)
                key='other'
                if len(rname)>1:
                    print("RNboro", rname)
                    key=frozenset( list(rname.keys()) )
                elif len(rname) == 1:
                    key=list(rname.keys())[0]
                if not key in statRingB:
                    statRingB[key]=0
                statRingB[key]+=1
            except:
                raise
                print("B problem", s)
        #print ("ONERX", len(brRingName), len(bRingName), '.'.join(brRingName ), '.'.join( bRingName) )
        brNT='other'
        if brRingName:
            brNT='.'.join(brRingName)
        bNT='other'
        # NOTE(review): this tests bNT ('other', always truthy) rather than
        # bRingName -- looks like a latent typo; preserved as-is.
        if bNT:
            bNT='.'.join(bRingName)
        allRxNames.append( (brNT, bNT) )
        bRingTypeName.append(bNT)
        xRingTypeName.append(brNT)
        ## getFutherinfo:
        #'Pd', 'solvent', 'base', 'ligand', 'special', 'temp', 'time', 'raw', 'yield'
        brGenTyp = list(set([x.split(':::')[0] for x in brRingName]))
        bGenTyp = list(set([x.split(':::')[0] for x in bRingName]))
        #print ("FF", fullDane['raw'][0]['rxInfo']['sbs'] )
        #raise
        # Serialise one record with ring classes plus all reaction conditions
        onerx.append( json.dumps( ("ONERX", brGenTyp, bGenTyp, brRingName, bRingName, fullDane['solvent'][i], fullDane['base'][i], fullDane['temp'][i], fullDane['ligand'][i],
                  fullDane['Pd'][i], fullDane['special'][i].split('; '), fullDane['yield'][i], fullDane['litSource'][i], fullDane['raw'][i]['rxInfo']['sbs'], fullDane['raw'][i]["Reaction"],
                  fullDane['raw'][i]['rxInfo'] ) ) )
    #print(halos, borons)
    if printStat:
        print("STAT B", statB)
        print("STAT Br", statBr)
        print("STAT B")
        for i in sorted(statB, key = lambda x:statB[x]):
            print(i, statB[i])
        print("STAT halogen")
        for i in sorted(statBr, key = lambda x:statBr[x]):
            print(i, statBr[i])
        print("STAT RING X")
        for i in sorted(statRingBr, key = lambda x:statRingBr[x]):
            print(i, statRingBr[i])
        print("STAT RING B")
        for i in sorted(statRingB, key = lambda x:statRingB[x]):
            print(i, statRingB[i])
        rxNameStat( allRxNames)
        #print("TYPES OF BORON")
        sbsNameStat(xRingTypeName, mode='X')
        sbsNameStat(bRingTypeName, mode='B')
    return onerx
def combinateFiles(res, removeDuplicatesByPos=(0,), ):
    """Merge several parsed Reaxys files into one column-oriented dict.

    res: list of dicts as returned by parseFile: {'header': [...], 'data': [...]}.
    removeDuplicatesByPos: column positions whose values must be unique across
        all files; lines repeating an already-seen value in any of these
        columns are dropped (with a diagnostic print).
    Returns {header name: [column values]} over all retained lines.
    Raises ValueError when headers differ between files, or a line's length
    does not match the header (one trailing empty column is tolerated).
    Bug fix: the original used bare ``raise`` statements outside any except
    block, which produced RuntimeError('No active exception to reraise').
    """
    headers = [r['header'] for r in res]
    datas = [r['data'] for r in res]
    for h in headers[1:]:
        if h != headers[0]:
            raise ValueError("inconsistent headers between input files: %s vs %s" % (h, headers[0]))
    header = headers[0]
    data = []
    mustBeUniq = {pos: set() for pos in removeDuplicatesByPos}
    for dfile in datas:
        for line in dfile:
            ignoreLine = False
            for pos in mustBeUniq:
                if line[pos] in mustBeUniq[pos]:
                    print("ignore duplicated", header[pos], "which has value:", line[pos])
                    ignoreLine = True
                else:
                    mustBeUniq[pos].add(line[pos])
            if ignoreLine:
                continue
            if len(line) == len(header):
                data.append(line)
            elif len(line) == len(header) + 1:
                # Tolerate one extra column only when it is empty (trailing separator)
                if line[-1].strip():
                    raise ValueError("unexpected non-empty extra column in line: %s" % line)
                data.append(line[:-1])
            else:
                raise ValueError("line has %d columns, expected %d: %s" % (len(line), len(header), line))
    return {h: [d[i] for d in data] for i, h in enumerate(header)}
def filterOutNotMatched(data, reactions=('suzuki', ), entryName='Reaction'):
    """Drop entries whose reaction SMILES is not recognised as one of *reactions*.

    data: column-oriented dict (header -> list of values), mutated in place;
        a new 'rxInfo' column with isSuzuki() results is added.
    reactions: reaction names to test for; only 'suzuki' is supported.
    entryName: column holding the reaction SMILES.
    Returns the (mutated) data dict.
    """
    positionsToDrop = []
    data['rxInfo'] = []
    for pos, rx in enumerate(data[entryName]):
        if 'suzuki' in reactions:
            rxInfo = isSuzuki(rx)
            if not rxInfo:
                positionsToDrop.append(pos)
        # NOTE: when 'suzuki' is not requested this appends the previous
        # (stale) rxInfo value -- behaviour preserved from the original.
        data['rxInfo'].append(rxInfo)
    print("TO RM", len(positionsToDrop))
    # Pop from the highest index down so earlier positions stay valid
    for pos in reversed(positionsToDrop):
        for column in data:
            data[column].pop(pos)
    return data
def isPd(text):
    """Return True when *text* mentions palladium ('Pd', 'pallad...' or the
    common misspelling 'palad...'), else False."""
    lowered = text.lower()
    return 'Pd' in text or 'pallad' in lowered or 'palad' in lowered
def isBoronic(smi):
    """Return True when the SMILES contains a boron atom, i.e. at least one
    'B' that is not part of a bromine 'Br' token."""
    return smi.count('B') - smi.count('Br') > 0
def isHalogen(smi):
    """Return True when the SMILES contains a halide relevant to Suzuki
    coupling (Br, I or Cl)."""
    return any(smi.count(halide) > 0 for halide in ('Br', 'I', 'Cl'))
def countRings(smi):
    """Count ring closures in a SMILES string.

    Ring-closure digits appear twice (open + close), so the result is the
    number of digit characters outside brackets divided by two.  Digits that
    belong to bracket atoms (isotopes like '[13C]' or explicit maps/charges
    ending in ']') are excluded.  Returns a float.
    Raises ValueError for '%'-style two-digit ring numbering, which this
    simple counter does not support (the original used a bare ``raise``,
    which produced RuntimeError instead).
    """
    if '%' in smi:  # more than two-digit ring numbering
        raise ValueError("two-digit ring numbering ('%%') is not supported: %s" % smi)
    bra = re.findall(r'\[\d+', smi)
    ket = re.findall(r'\d+\]', smi)
    allDigits = re.findall(r'\d+', smi)
    # Remove digit runs that are part of bracket atoms, not ring closures
    for b in bra:
        allDigits.remove(b[1:])
    for k in ket:
        allDigits.remove(k[:-1])
    return sum(len(x) for x in allDigits) / 2
def simpleStat(data):
    """Print per-column statistics of a column-oriented data dict: number of
    non-empty entries, number of unique ones, and Pd-mentioning entries
    (with the individual Pd species when the set is small enough)."""
    for header in data:
        notEmpty = [entry for entry in data[header] if entry.strip()]
        withPd = set(entry for entry in notEmpty if isPd(entry))
        # Split multi-species entries ('a; b; c') and keep only Pd species
        onlyPd = set()
        for multiEntry in withPd:
            for part in multiEntry.split(';'):
                if isPd(part):
                    onlyPd.add(part.strip())
        if len(withPd) < 100_000:
            print(header, "not empty", len(notEmpty), "uniq", len(set(notEmpty)), "with Pd", len(withPd), "only", len(onlyPd), onlyPd)
        else:
            print(header, "not empty", len(notEmpty), "uniq", len(set(notEmpty)), "with Pd", len(withPd))
def isSuzuki(smiles, verbose=False):
    """Decide whether a reaction SMILES ('sbs>>prods') is a Suzuki coupling.

    Applies the generic Ar-X + Ar-B(OH)2 coupling template iteratively (up to
    10 rounds, so multi-step couplings on polyhalogenated substrates are
    recognised) and checks whether any generated product matches a recorded
    product (with or without stereochemistry).
    Returns:
      - a dict with matched products, the participating halide/boron
        substrates, product->substrate maps and the intermediate history,
        when the reaction is recognised;
      - False when it is clearly not a Suzuki coupling (or smiles is empty);
      - None when the iteration limit is exhausted inconclusively.
    """
    # Generic Suzuki template: aryl/alkyl halide + boronic acid -> C-C bond
    suzukiRx=AllChem.ReactionFromSmarts('[#6:1][Br,I,Cl:2].[#6:3][BX3:4]([OX2:5])[OX2:6]>>[#6:1][#6:3].[*:2].[B:4]([O:5])[O:6]')
    history=dict()
    try:
        substrates, products= smiles.split('>>')
    except:
        if not smiles.strip():
            return False
        print("PROBLEM WITH", smiles)
    substrates = substrates.split('.')
    products = products.split('.')
    boronicInitial = set([ s for s in substrates if isBoronic(s)])
    halogenInitial = set([ s for s in substrates if isHalogen(s)])
    prodMols = []
    for p in products:
        #print("P", p, countRings(p), smiles )
        # A coupling product must contain at least two rings
        if countRings(p) < 2:
            continue
        try:
            mol=Chem.MolFromSmiles(p)
            if not mol:
                print("no mol from p")
                raise
            prodMols.append(mol)
        except:
            print("cannot proceed smiles", p)
            #raise
    canonProd = [Chem.MolToSmiles(s) for s in prodMols]
    if not canonProd:
        #print("no prod in ", smiles)
        return False
    canonProdNoStereo = [Chem.MolToSmiles(s, False) for s in prodMols]
    maxIter = 10
    halogen = halogenInitial
    boronic = boronicInitial
    # Iterate: products of one coupling round can re-enter as substrates
    for i in range(maxIter):
        res, resNoStereo = makeAllCombination( suzukiRx, [tuple(halogen), tuple(boronic)])
        if any([p in canonProd for p in res] ) or any([p in canonProdNoStereo for p in resNoStereo ]):
            obtainedTrueProductNoStereo = [p for p in resNoStereo if p in canonProdNoStereo]
            obtainedTrueProduct = [p for p in res if p in canonProd]
            substrateForTrueProduct = {p:res[p] for p in res}
            substrateForTrueProductNoStereo = {p:resNoStereo[p] for p in resNoStereo}
            #print("REN", resNoStereo)
            allHalo =set()
            for pr in resNoStereo:
                _ = [ allHalo.add(s[0]) for s in resNoStereo[pr ] ]
            allBoro =set()
            for pr in resNoStereo:
                _ = [ allBoro.add(s[1]) for s in resNoStereo[pr] ]
            return {'products':tuple(obtainedTrueProduct), 'productsNoStereo':tuple(obtainedTrueProductNoStereo),
                    'halogens':tuple([h for h in halogenInitial if h in allHalo]),
                    'borons':tuple([b for b in boronicInitial if b in allBoro]),
                    'sbs':substrateForTrueProduct, 'sbsNoStereo':substrateForTrueProductNoStereo, 'history':history }
            #return True
        # No direct hit: feed coupling products that still carry X or B back in
        halo = set([s for s in res if isHalogen(s)])
        boro = set([s for s in res if isBoronic(s)])
        for h in halo:
            if not h in history:
                history[h]=[]
            history[h].extend(res[h])
        for b in boro:
            if not b in history:
                history[b] =[]
            history[b].extend(res[b])
        if halo and boro:
            if verbose:
                print("HALOGEN", halogen, boronic)
                print("HALO BORO", halo, boro, "\n canonProd:",canonProd, "\nsmiles", smiles)
                print("RES", res, resNoStereo)
                print("EXP", canonProd, canonProdNoStereo)
            return False
            #raise
        elif halo:
            halogen = halo
        elif boro:
            boronic = boro
        else:
            return False
    return None
def makeAllCombination(reactionObj, substratesListList):
    """Run *reactionObj* on every cross-product of the given substrate sets.

    reactionObj: an RDKit ChemicalReaction.
    substratesListList: list of substrate-SMILES collections, one per
        reaction component (their cartesian product is enumerated).
    Returns two dicts {product SMILES: tuple of substrate-SMILES tuples}:
    the first keyed by stereo-aware SMILES, the second stereo-free.
    """
    withStereo = defaultdict(set)
    noStereo = defaultdict(set)
    for combo in itertools.product(*substratesListList):
        mols = [Chem.MolFromSmiles(smi) for smi in combo]
        if any(m is None for m in mols):
            # unparsable substrate: report and skip this combination
            print("sbsList", combo)
            continue
        prods = [hit[0] for hit in reactionObj.RunReactants(mols) if hit]
        for prod in prods:
            Chem.SanitizeMol(prod)
        for prod in prods:
            withStereo[Chem.MolToSmiles(prod, True)].add(tuple(combo))
            noStereo[Chem.MolToSmiles(prod, False)].add(tuple(combo))
    return ({smi: tuple(sources) for smi, sources in withStereo.items()},
            {smi: tuple(sources) for smi, sources in noStereo.items()})
def findPd(data, pos, ignoredHeader={'Fulltext of reaction', 'Example title'}):
    """Collect all Pd species mentioned in any column of entry *pos* and map
    them to canonical short names.

    data: column-oriented dict (header -> list of values).
    pos: row index.
    ignoredHeader: declared but unused -- the skip set is hard-coded below;
        NOTE(review): likely a leftover, confirm before removing.
    Returns the unique list of (canonicalised where known) Pd-source names.
    """
    withPd=[ ]
    #entries =[ data[head][lid] for head in data.keys() if isPd(data[head][lid]) ]
    for header in data.keys():
        if header in {'Fulltext of reaction', 'Example title', 'rxInfo'}:
            continue
        entry= data[header][pos]
        _ = [ withPd.append( e ) for e in entry.split('; ') if isPd(e) ]
    ##make canonical name:
    # Maps the many Reaxys spellings of common Pd catalysts to short names
    canonName={'tetrakis(triphenylphosphine) palladium(0)':'Pd[P(Ph)3]4', '1: tetrakis(triphenylphosphine) palladium(0)':'Pd[P(Ph)3]4', 'tetrakis(triphenylphosphine) palladium(0)':'Pd[P(Ph)3]4',
               'tetrakis (triphenylphosphine) palladium (0)':'Pd[P(Ph)3]4','tetrakistriphenylphosphanepalladium(0)':'Pd[P(Ph)3]4', 'tetrakis(triphenylphosphine)palladium (0)':'Pd[P(Ph)3]4',
               'tetrakis(triphenylphosphine) palladium(0) / N,N-dimethyl-formamide':'Pd[P(Ph)3]4', 'tetrakis-(triphenylphosphino)-palladium(0)':'Pd[P(Ph)3]4',
               'tetrakistriphenylphosphanepalladium(0)':'Pd[P(Ph)3]4', 'tetrakis (triphenylphosphine) palladium (0)':'Pd[P(Ph)3]4', 'tetrakis(triphenylphosphine)palladium (0)':'Pd[P(Ph)3]4',
               'tetrakis(triphenylphosphine) palladium(0) / N,N-dimethyl-formamide':'Pd[P(Ph)3]4', 'tetrakis-(triphenylphosphino)-palladium(0)':'Pd[P(Ph)3]4',
               'bis(tri-tert-butylphosphine)palladium(0)':'Pd[P(Ph)3]4',
               '[1,1-bis(diphenylphosphino)ferrocene]-dichloropalladium': 'Pd(dppf)Cl2', "[1,1'-bis(diphenylphosphino)ferrocene]dichloropalladium(II)": 'Pd(dppf)Cl2',
               "(1,1'-bis(diphenylphosphino)ferrocene)palladium(II) dichloride":'Pd(dppf)Cl2', "(1,1'-bis(diphenylphosphino)ferrocene)palladium(II) dichloride / 1,4-dioxane":'Pd(dppf)Cl2',
               "1,1'-bis(diphenylphosphino)ferrocene-palladium(II)dichloride dichloromethane complex":'Pd(dppf)Cl2',
               "1: sodium carbonate / (1,1'-bis(diphenylphosphino)ferrocene)palladium(II) dichloride / DMF (N,N-dimethyl-formamide)":'Pd(dppf)Cl2',
               '1: sodium carbonate / tetrakis(triphenylphosphine) palladium(0) / 1,2-dimethoxyethane':'Pd(dppf)Cl2', "dichloro(1,1'-bis(diphenylphosphanyl)ferrocene)palladium(II)*CH2Cl2":'Pd(dppf)Cl2',
               'palladium bis[bis(diphenylphosphino)ferrocene] dichloride':'Pd(dppf)Cl2',
               'bis-triphenylphosphine-palladium(II) chloride':'Pd[P(Ph)3]2Cl2', 'bis(triphenylphosphine)palladium(II) chloride':'Pd[P(Ph)3]2Cl2', 'bis(triphenylphosphine)palladium(II)-chloride':'Pd[P(Ph)3]2Cl2',
               'bis-triphenylphosphine-palladium(II) chloride':'Pd[P(Ph)3]2Cl2', 'bis(triphenylphosphine)palladium(II) dichloride':'Pd[P(Ph)3]2Cl2', 'dichlorobis(triphenylphosphine)palladium[II]':'Pd[P(Ph)3]2Cl2',
               'trans-bis(triphenylphosphine)palladium dichloride':'Pd[P(Ph)3]2Cl2', 'dichlorobis(triphenylphosphine)palladium(II)':'Pd[P(Ph)3]2Cl2',
               'tris-(dibenzylideneacetone)dipalladium(0)':'Pd2(dba)3', 'bis(dibenzylideneacetone)-palladium(0)':'Pd2(dba)3', 'tris(dibenzylideneacetone)dipalladium(0) chloroform complex':'Pd2(dba)3',
               'tris(dibenzylideneacetone)dipalladium (0)':'Pd2(dba)3', '"tris-(dibenzylideneacetone)dipalladium(0)':'Pd2(dba)3',
               'bis(di-tert-?butyl(4-?dimethylaminophenyl)?phosphine)?dichloropalladium(II)': 'Pd(amphos)Cl2', 'bis[di-t-butyl(p-dimethylaminophenyl)phosphino]palladium (II) Dichloride':'Pd(amphos)Cl2',
               '[1,3-bis(2,6-diisopropylphenyl)imidazol-2-ylidene](3-chloropyridyl)palladium(ll) dichloride':'PEPPSI-IPr-PdCl2', '[1,3-bis(2,6-diisopropylphenyl)imidazol-2-ylidene](3-chloropyridyl)palladium(II) dichloride':'PEPPSI-IPr-PdCl2',
               '[1,3-bis(2,6-diisopropylphenyl)imidazol-2-ylidene](3chloro-pyridyl)palladium(II) dichloride':'PEPPSI-IPr-PdCl2',
               }
    toRet= []
    for pd in withPd:
        if pd in canonName:
            toRet.append( canonName[pd] )
        else:
            toRet.append(pd)
    #withPd =[ canonName[x] for x in withPd]
    print("WITHPD", toRet)
    return list(set(toRet))
def findSolvent(data, pos):
    """Return the sorted unique solvent names recorded for row *pos* in the
    'Solvent (Reaction Details)' column, or an empty list when none."""
    raw = data['Solvent (Reaction Details)'][pos]
    names = [part.strip() for part in raw.split(';') if part]
    if not names:
        return names
    return sorted(set(names))
def findTemp(data, pos):
    """Extract the integer temperatures (deg C) for row *pos* from the
    'Temperature (Reaction Details) [C]' column; non-numeric tokens
    (e.g. 'reflux', '-') are skipped."""
    temps = []
    for token in data['Temperature (Reaction Details) [C]'][pos].split():
        try:
            temps.append(int(token))
        except ValueError:
            continue
    return temps
def findTime(data, pos):
    """Return the raw reaction-time entry (hours, unparsed string) for row *pos*."""
    return data['Time (Reaction Details) [h]'][pos]
def findSpecialCare(data, pos):
    """Return the raw 'Other Conditions' entry (inert atmosphere, etc.) for row *pos*."""
    return data['Other Conditions'][pos]
def findBase(data, pos):
    """Heuristically extract base names for row *pos* from the 'Reactant' and
    'Reagent' columns.

    Entry names are normalised (hydrates stripped, 'cesium'->'caesium', ...),
    then entries mentioning Pd, halogenated substrates, acids or ammonium
    salts are rejected, and the rest is kept when it contains a known base
    keyword (carbonate, fluoride, hydroxide, phosphate, ...) or 'acetate'.
    Returns a list of base names, or an empty tuple (printing a diagnostic)
    when none is found.
    """
    allowedHeader = {'Reactant', 'Reagent'}
    expectedKeywords = ('caesium', 'carbonate', 'fluoride', 'hydroxide', 'amine', 'ammonium', 'phosphate', 'anolate')
    bases = []
    for header in data:
        if header not in allowedHeader:
            continue
        for rawEntry in data[header][pos].split(';'):
            # Normalise spelling variants before keyword matching
            entry = re.sub(' [a-z]*hydrate', '', rawEntry)
            entry = entry.replace('cesium', 'caesium').replace('tribase', '').replace('barium(II)', 'barium').replace('"', '').replace(' n ', '')
            entry = entry.replace('barium dihydroxide', 'barium hydroxide').replace('tribasic', '')
            entry = entry.strip()
            # Reject catalysts, substrates, acids and ammonium/iodine species
            if 'pallad' in entry or 'bromo' in entry or 'iodo' in entry or 'ammonium' in entry or ' acid' in entry or 'iodine' in entry:
                continue
            if 'Bromo' in entry or 'midazolium hexafluorophosphate' in entry:
                continue
            if any(keyword in entry for keyword in expectedKeywords):
                bases.append(entry)
            if 'acetate' in entry and 'pallad' not in entry:
                bases.append(entry.strip())
    if bases:
        return bases
    print("nobase", [data[x][pos] for x in data])
    return ()
def findLigand(data, pos, pd):
    """Collect phosphine-type ligand names for entry *pos*.

    Scans every column except a few bookkeeping ones for entries that look
    like phosphorus ligands, canonicalises the many Reaxys spellings to short
    names (PPh3, dppf, SPhos, ...) and additionally infers PPh3/dppf from the
    palladium source names in *pd* when present.  Returns a de-duplicated
    list (order not guaranteed) and prints it for diagnostics.

    Fix: four "tuples" below were written without a trailing comma, e.g.
    ``entry in ('tributylphosphine')`` — that is a *string*, so ``in`` did a
    substring test and e.g. 'phosphine' was silently renamed to 'PBu3'.
    They are now proper one-element tuples.
    """
    headerToIgnore = {'Fulltext of reaction', 'References', 'Product', 'rxInfo'}
    found = []
    toIgnore = {'1-(Di-tert-butyl-phosphinoyl)-2-iodo-benzene', 'phosphoric acid', 'diethyl 1-(2-bromophenyl)-3-oxopropylphosphonate', 'General procedure for Suzuki coupling in position 2 (main text Scheme 6, method A).'}
    for header in data:
        if header in headerToIgnore:
            continue
        entries = data[header][pos]
        for entry in entries.split(';'):
            # crude phosphorus-ligand detector; Pd complexes and phosphate
            # bases are handled elsewhere and skipped here
            if 'pos' in entry or 'phos' in entry:
                if 'Pd' in entry or 'palladium' in entry or 'phosphate' in entry:
                    continue
                entry = entry.strip()
                if entry in ('triphenylphosphine', 'triphenylphosphine / 1,4-dioxane'):
                    entry = 'PPh3'
                if entry in {"1,1'-bis(diphenylphosphino)ferrocene", "1,1'-bis-(diphenylphosphino)ferrocene"}:
                    entry = 'dppf'
                if entry in ('1,4-di(diphenylphosphino)-butane', ):
                    entry = 'dppb'
                if entry in {'tris-(m-sulfonatophenyl)phosphine', 'triphenylphosphine-3,3?,3?-trisulfonic acid trisodium salt', 'triphenylphosphine trisulfonic acid',
                             'trisodium tris(3-sulfophenyl)phosphine', }:
                    entry = 'TPPTS'
                if entry in {"tricyclohexylphosphine", "tricyclohexylphosphine tetrafluoroborate", "tricyclohexylphosphine tetrafluoroborate",
                             "tris(cyclohexyl)phosphonium tetrafluoroborate"}:
                    entry = 'P(cychex)3'
                if entry in ('1,2-bis-(diphenylphosphino)ethane',):  # fix: was a bare string
                    entry = 'dppe'
                if entry in ('tributylphosphine',):  # fix: was a bare string
                    entry = 'PBu3'
                if entry in ('tri-tert-butyl phosphine', "tris[tert-butyl]phosphonium tetrafluoroborate", "tri-t-butylphosphonium tetraphenylborate complex",
                             "tri tert-butylphosphoniumtetrafluoroborate", "tri tert-butylphosphoniumtetrafluoroborate", "tri-tertiary-butyl phosphonium tetrafluoroborate",
                             "tri-tert-butylphosphonium tetrafluoroborate"):
                    entry = 'PtBu3'
                if entry in ('tris-(o-tolyl)phosphine', 'tris(2-methylphenyl)phosphine'):
                    entry = 'P(o-Tol)3'
                if entry in ("dicyclohexyl-(2',6'-dimethoxybiphenyl-2-yl)-phosphane", "dicyclohexyl(2?,6?-dimethoxy-[1,1?-biphenyl]-3-yl)phosphine", "2-dicyclohexylphosphino-2?,6?-dimethoxybiphenyl",
                             "2-dicyclohexylphosphino-2?,6?-diisopropoxy-1,1?-biphenyl"):
                    entry = 'SPhos'
                if entry in ('(4-(N,N-dimethylamino)phenyl)-di-tert-butylphosphine',):  # fix: was a bare string
                    entry = 'APhos'
                if entry in ('4,5-bis(diphenylphos4,5-bis(diphenylphosphino)-9,9-dimethylxanthenephino)-9,9-dimethylxanthene',):  # fix: was a bare string
                    entry = 'xantphos'
                if entry in ("(1RS,2RS,3SR,4SR)-1,2,3,4-tetrakis((diphenylphosphanyl)methyl)cyclopentane",):
                    entry = 'tedicyp'
                if entry.endswith('oxide'):
                    continue
                if not entry in toIgnore:
                    found.append(entry)
    if pd:
        # ligands bound inside the Pd source itself also count
        if any(["P(Ph)3" in pdentry or 'PPh3' in pdentry for pdentry in pd]):
            found.append("PPh3")
        if any(['dppf' in pdentry for pdentry in pd]):
            found.append('dppf')
    found = list(set(found))
    print("LIGANDS", *found, sep='\t')
    return found
def findYield(data, pos):
    """Return the numerical yields for entry *pos* as a tuple of floats.

    When the 'Yield (numerical)' field is missing or not parseable, prints
    whichever raw yield fields are non-empty and returns '' (callers treat
    the empty string as "no yield").
    """
    try:
        return tuple(map(float, data['Yield (numerical)'][pos].split(';')))
    # Fix: narrowed from a bare ``except:`` to the failures this lookup can
    # actually produce (missing column / non-numeric text).
    except (KeyError, ValueError):
        if data['Yield'][pos] or data['Yield (numerical)'][pos] or data['Yield (optical)'][pos]:
            print("YIELD1", data['Yield'][pos])
            print("YIELD2", data['Yield (numerical)'][pos])
            print("YIELD3", data['Yield (optical)'][pos])
        return ""
def findSource(data, pos):
    """Classify the literature reference of entry *pos*.

    Returns ('PATENT', ref), ('ARTICLE', ref) or ('OTHER', ref); 'Patent'
    takes precedence when both markers appear.
    """
    reference = data['References'][pos]
    for marker in ('Patent', 'Article'):
        if marker in reference:
            return (marker.upper(), reference)
    return ("OTHER", reference)
def getCNR(data, pos):
    """Extract the (reaction id, CNR) string pair from the two Reaxys link columns."""
    reaction_link = data['Reaction: Links to Reaxys'][pos]
    reaction_id = reaction_link.split('RX.ID=')[1].split('&')[0]
    entry_link = data['Links to Reaxys'][pos]
    entry_cnr = entry_link.split('CNR=')[1].split('&')[0]
    return str(reaction_id), str(entry_cnr)
def getCorrection(cdict):
    """Load manual-correction files into {key: {(rxid, cnr): value}}.

    *cdict* maps a correction kind (e.g. 'base', 'ligand') to a TAB-separated
    file whose first three columns are reaction id, CNR and the corrected
    value.
    """
    corrections = {}
    for kind, path in cdict.items():
        table = {}
        for line in open(path):
            rxid, cnr, value = line.split('\t')[:3]
            table[(str(rxid), str(cnr))] = value
        corrections[kind] = table
    return corrections
def entryStat(data, limits, removeDuplicate=True, correctionFiles=None ):
    """Aggregate per-reaction condition records from the parsed Reaxys table.

    For every row it extracts Pd source, solvent, base, ligand, temperature,
    time, yield and literature source, applies optional manual corrections
    (*correctionFiles*: {'base': path, 'ligand': path}), filters rows by the
    *limits* flags and optionally removes duplicates.  Returns a dict of
    parallel lists, one element per kept reaction.
    """
    correction=False
    onlyWithYield = limits.withyieldonly
    if correctionFiles:
        correction=getCorrection(correctionFiles)
    # number of rows = length of any one column
    numEntry = len(data[ tuple(data.keys())[0] ])
    ##temp, time, base, solvent, Pd (ligand)
    uniqSet= set()
    dane = {'Pd':[], 'solvent':[], 'base':[], 'ligand':[], 'special':[], 'temp':[], 'time':[], 'raw':[], 'yield':[], 'litSource':[] }
    #print("NOBASE", '\t'.join([k for k in data]), sep='\t' )
    for lid in range(numEntry):
        withPd=findPd(data, lid)
        #print("pd", withPd)
        solvent=findSolvent(data, lid)
        temp = findTemp(data,lid)
        time = findTime(data,lid)
        special = findSpecialCare(data,lid)
        base= findBase(data,lid)
        rxyield = findYield(data, lid)
        rxidcnr = getCNR(data,lid)
        if correction:
            # manual base correction; sentinel texts drop the entry entirely
            if 'base' in correction and rxidcnr in correction['base']:
                base = correction['base'][rxidcnr].strip()
                if 'o base' in base or 'cant find paper' in base or 'not suzuki' in base: continue
                if ' or ' in base:
                    base = base.split(' or ')[0]
                base = base.split(', ')
                print("NO BASE", base) #, "\n", [(x,data[x][lid]) for x in data])
                #raise
            if 'ligand' in correction and rxidcnr in correction['ligand']:
                ligand = correction['ligand'][rxidcnr].strip()
                if 'exclude' in ligand or 'bad data' in ligand:
                    continue
        # NOTE(review): this unconditionally overwrites any ligand taken from
        # the correction file just above — confirm whether that is intended.
        ligand= findLigand(data, lid, withPd)
        if not base and limits.withbaseonly:
            continue
        if onlyWithYield and not rxyield:
            continue
        if not ligand and limits.withligandonly:
            continue
        if not temp and limits.withtemponly:
            continue
        if not solvent and limits.withsolvonly:
            continue
        if not withPd and limits.withpdonly:
            continue
        if removeDuplicate:
            # duplicate key: same Pd/ligand/base/solvent and same substrates
            thisId = (tuple(withPd), tuple(ligand), tuple(base), tuple(solvent), str(data['rxInfo'][lid]['sbs']) )
            if thisId in uniqSet:
                continue
            uniqSet.add( thisId)
        # NOTE(review): this debug print raises TypeError when no
        # correctionFiles were given (correction is False, not a dict).
        print("LIGAND==", ligand, rxidcnr, rxidcnr in correction['ligand'], "PD", withPd)
        dane['Pd'].append( tuple(withPd))
        dane['solvent'].append(solvent)
        dane['base'].append(base)
        dane['ligand'].append(ligand)
        dane['special'].append(special)
        dane['temp'].append(temp)
        dane['time'].append(time)
        dane['yield'].append(rxyield)
        dane['raw'].append({k:data[k][lid] for k in data})
        dane['litSource'].append( findSource(data,lid) )
        #if not ligand and not( 'Pd[P(Ph)3]4' in withPd):
        #    if any(['phos' in pdcat or 'Pd(dppf)Cl2' in pdcat or 'Pd[P(Ph)3]2Cl2' in pdcat for pdcat in withPd]): continue
        #    print( 'NOLIGAND:', withPd, '\t'.join([str(data[k][lid]) for k in data]), sep='\t' )
        #if not base:
        #    print("NOBASE", '\t'.join([str(data[k][lid]) for k in data]), sep='\t' )
    return dane
    #print("Pd", withPd, 'S:', solvent, 'base', base, 'L:', ligand, )
#    print(withPd)
if __name__ == "__main__":
    # Entry point: collect the requested Reaxys export files, parse and merge
    # them, keep only Suzuki reactions, aggregate the condition statistics and
    # dump the result (plus a one-reaction-per-line file) to parser.output.
    parser=parseArgs()
    print("P", parser)
    files=[]
    # hetero-hetero, hetero-aryl and aryl-aryl exports are selected by flags
    if parser.heterohetero:
        prefixheterohetero = 'downloadedRx/hetero-hetero/'
        heterohetero = ['39074585_20200310_190414_098.xls', '39074585_20200310_191650_204.xls', '39074585_20200310_194241_493.xls', 'Suzuki_Har_1-2500.csv', 'Suzuki_Har_2501-5000.csv']
        for i in heterohetero:
            files.append(prefixheterohetero+i)
    if parser.arylhetero:
        prefixarylhetero = 'downloadedRx/hetero-aryl/'
        arylhetero = ['aga1.csv', 'aga2.csv', 'aga3.csv', 'aga4.csv', 'aga5.csv',]
        for i in arylhetero:
            files.append(prefixarylhetero+i)
    if parser.arylaryl:
        prefixarylaryl='downloadedRx/aryl-aryl/'
        arylaryl=['Reaxys_Exp_20200424_184807.csv', 'Reaxys_Exp_20200424_201155.csv', 'Reaxys_Exp_20200425_011430.csv', 'Reaxys_Exp_20200425_060051.csv', 'Reaxys_Exp_20200427_151519.csv']
        for i in arylaryl:
            files.append(prefixarylaryl+i)
    res = [ parseFile(fn, includePatents=parser.withpatents) for fn in files]
    print( [ len(r['header']) for r in res])
    #def combinateFiles(res, removeDuplicatesByPos=(0,), ):
    data= combinateFiles(res, removeDuplicatesByPos=[] )
    print("DATA", data.keys())
    # keep Suzuki couplings only
    data=filterOutNotMatched( data, reactions=('suzuki', ), entryName='Reaction' )
    print("DATA", data.keys())
    print("+++++++++++++++++")
    #simpleStat(data)
    dane=entryStat(data, parser, removeDuplicate=True, correctionFiles={'base':'./downloadedRx/nobase.csv', 'ligand':'./downloadedRx/noligand.csv'} )
    import json
    json.dump(dane, open(parser.output, 'w') )
    print("h", data.keys() )
    # classify each kept reaction by its halogen/boron substrates
    allrx=getRxClass( [lst['rxInfo']['halogens'] for lst in dane['raw']], [ lst['rxInfo']['borons'] for lst in dane['raw'] ], dane)
    fnw= open( parser.output+'.onerx', 'w')
    for i in allrx:
        print(i, file=fnw)
fnw.close() | [
"rmrmg@wp.pl"
] | rmrmg@wp.pl |
634ad361e16f38381b57bf4f8de237ed67db1539 | 5e9a2f34c833360818ed12e2a4fd33996d14fcb6 | /MainApp/models.py | 7ae91b6e8af831cdac94211a2e92934b3606522e | [] | no_license | sainishreya/Eshop | e308bce7cfe45df3eb1244203cc3baa3964a4ac8 | f95decba0edaccb134e954b1d5cbb5b069df7b12 | refs/heads/master | 2020-06-18T03:14:59.500488 | 2019-07-10T06:43:04 | 2019-07-10T06:43:04 | 196,147,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | from django.db import models
from django.contrib.auth.forms import User
class Brand(models.Model):
    # Product brand lookup table.
    # NOTE(review): ``bid=models.AutoField`` assigns the Field *class*, not an
    # instance (missing parentheses), so it is not a usable column; Django's
    # implicit ``id`` primary key is what actually exists — confirm and clean up.
    bid=models.AutoField
    bname=models.CharField(max_length=30)
    def __str__(self):
        return self.bname
class Category(models.Model):
    # Product category lookup table.
    # NOTE(review): ``cid=models.AutoField`` is the Field class, not an
    # instance (missing parentheses) — same issue as Brand.bid; confirm.
    cid=models.AutoField
    cname=models.CharField(max_length=30)
    def __str__(self):
        return self.cname
class Product(models.Model):
    """Catalogue item belonging to a Category and a Brand."""
    # NOTE(review): ``id=models.AutoField`` assigns the Field class, not an
    # instance (missing parentheses); Django's implicit ``id`` pk is used.
    id=models.AutoField
    # Fix: on_delete must be a callable such as models.CASCADE; the string
    # "CASCADE" raises TypeError as soon as a related row is deleted (and
    # fails Django >= 3.1's field checks outright).
    cat=models.ForeignKey(Category, on_delete=models.CASCADE, default=None)
    name=models.CharField(max_length=30)
    description=models.TextField()
    brand=models.ForeignKey(Brand, on_delete=models.CASCADE, default=None)
    basicPrice=models.IntegerField()
    discount=models.IntegerField()
    price=models.IntegerField()
    color=models.CharField(max_length=20)
    img1=models.ImageField(upload_to='images')
    img2 = models.ImageField(upload_to='images', default=None)
    img3 = models.ImageField(upload_to='images', default=None)
    img4 = models.ImageField(upload_to='images', default=None)
    date=models.DateTimeField(auto_now_add=True)    # creation timestamp
    update=models.DateTimeField(auto_now=True)      # last-modified timestamp
    def __str__(self):
        return self.name
class Cart(models.Model):
    """One cart line: a user, a product, its quantity and line total."""
    # NOTE(review): ``cartid=models.AutoField`` is the Field class, not an
    # instance (missing parentheses); the implicit ``id`` pk is used.
    cartid=models.AutoField
    # Fix: on_delete must be a callable such as models.CASCADE; the string
    # 'CASCADE' fails when Django's deletion collector invokes it.
    cart_user=models.ForeignKey(User, on_delete=models.CASCADE, default=None)
    cart_product=models.ForeignKey(Product, on_delete=models.CASCADE, default=None)
    count=models.IntegerField()
    total=models.IntegerField()
    date = models.DateTimeField(auto_now_add=True)   # creation timestamp
    update = models.DateTimeField(auto_now=True)     # last-modified timestamp
    def __str__(self):
        return str(self.cart_user)
class Checkout(models.Model):
    # Shipping/contact details captured at checkout.
    # NOTE(review): ``checkid=models.AutoField`` is the Field class, not an
    # instance (missing parentheses) — same issue as the other models; confirm.
    checkid=models.AutoField
    chname=models.CharField(max_length=30)
    mobile=models.IntegerField()
    email=models.EmailField(max_length=50)
    state=models.CharField(max_length=30)
    city=models.CharField(max_length=30)
    address=models.CharField(max_length=50)
    pin=models.CharField(max_length=10)
    def __str__(self):
        return self.chname
| [
"student@test.com"
] | student@test.com |
b27b059c477b45152d67c266b8bde14dfdbcfe93 | e122ab31559f7551e4bc4dff6dfa7f7dbbd10168 | /jaqs/__init__.py | 0be750ea380b5ec64652ff6b426589ec22e928c8 | [
"Apache-2.0"
] | permissive | WayneWan413/JAQS | ffb909d6d550451552697358735ec5dd74975b2d | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | refs/heads/master | 2021-08-30T10:30:20.675837 | 2017-12-17T14:14:59 | 2017-12-17T14:14:59 | 113,726,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # encoding: utf-8
"""
JAQS
~~~~
Open source quantitative research&trading framework.
copyright: (c) 2017 quantOS-org.
license: Apache 2.0, see LICENSE for details.
"""
import os
__version__ = '0.6.6'
SOURCE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
| [
"brillliantz@outlook.com"
] | brillliantz@outlook.com |
ae48ce85c8caa8b2632e5bbc58f086388955ab75 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/application_command/application_command/tests/test__validate_version.py | 0311466ec90e46c18abaa78702c11bd7846f90a8 | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 1,003 | py | import vampytest
from ..fields import validate_version
def test__validate_version__0():
    """
    Tests whether `validate_version` works as intended.
    
    Case: passing.
    """
    version = 202302260011
    
    cases = [
        (version, version),
        (str(version), version),
    ]
    for given, expected in cases:
        vampytest.assert_eq(validate_version(given), expected)
def test__validate_version__1():
    """
    Tests whether `validate_version` works as intended.
    
    Case: `ValueError`.
    """
    for bad_value in ('-1', -1):
        with vampytest.assert_raises(AssertionError, ValueError):
            validate_version(bad_value)
def test__validate_version__2():
    """
    Tests whether `validate_version` works as intended.
    
    Case: `TypeError`.
    """
    for bad_value in (12.6,):
        with vampytest.assert_raises(TypeError):
            validate_version(bad_value)
| [
"re.ism.tm@gmail.com"
] | re.ism.tm@gmail.com |
8ce8d15ed85b76e97a1588db1e2a9fe574ea56bf | 852b381ae34e817b7ac340d287c67216e7a18b89 | /logisticRegression.py | 46f2e239387d4254a60c4ade3d3aa12980195559 | [] | no_license | ratom/Data-Science | d4dc85333a5c0ae3879331bc8d4c501f6955d8b6 | 1466fdeb02ea73c7da32dfa40665d2ba9222cb21 | refs/heads/master | 2021-06-09T04:40:15.157537 | 2021-05-08T16:35:55 | 2021-05-08T16:35:55 | 154,630,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Fix: ``sklearn.cross_validation`` was deprecated in scikit-learn 0.18 and
# removed in 0.20; ``train_test_split`` lives in ``sklearn.model_selection``.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix

# Logistic-regression click prediction on the advertising dataset:
# fit on 70% of the rows, report confusion matrix and per-class metrics.
df = pd.read_csv("datasets/advertising.csv")
#sns.heatmap(df.isnull())
#df.info()
#print(df.describe())
X = df[['Daily Time Spent on Site', 'Age', 'Area Income', 'Daily Internet Usage', 'Male']]
y = df['Clicked on Ad']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
lr = LogisticRegression()
lr.fit(X_train, y_train)
prediction = lr.predict(X_test)
print(confusion_matrix(y_test, prediction))
print("**************************************************")
print(classification_report(y_test, prediction))
| [
"dulalatom@gmail.com"
] | dulalatom@gmail.com |
cca049b91b2745cd78349902b46db3b1dd1cdecc | d6aae799e18e907fb413b715200c7832252a87e5 | /image-generation/stylegan2-cdc/execution/train.py | 4a3272d0d68b5312fc4196f36ab69425919adf01 | [
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sony/nnabla-examples | 0d0bbd5df3028996e790bcf07248fdb0932697d1 | 41f71faa6efff7774a76bbd5af3198322a90a6ab | refs/heads/master | 2023-09-04T03:45:54.023899 | 2023-08-22T03:31:21 | 2023-08-22T03:31:21 | 109,625,584 | 308 | 108 | Apache-2.0 | 2023-08-22T03:31:23 | 2017-11-05T23:30:40 | Python | UTF-8 | Python | false | false | 15,055 | py | # Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.solvers as S
from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile
from nnabla.utils.data_iterator import data_iterator_simple, data_iterator
import os
import subprocess as sp
from tqdm import trange
from collections import namedtuple
from .base import BaseExecution
from models import *
from .ops import *
from .losses import *
from data import *
import numpy as np
from .CrossDomainCorrespondence import CrossDomainCorrespondence
from .CrossDomainCorrespondence import NoiseTop
class Train(BaseExecution):
    """
    Execution model for StyleGAN training and testing.

    Builds generator/discriminator graphs (optionally with the CDC few-shot
    setup), sets up solvers, monitors and the data iterator, then runs the
    training loop immediately from __init__.
    """

    def __init__(self, monitor, config, args, comm, few_shot_config):
        """Wire up monitors, networks, solvers and data, then start training."""
        super(Train, self).__init__(
            monitor, config, args, comm, few_shot_config)

        # Initialize Monitor (rank 0 only, so log files are written once)
        self.monitor_train_loss, self.monitor_train_gen = None, None
        self.monitor_val_loss, self.monitor_val_gen = None, None
        if comm is not None:
            if comm.rank == 0:
                self.monitor_train_gen_loss = MonitorSeries(
                    config['monitor']['train_loss'], monitor,
                    interval=self.config['logger_step_interval']
                )
                self.monitor_train_gen = MonitorImageTile(
                    config['monitor']['train_gen'], monitor,
                    interval=self.config['logger_step_interval'], num_images=self.config['batch_size']
                )
                self.monitor_train_disc_loss = MonitorSeries(
                    config['monitor']['train_loss'], monitor,
                    interval=self.config['logger_step_interval']
                )
        os.makedirs(self.config['saved_weights_dir'], exist_ok=True)
        self.results_dir = args.results_dir
        self.save_weights_dir = args.weights_path
        self.few_shot_config = few_shot_config

        # CDC few-shot mode keeps a frozen source-domain generator for the
        # cross-domain-correspondence loss.
        if self.few_shot_config['common']['type'] == 'cdc':
            self.generator_s = Generator(
                config['generator'], self.img_size, config['train']['mix_after'], global_scope='Source/')

        # Initialize Discriminator
        self.discriminator = Discriminator(
            config['discriminator'], self.img_size)

        # EMA decay factor for the exponential-moving-average generator
        self.gen_exp_weight = 0.5 ** (32 / (10 * 1000))

        self.generator_ema = Generator(
            config['generator'], self.img_size, config['train']['mix_after'], global_scope='GeneratorEMA')

        # In CDC mode the plain discriminator is replaced by a patch variant
        if self.few_shot_config['common']['type'] == 'cdc':
            self.discriminator = PatchDiscriminator(
                config['discriminator'], self.img_size)

        # Initialize Solver (skipped if a subclass already created them)
        if 'gen_solver' not in dir(self):
            if self.config['solver'] == 'Adam':
                self.gen_solver = S.Adam(beta1=0, beta2=0.99)
                self.disc_solver = S.Adam(beta1=0, beta2=0.99)
            else:
                # NOTE(review): eval on a config string — acceptable only if
                # the config file is trusted.
                self.gen_solver = eval('S.'+self.config['solver'])()
                self.disc_solver = eval('S.'+self.config['solver'])()

        self.gen_solver.set_learning_rate(self.config['learning_rate'])
        self.disc_solver.set_learning_rate(self.config['learning_rate'])

        self.gen_mean_path_length = 0.0

        self.dali = args.dali
        self.args = args

        # Initialize Dataloader (only the FFHQ dataset is supported)
        if args.data == 'ffhq':
            if args.dali:
                self.train_loader = get_dali_iterator_ffhq(
                    args.dataset_path, config['data'], self.img_size, self.batch_size, self.comm)
            else:
                self.train_loader = get_data_iterator_ffhq(
                    args.dataset_path, config['data'], self.batch_size, self.img_size, self.comm)
        else:
            print('Dataset not recognized')
            exit(1)

        # Start training
        self.train()

    def build_static_graph(self):
        """Build the static NNabla graphs for G/D losses and store them in
        ``self.parameters`` (a namedtuple of graph variables)."""
        real_img = nn.Variable(
            shape=(self.batch_size, 3, self.img_size, self.img_size))
        noises = [F.randn(shape=(self.batch_size, self.config['latent_dim']))
                  for _ in range(2)]

        if self.few_shot_config['common']['type'] == 'cdc':
            NT_class = NoiseTop(
                n_train=self.train_loader.size,
                latent_dim=self.config['latent_dim'],
                batch_size=self.batch_size
            )
            noises = NT_class()
            self.PD_switch_var = NT_class.PD_switch_var

        if self.config['regularize_gen']:
            fake_img, dlatents = self.generator(
                self.batch_size, noises, return_latent=True)
        else:
            fake_img = self.generator(self.batch_size, noises)

        fake_img_test = self.generator_ema(self.batch_size, noises)

        if self.few_shot_config['common']['type'] != 'cdc':
            fake_disc_out = self.discriminator(fake_img)
            real_disc_out = self.discriminator(real_img)
        # NOTE(review): in cdc mode fake_disc_out/real_disc_out are not yet
        # assigned at this point (the branch above was skipped), so this line
        # would raise NameError — confirm the intended ordering.
        disc_loss = disc_logistic_loss(real_disc_out, fake_disc_out)
        gen_loss = 0

        if self.few_shot_config['common']['type'] == 'cdc':
            fake_img_s = self.generator_s(self.batch_size, noises)
            cdc_loss = CrossDomainCorrespondence(
                fake_img, fake_img_s, _choice_num=self.few_shot_config['cdc']['feature_num'], _layer_fix_switch=self.few_shot_config['cdc']['layer_fix'])
            gen_loss += self.few_shot_config['cdc']['lambda'] * cdc_loss
            # --- PatchDiscriminator ---
            fake_disc_out, fake_feature_var = self.discriminator(
                fake_img, patch_switch=True, index=0)
            real_disc_out, real_feature_var = self.discriminator(
                real_img, patch_switch=True, index=0)
            disc_loss = disc_logistic_loss(real_disc_out, fake_disc_out)
            disc_loss_patch = disc_logistic_loss(
                fake_feature_var, real_feature_var)
            # PD_switch_var toggles the patch loss on/off per iteration
            disc_loss += self.PD_switch_var * disc_loss_patch

        gen_loss += gen_nonsaturating_loss(fake_disc_out)

        var_name_list = ['real_img', 'noises', 'fake_img', 'gen_loss',
                         'disc_loss', 'fake_disc_out', 'real_disc_out', 'fake_img_test']
        var_list = [real_img, noises, fake_img, gen_loss,
                    disc_loss, fake_disc_out, real_disc_out, fake_img_test]

        # Path-length regularization for the generator (StyleGAN2)
        if self.config['regularize_gen']:
            dlatents.need_grad = True
            mean_path_length = nn.Variable()
            pl_reg, path_mean, _ = gen_path_regularize(
                fake_img=fake_img,
                latents=dlatents,
                mean_path_length=mean_path_length
            )
            path_mean_update = F.assign(mean_path_length, path_mean)
            path_mean_update.name = 'path_mean_update'
            # 0*assign keeps the running-mean update in the graph without
            # contributing to the loss value
            pl_reg += 0*path_mean_update
            gen_loss_reg = gen_loss + pl_reg
            var_name_list.append('gen_loss_reg')
            var_list.append(gen_loss_reg)

        # R1 gradient penalty for the discriminator
        if self.config['regularize_disc']:
            real_img.need_grad = True
            real_disc_out = self.discriminator(real_img)
            disc_loss_reg = disc_loss + self.config['r1_coeff']*0.5*disc_r1_loss(
                real_disc_out, real_img)*self.config['disc_reg_step']
            real_img.need_grad = False
            var_name_list.append('disc_loss_reg')
            var_list.append(disc_loss_reg)

        Parameters = namedtuple('Parameters', var_name_list)
        self.parameters = Parameters(*var_list)

    def forward_backward_pass(self, i, real_img=None, noises=None):
        """Run one training step (discriminator update then generator update)
        on the pre-built static graph.

        Parameters
        ----------
        i : int
            Iteration index; the lazy regularizers run when
            ``i % *_reg_step == 0``.
        real_img, noises : unused
            Inputs are fed through ``self.parameters`` before this call.
        """
        # Update Discriminator
        self.disc_solver.zero_grad()
        self.parameters.fake_img.need_grad = False
        self.parameters.fake_img.forward()
        if self.config['regularize_disc'] and i % self.config['disc_reg_step'] == 0:
            self.parameters.disc_loss_reg.forward(clear_no_need_grad=True)
            self.parameters.disc_loss_reg.backward(clear_buffer=True)
        else:
            self.parameters.disc_loss.forward(clear_no_need_grad=True)
            self.parameters.disc_loss.backward(clear_buffer=True)
        if self.comm is not None:
            # sum gradients across workers before the update
            params = [x.grad for x in self.disc_solver.get_parameters().values()]
            self.comm.all_reduce(params, division=False, inplace=True)
        self.disc_solver.update()

        # Update Generator
        self.gen_solver.zero_grad()
        self.parameters.fake_img.need_grad = True
        if self.config['regularize_gen'] and i % self.config['gen_reg_step'] == 0:
            self.parameters.gen_loss_reg.forward(clear_no_need_grad=True)
            self.parameters.gen_loss_reg.backward(clear_buffer=True)
        else:
            self.parameters.gen_loss.forward(clear_no_need_grad=True)
            self.parameters.gen_loss.backward(clear_buffer=True)
        if self.comm is not None:
            params = [x.grad for x in self.gen_solver.get_parameters().values()]
            self.comm.all_reduce(params, division=False, inplace=True)
        self.gen_solver.update()

    def ema_update(self):
        """Build and return a graph node that blends Generator weights into
        GeneratorEMA with decay ``self.gen_exp_weight`` (forward() applies it)."""
        with nn.parameter_scope('Generator'):
            g_params = nn.get_parameters(grad_only=False)
        with nn.parameter_scope('GeneratorEMA'):
            g_ema_params = nn.get_parameters(grad_only=False)
        update_ema_list = []
        for name in g_ema_params.keys():
            params_ema_updated = self.gen_exp_weight * \
                g_ema_params[name] + \
                (1.0 - self.gen_exp_weight) * g_params[name]
            update_ema_list.append(
                F.assign(g_ema_params[name], params_ema_updated))
        return F.sink(*update_ema_list)

    def copy_params(self, scope_from, scope_to):
        """Copy parameter data between two parameter scopes (by matching name)."""
        with nn.parameter_scope(scope_from):
            params_from = nn.get_parameters(grad_only=False)
        with nn.parameter_scope(scope_to):
            params_to = nn.get_parameters(grad_only=False)
        for name in params_to.keys():
            params_to[name].d = params_from[name].d

    def train(self):
        """
        Training loop: Runs forward_backward pass for the specified number of
        iterations and stores the generated images, model weights and solver
        states (weights/images are saved on rank 0 at the configured interval).
        """
        # n_procs = 1 if self.comm is None else self.comm.n_procs
        iterations_per_epoch = int(np.ceil(
            self.train_loader.size/self.train_loader.batch_size))
        self.build_static_graph()

        # split parameters between the two solvers by scope prefix
        disc_params = {k: v for k, v in nn.get_parameters(
        ).items() if k.startswith('Discriminator')}
        self.disc_solver.set_parameters(disc_params)
        gen_params = {k: v for k, v in nn.get_parameters().items() if (
            k.startswith('Generator') and not k.startswith('GeneratorEMA'))}
        self.gen_solver.set_parameters(gen_params)

        # resume solver states if a previous checkpoint exists
        if os.path.isfile(os.path.join(self.args.weights_path, 'gen_solver.h5')):
            self.gen_solver.load_states(os.path.join(
                self.args.weights_path, 'gen_solver.h5'))
            self.disc_solver.load_states(os.path.join(
                self.args.weights_path, 'disc_solver.h5'))

        # start the EMA generator from the current generator weights
        self.copy_params('Generator', 'GeneratorEMA')
        ema_updater = self.ema_update()

        for epoch in range(self.config['num_epochs']):
            pbar = trange(iterations_per_epoch, desc='Epoch ' +
                          str(epoch), disable=self.comm.rank > 0)
            epoch_gen_loss, epoch_disc_loss = 0.0, 0.0
            print(
                f'Iterations per epoch: {iterations_per_epoch}, Number of processes: {self.comm.n_procs}, Data Loader size: {self.train_loader.size}')
            for i in pbar:
                # CDC: enable the patch-discriminator loss every
                # subspace_freq-th iteration, disable it otherwise
                if self.few_shot_config['common']['type'] == 'cdc' and i % self.few_shot_config['cdc']['subspace_freq'] == 0:
                    self.PD_switch_var.d = 1
                elif self.few_shot_config['common']['type'] == 'cdc':
                    self.PD_switch_var.d = 0
                data = self.train_loader.next()
                # DALI yields NdArray (device data), plain iterator yields numpy
                if isinstance(data[0], nn.NdArray):
                    self.parameters.real_img.data = data[0]
                else:
                    self.parameters.real_img.d = data[0]
                self.forward_backward_pass(i)
                gen_loss = self.parameters.gen_loss
                disc_loss = self.parameters.disc_loss
                real_img = self.parameters.real_img
                fake_img = self.parameters.fake_img
                real_disc_out = self.parameters.real_disc_out
                fake_disc_out = self.parameters.fake_disc_out
                ema_updater.forward()
                epoch_gen_loss += gen_loss.d
                epoch_disc_loss += disc_loss.d
                pbar.set_description(
                    f'Gen Loss: {gen_loss.d}, Disc Loss: {disc_loss.d}')
                # on NaN losses, dump suspicious parameter names for debugging
                if np.isnan(gen_loss.d) or np.isnan(disc_loss.d):
                    for k, v in nn.get_parameters().items():
                        if v.d.max() < 1e-3 or np.any(np.isnan(v.d)):
                            print(k)
                # periodic checkpoint + sample images (rank 0, iteration 30)
                if self.comm.rank == 0 and (i == 30 and (epoch % self.config['save_param_step_interval'] == 0 or epoch == self.config['num_epochs']-1)):
                    self.save_weights(
                        self.save_weights_dir, epoch)
                    self.parameters.fake_img_test.forward(clear_buffer=True)
                    fake_img.forward(clear_buffer=True)
                    save_generations(self.parameters.fake_img_test, os.path.join(
                        self.results_dir, f'fake_ema_{epoch}'))
                    save_generations(real_img, os.path.join(
                        self.results_dir, f'real_{epoch}'))
                    save_generations(fake_img, os.path.join(
                        self.results_dir, f'fake_{epoch}'))
            epoch_gen_loss /= iterations_per_epoch
            epoch_disc_loss /= iterations_per_epoch
            if self.comm is not None:
                if self.comm.rank == 0:
                    self.monitor_train_gen_loss.add(epoch, epoch_gen_loss)
                    # NOTE(review): this logs the disc loss to the *gen* loss
                    # monitor; monitor_train_disc_loss looks intended — confirm.
                    self.monitor_train_gen_loss.add(epoch, epoch_disc_loss)
| [
"Hua.Ding@sony.com"
] | Hua.Ding@sony.com |
aec1934da3b54fcc5c4063a0d4d125cd0a1329f0 | af2beabfeca92de7204a06f38136ebeee836b14b | /shopping_cartapp/migrations/0002_auto_20201028_1404.py | 85780978e73d23f3a13d86674b4d93565345358c | [] | no_license | isthatjoke/geekshop | be163714d031c79d5c3dedff61625aeeb2e8304b | b95e6124ae4c7d312a74f4664773372fffaf68af | refs/heads/master | 2023-01-23T16:21:39.864886 | 2020-12-04T21:42:56 | 2020-12-04T21:42:56 | 317,832,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.1.2 on 2020-10-28 14:04
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames ShoppingCart.product to
    # ShoppingCart.game on top of the app's initial migration.

    dependencies = [
        ('shopping_cartapp', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='shoppingcart',
            old_name='product',
            new_name='game',
        ),
    ]
| [
"61158881+isthatjoke@users.noreply.github.com"
] | 61158881+isthatjoke@users.noreply.github.com |
5377d5647573a6524196b794344d1e11aa5e927d | 3440d670ec5c7d1db2c2a1e0e4cea942705c5cf7 | /fatorial_v2.py | 3fa7b853564d28b246f614f733fe8e17b2986241 | [
"MIT"
] | permissive | paulo-caixeta/Curso-Python-IME-USP | 417ae8473e5d43e4a970fc6755a5a72508160d73 | 03097c7ed625796560a79c01511b8990f37efa6a | refs/heads/main | 2023-03-30T19:15:17.083456 | 2021-03-31T23:04:56 | 2021-03-31T23:04:56 | 330,421,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
n = int(input("Digite um número inteiro: "))
while n >= 0:
fatorial = 1
while n > 1:
fatorial = fatorial * n
n = n - 1
print (fatorial)
n = int(input("Digite um número inteiro: "))
| [
"67445567+paulo-caixeta@users.noreply.github.com"
] | 67445567+paulo-caixeta@users.noreply.github.com |
7314c2d996c31dcbda1bb79a693a93b259857c44 | fbeed384e855fc90719ad8d5423f9574d1f720c3 | /programas.py | 494c3c634d9a67bdc7f4c6fb231ceb426dff45e9 | [] | no_license | lespinoza182/TTMelanomaESCOM | 4363b49d167a8ba7ee4c11f12a05e8bbcff5bacd | 0232ff8a7607e29f89097d6037138badd1ef6d4f | refs/heads/master | 2020-03-27T08:00:00.571605 | 2019-04-05T23:33:05 | 2019-04-05T23:33:05 | 146,211,764 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,882 | py | # -*- coding: utf-8 -*-
from PIL import Image
from matplotlib import pyplot as plt
from collections import Counter
from scipy import ndimage as ndi
from skimage import feature
import scipy.misc
import numpy as np
import statistics
import random
import scipy
import time
#import cv2
"""ABRIR UNA IMAGEN A COLOR Y A GRISES"""
def abrir_imagen(im):
    """Open image *im* from the hard-coded pictures folder, display it and
    report how long the whole operation took."""
    inicio = time.time()
    imagen = Image.open("C:/Users/CkriZz/Pictures/" + im)
    imagen.show()
    fin = time.time()
    print('El Proceso Tardo: ', fin - inicio, ' Segundos')
"""ESCALA DE GRISES DE LA IMAGEN A COLOR"""
def escala_de_grises(im):
    """Display *im*, then convert it to grayscale in place (each pixel becomes
    the average of its R, G, B channels) and display the result."""
    inicio = time.time()
    imagen = Image.open("C:/Users/CkriZz/Pictures/" + im)
    imagen.show()
    ancho, alto = imagen.size
    for x in range(ancho):
        for y in range(alto):
            r, g, b = imagen.getpixel((x, y))
            gris = int((r + g + b) / 3)
            imagen.putpixel((x, y), (gris, gris, gris))
    imagen.show()
    fin = time.time()
    print('El Proceso Tardo: ', fin - inicio, ' Segundos')
"""MAXIMO DE GRISES DE LA IMAGEN A COLOR"""
def maximo(im):
    """Display *im* and a version where every pixel is replaced by the maximum
    of its RGB channels; print the image-wide maximum gray level.

    Fix: the original printed ``maximo`` after the loops, which only held the
    max of the *last* pixel, not the maximum over the whole image.
    """
    tiempoIn = time.time()
    ruta = "C:/Users/CkriZz/Pictures/" + im
    im = Image.open(ruta)
    im.show()
    im3 = im
    maximo_global = 0  # track the true image-wide maximum
    for i in range(im3.size[0]):
        for j in range(im3.size[1]):
            maximo = max(im3.getpixel((i, j)))
            if maximo > maximo_global:
                maximo_global = maximo
            im3.putpixel((i, j), (maximo, maximo, maximo))
    print("El Valor Maximo De Grises Es: ", maximo_global)
    im3.show()
    tiempoFin = time.time()
    print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""MINIMO DE GRISES DE LA IMAGEN A COLOR"""
def minimo(im):
    """Display *im* and a version where every pixel is replaced by the minimum
    of its RGB channels; print the image-wide minimum gray level.

    Fixes: (1) the original printed the min of the *last* pixel only, not the
    image-wide minimum; (2) the message wrongly said "Maximo".
    """
    tiempoIn = time.time()
    ruta = "C:/Users/CkriZz/Pictures/" + im
    im = Image.open(ruta)
    im.show()
    im4 = im
    minimo_global = 255  # track the true image-wide minimum (8-bit channels)
    for i in range(im4.size[0]):
        for j in range(im4.size[1]):
            minimo = min(im4.getpixel((i, j)))
            if minimo < minimo_global:
                minimo_global = minimo
            im4.putpixel((i, j), (minimo, minimo, minimo))
    print("El Valor Minimo De Grises Es: ", minimo_global)
    im4.show()
    tiempoFin = time.time()
    print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""NEGATIVO DE LA IMAGEN A COLOR"""
def negativo_color(im):
    """Display *im* and its RGB negative (255 - channel) computed in place."""
    inicio = time.time()
    imagen = Image.open("C:/Users/CkriZz/Pictures/" + im)
    imagen.show()
    for x in range(imagen.size[0]):
        for y in range(imagen.size[1]):
            r, g, b = imagen.getpixel((x, y))
            imagen.putpixel((x, y), (255 - r, 255 - g, 255 - b))
    imagen.show()
    fin = time.time()
    print('El Proceso Tardo: ', fin - inicio, ' Segundos')
"""NEGATIVO DE LA IMAGEN A GRISES"""
def negativo_grises(im):
    """Display a grayscale *im* and its negative (255 - value) computed in place.

    NOTE(review): assumes a single-band image (getpixel returns an int) —
    confirm that callers pass a mode-'L' file.
    """
    inicio = time.time()
    imagen = Image.open("C:/Users/CkriZz/Pictures/" + im)
    imagen.show()
    for x in range(imagen.size[0]):
        for y in range(imagen.size[1]):
            valor = imagen.getpixel((x, y))
            imagen.putpixel((x, y), 255 - valor)
    imagen.show()
    fin = time.time()
    print('El Proceso Tardo: ', fin - inicio, ' Segundos')
"""BLANCO Y NEGRO DE LA IMAGEN A COLOR"""
def blanco_negro(im,grisBase):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im6 = im
i = 0
while i < im6.size[0]:
j = 0
while j < im6.size[1]:
r, g, b = im6.getpixel((i, j))
gris = (r + g + b) / 3
if gris < grisBase:
im6.putpixel((i, j), (0, 0, 0))
else:
im6.putpixel((i, j), (255, 255, 255))
j+=1
i+=1
im6.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""TRAMSPUESTA DE LA IMAGEN A GRISES"""
def tramspuesta(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im7 = im
ar = np.zeros((im7.size[0], im7.size[1]))
i = 0
while i < im7.size[1]:
j = 0
while j < im7.size[0]:
a = im7.getpixel((j, i))
ar[j, i] = a
j+=1
i+=1
ar = ar.astype(int)
im7 = Image.fromarray(ar)
im7.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""HISTOGRAMA DE LA IMAGEN A COLOR"""
def histograma_color(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im8 = im
arregloim8 = np.asarray(im8)
plt.subplot(221), plt.imshow(im8)
color = ('r','g','b')
for i,col in enumerate(color):
histr = cv2.calcHist([arregloim8],[i],None,[256],[0,256])
plt.subplot(222), plt.plot(histr,color = col)
plt.xlim([0,256])
plt.xlim([0,256])
plt.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""HISTOGRAMA DE LA IMAGEN A GRISES"""
def histograma_grises(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im16 = im
[ren, col] = im16.size
total = ren * col
a = np.asarray(im16, dtype = np.float32)
a = a.reshape(1, total)
a = a.astype(int)
a = max(a)
valor = 0
maxd = max(a)
grises = maxd
vec=np.zeros(grises + 1)
for i in range(total - 1):
valor = a[i]
vec[valor] = vec[valor] + 1
plt.plot(vec)
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""BRILLO DE LA IMAGEN A GRISES"""
def brillo(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im9 = im
arreglo = np.array(im9.size)
total = arreglo[0] * arreglo[1]
i = 0
suma = 0
while i < im9.size[0]:
j = 0
while j < im9.size[1]:
suma = suma + im9.getpixel((i, j))
j+=1
i+=1
brillo = suma / total
print("El brillo de la imagen es", brillo)
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""CONTRASTE DE LA IMAGEN A GRISES"""
def contraste(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im10 = im
arreglo = np.array(im10.size)
total = arreglo[0] * arreglo[1]
i = 0
suma = 0
while i < im10.size[0]:
j = 0
while j < im10.size[1]:
suma = suma + im10.getpixel((i, j))
j+=1
i+=1
brillo = suma / total
i = 0
while i < im10.size[0]:
j = 0
while j < im10.size[1]:
aux = im10.getpixel((i,j)) - brillo
suma = suma + aux
j+=1
i+=1
cont = suma * suma
cont = np.sqrt(suma / total)
contraste = int(cont)
print("El contraste de la imagen es", contraste)
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""SUMA DE GRISES EN LA IMAGEN A GRISES"""
def suma(im,alpha):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im11 = im
i = 0
while i < im11.size[0]:
j = 0
while j < im11.size[1]:
valor = im11.getpixel((i, j))
valor = valor + alpha
if valor >= 255:
valor = 255
else:
valor = valor
im11.putpixel((i, j),(valor))
j+=1
i+=1
im11.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""RESTA DE GRISES EN LA IMAGEN A GRISES"""
def resta(im,alpha):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im12 = im
i = 0
while i < im12.size[0]:
j = 0
while j < im12.size[1]:
valor = im12.getpixel((i, j))
valor = valor - alpha
if valor <= 0:
valor = abs(valor)
else:
valor = valor
im12.putpixel((i, j),(valor))
j+=1
i+=1
im12.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""MULTIPLICACION DE GRISES EN LA IMAGEN A GRISES"""
def multiplicacion(im,alpha):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im13 = im
i = 0
while i < im13.size[0]:
j = 0
while j < im13.size[1]:
valor = im13.getpixel((i, j))
valor = valor * alpha
if valor >= 255:
valor = 255
if valor <= 0:
valor = valor
im13.putpixel((i, j),(valor))
j+=1
i+=1
im13.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""DIVISION DE GRISES EN LA IMAGEN A GRISES"""
def division(im,alpha):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
im14 = im
i = 0
while i < im14.size[0]:
j = 0
while j < im14.size[1]:
valor = im14.getpixel((i, j))
valor = valor / alpha
valor = int(valor)
if valor <= 0:
valor = abs(valor)
else:
valor = valor
im14.putpixel((i, j),(valor))
j+=1
i+=1
im14.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""SUMA DE DOS IMAGENES A GRISES"""
def suma_imagenes(im):
tiempoIn = time.time()
imagen1 = Image.open("C:/Users/CkriZz/Pictures/1.jpeg")
imagen2 = Image.open("C:/Users/CkriZz/Pictures/2.jpeg")
imagen1.show()
imagen2.show()
# para realizar blending deben tener el mismo tamano
imagen1.resize(imagen2.size)
# out = image1 * (1.0 - alpha) + image2 * alpha
# alpha * imagen1 + (1.0 - alpha) * imagen2
out = Image.blend(imagen1, imagen2, 0.50)
out.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""CONVOLUCION DE LA IMAGEN A GRISES"""
def convolucion(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
print("Dimesion De La Matriz:")
dimension = input()
dimension = int(dimension)
datos = []
i = 0
print("Datos De La Matriz: ")
while i < dimension:
j = 0
while j < dimension:
nuevo = input()
nuevo = float(nuevo)
datos.append(nuevo)
j+=1
i+=1
datos = np.asarray(datos, dtype = np.float32)
datos = datos.reshape(dimension, dimension)
[col,ren] = ima.size
imagen1 = np.asarray(ima, dtype = np.float32)
imagen2 = imagen1
i = 0
while i < ren-dimension:
j = 0
while j < col-dimension:
sub = imagen1[i:(dimension + i), j:(dimension + j)]
suma = 0
r = 0
while r < dimension:
c=0
while c < dimension:
suma = suma + sub[r,c] * datos[r,c]
c+=1
r+=1
valor = suma / (dimension * dimension)
indice1 = ((dimension / 2 + .5) + i)
indice2 = ((dimension / 2 + .5) + j)
imagen2[indice1, indice2]=valor
j+=1
i+=1
imagen2=Image.fromarray(imagen2)
imagen2.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""ECUALIZACION NORMAL DE LA IMAGEN A GRISES"""
def ecua_normal(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
[ren, col] = ima.size
ima = np.asarray(ima, dtype = np.float32).reshape(1, ren * col)
valor = 0
maxdata = max(max(ima))
mindata = min(min(ima))
niveles = maxdata
h = np.zeros(niveles)
ima = ima.reshape(col, ren)
ac = h
i = 0
#cálculo del histograma
while i < ren:
j = 0
while j<col:
valor = ima[j, i] - 1
h[valor] = h[valor] + 1
j+=1
i+=1
ac[0] = h[0]
i = 1
while i < maxdata:
ac[i] = ac[i - 1] + h[i]
i+=1
ac = ac / (ren * col)
#funcion de mapeo
mapeo = np.floor(mindata * ac)
#si mindata es cero la imagen sera cero
newim = np.zeros((col, ren))
i = 0
while i < ren:
j = 0
while j < col:
newim[j, i] = mapeo[ima[j, i] - 1]
j+=1
i+=1
newim = Image.fromarray(newim)
newim.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""ECUALIZACION UNIFORME DE LA IMAGEN A GRISES"""
def ecua_uniforme(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
[ren, col] = ima.size
ima = np.asarray(ima, dtype = np.float32).reshape(1, ren * col)
valor = 0
maxdata = max(max(ima))
mindata = min(min(ima))
niveles = maxdata
h = np.zeros(niveles)
ima = ima.reshape(col, ren)
ac = h
i = 0
#cálculo del histograma
while i < ren:
j = 0
while j<col:
valor = ima[j, i] - 1
h[valor] = h[valor] + 1
j+=1
i+=1
ac[0] = h[0]
i = 1
while i < maxdata:
ac[i] = ac[i - 1] + h[i]
i+=1
ac = ac / (ren * col)
#funcion de mapeo
m1 = maxdata - mindata
m2 = m1 * ac
m3 = m2 + mindata
mapeo = np.floor(m3)
#si mindata es cero la imagen sera cero
newim = np.zeros((col, ren))
i = 0
while i < ren:
j = 0
while j < col:
newim[j, i] = mapeo[ima[j, i] - 1]
j+=1
i+=1
newim = Image.fromarray(newim)
newim.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""ECUALIZACION EXPONENCIAL DE LA IMAGEN A GRISES"""
def ecua_exponencial(im,alpha):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
[ren, col] = ima.size
ima = np.asarray(ima, dtype = np.float32).reshape(1, ren * col)
valor = 0
maxdata = max(max(ima))
mindata = min(min(ima))
niveles = maxdata
h = np.zeros(niveles)
ima = ima.reshape(col, ren)
ac = h
i = 0
#cálculo del histograma
while i < ren:
j = 0
while j < col:
valor = ima[j,i] - 1
h[valor] = h[valor] + 1
j+=1
i+=1
ac[0] = h[0]
i = 1
while i < maxdata:
ac[i] = ac[i - 1] + h[i]
i+=1
ac = ac / (ren * col)
#funcion de mapeo
m1 = 1 - ac
mapeo = np.floor(mindata - 1 / alpha * np.log(m1))
#si mindata es cero la imagen sera cero
newim = np.zeros((col, ren))
i = 0
while i < ren:
j = 0
while j < col:
newim[j, i] = mapeo[ima[j, i] - 1]
j+=1
i+=1
newim = Image.fromarray(newim)
newim.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""ECUALIZACION RAYLEIGH DE LA IMAGEN A GRISES"""
def ecua_rayleigh(im,alpha):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
[ren, col] = ima.size
ima = np.asarray(ima, dtype = np.float32).reshape(1, ren * col)
valor = 0
maxdata = max(max(ima))
mindata = min(min(ima))
niveles = maxdata
h = np.zeros(niveles)
ima = ima.reshape(col, ren)
ac = h
i = 0
#cálculo del histograma
while i < ren:
j = 0
while j < col:
valor = ima[j,i] - 1
h[valor] = h[valor] + 1
j+=1
i+=1
ac[0] = h[0]
i = 1
while i < maxdata:
ac[i] = ac[i - 1] + h[i]
i+=1
ac = ac / (ren * col)
#funcion de mapeo
m1 = 1 - ac
m2 = 1 / m1
m3 = alpha * alpha
m4 = 2 * m3
m5 = np.log(m2)
m6 = m4 * m5
m7 = pow(m6, 1/2)
m8 = mindata + m7
mapeo = np.floor(m8)
#si mindata es cero la imagen sera cero
newim = np.zeros((col, ren))
i = 0
while i < ren:
j = 0
while j < col:
newim[j, i] = mapeo[ima[j, i] - 1]
j+=1
i+=1
newim = Image.fromarray(newim)
newim.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""ECUALIZACION RAYLEIGH DE LA IMAGEN A GRISES"""
def ecua_hypercubica(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
[ren, col] = ima.size
ima = np.asarray(ima, dtype = np.float32).reshape(1, ren * col)
valor = 0
maxdata = max(max(ima))
mindata = min(min(ima))
niveles = maxdata
h = np.zeros(niveles)
ima = ima.reshape(col, ren)
ac = h
i = 0
#cálculo del histograma
while i < ren:
j = 0
while j < col:
valor = ima[j,i] - 1
h[valor] = h[valor] + 1
j+=1
i+=1
ac[0] = h[0]
i = 1
while i < maxdata:
ac[i] = ac[i - 1] + h[i]
i+=1
ac = ac / (ren * col)
#funcion de mapeo
m1 = pow(maxdata, 1/3)
m2 = pow(mindata, 1/3)
m3 = m2 * ac
m4 = m1 - m3
m5 = m4 + m1
m6 = pow(m5 , 3)
mapeo = np.floor(m6)
#si mindata es cero la imagen sera cero
newim = np.zeros((col, ren))
i = 0
while i < ren:
j = 0
while j < col:
newim[j, i] = mapeo[ima[j, i] - 1]
j+=1
i+=1
newim = Image.fromarray(newim)
newim.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""ECUALIZACION NHYPERBOLICA DE LA IMAGEN A GRISES"""
def ecua_hyperbolica(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
[ren, col] = ima.size
ima = np.asarray(ima, dtype = np.float32).reshape(1, ren * col)
valor = 0
maxdata = max(max(ima))
mindata = min(min(ima))
niveles = maxdata
h = np.zeros(niveles)
ima = ima.reshape(col, ren)
ac = h
i = 0
#cálculo del histograma
while i < ren:
j = 0
while j < col:
valor = ima[j, i] - 1
h[valor] = h[valor] + 1
j+=1
i+=1
ac[0] = h[0]
i = 1
while i < maxdata:
ac[i] = ac[i - 1] + h[i]
i+=1
ac = ac / (ren * col)
#funcion de mapeo
m1 = maxdata / mindata
m2 = mindata * m1
m3 = pow(m2, ac)
mapeo = np.floor(m3)
#si mindata es cero la imagen sera cero
newim = np.zeros((col, ren))
i = 0
while i < ren:
j = 0
while j < col:
newim[j, i] = mapeo[ima[j, i] - 1]
j+=1
i+=1
newim = Image.fromarray(newim)
newim.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""RUIDO GAUSSIANO EN LA IMAGEN A GRISES """
def ruido_gaussiano(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
l = scipy.misc.imread(ruta)
noisy = l + 0.4 * l.std() * np.random.random(l.shape)
plt.figure(figsize = (50, 50))
plt.subplot(131)
plt.imshow(noisy, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""RUIDO SAL Y PIMIENTA EN LA IMAGEN A COLOR"""
def salypimienta_color(im,prob):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
[ren, col] = im.size
sal = Image.new("RGB",(ren, col))
for i in range(ren):
for j in range(col):
r,g,b = im.getpixel((i, j))
if random.random() < prob:
syp = random.randint(0,1)
if syp == 0:
syp = 0
else:
syp = 255
sal.putpixel((i, j),(syp, syp, syp))
else:
sal.putpixel((i, j),(r, g, b))
sal.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""RUIDO SAL Y PIMIENTA EN LA IMAGEN A GRISES"""
def salypimienta_grises(im,prob):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
[ren, col] = im.size
sal = im
nMaxRen = round(ren * prob / 100.0)
nMaxCol = round(col * prob / 100.0)
i = 1
for i in range(nMaxRen):
j = 1
for j in range(nMaxCol):
cx = round(np.random.rand() * (col - 1)) + 1
cy = round(np.random.rand() * (ren - 1)) + 1
aaa = round(np.random.rand() * 255)
if aaa > 128:
val = 255
sal.putpixel((cy, cx),(val))
else:
val= 1
sal.putpixel((cy, cx),(val))
sal.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""FILTRO MAXIMO DE LA IMAGEN A GRISES PARA QUITAR RUIDO"""
def filtro_maximo(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
out = im
[ren, col] = out.size
matriz = np.asarray(out, dtype = np.float32)
i = 0
while i < ren - 3:
j = 0
while j < col - 3:
submatriz = matriz[j:j+3,i:i+3]
submatriz = submatriz.reshape(1, 9)
nuevo = int(max(max(submatriz)))
out.putpixel((i + 1, j + 1),(nuevo))
j+=1
i+=1
out.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""FILTRO MINIMO DE LA IMAGEN A GRISES PARA QUITAR RUIDO"""
def filtro_minimo(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
out = im
[ren, col] = out.size
matriz = np.asarray(out, dtype = np.float32)
i = 0
while i < ren - 3:
j = 0
while j < col - 3:
submatriz = matriz[j:j+3,i:i+3]
submatriz = submatriz.reshape(1, 9)
nuevo = int(min(min(submatriz)))
out.putpixel((i + 1, j + 1),(nuevo))
j+=1
i+=1
out.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""FILTRO MEDIANA DE LA IMAGEN A GRISES PARA QUITAR RUIDO SAL Y PIMIENTA"""
def filtro_mediana(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
out = im
[ren, col] = out.size
matriz = np.asarray(out, dtype = np.float32)
i = 0
while i < ren - 3:
j = 0
while j < col - 3:
submatriz = matriz[j:j+3,i:i+3]
submatriz = submatriz.reshape(1, 9)
nuevo = (max(submatriz))
nuevo = statistics.median(nuevo)
nuevo = int(nuevo)
out.putpixel((i + 1, j + 1),(nuevo))
j+=1
i+=1
out.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""FILTRO MODA DE LA IMAGEN A GRISES PARA QUITAR RUIDO SAL Y PIMIENTA"""
def filtro_moda(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
out = im
[ren, col] = out.size
matriz = np.asarray(out, dtype = np.float32)
i = 0
while i < ren - 3:
j = 0
while j < col - 3:
submatriz = matriz[j:j+3,i:i+3]
submatriz = submatriz.reshape(1, 9)
nuevo = (max(submatriz))
data = Counter(nuevo)
nuevo = data.most_common(1)
nuevo = max(nuevo)
nuevo = int(nuevo[0])
out.putpixel((i + 1, j + 1),(nuevo))
j+=1
i+=1
out.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""DETECCION DE BORDES CON SOBEL EN UNA IMAGEN A GRISES"""
def bordes_sobel(im, mask):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
[ren, col] = ima.size
pix = ima.load()
out_im = Image.new("L", (ren, col))
# gx + gy + prewit45° = ([1,3,3],[-3,-2,3],[-3,-3,1])
# gx = ([-1,0,1], [-2,0,2], [-1,0,1])
# gy = ([1,2,1], [0,0,0], [-1,-2,-1])
out = out_im.load()
for i in range(ren):
for j in range(col):
suma = 0
for n in range(i-1, i+2):
for m in range(j-1, j+2):
if n >= 0 and m >= 0 and n < ren and m < col:
suma += mask[n - (i - 1)][ m - (j - 1)] * pix[n, m]
out[i, j] = suma
out_im.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""DETECCION DE BORDES CON CANNY EN UNA IMAGEN A GRISES"""
def bordes_canny(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
ima = ndi.gaussian_filter(im, 4)
edges = feature.canny(ima)
fig, (ax2) = plt.subplots(nrows = 1, ncols = 1, figsize = (8, 3), sharex = True, sharey = True)
ax2.imshow(edges, cmap = plt.cm.gray)
ax2.axis('off')
plt.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""UMBRAL OPTIMO POR EL METODO OTSU Y APLICACION DE UMBRALIZACION AL "ARRAY" EN UNA IMAGEN A GRISES"""
def umbral_otsu(im):
tiempoIn = time.time()
ruta = ("C:/Users/CkriZz/Pictures/" + im)
im = Image.open(ruta)
im.show()
ima = im
width, height = ima.size
img = np.array(ima.getdata())
histogram = np.array(ima.histogram(),float) / (width * height)
#Vector de probabilidad acomulada.
omega = np.zeros(256)
#Vector de media acomulada
mean = np.zeros(256)
#Partiendo del histograma normalizado se calculan la probabilidad
#acomulada (omega) y la media acomulada (mean)
omega[0] = histogram[0]
for i in range(len(histogram)):
omega[i] = omega[i - 1] + histogram[i]
mean[i] = mean[i - 1] + (i - 1) * histogram[i]
sigmaB2 = 0
mt = mean[len(histogram) - 1] #El Valor de la intensidad media de la imagen
sigmaB2max = 0
T = 0
for i in range(len(histogram)):
clase1 = omega[i]
clase2 = 1 - clase1
if clase1 != 0 and clase2 != 0:
m1 = mean[i] / clase1
m2 = (mt - mean[i]) / clase2
sigmaB2 = (clase1 * (m1 - mt) * (m1 - mt) + clase2 * (m2 - mt) * (m2 - mt))
if sigmaB2 > sigmaB2max:
sigmaB2max = sigmaB2
T = i
thr = int(T)
print('El Umbral Optimo De La Imagen Es: ' ,thr)
#Se Aplica la umbralización al "array" de la imagen
#limites de procesado en x
x_min, x_max = 0, width
#limites de procesado en y
y_min, y_max = 0, height
#imagen de salida
img_out = np.zeros(width * height)
#procesado de la imagen
loc = 0 #posicin del "pixel" actual
for y in range (y_min, y_max):
for x in range(x_min, x_max):
loc = y * width + x
if img[loc] > thr:
img_out[loc] = 255
else:
img_out[loc] = 0
img_thr = img_out
im_otsu = img_thr.reshape(height, width)
im_otsu = Image.fromarray(im_otsu)
im_otsu.show()
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
"""MATRIZ DE CONCURRENCIA"""
def matrizConcurrencia(datos):
# datos = ([0,0,1,1],[0,0,1,1],[0,2,2,2],[2,2,3,3])
tiempoIn = time.time()
datos=np.asarray(datos)
[ren, col] = datos.shape
total = ren * col
nm = datos.reshape((1, total))
nm = max(nm)
x=max(nm)
"""0º Grados"""
print("-----0º Grados-----")
cero = np.zeros((x + 1, x + 1))
cont = 1
i = 0
while i < (total - 1):
n1 = nm[i]
n2 = nm[i + 1]
cero[n1, n2] = cero[n1, n2] + 1
cero[n2, n1] = cero[n2, n1] + 1
if(cont == (ren - 1)):
i = i + 2
cont = 1
else:
i = i + 1
cont = cont + 1
print(cero)
print("-----45º Grados-----")
"""45º Grados"""
cont = 1
i = 1
cuarenta = np.zeros((x + 1, x + 1))
while i < (total - (ren)) + 1:
n1 = nm[i]
n2 = nm[i + 3]
cuarenta[n1, n2] = cuarenta[n1, n2] + 1
cuarenta[n2, n1] = cuarenta[n2, n1] + 1
if(cont == (col-1)):
i = i + 2
cont = 1
else:
i = i + 1
cont = cont + 1
print(cuarenta)
print("-----90º Grados-----")
"""90º Grados"""
cont = 1
i = 0
noventa = np.zeros((x + 1, x + 1))
while i < (total - (ren)):
n1 = nm[i]
n2 = nm[i + 4]
noventa[n1, n2] = noventa[n1, n2] + 1
noventa[n2, n1] = noventa[n2, n1] + 1
i = i + 1
print(noventa)
print("-----135º Grados-----")
"""135º Grados"""
cont = 1
i = 1
cien = np.zeros((x + 1, x + 1))
while i < (total - (ren)) - 1:
n1 = nm[i];
n2 = nm[i + 5];
cien[n1, n2] = cien[n1, n2] + 1;
cien[n2, n1] = cien[n2, n1] + 1;
if(cont == (col - 1)):
i = i + 2
cont = 1
else:
i = i + 1
cont = cont + 1
print(cien)
print("--------------------")
tiempoFin = time.time()
print('El Proceso Tardo: ', tiempoFin - tiempoIn, ' Segundos')
| [
"noreply@github.com"
] | noreply@github.com |
69fa94dee859ced3c1d201cbaba72e52e49bd102 | f57337d795f19c14d67887743cbd5941d1e84378 | /blog_project/blog/migrations/0002_auto_20200603_2323.py | 814f9e8c538d7234a6bec958a9fea7db5e5ac65c | [] | no_license | teja465/blog-project | 7662884cb3b810b9959cd6d618560eebe1f08b58 | c7ccb1cbc8e7c2cbb29e39b32cd6798ce6329d36 | refs/heads/master | 2022-10-03T02:54:59.115590 | 2020-06-06T17:16:54 | 2020-06-06T17:16:54 | 270,048,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # Generated by Django 3.0 on 2020-06-03 17:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='blog_model',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"katukuri.raviteja465@gmail.com"
] | katukuri.raviteja465@gmail.com |
07ddfdca133dc644af21ed96ff81b51deae90705 | 7990a6b89de63d7d947224e1225d7821718d1af8 | /variables.py | ca64358511e47b325f1e5633e45d2eed3e720883 | [] | no_license | PragathiNS/PythonCodes | 1838e75dbcd3a089954e8a67c382739660a158f3 | 87b63cc915bcbbc8ea29b132f888744cbf7a3aa8 | refs/heads/master | 2021-03-27T09:54:04.931646 | 2018-05-30T05:07:48 | 2018-05-30T05:07:48 | 85,904,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | # variables can be initialized without any type mentioned to it
intVar = 10
floatVar = 1.1
stringVar = "Variable"
boolVar = True
#We can know the type of these variables using type()
print(type(intVar))
print(type(floatVar))
print(type(stringVar))
print(type(boolVar))
| [
"pragathi.code@gmail.com"
] | pragathi.code@gmail.com |
b043bbed978055b6395eae69ddcac30437659c9c | 39555d47880ce5433845424bf9942fd359fe4602 | /TaxiFareModel/encoders.py | a0c7421dde24adae4b9ccfdcf293eeb628b2ff29 | [] | no_license | Doxycy/TaxiFareModel | 928289f62a255feb1d8d705d5cf23fe14a71fc77 | 18beb16daa2fe5dfe91c4f9d6fd057c1e87b135f | refs/heads/master | 2023-07-17T04:45:32.633079 | 2021-08-17T14:04:45 | 2021-08-17T14:04:45 | 397,279,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,982 | py | from sklearn.base import BaseEstimator, TransformerMixin
from TaxiFareModel.utils import haversine_vectorized
import pandas as pd
class TimeFeaturesEncoder(BaseEstimator, TransformerMixin):
"""Extract the day of week (dow), the hour, the month and the year from a
time column."""
def __init__(self, time_column, time_zone_name='America/New_York'):
self.time_column = time_column
self.time_zone_name = time_zone_name
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
assert isinstance(X, pd.DataFrame)
X_ = X.copy()
X_.index = pd.to_datetime(X[self.time_column])
X_.index = X_.index.tz_convert(self.time_zone_name)
X_["dow"] = X_.index.weekday
X_["hour"] = X_.index.hour
X_["month"] = X_.index.month
X_["year"] = X_.index.year
return X_[['dow', 'hour', 'month', 'year']]
class DistanceTransformer(BaseEstimator, TransformerMixin):
"""
Computes the haversine distance between two GPS points.
Returns a copy of the DataFrame X with only one column: 'distance'.
"""
def __init__(self,
start_lat="pickup_latitude",
start_lon="pickup_longitude",
end_lat="dropoff_latitude",
end_lon="dropoff_longitude"):
self.start_lat = start_lat
self.start_lon = start_lon
self.end_lat = end_lat
self.end_lon = end_lon
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
assert isinstance(X, pd.DataFrame)
X_ = X.copy()
X_["distance"] = haversine_vectorized(X_,
start_lat=self.start_lat,
start_lon=self.start_lon,
end_lat=self.end_lat,
end_lon=self.end_lon)
return X_[['distance']]
| [
"morgan.godard@outlook.fr"
] | morgan.godard@outlook.fr |
00213373c71f2901f04b9c3f250dfd0d591ee90b | bfd41fc543f6dbfc821341522cf8e7a9d2e34ce8 | /venv/lib/python2.7/site-packages/astroid/scoped_nodes.py | f9ec7b774f86c4821ff457b8eb19100ab3217d62 | [] | no_license | MaraKovalcik/Flask | 783243560ead637a381f76d3893da2b212eff898 | 1ff8413f3551b051f8e6c76db6cf402fc7428188 | refs/heads/master | 2021-01-22T09:09:16.165734 | 2015-02-24T16:57:14 | 2015-02-24T16:57:14 | 31,268,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,465 | py | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the classes for "scoped" node, i.e. which are opening a
new local scope in the language definition : Module, Class, Function (and
Lambda, GenExpr, DictComp and SetComp to some extent).
"""
from __future__ import with_statement
__doctype__ = "restructuredtext en"
import sys
from itertools import chain
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
import six
from logilab.common.compat import builtins
from logilab.common.decorators import cached, cachedproperty
from astroid.exceptions import NotFoundError, \
AstroidBuildingException, InferenceError
from astroid.node_classes import Const, DelName, DelAttr, \
Dict, From, List, Pass, Raise, Return, Tuple, Yield, YieldFrom, \
LookupMixIn, const_factory as cf, unpack_infer, Name, CallFunc
from astroid.bases import NodeNG, InferenceContext, Instance,\
YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, \
BUILTINS
from astroid.mixins import FilterStmtsMixin
from astroid.bases import Statement
from astroid.manager import AstroidManager
ITER_METHODS = ('__iter__', '__getitem__')
PY3K = sys.version_info >= (3, 0)
def remove_nodes(func, cls):
def wrapper(*args, **kwargs):
nodes = [n for n in func(*args, **kwargs) if not isinstance(n, cls)]
if not nodes:
raise NotFoundError()
return nodes
return wrapper
def function_to_method(n, klass):
if isinstance(n, Function):
if n.type == 'classmethod':
return BoundMethod(n, klass)
if n.type != 'staticmethod':
return UnboundMethod(n)
return n
def std_special_attributes(self, name, add_locals=True):
if add_locals:
locals = self.locals
else:
locals = {}
if name == '__name__':
return [cf(self.name)] + locals.get(name, [])
if name == '__doc__':
return [cf(self.doc)] + locals.get(name, [])
if name == '__dict__':
return [Dict()] + locals.get(name, [])
raise NotFoundError(name)
MANAGER = AstroidManager()
def builtin_lookup(name):
"""lookup a name into the builtin module
return the list of matching statements and the astroid for the builtin
module
"""
builtin_astroid = MANAGER.ast_from_module(builtins)
if name == '__dict__':
return builtin_astroid, ()
try:
stmts = builtin_astroid.locals[name]
except KeyError:
stmts = ()
return builtin_astroid, stmts
# TODO move this Mixin to mixins.py; problem: 'Function' in _scope_lookup
class LocalsDictNodeNG(LookupMixIn, NodeNG):
""" this class provides locals handling common to Module, Function
and Class nodes, including a dict like interface for direct access
to locals information
"""
# attributes below are set by the builder module or by raw factories
# dictionary of locals with name as key and node defining the local as
# value
def qname(self):
"""return the 'qualified' name of the node, eg module.name,
module.class.name ...
"""
if self.parent is None:
return self.name
return '%s.%s' % (self.parent.frame().qname(), self.name)
def frame(self):
"""return the first parent frame node (i.e. Module, Function or Class)
"""
return self
def scope(self):
"""return the first node defining a new scope (i.e. Module,
Function, Class, Lambda but also GenExpr, DictComp and SetComp)
"""
return self
def _scope_lookup(self, node, name, offset=0):
"""XXX method for interfacing the scope lookup"""
try:
stmts = node._filter_stmts(self.locals[name], self, offset)
except KeyError:
stmts = ()
if stmts:
return self, stmts
if self.parent: # i.e. not Module
# nested scope: if parent scope is a function, that's fine
# else jump to the module
pscope = self.parent.scope()
if not pscope.is_function:
pscope = pscope.root()
return pscope.scope_lookup(node, name)
return builtin_lookup(name) # Module
def set_local(self, name, stmt):
"""define <name> in locals (<stmt> is the node defining the name)
if the node is a Module node (i.e. has globals), add the name to
globals
if the name is already defined, ignore it
"""
#assert not stmt in self.locals.get(name, ()), (self, stmt)
self.locals.setdefault(name, []).append(stmt)
__setitem__ = set_local
def _append_node(self, child):
"""append a child, linking it in the tree"""
self.body.append(child)
child.parent = self
def add_local_node(self, child_node, name=None):
"""append a child which should alter locals to the given node"""
if name != '__class__':
# add __class__ node as a child will cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node)
def __getitem__(self, item):
"""method from the `dict` interface returning the first node
associated with the given name in the locals dictionary
:type item: str
:param item: the name of the locally defined object
:raises KeyError: if the name is not defined
"""
return self.locals[item][0]
def __iter__(self):
"""method from the `dict` interface returning an iterator on
`self.keys()`
"""
return iter(self.keys())
def keys(self):
"""method from the `dict` interface returning a tuple containing
locally defined names
"""
return list(self.locals.keys())
def values(self):
"""method from the `dict` interface returning a tuple containing
locally defined nodes which are instance of `Function` or `Class`
"""
return [self[key] for key in self.keys()]
def items(self):
"""method from the `dict` interface returning a list of tuple
containing each locally defined name with its associated node,
which is an instance of `Function` or `Class`
"""
return list(zip(self.keys(), self.values()))
    def __contains__(self, name):
        # membership test over the locally defined names
        return name in self.locals
    has_key = __contains__  # dict-style alias kept for backward compatibility
# Module #####################################################################
class Module(LocalsDictNodeNG):
    """Scoped node for a python module.

    A Module is the root of the astroid tree built for a source file or a
    built-in module; its ``locals`` and ``globals`` dictionaries are the
    same object.
    """
    _astroid_fields = ('body',)
    fromlineno = 0
    lineno = 0
    # attributes below are set by the builder module or by raw factories
    # the file from which has been extracted the astroid representation. It may
    # be None if the representation has been built from a built-in module
    file = None
    # Alternatively, if built from a string/bytes, this can be set
    file_bytes = None
    # encoding of python source file, so we can get unicode out of it (python2
    # only)
    file_encoding = None
    # the module name
    name = None
    # boolean for astroid built from source (i.e. ast)
    pure_python = None
    # boolean for package module
    package = None
    # dictionary of globals with name as key and node defining the global
    # as value
    globals = None
    # Future imports
    future_imports = None
    # names of python special attributes (handled by getattr impl.)
    special_attributes = set(('__name__', '__doc__', '__file__', '__path__',
                              '__dict__'))
    # names of module attributes available through the global scope
    scope_attrs = set(('__name__', '__doc__', '__file__', '__path__'))
    def __init__(self, name, doc, pure_python=True):
        self.name = name
        self.doc = doc
        self.pure_python = pure_python
        # a module's locals *are* its globals: one shared dictionary
        self.locals = self.globals = {}
        self.body = []
        self.future_imports = set()
    @cachedproperty
    def file_stream(self):
        """A binary stream over the module source, or None when neither
        ``file_bytes`` nor ``file`` is available.
        """
        if self.file_bytes is not None:
            return BytesIO(self.file_bytes)
        if self.file is not None:
            return open(self.file, 'rb')
        return None
    def block_range(self, lineno):
        """return block line numbers.
        start from the beginning whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def scope_lookup(self, node, name, offset=0):
        """Look up *name* at module scope.

        Module special attributes (``__name__``, ``__doc__``, ...) that are
        not shadowed by a local definition are served through getattr first.
        """
        if name in self.scope_attrs and not name in self.locals:
            try:
                return self, self.getattr(name)
            except NotFoundError:
                return self, ()
        return self._scope_lookup(node, name, offset)
    def pytype(self):
        # python type of a module object, e.g. 'builtins.module'
        return '%s.module' % BUILTINS
    def display_type(self):
        # human-readable type name used in messages
        return 'Module'
    def getattr(self, name, context=None, ignore_locals=False):
        """Return the list of nodes defining *name* on this module.

        Special attributes are synthesized; for a package, an unresolved
        name is additionally tried as a sub-module import.
        :raises NotFoundError: if the name cannot be resolved
        """
        if name in self.special_attributes:
            if name == '__file__':
                return [cf(self.file)] + self.locals.get(name, [])
            if name == '__path__' and self.package:
                return [List()] + self.locals.get(name, [])
            return std_special_attributes(self, name)
        if not ignore_locals and name in self.locals:
            return self.locals[name]
        if self.package:
            try:
                return [self.import_module(name, relative_only=True)]
            except AstroidBuildingException:
                raise NotFoundError(name)
            except SyntaxError:
                raise NotFoundError(name)
            except Exception:# XXX pylint tests never pass here; do we need it?
                import traceback
                traceback.print_exc()
        raise NotFoundError(name)
    # transparently skip DelName nodes when resolving the attribute
    getattr = remove_nodes(getattr, DelName)
    def igetattr(self, name, context=None):
        """inferred getattr"""
        # set lookup name since this is necessary to infer on import nodes for
        # instance
        if not context:
            context = InferenceContext()
        try:
            return _infer_stmts(self.getattr(name, context), context, frame=self, lookupname=name)
        except NotFoundError:
            raise InferenceError(name)
    def fully_defined(self):
        """return True if this module has been built from a .py file
        and so contains a complete representation including the code
        """
        return self.file is not None and self.file.endswith('.py')
    def statement(self):
        """return the first parent node marked as statement node
        consider a module as a statement...
        """
        return self
    def previous_sibling(self):
        """module has no sibling"""
        return
    def next_sibling(self):
        """module has no sibling"""
        return
    # (2, 8) is an "any Python 2" sentinel: 2.8 will never exist, so this
    # branch is taken on every Python 2 interpreter and skipped on Python 3.
    if sys.version_info < (2, 8):
        @cachedproperty
        def _absolute_import_activated(self):
            # Python 2: active only with the __future__ import
            for stmt in self.locals.get('absolute_import', ()):
                if isinstance(stmt, From) and stmt.modname == '__future__':
                    return True
            return False
    else:
        # Python 3: absolute imports are always active
        _absolute_import_activated = True
    def absolute_import_activated(self):
        """Whether absolute imports are in effect for this module."""
        return self._absolute_import_activated
    def import_module(self, modname, relative_only=False, level=None):
        """import the given module considering self as context"""
        if relative_only and level is None:
            level = 0
        absmodname = self.relative_to_absolute_name(modname, level)
        try:
            return MANAGER.ast_from_module_name(absmodname)
        except AstroidBuildingException:
            # we only want to import a sub module or package of this module,
            # skip here
            if relative_only:
                raise
        return MANAGER.ast_from_module_name(modname)
    def relative_to_absolute_name(self, modname, level):
        """return the absolute module name for a relative import.
        The relative import can be implicit or explicit.
        """
        # XXX this returns non sens when called on an absolute import
        # like 'pylint.checkers.astroid.utils'
        # XXX doesn't return absolute name if self.name isn't absolute name
        if self.absolute_import_activated() and level is None:
            return modname
        if level:
            if self.package:
                level = level - 1
            package_name = self.name.rsplit('.', level)[0]
        elif self.package:
            package_name = self.name
        else:
            package_name = self.name.rsplit('.', 1)[0]
        if package_name:
            if not modname:
                return package_name
            return '%s.%s' % (package_name, modname)
        return modname
    def wildcard_import_names(self):
        """return the list of imported names when this module is 'wildcard
        imported'
        It doesn't include the '__builtins__' name which is added by the
        current CPython implementation of wildcard imports.
        """
        # take advantage of a living module if it exists
        try:
            living = sys.modules[self.name]
        except KeyError:
            pass
        else:
            try:
                return living.__all__
            except AttributeError:
                return [name for name in living.__dict__.keys()
                        if not name.startswith('_')]
        # else lookup the astroid
        #
        # We separate the different steps of lookup in try/excepts
        # to avoid catching too many Exceptions
        default = [name for name in self.keys() if not name.startswith('_')]
        try:
            all = self['__all__']  # NOTE(review): shadows the `all` builtin
        except KeyError:
            return default
        try:
            explicit = next(all.assigned_stmts())
        except InferenceError:
            return default
        except AttributeError:
            # not an assignment node
            # XXX infer?
            return default
        # Try our best to detect the exported name.
        infered = []
        try:
            explicit = next(explicit.infer())
        except InferenceError:
            return default
        if not isinstance(explicit, (Tuple, List)):
            return default
        str_const = lambda node: (isinstance(node, Const) and
                                  isinstance(node.value, six.string_types))
        for node in explicit.elts:
            if str_const(node):
                infered.append(node.value)
            else:
                try:
                    infered_node = next(node.infer())
                except InferenceError:
                    continue
                if str_const(infered_node):
                    infered.append(infered_node.value)
        return infered
class ComprehensionScope(LocalsDictNodeNG):
    """Base class for scopes introduced by comprehension expressions."""
    def frame(self):
        # a comprehension scope is never a frame itself: delegate upwards
        return self.parent.frame()
    scope_lookup = LocalsDictNodeNG._scope_lookup
class GenExpr(ComprehensionScope):
    """Scoped node for a generator expression."""
    _astroid_fields = ('elt', 'generators')

    def __init__(self):
        self.generators = []
        self.elt = None
        self.locals = {}
class DictComp(ComprehensionScope):
    """Scoped node for a dict comprehension."""
    _astroid_fields = ('key', 'value', 'generators')

    def __init__(self):
        self.generators = []
        self.value = None
        self.key = None
        self.locals = {}
class SetComp(ComprehensionScope):
    """Scoped node for a set comprehension."""
    _astroid_fields = ('elt', 'generators')

    def __init__(self):
        self.generators = []
        self.elt = None
        self.locals = {}
class _ListComp(NodeNG):
    """class representing a ListComp node"""
    _astroid_fields = ('elt', 'generators')
    # shared attribute defaults; filled in by the builder
    elt = None
    generators = None
if sys.version_info >= (3, 0):
    # Python 3: a list comprehension gets its own scope, like genexps
    class ListComp(_ListComp, ComprehensionScope):
        """class representing a ListComp node"""
        def __init__(self):
            self.locals = {}
else:
    # Python 2: a list comprehension shares the enclosing scope
    class ListComp(_ListComp):
        """class representing a ListComp node"""
# Function ###################################################################
def _infer_decorator_callchain(node):
    """ Detect decorator call chaining and see if the
    end result is a static or a classmethod.

    Returns 'classmethod', 'staticmethod' or None when the chain cannot be
    resolved to either builtin.
    """
    current = node
    while True:
        if isinstance(current, CallFunc):
            # follow the call: infer what is actually being called
            try:
                current = next(current.func.infer())
            except InferenceError:
                return
        elif isinstance(current, Function):
            if not current.parent:
                return
            try:
                # TODO: We don't handle multiple inference results right now,
                # because there's no flow to reason when the return
                # is what we are looking for, a static or a class method.
                result = next(current.infer_call_result(current.parent))
                if current is result:
                    # This will lead to an infinite loop, where a decorator
                    # returns itself.
                    return
            except (StopIteration, InferenceError):
                return
            if isinstance(result, (Function, CallFunc)):
                # still a wrapper: keep walking the chain
                current = result
            else:
                if isinstance(result, Instance):
                    result = result._proxied
                if isinstance(result, Class):
                    if (result.name == 'classmethod' and
                            result.root().name == BUILTINS):
                        return 'classmethod'
                    elif (result.name == 'staticmethod' and
                          result.root().name == BUILTINS):
                        return 'staticmethod'
                    else:
                        return
                else:
                    # We aren't interested in anything else returned,
                    # so go back to the function type inference.
                    return
        else:
            return
def _function_type(self):
    """
    Function type, possible values are:
    method, function, staticmethod, classmethod.

    Bound as the ``Function.type`` cached property; falls back to
    ``self._type`` when no decorator changes the answer.
    """
    # Can't infer that this node is decorated
    # with a subclass of `classmethod` where `type` is first set,
    # so do it here.
    if self.decorators:
        for node in self.decorators.nodes:
            if isinstance(node, CallFunc):
                # decorator is itself a call (e.g. @deco(...)): walk the chain
                _type = _infer_decorator_callchain(node)
                if _type is None:
                    continue
                else:
                    return _type
            if not isinstance(node, Name):
                continue
            try:
                # a plain-name decorator: see if it derives from the
                # classmethod/staticmethod builtins
                for infered in node.infer():
                    if not isinstance(infered, Class):
                        continue
                    for ancestor in infered.ancestors():
                        if isinstance(ancestor, Class):
                            if (ancestor.name == 'classmethod' and
                                    ancestor.root().name == BUILTINS):
                                return 'classmethod'
                            elif (ancestor.name == 'staticmethod' and
                                  ancestor.root().name == BUILTINS):
                                return 'staticmethod'
            except InferenceError:
                pass
    return self._type
class Lambda(LocalsDictNodeNG, FilterStmtsMixin):
    """Scoped node for a lambda expression; also the base class of Function."""
    _astroid_fields = ('args', 'body',)
    name = '<lambda>'
    # function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
    type = 'function'
    def __init__(self):
        self.locals = {}
        self.args = []
        self.body = []
    def pytype(self):
        # bound flavours report as instancemethod, the rest as function
        if 'method' in self.type:
            return '%s.instancemethod' % BUILTINS
        return '%s.function' % BUILTINS
    def display_type(self):
        # human-readable type name used in messages
        if 'method' in self.type:
            return 'Method'
        return 'Function'
    def callable(self):
        # a lambda/function is always callable
        return True
    def argnames(self):
        """return a list of argument names"""
        if self.args.args: # maybe None with builtin functions
            names = _rec_get_names(self.args.args)
        else:
            names = []
        if self.args.vararg:
            names.append(self.args.vararg)
        if self.args.kwarg:
            names.append(self.args.kwarg)
        return names
    def infer_call_result(self, caller, context=None):
        """infer what a function is returning when called"""
        return self.body.infer(context)
    def scope_lookup(self, node, name, offset=0):
        """Look up *name*; default values resolve in the enclosing scope."""
        if node in self.args.defaults or node in self.args.kw_defaults:
            frame = self.parent.frame()
            # line offset to avoid that def func(f=func) resolve the default
            # value to the defined function
            offset = -1
        else:
            # check this is not used in function decorators
            frame = self
        return frame._scope_lookup(node, name, offset)
class Function(Statement, Lambda):
    """Scoped node for a ``def`` statement (function or method)."""
    if PY3K:
        _astroid_fields = ('decorators', 'args', 'body', 'returns')
        returns = None
    else:
        _astroid_fields = ('decorators', 'args', 'body')
    special_attributes = set(('__name__', '__doc__', '__dict__'))
    is_function = True
    # attributes below are set by the builder module or by raw factories
    blockstart_tolineno = None
    decorators = None
    _type = "function"
    # computed lazily from the decorators by _function_type
    type = cachedproperty(_function_type)
    def __init__(self, name, doc):
        self.locals = {}
        self.args = []
        self.body = []
        self.name = name
        self.doc = doc
        self.extra_decorators = []
        self.instance_attrs = {}
    @cachedproperty
    def fromlineno(self):
        # lineno is the line number of the first decorator, we want the def
        # statement lineno
        lineno = self.lineno
        if self.decorators is not None:
            lineno += sum(node.tolineno - node.lineno + 1
                          for node in self.decorators.nodes)
        return lineno
    @cachedproperty
    def blockstart_tolineno(self):
        # the block header ends where the argument list ends
        return self.args.tolineno
    def block_range(self, lineno):
        """return block line numbers.
        start from the "def" position whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def getattr(self, name, context=None):
        """this method doesn't look in the instance_attrs dictionary since it's
        done by an Instance proxy at inference time.
        """
        if name == '__module__':
            return [cf(self.root().qname())]
        if name in self.instance_attrs:
            return self.instance_attrs[name]
        return std_special_attributes(self, name, False)
    def is_method(self):
        """return true if the function node should be considered as a method"""
        # check we are defined in a Class, because this is usually expected
        # (e.g. pylint...) when is_method() return True
        return self.type != 'function' and isinstance(self.parent.frame(), Class)
    def decoratornames(self):
        """return a list of decorator qualified names"""
        result = set()
        decoratornodes = []
        if self.decorators is not None:
            decoratornodes += self.decorators.nodes
        decoratornodes += self.extra_decorators
        for decnode in decoratornodes:
            for infnode in decnode.infer():
                result.add(infnode.qname())
        return result
    decoratornames = cached(decoratornames)
    def is_bound(self):
        """return true if the function is bound to an Instance or a class"""
        return self.type == 'classmethod'
    def is_abstract(self, pass_is_abstract=True):
        """Returns True if the method is abstract.
        A method is considered abstract if
        - the only statement is 'raise NotImplementedError', or
        - the only statement is 'pass' and pass_is_abstract is True, or
        - the method is decorated with abc.abstractproperty/abc.abstractmethod
        """
        if self.decorators:
            for node in self.decorators.nodes:
                try:
                    infered = next(node.infer())
                except InferenceError:
                    continue
                if infered and infered.qname() in ('abc.abstractproperty',
                                                   'abc.abstractmethod'):
                    return True
        for child_node in self.body:
            if isinstance(child_node, Raise):
                if child_node.raises_not_implemented():
                    return True
            if pass_is_abstract and isinstance(child_node, Pass):
                return True
            # only the first statement is examined; anything else -> concrete
            return False
        # empty function is the same as function with a single "pass" statement
        if pass_is_abstract:
            return True
    def is_generator(self):
        """return true if this is a generator function"""
        # XXX should be flagged, not computed
        return next(self.nodes_of_class((Yield, YieldFrom),
                                        skip_klass=(Function, Lambda)), False)
    def infer_call_result(self, caller, context=None):
        """infer what a function is returning when called"""
        if self.is_generator():
            yield Generator()
            return
        # This is really a gigantic hack to work around metaclass generators
        # that return transient class-generating functions. Pylint's AST structure
        # cannot handle a base class object that is only used for calling __new__,
        # but does not contribute to the inheritance structure itself. We inject
        # a fake class into the hierarchy here for several well-known metaclass
        # generators, and filter it out later.
        if (self.name == 'with_metaclass' and
                len(self.args.args) == 1 and
                self.args.vararg is not None):
            metaclass = next(caller.args[0].infer(context))
            if isinstance(metaclass, Class):
                c = Class('temporary_class', None)
                c.hide = True
                c.parent = self
                c.bases = [next(b.infer(context)) for b in caller.args[1:]]
                c._metaclass = metaclass
                yield c
                return
        returns = self.nodes_of_class(Return, skip_klass=Function)
        for returnnode in returns:
            if returnnode.value is None:
                yield Const(None)
            else:
                try:
                    for infered in returnnode.value.infer(context):
                        yield infered
                except InferenceError:
                    yield YES
def _rec_get_names(args, names=None):
    """Accumulate every argument name, flattening nested Tuple patterns.

    The same list is grown across recursive calls and returned; a fresh
    list is created when *names* is not supplied.
    """
    collected = [] if names is None else names
    for arg in args:
        if isinstance(arg, Tuple):
            _rec_get_names(arg.elts, collected)
        else:
            collected.append(arg.name)
    return collected
# Class ######################################################################
def _is_metaclass(klass, seen=None):
    """ Return if the given class can be
    used as a metaclass.

    *seen* guards against cycles in the base-class graph across the
    recursive calls.
    """
    if klass.name == 'type':
        return True
    if seen is None:
        seen = set()
    for base in klass.bases:
        try:
            for baseobj in base.infer():
                if baseobj in seen:
                    continue
                else:
                    seen.add(baseobj)
                if isinstance(baseobj, Instance):
                    # not abstract
                    return False
                if baseobj is YES:
                    continue
                if baseobj is klass:
                    continue
                if not isinstance(baseobj, Class):
                    continue
                if baseobj._type == 'metaclass':
                    return True
                if _is_metaclass(baseobj, seen):
                    return True
        except InferenceError:
            continue
    return False
def _class_type(klass, ancestors=None):
    """return a Class node type to differ metaclass, interface and exception
    from 'regular' classes

    The result is cached on ``klass._type``; *ancestors* tracks the classes
    already visited to survive inheritance loops.
    """
    # XXX we have to store ancestors in case we have a ancestor loop
    if klass._type is not None:
        return klass._type
    if _is_metaclass(klass):
        klass._type = 'metaclass'
    elif klass.name.endswith('Interface'):
        klass._type = 'interface'
    elif klass.name.endswith('Exception'):
        klass._type = 'exception'
    else:
        if ancestors is None:
            ancestors = set()
        if klass in ancestors:
            # XXX we are in loop ancestors, and have found no type
            klass._type = 'class'
            return 'class'
        ancestors.add(klass)
        # inherit the first non-'class' type found among direct bases
        for base in klass.ancestors(recurs=False):
            name = _class_type(base, ancestors)
            if name != 'class':
                if name == 'metaclass' and not _is_metaclass(klass):
                    # don't propagate it if the current class
                    # can't be a metaclass
                    continue
                klass._type = base.type
                break
    if klass._type is None:
        klass._type = 'class'
    return klass._type
def _iface_hdlr(iface_node):
    """Default interface handler: accept every candidate interface node.

    Passed to ``interfaces()`` when the caller does not supply a more
    discriminating handler function.
    """
    return True
class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin):
    """Scoped node for a ``class`` statement."""
    # some of the attributes below are set by the builder module or
    # by raw factories
    # a dictionary of class instances attributes
    _astroid_fields = ('decorators', 'bases', 'body') # name
    decorators = None
    special_attributes = set(('__name__', '__doc__', '__dict__', '__module__',
                              '__bases__', '__mro__', '__subclasses__'))
    blockstart_tolineno = None
    _type = None
    _metaclass_hack = False
    hide = False
    type = property(_class_type,
                    doc="class'type, possible values are 'class' | "
                    "'metaclass' | 'interface' | 'exception'")
    def __init__(self, name, doc):
        self.instance_attrs = {}
        self.locals = {}
        self.bases = []
        self.body = []
        self.name = name
        self.doc = doc
    def _newstyle_impl(self, context=None):
        """Compute whether this is a new-style class, caching the result
        on ``self._newstyle``; backs the ``newstyle`` property."""
        if context is None:
            context = InferenceContext()
        if self._newstyle is not None:
            return self._newstyle
        for base in self.ancestors(recurs=False, context=context):
            if base._newstyle_impl(context):
                self._newstyle = True
                break
        klass = self._explicit_metaclass()
        # could be any callable, we'd need to infer the result of klass(name,
        # bases, dict). punt if it's not a class node.
        if klass is not None and isinstance(klass, Class):
            self._newstyle = klass._newstyle_impl(context)
        if self._newstyle is None:
            self._newstyle = False
        return self._newstyle
    _newstyle = None
    newstyle = property(_newstyle_impl,
                        doc="boolean indicating if it's a new style class"
                        "or not")
    @cachedproperty
    def blockstart_tolineno(self):
        # the class header ends at the last base expression, if any
        if self.bases:
            return self.bases[-1].tolineno
        else:
            return self.fromlineno
    def block_range(self, lineno):
        """return block line numbers.
        start from the "class" position whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def pytype(self):
        # 'builtins.type' for new-style classes, 'builtins.classobj' otherwise
        if self.newstyle:
            return '%s.type' % BUILTINS
        return '%s.classobj' % BUILTINS
    def display_type(self):
        # human-readable type name used in messages
        return 'Class'
    def callable(self):
        # calling a class produces an instance, so it is always callable
        return True
    def is_subtype_of(self, type_name, context=None):
        # NOTE(review): returns None (falsy) rather than False on a miss
        if self.qname() == type_name:
            return True
        for anc in self.ancestors(context=context):
            if anc.qname() == type_name:
                return True
    def infer_call_result(self, caller, context=None):
        """infer what a class is returning when called"""
        # a 3-argument call on a `type` subtype builds a new class
        if self.is_subtype_of('%s.type' % (BUILTINS,), context) and len(caller.args) == 3:
            name_node = next(caller.args[0].infer(context))
            if (isinstance(name_node, Const) and
                    isinstance(name_node.value, six.string_types)):
                name = name_node.value
            else:
                yield YES
                return
            result = Class(name, None)
            bases = next(caller.args[1].infer(context))
            if isinstance(bases, (Tuple, List)):
                result.bases = bases.itered()
            else:
                # There is currently no AST node that can represent an 'unknown'
                # node (YES is not an AST node), therefore we simply return YES here
                # although we know at least the name of the class.
                yield YES
                return
            result.parent = caller.parent
            yield result
        else:
            yield Instance(self)
    def scope_lookup(self, node, name, offset=0):
        """Look up *name*; base-class expressions resolve in the enclosing
        scope."""
        if node in self.bases:
            frame = self.parent.frame()
            # line offset to avoid that class A(A) resolve the ancestor to
            # the defined class
            offset = -1
        else:
            frame = self
        return frame._scope_lookup(node, name, offset)
    # list of parent class as a list of string (i.e. names as they appear
    # in the class definition) XXX bw compat
    def basenames(self):
        return [bnode.as_string() for bnode in self.bases]
    basenames = property(basenames)
    def ancestors(self, recurs=True, context=None):
        """return an iterator on the node base classes in a prefixed
        depth first order
        :param recurs:
          boolean indicating if it should recurse or return direct
          ancestors only
        """
        # FIXME: should be possible to choose the resolution order
        # FIXME: inference make infinite loops possible here
        yielded = set([self])
        if context is None:
            context = InferenceContext()
        if sys.version_info[0] >= 3:
            # implicit `object` ancestor on Python 3
            if not self.bases and self.qname() != 'builtins.object':
                yield builtin_lookup("object")[1][0]
                return
        for stmt in self.bases:
            try:
                for baseobj in stmt.infer(context):
                    if not isinstance(baseobj, Class):
                        if isinstance(baseobj, Instance):
                            baseobj = baseobj._proxied
                        else:
                            # duh ?
                            continue
                    if not baseobj.hide:
                        if baseobj in yielded:
                            continue # cf xxx above
                        yielded.add(baseobj)
                        yield baseobj
                    if recurs:
                        for grandpa in baseobj.ancestors(recurs=True,
                                                         context=context):
                            if grandpa in yielded:
                                continue # cf xxx above
                            yielded.add(grandpa)
                            yield grandpa
            except InferenceError:
                # XXX log error ?
                continue
    def local_attr_ancestors(self, name, context=None):
        """return an iterator on astroid representation of parent classes
        which have <name> defined in their locals
        """
        for astroid in self.ancestors(context=context):
            if name in astroid:
                yield astroid
    def instance_attr_ancestors(self, name, context=None):
        """return an iterator on astroid representation of parent classes
        which have <name> defined in their instance attribute dictionary
        """
        for astroid in self.ancestors(context=context):
            if name in astroid.instance_attrs:
                yield astroid
    def has_base(self, node):
        """Whether *node* appears among the direct base expressions."""
        return node in self.bases
    def local_attr(self, name, context=None):
        """return the list of assign node associated to name in this class
        locals or in its parents
        :raises `NotFoundError`:
          if no attribute with this name has been find in this class or
          its parent classes
        """
        try:
            return self.locals[name]
        except KeyError:
            # get if from the first parent implementing it if any
            for class_node in self.local_attr_ancestors(name, context):
                return class_node.locals[name]
        raise NotFoundError(name)
    local_attr = remove_nodes(local_attr, DelAttr)
    def instance_attr(self, name, context=None):
        """return the astroid nodes associated to name in this class instance
        attributes dictionary and in its parents
        :raises `NotFoundError`:
          if no attribute with this name has been find in this class or
          its parent classes
        """
        # Return a copy, so we don't modify self.instance_attrs,
        # which could lead to infinite loop.
        values = list(self.instance_attrs.get(name, []))
        # get all values from parents
        for class_node in self.instance_attr_ancestors(name, context):
            values += class_node.instance_attrs[name]
        if not values:
            raise NotFoundError(name)
        return values
    instance_attr = remove_nodes(instance_attr, DelAttr)
    def instanciate_class(self):
        """return Instance of Class node, else return self"""
        return Instance(self)
    def getattr(self, name, context=None):
        """this method doesn't look in the instance_attrs dictionary since it's
        done by an Instance proxy at inference time.
        It may return a YES object if the attribute has not been actually
        found but a __getattr__ or __getattribute__ method is defined
        """
        values = self.locals.get(name, [])
        if name in self.special_attributes:
            if name == '__module__':
                return [cf(self.root().qname())] + values
            # FIXME: do we really need the actual list of ancestors?
            # returning [Tuple()] + values don't break any test
            # this is ticket http://www.logilab.org/ticket/52785
            # XXX need proper meta class handling + MRO implementation
            if name == '__bases__' or (name == '__mro__' and self.newstyle):
                node = Tuple()
                # NOTE(review): ancestors() is a generator, so node.items is
                # assigned a generator here, not a list
                node.items = self.ancestors(recurs=True, context=context)
                return [node] + values
            return std_special_attributes(self, name)
        # don't modify the list in self.locals!
        values = list(values)
        for classnode in self.ancestors(recurs=True, context=context):
            values += classnode.locals.get(name, [])
        if not values:
            raise NotFoundError(name)
        return values
    def igetattr(self, name, context=None):
        """inferred getattr, need special treatment in class to handle
        descriptors
        """
        # set lookup name since this is necessary to infer on import nodes for
        # instance
        if not context:
            context = InferenceContext()
        try:
            for infered in _infer_stmts(self.getattr(name, context), context,
                                        frame=self, lookupname=name):
                # yield YES object instead of descriptors when necessary
                if not isinstance(infered, Const) and isinstance(infered, Instance):
                    try:
                        infered._proxied.getattr('__get__', context)
                    except NotFoundError:
                        yield infered
                    else:
                        yield YES
                else:
                    yield function_to_method(infered, self)
        except NotFoundError:
            if not name.startswith('__') and self.has_dynamic_getattr(context):
                # class handle some dynamic attributes, return a YES object
                yield YES
            else:
                raise InferenceError(name)
    def has_dynamic_getattr(self, context=None):
        """return True if the class has a custom __getattr__ or
        __getattribute__ method
        """
        # need to explicitly handle optparse.Values (setattr is not detected)
        if self.name == 'Values' and self.root().name == 'optparse':
            return True
        try:
            self.getattr('__getattr__', context)
            return True
        except NotFoundError:
            #if self.newstyle: XXX cause an infinite recursion error
            try:
                getattribute = self.getattr('__getattribute__', context)[0]
                if getattribute.root().name != BUILTINS:
                    # class has a custom __getattribute__ defined
                    return True
            except NotFoundError:
                pass
        return False
    def methods(self):
        """return an iterator on all methods defined in the class and
        its ancestors
        """
        done = {}
        for astroid in chain(iter((self,)), self.ancestors()):
            for meth in astroid.mymethods():
                if meth.name in done:
                    continue
                done[meth.name] = None
                yield meth
    def mymethods(self):
        """return an iterator on all methods defined in the class"""
        for member in self.values():
            if isinstance(member, Function):
                yield member
    def interfaces(self, herited=True, handler_func=_iface_hdlr):
        """return an iterator on interfaces implemented by the given
        class node
        """
        # FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
        try:
            implements = Instance(self).getattr('__implements__')[0]
        except NotFoundError:
            return
        if not herited and not implements.frame() is self:
            return
        found = set()
        missing = False
        for iface in unpack_infer(implements):
            if iface is YES:
                missing = True
                continue
            if not iface in found and handler_func(iface):
                found.add(iface)
                yield iface
        if missing:
            raise InferenceError()
    _metaclass = None
    def _explicit_metaclass(self):
        """ Return the explicit defined metaclass
        for the current class.
        An explicit defined metaclass is defined
        either by passing the ``metaclass`` keyword argument
        in the class definition line (Python 3) or (Python 2) by
        having a ``__metaclass__`` class attribute, or if there are
        no explicit bases but there is a global ``__metaclass__`` variable.
        """
        # pick up a metaclass propagated through a hidden with_metaclass base
        for base in self.bases:
            try:
                for baseobj in base.infer():
                    if isinstance(baseobj, Class) and baseobj.hide:
                        self._metaclass = baseobj._metaclass
                        self._metaclass_hack = True
                        break
            except InferenceError:
                pass
        if self._metaclass:
            # Expects this from Py3k TreeRebuilder
            try:
                return next(node for node in self._metaclass.infer()
                            if node is not YES)
            except (InferenceError, StopIteration):
                return None
        if sys.version_info >= (3, ):
            return None
        if '__metaclass__' in self.locals:
            assignment = self.locals['__metaclass__'][-1]
        elif self.bases:
            return None
        elif '__metaclass__' in self.root().locals:
            # module-level __metaclass__ applies only if assigned before us
            assignments = [ass for ass in self.root().locals['__metaclass__']
                           if ass.lineno < self.lineno]
            if not assignments:
                return None
            assignment = assignments[-1]
        else:
            return None
        try:
            infered = next(assignment.infer())
        except InferenceError:
            return
        if infered is YES: # don't expose this
            return None
        return infered
    def metaclass(self):
        """ Return the metaclass of this class.
        If this class does not define explicitly a metaclass,
        then the first defined metaclass in ancestors will be used
        instead.
        """
        klass = self._explicit_metaclass()
        if klass is None:
            for parent in self.ancestors():
                klass = parent.metaclass()
                if klass is not None:
                    break
        return klass
    def has_metaclass_hack(self):
        """Whether the metaclass was picked up from a hidden
        with_metaclass-style base (see _explicit_metaclass)."""
        return self._metaclass_hack
    def _islots(self):
        """ Return an iterator with the inferred slots. """
        if '__slots__' not in self.locals:
            return
        for slots in self.igetattr('__slots__'):
            # check if __slots__ is a valid type
            for meth in ITER_METHODS:
                try:
                    slots.getattr(meth)
                    break
                except NotFoundError:
                    continue
            else:
                continue
            if isinstance(slots, Const):
                # a string. Ignore the following checks,
                # but yield the node, only if it has a value
                if slots.value:
                    yield slots
                continue
            if not hasattr(slots, 'itered'):
                # we can't obtain the values, maybe a .deque?
                continue
            if isinstance(slots, Dict):
                values = [item[0] for item in slots.items]
            else:
                values = slots.itered()
            if values is YES:
                continue
            for elt in values:
                try:
                    for infered in elt.infer():
                        if infered is YES:
                            continue
                        if (not isinstance(infered, Const) or
                                not isinstance(infered.value, str)):
                            continue
                        if not infered.value:
                            continue
                        yield infered
                except InferenceError:
                    continue
    # Cached, because inferring them all the time is expensive
    @cached
    def slots(self):
        """ Return all the slots for this node. """
        return list(self._islots())
| [
"mara.kovalcik@gmail.com"
] | mara.kovalcik@gmail.com |
209fce0e9ab23269f0f2c394e287b9bb821a5323 | b87b4c16c0d596fff5069c3210040eb8565417fc | /Code forces/F. Way Too Long Words.py | b5f2bae7e1bffafcb88f854ea9e549b169cb86f3 | [] | no_license | Seif-Fathi/Problem-Solving | e321411ec7e59c9c4f397f38896d6a1788acc1bd | ac0f3a81179e2d3418d47ab77e34188e1470425c | refs/heads/master | 2023-04-25T13:03:50.780362 | 2021-05-15T18:39:17 | 2021-05-15T18:39:17 | 367,705,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
# Codeforces 71A "Way Too Long Words": a word longer than 10 characters is
# abbreviated as <first letter><count of inner letters><last letter>.
if __name__ == '__main__':
    for _ in range(int(input())):
        word = input().lower()
        if len(word) <= 10:
            print(word)
        else:
            print(word[0], len(word) - 2, word[-1], sep='')
| [
"seif.fathi22@gmail.com"
] | seif.fathi22@gmail.com |
61c4329bc9311c20d6ca2fdca35994a57d850ee5 | 38fff7bdefd8d62a740d51329b50d0e1e49258bb | /projects/oscrypto/fuzz_keys.py | 891da04cd3fd9abfb86f3c556edf67a2c729e495 | [
"Apache-2.0"
] | permissive | google/oss-fuzz | 026384c2ada61ef68b147548e830f60730c5e738 | f0275421f84b8f80ee767fb9230134ac97cb687b | refs/heads/master | 2023-08-31T23:30:28.157702 | 2023-08-31T21:49:30 | 2023-08-31T21:49:30 | 63,809,205 | 9,438 | 2,315 | Apache-2.0 | 2023-09-14T20:32:19 | 2016-07-20T19:39:50 | Shell | UTF-8 | Python | false | false | 938 | py | #!/usr/bin/python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import atheris
from oscrypto import keys
def TestOneInput(data):
    """Atheris fuzz target: feed the raw input bytes to the PKCS#12 parser.

    ValueError and OSError are swallowed — the original harness treats both
    as ordinary parse rejections of malformed input — so only unexpected
    exceptions surface to the fuzzing engine.
    """
    try:
        keys.parse_pkcs12(data, b'123')
    except (ValueError, OSError):
        pass
def main():
    """Entry point: instrument the process and start the fuzzing loop."""
    # Instrument all loaded modules so atheris can collect coverage.
    atheris.instrument_all()
    # Register TestOneInput as the fuzz target; sys.argv is forwarded so
    # fuzzer command-line flags reach the engine.
    atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
    # Hand control to the fuzzer.
    atheris.Fuzz()
# Run the fuzzer only when executed as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
b0beb7eb20985e2746ae25244fe46cc036f72866 | 07eea58a2c39932ae75fef81981e3bc2a392d35f | /robotiq_c_model_control/src/robotiq_c_model_control/baseCModel.py | 2a47199aab083d0d8d37ad524df155ca0a2a5a94 | [
"BSD-2-Clause"
] | permissive | jhu-lcsr/robotiq | dda4a79460e45bf85c69e8db7ed8cd00f2aa8349 | c2880525ef9a5e1ea8854dd7b57007e3b148fc91 | refs/heads/kinetic-devel | 2020-04-05T20:09:45.839988 | 2017-03-03T17:30:38 | 2017-03-03T17:30:38 | 22,268,277 | 3 | 3 | BSD-2-Clause | 2019-03-28T19:25:20 | 2014-07-25T19:30:12 | Python | UTF-8 | Python | false | false | 4,874 | py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Robotiq, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Robotiq, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2012, Robotiq, Inc.
# Revision $Id$
"""@package docstring
Module baseCModel: defines a base class for handling command and status of the Robotiq C-Model gripper.
After being instanciated, a 'client' member must be added to the object. This client depends on the communication protocol used by the Gripper. As an example, the ROS node 'CModelTcpNode.py' instanciate a robotiqBaseCModel and adds a client defined in the module comModbusTcp.
"""
from robotiq_c_model_control.msg import _CModel_robot_input as inputMsg
from robotiq_c_model_control.msg import _CModel_robot_output as outputMsg
class robotiqBaseCModel:
    """Protocol-agnostic base for commanding a Robotiq C-Model gripper.

    After construction, a ``client`` attribute implementing
    ``sendCommand(message)`` and ``getStatus(numBytes)`` must be attached;
    it supplies the actual transport (e.g. Modbus TCP).
    """

    def __init__(self):
        # Byte list for the next outgoing command; filled by refreshCommand().
        self.message = []
        # Note: a ".client" member must be added to the object after creation.

    def verifyCommand(self, command):
        """Clamp every field of *command* to its legal range.

        The command object is modified in place and also returned.
        """
        # Single-bit flags: restricted to 0 or 1.
        command.rACT = min(1, max(0, command.rACT))
        command.rGTO = min(1, max(0, command.rGTO))
        command.rATR = min(1, max(0, command.rATR))
        # Byte-sized setpoints: restricted to 0..255.
        command.rPR = min(255, max(0, command.rPR))
        command.rSP = min(255, max(0, command.rSP))
        command.rFR = min(255, max(0, command.rFR))
        return command

    def refreshCommand(self, command):
        """Rebuild the outgoing byte list from *command*.

        The bytes are transmitted on the next sendCommand() call.
        """
        command = self.verifyCommand(command)
        # Byte 0 packs the action flag bits; bytes 1-2 are reserved (zero).
        action_byte = command.rACT + (command.rGTO << 3) + (command.rATR << 4)
        self.message = [
            action_byte,
            0,
            0,
            command.rPR,
            command.rSP,
            command.rFR,
        ]

    def sendCommand(self):
        """Transmit the current command bytes through the attached client."""
        self.client.sendCommand(self.message)

    def getStatus(self):
        """Poll the gripper and return its state as a CModel_robot_input msg."""
        # Read 6 status bytes from the gripper.
        raw = self.client.getStatus(6)
        message = inputMsg.CModel_robot_input()
        # Unpack the bit fields of the first status byte.
        message.gACT = (raw[0] >> 0) & 0x01
        message.gGTO = (raw[0] >> 3) & 0x01
        message.gSTA = (raw[0] >> 4) & 0x03
        message.gOBJ = (raw[0] >> 6) & 0x03
        # Remaining bytes map directly onto message fields.
        message.gFLT = raw[2]
        message.gPR = raw[3]
        message.gPO = raw[4]
        message.gCU = raw[5]
        return message
| [
"jrgnicho@gmail.com"
] | jrgnicho@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.