| Column | dtype | Stats |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 281 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 57 |
| license_type | string | 2 distinct values |
| repo_name | string | lengths 6 to 116 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 313 distinct values |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k to 668M, nullable (⌀) |
| star_events_count | int64 | 0 to 102k |
| fork_events_count | int64 | 0 to 38.2k |
| gha_license_id | string | 17 distinct values |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 distinct values |
| src_encoding | string | 20 distinct values |
| language | string | 1 distinct value |
| is_vendor | bool | 2 distinct values |
| is_generated | bool | 2 distinct values |
| length_bytes | int64 | 4 to 6.02M |
| extension | string | 78 distinct values |
| content | string | lengths 2 to 6.02M |
| authors | list | lengths 1 to 1 |
| author | string | lengths 0 to 175 |
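Each record below is one row of this table; the `content` column holds the raw file text. For orientation, here is a minimal sketch of how rows with this schema could be scanned with the Hugging Face `datasets` library. The dataset identifier, split name, and size cutoff are placeholders, not values taken from this card:

```python
from datasets import load_dataset

# Placeholder dataset path; substitute the real identifier for this dataset.
rows = load_dataset("your-org/your-python-corpus", split="train", streaming=True)

for row in rows:
    # Column names follow the schema above.
    if row["is_vendor"] or row["is_generated"]:
        continue  # skip vendored or generated files
    if row["length_bytes"] > 1_000_000:  # arbitrary size cutoff for illustration
        continue
    print(row["repo_name"], row["path"], row["length_bytes"], row["license_type"])
```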
Row 1

- blob_id: 28406623a7d58705959d97d793833a218b938765
- directory_id: 07a59784d27ab7dcbba1b893345c32355d45947d
- path: /docs/conf.py
- content_id: d7f8483ad08bf37c060471a1d7e3c3e6cd2f675d
- detected_licenses: []
- license_type: no_license
- repo_name: konikkar/Covid-19-Dashboard
- snapshot_id: 822a8a08e98cf041b3b580d50878df38d662795e
- revision_id: 3e1a7ebd9bf27ac880aced2ad500e5a5a0cf5c4e
- branch_name: refs/heads/master
- visit_date: 2022-12-05T15:56:16.033220
- revision_date: 2020-08-25T12:46:36
- committer_date: 2020-08-25T12:46:36
- github_id: 288,080,495
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 7,789
- extension: py
- content (file text follows):
# -*- coding: utf-8 -*-
#
# ADS_COVID-19 documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ADS_COVID-19'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ads_covid-19doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'ads_covid-19.tex',
u'ADS_COVID-19 Documentation',
u"Roshan", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ads_covid-19', u'ADS_COVID-19 Documentation',
[u"Roshan"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ads_covid-19', u'ADS_COVID-19 Documentation',
u"Roshan", 'ADS_COVID-19',
'applied data science on COVID-19 data', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

- authors: ["69574081+konikkar@users.noreply.github.com"]
- author: 69574081+konikkar@users.noreply.github.com
Row 2

- blob_id: 54ba8db9db77211eab644e3a6209be27a079a967
- directory_id: 77f76ddf9b33052fc39532aaaeded5e4971ad865
- path: /migrations/versions/77b4b6ae646d_database_update.py
- content_id: 28f9b3701beff7c0a15d11363d0cf31297911966
- detected_licenses: []
- license_type: no_license
- repo_name: PythonPerfect/PythonPerfect
- snapshot_id: 32c8c8a04999f4c49df19b21e265b083b3e93141
- revision_id: ad70e07d1abaf272d5ad4866189a25a9ad4d5db1
- branch_name: refs/heads/master
- visit_date: 2023-05-12T22:28:57.225071
- revision_date: 2021-05-18T03:41:38
- committer_date: 2021-05-18T03:41:38
- github_id: 358,127,863
- star_events_count: 0
- fork_events_count: 2
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 2,723
- extension: py
- content (file text follows):
"""Database update
Revision ID: 77b4b6ae646d
Revises: ed279c7f2160
Create Date: 2021-05-12 08:39:04.816417
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '77b4b6ae646d'
down_revision = 'ed279c7f2160'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('content__viewed',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('viewed', sa.Boolean(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('content_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['content_id'], ['content.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('question__response',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('response', sa.Integer(), nullable=True),
sa.Column('question_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['question_id'], ['question.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('answer')
with op.batch_alter_table('question', schema=None) as batch_op:
batch_op.add_column(sa.Column('answer', sa.String(length=32), nullable=True))
batch_op.add_column(sa.Column('question', sa.String(length=256), nullable=True))
batch_op.drop_column('body')
batch_op.drop_column('user_responce')
with op.batch_alter_table('quiz', schema=None) as batch_op:
batch_op.add_column(sa.Column('title', sa.String(length=50), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('quiz', schema=None) as batch_op:
batch_op.drop_column('title')
with op.batch_alter_table('question', schema=None) as batch_op:
batch_op.add_column(sa.Column('user_responce', sa.INTEGER(), nullable=True))
batch_op.add_column(sa.Column('body', sa.VARCHAR(length=140), nullable=True))
batch_op.drop_column('question')
batch_op.drop_column('answer')
op.create_table('answer',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('question_id', sa.INTEGER(), nullable=True),
sa.Column('answer', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['question_id'], ['question.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('question__response')
op.drop_table('content__viewed')
# ### end Alembic commands ###

- authors: ["wassuprt@gmail.com"]
- author: wassuprt@gmail.com
Row 3

- blob_id: b5941ae0f40b6c52b788e8a07bffd43c07d5b679
- directory_id: d2fd372b98a85f91a017f4bc188d7cc198d66e8e
- path: /venv/Common/Common.py
- content_id: 584e824d393b872f1cbc12821af32426c83b772b
- detected_licenses: []
- license_type: no_license
- repo_name: v-anjia/ICU__STRESS
- snapshot_id: 28cc5b7fd780ffcec2411572f40361f1119ecc1c
- revision_id: 04b2de675db6c4f40bd50dc5bdceb1a2604e2a4d
- branch_name: refs/heads/master
- visit_date: 2022-11-11T00:23:19.414529
- revision_date: 2020-06-27T23:49:06
- committer_date: 2020-06-27T23:49:06
- github_id: 272,986,837
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 101,458
- extension: py
- content (file text follows):
'''
holds shared public classes and functions
date: 2019-3-28
@author antony weijiang
'''
#coding=utf-8
import subprocess
import re
import os
import time
import sys
import uiautomator2 as u2
import requests
from bs4 import BeautifulSoup
from log import logger as loger
import json
import zipfile
import serial
import random
# from Common import LogCatFile as lcf
# from Common import ScreenCap as scp
from Common import Signal_Common as SC
from Common import Signal_List as SL
logger = loger.Current_Module()
busybox = "/oem/bin/busybox"
json_name = "/update/data.json"
log_path = "/update/log/FOTA_HU_*.log"
data_size = "200"
package_path = "/update/package/hu/mpu"
# libHufota = "/update/libHUfota/libHUfota.log"
libHufota = "/update/log/libHUfota.log"
lvlog = "/sdcard/lvlog/com.living.ota/normal/*.log"
lvlog_path = "/sdcard/lvlog/com.living.ota/normal/"
settinglog = "/sdcard/lvlog/com.android.settings/normal/*.log"
network_device = "tbox0"
tbox_version = "/update/version.txt"
logcat_object = None
'''
define class for hu devices
'''
class ADB_SN(object):
'''
get sn number
'''
def __init__(self):
self.sn = "unknown"
def set_sn(self,sn):
self.sn = sn
def get_sn(self):
return self.sn
def get_sn_from_adb_command(self):
'''
function:get sn serial number
:return:
'''
cmd = 'adb devices'
rtn = subprocess.check_output(cmd,shell=True)
p = re.compile("\n([\w]*)\t")
return re.findall(p, rtn.decode())
# def check_adb_status(self):
# '''
# function:check adb device status
# :return:
# '''
# output = subprocess.Popen('adb devices', stdout=subprocess.PIPE, shell=True).\
# stdout.read()
# if b'not running' in output or b'error' in output \
# or output == b'List of devices attached\r\n\r\n':
# logger.log_error('adb connect error,exit',\
# sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
# sys.exit(-1)
# else:
# return True
def check_adb_status(self):
'''
function:check adb device status
:return:
'''
output = subprocess.Popen('adb devices', stdout=subprocess.PIPE, shell=True).\
stdout.read()
if b'not running' in output or b'error' in output \
or output == b'List of devices attached\r\n\r\n':
logger.log_error('adb connect error,exit',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
else:
return True
@property
def isConnecting(self):
if str.encode(self.sn) in subprocess.check_output\
('adb -s %s devices' % self.sn, shell=True):
return True
return False
# def wait_adb(self,sn):
# os.system('adb -s %s wait-for-device' % sn)
# while True:
# time.sleep(10)
# retryRet = subprocess.Popen('adb -s %s shell "getprop | grep boot_completed"' % sn, stdout=subprocess.PIPE,
# shell=True).stdout.read()
# if str.encode('sys.boot_completed') not in retryRet and str.encode(self.sn) not in subprocess.check_output\
# ('adb -s %s devices' % self.sn, shell=True):
# log_warn('%s waitting for device boot up' % sn)
# log_warn('%s waitting for adb connected')
# else:
# logger.info('=====>%s: boot into system success' % sn)
# logger.info('=====>%s: adb has connected' % sn)
# return True
def wait_adb(self,delay_time):
'''
function wait adb devices
:param delay_time:
:return:
'''
os.system('adb -s %s wait-for-device' % self.sn)
for i in range(delay_time):
if str.encode(self.sn) in subprocess.check_output\
('adb -s %s devices' % self.sn, shell=True):
logger.log_info('adb has connected',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return True
elif i == delay_time:
logger.log_error('device: %s boot to adb FAIL' % self.sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
else:
logger.log_info('wait adb 10s: %d' % i, \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
time.sleep(10)
def wait_ui(self):
'''
function wait hu device wake up
:return:
'''
for i in range(2):
logger.log_info('wait UI 60s: %d' % i,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
if 'sys.boot_completed' in str(subprocess.check_output\
('adb -s %s shell "getprop|grep boot_completed"' % self.sn,shell=True)):
return True
elif i == 4:
logger.log_error('device: %s boot to ui FAIL' % self.sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
else:
time.sleep(10)
class Serial(object):
def __init__(self):
self.serial_port = None
self.serial_baudrate = 115200
self.serial_bytesize = None
self.serial_parity = 'N'
self.serial_stopbits = 1
self.package = Install_Package()
def set_serialport(self):
self.serial_port = self.package.update_fota_package()[4]
def get_serialport(self):
return self.serial_port
def set_serial_baudrate(self, serial_baudrate):
self.serial_baudrate = serial_baudrate
def get_serial_baudrate(self):
return self.serial_baudrate
def set_serial_bytesize(self,serial_bytesize):
self.serial_bytesize = serial_bytesize
def get_serial_bytesize(self):
return self.serial_bytesize
def set_serial_parity(self,serial_stopbits):
self.serial_stopbits = serial_stopbits
def get_serial_parity(self):
return self.serial_stopbits
def open_adb_through_serial(self,count):
'''
function:open adb port
:return:
'''
self.set_serialport()
ser = serial.Serial(self.get_serialport(),self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
time.sleep(5)
ser.write(("ps -ef|grep activate|grep -v grep| %s awk '{print $2}'|xargs kill -9 \r\n" %(busybox)).encode('utf-8'))
time.sleep(5)
ser.write("content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:3\r\n".encode('utf-8'))
time.sleep(5)
ser.write("input keyevent 3\r\n".encode('utf-8'))
time.sleep(5)
# ser.write("am start -n com.android.settings/com.android.settings.livingui.LVSettingsActivity\r\n".encode('utf-8'))
ser.write("am start -n com.android.settings/com.android.settings.livingui.LVSettingHomePageActivity\r\n".encode('utf-8'))
time.sleep(5)
ser.write("input swipe 300 1452 500 1452 1000\r\n".encode('utf-8'))
time.sleep(5)
ser.write("input swipe 379 1700 379 0 500\r\n".encode('utf-8'))
time.sleep(5)
for i in range(count):
ser.write("input tap 969 878\r\n".encode('utf-8'))
time.sleep(5)
ser.close()
def wait_ui_through_serial(self):
self.set_serialport()
ser = serial.Serial(self.get_serialport(), self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
time.sleep(5)
while True:
for i in range(200):
ser.write("getprop|grep boot_completed\r\n".encode('utf-8'))
if 'sys.boot_completed' in ser.read_all().decode('utf-8'):
return True
else:
time.sleep(9)
return False
def enter_fastboot_mode(self):
self.set_serialport()
ser = serial.Serial(self.get_serialport(), self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
ser.write("reboot fastboot\r\n".encode('utf-8'))
ser.write("reboot fastboot\r\n".encode('utf-8'))
time.sleep(10)
def enter_softupgrade_page(self):
self.set_serialport()
ser = serial.Serial(self.get_serialport(), self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
time.sleep(5)
ser.write(("ps -ef|grep activate|grep -v grep| %s awk '{print $2}'|xargs kill -9 \r\n" %(busybox)).encode('utf-8'))
time.sleep(5)
ser.write("content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:3\r\n".encode('utf-8'))
time.sleep(5)
ser.write("input keyevent 3\r\n".encode('utf-8'))
time.sleep(5)
ser.write("am start -n com.android.settings/com.android.settings.livingui.LVSettingHomePageActivity\r\n".encode('utf-8'))
# ser.write("am start -n com.android.settings/com.android.settings.livingui.LVSettingsActivity\r\n".encode('utf-8'))
time.sleep(5)
ser.write("am start -n com.living.ota/com.living.ota.MainActivity\r\n".encode('utf-8'))
time.sleep(5)
def cancle_activeupgrade_through_settings(self):
self.set_serialport()
ser = serial.Serial(self.get_serialport(), self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
time.sleep(5)
#setting-->soft upgrade
ser.write("input swipe 400 1570 600 1570 1000\r\n".encode('utf-8'))
time.sleep(20)
# abandon the upgrade
ser.write("input swipe 260 1300 360 1300 1000\r\n".encode('utf-8'))
time.sleep(5)
# go back
ser.write("input swipe 50 190 80 190 1000\r\n".encode('utf-8'))
time.sleep(5)
# # skip
# ser.write("input swipe 700 1400 800 1400 1000\r\n".encode('utf-8'))
# time.sleep(5)
# # exit installation
# ser.write("input swipe 650 1100 750 1100 1000\r\n".encode('utf-8'))
# time.sleep(5)
def activeupgrade_through_settings(self):
self.set_serialport()
ser = serial.Serial(self.get_serialport(), self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
time.sleep(5)
# Settings --> software upgrade: install now
ser.write("input swipe 400 1570 600 1570 1000\r\n".encode('utf-8'))
time.sleep(20)
# install now (step 1)
ser.write("input swipe 260 1200 360 1200 1000\r\n".encode('utf-8'))
time.sleep(5)
# abandon the upgrade
# ser.write("input swipe 260 1400 360 1400 1000\r\n".encode('utf-8'))
# time.sleep(5)
# # skip
# ser.write("input swipe 700 1400 800 1400 1000\r\n".encode('utf-8'))
# time.sleep(5)
# confirm installation
# ser.write("input swipe 250 1100 250 1100 1000\r\n".encode('utf-8'))
# time.sleep(5)
# exit installation
# ser.write("input swipe 650 1100 750 1100 1000\r\n".encode('utf-8'))
# time.sleep(5)
def active_upgrade(self):
self.set_serialport()
ser = serial.Serial(self.get_serialport(), self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
time.sleep(5)
# install now
ser.write("input swipe 400 1750 600 1750 1000\r\n".encode('utf-8'))
# ser.write("input swipe 400 1690 600 1690 1000\r\n".encode('utf-8'))
time.sleep(20)
# install now (step 1)
ser.write("input swipe 260 1300 360 1300 1000\r\n".encode('utf-8'))
time.sleep(5)
# cancel
#ser.write("input swipe 900 120 950 120 1000\r\n".encode('utf-8'))
# Settings --> software upgrade: install now
# ser.write("input swipe 400 1570 600 1570 1000\r\n".encode('utf-8'))
# time.sleep(20)
# abandon the upgrade
# ser.write("input swipe 260 1300 360 1300 1000\r\n".encode('utf-8'))
# time.sleep(5)
# skip
# ser.write("input swipe 700 1400 800 1400 1000\r\n".encode('utf-8'))
# time.sleep(5)
# confirm installation
# ser.write("input swipe 250 1100 250 1100 1000\r\n".encode('utf-8'))
# time.sleep(5)
# exit installation
# ser.write("input swipe 650 1100 750 1100 1000\r\n".encode('utf-8'))
# time.sleep(5)
def cancel_activeupgrade(self):
self.set_serialport()
ser = serial.Serial(self.get_serialport(), self.get_serial_baudrate())
ser.write("su\r\n".encode('utf-8'))
time.sleep(5)
# cancel
ser.write("input swipe 900 120 950 120 1000\r\n".encode('utf-8'))
time.sleep(5)
'''
this class for install package
'''
class Install_Package(object):
def __init__(self):
pass
def curDate(self):
'''
function:get current date
:return:
'''
curDate = time.strftime('%Y%m%d', time.localtime())
return curDate
def oldDate(self):
'''
function:get old date from date_for_fota
:return:
'''
logger.log_info("%s" %(self.update_fota_package()[1]),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return self.update_fota_package()[1]
def down_fastboot_package_to_local(self,url_link,date):
'''
function:download 'fastboot.zip' to local
:param url_link:
:param date:
:return:
'''
if not os.path.exists("Common/update"):
os.makedirs("Common/update")
urls =r'%s%s' %(url_link[0],date)
logger.log_info("%s" %(urls),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
site = requests.get(urls)
text = site.text
site.close()
soup = BeautifulSoup((text), "html5lib")
alist = soup.find_all('a')
try:
ver = alist[6]
except:
logger.log_critical('has no update for daily test',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
sys.exit(-1)
HDver=ver.text
print ("%s" %(HDver))
urlfastboot = '%s/%s' % (urls, HDver)
print ("%s" %(urlfastboot))
logger.log_info('starting download fastboot(fastboot.zip) to local , pls wait ...',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
site2 = requests.get(urlfastboot, stream=True)
with open("Common/update/fastboot.zip",'wb') as ofp:
for chunk in site2.iter_content(chunk_size=1024):
ofp.write(chunk)
logger.log_info('fastboot.zip download finished !',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def unzip_fastboot_package(self):
'''
function:unzip fastboot.zip
:return:
'''
fast_boot = "Common/update/fastboot.zip"
try:
if os.path.exists("%s" %(fast_boot)):
zf = zipfile.ZipFile("%s" %(fast_boot), 'r')
logger.log_info("start unzip fastboot.zip package",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
zf.extractall("Common/update/")
else:
logger.log_error("has no fastboot.zip package",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
sys.exit(-1)
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
sys.exit(-1)
'''
function for ase-1 test
'''
def down_package_to_local(self,url_link,date):
'''
function:download 'mcuupgrade.bin' and 'update.zip' to local
:param url_link:
:param date:
:return:
'''
if not os.path.exists("Common/update"):
os.makedirs("Common/update")
urls =r'%s%s' %(url_link[0],date)
logger.log_info("%s" %(urls),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
site = requests.get(urls)
text = site.text
site.close()
soup = BeautifulSoup((text), "html5lib")
alist = soup.find_all('a')
try:
ver = alist[5]
except:
logger.log_critical('has no update for daily test',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
sys.exit(-1)
HDver=ver.text
urlMCU = '%s/%s%s'%(urls,HDver,'mcuupgrade.bin')
urlos = '%s/%s%s'%(urls,HDver,'update.zip')
logger.log_info('starting download MCU(mcuupgrade.bin) to local , pls wait ...',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
logger.log_info("link is :%s" %(urlMCU),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
site2 = requests.get(urlMCU,stream = True)
with open("Common/update/mcuupgrade.bin",'wb') as ofp:
for chunk in site2.iter_content(chunk_size=512):
ofp.write(chunk)
logger.log_info('MCU download finished !',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
logger.log_info('starting download OS(update.zip) to local, pls wait ...',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
logger.log_info("link is :%s" %(urlos),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
site3 = requests.get(urlos,stream = True)
with open("Common/update/update.zip",'wb') as ofp:
for chunk in site3.iter_content(chunk_size=512):
ofp.write(chunk)
logger.log_info('OS download finished !',\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
'''
function for ase-1 test
'''
def copy_local_to_udisk(self,sn):
'''
function :copy update.zip and bin file to udisk
:param sn:
:return:
'''
#subprocess.call('adb -s %s reboot' % sn, shell=True)
os.system('adb -s %s wait-for-device' % sn)
os.system('adb -s %s shell am force-stop com.wm.activate' % sn)
subprocess.call('adb -s %s root' % sn, shell=True)
while True:
time.sleep(5)
if sn not in ADB_SN().get_sn_from_adb_command():
logger.log_warn('%s: wait adb connect' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
else:
break
# init_u2 = Prepare_Ui()
# init_u2.check_ui(sn)
logger.log_info('%s: copy new image into DUT(udisk)' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
udisk = subprocess.Popen('adb -s %s shell "df | grep media_rw"' % sn, stdout=subprocess.PIPE,
shell=True).stdout.read()
if str.encode('media_rw') not in udisk:
logger.log_error('%s: no udisk found, pls insert udisk' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
sys.exit(-1)
try:
os.system('adb -s %s root' % sn)
cmd = "$(df |grep media_rw|/yf/bin/busybox awk '{print $1}')"
os.system('adb -s %s shell "mount -o remount,rw %s"' %(sn,cmd))
time.sleep(5)
os.system('adb -s %s push Common/update/mcuupgrade.bin /mnt/udisk/update/devices/mcuupgrade.bin' % sn)
time.sleep(5)
os.system('adb -s %s push Common/update/update.zip /mnt/udisk/update/os/update.zip' % sn)
logger.log_info("copy mcuupgrade.bin and update.zip to udisk successfully",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
except Exception as e:
logger.log_error("Error message is :%s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def update_system_through_fastboot(self, flag):
'''
function: update system
:return:
'''
# print("antony@@@debug")
serial = Serial()
serial.enter_fastboot_mode()
# path = os.path.dirname(os.path.dirname(__file__))+"\\Main\\Common\\update\\fastboot"
current_path = os.getcwd()
path = current_path + "\\Common\\update\\fastboot"
logger.log_info("start install system through fastboot mode,pls wait...",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
os.chdir("%s" %(path))
os.system("%s\\fastboot.bat < nul" % (path))
time.sleep(5)
os.chdir("%s" % (current_path))
logger.log_info("fastboot install over,wait system launch...",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
if serial.wait_ui_through_serial() == True:
adb_sn = ADB_SN()
count = 0
while True:
if adb_sn.check_adb_status() == False:
serial.open_adb_through_serial(flag)
# self.open_adb_through_serial(self.update_fota_package()[4],flag)
count = count + 1
if adb_sn.check_adb_status() == True:
break
elif adb_sn.check_adb_status() == False and count == flag:
while True:
serial.open_adb_through_serial(flag -1)
if adb_sn.check_adb_status() == False:
count = count - 1
else:
break
if count == 0 and adb_sn.check_adb_status() == False:
logger.log_error("can not open adb port",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
break
else:
return 0
return 0
else:
logger.log_error("boot can not completed",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def flash_through_system(self,sn):
'''
function:
:param sn:
:return:
'''
logger.log_info('%s: MCU and OS start upgrade !!!' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
os.system('adb -s %s shell am force-stop com.wm.activate' % sn)
subprocess.call('adb -s %s root' % sn, shell=True)
init_u2 = Prepare_Ui()
init_u2.check_ui(sn)
d = u2.connect(sn)
delete_file(sn)
d.press("home")
d.app_start("com.android.settings",
"com.android.settings.WMSystemUpdateActivity")
time.sleep(5)
if d(resourceId="android:id/button1"):
print (d(resourceId="android:id/button1").get_text())
d(resourceId="android:id/button1").click()
d(resourceId="com.android.settings:id/btn_start_update").click()
while True:
time.sleep(20)
if sn not in ADB_SN().get_sn_from_adb_command():
break
logger.log_info('%s: OS are flashing, pls wait ...' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
# init_u2 = Prepare_Ui()
init_u2.wait_ui(sn)
time.sleep(20)
logger.log_info('%s: clean up system ...' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
os.system('adb -s %s shell am force-stop com.wm.activate' % sn)
init_u2.check_ui(sn)
d = u2.connect(sn)
if d(resourceId="android:id/button1"):
print (d(resourceId="android:id/button1").get_text())
d(resourceId="android:id/button1").click()
os.system('adb -s %s shell am start -a android.settings.DATE_SETTINGS' % sn)
status = d(resourceId="android:id/switchWidget")
if u"关闭" in status.get_text():  # "关闭" means "Off"
status.click()
status.click_gone(maxretry=9, interval=1.0)
d.press("home")
os.system('adb -s %s root' % sn)
cmd = "$(df |grep media_rw|%s awk '{print $1}')" %(busybox)
os.system('adb -s %s shell "mount -o remount,rw %s"' % (sn, cmd))
os.system('adb -s %s shell rm -rf /mnt/udisk/logFile/*' % sn)
os.system('adb -s %s shell rm -rf /mnt/udisk/BaiduMapLog/*' % sn)
logger.log_info('%s: MCU and OS upgrade completed !' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def flash_through_recovery(self,sn):
'''
function:flash system through recovery normally
:param sn:
:return:
'''
logger.log_info("flash system through recovery mode",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
self.usb_root_and_install(sn)
def update_fota_package(self):
'''
function:get data_for_fota file content
:return:
'''
filelist = []
file_name = os.path.join(os.path.dirname(__file__),'Configuration')
# print("antony @@@debug %s" %(file_name))
# logger.log_info("file path is : %s" %(file_name),\
# sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
if os.path.exists(file_name):
with open(file_name,'r+') as f:
for msg in f:
filelist.append(msg.strip())
return filelist
def get_software_version(self,sn):
'''
function:get software version
:param sn:
:return:version
'''
cut_cmd = 'cut -d"[" -f2 | cut -d"]" -f1'
try:
version = subprocess.Popen('adb -s %s shell "getprop | grep -E ASE* | %s awk \'{print $NF}\'| %s| uniq"' %(sn,busybox,cut_cmd),\
stdout=subprocess.PIPE, shell=True).stdout.read()
version = str(version, encoding="utf-8").replace('\r\r\n','')
return version.strip()
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def get_tbox_verison(self,sn):
'''
function:get tbox version
:param sn:
:return:
'''
str_msg = 'adb -s %s shell "cat %s |head -n1"' %(sn, tbox_version)
try:
return removal(subprocess.check_output(str_msg)).strip()
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def usb_root_and_install(self,sn):
'''
function:check udisk status and reboot recovery to install old version
:param sn:
:return: re_code
'''
udisk = subprocess.Popen('adb -s %s shell "df | grep media_rw"' % sn, stdout=subprocess.PIPE, \
shell=True).stdout.read()
if str.encode('media_rw') not in udisk:
logger.log_error('%s: no udisk found, pls insert udisk' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
sys.exit(-1)
else:
try:
subprocess.check_output('adb -s %s shell reboot recovery' % sn)
except subprocess.CalledProcessError as e:
logger.log_error("%s: execute reboot recovery command fail" % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return e.returncode
def check_download_progress(self, sn,flag):
cmd = 'adb -s %s shell "cat %s |grep DownLoadProcess |tail -n1 |%s sed \'s/.*DownLoadProcess.\{2\}\([0-9]\{1,3\}\).*/\\1/\'"' %(sn,log_path,busybox)
while True:
try:
if int(removal(subprocess.check_output(cmd)).strip()) >= 0 and int(removal(subprocess.check_output(cmd)).strip()) < 100:
logger.log_debug("current download progress is :%s " %(removal(subprocess.check_output(cmd)).strip()),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
time.sleep(5)
elif int(removal(subprocess.check_output(cmd)).strip()) >= 0 and int(removal(subprocess.check_output(cmd)).strip()) == 100:
logger.log_debug("download %s package success" %(get_packagename_from_json_file(sn,flag)),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 0
else :
logger.log_error("cannot find download progress by searching the log file",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
'''
this class for platform information
'''
class Platform_Information(object):
def __init__(self):
self.vin_version = 'None'
self.sw_version = 'None'
self.sw_version_old = 'None'
self.sw_version_new = 'None'
self.device = 'None'
self.mcu_version = 'None'
def set_vin_version(self, vin_version):
self.vin_version = vin_version
def get_vin_version(self):
return self.vin_version
def set_sw_version(self, sw_version):
self.sw_version = sw_version
def get_sw_version(self):
return self.sw_version
def set_sw_version_old(self, sw_version_old):
self.sw_version_old = sw_version_old
def get_sw_version_old(self):
return self.sw_version_old
def set_sw_version_new(self, sw_version_new):
self.sw_version_new = sw_version_new
def get_sw_version_new(self):
return self.sw_version_new
def set_device(self,device):
self.device = device
def get_device(self):
return self.device
def set_mcu_version(self,mcu_version):
self.mcu_version = mcu_version
def get_mcu_version(self):
return self.mcu_version
def get_vin_verbose(self,sn):
'''
function:get vin number
:param sn:
:return: str
'''
cmd = 'getprop |grep -r ".*vehicle\.vin"|%s awk -F \':\' \'{print $NF}\'| %s sed \'s/\[\([0-9A-Z]*\)\]/\\1/\'' %(busybox,busybox)
try:
version = subprocess.Popen(
'adb -s %s shell "%s"' % (sn, cmd), \
stdout=subprocess.PIPE, shell=True).stdout.read()
version = str(version, encoding="utf-8").replace('\r\r\n', '')
return version.strip()
except Exception as e:
logger.log_error("can not find vin number,more details is %s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def get_software_version(self, sn):
'''
function:get software version
:param sn:
:return:version
'''
cut_cmd = 'cut -d"[" -f2 | cut -d"]" -f1'
version = subprocess.Popen(
'adb -s %s shell "getprop | grep -E ASE* | %s awk \'{print $NF}\'| %s"' % (sn, busybox, cut_cmd), \
stdout=subprocess.PIPE, shell=True).stdout.read()
version = str(version, encoding="utf-8").replace('\r\r\n', '')
return version.strip()
def temporary_get_port_data(self, vin_version, device, sw_version_old, mcu_version):
time_value = int(time.time())
data_json = \
{
"token": None,
"timestamp": time_value,
"vin": "%s" % (vin_version),
"device_list":
[{
"device_type": "%s" % (device),
"device_id": "SN00001",
"part_number": "PN00001",
"software_list":
[{
"software_type": "mpu",
"sw_version": "%s" % (sw_version_old),
"backup_sw_version": "%s" % (sw_version_old)
}]
},
{
"device_type": "%s" %(device),
"device_id": "SN00001",
"part_number": "PN00001",
"software_list": [{
"software_type": "mcu",
"sw_version": "%s" %(mcu_version)
}]
}
]
}
return data_json
def get_post_data(self, vin_version, device, sw_version_old, sw_version_new,mcu_version):
'''
function:get data_json
:param vin_version:
:param device:
:param sw_version_old:
:param sw_version_new:
:return:
'''
time_value = int(time.time())
data_json = \
{
"token": None,
"timestamp": time_value,
"vin": "%s" % (vin_version),
"device_list":
[{
"device_type": "%s" % (device),
"device_id": "SN00001",
"part_number": "PN00001",
"software_list":
[{
"software_type": "mpu",
"sw_version": "%s" % (sw_version_old),
"backup_sw_version": "%s" % (sw_version_old)
}]
},
{
"device_type": "%s" % (device),
"device_id": "SN00001",
"part_number": "PN00001",
"software_list": [{
"software_type": "mcu",
"sw_version": "%s" % (mcu_version)
}]
}
]
}
return data_json
def get_header(self, vin_version):
'''
function:get header
:param vin_version:
:return:
'''
header = {"Content-Type": "application/json", "vin": "%s" % (vin_version)}
return header
'''
prepare device environment
'''
class Prepare_Ui(object):
def __init__(self):
pass
def wait_ui(self,sn):
'''
function: wait for hu device boot
:param sn:
:return:
'''
os.system('adb -s %s wait-for-device' % sn)
while True:
retryRet = subprocess.Popen('adb -s %s shell "getprop | grep boot_completed"' % sn, stdout=subprocess.PIPE,
shell=True).stdout.read()
if str.encode('sys.boot_completed') not in retryRet:
logger.log_warn('%s: waiting for device boot up' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
else:
break
time.sleep(5)
logger.log_info('%s: boot into system success' % sn,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
osVersion = subprocess.Popen('adb -s %s shell "getprop | grep ro.headunit.version"' % (sn),
stdout=subprocess.PIPE, shell=True).stdout.read().decode('utf-8')
p = re.compile(r": \[(ASE.*)\]")
logger.log_info("%s: OS current version: %s" % (sn, re.findall(p, osVersion)[0]),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def check_ui(self,sn):
'''
function:check uiautomator
:param sn:
:return:
'''
r = subprocess.Popen('''adb -s %s shell "ps | grep -E 'atx-agent|com.github.uiautomator'"''' % sn,
stdout=subprocess.PIPE, shell=True).stdout.read()
logger.log_debug("%s" %(r),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
if b"uiautomator" not in r:
self.apk_step(sn)
def apk_step(self,sn):
rtn = subprocess.Popen(
"adb -s %s shell pm list packages" % sn, stdout=subprocess.PIPE, shell=True).stdout.readlines()
packageList = list(map(lambda x: re.split(r"[:|\\]", str(x))[1].strip(), rtn))
if "com.github.uiautomator" not in packageList or "com.github.uiautomator.test" not in packageList:
# os.system('adb -s %s install -r %s' %
# (sn, os.path.join(os.getcwd(), "Source", "app-uiautomator.apk")))
# os.system(
# 'adb -s %s install -r %s' % (sn, os.path.join("Source", "app-uiautomator-test.apk")))
logger.log_debug("init install uiautomator2",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
os.system('python -m uiautomator2 init')
else:
logger.log_warn("start uiautomator services",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
os.system('adb -s %s shell am start -n com.github.uiautomator/.IdentifyActivity' % sn)
'''
check install package message from fotaClient.log
'''
class Check_message(object):
def __init__(self):
self.delay_time = 240
self.flag_list = ["Full","Diff"]
# data_json = '/update/data.json'
# fota_Client = '/update/fotaClient.log'
# lib_hufota = '/update/libHUfota/libHUfota.log'
self.message_full = ["Download.*HU.*New.*Full.*Package", "HU.*Package.*Download.*Finished",\
"Start.*Silence.*Install.*HU.*package", ".*1136.*FOTAFlash.*HU_start_upgrade.*Return:0.*"]
self.message_diff = ["Download.*HU.*New.*Delta.*Package", "HU.*Package.*Download.*Finished",\
"Start.*Silence.*Install.*HU.*package", ".*1136.*FOTAFlash.*HU_start_upgrade.*Return:0.*"]
def set_time(self, delay_time):
self.delay_time = delay_time
def get_time(self):
return self.delay_time
def check_data_file(self, sn):
'''
function: check data.json file
:param sn:
:return:
'''
delay_time = self.get_time()
logger.log_debug("verify data.json if exist", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
while True:
cmd = 'adb -s %s shell "if [ -f %s ]; then %s wc -c %s | %s awk \'{print $1}\';fi"' % (sn,json_name,busybox,json_name,busybox)
if removal(subprocess.check_output(cmd)).strip().isdigit() and int(removal(subprocess.check_output(cmd)).strip()) >= int(data_size):  # compare byte counts numerically, not as strings
logger.log_info("data.json has existed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return True
else:
delay_time = delay_time - 1
if delay_time >= 0:
logger.log_debug("wait ...", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
time.sleep(2)
else:
logger.log_error("data.json not found, request to server failed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return False
def check_fota_Client(self, sn, flag = None):
'''
function: check fotaClient.log
:param sn:
:param flag:
:return:
'''
logger.log_debug("start to check fota_Client.log progress", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
delay_time = self.get_time()
if flag == self.flag_list[0]:
msg_list = self.message_full
elif flag == self.flag_list[1]:
msg_list = self.message_diff
for msg in msg_list:
logger.log_info("check if start %s" % (msg.replace(".*", " ")), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
os.system('adb -s %s root' % sn)
os.system('adb -s %s shell "mount -o remount,rw /"' % sn)
os.system('adb -s %s shell "chmod -R 777 /update/"' % sn)
cmd = 'adb -s %s shell "if [ -f /update/fotaClient.log ];then cat /update/fotaClient.log |grep -E "%s";fi"' % (
sn, msg)
count = 0
while count < delay_time:
if check_log_message(sn, cmd, msg):
logger.log_info("has start %s\n" % (msg.replace(".*", " ")),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
break
else:
logger.log_warn("can not get %s field,wait 5s time" %(msg.replace(".*", " ")),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
time.sleep(5)
count = count + 1
if count >= delay_time:
logger.log_error("can not get %s field,wait 5s time" %(msg.replace(".*", " ")),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
return True
def check_system_directory(self, sn):
'''
function:check system directory
:param sn:
:return:
'''
count = self.get_time()
cmd = 'adb -s %s shell "if [ -d /update/system/ ];then ls -al /update/system | %s wc -l ;fi"' % (sn,busybox)
while True:
try:
result = removal(subprocess.check_output(cmd, shell=True))
logger.log_info("%s" % (result), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
if result.strip() != '' and result.strip() == '0':
time.sleep(20)
return True
else:
count = count - 1
time.sleep(5)
if count <= 0:
return False
continue
except subprocess.CalledProcessError as e:
logger.log_info("%s" % (e), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return False
def check_libHUfota_exist(self,sn):
count = self.get_time()
cmd = 'adb -s %s shell "if [ -f %s ];then echo 0;else echo 1;fi"' % (sn, libHufota)
while True:
try:
result = removal(subprocess.check_output(cmd, shell=True))
logger.log_info("%s" % (result), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
if int(result.strip()) == 0:
time.sleep(20)
return True
else:
count = count - 1
time.sleep(5)
if count <= 0:
logger.log_error("loop over and install package failed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return False
except subprocess.CalledProcessError as e:
logger.log_error("%s" % (e), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return False
def check_libHUfota(self, sn):
'''
function:check libHUfota.log file
:param sn:
:return:
'''
count = self.get_time()
# cmd = 'adb -s %s shell "if [ -f %s ];then grep -E "LibHUfota.*install.*success" \
# %s | %s wc -l ;fi"' % (sn, libHufota, libHufota, busybox)
cmd = 'adb -s %s shell "if [ -f %s ];then grep -E "LIBHUFOTA_MSG.*INSTALL_COMPLETE" \
%s | %s wc -l ;fi"' % (sn, libHufota, libHufota, busybox)
while True:
try:
result = removal(subprocess.check_output(cmd, shell=True))
logger.log_info("%s" % (result), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
if result.strip() != '' and int(result.strip()) >= 1:
time.sleep(20)
return True
else:
count = count - 1
time.sleep(5)
if count <= 0:
logger.log_error("loop over and install package failed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
except subprocess.CalledProcessError as e:
logger.log_error("%s" % (e), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return False
class activeupgrade(object):
def __init__(self):
pass
def delete_lvlog(self,sn):
try:
os.system('adb -s %s shell "mount -o rw,remount /;rm -rf %s;rm -rf %s"' %(sn, lvlog, settinglog))
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def check_activeupgrade_starui(self, sn):
'''
function:get start ui log
:param sn:
:return:
'''
cmd = 'adb -s %s shell "cat %s |grep createWindow|wc -l"' %(sn, lvlog)
try:
return int(removal(subprocess.check_output(cmd, shell=True)).strip())
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def check_activeupgrade_starui_from_settings(self, sn):
'''
function:get start ui log
:param sn:
:return:
'''
cmd = 'adb -s %s shell "cat %s |grep LVSettingHomePageActivity:onStop|wc -l"' %(sn, settinglog)
try:
print(int(removal(subprocess.check_output(cmd, shell=True)).strip()))
return int(removal(subprocess.check_output(cmd, shell=True)).strip())
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def check_activeupgrade_cancleui(self, sn, delay_time = 20):
'''
function:get cancle ui log
:param sn:
:return:
'''
cmd = 'adb -s %s shell "cat %s |grep -E "WindowUtils.*destroy"|wc -l"' % (sn, lvlog)
try:
while delay_time > 0:
if int(removal(subprocess.check_output(cmd, shell=True)).strip()) >= 1:
logger.log_info("cancel ui successfully",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 0
else :
delay_time = delay_time - 1
time.sleep(0.5)
logger.log_error("timeout, cannot find upgrade result",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
except Exception as e:
logger.log_error("%s" % e,\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def check_install_progress(self, sn, pcan, delay_time= 360):
'''
function: check install progress
:param sn:
:param delay_time:
:return:
'''
str_msg = "cat %s |grep -E 'upgrade progress info' | tail -n1|%s awk -F \'process\' \'{print $2}\' " % (
lvlog, busybox)
str_msg2 = "cat %s |grep -E 'upgrade progress info' | tail -n1|%s awk -F \'process\' \'{print $2}\' |%s awk \'{print $1}\' " % (lvlog, busybox, busybox)
# str_msg = "cat /sdcard/lvlog/com.living.ota/normal/123.log |grep -E 'upgrade progress info' | tail -n1|%s awk -F \'process\' \'{print $2}\' " % (busybox)
cmd = 'adb -s %s shell "%s"' % (sn, str_msg)
# cmd1 = 'adb -s %s shell "if [ -f %s ];then grep -E "LIBHUFOTA_MSG.*INSTALL_COMPLETE" \
# %s | %s wc -l ;fi"' % (sn, libHufota, libHufota, busybox)
cmd2 = 'adb -s %s shell "%s"' % (sn, str_msg2)
# cmd ='adb -s %s shell "cat %s |grep -E "upgrade progress info" | tail -n1|%s awk -F \'process\' \'{print $2}\'"' %(sn, lvlog, busybox)
try:
while delay_time > 0:
pcan.enter_ota_lopper()
print(removal(subprocess.check_output(cmd, shell=True)).strip())
# print(removal(subprocess.check_output(cmd1, shell=True)).strip())
print(removal(subprocess.check_output(cmd2, shell=True)).strip())
if '100' in removal(subprocess.check_output(cmd, shell=True)).strip() and 'errorCode 0' in removal(
subprocess.check_output(cmd, shell=True)).strip():
# if '100' in removal(subprocess.check_output(cmd, shell=True)).strip():
logger.log_info("install package successfully for active upgrade", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 0
elif '100' not in removal(
subprocess.check_output(cmd, shell=True)).strip() and 'errorCode 0' in removal(
subprocess.check_output(cmd, shell=True)).strip():
print("delay 0.5 second")
time.sleep(0.5)
# if int(removal(subprocess.check_output(cmd2, shell=True)).strip()) > 1 and 'errorCode 0' not in removal(
# subprocess.check_output(cmd, shell=True)).strip():
# logger.log_error("install failed and errorCode is not zero", \
# sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
# sys._getframe().f_lineno)
# return 1
delay_time = delay_time - 1
logger.log_error("timeout, cannot find upgrade result", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 1
except Exception as e:
logger.log_error("%s" % e, \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 1
def check_activeupgrade_resultui(self, sn):
'''
function: check result log
:param sn:
:param delay_time:
:return:
'''
# os.system("adb -s %s shell su" %(sn))
# os.system("adb -s %s shell chmod 777 -R %s" %(sn, lvlog_path))
cmd = 'adb -s %s shell "cat %s |grep -E "FotaUpgradeResult.*upgradeResult=true.*rebootResult=false.*"|%s wc -l "' %(sn,lvlog,busybox)
try:
print(int(removal(subprocess.check_output(cmd, shell=True)).strip()))
if int(removal(subprocess.check_output(cmd, shell=True)).strip()) >= 1:
print("antony@@@debug")
logger.log_info("install package successfully for active upgrade",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 0
# else :
# delay_time = delay_time - 1
# time.sleep(0.5)
# logger.log_error("timeout,and can not find upgrade result",\
# sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
# return 1
except Exception as e:
logger.log_error("%s" % e,\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def removal(stg):
'''
function: decode subprocess output and strip '\r\r\n'
:param stg:
:return:
'''
new_stg = str(stg, encoding="utf-8").replace('\r\r\n','')
return new_stg
def check_log_message(sn,cmd,msg):
'''
function: check log status
:param:sn
:return:
'''
message = subprocess.Popen(cmd,shell = True,stdout = subprocess.PIPE).stdout.read()
new_message = removal(message)
if new_message.strip() != '':
logger.log_debug("%s successfully" %(msg.replace(".*"," ")),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return True
else :
logger.log_warn("%s cannot finish,need to wait ...." %(msg.replace(".*"," ")),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
def execute_cmd(cmd,delay_time=2):
'''
function:execute adb shell command
:param sn:
:return:
'''
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell= True,stdin = None)
count = 0
while count < delay_time:
if p.poll() is None:
time.sleep(5)
else :
return p.stdout.read()
count = count + 1
time.sleep(5)
if p.poll() is None:
p.kill()
logger.log_debug("force to kill process",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return None
else:
logger.log_debug("%s" %(p.stdout.read()),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return p.stdout.read()
def execute_cmd_getcode(cmd,delay_time = 2 ):
'''
get cmd return status:
{0: successfully
1: fail}
:param cmd:
:param delay_time:
:return: ret_code
'''
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, stdin=None)
count = 0
while count < delay_time:
if p.poll() is None:
time.sleep(5)
else:
return 0
count = count + 1
time.sleep(5)
if p.poll() is None:
p.kill()
logger.log_debug("force to kill process", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
def reboot_device(sn):
'''
function:reboot devices
:param sn:
:return:
'''
os.system('adb -s %s root' % sn)
os.system('adb -s %s shell "reboot"' % sn)
logger.log_debug("reboot system...",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def delete_file(sn):
'''
function:delete file from /update
:param sn:
:return:
'''
updatedirectory_rw(sn)
os.system('adb -s %s shell "mount -o rw,remount /;rm -rf /update/*;sleep 2;rm -rf /update/*"' %sn)
logger.log_debug("delete /update/ directory",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def updatedirectory_rw(sn):
'''
function chmod update directory rw
:param sn:
:return:
'''
os.system('adb -s %s root' % sn)
os.system('adb -s %s shell "mount -o remount,rw /"' % sn)
os.system('adb -s %s shell "chmod -R 777 /update/*"' % sn)
def wait_hu_recovery(sn, wait_time = 20):
'''
function wait hu recovery
:param sn:
:return:
'''
loop_count = 15
p = subprocess.Popen('adb -s %s wait-for-device' % (sn), stderr=subprocess.PIPE, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, shell=False)
while loop_count > 0:
time.sleep(random.randint(20, 30))
print(p.poll())
if p.poll() is not None:
logger.log_info("adb devices init successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
break
else:
serial = Serial()
serial.open_adb_through_serial(5)
loop_count = loop_count - 1
while wait_time >= 0:
retryRet = subprocess.Popen('adb -s %s shell "getprop | grep boot_completed"' % sn, stdout=subprocess.PIPE,
shell=True).stdout.read()
if 'sys.boot_completed' not in str(retryRet):
            logger.log_warn('%s: waiting for device boot up' % sn, \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
time.sleep(6)
else:
return 0
wait_time = wait_time - 1
return 1
def check_json_file(sn,delay_time = 60):
'''
function :check json file
:param sn:
:param delay_time: =60
:return:
'''
while True:
cmd ='adb -s %s shell "if [ -f %s ]; then %s wc -c %s | %s awk \'{print $1}\';fi"' % (sn,json_name,busybox,json_name,busybox)
        size_str = removal(subprocess.check_output(cmd)).strip()
        if size_str.isdigit() and int(size_str) >= data_size:
logger.log_debug("server respond OK and client receive data.json successfully",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 0
else :
delay_time = delay_time - 1
if delay_time >= 0:
logger.log_debug("wait 2s for data.json file ...",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
time.sleep(2)
else:
logger.log_error("Client reveive fail",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def get_packagesize_from_json_file(sn, flag, delay_time = 240):
'''
function:get package size
:param sn:
:param delay_time:
:return:
'''
if flag == "Full":
str_msg = "%s sed 's/.*file_size.\{2\}\([0-9]*\).*/\\1/' %s" % (busybox, json_name)
elif flag == "Diff":
str_msg = "%s sed 's/.*file_size.\{2\}\([0-9]*\).*file_size.\{2\}\([0-9]*\).*/\\1/' %s" % (busybox, json_name)
cmd = 'adb -s %s shell "if [ -f %s ]; then %s;fi"' % (sn, json_name, str_msg)
while True:
try:
if removal(subprocess.check_output(cmd)):
logger.log_debug("will download package size is %s" %(removal(subprocess.check_output(cmd))), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return removal(subprocess.check_output(cmd))
delay_time = delay_time - 1
if delay_time >= 0:
logger.log_debug("wait ...", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
time.sleep(2)
else:
logger.log_error("Client receive fail,can not find data.json file", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return None
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return None
def get_packagename_from_json_file(sn, flag, delay_time = 240):
if flag == "Full":
str_msg = "%s sed 's/.*file_name.\{3\}\(.*\).\{3\}file_size.*/\\1/' %s" % (busybox, json_name)
elif flag == "Diff":
str_msg = "%s sed 's/.*file_name.\{3\}\(.*zip.*ed\).*file_size.*/\\1/' %s" % (busybox, json_name)
cmd = 'adb -s %s shell "if [ -f %s ]; then %s;fi"' % (sn, json_name, str_msg)
while True:
try:
if removal(subprocess.check_output(cmd)):
logger.log_debug("will download package name is %s" % (removal(subprocess.check_output(cmd))),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return removal(subprocess.check_output(cmd))
delay_time = delay_time - 1
if delay_time >= 0:
logger.log_debug("wait ...", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
time.sleep(2)
else:
logger.log_error("Client receive fail,can not find data.json file", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return None
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return None
def check_package_exist(sn, flag, delay_time = 240):
'''
function :Check if the package exists
:param sn:
:return:
'''
package_name = get_packagename_from_json_file(sn, flag)
while True:
cmd = 'adb -s %s shell "ls -al %s/%s |%s wc -l 2>/dev/null"' %(sn, package_path, package_name, busybox)
try:
print (removal(subprocess.check_output(cmd).strip()))
if '1' in removal(subprocess.check_output(cmd).strip()):
logger.log_debug("%s exists and downloading ..." %(package_name),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 0
delay_time = delay_time - 1
if delay_time >= 0:
logger.log_debug("wait a minute...", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
time.sleep(5)
else:
logger.log_error("can not find %s," % (package_name), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 1
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def check_package_size(sn, flag, delay_time = 240):
'''
function:check package size
:param sn:
:param flag:
:param delay_time:
:return:
'''
package_name = get_packagename_from_json_file(sn,flag)
if check_package_exist(sn,flag,delay_time) == 0:
while True:
str_msg = "ls -al %s/%s |%s awk '{print $5}'" %(package_path,package_name ,busybox)
# str_msg = "ls -al /update/ota-full.zip|/oem/bin/busybox awk '{print $4}'"
cmd = 'adb -s %s shell "%s"' % (sn, str_msg)
try :
if int (removal(subprocess.check_output(cmd)).strip()) >=0:
logger.log_debug("has downloaded package size: %s" % (removal(subprocess.check_output(cmd))), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return removal(subprocess.check_output(cmd)).strip()
delay_time = delay_time - 1
if delay_time >= 0:
logger.log_debug("wait ...", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
time.sleep(2)
else:
logger.log_debug("can not find package size ,may be has download well", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return None
except Exception as e:
# print (delay_time)
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def enable_network(sn):
'''
function:enable network
:param sn:
:return:
'''
cmd = 'adb -s %s shell "ifconfig %s up && sleep 2 && ifconfig |grep %s >> /dev/null && echo $?"' %(sn,network_device,network_device)
try:
if removal(subprocess.check_output(cmd)).strip() == '0':
logger.log_debug("execute enable command successfully",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return True
else:
logger.log_error("execute enable command fail",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
def disable_network(sn):
'''
function:disable network
:param sn:
:return:
'''
os.system("adb -s %s root" %(sn))
cmd = 'adb -s %s shell "ifconfig %s down && sleep 2 && ifconfig |grep "%s" 2>&1 || echo $?"' %(sn,network_device,network_device)
# print (removal(subprocess.check_output(cmd)).strip())
try:
if removal(subprocess.check_output(cmd)).strip() == '1':
logger.log_debug("execute disable command successsfully",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return True
else:
logger.log_error("execute disable command fail",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return False
def get_vin_number(sn):
'''
function:get vin number
:param sn:
:return: str
'''
cmd = 'getprop |grep -r ".*vehicle\.vin"|%s awk -F \':\' \'{print $NF}\'| %s sed \'s/\[\([0-9A-Z]*\)\]/\\1/\'' %(busybox,busybox)
# print (cmd)
try:
version = subprocess.Popen(
'adb -s %s shell "%s"' % (sn, cmd), \
stdout=subprocess.PIPE, shell=True).stdout.read()
version = str(version, encoding="utf-8").replace('\r\r\n', '')
return version.strip()
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def get_software_version(sn):
'''
function:get software version
:param sn:
:return:version
'''
cut_cmd = 'cut -d"[" -f2 | cut -d"]" -f1'
version = subprocess.Popen(
'adb -s %s shell "getprop | grep -E ASE* | %s awk \'{print $NF}\'| %s |uniq"' % (sn, busybox, cut_cmd), \
stdout=subprocess.PIPE, shell=True).stdout.read()
version = str(version, encoding="utf-8").replace('\r\r\n', '')
return version.strip()
def post_request(sn, header={}, data={}, url=None):
'''
platform :window
function: post request
:param sn:
:return:
'''
try:
if url == None:
# url = "https://qa-hu1.wm-icenter.net/api/vehicle/fota/softwares"
url = "https://qa-hu1.wm-icenter.net/api/v2/vehicle/fota/softwares"
print("antony@@@debug")
print(json.dumps(data))
r = requests.post(url=url,headers=header,data=json.dumps(data))
# print (header,data)
print (r.text)
if "success" in r.text and r.status_code == 200:
logger.log_info("respond ok and status is :%s" %(r.status_code),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 0
else:
logger.log_error("respond failed and status is :%s" %(r.status_code),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
except Exception as e:
logger.log_error("may be has no network",\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def post_request_to_file(sn, header={}, data={}, url=None):
try:
if url == None:
# url = "https://qa-hu1.wm-icenter.net/api/vehicle/fota/softwares"
url = "https://qa-hu1.wm-icenter.net/api/v2/vehicle/fota/softwares"
r = requests.post(url=url, headers=header, data=json.dumps(data))
print (r.text)
return write_request_content_file_and_push_to_hu(sn,r.text)
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def write_request_content_file_and_push_to_hu(sn,msg):
'''
funciton :write data.json to android
:param sn:
:param msg:
:return:
'''
current_path = os.getcwd()
# print (current_path)
current_time = int(time.time())
current_filename = current_path+"\\"+str(current_time)+'.txt'
# print (current_time,current_filename)
with open(current_filename,'w') as f:
f.write(msg)
os.system('adb root')
cmd = 'adb -s %s push %s %s' %(sn, current_filename, json_name)
os.system(cmd)
os.remove(current_filename)
cmd = 'adb -s %s shell ""if [ -f %s ];then echo "0";else echo "1";fi"' %(sn, json_name)
return int(removal(subprocess.check_output(cmd)).strip())
def get_md5_value(sn, package_name, new_name):
'''
function : get package md5 value
:param sn:
:param package_name:
:param new_name:
:return:
'''
try:
os.system('adb -s %s root' %(sn))
cmd = 'adb -s %s shell "mv /%s/%s /%s/%s;md5sum /%s/%s |%s awk \'{print $1}\'"'\
%(sn,package_path,package_name,package_path,new_name,package_path,new_name,busybox)
return removal(subprocess.check_output(cmd)).strip()
except Exception as e:
logger.log_error("%s" %(e),sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def get_md5_value_from_datafile(sn, flag):
'''
function: get md5 value from datafile
:param sn:
:return:
'''
try:
os.system('adb -s %s root' %(sn))
if flag == "Full":
str_msg = "%s sed \'s/.*sign.\{3\}\(.*\).\{3\}package_type.*/\\1/\' %s" %(busybox,json_name)
elif flag == "Diff":
str_msg = "%s sed \'s/.*sign.\{3\}\(.*\).\{3\}package_type.*sign.\{3\}\(.*\).\{3\}package_type.*/\\1/\' %s" %(busybox,json_name)
cmd = 'adb -s %s shell "%s"' %(sn,str_msg)
return removal(subprocess.check_output(cmd)).strip()
except Exception as e:
logger.log_error("%s" %(e),sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def delete_update_directory(sn):
'''
function: delete update directory
:param sn:
:return:
'''
os.system("adb -s %s root" %(sn))
cmd = "if [ -d /update ] && [ $(ls -al /update/| %s wc -l) != 0 ];then mount -o rw,remount /;rm -rf /update/*;fi" %(busybox)
try:
subprocess.check_output('adb -s %s shell "%s"' %(sn,cmd))
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
def kill_fota_daemon(sn):
'''
function : kill fota daemon
:param sn:
:return:
'''
cmd = 'adb -s %s shell "ps -ef|grep FotaModule|grep -v grep|%s awk \'{print $2}\'|xargs kill -9"' %(sn,busybox)
try:
subprocess.check_output(cmd)
return 0
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename,sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def start_fota_daemon_child(sn, cmd):
'''
function run fota daemon process
:param sn:
:param cmd:
:return:
'''
    cmd_start_fotamodule = 'adb -s %s shell "LD_LIBRARY_PATH=/system/lib/fotalib /system/bin/FotaModule &"' % (sn)
execute_cmd(cmd_start_fotamodule)
if int(removal(subprocess.check_output(cmd))) >= 1:
logger.log_info("start fota module success", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 0
else:
logger.log_error("start fota module failed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
def start_fota_daemon(sn):
'''
function:start fota daemon process
:param sn:
:return:
'''
os.system("adb -s %s root" %(sn))
cmd_check_fotamodule = 'adb -s %s shell "ps -ef|grep FotaModule|grep -v grep | %s wc -l"' %(sn, busybox)
# print(int(removal(subprocess.check_output(cmd_check_fotamodule)).strip()))
try:
# if int(removal(subprocess.check_output(cmd_check_fotamodule)).strip()) == 0:
# return start_fota_daemon_child(sn,cmd_check_fotamodule)
# elif int(removal(subprocess.check_output(cmd_check_fotamodule)).strip()) > 0:
# kill_fota_daemon(sn)
# return start_fota_daemon_child(sn, cmd_check_fotamodule)
return 0
except Exception as e:
logger.log_error("error as follow: %s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def send_signal(sn, arry_list=[], device_flag = None, cycle = 10):
'''
function :send update signal and install package through ui
:param sn:
:param cycle = 100:
:return:
'''
def send_tbox_signal(pcan):
try:
pcan.enterota()
if ag.check_install_progress(sn,pcan) == 1:
pcan.poweron_and_clean()
return 1
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
pcan.poweron_and_clean()
return 1
try:
pcan = SC.PCAN()
ag = activeupgrade()
logger.log_info("delete lvlog", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
ag.delete_lvlog(sn)
time.sleep(1)
for i in range(5):
logger.log_info("start install ui",sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
if ag.check_activeupgrade_starui(sn) == 0:
for i in range(cycle):
pcan.send_arry(arry_list)
time.sleep(5)
serial = Serial()
logger.log_info("start enter ui to upgrade package", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
# print("2222222")
# serial.enter_softupgrade_page()
logger.log_info("start click install button",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
serial.active_upgrade()
logger.log_info("start send ota signal", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
pcan.enterota()
time.sleep(5)
logger.log_info("check install progress...", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
if ag.check_install_progress(sn, pcan) == 1:
pcan.poweron_and_clean()
return 1
if device_flag == "tbox":
logger.log_info("first package install successfully,start send exit ota signal",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
pcan.exit_ota_lopper()
logger.log_info("second send enter ota signal",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
send_tbox_signal(pcan)
# logger.log_info("start send power on signal", \
# sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
# pcan.poweron()
# logger.log_info("power on signal done", \
# sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
for i in range(cycle * 9):
logger.log_info("start send exit ota normal signal,send signal times is:%s" % (i), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
pcan.exitota()
logger.log_info("wait install successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
if ag.check_activeupgrade_resultui(sn) == 0:
pcan.clean()
logger.log_info("check laster result successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 0
pcan.clean()
logger.log_error("check laster result failed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
def alway_send_signal(sn, arry_list=[], delay_time = 300):
try:
pcan = SC.PCAN()
ag = activeupgrade()
logger.log_info("delete lvlog", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
ag.delete_lvlog(sn)
logger.log_info("start send signal ,and wait install ui...")
while delay_time > 0:
if ag.check_activeupgrade_starui(sn) == 0:
pcan.send_arry(arry_list)
time.sleep(0.5)
delay_time = delay_time - 1
else:
pcan.poweron_and_clean()
return 0
pcan.poweron_and_clean()
return 1
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
def upgrade_through_ui(sn, arry_list = [],device_flag = None, cycle = 10):
def send_tbox_signal(pcan):
try:
pcan.enterota()
if ag.check_install_progress(sn,pcan) == 1:
pcan.poweron_and_clean()
return 1
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
pcan.poweron_and_clean()
return 1
try:
serial = Serial()
pcan = SC.PCAN()
for i in range(100):
pcan.send_arry(arry_list)
ag = activeupgrade()
logger.log_info("delete lvlog", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
ag.delete_lvlog(sn)
logger.log_info("start enter ui to upgrade package", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
serial.active_upgrade()
logger.log_info("start send ota normal signal",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
pcan.enterota()
if ag.check_install_progress(sn,pcan) == 1:
logger.log_error("has enter ota normal,but can not install",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
pcan.poweron_and_clean()
return 1
if device_flag == "tbox":
logger.log_info("first package install successfully,start send exit ota signal",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
pcan.exit_ota_lopper()
logger.log_info("second send enter ota signal",\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
send_tbox_signal(pcan)
for i in range(cycle * 9):
logger.log_info("start send exit ota normal signal,send signal times is:%s" % (i), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
pcan.exitota()
logger.log_info("wait install successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
if ag.check_activeupgrade_resultui(sn) == 0:
pcan.clean()
logger.log_info("check laster result successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 0
pcan.clean()
logger.log_error("check laster result failed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
# logger.log_info("start send exit ota normal signal",\
# sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
# pcan.exitota()
# pcan.clean()
# logger.log_info("wait install successfully", \
# sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
# if ag.check_activeupgrade_resultui(sn) == 0:
# return 0
# else:
# logger.log_error("maybe can not exit 99% ui page,so upgrade failed",\
# sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
# return 1
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
def cancle_install_through_ui(sn):
try:
serial = Serial()
ag = activeupgrade()
logger.log_info("delete lvlog", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
ag.delete_lvlog(sn)
logger.log_info("start cancle ui ", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
serial.cancel_activeupgrade()
if ag.check_activeupgrade_cancleui(sn) == 0:
logger.log_info("cancle successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 0
else:
return 1
except Exception as e:
logger.log_error("%s" % (e), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
def cancleinstall_through_setting(sn, arry_list = [],cycle = 100):
try:
serial = Serial()
pcan = SC.PCAN()
ag = activeupgrade()
logger.log_info("delete lvlog", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
ag.delete_lvlog(sn)
for i in range(cycle):
pcan.send_arry(arry_list)
serial.enter_softupgrade_page()
for i in range(5):
if ag.check_activeupgrade_starui_from_settings(sn) == 0:
time.sleep(6)
else :
serial.cancle_activeupgrade_through_settings()
if ag.check_activeupgrade_cancleui(sn) == 0:
pcan.poweron_and_clean()
return 0
else:
pcan.poweron_and_clean()
return 1
pcan.poweron_and_clean()
return 1
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
def activeupgrade_through_setting(sn, arry_list = [], device_flag = None, cycle = 100):
def send_tbox_signal(pcan):
try:
pcan.enterota()
if ag.check_install_progress(sn,pcan) == 1:
pcan.poweron_and_clean()
return 1
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
pcan.poweron_and_clean()
return 1
try:
serial = Serial()
pcan = SC.PCAN()
ag = activeupgrade()
logger.log_info("delete lvlog", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
ag.delete_lvlog(sn)
time.sleep(5)
for i in range(5):
if ag.check_activeupgrade_starui(sn) == 0:
for i in range(cycle):
pcan.send_arry(arry_list)
else :
time.sleep(5)
serial.cancel_activeupgrade()
time.sleep(5)
if ag.check_activeupgrade_cancleui(sn) == 0:
logger.log_debug("cancle successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
ag.delete_lvlog(sn)
logger.log_info("start enter soft upgrade page", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
serial.enter_softupgrade_page()
for i in range(5):
if ag.check_activeupgrade_starui_from_settings(sn) == 0:
print("aaaaa")
time.sleep(6)
else:
time.sleep(5)
logger.log_info("start upgrade through setting page", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
serial.activeupgrade_through_settings()
pcan.enterota()
time.sleep(5)
if ag.check_install_progress(sn,pcan) == 1:
pcan.poweron_and_clean()
return 1
if device_flag == "tbox":
pcan.exit_ota_lopper()
send_tbox_signal(pcan)
for i in range(cycle * 9):
logger.log_info("start send exit ota normal signal,send signal times is:%s" % (i), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
pcan.exitota()
logger.log_info("wait install successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
if ag.check_activeupgrade_resultui(sn) == 0:
pcan.clean()
logger.log_info("check laster result successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 0
pcan.clean()
logger.log_error("check laster result failed", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 1
return 1
else:
pcan.clean()
logger.log_error("cancle install through ui", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,sys._getframe().f_lineno)
return 1
pcan.poweron_and_clean()
return 1
except Exception as e:
pcan.poweron_and_clean()
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
return 1
def check_md5_status(sn, delay_time=240):
'''
function:check md5 value status
:param sn:
:param delay_time:
:return:
'''
try:
cmd = 'adb -s %s shell "grep -Er "verify.*OTA.*success" %s |%s wc -l"' % (sn, log_path, busybox)
while delay_time > 0:
if removal(subprocess.check_output(cmd)).strip() == '2':
logger.log_info("check md5 successfully", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 0
else:
delay_time = delay_time - 1
time.sleep(1)
logger.log_error("check md5 timeout", \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 1
except Exception as e:
logger.log_error("%s" % (e), \
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name,
sys._getframe().f_lineno)
return 1
def Set_Logcat_Message(sn):
'''
function for set logcat
:param module_name:
:return:
'''
lgc = lcf.logcat()
lgc.set_file_name("logcat.log")
lgc.set_file_path("/update")
lgc.collect_logcat_file(sn, lgc.get_file_path(), lgc.get_file_name())
return lgc
def Get_Logcat_Message(sn, lgc):
'''
function for get logcat
:param module_name:
:return:
'''
    try:
lgc.set_pull_file_path("..\Result\%s\%s" %(loger.file_dir,loger.case_name_directory))
lgc.pull_logcat_file(sn ,lgc.get_file_name(), lgc.get_file_path(),\
lgc.get_pull_file_path(), time.strftime('%Y-%m-%d__%H-%M-%S',time.localtime(time.time())))
except Exception as e:
logger.log_error("%s" %e,\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
def Set_Screencap_Message(sn):
'''
function for set screencap
:param module_name:
:return:
'''
snp = scp.screen_cap()
snp.set_picture_name("screen.png")
snp.set_picture_path("/update")
snp.get_screencap(sn, snp.get_picture_name(), snp.get_picture_path())
return snp
def Get_Screencap_Message(sn, snp):
'''
function for get screencap
:return:
'''
snp.set_pull_picture_path("..\Result\%s\%s" %(loger.file_dir,loger.case_name_directory))
snp.pull_screencap(sn, snp.get_pull_picture_path(), snp.get_picture_path(), \
snp.get_picture_name(),time.strftime('%Y-%m-%d__%H-%M-%S', time.localtime(time.time())))
def Get_libHU_Message(sn):
try:
libHU_path = "..\Result\%s\%s" %(loger.file_dir,loger.case_name_directory)
cm = Check_message()
# if cm.check_libHUfota_exist(sn) == True:
# file_name = removal(subprocess.check_output('adb -s %s shell "ls /update/log/libHU*"' %(sn))).strip()
# os.system("adb -s %s pull %s %s/%s_%s" \
# %(sn,file_name,libHU_path,time.strftime('%Y-%m-%d__%H-%M-%S', time.localtime(time.time())), "libHUfota.log"))
os.system("adb -s %s pull /sdcard/lvlog/com.living.ota/normal/. %s/%s_%s" \
%(sn,libHU_path,time.strftime('%Y-%m-%d__%H-%M-%S', time.localtime(time.time())), "com_living_ota.log"))
os.system("adb -s %s pull /update/log/. %s/%s_%s" \
%(sn,libHU_path,time.strftime('%Y-%m-%d__%H-%M-%S', time.localtime(time.time())), "update_log"))
except Exception as e:
logger.log_error("%s" %(e),\
sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno)
def open_tbox_adb():
time.sleep(10)
import pyautogui
screenWidth, screenHeight = pyautogui.size()
print("height:%s width:%s" %(screenHeight,screenWidth))
currentMouseX, currentMouseY = pyautogui.position()
print("currentMouseX:%s currentMouseY:%s" %(currentMouseX,currentMouseY))
pyautogui.moveTo(currentMouseX, currentMouseY)
pyautogui.click()
[file: /tools/config_manager.py, repo: tungit980/docbao, Python, no_license]
header = '''
###############################################################################
# program : config manager
# function: help create, edit web configs easily for crawling
# author : hailoc12
# created : 2019-06-14
###############################################################################
'''
import copy
from os import system
import os
from src.backend.lib.config import ConfigManager, WebConfig
from src.backend.lib.utils import get_independent_os_path
from src.backend.lib.data import Article, ArticleManager
from src.backend.lib.browser_crawler import BrowserWrapper, BrowserCrawler
HELP_STRING = 'Type "help" for help, "quit" or "exit" to quit app'
MAX_DISPLAY = 10
mem_list = None # memorized list use for chaining commands
# UTILITY
def display_multiple_input(message, answer_message="Input: "):
answer = None
print(message)
count=0
result = []
while answer != '':
count+=1
answer = input(str(count) + '. ' + answer_message)
if answer !='':
result.append(answer)
return result
def display_choice_dialog(message, choices, answer_message="Your Answer: "):
'''
output
======
None if cancel
    0..len(choices)-1 if a valid choice is made
'''
print(message)
for i in range(0, len(choices)):
print("%s. %s" % (str(i+1), choices[i]))
choice = 'a'
while not choice.isdigit():
try:
print()
choice = input(answer_message)
if choice.strip().isdigit():
choice = int(choice)
if choice <= 0 or choice > len(choices):
print("You must input a number between 1..%s" % str(len(choices)))
choice = 'a'
                else:
                    return choice - 1
else:
if choice.strip() == '':
return None
else:
print("You must input a number between 1..%s" % str(len(choices)))
choice = 'a'
except:
print("You must input a number between 1..%s" % str(len(choices)))
choice = 'a'
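# Illustrative only (not part of the original tool): typical use of the dialog
# helpers above. The menu entries here are made up.
def _example_choose_crawl_mode():
    mode = display_choice_dialog("Choose crawl mode:", ["Quick crawl", "Full crawl"])
    if mode is None:  # user pressed ENTER to cancel
        return None
    return ["quick", "full"][mode]  # the dialog returns a 0-based index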
def extract_args(args):
'''
extract index or keyword from arguments
arg:
- args: list of arguments of command
output: index (int) or keyword (str) or None if fail
'''
arg = " ".join(args).strip()
try:
index = int(arg) - 1 #index start from 0
if mem_list is not None:
if index >=0 and index < len(mem_list):
return index
else:
print("Must provide an index between 1..%s" % str(len(mem_list)))
else:
print('There are no list to use index. Use command "show" to create list')
except:
keyword = arg
return keyword
return None
def display_menu_dialog(menu_message,input_message, menu=[]):
'''
function
--------
display menu and get user choice
input
-----
menu: list of menu
output
------
return user choice in number (0..len(menu)-1)
'''
print(menu_message)
choice = display_choice_dialog(input_message, menu)
return choice
def clear_screen():
system('clear')
def process_edit_config(webconfig):
if webconfig is None:
input("There isn't any web config yet. Create a new one or load from resources")
return None
print_config_header = '''
###########################################
# PRINT WEB CONFIG #
###########################################
'''
while True:
clear_screen()
print(print_config_header)
config_list = webconfig.print_config()
number_of_config = len(config_list)
choice = input("Press ENTER to return, LINE NUMBER <=%s to edit, -LINENUMBER <=%s to remove, or >%s to add new config: " % (number_of_config, number_of_config, number_of_config))
is_digit = False
try:
line = int(choice)
is_digit = True
except:
pass
if is_digit:
if line <= number_of_config: # edit config
if line > 0: # edit old config
if line != 1:
key, value = webconfig.get_config_by_index(line-2)
else:
key = 'Webname'
value = webconfig.get_webname()
print()
print("Editing key: %s" % key)
print("Old value: %s" % str(value))
validated = False
while not validated:
try:
new_value = eval(input("New Value: ")) # note: eval is important here to get right data type
validated = True
except:
print("Input value in wrong format. Remember to input like Python code (eg. 'money' not money. [a, b, c] not '[a, b, c]'")
if line != 1:
webconfig.set_config(key, new_value)
else:
webconfig.set_webname(new_value)
input("Change OK. Press ENTER to continue")
elif line < 0: # delete config
if line == -1: # delete webname
input("Sorry, you can't delete site name")
else:
key, value = webconfig.get_config_by_index(-line-2)
webconfig.delete_config(key)
input("Succesfully remove %s from site config" % key)
else: # add new config
key = input("Enter new config key: ")
validated = False
while not validated:
try:
value = eval(input("Enter config value: "))
validated = True
except:
print("Input value in wrong format. Remember to input like Python code (eg. 'money' not money. [a, b, c] not '[a, b, c]'")
webconfig.set_config(key, value)
input("Sucessfully add %s to config" % key)
else:
break
return webconfig
def display_yes_no_dialog(message):
choice = input(message)
if 'y' == choice.lower().strip():
return True
elif 'n' == choice.lower().strip():
return False
else:
return None
def process_create_blank_newspaper():
webconfig = WebConfig()
webconfig.load_default_config('newspaper')
print()
# basic config
newspaper_name = input("Please enter newspaper name: ")
newspaper_url = input("Please enter newspaper base url: ")
choice = display_yes_no_dialog("Is crawl url the same as base url (y/n) ?")
if not choice:
crawl_url = input("Please enter crawl url: ")
else:
crawl_url = newspaper_url
use_browser = display_yes_no_dialog("Do you want to crawl using browser or not (y/n) ?")
if use_browser:
webconfig.set_config("use_browser", True)
else:
webconfig.set_config("use_browser", False)
#date_xpath = input("Please enter xpath to get publish date in detail page: ")
menu = ['Auto Find',
'Class name',
'Xpath']
    date_extract_type = display_choice_dialog('Please choose one way to extract publish date in detail page: ', menu)
    date_xpath = ''  # default, used when 'Auto Find' is chosen
    if date_extract_type == 1: # CSS selection
css = input("Please enter class name of html tag that contain publish date: ")
date_xpath = '//node()[@class="' + css + '"]'
elif date_extract_type == 2:
date_xpath = input("Please enter single xpath that extract html tag containing publish date: ")
xpath_count = len(webconfig.get_topics_xpath())
remove_date_tag_html = display_yes_no_dialog('Does it need to remove html tag to extract publish date (y/n)?: ')
webconfig.set_config('remove_date_tag_html', remove_date_tag_html)
ignore_topic_menu_choice = ['Topic is invalid', 'Use current time as publish date']
choice = display_choice_dialog("How to treat topic that can't find its publish date ?", ignore_topic_menu_choice)
if choice == 0:
ignore_topic_not_have_publish_date = True
else:
ignore_topic_not_have_publish_date = False
webconfig.set_config('ignore_topic_not_have_publish_date', ignore_topic_not_have_publish_date)
crawl_detail_choice = display_yes_no_dialog('Do you want to crawl detail content (sapo, content, img) ?: ')
if crawl_detail_choice:
sapo_xpath = input("Please enter xpath to extract sapo text: ")
content_xpath = input("Please enter xpath to extract main content: ")
remove_content_html = True
remove_content_html_xpaths = []
answer = None
count = 1
while answer != '':
print("Please input xpaths to remove tags (ENTER=Finish): ")
answer = input("Xpath %s: " % str(count))
if answer != '':
remove_content_html_xpaths.append(answer)
count+=1
feature_image_xpath = input("Please enter xpath to extract feature images url: ")
text_xpath = display_multiple_input("Please input xpaths to get text element: ")
image_box_xpath = display_multiple_input("Please input xpaths to get image box element: ")
image_title_xpath = display_multiple_input("Please input xpaths to get title element from image box (ENTER=finish): ")
video_box_xpath = display_multiple_input("Please input xpaths to get video box element (ENTER=Finish)")
video_title_xpath = display_multiple_input("Please input xpaths to get title element from video box (ENTER=finish): ")
audio_box_xpath = display_multiple_input("Please input xpaths to get audio box element (ENTER=Finish)")
audio_title_xpath = display_multiple_input("Please input xpaths to get title element from audio box (ENTER=finish): ")
avatar_choice_menu = ['Provide logo link', 'Xpath to get logo url']
avatar_choice = display_choice_dialog("How do you want to extract logo url ?", avatar_choice_menu)
avatar_url = ''
avatar_xpath = ''
if avatar_choice == 0: # provide linke
avatar_type = 'url'
avatar_url = input("Please enter logo absolute url: ")
else:
avatar_xpath = input("Please enter xpath to extract avatar/logo url: ")
avatar_type = 'xpath'
sapo_xpath_list = []
content_xpath_list = []
feature_image_xpath_list = []
for i in range(0, xpath_count):
sapo_xpath_list.append(sapo_xpath)
content_xpath_list.append(content_xpath)
feature_image_xpath_list.append(feature_image_xpath)
webconfig.set_config('sapo_xpath', sapo_xpath_list)
webconfig.set_config('content_xpath', content_xpath_list)
webconfig.set_config('text_xpath', text_xpath)
webconfig.set_config('feature_image_xpath', feature_image_xpath_list)
webconfig.set_config('get_detail_content', crawl_detail_choice)
webconfig.set_config('remove_content_html', remove_content_html)
webconfig.set_config('remove_content_html_xpaths', remove_content_html_xpaths)
webconfig.set_config('image_box_xpath', image_box_xpath)
webconfig.set_config('image_title_xpath', image_title_xpath)
webconfig.set_config('video_box_xpath', video_box_xpath)
webconfig.set_config('video_title_xpath', video_title_xpath)
webconfig.set_config('audio_box_xpath', audio_box_xpath)
webconfig.set_config('audio_title_xpath', audio_title_xpath)
webconfig.set_config('avatar_type', avatar_type)
webconfig.set_config('avatar_xpath', avatar_xpath)
webconfig.set_config('avatar_url', avatar_url)
tags = display_multiple_input("Please input metadata to label article crawled from this config (ENTER=finish): ")
domain_re = newspaper_url
webconfig.set_webname(newspaper_name)
webconfig.set_config('web_url', newspaper_url)
webconfig.set_config('crawl_url', crawl_url)
webconfig.set_config('url_pattern_re', domain_re)
webconfig.set_tags(tags)
date_xpath_list = []
# use the same date_xpath for every topic_xpath
for i in range(0, xpath_count):
date_xpath_list.append(date_xpath)
webconfig.set_config('date_xpath', date_xpath_list)
# topic type
choice = display_yes_no_dialog("Do you want to run test with default config (y/n) ?")
if choice:
webconfig = process_test_crawl_web_config(webconfig)
is_ok = True
input("Successfully createn %s site config" % webconfig.get_webname())
return webconfig
def process_create_blank_web_config():
clear_screen()
header = '''
##############################################
CREATE BLANK WEB CONFIG
##############################################
'''
print(header)
menu = ['Newspaper',
'Wordpress Blog',
'Facebook Account',
'Facebook Page',
'Facebook Group',
'Other']
# choose template
user_choice = display_menu_dialog('What type of site: ', 'Choice: ', menu)
webconfig = WebConfig()
if user_choice == 0: # newspaper
webconfig = process_create_blank_newspaper()
elif user_choice == 1: # wordpress
webconfig.load_default_config('wordpress')
elif user_choice == 2: # facebook user
webconfig.load_default_config('facebook user')
fb_name = input("Please input FB Account name: ")
fb_username = input("Please input FB username (or ENTER to leave it): ")
fb_userid = input("Please input FB user ID (or ENTER to leave it): ")
if fb_username.strip() != '':
url = "https://www.facebook.com/" + fb_username.strip()
else:
url = "https://www.facebook.com/profile.php?id=" + fb_userid.strip()
webconfig.set_webname(fb_name)
webconfig.set_config('web_url', url)
webconfig.set_config('crawl_url',url)
webconfig.set_config('url_pattern_re', url)
elif user_choice == 3: # facebook fanpage
webconfig.load_default_config('facebook fanpage')
fb_name = input("Please input Fanpage name: ")
fb_id = input("Please input Fanpage id: ")
url = "https://www.facebook.com/pg/" + fb_id.strip() + "/posts/?ref=page_internal"
webconfig.set_webname(fb_name)
webconfig.set_config('web_url', url)
webconfig.set_config('crawl_url',url)
webconfig.set_config('url_pattern_re', url)
elif user_choice == 4: # facebook group
webconfig.load_default_config('facebook fanpage')
fb_name = input("Please input FB Group name (eg Page Hải Phòng): ")
fb_id = input("Please input Group id (eg page.haiphong): ")
url = "https://www.facebook.com/groups/" + fb_id.strip() + "/"
webconfig.set_webname(fb_name)
webconfig.set_config('web_url', url)
webconfig.set_config('crawl_url',url)
webconfig.set_config('url_pattern_re', url)
else:
webconfig.load_default_config()
# what to do next
menu = ['Edit created config',
'Test crawling this config',
'Save config',
"Return"]
user_choice = -1
while user_choice != 3: # finish
clear_screen()
print(header)
user_choice = display_menu_dialog('What do you want to do next ? ', 'Choice: ', menu)
if user_choice == 0: # edit config
process_edit_config(webconfig)
elif user_choice == 1: # test crawl
webconfig = process_test_crawl_web_config(webconfig)
elif user_choice == 2: # save config
process_save_webconfig(webconfig)
return webconfig
def process_test_crawl_web_config(webconfig):
'''
function
--------
try to crawl with webconfig only
return
------
modified webconfig
'''
test_crawl_header = '''
###########################################
# TEST CRAWLING SITE CONFIG #
###########################################
'''
has_change_dir = False
try:
os.chdir("backend")
has_change_dir = True
except:
pass
continue_test = True
while continue_test:
clear_screen()
print(test_crawl_header)
# prepare webconfig for test
minimum_duration_old_value = webconfig.get_minimum_duration_between_crawls()
webconfig.set_minimum_duration_between_crawls(-5) # mean always crawl this config
maximum_url_old_value = webconfig.get_config('maximum_url', 10)
webconfig.set_config('maximum_url',50)
# ask for edit
choice = display_yes_no_dialog("Is there anything to edit before test crawling (y/n) ?")
if choice:
webconfig = process_edit_config(webconfig)
maximum_url_old_value = webconfig.get_maximum_url()
# test
config_manager = ConfigManager(get_independent_os_path(['src', 'backend','input', 'test.yaml']), get_independent_os_path(['input', 'kols_list.txt']), get_independent_os_path(['input', 'fb_list.txt']))
config_manager.load_data()
config_manager.replace_crawl_list([webconfig])
data_filename = get_independent_os_path(['src', 'backend', 'data','test_article.dat'])
blacklist_filename = get_independent_os_path(['src', 'backend','data','test_blacklist.dat'])
data_manager = ArticleManager(config_manager, data_filename, blacklist_filename)
data_manager.reset_data()
# test crawl
my_pid = 1
browser = BrowserWrapper()
if webconfig.get_crawl_type() == 'newspaper':
data_manager.add_articles_from_newspaper(my_pid, webconfig, browser)
elif 'facebook' in webconfig.get_crawl_type():
data_manager.add_articles_from_facebook(my_pid, webconfig, browser)
# report
#
continue_test = display_yes_no_dialog('Do you want to test again (y/n) ?: ')
# return back
webconfig.set_config('maximum_url', maximum_url_old_value)
webconfig.set_minimum_duration_between_crawls(minimum_duration_old_value)
if has_change_dir:
os.chdir("..")
return webconfig
def process_create_web_config_from_existing_one():
'''
return
------
webconfig object that contain config load from file
'''
load_config_header = '''
###########################################
# PRINT WEB CONFIG #
###########################################
'''
load_config_menu = ['Load from local config files',
'Load from online config database',
'Return to main menu'
]
choice = -1
webconfig = None
while choice != 2:
clear_screen()
print(load_config_header)
choice = display_menu_dialog('What do you want to do ?', "Choice (ENTER=cancel): ", load_config_menu)
if choice == 0:
# display config file list
file_base_path = get_independent_os_path(['resources','configs', 'newspaper'])
filepath = display_choose_file_dialog(file_base_path)
if filepath is not None:
webconfig = WebConfig()
webconfig.load_config_from_file(filepath)
input("Successfully load site config from %s" % filepath)
elif choice == 1:
pass
return webconfig
def process_manage_other_config(config_manager):
'''
return
------
None
'''
manage_other_config_header = '''
###########################################
# MANAGE PROPERTIES IN CONFIG.YAML FILE #
###########################################
'''
while True:
clear_screen()
print(manage_other_config_header)
config_list = config_manager.print_config()
choice = input("Press ENTER to return or LINE NUMBER to edit: ")
if choice.isdigit():
index = int(choice) - 1
key, value = config_list[index]
print()
print("Editing key: %s" % key)
print("Old value: %s" % str(value))
new_value = eval(input("New Value: "))
config_manager.set_config(key, new_value)
input("Change OK. Press ENTER to continue")
else:
break
config_manager.save_data()
def display_choose_file_dialog(file_base_path):
header = '''
###########################################
# LOAD SITE CONFIG #
###########################################
'''
ok = False
while not ok:
clear_screen()
print(header)
search = input("Enter keyword to find config file or ENTER to display all: ")
file_list = []
for root,directory,files in os.walk(file_base_path):
for item in files:
if search in item:
file_list.append(item)
# choose file
config_file_index = display_menu_dialog('Which config do you want to load ?', 'Choice (ENTER=Cancel): ', file_list)
if config_file_index is None: #cancel
return None
config_file_name = file_list[int(config_file_index)]
# make filepath to load
filepath = get_independent_os_path([file_base_path, config_file_name])
answer = input("Are you sure to load site config from %s ? (ENTER=ok, anything=repeat)" % config_file_name)
if answer.strip() == '':
ok = True
return filepath
def process_manage_crawl_list(config_manager):
'''
output
======
- config_manager with modified data
- None or webconfig loaded from current crawl list
'''
manage_crawl_list_header = '''
###########################################
# MANAGE CRAWL LIST #
###########################################
'''
# what to do next
menu = ['Add site config file to list',
'Remove newspaper from list',
'Edit site config in list',
'Load site config in list to working config',
'Edit config of all site in list',
'Add working site config to list',
'Load working site config from list',
'Return',
]
user_choice = -1
webconfig = None
while user_choice != len(menu)-1: # finish
clear_screen()
print(manage_crawl_list_header)
newspaper_list = config_manager.print_crawl_list() # newspaper_list contain all crawl config. All edits will be made on newspaper_list then merge back into config_manager
print()
user_choice = display_menu_dialog('What do you want to do next ? ', 'Choice: ', menu)
if user_choice == 0: # add config file to list
filepath = display_choose_file_dialog(get_independent_os_path(['resources', 'configs', 'newspaper']))
if filepath is not None:
new_webconfig = WebConfig()
new_webconfig.load_config_from_file(filepath)
newspaper_list.append(new_webconfig)
input("Successfully add %s to crawl list" % new_webconfig.get_webname())
config_manager.replace_crawl_list(newspaper_list) #save all changes to config_manager
config_manager.save_data()
elif user_choice == 1: # remove newspaper from list
choice = input("Please input LINE NUMBER to remove or ENTER to cancel: ")
if choice.strip() != '' and choice.isdigit():
remove_webconfig = newspaper_list.pop(int(choice) -1)
input("Successfuly remove %s from crawl list" % remove_webconfig.get_webname())
config_manager.replace_crawl_list(newspaper_list) #save all changes to config_manager
config_manager.save_data()
elif user_choice == 2: # edit site config in list
choice = input("Please input LINE NUMBER to edit or ENTER to cancel: ")
if choice.strip() != '' and choice.isdigit():
choose_webconfig = newspaper_list[int(choice) -1]
choose_webconfig = process_edit_config(choose_webconfig)
config_manager.add_newspaper(choose_webconfig) # update config
config_manager.save_data()
elif user_choice == 3: # load site config to working config
choice = input("Please input LINE NUMBER to load or ENTER to cancel: ")
if choice.strip() != '' and choice.isdigit():
choose_webconfig = newspaper_list[int(choice) -1]
webconfig = choose_webconfig
input("Successfuly load %s config to working config" % choose_webconfig.get_webname())
elif user_choice == 4: # edit single config of all sites in list
print()
newspaper_list = config_manager.get_newspaper_list()
if len(newspaper_list) > 0:
print("Sample of a site config:")
sample_site = newspaper_list[0]
sample_site.print_config()
print()
key = input('Enter config property to edit: (ENTER=cancel, -config_name=remove)').strip()
key = key.strip()
if key != '':
if key[0] == '-': # remove config
key = key[1:]
count = 0
for newspaper in config_manager.get_newspaper_list():
count+=1
newspaper.delete_config(key)
input("Successfully remove %s of %s site" % (key, str(count)))
config_manager.save_data()
else: # edit all
new_value = eval(input('Enter new value of %s: ' % key))
count = 0
for newspaper in config_manager.get_newspaper_list():
count+=1
newspaper.set_config(key, new_value)
input("Successfully change %s of %s site to new value" % (key, str(count)))
config_manager.save_data()
else:
print("There haven't been any site config in crawling list")
elif user_choice == 5: # add working site config to list
if webconfig is not None:
config_manager.add_newspaper(webconfig)
input("Succesfully add %s to crawl list" % webconfig.get_webname())
config_manager.save_data()
else:
input("No working site config. Please go to site config manager to create/load one")
elif user_choice == 6: # load newspaper to current webconfig
choice = input("Please input LINE NUMBER to load or ENTER to cancel: ")
if choice.strip() != '' and choice.isdigit():
webconfig = copy.copy(newspaper_list[int(choice) -1])
input("Successfuly load %s from crawl list" % webconfig.get_webname())
return webconfig
def process_manage_config(webconfig=None, config_manager=None):
'''
return
------
None or webconfig loaded from crawling list
'''
manage_config_header = '''
###########################################
# MANAGE CONFIG.YAML FILE #
###########################################
'''
manage_config_menu = [
'Edit crawl list',
'Edit program settings',
'Go to site config manager',
'Return to main menu'
]
choice = -1
while choice != len(manage_config_menu) -1:
clear_screen()
print(manage_config_header)
choice = display_menu_dialog('What do you want to do ?', "Choice: ", manage_config_menu)
if choice == 0: # manage crawl list
load_web_config = process_manage_crawl_list(config_manager)
if load_web_config is not None:
webconfig = load_web_config
elif choice == 1: # manage other config
process_manage_other_config(config_manager)
elif choice == 2: # go to site config manager
process_webconfig_manager(webconfig, config_manager)
return webconfig
def process_save_webconfig(webconfig):
menu = ['Save in a new file',
'Update an existing file']
choice = display_choice_dialog('How do you want to save: ', menu)
file_base_path = get_independent_os_path(['resources','configs','newspaper'])
if choice == 0:
print("Working config will be saved in ./resource/config/newspaper/")
filename = input("Filename: ")
filepath = get_independent_os_path([file_base_path, filename])
elif choice == 1: #update existing file
filepath = display_choose_file_dialog(file_base_path)
webconfig.export(filepath)
print("File is save OK")
def process_webconfig_manager(webconfig=None, config_manager=None):
'''
return
------
None
'''
webconfig_header = '''
###########################################
# SITE CONFIG MANAGER #
###########################################
'''
manage_webconfig_menu = ['Create new blank config',
'Load config from file',
'Edit working site config',
'Save working site config to file',
'Test crawling working site config',
'Add/Update working site config to crawl list',
'Move to program setting',
'Return to main menu'
]
choice = -1
while choice != len(manage_webconfig_menu) -1 :
clear_screen()
print(webconfig_header)
choice = display_menu_dialog('What do you want to do ?', "Choice: ", manage_webconfig_menu)
        if choice == 0: # create a new blank config
            webconfig = process_create_blank_web_config()
        elif choice == 1: # load config from an existing file
webconfig = process_create_web_config_from_existing_one()
elif choice == 2: # editing working webconfig
webconfig = process_edit_config(webconfig)
elif choice == 3: # save working site config to file
process_save_webconfig(webconfig)
elif choice == 4: # test crawling
process_test_crawl_web_config(webconfig)
elif choice == 5: # add/update working site config to crawl list
config_manager.add_newspaper(webconfig)
config_manager.save_data()
print("Successfully add/update %s to crawl list" % webconfig.get_webname())
elif choice == 6: # move to program setting
process_manage_config(webconfig, config_manager)
return webconfig
# MAIN PROGRAM
config_manager = ConfigManager(get_independent_os_path(['src', 'backend', 'input', 'config.yaml']),
                               get_independent_os_path(['src', 'backend', 'input', 'kols_list.txt']),
get_independent_os_path(['src', 'backend', 'input', 'fb_list.txt'])) #config object
config_manager.load_data(True, False, False, 30, '.')
main_menu_choice = -1
while main_menu_choice != 2:
clear_screen()
print(header)
main_menu = ['Manage site config',
'Manage program settings',
'Quit']
main_menu_choice = display_menu_dialog('WELCOME TO CONFIG MANAGER', 'Your choice: ', main_menu)
webconfig = None
if main_menu_choice == 0: # crawl new site
webconfig = process_webconfig_manager(webconfig, config_manager)
elif main_menu_choice == 1: # manage config.yaml
webconfig = process_manage_config(webconfig, config_manager)
print("Goodbye")
|
[
"hailoc12@gmail.com"
] |
hailoc12@gmail.com
|
86624abf9e511fc127ad47f21f6f9f4b1c09a79a
|
68463eaf559a3063ac9d490dc36676d714f817c5
|
/statsy/urls.py
|
4978648065a6a51c735a09dae387298c5e269706
|
[
"MIT"
] |
permissive
|
zhebrak/django-statsy
|
6ba1e94e19da0ed8d25ed1f9f5b32f33ddafc83e
|
d74845e75c78842fc3890db123ab6e36fe2d3973
|
refs/heads/master
| 2020-05-25T13:37:02.105397
| 2019-03-29T21:59:41
| 2019-03-29T21:59:41
| 27,049,965
| 60
| 11
|
MIT
| 2019-03-29T21:59:42
| 2014-11-23T22:12:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 70
|
py
|
# coding: utf-8
import statsy
urlpatterns = statsy.site.get_urls()
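# A minimal sketch (an assumption, not part of this file) of how these routes are
# usually mounted from a project-level URLconf; the 'statsy/' prefix is illustrative:
#   from django.urls import include, path
#   urlpatterns = [path('statsy/', include('statsy.urls'))]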
|
[
"fata2ex@gmail.com"
] |
fata2ex@gmail.com
|
ad7fc8c0adf2cc245667ae640affb4172b7d3866
|
cba18d5bbf1c320d28d6b407abfefd6fcf10329d
|
/clients/models.py
|
1ab49a4ee66d8c64226be7770733b74728fa8ca9
|
[] |
no_license
|
casttroff/AbmTest
|
52c61d493e51020df6f22575549d58b35d84ce54
|
e01ad409fc50ca6884746a2c494a1b95e03413b7
|
refs/heads/main
| 2023-06-07T14:10:29.268501
| 2021-06-18T23:11:33
| 2021-06-18T23:11:33
| 378,282,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
#CLIENTS/MODELS
import uuid
class Client:
def __init__(self, name, company, email, position, uid=None):
self.name = name
self.company = company
self.email = email
self.position = position
self.uid = uid or uuid.uuid4()
def to_dict(self):
        return vars(self)  # Return the instance attributes as a dictionary
@staticmethod
def schema():
return ['name', 'company', 'email', 'position', 'uid']
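# Usage sketch with hypothetical values (not part of the original module):
#   client = Client('Ada Lovelace', 'ACME', 'ada@example.com', 'CTO')
#   client.to_dict()  # -> {'name': ..., 'company': ..., 'email': ..., 'position': ..., 'uid': UUID(...)}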
|
[
"casttroff@gmail.com"
] |
casttroff@gmail.com
|
a5f7ee66bb01870a20c98e9c3f0bb59c42c5afc2
|
ee09846576fbd3b75978db60e6ea4940c40cfa49
|
/Python/Aula15c.py
|
48f16b23f8fc2eeb612585d874145ac08fe7bacc
|
[] |
no_license
|
MariVilas/teste
|
f59758277957f6f6335aa8f8ff8d608ce4b43358
|
0ce8e1ee01b07a814f1603b971d2d60f3796b645
|
refs/heads/master
| 2023-07-14T20:48:52.991477
| 2021-08-28T05:42:35
| 2021-08-28T05:42:35
| 275,003,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
from random import randint
from time import sleep
resp='S'
jogo = resultado = 0
while resp in 'Ss':
lista = ("PAR","ÍMPAR")
computador = randint(0, 1)
perguntar = int(input('''Escolha uma opcao para se jogar:
[0] PAR
[1] ÍMPAR
Digite sua escolha: '''))
print("ÍMPAR\n")
sleep(1)
print("OU\n")
sleep(1)
print("PAR!!!\n")
print("-=" * 20)
print("O computador escolheu: {}".format(lista[computador]))
print("O jogador escolheu: {}".format(lista[perguntar]))
print("-=" * 20)
if computador == 0:
if perguntar == 0:
print("Empate!")
if computador == 1:
if perguntar == 1:
print("Empate!")
resultado += 1
resp = str(input('Quer continuar?[S/N]')).upper().strip()[0]
print(resultado)
|
[
"mari_jvs@homail.com"
] |
mari_jvs@homail.com
|
67cc8ae0e9b2a05fb85c2ca88cb58905348d3cf1
|
141545126466a00f32247dfa40e067ec049b0fa4
|
/Programming Basics Python/Exam Problems 20042019/Easter Shop.py
|
24f0f9d5278dd658f8d045e6e6d0eab1bb4ce257
|
[] |
no_license
|
RadkaValkova/SoftUni-Web-Developer
|
83314367172a18f001e182b4e57f7ca0502ad1fc
|
61d3414373498bb6009ae70e8d17f26cd2d88ea5
|
refs/heads/main
| 2023-06-01T02:11:06.606370
| 2021-06-29T19:39:19
| 2021-06-29T19:39:19
| 325,611,606
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
start_eggs = int(input())
sold_eggs = 0
fill_eggs = 0
available_eggs = start_eggs
while True:
command = input()
if command == 'Close':
print('Store is closed!')
print(f'{sold_eggs} eggs sold.')
break
eggs_number = int(input())
if command == 'Buy':
if available_eggs < eggs_number:
print('Not enough eggs in store!')
print(f'You can buy only {available_eggs}.')
break
else:
sold_eggs += eggs_number
available_eggs -= eggs_number
else:
fill_eggs += eggs_number
available_eggs += eggs_number
|
[
"radka_valkova@abv.bg"
] |
radka_valkova@abv.bg
|
4586f31cad02d6a6be7b36c3b4163765c6399b35
|
c937baa378c59cbcaaf88ef26e652103f7bca5d1
|
/message.py
|
afa6e91a73a5dbe3e58e7ae5e2c7ac15a494680b
|
[] |
no_license
|
gregorybrancq/pythonCommon
|
87f8026b5e11ad3218e6e24b21002d974f4079f4
|
e3aa7b22d60b760737cba618a7fbb81e1cee9c71
|
refs/heads/master
| 2023-03-23T06:17:56.294515
| 2021-03-09T13:42:42
| 2021-03-09T13:42:42
| 247,312,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,192
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Open a window to print the status of the program
"""
# use for graphical interface
import multiprocessing
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
##############################################
# Message Dialog Class
##############################################
class MessageDialog(object):
"""Shows a message. The message type, title and the message to be
displayed can be passed when initializing the class."""
def __init__(self, dialog_type, title, message1, message2=None):
self.type = dialog_type
if self.type == 'error':
self.dialog = Gtk.MessageDialog(Gtk.Window(),
Gtk.DialogFlags.MODAL,
Gtk.MessageType.ERROR,
Gtk.ButtonsType.CLOSE,
message1,
)
if message2 is not None:
self.dialog.format_secondary_text(message2)
elif self.type == 'info':
self.dialog = Gtk.MessageDialog(Gtk.Window(),
Gtk.DialogFlags.MODAL,
Gtk.MessageType.INFO,
Gtk.ButtonsType.CLOSE,
message1,
)
if message2 is not None:
self.dialog.format_secondary_text(message2)
elif self.type == 'question':
self.dialog = Gtk.MessageDialog(Gtk.Window(),
Gtk.DialogFlags.MODAL,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.YES_NO,
message1,
)
if message2 is not None:
self.dialog.format_secondary_text(message2)
elif self.type == 'entry':
self.dialog = Gtk.MessageDialog(Gtk.Window(),
Gtk.DialogFlags.MODAL,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.YES_NO,
message1,
)
if message2 is not None:
self.dialog.format_secondary_text(message2)
dialog_box = self.dialog.get_content_area()
self.userEntry = Gtk.Entry()
self.userEntry.show()
# the following will trigger OK response when enter is hit in the entry
self.userEntry.connect("activate", lambda w: self.dialog.response(Gtk.ResponseType.YES))
dialog_box.pack_end(self.userEntry, False, False, 0)
self.dialog.set_title(title)
self.dialog.show_all()
def run(self):
"""Runs the dialog and closes it afterwards."""
response = self.dialog.run()
if (response == Gtk.ResponseType.YES) and (self.type == 'entry'):
user_text = self.userEntry.get_text()
self.dialog.destroy()
return user_text
else:
self.dialog.destroy()
return response
class KillQuestionAfterDelay(object):
"""
    Shows a question and waits for a delay, killing the dialog if the user hasn't answered in time.
"""
def __init__(self, delay, question, message=None):
self.delay = delay
self.question = question
self.message = message
def ask_question(self, answer):
response = MessageDialog('question', self.question, self.message).run()
if response == Gtk.ResponseType.YES:
answer.value = True
else:
answer.value = False
def run(self):
# Shared memory
user_answer = multiprocessing.Value('b', True)
# Start question as a process
p = multiprocessing.Process(target=self.ask_question, args=(user_answer,))
p.start()
p.join(self.delay)
# Terminate answer after delay
if p.is_alive():
p.terminate()
p.join()
# return answer
return user_answer.value
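# Usage sketch for the class above (question and message text are illustrative):
# ask the question and fall back to the shared-memory default (True) if the user
# does not answer within 10 seconds.
#   answer = KillQuestionAfterDelay(10, 'Continue?', 'Proceed with the backup?').run()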
# End dialog
def MessageDialogEnd(error, log_file, title, msg1, msg2):
if error:
msg2 += "\nLog file = " + str(log_file)
MessageDialog(dialog_type='error', title=title, message1=msg1, message2=msg2).run()
else:
msg2 += "\nLog file = " + str(log_file)
MessageDialog(dialog_type='info', title=title, message1=msg1, message2=msg2).run()
# 'application' code
# print(MessageDialog('error', "Error !", "Le programme a planté", "comme une grosse ...").run())
# print(MessageDialog('info', "Info", "C'est une information importante").run())
# print(MessageDialog('question', "Question ?", "Irais-tu sur la lune si tu le pouvais ?", "une petite aide : pourquoi pas...").run())
# print(MessageDialog('entry', "Entry", "Question importante", "Donne moi un chiffre").run())
|
[
"gregory.brancq@free.fr"
] |
gregory.brancq@free.fr
|
4cbc0ba6e9100415dbf4ce1c478cfdddfc1267ce
|
9f735b190f30bd11f1d14d4c413b2c958c7b17a7
|
/travel/migrations/0032_remove_user_uid.py
|
22817cf5a2486e0e21274a97208adea11444cdcc
|
[
"MIT"
] |
permissive
|
sausage-team/travel-notes
|
e5a18a38a486e7971be371c40a5a655851f85c49
|
3c2454ebad7764906c5ff30cbdfe296cb7c64eb4
|
refs/heads/master
| 2020-05-23T10:53:33.906472
| 2019-05-22T05:14:20
| 2019-05-22T05:14:20
| 186,726,475
| 0
| 0
|
MIT
| 2019-05-22T05:14:21
| 2019-05-15T01:24:41
|
Python
|
UTF-8
|
Python
| false
| false
| 322
|
py
|
# Generated by Django 2.2.1 on 2019-05-15 08:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('travel', '0031_auto_20190515_1603'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='uid',
),
]
|
[
"1197633750@qq.com"
] |
1197633750@qq.com
|
56217e6cc807440450ad8316fdd6c63956a56e05
|
0c68fad8b6e8375f17ad2e10a2a74e6181d7b55c
|
/LV_cavity.py
|
f1349c229cfd769ffca82ee4f70a64e24967dd18
|
[] |
no_license
|
Wenping-Cui/Eco_functions
|
0e92ad993bff6b9a83d9220ecf908155f7f46a62
|
1598e981c8eacfa37c9f202f28e6762fcc42778a
|
refs/heads/master
| 2021-07-03T09:06:21.443023
| 2020-08-24T03:45:41
| 2020-08-24T03:45:41
| 150,139,810
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,636
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu 03/31/2019
@author: Wenping Cui
"""
import time
import pandas as pd
import pdb
import matplotlib
from matplotlib import cm
from matplotlib import colors
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
import numpy as np
import itertools
from Eco_function.eco_lib import *
from Eco_function.Model_cavity_LV import *
import pdb
import os.path
import pickle
from scipy.integrate import odeint
from multiprocessing import Pool
import multiprocessing
import argparse
#pdb.set_trace()
parser = argparse.ArgumentParser(description='Process types and dynamics')
parser.add_argument('--A', default='gaussian')
parser.add_argument('--B', default='identity')
args = parser.parse_args()
A_type = args.A  # 'gaussian', 'binomial'
B_type = args.B #'identity', 'null', 'circulant' and 'block'
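# Example invocation (illustrative only; --A and --B are the two flags defined above):
#   python LV_cavity.py --A gaussian --B identity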
start_time = time.time()
Pool_num=10
file_name='LV_'+A_type+'_sig_1.csv'
parameters = {}
parameters['sample_size']=10
parameters['S'] =100
parameters['A_type']=A_type
parameters['B_type']=B_type
parameters['k']=1.0;
parameters['sigma_k']=0.1;
parameters['mu']=1.0;
parameters['epsilon'] =0.1
parameters['g']=1.;
parameters['sigma_g']=0.;
parameters['B']=0
parameters['ODE_Time']=[0,500, 2000]
parameters['mapping_CR']=False
parameters['sys_gamma']=0
def func_parallel(para):
index=para[0]
paras={}
paras={**para[1],**paras}
start_time0=time.time()
S=paras['S']
assert paras['A_type'] in ['binomial','gamma', 'gaussian','uniform'], \
"A type must be 'binomial','gamma' ,'gaussian' or 'uniform'"
assert paras['B_type'] in ['identity','null', 'circulant','block'], \
"B type must be 'identity','null', 'circulant','block'"
if paras['B_type']=='identity': #'diag', 'null', 'circulant' and 'block'
B=np.identity(S)
elif paras['B_type']=='null':
B=0
elif paras['B_type']=='circulant':
D = [7, 1] # generalist, specialist
B=circ(S, D[1])+np.identity(S)
elif paras['B_type']=='block':
B=block(int(S/10), 10)+np.identity(S)
paras['B']=B
Model=LV_Cavity_simulation(paras)
mean_var=Model._simulation(dynamics="ODE")
epsilon, mu, gamma = paras['epsilon'], paras['mu'], paras['sys_gamma']
save_pkl =1
if save_pkl:
filename='eigenvalues'+'_'+A_type +'_'+B_type+'_sigc_'+str(round(epsilon,2))+'_mu_'+str(round(mu,2))+"gamma_"+str(round(gamma,2))+'.pkl'
with open(filename, 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump((Model.lams, Model.lams_org, Model.phin_list, Model.col_N), f)
paras.pop("B", None)
paras.pop("ODE_Time", None)
data= { **paras,**mean_var}
para_df = pd.DataFrame(data, index=[index])
print("index", index)
print("*"*20)
print("finished time: ", time.time()-start_time0)
return para_df
jobs=[];
index=0
for B_type in ['identity']:
for S in [100]:
for epsilon in np.arange(0.4, 1., 0.1):
for mu in [0]:
#np.arange(-1, 1.1, 0.1)
for sys_gamma in [1.,0.]:
parameters['S'] =S
parameters['sys_gamma'] = sys_gamma
parameters['sample_size']=int(1000)
parameters['B_type']=B_type
parameters['mu']=mu
parameters['epsilon'] =epsilon
parameters['sys_gamma']= sys_gamma
var=parameters.copy()
jobs.append([index, var])
index=index+1
pool = Pool(processes=Pool_num)
results = pool.map(func_parallel, jobs)
pool.close()
pool.join()
results_df = pd.concat(results)
with open(file_name, 'a', newline = '\n') as f:
results_df.to_csv(f, index=False,encoding='utf-8')
print ('finish time',int(time.time() - start_time))
|
[
"wenpingcui@gmail.com"
] |
wenpingcui@gmail.com
|
a916c65dd16e574dda4caae3acfe79ca554e7832
|
6cb6f0d5213d2af70793bca7f9429cfe878630fa
|
/sovryn_bridge_rewarder/utils.py
|
51ffe105f3994d7b1cb998d438492ea61802537c
|
[] |
no_license
|
DistributedCollective/sovryn-bridge-rewarder
|
90812926a50a855ee925f6cdea6f3d0e75533838
|
1286fb5bed02c67474bd1a105c2e6fb664b52046
|
refs/heads/master
| 2023-07-17T08:34:28.444152
| 2021-08-26T12:46:47
| 2021-08-26T12:46:47
| 366,236,486
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,243
|
py
|
from datetime import datetime, timezone
import functools
import json
import logging
import os
from time import sleep
from typing import Dict, Any, Union
from eth_abi import decode_single
from eth_abi.exceptions import DecodingError
from eth_typing import AnyAddress
from eth_utils import to_checksum_address, to_hex
from web3 import Web3
from web3.contract import Contract, ContractEvent
THIS_DIR = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
def utcnow() -> datetime:
return datetime.now(timezone.utc)
def load_abi(name: str) -> Dict[str, Any]:
with open(os.path.join(THIS_DIR, 'abi', name)) as f:
return json.load(f)
def address(a: Union[bytes, str]) -> AnyAddress:
# Web3.py expects checksummed addresses, but has no support for EIP-1191,
# so RSK-checksummed addresses are broken
# Should instead fix web3, but meanwhile this wrapper will help us
return to_checksum_address(a)
# Alias, better name...
to_address = address
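# Usage sketch (the address below is an EIP-55 test vector, shown for illustration):
# both helpers return a checksummed address regardless of the input casing.
#   to_address('0x52908400098527886e0f7030069857d2e4169ee7')
#   # -> '0x52908400098527886E0F7030069857D2E4169EE7'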
@functools.lru_cache()
def get_erc20_contract(*, token_address: Union[str, AnyAddress], web3: Web3) -> Contract:
return web3.eth.contract(
address=address(token_address),
abi=ERC20_ABI,
)
ERC20_ABI = load_abi('IERC20.json')
def get_events(
*,
event: ContractEvent,
from_block: int,
to_block: int,
batch_size: int = 100
):
"""Load events in batches"""
if to_block < from_block:
raise ValueError(f'to_block {to_block} is smaller than from_block {from_block}')
logger.info('fetching events from %s to %s with batch size %s', from_block, to_block, batch_size)
ret = []
batch_from_block = from_block
while batch_from_block <= to_block:
batch_to_block = min(batch_from_block + batch_size, to_block)
logger.info('fetching batch from %s to %s (up to %s)', batch_from_block, batch_to_block, to_block)
event_filter = event.createFilter(
fromBlock=batch_from_block,
toBlock=batch_to_block,
)
events = get_event_batch_with_retries(
event=event,
from_block=batch_from_block,
to_block=batch_to_block,
)
if len(events) > 0:
            logger.info('found %s events in batch', len(events))
ret.extend(events)
batch_from_block = batch_to_block + 1
return ret
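# Sketch of a typical call (token address and block numbers are placeholders,
# not values from this project):
#   token = get_erc20_contract(token_address='0x...', web3=web3)
#   transfers = get_events(
#       event=token.events.Transfer,
#       from_block=3_000_000,
#       to_block=3_001_000,
#       batch_size=250,
#   )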
def get_event_batch_with_retries(event, from_block, to_block, *, retries=3):
while True:
try:
return event.getLogs(
fromBlock=from_block,
toBlock=to_block,
)
except ValueError as e:
if retries <= 0:
raise e
            logger.warning('error in getLogs: %s, retrying (%s)', e, retries)
retries -= 1
def exponential_sleep(attempt, max_sleep_time=256.0):
sleep_time = min(2 ** attempt, max_sleep_time)
sleep(sleep_time)
def retryable(*, max_attempts: int = 10):
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
attempt = 0
while True:
try:
return func(*args, **kwargs)
except Exception as e:
if attempt >= max_attempts:
                        logger.warning('max attempts (%s) exhausted for error: %s', max_attempts, e)
raise
logger.warning(
'Retryable error (attempt: %s/%s): %s',
attempt + 1,
max_attempts,
e,
)
exponential_sleep(attempt)
attempt += 1
return wrapped
return decorator
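# Usage sketch for the decorator above (the decorated function is hypothetical):
#   @retryable(max_attempts=5)
#   def fetch_price():
#       ...  # any call that may fail transiently; retried with exponential sleep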
class UserDataNotAddress(Exception):
def __init__(self, userdata: bytes):
super().__init__(f'userdata {userdata!r} cannot be decoded to an address')
def decode_address_from_userdata(userdata: bytes) -> str:
try:
return decode_single('address', userdata)
except DecodingError as e:
raise UserDataNotAddress(userdata) from e
@functools.lru_cache()
def is_contract(*, web3: Web3, address: str) -> bool:
code = web3.eth.get_code(to_address(address))
return code != b'\x00'
|
[
"rainer@koirikivi.fi"
] |
rainer@koirikivi.fi
|
b577f0d8e1d931be4eb3ff721911ce9e9b179843
|
d1e95aa28914f4ef4a6906b6a70ae9be79b0544d
|
/Spectrum Based Fault Localization/forbes.py
|
5f303701baf931c10df9bbc595602ec8eb510778
|
[] |
no_license
|
Achal-Gupta/SpectrumBasedFaultLocalisation
|
00d886ea71d6d6131b13be4bdc85089d0c5bc813
|
7611e37085f7027be4738fc6dd5c243e3898bd07
|
refs/heads/master
| 2022-08-01T21:36:17.475108
| 2020-05-29T11:57:38
| 2020-05-29T11:57:38
| 267,845,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,530
|
py
|
import sys
import time
from datetime import datetime
start_time=datetime.now()
import pandas as pd
import numpy as np
import math
import os
import csv
cwd =os.getcwd()
version=cwd.split("/")[-1]
program_name=cwd.split("/")[-2].split("_")[0]
print(cwd)
str_cwd=cwd.replace("/"+program_name+"/"+version,"")
print(str_cwd)
f_l=0
start_time=datetime.now()
with open('faultyLine.txt') as f:
f_l = f.readline()
print("**************")
print(f_l)
print("**************")
f_l=int(f_l)
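# Note (added interpretation, not from the original source): the suspiciousness score
# computed in each block below, (f+p)*ef / (f*(ef+ep)), matches the Forbes similarity
# coefficient N*Ncf / (Nf*(Ncf+Ncs)) with N = total tests, Nf = failed tests, and
# Ncf/Ncs = statement covered by failed/passed tests -- hence the file name.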
############Original##############
st1 = datetime.now()
df_train=pd.read_csv('statementResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'],1 , inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt1= (datetime.now() -st1)
o1=ct3+1
o2=ct3+ct2
############Original with uniqueness##############
st2 = datetime.now()
df_train=pd.read_csv('uniqueResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'],1 , inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt2= (datetime.now() -st2)
o3=ct3+1
o4=ct3+ct2
############Original with slicing##############
st3=datetime.now()
# code for retrieving the sliced data
sdf=pd.read_csv('slice1.csv')
ys=np.array([sdf['In_Slice']]).T
ys=ys.tolist()
df_train=pd.read_csv('statementResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'],1 , inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
if ys[i][0]==0:
sus_score=-999
else:
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt3= (datetime.now() -st3)
o5=ct3+1
o6=ct3+ct2
############Original with slicing and uniqueness##############
st4=datetime.now()
# code for retrieving the sliced data
sdf=pd.read_csv('slice1.csv')
ys=np.array([sdf['In_Slice']]).T
ys=ys.tolist()
df_train=pd.read_csv('uniqueResult.csv')
#training output dataset
y = np.array([df_train['Result']]).T
y=y.tolist()
#print y
#training input dataset
df_train.drop(['Result'],1 , inplace=True)
t_in = df_train.values.tolist()
x = np.array(t_in)
x=x.tolist()
#print len(y[0])
total_failed=np.count_nonzero(y)
total_passed=len(y)-total_failed
suspicious=[]
#print len(y)
#print len(x[0])
#print total_passed,total_failed
f = total_failed
p = total_passed
for i in range(0,len(x[0])):
nsuccess=0
nfailure=0
for j in range(0,len(y)):
#print x[j][i],y[j][0]
if x[j][i]==1 and y[j][0]==0:
nsuccess=nsuccess+1
elif x[j][i]==1 and y[j][0]==1:
nfailure=nfailure+1
try:
#nfailure=Ncf... nsuccess=Ncs
#Nf=total_failed.... Ns=total_passed
#print nfailure,nsuccess
ep = nsuccess
ef = nfailure
np1 = p - ep
nf = f - ef
if ys[i][0]==0:
sus_score=-999
else:
sus_score = float((f+p)*ef)/float(f*(ef + ep))
suspicious.append(sus_score)
print(str(i)+" "+str(sus_score))
except ZeroDivisionError:
suspicious.append(0)
d = {}
for i in range(0,len(suspicious)):
key = float(suspicious[i])
#print key
if key !=0:
if key not in d:
d[key] = []
d[key].append(i)
ct1=0
ct2=0
ct3=0
fct=0
print("Faulty line:"+str(f_l))
for x in sorted(d):
print (x,len(d[x]))
if f_l not in d[x] and fct==0:
ct1=ct1+len(d[x])
elif f_l not in d[x] and fct==1:
ct3=ct3+len(d[x])
else:
fct=1
ct2=len(d[x])
print("We have to search "+str(ct3+1)+" to "+str(ct3+ct2))
nwt4= (datetime.now() -st4)
o7=ct3+1
o8=ct3+ct2
end_time=datetime.now()
csvfile=open(str_cwd+"/forbes.csv", "a+")
spamwriter1 = csv.writer(csvfile, delimiter=',')
stmt_complex=[]
stmt_complex.append(program_name);
stmt_complex.append(str(version));
#stmt_complex.append(str(sys.argv[1]));
stmt_complex.append(f_l);
stmt_complex.append(o1);
stmt_complex.append(o2);
stmt_complex.append(nwt1);
stmt_complex.append(o3);
stmt_complex.append(o4);
stmt_complex.append(nwt2);
stmt_complex.append(o5);
stmt_complex.append(o6);
stmt_complex.append(nwt3);
stmt_complex.append(o7);
stmt_complex.append(o8);
stmt_complex.append(nwt4);
spamwriter1.writerow(stmt_complex);
|
[
"noreply@github.com"
] |
noreply@github.com
|
04a6f058f68d60e995b0e65725d2a95d224275ac
|
342fec10a5e75fa881413fadfb21c1e21021aa08
|
/todolist/todolist/settings.py
|
032b73a8d6672c3d0572ab87d394720d5f422eb9
|
[] |
no_license
|
ELBEQQAL94/todo_app_django_python
|
9c915cdec634644a6e7e5281810ba9e9683fb2c8
|
7484b60ee1af6d65242b877c8b49f758ad902b6b
|
refs/heads/master
| 2021-01-14T18:56:02.047963
| 2020-02-24T11:58:52
| 2020-02-24T11:58:52
| 242,720,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,094
|
py
|
"""
Django settings for todolist project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tz%9v8paqsd53)@yt^d5ruy($!lnq)=#hpp1d0(heljcjto($j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todolist.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todolist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"ELBEQQAL.youssef@gmail.com"
] |
ELBEQQAL.youssef@gmail.com
|
cac5a5118c1caa62049a0352cf2b96d18328b009
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/Django/rbac/day69/luffy_permission/rbac/middlewares/rbac.py
|
05e05715e769339255dc4cd45e4b8f209919f427
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
from django.utils.deprecation import MiddlewareMixin
from django.shortcuts import HttpResponse, redirect, reverse
from django.conf import settings
import re
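# Hypothetical settings consumed by this middleware (regex strings are examples only,
# not taken from the project):
#   WHITE_LIST = [r'^/login/', r'^/reg/', r'^/admin/.*']
#   NO_PERMISSION_LIST = [r'^/$', r'^/logout/']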
class RbacMiddleWare(MiddlewareMixin):
def process_request(self, request):
        # Get the path of the page being requested
        url = request.path_info  # e.g. index
        # Whitelist: URLs that skip all checks
        for i in settings.WHITE_LIST:
            if re.match(i, url):
                return
        # Get login status
        is_login = request.session.get('is_login')
        # Not logged in: redirect to the login page
        if not is_login:
            return redirect(reverse('login'))
        # URLs that require login but no specific permission
        for i in settings.NO_PERMISSION_LIST:
            if re.match(i, url):
                return
        # Get the current user's permissions
        permission_list = request.session['permission']
        print(permission_list)
        # Permission check
        for i in permission_list:
            if re.match('^{}$'.format(i['permissions__url']), url):
                return
        # No match found: the user lacks permission for this URL
        return HttpResponse('You do not have permission to access this page')
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
76b7cc09b0866a058e802ecbad3ff7e42ba9b7a1
|
4ecedf7d7801e25c60ea3098a63ea96056b6fe20
|
/matrix_addition_ii.py
|
01377ea050a419f99ac13065fb407fdd0039ed91
|
[] |
no_license
|
bengovernali/list_exercises
|
1b86d5df1c3c002d72c156e5047422af417317bf
|
00cd186fe34c4abec53dd8c020ce190464ae893d
|
refs/heads/master
| 2020-05-15T12:29:25.523641
| 2019-04-21T23:47:56
| 2019-04-21T23:47:56
| 182,266,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
mat1 = [[1, 3], [2, 4]]
mat2 = [[5, 2], [1, 0]]
def matrix_addition(matrix1, matrix2):
    result = []
    row_count = 0
    for row in matrix1:
        # build each result row element by element; assigning into an empty
        # list by index would raise an IndexError
        new_row = []
        col_count = 0
        for col in row:
            new_row.append(matrix1[row_count][col_count] + matrix2[row_count][col_count])
            col_count += 1
        result.append(new_row)
        row_count += 1
    return result
print(matrix_addition(mat1, mat2))
|
[
"bgovern93@gmail.com"
] |
bgovern93@gmail.com
|
76586b973fc183aca6038075ca9d1fedb6047377
|
7b4fedb81a42d737007b4602eee035916be39bcd
|
/day7/day7.py
|
61f847d64f3c0b640b3dd2b0e6c74583dde710dd
|
[] |
no_license
|
garymjr/adventofcode
|
51fbbcc01d0d3806f039475baa62a2e6170af7a9
|
0e2550123efa2bff216b857f2e7f47c4f2d0bebd
|
refs/heads/master
| 2021-01-21T04:48:06.013313
| 2016-06-14T15:34:47
| 2016-06-14T15:34:47
| 48,621,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
import re
f = open("./input.txt")
lines = f.readlines()
wires = {}
def get_val(wire):
if wire.isdigit():
val = int(wire)
elif wire in wires.keys():
val = wires[wire]
else:
val = find_wire_val(wire)
return val
def find_wire_val(wire):
if wire in wires.keys():
return wires[wire]
for line in lines:
line = line.split()
output_wire = line[-1]
if wire == output_wire:
if len(line) == 3:
wires[output_wire] = get_val(line[0])
elif line[0] == "NOT":
wires[output_wire] = 65535 - get_val(line[1])
elif line[1] == "AND":
wires[output_wire] = get_val(line[0]) & get_val(line[2])
elif line[1] == "OR":
wires[output_wire] = get_val(line[0]) | get_val(line[2])
elif line[1] == "LSHIFT":
shift = int(line[2])
wires[output_wire] = get_val(line[0]) << shift
elif line[1] == "RSHIFT":
shift = int(line[2])
wires[output_wire] = get_val(line[0]) >> shift
return wires[wire]
# part 1
print(find_wire_val("a"))
# part 2
b = wires['a']
wires.clear()
wires['b'] = b
print(find_wire_val("a"))
|
[
"garymjr@gmail.com"
] |
garymjr@gmail.com
|
0297d348ab4dfb6710e82a3926547044c9ca90ac
|
1aa4ef1dc6052e763dc83bf1aadfd7d02ac183a6
|
/assignment12new 202020/products/migrations/0002_offer.py
|
a0ab111969653c50669d7206e72b68768abc66ca
|
[] |
no_license
|
berves50/Final-project-
|
b65b86f4b1a36b49585960fbc0fd44b4bf5563ec
|
514778844d1306fcea92021d062e230495c43553
|
refs/heads/main
| 2023-01-15T22:56:05.500769
| 2020-11-26T05:44:49
| 2020-11-26T05:44:49
| 316,122,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# Generated by Django 3.0.10 on 2020-11-20 16:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=100)),
('discount', models.FloatField()),
('description', models.CharField(max_length=2000)),
],
),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
6a7a27fa1290cb3c81ae1d132dbf45749bc38835
|
efd2781b5da3d118ea0bc56aa947193b500e0299
|
/newapi/urls.py
|
fed203a45232f8955249d7e79931774157fd3799
|
[] |
no_license
|
Hari635/newsApI
|
9cdb58cea8a1f3e776c18dc8342f3ffd53651c49
|
be03c0632a6875b2440156ff66d2e519d7d5bd76
|
refs/heads/master
| 2023-07-06T02:16:15.842751
| 2021-07-25T13:03:39
| 2021-07-25T13:03:39
| 389,348,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
"""newapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from django.contrib import admin
from django.urls import path,include
schema_view = get_schema_view(
openapi.Info(
title="news API",
default_version='v1',
description="News description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="contact@snippets.local"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path("", schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path("redoc", schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('admin/', admin.site.urls),
path('api/',include("news.urls"))
]
|
[
"hariharasuthan81@gmail.com"
] |
hariharasuthan81@gmail.com
|
9606fc118d763b54512c8278ba1755a594d973cb
|
61e98b0302a43ab685be4c255b4ecf2979db55b6
|
/sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/environment/__init__.py
|
f760583ece5807e4a028e2fb675ec70d4f9836db
|
[
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"Apache-2.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] |
permissive
|
dzenyu/kafka
|
5631c05a6de6e288baeb8955bdddf2ff60ec2a0e
|
d69a24bce8d108f43376271f89ecc3b81c7b6622
|
refs/heads/master
| 2021-07-16T12:31:09.623509
| 2021-06-28T18:22:16
| 2021-06-28T18:22:16
| 198,724,535
| 0
| 0
|
Apache-2.0
| 2019-07-24T23:51:47
| 2019-07-24T23:51:46
| null |
UTF-8
|
Python
| false
| false
| 50,863
|
py
|
# -*- coding: utf-8 -*-
"""
sphinx.environment
~~~~~~~~~~~~~~~~~~
Global creation environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import os
import sys
import time
import types
import codecs
import fnmatch
from os import path
from glob import glob
from six import iteritems, itervalues, class_types, next
from six.moves import cPickle as pickle
from docutils import nodes
from docutils.io import NullOutput
from docutils.core import Publisher
from docutils.utils import Reporter, relative_path, get_source_line
from docutils.parsers.rst import roles
from docutils.parsers.rst.languages import en as english
from docutils.frontend import OptionParser
from sphinx import addnodes
from sphinx.io import SphinxStandaloneReader, SphinxDummyWriter, SphinxFileInput
from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict
from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \
process_only_nodes
from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir
from sphinx.util.images import guess_mimetype
from sphinx.util.i18n import find_catalog_files, get_image_filename_for_language, \
search_image_for_language
from sphinx.util.console import bold, purple
from sphinx.util.docutils import sphinx_domains
from sphinx.util.matching import compile_matchers
from sphinx.util.parallel import ParallelTasks, parallel_available, make_chunks
from sphinx.util.websupport import is_commentable
from sphinx.errors import SphinxError, ExtensionError
from sphinx.versioning import add_uids, merge_doctrees
from sphinx.transforms import SphinxContentsFilter
from sphinx.environment.managers.indexentries import IndexEntries
from sphinx.environment.managers.toctree import Toctree
default_settings = {
'embed_stylesheet': False,
'cloak_email_addresses': True,
'pep_base_url': 'https://www.python.org/dev/peps/',
'rfc_base_url': 'https://tools.ietf.org/html/',
'input_encoding': 'utf-8-sig',
'doctitle_xform': False,
'sectsubtitle_xform': False,
'halt_level': 5,
'file_insertion_enabled': True,
}
# This is increased every time an environment attribute is added
# or changed to properly invalidate pickle files.
#
# NOTE: increase base version by 2 to have distinct numbers for Py2 and 3
ENV_VERSION = 50 + (sys.version_info[0] - 2)
dummy_reporter = Reporter('', 4, 4)
versioning_conditions = {
'none': False,
'text': is_translatable,
'commentable': is_commentable,
}
class NoUri(Exception):
"""Raised by get_relative_uri if there is no URI available."""
pass
class BuildEnvironment(object):
"""
The environment in which the ReST files are translated.
Stores an inventory of cross-file targets and provides doctree
transformations to resolve links to them.
"""
# --------- ENVIRONMENT PERSISTENCE ----------------------------------------
@staticmethod
def frompickle(srcdir, config, filename):
with open(filename, 'rb') as picklefile:
env = pickle.load(picklefile)
if env.version != ENV_VERSION:
raise IOError('build environment version not current')
if env.srcdir != srcdir:
raise IOError('source directory has changed')
env.config.values = config.values
return env
def topickle(self, filename):
# remove unpicklable attributes
warnfunc = self._warnfunc
self.set_warnfunc(None)
values = self.config.values
del self.config.values
domains = self.domains
del self.domains
managers = self.detach_managers()
# remove potentially pickling-problematic values from config
for key, val in list(vars(self.config).items()):
if key.startswith('_') or \
isinstance(val, types.ModuleType) or \
isinstance(val, types.FunctionType) or \
isinstance(val, class_types):
del self.config[key]
with open(filename, 'wb') as picklefile:
pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
# reset attributes
self.attach_managers(managers)
self.domains = domains
self.config.values = values
self.set_warnfunc(warnfunc)
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
def __init__(self, srcdir, doctreedir, config):
self.doctreedir = doctreedir
self.srcdir = srcdir
self.config = config
# the method of doctree versioning; see set_versioning_method
self.versioning_condition = None
self.versioning_compare = None
# the application object; only set while update() runs
self.app = None
# all the registered domains, set by the application
self.domains = {}
# the docutils settings for building
self.settings = default_settings.copy()
self.settings['env'] = self
# the function to write warning messages with
self._warnfunc = None
# this is to invalidate old pickles
self.version = ENV_VERSION
# All "docnames" here are /-separated and relative and exclude
# the source suffix.
self.found_docs = set() # contains all existing docnames
self.all_docs = {} # docname -> mtime at the time of reading
# contains all read docnames
self.dependencies = {} # docname -> set of dependent file
# names, relative to documentation root
self.included = set() # docnames included from other documents
self.reread_always = set() # docnames to re-read unconditionally on
# next build
# File metadata
self.metadata = {} # docname -> dict of metadata items
# TOC inventory
self.titles = {} # docname -> title node
self.longtitles = {} # docname -> title node; only different if
# set differently with title directive
self.tocs = {} # docname -> table of contents nodetree
self.toc_num_entries = {} # docname -> number of real entries
# used to determine when to show the TOC
# in a sidebar (don't show if it's only one item)
self.toc_secnumbers = {} # docname -> dict of sectionid -> number
self.toc_fignumbers = {} # docname -> dict of figtype ->
# dict of figureid -> number
self.toctree_includes = {} # docname -> list of toctree includefiles
self.files_to_rebuild = {} # docname -> set of files
# (containing its TOCs) to rebuild too
self.glob_toctrees = set() # docnames that have :glob: toctrees
self.numbered_toctrees = set() # docnames that have :numbered: toctrees
# domain-specific inventories, here to be pickled
self.domaindata = {} # domainname -> domain-specific dict
# Other inventories
self.indexentries = {} # docname -> list of
# (type, string, target, aliasname)
self.versionchanges = {} # version -> list of (type, docname,
# lineno, module, descname, content)
# these map absolute path -> (docnames, unique filename)
self.images = FilenameUniqDict()
self.dlfiles = FilenameUniqDict()
# temporary data storage while reading a document
self.temp_data = {}
# context for cross-references (e.g. current module or class)
# this is similar to temp_data, but will for example be copied to
# attributes of "any" cross references
self.ref_context = {}
self.managers = {}
self.init_managers()
def init_managers(self):
managers = {}
for manager_class in [IndexEntries, Toctree]:
managers[manager_class.name] = manager_class(self)
self.attach_managers(managers)
def attach_managers(self, managers):
for name, manager in iteritems(managers):
self.managers[name] = manager
manager.attach(self)
def detach_managers(self):
managers = self.managers
self.managers = {}
for _, manager in iteritems(managers):
manager.detach(self)
return managers
def set_warnfunc(self, func):
self._warnfunc = func
self.settings['warning_stream'] = WarningStream(func)
def set_versioning_method(self, method, compare):
"""This sets the doctree versioning method for this environment.
Versioning methods are a builder property; only builders with the same
versioning method can share the same doctree directory. Therefore, we
raise an exception if the user tries to use an environment with an
incompatible versioning method.
"""
if method not in versioning_conditions:
raise ValueError('invalid versioning method: %r' % method)
condition = versioning_conditions[method]
if self.versioning_condition not in (None, condition):
raise SphinxError('This environment is incompatible with the '
'selected builder, please choose another '
'doctree directory.')
self.versioning_condition = condition
self.versioning_compare = compare
def warn(self, docname, msg, lineno=None, **kwargs):
"""Emit a warning.
This differs from using ``app.warn()`` in that the warning may not
be emitted instantly, but collected for emitting all warnings after
the update of the environment.
"""
# strange argument order is due to backwards compatibility
self._warnfunc(msg, (docname, lineno), **kwargs)
def warn_node(self, msg, node, **kwargs):
"""Like :meth:`warn`, but with source information taken from *node*."""
self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs)
def clear_doc(self, docname):
"""Remove all traces of a source file in the inventory."""
if docname in self.all_docs:
self.all_docs.pop(docname, None)
self.reread_always.discard(docname)
self.metadata.pop(docname, None)
self.dependencies.pop(docname, None)
self.titles.pop(docname, None)
self.longtitles.pop(docname, None)
self.images.purge_doc(docname)
self.dlfiles.purge_doc(docname)
for version, changes in self.versionchanges.items():
new = [change for change in changes if change[1] != docname]
changes[:] = new
for manager in itervalues(self.managers):
manager.clear_doc(docname)
for domain in self.domains.values():
domain.clear_doc(docname)
def merge_info_from(self, docnames, other, app):
"""Merge global information gathered about *docnames* while reading them
from the *other* environment.
This possibly comes from a parallel build process.
"""
docnames = set(docnames)
for docname in docnames:
self.all_docs[docname] = other.all_docs[docname]
if docname in other.reread_always:
self.reread_always.add(docname)
self.metadata[docname] = other.metadata[docname]
if docname in other.dependencies:
self.dependencies[docname] = other.dependencies[docname]
self.titles[docname] = other.titles[docname]
self.longtitles[docname] = other.longtitles[docname]
self.images.merge_other(docnames, other.images)
self.dlfiles.merge_other(docnames, other.dlfiles)
for version, changes in other.versionchanges.items():
self.versionchanges.setdefault(version, []).extend(
change for change in changes if change[1] in docnames)
for manager in itervalues(self.managers):
manager.merge_other(docnames, other)
for domainname, domain in self.domains.items():
domain.merge_domaindata(docnames, other.domaindata[domainname])
app.emit('env-merge-info', self, docnames, other)
def path2doc(self, filename):
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
"""
if filename.startswith(self.srcdir):
filename = filename[len(self.srcdir) + 1:]
for suffix in self.config.source_suffix:
if fnmatch.fnmatch(filename, '*' + suffix):
return filename[:-len(suffix)]
else:
# the file does not have docname
return None
def doc2path(self, docname, base=True, suffix=None):
"""Return the filename for the document name.
If *base* is True, return absolute path under self.srcdir.
If *base* is None, return relative path to self.srcdir.
If *base* is a path string, return absolute path under that.
If *suffix* is not None, add it instead of config.source_suffix.
"""
docname = docname.replace(SEP, path.sep)
if suffix is None:
for candidate_suffix in self.config.source_suffix:
if path.isfile(path.join(self.srcdir, docname) +
candidate_suffix):
suffix = candidate_suffix
break
else:
# document does not exist
suffix = self.config.source_suffix[0]
if base is True:
return path.join(self.srcdir, docname) + suffix
elif base is None:
return docname + suffix
else:
return path.join(base, docname) + suffix
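    # Added note (illustrative, not part of the original Sphinx source): with
    # srcdir '/proj/docs' and source_suffix ['.rst'],
    #   env.doc2path('intro')            -> '/proj/docs/intro.rst'
    #   env.doc2path('intro', base=None) -> 'intro.rst'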
def relfn2path(self, filename, docname=None):
"""Return paths to a file referenced from a document, relative to
documentation root and absolute.
In the input "filename", absolute filenames are taken as relative to the
source dir, while relative filenames are relative to the dir of the
containing document.
"""
if filename.startswith('/') or filename.startswith(os.sep):
rel_fn = filename[1:]
else:
docdir = path.dirname(self.doc2path(docname or self.docname,
base=None))
rel_fn = path.join(docdir, filename)
try:
# the path.abspath() might seem redundant, but otherwise artifacts
# such as ".." will remain in the path
return rel_fn, path.abspath(path.join(self.srcdir, rel_fn))
except UnicodeDecodeError:
# the source directory is a bytestring with non-ASCII characters;
# let's try to encode the rel_fn in the file system encoding
enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn))
def find_files(self, config, buildername=None):
"""Find all source files in the source dir and put them in
self.found_docs.
"""
matchers = compile_matchers(
config.exclude_patterns[:] +
config.templates_path +
config.html_extra_path +
['**/_sources', '.#*', '**/.#*', '*.lproj/**']
)
self.found_docs = set()
for docname in get_matching_docs(self.srcdir, config.source_suffix,
exclude_matchers=matchers):
if os.access(self.doc2path(docname), os.R_OK):
self.found_docs.add(docname)
else:
self.warn(docname, "document not readable. Ignored.")
# Current implementation is applying translated messages in the reading
# phase.Therefore, in order to apply the updated message catalog, it is
# necessary to re-process from the reading phase. Here, if dependency
# is set for the doc source and the mo file, it is processed again from
# the reading phase when mo is updated. In the future, we would like to
# move i18n process into the writing phase, and remove these lines.
if buildername != 'gettext':
# add catalog mo file dependency
for docname in self.found_docs:
catalog_files = find_catalog_files(
docname,
self.srcdir,
self.config.locale_dirs,
self.config.language,
self.config.gettext_compact)
for filename in catalog_files:
self.dependencies.setdefault(docname, set()).add(filename)
def get_outdated_files(self, config_changed):
"""Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
added = set()
changed = set()
if config_changed:
# config values affect e.g. substitutions
added = self.found_docs
else:
for docname in self.found_docs:
if docname not in self.all_docs:
added.add(docname)
continue
# if the doctree file is not there, rebuild
if not path.isfile(self.doc2path(docname, self.doctreedir,
'.doctree')):
changed.add(docname)
continue
# check the "reread always" list
if docname in self.reread_always:
changed.add(docname)
continue
# check the mtime of the document
mtime = self.all_docs[docname]
newmtime = path.getmtime(self.doc2path(docname))
if newmtime > mtime:
changed.add(docname)
continue
# finally, check the mtime of dependencies
for dep in self.dependencies.get(docname, ()):
try:
# this will do the right thing when dep is absolute too
deppath = path.join(self.srcdir, dep)
if not path.isfile(deppath):
changed.add(docname)
break
depmtime = path.getmtime(deppath)
if depmtime > mtime:
changed.add(docname)
break
except EnvironmentError:
# give it another chance
changed.add(docname)
break
return added, changed, removed
def update(self, config, srcdir, doctreedir, app):
"""(Re-)read all files new or changed since last update.
Store all environment docnames in the canonical format (ie using SEP as
a separator in place of os.path.sep).
"""
config_changed = False
if self.config is None:
msg = '[new config] '
config_changed = True
else:
# check if a config value was changed that affects how
# doctrees are read
for key, descr in iteritems(config.values):
if descr[1] != 'env':
continue
if self.config[key] != config[key]:
msg = '[config changed] '
config_changed = True
break
else:
msg = ''
# this value is not covered by the above loop because it is handled
# specially by the config class
if self.config.extensions != config.extensions:
msg = '[extensions changed] '
config_changed = True
# the source and doctree directories may have been relocated
self.srcdir = srcdir
self.doctreedir = doctreedir
self.find_files(config, app.buildername)
self.config = config
# this cache also needs to be updated every time
self._nitpick_ignore = set(self.config.nitpick_ignore)
app.info(bold('updating environment: '), nonl=True)
added, changed, removed = self.get_outdated_files(config_changed)
# allow user intervention as well
for docs in app.emit('env-get-outdated', self, added, changed, removed):
changed.update(set(docs) & self.found_docs)
# if files were added or removed, all documents with globbed toctrees
# must be reread
if added or removed:
# ... but not those that already were removed
changed.update(self.glob_toctrees & self.found_docs)
msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
len(removed))
app.info(msg)
self.app = app
# clear all files no longer present
for docname in removed:
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
# read all new and changed files
docnames = sorted(added | changed)
# allow changing and reordering the list of docs to read
app.emit('env-before-read-docs', self, docnames)
# check if we should do parallel or serial read
par_ok = False
if parallel_available and len(docnames) > 5 and app.parallel > 1:
par_ok = True
for extname, md in app._extension_metadata.items():
ext_ok = md.get('parallel_read_safe')
if ext_ok:
continue
if ext_ok is None:
app.warn('the %s extension does not declare if it '
'is safe for parallel reading, assuming it '
'isn\'t - please ask the extension author to '
'check and make it explicit' % extname)
app.warn('doing serial read')
else:
app.warn('the %s extension is not safe for parallel '
'reading, doing serial read' % extname)
par_ok = False
break
if par_ok:
self._read_parallel(docnames, app, nproc=app.parallel)
else:
self._read_serial(docnames, app)
if config.master_doc not in self.all_docs:
raise SphinxError('master file %s not found' %
self.doc2path(config.master_doc))
self.app = None
for retval in app.emit('env-updated', self):
if retval is not None:
docnames.extend(retval)
return sorted(docnames)
def _read_serial(self, docnames, app):
for docname in app.status_iterator(docnames, 'reading sources... ',
purple, len(docnames)):
# remove all inventory entries for that file
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
self.read_doc(docname, app)
def _read_parallel(self, docnames, app, nproc):
# clear all outdated docs at once
for docname in docnames:
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
def read_process(docs):
self.app = app
self.warnings = []
self.set_warnfunc(lambda *args, **kwargs: self.warnings.append((args, kwargs)))
for docname in docs:
self.read_doc(docname, app)
# allow pickling self to send it back
self.set_warnfunc(None)
del self.app
del self.domains
del self.config.values
del self.config
return self
def merge(docs, otherenv):
warnings.extend(otherenv.warnings)
self.merge_info_from(docs, otherenv, app)
tasks = ParallelTasks(nproc)
chunks = make_chunks(docnames, nproc)
warnings = []
for chunk in app.status_iterator(
chunks, 'reading sources... ', purple, len(chunks)):
tasks.add_task(read_process, chunk, merge)
# make sure all threads have finished
app.info(bold('waiting for workers...'))
tasks.join()
for warning, kwargs in warnings:
self._warnfunc(*warning, **kwargs)
def check_dependents(self, already):
to_rewrite = (self.toctree.assign_section_numbers() +
self.toctree.assign_figure_numbers())
for docname in set(to_rewrite):
if docname not in already:
yield docname
# --------- SINGLE FILE READING --------------------------------------------
def warn_and_replace(self, error):
"""Custom decoding error handler that warns and replaces."""
linestart = error.object.rfind(b'\n', 0, error.start)
lineend = error.object.find(b'\n', error.start)
if lineend == -1:
lineend = len(error.object)
lineno = error.object.count(b'\n', 0, error.start) + 1
self.warn(self.docname, 'undecodable source characters, '
'replacing with "?": %r' %
(error.object[linestart + 1:error.start] + b'>>>' +
error.object[error.start:error.end] + b'<<<' +
error.object[error.end:lineend]), lineno)
return (u'?', error.end)
def read_doc(self, docname, app=None):
"""Parse a file and add/update inventory entries for the doctree."""
self.temp_data['docname'] = docname
# defaults to the global default, but can be re-set in a document
self.temp_data['default_domain'] = \
self.domains.get(self.config.primary_domain)
self.settings['input_encoding'] = self.config.source_encoding
self.settings['trim_footnote_reference_space'] = \
self.config.trim_footnote_reference_space
self.settings['gettext_compact'] = self.config.gettext_compact
docutilsconf = path.join(self.srcdir, 'docutils.conf')
# read docutils.conf from source dir, not from current dir
OptionParser.standard_config_files[1] = docutilsconf
if path.isfile(docutilsconf):
self.note_dependency(docutilsconf)
with sphinx_domains(self):
if self.config.default_role:
role_fn, messages = roles.role(self.config.default_role, english,
0, dummy_reporter)
if role_fn:
roles._roles[''] = role_fn
else:
self.warn(docname, 'default role %s not found' %
self.config.default_role)
codecs.register_error('sphinx', self.warn_and_replace)
# publish manually
reader = SphinxStandaloneReader(self.app, parsers=self.config.source_parsers)
pub = Publisher(reader=reader,
writer=SphinxDummyWriter(),
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, self.settings, None)
src_path = self.doc2path(docname)
source = SphinxFileInput(app, self, source=None, source_path=src_path,
encoding=self.config.source_encoding)
pub.source = source
pub.settings._source = src_path
pub.set_destination(None, None)
pub.publish()
doctree = pub.document
# post-processing
self.process_dependencies(docname, doctree)
self.process_images(docname, doctree)
self.process_downloads(docname, doctree)
self.process_metadata(docname, doctree)
self.create_title_from(docname, doctree)
for manager in itervalues(self.managers):
manager.process_doc(docname, doctree)
for domain in itervalues(self.domains):
domain.process_doc(self, docname, doctree)
# allow extension-specific post-processing
if app:
app.emit('doctree-read', doctree)
# store time of reading, for outdated files detection
# (Some filesystems have coarse timestamp resolution;
# therefore time.time() can be older than filesystem's timestamp.
# For example, FAT32 has 2sec timestamp resolution.)
self.all_docs[docname] = max(
time.time(), path.getmtime(self.doc2path(docname)))
if self.versioning_condition:
old_doctree = None
if self.versioning_compare:
# get old doctree
try:
with open(self.doc2path(docname,
self.doctreedir, '.doctree'), 'rb') as f:
old_doctree = pickle.load(f)
except EnvironmentError:
pass
# add uids for versioning
if not self.versioning_compare or old_doctree is None:
list(add_uids(doctree, self.versioning_condition))
else:
list(merge_doctrees(
old_doctree, doctree, self.versioning_condition))
# make it picklable
doctree.reporter = None
doctree.transformer = None
doctree.settings.warning_stream = None
doctree.settings.env = None
doctree.settings.record_dependencies = None
# cleanup
self.temp_data.clear()
self.ref_context.clear()
roles._roles.pop('', None) # if a document has set a local default role
# save the parsed doctree
doctree_filename = self.doc2path(docname, self.doctreedir,
'.doctree')
ensuredir(path.dirname(doctree_filename))
with open(doctree_filename, 'wb') as f:
pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# utilities to use while reading a document
@property
def docname(self):
"""Returns the docname of the document currently being parsed."""
return self.temp_data['docname']
@property
def currmodule(self):
"""Backwards compatible alias. Will be removed."""
self.warn(self.docname, 'env.currmodule is being referenced by an '
'extension; this API will be removed in the future')
return self.ref_context.get('py:module')
@property
def currclass(self):
"""Backwards compatible alias. Will be removed."""
self.warn(self.docname, 'env.currclass is being referenced by an '
'extension; this API will be removed in the future')
return self.ref_context.get('py:class')
def new_serialno(self, category=''):
"""Return a serial number, e.g. for index entry targets.
The number is guaranteed to be unique in the current document.
"""
key = category + 'serialno'
cur = self.temp_data.get(key, 0)
self.temp_data[key] = cur + 1
return cur
def note_dependency(self, filename):
"""Add *filename* as a dependency of the current document.
This means that the document will be rebuilt if this file changes.
*filename* should be absolute or relative to the source directory.
"""
self.dependencies.setdefault(self.docname, set()).add(filename)
def note_included(self, filename):
"""Add *filename* as a included from other document.
This means the document is not orphaned.
*filename* should be absolute or relative to the source directory.
"""
self.included.add(self.path2doc(filename))
def note_reread(self):
"""Add the current document to the list of documents that will
automatically be re-read at the next build.
"""
self.reread_always.add(self.docname)
def note_versionchange(self, type, version, node, lineno):
self.versionchanges.setdefault(version, []).append(
(type, self.temp_data['docname'], lineno,
self.ref_context.get('py:module'),
self.temp_data.get('object'), node.astext()))
# post-processing of read doctrees
def process_dependencies(self, docname, doctree):
"""Process docutils-generated dependency info."""
cwd = getcwd()
frompath = path.join(path.normpath(self.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
if not deps:
return
for dep in deps.list:
# the dependency path is relative to the working dir, so get
# one relative to the srcdir
if isinstance(dep, bytes):
dep = dep.decode(fs_encoding)
relpath = relative_path(frompath,
path.normpath(path.join(cwd, dep)))
self.dependencies.setdefault(docname, set()).add(relpath)
def process_downloads(self, docname, doctree):
"""Process downloadable file paths. """
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
rel_filename, filename = self.relfn2path(targetname, docname)
self.dependencies.setdefault(docname, set()).add(rel_filename)
if not os.access(filename, os.R_OK):
self.warn_node('download file not readable: %s' % filename,
node)
continue
uniquename = self.dlfiles.add_file(docname, filename)
node['filename'] = uniquename
def process_images(self, docname, doctree):
"""Process and rewrite image URIs."""
def collect_candidates(imgpath, candidates):
globbed = {}
for filename in glob(imgpath):
new_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
filename)
try:
mimetype = guess_mimetype(filename)
if mimetype not in candidates:
globbed.setdefault(mimetype, []).append(new_imgpath)
except (OSError, IOError) as err:
self.warn_node('image file %s not readable: %s' %
(filename, err), node)
for key, files in iteritems(globbed):
candidates[key] = sorted(files, key=len)[0] # select by similarity
for node in doctree.traverse(nodes.image):
# Map the mimetype to the corresponding image. The writer may
# choose the best image from these candidates. The special key * is
# set if there is only single candidate to be used by a writer.
# The special key ? is set for nonlocal URIs.
node['candidates'] = candidates = {}
imguri = node['uri']
if imguri.startswith('data:'):
self.warn_node('image data URI found. some builders might not support', node,
type='image', subtype='data_uri')
candidates['?'] = imguri
continue
elif imguri.find('://') != -1:
self.warn_node('nonlocal image URI found: %s' % imguri, node,
type='image', subtype='nonlocal_uri')
candidates['?'] = imguri
continue
rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
if self.config.language:
# substitute figures (ex. foo.png -> foo.en.png)
i18n_full_imgpath = search_image_for_language(full_imgpath, self)
if i18n_full_imgpath != full_imgpath:
full_imgpath = i18n_full_imgpath
rel_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
i18n_full_imgpath)
# set imgpath as default URI
node['uri'] = rel_imgpath
if rel_imgpath.endswith(os.extsep + '*'):
if self.config.language:
# Search language-specific figures at first
i18n_imguri = get_image_filename_for_language(imguri, self)
_, full_i18n_imgpath = self.relfn2path(i18n_imguri, docname)
collect_candidates(full_i18n_imgpath, candidates)
collect_candidates(full_imgpath, candidates)
else:
candidates['*'] = rel_imgpath
# map image paths to unique image names (so that they can be put
# into a single directory)
for imgpath in itervalues(candidates):
self.dependencies.setdefault(docname, set()).add(imgpath)
if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
self.warn_node('image file not readable: %s' % imgpath,
node)
continue
self.images.add_file(docname, imgpath)
def process_metadata(self, docname, doctree):
"""Process the docinfo part of the doctree as metadata.
Keep processing minimal -- just return what docutils says.
"""
self.metadata[docname] = md = {}
try:
docinfo = doctree[0]
except IndexError:
# probably an empty document
return
if docinfo.__class__ is not nodes.docinfo:
# nothing to see here
return
for node in docinfo:
# nodes are multiply inherited...
if isinstance(node, nodes.authors):
md['authors'] = [author.astext() for author in node]
elif isinstance(node, nodes.TextElement): # e.g. author
md[node.__class__.__name__] = node.astext()
else:
name, body = node
md[name.astext()] = body.astext()
for name, value in md.items():
if name in ('tocdepth',):
try:
value = int(value)
except ValueError:
value = 0
md[name] = value
del doctree[0]
def create_title_from(self, docname, document):
"""Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
titlenode = nodes.title()
longtitlenode = titlenode
# explicit title set with title directive; use this only for
# the <title> tag in HTML output
if 'title' in document:
longtitlenode = nodes.title()
longtitlenode += nodes.Text(document['title'])
# look for first section title and use that as the title
for node in document.traverse(nodes.section):
visitor = SphinxContentsFilter(document)
node[0].walkabout(visitor)
titlenode += visitor.get_entry_text()
break
else:
# document has no title
titlenode += nodes.Text('<no title>')
self.titles[docname] = titlenode
self.longtitles[docname] = longtitlenode
def note_toctree(self, docname, toctreenode):
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
self.toctree.note_toctree(docname, toctreenode)
def get_toc_for(self, docname, builder):
"""Return a TOC nodetree -- for use on the same page only!"""
return self.toctree.get_toc_for(docname, builder)
def get_toctree_for(self, docname, builder, collapse, **kwds):
"""Return the global TOC nodetree."""
return self.toctree.get_toctree_for(docname, builder, collapse, **kwds)
def get_domain(self, domainname):
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
"""
try:
return self.domains[domainname]
except KeyError:
raise ExtensionError('Domain %r is not registered' % domainname)
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
def get_doctree(self, docname):
"""Read the doctree for a file from the pickle and return it."""
doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
with open(doctree_filename, 'rb') as f:
doctree = pickle.load(f)
doctree.settings.env = self
doctree.reporter = Reporter(self.doc2path(docname), 2, 5,
stream=WarningStream(self._warnfunc))
return doctree
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
if doctree is None:
doctree = self.get_doctree(docname)
# resolve all pending cross-references
self.resolve_references(doctree, docname, builder)
# now, resolve all toctree nodes
for toctreenode in doctree.traverse(addnodes.toctree):
result = self.resolve_toctree(docname, builder, toctreenode,
prune=prune_toctrees,
includehidden=includehidden)
if result is None:
toctreenode.replace_self([])
else:
toctreenode.replace_self(result)
return doctree
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
to the value of the *maxdepth* option on the *toctree* node.
If *titles_only* is True, only toplevel document titles will be in the
resulting tree.
If *collapse* is True, all branches not containing docname will
be collapsed.
"""
return self.toctree.resolve_toctree(docname, builder, toctree, prune,
maxdepth, titles_only, collapse,
includehidden)
def resolve_references(self, doctree, fromdocname, builder):
for node in doctree.traverse(addnodes.pending_xref):
contnode = node[0].deepcopy()
newnode = None
typ = node['reftype']
target = node['reftarget']
refdoc = node.get('refdoc', fromdocname)
domain = None
try:
if 'refdomain' in node and node['refdomain']:
# let the domain try to resolve the reference
try:
domain = self.domains[node['refdomain']]
except KeyError:
raise NoUri
newnode = domain.resolve_xref(self, refdoc, builder,
typ, target, node, contnode)
# really hardwired reference types
elif typ == 'any':
newnode = self._resolve_any_reference(builder, refdoc, node, contnode)
elif typ == 'doc':
newnode = self._resolve_doc_reference(builder, refdoc, node, contnode)
# no new node found? try the missing-reference event
if newnode is None:
newnode = builder.app.emit_firstresult(
'missing-reference', self, node, contnode)
# still not found? warn if node wishes to be warned about or
# we are in nit-picky mode
if newnode is None:
self._warn_missing_reference(refdoc, typ, target, node, domain)
except NoUri:
newnode = contnode
node.replace_self(newnode or contnode)
# remove only-nodes that do not belong to our builder
process_only_nodes(doctree, builder.tags, warn_node=self.warn_node)
# allow custom references to be resolved
builder.app.emit('doctree-resolved', doctree, fromdocname)
def _warn_missing_reference(self, refdoc, typ, target, node, domain):
warn = node.get('refwarn')
if self.config.nitpicky:
warn = True
if self._nitpick_ignore:
dtype = domain and '%s:%s' % (domain.name, typ) or typ
if (dtype, target) in self._nitpick_ignore:
warn = False
# for "std" types also try without domain name
if (not domain or domain.name == 'std') and \
(typ, target) in self._nitpick_ignore:
warn = False
if not warn:
return
if domain and typ in domain.dangling_warnings:
msg = domain.dangling_warnings[typ]
elif typ == 'doc':
msg = 'unknown document: %(target)s'
elif node.get('refdomain', 'std') not in ('', 'std'):
msg = '%s:%s reference target not found: %%(target)s' % \
(node['refdomain'], typ)
else:
msg = '%r reference target not found: %%(target)s' % typ
self.warn_node(msg % {'target': target}, node, type='ref', subtype=typ)
def _resolve_doc_reference(self, builder, refdoc, node, contnode):
# directly reference to document by source name;
# can be absolute or relative
docname = docname_join(refdoc, node['reftarget'])
if docname in self.all_docs:
if node['refexplicit']:
# reference with explicit title
caption = node.astext()
else:
caption = clean_astext(self.titles[docname])
innernode = nodes.inline(caption, caption)
innernode['classes'].append('doc')
newnode = nodes.reference('', '', internal=True)
newnode['refuri'] = builder.get_relative_uri(refdoc, docname)
newnode.append(innernode)
return newnode
def _resolve_any_reference(self, builder, refdoc, node, contnode):
"""Resolve reference generated by the "any" role."""
target = node['reftarget']
results = []
# first, try resolving as :doc:
doc_ref = self._resolve_doc_reference(builder, refdoc, node, contnode)
if doc_ref:
results.append(('doc', doc_ref))
# next, do the standard domain (makes this a priority)
results.extend(self.domains['std'].resolve_any_xref(
self, refdoc, builder, target, node, contnode))
for domain in self.domains.values():
if domain.name == 'std':
continue # we did this one already
try:
results.extend(domain.resolve_any_xref(self, refdoc, builder,
target, node, contnode))
except NotImplementedError:
# the domain doesn't yet support the new interface
# we have to manually collect possible references (SLOW)
for role in domain.roles:
res = domain.resolve_xref(self, refdoc, builder, role, target,
node, contnode)
if res and isinstance(res[0], nodes.Element):
results.append(('%s:%s' % (domain.name, role), res))
# now, see how many matches we got...
if not results:
return None
if len(results) > 1:
nice_results = ' or '.join(':%s:' % r[0] for r in results)
self.warn_node('more than one target found for \'any\' cross-'
'reference %r: could be %s' % (target, nice_results),
node)
res_role, newnode = results[0]
# Override "any" class with the actual role type to get the styling
# approximately correct.
res_domain = res_role.split(':')[0]
if newnode and newnode[0].get('classes'):
newnode[0]['classes'].append(res_domain)
newnode[0]['classes'].append(res_role.replace(':', '-'))
return newnode
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
return self.indices.create_index(builder, group_entries=group_entries, _fixre=_fixre)
def collect_relations(self):
traversed = set()
def traverse_toctree(parent, docname):
if parent == docname:
self.warn(docname, 'self referenced toctree found. Ignored.')
return
# traverse toctree by pre-order
yield parent, docname
traversed.add(docname)
for child in (self.toctree_includes.get(docname) or []):
for subparent, subdocname in traverse_toctree(docname, child):
if subdocname not in traversed:
yield subparent, subdocname
traversed.add(subdocname)
relations = {}
docnames = traverse_toctree(None, self.config.master_doc)
prevdoc = None
parent, docname = next(docnames)
for nextparent, nextdoc in docnames:
relations[docname] = [parent, prevdoc, nextdoc]
prevdoc = docname
docname = nextdoc
parent = nextparent
relations[docname] = [parent, prevdoc, None]
return relations
def check_consistency(self):
"""Do consistency checks."""
for docname in sorted(self.all_docs):
if docname not in self.files_to_rebuild:
if docname == self.config.master_doc:
# the master file is not included anywhere ;)
continue
if docname in self.included:
# the document is included from other documents
continue
if 'orphan' in self.metadata[docname]:
continue
self.warn(docname, 'document isn\'t included in any toctree')
|
[
"alex.barreto@databricks.com"
] |
alex.barreto@databricks.com
|
7370dbee5c5d4f5920e1dc5ef94adebfd991c9f7
|
4774ce4e23a19827f0fbf6baa71bbb8cab98b4de
|
/imagespace/server/imagebackgroundsearch_rest.py
|
eae3a18eb28023f8c75fd155d13e7765ebbd71fa
|
[
"Apache-2.0"
] |
permissive
|
lewismc/image_space
|
a4bd8494fbc04f8cb71256bb5918786aea9259b9
|
4164523b573be364661809f5971946c65570e899
|
refs/heads/master
| 2021-01-18T11:22:37.363303
| 2015-10-15T14:50:21
| 2015-10-15T14:50:21
| 39,895,591
| 0
| 0
| null | 2015-07-29T13:43:50
| 2015-07-29T13:43:50
| null |
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
from girder import logger
import requests
import os
class ImageBackgroundSearch(Resource):
def __init__(self):
self.resourceName = 'imagebackgroundsearch'
self.route('GET', (), self.getImageSearch)
@access.public
def getImageSearch(self, params):
return self._imageSearch(params)
@access.public
def postImageSearch(self, params):
return self._imageSearch(params)
def _imageSearch(self, params):
return [{'id': d[0], 'score': d[1]} for d in requests.post(
os.environ['IMAGE_SPACE_CMU_BACKGROUND_SEARCH'],
data=params['url'],
headers={
'Content-type': 'text',
'Content-length': str(len(params['url']))
},
verify=False)
.json()]
getImageSearch.description = Description('Searches images by background')
|
[
"jeff.baumes@kitware.com"
] |
jeff.baumes@kitware.com
|
5e5e921bd20b358220b07bd22aafc99e675ffafa
|
62a7b1a4569f519554f7e68e73115e87d4102c39
|
/wyklad_1/file_hasher.py
|
895d6149f422c3f4a90f553d2b32c556dffdd547
|
[] |
no_license
|
DaftAcademy/zajecia_python_mini_edycja4
|
ff491b3dcdf4f84ea9b01edc6cb9094235bcb7df
|
f83d7b31521cb5a3248fb17a3640840d8f77a84a
|
refs/heads/master
| 2022-08-01T02:45:28.471046
| 2017-05-15T11:25:09
| 2017-05-15T11:25:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
import hashlib
# https://en.wikipedia.org/wiki/Pigeonhole_principle
# https://pl.wikipedia.org/wiki/Zasada_szufladkowa_Dirichleta
def get_hash(f_path, mode='md5'):
h = hashlib.new(mode)
    # TODO: HOMEWORK: do not read the whole file at once, read it in chunks
    f = open(f_path, 'rb')  # opens the file inside the hashing function; what about error handling?
    h.update(f.read())  # reads the whole file at once!
hash_text = h.hexdigest()
f.close()
return hash_text
#print(get_hash('plik_testowy'))
#print(get_hash('sha1_collisions/shattered-1.pdf', mode='sha1'))
#print(get_hash('sha1_collisions/shattered-2.pdf', mode='sha1'))
# eb63071881718ed66bb75ce670e65b9e
# eb63071881718ed66bb75ce670e65b9e
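# A minimal sketch of the chunked variant hinted at in the TODO above
# (the helper name and the 8 KiB block size are assumptions, not part of the original exercise).
def get_hash_chunked(f_path, mode='md5', block_size=8192):
    h = hashlib.new(mode)
    with open(f_path, 'rb') as f:  # the context manager also takes care of closing the file
        for block in iter(lambda: f.read(block_size), b''):
            h.update(block)  # hash one block at a time instead of the whole file
    return h.hexdigest()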
|
[
"marcin.jaroszewski@dcx.bz"
] |
marcin.jaroszewski@dcx.bz
|
2cd69de0ed6caab0657b020ebf567283aa2001c4
|
52a32a93942b7923b7c0c6ca5a4d5930bbba384b
|
/dojo/db_migrations/0051_regulation_categories.py
|
2fe6ff5d352fd9d2bde9377ad6d5aaf8ae2c4bdb
|
[
"MIT-open-group",
"GCC-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-or-later",
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"PSF-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-2.0-or-later",
"HPND",
"libtiff",
"LGPL-2.1-or-later",
"EPL-2.0",
"GPL-3.0-only",
"MIT",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-public-domain-disclaimer",
"HPND-Markus-Kuhn",
"CC-BY-SA-4.0",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LicenseRef-scancode-openssl-exception-lgpl3.0plus",
"Libpng",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSD-Advertising-Acknowledgement",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT-Modern-Variant",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-xfree86-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] |
permissive
|
DefectDojo/django-DefectDojo
|
43bfb1c728451335661dadc741be732a50cd2a12
|
b98093dcb966ffe972f8719337de2209bf3989ec
|
refs/heads/master
| 2023-08-21T13:42:07.238370
| 2023-08-14T18:00:34
| 2023-08-14T18:00:34
| 31,028,375
| 2,719
| 1,666
|
BSD-3-Clause
| 2023-09-14T19:46:49
| 2015-02-19T17:53:47
|
HTML
|
UTF-8
|
Python
| false
| false
| 810
|
py
|
# Generated by Django 2.2.15 on 2020-08-30 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0050_deduplication_on_engagement'),
]
operations = [
migrations.AlterField(
model_name='regulation',
name='category',
field=models.CharField(choices=[('privacy', 'Privacy'), ('finance', 'Finance'), ('education', 'Education'), ('medical', 'Medical'), ('corporate', 'Corporate'), ('other', 'Other')], help_text='The subject of the regulation.', max_length=9),
),
migrations.AlterField(
model_name='regulation',
name='name',
field=models.CharField(help_text='The name of the regulation.', max_length=128, unique=True),
),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
83b12f7616cc9f2600fc279082ca09056457ba30
|
a071ceda7a4f7327f2e0ddbe87d431b69e2cdf6f
|
/Lab1/ex5.py
|
11bd8fc7bf3f262b740f22b8eb23727b8b728a78
|
[] |
no_license
|
thomasolsen/IS105
|
e599e5992d4f0cb93a3052f911d98137929792d9
|
2c4537c42c76a59fc2fe1a22ce32e5ac7ffbe89f
|
refs/heads/master
| 2020-04-19T13:48:28.445758
| 2014-01-29T09:50:18
| 2014-01-29T09:50:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
my_name = 'Zed A. Shaw' # No, it's really Thomas.
my_age = 35 # Actually, I'm 26.
my_height = 74 # inches, but 72.5 in reality.
my_weight = 180 # lbs but really 190.
my_eyes = 'Blue' # Green actually.
my_teeth = 'White' # Teeth colour.
my_hair = 'Brown' # Blonde brown really.
# The text that shows everything in a proper manner.
print "Let's talk about %s." % my_name
print "He's %d inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "His teeth are usually %s depending on the coffee." % my_teeth
# This is where we sum up all my numbers and get a total sum for no reason.
print "If I add %d, %d, and %d I get %d." % (my_age, my_height, my_weight, my_age + my_height + my_weight)
|
[
"thomas.pedersen@live.com"
] |
thomas.pedersen@live.com
|
4f46f1e8e5ff03b75d112f34a2a61c0445245336
|
a9378ff37b5efe805894a1ea3aab98c91e982635
|
/Microgrid/test.py
|
e4b362f9f0e0ed2184667b3530ca5e8ae0562e28
|
[
"MIT"
] |
permissive
|
nudging-SMDP/nudging-supplementary-material
|
36fd91f248ba12df7aefbf4977198de5fde8fffe
|
94795abf1e26abab07389436fc737029e6a2a566
|
refs/heads/main
| 2023-07-05T22:25:23.254530
| 2021-08-10T23:37:20
| 2021-08-10T23:37:20
| 372,692,059
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,752
|
py
|
import os
# comment if not using GPU
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from microgrid_env import Microgrid
from stable_baselines import PPO2
from stable_baselines.common.policies import MlpPolicy
from nudge.nudge_functions import *
import numpy as np
# path to final model
path = f'./results_nudging/ppo_opt_nudge_final'
# parameters for the recurrent state
day = 2
month = 1
gas_enable = True
battery_charge = 50
# number of episodes to test
test_episodes = 100
# get estimated gain
rhos = np.load(f'{path}/summary_rhos.npy')
# create environment
ugrid = Microgrid(day, month, gas_enable, battery_charge)
ugrid.rho = rhos[-1]
# reset environment and get recurrent state
state_s0 = ugrid.reset()
# load policy model
model = PPO2.load(f'{path}')
test_rewards = []
test_cost = []
test_rc = []
# ------------ TESTING ------------
for i in range(test_episodes):
# reset environment
state = ugrid.reset()
done = False
rewards = []
costs = []
while(not done):
# apply policy
action, value, _ = model.predict(observation=state_s0, deterministic=True)
# get next state, nudged reward and done flag
state, r, done, _ = ugrid.step(action)
        # save rewards and cost of the microgrid following the learned policy
rewards.append(ugrid.record_rewards[-1])
costs.append(ugrid.record_cost[-1])
if done:
test_rewards.append(np.mean(rewards))
test_cost.append(np.mean(costs))
rc = test_rewards[-1]/test_cost[-1]
test_rc.append(rc)
break
# Report
print(f'Rewards, r = {np.mean(test_rewards)}')
print(f'Cost, c = {np.mean(test_cost)}')
print(f'r/c = {np.mean(test_rc)}')
|
[
"optimalnudging@gmail.com"
] |
optimalnudging@gmail.com
|
7bdbb45c2a647db014716b1afeeb24c732579da3
|
7a73fef9ae426c48573bae41447cef7cb2b97bf6
|
/dynamicserialize/dstypes/com/raytheon/uf/common/management/response/__init__.py
|
e1e5fcfbdc6f395fcaab59c2371def4edec4459b
|
[
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] |
permissive
|
mjames-upc/python-awips
|
7f0a80a04457224c9e195b82a95eef4d9b2b3091
|
e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c
|
refs/heads/master
| 2020-03-31T03:00:49.540816
| 2018-10-05T23:15:42
| 2018-10-05T23:15:42
| 53,707,817
| 0
| 0
| null | 2017-04-12T18:00:59
| 2016-03-12T01:46:57
|
Python
|
UTF-8
|
Python
| false
| false
| 104
|
py
|
##
##
# File auto-generated by PythonFileGenerator
__all__ = [
'diagnostic'
]
|
[
"mjames@unidata.ucar.edu"
] |
mjames@unidata.ucar.edu
|
d8378edcd6198ed26579441c3a40b78c8cf15001
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/representations/__init__.py
|
bf71c01861311a939e78407acf62aa9512c59a19
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.representations_api import RepresentationsApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.type.type_api import TypeApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.vtt.vtt_api import VttApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.imsc.imsc_api import ImscApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.sprite.sprite_api import SpriteApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.fmp4.fmp4_api import Fmp4Api
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.chunked_text.chunked_text_api import ChunkedTextApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.cmaf.cmaf_api import CmafApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.mp4.mp4_api import Mp4Api
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.webm.webm_api import WebmApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.progressive_webm.progressive_webm_api import ProgressiveWebmApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.dash_representation_list_query_params import DashRepresentationListQueryParams
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
ca40498aa30d2a1e5baee83adfbb27bbb25c2bfa
|
f18125b848e37a64e35136a90cf4694e52eb9fcc
|
/teek/_timeouts.py
|
13563144be59e5b03aa5f93e5c8b5b14d4b72708
|
[
"MIT"
] |
permissive
|
carlbordum/teek
|
d19271dfe11e3e77052b1a3c215ddf6a9d50e440
|
a931b468744c8236fd4ce6f1dc3a8c4829d59db3
|
refs/heads/master
| 2020-04-16T11:41:10.909230
| 2019-01-13T19:24:16
| 2019-01-13T19:24:16
| 165,547,247
| 0
| 0
| null | 2019-01-13T19:48:26
| 2019-01-13T19:48:26
| null |
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
import teek as tk
from teek._tcl_calls import make_thread_safe
# there's no after_info because i don't see how it would be useful in
# teek
class _Timeout:
def __init__(self, after_what, callback, args, kwargs):
if kwargs is None:
kwargs = {}
self._callback = callback
self._args = args
self._kwargs = kwargs
self._state = 'pending' # just for __repr__ and error messages
self._tcl_command = tk.create_command(self._run)
self._id = tk.tcl_call(str, 'after', after_what, self._tcl_command)
def __repr__(self):
name = getattr(self._callback, '__name__', self._callback)
return '<%s %r timeout %r>' % (self._state, name, self._id)
def _run(self):
needs_cleanup = True
# this is important, thread tests freeze without this special
# case for some reason
def quit_callback():
nonlocal needs_cleanup
needs_cleanup = False
tk.before_quit.connect(quit_callback)
try:
self._callback(*self._args, **self._kwargs)
self._state = 'successfully completed'
except Exception as e:
self._state = 'failed'
raise e
finally:
tk.before_quit.disconnect(quit_callback)
if needs_cleanup:
tk.delete_command(self._tcl_command)
@make_thread_safe
def cancel(self):
"""Prevent this timeout from running as scheduled.
:exc:`RuntimeError` is raised if the timeout has already ran or
it has been cancelled.
"""
if self._state != 'pending':
raise RuntimeError("cannot cancel a %s timeout" % self._state)
tk.tcl_call(None, 'after', 'cancel', self._id)
self._state = 'cancelled'
tk.delete_command(self._tcl_command)
@make_thread_safe
def after(ms, callback, args=(), kwargs=None):
"""Run ``callback(*args, **kwargs)`` after waiting for the given time.
The *ms* argument should be a waiting time in milliseconds, and
*kwargs* defaults to ``{}``. This returns a timeout object with a
``cancel()`` method that takes no arguments; you can use that to
cancel the timeout before it runs.
"""
return _Timeout(ms, callback, args, kwargs)
@make_thread_safe
def after_idle(callback, args=(), kwargs=None):
"""Like :func:`after`, but runs the timeout as soon as possible."""
return _Timeout('idle', callback, args, kwargs)
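if __name__ == '__main__':
    # Minimal usage sketch (illustrative; the delays and callbacks below are
    # assumptions, not part of the original module).
    timeout = after(1000, lambda: print('one second passed'))  # can be cancelled with timeout.cancel()
    after(1500, tk.quit)  # stop the event loop shortly after the first timeout fires
    tk.run()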
|
[
"akuviljanen17@gmail.com"
] |
akuviljanen17@gmail.com
|
e9bc9aded9058951ba4a02e774a00a8f7b8b97b0
|
1ed7d8d0490c90704994863daf6e6517717b17cd
|
/programs/unsupervised_category_learning/domain_to_category.py
|
f8d6e3ebb76e32bebdc0c785c97f2adb7750d902
|
[] |
no_license
|
ankuragrawal70/KenSor
|
61ca59ee61a374eb5efcabf742c05f1b67000bd0
|
08cc378aaf5087a7990c1d3bfc2962b8334a0de7
|
refs/heads/master
| 2020-03-27T05:09:41.046750
| 2018-08-31T12:44:44
| 2018-08-31T12:44:44
| 145,998,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,491
|
py
|
#import MySQLdb
import gexf
import networkx as nx
import wikipedia
import difflib
import operator
import socket
import MySQLdb
import os
import urllib2, httplib
import category_from_urls as cat_class
import wiki_classification_cat_relation as wiki
from bs4 import BeautifulSoup
#import matplotlib.pyplot as plt
def loose_match(s1,s2):
seq = difflib.SequenceMatcher()
try:
seq.set_seqs(s1, s2)
d=seq.ratio()*100
d=int(d)
return d
except:
return 0
"""def source_fetcher():
db = MySQLdb.connect("localhost",user="root",db="temporary_category_classification")
cursor = db.cursor()
sql="select * from level_1"
url_list=[]
try:
cursor.execute(sql)
result=cursor.fetchall()
url_list=list(result)
except:
print "error in selection"
db.close()
print len(url_list)
for e in url_list:
if e[2]>=3:
#if e[1]=='http://timesofindia.indiatimes.com/techspeciallisting/35626913.cms':
# print 'yes'
u=e[1]
#cat_list=url_category_list(u)
#print u,cat_list
if domain.has_key(e[0]):
info=domain[e[0]]
category_domain_info(u,info)
domain[e[0]]=info
else:
info={}
category_domain_info(u,info)
domain[e[0]]=info"""
def gdelt_source_fetcher():
gdelt_path='D://Thesis//data//domain_name//category_gdelt_valid_source//'
file_list=os.listdir(gdelt_path)
for i in range(1,len(file_list)):
p=gdelt_path+file_list[i]
f=open(p,'r')
c_info=eval(f.read())
if len(c_info)>0:
f_name='http://'+file_list[i].rstrip('.txt')
#print f_name
for e in c_info:
cat_class.category_domain_info(e,f_name,category)
f.close()
"""gdelt_path='D://Thesis//data//domain_name//gdelt_heuristic_approach_1//'
file_list=os.listdir(gdelt_path)
for i in range(0,len(file_list)):
#for i in range(0,500):
p=gdelt_path+file_list[i]
f=open(p,'r')
category_url=f.read().split('\n')
f_name='http://'+file_list[i].rstrip('.txt')
#print f_name
i=0
for e in category_url:
cat_class.category_domain_info(e,f_name,category)
i=i+1
#gdelt_cat[f_name]=cat
if i==len(category_url)-1:
break
f.close()"""
"""gdelt_path='D://Thesis//data//domain_name//special_sources_not_in_gdelt//output//'
file_list=os.listdir(gdelt_path)
for i in range(0,len(file_list)):
#for i in range(0,500):
p=gdelt_path+file_list[i]
f=open(p,'r')
category_url=f.read().split('\n')
f_name='http://'+file_list[i].rstrip('.txt')
#print f_name
i=0
for e in category_url:
cat_class.category_domain_info(e,f_name,category)
i=i+1
#gdelt_cat[f_name]=cat
if i==len(category_url)-1:
break
f.close()"""
def wiki_check(cat):
#print len(list1)
cat_distri={}
cat=clean(cat)
try:
#ny = wikipedia.page(cat)
wiki_text=wikipedia.summary(cat)
elements=wiki_text.split(" ")
count=0
for e in elements:
e=e.lower()
e=e.encode('utf-8')
try:
for ele in list2:
ele=ele.encode('utf-8')
ele=ele.lower()
#print ele
if loose_match(ele,e)>=80:
#if loose_match1(ele,e):
#print 'hello'
count=count+1
if cat_distri.has_key(ele):
cat_distri[ele]=cat_distri[ele]+1
else:
f=0
for ca in cat_distri:
if ca in ele or ele in ca:
f=1
cat_distri[ca]=cat_distri[ca]+1
if ca=='chile':
print 'elemenmt is' ,ele
else:
if loose_match(ca,ele)>=80:
f=1
cat_distri[ca]=cat_distri[ca]+1
if f==0:
cat_distri[ele]=1
except:
pass
print 'delhi in cat does not exists\n delhi',cat_distri['delhi']
print cat_distri
ch=len(elements)
x=sorted(cat_distri.iteritems(), key=lambda (k,v): (v,k),reverse=True)
"""for w in sorted(cat_distri, key=cat_distri.get, reverse=True):
c=sum(cat_distri.values())
print c
x=((float)(cat_distri[w]))/c
print w,x,sub_cat[w].parent"""
k=0
for re in x:
print re[0], float(re[1])/ch
k=k+1
if k>6:
break
except:
print "no result"
def check_validity(h_link,domain_name,level):
if domain_name in h_link:
level.append(h_link)
else:
if h_link[0]=='/':
if domain_name[len(domain_name)-1]!='/':
level.append(domain_name+h_link)
else:
l=len(domain_name)-1
x=domain_name[:l]
level.append(x+h_link)
def top_news_articles(d_name):
level=[]
try :
web_page = urllib2.urlopen(d_name,timeout=4)
soup = BeautifulSoup(web_page)
c=soup.find_all('a')
for e in c:
#print e
try:
l=e['href']
if l!=d_name:
check_validity(l,d_name,level)
except:
#print 'error after parsing links'
pass
#return level
return level
except:
print 'error in main link'
#return level
return level
pass
def remove_redundant_category(category):
for cat in (category.keys()):
cat_object=category[cat]
#print cat,cat_object
#for ele in (cat_object.references.keys()):
#if cat_object.references[ele]<=2:
if len(category[cat].news_sources)<6:
cat_delete=category[cat]
if len(cat_delete.parent)>0:
for ele in cat_delete.parent:
del category[ele].references[cat]
if len(cat_delete.references)>0:
for e in cat_delete.references:
category[e].parent.remove(cat)
del category[cat]
#category={}
def write_to_db(category):
db = MySQLdb.connect("localhost",user="root",db="web_categorization")
cursor = db.cursor()
p_id=0
for e in (category.keys()):
c_info=category[e]
name=c_info.c_name
c=c_info.count
p_id=p_id+1
length=len(category[name].news_sources)
sql = ("insert into category(category_name,count,sources_count) values ('%s','%s','%s')"%(name,c,length))
try:
# Execute the SQL command
cursor.execute(sql)
db.commit()
            print 'inserted category is', name, p_id
except:
print "error in main"
# Rollback in case there is any error
db.rollback()
db.close()
def write_children(category):
db = MySQLdb.connect("localhost",user="root",db="web_categorization")
cursor = db.cursor()
#for el in c_info.references:
for e in category:
#e="business"
c_id=0
#print e
sql="select category_id from category where category_name='%s'"%(e)
try:
cursor.execute(sql)
row=cursor.fetchone()
c_id=row[0]
#print "c_id is",c_id
db.commit()
except:
print "error in selection"
db.rollback()
obj=category[e]
children=obj.references
for child in children:
cnt=children[child]
print "category and c_id ",e,c_id
sql = ("INSERT INTO CHILDREN(CHILD_NAME,COUNT,PARENT_ID) VALUES('%s','%s','%s')" %(child, cnt,c_id))
try:
cursor.execute(sql)
db.commit()
except:
print "error in insertion"
db.rollback()
"""url_info=obj.news_sources
for ele in url_info:
sql = ("INSERT INTO url_info(url,cat_id) VALUES('%s','%s')" %(ele,c_id))
try:
cursor.execute(sql)
print "inserting url"
db.commit()
except:
print "error in insertion"
db.rollback()"""
db.close()
def write_sources(category):
db = MySQLdb.connect("localhost",user="root",db="web_categorization")
cursor = db.cursor()
for element in category:
sou=category[element].news_sources
for child in sou:
print "category and source ",element,child
sql = ("INSERT INTO DOMAIN_CATEGORY_PATH(CATEGORY_NAME,DOMAIN_PATH) VALUES('%s','%s')" %(element,child))
try:
cursor.execute(sql)
db.commit()
except:
print "error in insertion"
db.rollback()
db.close()
domain={}
category={}
#source_fetcher()
gdelt_source_fetcher()
remove_redundant_category(category)
#print 'length of the category is',len(category),"\n".join(category.keys())
#write_sources(category)
#write_to_db(category)
#write_children(category)
#y=sorted(domain.items(), key=lambda kv: len(kv[1]), reverse=True)
"""for i in range(0,20):
print y[i][0],len(y[i][1])"""
#source_fetcher2()
#print domain.keys()
#write sources category_wise
#path="D://Thesis//data//domain_name//domains_related_to_category_all_5//"
#source_file="D://Thesis//data//domain_name//special_sources_not_in_gdelt//sources.txt"
#file_o=open(source_file,"a+")
#code to find domains for categories
"""exist=os.listdir(path)
for e in category:
if len(category[e].news_sources)>1:
cat=e+'.txt'
#if cat not in exist:
#print e
path1=path+cat
try:
sources=category[e].news_sources
f1=open(path1,'w')
str1="\n".join(sources.keys())
f1.write(str1)
file_o.write("\n"+str1)
f1.close()
except:
pass
else:
path2=path+cat
print e
sources=category[e].news_sources
str2="\n".join(sources.keys())
f2=open(path2,'a+')
f2.write(str2)
f2.close()
#used for wikipedia distribution identification"""
"""list1=os.listdir(path)
list2=[]
for e in list1:
path1=path+e
f=open(path1,'r')
sources=f.read().split("\n")
if len(sources)>9:
list2.append(e.rstrip('.txt'))"""
"""while True:
categ=raw_input("input category")
if categ+'.txt' in list1:
path1=path+categ+'.txt'
f1=open(path1,'r')
sources=f1.read().split("\n")
for s in sources:
print s
print len(sources)
else:
print 'category not found'"""
#wiki_check('Arvind Kejriwal')
def suggest(inp):
suggessions=""
for element in category:
if element!=inp:
if inp in element or element in inp:
suggessions=suggessions+element+'\n'
else:
if loose_match(element,inp)>80:
suggessions=suggessions+element+'\n'
return suggessions
"""while True:
try:
sou=raw_input('enter source')
#d=domain[sou]
for v in d:
#if len(d[v].references)>0:
print v,d[v].count,d[v].references
#print d[v].news_sources
print '\n'
#print d['music'].news_sources
print 'no of categories obtained are',len(d)
except:
pass"""
# suggestions function to get suggestions for a particular category
# working scenario
def result(c_name):
d=category
output=[[],[],[],[]]
#while True:
try:
#i=raw_input('enter category\n')
#inp=i.lower()
inp=c_name.lower()
print d[inp].count
if len(d[inp].references)>0 or len(d[inp].parent)>0:
print 'related categories are'
if len(d[inp].references)>0:
#children sorted in reverse order of count
print 'childrens are',len(d[inp].references)
x=sorted(d[inp].references.iteritems(), key=lambda (k,v): (v,k),reverse=True)
rel=""
for r in x:
#print r[0]
rel=rel+r[0]+'\n'
output[0]=rel
#output[0]="\n".join(x)
if len(d[inp].parent)>0:
print 'parents',len(d[inp].parent)
#reverse sorted the map
map={}
p_list=""
for e in d[inp].parent:
map[e]=category[e].references[inp]
y=sorted(map.iteritems(), key=lambda (k,v): (v,k),reverse=True)
#ou= d[inp].parent
#print 'parents info is',y
for el in y:
#output[1]="\n".join(ou)
p_list=p_list+el[0]+'\n'
output[1]=p_list
#print p_list
j=0
print 'total sources published in the categories are',len(d[inp].news_sources)
str1=""
for ns in d[inp].news_sources:
str1=str1+ ns+'\n'
j=j+1
#if j>20:
# break
output[2]=str1
#x=suggest(inp)
#print 'other categories)',len(x)
#if len(x)>0:
# print 'other similiar categories may be\n',x
#print 'news sources are\n'
"""min1=1000
source=''
for e in d[inp].news_sources.keys():
if len(e)<min1:
source=e
min1=len(e)
if source !='':
sour=source.split(inp)[0]+inp+'/'
print 'category path is',sour
x=top_news_articles(sour)
# x containes list of links specified by the path
x.sort(key = lambda s: len(s),reverse=True)
if len(x)>0:
print 'latest news events are '
for i in range(0,5):
print x[i]
#print source
else:
print d[inp].news_sources.keys()"""
su=suggest(inp)
#sug="\n".join(su)
output[3]=su
#if len(su)>0:
# print " Please enter correct one, Suggessions are\n",su
#else:
# print 'no category found. Please enter correct one'
#continue
except:
s=suggest(inp)
output[3]=s
return output
def wiki_result(c_name):
result=wiki.wiki_check(c_name,category)
return result
#function is used to rank news sources based on global ranking of top 200 news sources
def sources_based_on_ranking():
world_ranking="D://Thesis//data//domain_name//news_sources_ranking//based_on_4inm_website//"
direc=os.listdir(world_ranking)
print direc
r_sources=[]
ranked_sources=[]
for i in range(14,15):
print direc[i]
f=open(world_ranking+direc[i],'r')
#ranked_sources are 200 sources containing global ranking
r_sources.append(f.read().split("\n"))
print r_sources
for sou in r_sources:
ranked_sources=ranked_sources+sou
print '200 ranked sources are',ranked_sources
f.close()
while True:
try:
cat=raw_input("Please enter the category")
#category specific sources
print 'related categories are',category[cat].references.keys()
print category[cat].parent
category_sources=category[cat].news_sources.keys()
print 'length of category sources is',len(category_sources)
print 'sources without rankings are',category_sources
            # sorting index is used to sort category news sources
sorting_index=[0]*len(category_sources)
count=0
for i in range(0,len(category_sources)):
domain_name='http://'+(category_sources[i].split("//")[1]).split('/')[0]
if domain_name in ranked_sources:
index=ranked_sources.index(domain_name)
count=count+1
#print index
sorting_index[i]=index
else:
sorting_index[i]=50000
#sort category specific source based on ranks of top 200 news sources
#print sorting_index
print 'sources is top 200 are',count
category_sources.sort(key=dict(zip(category_sources, sorting_index)).get)
#[x for (y,x) in sorted(zip(sorting_index,category_sources))]
print "\n sources after rankings are\n"
for source in category_sources:
print source
except:
pass
#sources_based_on_ranking()
|
[
"ankuragrawal70@gmail.com"
] |
ankuragrawal70@gmail.com
|
0957c3b078d721491faabeeda539ea760c7bf54c
|
0205e04f1c47b3a34da20e938a4dab7640df89ad
|
/ics_demo/dao/interfaces/demo/carrot.py
|
1db4747531680bc6dafe8feaca0ac9d5ddf6e24a
|
[
"MIT"
] |
permissive
|
Mahuaixu/ics_demo
|
e765c06aa68be77c5886d962ed0266aee04ad551
|
21a08945f3983eb409916a7380549f74e3ba5171
|
refs/heads/master
| 2021-01-23T20:56:45.619488
| 2016-04-01T11:49:57
| 2016-04-01T11:49:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
from ics_demo.dao.interfaces import base
from ics_demo.dao.orm.demo import Carrot
from ics_demo.helpers import uuidgen
def get_all():
return base.class_get_all_to_dict(Carrot)
def get_one(uuid):
return base.class_get_one_by_uuid_to_dict(Carrot, uuid)
def get_obj(uuid):
return base.class_get_one_by_uuid_to_obj(Carrot, uuid)
def get_obj_by_ID(ID):
return base.class_get_one_by_ID_to_obj(Carrot, ID)
def get_keys():
return base.class_get_keys(Carrot)
def save(post_dict):
name = post_dict['name']
rabbit = post_dict['rabbit']
return Carrot(uuid=uuidgen(), name=name, rabbit=rabbit)
|
[
"zhaozhl@inspur.com"
] |
zhaozhl@inspur.com
|
513cd74af6ff93ef148488fe794428acc09017a1
|
0c17c0eae2f1b616dce500315b35167fda6c0828
|
/GUI/Source_Code_OfficePortal-0.1/Make_Config_File.py
|
ec10d17c4e4a34e8da28154721acd64d47dba3ac
|
[] |
no_license
|
merijndewit/Office_Portal
|
e32f6e74be7316783f44f1c89a4a46a3442d3157
|
f58589589f0b94839eba222558e7af2075e4856c
|
refs/heads/main
| 2023-06-19T08:33:55.301685
| 2021-07-14T10:02:31
| 2021-07-14T10:02:31
| 369,529,388
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
import sys,os
import json
if getattr(sys, 'frozen', False):
cd = os.path.dirname(sys.executable)
else:
cd = os.path.dirname(os.path.abspath(__file__))
def makeConfig(values, configspecs):
config = {}
config['streamInfo'] = []
config = {
'ip': values[configspecs[0]],
'pixelWidth': values[configspecs[1]],
'pixelHeight': values[configspecs[2]],
'framerate': values[configspecs[3]],
'ledStrip': values[configspecs[4]],
'ledTexture': values[configspecs[5]],
'none': values[configspecs[6]],
'bitrate': values[configspecs[7]],
'portSend': values[configspecs[8]],
'portReceive': values[configspecs[9]],
'blue': values[configspecs[10]],
'orange': values[configspecs[11]],
'ring1080': values[configspecs[12]],
'ring720': values[configspecs[13]],
'customcolor': values[configspecs[14]],
'brightness': values[configspecs[15]],
'Rvalue': values[configspecs[16]],
'Gvalue': values[configspecs[17]],
'Bvalue': values[configspecs[18]],
'customTexture': values[configspecs[19]],
'customPath': values[configspecs[20]]
}
with open(cd + '/config.txt', 'w+') as outfile:
json.dump(config, outfile)
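if __name__ == '__main__':
    # Minimal usage sketch (illustrative; the placeholder spec keys and empty values
    # are assumptions -- the real GUI passes its own element keys and form values).
    specs = ['spec%d' % i for i in range(21)]  # makeConfig indexes configspecs[0]..[20]
    values = {key: '' for key in specs}
    makeConfig(values, specs)  # writes config.txt next to the script/executable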
|
[
"merijn.de.wit@gmail.com"
] |
merijn.de.wit@gmail.com
|
5cfb969dbad69601a0a0ad1f17caf3bd457ffee9
|
6cb4f94be34451bd51d9f1585ea3bd252e60d5e0
|
/fresh_day_back/fresh_day_back/urls.py
|
b40e8fd3f5f744144d5ed48abf343b91f66ad978
|
[] |
no_license
|
1032524125/django
|
b3f7b825fd8cacf268fbaab8482d142599bda275
|
b57ff20c45032092e295434b8e1b60bce027d2f1
|
refs/heads/master
| 2020-04-04T10:03:22.279642
| 2018-11-15T09:55:42
| 2018-11-15T09:55:42
| 155,841,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
"""fresh_day_back URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.staticfiles.urls import static
from fresh_day_back import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
    # app module
url(r'^app/',include('app.urls',namespace='app')),
    # goods module
url(r'^goods/',include('goods.urls',namespace='goods'))
]
urlpatterns += static(settings.MEDIA_URL,document_root = settings.MEDIA_ROOT)
|
[
"1032524125@qq.com"
] |
1032524125@qq.com
|
a2d16a5c6608a02c2ad6b9f34f6a09e60da23d96
|
b95bf1219a8a35e9991e965b5aa2df6e1ed9e1d8
|
/Variables/variable.py
|
bdab6666729db6f6c1e979141065e0c563662117
|
[] |
no_license
|
cosmolgj/charm_private_python
|
73738b8c53909ccf376cd0582590101d340ccc52
|
85c87f6922d4dcb08c41ac10d990b8a741958a13
|
refs/heads/master
| 2023-08-28T07:43:57.471238
| 2021-10-21T03:54:51
| 2021-10-21T03:54:51
| 417,421,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
# assign a new variable that references the same list
a = [1, 2, 3]
b = a
print(id(a))
print(id(b))
# copying a list
a = [1, 2, 3]
b = a[:] # list copy 1
a[1] = 4
print(a)
print(b)
from copy import copy
b = copy(a) # list copy 2
b[1] = 7
print(a)
print(b)
b = a.copy() # the list's own copy method
print(a)
print(b)
"""
def print_n_times(n, *values):
    # repeat n times.
    for i in range(n):
        # values is used like a list.
        for value in values:
            print(value)
    # simple line break
    print()
# call the function.
print_n_times(3, "안녕하세요", "즐거운", "파이썬 프로그래밍")
"""
|
[
"cosmolgj@empal.com"
] |
cosmolgj@empal.com
|
5bcb14b9a77abb06d694d912c1001a113b902a97
|
a901e62765acf49238fcff53c92dacfb3ac806dd
|
/app/gbd/core/util.py
|
aebf5c44d23becf0d83f3d6a1a5c0d8214d39602
|
[
"MIT"
] |
permissive
|
gbd-consult/qwebgisapp
|
d0834492a301395d18fb5867f2fef485ac032c14
|
86318f42d2e931ab3a6ea4e5108b9e0b0095156b
|
refs/heads/master
| 2021-03-19T14:21:57.993770
| 2018-02-27T12:11:17
| 2018-02-27T12:11:17
| 94,889,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
"""Misc utilities."""
import sys, string, time, os, math, itertools, random, datetime
import mako.template, mako.exceptions
from gbd.core import config, log, templatetools
def f(s, *args, **kwargs):
frame = sys._getframe(1)
d = {}
d.update(frame.f_globals)
d.update(frame.f_locals)
d.update(kwargs)
return string.Formatter().vformat(s, args, d)
def inline_format(s, *args, **kwargs):
frame = sys._getframe(1)
d = {}
d.update(frame.f_globals)
d.update(frame.f_locals)
d.update(kwargs)
return string.Formatter().vformat(s, args, d)
class Struct(object):
def __init__(self, d):
if isinstance(d, dict):
for k, v in d.items():
setattr(self, k, v)
elif isinstance(d, (list, tuple)):
for k in d:
setattr(self, k, None)
def struct(d):
return Struct(d)
def encode_all(d, coding='utf8'):
if isinstance(d, unicode):
return d.encode(coding)
if isinstance(d, dict):
return dict((encode_all(k, coding), encode_all(v, coding)) for k, v in d.items())
if isinstance(d, list):
return [encode_all(v, coding) for v in d]
if isinstance(d, tuple):
return tuple(encode_all(v, coding) for v in d)
return d
def render_from_string(src, args=None):
try:
# we use 'disable_unicode' to avoid u"" strings in the template
# so all input must be preencoded to utf8
tpl = mako.template.Template(encode_all(src), disable_unicode=True, input_encoding='utf-8')
args = encode_all(args or {})
args['_'] = templatetools
return tpl.render(**args)
except:
err = mako.exceptions.text_error_template().render()
for s in err.strip().splitlines():
log.error(s)
raise
def render_template(path, args=None):
with open(path) as fp:
return render_from_string(fp.read(), args)
_app_version = None
def app_version(as_number=False):
global _app_version
if _app_version is None:
with open(os.path.join(config.app_root(), 'VERSION')) as fp:
_app_version = fp.read().strip()
if as_number:
v = _app_version.split('.')
return int(v[0]) * 1000000 + int(v[1]) * 1000 + int(v[2])
return _app_version
def strip_none(d):
return dict((k, v) for k, v in d.items() if v is not None and v != [None])
def split_dict(d):
r = {}
for k, v in d.items():
k = k.split('.')
q = r
for sub in k[:-1]:
if sub not in q:
q[sub] = {}
q = q[sub]
q[k[-1]] = v
return r
def pick(d, keys, skip_none=False):
r = {}
is_dict = isinstance(keys, dict)
for k in keys:
p = keys[k] if is_dict else k
if d.get(k) is not None:
r[p] = d[k]
elif not skip_none:
r[p] = None
return r
def chunked(it, size):
it = iter(it)
while True:
p = tuple(itertools.islice(it, size))
if not p:
break
yield p
def randstr(chars, size):
return ''.join(chars[random.randint(0, len(chars) - 1)] for _ in range(size))
def uid(prefix=''):
r = randstr('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', 64)
return prefix + r
def now():
return datetime.datetime.now()
class ProgressIndicator(object):
def __init__(self, title, total, resolution=10):
self.console = sys.stderr.isatty()
self.resolution = resolution
self.title = title
self.total = total
self.progress = 0
self.lastd = 0
def __enter__(self):
self.write('START (total=%d)' % self.total)
self.starttime = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.write('\n')
else:
t = time.time() - self.starttime
self.write('END (time=%.2f rps=%.1f)' % (t, self.total / t))
def update(self, add=1):
self.progress += add
p = math.floor(self.progress * 100.0 / self.total)
if p > 100:
p = 100
d = round(p / self.resolution) * self.resolution
if d > self.lastd:
self.write('%d%%' % d)
self.lastd = d
def write(self, s):
log.info(self.title + ': ' + s)
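if __name__ == '__main__':
    # Minimal usage sketch of ProgressIndicator (illustrative; the title and the
    # item count are assumptions).
    with ProgressIndicator('demo run', total=1000) as progress:
        for _ in range(1000):
            progress.update()  # logs START, 10%, 20%, ... and END with timing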
|
[
"gebrkn@gmail.com"
] |
gebrkn@gmail.com
|
f1984d621cb9dff5c7d33467c52fd530da9da790
|
d78d214c1040865d1d054cc034ff04e3cb025549
|
/bdgtools/io.py
|
526a76e4e092697ed3e067aaa8e3ee8588a8646d
|
[
"MIT"
] |
permissive
|
knutdrand/bdgtools
|
922685c6225bb0dc458d3f4b9dc6985aee0b9e51
|
18d21586515ec03e5fb96e959447f6b35e5350de
|
refs/heads/master
| 2023-01-03T14:27:05.855074
| 2020-10-21T12:30:12
| 2020-10-21T12:30:12
| 301,361,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,279
|
py
|
import logging
from itertools import chain, groupby
from more_itertools import pairwise
from operator import itemgetter
import pandas as pd
import numpy as np
from .regions import Regions
from .splitregions import SplitRegions, Genes
from .bedgraph import BedGraph, broadcast
log = logging
def _peek_line(f):
pos = f.tell()
line = f.readline()
f.seek(pos)
return line
def read_bedfile(file_obj):
n_cols = len(_peek_line(file_obj).split("\t"))
assert n_cols >=3, n_cols
if n_cols < 6:
table = pd.read_table(file_obj, names=["chrom", "start", "end"], usecols=[0, 1, 2])
else:
table = pd.read_table(file_obj, names=["chrom", "start", "end", "direction"], usecols=[0, 1, 2, 5])
table = table.sort_values(["chrom", "start"])
changes = np.flatnonzero(table["chrom"].values[:-1] != table["chrom"].values[1:])+1
changes = np.concatenate(([0], changes, [table["chrom"].values.size]))
chrom_split = (table.iloc[start:end] for start, end in zip(changes[:-1], changes[1:]))
r = {t["chrom"].iloc[0]: Regions(t["start"].values, t["end"].values,
np.where(t["direction"].values=="+", 1, -1) if n_cols>=6 else 1)
for t in chrom_split}
return r
def _fix_bedgraph(starts, ends, values):
ends_w_zero = np.insert(ends[:-1], 0, 0)
missing = np.flatnonzero(starts != ends_w_zero)
new_starts = ends_w_zero[missing]
new_ends = starts[missing]
all_starts = np.empty(starts.size+missing.size, dtype=starts.dtype)
all_ends = np.empty(starts.size+missing.size,dtype=ends.dtype)
all_values = np.empty(starts.size+missing.size, dtype=values.dtype)
new_missing = missing + np.arange(missing.size)
all_starts[new_missing] = ends_w_zero[missing]
all_ends[new_missing] = starts[missing]
all_values[new_missing] = 0
for i, (a, b) in enumerate(pairwise(chain([0], missing, [starts.size]))):
all_starts[a+i:b+i] = starts[a:b]
all_ends[a+i:b+i] = ends[a:b]
all_values[a+i:b+i] = values[a:b]
return all_starts, all_ends, all_values
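# Concatenate the per-chunk rows of a single chromosome into one contiguous BedGraph.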
def _get_bedgraph(chunks):
chunks = list(chunks)
cur_chrom = chunks[0]["chrom"].iloc[0]
starts = np.concatenate([c["start"].values for c in chunks])
ends = np.concatenate([c["end"].values for c in chunks])
values = np.concatenate([c["value"].values for c in chunks])
if starts[0] != 0 or not np.all(starts[1:] == ends[:-1]):
        logging.warning("Incomplete bedgraph, fixing %s", starts[0])
starts, ends, values = _fix_bedgraph(starts, ends, values)
    assert np.all(starts[1:] == ends[:-1]), f"Bedgraph is not continuous on {cur_chrom}, {starts[1:]}, {ends[:-1]}\n{np.flatnonzero(starts[1:]!=ends[:-1])}, {starts.size}"
    log.info("Read chromosome %s", cur_chrom)
return BedGraph(starts,
values,
chunks[-1]["end"].values[-1])
def read_bedgraph(file_obj, size_hint=1000000):
reader = pd.read_table(file_obj, names=["chrom", "start", "end", "value"], usecols=[0, 1, 2, 3], chunksize=size_hint)
grouped = groupby(chain.from_iterable(chunk.groupby("chrom", sort=False) for chunk in reader),
itemgetter(0))
grouped = ((chrom, map(itemgetter(1), group)) for chrom, group in grouped)
return ((chrom, _get_bedgraph(group)) for chrom, group in grouped)
def _get_bedfile(chunks, with_strand=False):
chunks = list(chunks)
cur_chrom = chunks[0]["chrom"].iloc[0]
starts = np.concatenate([c["start"].values for c in chunks])
ends = np.concatenate([c["end"].values for c in chunks])
if with_strand:
strands = np.concatenate([np.where(c["strand"].values=="+", 1, -1) for c in chunks])
else:
strands=1
log.info("Read chromosome", cur_chrom)
return Regions(starts,
ends,
strands)
def read_large_bedfile(file_obj, size_hint=1000000):
n_cols = len(_peek_line(file_obj).split("\t"))
assert n_cols >=3, n_cols
names=["chrom", "start", "end"]
cols = [0, 1, 2]
if n_cols>=6:
names.append("strand")
cols.append(5)
reader = pd.read_table(file_obj, names=names, usecols=cols, chunksize=size_hint)
grouped = groupby(chain.from_iterable(chunk.groupby("chrom", sort=False) for chunk in reader),
itemgetter(0))
grouped = ((chrom, map(itemgetter(1), group)) for chrom, group in grouped)
return ((chrom, _get_bedfile(group)) for chrom, group in grouped)
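# Keep only transcripts whose CDS is non-empty and lies strictly inside the exon span.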
def _filter_coding(df):
s = np.array([starts[0] for starts in df["exon_starts"]])
e = np.array([ends[-1] for ends in df["exon_ends"]])
mask = df["cds_start"] > s
mask &= df["cds_end"] > df["cds_start"]
mask &= e > df["cds_end"]
if not np.any(mask):
return None
return df.loc[mask]
def _get_genes(df):
df = _filter_coding(df)
if df is None:
return None
directions = np.where(df["direction"].values=="+", 1, -1)
starts = np.concatenate([s[::d] for s, d in zip(df["exon_starts"].values, directions)])
ends = np.concatenate([s[::d] for s, d in zip(df["exon_ends"].values, directions)])
lens = [len(starts) for starts in df["exon_starts"]]
offsets=np.cumsum([0]+lens)
all_directions = broadcast(directions, offsets)
regions = Regions(starts, ends, all_directions)
coding_offsets = _find_coding_offsets(df["cds_start"].values,
df["cds_end"].values,
regions, offsets, all_directions)
return Genes(regions, offsets, coding_regions=Regions(coding_offsets[0], coding_offsets[1]))
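# Translate genomic CDS start/end coordinates into offsets within the concatenated exons, respecting strand direction.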
def _find_coding_offsets(cds_starts, cds_ends, regions, offsets, directions):
cum_sizes = np.insert(np.cumsum(regions.sizes()), 0, 0)
starts = broadcast(cds_starts, offsets)
start_idxs = np.flatnonzero((regions.ends>=starts) & (regions.starts<=starts))
local_starts = np.where(directions[start_idxs]==1,
cds_starts-regions.starts[start_idxs],
regions.ends[start_idxs]-cds_starts)
local_starts += cum_sizes[start_idxs]-cum_sizes[offsets[:-1]]
ends = broadcast(cds_ends, offsets)
end_idxs = np.flatnonzero((regions.ends>=ends) & (regions.starts<=ends))
local_ends = np.where(directions[end_idxs]==1,
cds_ends-regions.starts[end_idxs],
regions.ends[end_idxs]-cds_ends)
local_ends += cum_sizes[end_idxs]-cum_sizes[offsets[:-1]]
return np.where(directions[start_idxs]==1, local_starts, local_ends), np.where(directions[start_idxs]==-1, local_starts, local_ends)
def read_refseq(file_obj):
get_ints = lambda x: [int(r) for r in x.split(",") if r]
df = pd.read_table(file_obj,
names=["chrom", "direction", "start", "end","cds_start","cds_end", "exon_starts", "exon_ends"],
usecols=[2,3,4,5,6,7,9,10],
converters={"exon_starts": get_ints, "exon_ends": get_ints})
grouped = df.sort_values(["chrom", "start"]).groupby("chrom", sort=False)
d = {chrom: _get_genes(df) for chrom, df in grouped}
return {chrom: genes for chrom, genes in d.items() if genes is not None}
def write_bedgraph(bedgraphs, f):
for chrom, bedgraph in bedgraphs:
if bedgraph._size is not None:
df = pd.DataFrame({"chrom": chrom,
"start": bedgraph._indices,
"end": np.append(bedgraph._indices, bedgraph._size),
"value": bedgraph._values})
else:
df = pd.DataFrame({"chrom": chrom,
"start": bedgraph._indices[:-1],
"end": bedgraph._indices[1:],
"value": bedgraph._values[:-1]})
df.to_csv(f, sep="\t", header=False, index=False)
def write_bedfile(regions_dict, f):
for chrom, regions in regions_dict.items():
df = pd.DataFrame({"chrom": chrom,
"start": regions.starts,
"end": regions.ends})
df.to_csv(f, sep="\t", header=False, index=False)
|
[
"knutdrand@gmail.com"
] |
knutdrand@gmail.com
|
cccdf851ec9f9a96a2225d8bfbd7e62cfcd1b59a
|
0c01f24c7d2e2433ad999358bcb5cd246d824c64
|
/Settings/Lopy/Lopy_device/flash/main.py
|
d577883725a19488191f6648b511b6abb6858500
|
[] |
no_license
|
jemesmain/openenergy
|
368b2b2dc4cee6e3b0ea93f9d0029572b004983f
|
3d0c7d0892336a0a0f0a880d53561079f9ef2637
|
refs/heads/master
| 2023-05-11T19:09:00.011456
| 2023-04-30T21:29:02
| 2023-04-30T21:29:02
| 251,673,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
""" OTAA Node example compatible with the LoPy Nano Gateway """
from network import LoRa
import socket
import binascii
import struct
import time
import config
# initialize LoRa in LORAWAN mode.
lora = LoRa(mode=LoRa.LORAWAN)
# create an OTA authentication params
#setting found in ttn device / name of device with activation method OTAA
#ttn dev_eui 70B3D54995E574C6
#const char *appEui = "70B3D59BA0000114";
#const char *appKey = "65240933ADE876AB9AD5990DB003B6E9";
#ttn configuration
#print('ttn config')
#dev_eui = binascii.unhexlify('70B3D54985E573F7'.replace(' ','')) # these settings can be found from TTN
#app_eui = binascii.unhexlify('70B3D59CC0000004'.replace(' ','')) # these settings can be found from TTN
#app_key = binascii.unhexlify('65240932ADE876AB9AD5990DB004C2E9'.replace(' ','')) # these settings can be found from TTN
#chirpstack configuration
print('chirpstack config')
dev_eui = binascii.unhexlify('db2b4bbc2cf73c6'.replace(' ','')) # these settings can be found from Chirpstack
app_eui = binascii.unhexlify('db2b4bbc2cf743c6'.replace(' ','')) # these settings can be found from Chirpstack
app_key = binascii.unhexlify('f5d90236196aef9d6d39c690b5b8a317'.replace(' ','')) # these settings can be found from Chirpstack
# set the 3 default channels to the same frequency (must be before sending the OTAA join request)
lora.add_channel(0, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=5)
lora.add_channel(1, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=5)
lora.add_channel(2, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=5)
# join a network using OTAA
lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0, dr=config.LORA_NODE_DR)
# wait until the module has joined the network
i=1
while not lora.has_joined():
time.sleep(2.5)
print('Not joined yet...')
print(i)
i+=1
print('device connected')
# remove all the non-default channels
for i in range(3, 16):
lora.remove_channel(i)
# create a LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# set the LoRaWAN data rate
s.setsockopt(socket.SOL_LORA, socket.SO_DR, config.LORA_NODE_DR)
# make the socket non-blocking
s.setblocking(False)
time.sleep(5.0)
for i in range (60):
    # send the data every 2 minutes, so the loop runs for about 2 hours
value = 22.5
    # Pack the number into an array of float bytes and display it with formatting
myArrayOfFloat = bytes(struct.pack("f", value))
print('myArrayOfFloat')
print(myArrayOfFloat)
print([ "0x%02x" % item for item in myArrayOfFloat ])
#s.send(b'0001' + myArrayOfFloat)
s.send(myArrayOfFloat)
# send some data ("hello")
#s.send(bytes([0x48, 0x65, 0x6C, 0x6C, 0x6F]))
#print('hello is sent')
# send some data ("h")
#s.send(bytes([0x48]))
#print('h is sent')
# send some data ("20.5")
#s.send(bytes([0x32, 0x30, 0x2e, 0x35]))
#print('20.5 is sent')
#s.send(b'PKT #' + myArrayOfFloat)
#s.send(b'PKT #' + bytes([i]))
time.sleep(118)
rx, port = s.recvfrom(256)
if rx:
print('Received: {}, on port: {}'.format(rx, port))
time.sleep(2)
|
[
"jeaneric.mesmain@gmail.com"
] |
jeaneric.mesmain@gmail.com
|
3c2f70f7d8cb1b239b139682b5e5bfd6579d4cb8
|
02c4c52f2f630df77bc25994aee13a7530e3136d
|
/012016/python/xmlParsing.py
|
ec2f58145a53178b29ba7dc467dc309ce4c9fc3c
|
[] |
no_license
|
abgoswam/2016
|
ac7d61828bc5fda8c003f135bc250ed57d03ae4c
|
307844ba74bf302c290a4e2b4792a096e0c3cb56
|
refs/heads/master
| 2020-04-15T14:03:49.674650
| 2016-12-09T02:19:44
| 2016-12-09T02:19:44
| 57,982,844
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,555
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 07:38:36 2016
@author: agoswami
Code from : http://www.blog.pythonlibrary.org/2013/04/30/python-101-intro-to-xml-parsing-with-elementtree/
"""
import time
import xml.etree.ElementTree as xml
import xml.etree.cElementTree as ET
import xml.dom.minidom as minidom
#http://stackoverflow.com/questions/17402323/use-xml-etree-elementtree-to-write-out-nicely-formatted-xml-files
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
"The idea is to print your Element in a string, parse it using minidom and convert it again in XML using the toprettyxml function"
rough_string = xml.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t")
def createXML(filename):
root = xml.Element("zAppointments")
appt = xml.Element("appointment")
root.append(appt)
# adding appt children
begin = xml.SubElement(appt, "begin")
begin.text = "12345678"
uid = xml.SubElement(appt, "uid")
uid.text = "040000008200E000"
alarmTime = xml.SubElement(appt, "alarmTime")
alarmTime.text = "1181572063"
state = xml.SubElement(appt, "state")
location = xml.SubElement(appt, "location")
duration = xml.SubElement(appt, "duration")
duration.text = "1800"
subject = xml.SubElement(appt, "subject")
tree = xml.ElementTree(root)
with open(filename, "w") as fh:
tree.write(fh)
def editXML(filename, updatedfilename):
tree = ET.ElementTree(file=filename)
root = tree.getroot()
for begin_time in root.iter("begin"):
begin_time.text = time.ctime(int(begin_time.text))
s = prettify(root)
print s
# tree = ET.ElementTree(root)
with open(updatedfilename, "w") as f:
# tree.write(f)
f.write(s)
def parseXML(xml_file):
"""
Parse XML with ElementTree
"""
tree = ET.ElementTree(file=xml_file)
print tree.getroot()
root = tree.getroot()
print "tag=%s, attrib=%s" % (root.tag, root.attrib)
for child in root:
print child.tag, child.attrib
if child.tag == "appointment":
for step_child in child:
print step_child.tag
# iterate over the entire tree
print "-" * 40
print "Iterating using a tree iterator"
print "-" * 40
iter_ = tree.getiterator()
for elem in iter_:
print elem.tag
# get the information via the children!
print "-" * 40
print "Iterating using getchildren()"
print "-" * 40
appointments = root.getchildren()
for appointment in appointments:
appt_children = appointment.getchildren()
for appt_child in appt_children:
print "%s=%s" % (appt_child.tag, appt_child.text)
#----------------------------------------------------------------------
if __name__ == "__main__":
filename = "appt.xml"
updatedfilename = "updated.xml"
createXML(filename)
## just playing around with how to read / write text to files in python
# f = open(filename, "ab")
# f.writelines("abhishek\n")
# f.writelines("goswami\n")
# f.writelines("microsoft\n")
# f.close()
#
# with open(filename, "rb") as fh:
# s = fh.read()
# print "++ line:{0}".format(s)
# for line in fh:
# print "-- line:{0}".format(line)
editXML(filename, updatedfilename)
parseXML(updatedfilename)
|
[
"abgoswam@gmail.com"
] |
abgoswam@gmail.com
|
84eaed61749bc550b3f113ebdf7b48e5b356ceed
|
cd9702fdcfd36b26fcd639fb4d0be8019c4c21e0
|
/src/getEcommence_yhd.py
|
b469fe92a4f6abf2061ff8a23e7533a675ba3408
|
[] |
no_license
|
Neilfu/NLP
|
0d467b860061d2fd2e2aaa8571f391f183158e32
|
7ebfc2264761b83174f8ad47e0fce11fdff7e322
|
refs/heads/master
| 2020-04-06T07:04:54.755915
| 2016-02-26T13:38:49
| 2016-02-26T13:38:49
| 15,428,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,456
|
py
|
#coding=utf-8
import re
import sys, getopt
import datetime
import requests
from bs4 import BeautifulSoup
import logging
import json
from pymongo import MongoClient,ASCENDING
import time
LEVELS={'DEBUG':logging.DEBUG,
'INFO':logging.INFO,
'WARNING':logging.WARNING,
'ERROR':logging.ERROR,
'CRITICAL':logging.CRITICAL,
}
def setLog(strLevel='INFO',logfile='../log.txt'):
level=LEVELS[strLevel]
logging.basicConfig(
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
)
logger = logging.getLogger()
handler = logging.FileHandler(logfile)
logger.addHandler(handler)
console = logging.StreamHandler()
logger.addHandler(console)
logger.setLevel(level)
return logger
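# Print an in-place progress percentage for the given title to stdout.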
def progressBar(strTitle,num=1, total=100):
rate = int(float(num) / float(total)*1000)
sys.stdout.write(strTitle+"\t=>\t"+str(rate/10.0)+"%\r")
sys.stdout.flush()
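# Connect to MongoDB using the global settings and return the requested collection.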
def openTable(tableName=''):
strConn = "mongodb://" + global_setting['host'] + ":" + global_setting['port']
con = MongoClient(strConn)
db = con[global_setting['database']]
return db[tableName]
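# Scrape the three-level category tree from the site's category page and insert any new categories into MongoDB.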
def getCategoryUrl(site="",url=""):
catDb = openTable(tableName=global_setting['catTable'])
r = session.get(url)
if not r.text:
return False
soup = BeautifulSoup(r.text)
#
for level1 in soup.select('.alonesort'):
curLevel1 = level1.select('.mt')[0].text
curLevel1 = re.sub('\s', '', curLevel1)
for level2 in level1.select('dl'):
curLevel2 = level2.select('dt')[0].text
curLevel2 = re.sub('\s', '', curLevel2)
for level3 in level2.select('dd a'):
curLevel3 = re.sub('\s', '', level3.text)
curlUrl = level3['href']
retFind = re.findall(r'com\/(.*)\/',curlUrl)
if retFind:
curCatID = retFind[0]
if catDb.find({'catId':curCatID}).count() >0:
logger.debug('catetogy %s exists,skip\n'%(curCatID))
else:
catDb.insert({'catId':curCatID,'level1':curLevel1, 'level2':curLevel2, 'level3':curLevel3, 'catUrl':curlUrl, 'site':site})
return True
def getPidList4Cat():
level1Filter = global_setting['level1']
level2Filter = global_setting['level2']
level3Filter = global_setting['level3']
catDb = openTable(tableName=global_setting['catTable'])
dbProductList = openTable(tableName=global_setting['productTable'])
    # allow interruption with Ctrl+C
ctlC = False
    # scan the categories that have gone the longest without an update first
for cat in catDb.find({'site':global_setting['site']}).sort('lasttime',ASCENDING):
if ctlC:
break
if (global_setting['site']==cat['site']) and cat['catUrl'] and ((level1Filter and cat['level1'] in level1Filter) \
or (level2Filter and cat['level2'] in level2Filter) \
or level3Filter and cat['level3'] in level3Filter):
            # category attributes carried over to each product
CATLIST = ('catId','catUrl','site','level1','level2','level3')
paraCat = {}
for li in CATLIST:
if li in cat:
paraCat[li] = cat[li]
            # fetch the product list for this category
ctlC = getProduct(dbProductList, **paraCat)
            # record the latest refresh time and the category's current product count
catCount = dbProductList.find({'$and':[{'catId':cat['catId']},{'site':cat['site']}]}).count()
lastFreshDate = datetime.datetime.utcnow()
catDb.update({'catId':cat['catId']},{'$set':{'lasttime':lastFreshDate}})
catDb.update({'catId':cat['catId']},{'$push':{'timeline':{'lasttime':lastFreshDate,'count':catCount}}})
def getCatPageNum(url):
r = session.get(url)
soup = BeautifulSoup(r.text)
strPages = soup.select('.select_page_num')
if strPages:
pages = re.findall('\/(\d+)',strPages[0].text)[0]
if pages:
pages = int(pages)
else:
pages = 0
return pages
def getProduct(dbProductList,**cat):
SUFFIX = '/b/a-s1-v4-p%s-price-d0-f0d-m1-rt0-pid-mid0-k/'
catUrl = cat['catUrl']
totalPages = getCatPageNum(catUrl)
logger.info("begin:%s\t->%s\t->%s,\ttotal %d page" %(cat['level1'],cat['level2'],cat['level3'], totalPages) )
rule = re.compile(r'id=\"plist\".*?>(.*?)<div class=\"clr\"',re.S)
Skip = False
for page in range(totalPages):
if Skip:
break
try:
progressBar("getting pages",page,totalPages)
urlPage = catUrl + SUFFIX %(page+1)
time.sleep(0.1)
r = session.get(urlPage)
#listUls = re.findall(rule,r.text)
soup = BeautifulSoup(r.text)
skuLists=[]
for li in soup.select('.mod_search_pro'):
product = {}
product.update(cat)
item = li.select('.proName')[0].a
product['sku'] = item['pmid']
skuLists.append(product['sku'])
product['url'] = item['href']
product['name'] = re.sub('[\r\n]','',item.text)
reBrand = re.findall(r'^(.*?)[\s(]',product['name'])
if reBrand:
product['brand'] = reBrand[0]
strPrice = li.select('.proPrice')[0]
product['price'] = strPrice.em['yhdprice']
try:
if dbProductList.find({u'sku':product['sku']}).count() >0:
if global_setting['delta']:
logger.debug('Delta:on, category %s scanning finished' %(cat['catId']) )
Skip = True
break
else:
logger.debug('%s exist,skip' %(product['sku']))
else:
dbProductList.insert(product)
if global_setting['spec']:
getProductDetail(product['sku'],product['url'],dbProductList)
except Exception, e:
logger.exception("error in Page:%d, skuid:%s, reason:%s" %(page, product['sku'], str(e)))
continue
if global_setting['price']:
updatePrice(skuLists,dbProductList)
except (KeyboardInterrupt, SystemExit), e:
logger.critical("app is interrupted, finished pages:%d" %(page))
Skip = True
return Skip
except Exception,e:
logger.exception("error in Page:%d, reason:%s" %(page,str(e)))
logger.debug('category %s getting finished'%(cat['level3']))
def getProductDetail(sku, url, db):
tableRule=re.compile(r'<table.*?class="Ptable".*?<\/table>',re.S)
if not url:
return False
productDetail = {}
time.sleep(0.5)
r = session.get(url)
try:
table = re.findall(tableRule,r.text)[0]
if not table:
return productDetail
soup = BeautifulSoup(table)
trs = soup('tr')
for tr in trs:
if len(tr('td')) == 2:
productDetail[tr('td')[0].text.replace('.','')] = tr('td')[1].text
#product[tr('td')[0].text.replace('.','').encode('utf-8')] = tr('td')[1].text.encode('utf-8')
db.update({'sku':sku},{'$set':{'spec':productDetail}})
except Exception,e:
logger.exception("error in parsing sku:%s\t page:%s,reson:%s" %(sku, url,str(e)))
return False
return True
def updatePrice(skuLists,db):
priceUrl = 'http://p.3.cn/prices/mgets?skuIds=J_%s&type=1'
time.sleep(0.5)
strSku = ",J_".join(skuLists)
r = session.get(priceUrl %(strSku))
if not r.text:
return False
jsonPriceLists = json.loads(r.text)
for price in jsonPriceLists:
if price['p'] and price['id']:
skuid = price['id'].replace('J_','')
price = price['p']
curTime = datetime.datetime.utcnow()
db.update({'sku':skuid},{'$set':{'price':price}})
db.update({'sku':skuid},{'$push':{'timeline':{'date':curTime,'price':price}}})
return True
def parseCommandLine():
para = {}
options,args = getopt.getopt(sys.argv[1:],"h",['site=', 'level1=', 'level2=', 'level3=', 'host=', 'port=', 'database=','productTable=','catTable=','pagesize=', 'hasPrice','batchUpdate', 'hasSpec','delta', 'help','catUpdate'])
for opt, value in options:
if opt in ['--level1','--level2','--level3']:
strKey = re.sub('-','',opt)
para[strKey] = value.decode('gb2312').split(',')
elif opt in ['--site','--database','--catTable','--productTable']:
strKey = re.sub('-','',opt)
para[strKey] = value.decode('gb2312')
elif opt in ['--host','--port','--pagesize']:
strKey = re.sub('-','',opt)
para[strKey] = value
elif opt in ['--hasPrice','--hasSpec','--delta','--batchUpdate','--catUpdate']:
strKey = re.sub('-','',opt)
para[strKey] = True
if opt in ['-h','--help']:
usage()
sys.exit()
return para
def updateBactch():
dbProductList = openTable(tableName=global_setting['productTable'])
updateCatids = getUpdateCat()
for catid,catname in updateCatids:
skuList = []
logger.info('price updating to categary %s(%s) '%(catname,catid))
for product in dbProductList.find({'catId':catid}):
try:
if global_setting['spec']:
getProductDetail(product['sku'],product['url'],dbProductList)
skuList.append(product['sku'])
if len(skuList) > global_setting['pagesize']:
updatePrice(skuList, dbProductList);
skuList = []
except Exception, e:
logger.exception("error in updating pricing:%s %s " %(catname, str(e)))
continue
return True
def getUpdateCat():
level1Filter = global_setting['level1']
level2Filter = global_setting['level2']
level3Filter = global_setting['level3']
retCat = []
catDb = openTable(tableName=global_setting['catTable'])
for cat in catDb.find({'site':global_setting['site']}).sort('lasttime',ASCENDING):
if (global_setting['site']==cat['site']) and cat['catUrl'] and ((level1Filter and cat['level1'] in level1Filter) \
or (level2Filter and cat['level2'] in level2Filter) \
or level3Filter and cat['level3'] in level3Filter):
retCat.append((cat['catId'],cat['level3']))
return retCat
def usage():
print "Usage: python getCategory.py [--help] [--site] [--hasPrice] [--hasSpec] [--homeUrl] [--host], [--port], [--database],[--productTable], [--catTable],[--level1] [--level2] [--level3] [--delta] [--batchUpdate] [--catUpdate]\n"
global_setting = {}
global session
global logger
if __name__ == '__main__':
logger = setLog('INFO')
logger.debug('log level, %d' %(logger.level))
session = requests.Session()
retPara = parseCommandLine()
global_setting['site'] = retPara.get('site',u'京东')
global_setting['targetUrl'] = retPara.get('homeUrl','http://www.yhd.com/marketing/allproduct.html')
global_setting['level1'] = retPara.get('level1',None)
global_setting['level2'] = retPara.get('level2',None)
global_setting['level3'] = retPara.get('level3',None)
global_setting['spec'] = retPara.get('hasSpec',False)
global_setting['price'] = retPara.get('hasPrice',False)
global_setting['delta'] = retPara.get('delta',False)
global_setting['host'] = retPara.get('host','127.0.0.1')
global_setting['port'] = retPara.get('port','27017')
global_setting['database'] = retPara.get('database','productKB')
global_setting['catTable'] = retPara.get('catTable','catdb')
global_setting['productTable'] = retPara.get('productTable','productdb')
global_setting['pagesize'] = retPara.get('pagesize',60)
global_setting['batchUpdate'] = retPara.get('batchUpdate',False)
global_setting['catUpdate'] = retPara.get('catUpdate',False)
#import pdb;pdb.set_trace()
if global_setting['catUpdate']:
getCategoryUrl(site=global_setting['site'],url=global_setting['targetUrl'])
if global_setting['batchUpdate']:
updateBactch()
else:
getPidList4Cat()
|
[
"neil@local.com"
] |
neil@local.com
|
6e3a536375721655c5f4134ba620f4f7a4d0bbb6
|
b6a9268bcb333ec3d873cf28caef38c12de55e00
|
/ch7_functions/anonymous_functions.py
|
99fafbc2095ac3013dd4a9474d109961f183bd61
|
[] |
no_license
|
bobo333/python_cookbook
|
eb6fff82e1f5dc90963c8798cf88ffdcc3195440
|
e0a022ecfe5abbcd074f90be9b274f6d29fe4362
|
refs/heads/master
| 2020-07-03T09:20:37.808762
| 2017-03-17T08:29:52
| 2017-03-17T08:29:52
| 66,986,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# lambdas are anonymous functions
add = lambda x, y: x + y
print(add(5, 3))
# usually used in combination with some other operation, like sort
names = ['David Beazley', 'Brian Jones', 'Raymond Hettinger', 'Ned Batchelder']
print(sorted(names, key=lambda name: name.split()[-1].lower()))
|
[
"redsox333@gmail.com"
] |
redsox333@gmail.com
|
aa6c6881d7565c65dcc402b478b85a6aaab77318
|
8410bb5a2e8849bb3a554b95ddc713d88f3440c4
|
/aws-dev/awsdev8/venv/Lib/site-packages/amazondax/grammar/DynamoDbGrammarListener.py
|
e8bd3845c650ff1693a6d39c27674112f67b43c3
|
[
"MIT"
] |
permissive
|
PacktPublishing/-AWS-Certified-Developer---Associate-Certification
|
ae99b6c1efb30e8fab5b76e3d8c821823a4cd852
|
b9838b4e038b42ad1813a296379cbbc40cab6286
|
refs/heads/master
| 2022-11-03T04:37:49.014335
| 2022-10-31T05:42:19
| 2022-10-31T05:42:19
| 219,964,717
| 13
| 11
|
MIT
| 2021-06-02T00:57:45
| 2019-11-06T09:54:09
|
Python
|
UTF-8
|
Python
| false
| false
| 13,172
|
py
|
# Generated from grammar/DynamoDbGrammar.g4 by ANTLR 4.7
from antlr4 import *
if __name__ is not None and "." in __name__:
from .DynamoDbGrammarParser import DynamoDbGrammarParser
else:
from DynamoDbGrammarParser import DynamoDbGrammarParser
# This class defines a complete listener for a parse tree produced by DynamoDbGrammarParser.
class DynamoDbGrammarListener(ParseTreeListener):
# Enter a parse tree produced by DynamoDbGrammarParser#projection_.
def enterProjection_(self, ctx:DynamoDbGrammarParser.Projection_Context):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#projection_.
def exitProjection_(self, ctx:DynamoDbGrammarParser.Projection_Context):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#projection.
def enterProjection(self, ctx:DynamoDbGrammarParser.ProjectionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#projection.
def exitProjection(self, ctx:DynamoDbGrammarParser.ProjectionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#condition_.
def enterCondition_(self, ctx:DynamoDbGrammarParser.Condition_Context):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#condition_.
def exitCondition_(self, ctx:DynamoDbGrammarParser.Condition_Context):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#Or.
def enterOr(self, ctx:DynamoDbGrammarParser.OrContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#Or.
def exitOr(self, ctx:DynamoDbGrammarParser.OrContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#Negation.
def enterNegation(self, ctx:DynamoDbGrammarParser.NegationContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#Negation.
def exitNegation(self, ctx:DynamoDbGrammarParser.NegationContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#In.
def enterIn(self, ctx:DynamoDbGrammarParser.InContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#In.
def exitIn(self, ctx:DynamoDbGrammarParser.InContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#And.
def enterAnd(self, ctx:DynamoDbGrammarParser.AndContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#And.
def exitAnd(self, ctx:DynamoDbGrammarParser.AndContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#Between.
def enterBetween(self, ctx:DynamoDbGrammarParser.BetweenContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#Between.
def exitBetween(self, ctx:DynamoDbGrammarParser.BetweenContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#FunctionCondition.
def enterFunctionCondition(self, ctx:DynamoDbGrammarParser.FunctionConditionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#FunctionCondition.
def exitFunctionCondition(self, ctx:DynamoDbGrammarParser.FunctionConditionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#Comparator.
def enterComparator(self, ctx:DynamoDbGrammarParser.ComparatorContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#Comparator.
def exitComparator(self, ctx:DynamoDbGrammarParser.ComparatorContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#ConditionGrouping.
def enterConditionGrouping(self, ctx:DynamoDbGrammarParser.ConditionGroupingContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#ConditionGrouping.
def exitConditionGrouping(self, ctx:DynamoDbGrammarParser.ConditionGroupingContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#comparator_symbol.
def enterComparator_symbol(self, ctx:DynamoDbGrammarParser.Comparator_symbolContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#comparator_symbol.
def exitComparator_symbol(self, ctx:DynamoDbGrammarParser.Comparator_symbolContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#update_.
def enterUpdate_(self, ctx:DynamoDbGrammarParser.Update_Context):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#update_.
def exitUpdate_(self, ctx:DynamoDbGrammarParser.Update_Context):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#update.
def enterUpdate(self, ctx:DynamoDbGrammarParser.UpdateContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#update.
def exitUpdate(self, ctx:DynamoDbGrammarParser.UpdateContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#set_section.
def enterSet_section(self, ctx:DynamoDbGrammarParser.Set_sectionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#set_section.
def exitSet_section(self, ctx:DynamoDbGrammarParser.Set_sectionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#set_action.
def enterSet_action(self, ctx:DynamoDbGrammarParser.Set_actionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#set_action.
def exitSet_action(self, ctx:DynamoDbGrammarParser.Set_actionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#add_section.
def enterAdd_section(self, ctx:DynamoDbGrammarParser.Add_sectionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#add_section.
def exitAdd_section(self, ctx:DynamoDbGrammarParser.Add_sectionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#add_action.
def enterAdd_action(self, ctx:DynamoDbGrammarParser.Add_actionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#add_action.
def exitAdd_action(self, ctx:DynamoDbGrammarParser.Add_actionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#delete_section.
def enterDelete_section(self, ctx:DynamoDbGrammarParser.Delete_sectionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#delete_section.
def exitDelete_section(self, ctx:DynamoDbGrammarParser.Delete_sectionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#delete_action.
def enterDelete_action(self, ctx:DynamoDbGrammarParser.Delete_actionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#delete_action.
def exitDelete_action(self, ctx:DynamoDbGrammarParser.Delete_actionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#remove_section.
def enterRemove_section(self, ctx:DynamoDbGrammarParser.Remove_sectionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#remove_section.
def exitRemove_section(self, ctx:DynamoDbGrammarParser.Remove_sectionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#remove_action.
def enterRemove_action(self, ctx:DynamoDbGrammarParser.Remove_actionContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#remove_action.
def exitRemove_action(self, ctx:DynamoDbGrammarParser.Remove_actionContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#OperandValue.
def enterOperandValue(self, ctx:DynamoDbGrammarParser.OperandValueContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#OperandValue.
def exitOperandValue(self, ctx:DynamoDbGrammarParser.OperandValueContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#ArithmeticValue.
def enterArithmeticValue(self, ctx:DynamoDbGrammarParser.ArithmeticValueContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#ArithmeticValue.
def exitArithmeticValue(self, ctx:DynamoDbGrammarParser.ArithmeticValueContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#PlusMinus.
def enterPlusMinus(self, ctx:DynamoDbGrammarParser.PlusMinusContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#PlusMinus.
def exitPlusMinus(self, ctx:DynamoDbGrammarParser.PlusMinusContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#ArithmeticParens.
def enterArithmeticParens(self, ctx:DynamoDbGrammarParser.ArithmeticParensContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#ArithmeticParens.
def exitArithmeticParens(self, ctx:DynamoDbGrammarParser.ArithmeticParensContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#PathOperand.
def enterPathOperand(self, ctx:DynamoDbGrammarParser.PathOperandContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#PathOperand.
def exitPathOperand(self, ctx:DynamoDbGrammarParser.PathOperandContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#LiteralOperand.
def enterLiteralOperand(self, ctx:DynamoDbGrammarParser.LiteralOperandContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#LiteralOperand.
def exitLiteralOperand(self, ctx:DynamoDbGrammarParser.LiteralOperandContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#FunctionOperand.
def enterFunctionOperand(self, ctx:DynamoDbGrammarParser.FunctionOperandContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#FunctionOperand.
def exitFunctionOperand(self, ctx:DynamoDbGrammarParser.FunctionOperandContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#ParenOperand.
def enterParenOperand(self, ctx:DynamoDbGrammarParser.ParenOperandContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#ParenOperand.
def exitParenOperand(self, ctx:DynamoDbGrammarParser.ParenOperandContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#FunctionCall.
def enterFunctionCall(self, ctx:DynamoDbGrammarParser.FunctionCallContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#FunctionCall.
def exitFunctionCall(self, ctx:DynamoDbGrammarParser.FunctionCallContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#path.
def enterPath(self, ctx:DynamoDbGrammarParser.PathContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#path.
def exitPath(self, ctx:DynamoDbGrammarParser.PathContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#id_.
def enterId_(self, ctx:DynamoDbGrammarParser.Id_Context):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#id_.
def exitId_(self, ctx:DynamoDbGrammarParser.Id_Context):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#MapAccess.
def enterMapAccess(self, ctx:DynamoDbGrammarParser.MapAccessContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#MapAccess.
def exitMapAccess(self, ctx:DynamoDbGrammarParser.MapAccessContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#ListAccess.
def enterListAccess(self, ctx:DynamoDbGrammarParser.ListAccessContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#ListAccess.
def exitListAccess(self, ctx:DynamoDbGrammarParser.ListAccessContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#LiteralSub.
def enterLiteralSub(self, ctx:DynamoDbGrammarParser.LiteralSubContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#LiteralSub.
def exitLiteralSub(self, ctx:DynamoDbGrammarParser.LiteralSubContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#expression_attr_names_sub.
def enterExpression_attr_names_sub(self, ctx:DynamoDbGrammarParser.Expression_attr_names_subContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#expression_attr_names_sub.
def exitExpression_attr_names_sub(self, ctx:DynamoDbGrammarParser.Expression_attr_names_subContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#expression_attr_values_sub.
def enterExpression_attr_values_sub(self, ctx:DynamoDbGrammarParser.Expression_attr_values_subContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#expression_attr_values_sub.
def exitExpression_attr_values_sub(self, ctx:DynamoDbGrammarParser.Expression_attr_values_subContext):
pass
# Enter a parse tree produced by DynamoDbGrammarParser#unknown.
def enterUnknown(self, ctx:DynamoDbGrammarParser.UnknownContext):
pass
# Exit a parse tree produced by DynamoDbGrammarParser#unknown.
def exitUnknown(self, ctx:DynamoDbGrammarParser.UnknownContext):
pass
|
[
"sonalis@packtpub.com"
] |
sonalis@packtpub.com
|
b97bdf194a8fbf05e9d4d3cc5fcabae4813e1dcc
|
ec02650631ee787fa2518d4b12febbd372fe64c3
|
/src/statistics_calculation.py
|
81947f9231555030fcdb4363d0f370a6fa229f53
|
[] |
no_license
|
VitalyCherkov/state_exam_statistics
|
d889f95c20c86e4cd2b4aaad9f9f57c367cd030b
|
f499363e3bf1a8d7c96d5e5aee019ba99fdefd23
|
refs/heads/master
| 2020-03-23T05:13:27.771935
| 2019-07-10T10:00:56
| 2019-07-10T10:00:56
| 140,887,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,152
|
py
|
import functools
import operator
from src.constants.common_parameter_names import ACADEMIC_PERFORMANCE, QUALITY, AVERAGE_GRADE
from src.weight_functions import (
get_academic_performance_weight,
get_quality_weight,
get_average_grade_weight
)
def calc_percentages_per_task(columns, max_results):
"""
    computes the total score for each task and returns
    the array of percentages of solutions for each task
"""
rows_number = columns.shape[0]
total_by_task = columns.sum(axis=0)
total_by_task = [value for index, value in total_by_task.iteritems()]
percentages_by_task = [
round(value / (max_results[index] * rows_number) * 100)
for index, value in enumerate(total_by_task)
]
return percentages_by_task
def calc_common_statistics_via_func(marks_column, weight_func):
"""
    computes the statistic for the given marks column
    using the supplied weight function
"""
total_count = len(marks_column)
weighted_sequence = [
mark
for index, mark in
marks_column.apply(weight_func).iteritems()
]
total_weight = functools.reduce(operator.add, weighted_sequence, 0)
return round(total_weight / total_count * 100)
def get_common_statistics(table, marks_column_name):
"""
    computes the statistics for the ninth-grade classes
"""
marks_column = table[marks_column_name]\
.apply(lambda x: int(x))
academic_performance = calc_common_statistics_via_func(
marks_column=marks_column,
weight_func=get_academic_performance_weight
)
quality = calc_common_statistics_via_func(
marks_column=marks_column,
weight_func=get_quality_weight
)
average_grade = calc_common_statistics_via_func(
marks_column=marks_column,
weight_func=get_average_grade_weight
)
return {
ACADEMIC_PERFORMANCE: str(academic_performance),
QUALITY: str(quality),
AVERAGE_GRADE: str(average_grade),
}
|
[
"vv-ch@bk.ru"
] |
vv-ch@bk.ru
|
98a9eb880eda3ee7f1c46c6d7d034777ec672a3c
|
71f36e89cef2f7739f6a19e77ec94649cf408297
|
/CodeIsFun/manage.py
|
9a2b1882e4285baba6d0d67a98b347aa4e0d2beb
|
[] |
no_license
|
ansh9690/Django-Blog
|
bdbccf4cb6310ec9b325afdaf2402e319751b287
|
845c0d30381fb7248756d3617b30647e680884a3
|
refs/heads/master
| 2022-12-08T01:42:49.453597
| 2020-08-25T17:13:59
| 2020-08-25T17:13:59
| 258,409,983
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CodeIsFun.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"anshuprakash8445@gmail.com"
] |
anshuprakash8445@gmail.com
|
5d7f25c116d2a9fa1d5af893930135ce01f01bff
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/d3d1aa2dca4988bb7a232458338d86a0c9fef06e-<main>-fix.py
|
2ca55744f3a4a48e7dd7ef030d5aae4df1a066eb
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,010
|
py
|
def main():
module = AnsibleModule(argument_spec=dict(src=dict(required=True), dest=dict(required=True), dest_port=dict(default=None, type='int'), delete=dict(default='no', type='bool'), private_key=dict(default=None), rsync_path=dict(default=None), _local_rsync_path=dict(default='rsync', type='path'), _substitute_controller=dict(default='no', type='bool'), archive=dict(default='yes', type='bool'), checksum=dict(default='no', type='bool'), compress=dict(default='yes', type='bool'), existing_only=dict(default='no', type='bool'), dirs=dict(default='no', type='bool'), recursive=dict(type='bool'), links=dict(type='bool'), copy_links=dict(type='bool'), perms=dict(type='bool'), times=dict(type='bool'), owner=dict(type='bool'), group=dict(type='bool'), set_remote_user=dict(default='yes', type='bool'), rsync_timeout=dict(type='int', default=0), rsync_opts=dict(type='list'), ssh_args=dict(type='str'), partial=dict(default='no', type='bool'), verify_host=dict(default='no', type='bool'), mode=dict(default='push', choices=['push', 'pull'])), supports_check_mode=True)
if module.params['_substitute_controller']:
try:
source = (('"' + substitute_controller(module.params['src'])) + '"')
dest = (('"' + substitute_controller(module.params['dest'])) + '"')
except ValueError:
module.fail_json(msg='Could not determine controller hostname for rsync to send to')
else:
source = (('"' + module.params['src']) + '"')
dest = (('"' + module.params['dest']) + '"')
dest_port = module.params['dest_port']
delete = module.params['delete']
private_key = module.params['private_key']
rsync_path = module.params['rsync_path']
rsync = module.params.get('_local_rsync_path', 'rsync')
rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout')
archive = module.params['archive']
checksum = module.params['checksum']
compress = module.params['compress']
existing_only = module.params['existing_only']
dirs = module.params['dirs']
partial = module.params['partial']
recursive = module.params['recursive']
links = module.params['links']
copy_links = module.params['copy_links']
perms = module.params['perms']
times = module.params['times']
owner = module.params['owner']
group = module.params['group']
rsync_opts = module.params['rsync_opts']
ssh_args = module.params['ssh_args']
verify_host = module.params['verify_host']
if ('/' not in rsync):
rsync = module.get_bin_path(rsync, required=True)
ssh = module.get_bin_path('ssh', required=True)
cmd = ('%s --delay-updates -F' % rsync)
if compress:
cmd = (cmd + ' --compress')
if rsync_timeout:
cmd = (cmd + (' --timeout=%s' % rsync_timeout))
if module.check_mode:
cmd = (cmd + ' --dry-run')
if delete:
cmd = (cmd + ' --delete-after')
if existing_only:
cmd = (cmd + ' --existing')
if checksum:
cmd = (cmd + ' --checksum')
if archive:
cmd = (cmd + ' --archive')
if (recursive is False):
cmd = (cmd + ' --no-recursive')
if (links is False):
cmd = (cmd + ' --no-links')
if (copy_links is True):
cmd = (cmd + ' --copy-links')
if (perms is False):
cmd = (cmd + ' --no-perms')
if (times is False):
cmd = (cmd + ' --no-times')
if (owner is False):
cmd = (cmd + ' --no-owner')
if (group is False):
cmd = (cmd + ' --no-group')
else:
if (recursive is True):
cmd = (cmd + ' --recursive')
if (links is True):
cmd = (cmd + ' --links')
if (copy_links is True):
cmd = (cmd + ' --copy-links')
if (perms is True):
cmd = (cmd + ' --perms')
if (times is True):
cmd = (cmd + ' --times')
if (owner is True):
cmd = (cmd + ' --owner')
if (group is True):
cmd = (cmd + ' --group')
if dirs:
cmd = (cmd + ' --dirs')
if (private_key is None):
private_key = ''
else:
private_key = ('-i "%s"' % private_key)
ssh_opts = '-S none'
if (not verify_host):
ssh_opts = ('%s -o StrictHostKeyChecking=no' % ssh_opts)
if ssh_args:
ssh_opts = ('%s %s' % (ssh_opts, ssh_args))
if (source.startswith('"rsync://') and dest.startswith('"rsync://')):
module.fail_json(msg='either src or dest must be a localhost', rc=1)
if ((not source.startswith('"rsync://')) and (not dest.startswith('"rsync://'))):
if (dest_port is not None):
cmd += (" --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port))
else:
cmd += (" --rsh 'ssh %s %s'" % (private_key, ssh_opts))
if rsync_path:
cmd = (cmd + (' --rsync-path=%s' % rsync_path))
if rsync_opts:
cmd = ((cmd + ' ') + ' '.join(rsync_opts))
if partial:
cmd = (cmd + ' --partial')
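    # prefix rsync's itemized output lines with a marker so that changes can be detected in stdout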
changed_marker = '<<CHANGED>>'
cmd = (((cmd + " --out-format='") + changed_marker) + "%i %n%L'")
if ('@' not in source):
source = os.path.expanduser(source)
if ('@' not in dest):
dest = os.path.expanduser(dest)
cmd = ' '.join([cmd, source, dest])
cmdstr = cmd
(rc, out, err) = module.run_command(cmd)
if rc:
return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
else:
changed = (changed_marker in out)
out_clean = out.replace(changed_marker, '')
out_lines = out_clean.split('\n')
while ('' in out_lines):
out_lines.remove('')
if module._diff:
diff = {
'prepared': out_clean,
}
return module.exit_json(changed=changed, msg=out_clean, rc=rc, cmd=cmdstr, stdout_lines=out_lines, diff=diff)
else:
return module.exit_json(changed=changed, msg=out_clean, rc=rc, cmd=cmdstr, stdout_lines=out_lines)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
49ac5ef41b8dac8841016e8ef4956958958458a3
|
585a068c651d85e6f02b18686ad95ec0671fbdfb
|
/habilidades.py
|
bdf134c919923b842545c8815dde37f379f18fe9
|
[] |
no_license
|
FlavioSG-BR/Flask-RestfulAPI
|
f9b1361b3e2ba4b4436bc656a0b534418ecbe4c6
|
7380143a4d801f758ab59779287f443c1f1ad13b
|
refs/heads/master
| 2022-04-17T18:41:18.508258
| 2020-04-16T18:10:40
| 2020-04-16T18:10:40
| 256,292,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
from flask_restful import Resource
lista_habilidades = ['Python', 'Java', 'Flask', 'PHP']
class Habilidades(Resource):
def get(self):
return lista_habilidades
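# Hypothetical registration example (assumes a Flask-RESTful Api instance named `api` elsewhere in the project):
#   api.add_resource(Habilidades, '/habilidades')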
|
[
"flavio_godoy@yahoo.com.br"
] |
flavio_godoy@yahoo.com.br
|
a23a188a9a220ca25ffaec14f74dd0d311ecb76a
|
ba3be84d355e90860479fc2a0d92b536c377c643
|
/PYTHON/Advance/Python Controls/py_Frame2.py
|
d9e598e1135bffa05661b601196ac36c71774648
|
[] |
no_license
|
vipin26/python
|
c62a3427511658ff292085fc382c5b3b3ff4d609
|
e8442b84c385ddef972d6a514e471d8eba8af0a3
|
refs/heads/master
| 2020-09-29T12:39:52.775466
| 2019-12-15T05:44:31
| 2019-12-15T05:44:31
| 227,034,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
from Tkinter import *
top = Tk()
top.geometry("500x500")
#width="200", height="200"
f1 = Frame(top,bg="green")
f1.pack(side="left",fill=BOTH,expand=1)
f2 = Frame(top,bg="Red", width="200", height="200")
f2.pack(side="right",fill=BOTH,expand=1)
b1 = Button(f1, text="Button 1")
b1.pack()
b2 = Button(f2, text="Button 2")
b2.pack()
top.mainloop()
|
[
"58661640+vipin26@users.noreply.github.com"
] |
58661640+vipin26@users.noreply.github.com
|
0fe60563da73b0f964c80040f248e1ff90390f63
|
b57414bc300636cec239e81bc2cf0d1b1c170d21
|
/Main.py
|
d1827d7ef168205a2e0c07dcfe867316e6130d59
|
[] |
no_license
|
TaraRosen/mn_primary_election_day_tracking
|
7abcd0f4bcf56f196aef1b865f17129501aeb6f7
|
db6e73f6023b2432530d4664cdba3e1fa5f6eaab
|
refs/heads/main
| 2023-01-15T19:23:01.755729
| 2020-11-06T15:13:49
| 2020-11-06T15:13:49
| 310,629,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,681
|
py
|
import time
from time import sleep
from warnings import warn
import urllib.parse as up
from requests import get
from bs4 import BeautifulSoup
import re
import datetime
import psycopg2
def resultsScraper(countyCode, precinctCodes):
precinct_entry = ""
precincts = ""
for precinct in precinctCodes:
precincts += precinct
precincts += ","
precincts = precincts[:-1]
# specify the url with 'countyid' and 'precincts'
response = get(
'https://electionresults.sos.state.mn.us/Results/PrecinctListResults/115?countyid=' + countyCode + '&precincts=' + precincts)
# parse the html using beautiful soup and store in variable `soup`
soup = BeautifulSoup(response.text, 'html.parser')
# Selects starting area in html at 'center'
center = soup.find('center')
# Creates list of precinct names,
# sets iterator at 1 to skip "Results for Selected Precincts in Hennepin County"
precinct_containers = soup.find_all('div', class_='resultgroupheader')
pnum = 1
# Creates list of all tables which is where results are stored for each precinct
tables = center.find_all('table')
# Iterates through table
for ptable in tables:
# Holds the name of the office_name candidates are running for i.e. U.S. Senator
office_name = ""
# Creates list of all rows which is where each candidates results are stored
rows = ptable.find_all('tr')
# Iterates through candidates
for row in rows:
# Initializes the string that holds the row for each candidate result in table
# with precinct name and office name
rowentry = "('" + precinct_containers[0].text.strip()[34:-7].replace("'", "") + "','" + precinct_containers[pnum].text.strip().replace("'", "") + "','" + office_name.replace("'", "") + "'"
# Check if the row has 'class' so it doesn't error, skips if doesn't
if row.has_attr('class'):
# Updates the 'office_name' variable to whichever seat candidates are running for
if row['class'] == ['resultofficeheader']:
# Generates and cleans the office name
office_name = row.find('div', class_='resultoffice')
office_name = office_name.text.strip()
office_name = re.sub(r"\s+", " ", office_name)
# If not a new office, check if a candidate result
elif row['class'] == ['resultcandidates']:
# Selects appropriate entries, cleans extra empty field, cleans text
cols = row.find_all('td')[:4]
cols = [ele.text.strip() for ele in cols]
if cols:
for ele in cols:
rowentry += ",'"
rowentry += ele.replace("'", "") + "'"
rowentry += "),"
precinct_entry += rowentry
# Updates to next precinct once iterated through entire table
pnum += 1
return precinct_entry
def precinctCodes(countyCode, reportedPrecincts):
# List to store codes in
newPrecincts = []
precinct_codes = ""
precinct_counter = 0
# Specificy URL
response = get('https://electionresults.sos.state.mn.us/Select/CountyPrecinctSelect/115?districtid=' + countyCode)
# Parse the html using beautiful soup and store in variable `soup`
soup = BeautifulSoup(response.text, 'html.parser')
# Precinct list
precinct_list = soup.find_all('option', class_='selOptReported', limit=253) # Change back to 'selOptReported' when actually using, and limit 253
# Check all precinct codes
for precinct in precinct_list:
precinctCode = precinct.attrs['value']
# Compile precincts that reported since last check
if precinctCode not in reportedPrecincts:
newPrecincts.append(precinctCode)
precinct_counter += 1
precinct_codes += precinctCode
precinct_codes += ","
return newPrecincts
#-- Main -------------------------------------------------------------------------------
conn = psycopg2.connect(dbname="results18", user="dflvictory", password="dflguest18", host="dfl-election-returns.cmycsq7ldygm.us-east-2.rds.amazonaws.com")
cur = conn.cursor()
cur.execute("set time zone 'America/Chicago'")
# while True:
# precinctsReported = numpy.empty(88, dtype=object)
precinctsReported = [[] for i in range(88)]
# print(precinctsReported)
# URL
URL = 'https://electionresults.sos.state.mn.us/Results/CountyStatistics/115'
# Open URL
response = get(URL)
# Monitor Loop
start_time = time.time()
requests = 0
# Throw a warning for non-200 status codes
if response.status_code != 200:
requests += 1
    warn('Request: {}; Status code: {}'.format(requests, response.status_code))
# Slow the loop
sleep(1)
# Parse the html using beautiful soup and store in variable `soup`
soup = BeautifulSoup(response.text, 'html.parser')
# Precinct list
counties_reported = soup.find_all('tr')
# Finding County Results
for county in counties_reported:
county_entry = "INSERT INTO results18 (county, precinct, office, party, candidate, raw_votes, percentage) values "
# Removes nulls
if county.find('a', href=True) is not None:
# Get County Code
row = county.find('a', href=True)
url = row.get('href')
parsed = up.urlparse(url)
code = up.parse_qs(parsed.query)['countyId']
countyCode = code[0]
# Get Precincts Reported
reported = county.find('td', class_='statscell statscellnumber').text
numReported = int(reported)
# Compared # of precincts currently reported to # previously reported
if numReported > len(precinctsReported[int(countyCode)]): # look up syntax for this
# Call PrecinctCodes helper function
precinctsUpdated = precinctCodes(countyCode, precinctsReported[int(countyCode)])
# Call MyScraper
if(len(precinctsUpdated) > 0):
county_entry += resultsScraper(countyCode, precinctsUpdated)
# Append new list of precincts to array
# numpy.insert(precinctsReported, countyCode, precinctsUpdated)
# if county_entry != "INSERT INTO results18 (county, precinct, office, party, candidate, raw_votes, percentage) values ":
county_entry = county_entry[:-1]
cur.execute(county_entry)
conn.commit()
# else:
# print(countyCode)
print("Time:", time.time() - start_time)
cur.close()
conn.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
3af5a5a3567f1a8fedae3b522ec01b68b906db07
|
456219b19249259643810cca5f92ce4c9a26c947
|
/Problem 07/advent7b.py
|
0d4c68181df187231c7452e046bd203646f84ebf
|
[
"Apache-2.0"
] |
permissive
|
mankybansal/advent-of-code-2020
|
04a10a874a8444a7ef34f8d105050f574fdf0f3d
|
62b389c52d488ea88443b564b6b6b89fd7b5290b
|
refs/heads/main
| 2023-02-01T16:38:49.004717
| 2020-12-23T11:27:15
| 2020-12-23T11:27:15
| 318,087,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
ad_list = {}
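# adjacency list: bag colour -> list of (count, inner bag colour) pairs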
for line in open('input.txt', 'r').readlines():
rules = line.strip().replace('.', '').replace('contain', '').replace(',', ' ').split(' ')
master_key = None
for i in range(len(rules)):
if i == 0:
split_rule = rules[i].split(' ')
key = split_rule[0] + split_rule[1]
if key not in ad_list:
ad_list[key] = []
master_key = key
else:
split_rule = rules[i].split(' ')
number_key = 0 if split_rule[0] == 'no' else int(split_rule[0])
ad_list[master_key].append((number_key, split_rule[1] + split_rule[2]))
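# recursively count how many bags a single bag of the given colour must contain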
def recurse(key):
if key == 'otherbags':
return 0
total = 0
for q_num, q_key in ad_list[key]:
total += (q_num * recurse(q_key)) + q_num
return total
print(recurse('shinygold'))
|
[
"sunny.bansal@gmail.com"
] |
sunny.bansal@gmail.com
|
17f286bd52e1fda213acc5e1347e4d32bd730c24
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/389/usersdata/346/73669/submittedfiles/poligono.py
|
2d92908686c389fe16d305bc44eb0541a9bdb9f1
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
# -*- coding: utf-8 -*-
#INPUT
n = int(input('Digite quantos lados deve ter o seu polígono: '))
#PROCESSING
nd = (n*(n-3))/2
#OUTPUT
print('%.1f' % nd)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e9690e73f0b4e1dc083c4d6cc4fcf8ffe8143425
|
a3984032b1f20c38b4dee1809326230fa828287a
|
/manage.py
|
d960e32230106c88c173a3a199a6479c23c5ba5b
|
[
"MIT"
] |
permissive
|
tomi77/django-auth-role
|
c5acccae5647578fffa938e5cba2f06a4eba5f7f
|
8fb3bf8940856a13ca7c7265c4670e4359dcb638
|
refs/heads/master
| 2021-01-19T21:50:33.773375
| 2017-05-10T16:48:39
| 2017-05-10T16:48:39
| 88,711,319
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
#!/usr/bin/env python
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"tomasz.rup@gmail.com"
] |
tomasz.rup@gmail.com
|
3f9c3e9ffc8803e6cd6e39ae498eaed82dd43a2d
|
a9cb028a29388ef97614faca6405985584e508c0
|
/CorpRoomApp/views/__init__.py
|
0202939f5240055387b68e169d950ae50c230c2f
|
[] |
no_license
|
tungsten-manojb/DialARoomProject
|
f18cb0d71dbe5a94818aa7d6ec015c57580d83db
|
d5f8bb6a4611d6c452db65e782f0029b19081d13
|
refs/heads/master
| 2021-01-10T02:25:14.389591
| 2015-12-22T07:30:48
| 2015-12-22T07:30:48
| 48,413,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
from CorpRoomApp.views import portal
|
[
"manoj.bawane@tungstenbigdata.com"
] |
manoj.bawane@tungstenbigdata.com
|
12e36e9537e9bd48715cc87299c6710dcc8d2484
|
4341c1c4fbf30032c50b66ca6ac2d4a2bfc0e83a
|
/translate/models.py
|
6d1a98c568bcfc0c779f1c1ebfd40552bd2fb9a1
|
[] |
no_license
|
a574751346/transfer2nl
|
fec566835a62ebdc5388fcfef7526dbe72bf78d7
|
0251655603e2da0c3ca7cf597b2d7c10060804ba
|
refs/heads/master
| 2021-05-23T17:44:56.836659
| 2020-04-09T05:33:57
| 2020-04-09T05:33:57
| 253,404,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47,574
|
py
|
import tensorflow as tf
import math
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell
from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell
import utils, beam_search
def auto_reuse(fun):
"""
Wrapper that automatically handles the `reuse' parameter.
This is rather risky, as it can lead to reusing variables
by mistake.
"""
def fun_(*args, **kwargs):
try:
return fun(*args, **kwargs)
except ValueError as e:
if 'reuse' in str(e):
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
return fun(*args, **kwargs)
else:
raise e
return fun_
get_variable = auto_reuse(tf.get_variable)
dense = auto_reuse(tf.layers.dense)
class CellWrapper(RNNCell):
"""
Wrapper around LayerNormBasicLSTMCell, BasicLSTMCell and MultiRNNCell, to keep
the state_is_tuple=False behavior (soon to be deprecated).
"""
def __init__(self, cell):
super(CellWrapper, self).__init__()
self.cell = cell
self.num_splits = len(cell.state_size) if isinstance(cell.state_size, tuple) else 1
@property
def state_size(self):
return sum(self.cell.state_size)
@property
def output_size(self):
return self.cell.output_size
def __call__(self, inputs, state, scope=None):
state = tf.split(value=state, num_or_size_splits=self.num_splits, axis=1)
new_h, new_state = self.cell(inputs, state, scope=scope)
return new_h, tf.concat(new_state, 1)
def multi_encoder(encoder_inputs, encoders, encoder_input_length, other_inputs=None, **kwargs):
"""
Build multiple encoders according to the configuration in `encoders`, reading from `encoder_inputs`.
The result is a list of the outputs produced by those encoders (for each time-step), and their final state.
:param encoder_inputs: list of tensors of shape (batch_size, input_length), one tensor for each encoder.
:param encoders: list of encoder configurations
:param encoder_input_length: list of tensors of shape (batch_size,) (one tensor for each encoder)
:return:
encoder outputs: a list of tensors of shape (batch_size, input_length, encoder_cell_size), hidden states of the
encoders.
encoder state: concatenation of the final states of all encoders, tensor of shape (batch_size, sum_of_state_sizes)
new_encoder_input_length: list of tensors of shape (batch_size,) with the true length of the encoder outputs.
May be different than `encoder_input_length` because of maxout strides, and time pooling.
"""
encoder_states = []
encoder_outputs = []
# create embeddings in the global scope (allows sharing between encoder and decoder)
embedding_variables = []
for encoder in encoders:
if encoder.binary:
embedding_variables.append(None)
continue
# inputs are token ids, which need to be mapped to vectors (embeddings)
embedding_shape = [encoder.vocab_size, encoder.embedding_size]
if encoder.embedding_initializer == 'sqrt3':
initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
else:
initializer = None
device = '/cpu:0' if encoder.embeddings_on_cpu else None
with tf.device(device): # embeddings can take a very large amount of memory, so
# storing them in GPU memory can be impractical
embedding = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape,
initializer=initializer)
embedding_variables.append(embedding)
new_encoder_input_length = []
for i, encoder in enumerate(encoders):
if encoder.use_lstm is False:
encoder.cell_type = 'GRU'
with tf.variable_scope('encoder_{}'.format(encoder.name)):
encoder_inputs_ = encoder_inputs[i]
encoder_input_length_ = encoder_input_length[i]
def get_cell(input_size=None, reuse=False):
if encoder.cell_type.lower() == 'lstm':
cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse))
elif encoder.cell_type.lower() == 'dropoutgru':
cell = DropoutGRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm,
input_size=input_size, input_keep_prob=encoder.rnn_input_keep_prob,
state_keep_prob=encoder.rnn_state_keep_prob)
elif encoder.cell_type.lower() == 'treelstm':
# TODO
cell = None
return
else:
cell = GRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm)
if encoder.use_dropout and encoder.cell_type.lower() != 'dropoutgru':
cell = DropoutWrapper(cell, input_keep_prob=encoder.rnn_input_keep_prob,
output_keep_prob=encoder.rnn_output_keep_prob,
state_keep_prob=encoder.rnn_state_keep_prob,
variational_recurrent=encoder.pervasive_dropout,
dtype=tf.float32, input_size=input_size)
return cell
embedding = embedding_variables[i]
batch_size = tf.shape(encoder_inputs_)[0]
time_steps = tf.shape(encoder_inputs_)[1]
if embedding is not None:
flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)])
flat_inputs = tf.nn.embedding_lookup(embedding, flat_inputs)
encoder_inputs_ = tf.reshape(flat_inputs,
tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value]))
if other_inputs is not None:
encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2)
if encoder.use_dropout:
noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob,
noise_shape=noise_shape)
size = tf.shape(encoder_inputs_)[2]
noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob,
noise_shape=noise_shape)
if encoder.input_layers:
for j, layer_size in enumerate(encoder.input_layers):
if encoder.input_layer_activation is not None and encoder.input_layer_activation.lower() == 'relu':
activation = tf.nn.relu
else:
activation = tf.tanh
encoder_inputs_ = dense(encoder_inputs_, layer_size, activation=activation, use_bias=True,
name='layer_{}'.format(j))
if encoder.use_dropout:
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob)
# Contrary to Theano's RNN implementation, states after the sequence length are zero
# (while Theano repeats last state)
inter_layer_keep_prob = None if not encoder.use_dropout else encoder.inter_layer_keep_prob
parameters = dict(
inputs=encoder_inputs_, sequence_length=encoder_input_length_,
dtype=tf.float32, parallel_iterations=encoder.parallel_iterations
)
input_size = encoder_inputs_.get_shape()[2].value
state_size = (encoder.cell_size * 2 if encoder.cell_type.lower() == 'lstm' else encoder.cell_size)
def get_initial_state(name='initial_state'):
if encoder.train_initial_states:
initial_state = get_variable(name, initializer=tf.zeros(state_size))
return tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
else:
return None
if encoder.bidir:
rnn = lambda reuse: stack_bidirectional_dynamic_rnn(
cells_fw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
for j in range(encoder.layers)],
cells_bw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
for j in range(encoder.layers)],
initial_states_fw=[get_initial_state('initial_state_fw')] * encoder.layers,
initial_states_bw=[get_initial_state('initial_state_bw')] * encoder.layers,
time_pooling=encoder.time_pooling, pooling_avg=encoder.pooling_avg,
**parameters)
initializer = CellInitializer(encoder.cell_size) if encoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
encoder_outputs_, _, encoder_states_ = rnn(reuse=False)
except ValueError: # Multi-task scenario where we're reusing the same RNN parameters
encoder_outputs_, _, encoder_states_ = rnn(reuse=True)
else:
if encoder.time_pooling or encoder.final_state == 'concat_last':
raise NotImplementedError
if encoder.layers > 1:
cell = MultiRNNCell([get_cell(input_size if j == 0 else encoder.cell_size)
for j in range(encoder.layers)])
initial_state = (get_initial_state(),) * encoder.layers
else:
cell = get_cell(input_size)
initial_state = get_initial_state()
encoder_outputs_, encoder_states_ = auto_reuse(tf.nn.dynamic_rnn)(cell=cell,
initial_state=initial_state,
**parameters)
last_backward = encoder_outputs_[:, 0, encoder.cell_size:]
indices = tf.stack([tf.range(batch_size), encoder_input_length_ - 1], axis=1)
last_forward = tf.gather_nd(encoder_outputs_[:, :, :encoder.cell_size], indices)
last_forward.set_shape([None, encoder.cell_size])
if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states)
encoder_state_ = tf.concat(encoder_states_, axis=1)
elif encoder.final_state == 'average':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.final_state == 'average_inputs':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.bidir and encoder.final_state == 'last_both':
encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state
encoder_state_ = last_backward
else: # last forward hidden state
encoder_state_ = last_forward
if encoder.bidir and encoder.bidir_projection:
encoder_outputs_ = dense(encoder_outputs_, encoder.cell_size, use_bias=False, name='bidir_projection')
encoder_outputs.append(encoder_outputs_)
encoder_states.append(encoder_state_)
new_encoder_input_length.append(encoder_input_length_)
encoder_state = tf.concat(encoder_states, 1)
return encoder_outputs, encoder_state, new_encoder_input_length
def compute_energy(hidden, state, attn_size, attn_keep_prob=None, pervasive_dropout=False, layer_norm=False,
mult_attn=False, **kwargs):
if attn_keep_prob is not None:
state_noise_shape = [1, tf.shape(state)[1]] if pervasive_dropout else None
state = tf.nn.dropout(state, keep_prob=attn_keep_prob, noise_shape=state_noise_shape)
hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if pervasive_dropout else None
hidden = tf.nn.dropout(hidden, keep_prob=attn_keep_prob, noise_shape=hidden_noise_shape)
if mult_attn:
state = dense(state, attn_size, use_bias=False, name='state')
hidden = dense(hidden, attn_size, use_bias=False, name='hidden')
return tf.einsum('ijk,ik->ij', hidden, state)
else:
y = dense(state, attn_size, use_bias=not layer_norm, name='W_a')
y = tf.expand_dims(y, axis=1)
if layer_norm:
y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state')
hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden')
f = dense(hidden, attn_size, use_bias=False, name='U_a')
v = get_variable('v_a', [attn_size])
s = f + y
return tf.reduce_sum(v * tf.tanh(s), axis=2)
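# Note: the non-multiplicative branch above is the standard additive (Bahdanau-style) attention
# energy, e_j = v_a . tanh(W_a s + U_a h_j), computed for every encoder position j in parallel;
# `mult_attn` switches to the dot-product form between the projected state and hidden states instead.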
def compute_energy_with_filter(hidden, state, prev_weights, attn_filters, attn_filter_length,
**kwargs):
hidden = tf.expand_dims(hidden, 2)
batch_size = tf.shape(hidden)[0]
time_steps = tf.shape(hidden)[1]
attn_size = hidden.get_shape()[3].value
filter_shape = [attn_filter_length * 2 + 1, 1, 1, attn_filters]
filter_ = get_variable('filter', filter_shape)
u = get_variable('U', [attn_filters, attn_size])
prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1]))
conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME')
shape = tf.stack([tf.multiply(batch_size, time_steps), attn_filters])
conv = tf.reshape(conv, shape)
z = tf.matmul(conv, u)
z = tf.reshape(z, tf.stack([batch_size, time_steps, 1, attn_size]))
y = dense(state, attn_size, use_bias=True, name='y')
y = tf.reshape(y, [-1, 1, 1, attn_size])
k = get_variable('W', [attn_size, attn_size])
# dot product between tensors requires reshaping
hidden = tf.reshape(hidden, tf.stack([tf.multiply(batch_size, time_steps), attn_size]))
f = tf.matmul(hidden, k)
f = tf.reshape(f, tf.stack([batch_size, time_steps, 1, attn_size]))
v = get_variable('V', [attn_size])
s = f + y + z
return tf.reduce_sum(v * tf.tanh(s), [2, 3])
def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs):
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
if encoder.attn_filters:
e = compute_energy_with_filter(hidden_states, state, attn_size=encoder.attn_size,
attn_filters=encoder.attn_filters,
attn_filter_length=encoder.attn_filter_length, **kwargs)
else:
e = compute_energy(hidden_states, state, attn_size=encoder.attn_size,
attn_keep_prob=encoder.attn_keep_prob, pervasive_dropout=encoder.pervasive_dropout,
layer_norm=encoder.layer_norm, mult_attn=encoder.mult_attn, **kwargs)
e -= tf.reduce_max(e, axis=1, keep_dims=True)
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)
T = encoder.attn_temperature or 1.0
exp = tf.exp(e / T) * mask
weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)
return weighted_average, weights
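# The weights above are a masked softmax over encoder positions: the energies are shifted by their
# maximum for numerical stability, scaled by the optional attention temperature T, and positions
# beyond each sequence's true length are zeroed out by the mask before normalization.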
def no_attention(state, hidden_states, *args, **kwargs):
batch_size = tf.shape(state)[0]
weighted_average = tf.zeros(shape=tf.stack([batch_size, 0]))
weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]])
return weighted_average, weights
def average_attention(hidden_states, encoder_input_length, *args, **kwargs):
# attention with fixed weights (average of all hidden states)
lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1])
weights = tf.to_float(mask) / lengths
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1])
weights = tf.to_float(weights)
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None,
context=None, **kwargs):
batch_size = tf.shape(state)[0]
attn_length = tf.shape(hidden_states)[1]
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
state_size = state.get_shape()[1].value
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
if pos is not None:
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
if pos is not None and encoder.attn_window_size > 0:
# `pred_edits` scenario, where we know the aligned pos
# when the windows size is non-zero, we concatenate consecutive encoder states
# and map it to the right attention vector size.
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = []
for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
pos_ = pos + offset
pos_ = tf.minimum(pos_, encoder_input_length - 1)
pos_ = tf.maximum(pos_, 0) # TODO: when pos is < 0, use <S> or </S>
weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
weighted_average.append(weighted_average_)
weighted_average = tf.concat(weighted_average, axis=1)
weighted_average = dense(weighted_average, encoder.attn_size)
elif pos is not None:
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
else:
# Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
wp = get_variable('Wp', [state_size, state_size])
vp = get_variable('vp', [state_size, 1])
pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
pos = tf.floor(encoder_input_length * pos)
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
idx = tf.reshape(idx, [-1, attn_length])
low = pos - encoder.attn_window_size
high = pos + encoder.attn_window_size
mlow = tf.to_float(idx < low)
mhigh = tf.to_float(idx > high)
m = mlow + mhigh
m += tf.to_float(idx >= encoder_input_length)
mask = tf.to_float(tf.equal(m, 0.0))
e = compute_energy(hidden_states, state, attn_size=encoder.attn_size, **kwargs)
weights = softmax(e, mask=mask)
sigma = encoder.attn_window_size / 2
numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
div = tf.truediv(numerator, 2 * sigma ** 2)
weights *= tf.exp(div) # result of the truncated normal distribution
# normalize to keep a probability distribution
# weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
return weighted_average, weights
def attention(encoder, **kwargs):
attention_functions = {
'global': global_attention,
'local': local_attention,
'none': no_attention,
'average': average_attention,
'last_state': last_state_attention
}
attention_function = attention_functions.get(encoder.attention_type, global_attention)
return attention_function(encoder=encoder, **kwargs)
def multi_attention(state, hidden_states, encoders, encoder_input_length, pos=None, aggregation_method='sum',
prev_weights=None, **kwargs):
attns = []
weights = []
context_vector = None
for i, (hidden, encoder, input_length) in enumerate(zip(hidden_states, encoders, encoder_input_length)):
pos_ = pos[i] if pos is not None else None
prev_weights_ = prev_weights[i] if prev_weights is not None else None
hidden = beam_search.resize_like(hidden, state)
input_length = beam_search.resize_like(input_length, state)
context_vector, weights_ = attention(state=state, hidden_states=hidden, encoder=encoder,
encoder_input_length=input_length, pos=pos_, context=context_vector,
prev_weights=prev_weights_, **kwargs)
attns.append(context_vector)
weights.append(weights_)
if aggregation_method == 'sum':
context_vector = tf.reduce_sum(tf.stack(attns, axis=2), axis=2)
else:
context_vector = tf.concat(attns, axis=1)
return context_vector, weights
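# multi_attention queries every encoder in turn and either sums the per-encoder context vectors
# (aggregation_method == 'sum') or concatenates them along the feature axis; the resulting context
# vector is what the attention decoder below consumes at each time step.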
def attention_decoder(decoder_inputs, initial_state, attention_states, encoders, decoder, encoder_input_length,
feed_previous=0.0, align_encoder_id=0, feed_argmax=True, **kwargs):
"""
:param decoder_inputs: int32 tensor of shape (batch_size, output_length)
:param initial_state: initial state of the decoder (usually the final state of the encoder),
as a float32 tensor of shape (batch_size, initial_state_size). This state is mapped to the
correct state size for the decoder.
:param attention_states: list of tensors of shape (batch_size, input_length, encoder_cell_size),
the hidden states of the encoder(s) (one tensor for each encoder).
:param encoders: configuration of the encoders
:param decoder: configuration of the decoder
:param encoder_input_length: list of int32 tensors of shape (batch_size,), tells for each encoder,
the true length of each sequence in the batch (sequences in the same batch are padded to all have the same
length).
:param feed_previous: scalar tensor corresponding to the probability to use previous decoder output
instead of the ground truth as input for the decoder (1 when decoding, between 0 and 1 when training)
:param feed_argmax: boolean tensor, when True the greedy decoder outputs the word with the highest
probability (argmax). When False, it samples a word from the probability distribution (softmax).
:param align_encoder_id: outputs attention weights for this encoder. Also used when predicting edit operations
        (pred_edits), to specify which encoder reads the sequence to post-edit (MT).
:return:
outputs of the decoder as a tensor of shape (batch_size, output_length, decoder_cell_size)
attention weights as a tensor of shape (output_length, encoders, batch_size, input_length)
"""
assert not decoder.pred_maxout_layer or decoder.cell_size % 2 == 0, 'cell size must be a multiple of 2'
if decoder.use_lstm is False:
decoder.cell_type = 'GRU'
embedding_shape = [decoder.vocab_size, decoder.embedding_size]
if decoder.embedding_initializer == 'sqrt3':
initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
else:
initializer = None
device = '/cpu:0' if decoder.embeddings_on_cpu else None
with tf.device(device):
embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer)
input_shape = tf.shape(decoder_inputs)
batch_size = input_shape[0]
time_steps = input_shape[1]
scope_name = 'decoder_{}'.format(decoder.name)
scope_name += '/' + '_'.join(encoder.name for encoder in encoders)
def embed(input_):
embedded_input = tf.nn.embedding_lookup(embedding, input_)
if decoder.use_dropout and decoder.word_keep_prob is not None:
noise_shape = [1, 1] if decoder.pervasive_dropout else [batch_size, 1]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape)
if decoder.use_dropout and decoder.embedding_keep_prob is not None:
size = tf.shape(embedded_input)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else [batch_size, size]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob,
noise_shape=noise_shape)
return embedded_input
def get_cell(input_size=None, reuse=False):
cells = []
for j in range(decoder.layers):
input_size_ = input_size if j == 0 else decoder.cell_size
if decoder.cell_type.lower() == 'lstm':
cell = CellWrapper(BasicLSTMCell(decoder.cell_size, reuse=reuse))
elif decoder.cell_type.lower() == 'dropoutgru':
cell = DropoutGRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm,
input_size=input_size_, input_keep_prob=decoder.rnn_input_keep_prob,
state_keep_prob=decoder.rnn_state_keep_prob)
else:
cell = GRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm)
if decoder.use_dropout and decoder.cell_type.lower() != 'dropoutgru':
cell = DropoutWrapper(cell, input_keep_prob=decoder.rnn_input_keep_prob,
output_keep_prob=decoder.rnn_output_keep_prob,
state_keep_prob=decoder.rnn_state_keep_prob,
variational_recurrent=decoder.pervasive_dropout,
dtype=tf.float32, input_size=input_size_)
cells.append(cell)
if len(cells) == 1:
return cells[0]
else:
return CellWrapper(MultiRNNCell(cells))
def look(state, input_, prev_weights=None, pos=None):
prev_weights_ = [prev_weights if i == align_encoder_id else None for i in range(len(encoders))]
pos_ = None
if decoder.pred_edits:
pos_ = [pos if i == align_encoder_id else None for i in range(len(encoders))]
if decoder.attn_prev_word:
state = tf.concat([state, input_], axis=1)
parameters = dict(hidden_states=attention_states, encoder_input_length=encoder_input_length,
encoders=encoders, aggregation_method=decoder.aggregation_method)
context, new_weights = multi_attention(state, pos=pos_, prev_weights=prev_weights_, **parameters)
if decoder.context_mapping:
with tf.variable_scope(scope_name):
activation = tf.nn.tanh if decoder.context_mapping_activation == 'tanh' else None
use_bias = not decoder.context_mapping_no_bias
context = dense(context, decoder.context_mapping, use_bias=use_bias, activation=activation,
name='context_mapping')
return context, new_weights[align_encoder_id]
def update(state, input_, context=None, symbol=None):
if context is not None and decoder.rnn_feed_attn:
input_ = tf.concat([input_, context], axis=1)
input_size = input_.get_shape()[1].value
initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
output, new_state = get_cell(input_size)(input_, state)
except ValueError: # auto_reuse doesn't work with LSTM cells
output, new_state = get_cell(input_size, reuse=True)(input_, state)
if decoder.skip_update and decoder.pred_edits and symbol is not None:
is_del = tf.equal(symbol, utils.DEL_ID)
new_state = tf.where(is_del, state, new_state)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
output = new_state
return output, new_state
def update_pos(pos, symbol, max_pos=None):
if not decoder.pred_edits:
return pos
is_keep = tf.equal(symbol, utils.KEEP_ID)
is_del = tf.equal(symbol, utils.DEL_ID)
is_not_ins = tf.logical_or(is_keep, is_del)
pos = beam_search.resize_like(pos, symbol)
max_pos = beam_search.resize_like(max_pos, symbol)
pos += tf.to_float(is_not_ins)
if max_pos is not None:
pos = tf.minimum(pos, tf.to_float(max_pos))
return pos
def generate(state, input_, context):
if decoder.pred_use_lstm_state is False: # for back-compatibility
state = state[:,-decoder.cell_size:]
projection_input = [state, context]
if decoder.use_previous_word:
projection_input.insert(1, input_) # for back-compatibility
output_ = tf.concat(projection_input, axis=1)
if decoder.pred_deep_layer:
deep_layer_size = decoder.pred_deep_layer_size or decoder.embedding_size
if decoder.layer_norm:
output_ = dense(output_, deep_layer_size, use_bias=False, name='deep_output')
output_ = tf.contrib.layers.layer_norm(output_, activation_fn=tf.nn.tanh, scope='output_layer_norm')
else:
output_ = dense(output_, deep_layer_size, activation=tf.tanh, use_bias=True, name='deep_output')
if decoder.use_dropout:
size = tf.shape(output_)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else None
output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape)
else:
if decoder.pred_maxout_layer:
maxout_size = decoder.maxout_size or decoder.cell_size
output_ = dense(output_, maxout_size, use_bias=True, name='maxout')
if decoder.old_maxout: # for back-compatibility with old models
output_ = tf.nn.pool(tf.expand_dims(output_, axis=2), window_shape=[2], pooling_type='MAX',
padding='SAME', strides=[2])
output_ = tf.squeeze(output_, axis=2)
else:
output_ = tf.maximum(*tf.split(output_, num_or_size_splits=2, axis=1))
if decoder.pred_embed_proj:
# intermediate projection to embedding size (before projecting to vocabulary size)
# this is useful to reduce the number of parameters, and
# to use the output embeddings for output projection (tie_embeddings parameter)
output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0')
if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer):
bias = get_variable('softmax1/bias', shape=[decoder.vocab_size])
output_ = tf.matmul(output_, tf.transpose(embedding)) + bias
else:
output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')
return output_
state_size = (decoder.cell_size * 2 if decoder.cell_type.lower() == 'lstm' else decoder.cell_size) * decoder.layers
if decoder.use_dropout:
initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob)
with tf.variable_scope(scope_name):
if decoder.layer_norm:
initial_state = dense(initial_state, state_size, use_bias=False, name='initial_state_projection')
initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=tf.nn.tanh,
scope='initial_state_layer_norm')
else:
initial_state = dense(initial_state, state_size, use_bias=True, name='initial_state_projection',
activation=tf.nn.tanh)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
initial_output = initial_state
else:
initial_output = initial_state[:, -decoder.cell_size:]
time = tf.constant(0, dtype=tf.int32, name='time')
outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))
states = tf.TensorArray(dtype=tf.float32, size=time_steps)
weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
attns = tf.TensorArray(dtype=tf.float32, size=time_steps)
initial_symbol = inputs.read(0) # first symbol is BOS
initial_input = embed(initial_symbol)
initial_pos = tf.zeros([batch_size], tf.float32)
initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
initial_context, _ = look(initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights)
initial_data = tf.concat([initial_state, initial_context, tf.expand_dims(initial_pos, axis=1), initial_weights],
axis=1)
context_size = initial_context.shape[1].value
def get_logits(state, ids, time): # for beam-search decoding
with tf.variable_scope('decoder_{}'.format(decoder.name)):
state, context, pos, prev_weights = tf.split(state, [state_size, context_size, 1, -1], axis=1)
input_ = embed(ids)
pos = tf.squeeze(pos, axis=1)
pos = tf.cond(tf.equal(time, 0),
lambda: pos,
lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
output = state
else:
# output is always the right-most part of state. However, this only works at test time,
# because different dropout operations can be used on state and output.
output = state[:, -decoder.cell_size:]
if decoder.conditional_rnn:
with tf.variable_scope('conditional_1'):
output, state = update(state, input_)
elif decoder.update_first:
output, state = update(state, input_, None, ids)
elif decoder.generate_first:
output, state = tf.cond(tf.equal(time, 0),
lambda: (output, state),
lambda: update(state, input_, context, ids))
context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)
if decoder.conditional_rnn:
with tf.variable_scope('conditional_2'):
output, state = update(state, context)
elif not decoder.generate_first:
output, state = update(state, input_, context, ids)
logits = generate(output, input_, context)
pos = tf.expand_dims(pos, axis=1)
state = tf.concat([state, context, pos, new_weights], axis=1)
return state, logits
def _time_step(time, input_, input_symbol, pos, state, output, outputs, states, weights, attns, prev_weights,
samples):
if decoder.conditional_rnn:
with tf.variable_scope('conditional_1'):
output, state = update(state, input_)
elif decoder.update_first:
output, state = update(state, input_, None, input_symbol)
context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)
if decoder.conditional_rnn:
with tf.variable_scope('conditional_2'):
output, state = update(state, context)
elif not decoder.generate_first:
output, state = update(state, input_, context, input_symbol)
output_ = generate(output, input_, context)
argmax = lambda: tf.argmax(output_, 1)
target = lambda: inputs.read(time + 1)
softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
axis=1)
use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
predicted_symbol = tf.case([
(use_target, target),
(tf.logical_not(feed_argmax), softmax)],
default=argmax) # default case is useful for beam-search
predicted_symbol.set_shape([None])
predicted_symbol = tf.stop_gradient(predicted_symbol)
samples = samples.write(time, predicted_symbol)
input_ = embed(predicted_symbol)
pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])
attns = attns.write(time, context)
weights = weights.write(time, new_weights)
states = states.write(time, state)
outputs = outputs.write(time, output_)
if not decoder.conditional_rnn and not decoder.update_first and decoder.generate_first:
output, state = update(state, input_, context, predicted_symbol)
return (time + 1, input_, predicted_symbol, pos, state, output, outputs, states, weights, attns, new_weights,
samples)
with tf.variable_scope('decoder_{}'.format(decoder.name)):
_, _, _, new_pos, new_state, _, outputs, states, weights, attns, new_weights, samples = tf.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, initial_input, initial_symbol, initial_pos, initial_state, initial_output, outputs,
weights, states, attns, initial_weights, samples),
parallel_iterations=decoder.parallel_iterations,
swap_memory=decoder.swap_memory)
outputs = outputs.stack()
weights = weights.stack() # batch_size, encoders, output time, input time
states = states.stack()
attns = attns.stack()
samples = samples.stack()
# put batch_size as first dimension
outputs = tf.transpose(outputs, perm=(1, 0, 2))
weights = tf.transpose(weights, perm=(1, 0, 2))
states = tf.transpose(states, perm=(1, 0, 2))
attns = tf.transpose(attns, perm=(1, 0, 2))
samples = tf.transpose(samples)
return outputs, weights, states, attns, samples, get_logits, initial_data
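# Decoding runs through the tf.while_loop above: at each step the previous symbol is embedded,
# attention ('look') produces a context vector, the recurrent state is updated ('update'), and
# 'generate' projects to vocabulary logits. `feed_previous` is the probability of feeding the
# model's own prediction back in instead of the ground truth, and `get_logits` re-exposes a single
# decoding step for beam search.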
def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, align_encoder_id=0,
encoder_input_length=None, feed_argmax=True, **kwargs):
decoder = decoders[0]
targets = targets[0] # single decoder
if encoder_input_length is None:
encoder_input_length = []
for encoder_inputs_ in encoder_inputs:
weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))
parameters = dict(encoders=encoders, decoder=decoder, encoder_inputs=encoder_inputs,
feed_argmax=feed_argmax)
target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
attention_states, encoder_state, encoder_input_length = multi_encoder(
encoder_input_length=encoder_input_length, **parameters)
outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
decoder_inputs=targets[:, :-1], align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length,
**parameters
)
xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=target_weights)
losses = xent_loss
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
def chained_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
chaining_strategy=None, align_encoder_id=0, chaining_non_linearity=False,
chaining_loss_ratio=1.0, chaining_stop_gradient=False, **kwargs):
decoder = decoders[0]
targets = targets[0] # single decoder
assert len(encoders) == 2
encoder_input_length = []
input_weights = []
for encoder_inputs_ in encoder_inputs:
weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
input_weights.append(weights)
encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))
target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
parameters = dict(encoders=encoders[1:], decoder=encoders[0])
attention_states, encoder_state, encoder_input_length[1:] = multi_encoder(
encoder_inputs[1:], encoder_input_length=encoder_input_length[1:], **parameters)
decoder_inputs = encoder_inputs[0][:, :-1]
batch_size = tf.shape(decoder_inputs)[0]
pad = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32) * utils.BOS_ID
decoder_inputs = tf.concat([pad, decoder_inputs], axis=1)
outputs, _, states, attns, _, _, _ = attention_decoder(
attention_states=attention_states, initial_state=encoder_state, decoder_inputs=decoder_inputs,
encoder_input_length=encoder_input_length[1:], **parameters
)
chaining_loss = sequence_loss(logits=outputs, targets=encoder_inputs[0], weights=input_weights[0])
if decoder.cell_type.lower() == 'lstm':
size = states.get_shape()[2].value
decoder_outputs = states[:, :, size // 2:]
else:
decoder_outputs = states
if chaining_strategy == 'share_states':
other_inputs = states
elif chaining_strategy == 'share_outputs':
other_inputs = decoder_outputs
else:
other_inputs = None
if other_inputs is not None and chaining_stop_gradient:
other_inputs = tf.stop_gradient(other_inputs)
parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1],
other_inputs=other_inputs)
attention_states, encoder_state, encoder_input_length[:1] = multi_encoder(
encoder_input_length=encoder_input_length[:1], **parameters)
if chaining_stop_gradient:
attns = tf.stop_gradient(attns)
states = tf.stop_gradient(states)
decoder_outputs = tf.stop_gradient(decoder_outputs)
if chaining_strategy == 'concat_attns':
attention_states[0] = tf.concat([attention_states[0], attns], axis=2)
elif chaining_strategy == 'concat_states':
attention_states[0] = tf.concat([attention_states[0], states], axis=2)
elif chaining_strategy == 'sum_attns':
attention_states[0] += attns
elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'):
if chaining_strategy == 'map_attns':
x = attns
elif chaining_strategy == 'map_outputs':
x = decoder_outputs
else:
x = states
shape = [x.get_shape()[-1], attention_states[0].get_shape()[-1]]
w = tf.get_variable("map_attns/matrix", shape=shape)
b = tf.get_variable("map_attns/bias", shape=shape[-1:])
x = tf.einsum('ijk,kl->ijl', x, w) + b
if chaining_non_linearity:
x = tf.nn.tanh(x)
attention_states[0] += x
outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
attention_states=attention_states, initial_state=encoder_state,
feed_previous=feed_previous, decoder_inputs=targets[:,:-1],
align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length[:1],
**parameters
)
xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:],
weights=target_weights)
if chaining_loss is not None and chaining_loss_ratio:
xent_loss += chaining_loss_ratio * chaining_loss
losses = [xent_loss, None, None]
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
def softmax(logits, dim=-1, mask=None):
e = tf.exp(logits)
if mask is not None:
e *= mask
return e / tf.clip_by_value(tf.reduce_sum(e, axis=dim, keep_dims=True), 10e-37, 10e+37)
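# This masked softmax is used by local_attention: exponentiated logits outside the mask/window are
# zeroed, and the denominator is clipped away from zero so the division stays well defined even
# when the mask removes every position.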
def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True):
batch_size = tf.shape(targets)[0]
time_steps = tf.shape(targets)[1]
logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_)
crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps]))
log_perp = tf.reduce_sum(crossent * weights, axis=1)
if average_across_timesteps:
total_size = tf.reduce_sum(weights, axis=1)
total_size += 1e-12 # just to avoid division by 0 for all-0 weights
log_perp /= total_size
cost = tf.reduce_sum(log_perp)
if average_across_batch:
return cost / tf.to_float(batch_size)
else:
return cost
def get_weights(sequence, eos_id, include_first_eos=True):
cumsum = tf.cumsum(tf.to_float(tf.not_equal(sequence, eos_id)), axis=1)
range_ = tf.range(start=1, limit=tf.shape(sequence)[1] + 1)
range_ = tf.tile(tf.expand_dims(range_, axis=0), [tf.shape(sequence)[0], 1])
weights = tf.to_float(tf.equal(cumsum, tf.to_float(range_)))
if include_first_eos:
weights = weights[:,:-1]
shape = [tf.shape(weights)[0], 1]
weights = tf.concat([tf.ones(tf.stack(shape)), weights], axis=1)
return tf.stop_gradient(weights)
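# get_weights builds a per-token loss/length mask: for a row [w1, w2, EOS, EOS] it returns
# [1, 1, 1, 0] when include_first_eos=True (the first EOS is still scored), and tf.stop_gradient
# keeps gradients from flowing through the mask itself.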
|
[
"574751346@qq.com"
] |
574751346@qq.com
|
5c9ac3b06991bb157e92c298215e3f2f75df99ef
|
911d961e2dfe786ca163d0eb5016e3edd7f909b8
|
/backjoon/11399.py
|
0bb55a3a817b59f88d05e760d6d1fcecc561487f
|
[] |
no_license
|
kkhhkk/Study-Algorithms
|
abf8f4897c8ffc33d149930631275851fecb282d
|
28a69c71be8dd526c93e1414bc7531c9f4bb1710
|
refs/heads/master
| 2023-05-07T23:55:35.817235
| 2021-06-01T15:11:49
| 2021-06-01T15:11:49
| 335,855,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
# Problem 11399 (Baekjoon)
# Problem
# Inha Bank has a single ATM, and N people are currently standing in line in front of it. The people are numbered 1 to N,
# and person i needs P_i minutes to withdraw their money.
# The total time needed for everyone to withdraw depends on the order in which they line up. For example, suppose there are
# 5 people with P1 = 3, P2 = 1, P3 = 4, P4 = 3, P5 = 2. If they line up in the order [1, 2, 3, 4, 5], person 1 finishes in 3 minutes.
# Person 2 has to wait for person 1, so they finish after 3+1 = 4 minutes. Person 3 has to wait for persons 1 and 2,
# so they need 3+1+4 = 8 minutes in total. Person 4 needs 3+1+4+3 = 11 minutes, and person 5 needs 3+1+4+3+2 = 13 minutes.
# In this case the sum of the times needed is 3+4+8+11+13 = 39 minutes.
# If they line up in the order [2, 5, 1, 4, 3] instead, person 2 finishes in 1 minute, person 5 in 1+2 = 3 minutes, person 1 in 1+2+3 = 6 minutes,
# person 4 in 1+2+3+3 = 9 minutes, and person 3 in 1+2+3+3+4 = 13 minutes. The sum of the times is 1+3+6+9+13 = 32 minutes,
# and no ordering achieves a smaller total.
# Given the number of people N and the withdrawal time P_i of each person, write a program that finds the minimum possible sum of the times needed.
# Input
# The first line contains the number of people N (1 ≤ N ≤ 1,000). The second line contains the withdrawal times P_i (1 ≤ P_i ≤ 1,000).
# Output
# Print the minimum possible sum of the times each person needs to withdraw their money.
n = int(input())
arr = [int(x) for x in input().split()]
arr.sort()
cnt = 0
for i in range(n):
cnt += (n-i) * arr[i]
print(cnt)
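# Why ascending order is optimal: person i's withdrawal time is added to the waiting time of
# everyone behind them, so arr[i] is counted (n - i) times in the sum above; assigning the largest
# multipliers to the smallest times minimizes the total (a standard greedy exchange argument).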
|
[
"78129787+kkhhkk@users.noreply.github.com"
] |
78129787+kkhhkk@users.noreply.github.com
|
0e928393f712c9aa0b389fc8e611da0166635eb7
|
a3d6eb92c6fc6ed6095d10bc7b329cb8b4a8b166
|
/src/config.py
|
9f147f5f9b4dfea32202702abb0161c77899e708
|
[] |
no_license
|
hgiesel/anki_straight_reward
|
ab29e6f154beba7c5f2f9bd9579c21cdd942b218
|
3fe72255f667d2eb544afb2541a1eb974c23eede
|
refs/heads/master
| 2022-12-23T22:38:28.432113
| 2022-12-12T13:46:36
| 2022-12-12T13:46:36
| 245,190,627
| 19
| 4
| null | 2023-08-28T16:01:55
| 2020-03-05T14:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,381
|
py
|
from anki.cards import Card
from aqt import mw
from .types import StraightSetting
DEFAULT_SETTINGS = StraightSetting(2, True, 5, 5, 130, 250)
KEYWORD = "straightReward"
def serialize_setting(setting: StraightSetting) -> dict:
return {
"enableNotifications": setting.enable_notifications,
"straightLength": setting.straight_length,
"baseEase": setting.base_ease,
"stepEase": setting.step_ease,
"startEase": setting.start_ease,
"stopEase": setting.stop_ease,
}
def deserialize_setting(
straight_length: int,
enable_notifications: bool,
base_ease: int,
step_ease: int,
start_ease: int,
stop_ease: int,
) -> StraightSetting:
return StraightSetting(
straight_length,
enable_notifications,
base_ease,
step_ease,
start_ease,
stop_ease,
)
def deserialize_setting_from_dict(setting_data: dict) -> StraightSetting:
return StraightSetting(
setting_data["straightLength"]
if "straightLength" in setting_data
else DEFAULT_SETTINGS.straight_length,
setting_data["enableNotifications"]
if "enableNotifications" in setting_data
else DEFAULT_SETTINGS.enable_notifications,
setting_data["baseEase"]
if "baseEase" in setting_data
else DEFAULT_SETTINGS.base_ease,
setting_data["stepEase"]
if "stepEase" in setting_data
else DEFAULT_SETTINGS.step_ease,
setting_data["startEase"]
if "startEase" in setting_data
else DEFAULT_SETTINGS.start_ease,
setting_data["stopEase"]
if "stopEase" in setting_data
else DEFAULT_SETTINGS.stop_ease,
)
def get_setting_from_config(config) -> StraightSetting:
try:
return deserialize_setting_from_dict(config[KEYWORD])
except:
return get_default_setting()
def get_setting_from_card(card: Card) -> StraightSetting:
# confForDid did resort to conf for default deck if not available (TODO is this still true?)
config = mw.col.decks.config_dict_for_deck_id(card.odid or card.did)
return get_setting_from_config(config)
def get_default_setting() -> StraightSetting:
return DEFAULT_SETTINGS
def write_setting(config, setting: StraightSetting):
config[KEYWORD] = serialize_setting(setting)
mw.col.decks.update_config(config)
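# Minimal usage sketch (illustrative only; `card` is assumed to be an anki.cards.Card handed to the
# add-on by a reviewer hook):
#   setting = get_setting_from_card(card)   # settings for the card's deck, DEFAULT_SETTINGS on failure
#   config = mw.col.decks.config_dict_for_deck_id(card.odid or card.did)
#   write_setting(config, setting)           # persists the settings under the "straightReward" key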
|
[
"hengiesel@gmail.com"
] |
hengiesel@gmail.com
|
1c482da35a8e578fdc4f0045a27fb730c1b5717e
|
45d4e36581bfe9982619aa13c255e703d50e77da
|
/blog/forms.py
|
2ff9e417e2b1c65f9e98e25ddfd6e1ab12aedf29
|
[] |
no_license
|
Sagar746/Project_blog
|
b93cedb08263e188f003d3af3462ea4273d1dec8
|
5650f5eca9cce6a011992c8c651ffb420bd3ff9e
|
refs/heads/master
| 2023-03-08T04:41:43.621844
| 2021-02-27T17:53:37
| 2021-02-27T17:53:37
| 342,921,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
from django import forms
from .models import Post
from idea.models import Idea
class PostCreateForm(forms.ModelForm):
class Meta:
model=Post
fields=['title','description','category','image']
class PostUpdateForm(forms.ModelForm):
class Meta:
model=Post
fields=['title','description','category','image']
class IdeaCreateForm(forms.ModelForm):
class Meta:
model=Idea
fields=['author','title','description','photo']
class IdeaUpdateForm(forms.ModelForm):
class Meta:
model=Idea
fields=['author','title','description','photo']
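# Usage sketch (illustrative; the surrounding view code is an assumption, not part of this file):
#   form = PostCreateForm(request.POST, request.FILES)   # FILES is needed for the 'image' field
#   if form.is_valid():
#       form.save()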
|
[
"saagartiwari722427@gmail.com"
] |
saagartiwari722427@gmail.com
|
57f400f6021873fa23f9efa7d91a3e2a001f4aba
|
6ca12dab48862659c1426e5e245f961c661e5c42
|
/check_gibdd_fines.py
|
9c9d377cda88eb8eb9887b55bb395acfaac997d0
|
[
"MIT"
] |
permissive
|
Wolfram-180/check_gibdd_fines
|
0d69b7269fe7b47fbf59e4a8091efa96c2cdd3f2
|
e94a9368fba549fb61387859ba1ea92747bda536
|
refs/heads/main
| 2023-09-02T12:43:58.745457
| 2021-11-01T10:21:02
| 2021-11-01T10:21:02
| 423,421,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
import time
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import datetime
import emailwork
import re
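# What this script does: for every car plate in `cars` it opens the corresponding GIBDD fines page
# in Firefox, clicks the "check fines" button, waits for the check to run, and then emails whether
# an unpaid fine was found. The module-level FirefoxBinary assignment below is shadowed inside
# init_driver and is effectively unused.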
debug = True
WebDriverWaitInSec = 30
binary = FirefoxBinary('c:\geckodriver\geckodriver.exe')
def init_driver():
binary = r'c:\Program Files (x86)\Mozilla Firefox\firefox.exe'
options = Options()
options.binary = binary
cap = DesiredCapabilities().FIREFOX
cap["marionette"] = True #optional
return webdriver.Firefox(options=options, capabilities=cap, executable_path="c:\\geckodriver\\geckodriver.exe")
def sleep(secs, place='whatever'):
ttlsecs = secs
while secs > 0:
time.sleep(1)
print('now: {} - {} of {} in {}'.format(datetime.datetime.now(), secs, ttlsecs, place))
secs -= 1
cars = {'х988то750': 'https://xn--90adear.xn--p1ai/check/fines#%D1%85988%D1%82%D0%BE+750+9907379357',
'в691ем777': 'https://xn--90adear.xn--p1ai/check/fines#%D0%B2691%D0%B5%D0%BC+777+5047741110'}
if __name__ == "__main__":
for car, link in cars.items():
browser = init_driver()
        sleep(10, 'starting')
        browser.get(link)
        sleep(20, 'opening the GIBDD site')
        btn_check = browser.find_element_by_xpath('//*[@id="checkFines"]/p[4]/a')
        btn_check.click()
        sleep(180, 'waiting for the check')
        src = browser.page_source
        # the Russian phrase below is the site's "the check found no records of unpaid fines" message
        text_found = re.search(r'В результате проверки не были найдены сведения о неуплаченных штрафах', src)
        if text_found is None:
            emailwork.send_mail('user@gmail.com', car + ' - есть штраф', link)  # "fine found"
        else:
            emailwork.send_mail('user@gmail.com', car + ' - нет штрафа', link)  # "no fine"
browser.quit()
|
[
"noreply@github.com"
] |
noreply@github.com
|
314181b6076dd4f353ab40ca9d5695c63949d5ba
|
c0a9460591dcb5a322c1c5ec8b67e1d775f8f4ba
|
/advec_1d/dg_modal_gpu.py
|
d4b79c2b6d4cba9f5cc88e1e69a5f264c41dfdf1
|
[] |
no_license
|
wbkifun/my_research
|
aca8f5132d03de2e15adc3b0ded164fbd89e38a3
|
eb7e61f5405834dcbea240665bdc819f4b3f97bf
|
refs/heads/master
| 2020-12-24T16:40:29.722161
| 2016-03-07T00:31:15
| 2016-03-07T00:31:15
| 5,176,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,676
|
py
|
#===============================================================================
# DG method with modal basis functions
# 1-D advection equation
# ------------------------------------------------------------------------------
#
# Last update: 2012.4.26
# ------------------------------------------------------------------------------
#
# <Description>
# - basis function: Legendre polynomial
# - boundary condition: periodic
# - initial condition: Gaussian shape
# - numerical integration: Gauss quadrature (Gauss-Lobatto rules)
# - time evolution: 4th-order Runge-Kutta
# - Legendre polynomial calculations: Numpy module (numpy.polynomial.legendre)
#
# <Variables>
# - ux solution u(x) at t in physical domain
# - ul spectral components of u(x) in Legendre polynomial space
# - fl spectral components of f(u), f=vu is used
# - v fluid velocity
# - ne # of elements
# - nne # of gaussian quadrature nodes in a element
# - nn # of total nodes
# - x4n global coordinates for each nodes
# - sle slice indices in a element
# - sles list of sle s
#
# <History>
# 2012.4.26 Class inheritance by Ki-Hwan Kim
# Reduce number of kernels (4 -> 2)
# 2012.4.25 fix dx -> de by Ki-Hwan Kim
# 2012.4.24 CUDA version by Ki-Hwan Kim
# 2012.4.14 Convert to object-oriented by Ki-Hwan Kim
# 2012.4.13 Rewritten using Python by Ki-Hwan Kim
# 2012.3.27 Matlab code by Shin-Hoo Kang
#===============================================================================
from __future__ import division
from dg_modal_base import DGModalBase
import numpy as np
import pycuda.driver as cuda
class DGModalGpu(DGModalBase):
def __init__(self, ne, p_degree, cfl=0.1, v=0.5, target_gpu=0):
cuda.init()
self.dev = cuda.Device(target_gpu)
self.ctx = self.dev.make_context()
import atexit
atexit.register(self.ctx.pop)
super(DGModalGpu, self).__init__(ne, p_degree, cfl, v)
def allocation(self):
super(DGModalGpu, self).allocation()
self.ul_gpu = cuda.to_device(self.ul)
self.ul_prev_gpu = cuda.to_device(self.ul)
self.ul_tmp_gpu = cuda.to_device(self.ul)
self.kl_gpu = cuda.to_device(self.ul)
self.el_sum_gpu = cuda.to_device(np.zeros(self.ne))
def x2l(self):
super(DGModalGpu, self).x2l()
cuda.memcpy_htod(self.ul_gpu, self.ul)
def l2x(self):
cuda.memcpy_dtoh(self.ul, self.ul_gpu)
super(DGModalGpu, self).l2x()
def prepare_update(self):
from pycuda.compiler import SourceModule
import os
src_path = '/'.join( os.path.abspath(__file__).split('/')[:-1] )
kernels = open(src_path + '/core.cu').read()
mod = SourceModule(kernels)
#mod = cuda.module_from_file('core.cubin')
self.update_pre = mod.get_function('update_pre')
self.update_ul = mod.get_function('update_ul')
def update(self):
nn, ne, nne = np.int32([self.nn, self.ne, self.nne])
dt, de, vf = np.float64([self.dt, self.de, self.vf])
bs, gs = (256,1,1), (self.nn//256+1,1)
ul, ul_prev, ul_tmp = self.ul_gpu, self.ul_prev_gpu, self.ul_tmp_gpu
kl = self.kl_gpu
el_sum = self.el_sum_gpu
c_ul_tmps = np.float32([0, 0.5, 0.5, 1])
c_uls = np.float32([1./6, 1./3, 1./3, 1./6])
cuda.memcpy_dtod(ul_prev, ul, self.ul.nbytes)
for c_ul_tmp, c_ul in zip(c_ul_tmps, c_uls):
self.update_pre(nn, nne, vf, c_ul_tmp, ul, ul_prev, ul_tmp, kl, el_sum, block=bs, grid=gs)
self.update_ul(nn, ne, nne, dt, de, vf, c_ul, ul, ul_tmp, kl, el_sum, block=bs, grid=gs)
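# Instantiation sketch (illustrative; the time-stepping entry point and allocation flow are defined
# in DGModalBase, which is not shown here):
#   solver = DGModalGpu(ne=100, p_degree=4, cfl=0.1, v=0.5, target_gpu=0)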
|
[
"kh.kim@kiaps.org"
] |
kh.kim@kiaps.org
|
a75af14244df6896f34f5a5d274727df813bad8c
|
fc65e7c55058361edc163a82ea9d33d27d040268
|
/wsgi/pecan/golden/hooks.py
|
989933c7fe27c5ec77a72c8546846d75491f5fcb
|
[] |
no_license
|
zhaozhilong1993/openstack_test
|
19a011029b4b9c9fa4411df793a1bd1e23e2da1d
|
a1153f4d258cc73e31f3e2604ab7309eb03879de
|
refs/heads/master
| 2021-01-21T14:44:48.605566
| 2016-07-16T03:53:21
| 2016-07-16T03:53:21
| 58,913,030
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
from pecan import hooks
from golden.db import api as db_api
class DBHook(hooks.PecanHook):
"""Create a db connection instance."""
def before(self, state):
state.request.db_conn = db_api.Connection()
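# Typical registration (illustrative; the root controller path is an assumption, the real wiring
# lives in the project's app setup):
#   from pecan import make_app
#   app = make_app('golden.controllers.root.RootController', hooks=[DBHook()])
# after which every request handled by the app carries state.request.db_conn created in before().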
|
[
"root@unitedstack.com"
] |
root@unitedstack.com
|
841afbf9bb4cdfd5cafcbc0c6f3f11f329e527fc
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptcapacity/l3v4usage32per.py
|
ec7d3ed093f8b0173f4a279c5813fc5c3027ba2f
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 12,181
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3v4Usage32Per(Mo):
meta = StatsClassMeta("cobra.model.eqptcapacity.L3v4Usage32Per", "Layer3 v4 32 entries usage percentage")
counter = CounterMeta("normalizedv4Total", CounterCategory.GAUGE, "percentage", "Total v4 32 L3 entries usage percentage")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "normalizedv4TotalLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "normalizedv4TotalMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "normalizedv4TotalMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "normalizedv4TotalAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "normalizedv4TotalSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "normalizedv4TotalTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "normalizedv4TotalThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "normalizedv4TotalTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "normalizedv4TotalTr"
meta._counters.append(counter)
meta.isAbstract = True
meta.moClassName = "eqptcapacityL3v4Usage32Per"
meta.moClassName = "eqptcapacityL3v4Usage32Per"
meta.rnFormat = ""
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Layer3 v4 32 entries usage percentage stats"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per1d")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per5min")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per1qtr")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per1h")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per1mo")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per1year")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per1w")
meta.concreteSubClasses.add("cobra.model.eqptcapacity.L3v4Usage32Per15min")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "normalizedv4TotalAvg", "normalizedv4TotalAvg", 44076, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 32 L3 entries usage percentage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalAvg", prop)
prop = PropMeta("str", "normalizedv4TotalLast", "normalizedv4TotalLast", 44073, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v4 32 L3 entries usage percentage current value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalLast", prop)
prop = PropMeta("str", "normalizedv4TotalMax", "normalizedv4TotalMax", 44075, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 32 L3 entries usage percentage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalMax", prop)
prop = PropMeta("str", "normalizedv4TotalMin", "normalizedv4TotalMin", 44074, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 32 L3 entries usage percentage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalMin", prop)
prop = PropMeta("str", "normalizedv4TotalSpct", "normalizedv4TotalSpct", 44077, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 32 L3 entries usage percentage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalSpct", prop)
prop = PropMeta("str", "normalizedv4TotalThr", "normalizedv4TotalThr", 44079, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 32 L3 entries usage percentage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("normalizedv4TotalThr", prop)
prop = PropMeta("str", "normalizedv4TotalTr", "normalizedv4TotalTr", 44081, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 32 L3 entries usage percentage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalTr", prop)
prop = PropMeta("str", "normalizedv4TotalTrBase", "normalizedv4TotalTrBase", 44080, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v4 32 L3 entries usage percentage trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalTrBase", prop)
prop = PropMeta("str", "normalizedv4TotalTtl", "normalizedv4TotalTtl", 44078, PropCategory.IMPLICIT_TOTAL)
prop.label = "Total v4 32 L3 entries usage percentage total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4TotalTtl", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
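# Hedged usage sketch (not part of the generated metadata above): one way to read
# these current-usage counters with the Cisco ACI cobra SDK. MoDirectory, LoginSession,
# login() and lookupByClass() are standard cobra calls; the APIC URL, the credentials
# and the concatenated class name 'eqptcapacityL3v4Usage32Per5min' are illustrative
# assumptions only.
if __name__ == '__main__':
    from cobra.mit.access import MoDirectory
    from cobra.mit.session import LoginSession

    session = LoginSession('https://apic.example.com', 'admin', 'password')
    mo_dir = MoDirectory(session)
    mo_dir.login()
    # Each returned Mo carries the properties declared above (cnt, normalizedv4TotalLast, ...).
    for mo in mo_dir.lookupByClass('eqptcapacityL3v4Usage32Per5min'):
        print('%s %s' % (mo.dn, mo.normalizedv4TotalLast))
    mo_dir.logout()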
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
076a1e0256af4f30ffa33586bba32919f063825a
|
eac8a6ee0065627de15efe4e4f829c1489675879
|
/tests/test_problem01.py
|
d4f4b193638c61937bea711f33fae3f670ff8fa1
|
[
"MIT"
] |
permissive
|
ishaansharma/blind-75-python
|
ff8163ae9f2757a3e69895b468531ac5ad0eaf05
|
b92ef3449eb0143c760ddd339897a3f0a2972830
|
refs/heads/master
| 2023-03-16T11:29:14.620223
| 2020-03-09T00:57:39
| 2020-03-09T00:57:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import unittest
from problems.problem01 import solution
class Test(unittest.TestCase):
def test(self):
self.assertListEqual(solution([2, 7, 11, 15], 9), [0, 1])
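# Optional entry point (added for convenience, not in the original file) so the
# test can also be run directly with `python test_problem01.py`.
if __name__ == '__main__':
    unittest.main()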
|
[
"nolan@nolanwright.dev"
] |
nolan@nolanwright.dev
|
a5c614cd15adeb9672fcd1647fab7785834fd360
|
af994d1163f445a79797186fae4c828670e575c7
|
/env/bin/pip2.7
|
eca04ab69df21074ce698efb4185e442cf920ea9
|
[
"MIT"
] |
permissive
|
dhruvshah1214/jarvis-shah-nlp
|
03a80a9f8dbb0581549736b3cf9fd254b7a893f9
|
b720dc5ab3af7ed223483ce6919b84c4ad56e4a2
|
refs/heads/master
| 2021-01-12T15:34:57.388006
| 2016-10-24T23:51:26
| 2016-10-24T23:51:26
| 71,840,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
7
|
#!/Users/hitesh/Documents/workspace/JARVIS/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"dhruv.shah@gmail.com"
] |
dhruv.shah@gmail.com
|
d2e49530b1207762fac4e9a833f1827195ccf153
|
7830d2bbd370202844b60238dd0a2d08105f9e19
|
/koch.py
|
dfe5cb8c1d0cc4d96547bd5a365a7cb4baf50a7e
|
[] |
no_license
|
bchangip/DAE-Backend
|
cc4abf1ce253f46eab502c4870299ffec217c942
|
44fc7ba4901d5ec9522d9d550b0321f90837b6bc
|
refs/heads/master
| 2020-03-30T22:40:48.948142
| 2018-10-24T20:20:00
| 2018-10-24T20:20:00
| 151,674,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,557
|
py
|
import time
import multiprocessing
import requests
import os
import xlrd
import csv
import sys
import mysql.connector
import pandas as pd
import numpy as np
import json
import emotiv_dnn as dnn
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import preprocessing, neighbors, svm
def getSensorArray(person, sensor):
with open(person+'.csv') as csvfile:
dataInput = csv.reader(csvfile, delimiter=' ', quotechar='|')
print('Reading CSV', dataInput)
array = []
count_array = 0
current_sec = 0
current_count = 0
current_sum = 0
for row in dataInput:
line = ''.join(row)
splitData = line.split('\t')
#print 'Second of Data ' + str(splitData[0])
#Using sensors: AF3, AF4, F3 and F4
for i in range(1, len(splitData)-1):
splitDots= splitData[i].split(":")
splitCommas = splitDots[1].split(',')
if splitDots[0] == sensor :
if current_sec == int(splitData[0]) :
current_sum = current_sum + int(splitCommas[1])
current_count += 1
else:
average = current_sum / current_count
current_sec = int(splitData[0])
current_sum = int(splitCommas[1])
current_count= 0
array.append(average)
#af3 = af3 + str(count_af3) + ',' + str(average) + '\n'
count_array = count_array+1
return array
def getSex(sexo):
if sexo == 'Masculino':
return 0
else:
return 1
def getDemographic(
edad,
pebl,
dsmt,
hare,
ciep,
cief,
ciec,
ciem,
ciex,
cies,
cie
):
demographics = []
demographics.append(edad)
demographics.append(pebl)
demographics.append(dsmt)
demographics.append(hare)
demographics.append(ciep)
demographics.append(cief)
demographics.append(ciec)
demographics.append(ciem)
demographics.append(ciex)
demographics.append(cies)
demographics.append(cie)
return demographics
def insert_BD (
second,
sexo,
edad,
pebl,
dsmt,
hare,
ciep,
cief,
ciec,
ciem,
ciex,
cies,
cie
):
con = mysql.connector.connect(
host="localhost",
user="root",
passwd="admin",
database="megaproyecto"
)
cur = con.cursor()
person_count = 1
q_count = 1
var_count=1
meassure_count = 1
cur.execute('DELETE FROM `megaproyecto`.`medicion`;')
con.commit()
cur.execute('DELETE FROM `megaproyecto`.`pregunta`;')
con.commit()
cur.execute('DELETE FROM `megaproyecto`.`sensor`;')
con.commit()
cur.execute('DELETE FROM `megaproyecto`.`variable`;')
con.commit()
cur.execute('DELETE FROM `megaproyecto`.`variables`;')
con.commit()
cur.execute('DELETE FROM `megaproyecto`.`persona`;')
con.commit()
cur.execute('DELETE FROM `megaproyecto`.`campo`;')
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (1, 'Sexo'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (2, 'Edad'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (3, 'Pebl'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (4, 'Dsmt'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (5, 'Hare'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (6, 'Ciep'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (7, 'Cief'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (8, 'Ciec'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (9, 'Ciem'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (10, 'Ciex'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (11, 'Cies'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`campo` VALUES (%s,%s);', (12, 'Cie'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`sensor` (`id`, `sensor`) VALUES (%s,%s);', (1, 'AF3'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`sensor` (`id`, `sensor`) VALUES (%s,%s);', (2, 'F3'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`sensor` (`id`, `sensor`) VALUES (%s,%s);', (3, 'AF4'))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`sensor` (`id`, `sensor`) VALUES (%s,%s);', (4, 'F4'))
con.commit()
person = 'person'
af3 = getSensorArray(person, 'AF3')
print('something')
f3 = getSensorArray(person, 'F3')
af4 = getSensorArray(person, 'AF4')
f4 = getSensorArray(person, 'F4')
if af3 != 0:
person_count += 1
cur.execute('INSERT INTO `megaproyecto`.`persona` (`idpersona`,`codigo`) VALUES (%s, %s);', (int(person_count), str(person)))
con.commit()
cur.execute('INSERT INTO `megaproyecto`.`variables` (`id`,`id_persona`) VALUES (%s,%s);', (person_count-1, person_count))
con.commit()
sex = getSex(sexo)
if sex != 2:
cur.execute('INSERT INTO `megaproyecto`.`variable` (`id`, `id_variable`, `id_campo`, `valor`, `variablecol`) VALUES (%s, %s , 1, %s,1);', (var_count, person_count-1,sex))
con.commit()
var_count += 1
demographics = getDemographic(edad, pebl, dsmt, hare, ciep, cief, ciec, ciem, ciex, cies, cie)
if demographics != 0:
dem_count = 1
for demo in demographics:
print(demo)
cur.execute('INSERT INTO `megaproyecto`.`variable` (`id`, `id_variable`, `id_campo`, `valor`, `variablecol`) VALUES (%s, %s , %s, %s,1);', (var_count, person_count-1, 1+dem_count ,demo))
con.commit()
var_count += 1
dem_count += 1
verdad = True
contador = 1
local_q_count = 1
if verdad:
cur.execute('INSERT INTO `megaproyecto`.`pregunta` (`id`, `id_persona`,`numero`,`veracidad`) VALUES (%s, %s,%s,%s);', (q_count, int(person_count), local_q_count, str(verdad)))
con.commit()
else:
cur.execute('INSERT INTO `megaproyecto`.`pregunta` (`id`, `id_persona`,`numero`,`veracidad`) VALUES (%s, %s,%s,%s);', (q_count, int(person_count), local_q_count, str(verdad)))
con.commit()
local_q_count += 1
second = 4
print('Second:', second)
print('LEN',len(af3))
if second <= 140 and len(af3)>second:
print('INSIDE IF ')
cur.execute('INSERT INTO `megaproyecto`.`medicion` (`id`, `id_sensor`, `id_pregunta`, `segundo`, `medicion`) VALUES (%s, %s, %s, %s, %s);', (meassure_count, 1, q_count, second, af3[second]))
con.commit()
meassure_count += 1
cur.execute('INSERT INTO `megaproyecto`.`medicion` (`id`, `id_sensor`, `id_pregunta`, `segundo`, `medicion`) VALUES (%s, %s, %s, %s, %s);', (meassure_count, 2, q_count, second, f3[second]))
con.commit()
meassure_count += 1
cur.execute('INSERT INTO `megaproyecto`.`medicion` (`id`, `id_sensor`, `id_pregunta`, `segundo`, `medicion`) VALUES (%s, %s, %s, %s, %s);', (meassure_count, 3, q_count, second, af4[second]))
con.commit()
meassure_count += 1
cur.execute('INSERT INTO `megaproyecto`.`medicion` (`id`, `id_sensor`, `id_pregunta`, `segundo`, `medicion`) VALUES (%s, %s, %s, %s, %s);', (meassure_count, 4, q_count, second, f4[second]))
con.commit()
meassure_count += 1
print('OUT OF IF ')
q_count += 1
def getTableQuestionVera(veracidad, preguntaNum):
con = mysql.connector.connect(
host="localhost",
user="root",
passwd="admin",
database="megaproyecto"
)
cursor = con.cursor(buffered=True)
af3 = []
f3 = []
af4 = []
f4 = []
sensores = [af3, f3,af4,f4]
for sensorId in range(1,5):
cursor.execute("SELECT medicion.medicion FROM medicion INNER JOIN pregunta ON pregunta.id = medicion.id_pregunta INNER JOIN sensor ON sensor.id = medicion.id_sensor INNER JOIN persona ON persona.idpersona = pregunta.id_persona WHERE pregunta.numero = "+str(preguntaNum)+" AND pregunta.veracidad = '"+veracidad+"' AND sensor.id = "+str(sensorId)+" ORDER BY persona.idpersona;")
numrows = cursor.rowcount
print (numrows)
for x in range(0,numrows):
row = cursor.fetchone()
sensores[sensorId-1].append(row[0])
sexo = []
persona = []
cursor.execute("SELECT variable.valor, persona.codigo FROM variable INNER JOIN variables ON variable.id_variable = variables.id INNER JOIN campo ON variable.id_campo = campo.id INNER JOIN persona ON persona.idpersona = variables.id_persona WHERE campo.nombre = 'Sexo' ORDER BY persona.idpersona;")
numrows = cursor.rowcount
for y in range(0,numrows):
row = cursor.fetchone()
sexo.append(row[0])
persona.append(row[1])
demographics = [[],[],[],[],[],[],[],[],[],[],[]]
for campo_id in range(2,13):
cursor.execute("SELECT variable.valor FROM variable INNER JOIN variables ON variable.id_variable = variables.id INNER JOIN campo ON variable.id_campo = campo.id INNER JOIN persona ON persona.idpersona = variables.id_persona WHERE campo.id = "+str(campo_id)+" ORDER BY persona.idpersona;")
numrows = cursor.rowcount
for num in range(0,numrows):
row = cursor.fetchone()
demographics[campo_id-2].append(str(row[0]))
#demographics_values = str.format('{},{},{},{},{},{},{},{},{},{}',)
output = ''
json_output = {"preguntas" : []}
json_object = {}
print(sexo)
print(demographics)
print()
for medicion in range(0,len(sensores[0])):
try:
str_dem = ""
for p in range(0, 11):
if p == 0:
str_dem = str(demographics[p][medicion])
else:
str_dem = str(demographics[p][medicion])+ ','+ str_dem
if veracidad == 'True':
output = output+ persona[medicion]+','+str(sensores[0][medicion])+','+str(sensores[1][medicion])+','+str(sensores[2][medicion])+','+str(sensores[3][medicion])+','+str(sexo[medicion])+","+str(preguntaNum)+',1,'+'2,'+str_dem+'\n'
json_object ['AF3'] = sensores[0][medicion]
json_object ['F3']=sensores[1][medicion]
json_object ['AF4']=sensores[2][medicion]
json_object ['F4']=sensores[3][medicion]
json_object ['sexo']=int(sexo[medicion])
json_object ['cief']=int(demographics[5][medicion])
json_object ['hare']=int(demographics[7][medicion])
json_object ['pebl']=int(demographics[9][medicion])
json_object ['edad']=int(demographics[10][medicion])
else:
output = output+ persona[medicion]+','+str(sensores[0][medicion])+','+str(sensores[1][medicion])+','+str(sensores[2][medicion])+','+str(sensores[3][medicion])+','+str(sexo[medicion])+","+str(preguntaNum)+',0,'+'2,'+str_dem+'\n'
except:
print ('Oops')
json_output["preguntas"].append(json_object)
print (json_output)
return output, json_output
def generate_file():
csv_output, json_output = getTableQuestionVera('True',1)
full_output = 'persona,AF3,F3,AF4,F4,sexo,numPregunta,veracidad,escolaridad,cie,cies,ciex,ciem,ciec,cief,ciep,hare,dsmt,pebl,edad\n'
full_output = full_output + str(csv_output)
with open('pregunta.json', 'w') as outfile:
json.dump(json_output, outfile)
with open('./result.csv', 'w') as file:
print('writing')
file.write(full_output)
def get_knn():
df = pd.read_csv('./result.csv')
df.drop(['persona','numPregunta','cie'], 1, inplace=True)
x = np.array(df.drop(['veracidad'], 1))
y = np.array(df['veracidad'])
clf = neighbors.KNeighborsClassifier(n_neighbors=19)
x_train2 = np.loadtxt('./train/x_train.txt')
y_train2 = np.loadtxt('./train/y_train.txt')
one_value = x
print(one_value)
print(x_train2[0])
clf.fit(x_train2, y_train2)
prediction = clf.predict(one_value)
print('Accuracy of KNN: ',prediction)
prediction = prediction[0]
probability = clf.predict_proba(one_value)[0]
return prediction, probability
class EmotivRecorder(multiprocessing.Process):
    # Inherit from multiprocessing.Process so the base-class __init__ call below is valid.
    def __init__(self):
        multiprocessing.Process.__init__(self)
self.exit = multiprocessing.Event()
def run(self):
print("On emotiv")
os.system('c:\\Python27\python.exe .\Emotiv\Emotrix\emotrix\my_recorder.py ')
self.exit.set()
global my_emotiv
global t0
def koch(second,sexo, edad, pebl, dsmt, hare, ciep, cief, ciec, ciem, ciex, cies, cie):
print('Running Koch')
insert_BD(second,sexo, edad, pebl, dsmt, hare, ciep, cief, ciec, ciem, ciex, cies, cie)
generate_file()
value, confidence = get_knn()
if value:
category = 'true'
confidence = confidence[1] * 100
else:
category = 'false'
confidence = confidence[0] * 100
time.sleep(3)
dnn.main([])
print('category: ', category)
print('confidence: ', confidence)
requests.post('http://localhost:5000/send-koch-response', data={ "category": category, "confidence": confidence })
def startAnswer():
print('Starting answer')
global t0
t0 = int(time.time()-t0)
return 'OK'
def finishAnswer():
global t0
koch(t0,'Masculino', 23, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
def startQuestion():
print('Starting question')
global my_emotiv
global t0
t0 = int(time.time())
my_emotiv = EmotivRecorder()
my_emotiv.run()
return 'OK'
koch(2,'Masculino', 23, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
|
[
"pablo.koch26@gmail.com"
] |
pablo.koch26@gmail.com
|
ed2044a2c6ce156532db6f9abe972c1652e8e2d1
|
12c62359f011f92db8ec3649c6c23f9158482085
|
/components/ai.py
|
9d1c4e196d4101ac6ac8212529362a1afc4228a3
|
[] |
no_license
|
Kuerschten/RoguelikeTutorial
|
7cab1274c7ffd936aed5192cb2aa1a9f36973e2a
|
f6e0c35011d8d039c2f8a1244738e925156a2ea0
|
refs/heads/main
| 2023-03-12T21:29:51.004921
| 2021-03-05T20:06:29
| 2021-03-05T20:06:29
| 338,453,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,204
|
py
|
from __future__ import annotations
import random
from typing import List, Optional, Tuple, TYPE_CHECKING
import numpy as np # type: ignore
import tcod
from actions import Action, BumpAction, MeleeAction, MovementAction, WaitAction
if TYPE_CHECKING:
from entity import Actor
class BaseAI(Action):
def perform(self) -> None:
raise NotImplementedError()
def get_path_to(self, dest_x: int, dest_y: int) -> List[Tuple[int, int]]:
"""Compute and return a path to the target position.
If there is no valid path then returns an empty list.
"""
# Copy the walkable array.
cost = np.array(self.entity.gamemap.tiles["walkable"], dtype=np.int8)
for entity in self.entity.gamemap.entities:
            # Check that an entity blocks movement and the cost isn't zero (blocking).
if entity.blocks_movement and cost[entity.x, entity.y]:
# Add to the cost of a blocked position.
# A lower number means more enemies will crowd behind each other in
# hallways. A higher number means enemies will take longer paths in
# order to surround the player.
cost[entity.x, entity.y] += 10
# Create a graph from the cost array and pass that graph to a new pathfinder.
graph = tcod.path.SimpleGraph(cost=cost, cardinal=2, diagonal=3)
pathfinder = tcod.path.Pathfinder(graph)
pathfinder.add_root((self.entity.x, self.entity.y)) # Start position.
# Compute the path to the destination and remove the starting point.
path: List[List[int]] = pathfinder.path_to((dest_x, dest_y))[1:].tolist()
# Convert from List[List[int]] to List[Tuple[int, int]].
return [(index[0], index[1]) for index in path]
class ConfusedEnemy(BaseAI):
"""
    A confused enemy will stumble around aimlessly for a given number of turns, then revert to its previous AI.
If an actor occupies a tile it is randomly moving into, it will attack.
"""
def __init__(
self, entity: Actor, previous_ai: Optional[BaseAI], turns_remaining: int
):
super().__init__(entity)
self.previous_ai = previous_ai
self.turns_remaining = turns_remaining
def perform(self) -> None:
# Revert the AI back to the original state if the effect has run its course.
if self.turns_remaining <= 0:
self.engine.message_log.add_message(
f"The {self.entity.name} is no longer confused."
)
self.entity.ai = self.previous_ai
else:
# Pick a random direction
direction_x, direction_y = random.choice(
[
(-1, -1), # Northwest
(0, -1), # North
(1, -1), # Northeast
(-1, 0), # West
(1, 0), # East
(-1, 1), # Southwest
(0, 1), # South
(1, 1), # Southeast
]
)
self.turns_remaining -= 1
# The actor will either try to move or attack in the chosen random direction.
            # It's possible the actor will just bump into the wall, wasting a turn.
return BumpAction(self.entity, direction_x, direction_y,).perform()
class HostileEnemy(BaseAI):
def __init__(self, entity: Actor):
super().__init__(entity)
self.path: List[Tuple[int, int]] = []
def perform(self) -> None:
target = self.engine.player
dx = target.x - self.entity.x
dy = target.y - self.entity.y
distance = max(abs(dx), abs(dy)) # Chebyshev distance.
if self.engine.game_map.visible[self.entity.x, self.entity.y]:
if distance <= 1:
return MeleeAction(self.entity, dx, dy).perform()
self.path = self.get_path_to(target.x, target.y)
if self.path:
dest_x, dest_y = self.path.pop(0)
return MovementAction(
self.entity, dest_x - self.entity.x, dest_y - self.entity.y,
).perform()
return WaitAction(self.entity).perform()
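# Standalone sketch (not part of the tutorial module) illustrating the cost weighting
# used in BaseAI.get_path_to: a hypothetical 5x5 walkable grid where one tile gets the
# same +10 penalty a blocking actor would add, so the computed path detours around it
# unless the detour is more expensive.
if __name__ == "__main__":
    demo_cost = np.ones((5, 5), dtype=np.int8)  # every tile walkable at cost 1
    demo_cost[2, 1] += 10                       # pretend another actor stands at (2, 1)

    demo_graph = tcod.path.SimpleGraph(cost=demo_cost, cardinal=2, diagonal=3)
    demo_pathfinder = tcod.path.Pathfinder(demo_graph)
    demo_pathfinder.add_root((0, 0))

    # Drop the starting point, exactly as get_path_to does.
    print(demo_pathfinder.path_to((4, 2))[1:].tolist())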
|
[
"dk-siggi@web.de"
] |
dk-siggi@web.de
|
32217a6f79616909f2a98f30ead7693f6c395512
|
ec5db30f321fc0c77adc7ec0d567cb0b8c723cef
|
/OpenCV section/face_detector.py
|
a801bfa8f21ffbae648a2c9d87290e291f684f5c
|
[] |
no_license
|
LingzeHu/python-projects
|
41b3fc19544232dcd715a162f6a457dbfd5a3371
|
1271c0c7bc3e58ad21f9a4cecd629be10618ee1a
|
refs/heads/master
| 2022-07-28T11:06:36.362649
| 2020-05-26T01:49:42
| 2020-05-26T01:49:42
| 266,919,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
import cv2
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
img = cv2.imread("news.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_img,
scaleFactor=1.1,
minNeighbors=5)
for x, y, w, h in faces:
img = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)
print(faces)
print(type(faces))
resized = cv2.resize(img, (int(img.shape[1]/3), int(img.shape[0]/3)))
cv2.imshow("Gray", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"lingzehu@gmail.com"
] |
lingzehu@gmail.com
|
2749a89da17d91be60716423a5b52e513374404b
|
7e325da0ec25a56158f4a47acf6f594548a72384
|
/users/migrations/0005_profile_stripe_customer_id.py
|
97a12f3ccd87c340090928f9137cccccc4257501
|
[
"MIT"
] |
permissive
|
drewvpham/xclude.com
|
5102a921721c508552648ee03f5a8e1b0bafb6e8
|
103e89e2326c4c6fbfab819c43bc4e4634913bc9
|
refs/heads/master
| 2022-12-16T06:36:00.631404
| 2019-12-29T20:35:16
| 2019-12-29T20:35:16
| 222,317,889
| 0
| 0
|
MIT
| 2022-12-07T23:54:06
| 2019-11-17T21:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Generated by Django 2.2.7 on 2019-12-24 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_profile_one_click_purchasing'),
]
operations = [
migrations.AddField(
model_name='profile',
name='stripe_customer_id',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
[
"drewvpham@gmail.com"
] |
drewvpham@gmail.com
|
2ac42e0a73d94a3ea63cdde82603cce7f4879b66
|
b697b98db859c061c1174837deee1d6fc47d115e
|
/examples/spot/futures/futures_loan_interest_history.py
|
d11b8ced29894bbc52ba7c6b24d2585c250be296
|
[
"MIT"
] |
permissive
|
leozaragoza/binance-connector-python
|
7e684d6e68ff7d580b7e3fa83f952540a79b1120
|
3311d102c9e788e3d71047f0af103c00d1ae2162
|
refs/heads/master
| 2023-07-15T12:27:50.041388
| 2021-08-22T17:08:38
| 2021-08-22T17:08:38
| 396,354,910
| 3
| 0
|
MIT
| 2021-08-22T17:08:38
| 2021-08-15T13:12:41
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
#!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
key = ""
secret = ""
client = Client(key, secret)
logging.info(client.futures_loan_interest_history())
|
[
"liang.shi@protonmail.com"
] |
liang.shi@protonmail.com
|
7d31daeee7daa90b1dc6e4d1fb65142ca9d46562
|
05c6023c8bc3a495d27ea8028a236aefc0324e31
|
/src/utils/db/matches/DBMatchesHandler.py
|
f415677526b5247e8b46fc975f7abbdcb5fc5c31
|
[] |
no_license
|
huyleminh/network-socket-livescore
|
c3213c22c38a225f0f8da46a8cd8a481afcde8f2
|
abd347dac645aa037149fdb03285db4b71d00597
|
refs/heads/master
| 2023-04-19T05:38:58.370316
| 2021-05-09T02:13:27
| 2021-05-09T02:13:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,476
|
py
|
import json
import shortuuid as shortId
import sys
from pathlib import Path
pathfile = Path(__file__).resolve()
dbRoot = pathfile.parents[1]
sys.path.append(str(dbRoot))
from details.DBDetailsHandler import DBDetailsHandler
class DBMatchesHandler:
@staticmethod
def getAllMatches():
try:
fMatches = open("database/Matches.json")
matches = fMatches.read()
fMatches.close()
except:
return { "status": 500 }
matchesJSON = json.loads(matches)
return { "status": 200, "data": matchesJSON }
@staticmethod
def getMatchById(idMatch):
if idMatch == "":
return { "status": 404 }
res = DBMatchesHandler.getAllMatches()
if res["status"] == 500:
return { "status": 500 }
if res["status"] == 200:
dataMatches = res["data"]
for i in range(0, len(dataMatches)):
if dataMatches[i]["idMatch"] == idMatch:
matchDetailRes = DBDetailsHandler.getDetailsById(dataMatches[i]["details"], dataMatches[i]["idMatch"])
if matchDetailRes["status"] == 200:
matchDetail = matchDetailRes["data"]
return {
"status": 200,
"data": {
"match": dataMatches[i],
"details": matchDetail
}
}
elif matchDetailRes["status"] == 404:
return {
"status": 200,
"data": {
"match": dataMatches[i],
"details": {}
}
}
return { "status": 404 }
@staticmethod
def writeAllMatches(matches):
with open("database/Matches.json", "w") as writeFile:
            json.dump(matches, writeFile)
@staticmethod
def createNewMatch(matchInfo):
""" matchInfo: dict type
- home and away: required
- status: FT, time (minutes), HT, Pospt, time (begin time: date type), default is current date
- homeScore and awayScore: default is ?
"""
if not isinstance(matchInfo, dict):
return { "status": 400, "text": "Invalid type" }
if "home" not in matchInfo or "away" not in matchInfo:
return { "status": 400, "text": "Missing attribute" }
newMatch = {
"idMatch": shortId.random(length=10),
"home": matchInfo["home"],
"away": matchInfo["away"],
"status": matchInfo["status"],
"homeScore": matchInfo["homeScore"],
"awayScore": matchInfo["awayScore"],
"details": shortId.random(length=10)
}
# Create new details for this match:
newDetail = { "idDetails": newMatch["details"], "idMatch": newMatch["idMatch"] }
# Insert to database
res = DBMatchesHandler.getAllMatches()
if res["status"] == 500:
return { "status": 500, "text": "Internal error"}
if res["status"] == 200:
dataMatches = res["data"]
dataMatches.append(newMatch)
DBMatchesHandler.writeAllMatches(dataMatches)
return { "status": 201, "text": "Create new ok"}
|
[
"leminhhuy.hcmus@gmail.com"
] |
leminhhuy.hcmus@gmail.com
|
86627084d51566bc70cecda01854f3e0851bcd6f
|
5be853e4bfe6b95c4e311af32e0c5cbf20218adc
|
/src/Kernel/FunctionsForLists.py
|
2c64500f279df13f20b5591fe6b9a060267e8d54
|
[] |
no_license
|
SebastianDapra/KernelSimulator
|
ff89dfd40b91c6470ce7f654218ab33df8f753e9
|
ff14b82a2be30f84301efbba559d1bbd9883195d
|
refs/heads/master
| 2021-01-18T14:14:02.277544
| 2015-12-10T01:54:00
| 2015-12-10T01:54:00
| 42,142,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 625
|
py
|
class FunctionsForLists:
@staticmethod
def filterList(functionForFilter,listF):
return list(filter(functionForFilter, listF))
@staticmethod
def mapList(function,listM):
return list(map(function,listM))
@staticmethod
def sum(function,listM):
return sum(FunctionsForLists.mapList(function,listM))
@staticmethod
def findFirst(functionForFilter,listF):
return FunctionsForLists.filterList(functionForFilter, listF)[0]
@staticmethod
def exists(functionForFilter,listF):
        return len(FunctionsForLists.filterList(functionForFilter, listF)) > 0
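# Illustrative usage (not in the original file): each helper is a thin wrapper
# around filter/map/sum over a plain list.
if __name__ == "__main__":
    numbers = [1, 2, 3, 4, 5]
    print(FunctionsForLists.filterList(lambda n: n % 2 == 0, numbers))  # [2, 4]
    print(FunctionsForLists.mapList(lambda n: n * n, numbers))          # [1, 4, 9, 16, 25]
    print(FunctionsForLists.sum(lambda n: n, numbers))                  # 15
    print(FunctionsForLists.findFirst(lambda n: n > 3, numbers))        # 4
    print(FunctionsForLists.exists(lambda n: n > 10, numbers))          # False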
|
[
"luciano.federico.olmedo@gmail.com"
] |
luciano.federico.olmedo@gmail.com
|
4d16d2aff65c43ef586654078b8ea7389ab8be10
|
72eeea6b1d45faab32431d48bc2ab814724ed5d3
|
/model/modeling/necks/__init__.py
|
10ae8fdbda47bc933c0b86d13b98870e01caa66d
|
[] |
no_license
|
wenjunyoung/TF_SAST_Paddle_OCR
|
15ba58be3b5aefefbbcda5e4dd02afa36aa5fbcf
|
60f94befe12cec6c6de72d4107bda7b109abdc6a
|
refs/heads/main
| 2023-07-24T14:47:01.210211
| 2021-09-01T07:02:18
| 2021-09-01T07:02:18
| 397,501,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['build_neck']
def build_neck(config):
# from .db_fpn import DBFPN
# from .east_fpn import EASTFPN
# from .sast_fpn import SASTFPN
from .sast_fpn import tf_SASTFPN
# from .rnn import SequenceEncoder
# from .pg_fpn import PGFPN
support_dict = ['DBFPN', 'EASTFPN', 'SASTFPN', 'tf_SASTFPN', 'SequenceEncoder', 'PGFPN']
module_name = config.pop('name')
assert module_name in support_dict, Exception('neck only support {}'.format(
support_dict))
module_class = eval(module_name)(**config)
return module_class
|
[
"i_yangwenjun@cvte.com"
] |
i_yangwenjun@cvte.com
|
eba3614127140f9ea0fe64dd042430decb11b8c9
|
e4928b77dfc7c50fc2ba23a12a2c9201d08f1f67
|
/Chapter_Problems/Chapter4/4_1.py
|
84dee5fbe800aab6ad5af818625ce6eb12e32f22
|
[] |
no_license
|
ishwarsaini10/Course-Work-Problems
|
02363624b5e43c629724751594fb180b05579aaf
|
fb4afc22319f7ebcd756c279022fdef2279571f9
|
refs/heads/main
| 2023-02-01T00:48:42.389969
| 2020-12-17T13:27:36
| 2020-12-17T13:27:36
| 309,957,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
a = []  # create an empty list
|
[
"noreply@github.com"
] |
noreply@github.com
|
49df5a742852121ed1640fd82015e88245e9910f
|
98430ad712b7855b3ab9cdab1b31055d7f607d8e
|
/Airline Sentiment Analysis.py
|
0ac57428aec69e3209e8ed7f509c27cfdfdd9541
|
[] |
no_license
|
adobbins1/US-Airline-Twitter-Sentiment-Analysis
|
cb6e7dc1709386a13b67c1b4273937003641dda9
|
db3908dd9d5c9f6f4e5763d7c2d57bed7228f0e0
|
refs/heads/master
| 2022-11-28T04:29:49.735077
| 2020-08-05T21:26:56
| 2020-08-05T21:26:56
| 272,076,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,648
|
py
|
# Austin Dobbins
# DSC 680
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# Importing Airline Tweets Data Set
df = pd.read_csv(r'C:\Users\austi\OneDrive\Desktop\Tweets.csv')
print(df.head())
# Converting Data Type of Date Features
df['tweet_created'] = pd.to_datetime(df['tweet_created'])
df['date_created'] = df['tweet_created'].dt.date
# Creating Data Frame Containing the Counts of Each Negative, Neutral, and Positive Tweet For Each Airline
# Sorted by Date
d = df.groupby(['date_created', 'airline'])
d = d.airline_sentiment.value_counts()
d.unstack()
print(d)
# Printing the number of Negative, Neutral, and Positive Tweets
print(df.airline_sentiment.value_counts())
# Plotting the Percentage of Tweets for Each Airline
df.airline.value_counts().plot(kind='pie', autopct='%1.0f%%')
plt.title('Percentage of Tweets for Each Airline')
plt.show()
# Plotting Percentage of Positive, Negative, and Neutral Comments
df.airline_sentiment.value_counts().plot(kind='pie', autopct='%1.0f%%', colors=['red', 'yellow', 'green'])
plt.title('Percentage of Positive, Negative, and Neutral Comments')
plt.ylabel('Airline Sentiment')
plt.show()
# Count of Negative Reasons
print(df.negativereason.value_counts())
# Plotting Percentage of Reasons for Negative Comments
df.negativereason.value_counts().plot(kind='pie', autopct='%1.0f%%')
plt.title('Percentage of Reasons for Negative Comments')
plt.ylabel('Negative Comment Reason')
plt.show()
# Plotting Counts of Positive, Neutral, and Negative Comments for Each Airline
airlinesentiment = df.groupby(['airline', 'airline_sentiment']).airline_sentiment.count().unstack()
airlinesentiment.plot(kind='bar')
plt.title('Counts of Positive, Neutral, and Negative Comments for Each Airline')
plt.xlabel('Airline')
plt.show()
# Plotting Confidence Level for Positive, Neutral, and Negative Tweets
sns.barplot(x= 'airline_sentiment', y = 'airline_sentiment_confidence', data=df)
plt.title('Confidence Level for Positive, Neutral, and Negative Tweets')
plt.xlabel('Airline Sentiment')
plt.ylabel('Airline Sentiment Confidence')
plt.show()
# Removing Unneeded Characters: 'RT' '@'
words = ' '.join(df['text'])
cleanedwords = " ".join([word for word in words.split()
if 'http' not in word
and not word.startswith('@')
and word != 'RT'
])
# Calculating Frequency of Words In Tweets
def freq(str):
str = str.split()
str2 = []
for i in str:
if i not in str2:
str2.append(i)
for i in range(0, len(str2)):
if str.count(str2[i]) > 50:
print('Frequency of', str2[i], 'is :', str.count(str2[i]))
# print(freq(cleanedwords))
# Cleaning the Dataset for Modeling
# Dividing Dataset into Features and Labels
features = df.iloc[:, 10].values
labels = df.iloc[:, 1].values
processed_features = []
for sentence in range(0, len(features)):
# Remove special characters
processed_feature = re.sub(r'\W', ' ', str(features[sentence]))
# remove single characters
processed_feature= re.sub(r'\s+[a-zA-Z]\s+', ' ', processed_feature)
# Remove single characters from the start
processed_feature = re.sub(r'\^[a-zA-Z]\s+', ' ', processed_feature)
# Changing multiple spaces to single space
processed_feature = re.sub(r'\s+', ' ', processed_feature, flags=re.I)
# Removing prefixed 'b'
processed_feature = re.sub(r'^b\s+', '', processed_feature)
# Converting to Lowercase
processed_feature = processed_feature.lower()
processed_features.append(processed_feature)
# Creating "Bag of Words" using the 2500 Most Frequently Occurring Words
vectorizer = TfidfVectorizer (max_features=2500, min_df=7, max_df=0.8, stop_words=stopwords.words('english'))
processed_features = vectorizer.fit_transform(processed_features).toarray()
# Starting Model Creation
# Splitting Data into Training and Testing Sets
x_train, x_test, y_train, y_test = train_test_split(processed_features, labels, test_size=0.2, random_state=0)
# Random Forest Classifier
textclassifier = RandomForestClassifier(n_estimators=200)
textclassifier.fit(x_train, y_train)
# Random Forest Prediction
predictions = textclassifier.predict(x_test)
# Random Forest Accuracy Metrics
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
print(accuracy_score(y_test, predictions))
# SVM Classifier
textclassifier2 = SVC(gamma='auto')
textclassifier2.fit(x_train, y_train)
# SVM Prediction
predictions2 = textclassifier2.predict(x_test)
# SVM Accuracy Metrics
print(confusion_matrix(y_test, predictions2))
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
# Logistic Regression Model
model = LogisticRegression()
model.fit(x_train, y_train)
# Logistic Regression Prediction
predictions3 = model.predict(x_test)
# Logistic Regression Accuracy Metrics
print(confusion_matrix(y_test, predictions3))
print(classification_report(y_test, predictions3))
print(accuracy_score(y_test, predictions3))
|
[
"noreply@github.com"
] |
noreply@github.com
|
bfe55bf5237d1b834b0adcbbcf20d84565aa24f1
|
f87ce51b358caf2497fd0f6d0beb2e18ff13fac5
|
/TheNCollection/userlogin/forms.py
|
5cd0ce0cfe12c6104a68c730d572789dbf2d13ba
|
[] |
no_license
|
naerae100/Droid-
|
035991b151ff0c40bc26668ca03e24d431f1c2c8
|
9f3a9487774b16ab632c5f1a9da7e3aa63617c00
|
refs/heads/main
| 2023-07-31T02:07:40.025592
| 2021-09-30T17:21:59
| 2021-09-30T17:21:59
| 397,072,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
from django.forms import ModelForm
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class CreateUserForm(UserCreationForm):
class Meta:
model= User
fields = ['username', 'email', 'password1', 'password2']
class ProfileForm(ModelForm):
class Meta:
model = Profile
fields = "__all__"
exclude = ['user', 'username', 'email']
|
[
"naerae100@gmail.com"
] |
naerae100@gmail.com
|
b7b8bb8d391d2bf9c02e26c1ae9e2ca3a0ce590c
|
62ee285eb838551a92a9a3a3d6c39892a4eeba8b
|
/user.py
|
10686f1e0efce5a46a80870eb896d9fb0660fd6a
|
[] |
no_license
|
masaki24k/masakiogawa
|
f1967e6639ef9ca60c704398617588c23ab2245b
|
39e64439eb8a5bce1b389f7310c0857c55193414
|
refs/heads/main
| 2023-03-26T17:56:47.797603
| 2021-03-29T13:48:20
| 2021-03-29T13:48:20
| 334,306,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
class User(object):
def __init__(self, id, username, email, password):
self.id = id
self.username = username
self.email = email
self.password = password
|
[
"noreply@github.com"
] |
noreply@github.com
|
4dc8c53bfc1378aebcab4560c9d128583228ac0f
|
a50b9dfd5c3a703f0635350f93d5c1af62a6275c
|
/venv/bin/pip3
|
f9629ddc4ce228f5b980ff2c62da744da033cd74
|
[] |
no_license
|
daniloaugusto0212/EstudoPython
|
8c37475f62adad9a77c7df1d5decd11136014b5c
|
2c3e4b523848cca946fc4fcc0946f3f45cd1f4ac
|
refs/heads/master
| 2021-06-27T12:52:07.765054
| 2021-03-09T23:53:33
| 2021-03-09T23:53:33
| 222,791,817
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
#!/home/o1234/PycharmProjects/novo/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"daniloaugusto0212@hotmail.com"
] |
daniloaugusto0212@hotmail.com
|
|
c3fbe89b0c3818f26b5ad9deb4d5461a71acc106
|
97e306f5f0d4f87f762884522e1bb2a98b114ed6
|
/leetcode/1004/solution.py
|
daf59bdb6fea4cf0ba8ab14b7d8d49e5898cf58a
|
[] |
no_license
|
yibei8811/algorithm
|
a81bd73c361a3ffc0c7c4f196e44fa6bda5d1d20
|
6b74260f81d67fc58f9e656afbe4eb7363c4901b
|
refs/heads/master
| 2023-04-19T10:28:00.114018
| 2021-05-01T12:11:27
| 2021-05-01T12:11:27
| 323,281,538
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
from typing import List
class Solution:
    # If K = 3, the problem can be recast as finding the length spanned between every
    # 5 consecutive zeros (the 3 that get flipped plus one bounding zero on each side).
def longestOnes(self, A: List[int], K: int) -> int:
# if sum(A) + K >= len(A):
# return len(A)
A.insert(0,0)
A.append(0)
n = len(A)
k = K
p = 0
q = 1
result = 0
list0_index = []
for i in range(1, n):
if A[i] == 0:
list0_index.append(i)
if k == 0:
q = i
result = max(result, q - p - 1)
p = list0_index.pop(0)
else:
k -= 1
return max(result, i - p - 1)
    # When the number of zeros between left and right exceeds k: advance both right and left.
    # When the number of zeros between left and right is <= k: advance right only, leave left alone.
    # Only advancing right can grow the window, so count is decremented whenever A[left] is 0;
    # the window never shrinks, so right - left + 1 at the end is the largest window seen.
def longestOnes2(self, A: List[int], K: int) -> int:
left,right,count = 0,0,0
for right in range(len(A)):
if A[right] == 0:
count += 1
if count > K:
if A[left] == 0:
count -= 1
left += 1
return right - left + 1
print(Solution().longestOnes2([1,1,1,0,0,0,1,1,1,1,0],2))
print(Solution().longestOnes2([0,0,1,1,0,0,1,1,1,0,1,1,0,0,0,1,1,1,1],3))
print(Solution().longestOnes2([0,0,0,1],4))
|
[
"yibei8811@gmail.com"
] |
yibei8811@gmail.com
|
9c86ad961ad9aa88f17595d10f0dc2850ba5c90f
|
92e3652a7c0bc754a584a2fcde34d9b2f5c486b5
|
/py3file/downloadMp3.py
|
988de0e499a71d418391fda0ba24636aebe36baf
|
[] |
no_license
|
scottph/phlib
|
57146be6ed7e22905e903034566747756584b042
|
537d15518cc342e476cc94a6190d78cdef2aa55b
|
refs/heads/master
| 2016-09-06T18:42:43.621587
| 2012-01-16T08:53:52
| 2012-01-16T08:53:52
| 3,189,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
# Usage:
# First write the songs you want to download into a text file, e.g. song.txt, one song per line.
# Then run:
#     python getmp3.py song.txt
# The files are downloaded into the "Music_" directory under the current directory.
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
import os
import re
import urllib2
cwd = os.getcwd()
lc = sys.getfilesystemencoding()
downdir = cwd+os.sep+'Music_'
print '\nDownload directory:', downdir, '\n'
if not os.path.isdir(downdir):
    print 'The download directory does not exist, creating:', downdir
os.mkdir(downdir)
if os.path.isfile(sys.argv[1]):
list_file = sys.argv[1]
else:
list_file = cwd + os.sep + sys.argv[1]
try:
f = file(list_file,'r')
except IOError:
    print 'Failed to open the song list, please check that the file exists!'
sys.exit()
for eachLine in f:
song = eachLine.strip()
if not song:
continue
mp3file = downdir + os.sep + song + '.mp3'
if os.path.isfile(mp3file):
        print '%s.mp3 already exists, moving on to the next song\n' % song
continue
url="http://box.zhangmen.baidu.com/x?op=12&count=1&title=%s$$" % urllib2.quote(song.decode(lc).encode('gbk'))
xmlf = urllib2.urlopen(url)
txt = xmlf.read()
rex1 = u'(<encode>)(http://.+?/.+?\..+?)(</encode>)'
rex2 = u'(<decode>)(.+?\..+?)(</decode>)'
l1 = re.findall(rex1,txt)
l2 = re.findall(rex2,txt)
url_list = []
for i in range(len(l1)):
temp_list = re.split('/',l1[i][1])
temp_list.pop()
temp_list.append(l2[i][1])
down_url = '/'.join(temp_list)
url_list.append(down_url)
for i in range(len(url_list)):
        extname = url_list[i].split('.')[-1]  # skip anything that is not an MP3
        if extname.upper() == 'MP3':
            print '\nDownloading song: %s...\n' % song
cmd = 'wget %s -c -t 3 -O "%s"' % (url_list[i],downdir+os.sep+song+'.mp3')
os.system(cmd)
#multGet.MyHttpGet(url_list[i],connections = 10)
            if os.path.getsize(mp3file) < 500000L:  # files under 500 KB are deleted and re-downloaded
                print '\nFile too small, the target site may be blocking the download; trying the next link\n'
os.remove(mp3file)
else:
                print '"%s" finished downloading!' % song
break
print 'All downloads finished!'
|
[
"scottph@163.com"
] |
scottph@163.com
|
f121d704f462cbfcf6d8989a237fa08cedb0d211
|
e6a95ea97f285a992a8c39e1ec7dead6eec2e119
|
/src/Services/Singleton.py
|
7e88a2ff972dc150587ae7081796a3977bedd2fd
|
[
"MIT"
] |
permissive
|
xclemence/pykube-switch
|
d132cfdd4bd91c30f5831824214b0362c49ae5cb
|
4b7ac4565cc15469c2109e1a400d78e1d2b14404
|
refs/heads/master
| 2022-12-28T00:39:18.692977
| 2020-10-15T19:48:20
| 2020-10-15T19:48:20
| 300,056,431
| 1
| 0
|
MIT
| 2020-10-14T20:48:22
| 2020-09-30T20:38:38
|
QML
|
UTF-8
|
Python
| false
| false
| 337
|
py
|
from PySide2.QtCore import QObject
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class QObjectSingleton(type(QObject), Singleton):
pass
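# Illustrative sketch (not in the original module): any class that uses Singleton
# as its metaclass hands back the same instance on every construction. AppConfig
# is a made-up example class.
if __name__ == "__main__":
    class AppConfig(metaclass=Singleton):
        def __init__(self):
            self.values = {}

    first = AppConfig()
    second = AppConfig()
    print(first is second)  # True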
|
[
"xavier.clemence@gmail.com"
] |
xavier.clemence@gmail.com
|
2c402ba9801ca89d32a15c68cd22e69faf89db6c
|
ae69ca518ba7069268be44e84d48405bf369820b
|
/py/tests/swagger_client/models/tx.py
|
5e59671f321ee5ba3703478f22bea6bc99271d8a
|
[
"ISC"
] |
permissive
|
kxzy1990/epoch
|
3c92920ac0986c5096a4867e356b6d508084ae57
|
d40f331de8ef937872a7cc296ae72e5c0d0f98fe
|
refs/heads/master
| 2021-09-05T22:13:21.195641
| 2018-01-30T20:58:47
| 2018-01-30T20:58:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,794
|
py
|
# coding: utf-8
"""
Aeternity Epoch
This is the [Aeternity](https://www.aeternity.com/) Epoch API.
OpenAPI spec version: 1.0.0
Contact: apiteam@aeternity.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Tx(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'tx': 'str'
}
attribute_map = {
'tx': 'tx'
}
def __init__(self, tx=None):
"""
Tx - a model defined in Swagger
"""
self._tx = None
if tx is not None:
self.tx = tx
@property
def tx(self):
"""
Gets the tx of this Tx.
:return: The tx of this Tx.
:rtype: str
"""
return self._tx
@tx.setter
def tx(self, tx):
"""
Sets the tx of this Tx.
:param tx: The tx of this Tx.
:type: str
"""
self._tx = tx
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Tx):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
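# Quick illustrative usage (not generated by swagger-codegen): the value passed to
# tx here is an arbitrary placeholder string.
if __name__ == "__main__":
    sample = Tx(tx="tx$deadbeef")
    print(sample.to_dict())                 # {'tx': 'tx$deadbeef'}
    print(sample == Tx(tx="tx$deadbeef"))   # True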
|
[
"dimitar.p.ivanov@gmail.com"
] |
dimitar.p.ivanov@gmail.com
|
92f9238ad3092c97ec4b095701f67d1c55cfd079
|
5d302c38acd02d5af4ad7c8cfe244200f8e8f877
|
/String/1408. String Matching in an Array(Easy).py
|
adf72b4a7fde98423f8ff4a18cf2a13c73a92e79
|
[] |
no_license
|
nerohuang/LeetCode
|
2d5214a2938dc06600eb1afd21686044fe5b6db0
|
f273c655f37da643a605cc5bebcda6660e702445
|
refs/heads/master
| 2023-06-05T00:08:41.312534
| 2021-06-21T01:03:40
| 2021-06-21T01:03:40
| 230,164,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
from typing import List

class Solution:
def stringMatching(self, words: List[str]) -> List[str]:
store = set();
for i in range(len(words)):
for j in range(len(words)):
if len(words[i]) >= len(words[j]) and words[i] != words[j]:
if words[i].find(words[j]) != -1:
store.add(words[j])
return list(store)
#class Solution:
# def stringMatching(self, words: List[str]) -> List[str]:
# res = []
# words.sort(key=len)
# for i, word in enumerate(words):
# for k in range(i+1, len(words)):
# if word in words[k]:
# res.append(word)
# break
#
# return res
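# Quick check (not in the original file) using the LeetCode 1408 sample input; the
# result order is unspecified because the accepted solution collects matches in a set.
if __name__ == "__main__":
    print(Solution().stringMatching(["mass", "as", "hero", "superhero"]))  # ['as', 'hero'] in some order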
|
[
"huangxingyu00@gmail.com"
] |
huangxingyu00@gmail.com
|
d6309335f610181b9f302cef7a872a2c304a842f
|
07392a13541ea574a9616f38977146bb6032cdb4
|
/board/views.py
|
c4ad0c2a65423b5f8add52e0ad13e81093f98053
|
[] |
no_license
|
lutae2000/mysite3
|
9ae2205d3ecab66cf2734620a28da8132ec5979d
|
61670b82d657b9758aa2971f35439174675fd2c2
|
refs/heads/master
| 2020-03-22T09:02:16.407293
| 2018-07-05T07:21:15
| 2018-07-05T07:21:15
| 139,809,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
# Create your views here.
from board.models import Board
def list(request):
board_list = Board.objects.all().order_by('-regdate')
context = {'board_list':board_list}
return render(request, 'board/list.html', context)
#user
#count
def write(request):
return render(request, 'board/write.html')
def modify(request):
content = Board.objects.filter(id=request.GET['id'])
    context = {'content': content}
return render(request, 'board/modify.html',context)
def add(request):
board = Board()
board.title = request.POST['title']
board.content = request.POST['content']
board.save()
return HttpResponseRedirect('/board/list')
def view(request):
content = Board.objects.filter(id=request.GET['id'])
context = {'content':content}
print(context)
return render(request, 'board/view.html',context)
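# Hedged wiring sketch (not part of views.py): a board/urls.py along these lines
# would expose the views above; the route names and the URL style are assumptions,
# since the repository's urls.py is not shown here.
#
# from django.conf.urls import url
# from board import views
#
# urlpatterns = [
#     url(r'^list/$', views.list),
#     url(r'^write/$', views.write),
#     url(r'^modify/$', views.modify),
#     url(r'^add/$', views.add),
#     url(r'^view/$', views.view),
# ]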
|
[
"lutae2000@gmail.com"
] |
lutae2000@gmail.com
|
70dd220917460238b9c07da05f31e6a8feca9f20
|
a97658ae4119fa4753a25620b1c546a0619065a2
|
/helloworld/helloworld_project/settings.py
|
9fc4cf914eca32e3c23b8006c6371593e1152c58
|
[] |
no_license
|
dhuert27/DjangoForBeginners_Exercises
|
b21d7ac2be9298287f4b4e40579f0fae30abf3a2
|
42e453217a9df03237e6031a70ec1418016c7fbf
|
refs/heads/main
| 2023-04-13T16:48:44.583218
| 2021-04-22T03:24:07
| 2021-04-22T03:24:07
| 356,119,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,152
|
py
|
"""
Django settings for helloworld_project project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-sqmx3*48xixz9o#0w#1=$q@9u_%%*#mr08o*=40^+uqb2yr3x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pages.apps.PagesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'helloworld_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'helloworld_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"daniel.huert2000@gmail.com"
] |
daniel.huert2000@gmail.com
|
5994a372ecac4751d01b2d63d8cfe85ccff050c0
|
e9331ebf2d83184924ade1c9e4849c3ba0285bf2
|
/mutiple-devices/vgg16/8devices/node.py
|
8e3356581b54e55c63cec2cc97e4e45d94968fb4
|
[
"Apache-2.0"
] |
permissive
|
parallel-ml/asplos2018-workshop
|
fcf7c13ff6723ac91f156377e27fc5acaf85367a
|
a05a63c17f43926d6411ada625b191db1abbee67
|
refs/heads/master
| 2022-12-02T02:12:34.450130
| 2022-11-29T16:06:36
| 2022-11-29T16:06:36
| 120,949,505
| 15
| 4
|
Apache-2.0
| 2022-11-29T16:06:37
| 2018-02-09T19:58:11
|
Python
|
UTF-8
|
Python
| false
| false
| 11,323
|
py
|
"""
This module shows the node for 8 nodes distributed system setup.
"""
import argparse
import os
import time
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from collections import deque
from multiprocessing import Queue
from threading import Thread, Lock
import avro.ipc as ipc
import avro.protocol as protocol
import avro.schema as schema
import numpy as np
import tensorflow as tf
import yaml
import model as ml
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# read data packet format.
PROTOCOL = protocol.parse(open('resource/image.avpr').read())
class Node(object):
"""
    Singleton Node class. It will store data if necessary, record the next layer's
    response time, send data packets to the next layer, and keep the loaded model
    in memory without reloading.
Attributes:
        ip: A dictionary containing a Queue of IP addresses for each model type.
        model: Loaded model associated with this node.
        graph: Default graph used by TensorFlow.
        debug: Flag for debugging.
        lock: Threading lock for safe usage of this class. The lock is used
            for safe model forwarding: if the model is processing input and
            a request arrives from another device, the new request waits
            until the previous model forwarding finishes.
        name: Model name.
        total: Total time spent getting frames.
        count: Total number of frames received back.
        input: Stores the input for the last fully connected layer; it acts as a buffer
            that kicks out extra data and keeps unused data for later.
"""
instance = None
def __init__(self):
self.ip = dict()
self.model = None
self.graph = tf.get_default_graph()
self.debug = False
self.lock = Lock()
self.name = 'unknown'
self.total = 0
self.count = 1
self.input = deque()
def log(self, step, data=''):
"""
Log function for debug. Turn the flag on to show each step result.
Args:
step: Each step names.
data: Data format or size.
"""
if self.debug:
print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
for k in range(0, len(step), 68):
print '+{:^68.68}+'.format(step[k:k + 68])
for k in range(0, len(data), 68):
print '+{:^68.68}+'.format(data[k:k + 68])
print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print
def acquire_lock(self):
self.lock.acquire()
def release_lock(self):
self.lock.release()
def timer(self, interval):
self.total += interval
print '{:s}: {:.3f}'.format(self.name, self.total / self.count)
self.count += 1
@classmethod
def create(cls):
if cls.instance is None:
cls.instance = cls()
return cls.instance
class Responder(ipc.Responder):
""" Responder called by handler when got request. """
def __init__(self):
ipc.Responder.__init__(self, PROTOCOL)
def invoke(self, msg, req):
"""
        This function is invoked by do_POST to handle the request. Invoke processes
        the request and produces the response; it is the core of each node. All
        model forwarding and output redirection happen here. Because the invoke
        method of the initializer only needs to receive the data packet, it does
        nothing else and returns None.
        Because this is a generic node class, it contains the handling code for
        every input type. The logic is: load the model requested by the previous
        layer, run inference, and send the current layer's output to the next
        layer. All of the per-model code lives here for convenience. To avoid the
        long wait of model reloading, each node is assigned a single fixed job, so
        it never has to reload its model.
Args:
msg: Meta data.
req: Contains data packet.
Returns:
None: It just acts as confirmation for sender.
Raises:
            AvroException: if the data does not match the syntax defined in the schema.
"""
node = Node.create()
node.acquire_lock()
if msg.name == 'forward':
try:
with node.graph.as_default():
bytestr = req['input']
if req['next'] == 'block1':
node.log('block1 gets data')
X = np.fromstring(bytestr, np.uint8).reshape(224, 224, 3)
node.model = ml.block1() if node.model is None else node.model
output = node.model.predict(np.array([X]))
node.log('finish block1 forward')
Thread(target=self.send, args=(output, 'block234', req['tag'])).start()
elif req['next'] == 'block234':
node.log('block234 gets data')
X = np.fromstring(bytestr, np.float32).reshape(112, 112, 64)
node.model = ml.block234() if node.model is None else node.model
output = node.model.predict(np.array([X]))
node.log('finish block234 forward')
Thread(target=self.send, args=(output, 'block5', req['tag'])).start()
elif req['next'] == 'block5':
node.log('block5 gets data')
X = np.fromstring(bytestr, np.float32).reshape(14, 14, 512)
node.model = ml.block5() if node.model is None else node.model
output = node.model.predict(np.array([X]))
node.log('finish block5 forward')
for _ in range(2):
Thread(target=self.send, args=(output, 'fc1', req['tag'])).start()
elif req['next'] == 'fc1':
node.log('fc1 gets data')
X = np.fromstring(bytestr, np.float32).reshape(25088)
node.model = ml.fc1() if node.model is None else node.model
output = node.model.predict(np.array([X]))
                        node.log('finish fc1 forward')
Thread(target=self.send, args=(output, 'fc2', req['tag'])).start()
elif req['next'] == 'fc2':
node.log('fc2 gets data')
X = np.fromstring(bytestr, np.float32).reshape(2048)
node.input.append(X)
node.log('input size', str(len(node.input)))
# if the size is not enough, store in the queue and return.
if len(node.input) < 2:
node.release_lock()
return
# too many data packets, then drop some data.
while len(node.input) > 2:
node.input.popleft()
X = np.concatenate(node.input)
node.model = ml.fc2() if node.model is None else node.model
output = node.model.predict(np.array([X]))
node.log('finish model inference')
Thread(target=self.send, args=(output, 'initial', req['tag'])).start()
node.release_lock()
return
except Exception, e:
node.log('Error', e.message)
else:
raise schema.AvroException('unexpected message:', msg.getname())
def send(self, X, name, tag):
"""
Send data to other devices. The data packet contains data and models name.
Ip address of next device pop from Queue of a ip list.
Args:
X: numpy array
name: next device models name
tag: mark the current layer label
"""
node = Node.create()
queue = node.ip[name]
address = queue.get()
# initializer use port 9999 to receive data
port = 9999 if name == 'initial' else 12345
client = ipc.HTTPTransceiver(address, port)
requestor = ipc.Requestor(PROTOCOL, client)
node.name = name
data = dict()
data['input'] = X.tostring()
data['next'] = name
data['tag'] = tag
node.log('finish assembly')
start = time.time()
requestor.request('forward', data)
end = time.time()
node.timer(end - start)
node.log('node gets request back')
client.close()
queue.put(address)
class Handler(BaseHTTPRequestHandler):
def do_POST(self):
"""
do_POST is automatically called by ThreadedHTTPServer. It creates a new
responder for each request. The responder generates response and write
response to data sent back.
"""
self.responder = Responder()
call_request_reader = ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
""" Handle requests in separate thread. """
def main(cmd):
node = Node.create()
node.debug = cmd.debug
# read ip resources from config file
with open('resource/ip') as file:
address = yaml.safe_load(file)
node.ip['block234'] = Queue()
node.ip['block5'] = Queue()
node.ip['fc1'] = Queue()
node.ip['fc2'] = Queue()
node.ip['initial'] = Queue()
address = address['node']
for addr in address['block234']:
if addr == '#':
break
node.ip['block234'].put(addr)
for addr in address['block5']:
if addr == '#':
break
node.ip['block5'].put(addr)
for addr in address['fc1']:
if addr == '#':
break
node.ip['fc1'].put(addr)
for addr in address['fc2']:
if addr == '#':
break
node.ip['fc2'].put(addr)
for addr in address['initial']:
if addr == '#':
break
node.ip['initial'].put(addr)
server = ThreadedHTTPServer(('0.0.0.0', 12345), Handler)
server.allow_reuse_address = True
server.serve_forever()
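# --- Hedged usage sketch (editor addition, not part of the original repo) ---
# Illustrates how an upstream sender could push one 224x224 RGB frame into the
# first block of this pipeline, mirroring the Avro IPC calls in Responder.send
# above. The target address and the tag value are assumptions; real addresses
# come from the resource/ip config and tags are assigned by the initializer.
def example_push_frame(address='192.168.1.10', port=12345):
    frame = np.zeros((224, 224, 3), np.uint8)         # stand-in for a camera frame
    client = ipc.HTTPTransceiver(address, port)
    requestor = ipc.Requestor(PROTOCOL, client)
    data = dict()
    data['input'] = frame.tostring()                  # raw bytes, as node.invoke expects
    data['next'] = 'block1'                           # route to the first VGG block
    data['tag'] = 0                                   # illustrative tag value
    requestor.request('forward', data)                # the node replies with None as confirmation
    client.close()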
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='set to debug mode')
cmd = parser.parse_args()
main(cmd)
|
[
"caojiashen24@gmail.com"
] |
caojiashen24@gmail.com
|
ba1e1883e6c6bba64621e521b0b4aa974fe7895f
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/isis/isiscsnpstats1qtr.py
|
5e12cd0cc2e3b1aa65da434a7ed773805e4aebed
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 29,126
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IsisCsnpStats1qtr(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.isis.IsisCsnpStats1qtr", "ISIS CSNP Packets")
counter = CounterMeta("fastCsnpPktsRx", CounterCategory.COUNTER, "packets", "Fast CSNP Packets Recevied")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "fastCsnpPktsRxLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "fastCsnpPktsRxCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "fastCsnpPktsRxPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "fastCsnpPktsRxMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "fastCsnpPktsRxMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "fastCsnpPktsRxAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "fastCsnpPktsRxSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "fastCsnpPktsRxBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "fastCsnpPktsRxThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "fastCsnpPktsRxTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "fastCsnpPktsRxTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "fastCsnpPktsRxRate"
meta._counters.append(counter)
counter = CounterMeta("csnpPktsRx", CounterCategory.COUNTER, "packets", "CSNP Packets Recevied")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "csnpPktsRxLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "csnpPktsRxCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "csnpPktsRxPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "csnpPktsRxMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "csnpPktsRxMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "csnpPktsRxAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "csnpPktsRxSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "csnpPktsRxBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "csnpPktsRxThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "csnpPktsRxTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "csnpPktsRxTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "csnpPktsRxRate"
meta._counters.append(counter)
counter = CounterMeta("csnpPktsTx", CounterCategory.COUNTER, "packets", "CSNP Packets Sent")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "csnpPktsTxLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "csnpPktsTxCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "csnpPktsTxPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "csnpPktsTxMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "csnpPktsTxMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "csnpPktsTxAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "csnpPktsTxSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "csnpPktsTxBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "csnpPktsTxThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "csnpPktsTxTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "csnpPktsTxTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "csnpPktsTxRate"
meta._counters.append(counter)
meta.moClassName = "isisIsisCsnpStats1qtr"
meta.rnFormat = "CDisisIsisCsnpStats1qtr"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current ISIS CSNP Packets stats in 1 quarter"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.isis.If")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.isis.IsisCsnpStats")
meta.rnPrefixes = [
('CDisisIsisCsnpStats1qtr', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "csnpPktsRxAvg", "csnpPktsRxAvg", 46114, PropCategory.IMPLICIT_AVG)
prop.label = "CSNP Packets Recevied average value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxAvg", prop)
prop = PropMeta("str", "csnpPktsRxBase", "csnpPktsRxBase", 46109, PropCategory.IMPLICIT_BASELINE)
prop.label = "CSNP Packets Recevied baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxBase", prop)
prop = PropMeta("str", "csnpPktsRxCum", "csnpPktsRxCum", 46110, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "CSNP Packets Recevied cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxCum", prop)
prop = PropMeta("str", "csnpPktsRxLast", "csnpPktsRxLast", 46108, PropCategory.IMPLICIT_LASTREADING)
prop.label = "CSNP Packets Recevied current value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxLast", prop)
prop = PropMeta("str", "csnpPktsRxMax", "csnpPktsRxMax", 46113, PropCategory.IMPLICIT_MAX)
prop.label = "CSNP Packets Recevied maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxMax", prop)
prop = PropMeta("str", "csnpPktsRxMin", "csnpPktsRxMin", 46112, PropCategory.IMPLICIT_MIN)
prop.label = "CSNP Packets Recevied minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxMin", prop)
prop = PropMeta("str", "csnpPktsRxPer", "csnpPktsRxPer", 46111, PropCategory.IMPLICIT_PERIODIC)
prop.label = "CSNP Packets Recevied periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxPer", prop)
prop = PropMeta("str", "csnpPktsRxRate", "csnpPktsRxRate", 46119, PropCategory.IMPLICIT_RATE)
prop.label = "CSNP Packets Recevied rate"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxRate", prop)
prop = PropMeta("str", "csnpPktsRxSpct", "csnpPktsRxSpct", 46115, PropCategory.IMPLICIT_SUSPECT)
prop.label = "CSNP Packets Recevied suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxSpct", prop)
prop = PropMeta("str", "csnpPktsRxThr", "csnpPktsRxThr", 46116, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "CSNP Packets Recevied thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("csnpPktsRxThr", prop)
prop = PropMeta("str", "csnpPktsRxTr", "csnpPktsRxTr", 46118, PropCategory.IMPLICIT_TREND)
prop.label = "CSNP Packets Recevied trend"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxTr", prop)
prop = PropMeta("str", "csnpPktsRxTrBase", "csnpPktsRxTrBase", 46117, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "CSNP Packets Recevied trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsRxTrBase", prop)
prop = PropMeta("str", "csnpPktsTxAvg", "csnpPktsTxAvg", 46135, PropCategory.IMPLICIT_AVG)
prop.label = "CSNP Packets Sent average value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxAvg", prop)
prop = PropMeta("str", "csnpPktsTxBase", "csnpPktsTxBase", 46130, PropCategory.IMPLICIT_BASELINE)
prop.label = "CSNP Packets Sent baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxBase", prop)
prop = PropMeta("str", "csnpPktsTxCum", "csnpPktsTxCum", 46131, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "CSNP Packets Sent cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxCum", prop)
prop = PropMeta("str", "csnpPktsTxLast", "csnpPktsTxLast", 46129, PropCategory.IMPLICIT_LASTREADING)
prop.label = "CSNP Packets Sent current value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxLast", prop)
prop = PropMeta("str", "csnpPktsTxMax", "csnpPktsTxMax", 46134, PropCategory.IMPLICIT_MAX)
prop.label = "CSNP Packets Sent maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxMax", prop)
prop = PropMeta("str", "csnpPktsTxMin", "csnpPktsTxMin", 46133, PropCategory.IMPLICIT_MIN)
prop.label = "CSNP Packets Sent minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxMin", prop)
prop = PropMeta("str", "csnpPktsTxPer", "csnpPktsTxPer", 46132, PropCategory.IMPLICIT_PERIODIC)
prop.label = "CSNP Packets Sent periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxPer", prop)
prop = PropMeta("str", "csnpPktsTxRate", "csnpPktsTxRate", 46140, PropCategory.IMPLICIT_RATE)
prop.label = "CSNP Packets Sent rate"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxRate", prop)
prop = PropMeta("str", "csnpPktsTxSpct", "csnpPktsTxSpct", 46136, PropCategory.IMPLICIT_SUSPECT)
prop.label = "CSNP Packets Sent suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxSpct", prop)
prop = PropMeta("str", "csnpPktsTxThr", "csnpPktsTxThr", 46137, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "CSNP Packets Sent thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("csnpPktsTxThr", prop)
prop = PropMeta("str", "csnpPktsTxTr", "csnpPktsTxTr", 46139, PropCategory.IMPLICIT_TREND)
prop.label = "CSNP Packets Sent trend"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxTr", prop)
prop = PropMeta("str", "csnpPktsTxTrBase", "csnpPktsTxTrBase", 46138, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "CSNP Packets Sent trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("csnpPktsTxTrBase", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "fastCsnpPktsRxAvg", "fastCsnpPktsRxAvg", 46156, PropCategory.IMPLICIT_AVG)
prop.label = "Fast CSNP Packets Recevied average value"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxAvg", prop)
prop = PropMeta("str", "fastCsnpPktsRxBase", "fastCsnpPktsRxBase", 46151, PropCategory.IMPLICIT_BASELINE)
prop.label = "Fast CSNP Packets Recevied baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxBase", prop)
prop = PropMeta("str", "fastCsnpPktsRxCum", "fastCsnpPktsRxCum", 46152, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Fast CSNP Packets Recevied cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxCum", prop)
prop = PropMeta("str", "fastCsnpPktsRxLast", "fastCsnpPktsRxLast", 46150, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Fast CSNP Packets Recevied current value"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxLast", prop)
prop = PropMeta("str", "fastCsnpPktsRxMax", "fastCsnpPktsRxMax", 46155, PropCategory.IMPLICIT_MAX)
prop.label = "Fast CSNP Packets Recevied maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxMax", prop)
prop = PropMeta("str", "fastCsnpPktsRxMin", "fastCsnpPktsRxMin", 46154, PropCategory.IMPLICIT_MIN)
prop.label = "Fast CSNP Packets Recevied minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxMin", prop)
prop = PropMeta("str", "fastCsnpPktsRxPer", "fastCsnpPktsRxPer", 46153, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Fast CSNP Packets Recevied periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxPer", prop)
prop = PropMeta("str", "fastCsnpPktsRxRate", "fastCsnpPktsRxRate", 46161, PropCategory.IMPLICIT_RATE)
prop.label = "Fast CSNP Packets Recevied rate"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxRate", prop)
prop = PropMeta("str", "fastCsnpPktsRxSpct", "fastCsnpPktsRxSpct", 46157, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Fast CSNP Packets Recevied suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxSpct", prop)
prop = PropMeta("str", "fastCsnpPktsRxThr", "fastCsnpPktsRxThr", 46158, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Fast CSNP Packets Recevied thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("fastCsnpPktsRxThr", prop)
prop = PropMeta("str", "fastCsnpPktsRxTr", "fastCsnpPktsRxTr", 46160, PropCategory.IMPLICIT_TREND)
prop.label = "Fast CSNP Packets Recevied trend"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxTr", prop)
prop = PropMeta("str", "fastCsnpPktsRxTrBase", "fastCsnpPktsRxTrBase", 46159, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Fast CSNP Packets Recevied trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("fastCsnpPktsRxTrBase", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
b24094d77418e88f7477458debc1594cdeb2b6fa
|
021ce16e42120246557dfa210bef6e96a34623b6
|
/tornado_sqlalchemy_login/sqla/models.py
|
6769f29cf020d69ba5c5480e1a770a37fb2ae347
|
[
"Apache-2.0"
] |
permissive
|
timkpaine/tornado-sqlalchemy-login
|
f455b95d60d392202b46758ff230259785f7dc19
|
499bc7d79926b79352a3b9abdb864815e9896274
|
refs/heads/main
| 2022-12-01T22:28:06.620106
| 2022-11-22T01:46:21
| 2022-11-22T01:46:21
| 230,801,513
| 1
| 0
|
Apache-2.0
| 2023-09-04T13:35:46
| 2019-12-29T20:46:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,109
|
py
|
import secrets
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
TOKEN_WIDTH = 64
Base = declarative_base()
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
username = Column(String(100), nullable=False, unique=True)
password = Column(String(100), nullable=False)
_email = Column("email", String, nullable=False, unique=True)
apikeys = relationship("APIKey", back_populates="user")
admin = Column(Boolean, default=False)
@hybrid_property
def email(self):
return self._email
@email.setter
def email(self, email):
# TODO validate
self._email = email
def __repr__(self):
return "<User(id='{}', username='{}')>".format(self.id, self.username)
def to_dict(self):
ret = {}
for item in ("id", "username", "email"):
ret[item] = getattr(self, item)
return ret
def from_dict(self, d):
raise NotImplementedError()
class APIKey(Base):
__tablename__ = "apikeys"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("users.id", ondelete="cascade"))
user = relationship("User", back_populates="apikeys")
key = Column(
String(100), nullable=False, default=lambda: secrets.token_urlsafe(TOKEN_WIDTH)
)
secret = Column(
String(100), nullable=False, default=lambda: secrets.token_urlsafe(TOKEN_WIDTH)
)
@staticmethod
def generateKey():
return {
"key": secrets.token_urlsafe(TOKEN_WIDTH),
"secret": secrets.token_urlsafe(TOKEN_WIDTH),
}
def __repr__(self):
return "<Key(id='{}', key='{}', secret='***')>".format(self.id, self.key)
def to_dict(self):
ret = {}
for item in ("id", "user_id", "key", "secret"):
ret[item] = getattr(self, item)
return ret
def from_dict(self, d):
raise NotImplementedError()
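# --- Hedged usage sketch (editor addition) ---
# A minimal, self-contained example of wiring these models to a throwaway
# in-memory SQLite database; the engine URL and the sample user values are
# illustrative assumptions, not part of the original project.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite://")      # in-memory database, gone on exit
    Base.metadata.create_all(engine)         # creates the users and apikeys tables
    session = sessionmaker(bind=engine)()
    user = User(username="alice", password="hashed-pw", email="alice@example.com")
    user.apikeys.append(APIKey())            # key/secret filled in by the column defaults
    session.add(user)
    session.commit()
    print(user.to_dict())                    # e.g. {'id': 1, 'username': 'alice', 'email': ...}
    print(user.apikeys[0].to_dict()["key"])  # the generated url-safe token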
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
1ecafdd4fbbaadd0e9d02fa7e8c3c083d19fa32e
|
31a39d7cd26856caaccead96d0fa9c7acd469792
|
/python/Tree/test.py
|
73dc011f02df5a8a453038131d4ae11f73806294
|
[] |
no_license
|
lmy269/practices
|
b19fffb9f2aeae76c8941145353f0fd4f848835b
|
29dc5706ffc146c6c87b195fe94d60ae19e17297
|
refs/heads/master
| 2022-11-07T05:36:38.227660
| 2020-06-29T00:48:55
| 2020-06-29T00:48:55
| 271,398,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,510
|
py
|
from typing import List
import collections
from collections import deque
import unittest
class Solution(unittest.TestCase):
def getFolderNames(self, names: List[str]) -> List[str]:
cache = collections.defaultdict(int)
cacheDeque = collections.defaultdict(deque)
output = []
for value in names:
if cache[value] > 0:
while cacheDeque[value] and cacheDeque[value][0] <= cache[value] + 1:
na = cacheDeque[value].popleft()
if na == cache[value] + 1:
cache[value] += 1
output.append(value + f'({cache[value]})')
cache[value] += 1
else:
cache[value] += 1
output.append(value)
if value.endswith(')'):
values = value.split('(')
if len(values) > 1:
suffixNum = values[-1][0:-1]
try:
index = int(suffixNum)
realvalue = value[0:-(len(values[-1])+1)]
cacheDeque[realvalue].append(index)
except ValueError:
continue
return output
def test_case1(self):
self.assertEqual(self.getFolderNames(["kaido","kaido(1)","kaido","kaido(1)"]), ["kaido","kaido(1)","kaido(2)","kaido(1)(1)"])
if __name__ == "__main__":
unittest.main()
|
[
"miliu@microsoft.com"
] |
miliu@microsoft.com
|
3bb9eaa46dd4fe663b19945b36fc8bb4842a88a2
|
9bf13d7bd0b9e48dc31f1fd6d3e14d3402386ee9
|
/lissandra/dto/league.py
|
3c9a09b5bad2c347b6a48747ec03ef22bda04e5c
|
[
"MIT"
] |
permissive
|
Crimack/lissandra
|
04acd52c9b79e4d7946eef1e821bc8ce417a5a62
|
7f43ca70040ce054f335ad3b134e3bd4ee52ff3e
|
refs/heads/master
| 2022-12-05T01:01:15.825441
| 2020-08-31T23:06:06
| 2020-08-31T23:06:06
| 286,707,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
from .common import DtoObject
class LeagueEntryDto(DtoObject):
pass
class LeagueDto(DtoObject):
pass
class LeagueSummonerEntriesDto(DtoObject):
pass
class LeagueEntriesDto(DtoObject):
pass
class ChallengerLeagueListDto(DtoObject):
pass
class GrandmasterLeagueListDto(DtoObject):
pass
class MasterLeagueListDto(DtoObject):
pass
|
[
"cmckee41@qub.ac.uk"
] |
cmckee41@qub.ac.uk
|
71673848a7d58ab5d627f04535e6fa95bb7030d9
|
931a621adbfa598bd8740828a6d30d52e9821304
|
/tours/migrations/0012_auto_20190404_1849.py
|
91877263060bb2346c82fa0884fd459eb0ffc447
|
[] |
no_license
|
PooyaAlamirpour/Zartour
|
d4102d7095b187e1eca1acc8e395bcb54d134e1c
|
f2a59a9e5ce64eee58615cf47d0c1d534c22b864
|
refs/heads/master
| 2020-05-24T17:28:38.456960
| 2019-05-23T01:11:50
| 2019-05-23T01:11:50
| 187,383,468
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-04-04 18:49
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tours', '0011_auto_20190404_1643'),
]
operations = [
migrations.AddField(
model_name='tour',
name='description',
field=ckeditor.fields.RichTextField(default='22'),
preserve_default=False,
),
migrations.AddField(
model_name='tour',
name='documents',
field=ckeditor.fields.RichTextField(default='22'),
preserve_default=False,
),
]
|
[
"P.Alamirpour@gmail.com"
] |
P.Alamirpour@gmail.com
|
0305aa72da943843e6eb123959977d0d066a616e
|
9f044bfa3235a663e733e7a4c1c3476467de25a9
|
/apps/models.py
|
3a2b13933de4cc62924041b72b81be622e947904
|
[] |
no_license
|
richard1230/flask175_Home_Dynamically_Obtain_Carousel_Data
|
60cf85b4834135c2608c6822afae3b1cfe7d1457
|
115d90dbfa22331802277c068aed24bb40f59573
|
refs/heads/master
| 2020-11-30T14:42:10.091896
| 2019-12-27T10:08:41
| 2019-12-27T10:08:41
| 230,420,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
#encoding: utf-8
from exts import db
from datetime import datetime
class BannerModel(db.Model):
__tablename__ = 'banner'
id = db.Column(db.Integer,primary_key=True,autoincrement=True)
name = db.Column(db.String(255),nullable=False)
image_url = db.Column(db.String(255),nullable=False)
link_url = db.Column(db.String(255),nullable=False)
priority = db.Column(db.Integer,default=0)
create_time = db.Column(db.DateTime,default=datetime.now)
#
#
# class BoardModel(db.Model):
# __tablename__ = 'board'
# id = db.Column(db.Integer,primary_key=True,autoincrement=True)
# name = db.Column(db.String(20),nullable=False)
# create_time = db.Column(db.DateTime,default=datetime.now)
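# --- Hedged usage sketch (editor addition) ---
# Assuming `exts.db` is a Flask-SQLAlchemy instance bound to the app, a home
# view could fetch the carousel data roughly like this; the helper name and the
# limit of 4 banners are illustrative assumptions.
def get_banners(limit=4):
    return BannerModel.query.order_by(BannerModel.priority.desc()).limit(limit).all()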
|
[
"huyongjinnuaa@163.com"
] |
huyongjinnuaa@163.com
|
8df839776009a272be53927bd7b7708595455244
|
44a0a00037de8ec94fc1b7a47eb85ea160b047cd
|
/Algorithm/Programmers/STACK_QUE/기능개발.py
|
531fe6f98468f60d81107c5950202b3da36f420f
|
[] |
no_license
|
a23822/TIL
|
ad7a7d7aee73a3f788affb90a00ca889b9630d8c
|
47a135ce85a61f01fad6de50b8fd701ebdbcb1b0
|
refs/heads/master
| 2020-04-14T11:16:18.395890
| 2020-03-01T14:42:19
| 2020-03-01T14:42:19
| 163,809,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,511
|
py
|
"""
런타임 에러 30/100
"""
def solution(progresses, speeds):
answer = []
    # Build an array with the number of days each task needs to be completed
complete = []
index = len(speeds)
for i in range(index):
cDay_mok = (100 - progresses[i])%speeds[i]
if cDay_mok == 0:
cDay = int((100 - progresses[i])/speeds[i])
else:
cDay = int((100 - progresses[i])/speeds[i]) + 1
complete.append(cDay)
print(complete)
    # Fill the deployment-count array one release at a time
temp = 0
for i in range(index):
if i == 0:
answer.append(1)
else:
if complete[i] <= complete[temp]:
                answer[temp] += 1 ### bug: should be answer[-1] (fixed in solution2)
else:
answer.append(1)
temp = i
return answer
"""
통과 ,
"""
def solution2(progresses, speeds):
answer = []
    # Build an array with the number of days each task needs to be completed
complete = []
index = len(speeds)
for i in range(index):
cDay_mok = (100 - progresses[i])%speeds[i]
if cDay_mok == 0:
cDay = int((100 - progresses[i])/speeds[i])
else:
cDay = int((100 - progresses[i])/speeds[i]) + 1
complete.append(cDay)
    # Fill the deployment-count array one release at a time
temp = 0
for i in range(index):
if i == 0:
answer.append(1)
else:
if complete[i] <= complete[temp]:
                answer[-1] += 1 # this is the part I had judged wrongly before
else:
answer.append(1)
temp = i
return answer
"""
다른사람풀이
ZIP 활용
"""
def solution3(progresses, speeds):
Q=[]
for p, s in zip(progresses, speeds):
if len(Q)==0 or Q[-1][0]<-((p-100)//s):
Q.append([-((p-100)//s),1])
else:
Q[-1][1]+=1
return [q[1] for q in Q]
"""
다른사람풀이
Math ceil 이용
"""
from math import ceil
def solution4(progresses, speeds):
daysLeft = list(map(lambda x: (ceil((100 - progresses[x]) / speeds[x])), range(len(progresses))))
count = 1
retList = []
for i in range(len(daysLeft)):
try:
if daysLeft[i] < daysLeft[i + 1]:
retList.append(count)
count = 1
else:
daysLeft[i + 1] = daysLeft[i]
count += 1
except IndexError:
retList.append(count)
return retList
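"""
Hedged check (editor addition): with the sample from the problem statement as I
recall it, progresses=[93, 30, 55] and speeds=[1, 30, 5], the remaining days are
[7, 3, 9], so the releases ship in groups of [2, 1].
"""
if __name__ == "__main__":
    for f in (solution2, solution3, solution4):
        print(f([93, 30, 55], [1, 30, 5]))   # expected: [2, 1] for each solution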
|
[
"a23842@naver.com"
] |
a23842@naver.com
|
6849e3aa107e6582238894eb5ace39b380d9102b
|
74ddb61b608bf47b1320a3a66a13c0896bff4444
|
/samples/openapi3/client/petstore/python-experimental/petstore_api/models/whale.py
|
f39c28174404f4f3c268c11125743f22f796eebc
|
[
"Apache-2.0"
] |
permissive
|
ShakedH/openapi-generator
|
616f873dc29edf49e44c4685ebb7f46184ce62fd
|
07647b1a310410a28c95f8b4a9661c0ddeaf1db8
|
refs/heads/master
| 2022-09-08T07:40:44.550163
| 2020-05-28T13:43:38
| 2020-05-28T13:43:38
| 267,639,665
| 0
| 0
|
Apache-2.0
| 2020-05-28T16:26:44
| 2020-05-28T16:26:44
| null |
UTF-8
|
Python
| false
| false
| 6,430
|
py
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class Whale(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
              and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
              and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'class_name': (str,), # noqa: E501
'has_baleen': (bool,), # noqa: E501
'has_teeth': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_name': 'className', # noqa: E501
'has_baleen': 'hasBaleen', # noqa: E501
'has_teeth': 'hasTeeth', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, class_name, _check_type=True, _spec_property_naming=False, _path_to_item=(), _configuration=None, _visited_composed_classes=(), **kwargs): # noqa: E501
"""whale.Whale - a model defined in OpenAPI
Args:
class_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                          composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
has_baleen (bool): [optional] # noqa: E501
has_teeth (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_name = class_name
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
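# --- Hedged usage sketch (editor addition); the argument values are illustrative ---
# `class_name` is the only required argument; the two booleans are optional kwargs.
def _example_whale():
    return Whale(class_name="whale", has_baleen=True, has_teeth=False)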
|
[
"noreply@github.com"
] |
noreply@github.com
|
c85707eeb5aa3aeb170c7db7d126ea2bef1dd8b6
|
771cf512a2875b060faa5e4040aae3a8de654998
|
/python/strictConcaveArrayRecursion.py
|
2cd8e4310ad745583f6689bd230222fb6f39d3c7
|
[] |
no_license
|
anirudhit/ab-projects
|
466e4e5b6fc3ad7ba0d5b2983567a2dc68a7c8df
|
bdf2030197d6e908e0db214d46ab1d674ee59449
|
refs/heads/master
| 2020-07-19T11:50:11.342317
| 2019-11-20T06:35:58
| 2019-11-20T06:35:58
| 206,443,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
def findMaximum(arr, low, high):
    # Base Case: Only one element is present in arr[low..high]
if low == high:
return arr[low]
    # If there are two elements and the first is greater, then
    # the first element is the maximum
if high == low + 1 and arr[low] >= arr[high]:
return arr[low];
    # If there are two elements and the second is greater, then
    # the second element is the maximum
if high == low + 1 and arr[low] < arr[high]:
return arr[high]
    mid = (low + high)//2  # equivalently: low + (high - low)//2
# If we reach a point where arr[mid] is greater than both of
# its adjacent elements arr[mid-1] and arr[mid+1], then arr[mid]
    # is the maximum element
if arr[mid] > arr[mid + 1] and arr[mid] > arr[mid - 1]:
return arr[mid]
# If arr[mid] is greater than the next element and smaller than the previous
    # element, then the maximum lies on the left side of mid
if arr[mid] > arr[mid + 1] and arr[mid] < arr[mid - 1]:
return findMaximum(arr, low, mid-1)
else: # when arr[mid] is greater than arr[mid-1] and smaller than arr[mid+1]
return findMaximum(arr, mid + 1, high)
# Driver program to check the above functions
# arr = [1, 3, 50, 10, 9, 7, 6]
arr = [7, 5, 3, -1, -7, 5, 9, 100]
n = len(arr)
print ("The maximum element is %d"% findMaximum(arr, 0, n-1))
|
[
"anirudhit@outlook.com"
] |
anirudhit@outlook.com
|
3e2af68956ab395d7d6c3ee1a4238c837c4b51cc
|
b2472967910be9c12576f0f97d33bca0576a8667
|
/atcoder-old/2019/0901_abc139/d.py
|
0f4f619ee2e61c90c3c79b0e44b9d3a7b51c02c3
|
[] |
no_license
|
ykmc/contest
|
85c3d1231e553d37d1235e1b0fd2c6c23f06c1e4
|
69a73da70f7f987eb3e85da503ea6da0744544bd
|
refs/heads/master
| 2020-09-01T22:56:10.444803
| 2020-07-14T11:36:43
| 2020-07-14T11:36:43
| 217,307,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Python3 (3.4.3)
import sys
input = sys.stdin.readline
# -------------------------------------------------------------
# function
# -------------------------------------------------------------
# -------------------------------------------------------------
# main
# -------------------------------------------------------------
N = int(input())
# i % (i+1) = i is optimal; the remainders obtained are 1 to N-1
print(N*(N-1)//2)
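# Worked check (editor addition): for N = 3 the best pairing gives
# 1 % 2 + 2 % 3 + 3 % 1 = 1 + 2 + 0 = 3, which matches N*(N-1)//2 = 3.
assert 1 % 2 + 2 % 3 + 3 % 1 == 3 * 2 // 2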
|
[
"34961813+ykmc@users.noreply.github.com"
] |
34961813+ykmc@users.noreply.github.com
|
4b0bb27aea8f07ee464581db2188e2705e358034
|
35b7b5c95320fd3548c7e929ea7191ff97f0008e
|
/tver.py
|
ffc58e05042ec645f128dcc8c550e4bfe82ff731
|
[] |
no_license
|
gretastd/basik
|
2fcf08dff1c0f6b82e99b9b9439696392e26b8a2
|
a85fd993ce3aa7c15a6772df0d431e89366c7169
|
refs/heads/master
| 2020-09-13T20:21:31.493173
| 2019-11-20T09:26:12
| 2019-11-20T09:26:12
| 222,892,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
def gumar(n):
b=0
while n!=0:
a=n%10
n=int(n/10)
b=b+a
print(b)
def artadryal(n):
b=1
while n!=0:
a=n%10
n=int(n/10)
b=b*a
print(b)
def agicdzax(n):
while n!=0:
a=n%10
b=int(n/10)
b=a*10+b
print(b)
n=0
def dzaxicaj(n):
while n!=0:
a=n%10
b=int(n/10)
b=b*10+a
print(b)
n=0
def erku(n):
while n!=0:
a=n%10
b=int(n/10)
if a==2:
print("true")
else:
print("false")
n=0
def kent(n):
while n!=0:
a=n%10
b=int(n/10)
if a%2!=0:
print("true")
else:
print("false")
n=0
def arjeq(n,x):
import math
k=1
while k<=n:
a=math.pow(x,k)
print(a)
k=k+1
|
[
"stepanyangreta98@gmail.com"
] |
stepanyangreta98@gmail.com
|
7b6b855791edd206f5db635b20e927ab22d10a3d
|
d670ceefd62a673d7f071ad8255ea1ced2d1be1f
|
/python/example/udp_client.py
|
424b47e0953f4aab5a8c34f7f73840dff577d7b1
|
[] |
no_license
|
wang-jinfeng/bigdata
|
2bfaecefd4aaac83c81e1dbce44cd596da717673
|
b031a5fb2d06c7a2c2942a19d8c818f50ec9b8f6
|
refs/heads/master
| 2022-12-01T22:55:23.425409
| 2021-01-20T10:08:56
| 2021-01-20T10:08:56
| 199,104,425
| 0
| 0
| null | 2022-11-16T01:56:42
| 2019-07-27T02:18:29
|
Scala
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for data in [b'Michael', b'Tracy', b'Sarah']:
    # Send data:
s.sendto(data, ('127.0.0.1', 9998))
    # Receive data:
print(s.recv(1024).decode('utf-8'))
s.close()
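# --- Hedged companion sketch (editor addition) ---
# A minimal UDP echo server matching the client above; it binds the same address
# and port the client targets (127.0.0.1:9998). Run it in a separate process
# before starting the client. The greeting format is an illustrative assumption.
def run_echo_server():
    srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    srv.bind(('127.0.0.1', 9998))
    while True:
        data, addr = srv.recvfrom(1024)              # wait for a datagram
        srv.sendto(b'Hello, ' + data + b'!', addr)   # reply to the sender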
|
[
"jinfeng.wang@mobvista.com"
] |
jinfeng.wang@mobvista.com
|
cc980694f1318e65adc551268f9f91e3059bed20
|
be5401cad765484d2971b160606ba89596364bb8
|
/cbsearch.py
|
c13796d0f61a58b8cf31a0121f44610ef8690b45
|
[] |
no_license
|
Jashpatel1/Collaborative-Pathfinder
|
60117b1aa7baa802c2bda94f8c15aa5563492a90
|
24a6f7772997b509b6fc8a2101d9b50cbfba6d25
|
refs/heads/main
| 2023-01-08T13:02:16.370373
| 2020-11-16T03:39:10
| 2020-11-16T03:39:10
| 305,340,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,264
|
py
|
from util import *
from gworld import *
from visualize import *
import a_star_simple
import a_star_constraint
import random
def get_constraint_astar_path(world, start, goal, constraints=None):
ret_path = a_star_constraint.find_path(world.get_nbor_cells,
start,
goal,
lambda cell: 1,
lambda cell, constraints: world.passable(
cell, constraints),
world.tyx_dist_heuristic,
constraints)
return ret_path
def get_astar_path(world, start, goal):
ret_path, pathcost = a_star_simple.find_path(world.get_nbor_cells,
start,
goal,
lambda cell: 1,
lambda cell: world.passable(cell))
return ret_path, pathcost
def path_spacetime_conv(path_yx, tstart=0):
path_tyx = []
tcurr = tstart
for step_yx in path_yx:
step_tyx = (tcurr, step_yx[0], step_yx[1])
path_tyx.append(step_tyx)
tcurr = tcurr + 1
return (tcurr - tstart), path_tyx
def cell_spacetime_conv(cell, t):
return ((t, cell[0], cell[1]))
def get_max_pathlen(agents, path_seq):
max_pathlen = 0
for agent in agents:
pathlen = len(path_seq[agent])
max_pathlen = pathlen if pathlen > max_pathlen else max_pathlen
return max_pathlen
def path_equalize(agents, path_seq, max_pathlen=-1):
if(max_pathlen < 0):
max_pathlen = get_max_pathlen(agents, path_seq)
for agent in agents:
path = path_seq[agent]
lstep = path[-1]
for step in range(len(path), max_pathlen + TWAIT):
path.append((step, lstep[1], lstep[2]))
path_seq[agent] = path
return path_seq
def steptime_agtb(a, b):
if(a[0] > b[0]):
return True
return False
def tplusone(step):
return ((step[0]+1, step[1], step[2]))
def get_conflicts(agents, path_seq, conflicts_db=None):
tyx_map = dict()
if(not bool(conflicts_db)):
conflicts_db = dict()
random.shuffle(agents)
for agent in agents:
if(agent not in conflicts_db):
conflicts_db[agent] = set()
if(path_seq[agent]):
pathlen = len(path_seq[agent])
for t, tstep in enumerate(path_seq[agent]):
twosteps = [tstep]
if(t > 0):
twosteps.append(tplusone(path_seq[agent][t-1]))
for step in twosteps:
if(step not in tyx_map):
tyx_map[step] = agent
else:
otheragent = tyx_map[step]
if(step not in conflicts_db[agent] and agent != otheragent):
conflicts_db[agent].update({step})
return conflicts_db
def evaluate_path(path_seq, agent, conflicts_db):
all_okay = True
tpath = path_seq[agent]
tconstraints = conflicts_db[agent]
for constraint in tconstraints:
        if(constraint in tpath):
            all_okay = False
    return all_okay
def search(agents, world):
path_seq = dict()
pathcost = dict()
agent_goal = dict()
max_pathlen = 0
restart_loop = False
for agent in agents:
start = world.aindx_cpos[agent]
goal = world.aindx_goal[agent]
pathseq_yx, pathcost[agent] = get_astar_path(world, start, goal)
pathlen, path_seq[agent] = path_spacetime_conv(pathseq_yx)
max_pathlen = pathlen if pathlen > max_pathlen else max_pathlen
conflicts_db = get_conflicts(agents, path_seq)
iter_count = 1
pickd_agents = []
while(True):
max_pathlen = get_max_pathlen(agents, path_seq)
path_seq = path_equalize(agents, path_seq, max_pathlen)
if(iter_count % 2 == 1):
pickd_agents = []
nagents = len(agents)
random.shuffle(agents)
pickd_agents = agents[(nagents/2):]
else:
temp_pickd_agents = []
for agent in agents:
if agent not in pickd_agents:
temp_pickd_agents.append(agent)
pickd_agents = temp_pickd_agents
if(restart_loop):
restart_loop = False
            print('\n\nStuck between a rock and a hard place?\nRapid Random Restart to the rescue!\n\n')
for agent in agents:
conflicts_db[agent] = set()
start = world.aindx_cpos[agent]
goal = world.aindx_goal[agent]
pathseq_yx, pathcost[agent] = get_astar_path(
world, start, goal)
pathlen, path_seq[agent] = path_spacetime_conv(pathseq_yx)
max_pathlen = pathlen if pathlen > max_pathlen else max_pathlen
conflicts_db = get_conflicts(agents, path_seq, conflicts_db)
for agent in pickd_agents:
if (agent in conflicts_db):
constraints = conflicts_db[agent]
constraints.update({})
if(bool(constraints)):
start = cell_spacetime_conv(world.aindx_cpos[agent], 0)
goal = cell_spacetime_conv(
world.aindx_goal[agent], SOMETIME)
nw_path, nw_pathlen = get_constraint_astar_path(
world, start, goal, constraints)
if(nw_path):
path_seq[agent] = nw_path
evaluate_path(path_seq, agent, conflicts_db)
else:
path_seq[agent] = [start]
restart_loop = True
if not restart_loop:
path_seq = path_equalize(agents, path_seq, SOMETIME)
conflicts_db = get_conflicts(agents, path_seq, conflicts_db)
break_loop = True
for agent in agents:
ubrokn_conflicts = []
constraints = conflicts_db[agent]
for step in path_seq[agent]:
if(step in constraints):
ubrokn_conflicts.append(step)
if(ubrokn_conflicts):
                    print('## A', agent, 'UC:', ubrokn_conflicts)
                    print('Yes, there are conflicts!')
break_loop = False
goal = cell_spacetime_conv(world.aindx_goal[agent], SOMETIME)
if(path_seq[agent][-1] != goal):
break_loop = False
iter_count = iter_count + 1
if(break_loop and not restart_loop):
                print('Loop break!')
break
for agent in agents:
        print('\nAgent ', agent, ' cost:', pathcost[agent], ' Path -- ', path_seq[agent])
for agent in agents:
if agent in conflicts_db:
            print('\nAgent ', agent, ' Conflicts -- ', conflicts_db[agent])
return path_seq
|
[
"patel.5@iitj.ac.in"
] |
patel.5@iitj.ac.in
|
92a78afdb24938e410a82db5c1f25bc42579d344
|
6c3d60415479708090a2e9760bb7b620458aa96d
|
/Python/django/belt_reviewer/apps/login/urls.py
|
4e095ee0ec0be9bb0445a4f3e9dd7843c4e431ed
|
[] |
no_license
|
bopopescu/dojo_assignments
|
721868825268a242300edcb5f94b1ec32cbc6727
|
45cf4ecf1fcb513699f40d57437be7c9ab74800a
|
refs/heads/master
| 2022-11-18T09:36:37.527001
| 2018-04-11T19:40:55
| 2018-04-11T19:40:55
| 281,831,499
| 0
| 0
| null | 2020-07-23T02:34:45
| 2020-07-23T02:34:45
| null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'process$', views.process),
url(r'books$', views.success),
url(r'login$', views.login),
url(r'add$', views.add),
]
|
[
"victordevera24@gmail.com"
] |
victordevera24@gmail.com
|
0ab766f09b072cf8494f45960dfbd183965b6d10
|
6b971e3401fba1498d8b4f3e1b4a46971ca6d0a9
|
/examples/scroll_area_dynamic_content_layouts.py
|
82af4fc9d3357c0a602b0d880ee3a28e95f86fec
|
[] |
no_license
|
brent-stone/PySide6
|
1e41d76363343bfd238c60a93e11b4a9ac58d57c
|
2927ddbba51b677e4a0eb502b287b8a8d9e964ad
|
refs/heads/main
| 2023-08-14T23:09:29.669245
| 2021-09-05T00:50:41
| 2021-09-05T00:50:41
| 403,183,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
from PySide6.QtWidgets import *
import sys
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self, None)
self.setWindowTitle("My GUI")
self.resize(300, 400)
self.scroll = QScrollArea()
self.scroll.setWidgetResizable(True) # CRITICAL
self.inner = QFrame(self.scroll)
self.inner.setLayout(QVBoxLayout())
self.scroll.setWidget(self.inner) # CRITICAL
self.scroll_layout_dict = {}
b = QPushButton(self.inner)
b.setText("Populate")
b.clicked.connect(self.populate)
self.inner.layout().addWidget(b)
# When creating MainWindow() from scratch like this,
# it's necessary to tell PySide6 which widget is
# the 'central' one for the MainWindow().
self.setCentralWidget(self.scroll)
self.show()
def populate(self):
for i in range(10):
b = QPushButton(self.inner)
b.setText(str(i))
b.clicked.connect(self.del_button)
checkbox = QCheckBox(f"Check {i}!", self.inner)
new_layout = QHBoxLayout(self.inner)
new_layout.addWidget(b)
new_layout.addWidget(checkbox)
n = self.inner.layout().count()
self.inner.layout().insertLayout(n, new_layout)
self.scroll_layout_dict[b] = new_layout
def del_button(self):
button: QPushButton = self.sender()
        layout: QHBoxLayout = self.scroll_layout_dict[button]
while layout.count() > 0:
layout.takeAt(0).widget().deleteLater()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec())
|
[
"brent.jk.stone@gmail.com"
] |
brent.jk.stone@gmail.com
|
8f948f5da2515448b700d72a799435448074270c
|
7eeb873b01b1da3401d5b8a802438bcc0e22de22
|
/61_Sets/main.py
|
447df8aecab79571c7d8207257ce7002d3b8894b
|
[] |
no_license
|
Bhushan2581/Python-tuts
|
ed5f1606f23aa1d4370d4ed2e4171c25cfc1f206
|
4f2acc839874fcbc3407ba1e0dc6e2d44d6e6179
|
refs/heads/master
| 2022-07-29T22:38:17.831404
| 2020-05-17T03:48:24
| 2020-05-17T03:48:24
| 264,577,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
pythonlist = ["C", "Python", "Java"]
print(pythonlist[1])
pythonlist = ["C", "Python", "Java"]
print(pythonlist[-1])
pythonlist = ["java", "C", "C++", "PHP", "HTML", "Ruby", "C#"]
print(pythonlist[2:5])
pythonlist = ["java", "C", "C++", "PHP", "HTML", "Ruby", "C#"]
pythonlist[1] = "Laravel"
print(pythonlist)
pythonlist = ["Java", "C", "Python"]
for x in pythonlist:
print(x)
|
[
"noreply@github.com"
] |
noreply@github.com
|
4c82d791ac005304fc15575acf85f39269284e1a
|
f39593e3d41150feaa45dea39050ce52d0f835a0
|
/crimeLA.py
|
40ff5a2de61213df34debd1e54f913da35f3fdb3
|
[] |
no_license
|
jliellen/Vizathon
|
888ed41c87326af36c176981927b2f97bbdb9abf
|
5abb3624c334551cc9023f87c4fbe78808b2fc7b
|
refs/heads/main
| 2023-06-20T18:50:45.851710
| 2021-08-01T16:14:38
| 2021-08-01T16:14:38
| 390,940,580
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import folium
from folium import Marker
from folium.plugins import MarkerCluster
data = pd.read_csv(r'Crime_Data_from_2010_to_2019.csv')
data = data[(data['LAT']!= 0) & (data['LON'] !=0)]
data = data.drop(columns=['DATE OCC', 'TIME OCC', 'AREA ', 'Part 1-2', 'Crm Cd Desc', 'Mocodes', 'Premis Cd', 'Premis Desc', 'Weapon Used Cd', 'Weapon Desc', 'Status', 'Status Desc', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street'])
data['Date Rptd'] = pd.to_datetime(data['Date Rptd'], errors='coerce')
data['year']= data['Date Rptd'].dt.year
# Get the most recent year in the data
freshDate = max(data.year)
# Get the crime data of the year
crimeByYear = data.loc[(data.year == freshDate)]
# Create the map
my_map = folium.Map(location=[34.052235,-118.243683], tiles='OpenStreetMap', zoom_start=4)
# Add points to the map
mc = MarkerCluster()
for idx, row in crimeByYear.iterrows():
if not math.isnan(row['LON']) and not math.isnan(row['LAT']):
# Create pop-up message for each point
pop = ["Vict Age", "Vict Sex", "Vict Descent"]
popmsg = [str(item) + ':' + str(row[item]) for item in pop]
popmsg = '\n'.join(popmsg)
# Add marker to mc
mc.add_child(Marker(location=[row['LAT'], row['LON']], popup=popmsg, tooltip=str(row['LOCATION'])))
# Add mc to map
my_map.add_child(mc)
# Save the map
my_map.save('map_1.html')
|
[
"noreply@github.com"
] |
noreply@github.com
|
c7198b3709f26fddaf360a7db559d549f4583b89
|
a65e5dc54092a318fc469543c3b96f6699d0c60b
|
/Personel/Sandesh/Python/23feb/reverse_string.py
|
5666f30c06762d1e26e632121ba09969fd9dc1f1
|
[] |
no_license
|
shankar7791/MI-10-DevOps
|
e15bfda460ffd0afce63274f2f430445d04261fe
|
f0b9e8c5be7b28298eb6d3fb6badf11cd033881d
|
refs/heads/main
| 2023-07-04T15:25:08.673757
| 2021-08-12T09:12:37
| 2021-08-12T09:12:37
| 339,016,230
| 1
| 0
| null | 2021-08-12T09:12:37
| 2021-02-15T08:50:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 96
|
py
|
string = input("Please enter a string : ")
print("Reverse of string is : ")
print(string[::-1])
|
[
"sandeshpatekar20@gmail.com"
] |
sandeshpatekar20@gmail.com
|
f5586a51b83a59377fbf0ba3e1c1f7cbfca452e3
|
0fd0495f194bc22f0322d02ebabe8add9bf5814c
|
/python_concept_references/excel.py
|
a0cd93b32a6802edc9ce767d467a37f6082774d4
|
[] |
no_license
|
WorldPierce/Automate_the_Boring_Stuff_With_Python
|
bd6982bddefa712598dc5e6eb4cf2a2aa87b2c1f
|
a0bc6ba49d92b25f4fbe8d4fdd9385f294567e4c
|
refs/heads/master
| 2020-12-30T09:58:38.341981
| 2017-08-05T19:51:10
| 2017-08-05T19:51:10
| 99,248,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
# install openpyxl to open spreadsheets
import openpyxl, os
os.chdir('c:\\wffpi\\documents')
workbook = openpyxl.load_workbook('example.xlsx')
type(workbook)
sheet = workbook['Sheet1']  # get_sheet_by_name() is deprecated in newer openpyxl
type(sheet)
workbook.sheetnames  # get_sheet_names() is deprecated; sheetnames lists the sheet names
cell = sheet['A1'] # gets cell object for cell A1
cell.value # gives you cell value
str(sheet['A1'].value) # prints string value of any cell
sheet.cell(row=1, column=2) # returns cell object same as sheet['B1']
# useful in loops so you don't have to convert column numbers to letters (A, B, ...)
for i in range(1,8):
print(i, sheet.cell(row=i, column=2).value)
|
[
"bildo316@gmail.com"
] |
bildo316@gmail.com
|