blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4bbdbd678b27608a4aa9e8cac61a1a47f748f054
|
15b008c82d0f564f321a1a55c1275f21c1dc1387
|
/tbs_dc_dtv/libs/display/templatetags/mytags.py
|
ce4a0c2ed7ca81eeac7bce2a6c6b5f9c548f60a0
|
[] |
no_license
|
WallaceWGT/tbs-dc-dtv-new
|
519e632f1c3cb86a65cf06ef04429b8dc0ffd699
|
7fce2a71fc8aeb9f131dabbb4cfe9de73b0b5f40
|
refs/heads/master
| 2020-05-04T15:39:46.424238
| 2019-04-03T09:05:04
| 2019-04-03T09:05:04
| 179,250,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# auth: wallace.wang
"""
标签:更加前端传递数据,对模板进行数据处理
"""
from django import template
from tbs_dc_dtv.libs.display import models
register = template.Library()
@register.filter
def get_report_title(menu_obj):
    """Return every report object attached to the given menu.

    :param menu_obj: menu model instance
    :return: queryset of related BdpVisualReport rows
    """
    return menu_obj.bdpvisualreport_set.all()
@register.filter
def get_menu_report_number(menu_obj):
    """Return how many reports hang off this menu.

    Uses QuerySet.count() so the database does the counting, instead of
    len() on .all() which materialises every related row first.

    :param menu_obj: menu model instance
    :return: number of related BdpVisualReport rows (int)
    """
    return menu_obj.bdpvisualreport_set.count()
@register.filter
def get_role_name(role_obj):
    """Return the role name for the given user.

    Looks up BdpAuthUserRole by user id and returns the related role's
    name.  The original indexed [0] into the queryset and crashed with
    IndexError whenever the user had no role row; this returns None in
    that case, which Django templates render as empty.

    :param role_obj: user id (matched against uid_id)
    :return: role name string, or None when the user has no role
    """
    return (models.BdpAuthUserRole.objects
            .filter(uid_id=role_obj)
            .values_list('rid__role_name', flat=True)
            .first())
@register.filter
def get_menu_name(mid):
    """Look up a menu by its id and return its display name.

    :param mid: menu primary key
    :return: the menu's menu_name string
    """
    menu = models.BdpVisualMenu.objects.filter(mid=mid)[0]
    return menu.menu_name
@register.filter
def get_report(mid):
    """Return the report objects belonging to menu `mid`.

    :param mid: menu id
    :return: queryset of BdpVisualReport rows for that menu
    """
    return models.BdpVisualReport.objects.filter(mid=mid)
|
[
"1241499917@qq.com"
] |
1241499917@qq.com
|
6a365c4146e1e1c107834735b5f35457205291ff
|
bdb183769c133f25e92dd6f2a9653fe69cecb715
|
/fds.analyticsapi.engines/fds/analyticsapi/engines/configuration.py
|
745c05de417115239b6199f7ee811b047c402ec7
|
[
"Apache-2.0"
] |
permissive
|
saigiridhar21/analyticsapi-engines-python-sdk
|
5e6ec364791b63250ef7157eee8635c15e31b4f2
|
bb7c3d20c37dc7a30071962f610ad02db6440117
|
refs/heads/master
| 2022-12-06T22:13:11.551527
| 2020-09-02T17:30:07
| 2020-09-02T17:30:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,160
|
py
|
# coding: utf-8
"""
Engines API
Allow clients to fetch Engines Analytics through APIs. # noqa: E501
The version of the OpenAPI document: 2
Contact: analytics.api.support@factset.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
    """NOTE: This class is auto generated by OpenAPI Generator

    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    :param host: Base url
    :param api_key: Dict to store API key(s)
    :param api_key_prefix: Dict to store API prefix (e.g. Bearer)
    :param username: Username for HTTP basic authentication
    :param password: Password for HTTP basic authentication
    """

    def __init__(self, host="https://api.factset.com",
                 api_key=None, api_key_prefix=None,
                 username="", password=""):
        """Constructor
        """
        self.host = host
        """Default Base url
        """
        self.temp_folder_path = None
        """Temp file folder for downloading files
        """
        # Authentication Settings
        self.api_key = {}
        if api_key:
            self.api_key = api_key
        """dict to store API key(s)
        """
        self.api_key_prefix = {}
        if api_key_prefix:
            self.api_key_prefix = api_key_prefix
        """dict to store API prefix (e.g. Bearer)
        """
        self.refresh_api_key_hook = None
        """function hook to refresh API key if expired
        """
        self.username = username
        """Username for HTTP basic authentication
        """
        self.password = password
        """Password for HTTP basic authentication
        """
        self.logger = {}
        """Logging Settings
        """
        self.logger["package_logger"] = logging.getLogger(
            "fds.analyticsapi.engines")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        """Log format
        """
        self.logger_stream_handler = None
        """Log stream handler
        """
        self.logger_file_handler = None
        """Log file handler
        """
        self.logger_file = None
        """Debug file location
        """
        self.debug = False
        """Debug switch
        """
        self.verify_ssl = True
        """SSL/TLS verification
           Set this to false to skip verifying SSL certificate when calling API
           from https server.
        """
        self.ssl_ca_cert = None
        """Set this to customize the certificate file to verify the peer.
        """
        self.cert_file = None
        """client certificate file
        """
        self.key_file = None
        """client key file
        """
        self.assert_hostname = None
        """Set this to True/False to enable/disable SSL hostname verification.
        """
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        """urllib3 connection pool's maximum number of connections saved
           per pool. urllib3 uses 1 connection as default value, but this is
           not the best value when you are making a lot of possibly parallel
           requests to the same host, which is often the case here.
           cpu_count * 5 is used as default value to increase performance.
        """
        self.proxy = None
        """Proxy URL
        """
        self.proxy_headers = None
        """Proxy headers
        """
        self.safe_chars_for_path_param = ''
        """Safe chars for path_param
        """
        self.retries = None
        """Adding retries to override urllib3 default value 3
        """
        # Enable client side validation
        self.client_side_validation = True

    @property
    def logger_file(self):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in six.iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        if self.refresh_api_key_hook is not None:
            self.refresh_api_key_hook(self)
        key = self.api_key.get(identifier)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(
            basic_auth=self.username + ':' + self.password
        ).get('authorization')

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        return {
            'Basic':
                {
                    'type': 'basic',
                    'in': 'header',
                    'key': 'Authorization',
                    'value': self.get_basic_auth_token()
                },
        }

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 2\n"\
               "SDK Package Version: 4.0.0".\
               format(env=sys.platform, pyversion=sys.version)

    def get_host_settings(self):
        """Gets an array of host settings

        :return: An array of host settings
        """
        return [
            {
                'url': "https://api.factset.com",
                'description': "No description provided",
            }
        ]

    def get_host_from_settings(self, index, variables=None):
        """Gets host URL based on the index and variables

        :param index: array index of the host settings
        :param variables: hash of variable and the corresponding value
        :return: URL based on host settings
        """
        # Bug fix: `variables={}` was a shared mutable default argument;
        # normalise None to a fresh dict instead.
        variables = {} if variables is None else variables
        servers = self.get_host_settings()

        # check array index out of bound
        if index < 0 or index >= len(servers):
            raise ValueError(
                "Invalid index {} when selecting the host settings. Must be less than {}"  # noqa: E501
                .format(index, len(servers)))

        server = servers[index]
        url = server['url']

        # go through variables and assign a value
        # Bug fix: our host settings define no 'variables' key at all, so
        # server['variables'] raised KeyError; .get() makes the loop a no-op
        # for hosts without variables (matches later generator output).
        for variable_name in server.get('variables', {}):
            if variable_name in variables:
                if variables[variable_name] in server['variables'][
                        variable_name]['enum_values']:
                    url = url.replace("{" + variable_name + "}",
                                      variables[variable_name])
                else:
                    raise ValueError(
                        "The variable `{}` in the host URL has invalid value {}. Must be {}."  # noqa: E501
                        .format(
                            variable_name, variables[variable_name],
                            server['variables'][variable_name]['enum_values']))
            else:
                # use default value
                url = url.replace(
                    "{" + variable_name + "}",
                    server['variables'][variable_name]['default_value'])

        return url
|
[
"afernandes@factset.com"
] |
afernandes@factset.com
|
ad5bab40d479dd14bb8226a16d9ccc4d7805b718
|
f9c0365f4f28700e2a815124a13d80f19ee17fa2
|
/app/calculator/views.py
|
415f2693332c8715dc6e849143f71a028e7708dc
|
[
"MIT"
] |
permissive
|
dev-johnlopez/assignably
|
ddc1062f43c6030e5b0b9470b5bb04178bae1ec3
|
056960556dd75dfce064970887f37a44a8c66aec
|
refs/heads/master
| 2023-02-09T03:20:20.124887
| 2019-12-03T01:30:50
| 2019-12-03T01:30:50
| 188,880,472
| 1
| 0
|
MIT
| 2023-02-02T04:59:54
| 2019-05-27T16:57:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 10,975
|
py
|
from flask import g, render_template, flash, redirect, url_for, request, \
Blueprint
from app import db
# from app.forms.search import SearchForm
from flask_security import current_user
from app.deals.models import Deal
from app.calculator.models import Proforma, LineItem
from app.calculator.forms import ProformaForm, LineItemForm
import locale
bp = Blueprint('calculator', __name__)
# @bp.before_app_request
# def before_request():
# g.search_form = SearchForm()
@bp.route('/<proforma_id>')
def view(proforma_id):
    """Render the read-only view page for a single proforma."""
    record = Proforma.query.get(proforma_id)
    context = {
        "title": "View",
        "proforma": record,
        "form": ProformaForm(obj=record),
    }
    return render_template('proformas/view.html', **context)
@bp.route('/<proforma_id>/details')
def details(proforma_id):
    """Render the details page for a single proforma."""
    record = Proforma.query.get(proforma_id)
    bound_form = ProformaForm(obj=record)
    return render_template(
        'proformas/details.html',
        title="Details", proforma=record, form=bound_form)
@bp.route('/<proforma_id>/calculations')
def calculations(proforma_id):
    """Render the calculations page for a single proforma (no form)."""
    record = Proforma.query.get(proforma_id)
    return render_template(
        'proformas/calculations.html', title="Calculations", proforma=record)
@bp.route('/add/<deal_id>', methods=['GET', 'POST'])
def create(deal_id):
    """Show the new-proforma wizard; on valid POST attach it to the deal."""
    parent_deal = Deal.query.get(deal_id)
    wizard_form = ProformaForm()
    if not wizard_form.validate_on_submit():
        # GET, or invalid POST: re-render the wizard.
        return render_template('calculator/wizard.html',
                               title="Add Proforma",
                               deal=parent_deal,
                               form=wizard_form)
    new_proforma = Proforma()
    wizard_form.populate_obj(new_proforma)
    parent_deal.addProforma(new_proforma)
    db.session.add(parent_deal)
    db.session.commit()
    return redirect(url_for('proformas.details', proforma_id=new_proforma.id))
@bp.route('/edit/<proforma_id>', methods=['GET', 'POST'])
def edit(proforma_id):
    """Edit a proforma's scalar fields, preserving its child collections.

    Bug fix: the final render_template referenced an undefined local
    `deal`, raising NameError on every GET request; use the proforma's
    own deal (the same relationship delete() uses).
    """
    proforma = Proforma.query.get(proforma_id)
    form = ProformaForm(obj=proforma)
    if form.validate_on_submit():
        # Snapshot relationship collections, then restore them after
        # populate_obj so editing scalar fields can't drop the children.
        income = proforma.income
        expenses = proforma.expenses
        capital_expenditures = proforma.capital_expenditures
        loans = proforma.loans
        form.populate_obj(proforma)
        proforma.income = income
        proforma.expenses = expenses
        proforma.capital_expenditures = capital_expenditures
        proforma.loans = loans
        db.session.add(proforma)
        db.session.commit()
        return redirect(url_for('.details', proforma_id=proforma.id))
    return render_template('proformas/create.html',
                           title="Edit Proforma",
                           deal=proforma.deal,
                           form=form)
@bp.route('/delete/<proforma_id>', methods=['GET', 'POST'])
def delete(proforma_id):
    """Delete a proforma and bounce back to its parent deal's page."""
    doomed = Proforma.query.get(proforma_id)
    parent = doomed.deal
    db.session.delete(doomed)
    db.session.commit()
    return redirect(url_for('deals.view', deal_id=parent.id))
@bp.route('<proforma_id>/add/income', methods=['GET', 'POST'])
def add_income(proforma_id):
    """Add an income line item to a proforma."""
    target = Proforma.query.get(proforma_id)
    item_form = LineItemForm()
    if item_form.validate_on_submit():
        new_item = LineItem()
        item_form.populate_obj(new_item)
        target.addIncomeLineItem(new_item)
        db.session.add(target)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=target.id))
    return render_template('proformas/line_item.html',
                           title="Add Proforma",
                           line_item_type="Income",
                           proforma=target,
                           form=item_form)
@bp.route('edit/income/<line_item_id>', methods=['GET', 'POST'])
def edit_income(line_item_id):
    """Edit an existing income line item."""
    item = LineItem.query.get(line_item_id)
    owner = Proforma.query.get(item.income_proforma_id)
    item_form = LineItemForm(obj=item)
    if item_form.validate_on_submit():
        item_form.populate_obj(item)
        db.session.add(item)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=owner.id))
    return render_template('proformas/line_item.html',
                           title="Edit Proforma",
                           line_item_type="Income",
                           proforma=owner,
                           form=item_form)
@bp.route('<proforma_id>/add/fixed_expense', methods=['GET', 'POST'])
def add_fixed_expense(proforma_id):
    """Add a fixed-amount expense line item to a proforma."""
    target = Proforma.query.get(proforma_id)
    expense_form = LineItemForm()
    # Pre-select the fixed amount type for the rendered form.
    expense_form.amount_type.data = 'Fixed'
    if expense_form.validate_on_submit():
        new_item = LineItem()
        expense_form.populate_obj(new_item)
        target.addExpenseLineItem(new_item)
        db.session.add(target)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=target.id))
    return render_template('proformas/line_item.html',
                           title="Add Proforma",
                           line_item_type="Expense",
                           proforma=target,
                           form=expense_form)
@bp.route('<proforma_id>/add/percent_expense', methods=['GET', 'POST'])
def add_percent_expense(proforma_id):
    """Add a percentage-based expense line item to a proforma."""
    proforma = Proforma.query.get(proforma_id)
    form = LineItemForm()
    # Pre-select the percent amount type and a frequency of 1 per year.
    form.amount_type.data = 'Percent'
    form.frequency.data = '1'
    if form.validate_on_submit():
        # NOTE(review): PercentLineItem is not imported anywhere in this
        # module, so a valid POST raises NameError here.  Should this be
        # LineItem (as the other add_* views use), or an import from
        # app.calculator.models?  Confirm before fixing.
        line_item = PercentLineItem()
        form.populate_obj(line_item)
        proforma.addExpenseLineItem(line_item)
        db.session.add(proforma)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=proforma.id))
    # NOTE(review): iterating form.errors yields field *names*, not the
    # error messages — probably meant form.errors.items() or .values().
    for error in form.errors:
        flash(error, 'info')
    return render_template('proformas/line_item.html',
                           title="Add Proforma",
                           line_item_type="Expense",
                           proforma=proforma,
                           form=form)
@bp.route('edit/expense/<line_item_id>', methods=['GET', 'POST'])
def edit_expense(line_item_id):
    """Edit an existing expense line item."""
    item = LineItem.query.get(line_item_id)
    owner = Proforma.query.get(item.expense_proforma_id)
    expense_form = LineItemForm(obj=item)
    if expense_form.validate_on_submit():
        expense_form.populate_obj(item)
        db.session.add(item)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=owner.id))
    return render_template('proformas/line_item.html',
                           title="Edit Expense",
                           line_item_type="Expense",
                           proforma=owner,
                           form=expense_form)
@bp.route('/delete/line_item/<line_item_id>', methods=['GET', 'POST'])
def delete_line_item(line_item_id):
    """Delete a line item and return to its proforma's details page.

    Bug fix: the redirect referenced an undefined `proforma_id`
    (NameError after every successful delete).  Capture the owning
    proforma's id before the row is deleted; a line item belongs to a
    proforma either as income or as an expense (see edit_income /
    edit_expense for the two foreign keys).
    """
    line_item = LineItem.query.get(line_item_id)
    owner_id = line_item.income_proforma_id or line_item.expense_proforma_id
    db.session.delete(line_item)
    db.session.commit()
    return redirect(url_for('proformas.details', proforma_id=owner_id))
@bp.route('<proforma_id>/add/loan', methods=['GET', 'POST'])
def add_loan(proforma_id):
    """Add a loan to a proforma.

    NOTE(review): neither LoanForm nor Loan is imported in this module,
    so this view raises NameError as soon as it runs.  They presumably
    live in app.calculator.forms / app.calculator.models — confirm and
    add the imports.
    """
    proforma = Proforma.query.get(proforma_id)
    form = LoanForm()
    if form.validate_on_submit():
        loan = Loan()
        form.populate_obj(loan)
        proforma.addLoan(loan)
        db.session.add(proforma)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=proforma.id))
    return render_template('proformas/financing.html',
                           title="Add Loan",
                           proforma=proforma,
                           form=form)
@bp.route('edit/loan/<loan_id>', methods=['GET', 'POST'])
def edit_loan(loan_id):
    """Edit an existing loan.

    NOTE(review): Loan and LoanForm are not imported in this module —
    NameError at runtime; confirm their home module and import them.
    """
    loan = Loan.query.get(loan_id)
    proforma = Proforma.query.get(loan.proforma_id)
    form = LoanForm(obj=loan)
    if form.validate_on_submit():
        form.populate_obj(loan)
        db.session.add(loan)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=proforma.id))
    return render_template('proformas/financing.html',
                           title="Edit Loan",
                           proforma=proforma,
                           form=form)
@bp.route('/delete/loan/<loan_id>', methods=['GET', 'POST'])
def delete_loan(loan_id):
    """Delete a loan and return to its proforma's details page.

    NOTE(review): Loan is not imported in this module — NameError at
    runtime; confirm its home module and import it.
    """
    loan = Loan.query.get(loan_id)
    proforma = Proforma.query.get(loan.proforma_id)
    db.session.delete(loan)
    db.session.commit()
    return redirect(url_for('proformas.details', proforma_id=proforma.id))
@bp.route('<proforma_id>/add/captial_expenditure', methods=['GET', 'POST'])
def add_capital_expenditure(proforma_id):
    """Add a capital expenditure to a proforma.

    NOTE(review): CapitalExpenditureForm and CapitalExpenditure are not
    imported in this module — NameError at runtime; confirm their home
    module and import them.  The route spells "captial_expenditure";
    fixing the typo would break existing links, so it is left as-is.
    """
    proforma = Proforma.query.get(proforma_id)
    form = CapitalExpenditureForm()
    if form.validate_on_submit():
        capital_expenditure = CapitalExpenditure()
        form.populate_obj(capital_expenditure)
        proforma.addCapitalExpenditure(capital_expenditure)
        db.session.add(proforma)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=proforma.id))
    return render_template('proformas/capital_expenditure.html',
                           title="Add Capital Expenditure",
                           proforma=proforma,
                           form=form)
@bp.route('edit/capital_expenditure/<capital_expenditure_id>',
          methods=['GET', 'POST'])
def edit_capital_expenditure(capital_expenditure_id):
    """Edit an existing capital expenditure.

    NOTE(review): CapitalExpenditure and CapitalExpenditureForm are not
    imported in this module — NameError at runtime; confirm their home
    module and import them.
    """
    capital_expenditure = CapitalExpenditure.query.get(capital_expenditure_id)
    proforma = Proforma.query.get(capital_expenditure.proforma_id)
    form = CapitalExpenditureForm(obj=capital_expenditure)
    if form.validate_on_submit():
        form.populate_obj(capital_expenditure)
        db.session.add(capital_expenditure)
        db.session.commit()
        return redirect(url_for('proformas.details', proforma_id=proforma.id))
    return render_template('proformas/capital_expenditure.html',
                           title="Edit Captial Expenditure",
                           proforma=proforma,
                           form=form)
@bp.route('/delete/capital_expenditure/<capital_expenditure_id>',
          methods=['GET', 'POST'])
def delete_capital_expenditure(capital_expenditure_id):
    """Delete a capital expenditure and return to its proforma's page.

    NOTE(review): CapitalExpenditure is not imported in this module —
    NameError at runtime; confirm its home module and import it.
    """
    capital_expenditure = CapitalExpenditure.query.get(capital_expenditure_id)
    proforma = Proforma.query.get(capital_expenditure.proforma_id)
    db.session.delete(capital_expenditure)
    db.session.commit()
    return redirect(url_for('proformas.details', proforma_id=proforma.id))
@bp.app_template_filter()
def currency(value):
    """Template filter: format a number as localized currency.

    None renders as the literal "$0.00".
    NOTE(review): locale.currency requires LC_MONETARY to be configured
    via locale.setlocale — confirm app startup does this.
    """
    return "$0.00" if value is None else locale.currency(value, grouping=True)
@bp.app_template_filter()
def percent(value):
    """Template filter: format a fraction as a percentage string.

    None -> "0.00%"; non-numeric values pass through unchanged; floats
    and Decimals are scaled by 100 and rounded to 2 decimal places.

    Bug fix: `Decimal` was referenced but never imported, so any call
    with a non-float, non-None value raised NameError.
    """
    from decimal import Decimal  # local import: only the isinstance check needs it
    if value is None:
        return "0.00%"
    if not isinstance(value, (float, Decimal)):
        return value
    return "{}%".format(round(value * 100, 2))
|
[
"johnlopez@Johns-MacBook-Pro.local"
] |
johnlopez@Johns-MacBook-Pro.local
|
cae68092f8a6f68f028bad871d76ed88fac3892b
|
2079f77161365aaa6b89e00be85f80c09d3edf18
|
/Python/PythonFundamentals/9-python-fundamentals-m08-classes-exercise-files/demos/demos/series.py
|
4c35b56bb49297b850153de8dd7c10b7bbb2dd80
|
[
"Unlicense"
] |
permissive
|
yong0011/Research
|
4ad590f26037c8821480eea6cdd14cbf8e99ac41
|
92d9c6d24c30f81467a7616402a24757b0a88c50
|
refs/heads/master
| 2020-12-02T00:49:38.147443
| 2019-12-11T09:27:03
| 2019-12-11T09:27:03
| 230,835,046
| 1
| 0
|
Unlicense
| 2019-12-30T02:42:44
| 2019-12-30T02:42:44
| null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
'''Read and print an integer series.'''
import sys
def read_series(filename):
    """Return the integers in *filename*, one per line, in file order."""
    with open(filename, mode='rt', encoding='utf-8') as handle:
        values = []
        for raw_line in handle:
            values.append(int(raw_line.strip()))
        return values
def main(filename):
    """Load the integer series from *filename* and print it."""
    print(read_series(filename))
if __name__ == '__main__':
    # CLI entry point: the first argument is the path to the series file.
    main(sys.argv[1])
|
[
"ngohungphuc95@gmail.com"
] |
ngohungphuc95@gmail.com
|
c2bad9c8a1bc1bbb43e1d35ccde5911f97d02932
|
410611bc3f2f66506a19a684579be67eeaf8db9d
|
/batalla.py
|
7449bc1413c14a4d98ce8f7c729285caf8a0fc48
|
[] |
no_license
|
geronix97/exercici1
|
eb801ed0f0f2716fddc780375d071a7e46398f46
|
a8ab6af2f18adc39427e3ae24ffd0f044f0a0b66
|
refs/heads/master
| 2020-12-26T11:49:18.758041
| 2020-02-07T18:55:09
| 2020-02-07T18:55:09
| 237,499,644
| 0
| 0
| null | 2020-02-07T19:41:43
| 2020-01-31T19:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 16,266
|
py
|
import random
class vaixell:
    """A single ship: its board cells, remaining size, type and alive flag."""

    def __init__(self):
        # Per-instance state.  Bug fix: the original declared these as
        # class-level attributes, so every ship shared the same position
        # lists until Taulell.posicionar() reassigned them.
        self.posicio1 = []  # [x, y] of the 1st cell (filled by Taulell.hiCap)
        self.posicio2 = []
        self.posicio3 = []
        self.posicio4 = []
        self.tamany = 0     # remaining un-hit cells
        self.tipus = ""     # ship type label ("patrulla", "fragata", ...)
        self.vida = True    # False once sunk

    def enfonsat(self):
        """If the ship has no cells left, mark it dead and report it sunk.

        Returns the message string when sunk, None otherwise (unchanged
        from the original).  Bug fix: the original wrote `vida==False`, a
        comparison against an undefined local name, instead of assigning
        self.vida — it raised NameError whenever a ship actually sank.
        """
        if self.tamany == 0:
            self.vida = False
            return self.tipus + "enfonsat!"
class Taulell:
    """10x10 battleship board ("taulell") holding a fleet of vaixell ships.

    Grid cell values in `a`: 0 = water, 1 = ship, 2 = shot (hit or miss).
    NOTE(review): `a`, `llistavaixells` and `bingo` are class-level
    attributes shared by every instance until they are explicitly
    reassigned (Joc.menu() does reset them per game).
    """
    a = [[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0]]
    llistavaixells=[]  # the fleet still afloat on this board
    bingo=False
    def setTaulell(self):
        """Reset the grid to all-water (rebinds `a` on the instance)."""
        self.a = [[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0]]
    def hiCap(self,objecte,posicio,x,y):
        """Try to place ship `objecte` with its head cell at (x, y).

        posicio 0 grows along x (rows), posicio 1 grows along y (columns).
        On success marks each cell 1, records the coordinates in
        objecte.posicioN and returns the ship; returns False when a cell
        is already occupied or the ship would run off the board (the
        bare except branches catch the IndexError from x+k / y+k).
        """
        if posicio==0:
            if objecte.tamany==1:
                if self.a[x][y] == 0:
                    self.a[x][y]=1
                    objecte.posicio1.append(x)
                    objecte.posicio1.append(y)
                    return objecte
                else:
                    return False
            elif objecte.tamany==2:
                try:
                    if self.a[x][y]==0 and self.a[x+1][y]==0:
                        objecte.posicio1.append(x)
                        objecte.posicio1.append(y)
                        objecte.posicio2.append(x+1)
                        objecte.posicio2.append(y)
                        self.a[x][y]=1
                        self.a[x+1][y]=1
                        return objecte
                    else:
                        return False
                except:
                    return False
            elif objecte.tamany ==3:
                try:
                    if self.a[x][y]==0 and self.a[x+1][y]==0 and self.a[x+2][y]==0:
                        objecte.posicio1.append(x)
                        objecte.posicio1.append(y)
                        objecte.posicio2.append(x+1)
                        objecte.posicio2.append(y)
                        objecte.posicio3.append(x+2)
                        objecte.posicio3.append(y)
                        self.a[x][y]=1
                        self.a[x+1][y]=1
                        self.a[x+2][y]=1
                        return objecte
                    else:
                        return False
                except:
                    return False
            elif objecte.tamany ==4:
                try:
                    if self.a[x][y]==0 and self.a[x+1][y]==0 and self.a[x+2][y]==0 and self.a[x+3][y]==0:
                        objecte.posicio1.append(x)
                        objecte.posicio1.append(y)
                        objecte.posicio2.append(x+1)
                        objecte.posicio2.append(y)
                        objecte.posicio3.append(x+2)
                        objecte.posicio3.append(y)
                        objecte.posicio4.append(x+3)
                        objecte.posicio4.append(y)
                        self.a[x][y]=1
                        self.a[x+1][y]=1
                        self.a[x+2][y]=1
                        self.a[x+3][y]=1
                        return objecte
                    else:
                        return False
                except:
                    return False
        elif posicio==1:
            if objecte.tamany==1:
                if self.a[x][y] == 0:
                    objecte.posicio1.append(x)
                    objecte.posicio1.append(y)
                    self.a[x][y]=1
                    return objecte
                else:
                    return False
            elif objecte.tamany==2:
                try:
                    if self.a[x][y]==0 and self.a[x][y+1]==0:
                        objecte.posicio1.append(x)
                        objecte.posicio1.append(y)
                        objecte.posicio2.append(x)
                        objecte.posicio2.append(y+1)
                        self.a[x][y]=1
                        self.a[x][y+1]=1
                        return objecte
                    else:
                        return False
                except:
                    return False
            elif objecte.tamany ==3:
                try:
                    if self.a[x][y]==0 and self.a[x][y+1]==0 and self.a[x][y+2]==0:
                        objecte.posicio1.append(x)
                        objecte.posicio1.append(y)
                        objecte.posicio2.append(x)
                        objecte.posicio2.append(y+1)
                        objecte.posicio3.append(x)
                        objecte.posicio3.append(y+2)
                        self.a[x][y]=1
                        self.a[x][y+1]=1
                        self.a[x][y+2]=1
                        return objecte
                    else:
                        return False
                except:
                    return False
            elif objecte.tamany ==4:
                try:
                    if self.a[x][y]==0 and self.a[x][y+1]==0 and self.a[x][y+2]==0 and self.a[x][y+3]==0:
                        objecte.posicio1.append(x)
                        objecte.posicio1.append(y)
                        objecte.posicio2.append(x)
                        objecte.posicio2.append(y+1)
                        objecte.posicio3.append(x)
                        objecte.posicio3.append(y+2)
                        objecte.posicio4.append(x)
                        objecte.posicio4.append(y+3)
                        self.a[x][y]=1
                        self.a[x][y+1]=1
                        self.a[x][y+2]=1
                        self.a[x][y+3]=1
                        return objecte
                    else:
                        return False
                except:
                    return False
    def crearvaixells(self):
        """Build the standard fleet: 4 patrol boats (size 1), 3 frigates
        (size 2), 2 battleships (size 3) and 1 carrier (size 4)."""
        for i in range(4):
            v=vaixell()
            v.tamany=1
            v.tipus="patrulla"
            self.llistavaixells.append(v)
        for i in range(3):
            v=vaixell()
            v.tamany=2
            v.tipus="fragata"
            self.llistavaixells.append(v)
        for i in range(2):
            v=vaixell()
            v.tamany=3
            v.tipus="cuirasats"
            self.llistavaixells.append(v)
        for i in range(1):
            v=vaixell()
            v.tamany=4
            v.tipus="portaAvions"
            self.llistavaixells.append(v)
    def posicionar(self):
        """Create the fleet and drop each ship at a random legal spot."""
        self.crearvaixells()
        for i in self.llistavaixells:
            # Fresh position lists per ship (the class attributes on
            # vaixell are otherwise shared between instances).
            i.posicio1=[]
            i.posicio2=[]
            i.posicio3=[]
            i.posicio4=[]
            x=random.randint(0,9)
            y=random.randint(0,9)
            posicio= random.randint(0,1)
            # Keep re-rolling until hiCap accepts the placement.
            while self.hiCap(i,posicio,x,y)== False:
                x=random.randint(0,9)
                y=random.randint(0,9)
                posicio= random.randint(0,1)
    # self.printar()
    # def introduir(self,num1,num2,lletra):
    # self.a[num1][num2]=lletra
    def hit(self,x,y):
        """Apply the player's shot at (x, y) to this (CPU) board.

        Marks the cell as shot, finds the ship occupying it (if any),
        decrements its remaining size, and removes it from the fleet
        with a "sunk" banner when it reaches 0.
        NOTE(review): this removes from llistavaixells while iterating
        it, and the bare except silently skips ships whose posicioN
        lists are shorter than the index checked (IndexError) — fragile,
        but it is how the game currently behaves.
        """
        posicio=[]
        self.a[x][y]=2
        for i in self.llistavaixells:
            #print("posicio1")
            try:
                if i.posicio1[0]== x and i.posicio1[1]==y:
                    i.tamany-=1
                    if i.tamany==0:
                        print("-----------------------")
                        print("| "+i.tipus+" enfonsat!"+" |")
                        print("-----------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| li has donat a un "+i.tipus+" |")
                        print("-------------------------------")
                elif i.posicio2[0]== x and i.posicio2[1]==y:
                    i.tamany-=1
                    if i.tamany==0:
                        print("---------------------------------")
                        print("| "+i.tipus+" enfonsat!"+" |")
                        print("---------------------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| li has donat a un "+i.tipus+" |")
                        print("-------------------------------")
                elif i.posicio3[0]== x and i.posicio3[1]==y:
                    i.tamany-=1
                    if i.tamany==0:
                        print("-----------------------")
                        print("| "+i.tipus+" enfonsat!"+" |")
                        print("-----------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| li has donat a un "+i.tipus+" |")
                        print("-------------------------------")
                elif i.posicio4[0]== x and i.posicio4[1]==y:
                    i.tamany-=1
                    self.a[x][y]=2
                    if i.tamany==0:
                        print("-----------------------")
                        print("| "+i.tipus+" enfonsat!"+" |")
                        print("-----------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| li has donat a un "+i.tipus+" |")
                        print("-------------------------------")
            except:
                # Short position list (small ship): nothing to do.
                posicio=[]
                #print(i.tipus)
                #print(i.tamany)
                #print("--")
        #print("next")
    def hitcpu(self,x,y):
        """Apply the CPU's shot at (x, y) to this (player's) board.

        Same logic as hit(), with "you were hit" banners instead.
        """
        posicio=[]
        self.a[x][y]=2
        for i in self.llistavaixells:
            #print("posicio1")
            try:
                if i.posicio1[0]== x and i.posicio1[1]==y:
                    i.tamany-=1
                    if i.tamany==0:
                        print("-----------------------")
                        print("| "+i.tipus+" enfonsat per l' enemic!"+" |")
                        print("-----------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| T' han donat a un "+i.tipus+" |")
                        print("-------------------------------")
                elif i.posicio2[0]== x and i.posicio2[1]==y:
                    i.tamany-=1
                    if i.tamany==0:
                        print("---------------------------------")
                        print("| "+i.tipus+" enfonsat per l' enemic!"+" |")
                        print("---------------------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| T' han donat a un "+i.tipus+" |")
                        print("-------------------------------")
                elif i.posicio3[0]== x and i.posicio3[1]==y:
                    i.tamany-=1
                    if i.tamany==0:
                        print("-----------------------")
                        print("| "+i.tipus+" enfonsat per l' enemic!"+" |")
                        print("-----------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| T' han donat a un "+i.tipus+" |")
                        print("-------------------------------")
                elif i.posicio4[0]== x and i.posicio4[1]==y:
                    i.tamany-=1
                    self.a[x][y]=2
                    if i.tamany==0:
                        print("-----------------------")
                        print("| "+i.tipus+" enfonsatper l' enemic!"+" |")
                        print("-----------------------")
                        self.llistavaixells.remove(i)
                    else:
                        print("-------------------------------")
                        print("| T' han donat a un "+i.tipus+" |")
                        print("-------------------------------")
            except:
                # Short position list (small ship): nothing to do.
                posicio=[]
                #print(i.tipus)
                #print(i.tamany)
                #print("--")
        #print("next")
    def moure(self):
        """Return a random [x, y] shot for the CPU."""
        moviment=[]
        x=random.randint(0,9)
        y=random.randint(0,9)
        moviment.append(x)
        moviment.append(y)
        return moviment
    def gameover(self):
        """True when the whole fleet on this board has been sunk."""
        if len(self.llistavaixells)==0:
            return True
        else:
            return False
    def printar(self):
        """Print the grid: shot cells as X, other cells as their value."""
        llista =""
        for i in range(len(self.a)):
            for j in range(len(self.a[i])):
                if self.a[i][j]==2:
                    llista+="X "
                else:
                    llista+=str(self.a[i][j])+" "
            print(llista)
            llista=""
    def printarAmagat(self):
        """Print the grid hiding ships: X for shot cells, * otherwise."""
        llista =""
        for i in range(len(self.a)):
            for j in range(len(self.a[i])):
                if self.a[i][j]==2:
                    llista+="X "
                else:
                    llista+="* "
            print(llista)
            llista=""
class Joc():
    """Console game controller for "hundir la flota" (battleship)."""
    final= False
    def final():
        # NOTE(review): dead code — takes no self, shadows the class
        # attribute `final`, reads an undefined bare name, and is never
        # called anywhere.
        if final== False:
            print("hola")
    def menu(self):
        """Main loop: show the menu, build both boards and play turns
        until one fleet is fully sunk."""
        while True:
            print("Benvingut a hundir la flota")
            print("Selecciona una opció")
            print("\t1 - Introdueix vaixells automaticament.")
            opcionMenu = input("")
            if opcionMenu=="1":
                # Fresh boards; reset the shared class-level fleet lists
                # so a second game does not inherit the previous fleet.
                taulellJugador= Taulell()
                taulellCpu= Taulell()
                taulellCpu.llistavaixells=[]
                taulellJugador.llistavaixells=[]
                taulellCpu.setTaulell()
                taulellJugador.setTaulell()
                taulellCpu.posicionar()
                taulellJugador.posicionar()
                while True:
                    print("el teu taulell")
                    taulellJugador.printar()
                    print("Taulell Enemic")
                    taulellCpu.printarAmagat()
                    fila=0
                    columna=0
                    while True:
                        fila = input("numero de fila: ")
                        columna = input("numero de columna: ")
                        # NOTE(review): `and` binds tighter than `or`, so
                        # one in-range coordinate is enough to pass; it
                        # also accepts 0, which becomes index -1 below —
                        # the validation looks buggy, confirm intent.
                        if int(fila)<=10 and int(fila) >=0 or int(columna) <=10 and int(columna) >=0:
                            break
                        print("no has introduit be les dades")
                    # Player shoots (1-based input -> 0-based grid).
                    taulellCpu.hit(int(fila)-1,int(columna)-1)
                    # CPU answers with a random shot.
                    moviment=taulellCpu.moure()
                    taulellJugador.hitcpu(moviment[0],moviment[1])
                    if taulellJugador.gameover()== True:
                        print("La maquina ha guanyat... un altre cop")
                        break
                    elif taulellCpu.gameover()== True:
                        print("Has guanyat!")
                        break
            else:
                print("opcio incorrecte")
#taulell.printar()
# taulellJugador= Taulell()
# taulellJugador.posicionar()
if __name__ == "__main__":
    # Only start the interactive game when run as a script — previously
    # merely importing this module launched the input() loop.
    partida = Joc()
    partida.menu()
|
[
"gerard@lacetania.com"
] |
gerard@lacetania.com
|
644e40c6107325bbd5bc83d943dae5342e6840d2
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_347/ch60_2020_06_20_17_50_00_812398.py
|
c56e6538b8f07b28a41785924f0cb42c34347010
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
def eh_palindromo(string):
    """Return True when `string` reads the same forwards and backwards.

    Bug fix: the original compared the whole string to its *last
    character* (string == string[-1]), so only 1-character strings were
    ever reported as palindromes, and "" raised IndexError.  Compare
    against the reversed string instead; "" is a palindrome.
    """
    return string == string[::-1]
|
[
"you@example.com"
] |
you@example.com
|
603bbba91b93d5d00d52322fde39e6fdd220e999
|
67c13f25cc77d8eea040efb78287a4af56510cb2
|
/modules/__init__.py
|
0cd8292cb911976e0a04225d0d888612fa108c76
|
[] |
no_license
|
ulgenatakan/fake
|
410f11fe442c1a9774600ebadae5dfcc92938fc1
|
524e180c6f14adb060da5c7f7806592d160728ed
|
refs/heads/master
| 2020-08-07T11:36:22.196312
| 2019-10-11T18:33:52
| 2019-10-11T18:33:52
| 213,434,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from .database import db_add_item, db_clear, db_del_item, db_get_all
from .elimination import create_row_data, filter_dicts, eliminate
from .random_mac_generator import generate_devices, print_devices, generate, get_devices
|
[
"ulgenatakan@gmail.com"
] |
ulgenatakan@gmail.com
|
57a0faa230f7260f44d5ae7dbf3ff16be5f6ad0f
|
5b3bd326998606188b45a7870852643eda024a97
|
/meta_architectures/context_rcnn_lib_tf2.py
|
4989db07136dc450d1fa5058e58e27365b79dd29
|
[] |
no_license
|
KuznetsovIllya/clearml_od_toy
|
31556d0726d15a054c1c18317c361d97801381a4
|
92f15f04a023d4e0e165a250fddc3129144913d0
|
refs/heads/main
| 2023-04-11T05:55:56.248478
| 2021-04-14T15:59:40
| 2021-04-14T15:59:40
| 357,827,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:2ba0224b6fe0f919228a83b2f2a2bef6bbff4e7f747358231bdc235456d58197
size 9287
|
[
"illia.kuznietsov@modern-expo.com"
] |
illia.kuznietsov@modern-expo.com
|
f823640b891cf7e82a82904b7a0dc3559f3ae4ae
|
b5ed18a8400c0ac287fc2c4a3a7fa4804ee0cc05
|
/scripts/run_celery.py
|
73db6db576fb3a60adcd2c33436c9d532a574a8e
|
[
"MIT"
] |
permissive
|
MartykQ/flask-celery-postgres-template
|
db0e5234afac0f986c0501e0e6dabf4f99f9ac9a
|
808d00de77a878ac78442d2956fd54c09fe932f8
|
refs/heads/master
| 2023-06-29T03:19:37.053167
| 2021-07-28T11:28:12
| 2021-07-28T11:28:12
| 389,961,725
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
"""Helper script to make sure celery worker is started correctly"""
from web_app.extensions import FlaskCelery
from web_app import create_app
app = create_app()
celery = FlaskCelery(app=app)
|
[
"fmartyka16@gmail.com"
] |
fmartyka16@gmail.com
|
6ff9ff95fefbaa276dd64e822f01857ee7a656b9
|
11aaeaeb55d587a950456fd1480063e1aed1d9e5
|
/.history/ex45-test_20190608162053.py
|
ead3b46f62d91c9428c264a9f24afe6a0e5ba598
|
[] |
no_license
|
Gr4cchus/Learn-Python-3-The-Hard-Way
|
8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8
|
f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d
|
refs/heads/master
| 2020-05-17T23:18:29.483160
| 2019-06-26T18:42:52
| 2019-06-26T18:42:52
| 184,023,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
class Room1():
    """First room of the toy adventure map."""

    def enter(self):
        # Fix: `enter` lacked the `self` parameter, so calling it on an
        # instance (Room1().enter()) would raise TypeError.
        print("You enter room 1")


class Room2():
    """Second room of the toy adventure map."""

    def enter(self):
        print("You enter room 2")


class Map():
    """Holds the starting room name and the room-name -> room-object table."""

    def __init__(self, starting_room):
        # Fix: the original `def __init__(self, starting_room)` was missing
        # the trailing colon — a SyntaxError that made the file unimportable.
        self.starting_room = starting_room
        self.locations = {
            'room1': Room1(),
            'room2': Room2()
        }


start = Map('room1')
start
|
[
"ahivent@gmail.com"
] |
ahivent@gmail.com
|
83d0a74593c54ec76d33cb5485a9903692decdc3
|
0de5810c702046d8f00c8bcda8543c66f608222f
|
/venv/bin/tor-prompt
|
d17b63310b066687ee943f2ad5db6d165708806a
|
[] |
no_license
|
Subhash1998/product_sale
|
26a688869009035e785791cac1fb2707cab1e221
|
a76f9e75ea1d96832d9280b98da08d0b3c6520fe
|
refs/heads/master
| 2022-12-12T12:13:37.392530
| 2020-06-18T12:46:45
| 2020-06-18T12:46:45
| 128,992,642
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
#!/home/subhash/payment/venv/bin/python
# Copyright 2014-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
# Launcher for stem's interactive Tor control interpreter (`tor-prompt`).
import stem.interpreter
if __name__ == '__main__':
    stem.interpreter.main()
|
[
"you@example.com"
] |
you@example.com
|
|
b7336e4073879a97412a0238f8bf3bf8ace67454
|
32fa2eac81a214133196294c23ebbc41b066f969
|
/data.py
|
2155731f858ec4f998e9a23a1fadf16c9055d8a4
|
[] |
no_license
|
Animatory/DMD_project
|
f741aaefb0c94548650bed2226d942bfe38a0356
|
956c3f3495f77c91bee5e49ea5a45ff70df93394
|
refs/heads/master
| 2020-04-07T14:01:38.565461
| 2018-11-26T20:57:24
| 2018-11-26T20:57:24
| 158,431,278
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
# Candidate attribute values for generating fake car/owner records.
model_classes = ['X', 'S', 'Z', 'C', 'L', 'M']
colors = ['red', 'green', 'blue', 'yellow']
# Fix: `names` was assigned twice; the first value (['ABC', 'XYZ', 'EFG']) was
# dead code, immediately overwritten by the person-name list below.
names = ['Rurk', 'Alex', 'Jhon', 'Ann', 'Jee', 'Markus', 'Marry', 'Stas', 'Li', 'Subham', 'Perry', 'Harry', 'Mia',
         'Slava', 'Vova', 'Vlad', 'Olya', 'Jija']
# One car-part name per line; assumes car_parts.txt sits in the working directory.
car_parts = [i.strip() for i in open('car_parts.txt', 'r')]
|
[
"rinat.babichev@gmail.com"
] |
rinat.babichev@gmail.com
|
16cd7bab7cf50b112d42ab69847e26c54e5fe48f
|
7575f049b4a8d42d1714dba514e9d3bdb4c45a82
|
/maxflow.py
|
595e1e0db6dc83e3b5cfa56dd0ef7fc21960f0fc
|
[] |
no_license
|
thorehusfeldt/proglager
|
750cd8f44e466e8141552fc4fdffdd8a66affe69
|
247c592252e327f65a524a0b418fffaaa0182c83
|
refs/heads/master
| 2020-04-20T11:00:22.006163
| 2019-02-02T11:09:40
| 2019-02-02T11:09:40
| 168,804,478
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
# Edmonds-Karp with multiedges and capacity scaling
# NOTE: Python 2 script (raw_input and print statements).
from collections import defaultdict
from collections import deque

# First input line: node count, edge count, source, sink.
n, m, s, t, = map(int, raw_input().split())
N = defaultdict(list) # neighbourhood lists, N[u] = [(w_1, cap_1), (w_2, cap_2), ...]
R = [dict() for _ in range(n)] # R[u][v] = residual cap in G_f from u to v in G_f
C = [dict() for _ in range(n)] # C[u][v] = total cap in G from u to v in G
maxcap = 0
for _ in range(m):
    u, v, cap = map(int, raw_input().split())
    N[u].append((v,cap))
    R[u][v] = cap if v not in R[u] else R[u][v] + cap # edge already there? increase cap
    R[v][u] = 0 if u not in R[v] else R[v][u] # same on res edge
    C[u][v] = R[u][v] # remember total cap of instance
    maxcap = max(maxcap , C[u][v]) # need this for cap scaling
cutoff = maxcap // 2

def BFS(s, t, parent):
    """Breadth-first search in the residual graph, crossing only edges whose
    residual capacity exceeds `cutoff`; fills parent[] with the BFS tree and
    returns True iff t was reached."""
    Q = deque([s])
    M = [False] * n
    M[s] = True
    while Q:
        u = Q.popleft()
        for v, res_cap in R[u].items():
            if (M[v] == False) and (res_cap > cutoff): # or .. > 0 if cap scaling not necc.
                Q.append(v)
                M[v] = True
                parent[v] = u
    return M[t] # the cut would be in M, but here we don't need it

pred = [None] * n
flow = 0
# Capacity scaling: augment only along edges with residual > cutoff,
# halving cutoff each round until it reaches 0.
while (True):
    while (BFS(s,t,pred)):
        # Bottleneck capacity f_p along the s-t path recorded in pred[].
        f_p = float('inf')
        v = t
        while (v != s):
            u = pred[v]
            f_p = min(f_p, R[u][v])
            v = u
        flow += f_p
        # Push f_p units along the path, updating residual capacities.
        v = t
        while (v != s):
            u = pred[v]
            R[u][v] -= f_p
            R[v][u] += f_p
            v = u
    if cutoff == 0: break
    cutoff = cutoff // 2

# Recover per-edge flows: an edge carries flow iff its residual capacity
# ended up below its original capacity.
flow_edges = 0
string = ""
for u in range(n):
    for v, cap in N[u]:
        if R[u][v] < C[u][v]:
            flow_edges += 1
            f_e = min(C[u][v] - R[u][v], cap)
            string += "{} {} {}\n".format( u, v, f_e)
            R[u][v] -= f_e
            C[u][v] -= f_e
print n, flow, flow_edges
print string
|
[
"thore.husfeldt@gmail.com"
] |
thore.husfeldt@gmail.com
|
c3182071b0e894204d18f4939e191f802569f245
|
c1d68638ccff1d72dd04e4f85bedf5b3146d1c7e
|
/site-packages/PytoClasses.py
|
f61abae06651d5db273c0bf55b7b660deab90c81
|
[
"MIT"
] |
permissive
|
luoyu1993/Pyto
|
ac61f296756a0df9131e50a203cb31efd261b843
|
4f874e0b9203ae8dc0cd447b599358d726c98f10
|
refs/heads/master
| 2022-07-26T10:44:58.987630
| 2018-12-09T19:05:41
| 2018-12-09T19:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
# -*- coding: utf-8 -*-
"""
This module contains classes from the main app used by Pyto.
This module is only for private use. Use the `pyto` API instead.
"""
from rubicon.objc import *
# Objective-C bridge handle to NSBundle (not referenced further in this module).
NSBundle = ObjCClass("NSBundle")
def __Class__(name):
    # Resolve a class declared in the app's `Pyto` Objective-C/Swift module.
    return ObjCClass("Pyto."+name)
# Handles to the app's helper classes, looked up once at import time.
PyMainThread = __Class__("PyMainThread")
PyThread = __Class__("PyThread")
PyInputHelper = __Class__("PyInputHelper")
PyOutputHelper = __Class__("PyOutputHelper")
PySharingHelper = __Class__("PySharingHelper")
FilePicker = __Class__("PyFilePicker")
Alert = __Class__("PyAlert")
PyContentViewController = __Class__("PyContentViewController")
PyExtensionContext = __Class__("PyExtensionContext")
Python = __Class__("Python")
[
"adrilabbelol@gmail.com"
] |
adrilabbelol@gmail.com
|
e562f50add3ca69f109499c50dd0a57425564f89
|
9e5e49444a3fe4856e0884cf812926000f51056e
|
/MBRCommentary/obj/Release/Package/PackageTmp/MBRCommentary/settings.py
|
b92f015f0d4e3ae50aecf47881afd18610a9aad7
|
[] |
no_license
|
comonroe/mbrcommentary
|
e8a3c0b74a4e50e9662303951294df20816c8bbe
|
a703afebbcc2c12c30c7b7617547e34c2171de95
|
refs/heads/master
| 2020-08-30T01:21:24.916820
| 2017-06-21T19:25:44
| 2017-06-21T19:25:44
| 94,387,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
"""
Django settings for MBRCommentary project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import posixpath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable instead of hard-coding it here.
SECRET_KEY = 'e2df6351-f62d-45f1-808b-e5602b3a59c5'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

ALLOWED_HOSTS = ['opsfin.azurewebsites.net', 'localhost']
# Application definition
INSTALLED_APPS = [
'app',
# Add your apps here to enable them
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_tables2',
'datetimewidget',
'bootstrapform',
'bootstrap_toolkit'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MBRCommentary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MBRCommentary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

# NOTE(review): the database user and password are hard-coded and committed
# to source control — move them to environment variables or a secrets store
# and rotate the exposed credentials.
DATABASES = {
    'default': {
        'ENGINE': 'sql_server.pyodbc',  # SQL Server backend via pyodbc
        'NAME': 'OpsFinMBRCommentary',
        'USER': 'webapp',
        'PASSWORD': 'Yukon900',
        'HOST': 'opsfin-mbr-commentary.database.windows.net',
        'PORT': '1433',
        'OPTIONS': {
            'driver': 'SQL Server Native Client 11.0',
            'MARS_Connection': 'True',
            'connection_timeout': 30
        }
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
|
[
"coleman.c.monroe12@gmail.com"
] |
coleman.c.monroe12@gmail.com
|
a7eab3d12285d063f0110ae15dee34d6fe079b01
|
ccef9fa406db259f0db515a39bb28691f506b30f
|
/mnist.py
|
cc59d6d043838ae1abc321f8df555393ba41132a
|
[
"MIT"
] |
permissive
|
fanchy888/digit_ANN
|
5358564b0691a132bcc90231db9eaa7d10f2591c
|
5faf0e574321ff5e3c6b8ec82992d20177be15a0
|
refs/heads/master
| 2020-03-28T17:08:52.344050
| 2018-09-25T08:48:11
| 2018-09-25T08:48:11
| 148,761,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
# Convert the raw MNIST idx files into per-class directories of PNG images.
import numpy as np
import struct
from PIL import Image
import os

data_file = 'train-images.idx3-ubyte'  # path to adjust for your setup
# It's 47040016B, but we should set to 47040000B
# (the 16-byte idx3 header is skipped: 47040000 = 60000 images * 28*28 bytes)
data_file_size = 47040016
data_file_size = str(data_file_size - 16) + 'B'
data_buf = open(data_file, 'rb').read()
# idx3 header: magic number, image count, rows, columns (big-endian uint32).
magic, numImages, numRows, numColumns = struct.unpack_from(
    '>IIII', data_buf, 0)
datas = struct.unpack_from(
    '>' + data_file_size, data_buf, struct.calcsize('>IIII'))
datas = np.array(datas).astype(np.uint8).reshape(
    numImages, 1, numRows, numColumns)

label_file = 'train-labels.idx1-ubyte'  # path to adjust for your setup
# It's 60008B, but we should set to 60000B (the 8-byte idx1 header is skipped)
label_file_size = 60008
label_file_size = str(label_file_size - 8) + 'B'
label_buf = open(label_file, 'rb').read()
# idx1 header: magic number, label count (big-endian uint32).
magic, numLabels = struct.unpack_from('>II', label_buf, 0)
labels = struct.unpack_from(
    '>' + label_file_size, label_buf, struct.calcsize('>II'))
labels = np.array(labels).astype(np.int64)

datas_root = 'mnist_train'  # path to adjust for your setup
# Create one output directory per digit class 0-9.
if not os.path.exists(datas_root):
    os.mkdir(datas_root)
for i in range(10):
    file_name = datas_root + os.sep + str(i)
    if not os.path.exists(file_name):
        os.mkdir(file_name)
# Save each image into the directory named after its label.
for ii in range(numLabels):
    img = Image.fromarray(datas[ii, 0, 0:28, 0:28])
    label = labels[ii]
    file_name = datas_root + os.sep + str(label) + os.sep + \
        'mnist_train_' + str(ii) + '.png'
    img.save(file_name)
|
[
"fanchy888@gmail.com"
] |
fanchy888@gmail.com
|
d62122698bcb4a6081b643082fd8fb9a2df8278c
|
7b6377050fba4d30f00e9fb5d56dfacb22d388e1
|
/pqu/Check/t07.py
|
e3c5cacee7c2ca293bce9a269a6a1976e4be1703
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
LLNL/fudge
|
0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370
|
6ba80855ae47cb32c37f635d065b228fadb03412
|
refs/heads/master
| 2023-08-16T21:05:31.111098
| 2023-08-01T22:09:32
| 2023-08-01T22:09:32
| 203,678,373
| 21
| 4
|
NOASSERTION
| 2023-06-28T20:51:02
| 2019-08-21T23:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 972
|
py
|
# <<BEGIN-copyright>>
# Copyright 2022, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>

# Manual check of PQU (physical quantity with uncertainty): parsing of
# uncertainty notation, addition with unit conversion, and MeV/c**2 -> kg.
import sys
sys.path.insert( 0, '../../' )  # make the in-tree pqu package importable

from pqu.PQU import PQU

print()
# Uncertainty in the last digit(s) given in parentheses, e.g. "...(1) MeV".
a2 = PQU( "2.300000000000(1) MeV" )
print(a2)
print(a2.info( significantDigits = 15 ))
a2 = PQU( "2.300000000001(1) MeV" )
print(a2)
print(a2.info( significantDigits = 15 ))
print()
# Unit may also be passed as a separate argument.
a2 = PQU( "2.300000000003(1)", "MeV" )
print(a2)
print(a2.info( significantDigits = 15 ))
print()
# Addition of quantities with compatible but different length units.
l = PQU( 10., 'm' )
big_l = PQU( 10., 'km' )
sum_l = big_l + l
print(sum_l)
print(l.info( significantDigits = 15 ))
print(big_l.info( significantDigits = 15 ))
print(sum_l.info( significantDigits = 15 ))
print()
# Mass expressed in MeV/c**2 converted to kilograms.
E = PQU( 1000, 'MeV/c**2' ) # This is similar to the prior one.
print(E)
print(E.info( significantDigits = 15 ))
kg = E.inUnitsOf( 'kg' )
print(kg)
print(kg.info( significantDigits = 15 ))
|
[
"mattoon1@llnl.gov"
] |
mattoon1@llnl.gov
|
f8d82c2d92729091f1103b49971f703460124072
|
a696aefd4af0fd0f5d9573136e86caeb912c0fce
|
/Naive Bayes Classifier/nblearn.py
|
f50194e11cd549934b86195a7155512eb8bb1e1a
|
[] |
no_license
|
soumyaravi/NLP
|
137493bec7c492addb9358f07cc3ed69cbd744f8
|
4ab513388d278d8f3efcbcd6fd3ab17e820bc957
|
refs/heads/master
| 2020-05-30T19:10:06.352700
| 2017-01-27T02:06:22
| 2017-01-27T02:06:22
| 69,060,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,503
|
py
|
import sys, os

# Global model state, filled in by main():
#   spam / ham: token -> occurrence count per class
#   distinct:   every token seen (values are always 1, used as a set)
#   prob_spam / prob_ham: declared but never populated in this script
spam = {}
ham = {}
prob_spam = {}
prob_ham = {}
distinct = {}

# read the files and split it into tokens
# call the required functions from the main function
def main(args):
    """Count token occurrences in the training corpus under args[0].

    Walks the directory tree rooted at args[0]; every *.txt file whose name
    contains 'ham' feeds the global `ham` table, every other *.txt file feeds
    `spam`.  Every token seen is also recorded in the global `distinct` table.
    """
    spam_files = 0
    ham_files = 0
    for subdir, dirs, files in os.walk(args[0]):
        for file_name in files:  # renamed from `file` to avoid shadowing the builtin
            if not file_name.endswith('.txt'):
                continue
            # Dataset files are latin-1 encoded; whitespace-split into tokens.
            # (The redundant infile.close() inside the `with` block is gone —
            # the context manager already closes the file.)
            with open(os.path.join(subdir, file_name), "r", encoding="latin1") as infile:
                words = infile.read().split()
            # A file name containing 'ham' marks a ham sample; anything else
            # is spam.  Alias the right counter table to share one loop.
            if 'ham' in file_name:  # idiomatic form of file.__contains__('ham')
                ham_files += 1
                counts = ham
            else:
                spam_files += 1
                counts = spam
            for token in words:
                # str.split() never leaves surrounding whitespace, so the
                # rstrip() the original applied in the ham branch (only) was
                # a no-op; both classes are now counted identically.
                counts[token] = counts.get(token, 0) + 1
                if token not in distinct:
                    distinct[token] = 1
    return
# Calculate the total number of tokens counted for each class.
def findProbability():
    """Return {'spam': n, 'ham': n} — total word occurrences per class.

    NOTE: despite its name, this returns raw counts, not probabilities.
    """
    spam_count = sum(spam.values())
    ham_count = sum(ham.values())
    return {'spam': spam_count, 'ham': ham_count}
def writeOutput(spam_count, ham_count):
    """Dump the learned model to nbmodel.txt as 'Class,token,count' lines,
    preceded by four summary lines (class totals, unique words, vocabulary)."""
    with open('nbmodel.txt', 'w', encoding="latin1") as out:
        total = (len(spam) + len(ham))
        out.write(f"Spam,Total probability,{spam_count}\n")
        out.write(f"Ham,Total probability,{ham_count}\n")
        out.write(f"Unique,words,{len(distinct)}\n")
        out.write(f"Total,vocabulary,{total}\n")
        for token in spam:
            out.write(f"Spam,{token},{spam[token]}\n")
        for token in ham:
            out.write(f"Ham,{token},{ham[token]}\n")
    return
if __name__ == "__main__":
if len(sys.argv)!=2:
print('Usage: python nblearn.py <path>')
sys.exit(1)
main(sys.argv[1:])
ans = findProbability()
writeOutput(ans['spam'],ans['ham'])
|
[
"soumyara@usc.edu"
] |
soumyara@usc.edu
|
d1b60da90d9782510c69bfde3e81f8eda6aea1e4
|
c72ad7227924948f5ec6b402e476144c2997d47a
|
/learn_python_the_hard_way/ex11_ex12.py
|
fb13d3141a3468b495be0238b136588afbd7fe0f
|
[] |
no_license
|
AbyAbyss/python
|
da510817df2876b6f9264a03c6025b414513ec0c
|
d5386685d5406bb745dc22b926d531005943cd1b
|
refs/heads/master
| 2021-09-25T22:25:04.234344
| 2018-10-25T22:45:59
| 2018-10-25T22:45:59
| 105,034,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
# ex11.py and ex12.py done in python3
# raw_input() was renamed to input() in python3  (typo "raw_inupt" fixed)
# input() always returns a string; int(input()) converts age to an int
age = int(input("How old are you? "))
height = input("How tall are you? ")
weight = input("How much do you weigh? ")
# %r shows the repr, so the string answers print with quotes around them.
print("So, you're %r old, %r tall and %r heavy." % (age, height, weight))
|
[
"abyrocksaji@gmail.com"
] |
abyrocksaji@gmail.com
|
21894e55971b88bf10b449c64acfb93fcede1059
|
146a64a969476f4f8e62959991518193d062fbf1
|
/Week 2 Table Import/import_january_data.py
|
a4d2675d2d3eb9a0b85dfd9619402240e50d4623
|
[] |
no_license
|
qandrew/Andorra-UROP
|
9ad913b6d73ec4c8fcccf631b184fa7190a20323
|
d3c9cf9b273135f0441c17471b2bb8676194ba9a
|
refs/heads/master
| 2021-01-10T04:49:34.577740
| 2016-03-23T01:09:39
| 2016-03-23T01:09:39
| 53,598,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
# March 4 2016
# Andrew Xia importing Data for Andorra Media Lab UROP
# input: DWFET_CDR_CELLID_201501.csv.
# we want day, city, hour, cell phone tower (and others also)
import os
import csv
print 'started'
# print os.getcwd()
#os.chdir('/run/user/1000/gvfs/sftp:host=andorra.media.mit.edu/home/data_commons/cdrs')
#print os.getcwd()
#import data
people_data = {}
with open('../data_commons/cdrs/DWFET_CDR_CELLID_201501.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
# firstrow = True
# i = 0
for row in spamreader:
# if i >= 20:
# break
# i += 1
if row[0] not in people_data.keys(): #not a duplicate
person_data = row[3], row[4], row[6], row[-3], row[-1]
people_data[row[0]] = person_data
#key = person
#value = start time, finish time, cell tower, number called, kind of phone
#print people_data
#print len(people_data)
if 'DS_CDNUMORIGEN' in people_data.keys():
del people_data['DS_CDNUMORIGEN']
with open('january.csv', 'w') as csvfile:
fieldnames = ['DS_CDNUMORIGEN', 'DT_CDDATAINICI', 'DT_CDDATAFI', 'ID_CELLA_INI', 'DS_CDNUMDESTI', 'TAC_IMEI']
# person, start time, end time, cell tower, call number, phone type
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for entry in people_data:
writer.writerow({'DS_CDNUMORIGEN':entry, 'DT_CDDATAINICI':people_data[entry][0], 'DT_CDDATAFI':people_data[entry][1],
'ID_CELLA_INI':people_data[entry][2], 'DS_CDNUMDESTI':people_data[entry][3], 'TAC_IMEI':people_data[entry][4]})
print 'completed'
|
[
"axia@mit.edu"
] |
axia@mit.edu
|
a96f77fd3df9978f4177dda73f49efb9812076f4
|
46c91eba77a1564f911064c95aa5a730f8dd7bfe
|
/decoder.py
|
9d9e8b58fe1c5bb30fab4acd493fe73636bb8baa
|
[] |
no_license
|
chayan/image_captioning
|
2e5ec156b9ed417bd6f8441219e64f777ef4875b
|
b0771d2cb50b86c144c0c0e7e4175591979bdffe
|
refs/heads/master
| 2020-04-21T20:09:54.606283
| 2019-02-09T06:18:58
| 2019-02-09T06:18:58
| 169,833,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,556
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 09:33:46 2019
@author: chayan
"""
#import tensorflow as tf
#from tensorflow.contrib import keras
#L = keras.layers
#import vocab_utils as vutil
#
#vocab = vutil.get_vocab()
#
#IMG_SIZE = 299
#IMG_EMBED_SIZE = 2048
#IMG_EMBED_BOTTLENECK = 120
#WORD_EMBED_SIZE = 100
#LSTM_UNITS = 300
#LOGIT_BOTTLENECK = 120
#pad_idx = vocab[vutil.PAD]
#class decoder:
# # [batch_size, IMG_EMBED_SIZE] of CNN image features
# img_embeds = tf.placeholder('float32', [None, IMG_EMBED_SIZE])
#
# # [batch_size, time steps] of word ids
# sentences = tf.placeholder('int32', [None, None])
#
# # we use bottleneck here to reduce the number of parameters
# # image embedding -> bottleneck
# img_embed_to_bottleneck = L.Dense(IMG_EMBED_BOTTLENECK,
# input_shape=(None, IMG_EMBED_SIZE),
# activation='elu')
#
# # image embedding bottleneck -> lstm initial state
# img_embed_bottleneck_to_h0 = L.Dense(LSTM_UNITS,
# input_shape=(None, IMG_EMBED_BOTTLENECK),
# activation='elu')
#
# # word -> embedding
# word_embed = L.Embedding(len(vocab), WORD_EMBED_SIZE)
#
# # lstm cell (from tensorflow)
# lstm = tf.nn.rnn_cell.LSTMCell(LSTM_UNITS)
#
# # we use bottleneck here to reduce model complexity
# # lstm output -> logits bottleneck
# token_logits_bottleneck = L.Dense(LOGIT_BOTTLENECK,
# input_shape=(None, LSTM_UNITS),
# activation="elu")
#
# # logits bottleneck -> logits for next token prediction
# token_logits = L.Dense(len(vocab),
# input_shape=(None, LOGIT_BOTTLENECK))
#
# # initial lstm cell state of shape (None, LSTM_UNITS),
# # we need to condition it on `img_embeds` placeholder.
# c0 = h0 = img_embed_bottleneck_to_h0(img_embed_to_bottleneck(img_embeds))
#
# # embed all tokens but the last for lstm input,
# # remember that L.Embedding is callable,
# # use `sentences` placeholder as input.
# word_embeds = word_embed(sentences[:, :-1])
#
# # during training we use ground truth tokens `word_embeds` as context for next token prediction.
# # that means that we know all the inputs for our lstm and can get
# # all the hidden states with one tensorflow operation (tf.nn.dynamic_rnn).
# # `hidden_states` has a shape of [batch_size, time steps, LSTM_UNITS].
# hidden_states, _ = tf.nn.dynamic_rnn(lstm, word_embeds,
# initial_state=tf.nn.rnn_cell.LSTMStateTuple(c0, h0))
#
# # now we need to calculate token logits for all the hidden states
#
# # first, we reshape `hidden_states` to [-1, LSTM_UNITS]
# flat_hidden_states = tf.reshape(hidden_states, [-1, LSTM_UNITS]) ### YOUR CODE HERE ###
#
# # then, we calculate logits for next tokens using `token_logits_bottleneck` and `token_logits` layers
# ### YOUR CODE HERE ###
# flat_token_logits = token_logits(token_logits_bottleneck(flat_hidden_states))
#
# # then, we flatten the ground truth token ids.
# # remember, that we predict next tokens for each time step,
# # use `sentences` placeholder.
# flat_ground_truth = tf.reshape(sentences[:, 1:], [-1]) ### YOUR CODE HERE ###
#
# # we need to know where we have real tokens (not padding) in `flat_ground_truth`,
# # we don't want to propagate the loss for padded output tokens,
# # fill `flat_loss_mask` with 1.0 for real tokens (not pad_idx) and 0.0 otherwise.
#
# flat_loss_mask = tf.map_fn(lambda idx: tf.cond(tf.equal(idx, pad_idx), lambda: 0.0, lambda: 1.0),
# flat_ground_truth, dtype='float')
#
# # compute cross-entropy between `flat_ground_truth` and `flat_token_logits` predicted by lstm
# xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
# labels=flat_ground_truth,
# logits=flat_token_logits
# )
#
# # compute average `xent` over tokens with nonzero `flat_loss_mask`.
# # we don't want to account misclassification of PAD tokens, because that doesn't make sense,
# # we have PAD tokens for batching purposes only!
# masked_xent = tf.multiply(xent, flat_loss_mask)
# loss_sum = tf.reduce_sum(masked_xent)
# non_zero_count = tf.cast(tf.math.count_nonzero(masked_xent), tf.float32)
# loss = tf.divide(loss_sum, non_zero_count)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6f6d4de96ca0a1c2543a30b808a0d01e7cebf842
|
7b0f8e5e3dd02f47096bc35322063c7581f95aca
|
/chapter12/avazu_ctr/future_selection.py
|
51daffbfe3d66f4915cfa3d050b08282ce2e8a3f
|
[] |
no_license
|
michaelliu03/Search-Recommend-InAction
|
94f8342573fffa0016bc6b66b0100a81423e5438
|
06408c9a4ec23aad0e604267903be96cb2892ea1
|
refs/heads/master
| 2022-02-25T21:57:56.445082
| 2022-02-17T10:45:15
| 2022-02-17T10:45:15
| 226,252,089
| 40
| 17
| null | 2022-01-10T09:53:50
| 2019-12-06T05:24:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,906
|
py
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:future_selection.py
# @Author: Michael.liu
# @Date:2020/6/4 17:49
# @Desc: this code is ....
import pandas as pd
import numpy as np
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import log_loss
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from xgboost import plot_importance
def xgboost_selection_future():
    """Grid-search the XGBoost `n_estimators` hyper-parameter on tr_FE.csv.

    Runs 5-fold CV with negative log-loss scoring and prints the per-round
    results plus the best parameter value and score found.
    """
    train = pd.read_csv('tr_FE.csv')
    y_train = train.click
    # Drop the label plus non-feature columns (raw device ip, CSV index).
    X_train = train.drop(['click', 'device_ip', 'Unnamed: 0'], axis=1)
    cv_params = {'n_estimators': [400, 500, 600, 700, 800]}
    other_params ={'learning_rate': 0.1,
                   'n_estimators': 500,
                   'max_depth': 5,
                   'min_child_weight': 1,
                   'seed': 0,
                   'subsample': 0.8,
                   'objective': 'binary:logistic',
                   'colsample_bytree': 0.8,
                   'gamma': 0,
                   }
    model = xgb.XGBClassifier(**other_params)
    optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='neg_log_loss', cv=5, verbose=1,
                                 n_jobs=4)
    optimized_GBM.fit(X_train, y_train)
    # NOTE(review): `grid_scores_` was removed from scikit-learn in 0.20;
    # on modern versions this raises AttributeError — use `cv_results_`.
    evalute_result = optimized_GBM.grid_scores_
    print('每轮迭代运行结果:{0}'.format(evalute_result))
    print('参数的最佳取值:{0}'.format(optimized_GBM.best_params_))
    print('最佳模型得分:{0}'.format(optimized_GBM.best_score_))
def modelfit(alg, dtrain, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Fit an XGBoost classifier, optionally tuning n_estimators via xgb.cv,
    then plot feature importances.

    NOTE(review): this helper looks stale/unused for this dataset:
    - `target` (used below) is not defined anywhere in this module (NameError
      when useTrainCV is True);
    - the hard-coded label column 'Disbursed' does not match the 'click'
      label used by the other functions in this file;
    - `show_progress` and `alg.booster()` are APIs of old xgboost releases.
    """
    if useTrainCV:
        xgb_param = alg.get_xgb_params()  # current model parameters
        xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)  # training data and labels
        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
                          metrics='auc', early_stopping_rounds=early_stopping_rounds, show_progress=False)
        alg.set_params(n_estimators=cvresult.shape[0])
    alg.fit(dtrain[predictors], dtrain['Disbursed'], eval_metric='auc')
    # Predict training set:
    dtrain_predictions = alg.predict(dtrain[predictors])
    dtrain_predprob = alg.predict_proba(dtrain[predictors])[:, 1]
    # Print model report:
    print("Model Report")
    # print("Accuracy : %.4g" % accuracy_score(dtrain['Disbursed'].values, dtrain_predictions))
    # print("AUC Score (Train): %f" % roc_auc_score(dtrain['Disbursed'], dtrain_predprob))
    feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
    feat_imp.plot(kind='bar', title='Feature Importances')
    plt.ylabel('Feature Importance Score')
def future_important():
    """Train an XGBRegressor on tr_FE.csv, plot gain-based feature importance,
    print the importance ranking, and write it to feature.csv."""
    train = pd.read_csv("tr_FE.csv")
    # test = pd.read_csv("tr_FE.csv")
    #features = pd.read_csv('feature.csv')
    y_train = train.click
    X_train = train.drop(['click'], axis=1)
    model = xgb.XGBRegressor(n_estimators=350, max_depth=10, objective='binary:logistic', min_child_weight=50,
                             subsample=0.8, gamma=0, learning_rate=0.2, colsample_bytree=0.5, seed=27)
    model.fit(X_train, y_train)
    # y_test = model.predict(X_test)
    # Plot gain-based importance, and also collect the sklearn-style scores.
    plot_importance(model, importance_type="gain")
    features = X_train.columns
    feature_importance_values = model.feature_importances_
    feature_importances = pd.DataFrame({'feature': list(features), 'importance': feature_importance_values})
    feature_importances.sort_values('importance', inplace=True, ascending=False)
    print(feature_importances)
    # print(model.get_booster().get_fscore())
    print(model.get_booster().get_score(importance_type="gain"))
    feature_importances.to_csv('feature.csv')
if __name__ == '__main__':
    # Only the n_estimators grid search runs by default.
    print("start......")
    xgboost_selection_future()
    print(">>>>>>>>end")
|
[
"liuyu5@liepin.com"
] |
liuyu5@liepin.com
|
4a5381736226c7eb801e79763c2469848140b24c
|
a1e8d1211e2265fa91a044c7a70534938a16ba7c
|
/summarization.py
|
41c4c996d9c706a58639a7115c07104266b4cabd
|
[] |
no_license
|
tamires/HS-MVideoSumm
|
7abe0116a2c0b801caeee4ed29e278826e9ad9bd
|
4788101895d75bf01b1f7b8770c376b47c1adc2a
|
refs/heads/main
| 2023-06-25T09:56:02.528467
| 2021-07-27T20:22:05
| 2021-07-27T20:22:05
| 360,651,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,331
|
py
|
import os
from moviepy.editor import *
import utils
def get_reference_info(redundancy, segments):
    """Map every clustered phrase back to its source text and topic position.

    Returns one list per cluster; each entry is a dict with "text" (index of
    the source text in *segments*) and "topic" (position of the matching
    segment in that text), or an empty dict when the phrase is not found.
    If a phrase occurs in several texts, the last matching text wins.
    """
    reference = []
    for phrase_group in redundancy:
        cluster = []
        for phrase in phrase_group:
            info = {}
            for text_idx, text_segments in enumerate(segments):
                for seg in text_segments:
                    if seg.get("content") == phrase:
                        info["text"] = text_idx
                        info["topic"] = text_segments.index(seg)
                        break
            cluster.append(info)
        reference.append(cluster)
    return reference
def text_in_cluster(text, reference, cluster):
    """Return the topic-info dict of *cluster* that belongs to *text*, or {}
    when the cluster has no topic from that text."""
    matches = (info for info in reference[cluster] if info.get("text") == text)
    return next(matches, {})
def insert_summary(item, summary, reference, cluster):
    """Insert *item* into *summary* (in place) keeping chronological order,
    and return the mutated list."""
    position = 0
    for existing in summary:
        counterpart = text_in_cluster(existing.get("text"), reference, cluster)
        # Compare against the counterpart's topic when this cluster has an
        # entry for the existing phrase's text; otherwise fall back to the
        # new item's own topic (same comparison the original made).
        if counterpart == {}:
            compare_topic = item.get("topic")
        else:
            compare_topic = counterpart.get("topic")
        if compare_topic > existing.get("topic"):
            position = position + 1
        else:
            break
    summary.insert(position, item)
    return summary
def generate_summary(reference, segments, histogram):
    """Build the summary by picking the highest-scoring segment of each
    cluster and inserting it chronologically via insert_summary.

    *segments* is accepted but not used here.  *histogram* rows are consumed
    in order, one per candidate segment across all clusters.
    """
    summary = []
    seg_num = 0
    for cluster_idx, cluster in enumerate(reference):
        best_score = -1
        for candidate in cluster:
            candidate_score = sum(histogram[seg_num])
            seg_num = seg_num + 1
            if candidate_score > best_score:
                best_score = candidate_score
                best_item = candidate
        summary = insert_summary(best_item, summary, reference, cluster_idx)
    return summary
def print_summary(intro, summary, folder, segments):
    """Write the summary text to <folder>/text_summary.txt, one segment per
    line, each prefixed with its "<text>-<topic>" tag."""
    file_name = os.path.join(folder, "text_summary.txt")
    with open(file_name, 'w') as out:
        # Introduction segment first, tagged with its source video index.
        out.write(f"{intro.get('video')}-intro ")
        out.write(intro.get("content") + "\n")
        # Remaining summary segments.
        for item in summary:
            out.write(f"{item.get('text')}-{item.get('topic')} ")
            out.write(segments[item.get("text")][item.get("topic")].get("content") + "\n")
def create_video_summary(intro, summary, folder, video_name, segments):
    """Produce the video summary by concatenating the selected segments.

    The intro clip gets only a fade-out; every other selected segment gets
    both a fade-in and a fade-out. The result is written to
    <folder>/video_summary.mp4.
    """
    def _cut(source_path, start, stop):
        # Load one source video and extract [start, stop] (times given as
        # strings convertible by utils.get_seconds).
        return VideoFileClip(source_path).subclip(
            utils.get_seconds(start), utils.get_seconds(stop))

    clips = []
    # opening segment: fade-out only
    opening = _cut(video_name[intro.get("video") + 2] + ".mp4",
                   intro.get("begin"), intro.get("end"))
    clips.append(vfx.fadeout(opening, 0.5))
    # remaining summary segments: fade-in and fade-out on video and audio
    for entry in summary:
        segment = segments[entry.get("text")][entry.get("topic")]
        piece = _cut(video_name[entry.get("text") + 2] + ".mp4",
                     segment.get("begin"), segment.get("end"))
        clips.append(vfx.fadeout(vfx.fadein(piece, 0.5), 0.5))
    # concatenate everything into the final summary video
    output_path = os.path.join(folder, "video_summary.mp4")
    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile(output_path)
|
[
"noreply@github.com"
] |
noreply@github.com
|
15b779590d80c43d72cf8b994fd28ae31823e94b
|
e54852b7a28d617f249c26bc2a7baa1a71d2000f
|
/LeetCodePython/559.n叉树的最大深度.py
|
456c32d53b806bd4fb7824906a9aaade6e507371
|
[] |
no_license
|
CarnoZhao/Leetcode
|
4aaa77fdf168779e4182900b6cc38d522291009f
|
61e03e55e93fb909f01f1b318fe1014badb6d93b
|
refs/heads/master
| 2020-05-17T09:48:40.888108
| 2020-01-09T07:10:27
| 2020-01-09T07:10:27
| 183,642,921
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
#
# @lc app=leetcode.cn id=559 lang=python3
#
# [559] N叉树的最大深度
#
# @lc code=start
"""
# Definition for a Node.
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution:
    def maxDepth(self, root: 'Node') -> int:
        """Return the maximum depth of an N-ary tree; 0 for an empty tree."""
        if not root:
            return 0
        child_depths = [self.maxDepth(child) for child in root.children]
        return (max(child_depths) if child_depths else 0) + 1
# @lc code=end
|
[
"43026195+CarnoZhao@users.noreply.github.com"
] |
43026195+CarnoZhao@users.noreply.github.com
|
2e32069c83261894997d74d96a146dafe51ebab7
|
4a89841fa3a73a6826d47d1e66f965759335118b
|
/askbot-devel-master/askbot/management/commands/send_unanswered_question_reminders.py
|
42ce51196170a8a2bd64d75309e60f36b0ad5fb4
|
[] |
no_license
|
liyonghelpme/askbotDataWriter
|
38e2515712a8a6f9db45ce69ba21d36fd1e2dcc9
|
f88d343f8fd699b2d55b94d6dff4edda8e352301
|
refs/heads/master
| 2021-01-22T10:08:01.173519
| 2013-07-19T07:12:35
| 2013-07-19T07:12:35
| 11,522,328
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,901
|
py
|
from django.core.management.base import NoArgsCommand
from django.template.loader import get_template
from askbot import models
from askbot import const
from askbot.conf import settings as askbot_settings
from django.utils.translation import ungettext
from askbot import mail
from askbot.utils.classes import ReminderSchedule
from askbot.models.question import Thread
from askbot.utils.html import site_url
from django.template import Context
DEBUG_THIS_COMMAND = False
class Command(NoArgsCommand):
    """Management command that emails each non-blocked user reminders
    about recent questions that still have no answers.

    Python 2 / legacy Django code (print statement, NoArgsCommand).
    Respects ENABLE_EMAIL_ALERTS and ENABLE_UNANSWERED_REMINDERS settings;
    cadence is controlled by a ReminderSchedule built from askbot settings.
    """
    def handle_noargs(self, **options):
        # Bail out entirely if either feature switch is off.
        if askbot_settings.ENABLE_EMAIL_ALERTS == False:
            return
        if askbot_settings.ENABLE_UNANSWERED_REMINDERS == False:
            return
        #get questions without answers, excluding closed and deleted
        #order it by descending added_at date
        schedule = ReminderSchedule(
            askbot_settings.DAYS_BEFORE_SENDING_UNANSWERED_REMINDER,
            askbot_settings.UNANSWERED_REMINDER_FREQUENCY,
            max_reminders = askbot_settings.MAX_UNANSWERED_REMINDERS
        )
        # Candidate pool: open, non-deleted questions added inside the
        # schedule's window and still answerless.
        questions = models.Post.objects.get_questions().exclude(
            thread__closed = True
        ).exclude(
            deleted = True
        ).added_between(
            start = schedule.start_cutoff_date,
            end = schedule.end_cutoff_date
        ).filter(
            thread__answer_count = 0
        ).order_by('-added_at')
        #for all users, excluding blocked
        #for each user, select a tag filtered subset
        #format the email reminder and send it
        for user in models.User.objects.exclude(status = 'b'):
            # Never remind a user about their own questions.
            user_questions = questions.exclude(author = user)
            user_questions = user.get_tag_filtered_questions(user_questions)
            if askbot_settings.GROUPS_ENABLED:
                # Restrict to questions visible to the user's groups.
                user_groups = user.get_groups()
                user_questions = user_questions.filter(groups__in = user_groups)
            # Drop questions already reminded about too recently/too often.
            final_question_list = user_questions.get_questions_needing_reminder(
                user = user,
                activity_type = const.TYPE_ACTIVITY_UNANSWERED_REMINDER_SENT,
                recurrence_delay = schedule.recurrence_delay
            )
            question_count = len(final_question_list)
            if question_count == 0:
                continue
            threads = Thread.objects.filter(id__in=[qq.thread_id for qq in final_question_list])
            tag_summary = Thread.objects.get_tag_summary_from_threads(threads)
            # Pluralized subject line summarizing count and topics.
            subject_line = ungettext(
                '%(question_count)d unanswered question about %(topics)s',
                '%(question_count)d unanswered questions about %(topics)s',
                question_count
            ) % {
                'question_count': question_count,
                'topics': tag_summary
            }
            data = {
                'site_url': site_url(''),
                'questions': final_question_list,
                'subject_line': subject_line
            }
            template = get_template('email/unanswered_question_reminder.html')
            body_text = template.render(Context(data))#todo: set lang
            if DEBUG_THIS_COMMAND:
                # Debug mode: dump the rendered message instead of emailing.
                print "User: %s<br>\nSubject:%s<br>\nText: %s<br>\n" % \
                    (user.email, subject_line, body_text)
            else:
                mail.send_mail(
                    subject_line = subject_line,
                    body_text = body_text,
                    recipient_list = (user.email,)
                )
|
[
"liyonghelpme@gmail.com"
] |
liyonghelpme@gmail.com
|
325ef11b155fbaa8e4e993bad295a14bd10f0da1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2698/60825/298860.py
|
fabee180d1ca0e439e1ce0a2a785aa0d0d867e9a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
# Read the whole of stdin (all lines concatenated, no separators) until EOF.
t = ""
while True:
    try:
        t += input()
    except:
        break

# Hard-coded answers keyed by the exact input, with two prefix fallbacks.
_exact = {
    '2 2': '3',
    '3 5': '58871587162270592645034001',
    '2 3': '21',
}
if t in _exact:
    print(_exact[t], end='')
elif t.startswith('2 4'):
    print(651, end='')
elif t.startswith('4 3'):
    print(83505, end='')
else:
    # Unknown input: echo it back (with a trailing newline).
    print(t)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
249262a720d03350f1217fcb41e876722b33343e
|
d3eb39fd54b8eaed104cee33da853b904b244344
|
/src/Analysis/MC/plot_ac_sums.py
|
7ea34bfd2c9fa1faa64044df8b33d66da70d7fb0
|
[] |
no_license
|
MuffinSpawn/Dissertation
|
aab509c879752067cf799bd77abcf3cccf6eeff2
|
87047ecfbb41ab3dcde5db4c3c9768926afa27bb
|
refs/heads/master
| 2021-01-25T13:06:08.733871
| 2018-03-02T05:16:27
| 2018-03-02T05:16:27
| 123,528,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,613
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 16:28:19 2016
@author: plane
"""
#%load_ext autoreload
#%autoreload 2
import math
import subprocess as proc
import sys
import platform
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.collections as mcollections
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt # Matplotlib's pyplot: MATLAB-like syntax
import scipy.signal as sig
import scipy.special as special
import bisect as bi
import time
import pgl.comsol as comsol
import pgl.curve as curve
import pgl.labview as labview
import pgl.plot as plot
import pgl.signal as psig
import pgl.tektronix as tektronix
import pgl.mc as mc
import pgl.progress as prog
import pgl.cluster as clust
import Queue as queue
import pp
import mpl_toolkits.axes_grid1 as pltool
def reset_plot_params():
    """Restore the matplotlib rcParams used for all figures in this analysis."""
    settings = {
        'ytick.labelsize': 22,
        'xtick.labelsize': 22,
        'axes.labelsize': 26,
        'font.size': 26,
        'mathtext.default': 'regular',
        'figure.subplot.left': 0.02,
        'figure.subplot.right': 0.98,
        'figure.subplot.top': 0.9,
        'figure.subplot.bottom': 0.1,
        'figure.subplot.wspace': 0.2,
        'figure.subplot.hspace': 0.2,
    }
    # Keys are independent, so assignment order does not matter.
    for key, value in settings.items():
        mpl.rcParams[key] = value
reset_plot_params()
def accumulated_correlation(signals, dt, mic_coordinates, radius, thickness,
                            v_s, v_p, actual_location,
                            grid_size=10, settling_time=0, octant=-1):
    """Localize an acoustic source by accumulated cross-correlation on a grid.

    Cross-correlates every microphone pair, then for each grid point sums the
    correlation value at the lag predicted for that point by
    psig.ac_contrib_index; the grid point with the largest accumulated sum is
    returned as the estimated source. Also draws diagnostic matplotlib figures
    (per-pair sum maps and the total map). Python 2 code (print statements).

    NOTE(review): units are presumably cm for geometry and seconds for dt —
    confirm against the callers in this file. `actual_location` is only
    plotted for reference, never used in the estimate.

    Returns [x, y] of the best grid point, or [0.0, 0.0] if that point falls
    outside the disc of the given radius.
    """
    # Cross-correlation of every unordered microphone pair; symmetric matrix.
    lag_matrix = np.zeros((len(signals), len(signals), len(signals[0])*2-1))
    for i,signal_i in enumerate(signals):
        for j,signal_j in enumerate(signals[i+1:]):
            lag_matrix[i, j+i+1] = sig.correlate(signal_i, signal_j)
            lag_matrix[j+i+1, i] = lag_matrix[i, j+i+1]
    # Two octants per quadrant; -1 means "search the whole disc".
    quadrant = -1
    if octant >=0:
        quadrant = int(octant / 2)
    # - Create a zero matrix the size of the test point grid (sum matrix)
    sums = np.zeros((grid_size, grid_size))
    # Grid axes, restricted to the signed quadrant when one was requested.
    if quadrant >= 0:
        if (quadrant == 0) or (quadrant == 3):
            xs = np.linspace(0, radius, num=grid_size)
        else:
            xs = np.linspace(0, -radius, num=grid_size)
        if (quadrant == 0) or (quadrant == 1):
            ys = np.linspace(0, radius, num=grid_size)
        else:
            ys = np.linspace(0, -radius, num=grid_size)
    else:
        xs = np.linspace(-radius, radius, num=grid_size)
        ys = np.linspace(-radius, radius, num=grid_size)
    n0 = len(signals[0])
    # Enumerate the unique microphone pairs (i < j).
    ijs = []
    for i,signal_i in enumerate(signals):
        for j,signal_j in enumerate(signals[i+1:]):
            # Note: j -> j+i+1 because of the loop optimization
            ijs.append([i, j+i+1])
    ijs = np.array(ijs)
    # Slope of the diagonal splitting each quadrant into its two octants,
    # taken from microphone 0 or 1 depending on which octants are involved.
    if np.any(octant == np.array([0,1,4,5])):
        # octants 0,1,4,5
        constraint_slope = float(mic_coordinates[0,0]) / mic_coordinates[0,1]
    else:
        # octants 2,3,6,7
        constraint_slope = float(mic_coordinates[1,0]) / mic_coordinates[1,1]
    partial_sums = np.zeros((len(ijs), grid_size, grid_size))
    for a,x in enumerate(xs):
        if (quadrant >= 0):
            # Only scan y values inside the disc for this x column.
            max_y = math.sqrt(radius**2 - x**2)
            dy = radius / (grid_size-1)
            max_b = int(round(max_y / dy))
        else:
            min_b = 0
            max_b = len(ys)
        for b,y in enumerate(ys[:max_b]):
            #for b,y in enumerate(ys):
            # - For each pair of microphones...
            for index,ij in enumerate(ijs):
                contrib_index = -1
                # Accept the point only if inside the disc and inside the
                # requested octant wedge (or anywhere when octant < 0).
                if (x**2 + y**2) <= (radius**2) and\
                   ((octant == 0 and y <= constraint_slope*x and x >= y/constraint_slope) or\
                   (octant == 1 and y >= constraint_slope*x and x <= y/constraint_slope) or\
                   (octant == 2 and y >= constraint_slope*x and x >= y/constraint_slope) or\
                   (octant == 3 and y <= constraint_slope*x and x <= y/constraint_slope) or\
                   (octant == 4 and y >= constraint_slope*x and x <= y/constraint_slope) or\
                   (octant == 5 and y <= constraint_slope*x and x >= y/constraint_slope) or\
                   (octant == 6 and y <= constraint_slope*x and x <= y/constraint_slope) or\
                   (octant == 7 and y >= constraint_slope*x and x >= y/constraint_slope) or\
                   (octant < 0)):
                    # Expected correlation-lag index for this pair/point.
                    contrib_index = psig.ac_contrib_index(mic_coordinates, [x, y], thickness,
                                                          ij[0], ij[1], n0, v_s, v_p, dt,
                                                          settling_time)
                if contrib_index >= 0 and contrib_index < lag_matrix.shape[2]:
                    partial_sums[index, a,b] += lag_matrix[ij[0],ij[1],contrib_index]
    # Diagnostic figure: one subplot per microphone pair.
    fig = plt.figure(figsize=(15, 10))
    for index,ij in enumerate(ijs):
        plt.subplot(231+index)
        """
        plt.scatter(mic_coordinates[ij[0],0], mic_coordinates[ij[0],1],
                    marker='+', c='k', s=100)
        plt.scatter(mic_coordinates[ij[1],0], mic_coordinates[ij[1],1],
                    marker='+', c='k', s=100)
        """
        plt.plot(mic_coordinates[ij[0],0], mic_coordinates[ij[0],1], 'k+', markersize=20)
        plt.plot(mic_coordinates[ij[1],0], mic_coordinates[ij[1],1], 'k+', markersize=20)
        im = plt.imshow(partial_sums[index, :,::-1].transpose(), extent=[xs[0], xs[-1], ys[0], ys[-1]])
        if np.any(index == np.array([3,4,5])):
            plt.xlabel('x (cm)')
        if np.any(index == np.array([0,3])):
            plt.ylabel('y (cm)')
        divider = pltool.make_axes_locatable(fig.axes[2*index])
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(im, cax=cax)
        if np.any(index == np.array([0,1,2])):
            fig.axes[2*index].get_xaxis().set_visible(False)
        if np.any(index == np.array([1,2,4,5])):
            fig.axes[2*index].get_yaxis().set_visible(False)
    print 'Axis Count:', len(fig.axes)
    """
    axis_index = 4
    if axis_index < len(fig.axes):
        fig.axes[axis_index].get_xaxis().set_visible(False)
        fig.axes[axis_index].get_yaxis().set_visible(False)
    """
    plt.tight_layout()
    # Total accumulated-correlation map over all pairs.
    sums = np.sum(partial_sums, axis=0)
    fig = plt.figure(figsize=(6, 6))
    for coordinates in mic_coordinates:
        plt.plot(coordinates[0], coordinates[1], 'k+', markersize=20)
    plt.plot(actual_location[0], actual_location[1], 'k*', markersize=20)
    plt.xlabel('x (cm)')
    plt.ylabel('y (cm)')
    # Flip axes as needed so the image matches the sign of the search region.
    if xs[-1] < 0 and ys[-1] < 0:
        im = plt.imshow(sums[:,::-1].transpose()[::-1,::-1], extent=[xs[-1], xs[0], ys[-1], ys[0]])
    elif xs[-1] < 0 and ys[-1] > 0:
        im = plt.imshow(sums[:,::-1].transpose()[:,::-1], extent=[xs[-1], xs[0], ys[0], ys[-1]])
    else:
        im = plt.imshow(sums[:,::-1].transpose(), extent=[xs[0], xs[-1], ys[0], ys[-1]])
    divider = pltool.make_axes_locatable(fig.axes[0])
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(im, cax=cax)
    plt.tight_layout()
    # - Use the max sum matrix element to calculate the most likely source point
    max_indicies = np.unravel_index([np.argmax(sums)], np.shape(sums))
    coordinates = [xs[max_indicies[0][0]], ys[max_indicies[1][0]]]
    if coordinates[0]**2 + coordinates[1]**2 > radius**2:
        coordinates = [0.0,0.0]
    return coordinates
def localize_spark_pp(times, signals, v_s, v_p, grid_size, actual_location):
    """Estimate a spark location from microphone signals (Python 2 entry point).

    Fixes the microphone geometry and disc dimensions for this experiment,
    first narrows the search to an octant via trilateration, then runs
    accumulated_correlation over a grid_size x grid_size test grid.
    """
    dt = times[1] - times[0]
    # Four microphones at the corners of a 10 x 12 cm rectangle (cm).
    mic_coordinates = np.array(zip([5, -5, -5, 5], [6, 6, -6, -6]))
    radius = 14.22 # cm
    thickness = 1.37 # cm
    ordered_mics = mc.order_by_time(times, signals, False)
    # NOTE(review): `mics` is computed but never used below.
    mics = sorted(ordered_mics, key=lambda mic: mic.id())
    # Coarse octant estimate restricts the correlation search region.
    octant = mc.octant_trilateration(signals, ordered_mics, mic_coordinates)
    print octant
    return accumulated_correlation(
        signals, dt,
        mic_coordinates,
        radius, thickness,
        v_s, v_p,
        actual_location,
        #grid_size=grid_size)
        grid_size=grid_size,
        octant=octant)
if __name__ == '__main__':
    # Data directory differs between the Windows and Linux machines used.
    data_dir = "C:\\Users\\plane\\Dropbox\\Research\\MTA\\Analysis\\MC\\"
    if platform.system() == 'Linux':
        data_dir = "/home/lane/Dropbox/Research/MTA/Analysis/MC/"
    comsol_file = "random100_400kHz.npy"
    dt = 2.5e-6
    # Analyze a single simulated event out of the Monte Carlo set.
    signal_index = 75
    actual_location = np.load(''.join((data_dir, 'actual_locations.npy')))[signal_index]
    (times, signal_sets) = comsol.load_data(data_dir, comsol_file, dt=dt)
    signals = signal_sets[signal_index]
    # v_s and v_p are the shear/pressure wave speeds (presumably cm/s — confirm).
    print localize_spark_pp(times, signals, 4.03448276e5, 3.06896552e5, 50, actual_location)
|
[
"muffinspawn@gmail.com"
] |
muffinspawn@gmail.com
|
f769efd583a1443d13ef6822ba32e7143583ca0e
|
e909e9bb4b2e54bb64d6bee9cf9fbaf14c584e04
|
/malib/rpc/data/data_client.py
|
9f234eee232e8402b6f1b1d21719a6bc1572db22
|
[
"MIT"
] |
permissive
|
zhihaolyu/malib
|
9cd8fdcdc1c613c11fc1e6f385adac5312474509
|
1c7ca1819325796a6ec604aa1ae8c771708fc50c
|
refs/heads/main
| 2023-05-13T03:41:05.211832
| 2021-06-08T04:35:10
| 2021-06-08T04:35:10
| 374,880,657
| 0
| 0
|
MIT
| 2021-06-08T04:29:26
| 2021-06-08T04:29:25
| null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
import grpc
import sys
sys.path.append("..")
from ..proto import data_pb2_grpc, data_pb2
def send(server_port, **kargs):
    """Pull data from the DataRPC service over an insecure gRPC channel.

    Required keyword arguments: tid (request type), sid (schema id),
    iid (instance id).

    Fix: the pulled response was previously assigned to a local and silently
    discarded; it is now returned to the caller (backward-compatible — callers
    that ignored the return value are unaffected).
    """
    with grpc.insecure_channel(server_port) as channel:
        stub = data_pb2_grpc.DataRPCStub(channel)
        pr = data_pb2.PullRequest(
            type=kargs["tid"], schema_id=kargs["sid"], instance_id=kargs["iid"]
        )
        return stub.Pull(pr)
|
[
"kornbergfresnel@outlook.com"
] |
kornbergfresnel@outlook.com
|
2bdac2a7ef87c9f5e2fa61c75798686e288c01d7
|
df0e28b0e218372cbdc649b282ccab31ed48946c
|
/setup.py
|
5d94c42b2850e0807faf17f80dd4994bf68a763c
|
[
"MIT"
] |
permissive
|
vargash1/vBinaryTree.py
|
2945e8f246f278871e57c3597b1e94233ac00cc3
|
aabf53d25e213e568eabf0bee7e307eb5468661f
|
refs/heads/master
| 2021-01-10T02:12:09.552644
| 2017-06-02T18:58:29
| 2017-06-02T18:58:29
| 45,970,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: vargash1
# @Date: 2015-11-11 03:42:08
# @Email: vargash1@wit.edu
# @Name : Vargas, Hector
# @Last modified by: vargash1
# @Last modified time: Wednesday, August 17th 2016, 7:30:53 pm
import os
from setuptools import setup
# Utility function to read the README file.
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Fix: the file handle was opened without being closed; use a context
    manager so it is released even if reading raises.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata for setuptools; the long description is loaded from
# README.md at build time via the read() helper defined above.
setup(
    name = "vbinarytree",
    version = "0.1.0",
    author = "Hector Vargas",
    author_email = "hjvargas1213@gmail.com",
    description = ("A simple implementation of a Binary Tree "
                   "along with some useful methods."),
    license = "MIT",
    keywords = "example documentation simple bst binary search tree",
    url = "https://github.com/vargash1/vBinaryTree.py",
    packages=['vbinarytree'],
    long_description=read('README.md')
)
|
[
"vargash1@wit.edu"
] |
vargash1@wit.edu
|
63568f42668b1cb677e679e4790b41644c7a5172
|
316210a64f9cd5836571396104170a460a2ef625
|
/Example/PlotTrajectory1.py
|
96d7e271343c090f2f91df9c3bb431751098fa18
|
[] |
no_license
|
llcc343/PL_VO
|
6547d28a39941568951af758e56b94cc2d7d1703
|
a33a371e3194e807f9deeb2de2fde7cf825ad855
|
refs/heads/master
| 2023-06-22T19:57:19.657836
| 2018-03-08T03:21:35
| 2018-03-08T03:21:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
# import necessary module
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# load data from file
# you can replace this using with open
# Assumed column layout: [timestamp, tx, ty, tz, ...] (TUM trajectory
# format) — confirm against the writer of these files.
data1 = np.loadtxt("../cmake-build-debug/tum_trajectory1.txt")
data2 = np.loadtxt("../cmake-build-debug/groundtruth1.txt");
# Estimated-trajectory translation components.
tx = data1[:, 1]
ty = data1[:, 2]
tz = data1[:, 3]
# Ground-truth translation components.
txg = data2[:, 1]
tyg = data2[:, 2]
tzg = data2[:, 3]
# new a figure and set it into 3d
fig = plt.figure()
ax = fig.gca(projection='3d')
# set figure information
ax.set_title("Trajectory")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
# draw the figure, the color is r = read
# Red: estimated trajectory; blue: ground truth.
figure = ax.plot(tx, ty, tz, c='r')
figure2 = ax.plot(txg, tyg, tzg, c='b');
plt.show()
|
[
"452271058@qq.com"
] |
452271058@qq.com
|
ff1bf64c5cb89b7c89723338a47cc6801ec96ed2
|
19650491514004f5a9fba674ab3ff481b15d55f5
|
/mail.py
|
2adc6a18cf9a55b33cfe66c7e378d04250da6742
|
[] |
no_license
|
Felraya/LockHome-2
|
a5eeff6e800c0bb4bf032f9b94688dd9aaf84a3b
|
0e5c3ac72dda067c3cfa144e97d5947636f622b1
|
refs/heads/master
| 2022-10-24T02:56:03.063549
| 2020-06-22T13:18:53
| 2020-06-22T13:18:53
| 274,114,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
import smtplib
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#to accept less secure applications
#https://myaccount.google.com/lesssecureapps
#CONSTANTS
# Sender account used for intrusion alert emails.
MY_ADDRESS = 'mylockhome@gmail.com'
# NOTE(review): plaintext credential committed to source control — move this
# to an environment variable or secrets store and rotate the password.
PASSWORD = 'lockhome_85'
#VARIABLES
# Alert recipient and the name substituted into the message template.
DEST_MAIL = 'adri85bernard@gmail.com'
MY_NAME = 'Adrien'
def read_template(filename):
    """Read *filename* (UTF-8) and return its contents as a string.Template."""
    with open(filename, 'r', encoding='utf-8') as source:
        return Template(source.read())
def send_mail() :
    """Send the intrusion-alert email built from message.txt to DEST_MAIL.

    Connects to Gmail's SMTP server with STARTTLS using the module-level
    MY_ADDRESS/PASSWORD credentials. Expects a file named 'message.txt'
    in the working directory containing a $PERSON_NAME template.
    """
    message_template = read_template('message.txt')
    # set up the SMTP server
    s = smtplib.SMTP(host='smtp.gmail.com', port=587)
    s.starttls()
    s.login(MY_ADDRESS, PASSWORD)
    msg = MIMEMultipart() # create a message
    # add in the actual person name to the message template
    message = message_template.substitute(PERSON_NAME=MY_NAME)
    # setup the parameters of the message
    msg['From']=MY_ADDRESS
    msg['To']=DEST_MAIL
    msg['Subject']="Intrusion"
    # add in the message body
    msg.attach(MIMEText(message, 'plain'))
    # send the message via the server set up earlier.
    s.send_message(msg)
    print("mail send")
    del msg
    # Terminate the SMTP session and close the connection
    s.quit()
def main() :
    # Entry point: send one alert email immediately.
    send_mail()
if __name__ == '__main__':
    main()
|
[
"adri85bernard@gmail.com"
] |
adri85bernard@gmail.com
|
583f053a5f5f31217decbeaed149f301f2056bf9
|
b92adbd59161b701be466b3dbeab34e2b2aaf488
|
/.c9/metadata/environment/database_submissions/dup_query.py
|
537917265b85ff92fabc4fa0426f9e0112175932
|
[] |
no_license
|
R151865/cloud_9_files
|
7486fede7af4db4572f1b8033990a0f07f8749e8
|
a468c44e9aee4a37dea3c8c9188c6c06e91cc0c4
|
refs/heads/master
| 2022-11-22T10:45:39.439033
| 2020-07-23T09:31:52
| 2020-07-23T09:31:52
| 281,904,416
| 0
| 1
| null | 2022-11-20T00:47:10
| 2020-07-23T09:08:48
|
Python
|
UTF-8
|
Python
| false
| false
| 24,298
|
py
|
{"filter":false,"title":"dup_query.py","tooltip":"/database_submissions/dup_query.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":81,"column":58},"end":{"row":81,"column":59},"action":"insert","lines":["="],"id":1827},{"start":{"row":81,"column":59},"end":{"row":81,"column":60},"action":"insert","lines":["'"]},{"start":{"row":81,"column":60},"end":{"row":81,"column":61},"action":"insert","lines":["'"]}],[{"start":{"row":81,"column":60},"end":{"row":81,"column":61},"action":"insert","lines":["w"],"id":1828},{"start":{"row":81,"column":61},"end":{"row":81,"column":62},"action":"insert","lines":["i"]},{"start":{"row":81,"column":62},"end":{"row":81,"column":63},"action":"insert","lines":["c"]},{"start":{"row":81,"column":63},"end":{"row":81,"column":64},"action":"insert","lines":["k"]}],[{"start":{"row":81,"column":65},"end":{"row":81,"column":66},"action":"insert","lines":[";"],"id":1829}],[{"start":{"row":40,"column":38},"end":{"row":41,"column":0},"action":"insert","lines":["",""],"id":1830},{"start":{"row":41,"column":0},"end":{"row":41,"column":12},"action":"insert","lines":[" 
"]},{"start":{"row":41,"column":12},"end":{"row":41,"column":13},"action":"insert","lines":["C"]}],[{"start":{"row":41,"column":13},"end":{"row":41,"column":14},"action":"insert","lines":["O"],"id":1831},{"start":{"row":41,"column":14},"end":{"row":41,"column":15},"action":"insert","lines":["N"]},{"start":{"row":41,"column":15},"end":{"row":41,"column":16},"action":"insert","lines":["S"]}],[{"start":{"row":41,"column":16},"end":{"row":41,"column":17},"action":"insert","lines":["T"],"id":1832},{"start":{"row":41,"column":17},"end":{"row":41,"column":18},"action":"insert","lines":["R"]},{"start":{"row":41,"column":18},"end":{"row":41,"column":19},"action":"insert","lines":["A"]},{"start":{"row":41,"column":19},"end":{"row":41,"column":20},"action":"insert","lines":["I"]},{"start":{"row":41,"column":20},"end":{"row":41,"column":21},"action":"insert","lines":["N"]},{"start":{"row":41,"column":21},"end":{"row":41,"column":22},"action":"insert","lines":["T"]}],[{"start":{"row":41,"column":22},"end":{"row":41,"column":23},"action":"insert","lines":[" "],"id":1833}],[{"start":{"row":42,"column":57},"end":{"row":43,"column":0},"action":"insert","lines":["",""],"id":1834},{"start":{"row":43,"column":0},"end":{"row":43,"column":12},"action":"insert","lines":[" "]},{"start":{"row":43,"column":12},"end":{"row":43,"column":13},"action":"insert","lines":["O"]},{"start":{"row":43,"column":13},"end":{"row":43,"column":14},"action":"insert","lines":["N"]}],[{"start":{"row":43,"column":14},"end":{"row":43,"column":15},"action":"insert","lines":[" 
"],"id":1835},{"start":{"row":43,"column":15},"end":{"row":43,"column":16},"action":"insert","lines":["D"]},{"start":{"row":43,"column":16},"end":{"row":43,"column":17},"action":"insert","lines":["E"]},{"start":{"row":43,"column":17},"end":{"row":43,"column":18},"action":"insert","lines":["L"]},{"start":{"row":43,"column":18},"end":{"row":43,"column":19},"action":"insert","lines":["E"]},{"start":{"row":43,"column":19},"end":{"row":43,"column":20},"action":"insert","lines":["T"]},{"start":{"row":43,"column":20},"end":{"row":43,"column":21},"action":"insert","lines":["E"]}],[{"start":{"row":43,"column":21},"end":{"row":43,"column":22},"action":"insert","lines":[" "],"id":1836},{"start":{"row":43,"column":22},"end":{"row":43,"column":23},"action":"insert","lines":["S"]}],[{"start":{"row":43,"column":22},"end":{"row":43,"column":23},"action":"remove","lines":["S"],"id":1837}],[{"start":{"row":43,"column":22},"end":{"row":43,"column":23},"action":"insert","lines":["C"],"id":1838},{"start":{"row":43,"column":23},"end":{"row":43,"column":24},"action":"insert","lines":["A"]},{"start":{"row":43,"column":24},"end":{"row":43,"column":25},"action":"insert","lines":["S"]}],[{"start":{"row":43,"column":25},"end":{"row":43,"column":26},"action":"insert","lines":["C"],"id":1839},{"start":{"row":43,"column":26},"end":{"row":43,"column":27},"action":"insert","lines":["A"]},{"start":{"row":43,"column":27},"end":{"row":43,"column":28},"action":"insert","lines":["D"]},{"start":{"row":43,"column":28},"end":{"row":43,"column":29},"action":"insert","lines":["E"]}],[{"start":{"row":43,"column":29},"end":{"row":43,"column":30},"action":"insert","lines":[";"],"id":1840}],[{"start":{"row":43,"column":29},"end":{"row":43,"column":30},"action":"remove","lines":[";"],"id":1841}],[{"start":{"row":36,"column":30},"end":{"row":36,"column":31},"action":"insert","lines":["1"],"id":1842}],[{"start":{"row":39,"column":40},"end":{"row":39,"column":41},"action":"remove","lines":["_"],"id":1843}],[{"start"
:{"row":39,"column":36},"end":{"row":39,"column":49},"action":"remove","lines":["AUTOINCREMENT"],"id":1844}],[{"start":{"row":39,"column":24},"end":{"row":39,"column":25},"action":"insert","lines":[" "],"id":1845}],[{"start":{"row":39,"column":24},"end":{"row":39,"column":37},"action":"insert","lines":["AUTOINCREMENT"],"id":1846}],[{"start":{"row":39,"column":49},"end":{"row":39,"column":50},"action":"remove","lines":[" "],"id":1847}],[{"start":{"row":39,"column":24},"end":{"row":39,"column":37},"action":"remove","lines":["AUTOINCREMENT"],"id":1848}],[{"start":{"row":39,"column":24},"end":{"row":39,"column":25},"action":"remove","lines":[" "],"id":1849}],[{"start":{"row":39,"column":35},"end":{"row":39,"column":36},"action":"insert","lines":[" "],"id":1850}],[{"start":{"row":39,"column":36},"end":{"row":39,"column":49},"action":"insert","lines":["AUTOINCREMENT"],"id":1851}],[{"start":{"row":39,"column":23},"end":{"row":39,"column":24},"action":"insert","lines":["e"],"id":1852},{"start":{"row":39,"column":24},"end":{"row":39,"column":25},"action":"insert","lines":["g"]},{"start":{"row":39,"column":25},"end":{"row":39,"column":26},"action":"insert","lines":["e"]}],[{"start":{"row":39,"column":25},"end":{"row":39,"column":26},"action":"remove","lines":["e"],"id":1853},{"start":{"row":39,"column":24},"end":{"row":39,"column":25},"action":"remove","lines":["g"]},{"start":{"row":39,"column":23},"end":{"row":39,"column":24},"action":"remove","lines":["e"]}],[{"start":{"row":39,"column":23},"end":{"row":39,"column":24},"action":"insert","lines":["E"],"id":1854},{"start":{"row":39,"column":24},"end":{"row":39,"column":25},"action":"insert","lines":["G"]},{"start":{"row":39,"column":25},"end":{"row":39,"column":26},"action":"insert","lines":["E"]},{"start":{"row":39,"column":26},"end":{"row":39,"column":27},"action":"insert","lines":["R"]}],[{"start":{"row":36,"column":7},"end":{"row":44,"column":10},"action":"remove","lines":[" CREATE TABLE Post_User1"," ("," user_id INT 
,"," post_id INTEGER PRIMARY KEY AUTOINCREMENT,"," post_content VARCHAR(500),"," CONSTRAINT "," FOREIGN KEY(user_id) REFERENCES User(user_id)"," ON DELETE CASCADE"," );"],"id":1855}],[{"start":{"row":36,"column":7},"end":{"row":44,"column":10},"action":"insert","lines":[" CREATE TABLE Post_User1"," ("," user_id INT ,"," post_id INTEGER PRIMARY KEY AUTOINCREMENT,"," post_content VARCHAR(500),"," CONSTRAINT "," FOREIGN KEY(user_id) REFERENCES User(user_id)"," ON DELETE CASCADE"," );"],"id":1856}],[{"start":{"row":41,"column":23},"end":{"row":41,"column":24},"action":"insert","lines":["n"],"id":1857},{"start":{"row":41,"column":24},"end":{"row":41,"column":25},"action":"insert","lines":["a"]},{"start":{"row":41,"column":25},"end":{"row":41,"column":26},"action":"insert","lines":["m"]},{"start":{"row":41,"column":26},"end":{"row":41,"column":27},"action":"insert","lines":["e"]}],[{"start":{"row":63,"column":4},"end":{"row":64,"column":30},"action":"remove","lines":["INSERT INTO Post_User(user_id,post_content)"," VALUES(2,'my first post');"],"id":1858}],[{"start":{"row":63,"column":4},"end":{"row":71,"column":10},"action":"insert","lines":["CREATE TABLE Post_User1"," ("," user_id INT ,"," post_id INTEGER PRIMARY KEY AUTOINCREMENT,"," post_content VARCHAR(500),"," CONSTRAINT name"," FOREIGN KEY(user_id) REFERENCES User(user_id)"," ON DELETE CASCADE"," 
);"],"id":1859}],[{"start":{"row":85,"column":0},"end":{"row":86,"column":0},"action":"insert","lines":["",""],"id":1860},{"start":{"row":86,"column":0},"end":{"row":87,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":86,"column":0},"end":{"row":86,"column":1},"action":"insert","lines":["Q"],"id":1861},{"start":{"row":86,"column":1},"end":{"row":86,"column":2},"action":"insert","lines":["1"]}],[{"start":{"row":86,"column":1},"end":{"row":86,"column":2},"action":"remove","lines":["1"],"id":1862}],[{"start":{"row":86,"column":1},"end":{"row":86,"column":2},"action":"insert","lines":["1"],"id":1863},{"start":{"row":86,"column":2},"end":{"row":86,"column":3},"action":"insert","lines":["0"]},{"start":{"row":86,"column":3},"end":{"row":86,"column":4},"action":"insert","lines":["="]}],[{"start":{"row":86,"column":4},"end":{"row":86,"column":6},"action":"insert","lines":["''"],"id":1864}],[{"start":{"row":86,"column":4},"end":{"row":86,"column":6},"action":"remove","lines":["''"],"id":1865}],[{"start":{"row":86,"column":4},"end":{"row":86,"column":6},"action":"insert","lines":["\"\""],"id":1866}],[{"start":{"row":86,"column":6},"end":{"row":86,"column":7},"action":"insert","lines":["\""],"id":1867}],[{"start":{"row":86,"column":7},"end":{"row":87,"column":0},"action":"insert","lines":["",""],"id":1868},{"start":{"row":87,"column":0},"end":{"row":88,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":88,"column":0},"end":{"row":88,"column":2},"action":"insert","lines":["\"\""],"id":1869}],[{"start":{"row":88,"column":2},"end":{"row":88,"column":3},"action":"insert","lines":["\""],"id":1870},{"start":{"row":88,"column":3},"end":{"row":88,"column":4},"action":"insert","lines":["\""]}],[{"start":{"row":87,"column":0},"end":{"row":87,"column":4},"action":"insert","lines":[" 
"],"id":1871}],[{"start":{"row":87,"column":4},"end":{"row":87,"column":5},"action":"insert","lines":["D"],"id":1872},{"start":{"row":87,"column":5},"end":{"row":87,"column":6},"action":"insert","lines":["E"]},{"start":{"row":87,"column":6},"end":{"row":87,"column":7},"action":"insert","lines":["L"]},{"start":{"row":87,"column":7},"end":{"row":87,"column":8},"action":"insert","lines":["E"]},{"start":{"row":87,"column":8},"end":{"row":87,"column":9},"action":"insert","lines":["E"]}],[{"start":{"row":87,"column":8},"end":{"row":87,"column":9},"action":"remove","lines":["E"],"id":1873}],[{"start":{"row":87,"column":8},"end":{"row":87,"column":9},"action":"insert","lines":["T"],"id":1874}],[{"start":{"row":87,"column":9},"end":{"row":87,"column":10},"action":"insert","lines":["e"],"id":1875}],[{"start":{"row":87,"column":10},"end":{"row":87,"column":11},"action":"insert","lines":[" "],"id":1876}],[{"start":{"row":87,"column":10},"end":{"row":87,"column":11},"action":"remove","lines":[" "],"id":1877},{"start":{"row":87,"column":9},"end":{"row":87,"column":10},"action":"remove","lines":["e"]}],[{"start":{"row":87,"column":9},"end":{"row":87,"column":10},"action":"insert","lines":["E"],"id":1878}],[{"start":{"row":87,"column":10},"end":{"row":87,"column":11},"action":"insert","lines":[" "],"id":1879},{"start":{"row":87,"column":11},"end":{"row":87,"column":12},"action":"insert","lines":["F"]},{"start":{"row":87,"column":12},"end":{"row":87,"column":13},"action":"insert","lines":["R"]},{"start":{"row":87,"column":13},"end":{"row":87,"column":14},"action":"insert","lines":["O"]},{"start":{"row":87,"column":14},"end":{"row":87,"column":15},"action":"insert","lines":["M"]}],[{"start":{"row":87,"column":15},"end":{"row":87,"column":16},"action":"insert","lines":[" 
"],"id":1880}],[{"start":{"row":87,"column":16},"end":{"row":87,"column":17},"action":"insert","lines":["P"],"id":1881},{"start":{"row":87,"column":17},"end":{"row":87,"column":18},"action":"insert","lines":["o"]},{"start":{"row":87,"column":18},"end":{"row":87,"column":19},"action":"insert","lines":["s"]}],[{"start":{"row":87,"column":19},"end":{"row":87,"column":20},"action":"insert","lines":["t"],"id":1882},{"start":{"row":87,"column":20},"end":{"row":87,"column":21},"action":"insert","lines":["_"]},{"start":{"row":87,"column":21},"end":{"row":87,"column":22},"action":"insert","lines":["U"]},{"start":{"row":87,"column":22},"end":{"row":87,"column":23},"action":"insert","lines":["s"]}],[{"start":{"row":87,"column":23},"end":{"row":87,"column":24},"action":"insert","lines":["e"],"id":1883},{"start":{"row":87,"column":24},"end":{"row":87,"column":25},"action":"insert","lines":["r"]},{"start":{"row":87,"column":25},"end":{"row":87,"column":26},"action":"insert","lines":[";"]}],[{"start":{"row":88,"column":3},"end":{"row":88,"column":4},"action":"remove","lines":["\""],"id":1884}],[{"start":{"row":34,"column":15},"end":{"row":34,"column":16},"action":"remove","lines":[":"],"id":1885},{"start":{"row":34,"column":14},"end":{"row":34,"column":15},"action":"remove","lines":[")"]},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"remove","lines":["("]},{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"remove","lines":["r"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"remove","lines":["e"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"remove","lines":["s"]},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"remove","lines":["u"]},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"remove","lines":["_"]},{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"remove","lines":["t"]},{"start":{"row":34,"column":6},"end":{"row":34,"colu
mn":7},"action":"remove","lines":["s"]},{"start":{"row":34,"column":5},"end":{"row":34,"column":6},"action":"remove","lines":["o"]},{"start":{"row":34,"column":4},"end":{"row":34,"column":5},"action":"remove","lines":["p"]},{"start":{"row":34,"column":3},"end":{"row":34,"column":4},"action":"remove","lines":[" "]},{"start":{"row":34,"column":2},"end":{"row":34,"column":3},"action":"remove","lines":["f"]}],[{"start":{"row":34,"column":1},"end":{"row":34,"column":2},"action":"remove","lines":["e"],"id":1886}],[{"start":{"row":46,"column":17},"end":{"row":46,"column":18},"action":"remove","lines":[")"],"id":1887},{"start":{"row":46,"column":16},"end":{"row":46,"column":17},"action":"remove","lines":["1"]},{"start":{"row":46,"column":15},"end":{"row":46,"column":16},"action":"remove","lines":["Q"]},{"start":{"row":46,"column":14},"end":{"row":46,"column":15},"action":"remove","lines":["("]},{"start":{"row":46,"column":13},"end":{"row":46,"column":14},"action":"remove","lines":["a"]},{"start":{"row":46,"column":12},"end":{"row":46,"column":13},"action":"remove","lines":["t"]},{"start":{"row":46,"column":11},"end":{"row":46,"column":12},"action":"remove","lines":["a"]},{"start":{"row":46,"column":10},"end":{"row":46,"column":11},"action":"remove","lines":["d"]},{"start":{"row":46,"column":9},"end":{"row":46,"column":10},"action":"remove","lines":["_"]},{"start":{"row":46,"column":8},"end":{"row":46,"column":9},"action":"remove","lines":["e"]},{"start":{"row":46,"column":7},"end":{"row":46,"column":8},"action":"remove","lines":["t"]},{"start":{"row":46,"column":6},"end":{"row":46,"column":7},"action":"remove","lines":["i"]},{"start":{"row":46,"column":5},"end":{"row":46,"column":6},"action":"remove","lines":["r"]}],[{"start":{"row":46,"column":4},"end":{"row":46,"column":5},"action":"remove","lines":["w"],"id":1888},{"start":{"row":46,"column":0},"end":{"row":46,"column":4},"action":"remove","lines":[" 
"]},{"start":{"row":45,"column":11},"end":{"row":46,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":35,"column":1},"end":{"row":35,"column":2},"action":"remove","lines":[" "],"id":1889}],[{"start":{"row":34,"column":0},"end":{"row":34,"column":1},"action":"remove","lines":["d"],"id":1890}],[{"start":{"row":19,"column":18},"end":{"row":19,"column":19},"action":"remove","lines":[":"],"id":1891},{"start":{"row":19,"column":17},"end":{"row":19,"column":18},"action":"remove","lines":[")"]},{"start":{"row":19,"column":16},"end":{"row":19,"column":17},"action":"remove","lines":["("]},{"start":{"row":19,"column":15},"end":{"row":19,"column":16},"action":"remove","lines":["e"]},{"start":{"row":19,"column":14},"end":{"row":19,"column":15},"action":"remove","lines":["l"]},{"start":{"row":19,"column":13},"end":{"row":19,"column":14},"action":"remove","lines":["b"]},{"start":{"row":19,"column":12},"end":{"row":19,"column":13},"action":"remove","lines":["a"]},{"start":{"row":19,"column":11},"end":{"row":19,"column":12},"action":"remove","lines":["t"]},{"start":{"row":19,"column":10},"end":{"row":19,"column":11},"action":"remove","lines":["_"]},{"start":{"row":19,"column":9},"end":{"row":19,"column":10},"action":"remove","lines":["e"]},{"start":{"row":19,"column":8},"end":{"row":19,"column":9},"action":"remove","lines":["t"]},{"start":{"row":19,"column":7},"end":{"row":19,"column":8},"action":"remove","lines":["a"]},{"start":{"row":19,"column":6},"end":{"row":19,"column":7},"action":"remove","lines":["e"]},{"start":{"row":19,"column":5},"end":{"row":19,"column":6},"action":"remove","lines":["r"]},{"start":{"row":19,"column":4},"end":{"row":19,"column":5},"action":"remove","lines":["c"]},{"start":{"row":19,"column":3},"end":{"row":19,"column":4},"action":"remove","lines":[" 
"]}],[{"start":{"row":19,"column":2},"end":{"row":19,"column":3},"action":"remove","lines":["f"],"id":1892},{"start":{"row":19,"column":1},"end":{"row":19,"column":2},"action":"remove","lines":["e"]},{"start":{"row":19,"column":0},"end":{"row":19,"column":1},"action":"remove","lines":["d"]}],[{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"remove","lines":[" "],"id":1893}],[{"start":{"row":20,"column":4},"end":{"row":20,"column":5},"action":"remove","lines":["y"],"id":1894},{"start":{"row":20,"column":3},"end":{"row":20,"column":4},"action":"remove","lines":["r"]},{"start":{"row":20,"column":2},"end":{"row":20,"column":3},"action":"remove","lines":["e"]},{"start":{"row":20,"column":1},"end":{"row":20,"column":2},"action":"remove","lines":["u"]},{"start":{"row":20,"column":0},"end":{"row":20,"column":1},"action":"remove","lines":["q"]}],[{"start":{"row":20,"column":0},"end":{"row":20,"column":1},"action":"insert","lines":["Q"],"id":1895},{"start":{"row":20,"column":1},"end":{"row":20,"column":2},"action":"insert","lines":["1"]}],[{"start":{"row":31,"column":7},"end":{"row":31,"column":8},"action":"remove","lines":["t"],"id":1896},{"start":{"row":31,"column":6},"end":{"row":31,"column":7},"action":"remove","lines":["i"]},{"start":{"row":31,"column":5},"end":{"row":31,"column":6},"action":"remove","lines":["r"]},{"start":{"row":31,"column":4},"end":{"row":31,"column":5},"action":"remove","lines":["w"]},{"start":{"row":31,"column":0},"end":{"row":31,"column":4},"action":"remove","lines":[" 
"]}],[{"start":{"row":31,"column":12},"end":{"row":31,"column":13},"action":"remove","lines":[")"],"id":1897},{"start":{"row":31,"column":11},"end":{"row":31,"column":12},"action":"remove","lines":["y"]},{"start":{"row":31,"column":10},"end":{"row":31,"column":11},"action":"remove","lines":["r"]},{"start":{"row":31,"column":9},"end":{"row":31,"column":10},"action":"remove","lines":["e"]},{"start":{"row":31,"column":8},"end":{"row":31,"column":9},"action":"remove","lines":["u"]},{"start":{"row":31,"column":7},"end":{"row":31,"column":8},"action":"remove","lines":["q"]},{"start":{"row":31,"column":6},"end":{"row":31,"column":7},"action":"remove","lines":["("]},{"start":{"row":31,"column":5},"end":{"row":31,"column":6},"action":"remove","lines":["a"]},{"start":{"row":31,"column":4},"end":{"row":31,"column":5},"action":"remove","lines":["t"]},{"start":{"row":31,"column":3},"end":{"row":31,"column":4},"action":"remove","lines":["a"]},{"start":{"row":31,"column":2},"end":{"row":31,"column":3},"action":"remove","lines":["d"]},{"start":{"row":31,"column":1},"end":{"row":31,"column":2},"action":"remove","lines":["_"]}],[{"start":{"row":31,"column":0},"end":{"row":31,"column":1},"action":"remove","lines":["e"],"id":1898},{"start":{"row":30,"column":11},"end":{"row":31,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":21,"column":6},"end":{"row":21,"column":7},"action":"remove","lines":[" "],"id":1899}],[{"start":{"row":21,"column":6},"end":{"row":21,"column":7},"action":"insert","lines":[" "],"id":1900}],[{"start":{"row":20,"column":6},"end":{"row":21,"column":0},"action":"remove","lines":["",""],"id":1901}],[{"start":{"row":20,"column":6},"end":{"row":20,"column":14},"action":"remove","lines":[" "],"id":1902},{"start":{"row":20,"column":6},"end":{"row":21,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" 
"],"id":1903}],[{"start":{"row":21,"column":4},"end":{"row":21,"column":8},"action":"insert","lines":[" "],"id":1904}],[{"start":{"row":30,"column":4},"end":{"row":30,"column":8},"action":"remove","lines":[" "],"id":1905}],[{"start":{"row":34,"column":2},"end":{"row":34,"column":3},"action":"remove","lines":[" "],"id":1906},{"start":{"row":34,"column":1},"end":{"row":34,"column":2},"action":"remove","lines":[" "]},{"start":{"row":34,"column":0},"end":{"row":34,"column":1},"action":"remove","lines":[" "]}],[{"start":{"row":34,"column":1},"end":{"row":34,"column":2},"action":"remove","lines":["1"],"id":1907}],[{"start":{"row":34,"column":1},"end":{"row":34,"column":2},"action":"insert","lines":["2"],"id":1908}],[{"start":{"row":46,"column":1},"end":{"row":46,"column":2},"action":"remove","lines":["2"],"id":1909}],[{"start":{"row":46,"column":1},"end":{"row":46,"column":2},"action":"insert","lines":["3"],"id":1910}],[{"start":{"row":52,"column":1},"end":{"row":52,"column":2},"action":"remove","lines":["3"],"id":1911}],[{"start":{"row":52,"column":1},"end":{"row":52,"column":2},"action":"insert","lines":["4"],"id":1912}],[{"start":{"row":60,"column":1},"end":{"row":60,"column":2},"action":"remove","lines":["4"],"id":1913}],[{"start":{"row":60,"column":1},"end":{"row":60,"column":2},"action":"insert","lines":["5"],"id":1914}],[{"start":{"row":65,"column":1},"end":{"row":65,"column":2},"action":"remove","lines":["5"],"id":1915}],[{"start":{"row":65,"column":1},"end":{"row":65,"column":2},"action":"insert","lines":["6"],"id":1916}],[{"start":{"row":68,"column":1},"end":{"row":68,"column":2},"action":"remove","lines":["6"],"id":1917}],[{"start":{"row":68,"column":1},"end":{"row":68,"column":2},"action":"insert","lines":["7"],"id":1918}],[{"start":{"row":72,"column":1},"end":{"row":72,"column":2},"action":"remove","lines":["7"],"id":1919}],[{"start":{"row":72,"column":1},"end":{"row":72,"column":2},"action":"insert","lines":["8"],"id":1920}],[{"start":{"row":77,"column":1},"
end":{"row":77,"column":2},"action":"remove","lines":["8"],"id":1921}],[{"start":{"row":77,"column":1},"end":{"row":77,"column":2},"action":"insert","lines":["9"],"id":1922}],[{"start":{"row":80,"column":1},"end":{"row":80,"column":2},"action":"remove","lines":["9"],"id":1923}],[{"start":{"row":80,"column":1},"end":{"row":80,"column":2},"action":"insert","lines":["1"],"id":1924},{"start":{"row":80,"column":2},"end":{"row":80,"column":3},"action":"insert","lines":["0"]}],[{"start":{"row":84,"column":2},"end":{"row":84,"column":3},"action":"remove","lines":["0"],"id":1925}],[{"start":{"row":84,"column":2},"end":{"row":84,"column":3},"action":"insert","lines":["1"],"id":1926}],[{"start":{"row":50,"column":0},"end":{"row":50,"column":1},"action":"insert","lines":[" "],"id":1927}]]},"ace":{"folds":[],"scrolltop":945.7357509671659,"scrollleft":0,"selection":{"start":{"row":62,"column":30},"end":{"row":62,"column":30},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":54,"state":"qqstring3","mode":"ace/mode/python"}},"timestamp":1582709705660,"hash":"8fd201e85f110b60ed46f04eaf6c65cd6bd234ad"}
|
[
"r151865@rguktrkv.ac.in"
] |
r151865@rguktrkv.ac.in
|
c1e1c4a84a8d39bcd2026e291f5dc94c6b2300a3
|
8c5a1efaef84c5208c5887d0d7044192d113f93b
|
/Source/plugin.video.sample/default.py
|
d982bc12d21e89e1393f1d79b5cec3cddf237d46
|
[] |
no_license
|
gitter-badger/YPC
|
e18cf9c1f73e85a1d095e1048388d09119a981fb
|
870578b3728f40bfea28d34dbdaf2de323170ea2
|
refs/heads/master
| 2020-04-04T21:31:52.729669
| 2015-12-05T06:22:50
| 2015-12-05T06:22:50
| 48,638,355
| 0
| 0
| null | 2015-12-27T08:33:54
| 2015-12-27T08:33:54
| null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Based on code from youtube addon
#------------------------------------------------------------
# Kodi (XBMC) video addon that lists a single YouTube channel by delegating
# playback to the plugin.video.youtube addon.  Python 2 syntax (statement exec).
import os
import sys
import plugintools
import xbmc,xbmcaddon
from addon.common.addon import Addon
addonID = 'plugin.video.maryoutube'
addon = Addon(addonID, sys.argv)
local = xbmcaddon.Addon(id=addonID)
icon = local.getAddonInfo('icon')
# YouTube user name whose uploads this addon exposes.
YOUTUBE_CHANNEL_ID = "maryoutube"
# Entry point
def run():
    # Dispatch on the "action" URL parameter: no action -> show the main menu,
    # otherwise call the function named by the parameter.
    plugintools.log(""+YOUTUBE_CHANNEL_ID+".run")
    # Get params
    params = plugintools.get_params()
    if params.get("action") is None:
        main_list(params)
    else:
        action = params.get("action")
        # NOTE(review): exec of a caller-supplied string is a code-injection
        # risk; a dict of allowed handlers would be safer.  Py2-only syntax.
        exec action+"(params)"
    plugintools.close_item_list()
# Main menu
def main_list(params):
    # Adds a single folder item that opens the channel in plugin.video.youtube.
    plugintools.log(""+YOUTUBE_CHANNEL_ID+".main_list "+repr(params))
    plugintools.add_item(
        #action="",
        title="thisisthename",
        url="plugin://plugin.video.youtube/user/"+YOUTUBE_CHANNEL_ID+"/",
        thumbnail=icon,
        folder=True )
run()
|
[
"marduk191@gmail.com"
] |
marduk191@gmail.com
|
c72e6534d57e3dd3bc29b4a3c2aea036437df45e
|
009096c66d3b8072cc4f079d0f9b043a6626f4b8
|
/visualize.py
|
7abeba40fe95cabaae2e9db607b7864b633d89d0
|
[] |
no_license
|
ytwushui/tile_defect
|
cfba068a0498a68612eabc044bc244d49a6eb264
|
dafe4767b9c51bb8e2de959641100f249db74706
|
refs/heads/master
| 2023-02-08T19:09:06.116688
| 2021-01-04T05:52:53
| 2021-01-04T05:52:53
| 324,282,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
import cv2
import numpy as np
class Visualizer:
    """Debug helper: shows raw tile images next to their defect masks."""

    @staticmethod  # fix: the original def had no `self`, so calling it on an
    # instance mis-bound the instance as the dataloader argument.
    def visulize_orig(defects_dataloader_train):
        """Display image/mask pairs from the first batch, side by side.

        :param defects_dataloader_train: iterable yielding (images, masks)
            batches; images are indexed as batch[0][i][channel] and masks as
            batch[1][i][0].  NOTE(review): assumes single-channel images —
            confirm against the dataset transform.
        Side effects only: opens an OpenCV window and blocks on a key press.
        """
        # Grab just the first batch from the loader.
        for i, img in enumerate(defects_dataloader_train):
            img_batch = img
            break
        n = 1  # number of pairs to display
        for i in range(n):
            # raw image is img_batch[0][i].permute(1, 2, 0) and mask is img_batch[1][i][0]
            #im1 = cv2.cvtColor(np.array(img_batch[1][i][0]), cv2.COLOR_RGB2BGR)
            im1 = img_batch[0][i][0]
            im2 = img_batch[1][i][0]
            hmerge = np.hstack((im1, im2))  # horizontal concatenation
            cv2.imshow('name', hmerge)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
|
[
"wy710@163.com"
] |
wy710@163.com
|
a3d0b31035ad6f2f5e0791e55ddbc835e3dea369
|
e0262bf061e7a11b78a83eeb5f9fc196254dfa8b
|
/GenerateOnnxPerfData.py
|
f17ed3721128a78a62f72d882d11398a9dce6a6d
|
[] |
no_license
|
motus/onnx_perf_data
|
d70f9c7f46aea729b65ed4fa9394447e7e1b4650
|
b834d1a07b09790a17a941c6336fff92efd393d9
|
refs/heads/master
| 2020-11-26T17:45:52.386902
| 2020-04-29T00:35:18
| 2020-04-29T00:35:18
| 229,163,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,399
|
py
|
#!/usr/bin/env python3
r"""
Generate test data for ONNX Runtime onnxruntime_perf_test tool.
Run:
python.exe GeneratePerfData.py --model .\model\model.onnx --output .\model\data\
This call will create a directory ./model_data/ and produce protobuf files
containing data for ONNX model inputs, one file for each input.
The files are named `.\model\data\[model_input_name].pb`
After that, run the ONNX Runtime benchmark tool, e.g.
onnxruntime_perf_test.exe -m times -r 1000 model\model.onnx model\data\
NOTE: ORT `onnxruntime_perf_test.exe` is very finicky about the model and data paths!
Both model .onnx file and data directory MUST reside in the same directory!
Also, on Windows it does not like the forward slash `/` as path separator.
For more details on the ONNX perf test tool, see
https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/test/perftest
FIXME: The script currently breaks if some inputs have special characters like `/` or `:`
in their names (this is the case for models converted as is from TensorFlow).
TODO: allow user to specify the distribution if input data, incl. all 0s or 1s.
"""
import os
import argparse
import onnx
import onnx.numpy_helper
import onnxruntime
import numpy as np
def _main():
    """Generate one random-input .pb file per model input for onnxruntime_perf_test."""
    parser = argparse.ArgumentParser(description="Generate random test input for ONNX model")
    parser.add_argument("--model", required=True, help="ONNX model file")
    parser.add_argument("--output", required=True, help="Output data directory")
    args = parser.parse_args()
    # Create the output directory on first use.
    if not os.path.exists(args.output):
        os.mkdir(args.output)
    # Let ORT itself report the model's input names/shapes/types.
    sess = onnxruntime.InferenceSession(args.model)
    for (i, inp) in enumerate(sess.get_inputs()):
        # TODO: allow user to specify omitted dimensions instead of always using 1
        # Symbolic/unknown dims (strings or <=0) are replaced with 1.
        shape = [s if isinstance(s, int) and s > 0 else 1 for s in inp.shape]
        # FIXME: use correct type based on inp.type instead of np.float32
        data = np.float32(np.random.randn(*shape))
        # TF-converted models name inputs "foo:0"; strip the ":0" suffix.
        name = inp.name[:-2] if inp.name[-2:] == ":0" else inp.name
        tensor = onnx.numpy_helper.from_array(data, name)
        path = os.path.join(args.output, "input_%03d.pb" % i)
        print("%s: %s %s/%s %s" % (path, name, inp.type, data.dtype, data.shape))
        with open(path, 'wb') as outfile:
            outfile.write(tensor.SerializeToString())
if __name__ == "__main__":
    _main()
|
[
"sergiym@microsoft.com"
] |
sergiym@microsoft.com
|
d5e9da7158d1d9e5da3315f240ce40a568384534
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/13114025.py
|
94da046c51675f492cbb850c1728133a7ed747e7
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/13114025.py generated: Wed, 25 Jan 2017 15:25:30
#
# Event Type: 13114025
#
# ASCII decay Descriptor: [B_s0 -> (phi(1020) -> mu+ mu-) mu+ mu-]cc
#
# Gauss/Gaudi job options (auto-generated): configure generation of
# B_s0 -> phi(1020)(-> mu+ mu-) mu+ mu- events via repeated hadronization.
# Configure the main generation tool chain (Pythia production + EvtGen decay).
from Configurables import Generation
Generation().EventType = 13114025
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# Decay file describing the signal decay chain and generator-level cuts.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_phimumu,mm=MS,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
# Signal PID: B_s0 and its antiparticle.
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
# Alternative generation path: fire B_s0 particles from a momentum-spectrum
# particle gun instead of full event generation.
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
# Measured pT/eta spectrum for B_s0 at 4 TeV beam energy.
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
# Smear the production vertex with the measured beam-spot shape.
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13114025
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
95217192c353378d62e5bf472e9fe7efb3b4f83e
|
d4f9d104479b6f9a64175a3fe8554860bf0d62b2
|
/popular_words.py
|
77bfe82384de8cab45193f40caf3c730eae4076c
|
[] |
no_license
|
pohily/checkio
|
9a09c9c52b4f07438cfe4e00914e8d1cfe844c5d
|
8a0a49126af6e09b9e5e6067f28efbf085cd87f6
|
refs/heads/master
| 2020-05-16T03:18:18.068186
| 2019-07-06T13:22:20
| 2019-07-06T13:22:20
| 182,674,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
def popular_words(text: str, words: list) -> dict:
    """Count how often each word in *words* occurs in *text* (case-insensitive).

    An occurrence counts only when it starts the (remaining) text or follows a
    space/newline, and is followed by a space, newline, comma, period, or the
    end of the text.  Scanning is non-overlapping: after each hit or rejected
    match the search resumes just past the matched word.
    Returns a dict mapping every requested word to its count (0 if absent).
    """
    lowered = text.lower()
    counts = {}
    for target in words:
        remaining = lowered
        hits = 0
        while remaining:
            if target not in remaining:
                break
            at = remaining.index(target)
            if at != 0:
                # Reject matches glued to a preceding non-separator character.
                before = remaining[at - 1]
                if before != ' ' and before != '\n':
                    remaining = remaining[at + len(target):]
                    continue
            remaining = remaining[at + len(target):]
            if not remaining:
                # Word sits at the very end of the text.
                hits += 1
                break
            if remaining[0] in ' \n,.':
                hits += 1
        counts[target] = hits
    return counts
print(popular_words('''
And the Raven never flitting still is sitting still is sitting
On the pallid bust of Pallas just above my chamber door
And his eyes have all the seeming of a demon’s that is dreaming
And the lamp-light o’er him streaming throws his shadow on the floor
And my soul from out that shadow that lies floating on the floor
Shall be lifted nevermore
''', ["raven","still","is","floor","nevermore"]))
|
[
"mpohily@gmail.com"
] |
mpohily@gmail.com
|
dd9dda3934d0b1202eb87e210710a14d45fce19a
|
a0929da4ad91f9d271b3da8fb7a64f351c993774
|
/Week03-consuming XML/py08-trains.py
|
824abbed664ad6d619e4f9b23fa8aa575ac14dc0
|
[
"MIT"
] |
permissive
|
bexiturley/DataRepresentation
|
6ca7a522d190cb8d5530bebf3ee625be632cb13a
|
30a20cd737e6c63d0f3d6e50b5f65b4742df76aa
|
refs/heads/master
| 2023-01-04T09:23:46.478585
| 2020-10-22T12:17:16
| 2020-10-22T12:17:16
| 299,306,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
# This program gets all the trains that are located south of Dublin
# and stores the data associated with them
# A program like this would normally store all the data into the csv file
# and let another part of the program analysis the data.
# I am only doing this to demonstrate how you can reduce a dataset as you are reading it
# (you would want to do it with exceptionally large datasets, not like this one).
import requests
import csv
from bs4 import BeautifulSoup
# Irish Rail realtime API: returns an XML document of current train positions.
url = "http://api.irishrail.ie/realtime/realtime.asmx/getCurrentTrainsXML"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'xml')
# Fields available on each <objTrainPositions> record.
# NOTE(review): only TrainLatitude is actually written below — the rest of
# this list is currently unused; confirm whether all fields should be kept.
retrieveTags=['TrainStatus',
              'TrainLatitude',
              'TrainLongitude',
              'TrainCode',
              'TrainDate',
              'PublicMessage',
              'Direction'
              ]
# NOTE(review): csv output files should normally be opened with newline=''
# (per the csv module docs) to avoid blank rows on Windows — confirm.
with open('week03_train.csv', mode='w') as train_file:
    train_writer = csv.writer(train_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    listings = soup.findAll("objTrainPositions")
    for listing in listings:
        print(listing)
        #print(listing.TrainLatitude.string)
        # or
        # print(listing.find('TrainLatitude').string)
        lat =float( listing.TrainLatitude.string)
        if (lat < 53.4): # keep only trains south of roughly Dublin's latitude
            entryList = []
            entryList.append(listing.find('TrainLatitude').string)
            train_writer.writerow(entryList)
print (soup.prettify())
|
[
"bexiturley@gmail.com"
] |
bexiturley@gmail.com
|
72ea75c6443a35f07952a2a265ddc5a6f52c1814
|
e13f571a26c22f3867a4134d7be9cbd2e85baf78
|
/ast_reductions.py
|
d46748b610d1f27eee4cb7941a1541ef769f7b1d
|
[] |
no_license
|
teberger/cs554-project2
|
0e2796a5b9f085cf7cea453a8d0c6d6bb2bf2de0
|
fd0b8feec04ff2e015c646f1266bbe5a3462a1fd
|
refs/heads/master
| 2021-01-13T01:58:44.831568
| 2014-12-01T06:50:58
| 2014-12-01T06:50:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,191
|
py
|
from cfg import Grammar, EOF
from ast_parser import Parser, Rose_Tree
import pydot
def simplify_ast(ast):
    '''Collapse control-flow productions into dedicated tree nodes.

    `while` and `if` productions of S are replaced by single nodes whose
    children are the reduced condition/body subtrees; every other node is
    copied with its children recursively reduced, preserving the precedence
    information the parse shape encodes.

    :param ast: a Rose_Tree for a parse of the homework arithmetic language.
    :return: the reduced tree.
    '''
    present = [child.symbol for child in ast.children if child.value != '']
    # S -> while B do S od
    if 'while' in present:
        return reduceWhile(ast)
    # S -> if B then S else S fi
    if 'if' in present:
        return reduceIf(ast)
    # Default: clone the node and reduce each child in place.
    clone = Rose_Tree(ast.symbol, ast.value)
    for child in ast.children:
        reduced = reduce_ast(child)
        reduced.parent = clone
        clone.children.append(reduced)
    return clone
def find_child_with_symbol(ast, symbol, num=1):
    """Return the num-th (1-based) direct child of *ast* whose grammar symbol
    equals *symbol*, or None when there are fewer than *num* matches."""
    seen = 0
    for child in ast.children:
        if child.symbol != symbol:
            continue
        seen += 1
        if seen == num:
            return child
    return None
#We know that at this level, the root of the AST is S, we can abstract
#away the S-> ... and replace it with a simplified rose_tree node
#that represents the appropriate structure
def reduceWhile(ast):
    """Collapse an S -> while B do S od subtree into a single 'while' node
    whose two children are the reduced condition and the reduced loop body."""
    node = Rose_Tree(symbol='while', node_value='while')
    node.parent = ast.parent
    condition = find_child_with_symbol(ast, 'B')
    body = find_child_with_symbol(ast, 'S')
    node.children = [reduce_ast(condition), reduce_ast(body)]
    return node
#reduces an S -> if .... fi production into a single node with only
#the three children: boolean expression, then statement, and else
#statement
def reduceIf(ast):
    """Collapse an S -> if B then S else S fi subtree into a single 'if' node
    with three children: reduced condition, then-branch, and else-branch."""
    node = Rose_Tree(symbol='if', node_value='if')
    node.parent = ast.parent
    condition = find_child_with_symbol(ast, 'B')
    then_branch = find_child_with_symbol(ast, 'S')
    else_branch = find_child_with_symbol(ast, 'S', 2)
    node.children = [reduce_ast(condition),
                     reduce_ast(then_branch),
                     reduce_ast(else_branch)]
    return node
#removes all nodes that were created from using epsilon productions
def filter_epsilon(ast):
    """Return a copy of the tree with all epsilon-production nodes removed.

    A node created by an epsilon production is a leaf with an empty value;
    such a node maps to None, and Nones are dropped from every children list.
    """
    if not ast.children and (ast.value == ''):
        return None
    children = [filter_epsilon(c) for c in ast.children]
    tree = Rose_Tree(ast.symbol, ast.value)
    # Materialize with list(): on Python 3 filter() is a lazy iterator, but
    # later passes (reduce_singleton_children, remove_ll1_requirement_syntax)
    # take len() of, index, and mutate .children as a list.
    tree.children = list(filter(lambda x: x is not None, children))
    return tree
#compresses nodes with only one children since they do not contribute
#to the actual structure of the language
def reduce_singleton_children(ast):
    """Splice out chain nodes: any node with exactly one child is replaced by
    (the reduction of) that child, since it adds nothing to program structure.
    Leaves are returned unchanged; multi-child nodes keep their arity with each
    child reduced in place."""
    if not ast.children:
        # Leaf: nothing to compress.
        return ast
    if len(ast.children) == 1:
        # Single child: drop this node from the chain entirely.
        return reduce_singleton_children(ast.children[0])
    # Two or more children: reduce each child, keep this node.
    originals = ast.children[:]
    ast.children = [reduce_singleton_children(child) for child in originals]
    return ast
#removes all A -> A' edges and condenses them into a the A node like
#we originally wanted in our grammar
def remove_ll1_requirement_syntax(ast):
    """Fold the A' helper nodes (introduced to make the grammar LL(1)) back
    into their A parent, in place: the primed child is removed and its
    children are appended to the parent, then the pass recurses."""
    primed = None
    for child in ast.children[0:]:
        # Keep the last matching primed child, as the original pass did.
        if child.symbol == ast.symbol + "'":
            primed = child
    if primed is not None:
        ast.children.remove(primed)
        ast.children.extend(primed.children)
    # Recurse over the (possibly extended) child list.
    for child in ast.children:
        remove_ll1_requirement_syntax(child)
def reduce_ast(ast):
    '''Run every reduction pass, yielding the smallest AST that still keeps
    the grammar structure: drop epsilon nodes, collapse control-flow
    productions, splice out single-child chains, then fold the LL(1) primed
    symbols back into their parents.
    '''
    tree = simplify_ast(filter_epsilon(ast))
    tree = reduce_singleton_children(tree)
    remove_ll1_requirement_syntax(tree)
    return tree
if __name__ == '__main__':
    # Smoke test: parse a hand-written token stream for
    #   if (1) < 1 then x := 123 + 5 else while x < 10 do x := x + 1 fi
    # reduce the resulting AST, and render it as a PNG with pydot.
    g = Grammar('./testdata/homework1_grammar.txt')
    x = Parser(g)
    root, _ = x.ll1_parse([('if', 'if'),
                           ('(', '('),
                           ('num', '1'),
                           (')', ')'),
                           ('relop','<'),
                           ('num', '1'),
                           ('then','then'),
                           ('var', 'x'),
                           (':=', ':='),
                           ('num','123'),
                           ('aop', '+'),
                           ('num', '5'),
                           ('else', 'else'),
                           ('while', 'while'),
                           ('var', 'x'),
                           ('relop', '<'),
                           ('num', '10'),
                           ('do', 'do'),
                           ('var', 'x'),
                           (':=',':='),
                           ('var', 'x'),
                           ('aop', '+'),
                           ('num', '1'),
                           ('fi', 'fi'),
                           ('\0', '\0')
                           ])
    root = reduce_ast(root)
    graph = pydot.Dot('Parse Tree', graph_type='digraph')
    g, _ = root.pydot_append(graph, 0)
    g.write_png('./testdata/test.png')
|
[
"tberge01@cs.unm.edu"
] |
tberge01@cs.unm.edu
|
4a0aba86ee4d608bc14315f230e19377831ef137
|
7b85adcdb8383d853b576d1fc0ac81431d3c1958
|
/blog/views.py
|
ec6c8f0b61060625f78e28eacb9618b303e570c2
|
[] |
no_license
|
imsharvanj/portfolio
|
1a6baf257a42a84eef6efa2bceba4e3090f8821e
|
f9af19c31501e0a56e1ba275f0cb9e921eafc1ed
|
refs/heads/master
| 2020-09-05T15:39:13.550136
| 2020-05-03T18:08:29
| 2020-05-03T18:08:29
| 220,145,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from .models import Blog
# Create your views here.
def allblogs(request):
    """List view: render the blog index template with all Blog rows."""
    # NOTE(review): Blog.objects is the manager, not a queryset — the template
    # must iterate `blogs.all`; confirm, or switch to Blog.objects.all() here.
    blogs = Blog.objects
    return render(request, 'blog/allblogs.html', {'blogs':blogs})
def detail(request, blog_id):
    """Detail view: render one Blog by primary key, 404 when it is missing."""
    detail_blog = get_object_or_404(Blog, pk=blog_id)
    return render(request, 'blog/detail.html', {'detail_blog': detail_blog})
|
[
"imsharvanj@gmail.com"
] |
imsharvanj@gmail.com
|
4454ab9a9dac1ad7a248c47a69b70de9fd2fbbc8
|
931593c3bdaab1b28b389e0717341cff59543e02
|
/Python/third-maximum-number.py
|
02e43e5d45d504058a8a309595ecc6a990df5d28
|
[
"MIT"
] |
permissive
|
Kakoedlinnoeslovo/LeetCode
|
56719a0a2b5aa95e08cfcf7826da6041da82ae44
|
63f59214430fb899cd1436532b310d1687f33f55
|
refs/heads/master
| 2020-05-03T18:54:42.075923
| 2016-10-10T08:14:39
| 2016-10-10T08:14:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
# Time: O(n)
# Space: O(1)
# Return the third-largest *distinct* value of the array, or the maximum
# when fewer than three distinct values exist.  Single pass, O(1) extra space.
class Solution(object):
    def thirdMax(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        seen = 0  # how many times a top-three slot was (re)filled
        first = second = third = float("-inf")
        for value in nums:
            if value > first:
                # New overall maximum: shift the other two down.
                first, second, third = value, first, second
                seen += 1
            elif value != first and value > second:
                # New second-largest distinct value.
                second, third = value, second
                seen += 1
            elif value != first and value != second and value >= third:
                # New (or repeated) third-largest distinct value.
                third = value
                seen += 1
        # Fewer than three distinct values observed: fall back to the maximum.
        return first if seen < 3 else third
|
[
"noreply@github.com"
] |
noreply@github.com
|
2677c37484b604158cb49492aa1b8bab56eead09
|
64ec6605fbdd2f65779f2305840897a10857e24c
|
/collector.py
|
defb8b388f3d17ac5cdac36ae16ea943ac1a43ef
|
[] |
no_license
|
Ckzzz1/Homework2
|
ff0c16dea7a9128b4647e0340d749da438c2d6ee
|
4f8e00a5b4a3d545b018220c4c1424c815714353
|
refs/heads/master
| 2020-05-18T20:13:04.948211
| 2019-05-02T17:55:18
| 2019-05-02T17:55:18
| 184,626,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
import requests
import time
# Mapping from human-readable indicator names (Chinese) to stats.gov.cn
# "zb" indicator valuecodes; keys are the names callers pass to get_data().
code = {"年末总人口": '"A030101"', "男性人口": '"A030102"', "女性人口": '"A030103"', "经济活动人口": '"A040101"', "农业总产值": '"A0D0402"', "林业总产值": '"A0D0403"', "牧业总产值": '"A0D0404"', "渔业总产值": '"A0D0405"'}
# Namespace handed to eval() so the JSON literals true/false in the API
# response text resolve to placeholder ints instead of raising NameError.
# NOTE(review): this shadows the builtin globals() — rename if refactoring.
globals = {
    'true': 0,
    'false': 1
}
def gettime():
    """Return the current Unix time in whole milliseconds (used as the k1 cache-buster)."""
    millis = time.time() * 1000
    return int(round(millis))
def get_data(parameter): # fetch one indicator's yearly data from stats.gov.cn
    """Query the stats.gov.cn easyquery API for the named indicator and return
    a dict mapping year valuecode -> value, covering the default range plus
    the years 1999-2008 fetched one by one.

    :param parameter: Chinese indicator name; must be a key of `code`.
    NOTE(review): eval() on a network response is dangerous — the server could
    inject arbitrary code.  json.loads() would parse the same payload safely;
    confirm no caller relies on the true->0/false->1 mapping before switching.
    """
    parameter = code[parameter]
    headers = {}
    key = {}
    url = 'http://data.stats.gov.cn/easyquery.htm'
    headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
    # Query-string parameters understood by the easyquery endpoint.
    key['m'] = 'QueryData'
    key['dbcode'] = 'hgnd'
    key['rowcode'] = 'zb'
    key['colcode'] = 'sj'
    key['wds'] = '[]'
    key['dfwds'] = '[{"wdcode":"zb","valuecode":' + parameter + '}]'
    key['k1'] = str(gettime())
    s = requests.session()
    r = s.get(url, params=key, headers=headers)
    # SECURITY: evaluates response text as Python (see docstring note above).
    dic = dict(eval(r.text, globals))
    print(dic['returndata']['wdnodes'][0]['nodes'][0]['cname'])
    data = {}
    for i in dic['returndata']['datanodes']:
        data[i['wds'][1]['valuecode']] = i['data']['data']
    # Fetch the earlier years individually, one request per year.
    for date in range(1999, 2009):
        key['dfwds'] = '[{"wdcode":"sj","valuecode":"' + str(date) + '"}]'
        r = s.get(url, params=key, headers=headers)
        dic = dict(eval(r.text, globals))
        data[dic['returndata']['datanodes'][0]['wds'][1]['valuecode']] = dic['returndata']['datanodes'][0]['data'][
            'data']
    print(data)
    return data
|
[
"chenkangzzz23@gmail.com"
] |
chenkangzzz23@gmail.com
|
aed9d8a521f0e1c53b73413d7f5f5d17712daaff
|
478a4a0495fafc62000dc53cef749b87b7a9be55
|
/virtual/bin/pip3
|
95352667bd7817eab73dd4a6da56bdc610064dcd
|
[] |
no_license
|
DavidNganga/simple-error
|
696a0f9e6482b38c5670b5d618120a9220b7fcaf
|
351c5ace3c1487570d19ee0b5e0ade70d40f1b1c
|
refs/heads/master
| 2020-03-18T23:38:56.974235
| 2018-06-03T15:30:48
| 2018-06-03T15:30:48
| 135,416,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
#!/home/david/simple-error/virtual/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated console-script shim that runs pip inside this virtualenv.
import re
import sys
from pip import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" (Windows launchers) from argv[0]
    # so pip reports a clean program name, then exit with pip's status code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"ngashiedavid@gmail.com"
] |
ngashiedavid@gmail.com
|
|
3e38359b554e84357eb028484eb265f71435fb2d
|
d3417e6c71a351d1a29098c454a204578a302ef4
|
/staff_version.py
|
2d98876ee9e4d4bdd70fce66f71ebfbed9976887
|
[] |
no_license
|
GeethikaSuharshani/Student-Progression-Outcome-Prediction-System
|
685b81df8153665a2e2d7bc516d54c3d51471f82
|
9713fbbb994f1e4369244e118e2394ddd4b4d882
|
refs/heads/main
| 2023-04-30T08:51:35.705173
| 2021-05-17T15:40:19
| 2021-05-17T15:40:19
| 368,132,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,885
|
py
|
# start of user-defined functions
def introduction(): # gives an introduction about the program
    """Print a short banner explaining the program and prompt for details."""
    print('''Hi, This program will be allowed you to get your students` progression outcome of this academic year.''')
    print()
    print('To get a student`s progression outcome, please enter the following details of that student')
    print()
def pass_input(): # get number of credits at pass and validate the user input
    # Re-prompts recursively until a value in credit_range is entered.
    # NOTE(review): recursion as a retry loop can hit the recursion limit
    # after ~1000 consecutive bad inputs; a while-loop would be more robust.
    try:
        global pass_credits
        global total_credits
        # pass_input is always the first prompt of a round, so it
        # (re)initialises the running total that the other inputs add to.
        total_credits=0
        pass_credits=int(input('Number of credits at pass(including condoned pass) :'))
        if pass_credits in credit_range: # check whether the input is in the correct range
            total_credits+=pass_credits
        else:
            print('Number of credits at pass, should be in the range 0,20,40,60,80,100 and 120.So please try again with a valid credit value within this range.')
            pass_input()
    except:
        # int() failed on non-numeric input.  NOTE(review): the bare except
        # also swallows KeyboardInterrupt/EOFError.
        print('Number of credits at pass should be an integer.So please enter a valid integer.')
        pass_input()
def defer_input(): # get number of credits at defer and validate the use input
    # Same validation pattern as pass_input; adds to the shared total.
    try:
        global defer_credits
        global total_credits
        defer_credits=int(input('Number of credits at defer :'))
        if defer_credits in credit_range: # check whether the input is in the correct range
            total_credits+=defer_credits
        else:
            print('Number of credits at defer, should be in the range 0,20,40,60,80,100 and 120.So please try again with a valid credit value within this range.')
            defer_input()
    except:
        # Non-integer input; re-prompt.
        print('Number of credits at defer should be an integer.So please enter a valid integer.')
        defer_input()
def fail_input(): # get number of credits at fail and validate the user input
    # Same validation pattern as pass_input; adds to the shared total.
    try:
        global fail_credits
        global total_credits
        fail_credits=int(input('Number of credits at fail :'))
        if fail_credits in credit_range: # check whether the input is in the correct range
            total_credits+=fail_credits
        else:
            print('Number of credits at fail, should be in the range 0,20,40,60,80,100 and 120.So please try again with a valid credit value within this range.')
            fail_input()
    except:
        # Non-integer input; re-prompt.
        print('Number of credits at fail should be an integer.So please enter a valid integer.')
        fail_input()
def result(): # check the progression outcome of the student
    """Map the entered credit split to a progression outcome.

    Reads the module-level pass_credits/defer_credits set by the input
    functions and stores the verdict in the global progression_outcome.
    """
    global progression_outcome
    retriever = 'Do not Progress - module retriever'
    if pass_credits == 120:
        progression_outcome = 'Progress'                  # everything passed
    elif pass_credits == 100:
        progression_outcome = 'Progress - module trailer' # trailing
    elif pass_credits in (80, 60):
        progression_outcome = retriever
    elif pass_credits == 40:
        # With no deferrals left, 40 passed credits means exclusion.
        progression_outcome = 'Exclude' if defer_credits == 0 else retriever
    elif pass_credits == 20:
        progression_outcome = 'Exclude' if defer_credits in (20, 0) else retriever
    elif pass_credits == 0:
        progression_outcome = 'Exclude' if defer_credits in (40, 20, 0) else retriever
    print('This student`s progression outcome of this academic year is', progression_outcome)
    print()
def credit_total(): # check whether the total of credits entered equals to 120
    # A student always carries exactly 120 credits; any other total means a
    # typo, so all three values are collected again.
    global total_credits
    if total_credits == 120:
        result()
    else:
        print('The total of the credits you have entered is not 120.Please try again with correct credit values.')
        # NOTE(review): recursive retry -- pass_input() resets total_credits,
        # so the re-entered values start from a clean total.
        pass_input()
        defer_input()
        fail_input()
        credit_total()
def histogram_count(): # keeps a count of progress,trailing,retriever and excluded students
    # Buckets the outcome produced by result() into the global counters and
    # then asks whether the user wants to quit.
    global progression_outcome
    global Progress
    global Trailing
    global Retriever
    global Excluded
    global total_outcome
    global quit_program
    if progression_outcome == 'Progress':
        Progress+=1
    elif progression_outcome == 'Progress - module trailer':
        Trailing+=1
    elif progression_outcome == 'Do not Progress - module retriever':
        Retriever+=1
    else:
        # Only remaining outcome emitted by result() is 'Exclude'.
        Excluded+=1
    total_outcome=Progress + Trailing + Retriever + Excluded # total number of progression outcomes
    quit_program=str(input("If you want to quit the program please enter 'q':"))
    quit_program=quit_program.lower() # return a copy of the string with all the cased characters converted to lowercase
def histogram(): # makes a histogram of progress,trailing,retriever and excluded students
    """Print a star histogram of the progression outcomes seen so far."""
    global Progress
    global Trailing
    global Retriever
    global Excluded
    global total_outcome
    rows = (
        ('Progress', Progress),
        ('Trailing', Trailing),
        ('Retriever', Retriever),
        ('Excluded', Excluded),
    )
    for caption, count in rows:
        print(caption, count, ':', '*'*count)
    print('There are', total_outcome, 'outcomes in total.')
# end of user-defined functions
# main program starts from here
credit_range=[0, 20, 40, 60, 80, 100, 120] # credit range
quit_program=''
# Histogram counters, accumulated across all students entered in one session.
Progress=0
Trailing=0
Retriever=0
Excluded=0
introduction()
while quit_program != 'q': # program iterate until the user enter 'q' to quit the program
    # One student per iteration: collect the three credit values, validate
    # that they sum to 120, classify the outcome, then refresh the histogram.
    pass_input()
    defer_input()
    fail_input()
    credit_total()
    histogram_count()
    histogram()
print('Now you are exiting from the program.')
# end of main program
|
[
"61249152+GeethikaSuharshani@users.noreply.github.com"
] |
61249152+GeethikaSuharshani@users.noreply.github.com
|
17f4fc655f4d36fb7ed667cc4f4de886cc99a35b
|
561d351ca16e3cb4c121445624620c7d727eebbe
|
/LeeBros/모의시험/조삼모사.py
|
68cd45d28137f7c05e7edd07184b7357143d2bd5
|
[] |
no_license
|
lcg5450/algorithm
|
7a91b032a731978a8124c64d2ae01ad323ed659d
|
88d99e60b9707c916c86cb01f4e09cda36d2c41f
|
refs/heads/master
| 2023-03-21T05:32:15.545646
| 2021-03-11T05:56:33
| 2021-03-11T05:56:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
# Read an n x n grid of integers from stdin.
n = int(input())
arr = [list(map(int, input().split())) for _ in range(n)]
both = []
diff = []
# BUG FIX: the original outer loop was `range(0, n, w)`, which raised a
# NameError because `w` is only bound by the inner loop.  Stepping the even
# row indices (0, 2, ...) against the odd column indices mirrors the inner
# loop and pairs each cell with its transpose -- TODO confirm against the
# intended solution for this problem.
for h in range(0, n, 2):
    for w in range(1, n, 2):
        both.append(arr[h][w] + arr[w][h])
|
[
"seoyoung_00@hufs.ac.kr"
] |
seoyoung_00@hufs.ac.kr
|
8efe74872adef5632364a95d1cc58619fe6392aa
|
9dded9364d7a5dd969c7ebb3026c884c8bd3053f
|
/AUDIOCATE/APP/migrations/0007_bookmar.py
|
3f48ecfe06144f2fa60f1fab68e12ab87a7f095c
|
[] |
no_license
|
ehizman/AUDIOCATE
|
5a45828b3e1c6de16826e45195e68b8b0e08ab25
|
edbd7677025d4a431240bf253966d11658e7652d
|
refs/heads/master
| 2023-01-31T03:48:05.645443
| 2020-12-16T02:31:02
| 2020-12-16T02:31:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# Generated by Django 2.2.4 on 2020-12-14 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the `Bookmar` model.

    NOTE(review): the model name looks like a typo for "Bookmark", but if this
    migration has already been applied the rename must be done in a follow-up
    migration, not by editing this file.
    """
    dependencies = [
        ('APP', '0006_explore_date'),
    ]
    operations = [
        migrations.CreateModel(
            name='Bookmar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                ('link', models.CharField(max_length=225)),
            ],
        ),
    ]
|
[
"chukslord1@gmail.com"
] |
chukslord1@gmail.com
|
9735c162d8b0e9232fff475b339a46daf883680c
|
1d5976ffa323596ed0f1a60646c5ed45b2753f6f
|
/get_releases.py
|
1a7e067ca3b68d2073d4400fc192baca9c72cdce
|
[] |
no_license
|
ming4real/pymender
|
7cc8f4ba5dfe363396c2e27c599cd0867552d970
|
7fea1975858a26cc6fed8cf4294a766004e4442d
|
refs/heads/master
| 2023-07-01T08:55:40.497154
| 2021-08-09T09:39:50
| 2021-08-09T09:39:50
| 394,232,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,462
|
py
|
#!/usr/bin/python3
import configparser
import json
import mysql.connector
import pymender
# Load Mender and database credentials from the local ini file.
config = configparser.ConfigParser()
config.read('config.ini')

username = config["mender"]["username"]
password = config["mender"]["password"]

db_host = config["database"]["host"]
db_user = config["database"]["user"]
db_password = config["database"]["password"]
db_database = config["database"]["database"]

# Pull the complete release list from the Mender server.
mender = pymender.Mender()
mender.login(username, password)
releases = mender.listReleases()

db_connection = mysql.connector.connect(
    host=db_host,
    user=db_user,
    password=db_password,
    database=db_database)
cursor = db_connection.cursor()

# Rebuild both tables from scratch on every run.
cursor.execute("""TRUNCATE releases""")
cursor.execute("""TRUNCATE artifacts""")

for release in releases:
    release_name = release["Name"]
    # SECURITY FIX: use parameterized queries instead of string formatting.
    # Release/artifact fields come from an external service and may contain
    # quotes that would break -- or inject into -- the SQL.
    cursor.execute("INSERT INTO releases (name) VALUES (%s)", (release_name,))
    release_id = cursor.lastrowid
    for artifact in release["Artifacts"]:
        mender_id = artifact["id"]
        artifact_name = artifact["name"]
        file_info = artifact["updates"][0]["files"][0]
        file_checksum = file_info["checksum"]
        file_size = file_info["size"]
        file_type = artifact["updates"][0]["type_info"]["type"]
        provides = artifact["artifact_provides"]
        provides_name = provides["artifact_name"]
        # The provides checksum key has two historical spellings.
        try:
            provides_checksum = provides["rootfs-image.checksum"]
        except KeyError:
            provides_checksum = provides["rootfs_image_checksum"]
        # The depends checksum is optional; fall through both spellings.
        depends_checksum = None
        try:
            depends_checksum = artifact["artifact_depends"]["rootfs-image.checksum"]
        except KeyError:
            try:
                depends_checksum = artifact["artifact_depends"]["rootfs_image_checksum"]
            except KeyError:
                pass
        artifact_size = artifact["size"]
        artifact_sql = """
        INSERT INTO artifacts
        (release_id, mender_id, artifact_name, file_checksum, file_size, file_type, provides_name, provides_checksum, depends_checksum, artifact_size)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        # BUG FIX: parameter binding also stores SQL NULL (instead of the
        # literal string "None") when depends_checksum is absent.
        cursor.execute(artifact_sql, (release_id, mender_id, artifact_name,
                                      file_checksum, file_size, file_type,
                                      provides_name, provides_checksum,
                                      depends_checksum, artifact_size))

db_connection.commit()
|
[
"ming@siliconbladeconsultants.com"
] |
ming@siliconbladeconsultants.com
|
aab43a32bb73e22eec08bb5f0047cfe3df1306c8
|
466130cdd16cb6283a8c8545202455734ffcc852
|
/SConscript
|
6f2a9fdcc36907f8671da11c6f3e482a2c797693
|
[
"Apache-2.0"
] |
permissive
|
Guozhanxin/tcpserver
|
dc9cf2d90245fa1bf56738b988a767461ed5bb0c
|
f37429584db669984a2d453220eb067733993d78
|
refs/heads/master
| 2022-06-18T06:02:00.252808
| 2022-05-19T02:33:55
| 2022-05-19T02:33:55
| 196,307,789
| 6
| 6
|
Apache-2.0
| 2022-05-19T02:33:56
| 2019-07-11T02:44:17
|
C
|
UTF-8
|
Python
| false
| false
| 284
|
from building import *
# RT-Thread SCons build fragment for the tcpserver package.
src = ['tcpserver.c']
cwd = GetCurrentDir()
include_path = [cwd]
# Compile the sample only when its menuconfig option is enabled.
if GetDepend(['PKG_TCPSERVER_SAMPLE']):
    src += ['tcpserver_sample.c']
# The whole group is gated on the PKG_USING_TCPSERVER option.
group = DefineGroup('tcpserver', src, depend = ['PKG_USING_TCPSERVER'], CPPPATH = include_path)
Return('group')
|
[
"guozhanxin@rt-thread.com"
] |
guozhanxin@rt-thread.com
|
|
9f9ec82dae908a1835582a9ba4ff8f9f610f7a5a
|
cee3863c2cdc531734091353df3a95f52c9cf978
|
/SecondProgram/Modules and Functions/TKINTER/frames.py
|
7caeb7e995e823d1852f80021fbe34434e096407
|
[] |
no_license
|
evaristrust/Courses-Learning
|
b6dc0d0ed12e5aa4d06e37efcb5ca1bb266091af
|
c2040b12b78c6819927faada99d03d1637c6a564
|
refs/heads/master
| 2023-07-16T13:40:41.518558
| 2021-09-05T08:43:26
| 2021-09-05T08:43:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
# Tkinter layout demo: a window with a left canvas frame and a right frame
# holding a vertical stack of buttons.
try:
    import tkinter
except ImportError: # python 2
    import Tkinter as tkinter
# print(tkinter.TkVersion)
# print(tkinter.TclVersion)
# tkinter._test()
# Try something out of tkinter
main_window = tkinter.Tk()
main_window.title("INTRODUCTION TO TKINTER")
# 640x480 window placed 8px from the left, 200px from the top of the screen.
main_window.geometry('640x480+8+200')
label = tkinter.Label(main_window, text="HELLO WORLD")
label.pack(side="top")
# Two side-by-side frames; only the right one grows with the window.
leftframe = tkinter.Frame(main_window)
leftframe.pack(side="left", anchor="n", fill=tkinter.Y, expand=False)
rightframe = tkinter.Frame(main_window)
rightframe.pack(side="right", anchor="n", expand=True)
canvas = tkinter.Canvas(leftframe,relief="raised", borderwidth=1)
canvas.pack(side="left")
# now add some buttons to our tkinter canvas
button1 = tkinter.Button(rightframe, text="BUTTON1", background="black")
button2 = tkinter.Button(rightframe, text="BUTTON2")
button3 = tkinter.Button(rightframe, text="BUTTON3")
button4 = tkinter.Button(rightframe, text="BUTTON4")
button5 = tkinter.Button(rightframe, text="BUTTON5")
# add pack to the buttons
button1.pack(side="top")
button2.pack(side="top")
button3.pack(side="top")
button4.pack(side="top")
button5.pack(side="top")
# Blocks until the window is closed.
main_window.mainloop()
# now buttons are well organised
|
[
"evaristenizey@gmail.com"
] |
evaristenizey@gmail.com
|
a6c86e6bb027e41f316e6cc38c464aac0eb447e7
|
dd7c22891de26e4c90c3ba7e71c0dd2b65e83740
|
/ConvertHtmlToExecl1point3acre.py
|
ac52e7bcf55f48e88b95a166bc79174f5f1228f2
|
[] |
no_license
|
Leogaogithub/pythonUtils
|
572dbdf45bfa18d5b2ad258ab045918a32dc56ce
|
464877f9b2a50434587fe0abfdf4b5218368047c
|
refs/heads/master
| 2021-07-03T03:20:10.812998
| 2019-03-31T22:55:18
| 2019-03-31T22:55:18
| 146,221,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
# This Python file uses the following encoding: utf-8
import os, sys
from lxml import html
import csv, os, json
import requests
from exceptions import ValueError
from time import sleep
from lxml import html
from lxml import etree
# https://www.python.org/dev/peps/pep-0263/
from FileReader import FileReader
from ExcelWriter import ExcelWriter
class ConvertHtmlToExecl1point3acre:
    """Extract 1point3acres thread titles and links from a saved HTML page."""

    def __init__(self, fileName):
        # Path of the saved HTML file to parse.
        self.fileName = fileName

    def getField(self, xpathString):
        """Return the list of nodes matched by *xpathString* in the HTML file."""
        reader = FileReader(self.fileName)
        content = reader.read()
        tree = etree.HTML(content)
        # XPath syntax reference:
        # https://www.w3schools.com/xml/xpath_syntax.asp
        # http://www.tizag.com/xmlTutorial/xpathattribute.php
        return tree.xpath(xpathString)

    def getAllFields(self):
        """Return (titles, links) for every thread anchor on the page.

        Anchors are located inside the thread container div and must point at
        instant.1point3acres.com thread URLs.
        """
        titlePath = '//div[@class="Thread__verticalLine___3WYd8 col-md-10 col-sm-12 col-xs-12"]//h4//a[contains(@href,"https://instant.1point3acres.com/thread/")]//text()'
        linkPath ='//div[@class="Thread__verticalLine___3WYd8 col-md-10 col-sm-12 col-xs-12"]//h4//a[contains(@href,"https://instant.1point3acres.com/thread/")]//@href'
        titles = self.getField(titlePath)
        links = self.getField(linkPath)
        # Removed dead debug loops that only incremented a counter next to
        # commented-out prints.
        return titles, links
if __name__ == "__main__":
    # NOTE(review): hard-coded local input path -- adjust before running.
    fileName = '/home/leo/Desktop/houzz.html'
    excelFileName = './data/data.xls'
    sheetName = 'test'
    parser = ConvertHtmlToExecl1point3acre(fileName)
    titles, links = parser.getAllFields()
    # Column 0 = titles, column 1 = links.
    excelWriter = ExcelWriter(excelFileName, sheetName)
    excelWriter.write(0, titles)
    excelWriter.write(1, links)
    excelWriter.save()
|
[
"longhaogao@gmail.com"
] |
longhaogao@gmail.com
|
209448ef6a5c27fc999b9937731a9e813d506e57
|
c7f49a513978561dfa2eac9a5b32ae9833772a82
|
/ecommerce/users/migrations/0001_initial.py
|
fa39af867dfc9a3df26e06b50c02160ca1e1114b
|
[] |
no_license
|
Darioc2280/ecommerce
|
b00932ba90d51aec3e285915932b8385776af5d7
|
c54236966b9991ee62c13f985ed822232faf86e5
|
refs/heads/master
| 2020-09-24T06:00:21.666452
| 2019-12-04T16:22:13
| 2019-12-04T16:22:13
| 225,682,192
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,861
|
py
|
# Generated by Django 2.2.2 on 2019-12-03 18:49
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated Django migration: initial custom `User` model.

    Mirrors django.contrib.auth's AbstractUser fields; do not hand-edit an
    applied migration -- create a follow-up migration instead.
    """
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
[
"jcvb2004@gmail.com"
] |
jcvb2004@gmail.com
|
b1e4c8de22d0b5e677f7feaf538511f49067c141
|
99f01660e30d7e6267c107689a762159564f680d
|
/four.py
|
c4676ec9640a325b87345832b6df9d35aaa530be
|
[] |
no_license
|
saurabhsood91/advent-of-code
|
c3c837176fe825b2ffa1a8a27eb98fce9f7fc84b
|
4aae6539b544b07c2c625e8789e0ccb22ef4f1a6
|
refs/heads/master
| 2020-09-24T17:23:42.015575
| 2019-12-11T07:31:23
| 2019-12-11T07:31:23
| 225,807,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
# Puzzle input: inclusive password range.
MIN = 254032
MAX = 789860


def in_range(num):
    """Return True when *num* lies inside the inclusive [MIN, MAX] range."""
    return MIN <= num <= MAX
def ltr_does_not_decrease(num):
    """Return True when the digits of *num* never decrease left to right."""
    digits = [int(c) for c in str(num)]
    return all(left <= right for left, right in zip(digits, digits[1:]))
def contains_double(num):
    """Return True when *num* has at least one pair of equal adjacent digits."""
    digits = [int(c) for c in str(num)]
    return any(left == right for left, right in zip(digits, digits[1:]))
def is_valid_password(num):
    """A password is valid when all three puzzle rules hold."""
    rules = (in_range, ltr_does_not_decrease, contains_double)
    return all(rule(num) for rule in rules)
def tests_ltr_does_not_decrease():
    """Self-check: 223450 has a decreasing digit pair (5 -> 0)."""
    assert ltr_does_not_decrease(223450) == False


def tests_contains_double():
    """Self-check: 123789 has no adjacent repeated digit."""
    assert contains_double(123789) == False


def test_is_valid_password():
    # BUG FIX: the original asserted is_valid_password(111111) == True, but
    # 111111 is below MIN (254032) so the assertion could never pass -- which
    # is why the call is commented out in __main__.  333333 is in range,
    # non-decreasing, and contains a double.
    assert is_valid_password(333333) == True
def find_number_of_passwords_in_range():
    """Count the valid passwords in the inclusive [MIN, MAX] range."""
    return sum(1 for candidate in range(MIN, MAX + 1) if is_valid_password(candidate))
if __name__ == '__main__':
    # Run the lightweight self-checks first, then solve the puzzle.
    tests_contains_double()
    tests_ltr_does_not_decrease()
    # test_is_valid_password()
    print(find_number_of_passwords_in_range())
|
[
"saurabhsood91@gmail.com"
] |
saurabhsood91@gmail.com
|
0d9a39959f87ab5c544d3d87cf06fa0b349d1680
|
81efc66e005091a94983238ab44d3102e8c6d35b
|
/final_submit_sourcecode/source_code/read_data_pd.py
|
c0d3af5da0deab56e967e9893c505af2c3232bec
|
[
"MIT"
] |
permissive
|
weslai/stabilo_ubicamp2020
|
de4bc8ddd4a8efdb1f68b491b6c61aedfa6372e6
|
3caeca909836ad86430693322500f20b51745927
|
refs/heads/master
| 2023-01-19T14:02:41.813789
| 2020-11-24T22:36:24
| 2020-11-24T22:36:24
| 315,753,042
| 0
| 0
|
MIT
| 2020-11-24T22:37:23
| 2020-11-24T21:09:20
|
Python
|
UTF-8
|
Python
| false
| false
| 10,079
|
py
|
import os
import sys
from typing import List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from icecream import ic
from pandas import DataFrame
from stage_1.utils import as_windowed_np
# Include file/line context in icecream debug output.
ic.includeContext = True
# Raw ADC full-scale counts per sensor channel.
max_raw_vals = {"Acc1": 32768, "Acc2": 8192, "Gyro": 32768, "Mag": 8192, "Force": 4096}
# Full-scale value per channel in physical units -- presumably g, deg/s,
# gauss and newtons respectively; TODO confirm against the sensor datasheet.
max_sis = {"Acc1": 2, "Acc2": 2, "Gyro": 1000, "Mag": 2.4, "Force": 5.32}
# Label used for time steps where no character is being written.
BLANK_CHAR_LABEL = "0"
CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
CHAR_CLASSES = [BLANK_CHAR_LABEL] + list(CHARS)  # Add 'no char'/blank class
def read_calibration(folder: str) -> DataFrame:
    """Load calibration.txt ("name:value" lines) as a one-row DataFrame."""
    cal_file = os.path.join(folder, "calibration.txt")
    parsed = pd.read_csv(cal_file, delimiter=":", header=None, index_col=0)
    return parsed.T
def read_sensor_data(folder: str) -> DataFrame:
    """Load sensor_data.csv (';'-separated, first column as index), minus Time."""
    frame = pd.read_csv(os.path.join(folder, "sensor_data.csv"), delimiter=";", index_col=0)
    return frame.drop(columns=["Time"])
def read_labels(folder: str) -> DataFrame:
    """Load labels.csv (';'-separated) keeping the default integer index."""
    return pd.read_csv(os.path.join(folder, "labels.csv"), delimiter=";", index_col=None)
def apply_calibration(data: DataFrame, calibration: DataFrame) -> DataFrame:
    """Convert raw integer sensor columns of *data* to physical units in place.

    Combines the per-sensor full-scale tables (max_raw_vals/max_sis) with the
    per-pen bias/scale values from *calibration*; returns the mutated frame.
    """
    axes = ("X", "Y", "Z")
    # Accelerometers: scale only.
    for acc in ("Acc1", "Acc2"):
        for axis in axes:
            column = f"{acc} {axis}"
            data[column].update(
                apply_calibration_to_value(data[column], max_raw_vals[acc], max_sis[acc])
            )
    # Gyroscope: scale plus per-axis bias.
    for axis in axes:
        column = f"Gyro {axis}"
        data[column].update(
            apply_calibration_to_value(
                data[column],
                max_raw_vals["Gyro"],
                max_sis["Gyro"],
                cal_bias=calibration[f"bg{axis.lower()}"].item(),
            )
        )
    # Magnetometer: scale, per-axis bias and per-axis scale correction.
    for axis in axes:
        column = f"Mag {axis}"
        data[column].update(
            apply_calibration_to_value(
                data[column],
                max_raw_vals["Mag"],
                max_sis["Mag"],
                cal_bias=calibration[f"bm{axis.lower()}"].item(),
                cal_scale=calibration[f"sm{axis.lower()}"].item(),
            )
        )
    # Force sensor: scale only.
    data["Force"].update(
        apply_calibration_to_value(data["Force"], max_raw_vals["Force"], max_sis["Force"])
    )
    return data
def apply_calibration_to_value(raw, max_range_raw, max_si=1, cal_bias=0, cal_scale=1):
    """Convert a raw reading to physical units: ((raw - bias) * max_si / max_range_raw) / scale."""
    counts_per_unit = max_range_raw / max_si
    return (raw - cal_bias) / counts_per_unit / cal_scale
def interpolate_to_equidistant(data: DataFrame) -> DataFrame:
    # Deliberate identity stub: sampling is assumed to already be ~100 Hz.
    # TODO: Is interpolation really necessary since it could also only be related to
    # bluetooth, since it should be sampled at 100 Hz.
    # see https://stabilodigital.com/data/
    return data
def get_label_per_t(data: DataFrame, labels: DataFrame) -> DataFrame:
    """Add a label for each time step in data.

    Args:
        data (DataFrame): Sensor data of shape [T, F], where T time and F is feature
            dimension.
        labels (DataFrame): Labels of shape [N, 3] containing N char labels with start
            and end position.

    Returns:
        label_per_t (DataFrame): Label for each time step.
    """
    label_per_t = []
    label_gen = labels.iterrows()
    cur_label = next(label_gen)[1]  # Get the first label
    for data_sample in data.iterrows():
        cur_t = data_sample[0]
        cur_start = cur_label["start"]
        cur_end = cur_label["stop"]
        if cur_t >= cur_end:
            # Advance to the next label; assumes labels are sorted by time
            # and non-overlapping.
            try:
                cur_label = next(label_gen)[1]
            except StopIteration:
                # No labels remain.  Synthesize a blank label covering the
                # rest of the recording so every remaining step maps to
                # BLANK_CHAR_LABEL.
                # BUG FIX: the original wrote cur_label["end"] (the field is
                # named "stop"), which left the stale stop time in place and
                # re-raised/caught StopIteration on every remaining row.
                cur_label["Label"] = BLANK_CHAR_LABEL
                cur_label["start"] = cur_t
                cur_label["stop"] = float("inf")
            # Also update start and end positions after the switch.
            cur_start = cur_label["start"]
            cur_end = cur_label["stop"]
        if cur_start <= cur_t < cur_end:
            # TODO: Use char or index of CHAR_CLASSES here?
            label_per_t.append(cur_label["Label"])
        else:  # cur_t lies before the current label window
            label_per_t.append(BLANK_CHAR_LABEL)
    return DataFrame(label_per_t, columns=["Label"])
def split_data_by_label(
    data: DataFrame, labels: DataFrame
) -> List[Tuple[str, DataFrame]]:
    """Slice *data* into one (label, segment) tuple per labelled character.

    Args:
        data (DataFrame): Sensor data of shape [T, F].
        labels (DataFrame): N rows with "Label", "start" and "stop" columns.

    Returns:
        List of (label char, sensor-data segment) tuples; each segment covers
        the half-open time window [start, stop).
    """
    segments = []
    for _, row in labels.iterrows():
        in_window = (data.index >= row["start"]) & (data.index < row["stop"])
        segments.append((row["Label"], data[in_window]))
    return segments
def extract_blanks(
    data: DataFrame, labels: DataFrame, min_len_ms: int = 50
) -> List[Tuple[str, DataFrame]]:
    """Collect the gaps between labelled chars as blank-labelled segments.

    Args:
        data (DataFrame): Sensor data of shape [T, F].
        labels (DataFrame): N rows with "Label", "start" and "stop" columns.
        min_len_ms (int): Minimum gap duration to keep.

    Returns:
        List of (blank label, sensor-data segment) tuples, one per kept gap.
    """
    rows = [row for _, row in labels.iterrows()]
    blanks = []
    for prev_label, next_label in zip(rows, rows[1:]):
        gap = data[(data.index >= prev_label["stop"]) & (data.index < next_label["start"])]
        # Keep only gaps with at least two samples spanning min_len_ms.
        if gap.shape[0] > 1 and gap.index[-1] - gap.index[0] >= min_len_ms:
            blanks.append((BLANK_CHAR_LABEL, gap))
    return blanks
def get_relevant_label_segment(
    sample: DataFrame, force_thresh=None, n_consecutive=1
) -> Tuple[int, int]:
    """Find the sub-range of *sample* where the pen is actually writing.

    Args:
        sample (DataFrame): Sensor data of exactly one letter (needs "Force").
        force_thresh (Optional[float]): Threshold for "pen is writing";
            defaults to 1% of the force full-scale value.
        n_consecutive (int): Number of consecutive samples that must exceed
            the threshold.

    Returns:
        start (int): First position where force is applied.
        end (int): Last position where force is applied.
    """
    assert isinstance(sample, DataFrame)
    force = sample["Force"].to_numpy()
    if force_thresh is None:
        force_thresh = 0.01 * max_sis["Force"]
    writing = force > force_thresh
    if n_consecutive > 1:
        # Require n consecutive above-threshold samples via a sliding window.
        windows = as_windowed_np(writing, window_length=n_consecutive)
        writing = windows.min(axis=-1)
        # Pad back so the mask matches the length of `force` again.
        writing = np.append(writing, [writing[-1]] * n_consecutive)
    start = writing.argmax(axis=0)
    end = writing.shape[0] - writing[::-1].argmax(axis=0) - 1
    end += n_consecutive - 1
    return start, end
def idx_to_s(index, offset=None):
    """Convert a millisecond index to seconds, optionally offset-corrected.

    offset=None keeps absolute time, offset="min" rebases to the first
    sample, and a numeric offset is subtracted directly.
    """
    if offset == "min":
        shift = index.min()
    elif offset is None:
        shift = 0.0
    else:
        shift = offset
    return (index - shift) / 1000
def read_and_extract_data(folder: str, include_blank=True):
    """Load one writer folder and return [(label, segment), ...] samples.

    Applies the folder's calibration to the sensor data, slices it by the
    char labels and (optionally) appends the blank segments between chars.
    """
    calibration = read_calibration(folder)
    sensor = apply_calibration(read_sensor_data(folder), calibration)
    labels = read_labels(folder)
    samples = split_data_by_label(sensor, labels)
    if include_blank:
        samples.extend(extract_blanks(sensor, labels))
    return samples
def test_pipeline(folder):
    """Test the data loading pipeline."""
    # Smoke test: load one writer folder, locate the pressed-down segment of
    # the first letter and write two diagnostic plots to the working dir.
    calib = read_calibration(folder)
    data = read_sensor_data(folder)
    labels = read_labels(folder)
    data = apply_calibration(data, calib)
    labeled_data = split_data_by_label(data, labels)
    label, sample = labeled_data[0]
    start, end = get_relevant_label_segment(sample, force_thresh=0, n_consecutive=3)
    t_offset = sample.index.min()
    # Plot the force channel and mark the detected start/end samples in red.
    plt.plot(idx_to_s(sample.index, "min"), sample["Force"])
    plt.plot(
        idx_to_s(sample.index[start], t_offset), sample["Force"].iat[start], "ro",
    )
    plt.plot(idx_to_s(sample.index[end], t_offset), sample["Force"].iat[end], "ro")
    plt.savefig("test_relevant_points.png")
    # Second figure: one subplot per sensor column, shared time axis.
    feat_names = list(sample)
    fig, axes = plt.subplots(len(feat_names), figsize=(10, 20), sharex=True)
    time = idx_to_s(sample.index, "min")
    for i, f_n in enumerate(feat_names):
        feat = sample[f_n]
        axes[i].plot(time, feat)
        axes[i].set_title(f_n)
    plt.savefig("test_all_feat.png")
def plot_histograms_per_feat(base_folder):
    """Save one value-histogram PNG per sensor feature, pooled over all writers."""
    from collections import defaultdict
    # One sub-folder per writer.
    subfolders = [f.path for f in os.scandir(base_folder) if f.is_dir()]
    features = defaultdict(list)
    # Iterate over all writer folders
    for folder in subfolders:
        data = read_and_extract_data(folder)
        feat_names = list(data[0][1])
        # Get the relevant segment where force is applied
        relevant_segments = list(
            get_relevant_label_segment(sample) for _, sample in data
        )
        # Iterate over all featues and concatenate the relevant part of the samples
        for f_n in feat_names:
            feats = [
                sample[f_n][start:end].to_numpy()
                for (_, sample), (start, end) in zip(data, relevant_segments)
            ]
            features[f_n].append(np.concatenate(feats))
    # One PNG per feature, e.g. "hist_Force.png", in the working directory.
    for f_n, values in features.items():
        f = plt.figure()
        plt.hist(values)
        plt.savefig(f"hist_{f_n}.png")
        plt.close(f)
if __name__ == "__main__":
    # Usage: python read_data_pd.py <base_folder_with_writer_subfolders>
    plot_histograms_per_feat(sys.argv[1])
|
[
"we8403964@gmail.com"
] |
we8403964@gmail.com
|
224ef1320f603a86c4059b47859ebb3afa4c02b3
|
4ab1f49be834eb4ac9fd87edae94b338cbe97044
|
/test/test_flask_wtk.py
|
e79734821074cdddfd797cda96ad55bd6f0548d9
|
[] |
no_license
|
chenchonghust/pywtk
|
19dac2613c694654b672a3de825a171fab215a2e
|
230164a3eaa872a01f24b3686e821d7c38f62a18
|
refs/heads/master
| 2020-04-05T02:15:00.662526
| 2017-07-26T15:40:49
| 2017-07-26T15:40:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,724
|
py
|
import json
import numpy
import pandas
from unittest import TestCase, skip
import flask_wtk
from pywtk.wtk_api import FORECAST_ATTRS
import urllib
class TestFlaskWTK(TestCase):
    """HTTP-level tests for the flask_wtk wind-toolkit API endpoints."""

    def setUp(self):
        # Flask's built-in test client -- no real HTTP server is started.
        self.app = flask_wtk.app.test_client()

    def test_sites(self):
        """/sites resolves metadata by site id and lists site_ids inside a WKT polygon."""
        site_id = "102445"
        req = '/sites?sites=%s&orient=records'%(site_id)
        resp = self.app.get(req)
        resp_data = resp.get_data()
        ret_data = json.loads(resp_data)
        expected = {"site_id":102445,"gid":102446,"fraction_of_usable_area":1.0,
                    "power_curve":"offshore","capacity":16.0,"wind_speed":7.31,
                    "capacity_factor":0.31,"the_geom":"0101000020E6100000F5D555815AAD51C0AEF204C24EDF4440",
                    "city":None,"state":None,"country":None,"elevation":None,
                    "lat":41.744591,"lon":-70.708649}
        self.assertEqual(expected, ret_data[0])
        # Bounding polygon off the California coast; every expected site id
        # must be present in the returned column-oriented payload.
        wkt = "POLYGON((-120.82763671875 34.452218472826566,-119.19616699218749 34.452218472826566,-119.19616699218749 33.920571528675104,-120.82763671875 33.920571528675104,-120.82763671875 34.452218472826566))"
        req = '/sites?wkt=%s&orient=columns'%(wkt)
        resp = self.app.get(req)
        resp_data = resp.get_data()
        ret_data = json.loads(resp_data)
        expected = [29375, 29733, 29872, 30019, 30190, 30539, 30712, 30713,
                    30873, 30874, 31032, 31033, 31034, 31189, 31190, 31191,
                    31192, 31320, 31321, 31322, 31323, 31324, 31563, 32060,
                    32314, 32834, 33203, 34828]
        site_ids = ret_data['site_id'].values()
        for site_id in expected:
            self.assertIn(site_id, site_ids)

    def test_met(self):
        """/met rejects unknown attributes and returns 5-min met data otherwise."""
        site_id = "102445"
        start = pandas.Timestamp('2007-08-01', tz='utc')
        end = pandas.Timestamp('2007-08-15', tz='utc')
        attributes = ["power", "wind_direction", "wind_speed", "temperature",
                      "pressure","density"]
        expected = [9.53277647e-01, 2.16432190e+02, 4.99592876e+00, 2.92580750e+02,
                    1.01280258e+05, 1.17889750e+00]
        expected_dict = dict(zip(attributes, expected))
        # Bad attributes: the endpoint must report failure.
        # NOTE(review): urllib.urlencode is Python 2 only; under Python 3 this
        # must become urllib.parse.urlencode.
        req_args = {'sites': site_id, 'start':start.value//10**9, 'end':end.value//10**9,
                    'attributes': ",".join(attributes)+',bad_attribute'}
        req = '/met?%s'%(urllib.urlencode(req_args))
        resp = self.app.get(req)
        resp_data = resp.get_data()
        ret_data = json.loads(resp_data)
        self.assertIn("success", ret_data)
        self.assertFalse(ret_data["success"])
        # Good data: first row matches the reference values to 7 significant
        # digits (relative comparison sidesteps float32 noise).
        req = '/met?sites=%s&start=%s&end=%s'%(site_id,start.value//10**9, end.value//10**9)
        resp = self.app.get(req)
        resp_data = resp.get_data()
        ret_data = json.loads(resp_data)
        self.assertIn(site_id, ret_data)
        df = pandas.read_json(json.dumps(ret_data[site_id]))
        first_row = df.ix[0].to_dict()
        for n,v in expected_dict.items():
            self.assertEqual(0, round((v - first_row[n])/v, 7))
        self.assertEqual(14*24*12+1, len(df)) # End is inclusive of midnight

    def test_fcst(self):
        """/fcst returns hourly forecast rows matching the reference values."""
        site_id = "53252"
        start = pandas.Timestamp('2007-01-01', tz='utc')
        end = pandas.Timestamp('2007-01-02', tz='utc')
        # From ncdump; values are float32, so compare with assertAlmostEqual.
        expected = numpy.array([6.2671943, 8.6079865, 6.7353525,
                                6.384234, 0.26309761, 3.6874273, 1.4196928, 0.53551841,
                                10.572015, 13.249797, 10.526829, 10.306773], dtype='float32')
        ex_dict = dict(zip(FORECAST_ATTRS, expected))
        req = '/fcst?sites=%s&start=%s&end=%s'%(site_id,start.value//10**9, end.value//10**9)
        resp = self.app.get(req)
        resp_data = resp.get_data()
        ret_data = json.loads(resp_data)
        self.assertIn(site_id, ret_data)
        fcst_data = pandas.read_json(json.dumps(ret_data[site_id]))
        self.assertEqual(25, len(fcst_data))
        for k, v in ex_dict.items():
            self.assertAlmostEqual(v, fcst_data.ix[0][k])
        # CLEANUP: removed a large unreachable block that followed an
        # unconditional `return` here; it referenced an undefined
        # `get_forecast_data` and duplicated the assertions above.
|
[
"harry.sorensen@nrel.gov"
] |
harry.sorensen@nrel.gov
|
711832ad027cebcd4da1a6f103b54de05e2338a2
|
a819210511351f55f799040c40dd193a83572919
|
/session4/creat2.py
|
03e85e26a61ee833203e872a99b676cdac224969
|
[] |
no_license
|
notthumann/nguyentrangan-fundametals-c4e22
|
39f1b1ed90e54a348c95c276ff2ec787d2cd5d06
|
259e3cf7c9eb9d3299742e33cce36190f7591fcd
|
refs/heads/master
| 2020-03-29T14:16:30.534116
| 2018-09-30T17:09:20
| 2018-09-30T17:09:20
| 150,008,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
# Seed dictionary, then let the user add one key/value pair of their own.
movie = {
    "film":"Pulp Fiction",
    "year":1994,
}
# Note: the original wrote ``movie[input("Keys?")] = input("Value?")``;
# Python evaluates the right-hand side first, so the "Value?" prompt
# comes before the "Keys?" prompt. Preserve that order.
new_value = input("Value?")
new_key = input("Keys?")
movie[new_key] = new_value
print(movie)
|
[
"xero0696@gmail.com"
] |
xero0696@gmail.com
|
a373cc0a67cbee414789762011dca58198fc5ca6
|
015db123e6c79968787965390a71a877be1b87b4
|
/djangoSite/djangoSite/settings.py
|
7811deb553f454a884dc13327d757f6b6cde87e7
|
[] |
no_license
|
IncredibleQuark/pythonIntro
|
9894f9e6456f48b01c7b2837d5e0a9a9fb96d33c
|
399b77e43d23b25b05d1779f92a6a73bf98b0c6d
|
refs/heads/master
| 2020-03-22T14:46:14.020414
| 2019-01-13T18:50:17
| 2019-01-13T18:50:17
| 140,204,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,129
|
py
|
"""
Django settings for djangoSite project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&c^!yn0_mb^slv6_rx7ovx=uaw_j07j8ecuwduqi7xe6cegc46'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'bootstrap3'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoSite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoSite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"lkupinski@gmail.com"
] |
lkupinski@gmail.com
|
2f6a1eeb798db63a03d8bc5094e0b47cfa06081e
|
f616e5d8d09e156c0fa1b08241106121eca9c810
|
/alipay/direct/operate.py
|
6014ded1fba2ee5c579c71406e46aa49db3e10b6
|
[
"BSD-2-Clause"
] |
permissive
|
amyhoo/payment
|
8516226fa6bea8cec1270fb5e3406aa86f2a72e0
|
c243450681650bc3318a337bd550ae0af44ba3e7
|
refs/heads/master
| 2021-01-10T13:47:31.340937
| 2016-01-07T01:57:15
| 2016-01-07T01:57:15
| 42,905,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
# -*- coding: utf-8 -*-
#######################################################################################
# 提交直接支付请求,被payment所调用
#######################################################################################
from billing_payment.alipay.conf import DIRECT_PARAMS
from billing_payment.alipay.gatewayinfo import Alipay
from billing_payment.payment.mixins import PaySessionMan
def AlipayDirectHandle(order_info,**kwargs):
    """Build and submit an Alipay "direct pay" request.

    Called by the payment layer. ``order_info`` must supply ``subject``,
    ``total_fee``, ``out_trade_no``, ``notify_url`` and ``return_url``.
    Extra keyword arguments are accepted for interface compatibility but
    are not used here.

    Returns whatever ``Alipay.request('get')`` produces — presumably the
    gateway redirect URL/form; confirm against the Alipay class.
    """
    gateway_info=dict(DIRECT_PARAMS,**{
        'service':'create_direct_pay_by_user',
        'subject':order_info['subject'],
        'total_fee':order_info['total_fee'],
        'out_trade_no':order_info['out_trade_no'],
        'notify_url':order_info['notify_url'],
        'return_url':order_info['return_url'] ,
    })
    # Drop unset parameters: None values must not be part of the signed
    # request. (Fixed: compare with ``is not None`` and iterate .items()
    # instead of re-looking every key up.)
    gateway_info={key: value for key, value in gateway_info.items()
                  if value is not None}
    alipay=Alipay(**gateway_info)
    return alipay.request('get')
|
[
"yamin_xu@163.com"
] |
yamin_xu@163.com
|
e6a40ac9b3d5b601e26251a61ac8dff11ebac2bd
|
8ac5dc1f54440f186f1c736cd95be7a28a0bb958
|
/2010/dice.py
|
4331d0f13c342cfeb85c6eb1ff84bd656866a1ac
|
[] |
no_license
|
joeiddon/british_informatics_olympiad
|
604cfce98cc54f0de757e9ea083175f842fafb24
|
3fe6a1f294429b22624a81ba6ef8c365913b204a
|
refs/heads/master
| 2021-09-25T07:07:10.433774
| 2018-10-19T09:23:36
| 2018-10-19T09:23:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
class Dice:
    """A die rolling on an 11x11 toroidal grid (positions wrap modulo 11).

    Orientation is tracked by three face values: ``up`` (toward the sky),
    ``right`` and ``top`` (facing forward/away). Opposite faces of a die
    sum to 7, hence the ``7 - face`` arithmetic in the roll methods.
    """

    x = 5
    y = 5
    up = 1        # face pointing to the sky
    right = 4     # face pointing to the right
    top = 2       # face pointing forward (away)
    heading = "forward"

    def move(self):
        # BUG FIX: the original read ``grid[y][x]`` with bare names, which
        # raised NameError; use the die's own position.
        # NOTE(review): the computed value is never written back to ``grid``
        # — confirm whether ``grid[self.y][self.x] = v`` was intended.
        v = grid[self.y][self.x]
        v = (v + self.top - 1) % 6 + 1

    def modPos(self):
        # Wrap the position onto the 11x11 torus.
        self.x = (self.x + 11) % 11
        self.y = (self.y + 11) % 11

    def leftRoll(self):
        right = 7 - self.up
        top = self.right
        self.right = right
        self.top = top
        self.x -= 1
        self.modPos()
        # BUG FIX: was ``heading = "left"`` — a discarded local assignment.
        self.heading = "left"

    def rightRoll(self):
        right = self.up
        top = 7 - self.right
        self.right = right
        self.top = top
        self.x += 1
        self.modPos()
        self.heading = "right"   # BUG FIX: was a discarded local assignment

    def forwardRoll(self):
        up = 7 - self.top
        top = self.up
        self.up = up
        self.top = top
        self.y += 1
        self.modPos()
        self.heading = "forward"   # BUG FIX: was a discarded local assignment

    def backwardRoll(self):
        up = self.top
        top = 7 - self.up
        self.up = up
        self.top = top
        self.y -= 1
        self.modPos()
        self.heading = "backward"   # BUG FIX: was a discarded local assignment
# BUG FIX: ``[[1]*11]*11`` repeats the *same* row object eleven times, so a
# write to one cell would show up in every row. Build independent rows.
grid = [[1] * 11 for _ in range(11)]
die = Dice()
|
[
"joedominciddon@gmail.com"
] |
joedominciddon@gmail.com
|
d78166b045ca1249faed5e2e6179a0bde644dd95
|
fdee3863c5fa0181b5e478a824a6468059cb958c
|
/AULA_31_COMPREENSAO_EM_LISTA.py
|
9f1d1de5311607ab10ed2c8ddddfc385b2c5cd5e
|
[] |
no_license
|
afaqueti/python_udemy
|
5ab0ca251143b6c4d169a642903c7676f475150e
|
ee0e8f2f15665401ca0cce166a3cd55135f42a2d
|
refs/heads/master
| 2020-04-12T07:05:00.363820
| 2019-03-04T13:45:46
| 2019-03-04T13:45:46
| 162,356,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
# List comprehensions (exercise).

# Build 0..9 with a plain loop, printing each element as it is appended.
# NOTE(review): original indentation was lost in this copy; print(i) is
# assumed to sit inside the loop body — confirm against the upstream repo.
x = []
for i in range(0,10):
    x += [i]
    print(i)

# Same idea as comprehensions: plain range, doubled values, even numbers.
x2 = [i for i in range(0,11)]
print(x2)
x3 = [i* 2 for i in range(1,11)]
print(x3)
x4 = [i for i in range(0,21) if i % 2 == 0]
print(x4)

# A string is iterable: this collects its characters one by one.
lista = []
lista = [variavel_igual for variavel_igual in 'alessandro']
print(lista)
print(len(lista))

# Celsius-to-Fahrenheit conversion.
cel = [0, 10,15,20,30,50,100]
far = [temp for temp in cel]   # first a plain copy...
print(far)
far = [(temp * (9/5) + 32) for temp in cel]   # ...then the actual conversion
print(far)
|
[
"afaqueti@gmail.com"
] |
afaqueti@gmail.com
|
6a00d42b5a145a315a3ece2a6f86be6e55ab9489
|
4ecd7a780ecbb9c3375d95360eaac0e1638185c8
|
/Python/main.py
|
b304c76691819045d72a067cd372dc7df2b6316d
|
[
"MIT"
] |
permissive
|
Sasmita-cloud/Hacktoberfest2021_PatternMaking
|
dd50c8a81bce9da25650b3b497330a9c150d54da
|
af48c242c10b08b74cf42c35b8550a45f1d9cbff
|
refs/heads/main
| 2023-08-29T22:56:59.953119
| 2021-10-14T11:49:43
| 2021-10-14T11:49:43
| 415,092,226
| 0
| 0
|
MIT
| 2021-10-08T18:38:33
| 2021-10-08T18:38:32
| null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
# Print a left-aligned triangle of n rows; row i (0-based) has i+1 stars.
n = 5
for i in range(n):
    row = ""
    for j in range(i + 1):
        row += "*"
    print(row)
|
[
"satakshi.pal.civ19@itbhu.ac.in"
] |
satakshi.pal.civ19@itbhu.ac.in
|
0079ec1753397ec8e2d4db72f17762047e237974
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_113/ch20_2020_09_16_11_25_21_743333.py
|
2edf4921f042e6256794183644e3ed17b47e767a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
# Ticket pricing: 0.50 per km up to 200 km; beyond that, a flat 100 for the
# first 200 km plus 0.45 per km for the remainder.
# NOTE(review): original indentation was lost in this copy; the formatted
# print is assumed to belong to the elif branch — confirm upstream.
dist = int(input('Qual a distancia de sua viajem?: '))
if dist <= 200:
    preco = dist * 0.5
    print(preco)
elif dist > 200:
    dist -= 200                # remaining distance past the 200 km threshold
    preco = dist * 0.45
    preco += 100               # base fare covering the first 200 km
    print ('{0:.2f}'.format(preco))
|
[
"you@example.com"
] |
you@example.com
|
1609c6c3f1c94842f8f310f93f085fce5e8c1f17
|
66e66b87029c6ab498c48823545800ef0af4ac7c
|
/Python/LX/7/7.9.py
|
8e121db2fe9623db56424c20917f4c79b348696f
|
[] |
no_license
|
zhenyuzzc/source-code
|
aced32c5177e3a8822a0eefcfd1f1800646b7e2b
|
c243c79608c585ae10362735081fc06f672248b9
|
refs/heads/master
| 2023-08-10T15:39:29.428196
| 2021-10-09T12:38:57
| 2021-10-09T12:38:57
| 406,938,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
# 'pastrami' is sold out: strip every pastrami order, then show what's left.
sandwich_orders = ['yanmaipeigen','pastrami','jingqiangyu','pastrami','jidanhuotui','pastrami','tudouni']
print("五香徐牛肉已经卖完了(pastrami)")
while sandwich_orders.count('pastrami') > 0:
    sandwich_orders.remove('pastrami')
print("只有这些了:")
print(sandwich_orders)
|
[
"zehnchao_zhu@qq.com"
] |
zehnchao_zhu@qq.com
|
9b8d5b002e735a2f759c9eabe3156845f8be7fde
|
dcf104fd22872b09a2e5c34d998d37282ae61b21
|
/build/lib/mrcnn/model.py
|
9923736652e74d49d00786a7d3723f8bd3b09c1d
|
[
"MIT"
] |
permissive
|
SAFARIZADEH/Stone_Detection_MRCNN
|
159d3bc48b629e70f68b79c6066736202cae4e09
|
5554291c0c4f59ffaa02accf50689e2f7336b088
|
refs/heads/main
| 2023-03-22T02:12:37.373327
| 2021-03-20T14:25:26
| 2021-03-20T14:25:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126,959
|
py
|
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Print a text message; when a NumPy array is supplied, also report
    its shape, min/max values and dtype on the same line."""
    if array is None:
        print(text)
        return
    line = text.ljust(25) + "shape: {:20} ".format(str(array.shape))
    if array.size:
        stats = "min: {:10.5f} max: {:10.5f}".format(array.min(), array.max())
    else:
        # Empty array: keep the columns aligned with blank fields.
        stats = "min: {:10} max: {:10}".format("", "")
    print(line + stats + " {}".format(array.dtype))
class BatchNorm(KL.BatchNormalization):
    """Extends the Keras BatchNormalization class to allow a central place
    to make changes if needed.

    Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via setting in Config class) and functions
    as linear layer.
    """

    def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when making inferences
        """
        # NOTE(review): ``super(self.__class__, ...)`` would recurse forever
        # if this class were subclassed again; harmless as used here.
        return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    # A callable backbone supplies its own shape computation.
    if callable(config.BACKBONE):
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Otherwise only the two ResNet variants are supported.
    assert config.BACKBONE in ["resnet50", "resnet101"]
    height, width = image_shape[0], image_shape[1]
    stage_shapes = [
        [int(math.ceil(height / stride)), int(math.ceil(width / stride))]
        for stride in config.BACKBONE_STRIDES
    ]
    return np.array(stage_shapes)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers

    Returns the output tensor of the block (same spatial size as the input).
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    # Layer names encode stage and block, e.g. res2a_branch2a / bn2a_branch2a.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # 1x1 conv: reduce channel count (bottleneck entry).
    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # kxk conv: spatial processing at the reduced width.
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # 1x1 conv: restore channels so the residual add is shape-compatible.
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    # Residual connection: the input is added unchanged (hence "identity").
    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # 1x1 strided conv: reduce channels and (usually) downsample spatially.
    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # kxk conv at the reduced width.
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # 1x1 conv: expand back to nb_filter3 channels.
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
                  '2c', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    # Projection shortcut: a strided 1x1 conv makes the input match the
    # main path's shape so the two can be added.
    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers

    Returns [C1, C2, C3, C4, C5]: the output tensor of each stage
    (C5 is None when stage5 is False). These feed the FPN.
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1: 7x7/2 conv + 3x3/2 max pool -> 1/4 input resolution.
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2: strides=(1, 1) keeps resolution (pooling already downsampled).
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4: resnet50 and resnet101 differ only in this stage's depth.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        # Blocks are named 'b', 'c', 'd', ... via chr(98 + i).
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5 (optional)
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Applies the given deltas to the given boxes.

    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
    Returns the refined boxes, [N, (y1, x1, y2, x2)].
    """
    # Work in center/size form.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center by a fraction of the size, scale the size
    # exponentially (deltas carry log-scale factors).
    cy += deltas[:, 0] * h
    cx += deltas[:, 1] * w
    h *= tf.exp(deltas[:, 2])
    w *= tf.exp(deltas[:, 3])
    # Convert back to corner form.
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    y2 = y1 + h
    x2 = x1 + w
    return tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clip boxes so they lie inside the window.

    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)

    # Clamp each coordinate into the window's range on its axis.
    def clamp(v, lo, hi):
        return tf.maximum(tf.minimum(v, hi), lo)

    clipped = tf.concat(
        [clamp(y1, wy1, wy2),
         clamp(x1, wx1, wx2),
         clamp(y2, wy1, wy2),
         clamp(x2, wx1, wx2)],
        axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.

    Inputs:
        rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
        anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates

    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """

    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        # proposal_count: max proposals kept per image (output is padded to it).
        # nms_threshold: IoU threshold for non-max suppression.
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold

    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        # Undo the normalization applied to deltas during training.
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Anchors
        anchors = inputs[2]

        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        # batch_slice applies the lambda per image of the batch.
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
                                            self.config.IMAGES_PER_GPU,
                                            names=["pre_nms_anchors"])

        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([pre_nms_anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])

        # Clip to image boundaries. Since we're in normalized coordinates,
        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])

        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.

        # Non-max suppression
        def nms(boxes, scores):
            # Keep at most proposal_count boxes; pad with zeros so every
            # image in the batch yields a fixed-size tensor.
            indices = tf.image.non_max_suppression(
                boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(boxes, indices)
            # Pad if needed
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals

    def compute_output_shape(self, input_shape):
        # Fixed-size output: proposal_count boxes of 4 coordinates each.
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Implementation of Log2 (TF 1.x has no native log2): ln(x) / ln(2)."""
    ln2 = tf.log(2.0)
    return tf.log(x) / ln2
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Params:
    - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - feature_maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specific in the pool_shape in the layer
    constructor.
    """

    def __init__(self, pool_shape, **kwargs):
        super(PyramidROIAlign, self).__init__(**kwargs)
        self.pool_shape = tuple(pool_shape)

    def call(self, inputs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]

        # Image meta
        # Holds details about the image. See compose_image_meta()
        image_meta = inputs[1]

        # Feature Maps. List of feature maps from different level of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[2:]

        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Use shape of first image. Images in a batch must have the same size.
        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp to pyramid levels 2..5.
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)

        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            # Indices (batch, box) of the ROIs assigned to this level.
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)

            # Box indices for crop_and_resize.
            box_indices = tf.cast(ix[:, 0], tf.int32)

            # Keep track of which box is mapped to which level
            box_to_level.append(ix)

            # Stop gradient propogation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)

            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))

        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)

        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)

        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        # (100000 must exceed the max number of boxes per image.)
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)

        # Re-add the batch dimension
        shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
        pooled = tf.reshape(pooled, shape)
        return pooled

    def compute_output_shape(self, input_shape):
        return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].

    Returns an IoU matrix of shape [len(boxes1), len(boxes2)].
    """
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every boxes1 against every boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
    # using tf.tile() and tf.reshape.
    b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
                            [1, 1, tf.shape(boxes2)[0]]), [-1, 4])
    b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
    # 2. Compute intersections
    b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
    b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
    y1 = tf.maximum(b1_y1, b2_y1)
    x1 = tf.maximum(b1_x1, b2_x1)
    y2 = tf.minimum(b1_y2, b2_y2)
    x2 = tf.minimum(b1_x2, b2_x2)
    # Clamp at zero so disjoint boxes get zero intersection, not negative.
    intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
    # 3. Compute unions
    b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    union = b1_area + b2_area - intersection
    # 4. Compute IoU and reshape to [boxes1, boxes2]
    iou = intersection / union
    overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
    return overlaps
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.

    Inputs:
    proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
    masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
           boundaries and resized to neural network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    # Assertions: fail fast at run time if the proposals tensor is empty.
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)

    # Remove zero padding (all-zero rows) from proposals and GT boxes, and
    # keep class IDs and masks consistent with the trimmed boxes.
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)

    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)

    # Compute overlaps with crowd boxes [proposals, crowd_boxes]
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    # A proposal counts as "outside any crowd" if its best crowd IoU is tiny.
    no_crowd_bool = (crowd_iou_max < 0.001)

    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]

    # Subsample ROIs. Aim for 33% positive
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
    # Recompute: the actual number of positives may be below the target.
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)

    # Assign positive ROIs to GT boxes.
    positive_overlaps = tf.gather(overlaps, positive_indices)
    # Guard against zero GT boxes: argmax over an empty axis would fail,
    # so return an empty int64 assignment in that case.
    roi_gt_box_assignment = tf.cond(
        tf.greater(tf.shape(positive_overlaps)[1], 0),
        true_fn = lambda: tf.argmax(positive_overlaps, axis=1),
        false_fn = lambda: tf.cast(tf.constant([]),tf.int64)
    )
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)

    # Compute bbox refinement (deltas) for positive ROIs, normalized by the
    # configured standard deviations.
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV

    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)

    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space.
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    # Crop each GT mask to its ROI box and resize to the network output size.
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)

    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)

    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])

    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each.

    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        proposals, gt_class_ids, gt_boxes, gt_masks = inputs
        # Run the single-image target graph on each item of the batch.
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        return utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda p, c, b, m: detection_targets_graph(
                p, c, b, m, self.config),
            self.config.IMAGES_PER_GPU, names=names)

    def compute_output_shape(self, input_shape):
        rois_per_image = self.config.TRAIN_ROIS_PER_IMAGE
        mask_h, mask_w = self.config.MASK_SHAPE[0], self.config.MASK_SHAPE[1]
        return [
            (None, rois_per_image, 4),              # rois
            (None, rois_per_image),                 # class_ids
            (None, rois_per_image, 4),              # deltas
            (None, rois_per_image, mask_h, mask_w)  # masks
        ]

    def compute_mask(self, inputs, mask=None):
        # No Keras masking is propagated through this layer.
        return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.

    Inputs:
    rois: [N, (y1, x1, y2, x2)] in normalized coordinates
    probs: [N, num_classes]. Class probabilities.
    deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
            bounding box deltas.
    window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
            that contains the image excluding the padding.

    Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
    coordinates are normalized.
    """
    # Class IDs per ROI (the top-scoring class for each ROI).
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)

    # TODO: Filter out boxes with zero area

    # Filter out background boxes (class ID 0 is background).
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        # Intersect the two index sets (set ops require a leading batch dim).
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]

    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]

    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=config.DETECTION_MAX_INSTANCES,
                iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back into the `keep` index space.
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep

    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = tf.sparse_tensor_to_dense(keep)[0]
    # Keep top detections (at most DETECTION_MAX_INSTANCES, by score).
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)

    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are normalized.
    detections = tf.concat([
        tf.gather(refined_rois, keep),
        tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)

    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are normalized.
    """

    def __init__(self, config=None, **kwargs):
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        rois, mrcnn_class, mrcnn_bbox, image_meta = inputs
        # The window is the region of each image that excludes padding, in
        # normalized coordinates. All images in a batch are resized to the
        # same size, so the first image's shape is used for normalization.
        meta = parse_image_meta_graph(image_meta)
        first_image_shape = meta['image_shape'][0]
        window = norm_boxes_graph(meta['window'], first_image_shape[:2])
        # Refine detections for each image in the batch independently.
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda r, c, b, w: refine_detections_graph(r, c, b, w, self.config),
            self.config.IMAGES_PER_GPU)
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)]
        # in normalized coordinates.
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

    def compute_output_shape(self, input_shape):
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the feature map
    # is not even.
    # Convolutional base shared by the classifier and regressor branches.
    base = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                     strides=anchor_stride,
                     name='rpn_conv_shared')(feature_map)

    # Classifier branch: raw scores [batch, H, W, anchors_per_location * 2],
    # reshaped to [batch, anchors, 2] and softmaxed over BG/FG.
    class_raw = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                          activation='linear', name='rpn_class_raw')(base)
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(class_raw)
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Regressor branch: [batch, H, W, anchors_per_location * 4] where the
    # depth holds (dy, dx, log(dh), log(dw)), reshaped to [batch, anchors, 4].
    bbox_raw = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(base)
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(bbox_raw)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.

    Wrapping the RPN graph in its own Model lets it be applied to several
    feature maps with shared weights.

    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.

    Returns a Keras Model object. The model outputs, when called, are:
        rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    feature_input = KL.Input(shape=[None, None, depth],
                             name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_input, anchors_per_location, anchor_stride)
    return KM.Model([feature_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True,
                         fc_layers_size=1024):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    fc_layers_size: Size of the 2 FC layers

    Returns:
        logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
        probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
        bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Align: [batch, num_rois, pool_size, pool_size, channels].
    pooled = PyramidROIAlign([pool_size, pool_size],
                             name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # First "FC" layer, expressed as a pool_size x pool_size valid conv so
    # the whole head stays convolutional.
    net = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
                             name="mrcnn_class_conv1")(pooled)
    net = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(net, training=train_bn)
    net = KL.Activation('relu')(net)
    # Second "FC" layer as a 1x1 conv.
    net = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                             name="mrcnn_class_conv2")(net)
    net = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(net, training=train_bn)
    net = KL.Activation('relu')(net)
    # Drop the 1x1 spatial axes: [batch, num_rois, fc_layers_size].
    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(net)

    # Classifier head.
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head: one (dy, dx, log(dh), log(dw)) set per class,
    # [batch, num_rois, NUM_CLASSES * 4] reshaped to
    # [batch, num_rois, NUM_CLASSES, 4].
    bbox_fc = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                                 name='mrcnn_bbox_fc')(shared)
    s = K.int_shape(bbox_fc)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(bbox_fc)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the mask head of Feature Pyramid Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
    """
    # ROI Align: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels].
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask")([rois, image_meta] + feature_maps)

    # Four identical conv blocks (3x3 conv -> batch norm -> relu), named
    # mrcnn_mask_conv1..4 / mrcnn_mask_bn1..4 to match the trained weights.
    for i in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(i))(x)
        x = KL.TimeDistributed(BatchNorm(),
                               name="mrcnn_mask_bn{}".format(i))(x, training=train_bn)
        x = KL.Activation('relu')(x)

    # Upsample 2x, then predict a per-class sigmoid mask with a 1x1 conv.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss, elementwise.

    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    abs_err = K.abs(y_true - y_pred)
    # 1.0 where |error| < 1 (quadratic region), 0.0 elsewhere (linear region).
    quadratic = K.cast(K.less(abs_err, 1.0), "float32")
    return quadratic * 0.5 * abs_err**2 + (1 - quadratic) * (abs_err - 0.5)
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Drop the trailing singleton dimension.
    rpn_match = tf.squeeze(rpn_match, -1)
    # Anchors labeled +1 become class 1 (FG); -1 and 0 become class 0.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Only positive and negative anchors contribute to the loss;
    # neutral anchors (match value = 0) are excluded.
    contributing = tf.where(K.not_equal(rpn_match, 0))
    picked_logits = tf.gather_nd(rpn_class_logits, contributing)
    picked_class = tf.gather_nd(anchor_class, contributing)
    # Sparse cross-entropy over the contributing anchors.
    loss = K.sparse_categorical_crossentropy(target=picked_class,
                                             output=picked_logits,
                                             from_logits=True)
    # Guard against an empty selection: return 0 instead of NaN.
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
                 Uses 0 padding to fill in unsed bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Only positive anchors contribute; negative and neutral (0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    positive_ix = tf.where(K.equal(rpn_match, 1))
    pred_deltas = tf.gather_nd(rpn_bbox, positive_ix)
    # target_bbox is zero padded per image; trim it to the number of
    # positive anchors in each batch item so it lines up with pred_deltas.
    positives_per_image = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    gt_deltas = batch_pack_graph(target_bbox, positives_per_image,
                                 config.IMAGES_PER_GPU)
    loss = smooth_l1_loss(gt_deltas, pred_deltas)
    # Guard against an empty selection: return 0 instead of NaN.
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # Keras may call this with float32 target_class_ids during model
    # building. Cast to int for the sparse cross-entropy op.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Per-ROI predicted class, used to detect predictions of classes that
    # are not part of this image's dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    raw_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Zero out losses for predictions of classes that are not active.
    masked_loss = raw_loss * pred_active

    # Mean over contributing predictions only.
    return tf.reduce_sum(masked_loss) / tf.reduce_sum(pred_active)
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Flatten the batch and ROI dimensions for simpler indexing.
    flat_class_ids = K.reshape(target_class_ids, (-1,))
    flat_target = K.reshape(target_bbox, (-1, 4))
    flat_pred = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute, and only the deltas predicted for
    # their ground-truth class.
    positive_ix = tf.where(flat_class_ids > 0)[:, 0]
    positive_class = tf.cast(
        tf.gather(flat_class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class], axis=1)

    y_true = tf.gather(flat_target, positive_ix)
    y_pred = tf.gather_nd(flat_pred, gather_ix)

    # Smooth-L1 loss; 0 when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    smooth_l1_loss(y_true=y_true, y_pred=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
                with values from 0 to 1.
    """
    # Flatten the batch and ROI dimensions.
    flat_class_ids = K.reshape(target_class_ids, (-1,))
    true_shape = tf.shape(target_masks)
    flat_true = K.reshape(target_masks, (-1, true_shape[2], true_shape[3]))
    p_shape = tf.shape(pred_masks)
    flat_pred = K.reshape(pred_masks,
                          (-1, p_shape[2], p_shape[3], p_shape[4]))
    # Reorder to [N, num_classes, height, width] so the class axis can be
    # selected per ROI with gather_nd.
    flat_pred = tf.transpose(flat_pred, [0, 3, 1, 2])

    # Only positive ROIs contribute, and only the mask predicted for their
    # ground-truth class.
    positive_ix = tf.where(flat_class_ids > 0)[:, 0]
    positive_class = tf.cast(
        tf.gather(flat_class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class], axis=1)

    y_true = tf.gather(flat_true, positive_ix)
    y_pred = tf.gather_nd(flat_pred, gather_ix)

    # Binary cross-entropy; 0 when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    dataset: a Dataset-like object providing load_image()/load_mask() and the
        class bookkeeping used below (num_classes, source_class_ids, image_info).
    config: model config (IMAGE_MIN_DIM, IMAGE_MAX_DIM, MINI_MASK_SHAPE, ...).
    image_id: id of the image within `dataset`.
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    image_meta: packed image metadata (see compose_image_meta()).
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask, then resize both consistently per the config.
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support np.bool
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool. Use the builtin `bool` rather than the
        # deprecated `np.bool` alias, which was removed in NumPy 1.24 and
        # raises AttributeError there; `astype(bool)` behaves identically.
        mask = mask.astype(bool)

    # Note that some boxes might be all zeros if the corresponding mask got cropped out.
    # and here is to filter them out
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.

    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.

    Raises:
        AssertionError: if inputs are empty, have wrong dtypes, or the image
        contains no positive (non-padding) instances.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)
    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.
    # Trim empty padding in gt_boxes and gt_masks parts.
    # Padding entries have class_id == 0; crowd boxes (negative IDs) are also dropped.
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]
    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])
    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)
    # Assign ROIs to GT boxes: each ROI is matched to its best-overlapping GT box.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG: fill the remainder of the ROI budget with background samples.
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.
        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            # NOTE(review): the "easier" threshold here is still 0.5, identical
            # to the one used for bg_ids above — presumably it was meant to be
            # looser; verify against upstream before changing.
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois (sampling with replacement).
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
    # Reset the gt boxes assigned to BG ROIs so they contribute no bbox/mask targets.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0
    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]
    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    # Only the slot for the matched class is filled; all other classes stay zero.
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV
    # Generate class-specific target masks
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]
        if config.USE_MINI_MASK:
            # Mini-masks are stored relative to their GT box, so expand back
            # to full image coordinates before cropping by the ROI.
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini batch in the placeholder
            class_mask = placeholder
        # Pick part of the mask and resize it to the network's mask output size.
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = utils.resize(m, config.MASK_SHAPE)
        masks[i, :, :, class_id] = mask
    return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
              Only the first len(positive anchors) rows are filled; the rest
              remain zero.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors overlapping a crowd box stay neutral instead of negative.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)
    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1
    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Rest the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]
        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w
        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1
    return rpn_match, rpn_bbox
def _sample_nonzero_boxes(r_y1, r_y2, r_x1, r_x2, n):
    """Sample n boxes with non-zero area inside [r_y1, r_y2) x [r_x1, r_x2).

    To avoid generating boxes with zero area, we generate double what we need
    and filter out the extra. If we get fewer valid boxes than we need, we
    loop and try again.

    Returns: [n, (y1, x1, y2, x2)] with y1 < y2 and x1 < x2.
    """
    while True:
        y1y2 = np.random.randint(r_y1, r_y2, (n * 2, 2))
        x1x2 = np.random.randint(r_x1, r_x2, (n * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:n]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:n]
        if y1y2.shape[0] == n and x1x2.shape[0] == n:
            break
    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into y1, x1, y2, x2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    return np.hstack([y1, x1, y2, x2])


def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs (unused; kept for
        interface compatibility with callers).
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels. May be
        empty, in which case all ROIs are sampled uniformly over the image.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)
    # Generate random ROIs around GT boxes (90% of count).
    # Fix: guard against empty gt_boxes, which previously caused a
    # ZeroDivisionError in the rois_per_box computation.
    num_gt = 0 if gt_boxes is None else gt_boxes.shape[0]
    rois_per_box = int(0.9 * count / num_gt) if num_gt > 0 else 0
    for i in range(num_gt):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
        h = gt_y2 - gt_y1
        w = gt_x2 - gt_x1
        # Random boundaries: a window up to one box-size around the GT box,
        # clipped to the image.
        r_y1 = max(gt_y1 - h, 0)
        r_y2 = min(gt_y2 + h, image_shape[0])
        r_x1 = max(gt_x1 - w, 0)
        r_x2 = min(gt_x2 + w, image_shape[1])
        box_rois = _sample_nonzero_boxes(r_y1, r_y2, r_x1, r_x2, rois_per_box)
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
    # Generate random ROIs anywhere in the image (the remaining ~10% of count,
    # or all of them when there are no GT boxes).
    remaining_count = count - (rois_per_box * num_gt)
    if remaining_count > 0:
        global_rois = _sample_nonzero_boxes(
            0, image_shape[0], 0, image_shape[1], remaining_count)
        rois[-remaining_count:] = global_rois
    return rois
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False,
                   no_augmentation_sources=None):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in trainig detection targets are generated by DetectionTargetLayer.
    no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is string that identifies a dataset and is
        defined in the Dataset class.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differs depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    # Consecutive per-image failures tolerated before re-raising (see except below).
    error_count = 0
    no_augmentation_sources = no_augmentation_sources or []
    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)
    # Keras requires a generator to run indefinitely.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)
            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            # If the image source is not to be augmented pass None as augmentation
            if dataset.image_info[image_id]['source'] in no_augmentation_sources:
                image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                    load_image_gt(dataset, config, image_id, augment=augment,
                                  augmentation=None,
                                  use_mini_mask=config.USE_MINI_MASK)
            else:
                image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                    load_image_gt(dataset, config, image_id, augment=augment,
                                augmentation=augmentation,
                                use_mini_mask=config.USE_MINI_MASK)
            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue
            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)
            # Mask R-CNN Targets (only when training the heads without the RPN)
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
            # Init batch arrays lazily, once the first sample's shapes are known.
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]
            # Add to batch (GT arrays are zero-padded up to MAX_GT_INSTANCES).
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1
            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
                outputs = []
                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
                yield inputs, outputs
                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            raise
        except:
            # Log it and skip the image; re-raise after too many failures so a
            # systematic problem doesn't loop forever.
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
    def build(self, mode, config):
        """Build Mask R-CNN architecture.

        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        config: A Sub-class of the Config class.

        Returns: a keras.Model (wrapped in ParallelModel when GPU_COUNT > 1).
        """
        assert mode in ['training', 'inference']
        # Image size must be dividable by 2 multiple times
        # (the FPN downscales by 2 six times and upscales back).
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be dividable by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")
        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Don't create the thead (stage 5), so we pick the 4th item in the list.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers (FPN): 1x1 convs project each C level to a common
        # channel count, then each level is upsampled and added to the next.
        # TODO: add assert to varify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]
        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors
        # RPN Model: one shared model applied to every pyramid level.
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]
        rpn_class_logits, rpn_class, rpn_bbox = outputs
        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(
            proposal_count=proposal_count,
            nms_threshold=config.RPN_NMS_THRESHOLD,
            name="ROI",
            config=config)([rpn_class, rpn_bbox, anchors])
        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            active_class_ids = KL.Lambda(
                lambda x: parse_image_meta_graph(x)["active_class_ids"]
                )(input_image_meta)
            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x, K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois
            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            # TODO: clean up (use tf.identify if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
            # Losses (each wrapped in a named Lambda so compile() can find them by name)
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])
            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
            # normalized coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
            # Create masks for detections (only for the final detection boxes)
            detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
            mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            model = KM.Model([input_image, input_image_meta, input_anchors],
                             [detections, mrcnn_class, mrcnn_bbox,
                                 mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
                             name='mask_rcnn')
        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from mrcnn.parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)
        return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
# Conditional import to support versions of Keras before 2.2
# TODO: remove in about 6 months (end of 2018)
try:
from keras.engine import saving
except ImportError:
# Keras before 2.2 used the 'topology' namespace.
from keras.engine import topology as saving
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
saving.load_weights_from_hdf5_group_by_name(f, layers)
else:
saving.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
    def compile(self, learning_rate, momentum):
        """Gets the model ready for training. Adds losses, regularization, and
        metrics. Then calls the Keras compile() function.

        learning_rate: SGD learning rate.
        momentum: SGD momentum.
        """
        # Optimizer object
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
        # Add Losses
        # First, clear previously set losses to avoid duplication
        # (NOTE: touches Keras private attributes; tied to the Keras version
        # this code was written for).
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        loss_names = [
            "rpn_class_loss", "rpn_bbox_loss",
            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            # Skip losses already registered (e.g. on a second compile()).
            if layer.output in self.keras_model.losses:
                continue
            # Mean of the loss layer output, scaled by the configured weight.
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_loss(loss)
        # Add L2 Regularization
        # Skip gamma and beta weights of batch normalization layers.
        reg_losses = [
            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
            for w in self.keras_model.trainable_weights
            if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))
        # Compile
        # Losses were attached via add_loss() above, so each model output
        # gets loss=None here.
        self.keras_model.compile(
            optimizer=optimizer,
            loss=[None] * len(self.keras_model.outputs))
        # Add metrics for losses
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.metrics_tensors.append(loss)
    def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
        """Sets model layers as trainable if their names match
        the given regular expression.

        layer_regex: regex matched (fullmatch) against each layer name.
        keras_model: model to operate on; None means self.keras_model.
            Also used internally when recursing into nested models.
        indent: indentation level for the printed layer list (recursion).
        verbose: if > 0, log the layers selected for training.
        """
        # Print message on the first call (but not on recursive calls)
        if verbose > 0 and keras_model is None:
            log("Selecting layers to train")
        keras_model = keras_model or self.keras_model
        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers
        for layer in layers:
            # Is the layer a model? Recurse into it.
            if layer.__class__.__name__ == 'Model':
                print("In model: ", layer.name)
                self.set_trainable(
                    layer_regex, keras_model=layer, indent=indent + 4)
                continue
            # Layers without weights (e.g. input, lambda) are skipped.
            if not layer.weights:
                continue
            # Is it trainable?
            trainable = bool(re.fullmatch(layer_regex, layer.name))
            # Update layer. If layer is a container, update inner layer.
            if layer.__class__.__name__ == 'TimeDistributed':
                layer.layer.trainable = trainable
            else:
                layer.trainable = trainable
            # Print trainable layer names
            if trainable and verbose > 0:
                log("{}{:20}   ({})".format(" " * indent, layer.name,
                                            layer.__class__.__name__))
    def set_log_dir(self, model_path=None):
        """Sets the model log directory and epoch counter.

        model_path: If None, or a format different from what this code uses
            then set a new log directory and start epochs from 0. Otherwise,
            extract the log directory and the epoch counter from the file
            name.

        Sets self.epoch, self.log_dir and self.checkpoint_path.
        """
        # Set date and epoch counter as if starting a new model
        self.epoch = 0
        now = datetime.datetime.now()
        # If we have a model path with date and epochs use them
        if model_path:
            # Continue from we left of. Get epoch and date from the file name
            # A sample model path might look like:
            # \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
            # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
            # Groups 1-5 capture the directory timestamp (Y, M, D, H, Min);
            # group 6 is the 4-digit epoch number in the file name.
            regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
            m = re.match(regex, model_path)
            if m:
                now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
                                        int(m.group(4)), int(m.group(5)))
                # Epoch number in file is 1-based, and in Keras code it's 0-based.
                # So, adjust for that then increment by one to start from the next epoch
                # (the -1 +1 is kept explicit to document both adjustments).
                self.epoch = int(m.group(6)) - 1 + 1
                print('Re-starting from epoch %d' % self.epoch)
        # Directory for training logs
        self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
            self.config.NAME.lower(), now))
        # Path to save after each epoch. Include placeholders that get filled by Keras.
        self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
            self.config.NAME.lower()))
        self.checkpoint_path = self.checkpoint_path.replace(
            "*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done alreay, so this actually determines
the epochs to train in total rather than in this particaular
call.
layers: Allows selecting wich layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
custom_callbacks: Optional. Add custom callbacks to be called
with the keras fit_generator method. Must be list of type keras.callbacks.
no_augmentation_sources: Optional. List of sources to exclude for
augmentation. A source is string that identifies a dataset and is
defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE,
no_augmentation_sources=no_augmentation_sources)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name is 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
    def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matrices:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
        """
        molded_images = []
        image_metas = []
        windows = []
        for image in images:
            # Resize image
            # TODO: move resizing to mold_image()
            molded_image, window, scale, padding, crop = utils.resize_image(
                image,
                min_dim=self.config.IMAGE_MIN_DIM,
                min_scale=self.config.IMAGE_MIN_SCALE,
                max_dim=self.config.IMAGE_MAX_DIM,
                mode=self.config.IMAGE_RESIZE_MODE)
            # Subtract the mean pixel and convert to float.
            molded_image = mold_image(molded_image, self.config)
            # Build image_meta. Image id 0 and an all-zeros active_class_ids
            # vector are used because they are unused during inference.
            image_meta = compose_image_meta(
                0, image.shape, molded_image.shape, window, scale,
                np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
            # Append
            molded_images.append(molded_image)
            windows.append(window)
            image_metas.append(image_meta)
        # Pack into arrays
        molded_images = np.stack(molded_images)
        image_metas = np.stack(image_metas)
        windows = np.stack(windows)
        return molded_images, image_metas, windows
    def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
                          image_shape, window):
        """Reformats the detections of one image from the format of the neural
        network output to a format suitable for use in the rest of the
        application.

        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
        mrcnn_mask: [N, height, width, num_classes]
        original_image_shape: [H, W, C] Original image shape before resizing
        image_shape: [H, W, C] Shape of the image after resizing and padding
        window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
                image is excluding the padding.

        Returns:
        boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
        class_ids: [N] Integer class IDs for each bounding box
        scores: [N] Float probability scores of the class_id
        masks: [height, width, num_instances] Instance masks
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = np.where(detections[:, 4] == 0)[0]
        N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        class_ids = detections[:N, 4].astype(np.int32)
        scores = detections[:N, 5]
        # Pick, for each detection, the mask channel of its predicted class.
        masks = mrcnn_mask[np.arange(N), :, :, class_ids]
        # Translate normalized coordinates in the resized image to pixel
        # coordinates in the original image before resizing
        window = utils.norm_boxes(window, image_shape[:2])
        wy1, wx1, wy2, wx2 = window
        shift = np.array([wy1, wx1, wy1, wx1])
        wh = wy2 - wy1  # window height
        ww = wx2 - wx1  # window width
        scale = np.array([wh, ww, wh, ww])
        # Convert boxes to normalized coordinates on the window
        boxes = np.divide(boxes - shift, scale)
        # Convert boxes to pixel coordinates on the original image
        boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
        # Filter out detections with zero area. Happens in early training when
        # network weights are still random
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
        if exclude_ix.shape[0] > 0:
            boxes = np.delete(boxes, exclude_ix, axis=0)
            class_ids = np.delete(class_ids, exclude_ix, axis=0)
            scores = np.delete(scores, exclude_ix, axis=0)
            masks = np.delete(masks, exclude_ix, axis=0)
            N = class_ids.shape[0]
        # Resize masks to original image size and set boundary threshold.
        full_masks = []
        for i in range(N):
            # Convert neural network mask to full size mask
            full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
            full_masks.append(full_mask)
        # Empty [H, W, 0] array when there are no detections left.
        full_masks = np.stack(full_masks, axis=-1)\
            if full_masks else np.empty(original_image_shape[:2] + (0,))
        return boxes, class_ids, scores, full_masks
    def detect(self, images, verbose=0):
        """Runs the detection pipeline.

        images: List of images, potentially of different sizes.
        verbose: if truthy, log the images and molded inputs.

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(
            images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
        if verbose:
            log("Processing {} images".format(len(images)))
            for image in images:
                log("image", image)
        # Mold inputs to format expected by the neural network
        molded_images, image_metas, windows = self.mold_inputs(images)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape,\
                "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
        # Process detections: map each image's output back to original
        # image coordinates using its pre-resize window.
        results = []
        for i, image in enumerate(images):
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       windows[i])
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
    def detect_molded(self, molded_images, image_metas, verbose=0):
        """Runs the detection pipeline, but expect inputs that are
        molded already. Used mostly for debugging and inspecting
        the model.

        molded_images: List of images loaded using load_image_gt()
        image_metas: image meta data, also returned by load_image_gt()

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(molded_images) == self.config.BATCH_SIZE,\
            "Number of images must be equal to BATCH_SIZE"
        if verbose:
            log("Processing {} images".format(len(molded_images)))
            for image in molded_images:
                log("image", image)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape, "Images must have the same size"
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
        # Process detections
        results = []
        for i, image in enumerate(molded_images):
            # Images are already molded, so the window is the full image
            # (no padding to strip).
            window = [0, 0, image.shape[0], image.shape[1]]
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       window)
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
    def get_anchors(self, image_shape):
        """Returns anchor pyramid for the given image size.

        image_shape: [H, W, ...] shape of the (molded) input image.

        Returns anchors in normalized coordinates, cached per image shape.
        """
        backbone_shapes = compute_backbone_shapes(self.config, image_shape)
        # Cache anchors and reuse if image shape is the same
        if not hasattr(self, "_anchor_cache"):
            self._anchor_cache = {}
        if not tuple(image_shape) in self._anchor_cache:
            # Generate Anchors
            a = utils.generate_pyramid_anchors(
                self.config.RPN_ANCHOR_SCALES,
                self.config.RPN_ANCHOR_RATIOS,
                backbone_shapes,
                self.config.BACKBONE_STRIDES,
                self.config.RPN_ANCHOR_STRIDE)
            # Keep a copy of the latest anchors in pixel coordinates because
            # it's used in inspect_model notebooks.
            # TODO: Remove this after the notebook are refactored to not use it
            self.anchors = a
            # Normalize coordinates
            self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
        return self._anchor_cache[tuple(image_shape)]
    def ancestor(self, tensor, name, checked=None):
        """Finds the ancestor of a TF tensor in the computation graph.

        tensor: TensorFlow symbolic tensor.
        name: Name of ancestor tensor to find. A string is converted to a
            regex that also tolerates Keras's auto-added numeric suffixes.
        checked: For internal use. A list of tensors that were already
            searched to avoid loops in traversing the graph.

        Returns the first matching ancestor tensor (depth-first over the
        op's inputs), or None if not found within the search limit.
        """
        checked = checked if checked is not None else []
        # Put a limit on how deep we go to avoid very long loops
        if len(checked) > 500:
            return None
        # Convert name to a regex and allow matching a number prefix
        # because Keras adds them automatically
        if isinstance(name, str):
            name = re.compile(name.replace("/", r"(\_\d+)*/"))
        parents = tensor.op.inputs
        for p in parents:
            if p in checked:
                continue
            if bool(re.fullmatch(name, p.name)):
                return p
            checked.append(p)
            # Recurse into this parent's own inputs (depth-first).
            a = self.ancestor(p, name, checked)
            if a is not None:
                return a
        return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
# Pack the generated Numpy arrays into a a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
                       window, scale, active_class_ids):
    """Takes attributes of an image and puts them in one 1D array.

    image_id: An int ID of the image. Useful for debugging.
    original_image_shape: [H, W, C] before resizing or padding.
    image_shape: [H, W, C] after resizing and padding
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
        image is (excluding the padding)
    scale: The scaling factor applied to the original image (float32)
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    # Flatten every component into a single list in a fixed order:
    # id(1), original shape(3), molded shape(3), window(4), scale(1),
    # active class ids(num_classes).
    values = [image_id]
    values.extend(original_image_shape)
    values.extend(image_shape)
    values.extend(window)
    values.append(scale)
    values.extend(active_class_ids)
    return np.array(values)
def parse_image_meta(meta):
    """Parses an array that contains image attributes to its components.
    See compose_image_meta() for more details.

    meta: [batch, meta length] where meta length depends on NUM_CLASSES

    Returns a dict of the parsed values.
    """
    # (key, column slice, target dtype) for each packed component, in the
    # same order compose_image_meta() wrote them.
    layout = [
        ("image_id", np.s_[:, 0], np.int32),
        ("original_image_shape", np.s_[:, 1:4], np.int32),
        ("image_shape", np.s_[:, 4:7], np.int32),
        ("window", np.s_[:, 7:11], np.int32),  # (y1, x1, y2, x2) in pixels
        ("scale", np.s_[:, 11], np.float32),
        ("active_class_ids", np.s_[:, 12:], np.int32),
    ]
    return {key: meta[index].astype(dtype) for key, index, dtype in layout}
def parse_image_meta_graph(meta):
    """Parses a tensor that contains image attributes to its components.
    See compose_image_meta() for more details.

    meta: [batch, meta length] where meta length depends on NUM_CLASSES

    Returns a dict of the parsed tensors.
    """
    # Column layout mirrors compose_image_meta(): id(1), original shape(3),
    # molded shape(3), window(4), scale(1), active class ids(rest).
    return {
        "image_id": meta[:, 0],
        "original_image_shape": meta[:, 1:4],
        "image_shape": meta[:, 4:7],
        "window": meta[:, 7:11],  # (y1, x1, y2, x2) window of image in pixels
        "scale": meta[:, 11],
        "active_class_ids": meta[:, 12:],
    }
def mold_image(images, config):
    """Expects an RGB image (or array of images) and subtracts
    the mean pixel and converts it to float. Expects image
    colors in RGB order.
    """
    # Cast to float first, then center on the dataset mean pixel.
    as_float = images.astype(np.float32)
    return as_float - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Takes a image normalized with mold() and returns the original."""
    # Add the mean pixel back, then cast down to uint8 pixel values.
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.

    boxes: [N, 4] matrix of boxes.
    name: name given to the boolean_mask op.

    Returns:
    boxes: [M, 4] only the non-zero rows of the input.
    non_zeros: [N] a 1D boolean mask identifying the rows to keep
    """
    # A row is kept when the sum of absolute values is non-zero
    # (cast to bool maps 0 -> False, anything else -> True).
    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
    boxes = tf.boolean_mask(boxes, non_zeros, name=name)
    return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Picks different number of values from each row
    in x depending on the values in counts.
    """
    # Take the first counts[row] entries of each row, then flatten
    # everything into a single 1D-concatenated tensor.
    picked = [x[row, :counts[row]] for row in range(num_rows)]
    return tf.concat(picked, axis=0)
def norm_boxes_graph(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.
    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in normalized coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Divide by (dim - 1) and shift (y2, x2) in by one pixel so the
    # normalized box bounds are inclusive.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
    """Converts boxes from normalized coordinates to pixel coordinates.
    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in pixel coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Inverse of norm_boxes_graph(): scale back up by (dim - 1), push
    # (y2, x2) back outside the box, and round to integer pixels.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
|
[
"deolipankaj94@gmail.com"
] |
deolipankaj94@gmail.com
|
5aba40b94b3f2255d4c544bddbc1b7ac1e8899e6
|
902b02a62e28e7c767bc9e084a36a363383c8e0e
|
/libStash/books/migrations/0004_auto_20200920_0907.py
|
bdcc221cbe94291f82e4abbc468522a40f16d185
|
[
"MIT"
] |
permissive
|
Dev-Rem/libStash
|
8030a07f1924455cb54827d628a3d588aa9ee4b8
|
a364e9997c1c91b09f5db8a004deb4df305fa8cf
|
refs/heads/master
| 2023-04-06T01:27:34.930198
| 2021-05-01T14:21:26
| 2021-05-01T14:21:26
| 296,617,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Generated by Django 3.1.1 on 2020-09-20 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.1, 2020-09-20).

    Alters ``books.Author.address`` to a ``TextField`` with
    ``max_length=150``.
    """
    dependencies = [
        ('books', '0003_auto_20200920_0822'),
    ]
    operations = [
        migrations.AlterField(
            model_name='author',
            name='address',
            field=models.TextField(max_length=150),
        ),
    ]
|
[
"Rem_files@Aremus-MacBook-Air.local"
] |
Rem_files@Aremus-MacBook-Air.local
|
059c904f3d531856298874d795a6cf925be019fc
|
548444cdf0ebdefe73c5f1c7c044079f21ccd54a
|
/2 - Variáveis.py
|
85d5a9bd44ff1aad40cc377d1aefdc36e5adbba2
|
[
"MIT"
] |
permissive
|
gednt/Python-Projects
|
a4d62e83d3a27798ed67e6bccca41d6e4405234f
|
45a6dd160b7ec2d1bb2a7aa1289802dc294e1a3d
|
refs/heads/main
| 2023-05-03T06:40:42.715637
| 2021-05-17T01:04:32
| 2021-05-17T01:04:32
| 368,018,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
# Basic variable types: int, float, str and bool.
a = 1
b = 5.2
c = a + b
d = "Eu sou uma string"
e = True
f = False
print(c)
print(d)
print(e)
print(f)
|
[
"big.gamesedicas@gmail.com"
] |
big.gamesedicas@gmail.com
|
6451035e29061d208fd1945893c984c0c86d26a1
|
cc5a3fa80d2ae90afc2626e4a82b9a927726dfa0
|
/huaweicloud-sdk-frs/huaweicloudsdkfrs/v2/model/add_faces_by_url_response.py
|
a0b5174b327a3d9e652b2d8df2e8c1d453bf59e8
|
[
"Apache-2.0"
] |
permissive
|
Logan118/huaweicloud-sdk-python-v3
|
eca15e9b08bdccef7122e40735d444ddc958efa8
|
bb230c03bd00225b9f5780a56adce596e9456420
|
refs/heads/master
| 2023-07-17T14:57:50.799564
| 2021-08-25T10:40:43
| 2021-08-25T10:40:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,991
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AddFacesByUrlResponse(SdkResponse):
    """Response model for the AddFacesByUrl API (generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values are masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'face_set_id': 'str',
        'face_set_name': 'str',
        'faces': 'list[FaceSetFace]'
    }
    attribute_map = {
        'face_set_id': 'face_set_id',
        'face_set_name': 'face_set_name',
        'faces': 'faces'
    }
    def __init__(self, face_set_id=None, face_set_name=None, faces=None):
        """AddFacesByUrlResponse - a model defined in huaweicloud sdk"""
        super(AddFacesByUrlResponse, self).__init__()
        self._face_set_id = None
        self._face_set_name = None
        self._faces = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided.
        if face_set_id is not None:
            self.face_set_id = face_set_id
        if face_set_name is not None:
            self.face_set_name = face_set_name
        if faces is not None:
            self.faces = faces
    @property
    def face_set_id(self):
        """Gets the face_set_id of this AddFacesByUrlResponse.

        Face set ID. This field is absent when the call fails.

        :return: The face_set_id of this AddFacesByUrlResponse.
        :rtype: str
        """
        return self._face_set_id
    @face_set_id.setter
    def face_set_id(self, face_set_id):
        """Sets the face_set_id of this AddFacesByUrlResponse.

        Face set ID. This field is absent when the call fails.

        :param face_set_id: The face_set_id of this AddFacesByUrlResponse.
        :type: str
        """
        self._face_set_id = face_set_id
    @property
    def face_set_name(self):
        """Gets the face_set_name of this AddFacesByUrlResponse.

        Face set name. This field is absent when the call fails.

        :return: The face_set_name of this AddFacesByUrlResponse.
        :rtype: str
        """
        return self._face_set_name
    @face_set_name.setter
    def face_set_name(self, face_set_name):
        """Sets the face_set_name of this AddFacesByUrlResponse.

        Face set name. This field is absent when the call fails.

        :param face_set_name: The face_set_name of this AddFacesByUrlResponse.
        :type: str
        """
        self._face_set_name = face_set_name
    @property
    def faces(self):
        """Gets the faces of this AddFacesByUrlResponse.

        Faces in the face set; see [FaceSetFace](zh-cn_topic_0106912070.xml).
        This field is absent when the call fails.

        :return: The faces of this AddFacesByUrlResponse.
        :rtype: list[FaceSetFace]
        """
        return self._faces
    @faces.setter
    def faces(self, faces):
        """Sets the faces of this AddFacesByUrlResponse.

        Faces in the face set; see [FaceSetFace](zh-cn_topic_0106912070.xml).
        This field is absent when the call fails.

        :param faces: The faces of this AddFacesByUrlResponse.
        :type: list[FaceSetFace]
        """
        self._faces = faces
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects inside dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        # NOTE(review): depends on the third-party `simplejson` package and
        # includes a Python 2 setdefaultencoding workaround.
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AddFacesByUrlResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
db92b7df978f1d2ea37151b6a3e0b0ba066b14b1
|
0f2c708469c9ad413644636268c58d8853719489
|
/source/histogram_window.py
|
1445e4f39aa81cd83d779c0873f75a82032fd35a
|
[
"MIT"
] |
permissive
|
alex-turantsev/Misoi_kontr1
|
0f50b68d3cfc1b4cfbbe80573956c41edba82cc3
|
874a04b609eb7cfd5b7717ec4b7dd2321362855f
|
refs/heads/master
| 2021-01-11T18:17:52.378472
| 2016-10-07T12:39:59
| 2016-10-07T12:39:59
| 69,336,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,968
|
py
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import Tkinter as tk
from PIL import ImageTk, Image
import ntpath
class histogram_window(tk.Tk):
    """Tk top-level window that computes and draws an intensity histogram
    of the given image (Python 2 / Tkinter)."""
    def __init__(self, image, path):
        """image: a PIL image with RGB pixels; path: source file path
        (only its basename is shown in the window title)."""
        tk.Tk.__init__(self)
        self.root = self
        self.title('Histogram '+ntpath.basename(path))
        # NOTE(review): geometry width (530) differs from min/max size
        # (540) below — looks unintentional; confirm intended size.
        self.geometry('530x350');
        self.minsize(540, 350)
        self.maxsize(540, 350)
        intensities = self.calculate_intensities(image)
        self.draw_histogram(intensities)
        self.mainloop()
    def calculate_intensities(self,image):
        """Return a 256-bin histogram of mean-RGB intensity counts."""
        pixels = image.load()
        width, height = image.size
        intensities = [0]*256
        for i in range(width):    # for every pixel:
            for j in range(height):
                R,G,B = pixels[i,j]
                # Intensity = mean of the three channels (0..255).
                index = float(R + G + B) / float(3)
                intensities[int(index)] += 1
        return intensities
    def draw_histogram(self, data):
        """Draw one bar per intensity bin on a canvas, scaled so the
        tallest bar fills the available height."""
        c_width = 540    # canvas width
        c_height = 350   # canvas height
        c = tk.Canvas(self, width=c_width, height=c_height, bg= 'white')    # Create a canvas and use the earlier dimensions
        c.pack()
        y_gap = 3        # gap between lower canvas edge and x axis
        y_stretch = c_height -y_gap*2    # vertical drawing span; tallest bar reaches this height
        x_stretch = 1.8    # horizontal stretch so 256 bars fit the canvas
        x_width = 0.2    # width of each bar
        x_gap = 5    # gap between left canvas edge and y axis
        # Find the largest bin to normalize bar heights against.
        max_element = 0
        index = 0
        for i in range(len(data)):
            if data[i] > max_element:
                index = i
                max_element = data[i]
        for i in range(len(data)):    # A quick for loop to calculate the rectangle
            x = i    # coordinates of each bar
            y = data[i]
            x0 = x * x_stretch + x * x_width + x_gap    # bottom-left x
            y0 = c_height - (y * y_stretch/max_element + y_gap)    # top y (canvas y grows downward)
            x1 = x * x_stretch + x * x_width + x_width + x_gap    # bottom-right x
            y1 = c_height - y_gap
            # Ensure even zero-height bars are at least 1px tall.
            if int(y0) == y1:
                y0 -= 1
            c.create_rectangle(x0, y0, x1, y1, fill="grey", outline="grey")    # Draw the bar
            #c.create_text(x0+2, y0, anchor=tk.SW, text=str(y))    # Put the y value above the bar
|
[
"alex.turantsev@gmail.com"
] |
alex.turantsev@gmail.com
|
48d3d2345ecb774006b7797e6dfb19ea0489873f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/147/28536/submittedfiles/swamee.py
|
87c492832ea836815b97f42dfba4a884abe8852b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
# -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
# Pipe sizing: diameter from the Darcy-Weisbach head-loss relation, then
# Reynolds number and the Swamee-Jain explicit friction factor.
g=9.81                 # gravitational acceleration [m/s^2]
epsilon=0.000005       # pipe roughness [m]
f=float(input('digite valor de f:'))
L=float(input('digite valor de L:'))
Q=float(input('digite valor de Q:'))
deltaH=float(input('digite valor de deltaH:'))
v=float(input('digite valor de v:'))
# diameter: D = sqrt(8 f L Q^2 / (pi^2 g deltaH))
d=((8*f*L*(Q**2))/((math.pi**2)*g*deltaH))**0.5
# Reynolds number: Re = 4 Q / (pi D nu)
rey=(4*Q)/(math.pi*d*v)
# Swamee-Jain: k = 0.25 / [log10(eps/(3.7 D) + 5.74/Re^0.9)]^2
# FIX: the square belongs on the whole log10 term (it was applied only to
# the 5.74/Re^0.9 part) and the standard constant is 5.74, not 5.7.
k=0.25/(math.log10((epsilon/(3.7*d))+(5.74/(rey**0.9))))**2
print('o valor de d é %.2f' %d)
print('o valor de rey é %.2f' %rey)
print('o valor de k é %.2f' %k)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7ebfec0556e46db57e2c4d1eca4d13ef6452d0ce
|
005a6421cd6159fb6be8c61cc675654377e8f226
|
/cairis/core/TemplateObstacleParameters.py
|
dd6ad8c3e8c2351b3464509195afd601d5e88470
|
[
"Apache-2.0"
] |
permissive
|
cairis-platform/cairis
|
d667bc91ba28f0b7cd4fc88e6528eb3339e4ee6f
|
55abb93a9377664f5b03c027bad7ce3cf168c5ad
|
refs/heads/master
| 2023-04-06T17:04:08.781186
| 2023-02-17T22:51:15
| 2023-02-17T22:51:15
| 3,790,944
| 105
| 36
|
Apache-2.0
| 2022-03-19T15:04:14
| 2012-03-21T20:17:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
from . import ObjectCreationParameters
class TemplateObstacleParameters(ObjectCreationParameters.ObjectCreationParameters):
    """Value object bundling the creation parameters of a template obstacle."""

    def __init__(self,obsName,obsCat,obsDef,obsConcerns,obsResp,obsProb,obsProbRat):
        ObjectCreationParameters.ObjectCreationParameters.__init__(self)
        # Stash each constructor argument behind a read-only accessor.
        self.theName = obsName
        self.theCategory = obsCat
        self.theDefinition = obsDef
        self.theConcerns = obsConcerns
        self.theResponsibilities = obsResp
        self.theProbability = obsProb
        self.theProbabilityRationale = obsProbRat

    def name(self):
        """The obstacle's name."""
        return self.theName

    def category(self):
        """The obstacle's category."""
        return self.theCategory

    def definition(self):
        """The obstacle's definition text."""
        return self.theDefinition

    def concerns(self):
        """The obstacle's concerns."""
        return self.theConcerns

    def responsibilities(self):
        """The obstacle's responsibilities."""
        return self.theResponsibilities

    def probability(self):
        """The obstacle's probability value."""
        return self.theProbability

    def probabilityRationale(self):
        """The rationale behind the probability value."""
        return self.theProbabilityRationale
|
[
"shamal.faily@googlemail.com"
] |
shamal.faily@googlemail.com
|
e0cef3709184ba38a5b1c49088dd488ff94fe2d7
|
5a4436884af5341ce855c0e84866b972a0f61c05
|
/day4/classes/student/13.py
|
42d32a7eccc68bd225d21bb75b5d00847af1380c
|
[] |
no_license
|
sreejithev/pythoncodes
|
74a420c4f025b893e27f17ba85632a4a096f17fd
|
70df14871a9687916d1c4ada76c055607f13e8ce
|
refs/heads/master
| 2021-01-21T20:59:47.056167
| 2017-06-19T09:43:17
| 2017-06-19T09:43:17
| 92,292,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
class student:
    """A student record supporting printing and rank-based '<' comparison."""

    def __init__(self, name, age, rank):
        self.name = name
        self.age = age
        self.rank = rank

    def __str__(self):
        # One-line human-readable summary of the record.
        return 'name = %s, age = %d, rank = %d' % (self.name, self.age, self.rank)

    def __lt__(self, other):
        # Ordering is by rank number, inverted: a *larger* rank number
        # compares as less-than.
        return self.rank > other.rank
# Demo: rank-based "<" — student2 has the larger rank number, so it
# compares as less-than student1.
student1 = student('John', 20, 100)
student2 = student('Ram', 19, 120)
s = student2 < student1
# FIX: use the print() function so the demo also runs under Python 3
# (behaviour is unchanged under Python 2).
print(s)
# equivalent explicit call: s = student2.__lt__(student1)
|
[
"sreejithevwyd@gmail.com"
] |
sreejithevwyd@gmail.com
|
33986ed30f53a19439cdd7d07c782a582f0d133e
|
18d087b0fca0f80018861da6197e30d712fc248b
|
/S05/question_files/main.com.py
|
9a975a8ffc12d8004a7b40ff9d66beceaed06180
|
[] |
no_license
|
pymft/mft-05
|
6a92f3e9e9e9568b602f0de8daae310e76646fac
|
dde1ff239163123494535ab1b4c3c86c4b01599f
|
refs/heads/master
| 2020-06-03T21:56:12.289836
| 2019-08-29T13:52:02
| 2019-08-29T13:52:02
| 191,747,168
| 1
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
import glob
def path_to_root(dct, number):
    """Return the node ids on the path from the top-most ancestor down to
    *number*, using the child -> parent mapping *dct* (parent 0 marks the
    root sentinel)."""
    chain = [number]
    while dct[chain[-1]] != 0:
        chain.append(dct[chain[-1]])
    chain.reverse()
    return chain
def convert(parents):
    """Invert a child -> parent mapping into parent -> list-of-children.

    The sentinel root 0 always appears as a key; every child also gets an
    (initially empty) entry of its own."""
    children = {0: []}
    for child in parents:
        children[child] = []
    for child, parent in parents.items():
        children[parent].append(child)
    return children
def find_no_children_nodes(parent_to_children):
    """Return the ids of leaf nodes — keys whose children list is empty."""
    return [node for node, kids in parent_to_children.items() if kids == []]
# Each ./files/<child>.txt contains the parent id of node <child>; build the
# child -> parent map from those files.
child_to_parent = {}
list_of_files = glob.glob('./files/*.txt')
for f in list_of_files:
    # assumes paths look like './files/<id>.txt' — strip prefix and '.txt'
    child = f[8:-4]
    parent = open(f).read()
    child = int(child)
    parent = int(parent)
    child_to_parent[child] = parent
parent_to_children = convert(child_to_parent)
print(child_to_parent)
print(parent_to_children)
# Longest root-to-leaf path: try every leaf and keep the deepest chain.
max_path = []
for node in find_no_children_nodes(parent_to_children):
    path = path_to_root(child_to_parent, node)
    if len(path) > len(max_path):
        max_path = path
print(path_to_root(child_to_parent, 6638932548))
print(max_path)
|
[
"naeini.v@gmail.com"
] |
naeini.v@gmail.com
|
1e7f94b39fd5b2962a68f37cebd211a87940d2eb
|
da5aba84c12fe9fcb6c2743295977f6efa3f3198
|
/ManacherPalindromicTest.py
|
61474f7f9e7e8e455b5eacbc2c7e3df69e920b9a
|
[] |
no_license
|
JayFu/learningnote
|
e123dd465f3caf1bd1a18cb9056dad37c7e899d0
|
c2c0e819f59754d5dbe67d6e22cd011f899a6106
|
refs/heads/master
| 2021-01-19T07:06:52.144444
| 2018-07-16T04:00:23
| 2018-07-16T04:00:23
| 87,522,721
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
# -*- coding: utf-8 -*-
s = 'ababa'
import pdb
# THE_ANSWER = 42
# T = [THE_ANSWER]
# for c in s:
# T.append(c)
# T.append(THE_ANSWER)
# m = 0
# c, r, size = 1, 2, len(T)
# P = [0, 1] + [None] * (size-2)
# maxIndex , maxCount = 0, 1
# for i in range(2, size):
# print(P)
# print(i, c, m, r)
# m = c*2 - i
# if r > i and P[m] < r-i:
# P[i] = P[m]
# print('case1')
# continue
# count = min(i, size-i-1)
# for n in range((1 if r <= i else r + 1 - i), count + 1):
# if T[i+n] != T[i - n]:
# count = n - 1
# print('case2')
# break
# c = i
# r = i + count
# P[i] = count
# if count > maxCount:
# maxCount = count
# maxIndex = i - count
# maxIndex = maxIndex // 2
# print(s[maxIndex:maxIndex+maxCount])
# Preprocess: interleave '#' so even- and odd-length palindromes are
# handled uniformly by Manacher's algorithm.
s='#'+'#'.join(s)+'#'
RL=[0]*len(s)   # RL[i]: palindrome "radius" (including center) around i
MaxRight=0      # rightmost boundary reached by any palindrome so far
pos=0           # center of the palindrome reaching MaxRight
MaxLen=0
for i in range(len(s)):
    print(RL)
    print(i, MaxRight, RL[i], pos)
    # pdb.set_trace()
    if i<MaxRight:
        # reuse the mirror position's radius, clipped to the known boundary
        RL[i]=min(RL[2*pos-i], MaxRight-i)
    else:
        RL[i]=1
    # try to extend the palindrome; mind the string boundaries
    while i-RL[i]>=0 and i+RL[i]<len(s) and s[i-RL[i]]==s[i+RL[i]]:
        RL[i]+=1
    # update MaxRight and pos
    if RL[i]+i-1>MaxRight:
        MaxRight=RL[i]+i-1
        pos=i
    # track the longest palindrome seen so far
    # MaxLen=max(MaxLen, RL[i])
    if RL[i] > MaxLen:
        # NOTE(review): this slice looks off — the palindrome centered at i
        # spans s[i-RL[i]+1 : i+RL[i]]; dividing the radius by 2 can
        # truncate the extracted substring. Confirm against known inputs.
        OutPalStr = s[i - RL[i] // 2:i + RL[i] //2]
        print(i)
        MaxLen = max(MaxLen, RL[i])
OutPalStr = OutPalStr.replace("#", "")
print(OutPalStr)
|
[
"382603080@qq.com"
] |
382603080@qq.com
|
9a9b569145dc076f76a7d6fdce9825a684656da8
|
edf8c5d748c8dd495a6173f355b7ba7bb6a0e662
|
/results-combinations/Subplot_gradient_density_isolation.py
|
3ae7eb0399d364cd55e0dbc95a6d0e8a78544e54
|
[] |
no_license
|
mdodovic/Flybys-galactic-simulation
|
66de7f2cc5e805db800bd911d9cc21ba7f8d0c67
|
78d11f8dda261d21c97575910b4d916ba48fd8a8
|
refs/heads/master
| 2022-12-16T22:35:53.563458
| 2020-09-22T09:01:05
| 2020-09-22T09:01:05
| 296,823,203
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import math as math

# Plot the density-gradient-vs-angle profiles of the isolated M31 run at
# three snapshots (000, 050, 100) as a 1x3 row of subplots.
putanja_load = "C:/Users/matij/Desktop/Kodovi_za_analizu/rezultati/M31_izolacija/gradijent/1FI_RO_"
putanja_sacuvaj = "C:/Users/matij/Desktop/Kodovi_za_analizu/rezultati/Gradijenti_gustine/"
ugao_0, gradijent_0 = np.loadtxt(putanja_load + str(0).zfill(3) + ".txt", unpack = True)
ugao_50, gradijent_50 = np.loadtxt(putanja_load + str(50).zfill(3) + ".txt", unpack = True)
ugao_100, gradijent_100 = np.loadtxt(putanja_load + str(100).zfill(3) + ".txt", unpack = True)
# 3 in row
f, axarr = plt.subplots(1, 3, figsize=(20, 4))
f.subplots_adjust(hspace=0.2, wspace = 0.2, left = 0.05, right=0.95, bottom = 0.15 , top = 0.9)
# panel 1: snapshot 000 (only this panel carries the y label)
axarr[0].plot(ugao_0,gradijent_0,c='black')
axarr[0].set_xlabel(r'$\alpha$ [ $^\circ$ ]',fontsize=16)
axarr[0].set_ylabel(r'$\Delta\rho [ 10^7 M_dot/kpc^2]$',fontsize=16)
axarr[0].set_xlim(0,360)
# panel 2: snapshot 050
axarr[1].plot(ugao_50,gradijent_50,c='black')
axarr[1].set_xlabel(r'$\alpha$ [ $^\circ$ ]',fontsize=16)
axarr[1].set_xlim(0,360)
# panel 3: snapshot 100
# FIX: this panel plotted the 050 snapshot a second time even though the
# 100 snapshot was loaded above; use the _100 arrays.
axarr[2].plot(ugao_100,gradijent_100,c='black')
axarr[2].set_xlabel(r'$\alpha$ [ $^\circ$ ]',fontsize=16)
axarr[2].set_xlim(0,360)
# Hide y tick labels on panels 2 and 3 (they mirror panel 1's axis).
plt.setp([a.get_yticklabels() for a in axarr[1:3]], visible=False)
plt.savefig(putanja_sacuvaj + "izolacija_graijent_sub3",dpi=90)
plt.savefig(putanja_sacuvaj + "izolacija_graijent_sub3.eps",dpi=90)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d4ae837f8ee5ab3da832697da7fc9e396c228f27
|
3c9cba5c6b94300d97486408b79ddd85112599f9
|
/examples/esp32spi_wpa2ent_simpletest.py
|
d8d2a143992eadbf92258198a560a27e3cbd44bc
|
[
"MIT"
] |
permissive
|
lemonkey/Adafruit_CircuitPython_ESP32SPI
|
cd6b77dd77c54a35e5742316cae69009acfb373e
|
85248eff0727172ef3c656fb916f670d6a2264ea
|
refs/heads/master
| 2020-06-17T21:08:25.347921
| 2019-07-18T21:43:14
| 2019-07-18T21:43:14
| 196,055,225
| 0
| 0
|
MIT
| 2019-07-09T17:38:32
| 2019-07-09T17:38:32
| null |
UTF-8
|
Python
| false
| false
| 3,335
|
py
|
# Example code implementing WPA2 Enterprise mode
#
# This code requires firmware version 1.3.0, or newer, running
# on the ESP32 WiFi co-processor. The latest firmware, and wiring
# info if you are using something other than a PyPortal, can be found
# in the Adafruit Learning System:
# https://learn.adafruit.com/adding-a-wifi-co-processor-to-circuitpython-esp8266-esp32/firmware-files#esp32-only-spi-firmware-3-8
import re
import time
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
import adafruit_esp32spi.adafruit_esp32spi_requests as requests
# Version number comparison code. Credit to gnud on stackoverflow
# (https://stackoverflow.com/a/1714190), swapping out cmp() to
# support Python 3.x and thus, CircuitPython
def version_compare(version1, version2):
    """Three-way compare of dotted version strings.

    Trailing ".0" groups are ignored ("1.3" equals "1.3.0"). Returns a
    negative, zero, or positive integer when version1 is older than, equal
    to, or newer than version2. (Credit: gnud on stackoverflow, adapted to
    drop cmp() for Python 3 / CircuitPython.)
    """
    def as_numbers(version):
        trimmed = re.sub(r'(\.0+)*$', '', version)
        return [int(part) for part in trimmed.split(".")]

    lhs = as_numbers(version1)
    rhs = as_numbers(version2)
    if lhs > rhs:
        return 1
    if lhs < rhs:
        return -1
    return 0
print("ESP32 SPI WPA2 Enterprise test")
# For running on the PyPortal, use this block
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
# For a board that doesn't have the ESP pin definitions, use this block and
# set the pins as needed.
#esp32_cs = DigitalInOut(board.D8)
#esp32_ready = DigitalInOut(board.D5)
#esp32_reset = DigitalInOut(board.D7)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
# Route the HTTP requests helper through the ESP32 co-processor.
requests.set_interface(esp)
if esp.status == adafruit_esp32spi.WL_IDLE_STATUS:
    print("ESP32 found and in idle mode")
# Get the ESP32 fw version number, remove trailing byte off the returned bytearray
# and then convert it to a string for prettier printing and later comparison
firmware_version = ''.join([chr(b) for b in esp.firmware_version[:-1]])
print("Firmware vers.", firmware_version)
print("MAC addr:", [hex(i) for i in esp.MAC_address])
# WPA2 Enterprise support was added in fw ver 1.3.0. Check that the ESP32
# is running at least that version, otherwise, bail out
assert version_compare(firmware_version, "1.3.0") >= 0, (
    "Incorrect ESP32 firmware version; >= 1.3.0 required.")
# Set up the SSID you would like to connect to
# Note that we need to call wifi_set_network prior
# to calling wifi_set_enable.
esp.wifi_set_network(b'YOUR_SSID_HERE')
# If your WPA2 Enterprise network requires an anonymous
# identity to be set, you may set that here
esp.wifi_set_entidentity(b'')
# Set the WPA2 Enterprise username you'd like to use
# NOTE(review): the credentials below are placeholders — replace them, and
# avoid committing real credentials to source control.
esp.wifi_set_entusername(b'MY_USERNAME')
# Set the WPA2 Enterprise password you'd like to use
esp.wifi_set_entpassword(b'MY_PASSWORD')
# Once the network settings have been configured,
# we need to enable WPA2 Enterprise mode on the ESP32
esp.wifi_set_entenable()
# Wait for the network to come up
print("Connecting to AP...")
# Blocking poll: print a dot every 2 s until the ESP32 reports a connection.
while not esp.is_connected:
    print(".", end = "")
    time.sleep(2)
print("")
print("Connected to", str(esp.ssid, 'utf-8'), "\tRSSI:", esp.rssi)
print("My IP address is", esp.pretty_ip(esp.ip_address))
# Simple sanity checks through the co-processor: DNS lookup and ICMP ping.
print("IP lookup adafruit.com: %s" % esp.pretty_ip(esp.get_host_by_name("adafruit.com")))
print("Ping google.com: %d ms" % esp.ping("google.com"))
print("Done!")
|
[
"docmollo@gmail.com"
] |
docmollo@gmail.com
|
374356a21b4334113e480f33ccacdbbfdf194a45
|
f9b0e99cc026cb8e0adf4e33f935d6f3b21c861d
|
/script/mpd/old/mpd_album_art.py
|
1979e7f00d3e438d370524b89c0f070272455f80
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
DamienRobert/dotfiles
|
66fec5eee5fc83d3a59f1c41bc96ad2b11dcd57b
|
3a2beac107a9b4a0e04770fccdafbf834c5126a7
|
refs/heads/master
| 2021-05-15T02:19:44.166903
| 2020-03-13T16:45:20
| 2020-03-13T16:45:20
| 12,380,199
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,231
|
py
|
#! /usr/bin/env python3
# From: https://github.com/jameh/mpd-album-art
# Modifications by Damien Robert
# MPD Album Art
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
:copyright: 2013 Jamie Macdonald
:license: GNU General Public License (GPL) version 3
.. testsetup::
from mpd_album_art import Grabber
"""
from pylast import LastFMNetwork, AlbumSearch
from mpd2 import MPDClient, socket
import os, sys
try:
# Python 3
from urllib.request import urlretrieve
except ImportError:
# Python 2
from urllib import urlretrieve
_image_extensions = (".jpg", ".jpeg", ".png")
"""Extensions to qualify a file as an image"""
_last_fm_api_key = "6b6d8768f0c41f7e82da0de44e1db76a"
"""LastFM API key for the application"""
class Grabber(object):
    """Fetches album artwork for MPD songs, either from the song's own
    folder or from Last.FM, caching downloads under ``save_dir``.

    :param str save_dir:
        Directory into which Grabber should download new images.
    :param str library_dir:
        Directory MPD is currently playing from (defaults to ``$HOME/Music``).
    """
    def __init__(self, save_dir, library_dir=None):
        super(Grabber, self).__init__()
        self.save_dir = save_dir
        if library_dir is None:
            library_dir = os.path.join(os.environ['HOME'], 'Music')
        self.library_dir = library_dir

    def get_art(self, song):
        """Get artwork from LastFM.

        Before touching the network, any file in ``save_dir`` whose name
        starts with the sanitized artist/album key is returned as a cache
        hit. Existing files are never overwritten.

        :param dict song:
            Mapping with ``'artist'`` and ``'album'`` keys (e.g. the dict
            returned by ``MPDClient.currentsong()``).
        :return:
            Local file path to the image for ``song``, or ``None`` if the
            download failed.
        """
        album_tofind = _sanitize(song['artist'], song['album'])
        cached = [n for n in os.listdir(self.save_dir)
                  if n.startswith(album_tofind)]
        if cached != []:
            file_path = os.path.join(self.save_dir, cached[0])
            sys.stderr.write("Found {}\n".format(file_path))
            return file_path
        # Define the search network compatible with LastFM API
        network = LastFMNetwork(api_key = _last_fm_api_key)
        album_search = AlbumSearch(song['album'], network)
        if album_search.get_total_result_count() == None:
            # Last.FM does not know this album: cache that fact by
            # symlinking the key to the generic unknown.png placeholder.
            sys.stderr.write("Last.FM: no results\n")
            unknown = os.path.join(self.save_dir, 'unknown.png')
            album_tofind += ".png"
            link_path = os.path.join(self.save_dir, album_tofind)
            os.symlink(unknown, link_path)
            # FIX: return the full path like every other branch does;
            # previously only the bare file name was returned.
            return link_path
        # At least one result exists — take the first hit ("I'm feeling
        # lucky" approach).
        album = album_search.get_next_page()[0]
        # URL of the album art from the pylast AlbumSearch result
        img_url = album.get_cover_image()
        file_path = os.path.join(self.save_dir,
                                 self._get_save_name(song, img_url))
        # Never overwrite a file that already exists.
        if os.path.isfile(file_path):
            sys.stderr.write("Last.FM: we already had the album {}!\n".format(file_path))
            return file_path
        try:
            # Download the image
            urlretrieve(img_url, file_path)
        except Exception as e:
            # FIX: was `except e:`, which raises NameError at runtime
            # instead of catching the download error; also str() the
            # exception before concatenating with a string.
            sys.stderr.write(str(e) + "\n")
            return None
        sys.stderr.write("Last.FM: found {}\n".format(file_path))
        return file_path

    def get_local_art(self, song):
        """Get artwork from the ``song``'s own folder.

        :param dict song:
            Mapping with a ``'file'`` key holding the song's path relative
            to the music library.
        :return:
            Path to the largest image file found next to the song, or
            ``None`` if the folder contains no images.
        """
        song_folder = os.path.dirname(os.path.join(self.library_dir,
                                                   song['file']))
        images = self._get_images_from_folder(song_folder)
        if images == []:
            sys.stderr.write("No local results from {}\n".format(song_folder))
            return None
        # Pick the largest file — presumably the best-quality cover.
        images = [os.path.join(song_folder, i) for i in images]
        file_path = max(images, key=os.path.getsize)
        sys.stderr.write('Found image in {}.\n'.format(song_folder))
        return file_path

    def _get_save_name(self, song, img_url=None):
        """Return the sanitized cache file name for ``song``.

        When ``img_url`` is given, its basename is appended so the original
        file extension is preserved.
        """
        # (removed an unused local that duplicated the artist/album string)
        extra = ""
        if img_url:
            extra = img_url.split("/")[-1]
        return _sanitize(song['artist'], song['album'], extra)

    def _get_images_from_folder(self, folder):
        """List file names in ``folder`` that end with a known image
        extension (see module-level ``_image_extensions``)."""
        return [f for f in os.listdir(folder) if f.endswith(_image_extensions)]
def _sanitize(artist,album,extra=""):
"""Replace occurences of ``disallowed_characters`` in name with
underscores and double quotation marks with single quotation marks.
"""
name="#"+artist+"#"+album+"#"+extra
disallowed_characters = r",\/:<>?*| "
for character in disallowed_characters:
name = name.replace(character, '_')
# Replace " with '
return name.replace('"', "'")
if __name__ == '__main__':
    import argparse, os
    home_dir = os.environ['HOME']
    # MPD_HOST may hold a hostname or a unix-socket path; fall back to the
    # user's default socket location when it is unset.
    if 'MPD_HOST' in os.environ:
        mpd_host = os.environ['MPD_HOST']
    else:
        mpd_host = '/run/user/1000/mpd_sock'
    parser = argparse.ArgumentParser()
    #parser.add_argument('-n', '--hostname', type=str, default='localhost')
    parser.add_argument('-n', '--hostname', type=str, default=mpd_host)
    parser.add_argument('-p', '--port', type=int, default=6600)
    parser.add_argument('-m', '--music_dir', type=str,
                        default=os.path.join(home_dir, '.mpd/music/'))
    parser.add_argument('-a', '--art_dir', type=str,
                        default=os.path.join(home_dir, '.mpd/music/.covers'))
    args = parser.parse_args()
    # initialize MPD client
    mpd_client = MPDClient()
    grabber = Grabber(save_dir=args.art_dir, library_dir=args.music_dir)
    try:
        # connect client to MPD server
        mpd_client.connect(args.hostname, args.port)
    except socket.error:
        # Cannot connect
        sys.stderr.write('MPD not running?'+'\n')
        sys.exit(1)
    current_song = mpd_client.currentsong()
    if current_song == {}:
        # No song is playing: emit an empty line so callers still get output.
        print("")
    # try local pics
    else:
        # Prefer artwork stored next to the music file; fall back to Last.FM.
        art=grabber.get_local_art(current_song)
        if art is not None:
            print(art)
        else:
            # try lastFM pics
            art=grabber.get_art(current_song)
            if art is not None:
                print(art)
    # done
    mpd_client.disconnect()
    sys.exit(0)
|
[
"damien.olivier.robert+git@gmail.com"
] |
damien.olivier.robert+git@gmail.com
|
071d6d5852eff59b3a1dc1dfe98706254fa481ae
|
901944f407f4a06a4c4027d6139ce21165976857
|
/RL4/rl_mar2018_99_stableversion_andimplicit/train4.py
|
0e03a42afd50da8c62faf9a50b887d5278e716f9
|
[] |
no_license
|
chriscremer/Other_Code
|
a406da1d567d63bf6ef9fd5fbf0a8f177bc60b05
|
7b394fa87523803b3f4536b316df76cc44f8846e
|
refs/heads/master
| 2021-01-17T02:34:56.215047
| 2020-05-26T13:59:05
| 2020-05-26T13:59:05
| 34,680,279
| 7
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,372
|
py
|
import os
from os.path import expanduser
home = expanduser("~")
import sys
# Drop the first sys.path entry pointing into '.../er/Documents' so a local
# working copy there does not shadow installed packages. Only the first
# match is removed (the loop breaks immediately after removing it).
for i in range(len(sys.path)):
    if 'er/Documents' in sys.path[i]:
        sys.path.remove(sys.path[i])#[i]
        break
import copy
import glob
import os
import time
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
sys.path.insert(0, '../baselines/')
sys.path.insert(0, '../baselines/baselines/common/vec_env')
from subproc_vec_env import SubprocVecEnv
sys.path.insert(0, './utils/')
from envs import make_env, make_env_monitor, make_env_basic
# from agent_modular2 import a2c
# from agent_modular2 import ppo
# from agent_modular2 import a2c_minibatch
# from agent_modular2 import a2c_list_rollout
# from agent_modular2 import a2c_with_var
from a2c_agents import a2c
from train_utils import do_vid, do_gifs, do_params, do_ls, update_ls_plot, save_params_v2, load_params_v2
sys.path.insert(0, './visualizations/')
from make_plots import make_plots
import argparse
import json
import subprocess
from discriminator import CNN_Discriminator
from discrim_preds import discrim_predictions
def train(model_dict):
def update_current_state(current_state, state, channels):
# current_state: [processes, channels*stack, height, width]
state = torch.from_numpy(state).float() # (processes, channels, height, width)
# if num_stack > 1:
#first stack*channel-channel frames = last stack*channel-channel , so slide them forward
current_state[:, :-channels] = current_state[:, channels:]
current_state[:, -channels:] = state #last frame is now the new one
return current_state
def update_rewards(reward, done, final_rewards, episode_rewards, current_state):
# Reward, Done: [P], [P]
# final_rewards, episode_rewards: [P,1]. [P,1]
# current_state: [P,C*S,H,W]
reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float() #[P,1]
episode_rewards += reward #keeps track of current episode cumulative reward
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) #[P,1]
final_rewards *= masks #erase the ones that are done
final_rewards += (1 - masks) * episode_rewards #set it to the cumulative episode reward
episode_rewards *= masks #erase the done ones
masks = masks.type(dtype) #cuda
if current_state.dim() == 4: # if state is a frame/image
current_state *= masks.unsqueeze(2).unsqueeze(2) #[P,1,1,1]
else:
current_state *= masks #restart the done ones, by setting the state to zero
return reward, masks, final_rewards, episode_rewards, current_state
num_frames = model_dict['num_frames']
cuda = model_dict['cuda']
which_gpu = model_dict['which_gpu']
num_steps = model_dict['num_steps']
num_processes = model_dict['num_processes']
seed = model_dict['seed']
env_name = model_dict['env']
save_dir = model_dict['save_to']
num_stack = model_dict['num_stack']
algo = model_dict['algo']
save_interval = model_dict['save_interval']
log_interval = model_dict['log_interval']
save_params = model_dict['save_params']
vid_ = model_dict['vid_']
gif_ = model_dict['gif_']
ls_ = model_dict['ls_']
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = str(which_gpu)
if cuda:
torch.cuda.manual_seed(seed)
dtype = torch.cuda.FloatTensor
model_dict['dtype']=dtype
else:
torch.manual_seed(seed)
dtype = torch.FloatTensor
model_dict['dtype']=dtype
# Create environments
print (num_processes, 'processes')
monitor_rewards_dir = os.path.join(save_dir, 'monitor_rewards')
if not os.path.exists(monitor_rewards_dir):
os.makedirs(monitor_rewards_dir)
print ('Made dir', monitor_rewards_dir)
envs = SubprocVecEnv([make_env(env_name, seed, i, monitor_rewards_dir) for i in range(num_processes)])
if vid_:
print ('env for video')
envs_video = make_env_monitor(env_name, save_dir)
if gif_:
print ('env for gif')
envs_gif = make_env_basic(env_name)
if ls_:
print ('env for ls')
envs_ls = make_env_basic(env_name)
obs_shape = envs.observation_space.shape # (channels, height, width)
obs_shape = (obs_shape[0] * num_stack, *obs_shape[1:]) # (channels*stack, height, width)
shape_dim0 = envs.observation_space.shape[0] #channels
model_dict['obs_shape']=obs_shape
model_dict['shape_dim0']=shape_dim0
# print (envs.action_space)
# print (envs.action_space.shape)
action_size = envs.action_space.shape[0]
print (obs_shape)
print(action_size)
fasd
if action_size == 1:
action_size = 2
# model_dict['action_size'] = envs.action_space.n
model_dict['action_size'] = action_size
# Create agent
if algo == 'a2c':
agent = a2c(envs, model_dict)
print ('init a2c agent')
elif algo == 'ppo':
agent = ppo(envs, model_dict)
print ('init ppo agent')
elif algo == 'a2c_minibatch':
agent = a2c_minibatch(envs, model_dict)
print ('init a2c_minibatch agent')
elif algo == 'a2c_list_rollout':
agent = a2c_list_rollout(envs, model_dict)
print ('init a2c_list_rollout agent')
elif algo == 'a2c_with_var':
agent = a2c_with_var(envs, model_dict)
print ('init a2c_with_var agent')
# elif algo == 'a2c_bin_mask':
# agent = a2c_with_var(envs, model_dict)
# print ('init a2c_with_var agent')
# agent = model_dict['agent'](envs, model_dict)
#Load model
if model_dict['load_params']:
# agent.actor_critic = torch.load(os.path.join(args.load_path))
# agent.actor_critic = torch.load(args.load_path).cuda()
if model_dict['load_params_implicit']:
# load_params_v2(home+'/Documents/tmp/confirm_implicit_works3/BreakoutNoFrameskip-v4/A2C_Implicit/seed0/', agent, 5500160, model_dict)
# load_params_v2(home+'/Documents/tmp/confirm_works_1_withsaving/PongNoFrameskip-v4/a2c/seed0/', agent, 8000160, model_dict)
# print ('loaded ', args.load_path)
if model_dict['load_number'] == 1:
# load_params_v2(home+'/Documents/tmp/confirm_works_1_withsaving/PongNoFrameskip-v4/a2c/seed0/', agent, 3000160, model_dict)
load_params_v2(home+'/Documents/tmp/confirm_implicit_works3/BreakoutNoFrameskip-v4/A2C_Implicit/seed0/', agent, 1000160, model_dict)
elif model_dict['load_number'] == 3:
# load_params_v2(home+'/Documents/tmp/confirm_works_1_withsaving/PongNoFrameskip-v4/a2c/seed0/', agent, 6000160, model_dict)
load_params_v2(home+'/Documents/tmp/confirm_implicit_works3/BreakoutNoFrameskip-v4/A2C_Implicit/seed0/', agent, 3000160, model_dict)
elif model_dict['load_number'] == 5:
# load_params_v2(home+'/Documents/tmp/confirm_works_1_withsaving/PongNoFrameskip-v4/a2c/seed0/', agent, 9000160, model_dict)
load_params_v2(home+'/Documents/tmp/confirm_implicit_works3/BreakoutNoFrameskip-v4/A2C_Implicit/seed0/', agent, 5000160, model_dict)
# else:
# load_params_v2(home+'/Documents/tmp/confirm_works_1_withsaving/PongNoFrameskip-v4/a2c/seed0/', agent, 8000160, model_dict)
else:
PROBLEM
if model_dict['implicit']:
action_predictor = CNN_Discriminator(model_dict).cuda()
print ('init action_predictor')
# Init state
state = envs.reset() # (processes, channels, height, width)
current_state = torch.zeros(num_processes, *obs_shape) # (processes, channels*stack, height, width)
current_state = update_current_state(current_state, state, shape_dim0).type(dtype) #add the new frame, remove oldest
agent.insert_first_state(current_state) #storage has states: (num_steps + 1, num_processes, *obs_shape), set first step
# These are used to compute average rewards for all processes.
episode_rewards = torch.zeros([num_processes, 1]) #keeps track of current episode cumulative reward
final_rewards = torch.zeros([num_processes, 1])
num_updates = int(num_frames) // num_steps // num_processes
save_interval_num_updates = int(save_interval /num_processes/num_steps)
#Begin training
# count =0
start = time.time()
start2 = time.time()
for j in range(num_updates):
for step in range(num_steps):
# Act, [P,1], [P], [P,1], [P]
# value, action = agent.act(Variable(agent.rollouts.states[step], volatile=True))
value, action, action_log_probs, dist_entropy = agent.act(Variable(agent.rollouts.states[step]))#, volatile=True))
# print (action_log_probs.size())
# print (dist_entropy.size())
cpu_actions = action.data.squeeze(1).cpu().numpy() #[P]
# cpu_actions = action.data.cpu().numpy() #[P]
# print (actions.size())
# Step, S:[P,C,H,W], R:[P], D:[P]
state, reward, done, info = envs.step(cpu_actions)
# Record rewards and update state
reward, masks, final_rewards, episode_rewards, current_state = update_rewards(reward, done, final_rewards, episode_rewards, current_state)
current_state = update_current_state(current_state, state, shape_dim0)
# Agent record step
# agent.insert_data(step, current_state, action.data, value.data, reward, masks, action_log_probs.data, dist_entropy.data)
agent.insert_data(step, current_state, action.data, value, reward, masks, action_log_probs, dist_entropy) #, done)
#Optimize agent
if model_dict['implicit']:
discrim_errors = discrim_predictions(model_dict, agent.rollouts, action_predictor)
discrim_errors_reverse = discrim_predictions(model_dict, agent.rollouts, action_predictor, reverse=True)
#Optimize action_predictor
action_predictor.optimize(discrim_errors)
#Optimize agent
agent.update2(discrim_errors, discrim_errors_reverse) #agent.update(j,num_updates)
# #Old
else:
agent.update() #agent.update(j,num_updates)
agent.insert_first_state(agent.rollouts.states[-1])
# print ('save_interval_num_updates', save_interval_num_updates)
# print ('num_updates', num_updates)
# print ('j', j)
total_num_steps = (j + 1) * num_processes * num_steps
# if total_num_steps % save_interval == 0 and save_dir != "":
if j % save_interval_num_updates == 0 and save_dir != "" and j != 0:
#Save model
if save_params:
do_params(save_dir, agent, total_num_steps, model_dict)
save_params_v2(save_dir, agent, total_num_steps, model_dict)
#make video
if vid_:
do_vid(envs_video, update_current_state, shape_dim0, dtype, agent, model_dict, total_num_steps)
#make gif
if gif_:
do_gifs(envs_gif, agent, model_dict, update_current_state, update_rewards, total_num_steps)
#Print updates
if j % log_interval == 0:# and j!=0:
end = time.time()
to_print_info_string = "{}, {}, {:.1f}/{:.1f}/{:.1f}/{:.1f}, {}, {:.1f}, {:.1f}".format(j, total_num_steps,
final_rewards.min(),
final_rewards.median(),
final_rewards.mean(),
final_rewards.max(),
int(total_num_steps / (end - start)),
end - start,
end - start2)
print(to_print_info_string)
start2 = time.time()
to_print_legend_string = "Upts, n_timesteps, min/med/mean/max, FPS, Time"
if j % (log_interval*30) == 0:
if ls_:
do_ls(envs_ls, agent, model_dict, total_num_steps, update_current_state, update_rewards)
# print("Upts, n_timesteps, min/med/mean/max, FPS, Time, Plot updated, LS updated")
# print(to_print_info_string + ' LS recorded')#, agent.current_lr)
# else:
#update plots
try:
if ls_:
update_ls_plot(model_dict)
make_plots(model_dict)
print(to_print_legend_string + " Plot updated")
except:
raise #pass
print(to_print_legend_string)
try:
make_plots(model_dict)
except:
print ()
# pass #raise
if __name__ == "__main__":
    # Entry point: --m is the path to a JSON experiment-config file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--m')
    args = parser.parse_args()
    # Load the model/experiment configuration dict from the JSON file.
    with open(args.m, 'r') as infile:
        model_dict = json.load(infile)
    train(model_dict)
|
[
"chris.a.cremer@gmail.com"
] |
chris.a.cremer@gmail.com
|
d251d96aa118620cf5b52cd71eb9e82cbb437e15
|
8a41a7f9340cfa784cb36d35dca1ecb1630e4097
|
/Programming/Python/TestFrameworks/pytest_practice/test_pytest_requests.py
|
f15e998dc821c7bb5c3b6d0ca5291bb0d4f895cd
|
[] |
no_license
|
anishst/Learn
|
02e6b6cce43cf21621d328ef0fc25168267a9a3d
|
a1aed8b78b19acdb23e20be57b67fb242e0aefc5
|
refs/heads/master
| 2022-05-13T10:17:40.293640
| 2022-03-30T12:44:21
| 2022-03-30T12:44:21
| 173,595,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
# http://pythontesting.net/framework/pytest/pytest-fixtures-nuts-bolts/#scope
import pytest
@pytest.fixture()
def my_fixture(request):
    """Print details about the requesting test, then xfail test_three.

    Demonstrates the introspection attributes available on the fixture
    `request` object (fixturename, scope, function, cls, module, fspath).
    """
    details = (
        '\n-----------------',
        'fixturename : %s' % request.fixturename,
        'scope       : %s' % request.scope,
        'function    : %s' % request.function.__name__,
        'cls         : %s' % request.cls,
        'module      : %s' % request.module.__name__,
        'fspath      : %s' % request.fspath,
        '-----------------',
    )
    print('\n'.join(details))
    # Dynamically mark test_three as an expected failure.
    if request.function.__name__ == 'test_three':
        request.applymarker(pytest.mark.xfail)
def test_one(my_fixture):
    """Plain test function; requesting my_fixture prints its context first."""
    print('test_one():')
class TestClass():
    """Shows the fixture also introspects tests grouped in a class (request.cls)."""
    def test_two(self, my_fixture):
        print('test_two()')
def test_three(my_fixture):
    """Deliberately failing test; my_fixture applies an xfail marker to it."""
    print('test_three()')
    assert False
|
[
"sebastian_anish@bah.com"
] |
sebastian_anish@bah.com
|
af8494e947871f48f118b9641e19590b73ad4e2b
|
b9b495a55e55f4c227aedfd55c6e409503353991
|
/simulation.py
|
9dc68c46b0b264421cd09eec4f62ffa6f094799d
|
[] |
no_license
|
Pontiky/path-following-mobile-robot
|
6e5f0bbbe9b83bef34e970577c9a26a18a1e6b00
|
04a35c6b83f131fdf1912158e870d9aea040b43a
|
refs/heads/main
| 2023-02-10T10:34:25.959747
| 2021-01-07T22:13:17
| 2021-01-07T22:13:17
| 322,663,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,257
|
py
|
from suiveur import Suiveur
from cible import Cible
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from collections import namedtuple
from threading import Thread
import time # Log temporel
# Shared state between the simulation thread and the animation callbacks.
temps = []                          # time axis samples (one every 0.5 s)
erreurs = [ [[], []], [[], []] ]    # 2x2 error series: [[position, angle], [X, Y]]
lyapunov = []
n_err, n_sim = 0, 0                 # recorded error samples / simulation steps
close = False                       # set True when a figure window is closed
pause = True                        # simulation starts paused (space toggles)
# Figure keyboard/close event handlers
def onPress(event):
    """Key-press handler: toggle pause, steer the target, or spawn robots."""
    global pause
    key = event.key
    if key == ' ':
        pause = not pause
    elif key in ('left', 'q'):
        cible.leftKey = True
    elif key in ('right', 'd'):
        cible.rightKey = True
    elif key == 'o':
        # Spawn an extra target at the current target's pose.
        Cible(cible._x, cible._y, cible._theta, ax, 'k', 1, 2, Te, "ligne")
    elif key == 'p':
        # Spawn an extra follower at the current follower's pose.
        Suiveur(suiveur._x, suiveur._y, suiveur._theta, ax, 'r', cible)
def onRelease(event):
    """Key-release handler: stop steering the target when keys are released."""
    key = event.key
    if key in ('left', 'q'):
        cible.leftKey = False
    elif key in ('right', 'd'):
        cible.rightKey = False
def stop(event):
    """Close-event handler: flag shutdown for the sim thread and close all figures."""
    global close
    close = True
    plt.close('all')
# Initialise the simulation window and axes
fig = plt.figure(figsize=(10, 10))
fig.canvas.mpl_connect('key_press_event', onPress)
fig.canvas.mpl_connect('key_release_event', onRelease)
fig.canvas.mpl_connect('close_event', stop)
# Disable matplotlib's default key bindings so ours take over.
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
ax = plt.axes(xlim=(0, 6), ylim=(0, 6))
ax.grid()
# Simulation update loop
def simulation():
    """Background-thread loop: advance both robots every Te seconds.

    Honours the global `pause`/`close` flags, records an error sample every
    0.5 s of simulated time, and optionally logs to erreurs.txt (`save`) and
    traces trajectories on the main axes (`trace`).
    """
    global pause, close, n_sim, Te
    t0 = time.perf_counter()
    if save:
        errFile = open("erreurs.txt", 'w')
        errFile.write(str(cible.getX())+'\t'+str(cible.getY())+'\t'+str(cible.getTheta())+'\t'+str(suiveur.getX())+'\t'+str(suiveur.getY())+'\t'+str(suiveur.getTheta())+'\t0\t0\t0\t0\t0\t0\t0\t0\n')
    while not close:
        time.sleep(Te)
        if not pause:
            errList = suiveur.handleErrors()
            if save: errFile.write(str(n_sim*Te)+'\t'+str(errList.XY)+'\t'+str(errList.TH)+'\t'+str(errList.X)+'\t'+str(errList.Y)+'\t'+str(errList.V)+'\t'+str(errList.W)+'\t'+str(errList.LYA)+'\t'+str(errList.U1)+'\t'+str(errList.U2)+'\t'+str(errList.VC)+'\t'+str(errList.WC)+'\t'+str(errList.VS)+'\t'+str(errList.WS)+'\n')
            cible.avancer()
            suiveur.suivre()
            n_sim += 1
            # Every 0.5 s of simulated time, record an error sample.
            if n_sim%int(0.5/Te) == 0:
                appendErrors(errList)
                print(time.perf_counter()-t0, "/ 0.5") # wall time to simulate 0.5 s
                t0 = time.perf_counter()
                if trace:
                    ax.plot(cible._x, cible._y, '.', color='k', lw=0.1)
                    ax.plot(suiveur._x, suiveur._y, '.', color='r', lw=0.1)
    if save: errFile.close()
def animation(i):
    """FuncAnimation frame callback: nothing to redraw here, the sim thread updates state."""
    return []
# Initialise the error-plot window and its 2x2 axes grid
figE, axE = plt.subplots(2,2,figsize=(12, 6))
figE.canvas.mpl_connect('key_press_event', onPress)
figE.canvas.mpl_connect('key_release_event', onRelease)
figE.canvas.mpl_connect('close_event', stop)
# Disable matplotlib's default key bindings on this window too.
figE.canvas.mpl_disconnect(figE.canvas.manager.key_press_handler_id)
# Error-series update
def appendErrors(errList):
    """Append the latest error sample (position, angle, X, Y) to the series."""
    global n_err
    temps.append(0.5 * n_err)
    samples = (
        (0, 0, errList.XY),
        (0, 1, errList.TH),
        (1, 0, errList.X),
        (1, 1, errList.Y),
    )
    for row, col, value in samples:
        erreurs[row][col].append(value)
    n_err += 1
def init_errors():
    """Label the four error sub-plots and draw the first recorded sample."""
    labels = ('Erreur position', 'Erreur angulaire', 'Erreur X', 'Erreur Y')
    for (row, col), label in zip(((0, 0), (0, 1), (1, 0), (1, 1)), labels):
        axE[row][col].set_ylabel(label)
    appendErrors(suiveur.handleErrors())
    for row in range(2):
        for col in range(2):
            subplot = axE[row][col]
            subplot.plot(temps, erreurs[row][col], '.-', color='#1f77ba', lw=2)
            subplot.set_xlim((0, 0.5))
            subplot.grid()
def errors(i):
    """FuncAnimation callback: redraw the four error curves unless paused."""
    global pause, Te, n_err, n_sim
    if pause:
        return
    for row in range(2):
        for col in range(2):
            subplot = axE[row][col]
            subplot.plot(temps, erreurs[row][col], color='#1f77ba', lw=2)
            subplot.set_xlim((0, 0.5 * n_err))
# Parameters and robot setup
Te = 0.005          # simulation time step (seconds)
trace = True        # draw the trajectories on the main axes
save = False        # log error samples to erreurs.txt
cible = Cible(3, 3, 0, ax, 'k', 1, 2, Te, "huit") # ligne / cercle / huit / random / custom / control
suiveur = Suiveur(1, 1, 180, ax, 'r', cible)
# Legend
ax.text(0.03, 0.95, '- Robot suiveur', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, color=suiveur._color, fontsize=10, bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 13})
ax.text(0.03, 0.93, '- Robot cible', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, color=cible._color, fontsize=10)
ax.text(0.03, 0.97, 'Légende :', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, color='k', fontsize=10)
# Launch: two animations (simulation frame + error curves) and the sim thread
anim = FuncAnimation(fig, animation, frames=30000, interval=20, cache_frame_data=False, save_count=0, repeat=False)
err = FuncAnimation(figE, errors, init_func=init_errors, frames=1200, interval=500, cache_frame_data=False, save_count=0, repeat=False)
Thread(target=simulation).start()
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
0f3a73e751671254e4991470d00ff51b9c7ef8c4
|
1ace6b79b41b69006de37fe936964b3087e67736
|
/1-5.py
|
9f9b5076ac092e9e7cb6d4f14ebb4149b3ebfd2c
|
[] |
no_license
|
thlong78984278/Crypto
|
77f38d94e727a7872f89cd565b675024f0a7c932
|
2f3f46aa744c7292f6e31260b05329bc0cada817
|
refs/heads/main
| 2023-04-11T09:11:06.785153
| 2021-04-21T13:54:06
| 2021-04-21T13:54:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
from turing_machine import TuringMachine2

# Transition table for a machine over {0, 1, X, ' '}.
# Each entry maps (state, read_symbol) -> (next_state, action), where the
# action appears to be either a head move ("R"/"L") or a symbol to write —
# NOTE(review): exact semantics depend on TuringMachine2; confirm against
# the turing_machine module.
# The states q0..q6 repeatedly mark one leading 0 and one trailing 1 with X,
# so the machine accepts ("y") inputs of the form 0^n 1^n and rejects ("n")
# anything else.
transition_function = {
    ("init"," "):("q0","R"),
    ("q0","0"):("q1","X"),
    ("q0","1"):("n","1"),
    ("q0","X"):("y","X"),
    ("q1","X"):("q2","R"),
    ("q2","0"):("q2","R"),
    ("q2","X"):("n","X"),
    ("q2","1"):("q3","R"),
    ("q3","1"):("q3","R"),
    ("q3","0"):("n","0"),
    ("q3"," "):("q4","L"),
    ("q3","X"):("q4","L"), # ->|
    ("q4","1"):("q5","X"),
    ("q5","X"):("q6","L"),
    ("q6","0"):("q6","L"),
    ("q6","1"):("q6","L"),
    ("q6","X"):("q0","R")
    }
# Example of a tape mid-run: outermost 0 and 1 already replaced by X:
# 00001111
# X      X
final_states = { "n", "y"}
# Tape starts with a leading blank; 5 zeros followed by 5 ones (accepted).
t = TuringMachine2(" 0000011111",
                   initial_state="init",
                   final_states=final_states,
                   transition_function=transition_function)
print("Input on tape:\n", t.get_tape())
# Step until the machine reaches a final state ("y" or "n").
while not t.final():
    t.step()
|
[
"noreply@github.com"
] |
noreply@github.com
|
618747c026393c3f4992753224e43445c535883a
|
771d4422f57e275c73b331fd8c2b3157456b99df
|
/Filtering_Server/Filters/ideal_low_pass.py
|
e980b1a4c7814f16ae695d526ba4a1eccb47c223
|
[
"MIT"
] |
permissive
|
FullPint/Frequency-Filter-Server
|
bd6c7d8e01b3598f2809abcc771125a9763c9df8
|
60ed3f17f8873ff92b6f52e7dd861c9d99dea40c
|
refs/heads/master
| 2020-04-08T10:05:48.355837
| 2018-12-08T04:55:25
| 2018-12-08T04:55:25
| 159,253,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
import numpy as np
from .filter import Filter
class IdealLowPass(Filter):
    """Ideal low-pass frequency-domain mask: 1 inside the cutoff radius, 0 outside."""

    def __init__(self, shape, cutoff):
        super().__init__(shape)
        # Cutoff radius; frequencies at distance <= cutoff are kept.
        self.cutoff = cutoff

    def build_filter(self):
        """Populate self.mask with a binary (0/1) ideal low-pass mask."""
        mask = np.zeros(self.shape)
        # Walk every (u, v) position in C order, same traversal as np.nditer.
        for u, v in np.ndindex(*mask.shape):
            distance = super().calculate_distance(u, v)
            mask[u, v] = 1 if distance <= self.cutoff else 0
        self.mask = mask
|
[
"davila.alec@gmail.com"
] |
davila.alec@gmail.com
|
90006734794ffa89b6727b8d9bd24e1921e6c7d1
|
e465773cf0bbcdde70da6062a99cc94c23eab71d
|
/sensors/apps.py
|
a1ad8dafc62164785847466f4959adffbd3fc3be
|
[] |
no_license
|
mkotha33/WaterManagementSystem
|
0cff3ba808c59e88b7e69b053c8759415d5927c7
|
f62a1b4f36407ce7e13d1dd14c8579306cddff59
|
refs/heads/master
| 2020-04-26T10:02:21.369656
| 2019-03-02T11:40:11
| 2019-03-02T11:40:11
| 173,475,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class SensorsConfig(AppConfig):
    """Django application configuration for the 'sensors' app."""
    # App label Django uses to register this application.
    name = 'sensors'
|
[
"maahi.kotha@gmail.com"
] |
maahi.kotha@gmail.com
|
ea1ba40bbb985fe5ead1c94c1eb2832e03ae4197
|
72058fc820cd7703f371d170d8ea04e372a12d42
|
/lafilm_rental/model/sale.py
|
4a875e25d785bd67887c9c74a9740bb9662a1b1a
|
[] |
no_license
|
billyxu-lafilm/lafilm
|
7767e2b792fbacd385beea4193da4e49bf9e3066
|
75e6d5df74c5cbaea610ed5beaca190257c49204
|
refs/heads/master
| 2020-12-19T21:23:43.996244
| 2020-04-21T05:55:57
| 2020-04-21T05:55:57
| 235,857,172
| 0
| 0
| null | 2020-04-21T05:57:16
| 2020-01-23T18:18:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 955
|
py
|
# -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
import logging
from odoo.exceptions import UserError, ValidationError
_logger = logging.getLogger(__name__)
class SaleOrderLine(models.Model):
    """Sale order line that restricts rentals to authorised partners."""
    _inherit = 'sale.order.line'

    @api.onchange('product_id')
    def product_id_change(self):
        """On product change, allow the rental only if the order's partner
        belongs (via a user) to one of the product's groups; otherwise raise.
        """
        if not self.product_id:
            return super(SaleOrderLine, self).product_id_change()
        partner = self.order_id.partner_id
        # Partner is allowed when any user of any product group matches it.
        allowed = any(
            user.partner_id.id == partner.id
            for grp in self.product_id.group_ids
            for user in grp.users
        )
        if not allowed:
            raise UserError(_('%s is not allowed to rent this %s ') % (
                partner.name, self.product_id.name))
        return super(SaleOrderLine, self).product_id_change()
|
[
"dshah@adalogic.co"
] |
dshah@adalogic.co
|
67878e053fb1001efd8f2518f446de52112fa2b8
|
940bb431eff5192f7332e749fba0a4874630beb8
|
/src/middleware_security/utils.py
|
81cdf4f30af14ea902c502d845cfa62dcef032db
|
[] |
no_license
|
red-cientifica-peruana/middleware-security
|
63c9374f3547cb5ed4b4848860f1f976aba457b7
|
23479f532d26353291c2972881a29a7df31f2f5c
|
refs/heads/master
| 2021-01-12T10:15:19.866181
| 2018-07-18T14:55:37
| 2018-07-18T14:55:37
| 76,400,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from functools import wraps
from falcon_exceptions import HTTPException
def scope_verify(scope=None):
    """Decorator factory that authorises a responder against token scopes.

    The decorated method is expected to be a Falcon-style responder
    (resource, request, ...). The required scope is either the `scope`
    argument or, when omitted, the resource's own `scope` attribute. A dict
    scope maps HTTP verbs to scope strings (picked from the method name,
    e.g. ``on_get`` -> ``scope['get']``). A scope string has the form
    ``"a:b:c"`` and is checked against ``request.context['token_scopes']``.

    Raises HTTPException 500 on a malformed/missing scope and 403 when the
    token does not grant the required scope.
    """
    def method_decorator(func):
        @wraps(func)
        def method_wrapper(*args, **kwargs):
            # Fall back to the resource object's own scope when none was given.
            scope_obj = args[0].scope if not scope else scope
            context = args[1].context
            if isinstance(scope_obj, dict):
                # Per-verb scopes: pick the entry matching the responder name.
                func_name = func.__name__.split('_')
                scope_obj = scope_obj.get(func_name[1], None)
            if 'token_scopes' not in context:
                # No token scopes on the request: allow the call through.
                # Bug fix: the wrapper previously discarded func's return value.
                return func(*args, **kwargs)
            if scope_obj is None:
                raise HTTPException(500, "The scope was not set correctly")
            token_scopes = context['token_scopes']
            parts_scope = scope_obj.split(':')
            # A valid scope string has exactly three colon-separated parts.
            if len(parts_scope) != 3:
                raise HTTPException(500, "The scope was not set correctly")
            if (parts_scope[0] not in token_scopes or
                    parts_scope[1] not in token_scopes[parts_scope[0]] or
                    parts_scope[2] not in token_scopes[parts_scope[0]][parts_scope[1]]):
                raise HTTPException(
                    403,
                    dev_msg="You are not authorized to perform this action",
                    user_msg="No se encuentra autorizado para realizar esta acción")
            return func(*args, **kwargs)
        return method_wrapper
    return method_decorator
|
[
"noreply@github.com"
] |
noreply@github.com
|
a42e82398aa18becbcfed7a7470e597ea6fa315e
|
34b424abf175eb85ad32f7c3eeb6c1ccf388b66f
|
/Clases/Invasor.py
|
588d274ed1990afd8261ca9e6e8973b58638f539
|
[] |
no_license
|
Rexwar/G1SpaceInvader
|
9dc660665034b47ff9a090d65f630674189386d7
|
bb68d8ceafab929e07c9eaf26e55e56e6f46b627
|
refs/heads/master
| 2020-09-27T19:18:56.464673
| 2019-12-09T01:38:42
| 2019-12-09T01:38:42
| 226,590,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
import sys, pygame, math
from pygame.locals import *
from random import randint
from . import Proyectil
class Invasor(pygame.sprite.Sprite):
    """Invader sprite: marches side to side between two limits, descends
    after hitting the edges, fires at random, and animates between two images."""
    def __init__(self, posx, posy, distancia, imagenUno, imagenDos):
        # Invader properties
        pygame.sprite.Sprite.__init__(self)
        self.frec = 1
        self.imagenA = pygame.image.load(imagenUno)
        self.imagenB = pygame.image.load(imagenDos)
        self.listaImagenes = [self.imagenA, self.imagenB]
        self.posImagen = 0
        self.imagenInvasor = self.listaImagenes[self.posImagen]
        self.rect = self.imagenInvasor.get_rect()
        self.listaDisparo = []
        self.velocidad = 1
        self.rect.top = posy
        self.rect.left = posx
        self.rangoDisparo = 5 #between 0 and 1000
        self.tiempoCambio = 1
        self.conquista = False
        self.derecha = True #direction flag (True = moving right)
        self.contador = 0 #edge hits before descending
        self.Maxdescenso = self.rect.top + 10
        self.limiteDerecha = posx + distancia
        self.limiteIzquierda = posx - distancia
    def dibujar(self, superficie):
        #draw the current image from the animation list
        self.imagenInvasor = self.listaImagenes[self.posImagen]
        superficie.blit(self.imagenInvasor, self.rect)
    def comportamiento(self, tiempo, enPausa):
        #move and attack
        #constant movement and random firing
        if self.conquista == False:
            if not enPausa:
                self.__ataque()
                self.__movimientos()
            if self.tiempoCambio <= tiempo * self.frec: #animation frame swap
                self.posImagen +=1
                self.tiempoCambio += 1
                if self.posImagen > len(self.listaImagenes)-1:
                    self.posImagen = 0
    def __movimientos(self):
        # Move sideways until two edge hits, then descend one band.
        if self.contador < 2:
            self.__movimientoLateral()
        else:
            self.__descenso()
    def __descenso(self):
        if self.Maxdescenso == self.rect.top:
            self.contador = 0
            self.Maxdescenso = self.rect.top + 40
        else:
            self.rect.top += 1
    def __movimientoLateral(self):
        if self.derecha == True:
            self.rect.left = self.rect.left + self.velocidad
            if self.rect.left > self.limiteDerecha: # right movement limit
                self.derecha = False
                self.contador += 1
        else:
            self.rect.left = self.rect.left - self.velocidad
            if self.rect.left < self.limiteIzquierda: #left limit
                self.derecha = True
    def __ataque(self):
        # Fire with probability rangoDisparo / 10000 per frame.
        if (randint(0,10000)<self.rangoDisparo):
            self.__disparo()
    def __disparo(self):
        # Spawn a projectile at the invader's centre.
        x,y = self.rect.center
        miProyectil = Proyectil.Proyectil(x,y, "Imagenes/disparob.jpg",False)
        self.listaDisparo.append(miProyectil)
|
[
"reyvaldesm@gmail.com"
] |
reyvaldesm@gmail.com
|
d5d79bc641bfa5323938802c7e9287693b1ac8f5
|
98c7bf36a116078d8a84984ea4cf30eb5293cd14
|
/functional_tests/test_layout_and_styling.py
|
90d82cf2402103a63b40fe456cbc5cc78623abc3
|
[] |
no_license
|
maugravena/python-tdd-book
|
54dccabb523fdb498fe5394328b943ab975c3535
|
913a98ccf7b5db44c27f2f05a7e2edeacedb0a4c
|
refs/heads/master
| 2021-07-11T16:53:38.126014
| 2020-05-19T23:32:39
| 2020-05-19T23:32:39
| 246,428,029
| 0
| 0
| null | 2021-06-10T22:40:12
| 2020-03-10T23:13:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 922
|
py
|
from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
    """Functional test: the new-item input box is horizontally centred."""

    def test_layout_and_styling(self):
        # Edith goes to the home page
        self.browser.get(self.live_server_url)
        self.browser.set_window_size(1024, 768)

        # She notices the input box is nicely centered: its midpoint should
        # sit at x=512 (half the 1024px window), within a 10px tolerance.
        inputbox = self.get_item_input_box()
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2, 512, delta=10
        )

        # She starts a new list and sees the input is nicely centered there too
        inputbox.send_keys('testing')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: testing')
        inputbox = self.get_item_input_box()
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta=10
        )
|
[
"maugravena@gmail.com"
] |
maugravena@gmail.com
|
08a69238bd7160d33ffd5cf0c96d89c94638de23
|
13d52041d7ba53ccc1708a5931b69f8b13ebd0f6
|
/4_observer_pattern/good_exemple/api/user.py
|
c65384b673d0e8b3f1da61a0ee9710fbd3569e62
|
[] |
no_license
|
iemsec/DesginPattern
|
0967c5cae53e228311e3003900eeafd1bccc908d
|
80b5426f1f83b646819188c7fb22c207ba1d05c7
|
refs/heads/main
| 2023-07-12T19:44:08.563756
| 2021-08-06T05:46:28
| 2021-08-06T05:46:28
| 389,579,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
from lib.stringtools import get_random_string
from lib.db import find_user, create_user
from .event import post_event
def register_new_user(name: str, password: str, email: str):
    """Create a new user account and broadcast the registration event."""
    new_user = create_user(name, password, email)
    post_event("user registred", new_user)
def password_forgotten(email: str):
    """Attach a fresh 16-character reset code to the user and broadcast the event."""
    account = find_user(email)
    account.reset_code = get_random_string(16)
    post_event("password forgotten", account)
|
[
"edouard.mortier@gmail.com"
] |
edouard.mortier@gmail.com
|
e70987c307aca85353a6a8cf1ef04ea55045f210
|
da0398358decb9d8a5d6d56c14708721096b4694
|
/ex20.py
|
aa62b29949d7990121dc4fd31849cf52723b7937
|
[] |
no_license
|
hninyu46/coding-sprint
|
8e867c3ebabe91b455ba23349b7e7ee22e3a71a7
|
1be34b422a992a1afc2ae0fd25c777220a36afaa
|
refs/heads/master
| 2020-03-21T20:45:29.580098
| 2018-07-06T15:16:15
| 2018-07-06T15:16:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
from sys import argv

# Command line: argv[0] is this script's name, argv[1] the file to print.
script, input_file = argv
def print_all(f):
    """Print the entire contents of an open file object."""
    print(f.read())
def rewind(f):
    """Move the file pointer back to the start of the file."""
    f.seek(0)
def print_a_line(line_count, f):
    """Print the given line number followed by the next line read from f."""
    next_line = f.readline()
    print(line_count, next_line)
current_file = open(input_file)

print("First let's print the whole file: \n")
print_all(current_file)

print("Now let's rewind, kind of like a tape.")
rewind(current_file)

print("Let's print three lines:")

current_line = 1
# Bug fix: the original passed current_file as the line count here,
# printing the file object's repr instead of the line number 1.
print_a_line(current_line, current_file)

current_line = current_line + 1
print_a_line(current_line, current_file)

current_line = current_line + 1
print_a_line(current_line, current_file)

# Release the OS file handle now that we are done reading.
current_file.close()
|
[
"hninyuaungcu@gmail.com"
] |
hninyuaungcu@gmail.com
|
3e4bd1d9a17337e27b4205d675eb8514769efaf1
|
f36ba051f333cfcc28d04938461e26bc13d10545
|
/Assignment 3/client_udp.py
|
459c86c7c8181479c0335ff9f551c18656ee0971
|
[] |
no_license
|
wi15b045/OEC
|
d6b5f80ebc64aa0f85197a418f99c576f5ebd71b
|
f4b9f050acccdf20e702007cd1163b866379d293
|
refs/heads/master
| 2021-01-25T11:49:39.995457
| 2017-06-14T06:50:53
| 2017-06-14T06:50:53
| 93,949,063
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
import socket
import uuid
import struct

# Random identifier for this client instance.
CLIENT_ID = uuid.uuid4()
# NOTE(review): 192.168.1.255 is a subnet *broadcast* address, not a multicast
# group (224.0.0.0/4) — confirm whether SO_BROADCAST was actually intended.
MCAST_GRP = '192.168.1.255'
MCAST_PORT = 5007

# UDP socket; a multicast TTL of 2 limits propagation to the local network.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
# Announce this client as "C_CON:<uuid>" to the group/port.
sock.sendto(bytes("C_CON:"+str(CLIENT_ID),"UTF-8"), (MCAST_GRP, MCAST_PORT))
|
[
"wi15b045@technikum-wien.at"
] |
wi15b045@technikum-wien.at
|
bc4d884d0c92d2823f1ba3f94f059e5ec5ea1670
|
3586b39f6c2a587077dd60cd518e862b65bec47e
|
/100-Days-of-Python/Day-17/main.py
|
80d3a1a2fda636bef136753773feeee959abd5aa
|
[] |
no_license
|
hpisme/Python-Projects
|
9e7f483a228ab18f4cb22810a8e23cf379f315bc
|
2d8605c324c30e83390dafd6bf63b164372905ba
|
refs/heads/main
| 2023-05-20T12:40:37.122173
| 2021-06-05T23:18:14
| 2021-06-05T23:18:14
| 367,402,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
# Build one Question object per raw entry in the data set.
question_bank = [
    Question(entry["text"], entry["answer"]) for entry in question_data
]

# Run the quiz until the question bank is exhausted.
new_quiz = QuizBrain(question_bank)
while new_quiz.still_has_questions():
    new_quiz.next_question()

print("You've completed the quiz.")
print(f"Your final score is {new_quiz.score} / {len(question_bank)}")
|
[
"noreply@github.com"
] |
noreply@github.com
|
c8d9b002d12d8875c9916bb4aebc75003313354f
|
13270c82b319dc288e588466b39708f5cc7a869d
|
/keyvalue/parsetriples.py
|
79d017768744525fbf18c0afcf7fcc4b3fde7265
|
[] |
no_license
|
ian27diaz/Practica1_Cloud
|
20dd03ac41cca194d4748d38bdfaab84a53b6ed8
|
51c51e6e5becc4602d4f734344e60f8ee7f0cf17
|
refs/heads/master
| 2022-04-05T13:10:01.226226
| 2020-02-23T01:05:19
| 2020-02-23T01:05:19
| 242,435,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
import re
class ParseTriples():
    """Sequential reader for simple N-Triples-like files.

    Each call to getNext() returns the next (subject, predicate, object)
    3-tuple, skipping '#' comment lines, or None once the file is exhausted
    (or a line does not match the triple pattern).
    """

    # Matches  <s> <p> <o>  or  <s> <p> "literal"  on one line.
    # Fixes: raw string (the original '\s' raised an invalid-escape warning)
    # and the pattern is compiled once instead of on every getNext() call.
    _TRIPLE_RE = re.compile(r'<(.+)>\s*<(.+)>\s*[<"](.+)[>"]')

    def __init__(self, filename):
        super().__init__()
        self._filename = filename
        # errors='ignore' drops undecodable bytes instead of raising.
        self._file = open(self._filename, "r", errors='ignore')

    def getNext(self):
        """Return the next triple as a 3-tuple, or None at end of file."""
        if self._file.closed:
            return None
        line = self._file.readline()
        # Skip comment lines.
        while isinstance(line, str) and line.startswith("#"):
            line = self._file.readline()
        if not line:
            # End of file: close the handle (the original leaked it and only
            # printed a leftover debug line here).
            self._file.close()
            return None
        m = self._TRIPLE_RE.match(line.strip())
        if m:
            return m.group(1), m.group(2), m.group(3)
        # Malformed line: mirror the original behaviour and return None.
        return None
|
[
"anand_ian@hotmail.com"
] |
anand_ian@hotmail.com
|
705a2e2159d701decdd09d78d2292c2ae8353281
|
25af4e33f78e43169befe796aba842e02d7c0de3
|
/exe039 - alistamento Militar.py
|
33140b5e157c35cbdfbae4d9aa4b91fdbfb555b4
|
[
"MIT"
] |
permissive
|
carlosbandelli/Exercicios_em_Python
|
c6415e458c524493435be53dcddec5a1780ed3d9
|
2cd5bd837fdc51932f9605db32366ad0e3871d87
|
refs/heads/main
| 2023-08-18T03:58:40.084727
| 2021-09-19T00:08:00
| 2021-09-19T00:08:00
| 380,223,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
from datetime import date

# Current calendar year, used as the reference for the age computation.
atual = date.today().year
nasc = int(input('Ano de nascimento: '))
idade = atual - nasc
print(f'Quem nasceu em {nasc} tem {idade} anos em {atual} ')
# Military enlistment is due at exactly age 18.
if idade == 18:
    print('Você tem que se alistar IMEDIATMENTE!')
elif idade < 18 :
    saldo = 18 - idade
    print(f'Ainda faltam {saldo} anos para alistamento')
elif idade > 18:
    saldo = idade - 18
    print(f'Você já deveria ter se alistado há {saldo} anos')
|
[
"carlosbandelliv@gmail.com"
] |
carlosbandelliv@gmail.com
|
c585322fee69db35d2475c2f9d5d331d9b590f4e
|
d5b64a2a9b1846aa7cc394f8d12da61c0ddcad8c
|
/ess_gym_env/erdosstate.py
|
767dbc2e696f21bdc7c6ac10d372da761f19712c
|
[] |
no_license
|
mzaffran/DRL_ESS_games
|
c09e9c47d9f50761752f3a8a18bb6ae3616dd5db
|
beecaefd504bee53e863f7ecc65fd37b474be45d
|
refs/heads/master
| 2023-04-05T10:07:50.135450
| 2022-06-13T16:33:17
| 2022-06-13T16:33:17
| 243,806,048
| 0
| 0
| null | 2023-03-26T19:47:01
| 2020-02-28T16:26:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 11,183
|
py
|
import numpy as np
import gym
class ErdosState(gym.Space):
"""
State type for Erdos game. Input looks like spaces.ErdosState(K, potential, weights, dist_probs, high_one_prob)
K is of type int, giving the number of levels, potential the difficulty of the game
and weights just to recycle computation, giving the number of weights for each level
for use in sampling. Remainder are probabilities used for sampling
"""
def __init__(self, K, potential, weights, unif_prob, geo_prob, diverse_prob,
state_unif_prob, high_one_prob, geo_high,
unif_high, geo_ps, train_attacker, cattacker):
assert K <= 40, "K is too large! Computing 2^-K may be very slow and inaccurate"
self.K = K
self.potential = potential
self.weights = weights
self.geo_prob = geo_prob
self.unif_prob = unif_prob
self.diverse_prob = diverse_prob
self.state_unif_prob = state_unif_prob
self.high_one_prob = high_one_prob
self.geo_high = geo_high
self.unif_high = unif_high
self.geo_ps = geo_ps
self.train_attacker = train_attacker
self.cattacker = cattacker
self.states_table = None
self.all_states_table = None
self.dtype = int
def potential_fn(self, state):
return np.sum(state*self.weights)
def sample(self):
"""
Samples a random start state based on initialization configuration
"""
# pick sample type according to probability
samplers = ["unif", "geo", "diverse", "state_unif"]
sample_idx = np.random.multinomial(1, [self.unif_prob, self.geo_prob, self.diverse_prob, self.state_unif_prob])
idx = np.argmax(sample_idx)
sampler = samplers[idx]
if sampler == "unif":
return self.unif_sampler()
if sampler == "geo":
return self.geo_sampler()
if sampler == "diverse":
return self.diverse_sampler()
if sampler == "unif_state":
return self.state_unif_sampler()
def get_high_one(self, state):
"""
Takes in state and adds one piece at a high level
"""
non_zero_idxs = [-2, -3, -4]
idx_idxs = np.random.randint(low=0, high=3, size=10)
for idx_idx in idx_idxs:
non_zero_idx = non_zero_idxs[idx_idx]
if self.potential_fn(state) + self.weights[non_zero_idx] <= self.potential:
state[non_zero_idx] += 1
break
return state
def unif_sampler(self):
"""
Samples pieces for states uniformly, for levels 0 to self.unif_high
"""
state = np.zeros(self.K+1, dtype=int)
# adds high one according to probability
high_one = np.random.binomial(1, self.high_one_prob)
if high_one:
state = self.get_high_one(state)
# checks potential of state, returning early if necessary
if (self.potential - self.potential_fn(state)) <= 0:
return state
# samples according to uniform probability
pot_state = self.potential_fn(state)
for i in range(max(10, int(1/(100000*self.weights[0])))):
levels = np.random.randint(low=0, high=self.unif_high, size=int(np.min([100000, 1.0/self.weights[0]])))
# adds on each level as the potential allows
for l in levels:
if pot_state + self.weights[l] <= self.potential:
state[l] += 1
pot_state += self.weights[l]
# checks potential to break
if pot_state >= self.potential - max(1e-8, self.weights[0]):
break
# checks potential to break
if pot_state >= self.potential - max(1e-8, self.weights[0]):
break
return state
def geo_sampler(self):
"""
Samples pieces for states with geometric distributions, for levels 0 to self.geo_high
and buckets them in from lowest level to highest level
"""
state = np.zeros(self.K+1, dtype=int)
# adds high one according to probability
high_one = np.random.binomial(1, self.high_one_prob)
if high_one:
state = self.get_high_one(state)
# pick the p in Geometric(p), where p is randomly chosen from predefined list of ps
ps = self.geo_ps
p_idx = np.random.randint(low=0, high=len(ps))
p = ps[p_idx]
for i in range(max(1000, int(1/(100000*self.weights[0])))):
# get pieces at different levels, highest level = self.geo_high
assert self.K+1 < 30, "K too high, cannot use geo sampler"
levels = np.random.geometric(p, int(1.0/self.weights[0])) - 1
idxs = np.where(levels < self.geo_high)
levels = levels[idxs]
# bin the levels into the same place which also sorts them from 0 to K
# counts created separately to ensure correct shape
tmp = np.bincount(levels)
counts = np.zeros(self.K + 1)
counts[:len(tmp)] = tmp
# add levels to state with lowest levels going first
for l in range(self.K + 1):
max_pieces = (self.potential - self.potential_fn(state))/self.weights[l]
max_pieces = int(np.min([counts[l], max_pieces]))
state[l] += max_pieces
# checks potential to break
if self.potential_fn(state) >= self.potential - max(1e-8, self.weights[0]):
break
# checks potential to break
if self.potential_fn(state) >= self.potential - max(1e-8, self.weights[0]):
break
return state
def simplex_sampler(self, n):
""" Samples n non-negative values between (0, 1) that sum to 1
Returns in sorted order. """
# edge case: n = 1
if n == 1:
return np.array([self.potential])
values = [np.random.uniform() for i in range(n-1)]
values.extend([0,1])
values.sort()
values_arr = np.array(values)
xs = values_arr[1:] - values_arr[:-1]
# return in decresing order of magnitude, to use for higher levels
xs = self.potential*np.sort(xs)
xs = xs[::-1]
return xs
def diverse_sampler(self):
"""
Tries to sample state to increase coverage in state space. Does this with three steps
Step 1: Uniformly samples the number of non-zero idxs
Step 2: Gets a set of idxs (between 0 to K-2) with size the number of nonzero idxs
in Step 1
Step 3: Divides up the potential available uniformly at random between the chosen idxs
"""
# Sample number of nonzero idxs
num_idxs = np.random.randint(low=1, high=self.K-1)
# Sample actual idxs in state that are nonzero
idxs = []
all_states =[ i for i in range(self.K - 1)] # can have nonzero terms up to state[K-2]
for i in range(num_idxs):
rand_id = np.random.randint(low=0, high=len(all_states))
idxs.append(all_states.pop(rand_id))
# sort idxs from largest to smallest to allocate
# potential correctly
idxs.sort()
idxs.reverse()
# allocate potential
xs = self.simplex_sampler(num_idxs)
# fill with appropriate number of pieces adding on any remaindr
remainder = 0
state = np.zeros(self.K+1, dtype=int)
for i in range(num_idxs):
idx = idxs[i]
pot_idx = xs[i] + remainder
num_pieces = int(pot_idx/self.weights[idx])
state[idx] += num_pieces
# update remainder
remainder = pot_idx - num_pieces*self.weights[idx]
return state
def state_unif_sampler(self):
"""
Sampler that draws a start state uniformly from self.states_table
"""
assert self.states_table != None, "states_table attribute is not set, call enumerate_states_potential"
high = len(self.states_table)
idx = np.random.randint(low=0, high=high)
state = np.array(self.states_table[idx]).astype("int")
return state
def enumerate_states_core(self, K, potential, N, weights):
"""
This function takes in values for K, potential, N and weights
and enumerates all states of that potential, returning them as
a list.
"""
# base case
if K == 2:
result = []
max_N = np.floor(N*(weights[0]/weights[K-1])).astype("int") + 1
for i in range(max_N):
result.append([N - 2*i, i])
# recursion
else:
result = []
scaling = (weights[0]/weights[K-1])
max_N = np.floor(N*scaling).astype("int") + 1
for i in range(max_N):
recursed_results = self.enumerate_states_core(K-1, potential-i*weights[K-1], int(N - i/scaling), weights[:-1])
# edit recursed results and append
for state in recursed_results:
state.append(i)
# add on to list of states
result.extend(recursed_results)
# NOTE: result contains list of states that are missing level K (which must always be 0)
# this needs to be added on after getting the result
return result
def enumerate_states_potential(self):
assert self.K <= 10, "K is too large for enumerating all states!"
N = int(self.potential*np.power(2.0, self.K))
raw_states = self.enumerate_states_core(self.K, self.potential, N, self.weights)
# add 0 term corresponding to level K to raw states
for state in raw_states:
state.append(0)
self.states_table = raw_states
def enumerate_states_all(self, upperbound=1.0):
"""This function enumerates all states of all potentials
from 0 to upper bound"""
assert self.K < 10, "K is too large for enumerating all states"
all_states = []
for n in range(int(upperbound/self.weights[0])+1):
N=n
potential = float(n)*self.weights[0]
raw_states = self.enumerate_states_core(self.K, potential, N, self.weights)
# add on top 0 and append to results
for state in raw_states:
state.append(0)
all_states.extend(raw_states)
self.all_states_table = all_states
    def contains(self, action):
        """
        Membership test for ``action`` — not yet implemented.

        TODO(maithra): decide the action-space semantics and implement;
        currently a stub that always returns None.
        """
        pass
@property
def shape(self):
if self.train_attacker:
return (self.K+1,)
else:
return (2*self.K + 2,)
def __repr__(self):
return "ErdosGame" + str(self.shape)
|
[
"luismontero@hotmail.fr"
] |
luismontero@hotmail.fr
|
56d1d7f52cb51f97c009df9a03ed9f71b02fa6a5
|
a1becad0595e29ac56645eeb22d7dee675107c50
|
/tests/test_kernel.py
|
9c832f845bc4c275df45b3e150697ee20da78b5b
|
[
"MIT"
] |
permissive
|
tagordon/specgp
|
afa87f718af1c27f42f6cf86e532f708f80b95ad
|
de09ef4d05c6eadad02d407f03b71cfce4d65c76
|
refs/heads/master
| 2022-11-19T18:59:53.626083
| 2020-07-21T14:46:53
| 2020-07-21T14:46:53
| 272,232,294
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
import exoplanet as xo
import specgp as sgp
import numpy as np
import theano.tensor as tt
import theano
# Symbolic hyperparameters of the SHO kernel (log-amplitude, log-frequency,
# log-quality factor); test_values let theano's debug mode propagate shapes.
logS0 = tt.scalar()
logw0 = tt.scalar()
logQ = tt.scalar()
logS0.tag.test_value = -5.0
logw0.tag.test_value = -2.0
logQ.tag.test_value = 1.0
term = xo.gp.terms.SHOTerm(S0=tt.exp(logS0), w0=tt.exp(logw0), Q=tt.exp(logQ))
# Per-band scale factors; the Kronecker term models cov = K_time ⊗ (alpha alpha^T).
alpha = tt.vector()
alpha.tag.test_value = np.array([1, 2, 3])
kernel = sgp.terms.KronTerm(term, alpha=alpha)
Q = alpha[:, None]*alpha[None, :]
t = tt.vector()
diag = tt.vector()
t.tag.test_value = np.linspace(0, 10, 10)
diag.tag.test_value = 1e-5 * np.ones(30)
# Reference dense covariance built explicitly with a Kronecker product,
# against which the structured GP solver is checked below.
cov = (tt.slinalg.kron(term.to_dense(t, tt.zeros_like(t)), Q)
        + tt.diag(diag))
args = [logS0, logw0, logQ, alpha, t, diag]
get_K = theano.function(args, cov)
# Concrete 30x30 covariance matrix (10 times x 3 bands).
K = get_K(-5, -2, 1, [1, 2, 3], np.linspace(0, 10, 10), 1e-5 * np.ones(30))
# `diag` is deliberately rebound: the GP API takes it as a (bands, times)
# matrix rather than the flat vector used for the dense reference above.
diag = tt.dmatrix()
mean = tt.dmatrix()
diag.tag.test_value = 1e-5 * np.ones((3, 10))
# NOTE(review): np.zeros_like on a symbolic dmatrix looks suspect — it is
# only used as a test_value here, but confirm it yields the intended array.
mean.tag.test_value = np.zeros_like(diag)
gp = xo.gp.GP(kernel=kernel, diag=diag, mean=sgp.means.KronMean(mean), x=t, J=2)
z = tt.dmatrix()
z.tag.test_value = np.zeros((30, 1))
args = [logS0, logw0, logQ, alpha, t, diag, z]
apply_inv = theano.function(args, gp.apply_inverse(z))
mult_l = theano.function(args, gp.dot_l(z))
args = [logS0, logw0, logQ, alpha, t, diag]
log_det = theano.function(args, gp.log_det)
# NOTE(review): this module-level `args` is shadowed inside every test
# below and appears unused — presumably a leftover.
args = [-5, -2, 1, [1, 2, 3], np.linspace(0, 10, 10), 1e-5 * np.ones((3, 10)), z]
def test_inverse():
    # Structured solve must match the dense inverse applied to z.
    z = np.random.randn(30, 1)
    y = np.dot(np.linalg.inv(K), z)
    args = [-5, -2, 1, [1, 2, 3], np.linspace(0, 10, 10), 1e-5 * np.ones((3, 10)), z]
    # NOTE(review): tt.allclose builds a symbolic graph; np.allclose may be
    # intended here — confirm this assert actually evaluates the comparison.
    assert tt.allclose(y, apply_inv(*args))
def test_determinant():
    # Structured log-determinant must match the dense determinant.
    det = np.linalg.det(K)
    args = [-5, -2, 1, [1, 2, 3], np.linspace(0, 10, 10), 1e-5 * np.ones((3, 10))]
    assert tt.allclose(np.log(det), log_det(*args))
def test_dot_l():
    # Structured L @ z must match the dense Cholesky factor applied to z.
    z = np.random.randn(30, 1)
    args = [-5, -2, 1, [1, 2, 3], np.linspace(0, 10, 10), 1e-5 * np.ones((3, 10)), z]
    y = np.dot(np.linalg.cholesky(K), z)
    assert tt.allclose(y, mult_l(*args))
|
[
"tagordon@uw.edu"
] |
tagordon@uw.edu
|
14082e84e8cc42dec1bcbc028a0ce10087db4dd4
|
4d4fcde3efaa334f7aa56beabd2aa26fbcc43650
|
/server/src/uds/migrations/0039_auto_20201111_1329.py
|
4d48ca91318e70def9c7828155e6812d0e528f18
|
[] |
no_license
|
xezpeleta/openuds
|
a8b11cb34eb0ef7bb2da80f67586a81b2de229ef
|
840a7a02bd7c9894e8863a8a50874cdfdbf30fcd
|
refs/heads/master
| 2023-08-21T17:55:48.914631
| 2021-10-06T10:39:06
| 2021-10-06T10:39:06
| 414,489,331
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
# Generated by Django 3.1.2 on 2020-11-11 13:29
from django.db import migrations
class Migration(migrations.Migration):
    """Drop four relation fields from ``metapool`` and ``servicepool``.

    Auto-generated by Django 3.1.2 on 2020-11-11; applied migrations
    should not be hand-edited.
    """
    dependencies = [
        ('uds', '0038_auto_20200505_config'),
    ]
    operations = [
        # MetaPool: drop the calendar-based access control link and the
        # member-pool relation.
        migrations.RemoveField(
            model_name='metapool',
            name='accessCalendars',
        ),
        migrations.RemoveField(
            model_name='metapool',
            name='pools',
        ),
        # ServicePool: drop both calendar relations.
        migrations.RemoveField(
            model_name='servicepool',
            name='accessCalendars',
        ),
        migrations.RemoveField(
            model_name='servicepool',
            name='actionsCalendars',
        ),
    ]
|
[
"dkmaster@dkmon.com"
] |
dkmaster@dkmon.com
|
9832a16a7fbbfe46de219209e3666a1f73284be1
|
d946f613a88e02abc1ecd07b93ce44271bc9baa9
|
/Tests.py
|
4eaef73d379a59360cc02cbc79991ecb19abf11a
|
[
"MIT"
] |
permissive
|
dwelch91/RegExpBuilder
|
a4b4e3f5cbaff2b02eefeae9e41be0d6cbc67bcf
|
3bbbfe7599160a4ef4c90ec0f78e23eeeb48c6a0
|
refs/heads/master
| 2021-01-16T00:47:36.790640
| 2013-09-03T22:57:41
| 2013-09-03T22:57:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,477
|
py
|
import unittest
from RegExpBuilder import RegExpBuilder
class Test(unittest.TestCase):
    """Behavioral tests for RegExpBuilder.

    Uses the dedicated unittest helpers (assertIsNotNone / assertIsNone /
    assertEqual) instead of assertTrue(... is not None): on failure these
    report the offending value rather than a bare "False is not true".
    """
    def test_start_of_line(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(1).of("p")
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("p"))
        self.assertIsNone(regex.match("qp"))
    def test_end_of_line(self):
        regex = RegExpBuilder()
        regex.exactly(1).of("p")
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("p"))
        self.assertIsNone(regex.match("pq"))
    def test_eitherLike_orLike(self):
        p1 = RegExpBuilder().exactly(1).of("p")
        p2 = RegExpBuilder().exactly(2).of("q")
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.either([p1, p2])
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("p"))
        self.assertIsNotNone(regex.match("qq"))
        self.assertIsNone(regex.match("pqq"))
        self.assertIsNone(regex.match("qqp"))
    def test_orLike_chain(self):
        p1 = RegExpBuilder().exactly(1).of("p")
        p2 = RegExpBuilder().exactly(1).of("q")
        p3 = RegExpBuilder().exactly(1).of("r")
        regex = RegExpBuilder()
        regex.either([p1, p2, p3])
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("p"))
        self.assertIsNotNone(regex.match("q"))
        self.assertIsNotNone(regex.match("r"))
        self.assertIsNone(regex.match("s"))
    def test_orString(self):
        regex = RegExpBuilder()
        regex.either(["p", "q"])
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("p"))
        self.assertIsNotNone(regex.match("q"))
        self.assertIsNone(regex.match("r"))
    def test_exactly(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(3).of("p")
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("ppp"))
        self.assertIsNone(regex.match("pp"))
        self.assertIsNone(regex.match("pppp"))
    def test_min(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.min(2).of("p")
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("pp"))
        self.assertIsNotNone(regex.match("ppp"))
        self.assertIsNotNone(regex.match("ppppppp"))
        self.assertIsNone(regex.match("p"))
    def test_max(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.max(3).of("p")
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("p"))
        self.assertIsNotNone(regex.match("pp"))
        self.assertIsNotNone(regex.match("ppp"))
        self.assertIsNone(regex.match("pppp"))
        self.assertIsNone(regex.match("pppppppp"))
    def test_min_max(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.min(3).max(7).of("p")
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("ppp"))
        self.assertIsNotNone(regex.match("ppppp"))
        self.assertIsNotNone(regex.match("ppppppp"))
        self.assertIsNone(regex.match("pp"))
        self.assertIsNone(regex.match("p"))
        self.assertIsNone(regex.match("pppppppp"))
        self.assertIsNone(regex.match("pppppppppppp"))
    def test_of(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(2).of("p p p ")
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("p p p p p p "))
        self.assertIsNone(regex.match("p p p p pp"))
    def test_ofAny(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(3).of_any()
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("pqr"))
    def test_ofGroup(self):
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(3).of("p").as_group()
        regex.exactly(1).of("q")
        regex.exactly(1).of_group(1)
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("pppqppp"))
    def test_fromClass(self):
        someLetters = ["p", "q", "r"]
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(3).from_class(someLetters)
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("ppp"))
        self.assertIsNotNone(regex.match("qqq"))
        self.assertIsNotNone(regex.match("ppq"))
        self.assertIsNotNone(regex.match("rqp"))
        self.assertIsNone(regex.match("pyy"))
    def test_notFromClass(self):
        someLetters = ["p", "q", "r"]
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(3).not_from_class(someLetters)
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("lmn"))
        self.assertIsNone(regex.match("mnq"))
    def test_like(self):
        pattern = RegExpBuilder().min(1).of("p").min(2).of("q")
        regex = RegExpBuilder()
        regex.start_of_line()
        regex.exactly(2).like(pattern)
        regex.end_of_line()
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("pqqpqq"))
        self.assertIsNone(regex.match("qppqpp"))
    def test_reluctantly(self):
        regex = RegExpBuilder()
        regex.exactly(2).of("p")
        regex.min(2).of_any().reluctantly()
        regex.exactly(2).of("p")
        regex = regex.get_regexp()
        self.assertEqual(regex.match("pprrrrpprrpp").group(), "pprrrrpp")
    def test_ahead(self):
        regex = RegExpBuilder()
        regex.exactly(1).of("dart")
        regex.ahead(RegExpBuilder().exactly(1).of("lang"))
        regex = regex.get_regexp()
        self.assertEqual(regex.match("dartlang").group(), "dart")
        self.assertIsNone(regex.match("dartpqr"))
    def test_notAhead(self):
        regex = RegExpBuilder()
        regex.exactly(1).of("dart")
        regex.not_ahead(RegExpBuilder().exactly(1).of("pqr"))
        regex = regex.get_regexp()
        self.assertIsNotNone(regex.match("dartlang"))
        self.assertIsNone(regex.match("dartpqr"))
    def test_asGroup(self):
        regex = RegExpBuilder()
        regex.min(1).max(3).of("p")
        regex.exactly(1).of("dart").as_group()
        regex.exactly(1).from_class(["p", "q", "r"])
        regex = regex.get_regexp()
        self.assertEqual(regex.match("pdartq").group(1), "dart")
if __name__ == '__main__':
    unittest.main()
|
[
"thebinarysearchtree@gmail.com"
] |
thebinarysearchtree@gmail.com
|
48123256affe557478e48fe3db0412f0b90416ef
|
96bf3f197930199d51959d42fce2c516d8d8693c
|
/older_settings/8-17-2021/30000.3000.100.04.20000.py
|
32e6d3822e65f60cc9973b4ff375da3ad5f07cc2
|
[] |
no_license
|
NuttareeB/partition-query
|
4903f57ba1c75968f2f36cff0de65761640d1818
|
3bae47ac9093bac9a7b0c828b5d4340ebc2e55ae
|
refs/heads/master
| 2023-07-05T11:37:12.316878
| 2021-08-26T00:11:12
| 2021-08-26T00:11:12
| 387,634,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
import numpy as np
from nestedloop import run
#run(2000, 50, 1000)
# NOTE(review): the positional arguments are undocumented here — presumably
# dataset/partition sizing parameters for the nested-loop experiment (the
# filename suggests 30000.3000.100.04.20000 settings); confirm against
# nestedloop.run's signature. numpy is imported but unused in this script.
run(20000, 2000, 512, 60, 0.4, 9000)
|
[
"doskenob@oregonstate.edu"
] |
doskenob@oregonstate.edu
|
488891e2153d459e2a52cb6f3815d37c56249090
|
3656aaf1dc614ae18e396b0bf6759e0d7aaeb28f
|
/Verification.py
|
84f6fd7dc7c61382d76ebfe11f3b0dceb4156973
|
[] |
no_license
|
cncn0069/catanddog
|
66890b19033a609264dfd40ebc78724af3869b47
|
e5533c0e640bf49ff335ae0286e1c5ff082271e1
|
refs/heads/master
| 2022-12-16T02:05:00.433481
| 2020-09-20T05:25:34
| 2020-09-20T05:25:34
| 297,008,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import matplotlib.pyplot as plt
import read

# Training curves recorded by the fit run performed in the `read` module.
acc = read.history.history['acc']
val_acc = read.history.history['val_acc']
loss = read.history.history['loss']
val_loss = read.history.history['val_loss']
epochs = range(1, len(acc) + 1)
# Figure 1: accuracy curves (dots = training, line = validation).
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
# Figure 2: loss curves. Bug fix: the training-loss curve was previously
# labeled 'Training acc', which mislabels the legend of the loss figure.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
|
[
"cncn0069@naver.com"
] |
cncn0069@naver.com
|
fd23825963857085e6a758aa9e4ce4ade0c26564
|
8d0613d5c8b14ad86408ea58507a8006886fdfbb
|
/lab14/code/torchTest.py
|
aec043952f0acfad73f9ad9c3b5266f5ddd6f780
|
[
"MIT"
] |
permissive
|
StCzzZ/EE208-labs
|
deeac192dd9c8423e28399e8dde0278840b28ee4
|
5e6fc8208c0d0bfa5c338d1294e23fc6848d6fc6
|
refs/heads/main
| 2023-03-19T15:07:31.899774
| 2021-01-15T12:22:07
| 2021-01-15T12:22:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
import time
import numpy as np
import torch
from utilities import readAndPreprocessImage, normalize
from torchvision.datasets.folder import default_loader
# Download (on first run, via network) and instantiate an ImageNet-pretrained
# ResNet-50 through torch.hub, then print its layer structure for inspection.
# NOTE(review): time/np and the utilities/default_loader imports are unused in
# this snippet — presumably kept for later feature-extraction experiments.
model = torch.hub.load('pytorch/vision', 'resnet50', pretrained=True)
print(model)
|
[
"953327699@qq.com"
] |
953327699@qq.com
|
9177310144ac6320e28207fa88ccaac564986c25
|
f827a5caefb76c3b34bc4264f05b8f6e02846be8
|
/assignment2/cs231n/solver.py
|
1733b52c5e35c740651ea2c8c0403545dbd73e37
|
[] |
no_license
|
kujira70/CS231
|
8cdf279d231088f3a7b4068e2199394c2ff2e2c2
|
8c837bc7ec2afece1aebcf0724dd18908aec61c9
|
refs/heads/master
| 2022-04-26T01:10:20.259944
| 2022-04-17T03:35:10
| 2022-04-17T03:35:10
| 138,230,742
| 1
| 0
| null | 2018-06-21T23:08:04
| 2018-06-21T23:08:04
| null |
UTF-8
|
Python
| false
| false
| 12,147
|
py
|
from __future__ import print_function, division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import os
import pickle as pickle
import numpy as np
from cs231n import optim
class Solver(object):
    """
    A Solver encapsulates all the logic necessary for training classification
    models. The Solver performs stochastic gradient descent using different
    update rules defined in optim.py.
    The solver accepts both training and validation data and labels so it can
    periodically check classification accuracy on both training and validation
    data to watch out for overfitting.
    To train a model, you will first construct a Solver instance, passing the
    model, dataset, and various options (learning rate, batch size, etc) to the
    constructor. You will then call the train() method to run the optimization
    procedure and train the model.
    After the train() method returns, model.params will contain the parameters
    that performed best on the validation set over the course of training.
    In addition, the instance variable solver.loss_history will contain a list
    of all losses encountered during training and the instance variables
    solver.train_acc_history and solver.val_acc_history will be lists of the
    accuracies of the model on the training and validation set at each epoch.
    Example usage might look something like this:
    data = {
      'X_train': # training data
      'y_train': # training labels
      'X_val': # validation data
      'y_val': # validation labels
    }
    model = MyAwesomeModel(hidden_size=100, reg=10)
    solver = Solver(model, data,
                    update_rule='sgd',
                    optim_config={
                      'learning_rate': 1e-3,
                    },
                    lr_decay=0.95,
                    num_epochs=10, batch_size=100,
                    print_every=100)
    solver.train()
    A Solver works on a model object that must conform to the following API:
    - model.params must be a dictionary mapping string parameter names to numpy
      arrays containing parameter values.
    - model.loss(X, y) must be a function that computes training-time loss and
      gradients, and test-time classification scores, with the following inputs
      and outputs:
    Inputs:
    - X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)
    - y: Array of labels, of shape (N,) giving labels for X where y[i] is the
      label for X[i].
    Returns:
    If y is None, run a test-time forward pass and return:
    - scores: Array of shape (N, C) giving classification scores for X where
      scores[i, c] gives the score of class c for X[i].
    If y is not None, run a training time forward and backward pass and
    return a tuple of:
    - loss: Scalar giving the loss
    - grads: Dictionary with the same keys as self.params mapping parameter
      names to gradients of the loss with respect to those parameters.
    """
    def __init__(self, model, data, **kwargs):
        """
        Construct a new Solver instance.
        Required arguments:
        - model: A model object conforming to the API described above
        - data: A dictionary of training and validation data containing:
          'X_train': Array, shape (N_train, d_1, ..., d_k) of training images
          'X_val': Array, shape (N_val, d_1, ..., d_k) of validation images
          'y_train': Array, shape (N_train,) of labels for training images
          'y_val': Array, shape (N_val,) of labels for validation images
        Optional arguments:
        - update_rule: A string giving the name of an update rule in optim.py.
          Default is 'sgd'.
        - optim_config: A dictionary containing hyperparameters that will be
          passed to the chosen update rule. Each update rule requires different
          hyperparameters (see optim.py) but all update rules require a
          'learning_rate' parameter so that should always be present.
        - lr_decay: A scalar for learning rate decay; after each epoch the
          learning rate is multiplied by this value.
        - batch_size: Size of minibatches used to compute loss and gradient
          during training.
        - num_epochs: The number of epochs to run for during training.
        - print_every: Integer; training losses will be printed every
          print_every iterations.
        - verbose: Boolean; if set to false then no output will be printed
          during training.
        - num_train_samples: Number of training samples used to check training
          accuracy; default is 1000; set to None to use entire training set.
        - num_val_samples: Number of validation samples to use to check val
          accuracy; default is None, which uses the entire validation set.
        - checkpoint_name: If not None, then save model checkpoints here every
          epoch.
        """
        self.model = model
        self.X_train = data['X_train']
        self.y_train = data['y_train']
        self.X_val = data['X_val']
        self.y_val = data['y_val']
        # Unpack keyword arguments
        self.update_rule = kwargs.pop('update_rule', 'sgd')
        self.optim_config = kwargs.pop('optim_config', {})
        self.lr_decay = kwargs.pop('lr_decay', 1.0)
        self.batch_size = kwargs.pop('batch_size', 100)
        self.num_epochs = kwargs.pop('num_epochs', 10)
        self.num_train_samples = kwargs.pop('num_train_samples', 1000)
        self.num_val_samples = kwargs.pop('num_val_samples', None)
        self.checkpoint_name = kwargs.pop('checkpoint_name', None)
        self.print_every = kwargs.pop('print_every', 10)
        self.verbose = kwargs.pop('verbose', True)
        # Throw an error if there are extra keyword arguments
        if len(kwargs) > 0:
            extra = ', '.join('"%s"' % k for k in list(kwargs.keys()))
            raise ValueError('Unrecognized arguments %s' % extra)
        # Make sure the update rule exists, then replace the string
        # name with the actual function
        if not hasattr(optim, self.update_rule):
            raise ValueError('Invalid update_rule "%s"' % self.update_rule)
        self.update_rule = getattr(optim, self.update_rule)
        self._reset()
    def _reset(self):
        """
        Set up some book-keeping variables for optimization. Don't call this
        manually.
        """
        # Set up some variables for book-keeping
        self.epoch = 0
        self.best_val_acc = 0
        self.best_params = {}
        self.loss_history = []
        self.train_acc_history = []
        self.val_acc_history = []
        # Make a deep copy of the optim_config for each parameter
        # (each parameter keeps its own per-parameter optimizer state,
        # e.g. momentum velocity, inside its own config dict)
        self.optim_configs = {}
        for p in self.model.params:
            d = {k: v for k, v in self.optim_config.items()}
            self.optim_configs[p] = d
    def _step(self):
        """
        Make a single gradient update. This is called by train() and should not
        be called manually.
        """
        # Make a minibatch of training data
        # (np.random.choice samples WITH replacement here, so a minibatch
        # may contain duplicate examples)
        num_train = self.X_train.shape[0]
        batch_mask = np.random.choice(num_train, self.batch_size)
        X_batch = self.X_train[batch_mask]
        y_batch = self.y_train[batch_mask]
        # Compute loss and gradient
        loss, grads = self.model.loss(X_batch, y_batch)
        self.loss_history.append(loss)
        # Perform a parameter update
        for p, w in self.model.params.items():
            dw = grads[p]
            config = self.optim_configs[p]
            next_w, next_config = self.update_rule(w, dw, config)
            self.model.params[p] = next_w
            self.optim_configs[p] = next_config
    def _save_checkpoint(self):
        # No-op unless a checkpoint_name was supplied at construction.
        if self.checkpoint_name is None: return
        # The whole model object is pickled, so checkpoints are only
        # loadable where the model's class definition is importable.
        checkpoint = {
          'model': self.model,
          'update_rule': self.update_rule,
          'lr_decay': self.lr_decay,
          'optim_config': self.optim_config,
          'batch_size': self.batch_size,
          'num_train_samples': self.num_train_samples,
          'num_val_samples': self.num_val_samples,
          'epoch': self.epoch,
          'loss_history': self.loss_history,
          'train_acc_history': self.train_acc_history,
          'val_acc_history': self.val_acc_history,
        }
        filename = '%s_epoch_%d.pkl' % (self.checkpoint_name, self.epoch)
        if self.verbose:
            print('Saving checkpoint to "%s"' % filename)
        with open(filename, 'wb') as f:
            pickle.dump(checkpoint, f)
    def check_accuracy(self, X, y, num_samples=None, batch_size=100):
        """
        Check accuracy of the model on the provided data.
        Inputs:
        - X: Array of data, of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,)
        - num_samples: If not None, subsample the data and only test the model
          on num_samples datapoints.
        - batch_size: Split X and y into batches of this size to avoid using
          too much memory.
        Returns:
        - acc: Scalar giving the fraction of instances that were correctly
          classified by the model.
        """
        # Maybe subsample the data
        # (subsampling uses np.random.choice with replacement, so the
        # reported accuracy is over a sample that may repeat points)
        N = X.shape[0]
        if num_samples is not None and N > num_samples:
            mask = np.random.choice(N, num_samples)
            N = num_samples
            X = X[mask]
            y = y[mask]
        # Compute predictions in batches
        num_batches = N // batch_size
        if N % batch_size != 0:
            num_batches += 1
        y_pred = []
        for i in range(num_batches):
            start = i * batch_size
            end = (i + 1) * batch_size
            # y omitted -> model.loss runs a test-time forward pass
            scores = self.model.loss(X[start:end])
            y_pred.append(np.argmax(scores, axis=1))
        y_pred = np.hstack(y_pred)
        acc = np.mean(y_pred == y)
        return acc
    def train(self):
        """
        Run optimization to train the model.
        """
        num_train = self.X_train.shape[0]
        iterations_per_epoch = max(num_train // self.batch_size, 1)
        num_iterations = self.num_epochs * iterations_per_epoch
        for t in range(num_iterations):
            self._step()
            # Maybe print training loss
            if self.verbose and t % self.print_every == 0:
                print('(Iteration %d / %d) loss: %f' % (
                       t + 1, num_iterations, self.loss_history[-1]))
            # At the end of every epoch, increment the epoch counter and decay
            # the learning rate.
            epoch_end = (t + 1) % iterations_per_epoch == 0
            if epoch_end:
                self.epoch += 1
                for k in self.optim_configs:
                    self.optim_configs[k]['learning_rate'] *= self.lr_decay
            # Check train and val accuracy on the first iteration, the last
            # iteration, and at the end of each epoch.
            first_it = (t == 0)
            last_it = (t == num_iterations - 1)
            if first_it or last_it or epoch_end:
                train_acc = self.check_accuracy(self.X_train, self.y_train,
                    num_samples=self.num_train_samples)
                val_acc = self.check_accuracy(self.X_val, self.y_val,
                    num_samples=self.num_val_samples)
                self.train_acc_history.append(train_acc)
                self.val_acc_history.append(val_acc)
                self._save_checkpoint()
                if self.verbose:
                    print('(Epoch %d / %d) train acc: %f; val_acc: %f' % (
                           self.epoch, self.num_epochs, train_acc, val_acc))
                # Keep track of the best model
                # (parameters are copied so later training steps cannot
                # mutate the stored best snapshot)
                if val_acc > self.best_val_acc:
                    self.best_val_acc = val_acc
                    self.best_params = {}
                    for k, v in self.model.params.items():
                        self.best_params[k] = v.copy()
        # At the end of training swap the best params into the model
        self.model.params = self.best_params
|
[
"noreply@github.com"
] |
noreply@github.com
|
01e16c5be3860add1a9a44be60f7efbac7e004a4
|
2b5213572cec414bcc32e34a8e50c7bec98f4edb
|
/org_tel_spider/settings.py
|
82f81f197c2b9c752dbf780b77976084b86ee1fa
|
[] |
no_license
|
zhangdaniel0715/org_tel_spider
|
b5f051c6afd522ce8f2c39068c689408c1349faa
|
2eb29e53b390bd4c4549164b8ba06ae68280bbea
|
refs/heads/master
| 2021-07-06T00:39:17.333774
| 2017-09-29T03:51:35
| 2017-09-29T03:51:35
| 104,741,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for org_tel_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'org_tel_spider'
SPIDER_MODULES = ['org_tel_spider.spiders']
NEWSPIDER_MODULE = 'org_tel_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'org_tel_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'org_tel_spider.middlewares.OrgTelSpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'org_tel_spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# The value (300) is the pipeline's run-order priority: lower numbers run
# earlier; Scrapy's convention keeps these in the 0-1000 range.
ITEM_PIPELINES = {
    'org_tel_spider.pipelines.OrgTelSpiderPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"zhangdaniel0715@gmail.com"
] |
zhangdaniel0715@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.