| Column | Type | Range / Values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M (nullable) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
| 1ae07d9fe3c8063862d3cb81c34ef5f2de166ac7 | 6cc7698c8b7342c748fc04d400f549e44271e098 | /predict.py | 7896e023cf22e2897189599ccf0f0b07fc776484 | ["MIT"] | permissive | wanghuajing/unet-pytorch | 83f3da145f8c023ac9133f7c25f4ad29ea229c85 | 53e187e15d3676f08c8f594017f026047faff66a | refs/heads/main | 2023-04-01T17:59:17.836596 | 2021-04-11T15:35:19 | 2021-04-11T15:35:19 | 356,885,695 | 0 | 0 | MIT | 2021-04-11T14:11:11 | 2021-04-11T14:11:10 | null | UTF-8 | Python | false | false | 1,300 | py |
'''
Notes on predict.py:
1. This code cannot do batch prediction directly. For batch prediction, use
   os.listdir() to walk a folder and Image.open to open each image file for
   prediction. See get_miou_prediction.py for the full workflow; the traversal
   is implemented there.
2. To save a result, use r_image.save("img.jpg").
3. To keep the original image and the segmentation map unblended, set the
   blend parameter to False.
4. To extract the region that corresponds to a mask class, refer to the part
   of detect_image that draws the prediction: it decides the class of each
   pixel and then collects the matching pixels per class:
seg_img = np.zeros((np.shape(pr)[0],np.shape(pr)[1],3))
for c in range(self.num_classes):
    seg_img[:, :, 0] += ((pr == c)*( self.colors[c][0] )).astype('uint8')
    seg_img[:, :, 1] += ((pr == c)*( self.colors[c][1] )).astype('uint8')
    seg_img[:, :, 2] += ((pr == c)*( self.colors[c][2] )).astype('uint8')
'''
from PIL import Image
from unet import Unet
unet = Unet()
while True:
img = input('Input image filename:')
try:
image = Image.open(img)
    except Exception:
print('Open Error! Try again!')
continue
else:
r_image = unet.detect_image(image)
r_image.show()
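# --- A minimal batch-prediction sketch (not in the original file) ---
# This follows note 1 of the docstring: walk a folder with os.listdir(),
# open each image with Image.open, and save the result. The "imgs" and
# "results" folder names are hypothetical placeholders; this would replace
# the interactive loop above.
import os
for fname in os.listdir("imgs"):
    if fname.lower().endswith((".jpg", ".jpeg", ".png")):
        image = Image.open(os.path.join("imgs", fname))
        r_image = unet.detect_image(image)
        r_image.save(os.path.join("results", fname))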
| ["noreply@github.com"] | wanghuajing.noreply@github.com |
| 35aa5f67eaf92ada9ca02bdadece45cd5b1d6a23 | 615d597cea35a44f15b7777b1c0817ed2e0f6ddc | /stock/models/stock_rule.py | aa3bd687f9c8a966a3e208d5dfe4e0149f9a862c | [] | no_license | madrara256/e-procurement | 87aa5b2d3929fa995e018e6c36e34b35c64c1c24 | 2de4a3bbc4ded4c2a13d7b9654ecf42c8d0764ec | refs/heads/master | 2023-02-19T00:02:11.309658 | 2021-01-11T09:17:04 | 2021-01-11T09:17:04 | 285,800,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,889 | py |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import OrderedDict
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.tools.misc import split_every
from psycopg2 import OperationalError
from odoo import api, fields, models, registry, _
from odoo.osv import expression
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class StockRule(models.Model):
""" A rule describe what a procurement should do; produce, buy, move, ... """
_name = 'stock.rule'
_description = "Stock Rule"
_order = "sequence, id"
name = fields.Char(
'Name', required=True, translate=True,
help="This field will fill the packing origin and the name of its moves")
active = fields.Boolean(
'Active', default=True,
help="If unchecked, it will allow you to hide the rule without removing it.")
group_propagation_option = fields.Selection([
('none', 'Leave Empty'),
('propagate', 'Propagate'),
('fixed', 'Fixed')], string="Propagation of Procurement Group", default='propagate')
group_id = fields.Many2one('procurement.group', 'Fixed Procurement Groups')
action = fields.Selection(
selection=[('pull', 'Pull From'), ('push', 'Push To'), ('pull_push', 'Pull & Push')], string='Action',
required=True)
sequence = fields.Integer('Sequence', default=20)
company_id = fields.Many2one('res.company', 'Company',
default=lambda self: self.env.user.company_id)
location_id = fields.Many2one('stock.location', 'Destination Location', required=True)
location_src_id = fields.Many2one('stock.location', 'Source Location')
route_id = fields.Many2one('stock.location.route', 'Route', required=True, ondelete='cascade')
procure_method = fields.Selection([
('make_to_stock', 'Take From Stock'),
('make_to_order', 'Trigger Another Rule')], string='Move Supply Method',
default='make_to_stock', required=True,
help="""Create Procurement: A procurement will be created in the source location and the system will try to find a rule to resolve it. The available stock will be ignored.
Take from Stock: The products will be taken from the available stock.""")
route_sequence = fields.Integer('Route Sequence', related='route_id.sequence', store=True, readonly=False, compute_sudo=True)
picking_type_id = fields.Many2one(
'stock.picking.type', 'Operation Type',
required=True)
delay = fields.Integer('Delay', default=0, help="The expected date of the created transfer will be computed based on this delay.")
partner_address_id = fields.Many2one('res.partner', 'Partner Address', help="Address where goods should be delivered. Optional.")
propagate = fields.Boolean(
'Propagate cancel and split', default=True,
help="When ticked, if the move is splitted or cancelled, the next move will be too.")
warehouse_id = fields.Many2one('stock.warehouse', 'Warehouse')
propagate_warehouse_id = fields.Many2one(
'stock.warehouse', 'Warehouse to Propagate',
help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)")
auto = fields.Selection([
('manual', 'Manual Operation'),
('transparent', 'Automatic No Step Added')], string='Automatic Move',
default='manual', index=True, required=True,
help="The 'Manual Operation' value will create a stock move after the current one. "
"With 'Automatic No Step Added', the location is replaced in the original move.")
rule_message = fields.Html(compute='_compute_action_message')
@api.onchange('picking_type_id')
def _onchange_picking_type(self):
""" Modify locations to the default picking type's locations source and
destination.
"""
self.location_src_id = self.picking_type_id.default_location_src_id.id
self.location_id = self.picking_type_id.default_location_dest_id.id
@api.onchange('route_id', 'company_id')
def _onchange_route(self):
""" Ensure that the rule's company is the same than the route's company. """
if self.route_id.company_id:
self.company_id = self.route_id.company_id
if self.picking_type_id.warehouse_id.company_id != self.route_id.company_id:
self.picking_type_id = False
domain = {'company_id': self.route_id.company_id and [('id', '=', self.route_id.company_id.id)] or []}
return {'domain': domain}
def _get_message_values(self):
""" Return the source, destination and picking_type applied on a stock
rule. The purpose of this function is to avoid code duplication in
_get_message_dict functions since it often requires those data.
"""
source = self.location_src_id and self.location_src_id.display_name or _('Source Location')
destination = self.location_id and self.location_id.display_name or _('Destination Location')
operation = self.picking_type_id and self.picking_type_id.name or _('Operation Type')
return source, destination, operation
def _get_message_dict(self):
""" Return a dict with the different possible message used for the
rule message. It should return one message for each stock.rule action
(except push and pull). This function is override in mrp and
purchase_stock in order to complete the dictionary.
"""
message_dict = {}
source, destination, operation = self._get_message_values()
if self.action in ('push', 'pull', 'pull_push'):
suffix = ""
if self.procure_method == 'make_to_order' and self.location_src_id:
suffix = _("<br>A need is created in <b>%s</b> and a rule will be triggered to fulfill it.") % (source)
message_dict = {
'pull': _('When products are needed in <b>%s</b>, <br/> <b>%s</b> are created from <b>%s</b> to fulfill the need.') % (destination, operation, source) + suffix,
'push': _('When products arrive in <b>%s</b>, <br/> <b>%s</b> are created to send them in <b>%s</b>.') % (source, operation, destination)
}
return message_dict
@api.depends('action', 'location_id', 'location_src_id', 'picking_type_id', 'procure_method')
def _compute_action_message(self):
""" Generate dynamicaly a message that describe the rule purpose to the
end user.
"""
for rule in self.filtered(lambda rule: rule.action):
message_dict = rule._get_message_dict()
message = message_dict.get(rule.action) and message_dict[rule.action] or ""
if rule.action == 'pull_push':
message = message_dict['pull'] + "<br/><br/>" + message_dict['push']
rule.rule_message = message
def _run_push(self, move):
""" Apply a push rule on a move.
If the rule is 'no step added' it will modify the destination location
on the move.
If the rule is 'manual operation' it will generate a new move in order
to complete the section define by the rule.
Care this function is not call by method run. It is called explicitely
in stock_move.py inside the method _push_apply
"""
new_date = fields.Datetime.to_string(move.date_expected + relativedelta(days=self.delay))
if self.auto == 'transparent':
move.write({
'date': new_date,
'date_expected': new_date,
'location_dest_id': self.location_id.id})
# avoid looping if a push rule is not well configured; otherwise call again push_apply to see if a next step is defined
if self.location_id != move.location_dest_id:
# TDE FIXME: should probably be done in the move model IMO
move._push_apply()
else:
new_move_vals = self._push_prepare_move_copy_values(move, new_date)
new_move = move.sudo().copy(new_move_vals)
move.write({'move_dest_ids': [(4, new_move.id)]})
new_move._action_confirm()
def _push_prepare_move_copy_values(self, move_to_copy, new_date):
company_id = self.company_id.id
if not company_id:
company_id = self.sudo().warehouse_id and self.sudo().warehouse_id.company_id.id or self.sudo().picking_type_id.warehouse_id.company_id.id
new_move_vals = {
'origin': move_to_copy.origin or move_to_copy.picking_id.name or "/",
'location_id': move_to_copy.location_dest_id.id,
'location_dest_id': self.location_id.id,
'date': new_date,
'date_expected': new_date,
'company_id': company_id,
'picking_id': False,
'picking_type_id': self.picking_type_id.id,
'propagate': self.propagate,
'warehouse_id': self.warehouse_id.id,
}
return new_move_vals
def _run_pull(self, product_id, product_qty, product_uom, location_id, name, origin, values):
if not self.location_src_id:
msg = _('No source location defined on stock rule: %s!') % (self.name, )
raise UserError(msg)
# create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
# Search if picking with move for it exists already:
group_id = False
if self.group_propagation_option == 'propagate':
group_id = values.get('group_id', False) and values['group_id'].id
elif self.group_propagation_option == 'fixed':
group_id = self.group_id.id
data = self._get_stock_move_values(product_id, product_qty, product_uom, location_id, name, origin, values, group_id)
# Since action_confirm launch following procurement_group we should activate it.
move = self.env['stock.move'].sudo().with_context(force_company=data.get('company_id', False)).create(data)
move._action_confirm()
return True
def _get_custom_move_fields(self):
""" The purpose of this method is to be override in order to easily add
fields from procurement 'values' argument to move data.
"""
return []
def _get_stock_move_values(self, product_id, product_qty, product_uom, location_id, name, origin, values, group_id):
''' Returns a dictionary of values that will be used to create a stock move from a procurement.
This function assumes that the given procurement has a rule (action == 'pull' or 'pull_push') set on it.
:param procurement: browse record
:rtype: dictionary
'''
date_expected = fields.Datetime.to_string(
fields.Datetime.from_string(values['date_planned']) - relativedelta(days=self.delay or 0)
)
# it is possible that we've already got some move done, so check for the done qty and create
# a new move with the correct qty
qty_left = product_qty
move_values = {
'name': name[:2000],
'company_id': self.company_id.id or self.location_src_id.company_id.id or self.location_id.company_id.id or values['company_id'].id,
'product_id': product_id.id,
'product_uom': product_uom.id,
'product_uom_qty': qty_left,
'partner_id': self.partner_address_id.id or (values.get('group_id', False) and values['group_id'].partner_id.id) or False,
'location_id': self.location_src_id.id,
'location_dest_id': location_id.id,
'move_dest_ids': values.get('move_dest_ids', False) and [(4, x.id) for x in values['move_dest_ids']] or [],
'rule_id': self.id,
'procure_method': self.procure_method,
'origin': origin,
'picking_type_id': self.picking_type_id.id,
'group_id': group_id,
'route_ids': [(4, route.id) for route in values.get('route_ids', [])],
'warehouse_id': self.propagate_warehouse_id.id or self.warehouse_id.id,
'date': date_expected,
'date_expected': date_expected,
'propagate': self.propagate,
'priority': values.get('priority', "1"),
}
for field in self._get_custom_move_fields():
if field in values:
move_values[field] = values.get(field)
return move_values
def _log_next_activity(self, product_id, note):
existing_activity = self.env['mail.activity'].search([('res_id', '=', product_id.product_tmpl_id.id), ('res_model_id', '=', self.env.ref('product.model_product_template').id),
('note', '=', note)])
if not existing_activity:
# If the user deleted todo activity type.
try:
activity_type_id = self.env.ref('mail.mail_activity_data_todo').id
except:
activity_type_id = False
self.env['mail.activity'].create({
'activity_type_id': activity_type_id,
'note': note,
'user_id': product_id.responsible_id.id,
'res_id': product_id.product_tmpl_id.id,
'res_model_id': self.env.ref('product.model_product_template').id,
})
def _make_po_get_domain(self, values, partner):
return ()
class ProcurementGroup(models.Model):
"""
The procurement group class is used to group products together
when computing procurements. (tasks, physical products, ...)
    The goal is that when you have one sales order of several products
    and the products are pulled from the same or several location(s), the
    moves stay grouped into pickings that represent the sales order.
    Used in: sales orders (to group delivery order lines like the SO), pull/push
    rules (to pack like the delivery order), on orderpoints (e.g. for wave
    picking all the similar products together).
    Grouping is done only if the source and the destination are the same.
    Suppose you have 4 lines on a picking from Output, where 2 lines will need
    to come from Input (crossdock) and 2 lines come from Stock -> Output. As
    all four will have the same group id from the SO, the move from Input will
    have a stock.picking with 2 grouped lines and the move from Stock will
    have 2 grouped lines too.
The name is usually the name of the original document (sales order) or a
sequence computed if created manually.
"""
_name = 'procurement.group'
_description = 'Procurement Group'
_order = "id desc"
partner_id = fields.Many2one('res.partner', 'Partner')
name = fields.Char(
'Reference',
default=lambda self: self.env['ir.sequence'].next_by_code('procurement.group') or '',
required=True)
move_type = fields.Selection([
('direct', 'Partial'),
('one', 'All at once')], string='Delivery Type', default='direct',
required=True)
@api.model
def run(self, product_id, product_qty, product_uom, location_id, name, origin, values):
""" Method used in a procurement case. The purpose is to supply the
product passed as argument in the location also given as an argument.
In order to be able to find a suitable location that provide the product
it will search among stock.rule.
"""
values.setdefault('company_id', self.env['res.company']._company_default_get('procurement.group'))
values.setdefault('priority', '1')
values.setdefault('date_planned', fields.Datetime.now())
rule = self._get_rule(product_id, location_id, values)
if not rule:
raise UserError(_('No procurement rule found in location "%s" for product "%s".\n Check routes configuration.') % (location_id.display_name, product_id.display_name))
action = 'pull' if rule.action == 'pull_push' else rule.action
if hasattr(rule, '_run_%s' % action):
getattr(rule, '_run_%s' % action)(product_id, product_qty, product_uom, location_id, name, origin, values)
else:
_logger.error("The method _run_%s doesn't exist on the procument rules" % action)
return True
@api.model
def _search_rule(self, route_ids, product_id, warehouse_id, domain):
""" First find a rule among the ones defined on the procurement
group, then try on the routes defined for the product, finally fallback
on the default behavior
"""
if warehouse_id:
domain = expression.AND([['|', ('warehouse_id', '=', warehouse_id.id), ('warehouse_id', '=', False)], domain])
Rule = self.env['stock.rule']
res = self.env['stock.rule']
if route_ids:
res = Rule.search(expression.AND([[('route_id', 'in', route_ids.ids)], domain]), order='route_sequence, sequence', limit=1)
if not res:
product_routes = product_id.route_ids | product_id.categ_id.total_route_ids
if product_routes:
res = Rule.search(expression.AND([[('route_id', 'in', product_routes.ids)], domain]), order='route_sequence, sequence', limit=1)
if not res and warehouse_id:
warehouse_routes = warehouse_id.route_ids
if warehouse_routes:
res = Rule.search(expression.AND([[('route_id', 'in', warehouse_routes.ids)], domain]), order='route_sequence, sequence', limit=1)
return res
@api.model
def _get_rule(self, product_id, location_id, values):
""" Find a pull rule for the location_id, fallback on the parent
locations if it could not be found.
"""
result = False
location = location_id
while (not result) and location:
result = self._search_rule(values.get('route_ids', False), product_id, values.get('warehouse_id', False), [('location_id', '=', location.id), ('action', '!=', 'push')])
location = location.location_id
return result
def _merge_domain(self, values, rule, group_id):
return [
('group_id', '=', group_id), # extra logic?
('location_id', '=', rule.location_src_id.id),
('location_dest_id', '=', values['location_id'].id),
('picking_type_id', '=', rule.picking_type_id.id),
('picking_id.printed', '=', False),
('picking_id.state', 'in', ['draft', 'confirmed', 'waiting', 'assigned']),
('picking_id.backorder_id', '=', False),
('product_id', '=', values['product_id'].id)]
@api.model
def _get_moves_to_assign_domain(self):
return expression.AND([
[('state', 'in', ['confirmed', 'partially_available'])],
[('product_uom_qty', '!=', 0.0)]
])
@api.model
def _run_scheduler_tasks(self, use_new_cursor=False, company_id=False):
# Minimum stock rules
self.sudo()._procure_orderpoint_confirm(use_new_cursor=use_new_cursor, company_id=company_id)
# Search all confirmed stock_moves and try to assign them
domain = self._get_moves_to_assign_domain()
moves_to_assign = self.env['stock.move'].search(domain, limit=None,
order='priority desc, date_expected asc')
for moves_chunk in split_every(100, moves_to_assign.ids):
self.env['stock.move'].browse(moves_chunk)._action_assign()
if use_new_cursor:
self._cr.commit()
if use_new_cursor:
self._cr.commit()
# Merge duplicated quants
self.env['stock.quant']._merge_quants()
self.env['stock.quant']._unlink_zero_quants()
@api.model
def run_scheduler(self, use_new_cursor=False, company_id=False):
""" Call the scheduler in order to check the running procurements (super method), to check the minimum stock rules
and the availability of moves. This function is intended to be run for all the companies at the same time, so
we run functions as SUPERUSER to avoid intercompanies and access rights issues. """
try:
if use_new_cursor:
cr = registry(self._cr.dbname).cursor()
self = self.with_env(self.env(cr=cr)) # TDE FIXME
self._run_scheduler_tasks(use_new_cursor=use_new_cursor, company_id=company_id)
finally:
if use_new_cursor:
try:
self._cr.close()
except Exception:
pass
return {}
@api.model
def _procurement_from_orderpoint_get_order(self):
return 'location_id'
@api.model
def _procurement_from_orderpoint_get_grouping_key(self, orderpoint_ids):
orderpoints = self.env['stock.warehouse.orderpoint'].browse(orderpoint_ids)
return orderpoints.location_id.id
@api.model
def _procurement_from_orderpoint_get_groups(self, orderpoint_ids):
""" Make groups for a given orderpoint; by default schedule all operations in one without date """
return [{'to_date': False, 'procurement_values': dict()}]
@api.model
def _procurement_from_orderpoint_post_process(self, orderpoint_ids):
return True
def _get_orderpoint_domain(self, company_id=False):
domain = [('company_id', '=', company_id)] if company_id else []
domain += [('product_id.active', '=', True)]
return domain
@api.model
def _procure_orderpoint_confirm(self, use_new_cursor=False, company_id=False):
""" Create procurements based on orderpoints.
:param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing
1000 orderpoints.
This is appropriate for batch jobs only.
"""
if company_id and self.env.user.company_id.id != company_id:
# To ensure that the company_id is taken into account for
# all the processes triggered by this method
# i.e. If a PO is generated by the run of the procurements the
# sequence to use is the one for the specified company not the
# one of the user's company
self = self.with_context(company_id=company_id, force_company=company_id)
OrderPoint = self.env['stock.warehouse.orderpoint']
domain = self._get_orderpoint_domain(company_id=company_id)
orderpoints_noprefetch = OrderPoint.with_context(prefetch_fields=False).search(domain,
order=self._procurement_from_orderpoint_get_order()).ids
while orderpoints_noprefetch:
if use_new_cursor:
cr = registry(self._cr.dbname).cursor()
self = self.with_env(self.env(cr=cr))
OrderPoint = self.env['stock.warehouse.orderpoint']
orderpoints = OrderPoint.browse(orderpoints_noprefetch[:1000])
orderpoints_noprefetch = orderpoints_noprefetch[1000:]
# Calculate groups that can be executed together
location_data = OrderedDict()
def makedefault():
return {
'products': self.env['product.product'],
'orderpoints': self.env['stock.warehouse.orderpoint'],
'groups': []
}
for orderpoint in orderpoints:
key = self._procurement_from_orderpoint_get_grouping_key([orderpoint.id])
if not location_data.get(key):
location_data[key] = makedefault()
location_data[key]['products'] += orderpoint.product_id
location_data[key]['orderpoints'] += orderpoint
location_data[key]['groups'] = self._procurement_from_orderpoint_get_groups([orderpoint.id])
for location_id, location_data in location_data.items():
location_orderpoints = location_data['orderpoints']
product_context = dict(self._context, location=location_orderpoints[0].location_id.id)
substract_quantity = location_orderpoints._quantity_in_progress()
for group in location_data['groups']:
if group.get('from_date'):
product_context['from_date'] = group['from_date'].strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if group['to_date']:
product_context['to_date'] = group['to_date'].strftime(DEFAULT_SERVER_DATETIME_FORMAT)
product_quantity = location_data['products'].with_context(product_context)._product_available()
for orderpoint in location_orderpoints:
try:
op_product_virtual = product_quantity[orderpoint.product_id.id]['virtual_available']
if op_product_virtual is None:
continue
if float_compare(op_product_virtual, orderpoint.product_min_qty, precision_rounding=orderpoint.product_uom.rounding) <= 0:
qty = max(orderpoint.product_min_qty, orderpoint.product_max_qty) - op_product_virtual
remainder = orderpoint.qty_multiple > 0 and qty % orderpoint.qty_multiple or 0.0
if float_compare(remainder, 0.0, precision_rounding=orderpoint.product_uom.rounding) > 0:
qty += orderpoint.qty_multiple - remainder
if float_compare(qty, 0.0, precision_rounding=orderpoint.product_uom.rounding) < 0:
continue
qty -= substract_quantity[orderpoint.id]
qty_rounded = float_round(qty, precision_rounding=orderpoint.product_uom.rounding)
if qty_rounded > 0:
values = orderpoint._prepare_procurement_values(qty_rounded, **group['procurement_values'])
try:
with self._cr.savepoint():
self.env['procurement.group'].run(orderpoint.product_id, qty_rounded, orderpoint.product_uom, orderpoint.location_id,
orderpoint.name, orderpoint.name, values)
except UserError as error:
self.env['stock.rule']._log_next_activity(orderpoint.product_id, error.name)
self._procurement_from_orderpoint_post_process([orderpoint.id])
if use_new_cursor:
cr.commit()
except OperationalError:
if use_new_cursor:
orderpoints_noprefetch += [orderpoint.id]
cr.rollback()
continue
else:
raise
try:
if use_new_cursor:
cr.commit()
except OperationalError:
if use_new_cursor:
cr.rollback()
continue
else:
raise
if use_new_cursor:
cr.commit()
cr.close()
return {}
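# --- Usage sketch (not in the original file) ---
# Under the run() signature defined above, a pull procurement could be
# triggered from an Odoo shell roughly as below. The record lookups and
# names are hypothetical; only the argument list comes from the code above.
#
#     product = env['product.product'].browse(42)        # hypothetical id
#     location = env.ref('stock.stock_location_stock')   # WH/Stock
#     env['procurement.group'].run(
#         product, 5.0, product.uom_id, location,
#         'SO0042', 'SO0042',    # name and origin (hypothetical)
#         {},                    # values; run() fills company/priority/date defaults
#     )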
| ["herbertichama@outlook.com"] | herbertichama@outlook.com |
| 65bb81d4314999319c0f30744efefc7124c14693 | 18376b2103475a887eecd81b5a51cfc6dab2d2b9 | /toh/hero/migrations/0001_initial.py | 19677e5eb7cfc2724cd66178312df90fdbae9937 | [] | no_license | ray017/swpp2020-django-practice | c3bb2bdcbcb21146baa2df83aec74529ca19c270 | 0e24718dcc21c549485471ef9699a8e9f9ee7043 | refs/heads/main | 2022-12-31T11:32:01.538840 | 2020-10-16T06:21:43 | 2020-10-16T06:21:43 | 304,281,159 | 0 | 0 | null | 2020-10-15T09:51:02 | 2020-10-15T09:51:01 | null | UTF-8 | Python | false | false | 484 | py |
# Generated by Django 3.1.2 on 2020-10-15 10:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hero',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
],
),
]
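# --- Sketch (not in the original file) ---
# The models.py that would generate this initial migration is presumably just:
#
#     from django.db import models
#
#     class Hero(models.Model):
#         name = models.CharField(max_length=120)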
| ["ray017@LAPTOP-V7DL32V1.localdomain"] | ray017@LAPTOP-V7DL32V1.localdomain |
| 8cf9276b48526e7b2877ed7f30d0e043f4f3825a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03937/s717343350.py | 4fc16d3130e5ed7a94fa2de6bd7cac4dd26a9a42 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py |
from itertools import chain
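# Added note (an interpretation, not in the original submission): a monotone
# right/down path from the top-left to the bottom-right of an H x W grid
# visits exactly H + W - 1 cells, so the check below reads as: the '#' cells
# can form such a path only if there are exactly H + W - 1 of them.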
H, W = map(int, input().split())
count = 0
for _ in range(H):
l = list(input())
count += l.count("#")
if count == H+W - 1:
print("Possible")
else:
print("Impossible")
| ["66529651+Aastha2104@users.noreply.github.com"] | 66529651+Aastha2104@users.noreply.github.com |
| 0854de8432c78fee1ead984ad10827d1222f3980 | e3a0a30411efb2753f3059ec23d037d84f06f4b3 | /Week01/hw0pr2b.py | e419db1302242770c4236ff505e68142fd957bf1 | [] | no_license | timc823/IST341_Course_Work | 994d2f389ce0dd3147a7799485fe7017cef81f18 | 0c39e1b03efaf157abcd0790fc53d675f4fd383d | refs/heads/master | 2022-04-25T20:49:36.278826 | 2020-04-22T19:41:31 | 2020-04-22T19:41:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,819 | py |
# coding: utf-8
#
# hw0pr2b.py
#
import time
def adventure():
sdelay = 1.0 # change to 0.0 for testing or speed runs,
mdelay = 2.0 # ..larger for dramatic effect!
ldelay = 3.0
print()
print("Notice!! Please follow the instruction to enter the answer and make sure there's no typo")
print("Please also follow the upper or the lower case that hint provide")
print("Otherwise you might cause the program crash or the story mignt now show correctly.")
print()
time.sleep(ldelay)
print("Let's start now!")
time.sleep(mdelay)
print()
print("Recently, I went to a road trip.")
print("So, if you are interested, I can share my story with you.")
print()
    interested = input("Please tell me, are you interested in my story? [yes/no]")
#4 An if, elif, ... control structure (with one or more elifs but no trailing else at all)
time.sleep(sdelay)
if interested =='yes':
print("Cool, so let's start now!")
elif interested != 'yes':
print("Alright, I think you can force quit this program now :(")
print()
    cityvisited = input("Before we start, do you want to make a guess how long I drove? [yes/no]")
#5 An if control structure (with no trailing elif nor trailing else at all)
if cityvisited =='yes':
print("Cool, but I will keep this as secret now!")
print()
time.sleep(sdelay)
username = input("Now, tell me a name that you prefer me to call you: [Enter a name] ")
print()
print("Welcome,", username, "Now, let me start to tell you my story")
print("If you want to go on a roadtrip, where do you want to go?")
print("For me, I study in LA now, so I want to drive all the way up to Vancouver")
print("However, there is a limitation for my visa so I decided only drive to Seattle")
print("The total distance is 3152 miles, crazy huh?")
print("During this trip, I visited Sacramento,Portland and Seattle")
print("There are two of them I love most, make a guess which two cities are my best love")
time.sleep(sdelay)
print()
firstg = input("Please make a first guess: [Sacramento/Portland/Seattle]")
print('Remember, DO NOT enter the same guess for your second guess')
time.sleep(mdelay)
print('Ready for the second guess now? ')
time.sleep(sdelay)
print()
secondg = input("Please make a second guess: [Sacramento/Portland/Seattle]")
time.sleep(mdelay)
print('......loading')
time.sleep(ldelay)
#2 An if, elif, elif, ... and else control structure (with at least two elifs)
    if firstg =='Sacramento' and secondg =='Portland':
        print("Nice guess, but Sacramento is too quiet and there was nothing to visit when I went there")
    elif firstg =='Sacramento' and secondg =='Seattle':
        print("Nice guess, but Sacramento is too quiet and there was nothing to visit when I went there")
    elif firstg =='Portland' and secondg =='Seattle':
        print("Brilliant, you got me!")
    elif firstg =='Seattle' and secondg =='Portland':
        print("Brilliant, you got me!")
    elif firstg =='Portland' and secondg =='Sacramento':
        print("Nice guess, but Sacramento is too quiet and there was nothing to visit when I went there")
    elif firstg =='Seattle' and secondg =='Sacramento':
        print("Nice guess, but Sacramento is too quiet and there was nothing to visit when I went there")
print()
time.sleep(ldelay)
    print('Now, I am thinking about my next trip, but I am not sure where to visit')
print('Can you give me a hand?')
print("Your quest: Choose to visit Kyoto or Osaka")
print()
Destination = input("Where should I go? [Kyoto/Osaka]")
#1 An if, elif, and else control structure (with exactly one elif)
if Destination == "Kyoto":
print("Nice, I heard Kyoto has lots of cool building, I want to get there and get some photos")
elif Destination == "Osaka":
print("OMG! I want to go to Universal, I am a fan of Harry Potter?")
else:
print("you chose somewhere I never though to go, is it fun?")
print()
time.sleep(sdelay)
print("Hmmm......")
print("Kyoto has some famous buildings\n")
time.sleep(sdelay)
print("Universal Studio has Harry Potter theme park, sounds amazing to me")
print("Hmmm......So hard to decide")
time.sleep(ldelay)
print()
print("Hmmm.....I think I make a decision")
print("Do you know where do I want to go next time?")
time.sleep(mdelay)
print()
print("A magic wand and two desination sign shows in front of you, grab the wand and point it to the correct answer")
print()
choice = input("Where do I want to go next time? [Kyoto/Osaka] ")
#3 An if, else control structure (with zero elifs)
if choice == "Osaka":
print("the magic wand is waving it self and seems it is reading some spell...?\n")
time.sleep(sdelay)
print("the wand made a door in front of you, open the door and walk in..")
time.sleep(sdelay)
print("You see the corner shows the sign of 'Hogsmeade'\n")
time.sleep(sdelay)
print("You succeed, you got me that I want to visit Osaka for Harry Potter theme park")
print("Enjoy Universal,", username, "!")
else:
print("the magic wand is waving it self and seems it is reading some spell...?")
time.sleep(sdelay)
print("the wand made a door in front of you, open the door and walk in..")
time.sleep(sdelay)
print()
print("There are lots of famous buildings")
print("but there are also a lot of snow too")
print("I don't really like to visit the city during snowing day, you made a wrong guess.")
print("Farewell,", username, ".")
| ["chih-ting.chen@cgu.edu"] | chih-ting.chen@cgu.edu |
| 0f00bb137bb1bbff7568e514a7fd66bba708c4e5 | f1b278a07261d0f20de94c493f6c3260320ffcc4 | /tongji.py | 6af473407ece4873115e2c5238b9ae75fe547ee2 | [] | no_license | Derek-Wds/GAMBLE | 6ca167399033c51f19b08ee868ec2a433b143e5a | 7a356be93a52c082aadcd2e2f2621d90e486ca0a | refs/heads/master | 2021-01-17T20:46:41.272844 | 2017-03-07T03:10:58 | 2017-03-07T03:10:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py |
import random
def roll(i,money,bet):
dice1=int(random.randint(1,6))# To give each dice
dice2=int(random.randint(1,6))# a random value
dice3=int(random.randint(1,6))
if (dice1==dice2 and dice1==dice3):
money+=bet*odds
i=30
return money, i
r=int(input('How many times do you want to test?\n'))
odds=int(input('What is the odds?\n'))
ak=0
bk=0
ck=0
dk=0
ek=0
fk=0
negative=0
zero=0
for k in range(1,r+1):
i=0
money=odds
while i<odds:
money-=1
money,i=roll(i,money,1)
i+=1
if money-odds<0:
negative+=1
elif money-odds==0:
zero+=1
elif money-odds>0 and money-odds<5:
ak+=1
elif money-odds>=5 and money-odds<10:
bk+=1
elif money-odds>=10 and money-odds<15:
ck+=1
elif money-odds>=15 and money-odds<20:
dk+=1
elif money-odds>=20 and money-odds<25:
ek+=1
elif money-odds>=25:
fk+=1
positive=r-negative-zero
print('total\tnegative\tpositive\t0\t0 to 5\t5 to 10\t10 to 15\t15 to 20\t20 to 25\t25 to 30')
print(r,'\t',negative,'\t',positive,'\t',zero,'\t',ak,'\t',bk,'\t',ck,'\t',dk,'\t',ek,'\t',fk)
print('The probability of making money is','\t',positive/r)
print('The probability of a draw is','\t',zero/r)
| ["Max@Hongyis-MacBook-Air.local"] | Max@Hongyis-MacBook-Air.local |
| d263ca6a0b902e7d0954ea8836ebe1e455e10dd5 | 0cf6fbbbe4ccf76acbedd4aac08f9d0f468049fb | /statistics/CheckboxStatistics.py | f98709dd165ae55a8f4836afe2b59fabd0574415 | ["Apache-2.0"] | permissive | jmptrader/pulpo-forms-django | 78cb9e2e4d96f3d86c0d9e06a82784b5d488e350 | 60d268faa492ba8256cc32b3108d6a27dabcd40f | refs/heads/master | 2021-12-05T18:47:41.175539 | 2015-08-11T17:57:05 | 2015-08-11T17:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py |
from pulpo_forms.statistics.serializers import ListStatisticsSerializer
class CheckboxStatistics():
def __init__(self, data_list, options):
self.total_per_option = []
self.options = []
self.total_filled = 0
self.total_not_filled = 0
        # Initialize lists
for option in options:
self.total_per_option.append(0)
self.options.append(option["label"])
        # Count null values, and collect the non-null values
        # into an auxiliary list
aux_list = []
for data in data_list:
if data != "":
aux_list += data.split("#")
self.total_filled += 1
else:
self.total_not_filled += 1
total_options = len(options)
for data in aux_list:
pos = 0
while (pos != total_options) and (int(data) != options[pos]["id"]):
pos += 1
if pos != total_options:
self.total_per_option[pos] += 1
else:
raise Exception("Data does not match with any option")
def getSerializedData(self):
return ListStatisticsSerializer(self).data
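# --- Usage sketch (not in the original file, hypothetical data) ---
# options are dicts with "id" and "label"; each answer is a "#"-separated
# string of option ids, or "" when the question was left blank.
#
#     options = [{"id": 1, "label": "Red"}, {"id": 2, "label": "Blue"}]
#     stats = CheckboxStatistics(["1#2", "", "2"], options)
#     stats.total_per_option   # -> [1, 2]
#     stats.total_filled       # -> 2
#     stats.total_not_filled   # -> 1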
| ["vmartinez@trea.uy"] | vmartinez@trea.uy |
| 21c31b96d0c66af5c7d03f308110458f7ddc012e | 12841fa83a58964be49ca1f17dee72b37d27b950 | /main/ensepro_main.py | 5d6dca831ad4037d429746065149932fd14f954c | [] | no_license | Ensepro/ensepro-core | 8dc050a7bec4a813d95a424d00d4e6f0a8065607 | 09a1a55e4a261e5e5fdc0e6292c4a42d9bb118b6 | refs/heads/develop | 2021-12-11T02:26:30.270018 | 2019-06-28T21:25:32 | 2021-10-30T06:00:54 | 151,914,650 | 0 | 0 | null | 2020-07-20T21:24:22 | 2018-10-07T06:59:42 | Python | UTF-8 | Python | false | false | 2,930 | py |
# -*- coding: utf-8 -*-
"""
@project ensepro
@since 09/05/2018
@author Alencar Rodrigo Hentges <alencarhentges@gmail.com>
"""
import os
import sys
# Add the parent of this file's directory to the system path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from main import main_utils
from main import main_params
import ensepro
from ensepro.cbc import atualizar_frase
from ensepro.cbc import consultar
from ensepro.constantes.constantes import LoggerConstantes
if len(sys.argv) < 2:
print("Parametro '-frase' ou '-arquivo-frases' deve ser passado. '-h' ou '--help' para ver outras opcoes.")
exit(1)
args = main_params.get_args()
logger = LoggerConstantes.default_logger()
frases_texto = []
frases_analisadas = []
frases_reanalisadas = []
respostas = []
file = open("resultados.txt", mode="a", encoding="UTF-8") if args.save_txt else None
if args.arquivo_frases:
if not args.quiet:
print("Carregando frases do arquivo:", args.arquivo_frases, "...", end="", flush=True)
frases_texto = main_utils.carregar_frases(args.arquivo_frases)
if not args.quiet:
print("done")
if args.frase:
frases_texto.append(args.frase)
if not args.quiet and not args.somente_resposta:
print("Analisando frase(s)... ")
deve_responder = (args.verbose or args.resposta) and not args.sem_resposta
def analisar(frase_texto):
frase_final = None
resposta = []
frase_original = ensepro.analisar_frase(frase_texto)
frases_analisadas.append(frase_original)
if deve_responder or args.final:
frase_final = atualizar_frase(frase_original)
frases_reanalisadas.append(frase_final)
if deve_responder:
resposta = consultar(frase_final)
respostas.append(resposta)
if args.save_json and not args.save_txt:
return
if args.original and args.final:
main_utils.comparar_frases(ensepro, frase_original, frase_final, args, file=file)
return
if args.original:
main_utils.print_frase(ensepro, frase_original, args, file=file)
if args.final:
main_utils.print_frase(ensepro, frase_final, args, file=file)
if deve_responder:
main_utils.print_resposta(ensepro, resposta, args.somente_resposta, file=file)
for frase_texto in frases_texto:
try:
analisar(frase_texto)
except Exception as ex:
logger.exception(ex)
print("\n\n{}".format(ex))
# raise ex
if args.save_json:
resultado_json = []
for index in range(len(frases_analisadas)):
json = {
"frase_original": frases_analisadas[index]
}
if args.verbose or args.resposta or args.final:
json["frase_final"] = frases_reanalisadas[index]
if deve_responder:
json["resposta"] = respostas[index]
resultado_json.append(json)
ensepro.save_as_json(resultado_json, "resultados.json")
| ["alencarhentges@gmail.com"] | alencarhentges@gmail.com |
| cf2688693037986a8c1994ddd8f7622cb68216b2 | 1e2bab7fff94995667f0ce282f3d12fb21c8e744 | /jump7.py | 76b870ffd3b08e86975984917db48a07123475c6 | [] | no_license | 13725398579/JUMP7 | 813ece90e493d5dbf19270a322ae7267c536c13b | 393c4209c7d8045129734b3571d98f682fad7d6e | refs/heads/master | 2021-04-02T05:10:05.784969 | 2020-03-18T14:03:07 | 2020-03-18T14:03:07 | 248,244,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py |
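# Added note (an interpretation, not in the original file): the classic
# "jump 7" counting drill. Print 1..100 but skip every multiple of 7 and
# every number containing the digit 7 (i % 10 == 7 catches the ones place;
# i // 10 == 7 catches 70-79).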
i=0
while i<100:
i+=1
if i%7 == 0:
continue
elif i%10 == 7:
continue
elif i//10 == 7:
continue
print(i)
| ["172559276@qq.com"] | 172559276@qq.com |
| b5f79c79cc76a5ea5c4919a46766e6f09c053559 | b7bedc60ee259c708d32e23bcb7b874f3903fc89 | /scripts/chgadd.py | 25f99fa8afb34dbee7de1e93e935540a2d1603f8 | [] | no_license | znotft/ase_tools | f8b0858a7f73b2d000dba276f5e29577895048a8 | cffcf76ead01de7b687b34242d3cda5bb5c16ffd | refs/heads/master | 2021-01-16T19:40:32.460842 | 2012-11-06T09:03:40 | 2012-11-06T09:03:40 | 9,125,779 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,293 | py |
#!/usr/bin/env python
"""
A script which reads CHGCAR files and adds the
densities together.
Output is to a file called CHGADD.
Depends on ase.
"""
import os
import sys
import numpy
import time
from ase.calculators.vasp import VaspChargeDensity
starttime = time.clock()
print "Starting calculation at",
print time.strftime("%H:%M:%S on %a %d %b %Y")
# Find out how many arguments were on the command line;
# all but the first two should contain files with densities
# to add
nadd = len(sys.argv)-2
if not nadd >= 1:
print "\n** ERROR: Must specify name of at least two files on command line."
print "eg. chgadd.py CHGCAR1 CHGCAR2 [CHGCAR3 ...]"
print "The reference density is taken from the first filename."
print "The densities in the files after this will be subtracted from the reference."
sys.exit(0)
# Check that files exist
for name in sys.argv[1:]:
if not os.path.isfile(name):
print "\n** ERROR: Input file %s was not found." % name
sys.exit(0)
# Read information from command line
# First specify location of CHGCAR file with reference density
CHGCARfile1 = sys.argv[1].lstrip()
# Open geometry and density class objects
#-----------------------------------------
print "Reading density data from file %s ..." % CHGCARfile1,
sys.stdout.flush()
vasp_charge1 = VaspChargeDensity(filename = CHGCARfile1)
chg1 = vasp_charge1.chg[-1]
atoms1 = vasp_charge1.atoms[-1]
del vasp_charge1
print "done."
chgadd=chg1
for CHGCARfile2 in sys.argv[2:]:
CHGCARfile2 = CHGCARfile2.strip()
print "Reading density data from file %s ..." % CHGCARfile2,
sys.stdout.flush()
vasp_charge2 = VaspChargeDensity(filename = CHGCARfile2)
chg2 = vasp_charge2.chg[-1]
del vasp_charge2
print "done."
# Make sure that the second data set is on the same grid
#--------------------------------------------------------
if chg2.shape != chg1.shape:
print "\n**ERROR: Two sets of data are not on the same grid."
print "Data from file %s on %dx%dx%d grid." % (CHGCARfile1,chg1.shape[0],chg1.shape[1],chg1.shape[2])
print "Data from file %s on %dx%dx%d grid.\n" % (CHGCARfile2,chg2.shape[0],chg2.shape[1],chg2.shape[2])
sys.exit(0)
else:
print "Adding data from file %s ..." % CHGCARfile2,
sys.stdout.flush()
# Add charge density
#-----------------
chgadd += chg2
print "done."
zero = raw_input("Set negative values of the added charge density to zero (Yes/No): ")
vasp_charge_add = VaspChargeDensity(filename=None)
vasp_charge_add.atoms=[atoms1,]
vasp_charge_add.chg=[chgadd,]
# Print out charge density
#--------------------------
# Check whether CHGADD exists
if os.path.isfile("./CHGADD"):
print "\n**WARNING: A file called CHGADD already exists in this directory."
yesno=raw_input("Type y to continue and overwrite it, any other key to stop\n")
if yesno!="y":
sys.exit(0)
print "Writing added density data to file CHGADD ...",
sys.stdout.flush()
vasp_charge_add.write(filename="CHGADD",format="chgcar")
print "done."
endtime = time.clock()
runtime = endtime-starttime
print "\nEnd of calculation."
print "Program was running for %.2f seconds." % runtime
| ["jonbj@ifm.liu.se"] | jonbj@ifm.liu.se |
| 4c5f331968b0e30a568f4769b5736940f82bcbd5 | 66e58cba20414214ebca7bb4a035796d379f57f1 | /Flask-Fundamentals/Dojo-Survey-With-Validation/server.py | 4b3c8de452bf289ecce7557c9dfdc14751e4ecac | [] | no_license | NathanHaberman/Python-Flask-Django-CodingDojo | 08623db1c1fa9076cd90901fed39678b2a8b795a | c6013c9e8133a7e2b70bfc357c3364edf3992d1c | refs/heads/master | 2021-01-23T17:09:25.703718 | 2017-09-21T21:09:03 | 2017-09-21T21:09:03 | 102,763,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py |
from flask import Flask, render_template, request, redirect, flash
app = Flask(__name__)
app.secret_key = "Shhhh..."
@app.route('/')
def root():
return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
name = request.form['name']
location = request.form['locations']
comments = request.form['comments']
messages = False
if len(name) == 0:
flash("Name cannot be empty")
messages = True
if len(comments) == 0:
flash("Comments cannot be empty")
messages = True
if len(comments) >= 120:
flash("Comments cannot be longer than 120 characters")
messages = True
if messages:
return redirect('/')
else:
        return render_template('submitted.html', name=name, location=location, comments=comments)
app.run(debug=True)
| ["na.haberman@gmail.com"] | na.haberman@gmail.com |
| 9d1cdb46710c53e7a1370e0fbf14a3b5a9f52d56 | 9aff3641623e6bdaa6d084aa70766342b84f48e8 | /HelloWorld.py | 7f16a0603b2dfec5392faea779f1b9222abc31fa | [] | no_license | ilkerdemirel/HelloWorld | 6634e6bbdf9cbf27cbb411c8e9273c67fa8655b1 | 2fbcb71d9099edd3b7b52939fb29bd2bb448fca5 | refs/heads/master | 2021-01-21T19:50:47.390123 | 2017-05-23T13:10:22 | 2017-05-23T13:10:22 | 92,160,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py |
'''
Created on 23.05.2017
@author: idemirel
'''
print("Hello World!")
| ["noreply@github.com"] | ilkerdemirel.noreply@github.com |
| 8fb6a16dffe7820751340754781016bea840a92d | a79239cfdca9493916acfe472ebbafa3bbb2715a | /Cipher_Crypto/cipher1.py | b5c33fb222b34c3a9fded5c92e42b07338431175 | [] | no_license | abhi-bs-360/CloudBox | 6cfa274147758c5bf1a45e668bc8b72900fd53ec | 987cd86f3b51652b6749da3f65a23bfaaea6733e | refs/heads/master | 2023-01-03T18:39:09.581313 | 2020-10-30T11:35:15 | 2020-10-30T11:35:15 | 283,173,449 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py |
message = "Burning 'em, if you ain't quick and nimble I go crazy when I hear a cymbal"
result = []
n = len(message)
c = 0
print('\n')
for i in range(n):
if c == 0:
result += chr(ord(message[i]) ^ ord('I'))
c += 1
elif c == 1:
result += chr(ord(message[i]) ^ ord('C'))
c += 1
elif c == 2:
result += chr(ord(message[i]) ^ ord('E'))
c = 0
x = "".join(result)
x = x.encode()
print(x.hex(), '\n')
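# --- Equivalent sketch (not in the original file) ---
# The loop above is repeating-key XOR with the key "ICE", unrolled by hand;
# itertools.cycle expresses the same computation directly.
from itertools import cycle
x2 = bytes(ord(m) ^ ord(k) for m, k in zip(message, cycle("ICE")))
assert x2.hex() == x.hex()
print(x2.hex(), '\n')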
| ["noreply@github.com"] | abhi-bs-360.noreply@github.com |
| eabfde6c712de514aa95dfc4d63bd9ca2a65e808 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-33.py | a36bf0c97597d37ce596efb36b668bcaa2d00c94 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,848 | py |
# Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
$FuncDef
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
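# Note (an added observation, not part of the original file): the driver above
# exercises only the plain Tree/TreeNode pair; the TreeNode2..TreeNode5 and
# Tree2..Tree5 variants are padded copies of the same logic, presumably
# generated to stress a type checker or benchmark harness.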
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
61c424fa240c8f98aa583628c0449c512b9393f6
|
9e93ced65a4a99033f08f9b0cd2cbb3ac3199d81
|
/cut_and_cleanup.py
|
53db6ff62ad25bbf22a1ea4309ce024cb0549139
|
[] |
no_license
|
basimar/ub_diverses
|
805c66513f44ab58d9012eae4029a10662c82638
|
140f6294d95884836ca7f3bcdeef6d2324f1d68a
|
refs/heads/main
| 2023-03-28T15:45:27.109490
| 2021-04-06T17:02:12
| 2021-04-06T17:02:12
| 353,639,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# cut and cleanup big input file
# * cut according to defined criteria in row (basically list of sublibraries)
# * cleanup subfield codes preceding values
import csv
import re
infile = 'PST_all.seq'
outfile = 'all_out.csv'
with open(infile) as f1:
    with open(outfile, 'w') as f2:
        # create the csv writer once, outside the row loop
        outwriter = csv.writer(f2, dialect='unix', delimiter='|', quoting=csv.QUOTE_MINIMAL)
        for row in f1:
            # create little dict with subfield keys ($$ is the subfield delimiter)
            sfdict = {i[:1]: i[1:] for i in re.split(r'\$\$', row)}
            # pluck necessary values, use get() with defaults where a value may be missing
            sys = row[0:9]
            itemkey = sfdict.get('1')
            sublibrary = sfdict.get('b', 'NO-SUBLIBRARY').ljust(6)
            location = sfdict.get('c', 'NO-LOCATION').ljust(6)
            mattype = sfdict.get('o')
            itemstatus = sfdict.get('d')
            processstatus = sfdict.get('e')
            holnumber = sfdict.get('r')
            callnumber = sfdict.get('h')
            dedupkey = sys + sublibrary + location
            # write csv row
            csvrow = [dedupkey, sys, sublibrary, location, itemkey, mattype, itemstatus, processstatus, holnumber, callnumber]
            outwriter.writerow(csvrow)
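# Illustrative input line (hypothetical, not from the real data):
#   000123456 $$1ITEM001$$bA100$$cFH$$oBook
# re.split(r'\$\$', ...) yields ['000123456 ', '1ITEM001', 'bA100', 'cFH', 'oBook'],
# so sfdict maps subfield codes to values ({'1': 'ITEM001', 'b': 'A100', ...});
# the leading system-number chunk becomes a junk entry keyed by its first character.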
|
[
"basil.marti@unibas.ch"
] |
basil.marti@unibas.ch
|
609437e51586cca3e956d412b39176464f3db457
|
b918483a6ab3b4949dad9f05c1085073d24b5abf
|
/message_classifier/settings.py
|
3569167c35773e557538e5a22f31f9e3d1581ec7
|
[] |
no_license
|
unicefuganda/rapidsms-message-classifier
|
e3081759b1c660a3760d8d65e608c01477a718c0
|
62986307929c81e78836bfce1be681297840971d
|
refs/heads/master
| 2020-04-24T08:08:52.213357
| 2013-11-28T09:07:16
| 2013-11-28T09:07:16
| 2,664,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,770
|
py
|
STOP_WORDS = ["the", "da", "ureport", "u report", "u-report", "yes", "no", "is", "kampala", "ug", "and", "radio",
"scout", "scouts", "district", "not",
"are", "their", "they", "from", "heard", "that", "for", "can", "but", "have", "about", "them", "should",
"Busia",
"Kiryandongo",
"Kole",
"Moroto",
"Mbarara",
"Pader",
"Alebtong",
"Buvuma",
"Amuru",
"Wakiso",
"Soroti",
"Rubirizi",
"Gomba",
"Butambala",
"Dokolo",
"Iganga",
"Amudat",
"Ssembabule",
"Kyenjojo",
"Kibuku",
"Luuka",
"Tororo",
"Isingiro",
"Kampala",
"Apac",
"Kamuli",
"Masindi",
"Bundibugyo",
"Sheema",
"Mukono",
"Nakasongola",
"Buvuma",
"Buyende",
"Hoima",
"Kanungu",
"Kisoro",
"Kamwenge",
"Katakwi",
"Kibaale",
"Kiruhura",
"Kapchorwa",
"Kayunga",
"Kiboga",
"Koboko",
"Kotido",
"Gulu",
"Kitgum",
"Namayingo",
"Nakapiripirit",
"Kumi",
"Moyo",
"Kween",
"Lamwo",
"Buliisa",
"Zombo",
"Amuria",
"Amolatar",
"Agago",
"Abim",
"Adjumani",
"Nwoya",
"Otuke",
"Ntungamo",
"Namutumba",
"Napak",
"Mpigi",
"Oyam",
"Nakaseke",
"Bulambuli",
"Lira",
"Kaabong",
"Yumbe",
"Manafwa",
"Mitooma",
"Bushenyi",
"Kabale",
"Kabarole",
"Kalangala",
"Ntoroko",
"Mubende",
"Nebbi",
"Masaka",
"Bugiri",
"Mayuge",
"Pallisa",
"Kyankwanzi",
"Bududa",
"Buhweju",
"Mbale",
"Bukwo",
"Serere",
"Rukungiri",
"Buikwe",
"Bukedea",
"Rakai",
"Bukomansimbi",
"Budaka",
"Ibanda",
"Arua",
"Jinja",
"Kaliro",
"Kaberamaido",
"Kalungu",
"Kasese",
"Butaleja",
"Mityana",
"Lyantonde",
"Kyegegwa",
"Luwero",
"Lwengo",
"Ngora",
"Maracha",
"Sironko",
"should",
"them",
"and",
"their",
"village",
"what",
"from",
"name",
"its",
"our",
"with",
"report",
"some",
"there",
"who",
"because",
"over",
"like",
"also",
"good",
"why",
"you",
"has",
"you",
"years",
"about",
"advert",
"this",
"iam",
"through",
"mps",
"out",
"how",
"more",
"think",
"but",
"will",
"the",
"which",
"other",
"what",
"for",
"are",
"ureport",
"know",
"that",
"give",
"district",
"coz",
"than",
"was",
"what",
"those",
"very",
"when",
"our", "shd", "all", "such", "your", "through", "know", "know", "every", "may", "bad", "her", "ureporters"
,
"would", "take", "you", "let", "ureport", "due", "against", "can", "the", "how",
"where", "thank", "don", "not", "sub", "age", "being", "thanks",
"join", "donyo", "even", "many", "will", "this", "way", "can", "about",
"was", "with", "the", "use", "had", "any", "only", "have"]
|
[
"mossplix@gmail.com"
] |
mossplix@gmail.com
|
9505c99b9b00bd3bae77507cdcbc52d543b996b8
|
d26e27cea2ed4a58584d65464b62c93f4c918037
|
/image/urls.py
|
d70bebc2b2f11668efeace14184b525ea5a0e241
|
[] |
no_license
|
wchpeng/atlweb_py3
|
9462d9f46a48c42ab93f37f2985d19e00a71d7bf
|
2c075409a43a9ca91414e43b1960d8e39458f385
|
refs/heads/master
| 2021-05-10T10:28:45.176942
| 2018-03-25T09:44:02
| 2018-03-25T09:44:02
| 118,387,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
from image import views
from django.conf.urls import url
from rest_framework import routers
router = routers.SimpleRouter()
router.register(r"albums", views.AlbumListView)
router.register(r"album-mod", views.AlbumModView)
router.register(r"picture", views.PictureCreateView)
urlpatterns = [
    url(r"^index/$", views.index),  # image home page
    url(r"^index1/$", views.index1),  # image home page data
    url(r"^upload-pic/$", views.upload_pic),  # image upload page, crops images automatically
    url(r"^search-index/$", views.search_index),  # search page data
    url(r"^album-page/(.+?)/(\d+)/$", views.album_detail_page),  # album detail page
    # url(r"^album-page/(\d+)/$", views),
]
urlpatterns += router.urls
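# For reference: router.urls contributes the standard DRF routes for each
# registered viewset, e.g. ^albums/$ (list) and ^albums/(?P<pk>[^/.]+)/$ (detail).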
|
[
"869858978@qq.com"
] |
869858978@qq.com
|
1225ed96a1b496feebaa25304b217be38fc1e40a
|
783070b66238376d7d00c9ef56b644528ec9b8ed
|
/b100.py
|
2fbd3c2d1a0a1215a4bae97cf289d1fd1a53f7e1
|
[] |
no_license
|
muhammed94munshid/code-kata-beginner-2
|
4e2edb88f904e0c221b535f48db890acdcbc22b6
|
948ecfd6a552e5dc8252f6c0068dfbd72b6609d8
|
refs/heads/master
| 2021-04-29T20:18:08.259392
| 2018-05-24T06:29:54
| 2018-05-24T06:29:54
| 121,594,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
# Print the product of the digits of the number read from standard input
a = input()
b = 1
for digit in a:
    b = b * int(digit)
print(b)
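# Equivalent one-liner (illustrative sketch only):
#   from functools import reduce
#   print(reduce(lambda acc, digit: acc * int(digit), input(), 1))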
|
[
"noreply@github.com"
] |
muhammed94munshid.noreply@github.com
|
a2467d19551923a36b5091091a4db896d736c431
|
f2ef7ffd6438776c9da621c1611c52ef7016b675
|
/test.py
|
b909efcec191c3b19d3a427c5c83de6d5a984a54
|
[
"BSD-3-Clause"
] |
permissive
|
mdkearns/StatiX
|
b493bcd656605a83f9b1b6b0a9c0bf780e4e9d33
|
3dd5affec5d82c5923da171decb8bc39c4dc95a9
|
refs/heads/master
| 2020-03-22T21:22:11.311764
| 2018-07-17T01:19:41
| 2018-07-17T01:19:41
| 140,680,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
import statix.describe as d
import statix.inference as inf
import statix.visualize as v
import numpy as np
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print("\nSorted Data:\t", sorted(data), "\n")
print("Statix mean:\t", d.mean(data), end='\t\t')
print("Numpy mean:\t\t", np.mean(data))
print("Statix median:\t", d.median(data), end='\t\t')
print("Numpy median:\t", np.median(data))
print("Statix var:\t\t", d.var(data), end='\t\t')
print("Numpy var:\t\t", np.var(data))
print("Statix std:\t\t", round(d.std(data), ndigits=3), end='\t\t')
print("Numpy std:\t\t", round(np.std(data), ndigits=3))
print()
print("Statix range:\t\t\t", d.get_range(data))
print("Statix 75th percentile:\t", d.percentile(data, 75))
print("Statix 0.75 quantile:\t", d.quantile(data, 0.75))
print("Statix IQR:\t\t\t\t", d.iqr(data))
|
[
"mattdkearns@gmail.com"
] |
mattdkearns@gmail.com
|
23aebbb3e989e43c45958995f6e0857d6b3dc91f
|
83e2fe88d4537403ee710b631d7d3e8c65415092
|
/services/web__waveneyadvertiser24_co_uk.py
|
b129dad99946de8725ac10def82cdee605e4b53b
|
[] |
no_license
|
ArchiveTeam/NewsGrabber-Services
|
b01f560b18bd5395673132321c16bcf7602608f1
|
2d52eb06a6ca767f4b1d1e623505fa427b6af459
|
refs/heads/master
| 2020-01-23T21:40:57.220775
| 2019-07-01T09:56:38
| 2019-07-01T09:56:38
| 74,690,301
| 3
| 8
| null | 2019-06-03T19:49:47
| 2016-11-24T16:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
refresh = 43200
version = 20161106.01
urls = ['http://www.waveneyadvertiser24.co.uk/home']
regex = [r'^https?:\/\/[^\/]*waveneyadvertiser24\.co\.uk']
videoregex = []
liveregex = []
|
[
"Arkiver@hotmail.com"
] |
Arkiver@hotmail.com
|
ca9a7123a44acff7e634bd8b345c095e45b07be9
|
66249074c301868e6317a4b136d58973f2a7556e
|
/setup.py
|
45baf449291f9dedcff6407727ebc88bdfb559ef
|
[
"MIT"
] |
permissive
|
umutkirgoz/python-bayes-redis
|
4bb6b6b94a615b6d9aa5d635faf27cb8a436a69b
|
13656902c0785d0e12cf28c351ac75f8a1885383
|
refs/heads/master
| 2021-01-17T14:14:03.641775
| 2014-02-24T12:22:17
| 2014-02-24T12:22:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
from setuptools import setup, Extension
def readme():
with open('README.rst') as f:
return f.read()
setup(ext_modules=[Extension("BayesRedis", ["BayesRedis/__init__.c"])],
name='bayesredis',
version='1.2.0',
description='A Simple Naive Bayes Classifier in Python',
long_description=readme(),
keywords='bayes naive classifier redis cython machine learning',
url='https://github.com/tistaharahap/python-bayes-redis',
author='Batista Harahap',
author_email='batista@bango29.com',
license='MIT',
packages=['BayesRedis'],
      install_requires=['redis>=2.7.0', 'hiredis>=0.1.0'],  # runtime dependencies (Redis client), not build-time requirements
zip_safe=False)
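# Typical local build/install (illustrative; assumes the Cython-generated
# BayesRedis/__init__.c is present in the source tree):
#   pip install .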
|
[
"batista@bango29.com"
] |
batista@bango29.com
|
81844f377a61ee2af54f98b23f281dbc5ee14f6b
|
d6eac61fe6fb6473b80b8fc6db9b094a6fe8cbb6
|
/parallel_run/driver_ecosys_DukeForest_mpi4py.py
|
617a139ac1fd70c1238ca136fd3b68d7bfac9074
|
[
"BSD-3-Clause"
] |
permissive
|
jinyun1tang/ECOSYS
|
1c3a06d6581261faf5d2ff36b38dcd624204ab4c
|
0038610916bad2a2c756e19dc29fa60f7adef873
|
refs/heads/master
| 2023-04-01T12:47:07.022653
| 2023-03-30T16:12:14
| 2023-03-30T16:12:14
| 144,186,189
| 37
| 37
|
BSD-3-Clause
| 2023-03-11T19:41:50
| 2018-08-09T17:53:57
|
Fortran
|
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
from mpi4py import MPI
import glob
import simulator as sm
import os
import numpy as np
import sys
# Note rank 0 is not used for simulations. It causes problems with the subprocess module.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nproc = comm.Get_size()
# print('nproc = ', nproc)
sim_name = '/global/home/users/yaningl/repos/ecosys_duke_run/ecosys.x'
sim_folder = '/global/scratch/yaningl/Research/Ecosys_DukeForest/ecosys_duke_forest_base'
base_file = 'ecosys_parameters.dat'
ecosys_file = 'rundk3_restart_1996_1997_input'
nparam = 4
variable = ['ECO_CO2_FLUX']
year = ['1997']
data_file = glob.glob(os.path.join(sim_folder, '*'))
data_file = [os.path.basename(df) for df in data_file]
data_file.remove(base_file)
var_list = []
for i in range(0, nparam+1):
var_list.append('#parameter0' + str(i+1) + '#')
sim_ecosys_DF = sm.sim_ecosys_DukeForest(sim_name, base_file,
var_list, ecosys_file, data_file,
sim_folder, variable, year)
if rank == 0:
nsample_per_param = 3
nsample = nsample_per_param**nparam
lower_end = 0.1
upper_end = 10.0
# last parameter value
lpv = 1.0
sz = (upper_end-lower_end)/(nsample_per_param-1)
param_idx = 0
parameters = np.zeros((nsample, nparam+1))
for i1 in range(nsample_per_param):
for i2 in range(nsample_per_param):
for i3 in range(nsample_per_param):
for i4 in range(nsample_per_param):
parameters[param_idx,:] = [lower_end+i1*sz, lower_end+i2*sz,
lower_end+i3*sz, lower_end+i4*sz, lpv]
param_idx = param_idx+1
nsim = param_idx
if rank == 0:
for i in range(1,nproc):
comm.send(nsim, dest=i)
if rank != 0:
nsim = comm.recv(source=0)
comm.Barrier()
# Note the -1's below is to skip rank 0 (no simulation on rank 0)
index_local = list(range(rank-1, nsim, nproc-1))
if rank == 0:
# print('rank {0}: Sending data to the other ranks'.format(rank))
# sys.stdout.flush()
# parameters_local = parameters[index_local, :]
# So if there are too many processors, we do not need to use the excessive ones
for j in range(1,np.min([nproc, nsim+1])):
comm.send(parameters[list(range(j-1,nsim,nproc-1)),:], dest=j)
if rank != 0 and rank < nsim+1:
parameters_local = comm.recv(source=0)
comm.Barrier()
# if rank != 0:
# print('rank {0}: Data have been received'.format(rank))
# print('rank {0}: index_local is '.format(rank), index_local)
if rank != 0 and rank < nsim+1:
postfix = [str(i) for i in index_local]
sim_ecosys_DF.create_files(parameters_local, postfix)
sim_ecosys_DF.run_serial()
# now get objective functions on each cpu
sim_out_local, sim_date_local = sim_ecosys_DF.output()
obs_file = 'observations.dat'
output_file = 'output.dat'
std_dev_file = 'std_dev.dat'
obs = np.loadtxt(obs_file)
output = np.loadtxt(output_file)
lik_std = np.loadtxt(std_dev_file)
# Prior values for x
x_prior = 2.0*np.ones(nparam+1, )
# Prior standard deviations for x
x_std = np.ones(nparam+1, )
objfunc_local = sim_ecosys_DF.objfunc_nlposterior(parameters_local,
sim_out_local, x_prior,
x_std, obs, lik_std)
comm.send(objfunc_local, dest=0)
comm.Barrier()
if rank == 0:
objfunc_all = np.zeros(nsim, )
for i in range(1, np.min([nproc, nsim+1])):
recved = comm.recv(source=i)
objfunc_all[list(range(i-1,nsim,nproc-1))] = recved
idx_max = np.argmax(objfunc_all)
objfunc_max = objfunc_all[idx_max]
print('maximum objective function value is: ', objfunc_max)
print('maximum objective function value is obtained in the simulation folder : ',
idx_max)
with open(os.getcwd()+'/sim_'+str(idx_max)+'/'+base_file, 'r') as fh:
param_opt = [float(s) for s in fh.readline().split()]
print('optimal parameters after grid search are: ', param_opt)
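# Illustration (an added note, not from the original script): the round-robin
# distribution index_local = list(range(rank - 1, nsim, nproc - 1)) with
# nsim = 7 and nproc = 4 (rank 0 acting as coordinator only) gives
#   rank 1 -> [0, 3, 6], rank 2 -> [1, 4], rank 3 -> [2, 5]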
|
[
"yaningliu@Yanings-MacBook-Pro.local"
] |
yaningliu@Yanings-MacBook-Pro.local
|
a41fa7dd1dac43ff8d5ed591ad5193be4b832adf
|
97f285b6f8016a8d1d2d675fffb771df3c9e37b9
|
/misc/ipv6calc.py
|
21e413c9eb23cdc49e5a8a81c77f567463558f47
|
[] |
no_license
|
oskomorokhov/python
|
ef5408499840465d18852954aee9de460d0e7250
|
8909396c4200bd2fca19d3f216ed5f484fb2192a
|
refs/heads/master
| 2021-05-14T09:27:25.413163
| 2019-12-12T21:00:05
| 2019-12-12T21:00:05
| 116,327,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,837
|
py
|
# Ugliest IPv6 Calc in the world
import re
def input_get():
# Gather input, validate against regex, assign defaults
ip = input("Enter IPv4 Address: ")
# ip_pattern=re.compile("^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")
ip = "2001:0123:4567:FFFF:FFFF:FFFF:FFFF:FFFF" if not ip else ip
print(ip)
mask = input("Enter Prefix or Mask: ")
# mask_pattern=re.compile("^(((255\.){3}(255|254|252|248|240|224|192|128|0+))|((255\.){2}(255|254|252|248|240|224|192|128|0+)\.0)|((255\.)(255|254|252|248|240|224|192|128|0+)(\.0+){2})|((255|254|252|248|240|224|192|128|0+)(\.0+){3}))$")
# prefix_pattern=re.compile("^([0-9]|[1-2][0-9]|[3][0-2])$")
mask = "48" if not mask else mask_ds2bs(mask)
print(mask)
s_mask = input("Enter Subnet/Supernet Prefix or Mask: ")
s_mask = "64" if not s_mask else mask_ds2bs(s_mask)
print(s_mask)
return (ip, int(mask), int(s_mask))
def per(n):
# Utility foo to derive bits-to-flip & their permutations
# l=[]
#print("per p argument",n)
for i in range(2**n):
p = bin(i)[2:].zfill(n)
#print("per p",p)
yield p
def ip_ds2bs(ip_d_string):
# Convert IP in hex notation(delimimed with dots) into 128-bit bin string
#print("ip_ds2bs argument",ip_d_string)
ip_d_arr = ip_d_string.split(":")
ip_b_arr = (bin(int(object, 16))[2:].zfill(16) for object in ip_d_arr)
ip_b_str = ''.join(ip_b_arr)
#print("ip_ds2bs result",ip_b_str)
return ip_b_str
def ip_bs2ds(ip_b_str, oct=16):
# Convert IP in 128-bit bin string notation into hex notation(delimited with colons)
#print("ip_bs2ds arguments",ip_b_str,oct)
ip_d_str = ':'.join(hex(int(ip_b_str[i:i+oct], 2))[2:].zfill(4)
for i in range(0, len(ip_b_str), oct))
#print("ip_bs2ds result",ip_d_str)
return ip_d_str
def mask_ds2bs(mask_d_str):
    # Convert a mask in dotted notation into a prefix length; prefix strings
    # pass through unchanged (int() on dotted input raises, hence try/except)
    try:
        if int(mask_d_str) in range(129):
            return mask_d_str
    except ValueError:
        pass
    mask_d_arr = mask_d_str.split(".")
    mask_b_arr = (bin(int(chunk, 16))[2:].zfill(16) for chunk in mask_d_arr)
    prefix = (''.join(mask_b_arr)).count("1")
    return prefix
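# Examples (illustrative): mask_ds2bs("64") -> "64" (prefix passthrough);
# mask_ds2bs("ffff.ffff.0.0") -> 32 (each dotted chunk parsed as 16-bit hex).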
def subnet_main(ip, prefix, s_prefix):
# Derive number of subnets within network,total number of hosts,subnet addresses(bin str) or supernet
ip_b_str = ip_ds2bs(ip)
# print("subnet_main,ip_b_str",ip_b_str)
net_b_str = ip_b_str[:prefix]+(128-prefix)*'0'
# print("subnet_main,net_b_str",net_b_str)
net_d_str = ip_bs2ds(net_b_str)
if prefix < s_prefix:
#print("subnetmain if prefix<s_prefix")
n_subnets = 2**(s_prefix-prefix)
# print("subnet_main,n_subnets",n_subnets)
n_hosts_total = 2**(128-prefix)
# print("subnet_main,n_hosts_total",n_hosts_total)
subnets = []
bits_to_flip = per(s_prefix-prefix)
for i in range(n_subnets):
subnet = net_b_str[:prefix]+next(bits_to_flip)+net_b_str[s_prefix:]
# print("subnet_main,subnet",subnet)
subnets.append(subnet)
# print("subnet_main,list(subnets)",list(subnets))
#print("subnet appended")
return (subnets, n_subnets, net_b_str, n_hosts_total)
else:
#print("subnetmain else")
supernet = net_b_str[:s_prefix]+(128-s_prefix)*'0'
return (supernet, net_d_str, net_b_str)
def subnet_detail(subnet_b_str, s_prefix):
# Derive Hosts Range & Broadcast Address
#print("subnet_detail arguments",subnet_b_str,s_prefix)
hostmin = subnet_b_str[:-1]+"0"
#print("subnet_detail hostmin",hostmin)
hostmax = (subnet_b_str[:s_prefix]+(128-s_prefix)*'1')[:-1]+'1'
#print("subnet_detail hostmax",hostmax)
# bcast=subnet_b_str[:s_prefix]+(128-s_prefix)*'1'
n_hosts = 2**(128-s_prefix)
#print("subnet_detail n_hosts",n_hosts)
result = list((ip_bs2ds(i) for i in (hostmin, hostmax)))
#print("subnet_detail result",result)
result.append(n_hosts)
#print("subnet_detail result,appended with n_hosts",result)
return result
def output_subnet(subnets_arr, s_prefix, id=''):
    # Print details for a list of subnets (or for a single subnet passed as id);
    # not called by execute(), which inlines the same logic
    if len(subnets_arr) > 100:
        limit = 10
    else:
        limit = len(subnets_arr)
    if id:
        print("\r\nSubnet:\nNetwork: %s/%s" % (ip_bs2ds(id), s_prefix))
        print("Host_min: %s \nHost_max: %s \nHosts: %d" %
              (tuple(subnet_detail(id, s_prefix))))
    else:
        for j, i in enumerate(subnets_arr[:limit]):
            print("\r\nSubnet %d:\nNetwork: %s/%s" %
                  (j+1, ip_bs2ds(i), s_prefix))
            print("Host_min: %s \nHost_max: %s \nHosts: %d" %
                  (tuple(subnet_detail(i, s_prefix))))
def execute():
# Master foo
input = input_get()
data = subnet_main(*input)
# print(len(data[0]),data[1],data[2])
print("\r\nNetwork: %s/%s" % (ip_bs2ds(data[2]), input[1]))
print("Host_min: %s \nHost_max: %s \nHosts: %d" %
(tuple(subnet_detail(data[2], input[1]))))
if input[1] < input[2]:
print("\r\nSubnets: %d" % data[1])
print("Hosts total: %d" % data[3])
# print(len(data[0]))
if len(data[0]) > 100:
limit = 10
else:
limit = len(data[0])
for j, i in enumerate(data[0][:limit]):
print("\r\nSubnet %d:\nNetwork: %s/%s" %
(j+1, ip_bs2ds(i), input[2]))
print("Host_min: %s \nHost_max: %s \nHosts: %d" %
(tuple(subnet_detail(i, input[2]))))
else:
#print("execute else")
print("Supernet: %s/%s" % (ip_bs2ds(data[0]), input[2]))
print("Host_min: %s \nHost_max: %s \nHosts: %d" %
(tuple(subnet_detail(data[0], input[2]))))
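if __name__ == '__main__':
    # entry point (an addition: execute() is otherwise never called)
    execute()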
|
[
"oskom85@gmail.com"
] |
oskom85@gmail.com
|
2c8e8203b7ae449bd0e4fa632c1c23286f36d607
|
10ab6b6e0f898594f3e0d6420a723d0bc47000ca
|
/manage.py
|
52520a93014b5bbebdd88b45a1cf934de9cec210
|
[] |
no_license
|
dragonfi/django_sort_app_example
|
a154eee03c8caa6ed6cd3956057ced5272b9210b
|
8d14e5233d799efcff21b1d7ca5acfd6dd7c451c
|
refs/heads/master
| 2021-01-19T09:40:53.268006
| 2015-02-04T21:06:31
| 2015-02-04T21:06:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_sort_app.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"david.gabor.bodor@gmail.com"
] |
david.gabor.bodor@gmail.com
|
01d7e22b2ebe92ad8a33fc98929214001462b14e
|
810509036758c1d68e6947e1e797f265b48ab716
|
/06/main.py
|
176870890f182f91da2bec4341963871d09d776e
|
[] |
no_license
|
tomtomklima/adventofcode2020
|
d53e252c718d76c0d921e114153ba84376c42a2a
|
71b936ea2738e84e70d71ce9d015c4ac539b5906
|
refs/heads/master
| 2023-02-10T15:54:46.824763
| 2021-01-03T13:56:29
| 2021-01-03T13:56:29
| 323,973,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
from collections import Counter
# Advent of Code 2020, day 6: groups of answer lines separated by blank lines
use_real_input = True
if use_real_input:
    with open('input.txt', 'r') as f:
        content = f.read()
else:
    with open('test.txt', 'r') as f:
        content = f.read()
groups = content.split('\n\n')
answersAnyoneCount = 0
answersEveryoneCount = 0
for group in groups:
    answers = group.split('\n')
    # count, per question letter, how many people in the group answered it
    lettersCount = Counter()
    for answer in answers:
        lettersCount.update(answer)
    # part 1: questions answered by anyone in the group
    answersAnyoneCount += len(lettersCount)
    # part 2: questions answered by everyone in the group
    maximumHits = len(answers)
    for key, result in lettersCount.items():
        if result == maximumHits:
            answersEveryoneCount += 1
print(answersAnyoneCount)
print(answersEveryoneCount)
|
[
"tom.k@skaut.cz"
] |
tom.k@skaut.cz
|
0dac87e50d5bf330112fd6ae66dc57f8e2e50d7f
|
8dcaaa87309ced422e9f283fa39ca5e2cfac5f02
|
/oricus_lib/Status.py
|
a4ed3882ae12310f68b9293ab5d8ca7ec79644a9
|
[] |
no_license
|
johnpbloch/Oricus
|
23265d415faf85ada527f6d678b59c119a0d6366
|
205645d0670ca696afd3f21b9fdc2770724cbe1e
|
refs/heads/master
| 2021-01-23T07:20:50.940696
| 2012-12-27T18:59:55
| 2012-12-27T18:59:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from gi.repository import GObject
class StatusBar():
statusbar = None
def __init__(self, statusbar):
self.statusbar = statusbar
def set(self, message, context=None):
if context is None:
context = Types.DEFAULT
self.statusbar.push(context, message)
def clear(self, context=None, Delay=None):
if context is None:
context = Types.DEFAULT
if Delay is None:
self.statusbar.pop(context)
return False
        try:
            GObject.timeout_add(Delay, self.clear, context)
        except Exception:
            pass
class Types():
(DEFAULT,
STARTUP,
TOGGLE) = range(1, 4)
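# Minimal usage sketch (illustrative; the widget name is an assumption):
#   bar = StatusBar(builder.get_object("statusbar1"))
#   bar.set("Ready", Types.STARTUP)
#   bar.clear(Types.STARTUP, Delay=2000)  # pops the message after ~2 seconds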
|
[
"johnpbloch@gmail.com"
] |
johnpbloch@gmail.com
|
c36eeeeb26c3fc97ab7e5ef4792a1af65fa3dd55
|
6c14b6ef63c0f380a63ffb8f7e9e120afab321c3
|
/scripts/database.py
|
accf534459923fd472a66cbad8117e991c89a026
|
[] |
no_license
|
pal25/snscholar
|
488f5662c495409f1da942b545a56cdd67860b06
|
fbe00b3ea4d4e6390f2a648f0732324a63134202
|
refs/heads/master
| 2021-01-10T05:20:35.796898
| 2013-02-24T06:14:39
| 2013-02-24T06:14:39
| 3,675,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
from flask.ext.script import Manager
from snscholar import create_app
from snscholar.extensions import db
from snscholar.users.models import User
from snscholar.courses.models import Course, user_course_join
from snscholar.books.models import Book, book_course_join, book_user_join
app = create_app()
manager = Manager(app)  # reuse the same app instance rather than creating a second one
@manager.command
def create_tables():
db.drop_all()
db.create_all()
@manager.command
def create_user():
user = User('pal25', 'pal25@case.edu', 'dev')
db.session.add(user)
db.session.commit()
if __name__ == "__main__":
manager.run()
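# Typical invocation of the Flask-Script commands defined above (illustrative):
#   python database.py create_tables
#   python database.py create_user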
|
[
"pal25@case.edu"
] |
pal25@case.edu
|
d0e7e40f837b93de9673e400a269935076b67c2d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_lacrosse.py
|
07e4ef590b80cff024ac1d8fc9fac49e6d48fe06
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
#class header
class _LACROSSE():
def __init__(self,):
self.name = "LACROSSE"
self.definitions = [u"a game played by two teams in which the players each use a long stick with a net at the end to catch, carry, and throw a small ball, and try to get the ball in the other team's goal"]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9cab2665d530f8dd07987d67df27e99e4e5de1a7
|
bb121db77a53448f5235b5524eadbe3b59efea04
|
/assignments/sets/set_operations.py
|
2849217b82e63c233839f03e6e352f9294dc6281
|
[] |
no_license
|
RoslinErla/AllAssignments
|
a8bfd4e4df5177e76472678cbfeb13b1d49abc56
|
1a8f098e9ecde015de70970cd5c17501c510fb19
|
refs/heads/master
| 2020-07-27T12:44:29.974705
| 2019-11-15T13:28:44
| 2019-11-15T13:28:44
| 209,094,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
# Write a program that:
# Reads in two lists of integers from the user and converts them to sets and prints out the sets.
# Allows the user to repeatedly perform intersection, union and difference on the two sets and prints out the result of each operation
# Example input/ouput:
# Input a list of integers separated with a comma: 1,2,3,4
# Input a list of integers separated with a comma: 1,1,3,3,5,6
# {1, 2, 3, 4}
# {1, 3, 5, 6}
# 1. Intersection
# 2. Union
# 3. Difference
# 4. Quit
# Set operation: 1
# {1, 3}
# 1. Intersection
# 2. Union
# 3. Difference
# 4. Quit
# Set operation: 2
# {1, 2, 3, 4, 5, 6}
# 1. Intersection
# 2. Union
# 3. Difference
# 4. Quit
# Set operation: 3
# {2, 4}
# 1. Intersection
# 2. Union
# 3. Difference
# 4. Quit
# Set operation: 4
def make_list():
first_input = input("Input a list of integers separated with a comma: ").split(",")
first_list = map(int,first_input)
second_input = input("Input a list of integers separated with a comma: ").split(",")
second_list = map(int,second_input)
return first_list,second_list
def make_set(list1,list2):
set_1 = set(list1)
set_2 = set(list2)
return set_1,set_2
def pick_operation():
print("1. Intersection")
print("2. Union")
print("3. Difference")
print("4. Quit")
operation = input("Set operation: ")
return operation
def intersection(set_a,set_b):
intersection_set = set_a & set_b
print(intersection_set)
def union(set_a,set_b):
union_set = set_a | set_b
print(union_set)
def difference(set_a,set_b):
difference_set = set_a - set_b
print(difference_set)
def what_happens(operation,set_1,set_2):
if operation == "1":
intersection(set_1,set_2)
elif operation == "2":
union(set_1,set_2)
elif operation == "3":
difference(set_1,set_2)
def main():
list_a,list_b = make_list()
set_a,set_b = make_set(list_a,list_b)
print(set_a)
print(set_b)
operation = pick_operation()
while operation != "4":
what_happens(operation,set_a,set_b)
operation = pick_operation()
main()
|
[
"roslin19@ru.is"
] |
roslin19@ru.is
|
779b6508535880476993aade4a530e0f9785f9af
|
b79a9889b77e644447413ec578d788ad8ffd76f3
|
/adamax.py
|
ce6043756aade98e4a08b37d097742285691aaad
|
[
"MIT"
] |
permissive
|
BenJamesbabala/DNGPU
|
cfabe5c2332990b525e52c23b19d0925256a1674
|
26befddccd29db4c18cb834054f9c08fcc5bdc4a
|
refs/heads/master
| 2020-05-27T21:23:56.166892
| 2017-02-28T10:43:02
| 2017-02-28T10:43:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,069
|
py
|
# This code is adapted from https://github.com/openai/iaf/blob/master/tf_utils/adamax.py
# The MIT License (MIT)
#
# Original work Copyright (c) 2016 openai
# Modified work Copyright (c) 2016 Institute of Mathematics and Computer Science, Latvia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Improving the Neural GPU Architecture for Algorithm Learning"""
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
import tensorflow as tf
class AdamaxOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm with gradient clipping.
See [Kingma et. al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
@@__init__
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,epsilon=1e-8,clip_multiplier=1.2, clip_epsilon = 1e-4, use_locking=False, name="Adamax"):
"""Construct a new AdaMax optimizer with gradient clipping.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
clip_multiplier: Multiplier for second moment estimate for gradient clipping. We have not verified that the default value is optimal.
clip_epsilon: Gradients smaller than this are not clipped. Also, it has some effect on the first few optimization steps. We have not verified that the default value is optimal.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "Adamax".
"""
super(AdamaxOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self.clip_multiplier = clip_multiplier
self.clip_epsilon = clip_epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")
def _create_slots(self, var_list):
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        # note: slot "m" tracks the exponentially weighted infinity norm and
        # slot "v" the first moment (the names are swapped relative to the paper)
        m = self.get_slot(var, "m")
        # clip the gradient relative to the running infinity-norm estimate
        clipVal = m * self.clip_multiplier + self.clip_epsilon
        grad = tf.clip_by_value(grad, -clipVal, clipVal)
v = self.get_slot(var, "v")
v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
m_t = m.assign(tf.maximum(beta2_t * m, tf.abs(grad)))
g_t = v_t / (m_t+epsilon_t)
var_update = state_ops.assign_sub(var, lr_t * g_t)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
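# Minimal usage sketch (illustrative; standard TF1 optimizer interface):
#   opt = AdamaxOptimizer(learning_rate=0.001)
#   train_op = opt.minimize(loss)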
|
[
"noreply@github.com"
] |
BenJamesbabala.noreply@github.com
|
43105d14ddf9525238e94937e281b9b0da53cd93
|
1a30ee3bd8f80e38b6f37e31ba8fd54c53bb02af
|
/gs/DtjkUpdateJobRenBao.py
|
6f81c58f5680f938e2d4abcbed9b3b386dbcc553
|
[] |
no_license
|
piaoxue85/GsCrawlerV3
|
530c8d2a2c3e45185607466610838203214f24c2
|
ebb8d341b1f91ba7467d9efd395d98ecefcbf8cb
|
refs/heads/master
| 2020-05-19T02:44:46.610442
| 2017-11-02T07:57:26
| 2017-11-02T07:57:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,167
|
py
|
# coding=utf-8
import PackageTool
from gs.UpdateFromTableMySQL import UpdateFromTable
from gs import MySQL
import traceback
from gs import TimeUtils
from Crawler import GsCrawler
import json
from gs.USCC import check
class DtjkUpdateJob(UpdateFromTable):
online_province = [
u'上海市',
u'北京市',
u'广东省',
u'江苏省',
u'浙江省',
u'山东省',
u'河北省', u'福建省',
u'天津市', u'湖北省',
u'河南省',
u'海南省',
u'重庆市',
u'贵州省',
u'湖南省',
u'陕西省',
u'山西省',
u'黑龙江省',
u'吉林省',
u'内蒙古自治区',
u'广西壮族自治区',
u'云南省',
u'西藏自治区',
u'工商总局',
u'宁夏回族自治区',
u'甘肃省',
u'青海省',
u'江西省',
u'新疆维吾尔自治区',
u'四川省',
u'辽宁省',
u'安徽省',
]
def __init__(self):
super(DtjkUpdateJob, self).__init__()
# def set_config(self):
# self.searcher = GsCrawler()
# # self.searcher.dst_topic = 'GSCrawlerTest'
# self.src_table = 'enterprise_credit_info.dtjk_company_src'
# self.pk_name = 'mc'
def set_config(self):
self.searcher = GsCrawler()
self.searcher.dst_topic = "GsCrawlerOnline"
self.src_table = 'enterprise_credit_info.dtjk_company_src_renbao'
self.pk_name = 'mc'
def run(self):
# cnt_0 = 0
# cnt_1 = 0
# cnt_2 = 0
# cnt_999 = 0
fail_dict = dict()
        update_result = {u'更新成功': 0, u'查无结果': 0, u'更新失败': 0, u'未上线': 0}  # updated OK / no result / update failed / not online
while True:
# print json.dumps(fail_dict, ensure_ascii=False)
sql_1 = "select mc,province,xydm from " \
"(" \
"select * from %s where update_status=-1 order by last_update_time limit 30 " \
") t " \
"order by RAND() limit 1" % self.src_table
# print sql_1
res_1 = MySQL.execute_query(sql_1)
if len(res_1) > 0:
mc = res_1[0][0]
province = res_1[0][1]
xydm = res_1[0][2]
print mc, province
self.info(mc + '|' + province)
sql_2 = "update %s set update_status=-2,last_update_time=now() " \
"where mc='%s'" \
% (self.src_table, mc)
MySQL.execute_update(sql_2)
try:
if province in self.online_province:
if province in (u'河北省',
u'宁夏回族自治区',
u'河南省',
u'海南省',
u'重庆市',
u'江西省',
u'贵州省',
u'湖南省',
u'陕西省',
u'山西省',
u'黑龙江省',
u'吉林省',
u'内蒙古自治区',
u'广西壮族自治区',
u'云南省',
u'西藏自治区',
u'青海省',
u'新疆维吾尔自治区',
u'甘肃省',
u'工商总局',
u'浙江省',
u'江苏省',
u'广东省',
u'上海市',
# u''
) and check(xydm):
keyword = xydm
else:
keyword = mc
update_status = self.searcher.crawl(keyword=keyword, province=province)
else:
update_status = 999
sql_3 = "update %s set update_status=%d, last_update_time=now() " \
"where mc='%s'" % \
(self.src_table, update_status, mc)
if mc in fail_dict:
fail_dict.pop(mc)
except Exception, e:
# traceback.print_exc(e)
self.info(traceback.format_exc(e))
if fail_dict.get(mc, 0) > 10:
update_status = 3
if mc in fail_dict:
fail_dict.pop(mc)
else:
update_status = -1
fail_dict[mc] = fail_dict.get(mc, 0) + 1
# self.info(str(e))
sql_3 = "update %s set update_status=%d " \
"where mc='%s'" % \
(self.src_table, update_status, mc)
self.searcher.delete_tag_a_from_db(mc, province)
MySQL.execute_update(sql_3)
# print 'update_status', update_status
if update_status == 0:
update_result[u'查无结果'] += 1
elif update_status == 1:
update_result[u'更新成功'] += 1
elif update_status == 999:
update_result[u'未上线'] += 1
else:
update_result[u'更新失败'] += 1
self.info(json.dumps(update_result, ensure_ascii=False))
else:
                self.info(u'更新完毕')  # "update finished"
break
if __name__ == '__main__':
job = DtjkUpdateJob()
job.run()
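# update_status codes as used above: -1 = pending, -2 = in progress,
# 0 = no result found, 1 = updated successfully, 3 = failed after more than
# 10 retries, 999 = province not online.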
|
[
"18801791073@163.com"
] |
18801791073@163.com
|
3300a9187963df0c2c2903b069c4c598e0b0e31a
|
9c5d96a79fd3ff3f57a585195541c1fa44c56338
|
/537. Complex Number Multiplication.py
|
5a0a4d85a57b7568ee6eaa2ebcc86e4b8bbab917
|
[] |
no_license
|
luoy2/leetcode-python
|
663ab60e589a05dbe62fabc832bf8a8848647f19
|
c926ee2f8a1f543e481d7c396965fcd9a3fc8f41
|
refs/heads/master
| 2022-05-16T02:29:08.663249
| 2022-04-11T03:01:15
| 2022-04-11T03:01:15
| 82,005,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
'''
A complex number can be represented as a string on the form "real+imaginaryi" where:
real is the real part and is an integer in the range [-100, 100].
imaginary is the imaginary part and is an integer in the range [-100, 100].
i2 == -1.
Given two complex numbers num1 and num2 as strings, return a string of the complex number that represents their multiplications.
Example 1:
Input: num1 = "1+1i", num2 = "1+1i"
Output: "0+2i"
Explanation: (1 + i) * (1 + i) = 1 + i2 + 2 * i = 2i, and you need convert it to the form of 0+2i.
Example 2:
Input: num1 = "1+-1i", num2 = "1+-1i"
Output: "0+-2i"
Explanation: (1 - i) * (1 - i) = 1 + i2 - 2 * i = -2i, and you need convert it to the form of 0+-2i.
Constraints:
num1 and num2 are valid complex numbers.
'''
def get_num(num: str):
real, complex_ = num.split('+')
real = int(real)
complex_ = int(complex_.split("i")[0])
    print(real, complex_)  # debug output
return real, complex_
class Solution:
def complexNumberMultiply(self, num1: str, num2: str) -> str:
real1, comp1 = get_num(num1)
real2, comp2 = get_num(num2)
real3 = real1 * real2 - comp1 * comp2
comp3 = real1 * comp2 + real2 * comp1
return f'{real3}+{comp3}i'
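# Quick self-check (an addition; matches the examples in the docstring above):
assert Solution().complexNumberMultiply("1+1i", "1+1i") == "0+2i"
assert Solution().complexNumberMultiply("1+-1i", "1+-1i") == "0+-2i"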
|
[
"luoy2@hotmail.com"
] |
luoy2@hotmail.com
|
05ecdd251a2c562544819dfa994a0bae1609f7db
|
b911744e6b7e464e7f7bc4151b5cc170e33701b2
|
/dashborad/form/product.py
|
a1b2a096b2392a74aaba2a3245305d2486a462c3
|
[] |
no_license
|
Wstc2013/reboot_lianxi
|
199bc9cf2ce6ef9a017ca3e1589b2596fcd283e6
|
5de1a5c818e7e764a8cd5ed9950ce646c7daf458
|
refs/heads/master
| 2021-01-21T12:21:16.824326
| 2017-06-26T02:58:25
| 2017-06-26T02:58:25
| 91,792,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,833
|
py
|
#!/usr/bin/env python
#-*- coding:utf8 -*-
from django import forms
from dashborad.models import Product,Userprofile
from dashborad import models
# note: this form class shadows the imported Product model, which is why the
# model is referenced as models.Product below
class Product(forms.Form):
name = forms.CharField(required=True,max_length=32,
error_messages = {'required': '业务线名称不能为空', 'max_length': '业务线名称长度错误'})
p_product = forms.CharField(required=True)
module_letter = forms.CharField(required=True,max_length=10,
error_messages={'required': '业务线英文名称不能为空', 'max_length': '业务线英文名称用户长度错误'})
op_interface = forms.MultipleChoiceField(choices=((user_obj.name,user_obj.email) for user_obj in Userprofile.objects.all()))
dev_interface = forms.MultipleChoiceField(choices=((user_obj.name,user_obj.email) for user_obj in Userprofile.objects.all()))
    def clean_p_product(self):
        p_product = self.cleaned_data.get('p_product')
        try:
            p_id = int(p_product)
        except ValueError:
            raise forms.ValidationError('上级业务线错误')  # invalid parent product line
        else:
            print type(p_id)
            if p_id == 0:
                return None
            try:
                p_obj = models.Product.objects.get(pk=p_id)
                return p_obj
            except Exception:
                raise forms.ValidationError('业务线不存在')  # product line does not exist
def clean_module_letter(self):
module_letter = self.cleaned_data.get('module_letter')
return module_letter.lower()
def clean_op_interface(self):
op_interface = self.cleaned_data.get('op_interface')
return ','.join(op_interface)
def clean_dev_interface(self):
dev_interface = self.cleaned_data.get('dev_interface')
return ','.join(dev_interface)
|
[
"xiaoyong.feng@cnsha-61418-mac.local"
] |
xiaoyong.feng@cnsha-61418-mac.local
|
62bea2ef9a0edf4ff2efea4ce6f269ea9fd9f97e
|
b6d723d7ac20b5b01a55e574088d053c9952ef76
|
/AdaptivePlanning/scripts/csv_io.py
|
e4a8ceb354d2f0ef4d4debcc5803a4572fafedfb
|
[] |
no_license
|
amoghntt/Aspire
|
d704c53bbfe89794fc34e6ea3bd605d51eb0a754
|
97953894c82ac565d451df9bd6eea35d23e83c6b
|
refs/heads/master
| 2021-08-30T15:07:18.408757
| 2017-12-18T11:27:49
| 2017-12-18T11:27:49
| 114,597,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
def read_data(file_path="UC4_Data.csv"):
    # read numeric samples from a comma-separated file, skipping the header
    f = open(file_path)
    # ignore header
    f.readline()
    samples = []
    for line in f:
        line = line.strip().split(",")
        sample = [float(x) for x in line]
        samples.append(sample)
    f.close()
    return samples
def write_delimited_file(file_path, data, header=None, delimiter=","):
    # write rows (lists of strings, or pre-joined strings) to a delimited file
    f_out = open(file_path, "w")
    if header is not None:
        f_out.write(delimiter.join(header) + "\n")
    for line in data:
        if isinstance(line, str):
            f_out.write(line + "\n")
        else:
            f_out.write(delimiter.join(line) + "\n")
    f_out.close()
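# Minimal usage sketch (illustrative; file names are assumptions):
#   samples = read_data("UC4_Data.csv")
#   write_delimited_file("out.csv", [["1", "2"], ["3", "4"]], header=["a", "b"])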
|
[
"109100@NTTDATA.COM"
] |
109100@NTTDATA.COM
|
e8c9479740a1bd7682f0c73d9bf6be00e8556670
|
7e2aa3098be442333d9d460aa22cb63790f2200b
|
/at_tmp/model/FUNC/DBC/DBC_OPT.py
|
4dd866299670287436452ff1f9d75fec0a994a3a
|
[
"Apache-2.0"
] |
permissive
|
zuoleilei3253/zuoleilei
|
deec298da9ece0470ebc94cd427c1352ff2bcb8d
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
refs/heads/master
| 2020-06-21T19:29:14.817013
| 2019-07-19T15:07:32
| 2019-07-19T15:07:32
| 197,536,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,253
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/26 9:16
# @Author : bxf
# @File : DBC_OPT.py
# @Software: PyCharm
from model.util.TMP_DB_OPT import *
from model.util.TMP_PAGINATOR import *
from model.util.newID import *
from model.util.GET_PARAM import *
from model.FUNC.PARAMS_OPT import *
'''
Maintenance of basic data-check (DBC) case information
'''
class DBC_OPT:
def __init__(self,token):
self.token=token
def getLists(self,data,**kwargs):
'''
        Get the list of records
:param data:
:param kwargs:
:return:
'''
try:
page = data.get('_page')
records = data.get('_limit')
group_id = data.get('group_id')
dbc_sql='select * from dbc_case_info WHERE '
dbc_lists=GET_RECORDS(dbc_sql,page,records,group_id=group_id,token=self.token)
return_data=respdata().sucessResp(dbc_lists)
return json.dumps(return_data,cls=MyEncoder,ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def getListsForSuite(self, data, **kwargs):
'''
        Get the list of records
:param data:
:param kwargs:
:return:
'''
try:
page = data.get('_page')
records = data.get('_limit')
group_id = data.get('group_id')
get_data = data.to_dict()
del get_data['_page']
del get_data['_limit']
del get_data['group_id']
sql_doc = searchToDatabase('api_case_info', get_data)
dbc_sql = 'select dbc_id info_id,dbc_desc info_desc,dbc_status info_status,dbc_type info_type,init_data from dbc_case_info where init_data is not NULL and '+sql_doc
dbc_lists = GET_RECORDS_SQL(dbc_sql, page, records,group_id=group_id,token=self.token)
data = dbc_lists[0]
case_list = dbc_lists[2]#getJsonFromDatabase(dbc_lists[1])
tb_data = []
if case_list:
for i in case_list:
init_data = i['init_data']
del i['init_data']
if init_data !=None:
i['init_data'] = json.loads(init_data)
else:
i['init_data']=None
tb_data.append(i)
else:
tb_data = []
data['tb_data'] = tb_data
return_data = respdata().sucessResp(data)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def dbcDetail(self,dbc_id):
'''
        Get record detail
:param data:
:return:
'''
try:
sql="SELECT t.*,t2.env_id FROM dbc_case_info t LEFT JOIN t_env_detail t2 ON t2.env_d_id=t.env_d_id WHERE t.dbc_id='"+str(dbc_id)+"'"
dbc_detail=getJsonFromDatabase(sql)
if dbc_detail:
dbc_detail=dbc_detail[0]
                # get the parameters; also check whether a saved copy exists in the params table
if dbc_detail["init_data"] != None:
if PARAMS_OPT(self.token, dbc_id).getData():
init_data = toDict(PARAMS_OPT(self.token, dbc_id).getData()[0]["init_data"])
else:
init_data = toDict(dbc_detail["init_data"])
else:
init_data = []
dbc_detail["init_data"] = init_data
return_data = respdata().sucessResp(dbc_detail)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
else:
return_data = respdata().failMessage('', '不存在该接口信息!')
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def dbcInsert(self,data):
'''
        Insert basic DBC info
:param data:
:return:
'''
try:
get_data = json.loads(data)
group_id = getCode(get_data['group_id'])
get_data['group_id'] = group_id
insert_result = insertToDatabase('dbc_case_info', get_data)
return_data = respdata().sucessMessage('', '新增成功,新增记录数为: ' + str(insert_result))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().failMessage('', '新增失败,请检查!错误信息为:' + str(e))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def dbcUpdate(self,data):
'''
        Update a record
:return:
'''
try:
get_data = json.loads(data)
dbc_id=get_data["dbc_id"]
if "env_id" in get_data:
del get_data['env_id']
            # [modified] branch that also saves the parameters into the params table
if "init_data" in get_data:
init_data_b = json.dumps(get_data['init_data'])
if get_data['init_data'] is None or get_data['init_data'] == []:
get_data['init_data'] = json.dumps([])
else:
init_data_a = get_data['init_data']
init_data_list = []
for i in init_data_a:
key = i[0]
param_list = [key, None, {}]
init_data_list.append(param_list)
get_data['init_data'] = json.dumps(init_data_list, ensure_ascii=False)
                # check whether a parameter record already exists in the params table
                if PARAMS_OPT(self.token, dbc_id).getData():
                    PARAMS_OPT(self.token, dbc_id).updateData(init_data_b)  # update
                else:
                    PARAMS_OPT(self.token, dbc_id).insertData(init_data_b)  # insert
update_result = updateToDatabase('dbc_case_info', get_data, dbc_id=dbc_id)
return_data = respdata().sucessMessage('', '更新成功,更新条数为:' + str(update_result))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().failMessage('', '更新失败,请检查!错误信息为:' + str(e))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def dbcDelete(self,data):
'''
        Delete a record
:param data:
:return:
'''
try:
dba = getJsonMysql()
dbc_id = data
delsql = 'delete from dbc_case_info where dbc_id="' + str(dbc_id) + '"'
DB_CONN().db_Update(delsql)
return_data = json.dumps(respdata().sucessMessage('', '删除成功!~'))
return return_data
except Exception as e:
return_data = json.dumps(respdata().otherResp(e, '删除失败!~'))
return return_data
def dbcSearch(self,data):
try:
page = data.get('_page')
records = data.get('_limit')
group_id = data.get('group_id')
del data['_page']
del data['_limit']
del data['group_id']
if data:
search_sql=searchToDatabase('dbc_case_info',data)
else:
search_sql="SELECT * FROM dbc_case_info WHERE"
result=GET_RECORDS(search_sql,page,records,group_id=group_id,token=self.token)
return_data = respdata().sucessResp(result)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def dbcParams(self,data):
dbc_id = json.loads(data)["dbc_id"]
init_dataa = toDict(PARAMS_OPT(self.token, dbc_id).getData()[0]["init_data"])
return_data= self.dbcInit( data, init_dataa)
return return_data
def dbcInit(self, data,init_data):
param_data = json.loads(data)["params"]
params_init1 = []
for i in param_data:
params_init = GET_Variable(i)
params_init1.extend(params_init)
params = list(set(params_init1))
dbc_id = json.loads(data)["dbc_id"]
person_params = PARAMS_OPT(self.token, dbc_id).getData()
if person_params:
params_list = self.intiChange(params, init_data)
return_data = respdata().sucessMessage(params_list, '')
return json.dumps(return_data, ensure_ascii=False)
else:
sql = "select * from dbc_case_info WHERE dbc_id ='" + dbc_id + "'"
shell_info = getJsonFromDatabase(sql)
if shell_info:
init_data = json.loads(shell_info[0]["init_data"])
if init_data == None:
params_list = []
for i in params:
param = [i, None, {}]
params_list.append(param)
return_data = respdata().sucessMessage(params_list, '')
return json.dumps(return_data, ensure_ascii=False)
else:
params_list = self.intiChange(params, init_data)
return_data = respdata().sucessMessage(params_list, '')
return json.dumps(return_data, ensure_ascii=False)
else:
return_data = respdata().failMessage('', '獲取參數錯誤,請檢查!~~')
return json.dumps(return_data, ensure_ascii=False)
def intiChange(self, param_list, init_data):
init_data_new = []
for n in param_list:
init_list = []
for i in init_data:
init_list.append(i[0])
if n in init_list:
index = init_list.index(n)
init_data_new.append(init_data[index])
else:
param = [n, None, {}]
init_data_new.append(param)
return init_data_new
def paramSave(self, data):
try:
data = toDict(data)
info_id = data["dbc_id"]
init_data = data["init_data"]
result = PARAMS_OPT(self.token, info_id).saveParams("dbc_case_info", "dbc_id", init_data)
return_data = respdata().sucessResp('')
return json.dumps(return_data, ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
|
[
"ray-zuo@qq.com"
] |
ray-zuo@qq.com
|
7ce2b56a64c0f3de5cf6fc9a3b18c6bc8863411f
|
6cfdee9c005b3f677e41514c50cf84e74e6044f5
|
/49. Group Anagrams.py
|
7ccd892ae9a1e37b99a1b3ebaccea54f88d52f1c
|
[] |
no_license
|
nehabais31/LeetCode-Solutions
|
886e94bbea4067cf0533230674605e8c6b0c1847
|
c49269adac280f7f64849f3aff7f2c4a17f0b5e4
|
refs/heads/main
| 2023-05-01T08:43:28.811996
| 2021-05-18T15:47:32
| 2021-05-18T15:47:32
| 325,687,603
| 0
| 0
| null | 2021-01-29T22:11:51
| 2020-12-31T01:45:25
| null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
# -*- coding: utf-8 -*-
"""
Given an array of strings strs,
group the anagrams together.
You can return the answer in any order.
An Anagram is a word or phrase formed by
rearranging the letters of a different word or phrase,
typically using all the original letters exactly once.
"""
class Solution:
def groupAnagrams(self, strs):
shuffled_word = {}
for word in strs:
word_sort = ''.join(sorted(word))
            if word_sort not in shuffled_word:
shuffled_word[word_sort] = [word]
else:
shuffled_word[word_sort].append(word)
return shuffled_word.values()
sol = Solution()
# Test cases
strs1 = ["eat","tea","tan","ate","nat","bat"]
strs2 = ['']
strs3 = 'a'  # a bare string also works: iterating it yields single characters
print(sol.groupAnagrams(strs1))  # dict_values([['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']]) on Python 3.7+
print(sol.groupAnagrams(strs2))  # dict_values([['']])
print(sol.groupAnagrams(strs3))  # dict_values([['a']])
|
[
"nehabais31@gmail.com"
] |
nehabais31@gmail.com
|
1e70f01c75fa7546927a0afcb5849eeb138d3a3f
|
cf5077d06c5145d93b44c0c00bb93f93fbf4d59d
|
/account/migrations/0010_auto__add_field_userprofile_zipcode.py
|
5eff1f01d11d5d4a42fa1fa5f71a8f65dd836727
|
[] |
no_license
|
su-danny/famdates
|
16a9ee01d259c9978278415943d918fd47bdfc9e
|
301cf997985172c146d917c832390e0db57c03c5
|
refs/heads/master
| 2016-08-06T18:17:30.345319
| 2014-03-11T10:34:31
| 2014-03-11T10:34:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,488
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.zipcode'
db.add_column('account_userprofile', 'zipcode',
self.gf('django.db.models.fields.CharField')(max_length=5, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.zipcode'
db.delete_column('account_userprofile', 'zipcode')
models = {
'account.facebooksession': {
'Meta': {'unique_together': "(('user', 'uid'), ('access_token', 'expires'))",
'object_name': 'FacebookSession'},
'access_token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '103'}),
'expires': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'account.interest': {
'Meta': {'object_name': 'Interest'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['account.InterestCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sub_category': (
'django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'account.interestcategory': {
'Meta': {'object_name': 'InterestCategory'},
'human_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'account.mailingaddress': {
'Meta': {'object_name': 'MailingAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'address_2': (
'django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'account.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'about_me': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'acct_type': (
'django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'avatar': (
'django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'background': (
'django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'blocked_users': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'date_founded': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deactivated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'default_interest_feed': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['account.InterestCategory']", 'null': 'True', 'blank': 'True'}),
'email_notification': (
'django.db.models.fields.CharField', [], {'default': "'enabled'", 'max_length': '50'}),
'facebook_uid': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'fan_zone_interests': ('django.db.models.fields.related.ManyToManyField', [],
{'blank': 'True', 'related_name': "'fan_zone'", 'null': 'True',
'symmetrical': 'False', 'to': "orm['account.Interest']"}),
'fitness_nutrition_interests': ('django.db.models.fields.related.ManyToManyField', [],
{'blank': 'True', 'related_name': "'fitness_nutrition'", 'null': 'True',
'symmetrical': 'False', 'to': "orm['account.Interest']"}),
'game_time_interests': ('django.db.models.fields.related.ManyToManyField', [],
{'blank': 'True', 'related_name': "'game_time'", 'null': 'True',
'symmetrical': 'False', 'to': "orm['account.Interest']"}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'hide_birthday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': (
'django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'profile_type': ('django.db.models.fields.CharField', [],
{'default': "'normal'", 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'receive_email_for_new_message': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_from_groups': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'stats': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'use_gravatar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'user_profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': (
'django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['account']
|
[
"su-tn@simpleunion.com"
] |
su-tn@simpleunion.com
|
e7537b99cf180b996f3c2e276aa62d073b5e3a37
|
e52b0124ad5f875ea16a10cc8aa5e771f5d7c3ea
|
/guniflask/security/authentication_provider.py
|
c24a1350d9865af253e41985da2220e481b5a133
|
[
"MIT"
] |
permissive
|
jadbin/guniflask
|
24ec0c755827fe15ebbfeaec3149882ac6bc79b9
|
f0f5029d03219b7793482dc3ed09eab508e538d6
|
refs/heads/master
| 2023-08-18T07:56:36.331037
| 2023-08-09T02:48:23
| 2023-08-09T02:48:23
| 147,511,047
| 14
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
from abc import ABCMeta, abstractmethod
from typing import Type, Optional
from guniflask.security.authentication import Authentication
class AuthenticationProvider(metaclass=ABCMeta):
@abstractmethod
def authenticate(self, authentication: Authentication) -> Optional[Authentication]:
pass # pragma: no cover
@abstractmethod
def supports(self, authentication_cls: Type[Authentication]) -> bool:
pass
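# A minimal sketch of a concrete provider (the class name and logic are
# illustrative assumptions, not part of guniflask itself):
#
# class PasswordAuthenticationProvider(AuthenticationProvider):
#     def authenticate(self, authentication: Authentication) -> Optional[Authentication]:
#         # verify the supplied credentials here; return the authentication
#         # on success, or None if this provider cannot decide
#         return authentication
#
#     def supports(self, authentication_cls: Type[Authentication]) -> bool:
#         return issubclass(authentication_cls, Authentication)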
|
[
"jadbin.com@hotmail.com"
] |
jadbin.com@hotmail.com
|
e6524536fe95931723ae5c10e7fcb42314797381
|
10e9dd3bcd8bd5444813cc4085c9f1439da64833
|
/manage.py
|
be520bb4d1f54a5b5a3b7ba648ba37d26938a557
|
[] |
no_license
|
chornieglaza/bratsva
|
a2d817ff51522b99ca0b1231d13af5280c91d041
|
867c9a0364c74e312f2334f3d717320c64098b7e
|
refs/heads/master
| 2023-04-14T19:32:42.278084
| 2021-04-28T21:35:30
| 2021-04-28T21:35:30
| 362,541,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Meto.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"eloylezcano77@gmail.com"
] |
eloylezcano77@gmail.com
|
1674492835bf36283aedd67efa037b2d42a71005
|
e2005ddf2bd2387bb82eb97db2851a43bef87c10
|
/models.py
|
98e73367037173a494123f99701a70f1a427db4f
|
[] |
no_license
|
jaxazul/pensioners
|
008d73e23b688f9b6fa8400959d6c84e3f5c1861
|
b7bc3f298e070929631ac3ca65ebfc5c83ed7989
|
refs/heads/master
| 2023-04-18T11:36:10.261461
| 2021-05-04T18:58:02
| 2021-05-04T18:58:02
| 358,665,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class PensionersDetails(models.Model):
id = models.IntegerField(primary_key=True)
image = models.CharField(max_length=256)
name = models.CharField(max_length=256)
date_of_birth = models.CharField(max_length=256)
status = models.CharField(max_length=256)
location = models.CharField(max_length=256)
service_begin = models.CharField(max_length=256)
service_ends = models.CharField(max_length=256)
last_assignment = models.CharField(max_length=256)
class Meta:
managed = False
db_table = 'pensioners_details'
|
[
"olugbilehassan@gmail.com"
] |
olugbilehassan@gmail.com
|
0e94a66d4f3e332d04a02bfb8cf800faea4dfefa
|
3a9726f550f370aaa27271d74e9b1605f120e3c2
|
/trollstrings.py
|
ab7b04a09c59f7ad519a7d923840c67f53e0de1d
|
[] |
no_license
|
gwendalcr/review-o-matic
|
6fc2b2884a01bcd42e37887d23e81e63bd84ee2f
|
772826cd38227ba7723ed0a645b443aa52a34174
|
refs/heads/master
| 2020-06-28T01:48:05.981884
| 2019-06-26T19:38:58
| 2019-06-27T17:57:41
| 200,110,802
| 0
| 0
| null | 2019-08-01T19:51:49
| 2019-08-01T19:51:49
| null |
UTF-8
|
Python
| false
| false
| 5,166
|
py
|
class ReviewStrings(object):
HEADER='''
-- Automated message --
'''
FOUND_ISSUES_HEADER_SINGLE='''
The following issue was found with your patch:
'''
FOUND_ISSUES_HEADER_MULTIPLE='''
The following issues were found with your patch:
'''
SUCCESS='''
No changes have been detected between this change and its upstream source!
'''
POSITIVE_VOTE='''
This patch is certified {} by review-o-matic!
'''
CLEAN_BACKPORT_HEADER='''
This change has a BACKPORT prefix, however it does not differ from its upstream
source. The BACKPORT prefix should be primarily used for patches which were
altered during the cherry-pick (due to conflicts or downstream inconsistencies).
'''
MISSING_FIELDS='''
Your commit message is missing the following required field(s):
{}
'''
FEEDBACK_AFTER_ISSUES='''
Enough with the bad news! Here's some more feedback on your patch:
'''
MISSING_HASH_HEADER='''
Your commit message is missing the upstream commit hash. It should be in the
form:
'''
MISSING_HASH_FOOTER='''
Hint: Use the '-x' argument of git cherry-pick to add this automagically
'''
INVALID_HASH_HEADER='''
The commit hash(es) you've provided in your commit message could not be found
upstream. The following hash/remote/branch tuple(s) were tried:
'''
INVALID_HASH_LINE='''
{}
from remote {}
'''
MISSING_AM='''
Your commit message is missing the patchwork URL. It should be in the
form:
(am from https://patchwork.kernel.org/.../)
'''
DIFFERS_HEADER='''
This patch differs from the source commit.
'''
ALTERED_UPSTREAM='''
Since this is not labeled as BACKPORT, it shouldn't. Either this reviewing
script is incorrect (totally possible, pls send patches!), or something changed
when this was backported. If the backport required changes, please consider
using the BACKPORT label with a description of your downstream changes in your
commit message
Below is a diff of the upstream patch referenced in this commit message, vs this
patch.
'''
ALTERED_FROMLIST='''
Changes have been detected between the patch on the list and this backport.
Since the diff algorithm used by the developer to generate this patch may
differ from the one used to review, this could be a false negative.
If the backport required changes to the FROMLIST patch, please consider adding
a BACKPORT label to your subject.
Below is the generated diff of the fromlist patch referenced in this commit
message vs this patch.
'''
BACKPORT_FROMLIST='''
Below is the generated diff of the fromlist patch referenced in this commit
message vs this patch. This message is posted to make reviewing backports
easier.
Since the diff algorithm used by the developer to generate this patch may
differ from the one used to review, there is a higher chance that this diff is
incorrect. So take this with a grain of salt.
'''
CLEAR_VOTES='''
Changes were detected between this patch and the upstream version referenced in
the commit message.
Comparing FROMLIST backports is less reliable than UPSTREAM/FROMGIT patches
since the diff algorithms can differ between developer machine and this
review script. As such, it's usually not worthwhile posting the diff. Looks like
you'll have to do this review the old fashioned way!
'''
BACKPORT_DIFF='''
This is expected, and this message is posted to make reviewing backports easier.
'''
FOUND_FIXES_REF_HEADER='''
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! NOTE: This patch has been referenced in the Fixes: tag of another commit. If
!! you haven't already, consider backporting the following patch[es]:'''
FIXES_REF_LINE='''
!! {}'''
FIXES_REF_FOOTER='''
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
'''
FOOTER='''
To learn more about backporting kernel patches to Chromium OS, check out:
https://chromium.googlesource.com/chromiumos/docs/+/master/kernel_faq.md#UPSTREAM_BACKPORT_FROMLIST_and-you
If you're curious about how this message was generated, head over to:
https://github.com/atseanpaul/review-o-matic
This link is not useful:
https://thats.poorly.run/
'''
ISSUE_SEPARATOR='''
>>>>>>> Issue {}
'''
FEEDBACK_SEPARATOR='''
>>>>>> Feedback {}
'''
REVIEW_SEPARATOR='''
------------------
'''
WEB_LINK='''
If you would like to view the upstream patch on the web, follow this link:
{}
'''
SWAG = ['Frrrresh', 'Crisper Than Cabbage', 'Awesome', 'Ahhhmazing',
'Cool As A Cucumber', 'Most Excellent', 'Eximious', 'Prestantious',
'Supernacular', 'Bodacious', 'Blue Chip', 'Blue Ribbon', 'Cracking',
'Dandy', 'Dynamite', 'Fab', 'Fabulous', 'Fantabulous',
'Scrumtrulescent', 'First Class', 'First Rate', 'First String',
'Five Star', 'Gangbusters', 'Grand', 'Groovy', 'HYPE', 'Jim-Dandy',
'Snazzy', 'Marvelous', 'Nifty', 'Par Excellence', 'Peachy Keen',
'PHAT', 'Prime', 'Prizewinning', 'Quality', 'Radical', 'Righteous',
'Sensational', 'Slick', 'Splendid', 'Lovely', 'Stellar', 'Sterling',
'Superb', 'Superior', 'Superlative', 'Supernal', 'Swell', 'Terrific',
'Tip-Top', 'Top Notch', 'Top Shelf', 'Unsurpassed', 'Wonderful']
|
[
"seanpaul@chromium.org"
] |
seanpaul@chromium.org
|
79f3d3d9e4e003a88313ec24a4c8aef4d4152db2
|
9b65c4283013d9e49ee9fad4d68cb3e6c6ed739d
|
/functions1.py
|
cd7e86286eb9735161fdd86a7fff386482dec039
|
[] |
no_license
|
EhsanJ8/Project
|
2386961caf99dd781a82c4ccd5ae9ccea5404472
|
d4ad1e8a864d9676cb417c9776327993b4caf733
|
refs/heads/master
| 2023-06-15T22:02:32.191868
| 2021-07-15T11:38:32
| 2021-07-15T11:38:32
| 381,886,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
import requests
import json
from requests.models import Response
url="https://api.telegram.org/bot1848540350:AAG5JclNyx3863mB19cvYGlx5K8Adlcvge4/"
def get_all_updates():
    response = requests.get(url + "getUpdates")
    return response.json()
def get_last_update(allupdates):
return allupdates["result"][-1]
data = get_last_update(get_all_updates())
# print(data)
def get_chat_id(update):
return update['message']['chat']['id']
def send_message(chat_id , text):
send = {
'chat_id' : chat_id,
'text' : text
}
respond = requests.post(url+"sendMessage" , send)
return respond
# send_message(get_chat_id(data) , 'Hello')
|
[
"chiefehsan1997@gmail.com"
] |
chiefehsan1997@gmail.com
|
78f6fb1a1767e721987d4d5c6735803bb9f58886
|
5f15c1f99777ee3699913f633fa1c11507781f99
|
/src/delta5interface/Delta5Interface.py
|
b1c26ec273894d68ab44b099476505caa4671248
|
[
"MIT"
] |
permissive
|
realhuno/ARSADelta5
|
739f7756d4cb4b857ba6237f991a55d6338086bd
|
1da624cb8a61c2576b4b62f12e9e3d514bcfbf6f
|
refs/heads/master
| 2020-04-05T08:56:19.748558
| 2018-11-08T20:05:43
| 2018-11-08T20:05:43
| 156,735,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,815
|
py
|
'''Delta 5 hardware interface layer.'''
import smbus # For i2c comms
import gevent # For threads and timing
from gevent.lock import BoundedSemaphore # To limit i2c calls
from Node import Node
from BaseHardwareInterface import BaseHardwareInterface
READ_ADDRESS = 0x00 # Gets i2c address of arduino (1 byte)
READ_FREQUENCY = 0x03 # Gets channel frequency (2 byte)
READ_LAP_STATS = 0x05
READ_CALIBRATION_THRESHOLD = 0x15
READ_CALIBRATION_MODE = 0x16
READ_CALIBRATION_OFFSET = 0x17
READ_TRIGGER_THRESHOLD = 0x18
READ_FILTER_RATIO = 0x19
READ_NODE_SCALE = 0x20
WRITE_FREQUENCY = 0x51 # Sets frequency (2 byte)
WRITE_CALIBRATION_THRESHOLD = 0x65
WRITE_CALIBRATION_MODE = 0x66
WRITE_CALIBRATION_OFFSET = 0x67
WRITE_TRIGGER_THRESHOLD = 0x68
WRITE_FILTER_RATIO = 0x69
WRITE_NODE_SCALE = 0x70
UPDATE_SLEEP = 0.1 # Main update loop delay
I2C_CHILL_TIME = 0.075 # Delay after i2c read/write
I2C_RETRY_COUNT = 5 # Limit of i2c retries
def unpack_8(data):
return data[0]
def pack_8(data):
return [data]
def unpack_16(data):
'''Returns the full variable from 2 bytes input.'''
result = data[0]
result = (result << 8) | data[1]
return result
def pack_16(data):
'''Returns a 2 part array from the full variable.'''
part_a = (data >> 8)
part_b = (data & 0xFF)
return [part_a, part_b]
def unpack_32(data):
'''Returns the full variable from 4 bytes input.'''
result = data[0]
result = (result << 8) | data[1]
result = (result << 8) | data[2]
result = (result << 8) | data[3]
return result
def validate_checksum(data):
'''Returns True if the checksum matches the data.'''
if data is None:
return False
checksum = sum(data[:-1]) & 0xFF
return checksum == data[-1]
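# Worked examples of the byte helpers above (values chosen for illustration):
#   pack_16(0x1234)               -> [0x12, 0x34]
#   unpack_16([0x12, 0x34])       -> 0x1234
#   unpack_32([0, 0, 0x12, 0x34]) -> 0x1234
#   validate_checksum([1, 2, 3])  -> True, since (1 + 2) & 0xFF == 3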
class Delta5Interface(BaseHardwareInterface):
def __init__(self):
BaseHardwareInterface.__init__(self)
self.update_thread = None # Thread for running the main update loop
self.pass_record_callback = None # Function added in server.py
self.hardware_log_callback = None # Function added in server.py
self.i2c = smbus.SMBus(1) # Start i2c bus
self.semaphore = BoundedSemaphore(1) # Limits i2c to 1 read/write at a time
self.i2c_timestamp = -1
# Scans all i2c_addrs to populate nodes array
self.nodes = [] # Array to hold each node object
i2c_addrs = [8, 10, 12, 14, 16, 18, 20, 22] # Software limited to 8 nodes
for index, addr in enumerate(i2c_addrs):
try:
self.i2c.read_i2c_block_data(addr, READ_ADDRESS, 1)
print "Node FOUND at address {0}".format(addr)
gevent.sleep(I2C_CHILL_TIME)
node = Node() # New node instance
node.i2c_addr = addr # Set current loop i2c_addr
node.index = index
self.nodes.append(node) # Add new node to Delta5Interface
except IOError as err:
print "No node at address {0}".format(addr)
gevent.sleep(I2C_CHILL_TIME)
for node in self.nodes:
node.frequency = self.get_value_16(node, READ_FREQUENCY)
if node.index == 0:
self.calibration_threshold = self.get_value_16(node,
READ_CALIBRATION_THRESHOLD)
self.calibration_offset = self.get_value_16(node,
READ_CALIBRATION_OFFSET)
self.trigger_threshold = self.get_value_16(node,
READ_TRIGGER_THRESHOLD)
self.filter_ratio = self.get_value_8(node,
READ_FILTER_RATIO)
# self.node_scale = self.get_value_16(node,
# READ_NODE_SCALE)
else:
self.set_calibration_threshold(node.index, self.calibration_threshold)
self.set_calibration_offset(node.index, self.calibration_offset)
self.set_trigger_threshold(node.index, self.trigger_threshold)
# self.set_node_scale(node.index, self.node_scale)
#
# Class Functions
#
def log(self, message):
'''Hardware log of messages.'''
if callable(self.hardware_log_callback):
string = 'Delta 5 Log: {0}'.format(message)
self.hardware_log_callback(string)
#
# Update Loop
#
def start(self):
if self.update_thread is None:
self.log('Starting background thread.')
self.update_thread = gevent.spawn(self.update_loop)
def update_loop(self):
while True:
self.update()
gevent.sleep(UPDATE_SLEEP)
def update(self):
for node in self.nodes:
data = self.read_block(node.i2c_addr, READ_LAP_STATS, 17)
if data != None:
lap_id = data[0]
ms_since_lap = unpack_32(data[1:])
node.current_rssi = unpack_16(data[5:])
node.trigger_rssi = unpack_16(data[7:])
node.peak_rssi_raw = unpack_16(data[9:])
node.peak_rssi = unpack_16(data[11:])
node.loop_time = unpack_32(data[13:])
if lap_id != node.last_lap_id:
if node.last_lap_id != -1 and callable(self.pass_record_callback):
self.pass_record_callback(node, ms_since_lap)
node.last_lap_id = lap_id
#
# I2C Common Functions
#
def i2c_sleep(self):
if self.i2c_timestamp == -1:
return
time_passed = self.milliseconds() - self.i2c_timestamp
time_remaining = (I2C_CHILL_TIME * 1000) - time_passed
if (time_remaining > 0):
# print("i2c sleep {0}".format(time_remaining))
gevent.sleep(time_remaining / 1000.0)
def read_block(self, addr, offset, size):
'''Read i2c data given an address, code, and data size.'''
success = False
retry_count = 0
data = None
while success is False and retry_count < I2C_RETRY_COUNT:
try:
with self.semaphore: # Wait if i2c comms is already in progress
self.i2c_sleep()
data = self.i2c.read_i2c_block_data(addr, offset, size + 1)
self.i2c_timestamp = self.milliseconds()
if validate_checksum(data):
success = True
data = data[:-1]
else:
# self.log('Invalid Checksum ({0}): {1}'.format(retry_count, data))
retry_count = retry_count + 1
except IOError as err:
self.log(err)
self.i2c_timestamp = self.milliseconds()
retry_count = retry_count + 1
return data
def write_block(self, addr, offset, data):
'''Write i2c data given an address, code, and data.'''
success = False
retry_count = 0
data_with_checksum = data
data_with_checksum.append(offset)
data_with_checksum.append(sum(data_with_checksum) & 0xFF)
while success is False and retry_count < I2C_RETRY_COUNT:
try:
with self.semaphore: # Wait if i2c comms is already in progress
self.i2c_sleep()
self.i2c.write_i2c_block_data(addr, offset, data_with_checksum)
self.i2c_timestamp = self.milliseconds()
success = True
except IOError as err:
self.log(err)
self.i2c_timestamp = self.milliseconds()
retry_count = retry_count + 1
return success
#
    # Internal helper functions for setting single values
#
def get_value_8(self, node, command):
data = self.read_block(node.i2c_addr, command, 1)
result = None
if data != None:
result = unpack_8(data)
return result
def get_value_16(self, node, command):
data = self.read_block(node.i2c_addr, command, 2)
result = None
if data != None:
result = unpack_16(data)
return result
def set_and_validate_value_8(self, node, write_command, read_command, in_value):
success = False
retry_count = 0
out_value = None
while success is False and retry_count < I2C_RETRY_COUNT:
self.write_block(node.i2c_addr, write_command, pack_8(in_value))
out_value = self.get_value_8(node, read_command)
if out_value == in_value:
success = True
else:
retry_count = retry_count + 1
self.log('Value Not Set ({0}): {1}/{2}/{3}'.format(retry_count, write_command, in_value, node))
if out_value == None:
out_value = in_value
return out_value
def set_and_validate_value_16(self, node, write_command, read_command, in_value):
success = False
retry_count = 0
out_value = None
while success is False and retry_count < I2C_RETRY_COUNT:
self.write_block(node.i2c_addr, write_command, pack_16(in_value))
out_value = self.get_value_16(node, read_command)
if out_value == in_value:
success = True
else:
retry_count = retry_count + 1
self.log('Value Not Set ({0}): {1}/{2}/{3}'.format(retry_count, write_command, in_value, node))
if out_value == None:
out_value = in_value
return out_value
#
# External functions for setting data
#
def set_frequency(self, node_index, frequency):
node = self.nodes[node_index]
node.frequency = self.set_and_validate_value_16(node,
WRITE_FREQUENCY,
READ_FREQUENCY,
frequency)
def set_calibration_threshold(self, node_index, threshold):
node = self.nodes[node_index]
node.calibration_threshold = self.set_and_validate_value_16(node,
WRITE_CALIBRATION_THRESHOLD,
READ_CALIBRATION_THRESHOLD,
threshold)
def set_calibration_threshold_global(self, threshold):
self.calibration_threshold = threshold
for node in self.nodes:
self.set_calibration_threshold(node.index, threshold)
return self.calibration_threshold
def set_calibration_mode(self, node_index, calibration_mode):
node = self.nodes[node_index]
self.set_and_validate_value_8(node,
WRITE_CALIBRATION_MODE,
READ_CALIBRATION_MODE,
calibration_mode)
def enable_calibration_mode(self):
for node in self.nodes:
            self.set_calibration_mode(node.index, True)
def set_calibration_offset(self, node_index, offset):
node = self.nodes[node_index]
node.calibration_offset = self.set_and_validate_value_16(node,
WRITE_CALIBRATION_OFFSET,
READ_CALIBRATION_OFFSET,
offset)
def set_calibration_offset_global(self, offset):
self.calibration_offset = offset
for node in self.nodes:
self.set_calibration_offset(node.index, offset)
return self.calibration_offset
def set_trigger_threshold(self, node_index, threshold):
node = self.nodes[node_index]
node.trigger_threshold = self.set_and_validate_value_16(node,
WRITE_TRIGGER_THRESHOLD,
READ_TRIGGER_THRESHOLD,
threshold)
def set_trigger_threshold_global(self, threshold):
self.trigger_threshold = threshold
for node in self.nodes:
self.set_trigger_threshold(node.index, threshold)
return self.trigger_threshold
def set_filter_ratio(self, node_index, filter_ratio):
node = self.nodes[node_index]
node.filter_ratio = self.set_and_validate_value_8(node,
WRITE_FILTER_RATIO,
READ_FILTER_RATIO,
filter_ratio)
def set_filter_ratio_global(self, filter_ratio):
self.filter_ratio = filter_ratio
for node in self.nodes:
self.set_filter_ratio(node.index, filter_ratio)
return self.filter_ratio
def set_node_scale(self, node_index, node_scale):
node = self.nodes[node_index]
node.node_scale = self.set_and_validate_value_16(node,
WRITE_NODE_SCALE,
READ_NODE_SCALE,
node_scale)
def intf_simulate_lap(self, node_index):
node = self.nodes[node_index]
node.current_rssi = 11
node.trigger_rssi = 22
node.peak_rssi_raw = 33
node.peak_rssi = 44
node.loop_time = 55
self.pass_record_callback(node, 100)
def get_hardware_interface():
'''Returns the delta 5 interface object.'''
return Delta5Interface()
|
[
"patric.hainz@gmx.at"
] |
patric.hainz@gmx.at
|
b12cd85ce1adc2eeb0c14de7cadfbfc86252efb9
|
6e2bfd9b875b5f9ace608945600c05c214346260
|
/Python/basic/app.py
|
bffaacb6267eaf4c8734bb856b440ba9a6d6f231
|
[] |
no_license
|
abdilfaruq/LearnPython
|
7e079e8f0e69bc9276d78834346ba1e82b091b17
|
fb2b2541afd9531293d608c5c4d624d0985a96e9
|
refs/heads/main
| 2023-06-03T02:04:13.229241
| 2021-06-14T17:53:45
| 2021-06-14T17:53:45
| 376,902,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
#Module
"""
import data as d
print(d.person)
print(d.printNama("jack"))
from data import person, printNama
print(person)
print(printNama("jack"))
"""
import data
print(data.person)
print(data.printNama("jack"))
#Built in Module
"""
import datetime
date = datetime.datetime(2021, 1, 13)
print(date)
"""
import datetime
now = datetime.datetime.now()
print("\n",now)
print(now.strftime("%Y, %B, %d"))
|
[
"noreply@github.com"
] |
abdilfaruq.noreply@github.com
|
b4ddef1979f3d80f7d1bf524fda4e3bf1a7c2edb
|
e2469c3ca6c9ce57dddf7e5d3eb0ccf6d6875997
|
/src/Model.py
|
03e7d6c8f0722f0dc6013f236002d3d51587de4e
|
[] |
no_license
|
pranavlal30/NeuralNet-
|
8f9384023742f8a66984eda810c44f13b8aed707
|
553f1bf2dc865d6dde96cefdb82f02d0c86f1416
|
refs/heads/master
| 2020-04-29T19:36:37.498752
| 2019-03-18T20:08:52
| 2019-03-18T20:08:52
| 176,360,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,882
|
py
|
import numpy as np
from hiddenLayer import HiddenLayer
from outputLayer import OutputLayer
from collections import defaultdict
class Model:
def __init__(self):
self.input_size = 0
self.output_size = 0
self.hidden_layers = []
self.output_layer = 0
self.metrics = defaultdict(list)
def build_model(self, input_size, output_size, hidden_layer_info):
## Function to build the model ##
## Arguments:
## input_size : Number of input features
## output_size : Number of output classes
## hidden_layer_info : array with size of hidden layers
np.random.seed(42)
self.input_size = input_size
self.output_size = output_size
prev_layer_size = input_size
for hidden_layer_size in hidden_layer_info:
self.hidden_layers.append(HiddenLayer(prev_layer_size, hidden_layer_size))
prev_layer_size = hidden_layer_size
self.output_layer = OutputLayer(prev_layer_size, self.output_size)
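    # Hypothetical usage sketch (layer sizes are illustrative only):
    #   model = Model()
    #   model.build_model(input_size=784, output_size=10, hidden_layer_info=[128, 64])
    #   metrics = model.fit(x_train, y_train, x_val, y_val, learning_rate=0.01, epochs=5)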
def feedforward(self, input):
##Feedforward function. Makes a forward pass through each layer.
current_layer_input = input
for layer in self.hidden_layers:
current_layer_input = layer.forward(current_layer_input)
yhat = self.output_layer.forward(current_layer_input)
return yhat
def backward_propagation(self, y_pred, y_true, x_train):
##Backpropagation function. Makes a backward pass through each layer.
loss_derivative = self.categorical_loss(y_pred, y_true, derivation = True)
prev_layer_output = self.hidden_layers[-1].output
delta = self.output_layer.backward(prev_layer_output, loss_derivative)
next_layer_weight = self.output_layer.weights
for layer_no in range(len(self.hidden_layers)-1,0,-1):
prev_layer_output = self.hidden_layers[layer_no-1].output
delta = self.hidden_layers[layer_no].backward(delta, prev_layer_output, next_layer_weight)
next_layer_weight = self.hidden_layers[layer_no].weights
delta = self.hidden_layers[0].backward(delta, x_train, next_layer_weight)
def update_parameters(self, learning_rate):
for layer in self.hidden_layers:
layer.weights = layer.weights - (learning_rate * layer.weight_gradient)
self.output_layer.weights = self.output_layer.weights - (learning_rate * self.output_layer.weight_gradient)
def categorical_loss(self, y_pred, y_true, derivation = False):
if derivation:
return - (y_true/y_pred)
else:
return -np.sum(np.log(y_pred[y_true == 1])) / len(y_pred)
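    # For reference: with a one-hot y_true, the non-derivative branch reduces to
    # cross-entropy L = -log(y_pred[true_class]) / len(y_pred), and the
    # `derivation` branch returns its elementwise gradient w.r.t. y_pred,
    # -y_true / y_pred.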
def accuracy(self, y_pred, y_train):
return np.round(np.float(sum(np.argmax(y_train, axis = -1) == np.argmax(y_pred, axis = -1)))/len(y_pred) * 100, 2)
def shuffle_dataset(self, x, y):
assert len(x) == len(y)
p = np.random.permutation(len(y))
return x[p], y[p]
def evaluate_model(self, x, y):
y_pred = np.zeros(y.shape)
loss = 0
for i in range(len(y)):
y_pred[i] = self.feedforward(x[i])
loss = loss + self.categorical_loss(y_pred[i], y[i])
loss = loss / len(y)
accuracy = self.accuracy(y_pred, y)
return (loss, accuracy)
def fit(self, x_train, y_train, x_val = 0, y_val = 0, learning_rate = 0.01, epochs = 5):
for epoch in range(epochs):
print("Epoch: " + str(epoch+1))
for i in range(len(x_train)):
y_pred = self.feedforward(x_train[i])
loss = self.categorical_loss(y_pred, y_train[i])
self.backward_propagation(y_pred, y_train[i], x_train[i])
self.update_parameters(learning_rate)
(train_loss, train_accuracy) = self.evaluate_model(x_train, y_train)
(val_loss, val_accuracy) = self.evaluate_model(x_val, y_val)
self.metrics['acc'].append(train_accuracy)
self.metrics['loss'].append(train_loss)
self.metrics['val_acc'].append(val_accuracy)
self.metrics['val_loss'].append(val_loss)
x_train, y_train = self.shuffle_dataset(x_train, y_train)
return self.metrics
|
[
"pranav.lal30@gmail.com"
] |
pranav.lal30@gmail.com
|
67e6122af7dc8ce05eaf37c222fc63694777adbd
|
b5803da09ae899adacb7549daecd34cbb1fde325
|
/myprojectenv/bin/wheel
|
1a446c8e61f4b7d54b489c088e47dcc3ce5f3e23
|
[
"MIT"
] |
permissive
|
Umidrifkatov/itlabs
|
acd702a8ff5ee449e8f199fec55f1ddf4b7f804a
|
87905c964db497a3781040693f5bac49ccc13ae0
|
refs/heads/master
| 2022-06-18T02:57:22.689172
| 2020-05-08T06:34:08
| 2020-05-08T06:34:08
| 262,229,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
#!/home/user/myproject/myprojectenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
|
ce9f012d9df5cfda7aaaf72546c934028dc47983
|
cfbb34dd860a0707a5c6937c3e9dfe8d74433e27
|
/survey_form/wsgi.py
|
d5fe015aa43f2289f84db81261f2e472bc8bbb94
|
[] |
no_license
|
andamuthu/test
|
eba4876fc886877eeb458f4a37e96ddb02dbfed1
|
49db219a905ea3ebcb71120dac3f1d66821cc752
|
refs/heads/master
| 2022-12-09T13:19:31.618651
| 2020-09-07T04:10:00
| 2020-09-07T04:10:00
| 292,813,311
| 0
| 0
| null | 2020-09-07T04:10:01
| 2020-09-04T09:58:00
|
Python
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for survey_form project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'survey_form.settings')
application = get_wsgi_application()
|
[
"andamuthu.a@gmail.com"
] |
andamuthu.a@gmail.com
|
d2ef114a00ba5b4f380bf39aa57aa8df100c7d5f
|
a2703c903f90d2eb3a2dcd161f71667a304de120
|
/client/node_modules/chokidar/node_modules/fsevents/build/config.gypi
|
7159dd237d2796a2a9959e0615e4f835b3f1783f
|
[
"MIT"
] |
permissive
|
mJiyan/mercedes-coding-challenge
|
dcae18358bb0d76f90aba9de60cc267452b622b9
|
fda17ec697b5c7b0e7f46b5588e4b8df8d92733c
|
refs/heads/master
| 2023-06-18T17:32:21.670308
| 2021-07-17T07:13:29
| 2021-07-17T07:13:29
| 379,578,265
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/mjiyan/Library/Caches/node-gyp/14.16.1",
"standalone_static_library": 1,
"version_git_tag": "true",
"init_license": "MIT",
"registry": "https://registry.yarnpkg.com",
"version_commit_hooks": "true",
"bin_links": "true",
"save_prefix": "^",
"strict_ssl": "true",
"version_git_message": "v%s",
"version_git_sign": "",
"ignore_scripts": "",
"user_agent": "yarn/1.22.4 npm/? node/v14.16.1 darwin x64",
"init_version": "1.0.0",
"ignore_optional": "",
"version_tag_prefix": "v"
}
}
|
[
"m.jiyan_aslan@hotmail.com"
] |
m.jiyan_aslan@hotmail.com
|
fc98534217d6f3f2b8dd044e6270e904d659d5d0
|
ef94cddf90ac74d674598ac22b105bb58c1303da
|
/base/clases_y_objetos.py
|
ebce95a52617de24b52a46773792f152bf2f3604
|
[] |
no_license
|
IvanLpJc/python-course
|
559f22bc6fe822c5ed2ef18f991f982098baf200
|
d5345267c3b21d58db97fd69bed75996d01d7df7
|
refs/heads/main
| 2023-06-18T19:52:41.014667
| 2021-07-20T11:57:18
| 2021-07-20T11:57:18
| 385,641,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,895
|
py
|
print("*********************************************")
print("* Clases y objetos *")
print("*********************************************")
class claseSilla:
color = "blanco"
precio = 100
silla = claseSilla()
print(silla.color)
print(silla.precio)
silla2 = claseSilla()
silla2.color = "verde"
silla2.precio = 120
print(silla2.color)
print(silla2.precio)
class Persona:
def __init__(self, nombre, edad) -> None:
self.nombre = nombre
self.edad = edad
def saludar(self):
print(f"Hola, me llamo {self.nombre} y tengo {self.edad} años")
persona = Persona("Juan", 37)
persona.saludar()
print("*********************************************")
print("* Funciones *")
print("*********************************************")
def saludo(nombre):
print("Buenos días " + nombre)
saludo("Pepe")
def suma(n1,n2):
return n1+n2
print(suma(5,8))
# Passing a value by reference
colores = ['rojo','verde','azul']
def incluir_color(colores, color):
    colores.append(color) # adds the new color to colores
incluir_color(colores, "negro")
print(colores)
print("*********************************************")
print("* Funciones lambda *")
print("*********************************************")
# A small, anonymous function
resultado = lambda numero: numero + 30 # The function takes a number and adds 30, returning the result
print(resultado(10)) # The lambda function is called like this, returning 40
resultado2 = lambda n1, n2: n1+n2
print(resultado2(5,8))
print("*********************************************")
print("* Ejercicio 1 *")
print("*********************************************")
# Create a Coche class with the attributes marca, color, combustible and cilindrada
# Create the __init__ function that assigns the class parameters to the attributes
# Create the mostrar_características function using print to show the attributes
# Create a coche1 object with the attributes opel, rojo, gasolina and 1.6
# Run mostrar_caracteristicas
class Coche:
def __init__(self, marca, color, combustible, cilindrada) -> None:
self.marca = marca
self.color = color
self.combustible = combustible
self.cilindrada = cilindrada
def mostrar_características(self):
print(f"El coche es un {self.marca}, de color {self.color}, utiliza {self.combustible} y tiene un motor de {self.cilindrada} litros")
coche1 = Coche("Opel", "rojo", "gasolina", '1.6')
coche1.mostrar_características()
print("*********************************************")
print("* Ejercicio 2 *")
print("*********************************************")
# Create a lambda function that computes the average of three grades
media = lambda n1,n2,n3: (n1+n2+n3)/3
print(media(5,7,2))
|
[
"malverano78@gmail.com"
] |
malverano78@gmail.com
|
31cf54c91723e09a32191bd268d56fa21112d361
|
12183b476970b06f2aeb58584b38976a4938599e
|
/func/utils.py
|
7c2da2417109a7c3b6fdb9177b03799914e8099e
|
[] |
no_license
|
IsraelRamirez/SOAP-Python
|
5d9234ed2a6a0cddccec39ced5f3123aea6f9784
|
53a40fc674d281278afea3966db80cc737bc2fb6
|
refs/heads/master
| 2022-11-20T06:48:00.615514
| 2020-07-27T16:31:55
| 2020-07-27T16:31:55
| 276,525,269
| 0
| 0
| null | 2020-07-27T16:31:21
| 2020-07-02T02:05:39
|
Python
|
UTF-8
|
Python
| false
| false
| 4,323
|
py
|
### Internal classes
from models.carrera import carrera as carreras
from models.rut import rut as ruts
### Useful libraries
import pg
from openpyxl import Workbook
import os
query = "SELECT codCarrera, vacant, nem, ranking,matematica,lenguaje,histociencia, firsts FROM ponderados"
# Database connection settings
dbhost = 'localhost'
dbname = 'psudb'
dbuser = 'psu'
dbpsw = 'psu'
def participantes():
print("\n@====Participantes===@")
print("\n@===Israel Ramirez===@")
print("\n@===Humberto Roman===@")
print("\n@===Victor Araya=====@")
print("\n@====Participantes===@")
# Returns the index where the best degree program should be placed within the list of weighted scores for that rut
# @param rut "ruts" object with the applicant's information
# @param ponderacion weighted score obtained for a given degree program
# @param carrera degree program for which the weighted score is computed
# @param listofcarreras list of all degree programs
# @return the index
def indexador(rut,ponderacion,carrera,listofcarreras):
for i in range(0,len(rut.carreraPondera)):
if(ponderacion > rut.carreraPondera[i][1]):
return i
elif(ponderacion == rut.carreraPondera[i][1]):
for j in range(0,len(listofcarreras)):
if(listofcarreras[j].codCarrera == rut.carreraPondera[i][0]):
if(carrera.first == listofcarreras[j].first):
if(carrera.vacant<=listofcarreras[j].vacant):
return i
else:
break
elif(carrera.first > listofcarreras[j].first):
return i
else:
break
return -1
# Function that returns the index where the rut should go within the degree program.
# Note: someone who entered the program earlier is considered to have higher priority, which is why it only checks
# for strictly greater than...
# @param rut "ruts" object with the rut data
# @param carrera "carreras" object with the data of one degree program
# @return the index where the rut should be inserted within the degree program
def indexadorSimple(rut,carrera):
for i in range(0,len(carrera.personas)):
if(rut.carreraPondera[0][1] > carrera.personas[i].carreraPondera[0][1]):
return i
return -1
# Function that connects to the database and returns the results of the query
# @param query SQL statement
# @return the results of the SQL statement
def dbquery(query):
conn = pg.DB(host=dbhost,user=dbuser,passwd=dbpsw, dbname=dbname)
result = conn.query(query)
conn.close()
return result
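# Hypothetical usage sketch, reusing the module-level `query` defined above:
#   for row in dbquery(query):
#       print(row)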
# Function that builds a list of degree programs, each with its corresponding program code.
# @return a list of degree programs initialized with the program code.
def initCarreras():
listofcarreras = []
queryResult = dbquery(query)
for row in queryResult:
if(row[0]):
tmpCarrera = carreras()
tmpCarrera.codCarrera = int(row[0])
tmpCarrera.vacant = int(row[1])
tmpCarrera.ponderaciones.append(float(row[2]))
tmpCarrera.ponderaciones.append(float(row[3]))
tmpCarrera.ponderaciones.append(float(row[4]))
tmpCarrera.ponderaciones.append(float(row[5]))
tmpCarrera.ponderaciones.append(float(row[6]))
tmpCarrera.first = float(row[7])
listofcarreras.append(tmpCarrera)
else:
break
return listofcarreras
# Function that generates, from the ruts admitted to each degree program, an Excel file with the resulting data
# @param listofcarreras the degree programs with their admitted ruts and weighted scores
def getExcel(listofcarreras):
wb = Workbook()
for i in range(0,len(listofcarreras)):
        sheet = wb.create_sheet(str(listofcarreras[i].codCarrera)) ###...Creates a new sheet...
row = 0
        for j in range(0,len(listofcarreras[i].personas)): ###...And finally registers each student in the spreadsheet
row+=1
sheet['A'+str(row)] = str(listofcarreras[i].personas[j].rut)
sheet['B'+str(row)] = listofcarreras[i].personas[j].carreraPondera[0][1]
del wb['Sheet']
nombre="tmp.xlsx"
wb.save(nombre)
|
[
"nicexreal@gmail.com"
] |
nicexreal@gmail.com
|
6498e1e16d21c3f27776a003f156ecfdfe0b9f63
|
865dc5767cd53a6a9b28e9a6484922281801a04d
|
/transmitter.py
|
8057f8fdf64e622ced1155408c94d4fbb35c0d4b
|
[
"MIT"
] |
permissive
|
ghTravis/NetSim
|
649d1878966f3879b091d3f92298be0b4085892a
|
53db51bd8b708ee61e562f182adb235a37864cd7
|
refs/heads/master
| 2021-07-08T17:12:02.509887
| 2017-09-30T00:11:14
| 2017-09-30T00:11:14
| 105,326,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,208
|
py
|
#!/usr/bin/env python
from __future__ import print_function
from random import randint
from lib import socket_wrapper as SWrapper
from lib import packet_wrapper as PWrapper
from threading import Timer
import sys
import socket
import time
import pickle
import ConfigParser
import logging
class Transmitter(object):
"""
Client Class
Establish connection with server and download a file
"""
    def __init__(self):
        """
        Init
        Client object initialization; host and port are read from config.ini
        :return:
        """
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
# create a file handler
handler = logging.FileHandler('transmitter.log')
handler.setLevel(logging.INFO)
# add the handlers to the logger
self.logger.addHandler(handler)
# Consume the configuration file
self.config = ConfigParser.RawConfigParser()
self.config.read('config.ini')
self.port = int(self.config.get('transmitter', 'port'))
self.host = self.config.get('transmitter', 'host')
self.bufsize = 1024
self.socket_send = SWrapper.Socket(port=self.port+1)
self.socket_recv = SWrapper.Socket(port=self.port)
self.packets = None
self.rate = None
self.sequence_num = 0
self.timer = None
# Connect to a remote host by opening a socket for communication
self.logger.info("Bound to port {}".format(self.port))
self.logger.info("Connected to port {}".format(self.port+1))
self.socket_send.socket_send.connect((self.host, self.port+1))
self.socket_recv.socket_recv.bind(("", self.port))
    def main(self):
        """
        main
        Send packets to the receiver and wait for ACK/EOT responses
        :return:
        """
# Maintain continuous loop
while True:
# Loop through window size of self.packets -1 (EOT packet always at the end)
# Keep a list of packets we need to be ACK'd
packet_list = []
packet_list = self.transmit()
# Wait for the packets to come back ACK'd, and for a final EOT packet
while len(packet_list)+1 > 0:
# Get data from the receive socket
data, address = self.socket_recv.socket_recv.recvfrom(self.bufsize)
data_obj = pickle.loads(data)
if data_obj.packet_type == 0:
self.logger.info("Received EOT packet with sequence number {}".format(data_obj.sequence_number))
#self.timer.cancel()
if len(packet_list) > 0:
time.sleep(self.rate)
self.logger.info("Packet list not empty, trying retransmit of packets: {}...".format(packet_list))
self.transmit(packet_list, force=True)
continue
self.sequence_num = data_obj.sequence_number+1
break
# We don't want any packet that isn't of type ACK (2) or EOT
if data_obj.packet_type != 2:
continue
# Continue if the packet we receive does not have an ACK number
try:
packet_list.remove(data_obj.ack_num)
except ValueError as e:
continue
self.logger.info("Received type {} packet with sequence number {} from server: {}".format(data_obj.packet_type,
data_obj.sequence_number,
data_obj.payload))
self.logger.info("removing sequence number {} from packet_list, packets still remaining: {}".format(data_obj.ack_num, packet_list))
self.socket_send.socket_send.close()
self.socket_recv.socket_recv.close()
def transmit(self, packet_list=[], force=False):
if force == True:
for x in packet_list:
# Create data packet and add to the packet_list
packet = PWrapper.Packet(1, x, "a", len(packet_list)+1)
#packet_list.append(self.sequence_num)
# Send packet to the sending socket, serialize the packet object with pickle
self.socket_send.socket_send.send(pickle.dumps(packet))
else:
x = 0
while x < int(self.packets) - 1:
# Create data packet and add to the packet_list
packet = PWrapper.Packet(1, self.sequence_num, "a", int(self.packets))
packet_list.append(self.sequence_num)
# Send packet to the sending socket, serialize the packet object with pickle
self.socket_send.socket_send.send(pickle.dumps(packet))
# Increment the counter and sequence number
x += 1
self.sequence_num += 1
# append the EOT packet to the end, increment the sequence number, send it to the channel
time.sleep(self.rate)
packet = PWrapper.Packet(0, self.sequence_num, "", int(self.packets))
packet_list.append(self.sequence_num)
self.sequence_num += 1
self.socket_send.socket_send.send(pickle.dumps(packet))
# self.timer = Timer(3, self.timeout(packet_list))
# self.timer.start()
self.logger.info(packet_list)
return packet_list
def timeout(self, packet_list):
print("Timed out while waiting for packets...")
self.transmit(packet_list)
if __name__ == "__main__":
# Initialize the Server Object
packets = sys.argv[1]
rate = float(sys.argv[2])
# Call send(), handle errors and close socket if exception
try:
run = Transmitter()
run.packets = packets
run.rate = float(rate)
run.main()
except KeyboardInterrupt as e:
run.socket_send.socket_send.close()
run.socket_recv.socket_recv.close()
exit()
|
[
"tryder@hotheadgames.com"
] |
tryder@hotheadgames.com
|
e73a7666e80d8ddc141304fb5c0093c649e0046d
|
71a2563d2587d215b0b29051ed770b8120ca1030
|
/2015_8_27_testAutomation_pexpect_xml_exception_novation/ssh_connect.py
|
dd22d976ae0a57606496acb0b6914582ef21d432
|
[] |
no_license
|
liam-Dean/TestAutomation
|
7ed30d742333946d131b71e012b7b89291b32cee
|
5eff85e2f45170114d327a1cefe020e2d6dc304f
|
refs/heads/master
| 2021-01-24T01:00:09.061704
| 2015-09-01T01:10:04
| 2015-09-01T01:10:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'ezhicdi'
import paramiko,sys,traceback
from paramiko.py3compat import input
try:
import ssh_interactive
except ImportError:
from . import ssh_interactive
class ssh:
def __init__(self,hostname,username,password,port=22):
self.hostname=hostname
self.username=username
self.password=password
self.port=port
self.client = ''
self.chan = ''
    def connect(self):
        # now, connect and use paramiko Client to negotiate SSH2 across the connection
        try:
            self.client = paramiko.SSHClient()
            self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            print('***Connecting***')
            # open the connection; without this call exec_command/invoke_shell have no session
            self.client.connect(self.hostname, port=self.port,
                                username=self.username, password=self.password)
        except Exception as e:
            print('*** Caught exception: %s: %s' % (e.__class__, e))
            traceback.print_exc()
            try:
                self.client.close()
            except:
                pass
            sys.exit(1)
    def execute_command(self,commands):
        for command in commands:
            stdin, stdout, stderr = self.client.exec_command(command)
            print(stdout.readlines())
    def interactive_shell(self):
        self.chan = self.client.invoke_shell()
        print('*** interactive!\n')
        ssh_interactive.interactive_shell(self.chan)
    def disconnect(self):
        self.chan.close()
        self.client.close()
        print("***disconnect!***")
|
[
"jason-dean@outlook.com"
] |
jason-dean@outlook.com
|
a09878bdc8bdf8a2faffc52c9e75c10d1d861837
|
c9ee5e7dd913c498b739732460d4aedd054034a3
|
/predict/attention/predict.py
|
aca54cab30a7328808a28b8ad976b2e8fc0c7fa5
|
[] |
no_license
|
patrickbryant1/xprize_covid
|
73a307369aa837e79485ba8c93088fcc578ab523
|
4aa28a63687145a60ce2339548ba7d40fbe35655
|
refs/heads/main
| 2023-08-25T21:30:41.779311
| 2021-11-09T10:59:16
| 2021-11-09T10:59:16
| 314,192,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,499
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
import numpy as np
import os
import sys
import glob
from tensorflow.keras.models import model_from_json
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
import tensorflow.keras.models as M
import pdb
def load_model():
'''Load the model
'''
intercepts = []
coefs = []
#Fetch model weights
all_weights = glob.glob('./model/fold*')
#Add weights to model
all_models = []
for weights in all_weights:
#Load model
json_file = open('./model/model.json', 'r')
model_json = json_file.read()
model = model_from_json(model_json)
model.load_weights(weights)
all_models.append(model)
return all_models
def predict(start_date, end_date, path_to_ips_file, output_file_path):
"""
Will be called like:
python predict.py -s start_date -e end_date -ip path_to_ip_file -o path_to_output_file
Generates and saves a file with daily new cases predictions for the given countries, regions and intervention
plans, between start_date and end_date, included.
:param start_date: day from which to start making predictions, as a string, format YYYY-MM-DDD
:param end_date: day on which to stop making predictions, as a string, format YYYY-MM-DDD
:param path_to_ips_file: path to a csv file containing the intervention plans between inception date (Jan 1 2020)
and end_date, for the countries and regions for which a prediction is needed
:param output_file_path: path to file to save the predictions to
:return: Nothing. Saves the generated predictions to an output_file_path CSV file
with columns "CountryName,RegionName,Date,PredictedDailyNewCases"
"""
# !!! YOUR CODE HERE !!!
ID_COLS = ['CountryName',
'RegionName',
'GeoID',
'Date']
NPI_COLS = ['C1_School closing',
'C2_Workplace closing',
'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport',
'C6_Stay at home requirements',
'C7_Restrictions on internal movement',
'C8_International travel controls',
'H1_Public information campaigns',
'H2_Testing policy',
'H3_Contact tracing',
'H6_Facial Coverings']
#1. Select the wanted dates from the ips file
start_date = pd.to_datetime(start_date, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date, format='%Y-%m-%d')
# Load historical intervention plans, since inception
hist_ips_df = pd.read_csv(path_to_ips_file,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str},
error_bad_lines=True)
    # Add GeoID column that combines CountryName and RegionName for easier manipulation of data
#hist_ips_df['RegionName'] = hist_ips_df['RegionName'].fillna(0)
hist_ips_df['GeoID'] = hist_ips_df['CountryName'] + '__' + hist_ips_df['RegionName'].astype(str)
# Fill any missing NPIs by assuming they are the same as previous day
for npi_col in NPI_COLS:
hist_ips_df.update(hist_ips_df.groupby(['CountryName', 'RegionName'])[npi_col].ffill().fillna(0))
# Intervention plans to forecast for: those between start_date and end_date
ips_df = hist_ips_df[(hist_ips_df.Date >= start_date) & (hist_ips_df.Date <= end_date)]
ips_df.GeoID.unique()
#2. Load the model
all_models = load_model()
#3. Load the additional data
data_path = '../../data/adjusted_data.csv'
adjusted_data = pd.read_csv(data_path,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str,
"RegionCode": str,
"Country_index":int,
"Region_index":int},
error_bad_lines=False)
# Add RegionID column that combines CountryName and RegionName for easier manipulation of data
adjusted_data['RegionName'] = adjusted_data['RegionName'].replace('0', np.nan)
adjusted_data['GeoID'] = adjusted_data['CountryName'] + '__' + adjusted_data['RegionName'].astype(str)
adjusted_data = adjusted_data.fillna(0)
#4. Run the predictor
additional_features = ['smoothed_cases',
'cumulative_smoothed_cases',
'rescaled_cases',
'cumulative_rescaled_cases',
'rescaled_cases_daily_change',
'smoothed_cases_daily_change',
'monthly_temperature',
'retail_and_recreation',
'grocery_and_pharmacy',
'parks',
'transit_stations',
'workplaces',
'residential',
'gross_net_income',
'population_density',
'pdi', 'idv', 'mas', 'uai', 'ltowvs', 'ivr']
#NB_LOOKBACK_DAYS=21
# Make predictions for each country,region pair
geo_pred_dfs = []
for g in ips_df.GeoID.unique():
print('Predicting for', g)
#Get intervention plan for g
ips_gdf = ips_df[ips_df.GeoID == g]
# Pull out all relevant data for g
adjusted_data_gdf = adjusted_data[adjusted_data.GeoID == g]
#Check the timelag to the last known date
last_known_date = adjusted_data_gdf.Date.max()
        #The start date may lie well beyond the last known date, in which case the intermediate inputs must be predicted first
        # Start predicting from start_date, or from the day after the last known date if there is a gap
current_date = min(last_known_date + np.timedelta64(1, 'D'), start_date)
        #Select everything from df up to the current date
adjusted_data_gdf = adjusted_data_gdf[adjusted_data_gdf['Date']<current_date]
adjusted_data_gdf = adjusted_data_gdf.reset_index()
#Check if enough data to predict
if len(adjusted_data_gdf)<21:
print('Not enough data for',g)
continue
#Get population
population = adjusted_data_gdf['population'].values[0]
#Normalize the cases by 100'000 population - remember to scale back for predictions as well
adjusted_data_gdf['rescaled_cases']=adjusted_data_gdf['rescaled_cases']/(population/100000)
adjusted_data_gdf['cumulative_rescaled_cases']=adjusted_data_gdf['cumulative_rescaled_cases']/(population/100000)
adjusted_data_gdf['smoothed_cases']=adjusted_data_gdf['smoothed_cases']/(population/100000)
adjusted_data_gdf['cumulative_smoothed_cases']=adjusted_data_gdf['cumulative_smoothed_cases']/(population/100000)
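        # Worked example of this scaling (illustrative numbers only): for a population
        # of 5,000,000, population/100000 == 50, so 1,000 raw cases become 20 cases
        # per 100k; multiplying by 50 undoes the scaling (see the commented-out
        # rescale where geo_pred_df is built below).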
#Add daily change
adjusted_data_gdf['rescaled_cases_daily_change']=np.append(np.zeros(1),np.array(adjusted_data_gdf['rescaled_cases'])[1:]-np.array(adjusted_data_gdf['rescaled_cases'])[:-1])
adjusted_data_gdf['smoothed_cases_daily_change']=np.append(np.zeros(1),np.array(adjusted_data_gdf['smoothed_cases'])[1:]-np.array(adjusted_data_gdf['smoothed_cases'])[:-1])
#Get historical NPIs
historical_npis_g = np.array(adjusted_data_gdf[NPI_COLS])
#Get other daily features
adjusted_additional_g = np.array(adjusted_data_gdf[additional_features])
#Get future NPIs
future_npis = np.array(ips_gdf[NPI_COLS])
        # Prepare data: the rolling feature and NPI arrays (adjusted_additional_g and
        # historical_npis_g) are extended inside the prediction loop below, so the
        # concatenation there reads them directly.
# Make prediction for each requested day
geo_preds = []
days_ahead = 0
while current_date <= end_date:
            # Use the rolling arrays so each iteration sees the latest appended predictions
            X = np.concatenate([adjusted_additional_g, historical_npis_g], axis=1)
# Make the prediction
pred = []
for m in all_models:
pred.append(m.predict(np.array([X[-7:,:]]))[0])
pred = np.array(pred)
# Do not allow predicting negative cases
pred[pred<0]=0
#Do not allow predicting more cases than 20 % of population at a given day
pred[pred>(0.2*population/100000)]=0.2*population/100000
            std_pred = np.std(pred,axis=0)  # ensemble spread (currently unused; kept for diagnostics)
pred = np.average(pred,axis=0)
# Add if it's a requested date
if current_date+ np.timedelta64(21, 'D') >= start_date:
#Append the predicted dates
days_for_pred = current_date+ np.timedelta64(21, 'D')-start_date
geo_preds.extend(pred[-days_for_pred.days:])
#print(current_date.strftime('%Y-%m-%d'), pred)
else:
print(current_date.strftime('%Y-%m-%d'), pred, "- Skipped (intermediate missing daily cases)")
            # Append the predictions and NPIs for the next 21 predicted days
            # so that predictions for further-out days can be rolled out from them.
future_additional = np.repeat(np.array([adjusted_additional_g[-1,:]]),len(pred),axis=0)
future_additional[:,0]=pred #add predicted cases
future_additional[:,1]=np.cumsum(pred) #add predicted cumulative cases
            # TODO: look up the monthly temperature for the predicted dates ('monthly_temperature')
adjusted_additional_g = np.append(adjusted_additional_g, future_additional,axis=0)
historical_npis_g = np.append(historical_npis_g, future_npis[days_ahead:days_ahead + 21], axis=0)
# Move to next period
current_date = current_date + np.timedelta64(21, 'D')
days_ahead += 21
# Create geo_pred_df with pred column
geo_pred_df = ips_gdf[ID_COLS].copy()
geo_pred_df['PredictedDailyNewCases'] = np.array(geo_preds[:len(geo_pred_df)]) #*(population/100000)
        #Check: merge in the known smoothed cases for comparison
        adjusted_data_gdf = adjusted_data[adjusted_data.GeoID == g]
        adjusted_data_gdf['smoothed_cases'] = adjusted_data_gdf['smoothed_cases']  # *(population/100000)
geo_pred_df = pd.merge(geo_pred_df,adjusted_data_gdf[['Date','smoothed_cases']],on='Date',how='left')
geo_pred_df['population']=population
#Save
geo_pred_dfs.append(geo_pred_df)
#4. Obtain output
    # Combine all predictions into a single dataframe - remember to only select the required columns later
pred_df = pd.concat(geo_pred_dfs)
# Save to a csv file
#All
pred_df.to_csv('all_'+output_file_path, index=False)
#Only the required columns
pred_df.drop(columns={'GeoID','smoothed_cases','population'}).to_csv(output_file_path, index=False)
print("Saved predictions to", output_file_path)
return None
# !!! PLEASE DO NOT EDIT. THIS IS THE OFFICIAL COMPETITION API !!!
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date",
dest="start_date",
type=str,
required=True,
help="Start date from which to predict, included, as YYYY-MM-DD. For example 2020-08-01")
parser.add_argument("-e", "--end_date",
dest="end_date",
type=str,
required=True,
help="End date for the last prediction, included, as YYYY-MM-DD. For example 2020-08-31")
parser.add_argument("-ip", "--interventions_plan",
dest="ip_file",
type=str,
required=True,
help="The path to an intervention plan .csv file")
parser.add_argument("-o", "--output_file",
dest="output_file",
type=str,
required=True,
help="The path to the CSV file where predictions should be written")
args = parser.parse_args()
print("Generating predictions from", args.start_date, "to", args.end_date,"...")
predict(args.start_date, args.end_date, args.ip_file, args.output_file)
print("Done!")
|
[
"patrick.bryant@live.com"
] |
patrick.bryant@live.com
|
718483a85d7529f7919f5929300066f497654161
|
ab690adfd91fae5fa7d37e8b4232b8eefdced156
|
/python/bin/wheel
|
ca3c988afd29611923c5f414e40199d29709de56
|
[] |
no_license
|
eitikimura/structured-streaming
|
c4fab990f35cf65e9ca72576004c65cec8868c3c
|
80c2d593ca53daf9e2d23f7d37b3fd8e15e8b21c
|
refs/heads/master
| 2020-05-18T20:29:04.152676
| 2019-05-02T19:01:38
| 2019-05-02T19:01:38
| 184,633,860
| 9
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
#!/Users/eiti/git-repository/structured-streaming/python/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"eiti.kimura@movile.com"
] |
eiti.kimura@movile.com
|
|
8863ef368a9a92f74c2c1e4f235d9242e2c2a107
|
feff8828195b5855025cabf650862b4c6f9ff6df
|
/RootEBDS/apps/cfg/serializers.py
|
5df41d87f89aeb963c27a4869983e118f346fc20
|
[] |
no_license
|
2511zzZ/EBDS-server
|
99d42f11215b65e2906faf9d2f4aad8f3c46c998
|
c708bb6e7babf86f8101cce1bd18c5b3eae3779e
|
refs/heads/master
| 2021-07-06T08:46:01.799333
| 2020-03-02T12:34:16
| 2020-03-02T12:34:16
| 244,362,822
| 0
| 0
| null | 2021-06-10T22:37:33
| 2020-03-02T12:14:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,790
|
py
|
from collections import OrderedDict
from rest_framework import serializers
from .models import CfgBaseInquiry, CfgUserInquiry
from .models import CfgAlertCondition, CfgAlertTransfer
class BaseInquirySerializer(serializers.ModelSerializer):
mode = serializers.IntegerField(min_value=1)
class Meta:
model = CfgBaseInquiry
fields = "__all__"
class UserInquirySerializer(serializers.ModelSerializer):
scope = {
"value_scope": [0, 60],
"status_scope": [0, 1]
}
class Meta:
model = CfgUserInquiry
fields = ("cfg", "status", "value")
read_only_fields = ["cfg", "mode"]
def __switch_cfg_mode__(self, response, cfg):
"""
根据cfg选择提供设置方式
:param response:
:param cfg: BaseInquiry对象
:return: 筛选后的response
"""
# 1只能设置value, 2只能设置status
if cfg.mode == 1:
response.pop("status")
if cfg.mode == 2:
response.pop("value")
return response
def __get_pretty_response__(self, response):
"""
通过字典的形式嵌套原有的response, 以提供带有详细描述的接口数据
:param response:
:param cfg:
:return:
"""
for key in response.keys():
if key != "cfg":
response[key] = {"value": response[key],
"scope": self.scope[key + "_scope"]}
return response
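    # Illustrative shape of the enriched representation (a sketch; numbers are made up):
    #   {"cfg": 1,
    #    "value": {"value": 30, "scope": [0, 60]}}
    # (__switch_cfg_mode__ has already dropped either "value" or "status",
    # depending on cfg.mode, before the enrichment runs.)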
def to_representation(self, instance):
response = self.__switch_cfg_mode__(super().to_representation(instance), instance.cfg)
pretty_response = self.__get_pretty_response__(response)
return pretty_response
def validate_status(self, status):
        if self.instance is None:
            raise serializers.ValidationError("A user setting must be added first")
        if self.instance.cfg.mode != 2:
            raise serializers.ValidationError(self.instance.cfg.name + " does not support toggling")
        return status
def validate_value(self, value):
        if self.instance is None:
            raise serializers.ValidationError("A user setting must be added first")
        cur_instance_cfg = self.instance.cfg
        if cur_instance_cfg.id == 1:
            if value <= 0:
                raise serializers.ValidationError(
                    cur_instance_cfg.name + ": must be greater than 0")
            if value > 60:
                raise serializers.ValidationError(
                    cur_instance_cfg.name + ": must not exceed 60")
        else:
            raise serializers.ValidationError("The current mode does not support value")
        return value
class CfgAlertConditionSerializer(serializers.ModelSerializer):
duration_scope = [0, 60]
percent_scope = [0.00, 1.00]
class Meta:
model = CfgAlertCondition
fields = ('duration', 'percent')
def to_representation(self, instance):
response = super().to_representation(instance)
response["duration"] = {"value": response["duration"],
"scope": [self.duration_scope[0], self.duration_scope[1]]}
response["percent"] = {"value": float(response["percent"]),
"scope": [self.percent_scope[0], self.percent_scope[1]]}
return response
def validate_duration(self, duration):
"""范围1-60(单位:minutes)"""
if duration > self.duration_scope[1] or duration < self.duration_scope[0]:
raise serializers.ValidationError(f"范围需要在{self.duration_scope[0]}-"
f"{self.duration_scope[1]}之间")
return duration
def validate_percent(self, percent):
"""范围0-1的小数"""
if percent > self.percent_scope[1] or percent < self.percent_scope[0]:
raise serializers.ValidationError(f"范围需要在{self.percent_scope[0]*100}%-"
f"{self.percent_scope[1]*100}%之间")
return percent
class CfgAlertTransferSerializer(serializers.ModelSerializer):
timeout_scope = [1, 60]
max_timeout_scope = [3, 1440]
class Meta:
model = CfgAlertTransfer
fields = ("timeout", "max_timeout")
def to_representation(self, instance):
response = super().to_representation(instance)
response["timeout"] = {"value": response["timeout"],
"scope": [self.timeout_scope[0], self.timeout_scope[1]]}
response["max_timeout"] = {"value": response["max_timeout"],
"scope": [self.max_timeout_scope[0],
self.max_timeout_scope[1]]}
return response
def validate_timeout(self, timeout):
"""范围1-60(单位:minutes)"""
if timeout > self.timeout_scope[1] or timeout < self.timeout_scope[0]:
raise serializers.ValidationError(f"范围需要在{self.timeout_scope[0]}-"
f"{self.timeout_scope[1]}之间")
return timeout
def validate_max_timeout(self, max_timeout):
"""范围3*timeout-1440(单位:minutes)"""
post_kv = self.context['request'].POST
if "timeout" in post_kv: # 如果用户post两个值的处理
max_timeout_scope_min = 3 * int(post_kv["timeout"])
else:
max_timeout_scope_min = 3 * self.instance.timeout
if max_timeout > self.max_timeout_scope[1] or max_timeout < max_timeout_scope_min:
raise serializers.ValidationError(f"范围需要在3倍的timeout-"
f"{self.max_timeout_scope[1]}之间")
return max_timeout
|
[
"2511672322@qq.com"
] |
2511672322@qq.com
|
9cdb5138986510649b7ecfba68261e7c1d8a0844
|
28dbd700cbf4845502d1f4da99d176f97bd6db2a
|
/popstr/popstr.py
|
30ae259a437cb7c08265bf8b8cf131de85e69733
|
[] |
no_license
|
fluhus/kwas
|
eef32d1d3e317d3a3312adb4b04891c4b09fc80a
|
88cf4e37744bb7d11fb07b128e527784e5f06bd8
|
refs/heads/main
| 2023-08-04T14:44:33.323157
| 2023-07-23T10:35:16
| 2023-07-23T10:35:16
| 542,435,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,494
|
py
|
import json
import os
from argparse import ArgumentParser
from collections import defaultdict
from ctypes import CDLL, CFUNCTYPE, POINTER, c_char_p, c_int64, c_uint8
from os.path import join
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from numpy.linalg import norm
from sklearn.decomposition import PCA
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
plt.style.use('ggplot')
# TODO(amit): Put main code in a function.
def load_has_raw():
"""Calls the low-level code for loading the HAS matrix.
Returns a numpy array of 1/0 and the kmers as strings."""
allocfunc = CFUNCTYPE(None, c_int64, c_int64, c_int64)
puint8 = POINTER(c_uint8)
pstr = POINTER(c_char_p)
load = CDLL(libfile).cLoadMatrix
load.argtypes = [c_char_p, allocfunc, POINTER(puint8), POINTER(pstr)]
buf: np.ndarray = None
pbuf = (1 * puint8)(puint8())
pkmers = (1 * pstr)(pstr())
nkmers = 0
@allocfunc
def alloc(nvals, nk, k):
"""Allocates buffers for the matrix data."""
nonlocal buf, pbuf, pkmers, nkmers
buf = np.zeros(nvals, dtype='uint8')
pbuf[0] = buf.ctypes.data_as(puint8)
strs = [('\0' * (k + 1)).encode() for _ in range(nk)]
pkmers[0] = (nk * c_char_p)(*strs)
nkmers = nk
load(infile.encode(), alloc, pbuf, pkmers)
kmers = [pkmers[0][i].decode() for i in range(nkmers)]
buf = buf.reshape([nkmers, len(buf) // nkmers])
return buf, kmers
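# Note on the pattern above: cLoadMatrix reports the matrix dimensions through the
# `alloc` callback, which lets Python allocate the numpy buffer and the kmer string
# array itself; memory ownership therefore stays on the Python side and nothing has
# to be freed manually across the FFI boundary.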
def load_has():
"""Returns a dataframe with HAS data."""
buf, kmers = load_has_raw()
df = pd.DataFrame(buf)
df.index = kmers
return df.transpose()
def try_setproctitle():
"""Sets the process name if the setproctitle library is available."""
try:
from setproctitle import setproctitle
except ModuleNotFoundError:
return
setproctitle('popstr')
def random_rows_cols(a, r, c):
"""Returns indexes of random selection of r rows and c columns."""
rows = subset(a.shape[0], r)
cols = subset(a.shape[1], c)
return rows, cols
def shuffle_rows_cols(mm):
"""Returns mm with its rows and columns shuffled."""
rows, cols = random_rows_cols(mm, mm.shape[0], mm.shape[1])
return mm[rows][:, cols]
def mini_pca(a, rows, cols, b=None, n=2):
"""Projects a (or b) of a PCA space created with the given row and columns
indexes."""
if b is None:
b = a
return StandardScaler().fit_transform(
PCA(n).fit(a[rows][:, cols]).transform(b[:, cols]))
def my_mds(x, n=2):
"""Runs MDS on the given matrix."""
return MDS(n).fit_transform(x, init=PCA(n).fit_transform(x))
def cosine(a, b):
"""Returns the cosine similarity between a and b."""
return np.dot(a, b) / (norm(a) * norm(b))
def my_mds_cosine(x, n=2):
"""Runs MDS using cosine dissimilarity."""
dists = np.array([[1 - cosine(a, b) for a in x] for b in x])
return MDS(n, dissimilarity='precomputed').fit_transform(
dists,
init=PCA(n).fit_transform(x),
)
def subset(a, b):
"""Returns a random subset of a of size b."""
return np.random.choice(a, b, replace=False)
def pca_distances(rows, cols, drows, n=2):
"""Returns an array of pairwise distances of elements in m after PCA."""
mini = mini_pca(m, rows, cols, b=m2, n=n)
mini = mini[drows]
d = np.array(
[norm(u - v) for i, u in enumerate(mini) for v in mini[i + 1:]])
d /= norm(d) * 2**0.5
return d
def groupby(items, key_func, value_func):
""""Groups items into lists by their key function."""
result = defaultdict(list)
for x in items:
result[key_func(x)].append(value_func(x))
return result
def create_final_pca(n):
"""Creates the final projection matrix for use in downstream analysis."""
print('Creating final PCA')
pca = PCA(n).fit(df.values)
comp = pca.components_
evr = pca.explained_variance_ratio_
print('Explained variance:', evr, 'Sum:', evr.sum())
del pca
fout = join(outdir, 'popstr.json')
print('Writing to ' + fout)
header = df.columns.tolist()
with open(fout, 'wt') as f:
for row in comp:
json.dump({h: v for h, v in zip(header, row)}, f)
f.write('\n')
evr_file = fout[:-5] + '.explnvrnc.json'
print('Writing explained variance to:', evr_file)
json.dump(evr.tolist(), open(evr_file, 'wt'))
def plot_subsample_projections(steps: int):
"""Plots PCA projections of subsamples of the matrix."""
ratios = [2**i for i in range(steps)]
ratios.reverse()
plt.figure(dpi=150, figsize=(15, 10))
for i, a in enumerate(ratios):
plt.subplot(231 + i)
randr, randc = random_rows_cols(m, rr // a, cc // a)
mini = mini_pca(m, randr, randc, b=m2)
plt.scatter(mini[:, 0], mini[:, 1], alpha=0.3)
plt.xlabel(f'{rr//a} samples, {cc//a} k-mers')
plt.subplot(232)
plt.title('Population Structure PCA for\nDifferent Data Subsamples')
plt.tight_layout()
plt.savefig(join(outdir, 'popstr_pca.png'))
plt.close()
def plot_distances_mds(steps: int, ss_samples=False):
"""Plots distance PCoA for subsamples of different sizes."""
plt.figure(dpi=150)
dists = []
rrows = subset(rr, rr)
groups = []
cur_rr, cur_cc = rr, cc
for _ in range(steps):
if ss_samples:
# Subsample samples & kmers:
# xo
# oo
blocks = [
[[0, cur_rr // 2], [cur_cc // 2, cur_cc]],
[[cur_rr // 2, cur_rr], [0, cur_cc // 2]],
[[cur_rr // 2, cur_rr], [cur_cc // 2, cur_cc]],
]
else:
# Subsample only kmers:
# xooo
# xooo
# xooo
# xooo
blocks = [[[0, cur_rr], [cur_cc * i // 4, cur_cc * (i + 1) // 4]]
for i in range(1, 4)]
for block in blocks:
rrange = np.arange(*block[0])
crange = np.arange(*block[1])
dists.append(pca_distances(rrange, crange, rrows, n=2))
groups.append(f'{cur_rr} samples, {cur_cc} k-mers')
cur_rr //= (2 if ss_samples else 1)
cur_cc //= (2 if ss_samples else 4)
dists = np.array(dists)
mds = my_mds_cosine(dists)
for k, v in groupby(zip(mds, groups), lambda x: x[1],
lambda x: x[0]).items():
arr = np.array(v)
plt.scatter(arr[:, 0], arr[:, 1], alpha=0.5, label=k)
plt.legend()
plt.title('PCoA of Distance Vectors for\nDifferent Subsample Sizes')
plt.tight_layout()
plt.savefig(join(outdir, 'popstr_mds.png'))
plt.close()
try_setproctitle()
parser = ArgumentParser()
parser.add_argument('-o', type=str, help='Output directory', default='.')
parser.add_argument('-i', type=str, help='Input HAS file', required=True)
parser.add_argument('-s', type=str, help='Hasmat library file', required=True)
args = parser.parse_args()
infile = args.i
outdir = args.o
libfile = args.s
os.makedirs(outdir, exist_ok=True)
print('Loading data')
df = load_has()
m = df.values
nval = m.shape[0] * m.shape[1]
print(f'Shape: {m.shape} ({nval/2**20:.0f}m values)')
del nval
print('Shuffling')
m = shuffle_rows_cols(m)
rr, cc = m.shape
# Divide samples to 2, one half for calculating PCA and one for testing.
m2 = m[rr // 2:]
m = m[:rr // 2]
rr, cc = m.shape
plot_subsample_projections(6)
plot_distances_mds(5, False)
create_final_pca(10)
|
[
"amit.lavon@weizmann.ac.il"
] |
amit.lavon@weizmann.ac.il
|
b5edb2c0273e7124ef114e4082b10abf6aafb93c
|
da732353198b48b421de3beb08b425380f881e2c
|
/bin/jp.py
|
2b74941fa568b9330c0547010e8f60f4504a717d
|
[] |
no_license
|
Oakafee/Loverslist
|
278b2982b54a4f90741cee8ae1fb29a57f8e15e5
|
49bc86091265a80def79e39764ded180ed158e5d
|
refs/heads/master
| 2020-12-02T22:37:13.604661
| 2017-07-03T22:57:05
| 2017-07-03T22:57:05
| 96,155,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
#!/Users/Oakafee/loverslist/bin/python
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
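# Typical invocations (illustrative examples; any JMESPath expression works):
#   echo '{"a": {"b": [1, 2, 3]}}' | jp.py 'a.b[1]'
#   jp.py -f data.json 'locations[?state == `WA`].name'
#   jp.py --ast 'a.b'    # pretty-print the parsed AST instead of searching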
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
|
[
"jefrttt@gmail.com"
] |
jefrttt@gmail.com
|
9fc449edf72fcda74552315b31a10280c86b6eaa
|
f4251487d8de4db09e166e7b6840aec105cb363e
|
/bbf.py
|
f18f747d6eac321f43671179e76569ae596c5353
|
[
"MIT"
] |
permissive
|
Whitie/brainfuck-stuff
|
421fc5aafd2b3f363921c386fcd239b07d676bbd
|
263b6582fde107af5365d280da510974f3af415c
|
refs/heads/master
| 2021-06-24T16:28:34.724604
| 2020-12-06T15:50:34
| 2020-12-06T15:50:34
| 180,211,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,786
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
from collections import deque
from functools import partial
from struct import pack, unpack
DATASIZE = 30000
MAX_NESTING_LEVELS = 100
BF_COMMANDS = {
'MOVE_RIGHT': '>',
'MOVE_LEFT': '<',
'INCREMENT': '+',
'DECREMENT': '-',
'WRITE_CHAR': '.',
'READ_CHAR': ',',
'LOOP_START': '[',
'LOOP_END': ']',
}
_OPCODES = {
'MOVE': 0x01,
'INCREMENT': 0x03,
'WRITE_CHAR': 0x10,
'READ_CHAR': 0x11,
'LOOP_START': 0x20,
'LOOP_END': 0x22,
'SET_NULL': 0x30,
'BF_DEBUG': 0xdb,
}
# Helper functions (only for optimization)
def balance(match, inc, dec):
s = match.group(0)
count_inc = s.count(inc)
count_dec = s.count(dec)
if count_inc > count_dec:
return (count_inc - count_dec) * inc
else:
return (count_dec - count_inc) * dec
def aggregate(match, sym):
return '{}{}'.format(sym, len(match.group(0)))
def optimize(code, commands):
cl = len(code)
print('Cleaned code length:', cl)
code = re.sub(
r'[{INCREMENT}{DECREMENT}]{{2,}}'.format(**commands),
partial(balance, inc=commands['INCREMENT'], dec=commands['DECREMENT']),
code
)
code = re.sub(
r'[{MOVE_LEFT}{MOVE_RIGHT}]{{2,}}'.format(**commands),
partial(balance, inc=commands['MOVE_RIGHT'],
dec=commands['MOVE_LEFT']),
code
)
code = re.sub(
r'\{LOOP_START}(\{INCREMENT}|\{DECREMENT})\{LOOP_END}'.format(
**commands
),
'%', code
)
code = re.sub(r'^\{LOOP_START}.+?\{LOOP_END}'.format(**commands), '',
code, flags=re.DOTALL)
lookup = {
'I': commands['INCREMENT'],
'D': commands['DECREMENT'],
'R': commands['MOVE_RIGHT'],
'L': commands['MOVE_LEFT'],
}
for sym, instruction in lookup.items():
sym_re = r'{}{{2,}}'.format(re.escape(instruction))
code = re.sub(sym_re, partial(aggregate, sym=sym), code)
ocl = len(code)
if cl <= ocl:
print('No optimization found')
else:
print('Optimized code length:', ocl)
return code
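# Illustrative effect of optimize() on the default BF_COMMANDS (a sketch):
#   '+++++' -> 'I5'  (a run of increments is aggregated into one opcode plus a count)
#   '+++--' -> '+'   (mixed +/- runs are first balanced down to their net change)
#   '>>>>'  -> 'R4'  (pointer moves are aggregated the same way)
#   '[-]'   -> '%'   (the clear-cell idiom compiles to a single SET_NULL)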
class BrainfuckInterpreter:
def __init__(self, filename, datasize=DATASIZE, debug='', **commands):
self.filename = filename
with open(filename) as fp:
code = fp.read()
self.compiled_filename = '{}.bfc'.format(os.path.splitext(filename)[0])
self.commands = BF_COMMANDS.copy()
self.commands['BF_DEBUG'] = debug
self.commands.update(commands)
self.cleaned_code = self._clean_code(code)
self.code = optimize(self.cleaned_code, self.commands)
self.data = bytearray(datasize)
self.ptr = 0
self.pc = 0
self.input_buffer = deque(maxlen=255)
self.ast = []
self._stack = []
self._load_compiled()
def _load_compiled(self):
if os.path.isfile(self.compiled_filename):
# check here if source is newer
with open(self.compiled_filename, 'rb') as fp:
data = fp.read(3)
while data:
self.ast.append((None, *unpack('=Bh', data)))
data = fp.read(3)
def _clean_code(self, raw_code):
print('Input length:', len(raw_code), file=sys.stderr)
tmp = []
for c in raw_code:
if c in self.commands.values():
tmp.append(c)
return ''.join(tmp)
def _get_num(self):
d = self.code[self.pc]
while True:
if self.code[self.pc + 1].isdigit():
self.pc += 1
d += self.code[self.pc]
else:
break
return int(d)
def _get_input(self):
try:
inp = self.input_buffer.popleft()
except IndexError:
inp = input('$ ')
if not inp:
inp = '\x00'
elif len(inp) > 1:
self.input_buffer.extend(inp[1:])
inp = inp[0]
return inp
def process(self):
print('Cleaned code:', self.cleaned_code, file=sys.stderr)
if len(self.code) < len(self.cleaned_code):
print('Optimized code:', self.code, file=sys.stderr)
loop = 0
while self.pc < len(self.code):
instruction = self.code[self.pc]
self.process_instruction(instruction, loop)
loop += 1
try:
with open(self.compiled_filename, 'wb') as fp:
for op in self.ast:
fp.write(pack('=Bh', op[1], op[2]))
except Exception as err:
print('Error while writing compiled file:', err, file=sys.stderr)
def process_instruction(self, instruction, loop):
if instruction == self.commands['MOVE_RIGHT']:
self.ast.append(('MOVE', _OPCODES['MOVE'], 1))
elif instruction == self.commands['MOVE_LEFT']:
self.ast.append(('MOVE', _OPCODES['MOVE'], -1))
elif instruction == self.commands['INCREMENT']:
self.ast.append(('INCREMENT', _OPCODES['INCREMENT'], 1))
elif instruction == self.commands['DECREMENT']:
self.ast.append(('INCREMENT', _OPCODES['INCREMENT'], -1))
elif instruction == self.commands['WRITE_CHAR']:
self.ast.append(('WRITE_CHAR', _OPCODES['WRITE_CHAR'], 0))
elif instruction == self.commands['READ_CHAR']:
self.ast.append(('READ_CHAR', _OPCODES['READ_CHAR'], 0))
elif instruction == self.commands['LOOP_START']:
self._stack.append(loop)
self.ast.append(('LOOP_START_TMP', 0, 0))
elif instruction == self.commands['LOOP_END']:
start = self._stack.pop()
self.ast.append(('LOOP_END', _OPCODES['LOOP_END'], start))
self.ast[start] = ('LOOP_START', _OPCODES['LOOP_START'], loop)
elif instruction == self.commands['BF_DEBUG']:
self.ast.append(('BF_DEBUG', _OPCODES['BF_DEBUG'], 0))
# Special optimized instructions
elif instruction == '%':
self.ast.append(('SET_NULL', _OPCODES['SET_NULL'], 0))
elif instruction == 'I':
self.pc += 1
self.ast.append(('INCREMENT', _OPCODES['INCREMENT'],
self._get_num()))
elif instruction == 'D':
self.pc += 1
self.ast.append(('INCREMENT', _OPCODES['INCREMENT'],
-self._get_num()))
elif instruction == 'R':
self.pc += 1
self.ast.append(('MOVE', _OPCODES['MOVE'], self._get_num()))
elif instruction == 'L':
self.pc += 1
self.ast.append(('MOVE', _OPCODES['MOVE'], -self._get_num()))
else:
raise ValueError(f'Illegal instruction: {instruction}')
self.pc += 1
def _debug_print(self):
data = [
x for x in zip(range(100), self.data[:100],
map(chr, self.data[:100]))
if x[1]
]
print(
'PC: {pc} PTR: {ptr} VAL: {val} DATA (P, D, C): {data} '.format(
pc=self.pc, ptr=self.ptr, val=self.data[self.ptr], data=data
)
)
def run(self):
if not self.ast:
self.process()
print('Number of bytecode instructions:', len(self.ast))
self.pc = 0
start = time.monotonic()
while self.pc < len(self.ast):
name, op, arg = self.ast[self.pc]
if op == 0x01:
self.ptr += arg
elif op == 0x03:
self.data[self.ptr] = (self.data[self.ptr] + arg) & 0xff
elif op == 0x10:
print(chr(self.data[self.ptr]), end='')
elif op == 0x11:
self.data[self.ptr] = ord(self._get_input())
elif op == 0x20:
if self.data[self.ptr] == 0:
self.pc = arg
elif op == 0x22:
if self.data[self.ptr] != 0:
self.pc = arg
elif op == 0x30:
self.data[self.ptr] = 0
elif op == 0xdb:
self._debug_print()
else:
raise ValueError(f'Illegal instruction: {name}, OP: 0x{op:x}')
self.pc += 1
duration = time.monotonic() - start
print('Duration: {:.2f}ms'.format(duration * 1000))
def main(filename):
bf = BrainfuckInterpreter(filename, debug='')
bf.run()
if __name__ == '__main__':
    try:
        main(sys.argv[1])
    except IndexError:
        # No filename argument given; the interpreter constructor expects a path, not raw code
        filename = input('Enter path to the code file: ')
        bf = BrainfuckInterpreter(filename, debug='?')
        bf.run()
|
[
"weimann.th@yahoo.com"
] |
weimann.th@yahoo.com
|
d60337f09b30285efc27e1a3d719e9431cc0467f
|
18b9626926dbc8b115b5e3c764befc0803796394
|
/Python/238. Product of Array Except Self/producti.py
|
d3c84751d942939376b6ac7e134c6b3b49635e6e
|
[] |
no_license
|
brownesc/LeetCode
|
07a194798bfff28ae004904db0274d18d7a010c6
|
45e707993c89f124739cedd3c7cfcfca4787f6c3
|
refs/heads/master
| 2022-12-06T07:27:38.644677
| 2020-08-17T05:25:22
| 2020-08-17T05:25:22
| 276,159,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
# https://leetcode.com/problems/product-of-array-except-self/
"""
Given an array nums of n integers where n > 1, return an array output such that output[i] is equal to the product of all the
elements of nums except nums[i].
Example:
Input: [1,2,3,4]
Output: [24,12,8,6]
Constraint: It's guaranteed that the product of the elements of any prefix or suffix of the array (including the whole array)
fits in a 32 bit integer.
Note: Please solve it without division and in O(n).
Follow up:
Could you solve it with constant space complexity? (The output array does not count as extra space for the purpose of space
complexity analysis.)
"""
def productExceptSelf(nums):
left_product = []
right_product = []
product = 1
for num in nums:
product*=num
left_product.append(product)
#Reset for right side
product = 1
for i in range(len(nums)-1,-1,-1):
product*=nums[i]
right_product.append(product)
#fix the reversal
right_product = right_product[::-1]
for i in range(len(nums)):
if i==0:
nums[i]=right_product[i+1]
elif i==len(nums)-1:
nums[i]=left_product[i-1]
else:
nums[i]= left_product[i-1]*right_product[i+1]
return nums
print(productExceptSelf([1,2,3]))
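# The "Follow up" above asks for constant extra space; here is a minimal sketch
# (the helper name is ours, not part of the original): the output array holds the
# running prefix products, and a single scalar carries the suffix product.
def productExceptSelf_constant_space(nums):
    out = [1] * len(nums)
    for i in range(1, len(nums)):
        out[i] = out[i - 1] * nums[i - 1]  # product of everything left of i
    right = 1
    for i in range(len(nums) - 1, -1, -1):
        out[i] *= right                    # fold in product of everything right of i
        right *= nums[i]
    return out

print(productExceptSelf_constant_space([1, 2, 3]))  # -> [6, 3, 2]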
|
[
"andresuncle@gmail.com"
] |
andresuncle@gmail.com
|
8d3b38199917e2fa33dcbebde716821d996887f7
|
1bef94923ef98ee8c4c745477edd5031fcc43eed
|
/Server/app/views/daughter/auth.py
|
be3813ab47bcc2372580b29f4519c0e04ac578fc
|
[
"MIT"
] |
permissive
|
Moms-Day/Moms-Day_Backend
|
3bbc719ebcac0eec9b7a6c42ebcf9dd5a8f99e42
|
0359f91cf04dfdb653deeeb7eb2ba0676e9a3feb
|
refs/heads/master
| 2021-07-16T20:17:55.814611
| 2018-11-17T13:16:30
| 2018-11-17T13:16:30
| 134,725,800
| 4
| 1
| null | 2018-06-04T14:20:25
| 2018-05-24T14:19:11
| null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
from flask import Blueprint, request, abort
from flask_restful import Api
from flask_jwt_extended import create_access_token, create_refresh_token
from werkzeug.security import check_password_hash
from flasgger import swag_from
from app.views import BaseResource, json_required
from app.models.account import DaughterModel
from app.docs.daughter.auth import DAUGHTER_AUTH_POST
api = Api(Blueprint(__name__, __name__))
api.prefix = '/daughter'
@api.resource('/auth')
class DaughterAuth(BaseResource):
@swag_from(DAUGHTER_AUTH_POST)
@json_required({'id': str, 'pw': str})
def post(self):
user = DaughterModel.objects(id=request.json['id']).first()
return ({
'accessToken': create_access_token(user.id),
'refreshToken': create_refresh_token(user.id)
}, 200) if user and check_password_hash(user.pw, request.json['pw']) else abort(401)
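# Illustrative request/response for this endpoint (a sketch; tokens are made up):
#   POST /daughter/auth  {"id": "daughter1", "pw": "secret"}
#   -> 200 {"accessToken": "<JWT>", "refreshToken": "<JWT>"} on valid credentials
#   -> 401 otherwise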
|
[
"jerion7474@gmail.com"
] |
jerion7474@gmail.com
|
8e00318e601ae0ea996f2780621cf57635d2b94c
|
c81d7dfef424b088bf2509a1baf406a80384ea5a
|
/venv/Lib/site-packages/twilio/rest/preview/hosted_numbers/authorization_document.py
|
610059b75ec45154d8396888722fdb1b0162d8bb
|
[] |
no_license
|
Goutham2591/OMK_PART2
|
111210d78fc4845481ed55c852b8f2f938918f4a
|
cb54fb21ebf472bffc6ee4f634bf1e68303e113d
|
refs/heads/master
| 2022-12-10T01:43:08.213010
| 2018-04-05T02:09:41
| 2018-04-05T02:09:41
| 124,828,094
| 0
| 1
| null | 2022-12-07T23:43:03
| 2018-03-12T03:20:14
|
Python
|
UTF-8
|
Python
| false
| false
| 17,273
|
py
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AuthorizationDocumentList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the AuthorizationDocumentList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
"""
super(AuthorizationDocumentList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/AuthorizationDocuments'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams AuthorizationDocumentInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'])
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists AuthorizationDocumentInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance]
"""
return list(self.stream(limit=limit, page_size=page_size))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of AuthorizationDocumentInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return AuthorizationDocumentPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of AuthorizationDocumentInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return AuthorizationDocumentPage(self._version, response, self._solution)
def create(self, hosted_number_order_sids, address_sid, email,
cc_emails=values.unset):
"""
Create a new AuthorizationDocumentInstance
:param unicode hosted_number_order_sids: A list of HostedNumberOrder sids.
:param unicode address_sid: Address sid.
:param unicode email: Email.
:param unicode cc_emails: A list of emails.
:returns: Newly created AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
data = values.of({
'HostedNumberOrderSids': serialize.map(hosted_number_order_sids, lambda e: e),
'AddressSid': address_sid,
'Email': email,
'CcEmails': serialize.map(cc_emails, lambda e: e),
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return AuthorizationDocumentInstance(self._version, payload)
def get(self, sid):
"""
Constructs a AuthorizationDocumentContext
:param sid: AuthorizationDocument sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
return AuthorizationDocumentContext(self._version, sid=sid)
def __call__(self, sid):
"""
Constructs a AuthorizationDocumentContext
:param sid: AuthorizationDocument sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
return AuthorizationDocumentContext(self._version, sid=sid)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentList>'
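# Illustrative usage (a sketch; assumes a configured twilio.rest.Client, and the
# exact accessor path below is an assumption):
#   docs = client.preview.hosted_numbers.authorization_documents
#   doc = docs.create(hosted_number_order_sids=['HR...'],
#                     address_sid='AD...',
#                     email='owner@example.com')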
class AuthorizationDocumentPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the AuthorizationDocumentPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
"""
super(AuthorizationDocumentPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of AuthorizationDocumentInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
return AuthorizationDocumentInstance(self._version, payload)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentPage>'
class AuthorizationDocumentContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, sid):
"""
Initialize the AuthorizationDocumentContext
:param Version version: Version that contains the resource
:param sid: AuthorizationDocument sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
super(AuthorizationDocumentContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid}
self._uri = '/AuthorizationDocuments/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a AuthorizationDocumentInstance
:returns: Fetched AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return AuthorizationDocumentInstance(self._version, payload, sid=self._solution['sid'])
def update(self, hosted_number_order_sids=values.unset,
address_sid=values.unset, email=values.unset, cc_emails=values.unset,
status=values.unset):
"""
Update the AuthorizationDocumentInstance
:param unicode hosted_number_order_sids: A list of HostedNumberOrder sids.
:param unicode address_sid: Address sid.
:param unicode email: Email.
:param unicode cc_emails: A list of emails.
:param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
:returns: Updated AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
data = values.of({
'HostedNumberOrderSids': serialize.map(hosted_number_order_sids, lambda e: e),
'AddressSid': address_sid,
'Email': email,
'CcEmails': serialize.map(cc_emails, lambda e: e),
'Status': status,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return AuthorizationDocumentInstance(self._version, payload, sid=self._solution['sid'])
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentContext {}>'.format(context)
class AuthorizationDocumentInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
class Status(object):
OPENED = "opened"
SIGNING = "signing"
SIGNED = "signed"
CANCELED = "canceled"
FAILED = "failed"
def __init__(self, version, payload, sid=None):
"""
Initialize the AuthorizationDocumentInstance
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
super(AuthorizationDocumentInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'address_sid': payload['address_sid'],
'status': payload['status'],
'email': payload['email'],
'cc_emails': payload['cc_emails'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'url': payload['url'],
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid']}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AuthorizationDocumentContext for this AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
if self._context is None:
self._context = AuthorizationDocumentContext(self._version, sid=self._solution['sid'])
return self._context
@property
def sid(self):
"""
:returns: AuthorizationDocument sid.
:rtype: unicode
"""
return self._properties['sid']
@property
def address_sid(self):
"""
:returns: Address sid.
:rtype: unicode
"""
return self._properties['address_sid']
@property
def status(self):
"""
:returns: The Status of this AuthorizationDocument.
:rtype: AuthorizationDocumentInstance.Status
"""
return self._properties['status']
@property
def email(self):
"""
:returns: Email.
:rtype: unicode
"""
return self._properties['email']
@property
def cc_emails(self):
"""
:returns: A list of emails.
:rtype: unicode
"""
return self._properties['cc_emails']
@property
def date_created(self):
"""
:returns: The date this AuthorizationDocument was created.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this AuthorizationDocument was updated.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a AuthorizationDocumentInstance
:returns: Fetched AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
return self._proxy.fetch()
def update(self, hosted_number_order_sids=values.unset,
address_sid=values.unset, email=values.unset, cc_emails=values.unset,
status=values.unset):
"""
Update the AuthorizationDocumentInstance
:param unicode hosted_number_order_sids: A list of HostedNumberOrder sids.
:param unicode address_sid: Address sid.
:param unicode email: Email.
:param unicode cc_emails: A list of emails.
:param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
:returns: Updated AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
return self._proxy.update(
hosted_number_order_sids=hosted_number_order_sids,
address_sid=address_sid,
email=email,
cc_emails=cc_emails,
status=status,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentInstance {}>'.format(context)
|
[
"amatar@unomaha.edu"
] |
amatar@unomaha.edu
|
63bd6a252dddc090640bc453862684e100056c04
|
6c790b128f5fb8870ddc861f785d1563fd539358
|
/test_roots.py
|
9adfde84a6dc28dcd72d8cadfe8ba7c9f9caa0bd
|
[] |
no_license
|
mpstewart1/cs107test
|
f5ffa116f003916b447a4f53f7b2c884f66b4f52
|
b4d255813310c2baef2aedc7b61132f008ab2704
|
refs/heads/main
| 2023-02-08T09:27:47.404439
| 2020-10-16T00:06:25
| 2020-10-16T00:06:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
import pytest
import roots
def test_quadroots_result():
assert roots.quad_roots(1.0, 1.0, -12.0) == ((3+0j), (-4+0j))
def test_quadroots_types():
with pytest.raises(TypeError):
roots.quad_roots("", "green", "hi")
def test_quadroots_zerocoeff():
with pytest.raises(ValueError):
roots.quad_roots(a=0.0)
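# A minimal sketch of the roots.quad_roots under test (an assumption -- the real
# module is not shown here): solve a*x^2 + b*x + c = 0 via the quadratic formula.
import cmath

def quad_roots(a=1.0, b=2.0, c=0.0):
    for v in (a, b, c):
        if not isinstance(v, (int, float)):
            raise TypeError('coefficients must be numeric')
    if a == 0.0:
        raise ValueError('the quadratic coefficient a must be nonzero')
    d = cmath.sqrt(b * b - 4 * a * c)
    return ((-b + d) / (2 * a), (-b - d) / (2 * a))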
|
[
"matthew_stewart@g.harvard.edu"
] |
matthew_stewart@g.harvard.edu
|
997793d9ae8c2b258868629e356cf63351e833ac
|
cacbb81f7142afb4fc01f5e6b9466916fb06cbb3
|
/auctions/migrations/0008_watchlistentry.py
|
63d98451829e51c336e1f753655b8575030e67a4
|
[] |
no_license
|
jcarlson212/Auction_Site
|
9d041671dadf41468733bf6115e0902a70719860
|
87a1d191a13da96c7d35abe76b2768a5a3f6e31d
|
refs/heads/master
| 2022-11-13T05:03:27.464301
| 2020-07-10T06:14:04
| 2020-07-10T06:14:04
| 277,413,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# Generated by Django 3.0.8 on 2020-07-09 02:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auctions', '0007_auto_20200708_1910'),
]
operations = [
migrations.CreateModel(
name='WatchListEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('auction', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auctions.Auction')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"jcarlson212@gmail.com"
] |
jcarlson212@gmail.com
|
fac0bae74d32020423877b09f1ad467c8cead96d
|
d9d1d9e2e5ff535f559342d0aa7e9fbccc649406
|
/SConstruct
|
9991fb8271dd9f467ad39512d972523da7cfb1ba
|
[] |
no_license
|
lsst-dm/obs_ztf
|
37e4f0cc0610b3c1faa65011278706494f2036b3
|
001b4afbd407e4ea3404cef32263ace3e283e2cd
|
refs/heads/main
| 2023-07-21T20:04:22.816986
| 2023-07-10T00:56:34
| 2023-07-10T00:56:34
| 156,426,422
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
# -*- python -*-
from lsst.sconsUtils import scripts
scripts.BasicSConstruct("obs_ztf")
|
[
"rhl@astro.princeton.edu"
] |
rhl@astro.princeton.edu
|
|
73c67101bc92f65ebaf38d53505358c017ccf405
|
a697d58185671dae3c99aafbf9aba7da5df8a7d7
|
/env/lib/python3.7/site-packages/colour/volume/datasets/optimal_colour_stimuli.py
|
af6142f9bad2b3497a52526432eb557eeb256667
|
[] |
no_license
|
Dnathan33/Data-Visualization-Exploration
|
10ebe0363bb88d1935abac126ab3fae2bbff2f90
|
aedf76082297ca9d08f189707656b044a2ed83da
|
refs/heads/master
| 2022-12-23T09:35:09.239778
| 2020-09-17T00:12:30
| 2020-09-17T00:12:30
| 296,174,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,026
|
py
|
# -*- coding: utf-8 -*-
"""
Optimal Colour Stimuli
======================
Defines *MacAdam Optimal Colour Stimuli* for various illuminants in *CIE xyY*
colourspace.
The *Optimal Colour Stimuli* data is in the form of a *dict* of
*ndarray* as follows::
{'name': ndarray, ..., 'name': ndarray}
where each *ndarray* contains a collection of optimal colour stimulus *ndarray*
as follows::
    [np.array(['x', 'y', 'Y']), ..., np.array(['x', 'y', 'Y'])]
See Also
--------
`Optimal Colour Stimuli - MacAdam Limits Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/volume/macadam_limits.ipynb>`_
References
----------
- :cite:`MacAdam1935a` : MacAdam, D. L. (1935). Maximum Visual Efficiency of
Colored Materials. Journal of the Optical Society of America, 25(11),
361-367. doi:10.1364/JOSA.25.000361
- :cite:`Wikipedia2004a` : Wikipedia. (2004). Surfaces. Retrieved September
10, 2014, from http://en.wikipedia.org/wiki/Gamut#Surfaces
- :cite:`Wyszecki2000bb` : Wyszecki, G., & Stiles, W. S. (2000).
Table I(3.7). In Color Science: Concepts and Methods, Quantitative Data and
Formulae (pp. 776-777). Wiley. ISBN:978-0471399186
- :cite:`Wyszecki2000bh` : Wyszecki, G., & Stiles, W. S. (2000).
Table II(3.7). In Color Science: Concepts and Methods, Quantitative Data
and Formulae (pp. 778-779). Wiley. ISBN:978-0471399186
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.utilities import CaseInsensitiveMapping
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'A_OPTIMAL_COLOUR_STIMULI', 'C_OPTIMAL_COLOUR_STIMULI',
'D65_OPTIMAL_COLOUR_STIMULI', 'ILLUMINANTS_OPTIMAL_COLOUR_STIMULI'
]
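# Illustrative access (a sketch): the per-illuminant arrays defined below are
# gathered in ILLUMINANTS_OPTIMAL_COLOUR_STIMULI, a CaseInsensitiveMapping keyed
# by illuminant name, e.g.:
#   ILLUMINANTS_OPTIMAL_COLOUR_STIMULI['D65'][0]  # -> array([0.1346, 0.0747, 10.])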
A_OPTIMAL_COLOUR_STIMULI = np.array([
[0.1120, 0.1985, 10],
[0.0859, 0.2957, 10],
[0.0549, 0.4593, 10],
[0.0433, 0.5548, 10],
[0.0386, 0.6764, 10],
[0.0441, 0.7368, 10],
[0.0578, 0.7834, 10],
[0.0786, 0.8102, 10],
[0.1030, 0.8188, 10],
[0.1276, 0.8151, 10],
[0.1510, 0.8054, 10],
[0.7188, 0.2812, 10],
[0.7112, 0.2773, 10],
[0.6506, 0.2469, 10],
[0.6015, 0.2228, 10],
[0.5604, 0.2032, 10],
[0.5179, 0.1839, 10],
[0.4590, 0.1606, 10],
[0.4302, 0.1526, 10],
[0.3946, 0.1488, 10],
[0.3514, 0.1519, 10],
[0.2949, 0.1610, 10],
[0.2452, 0.1706, 10],
[0.2009, 0.1797, 10],
[0.1197, 0.3185, 20],
[0.0977, 0.4993, 20],
[0.0929, 0.6609, 20],
[0.1073, 0.7534, 20],
[0.1187, 0.7744, 20],
[0.1335, 0.7863, 20],
[0.1505, 0.7896, 20],
[0.1683, 0.7863, 20],
[0.2028, 0.7690, 20],
[0.3641, 0.6326, 20],
[0.4206, 0.5776, 20],
[0.7008, 0.2991, 20],
[0.6726, 0.2834, 20],
[0.6350, 0.2629, 20],
[0.6020, 0.2454, 20],
[0.5601, 0.2246, 20],
[0.5005, 0.2027, 20],
[0.4823, 0.2013, 20],
[0.4532, 0.2053, 20],
[0.4281, 0.2118, 20],
[0.3651, 0.2320, 20],
[0.3070, 0.2521, 20],
[0.2500, 0.2721, 20],
[0.1828, 0.2960, 20],
[0.1442, 0.3923, 30],
[0.1407, 0.4547, 30],
[0.1393, 0.4995, 30],
[0.1390, 0.5533, 30],
[0.1402, 0.6008, 30],
[0.1439, 0.6546, 30],
[0.1535, 0.7106, 30],
[0.1667, 0.7410, 30],
[0.1763, 0.7503, 30],
[0.2002, 0.7548, 30],
[0.2403, 0.7366, 30],
[0.6800, 0.3198, 30],
[0.6759, 0.3173, 30],
[0.6488, 0.3006, 30],
[0.6208, 0.2837, 30],
[0.5863, 0.2637, 30],
[0.5606, 0.2500, 30],
[0.5382, 0.2402, 30],
[0.5168, 0.2358, 30],
[0.4791, 0.2435, 30],
[0.4295, 0.2636, 30],
[0.3905, 0.2807, 30],
[0.3290, 0.3083, 30],
[0.2202, 0.3576, 30],
[0.1769, 0.4360, 40],
[0.1800, 0.5225, 40],
[0.1881, 0.6104, 40],
[0.1958, 0.6562, 40],
[0.2019, 0.6791, 40],
[0.2106, 0.6997, 40],
[0.2314, 0.7173, 40],
[0.2405, 0.7178, 40],
[0.2607, 0.7118, 40],
[0.3023, 0.6839, 40],
[0.5021, 0.4968, 40],
[0.6570, 0.3427, 40],
[0.2151, 0.4588, 50],
[0.2202, 0.5035, 50],
[0.2303, 0.5698, 50],
[0.2392, 0.6119, 50],
[0.2507, 0.6483, 50],
[0.2574, 0.6615, 50],
[0.2660, 0.6720, 50],
[0.2842, 0.6781, 50],
[0.2994, 0.6742, 50],
[0.3244, 0.6595, 50],
[0.5025, 0.4961, 50],
[0.6332, 0.3664, 50],
[0.6296, 0.3635, 50],
[0.6054, 0.3447, 50],
[0.5803, 0.3257, 50],
[0.5600, 0.3111, 50],
[0.5350, 0.2957, 50],
[0.5207, 0.2913, 50],
[0.4996, 0.2960, 50],
[0.4503, 0.3221, 50],
[0.4000, 0.3511, 50],
[0.3587, 0.3751, 50],
[0.3105, 0.4031, 50],
[0.2546, 0.4358, 50],
[0.2576, 0.4662, 60],
[0.2656, 0.5051, 60],
[0.2702, 0.5247, 60],
[0.2806, 0.5633, 60],
[0.2898, 0.5910, 60],
[0.3000, 0.6140, 60],
[0.3192, 0.6345, 60],
[0.3400, 0.6339, 60],
[0.3797, 0.6090, 60],
[0.4252, 0.5692, 60],
[0.4923, 0.5056, 60],
[0.5995, 0.3999, 60],
[0.6065, 0.3871, 60],
[0.5751, 0.3606, 60],
[0.5508, 0.3403, 60],
[0.5252, 0.3217, 60],
[0.5139, 0.3168, 60],
[0.5005, 0.3178, 60],
[0.4761, 0.3301, 60],
[0.4496, 0.3461, 60],
[0.4103, 0.3705, 60],
[0.3375, 0.4161, 60],
[0.3124, 0.4318, 60],
[0.2634, 0.4626, 60],
[0.3038, 0.4616, 70],
[0.3105, 0.4832, 70],
[0.3202, 0.5119, 70],
[0.3255, 0.5258, 70],
[0.3395, 0.5580, 70],
[0.3537, 0.5806, 70],
[0.3810, 0.5916, 70],
[0.3900, 0.5886, 70],
[0.3999, 0.5835, 70],
[0.5005, 0.4967, 70],
[0.5690, 0.4300, 70],
[0.5849, 0.4143, 70],
[0.5812, 0.4106, 70],
[0.5776, 0.4070, 70],
[0.5706, 0.4001, 70],
[0.5351, 0.3661, 70],
[0.5202, 0.3530, 70],
[0.5004, 0.3407, 70],
[0.4904, 0.3412, 70],
[0.4794, 0.3466, 70],
[0.4703, 0.3519, 70],
[0.3706, 0.4174, 70],
[0.3501, 0.4310, 70],
[0.3219, 0.4497, 70],
[0.3527, 0.4480, 80],
[0.3603, 0.4657, 80],
[0.3803, 0.5061, 80],
[0.4100, 0.5440, 80],
[0.4299, 0.5467, 80],
[0.4402, 0.5426, 80],
[0.4598, 0.5298, 80],
[0.4803, 0.5130, 80],
[0.5000, 0.4954, 80],
[0.5218, 0.4750, 80],
[0.5419, 0.4559, 80],
[0.5603, 0.4380, 80],
[0.5566, 0.4338, 80],
[0.5457, 0.4217, 80],
[0.5190, 0.3928, 80],
[0.5004, 0.3744, 80],
[0.4916, 0.3672, 80],
[0.4799, 0.3636, 80],
[0.4751, 0.3652, 80],
[0.4698, 0.3679, 80],
[0.4560, 0.3767, 80],
[0.4011, 0.4146, 80],
[0.3805, 0.4289, 80],
[0.3704, 0.4358, 80],
[0.4016, 0.4288, 90],
[0.4033, 0.4319, 90],
[0.4081, 0.4402, 90],
[0.4158, 0.4531, 90],
[0.4308, 0.4756, 90],
[0.4458, 0.4935, 90],
[0.4552, 0.5011, 90],
[0.4658, 0.5049, 90],
[0.4854, 0.4999, 90],
[0.5081, 0.4842, 90],
[0.5228, 0.4717, 90],
[0.5343, 0.4614, 90],
[0.5304, 0.4565, 90],
[0.5158, 0.4381, 90],
[0.4987, 0.4173, 90],
[0.4827, 0.3990, 90],
[0.4656, 0.3859, 90],
[0.4562, 0.3900, 90],
[0.4420, 0.3999, 90],
[0.4275, 0.4103, 90],
[0.4079, 0.4244, 90],
[0.4024, 0.4283, 90],
[0.4250, 0.4183, 95],
[0.4276, 0.4223, 95],
[0.4351, 0.4339, 95],
[0.4447, 0.4476, 95],
[0.4550, 0.4607, 95],
[0.4660, 0.4728, 95],
[0.4787, 0.4823, 95],
[0.4921, 0.4849, 95],
[0.5032, 0.4816, 95],
[0.5189, 0.4719, 95],
[0.5151, 0.4667, 95],
[0.4901, 0.4334, 95],
[0.4740, 0.4131, 95],
[0.4588, 0.3975, 95],
[0.4504, 0.3999, 95],
[0.4392, 0.4080, 95],
[0.4294, 0.4151, 95],
[0.4254, 0.4180, 95],
])
"""
*CIE Standard Illuminant A* *Optimal Colour Stimuli*.
A_OPTIMAL_COLOUR_STIMULI : ndarray
References
----------
:cite:`Wyszecki2000bb`
"""
C_OPTIMAL_COLOUR_STIMULI = np.array([
[0.1363, 0.0692, 10],
[0.1308, 0.0792, 10],
[0.0808, 0.2132, 10],
[0.0371, 0.4135, 10],
[0.0251, 0.5007, 10],
[0.0181, 0.5893, 10],
[0.0181, 0.6718, 10],
[0.0276, 0.7416, 10],
[0.0434, 0.7890, 10],
[0.0687, 0.8178, 10],
[0.0996, 0.8252, 10],
[0.7040, 0.2946, 10],
[0.5126, 0.1913, 10],
[0.3424, 0.1028, 10],
[0.2813, 0.0771, 10],
[0.2518, 0.0693, 10],
[0.2378, 0.0674, 10],
[0.2230, 0.0663, 10],
[0.1868, 0.0664, 10],
[0.1628, 0.0676, 10],
[0.1289, 0.1268, 20],
[0.1230, 0.1438, 20],
[0.1027, 0.2152, 20],
[0.0762, 0.3420, 20],
[0.0572, 0.4775, 20],
[0.0500, 0.6250, 20],
[0.0637, 0.7410, 20],
[0.0787, 0.7747, 20],
[0.0992, 0.7975, 20],
[0.1239, 0.8055, 20],
[0.1518, 0.7983, 20],
[0.6717, 0.3273, 20],
[0.5542, 0.2513, 20],
[0.4077, 0.1603, 20],
[0.3463, 0.1263, 20],
[0.3195, 0.1150, 20],
[0.3075, 0.1122, 20],
[0.2968, 0.1104, 20],
[0.2586, 0.1104, 20],
[0.1918, 0.1182, 20],
[0.1302, 0.1764, 30],
[0.1255, 0.1980, 30],
[0.1092, 0.2845, 30],
[0.0909, 0.4178, 30],
[0.0855, 0.5500, 30],
[0.0836, 0.6110, 30],
[0.0911, 0.6700, 30],
[0.0975, 0.7140, 30],
[0.1100, 0.7487, 30],
[0.1294, 0.7700, 30],
[0.1462, 0.7806, 30],
[0.1698, 0.7793, 30],
[0.1957, 0.7696, 30],
[0.6390, 0.3613, 30],
[0.5530, 0.2950, 30],
[0.4300, 0.2040, 30],
[0.3733, 0.1658, 30],
[0.3485, 0.1528, 30],
[0.3300, 0.1462, 30],
[0.3140, 0.1443, 30],
[0.3045, 0.1447, 30],
[0.2643, 0.1503, 30],
[0.1383, 0.2180, 40],
[0.1350, 0.2425, 40],
[0.1246, 0.3363, 40],
[0.1179, 0.4720, 40],
[0.1343, 0.6800, 40],
[0.1596, 0.7377, 40],
[0.1766, 0.7470, 40],
[0.1952, 0.7500, 40],
[0.2437, 0.7305, 40],
[0.2964, 0.6903, 40],
[0.3200, 0.6357, 40],
[0.6065, 0.3925, 40],
[0.5395, 0.3320, 40],
[0.4347, 0.2410, 40],
[0.3833, 0.2000, 40],
[0.3607, 0.1851, 40],
[0.3527, 0.1807, 40],
[0.3453, 0.1777, 40],
[0.3325, 0.1752, 40],
[0.3260, 0.1750, 40],
[0.3003, 0.1783, 40],
[0.2727, 0.1844, 40],
[0.2276, 0.1955, 40],
[0.1510, 0.2520, 50],
[0.1497, 0.2785, 50],
[0.1462, 0.3736, 50],
[0.1490, 0.5017, 50],
[0.1589, 0.5990, 50],
[0.1677, 0.6411, 50],
[0.1782, 0.6750, 50],
[0.1913, 0.6980, 50],
[0.2222, 0.7185, 50],
[0.2867, 0.6936, 50],
[0.3412, 0.6493, 50],
[0.4066, 0.5890, 50],
[0.5759, 0.4231, 50],
[0.5207, 0.3655, 50],
[0.4304, 0.2737, 50],
[0.3844, 0.2309, 50],
[0.3489, 0.2071, 50],
[0.3347, 0.2026, 50],
[0.3175, 0.2046, 50],
[0.3000, 0.2092, 50],
[0.2746, 0.2162, 50],
[0.2024, 0.2373, 50],
[0.1694, 0.2797, 60],
[0.1698, 0.3065, 60],
[0.1732, 0.3995, 60],
[0.1847, 0.5156, 60],
[0.2011, 0.5982, 60],
[0.2117, 0.6316, 60],
[0.2238, 0.6567, 60],
[0.2525, 0.6823, 60],
[0.2694, 0.6840, 60],
[0.3344, 0.6502, 60],
[0.3908, 0.6016, 60],
[0.4605, 0.5364, 60],
[0.5470, 0.4514, 60],
[0.5004, 0.3963, 60],
[0.4217, 0.3042, 60],
[0.3803, 0.2593, 60],
[0.3500, 0.2330, 60],
[0.3376, 0.2284, 60],
[0.3238, 0.2294, 60],
[0.3132, 0.2322, 60],
[0.2593, 0.2497, 60],
[0.1932, 0.3005, 70],
[0.1953, 0.3263, 70],
[0.2064, 0.4136, 70],
[0.2261, 0.5163, 70],
[0.2495, 0.5835, 70],
[0.2733, 0.6282, 70],
[0.3063, 0.6432, 70],
[0.3213, 0.6415, 70],
[0.3408, 0.6316, 70],
[0.3876, 0.5999, 70],
[0.5187, 0.4780, 70],
[0.4795, 0.4243, 70],
[0.4107, 0.3319, 70],
[0.3566, 0.2675, 70],
[0.3460, 0.2578, 70],
[0.3356, 0.2525, 70],
[0.3185, 0.2544, 70],
[0.2875, 0.2651, 70],
[0.2290, 0.2868, 70],
[0.2236, 0.3120, 80],
[0.2282, 0.3382, 80],
[0.2465, 0.4183, 80],
[0.2743, 0.5056, 80],
[0.2991, 0.5591, 80],
[0.3136, 0.5784, 80],
[0.3284, 0.5913, 80],
[0.3570, 0.5932, 80],
[0.3785, 0.5912, 80],
[0.4493, 0.5433, 80],
[0.4901, 0.5038, 80],
[0.4562, 0.4505, 80],
[0.3966, 0.3584, 80],
[0.3631, 0.3103, 80],
[0.3391, 0.2815, 80],
[0.3304, 0.2754, 80],
[0.3229, 0.2756, 80],
[0.3035, 0.2802, 80],
[0.2747, 0.2926, 80],
[0.2276, 0.3119, 80],
[0.2631, 0.3192, 90],
[0.2697, 0.3410, 90],
[0.2956, 0.4111, 90],
[0.3302, 0.4827, 90],
[0.3590, 0.5232, 90],
[0.3742, 0.5364, 90],
[0.3896, 0.5438, 90],
[0.4020, 0.5493, 90],
[0.4221, 0.5430, 90],
[0.4397, 0.5350, 90],
[0.4555, 0.5235, 90],
[0.4295, 0.4741, 90],
[0.3330, 0.3080, 90],
[0.3230, 0.2975, 90],
[0.3180, 0.2958, 90],
[0.2980, 0.3030, 90],
[0.2813, 0.3106, 90],
[0.2857, 0.3185, 95],
[0.2943, 0.3395, 95],
[0.3226, 0.4055, 95],
[0.3608, 0.4679, 95],
[0.3907, 0.5025, 95],
[0.4055, 0.5126, 95],
[0.4209, 0.5180, 95],
[0.4300, 0.5195, 95],
[0.4070, 0.4720, 95],
[0.3630, 0.3855, 95],
[0.3270, 0.3172, 95],
[0.3160, 0.3069, 95],
[0.3053, 0.3096, 95],
])
"""
*CIE Illuminant C* *Optimal Colour Stimuli*.
C_OPTIMAL_COLOUR_STIMULI : ndarray
References
----------
:cite:`MacAdam1935a`
"""
D65_OPTIMAL_COLOUR_STIMULI = np.array([
[0.1346, 0.0747, 10],
[0.0990, 0.1607, 10],
[0.0751, 0.2403, 10],
[0.0391, 0.4074, 10],
[0.0211, 0.5490, 10],
[0.0177, 0.6693, 10],
[0.0344, 0.7732, 10],
[0.0516, 0.8055, 10],
[0.0727, 0.8223, 10],
[0.0959, 0.8261, 10],
[0.1188, 0.8213, 10],
[0.7035, 0.2965, 10],
[0.6832, 0.2853, 10],
[0.6470, 0.2653, 10],
[0.5517, 0.2132, 10],
[0.5309, 0.2019, 10],
[0.4346, 0.1504, 10],
[0.3999, 0.1324, 10],
[0.3549, 0.1101, 10],
[0.3207, 0.0945, 10],
[0.2989, 0.0857, 10],
[0.2852, 0.0808, 10],
[0.2660, 0.0755, 10],
[0.2186, 0.0707, 10],
[0.1268, 0.1365, 20],
[0.1081, 0.1984, 20],
[0.0894, 0.2766, 20],
[0.0660, 0.4074, 20],
[0.0549, 0.4971, 20],
[0.0479, 0.6227, 20],
[0.0565, 0.7312, 20],
[0.0927, 0.8005, 20],
[0.1289, 0.8078, 20],
[0.1479, 0.8026, 20],
[0.1664, 0.7941, 20],
[0.6708, 0.3289, 20],
[0.6591, 0.3213, 20],
[0.5988, 0.2820, 20],
[0.5514, 0.2513, 20],
[0.5018, 0.2197, 20],
[0.4502, 0.1874, 20],
[0.4045, 0.1601, 20],
[0.3762, 0.1443, 20],
[0.3440, 0.1284, 20],
[0.3185, 0.1196, 20],
[0.2935, 0.1164, 20],
[0.2528, 0.1189, 20],
[0.2205, 0.1229, 20],
[0.1282, 0.1889, 30],
[0.1067, 0.3003, 30],
[0.0990, 0.3535, 30],
[0.0929, 0.4041, 30],
[0.0846, 0.5028, 30],
[0.0819, 0.6020, 30],
[0.0836, 0.6491, 30],
[0.1004, 0.7433, 30],
[0.1481, 0.7857, 30],
[0.1799, 0.7787, 30],
[0.2119, 0.7609, 30],
[0.6368, 0.3628, 30],
[0.6281, 0.3561, 30],
[0.5682, 0.3098, 30],
[0.5271, 0.2784, 30],
[0.4977, 0.2562, 30],
[0.4504, 0.2212, 30],
[0.4219, 0.2008, 30],
[0.3999, 0.1859, 30],
[0.3801, 0.1732, 30],
[0.3491, 0.1574, 30],
[0.3350, 0.1536, 30],
[0.3197, 0.1526, 30],
[0.2021, 0.1732, 30],
[0.1360, 0.2324, 40],
[0.1266, 0.3030, 40],
[0.1219, 0.3504, 40],
[0.1183, 0.3985, 40],
[0.1155, 0.4509, 40],
[0.1141, 0.5055, 40],
[0.1312, 0.7047, 40],
[0.1516, 0.7454, 40],
[0.1853, 0.7587, 40],
[0.2129, 0.7510, 40],
[0.2415, 0.7344, 40],
[0.6041, 0.3954, 40],
[0.5969, 0.3888, 40],
[0.5524, 0.3484, 40],
[0.5257, 0.3244, 40],
[0.4980, 0.2997, 40],
[0.4598, 0.2661, 40],
[0.3696, 0.1949, 40],
[0.3603, 0.1898, 40],
[0.3501, 0.1859, 40],
[0.3375, 0.1841, 40],
[0.2581, 0.2001, 40],
[0.2220, 0.2095, 40],
[0.1771, 0.2214, 40],
[0.1491, 0.2679, 50],
[0.1441, 0.3511, 50],
[0.1429, 0.4025, 50],
[0.1429, 0.4479, 50],
[0.1472, 0.5522, 50],
[0.1548, 0.6201, 50],
[0.1621, 0.6570, 50],
[0.1790, 0.7035, 50],
[0.1929, 0.7201, 50],
[0.2114, 0.7277, 50],
[0.2991, 0.6851, 50],
[0.5731, 0.4262, 50],
[0.5668, 0.4195, 50],
[0.5492, 0.4009, 50],
[0.4795, 0.3281, 50],
[0.4514, 0.2994, 50],
[0.4113, 0.2600, 50],
[0.3897, 0.2401, 50],
[0.3509, 0.2139, 50],
[0.3391, 0.2126, 50],
[0.3211, 0.2155, 50],
[0.3042, 0.2200, 50],
[0.2466, 0.2374, 50],
[0.2041, 0.2507, 50],
[0.1674, 0.2959, 60],
[0.1677, 0.3520, 60],
[0.1700, 0.4130, 60],
[0.1749, 0.4782, 60],
[0.1801, 0.5257, 60],
[0.1873, 0.5730, 60],
[0.1994, 0.6257, 60],
[0.2088, 0.6523, 60],
[0.2506, 0.6927, 60],
[0.2703, 0.6900, 60],
[0.2930, 0.6798, 60],
[0.5435, 0.4552, 60],
[0.5379, 0.4483, 60],
[0.4775, 0.3751, 60],
[0.4522, 0.3450, 60],
[0.4138, 0.3005, 60],
[0.3611, 0.2472, 60],
[0.3497, 0.2405, 60],
[0.3395, 0.2388, 60],
[0.3195, 0.2429, 60],
[0.2963, 0.2505, 60],
[0.2701, 0.2595, 60],
[0.2270, 0.2747, 60],
[0.2037, 0.2830, 60],
[0.1916, 0.3164, 70],
[0.1958, 0.3656, 70],
[0.2003, 0.4069, 70],
[0.2065, 0.4485, 70],
[0.2150, 0.4963, 70],
[0.2221, 0.5295, 70],
[0.2298, 0.5597, 70],
[0.2402, 0.5918, 70],
[0.2550, 0.6237, 70],
[0.2784, 0.6484, 70],
[0.3000, 0.6521, 70],
[0.5148, 0.4825, 70],
[0.5097, 0.4753, 70],
[0.4776, 0.4304, 70],
[0.4508, 0.3933, 70],
[0.4192, 0.3505, 70],
[0.4005, 0.3259, 70],
[0.3706, 0.2890, 70],
[0.3663, 0.2842, 70],
[0.3517, 0.2699, 70],
[0.3364, 0.2634, 70],
[0.3194, 0.2671, 70],
[0.3007, 0.2739, 70],
[0.2664, 0.2872, 70],
[0.2232, 0.3290, 80],
[0.2404, 0.4145, 80],
[0.2496, 0.4504, 80],
[0.2583, 0.4801, 80],
[0.2760, 0.5308, 80],
[0.3023, 0.5809, 80],
[0.3092, 0.5892, 80],
[0.3318, 0.6041, 80],
[0.3515, 0.6048, 80],
[0.3679, 0.5995, 80],
[0.4080, 0.5750, 80],
[0.4858, 0.5081, 80],
[0.4811, 0.5005, 80],
[0.4634, 0.4719, 80],
[0.4514, 0.4526, 80],
[0.4299, 0.4158, 80],
[0.4001, 0.3720, 80],
[0.3732, 0.3319, 80],
[0.3603, 0.3139, 80],
[0.3500, 0.3009, 80],
[0.3307, 0.2866, 80],
[0.2730, 0.3080, 80],
[0.2519, 0.3169, 80],
[0.2400, 0.3219, 80],
[0.2639, 0.3331, 90],
[0.2801, 0.3832, 90],
[0.2864, 0.4008, 90],
[0.3059, 0.4486, 90],
[0.3182, 0.4746, 90],
[0.3317, 0.4994, 90],
[0.3513, 0.5278, 90],
[0.3657, 0.5421, 90],
[0.3946, 0.5537, 90],
[0.4126, 0.5510, 90],
[0.4354, 0.5406, 90],
[0.4530, 0.5293, 90],
[0.4486, 0.5210, 90],
[0.4444, 0.5131, 90],
[0.4325, 0.4906, 90],
[0.4215, 0.4700, 90],
[0.3990, 0.4284, 90],
[0.3749, 0.3849, 90],
[0.3504, 0.3431, 90],
[0.3349, 0.3196, 90],
[0.3217, 0.3084, 90],
[0.3099, 0.3124, 90],
[0.2852, 0.3235, 90],
[0.2711, 0.3299, 90],
[0.2875, 0.3320, 95],
[0.2949, 0.3513, 95],
[0.3067, 0.3800, 95],
[0.3230, 0.4150, 95],
[0.3368, 0.4415, 95],
[0.3508, 0.4654, 95],
[0.3644, 0.4856, 95],
[0.3765, 0.5007, 95],
[0.3887, 0.5126, 95],
[0.4003, 0.5206, 95],
[0.4108, 0.5251, 95],
[0.4281, 0.5268, 95],
[0.4204, 0.5109, 95],
[0.4132, 0.4959, 95],
[0.4031, 0.4751, 95],
[0.3697, 0.4076, 95],
[0.3498, 0.3692, 95],
[0.3401, 0.3513, 95],
[0.3295, 0.3331, 95],
[0.3167, 0.3189, 95],
[0.3148, 0.3195, 95],
[0.3103, 0.3214, 95],
[0.3006, 0.3259, 95],
[0.2900, 0.3308, 95],
])
"""
*CIE Standard Illuminant D Series D65* *Optimal Colour Stimuli*.
D65_OPTIMAL_COLOUR_STIMULI : ndarray
References
----------
:cite:`Wyszecki2000bh`
"""
ILLUMINANTS_OPTIMAL_COLOUR_STIMULI = CaseInsensitiveMapping({
'A': A_OPTIMAL_COLOUR_STIMULI,
'C': C_OPTIMAL_COLOUR_STIMULI,
'D65': D65_OPTIMAL_COLOUR_STIMULI
})
ILLUMINANTS_OPTIMAL_COLOUR_STIMULI.__doc__ = """
Illuminants *Optimal Colour Stimuli*.
References
----------
:cite:`Wikipedia2004a`
ILLUMINANTS_OPTIMAL_COLOUR_STIMULI : CaseInsensitiveMapping
**{'A', 'C', 'D65'}**
"""
|
[
"nathnaelmelkamu@gmail.com"
] |
nathnaelmelkamu@gmail.com
|
55357b335289b42a1eb2927d0de6ef1686e39773
|
e5e2b7da41fda915cb849f031a0223e2ac354066
|
/sdk/python/pulumi_azure_native/containerregistry/v20201101preview/import_pipeline.py
|
c2b85b346c7f41cc482394c63cf3b0447a0d6150
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
johnbirdau/pulumi-azure-native
|
b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25
|
d676cc331caa0694d8be99cb90b93fa231e3c705
|
refs/heads/master
| 2023-05-06T06:48:05.040357
| 2021-06-01T20:42:38
| 2021-06-01T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,440
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ImportPipelineArgs', 'ImportPipeline']
@pulumi.input_type
class ImportPipelineArgs:
def __init__(__self__, *,
registry_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
source: pulumi.Input['ImportPipelineSourcePropertiesArgs'],
identity: Optional[pulumi.Input['IdentityPropertiesArgs']] = None,
import_pipeline_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]]] = None,
trigger: Optional[pulumi.Input['PipelineTriggerPropertiesArgs']] = None):
"""
The set of arguments for constructing a ImportPipeline resource.
:param pulumi.Input[str] registry_name: The name of the container registry.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
:param pulumi.Input['ImportPipelineSourcePropertiesArgs'] source: The source properties of the import pipeline.
:param pulumi.Input['IdentityPropertiesArgs'] identity: The identity of the import pipeline.
:param pulumi.Input[str] import_pipeline_name: The name of the import pipeline.
:param pulumi.Input[str] location: The location of the import pipeline.
:param pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]] options: The list of all options configured for the pipeline.
:param pulumi.Input['PipelineTriggerPropertiesArgs'] trigger: The properties that describe the trigger of the import pipeline.
"""
pulumi.set(__self__, "registry_name", registry_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "source", source)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if import_pipeline_name is not None:
pulumi.set(__self__, "import_pipeline_name", import_pipeline_name)
if location is not None:
pulumi.set(__self__, "location", location)
if options is not None:
pulumi.set(__self__, "options", options)
if trigger is not None:
pulumi.set(__self__, "trigger", trigger)
@property
@pulumi.getter(name="registryName")
def registry_name(self) -> pulumi.Input[str]:
"""
The name of the container registry.
"""
return pulumi.get(self, "registry_name")
@registry_name.setter
def registry_name(self, value: pulumi.Input[str]):
pulumi.set(self, "registry_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group to which the container registry belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def source(self) -> pulumi.Input['ImportPipelineSourcePropertiesArgs']:
"""
The source properties of the import pipeline.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: pulumi.Input['ImportPipelineSourcePropertiesArgs']):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityPropertiesArgs']]:
"""
The identity of the import pipeline.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityPropertiesArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="importPipelineName")
def import_pipeline_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the import pipeline.
"""
return pulumi.get(self, "import_pipeline_name")
@import_pipeline_name.setter
def import_pipeline_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "import_pipeline_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the import pipeline.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]]]:
"""
The list of all options configured for the pipeline.
"""
return pulumi.get(self, "options")
@options.setter
def options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]]]):
pulumi.set(self, "options", value)
@property
@pulumi.getter
def trigger(self) -> Optional[pulumi.Input['PipelineTriggerPropertiesArgs']]:
"""
The properties that describe the trigger of the import pipeline.
"""
return pulumi.get(self, "trigger")
@trigger.setter
def trigger(self, value: Optional[pulumi.Input['PipelineTriggerPropertiesArgs']]):
pulumi.set(self, "trigger", value)
class ImportPipeline(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
import_pipeline_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ImportPipelineSourcePropertiesArgs']]] = None,
trigger: Optional[pulumi.Input[pulumi.InputType['PipelineTriggerPropertiesArgs']]] = None,
__props__=None):
"""
An object that represents an import pipeline for a container registry.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']] identity: The identity of the import pipeline.
:param pulumi.Input[str] import_pipeline_name: The name of the import pipeline.
:param pulumi.Input[str] location: The location of the import pipeline.
:param pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]] options: The list of all options configured for the pipeline.
:param pulumi.Input[str] registry_name: The name of the container registry.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
:param pulumi.Input[pulumi.InputType['ImportPipelineSourcePropertiesArgs']] source: The source properties of the import pipeline.
:param pulumi.Input[pulumi.InputType['PipelineTriggerPropertiesArgs']] trigger: The properties that describe the trigger of the import pipeline.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ImportPipelineArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An object that represents an import pipeline for a container registry.
:param str resource_name: The name of the resource.
:param ImportPipelineArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ImportPipelineArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
import_pipeline_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ImportPipelineSourcePropertiesArgs']]] = None,
trigger: Optional[pulumi.Input[pulumi.InputType['PipelineTriggerPropertiesArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ImportPipelineArgs.__new__(ImportPipelineArgs)
__props__.__dict__["identity"] = identity
__props__.__dict__["import_pipeline_name"] = import_pipeline_name
__props__.__dict__["location"] = location
__props__.__dict__["options"] = options
if registry_name is None and not opts.urn:
raise TypeError("Missing required property 'registry_name'")
__props__.__dict__["registry_name"] = registry_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if source is None and not opts.urn:
raise TypeError("Missing required property 'source'")
__props__.__dict__["source"] = source
__props__.__dict__["trigger"] = trigger
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerregistry/v20201101preview:ImportPipeline"), pulumi.Alias(type_="azure-native:containerregistry:ImportPipeline"), pulumi.Alias(type_="azure-nextgen:containerregistry:ImportPipeline"), pulumi.Alias(type_="azure-native:containerregistry/v20191201preview:ImportPipeline"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20191201preview:ImportPipeline")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ImportPipeline, __self__).__init__(
'azure-native:containerregistry/v20201101preview:ImportPipeline',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ImportPipeline':
"""
Get an existing ImportPipeline resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ImportPipelineArgs.__new__(ImportPipelineArgs)
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["options"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["source"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["trigger"] = None
__props__.__dict__["type"] = None
return ImportPipeline(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityPropertiesResponse']]:
"""
The identity of the import pipeline.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the import pipeline.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def options(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The list of all options configured for the pipeline.
"""
return pulumi.get(self, "options")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the pipeline at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> pulumi.Output['outputs.ImportPipelineSourcePropertiesResponse']:
"""
The source properties of the import pipeline.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def trigger(self) -> pulumi.Output[Optional['outputs.PipelineTriggerPropertiesResponse']]:
"""
The properties that describe the trigger of the import pipeline.
"""
return pulumi.get(self, "trigger")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
johnbirdau.noreply@github.com
|
30103bb8678508e3031d47989953002bf4914e44
|
f00d42d3723bf8b99d348eb83d9ebbf86e97a486
|
/readTemperatureToAWS.py
|
ebae9f914d59718ffa6bffad3537cc1f5fd21e53
|
[] |
no_license
|
NSAquarimat/Raspberry-Pi-2
|
f5469a98d5bd50e6ebcba54c645e8dec8f3a1a62
|
377e24528898c6820175a2dad7e03acdd38f330e
|
refs/heads/master
| 2020-12-30T11:29:01.571699
| 2017-05-17T12:07:08
| 2017-05-17T12:07:08
| 91,571,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
#!/usr/bin/python
import datetime
import time
import serial
import serial.tools.list_ports
import requests
import json
from const import Constant
from logmessages import LogMessage
class ReadTemperature:
const = ''
logMessage = ''
def __init__(self):
self.const = Constant()
self.logMessage = LogMessage()
def run(self):
try:
while True:
readings = self.readDataFromUSB()
# readings = [' ', '', 'RH=31.4 ', '', 'T=+23.3 ', '', 'RH=31.4 ', '', 'T=-23.4 ']
if len(readings) > 0:
data = self.processData(readings)
self.uploadDataToAws(data)
self.logMessage.logBySection('Data saved : ' + str(datetime.datetime.now(self.const.APPLICATION_TIMEZONE)),
self.const.LOG_SECTION_TEMPERATURE)
time.sleep(40)
        except Exception as e:
self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
self.run()
pass
def processData(self, log):
rows = []
v1 = ''
v2 = ''
try:
for data in log:
reading = data.strip()
if len(reading) > 0:
final = reading.split('=')
if len(final) > 0:
if final[0] == 'RH':
v1 = final[1]
elif final[0] == 'T':
v2 = final[1]
if len(v1) > 0 and len(v2) > 0:
rows.append([v1, v2])
v1 = ''
v2 = ''
        except Exception as e:
self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
pass
return rows
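    # Example of the transformation above (added illustration):
    #   processData(['RH=31.4 ', '', 'T=+23.3 ']) -> [['31.4', '+23.3']]
    # i.e. a humidity/temperature pair is appended once both values are seen.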
def readDataFromUSB(self):
data = []
try:
serialPort = serial.Serial('/dev/ttyUSB0', baudrate=2400, timeout=10)
temperatureReading = serialPort.read(1024)
if len(temperatureReading) > 0:
data = temperatureReading.splitlines()
        except Exception as e:
self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
pass
return data
def uploadDataToAws(self, log):
try:
postData = json.dumps(log)
r = requests.post(self.const.AWS_URL, data=postData)
self.logMessage.logBySection('Response : ' + str(r.text), self.const.LOG_SECTION_TEMPERATURE)
        except Exception as e:
self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
pass
    def sendDataToSheet(self, data):
        try:
            import gspread  # needed by this helper; missing from the module-level imports
            # scope = [self.const.SOURCE_URL]
            # creds = ServiceAccountCredentials.from_json_keyfile_name(self.const.CLIENT_KEY_FILE, scope)
            # client = gspread.authorize(creds)
            # NOTE: credentials are hardcoded here; they belong in configuration
            client = gspread.login('developersa48@gmail.com', 'rrkelocjnerxxfox')
# sheet = client.open(self.const.SHEET_NAME).sheet1
sheet = client.open('livoltTemperature').sheet1
for reading in data:
sheet.append_row(reading)
        except Exception as e:
self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
pass
obReadRemp = ReadTemperature()
obReadRemp.run()
|
[
"noreply@github.com"
] |
NSAquarimat.noreply@github.com
|
96e1094392d1d4f001e2fa8086053069b4eddd06
|
ed4939fc54877e7bce03f2bfbde530a54f415da2
|
/retrocontador.py
|
3ae2f35754dd46c8e7799ad27d76b5e71a2a80b9
|
[] |
no_license
|
eldeivich/m02_boot_0
|
ebd98d4ecd0422d80986f4ebf6d0c37125b2c368
|
e9335e3de999194a87c11561e16a1a0d03213e6a
|
refs/heads/master
| 2020-07-20T01:55:38.829243
| 2019-09-06T13:16:12
| 2019-09-06T13:16:12
| 206,551,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
def retrocontador(e):
print("{},".format(e), end="")
if e > 0:
retrocontador(e - 1)
retrocontador(10)
def sumatorio(n):
    # fixed: the original tested `n < 0` and had no base case, so it either
    # returned None (for n >= 0) or recursed forever (for n < 0)
    if n <= 0:
        return 0
    return n + sumatorio(n - 1)
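# quick sanity check of the fixed recursion (added example)
print(sumatorio(10))  # expected: 55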
|
[
"47783040+eldeivich@users.noreply.github.com"
] |
47783040+eldeivich@users.noreply.github.com
|
4912c77bb556abef5524a0cdd1fe770883f51b60
|
9a4bebbed984ff81761001ea66044e2e222d7f69
|
/example.py
|
211eb506c32ed58797c49f581dd34025272c56a9
|
[] |
no_license
|
Lggggggx/Meta_Data
|
b5c3511cad04370cd7035c69682739327b5b82ea
|
adfd9b61eb7c24f955b40bf02fb9b2962146b89e
|
refs/heads/master
| 2020-04-16T09:30:40.958985
| 2019-06-05T07:40:04
| 2019-06-05T07:40:04
| 165,466,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,897
|
py
|
import copy
import scipy.io as sio
import numpy as np
from sklearn.datasets import make_classification
from sklearn.externals import joblib
from alipy import ToolBox
from alipy.query_strategy.query_labels import QueryInstanceGraphDensity, QueryInstanceQBC, \
QueryInstanceQUIRE, QueryRandom, QueryInstanceUncertainty, QureyExpectedErrorReduction, QueryInstanceLAL
from meta_data import DataSet
from QueryMetaData import QueryMetaData
dataset_path = './newdata/'
datasetnames = np.load('datasetname.npy')
# datasetname = 'echocardiogram'
# datasetname = 'australian'
# datasetname = 'blood'
# datasetname = 'texture'
datasetnames = ['tic-tac-toe']
for datasetname in datasetnames:
dt = DataSet(datasetname, dataset_path)
X = dt.X
y = dt.y.ravel()
y = np.asarray(y, dtype=int)
alibox = ToolBox(X=X, y=y, query_type='AllLabels', saving_path='./experiment_result/')
# Split data
alibox.split_AL(test_ratio=0.3, initial_label_rate=0.05, split_count=5)
# Use the default Logistic Regression classifier
model = alibox.get_default_model()
    # The cost budget is 30 queries
stopping_criterion = alibox.get_stopping_criterion('num_of_queries', 30)
# experiment
meta_regressor = joblib.load('meta_lr.joblib')
# meta_query = QueryMetaData(X, y, meta_regressor)
meta_result = []
for round in range(5):
meta_query = QueryMetaData(X, y, meta_regressor)
# Get the data split of one fold experiment
train_idx, test_idx, label_ind, unlab_ind = alibox.get_split(round)
# Get intermediate results saver for one fold experiment
saver = alibox.get_stateio(round)
# calc the initial point
model.fit(X=X[label_ind.index, :], y=y[label_ind.index])
pred = model.predict(X[test_idx, :])
accuracy = sum(pred == y[test_idx]) / len(test_idx)
saver.set_initial_point(accuracy)
while not stopping_criterion.is_stop():
# Select a subset of Uind according to the query strategy
# Passing model=None to use the default model for evaluating the committees' disagreement
select_ind = meta_query.select(label_ind, unlab_ind, model=None)
label_ind.update(select_ind)
unlab_ind.difference_update(select_ind)
# Update model and calc performance according to the model you are using
model.fit(X=X[label_ind.index, :], y=y[label_ind.index])
pred = model.predict(X[test_idx, :])
accuracy = alibox.calc_performance_metric(y_true=y[test_idx],
y_pred=pred,
performance_metric='accuracy_score')
# Save intermediate results to file
st = alibox.State(select_index=select_ind, performance=accuracy)
saver.add_state(st)
saver.save()
# Passing the current progress to stopping criterion object
stopping_criterion.update_information(saver)
# Reset the progress in stopping criterion object
stopping_criterion.reset()
meta_result.append(copy.deepcopy(saver))
random = QueryRandom(X, y)
random_result = []
for round in range(5):
# Get the data split of one fold experiment
train_idx, test_idx, label_ind, unlab_ind = alibox.get_split(round)
# Get intermediate results saver for one fold experiment
saver = alibox.get_stateio(round)
# calc the initial point
model.fit(X=X[label_ind.index, :], y=y[label_ind.index])
pred = model.predict(X[test_idx, :])
accuracy = sum(pred == y[test_idx]) / len(test_idx)
saver.set_initial_point(accuracy)
while not stopping_criterion.is_stop():
# Select a subset of Uind according to the query strategy
# Passing model=None to use the default model for evaluating the committees' disagreement
select_ind = random.select(unlab_ind)
label_ind.update(select_ind)
unlab_ind.difference_update(select_ind)
# Update model and calc performance according to the model you are using
model.fit(X=X[label_ind.index, :], y=y[label_ind.index])
pred = model.predict(X[test_idx, :])
accuracy = alibox.calc_performance_metric(y_true=y[test_idx],
y_pred=pred,
performance_metric='accuracy_score')
# Save intermediate results to file
st = alibox.State(select_index=select_ind, performance=accuracy)
saver.add_state(st)
saver.save()
# Passing the current progress to stopping criterion object
stopping_criterion.update_information(saver)
# Reset the progress in stopping criterion object
stopping_criterion.reset()
random_result.append(copy.deepcopy(saver))
def main_loop(alibox, strategy, round):
# Get the data split of one fold experiment
train_idx, test_idx, label_ind, unlab_ind = alibox.get_split(round)
# Get intermediate results saver for one fold experiment
saver = alibox.get_stateio(round)
while not stopping_criterion.is_stop():
# Select a subset of Uind according to the query strategy
# Passing model=None to use the default model for evaluating the committees' disagreement
select_ind = strategy.select(label_ind, unlab_ind, batch_size=1)
label_ind.update(select_ind)
unlab_ind.difference_update(select_ind)
# Update model and calc performance according to the model you are using
model.fit(X=X[label_ind.index, :], y=y[label_ind.index])
pred = model.predict(X[test_idx, :])
accuracy = alibox.calc_performance_metric(y_true=y[test_idx],
y_pred=pred,
performance_metric='accuracy_score')
# Save intermediate results to file
st = alibox.State(select_index=select_ind, performance=accuracy)
saver.add_state(st)
# Passing the current progress to stopping criterion object
stopping_criterion.update_information(saver)
# Reset the progress in stopping criterion object
stopping_criterion.reset()
return saver
unc_result = []
qbc_result = []
eer_result = []
for round in range(5):
train_idx, test_idx, label_ind, unlab_ind = alibox.get_split(round)
# Use pre-defined strategy
unc = QueryInstanceUncertainty(X, y)
qbc = QueryInstanceQBC(X, y)
eer = QureyExpectedErrorReduction(X, y)
# random = QueryRandom(X, y)
unc_result.append(copy.deepcopy(main_loop(alibox, unc, round)))
qbc_result.append(copy.deepcopy(main_loop(alibox, qbc, round)))
eer_result.append(copy.deepcopy(main_loop(alibox, eer, round)))
# random_result.append(copy.deepcopy(main_loop(alibox, random, round)))
analyser = alibox.get_experiment_analyser(x_axis='num_of_queries')
analyser.add_method(method_name='QBC', method_results=qbc_result)
analyser.add_method(method_name='Unc', method_results=unc_result)
analyser.add_method(method_name='EER', method_results=eer_result)
analyser.add_method(method_name='random', method_results=random_result)
analyser.add_method(method_name='Meta', method_results=meta_result)
analyser.plot_learning_curves(title=datasetname, std_area=False, saving_path='./experiment_result/')
|
[
"3123652@qq.com"
] |
3123652@qq.com
|
1f84d29ac64c60eecc68e8ab8b8b9a597d982553
|
fb41f80102617035b37a172dd33d46170e19e8ba
|
/rpg_bookshelf/library/migrations/0011_auto_20170406_0841.py
|
094d894ce5a1631828fa4be402b7c7e26c8553b9
|
[] |
no_license
|
Behir92/RPG_Bookshelf
|
10934c567c8869a2f50740a53532825b47bcc74c
|
1d277d7472df64d6f3a4760b5d88c40ff53ed3d5
|
refs/heads/master
| 2021-01-18T22:47:28.514017
| 2017-04-07T09:35:55
| 2017-04-07T09:35:55
| 87,072,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-04-06 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0010_auto_20170406_0712'),
]
operations = [
migrations.AlterField(
model_name='book',
name='type',
field=models.IntegerField(choices=[(1, 'Podręcznik źródłowy'), (2, 'Dodatek'), (3, 'Inne')]),
),
]
|
[
"behir92@gmail.com"
] |
behir92@gmail.com
|
a1d01f459da48bd0f9617167f10533c1bf94e83a
|
792f8763aab5ede4313b069365244a2a9807f8d5
|
/advancedlist.py
|
b41eb6cfa1971d2807be8bbfebc49c37952709b5
|
[] |
no_license
|
arunvenkatram/python
|
e314da806df933a16299bd8c09a9a34ffad6f949
|
12246485509df96515ccac1fd1f9994f3ef96065
|
refs/heads/master
| 2020-04-02T04:58:08.670269
| 2018-12-30T13:45:25
| 2018-12-30T13:45:25
| 154,045,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
#!/bin/python
import argparse
parser=argparse.ArgumentParser(description='search for words including partial words')
parser.add_argument('snippet', help='enter the partial word here')
args = parser.parse_args()
snippet=args.snippet.lower()
# use a context manager and avoid shadowing the built-in `dict`
with open('/usr/share/dict/linux.words', 'r') as dict_file:
    words = dict_file.readlines()
matches=[]
for word in words:
if snippet in word.lower():
matches.append(word)
print(matches)
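# Usage example (added note): `python advancedlist.py ell` prints every
# dictionary word whose lowercase form contains "ell" (entries keep their
# trailing newlines because readlines() does not strip them).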
|
[
"arun92venkat@gmail.com"
] |
arun92venkat@gmail.com
|
e2312ec39fc16ac1782ca4510f5ea78a54196564
|
f157368c667ff35e0d398b6e7041e3cd5c39ebd5
|
/03-fetchfeed.py
|
73189d7dcb0825240eb7956087137df72f37540f
|
[] |
no_license
|
unJASON/TF-learning
|
13e99b8670a09ed1dcbe0c847f3c6b9e97821443
|
349ce8dea1e466d21b5f006cb87326759f949306
|
refs/heads/master
| 2020-04-08T12:13:00.556932
| 2018-11-30T07:22:54
| 2018-11-30T07:22:54
| 159,337,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
import tensorflow as tf
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
add = tf.add(input2,input3)
mul = tf.multiply(input1,add)
with tf.Session() as sess:
    # fetch: run multiple ops in a single session.run() call
result = sess.run([mul,add])
print(result)
#feed
input1 = tf.placeholder(tf.float32)  # 32-bit float placeholder
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1,input2)
with tf.Session() as sess:
    # feed: input data is supplied as a dictionary via feed_dict
print(sess.run(output,feed_dict={input1:[7.],input2:[2.]}))
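# Expected output (worked check, added note):
#   [21.0, 7.0]   # fetch: mul = 3.0 * (2.0 + 5.0), add = 2.0 + 5.0
#   [14.]         # feed: output = 7.0 * 2.0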
|
[
"2693017973@qq.com"
] |
2693017973@qq.com
|
f2f999b2e9c47efa460879ae97f8c6f10827f7f3
|
1e7a65a1f80b17dad71d1fae4b65182b8dcafc2f
|
/views/decorators.py
|
7ca832b9dfa694a8d232b653af234b32481dee4a
|
[
"MIT"
] |
permissive
|
openedoo/module_employee
|
767af7fdf873146c989e0d9a95a9828a08bc7529
|
fa52a93f2f9cfe6160ff75bc2eebabd86ccf318f
|
refs/heads/master
| 2020-05-27T01:13:54.370790
| 2017-04-17T07:32:08
| 2017-04-17T07:32:08
| 82,520,169
| 0
| 1
| null | 2017-03-30T12:40:41
| 2017-02-20T05:28:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
from functools import wraps
from flask import g, flash, url_for
from openedoo.core.libs import session, redirect
from modules.module_employee.models import Employee, Setting
def setup_required(f):
@wraps(f)
def wrap(*args, **kwargs):
"""Checks if there is any employee or not"""
employee = Employee.check_records()
if not employee:
flash("You don't have administrator. Register one now.")
return redirect(url_for('module_employee.setup'))
return f(*args, **kwargs)
return wrap
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
"""Checks user is logged in or not in the session"""
session.permanent = True
try:
if session['username'] is False:
flash('You must login first!')
return redirect(url_for('module_employee.login'))
return f(*args, **kwargs)
except KeyError:
flash('Your session is timeout!')
return redirect(url_for('module_employee.login'))
return wrap
def site_setting(f):
@wraps(f)
def wrap(*args, **kwargs):
if not hasattr(g, 'school') or g.school is None:
g.school = {'name': ''}
setting = Setting()
schoolData = setting.get_existing_name()
if schoolData:
g.school = schoolData
return f(*args, **kwargs)
return wrap
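# Usage sketch (added example): a view guarded by the decorators above. The
# blueprint name `module_employee` matches the url_for() targets used here, but
# the route and view are illustrative, not taken from this module.
#
# @module_employee.route('/dashboard')
# @setup_required
# @login_required
# @site_setting
# def dashboard():
#     ...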
|
[
"dwipurnomo.yk@gmail.com"
] |
dwipurnomo.yk@gmail.com
|
d6d1cf0aae0f255c589c42898ace8a3e6abc4d99
|
814ef5164c2a7e8da31ffa7d67af44fc32d1cbe0
|
/App/config.py
|
0df707f9eab58b5fcad0261f8d9476f8096d4ccc
|
[] |
no_license
|
RayDeveloper/pharm-backend
|
8a3b892f31a43c1e7021402fcef9a36176ae3cc7
|
96a7579b2991e17c0e0dcff9d496ddfb769d0035
|
refs/heads/main
| 2023-08-28T12:13:05.658270
| 2021-11-09T03:34:41
| 2021-11-09T03:34:41
| 422,045,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
import json, os
CONFIG = None
environment = os.environ.get('ENV')
if environment == 'production' or environment == 'staging':
CONFIG = {
"DEBUG" : False,
"JWT_EXPIRATION_DELTA": 7,
"SECRET_KEY" : os.environ.get('SECRET_KEY'),
"SQLALCHEMY_DATABASE_URI" : os.environ.get('SQLALCHEMY_DATABASE_URI'),
"ENV":'production'
}
else:
with open('environment.staging.json') as config_file:
CONFIG = json.load(config_file)
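# For reference (added note): environment.staging.json is expected to supply the
# same keys as the production branch above. Illustrative values only:
# {
#     "DEBUG": true,
#     "JWT_EXPIRATION_DELTA": 7,
#     "SECRET_KEY": "change-me",
#     "SQLALCHEMY_DATABASE_URI": "sqlite:///staging.db",
#     "ENV": "staging"
# }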
|
[
"snickdx@gmail.com"
] |
snickdx@gmail.com
|
2df9f187b41621c0b62ed5a79857cf5e30baeb5c
|
c34c5f6f5f4c727804628a8e113fd79ea51a71b3
|
/PreprocessingCode/InputTextAnalysis.py
|
973aecf85bb82291f5d9c332d177723e93df17b4
|
[] |
no_license
|
Aarolon/MLProject
|
b58e1e55a698a0b718498f222dae2ebf251e229d
|
b69346868993f2a4e0911f1cdad6a5f550e548de
|
refs/heads/main
| 2023-01-23T17:59:54.824715
| 2020-12-02T02:30:44
| 2020-12-02T02:30:44
| 315,759,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,827
|
py
|
import preprocessing
import vectorization
import pandas as pd
import twitterAPIKeys as t
# words_df = preprocessing.getDfFromJSON('All_Beauty.json.gz')
# words_df = words_df[:10] #just testing on a small substring of data
words_df = pd.read_csv('All_Beauty10000.csv')
words_df
#at this point words df is just a column of review texts and their associated scores
# preprocessing.preprocessForSentimentAnalsis(words_df['reviewText'][4], preprocessing.stopwords,preprocessing.lemmatizer);
# words_df['documents']=words_df['reviewText'].map(preprocessing.preprocess
# words_df = words_df[words_df['documents'] != False]
#documents column now is just the preprocessed words stripped of fluff, ready to be turned into a sparse matrix
#First we just need a list of all of the words
#now we construct our CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
#generate a sparce array from the things
words_df['documents'] = [" ".join(preprocessing.tokenize(doc)).split(" ") for doc in words_df['documents']]
all_words = vectorization.getAllWordsFromDF(words_df, 'documents')
docList= [" ".join(doc) for doc in words_df['documents']]
# docList = vectorization.ListToString(words_df,'documents')
v,sparceVector = vectorization.vectorize(CountVectorizer, all_words, docList)
sv_array = sparceVector.toarray()
#now we just need to form our labels in whatever way we want them to
words_df["pos_neg"] = words_df['overall'].map(vectorization.binarizeRating)
import sklearn.model_selection  # bare `import sklearn` does not load the model_selection submodule
import numpy as np
xTrain, xTest, yTrain, yTest = sklearn.model_selection.train_test_split(sv_array,list(words_df['pos_neg']),test_size = .3);
xTrain[0].shape
ytrain = np.array(yTrain)
ytest = np.array(yTest)
ytrain.shape
xTrain.shape
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from sklearn.preprocessing import normalize
modelAmazon = Sequential()
modelAmazon.add(Dense(50, activation = "relu", input_shape=(8250, )))
# Hidden - Layers
modelAmazon.add(Dropout(0.3, noise_shape=None, seed=None))
modelAmazon.add(Dense(50, activation = "relu"))
modelAmazon.add(Dropout(0.3, noise_shape=None, seed=None))
modelAmazon.add(Dense(50, activation = "relu"))
# Output- Layer
modelAmazon.add(Dense(1, activation = "sigmoid"))
modelAmazon.summary()
modelAmazon.compile( optimizer = "adam",loss = "binary_crossentropy", metrics = ["accuracy"])
results = modelAmazon.fit(
xTrain, ytrain,
epochs= 10,
batch_size = 700,
validation_data = (xTest, ytest)
)
print(np.mean(results.history["accuracy"]))
from keras.datasets import imdb
index = imdb.get_word_index()
def transformToIMDB(doc, numWords):
    # build a multi-hot vector over the IMDB vocabulary
    # (the original body only printed the indices and returned nothing)
    sparce_array = np.zeros(10000)
    for word in doc:
        if word in index and index[word] < numWords:
            sparce_array[index[word]] = 1
    return sparce_array
(training_data, training_targets), (testing_data, testing_targets) = imdb.load_data(num_words = 10000)
len(training_data[6])
len(training_data[6])
data = np.concatenate((training_data, testing_data), axis=0)
targets = np.concatenate((training_targets, testing_targets), axis=0)
def vectorize(sequences, dimension = 10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1
return results
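# Worked example (added illustration):
#   vectorize([[1, 3]], dimension=5) -> array([[0., 1., 0., 1., 0.]])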
data = vectorize(data)
training_targets[8]
targets = np.array(targets).astype("float32")
test_x = data[:10000]
type(test_x[0][0])
test_y = targets[:10000]
train_x = data[10000:]
train_y = targets[10000:]
def runOnSample(input):
prepped = preprocessing.preprocessForSentimentAnalsis(input,preprocessing.stopwords,preprocessing.lemmatizer)
print(prepped)
prepped= " ".join(prepped)
print(prepped)
sparce_inputs = v.transform([prepped]).toarray()
return modelAmazon.predict(sparce_inputs)[0][0]
def getSentimentOfTopic(topic, nTweets):
tweets = t.getTopic(topic, nTweets)
print(tweets)
prepped = [ preprocessing.preprocessForSentimentAnalsis(tweet,preprocessing.stopwords,preprocessing.lemmatizer) for tweet in tweets]
actual_words= [getActuallyUsedWords(doc) for doc in prepped]
prepped= [" ".join(doc) for doc in prepped]
sparce_inputs = v.transform(prepped).toarray()
output = modelAmazon.predict(sparce_inputs)
output = list(np.squeeze(output))
print(output)
return pd.DataFrame(data= {"Tweets" : tweets,"Trained Words":actual_words,"Output" : output})
def sentimentCalculation(norm):
    # the original duplicated the `norm < .5` branch and returned None for norm >= 1
    if norm < .2:
        return "Very Negative"
    if norm < .4:
        return "Negative"
    if norm < .5:
        return "Somewhat Negative"
    if norm < .75:
        return "Neutral"
    return "Positive"
def getSemanticsAnalysis():
text = entry1.get()
score = runOnSample(text)
label1 = tk.Label(root, text=str(score)+" ("+sentimentCalculation(score)+")" )
canvas1.create_window(200, 230, window=label1)
def getActuallyUsedWords(phrase):
out = []
for word in phrase:
if max(v.transform([word]).toarray()[0]) !=0:
out = out+ [word]
return out;
import tkinter as tk
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 400, height = 300, relief = 'raised')
canvas1.pack()
label1 = tk.Label(root, text='Sentiment Analysis Using Neural Network')
label1.config(font=('helvetica', 14))
canvas1.create_window(200, 25, window=label1)
label2 = tk.Label(root, text='Input text to be evaluated:')
label2.config(font=('helvetica', 10))
canvas1.create_window(200, 100, window=label2)
entry1 = tk.Entry (root)
canvas1.create_window(200, 140, window=entry1)
button1 = tk.Button(text='Predict Sentiment', command=getSemanticsAnalysis, bg='brown', fg='white', font=('helvetica', 9, 'bold'))
canvas1.create_window(200, 180, window=button1)
root.mainloop()
|
[
"erickgomez@mines.edu"
] |
erickgomez@mines.edu
|
48dd335939066781cab20f76da01345b22c0ea34
|
d046fd4ac8e52ed8054199765893f7e1a71302f2
|
/master/bt5/slapos_cloud/SkinTemplateItem/portal_skins/slapos_cloud/Alarm_garbageCollectNonAllocatedRootTree.py
|
e9825070b0f4b1285eef5653c261f238a292c95d
|
[] |
no_license
|
jakop345/slapos.core
|
7538418056be6541e9ee7a70d2d6b694e03daafc
|
410dfb506b7ec17745365d573e7401f217b74ed4
|
refs/heads/master
| 2020-06-18T21:43:39.012812
| 2016-11-24T11:06:49
| 2016-11-24T11:06:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
portal = context.getPortalObject()
select_dict= {'default_aggregate_uid': None}
portal.portal_catalog.searchAndActivate(
portal_type=('Slave Instance', 'Software Instance'),
validation_state='validated',
default_aggregate_uid=None,
select_dict=select_dict,
left_join_list=select_dict.keys(),
method_id='Instance_tryToGarbageCollectNonAllocatedRootTree',
activate_kw={'tag': tag}
)
context.activate(after_tag=tag).getId()
|
[
"alain.takoudjou@nexedi.com"
] |
alain.takoudjou@nexedi.com
|
b0f90126d112f8cb5a8031b6ab8276de24e876d1
|
5696f91a586d8ad7e28be77d7161b7b7e24aba8a
|
/micro_influencer_utilities.py
|
292fde9da942012beb7d7167e18e299b3dfda9e6
|
[
"Apache-2.0"
] |
permissive
|
project-72/micro-influencer-detector
|
855415b91512649a935cbd71050c7691794ca733
|
a2db70cd847fb80d761b452a3506d829eb2db2b7
|
refs/heads/master
| 2022-01-19T00:48:27.688279
| 2019-07-09T08:50:27
| 2019-07-09T08:50:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,076
|
py
|
# This Python file uses the following encoding: utf-8
#!/usr/bin/
import tweepy
import os
import time
import sys
from pathlib import Path
from datetime import datetime
import json
import re
#------------------------------------------------------#
#-------------authentication phase started-------------#
#------------------------------------------------------#
def authentication(pathToDevKeyAndSecret, pathToTwitterAuthData):
try:
f = open(pathToDevKeyAndSecret, "r") #retrieving key and secret in a local file, not available on github
#ask this info to the developer of the app
except IOError:
print ("file with key and secret of Twitter app not found, ask to the developer\n")
exit()
else:
print ("file opening and information retrieving")
#read my developer app key and secret from local file .gitignore
consumer_key = f.readline().rstrip('\n')
consumer_secret = f.readline().rstrip('\n')
f.close()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
twitterAuthData = Path(pathToTwitterAuthData) #here we find key and secret of the user using the app on Twitter
if not twitterAuthData.is_file() or os.stat(pathToTwitterAuthData).st_size == 0:
#no previous authentication data, need to autenthicate via browser
try:
redirect_url = auth.get_authorization_url()
print("Redirect url:", redirect_url)
except tweepy.TweepError:
print ('Error! Failed to get request token.')
        verifier = input('Verifier:')  # raw_input() is Python 2 only; this module otherwise uses Python 3 built-ins
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print ('Error! Failed to get access token.')
access_token = auth.access_token
access_token_secret = auth.access_token_secret
twitterAuthData = open(pathToTwitterAuthData, "w")
twitterAuthData.write(auth.access_token+"\n"+auth.access_token_secret+"\n");
twitterAuthData.close();
else:
#already got auth data, read it from file
twitterAuthData = open(pathToTwitterAuthData, "r")
access_token = twitterAuthData.readline().rstrip('\n')
access_token_secret = twitterAuthData.readline().rstrip('\n')
twitterAuthData.close()
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)#, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
print('[0] authentication completed with success')
return api
def create_all_necessary_folders(pathToDataFolder, topic_selected):
#--Create the Data collection folder if not exists--#
if not os.path.exists(pathToDataFolder): #here we will store data collection after tweets retrieval
os.makedirs(pathToDataFolder)
#--One folder per topic, in order to do not overwrite folders--#
pathToDataFolder = pathToDataFolder+"/"+topic_selected
#--Create the potential micro influencer list folder if not exists--#
if not os.path.exists(pathToDataFolder+"/00_potential_micro_influencers_users"):
os.makedirs(pathToDataFolder+"/00_potential_micro_influencers_users")
#--Create the follower list folder for all potential micro influencers if not exists--#
if not os.path.exists(pathToDataFolder+"/01_followers_list"):
os.makedirs(pathToDataFolder+"/01_followers_list")
#--Create the selected and filtered tweets folder for all potential micro influencers if not exists--#
if not os.path.exists(pathToDataFolder+"/02_users_tweets"):
os.makedirs(pathToDataFolder+"/02_users_tweets")
    #--Create users_parameters folder of potential micro influencer if not exists--#
if not os.path.exists(pathToDataFolder+"/03_users_parameters/recall"):
os.makedirs(pathToDataFolder+"/03_users_parameters/recall")
if not os.path.exists(pathToDataFolder+"/03_users_parameters/embeddness"):
os.makedirs(pathToDataFolder+"/03_users_parameters/embeddness")
if not os.path.exists(pathToDataFolder+"/03_users_parameters/interest"):
os.makedirs(pathToDataFolder+"/03_users_parameters/interest")
if not os.path.exists(pathToDataFolder+"/03_users_parameters/big5"):
os.makedirs(pathToDataFolder+"/03_users_parameters/big5")
if not os.path.exists(pathToDataFolder+"/03_users_parameters/y"):
os.makedirs(pathToDataFolder+"/03_users_parameters/y")
if not os.path.exists(pathToDataFolder+"/03_users_parameters/table"):
os.makedirs(pathToDataFolder+"/03_users_parameters/table")
if not os.path.exists("dataset"):
os.makedirs("dataset")
if not os.path.exists("GloVe"):
os.makedirs("GloVe")
print ("[1] all folders created or checked")
return pathToDataFolder
def topic_selection():
topic_selected = input('What topic are you looking micro-influencers for?\n')
if not topic_selected.startswith('#'):
topic_selected = "#"+topic_selected
#print topic_selected
return topic_selected
def user_list_from_topic_selected(topic_selected, api):
users_returned = []
print("Looking for users with at least 1k and at most 20k followers,")
print("having recently spoke about topic selected", topic_selected)
for tweet in limit_handled(tweepy.Cursor(api.search,q=topic_selected, count = 100, lang = "en").items(1000)): #now 1000, we'll exec on more topics
if (tweet.user.followers_count>1000 and tweet.user.followers_count<20000):
#print (tweet.user.screen_name)
if tweet.user.friends_count < tweet.user.followers_count:
users_returned.append(tweet.user.screen_name)
unique_users_returned = set(users_returned)
unique_users_returned = list(unique_users_returned)
return unique_users_returned
def store_user_list_csv(pathToStore, unique_users_returned):
fp1 = open(pathToStore, "w")
for mi_username in unique_users_returned:
if mi_username == unique_users_returned[-1]:
fp1.write(str(mi_username))
else:
fp1.write(str(mi_username)+",")
fp1.close()
print ("[2] List of potential micro influencers stored.")
def retrieve_user_list(pathToUserList):
unique_users_returned = []
f = open(pathToUserList, "r")
content = f.read()
unique_users_returned = content.split(",")
return unique_users_returned
def limit_handled(cursor):
while True:
try:
            yield next(cursor)  # Python 3: use next(cursor), not cursor.next()
except tweepy.RateLimitError:
time.sleep(15*60)
def retrieve_and_store_followers_csv(pathToFollowerList, unique_users_returned, api):
user_progress = 0
for i in unique_users_returned:
count = 0
user_progress +=1
while True:
try:
print ("retrieving followers of: " + i)
print("progress: " + str(user_progress) + "/" + str(len(unique_users_returned)))
fp2 = open(pathToFollowerList + i +".csv", "w")
for follower_id in limit_handled(tweepy.Cursor(api.followers_ids, screen_name=i).items()):
if count == 0:
fp2.write(str(follower_id))
count +=1
else:
fp2.write(","+str(follower_id))
count +=1
fp2.close()
break #exiting infinite while loop
except tweepy.TweepError:
time.sleep(15)
print (i + "'s followers stored. They are " + str(count))
print ("[3] Storing users followers phase completed.")
def retrieve_and_store_tweet_tab_back(pathToUserTweets, unique_users_returned, api):
user_progress = 0
for username in unique_users_returned:
user_progress +=1
while True:
try:
#get tweets
print ("Searching tweets of " + username)
print("progress: " + str(user_progress) + "/" + str(len(unique_users_returned)))
#fp3 = open(pathToDataFolder+"/02_users_tweets"+"/"+username, "w")
fp3 = open(pathToUserTweets+username, "w")
for page in limit_handled(tweepy.Cursor(api.user_timeline, username, count=100, lang = "en").pages()): #all tweets
for tweet in page:
fp3.write(str(tweet.id)+"\t")
new_tweet = ""
tweet_cleaned = tweet.text.split("\n")
for sintagma in tweet_cleaned:
new_tweet = new_tweet + " " + sintagma
new_tweet2 = ""
tweet_cleaned2 = new_tweet.split("\t")
for sintagma2 in tweet_cleaned2:
new_tweet2 = new_tweet2 + " " + sintagma2
fp3.write(new_tweet2 + "\n")
#at the end of the story we have ----> TweetId\tTweetText\n
fp3.close()
break #exiting infinite while loop
except tweepy.TweepError as e:
print(e)
print ("[4]tweets retrieved and stored")
def compute_and_store_embeddeness(pathToFollowerList, pathToUserParameters, unique_users_returned):
compare_follows_dict = {}
for username in unique_users_returned:
username_followers_list = []
fp2 = open(pathToFollowerList+username+".csv", "r")
username_followers_list = fp2.read().split(",")
fp2.close()
compare_follows_dict[username] = username_followers_list
print ("[5] dictionary created")
for user in compare_follows_dict:
embeddnessScore = 0.0
total_overlapping = 0.0 #sum up all followers of a mi when compare in other mi followers list
followers_count = len(compare_follows_dict[user])
for user2 in compare_follows_dict:
if user != user2 :
same_followers_list = set(compare_follows_dict[user]) & set(compare_follows_dict[user2])
total_overlapping += len(same_followers_list)
if followers_count > 0:
embeddnessScore = total_overlapping/followers_count
else:
embeddnessScore = 0.0
#fp4 = open(pathToDataFolder+"/03_users_parameters/embeddness"+"/"+user+"_embeddnessScore.txt", "w");
fp4 = open(pathToUserParameters+"embeddness/"+user, "w")
fp4.write(str(embeddnessScore))
fp4.close()
print ("[6] embeddness score computed and stored")
def compute_and_store_interest(topic_selected, pathToUserTweets, pathToUserParameters, unique_users_returned):
for user in unique_users_returned:
#print topic_selected
#print topic_selected[1:]
significative_tweets_counter = 0.0
total_tweets = 0.0
f = open(pathToUserTweets+user, "r")
for line in f.readlines():
if topic_selected in line or topic_selected[1:] in line:
significative_tweets_counter +=1
total_tweets += 1
f.close()
if total_tweets > 0:
Sint = (significative_tweets_counter/total_tweets)
else:
Sint = 0.0
fout = open(pathToUserParameters+"interest/"+user, "w")
fout.write(str(Sint))
fout.close()
def compute_and_store_recall(topic_selected, pathToFollowerList, pathToUserTweets, pathToUserParameters, unique_users_returned, api):
user_progress = 0
for username in unique_users_returned:
user_progress += 1
print (username)
print("progress: " + str(user_progress) + "/" + str(len(unique_users_returned)))
username_followers_list = []
fp2 = open(pathToFollowerList+username+".csv", "r")
username_followers_list = fp2.read().split(",")
fp2.close()
significative_tweets_counter = 0.0
total_retweets_performed_by_followers = 0.0
user_tweets_counter = 0.0
fp3 = open(pathToUserTweets+username, "r")
for line in fp3.readlines():
user_tweets_counter += 1
if topic_selected in line: #or topic_selected[1:] in line:
significative_tweets_counter +=1
informations = []
informations = line.split("\t")
while True:
try:
if informations[0].isdigit():
try:
statuses = api.retweets(informations[0])
for status in statuses:
#print "status user id :" + str(status.user.id)
if str(status.user.id).rstrip("\n") in username_followers_list:
total_retweets_performed_by_followers +=1
except tweepy.RateLimitError:
time.sleep(15*60)
break
except:
print("Some error occurred, I'm trying again")
fp3.close()
if significative_tweets_counter > 0:
            recallScore = (total_retweets_performed_by_followers/significative_tweets_counter)  # /len(username_followers_list) removed because pagination is disabled, so at most 100 retweets are fetched per tweet
else:
recallScore = 0.0
if user_tweets_counter > 0:
interest_in_that_topic = significative_tweets_counter/user_tweets_counter
else:
interest_in_that_topic = 0.0
fp4 = open(pathToUserParameters+"recall/"+username, "w");
fp4.write(str(recallScore))
fp4.close()
print ("[7] filtered by topic tweets printed and recall score calculated")
def compute_and_store_engagement(topic_selected, pathToUserTweets, pathToUserParameters, unique_users_returned, api):
user_progress = 0
for user in unique_users_returned:
tweet_skipped = 0
user_progress += 1
print (user)
print("progress: " + str(user_progress) + "/" + str(len(unique_users_returned)))
fin = open(pathToUserTweets+user, "r")
total_sum = 0.0
total_tweets = 0.0
followers_count = api.get_user(user).followers_count
for line in fin.readlines():
elements = line.split("\t") #tweet_id \t tweet_text
try:
tweet = api.get_status(elements[0])
sum_retweet_like = tweet.retweet_count + tweet.favorite_count
total_sum += sum_retweet_like
total_tweets += 1
except tweepy.RateLimitError:
print("Rate limit, i'm sleeping")
time.sleep(15*60)
except:
tweet_skipped +=1
print("Tweepy error, tweet skipped over " + str(total_tweets), tweet_skipped)
continue
if followers_count > 0 and total_tweets > 0:
    Score_engagement = (total_sum/followers_count)/total_tweets
else:
    Score_engagement = 0.0  # guard against users with no followers or no scorable tweets
fin.close()
if not os.path.exists(pathToUserParameters+"/engagement/"):
os.makedirs(pathToUserParameters+"/engagement/")
fout = open(pathToUserParameters+"/engagement/"+user,"w")
fout.write(str(Score_engagement))
fout.close()
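# Worked example (illustrative): a user with 200 followers whose 4 scored
# tweets total 80 retweets + likes gets Score_engagement = (80 / 200) / 4 = 0.1;
# engagement is normalised per follower before dividing by the tweet count.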
|
[
"simone.leonardi.rd@gmail.com"
] |
simone.leonardi.rd@gmail.com
|
b7c373b4b5a12cdda3db6d7c6a5e5658426da9c7
|
62652efa27184d8dc137a58489ba89c6af617d59
|
/python/gui/Gui_3DS.py
|
be563d97242552faab29ecb01f0b8370329cf08c
|
[
"MIT"
] |
permissive
|
DGU-DAI-Lab/Artifact-Mapping-from-3D-Scanning
|
f88f577c6e004c1da4751cbd315f562cfcec0256
|
8d5020d239e1558c5a1762478e4b3ac1166ccfef
|
refs/heads/master
| 2022-03-25T13:53:51.978064
| 2020-01-15T06:10:12
| 2020-01-15T06:10:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
import tkinter as tk
import stl
from stl import mesh
import numpy as np
from src import DepthSegment as ds
from src import Svgfy as svg
import time
def set_root(window):
global root
root = window
def Rotate_UseCogCov():
pass
def Rotate_Direct():
pass
def Mapping_SVG_noBackground():
pass
def Mapping_SVG_plainBackground():
pass
def Mapping_SVG_LinearGradientByNormal():
pass
def Mapping_SVG_LinearGradientByRatio():
pass
def Mapping_WindowDetect():
pass
def main(fname):
print('3ds')
obj = mesh.Mesh.from_file(fname)
print('\n>>> Start.\n')
print('D-Segmentation start.')
start = time.time()
d = ds.DepthSegmentation(obj)
front,section,rear = d
end = time.time()
print('D-Segmentation done.')
print('took %f seconds.' % (end-start))
print()
total_before = obj.__len__()
total_after = len(front)+len(section)+len(rear)
diff = total_before-total_after
print('BEFORE >>> total : %d' % total_before)
print('AFTER >>> total : %d [ front : %d | section : %d | rear : %d ]' % (total_after, len(front), len(section), len(rear)))
print('* %d (%.2f%%) decreased.' % ( diff, diff/total_before*100 ))
print()
# 2nd Phase
print('SVG-Converting start.')
start = time.time()
def create_svg_file(path,data):
f = open(path,'w')
f.write(data)
f.close()
create_svg_file('../output/svg/section.svg', svg.build_section(section))
create_svg_file('../output/svg/front.svg', svg.build_surface(front))
create_svg_file('../output/svg/rear.svg', svg.build_surface(rear) )
end = time.time()
print('SVG-Converting done.')
print('took %f seconds.' % (end-start))
print()
print('\n>>> Done.\n')
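# Illustrative invocation (the path is hypothetical):
# main('../input/sample.stl')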
|
[
"hepheir@gmail.com"
] |
hepheir@gmail.com
|
32cd57d99d7c5431cc357f0727c5ab5626d2224b
|
2b56f75932d239fff9abfc0a84bd273a5436a06f
|
/trails/feeds/360cryptolocker.py
|
3aef9c6a819f7d25d8d20606ae4e7d7a09317775
|
[
"MIT"
] |
permissive
|
dyhpoon/maltrail
|
d89ea395c74b6bca9b77ad3c2198f6e92d0ff664
|
7c9c7226161d8c0e233db7718be60912544bb586
|
refs/heads/master
| 2021-07-21T19:33:21.682448
| 2017-10-31T12:21:43
| 2017-10-31T12:21:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2017 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "http://data.netlab.360.com/feeds/dga/cryptolocker.txt"
__check__ = "netlab 360"
__info__ = "cryptolocker (malware)"
__reference__ = "360.com"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for match in re.finditer(r"(?m)^([\w.]+)\s+2\d{3}\-", content):
retval[match.group(1)] = (__info__, __reference__)
return retval
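# Illustrative feed line (hypothetical domain) and the entry it produces:
# "examplegenerated.ru\t2017-10-31 00:00:00\t..." would yield
# retval["examplegenerated.ru"] = (__info__, __reference__)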
|
[
"miroslav.stampar@gmail.com"
] |
miroslav.stampar@gmail.com
|
5c358e24f6cf03739854757ba2aba966a2d92983
|
bc3c7b4b9bd1df0306d2c52ca0683915b0865fea
|
/courses/templatetags/course_extras.py
|
30a63ad75517c5e24ea03bbcd91e6c8f673552af
|
[] |
no_license
|
magyarn/learning_site
|
7747fa2b175137489f50c0486da6a20f2fc55107
|
c9c02603c001ef8e546e38bea4b69b27824fb0a0
|
refs/heads/master
| 2020-04-22T01:37:03.692717
| 2019-02-10T21:26:19
| 2019-02-10T21:26:19
| 170,019,836
| 0
| 0
| null | 2019-02-10T21:26:20
| 2019-02-10T19:53:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
from django import template
from django.utils.safestring import mark_safe
import markdown2
from courses.models import Course
register = template.Library()
@register.simple_tag
def newest_course():
''' Gets the most recent course that was added to the library. '''
return Course.objects.filter(published=True).latest('created_at')
@register.inclusion_tag('courses/course_nav.html')
def nav_courses_list():
''' Returns dictionary of courses to display as navigation pane. '''
courses = Course.objects.filter(
published=True
).order_by('-created_at').values('id', 'title')[:5]
return {'courses': courses}
@register.filter('time_estimate')
def time_estimate(word_count):
''' Estimates the number of minutes it will take to complete a step based on
the passed in word count '''
minutes = round(word_count/20)
return minutes
@register.filter('markdown_to_html')
def markdown_to_html(markdown_text):
'''Converts markdown text to HTML'''
html_body = markdown2.markdown(markdown_text)
return mark_safe(html_body)
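# Illustrative template usage (variable names are hypothetical):
# {% load course_extras %}
# {% nav_courses_list %}
# {{ step.content|markdown_to_html }}
# {{ word_count|time_estimate }} minutes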
|
[
"magyarn@umich.edu"
] |
magyarn@umich.edu
|
c297389ece5661707f7704f66b4e36a39205d9e9
|
8dbe24acd1ad993ec1989b6814bdd9ba64675655
|
/graph/union_find_clustering.py
|
ffdc54093388de1f50f3f112fdd59e61b6f9880c
|
[] |
no_license
|
stefanosc/algorithms
|
9764f31feee630212a2037d20dfa1224336f1d2c
|
c3b0488d96071ae6df23958aea36393815351fc6
|
refs/heads/master
| 2021-01-14T08:51:54.211194
| 2015-04-12T21:53:42
| 2015-04-12T21:53:42
| 27,975,754
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
import numpy as np
class UnionFind(object):
"""simple implementation of UnionFind algorithm
initialize members, rank and number of sets"""
def __init__(self, size=2**24, arr=None):
self.members = np.arange(size)
self.rank = np.ones(size, dtype=int)
self.sets = len(arr.nonzero()[0]) if arr is not None else size  # guard: arr defaults to None
def connected(self, node1, node2):
"""simply compare if 2 nodes have the same root"""
return self.find_root(node1) == self.find_root(node2)
def union(self, node1, node2):
"""find root of both nodes and return if they are already
the same.
Otherwise we need to unite them, in this case decrease
the number of sets.
Than simply change the root of the node which has lower rank"""
root1 = self.find_root(node1)
root2 = self.find_root(node2)
if root1 == root2:
return None
self.sets -= 1
if self.rank[root1] < self.rank[root2]:
self.members[root1] = root2
else:
if self.rank[root1] == self.rank[root2]:
self.rank[root1] += 1
self.members[root2] = root1
def find_root(self, node):
"""This implementation is the path compression principle
of the UnionFind data structure. As we search for the root
and go up the 'chain' we compress the path by assigning the
root of each node to the root of their parent."""
while node != self.members[node]:
self.members[node] = self.members[self.members[node]]
node = self.members[node]
return node
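# Minimal usage sketch (illustrative; `arr` here just marks four active nodes):
# import numpy as np
# uf = UnionFind(size=4, arr=np.ones(4, dtype=int))
# uf.union(0, 1); uf.union(2, 3)
# uf.connected(0, 1)  # -> True
# uf.sets             # -> 2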
|
[
"stefano@bvprojects.org"
] |
stefano@bvprojects.org
|
a2a715b286fd319896732dc70e8d19cfaf70d286
|
64842e59f10d8a4a3e1a457ce9d91e6a05f536cc
|
/addtobuildlist/__init__.py
|
bd04c18d2810dbce4715a9ed3984d03f1d74eb7b
|
[
"MIT"
] |
permissive
|
Tibibv/addtobuildlist
|
9bd905e620ad6630e5d48d1ec88d9cd0e957aa19
|
e204fd243c14655277c57fcaa61ebadd5c30a4bf
|
refs/heads/master
| 2021-08-30T01:17:43.484273
| 2017-12-15T14:10:23
| 2017-12-15T14:10:23
| 114,133,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
from .addtobuildlist import *
|
[
"kron_bv@yahoo.com"
] |
kron_bv@yahoo.com
|
f418ecdd5c0785b88db763b0865a7202e2cef0ed
|
7e72a6796727ca6c24b5a41f2c7418c3a5960b83
|
/accounting_module/migrations/0003_alter_chartofaccount_trialbalance_chart.py
|
f610d3c4651bb3f392040908c2a7fe0fe87c9f4e
|
[] |
no_license
|
joeygracesabusido/accounting-backend
|
269d479ecb2818b2a4e8e2433915829ee23438ed
|
97f4f276abd8767f92dec4e22db347c578cb61ed
|
refs/heads/master
| 2023-08-19T11:37:56.636476
| 2021-10-07T08:43:39
| 2021-10-07T08:43:39
| 409,522,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Generated by Django 3.2.7 on 2021-09-23 06:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting_module', '0002_alter_chartofaccount_trialbalance_chart'),
]
operations = [
migrations.AlterField(
model_name='chartofaccount',
name='trialBalance_chart',
field=models.CharField(choices=[('CA', 'Current Asset'), ('NCA', 'Non-Current Asset'), ('CL', 'Current Liability'), ('NCL', 'Non-Current Liability'), ('E', 'Equity'), ('I', 'Income'), ('EXCOS', 'Cost of Sale Expense'), ('EXGA', 'General and Administrative Expense')], max_length=100),
),
]
|
[
"joeygracesabusido@gmail.com"
] |
joeygracesabusido@gmail.com
|
01c9cc55ebc02a0841e1bb9573a58989a8c6e1bb
|
9f00c5af33e50470c72827b2009899bb8d43c299
|
/stream_service/forms.py
|
8de6d848151d5c94233d23ee0bb859b840673f5c
|
[] |
no_license
|
saadullahaleem/twitchstreamviewer
|
d73bb15c8978ae0ef2110000c0c33aae3b569cf9
|
68eca97e974ba728b8a78f1feef630e5596dab69
|
refs/heads/master
| 2022-12-12T00:47:13.982763
| 2020-02-21T10:20:44
| 2020-02-21T10:20:44
| 168,964,033
| 1
| 1
| null | 2022-12-08T01:34:58
| 2019-02-03T16:09:27
|
Python
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
from django import forms
from .models import User
class StreamerForm(forms.ModelForm):
favorite_streamer = forms.CharField(label='Your Favorite Streamer', max_length=100)
class Meta:
model = User
fields = ['favorite_streamer']
|
[
"aleemsaadullah@gmail.com"
] |
aleemsaadullah@gmail.com
|
505e0a1908f4b2835e3edd6eb05f1f81e6a69552
|
62ccd4ceba82cbe22840b4cafad86e948e1c26d7
|
/apps/news/views.py
|
68b506be2a32506a3e6216bb98ee65e3882286f4
|
[] |
no_license
|
eshafik/newsfeed-portal
|
4e27b78c0c91c86f5ee3fe53bd2a2e922268f35e
|
a54ef7fe5b959c6cf22296aad143943b8e0f17f7
|
refs/heads/master
| 2023-07-14T02:12:22.621087
| 2021-08-18T11:11:34
| 2021-08-18T11:11:34
| 397,561,587
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from app_libs.custom_pagination import CustomPagination
from apps.news.utils import get_news
class NewsFeedAPI(APIView):
"""
News Feed APIView for User
URL:/api/v1/news
Method: GET
"""
permission_classes = (IsAuthenticated, )
def get(self, request):
news = get_news(countries=self.request.user.userpreference.country,
source=self.request.user.userpreference.source)
if not news:
return Response(data={"message": "Data not found! You have to set your country and source preference."},
status=status.HTTP_404_NOT_FOUND)
paginator = CustomPagination()
page = paginator.paginate_queryset(news, request)
return paginator.get_paginated_response(page)
|
[
"shafikul@evaly.com.bd"
] |
shafikul@evaly.com.bd
|
a85f26a7cd053860dbfc7f30b95806ad33143a4a
|
2c191b6dc3cabf394208af8c33190840d4ace153
|
/python/utils/utils.py
|
a442f6094932e5724291dba3b3ed8aec4d82038d
|
[] |
no_license
|
mattrero/competitive_programming
|
48b5bf4bebb38dac7ef15364b72ed6c2271e72dd
|
db8f59b58d07ac096a9eb746d202978c63f4bcc3
|
refs/heads/master
| 2022-11-13T02:51:09.033935
| 2020-06-23T16:09:34
| 2020-06-23T16:09:34
| 274,448,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
from collections import defaultdict
import sys
# map(int, list1, list2)
# ord('a')
# chr(97)
import math
def distance(p1, p2):
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))
def distance_path(p1, p2):
x1, y1 = p1
x2, y2 = p2
return abs(x1-x2) + abs(y1-y2)
import queue
class Node:
def __init__(self, node_id, **kwargs):
self.node_id = node_id
for key, value in kwargs.items():
setattr(self, key, value)
def __eq__(self, other):
return self.node_id == other.node_id
def __hash__(self):
return hash(self.node_id)
def __repr__(self):
d = dict(self.__dict__)
del d['node_id']
return str(d)
def __lt__(self, other):
# TODO implement this
return False
class _PathNode:
def __init__(self, score, node, parent = None):
self.score = score
self.node = node
self.parent = parent
def __lt__(self, other):
if self.score == other.score:
self_path = self.get_path()
other_path = other.get_path()  # compare against the other path, not our own
if len(self_path) < len(other_path):
return True
if len(self_path) > len(other_path):
return False
for n1, n2 in zip(self_path, other_path):
if n1 == n2:
continue
return n1 < n2
return False
return self.score < other.score
def get_path(self):
path = [self.node]
current = self
while current.parent:
current = current.parent
path.insert(0, current.node)
return path
def solve_a_star(start, goal_or_eval, get_neighbors):
"""
:param nodes: List of all nodes
:param start:
:param goal_or_eval: Either a goal of type Node or a function receiving a _PathNode
:param get_neighbors: List neighbors of specific node : (score, neighbor)
:return:
"""
if type(goal_or_eval) is Node:
eval_goal = lambda n: n.node == goal_or_eval
else:
eval_goal = goal_or_eval
q = queue.PriorityQueue()
q.put(_PathNode(0, start))
done = set()
while not q.empty():
current = q.get() #type: _PathNode
if eval_goal(current):
return current.get_path()
if current.node in done:
continue
done.add(current.node)
for add_score, neighbor in get_neighbors(current.node):
if neighbor not in done:
q.put(_PathNode(current.score + add_score, neighbor, current))
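# Minimal usage sketch (illustrative): a three-node line graph a - b - c with
# unit edge costs, where neighbours are supplied as (score, neighbor) pairs.
# a, b, c = Node('a'), Node('b'), Node('c')
# edges = {a: [(1, b)], b: [(1, a), (1, c)], c: [(1, b)]}
# solve_a_star(a, c, lambda n: edges[n])  # -> [a, b, c]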
def bruteforce(start,
get_neighbors,
is_valid_path = lambda node_path:len(node_path.get_path()) == len(set(node_path.get_path())),
is_first_best = lambda a,b: a.score < b.score):
q = queue.Queue()
q.put(_PathNode(0, start))
best_path = None
while not q.empty():
current = q.get() #type: _PathNode
if best_path is None or is_first_best(current, best_path):  # the first candidate always wins
best_path = current
for add_score, neighbor in get_neighbors(current.node):
path = _PathNode(current.score + add_score, neighbor, current)
if is_valid_path(path):
q.put(path)
return best_path
|
[
"mattrero@users.noreply.github.com"
] |
mattrero@users.noreply.github.com
|
f118da72e29c89e38ee1dce1d9d61c0a5e0458a3
|
bd6c319dfe739cf9c43539a08d2f2fd4701b2281
|
/RellMarket/rellmarket/rellmarket/urls.py
|
948a8dcb656ee66ceb436382cce11cafb4f49edb
|
[] |
no_license
|
Sherelle20/Mes-minis-projets-Python
|
0eeef417d50631ded7e6907af44e34e5ac2fe4d7
|
7237f755f397c1c1f9fcf8bb15090adff58abccd
|
refs/heads/master
| 2023-08-02T15:47:21.972107
| 2021-06-11T21:11:14
| 2021-06-11T21:11:14
| 242,397,427
| 3
| 0
| null | 2021-09-22T19:35:15
| 2020-02-22T19:13:36
|
Python
|
UTF-8
|
Python
| false
| false
| 848
|
py
|
"""rellmarket URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('market.urls')),
path('social', include('social_django.urls'))
]
|
[
"sherellekana@gmail.com"
] |
sherellekana@gmail.com
|
fa691145cc454830758f1355446dce886a36720f
|
c6850632041713a88725ac6bb6530433ba1157b4
|
/ftc18/urls.py
|
1245612de97bad35326baaf92fc5dcbfdcda08ab
|
[] |
no_license
|
ameklou/ftc18
|
dbc41b23200f2479ee40ff5fa146d22767e0410f
|
a68cc0e36da08e6ce1e7f9c0c9132a04c049efec
|
refs/heads/master
| 2020-03-16T16:24:41.084379
| 2018-05-10T07:47:33
| 2018-05-10T07:47:33
| 132,786,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
"""ftc18 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"amekley@gmail.com"
] |
amekley@gmail.com
|
dbf421074c106bc1d05cce54faa250618bf782e1
|
15f3dc2b77a49978c66d3c9e2626d4aceb7311cd
|
/execute.py
|
3d8d1580c2054a06d85185c7fe20585409d71af6
|
[] |
no_license
|
walk1ng/vlad
|
fecc7d552972dc18107b04cd75d29d09e20b4248
|
bfb7dd4427bb3fe0076d481c9c9f3145cb50a1c5
|
refs/heads/master
| 2021-01-10T20:22:31.831885
| 2015-03-27T09:37:19
| 2015-03-27T09:37:19
| 32,856,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
#! /usr/bin/python
import os
import re
import json
import subprocess
try:
import xml.etree.cElementTree as ET
except ImportError, e:
import xml.etree.ElementTree as ET
tests = os.environ["alltests"]
testConfigXmlName = "tests.xml"
# retrieve test case
def RetrieveCase():
cases_need_run = []
cases = (re.sub('},{','}*{', tests[1:-1])).split('*')
for c in cases:
j = json.loads(c)
casename = j.get('testcase','')
tree = ET.parse(testConfigXmlName)
casecmd = (tree.find('tests/test[@name=' + '"' + casename +'"' + ']'))[0].text
case = {'name' : casename, 'cmd' : casecmd}
cases_need_run.append(case)
return cases_need_run
def Run(case):
casename, casecmd = case['name'], case['cmd']
print
print "*" * 30
print "TEST START: %s" % casename
print "EXECUTE COMMAND: %s" % casecmd
#rtn = os.system(casecmd)
p = subprocess.Popen(casecmd,shell=True)
rtn = p.wait()  # wait for the test to finish; returncode is None until the process exits
result=""
if rtn != 0:
result = "FAILED"
else:
result = "PASS"
print "TEST %s: %s" % (result, casename)
print "*" * 30
print
def ExecuteTest():
cases_need_run = RetrieveCase()
print "Below tests will be run:"
for c in cases_need_run:
print c['name']
print
for c in cases_need_run:
Run(c)
ExecuteTest()
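# Assumed inputs (illustrative): the "alltests" environment variable holds a
# JSON-style list such as [{"testcase":"case1"},{"testcase":"case2"}], and
# tests.xml nests each command as the first child of a named test element:
# <root><tests><test name="case1"><cmd>./run_case1.sh</cmd></test></tests></root>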
|
[
"v-lii@OSTC-Wei.fareast.corp.microsoft.com"
] |
v-lii@OSTC-Wei.fareast.corp.microsoft.com
|
a199e92bc39388de339beabe46b9635f7607472f
|
d3343d11764b98746e8d95865187463120c33c94
|
/tests/karaoke_bar_test.py
|
c62d7111e947eca7244299ba128fc483ed65453a
|
[] |
no_license
|
randomlyalex-codeclan/CCC
|
e9bddb13bcbd3ef042b3b1297b7d40942affb8ed
|
3b3b51ae9fbe3a1fc64ada2c043e95b612549433
|
refs/heads/main
| 2023-01-21T10:56:03.515817
| 2020-11-30T22:57:22
| 2020-11-30T22:57:22
| 316,791,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,067
|
py
|
import unittest
from src.karaoke_bar import KaraokeBar
from src.song import Song
from src.room import Room
from src.guest import Guest
class TestKaraokeBar(unittest.TestCase):
def setUp(self):
# rooms setup
self.room001 = Room("Room 1", 10)
self.room002 = Room("Room 2", 5)
self.room003 = Room("Room 3", 20)
self.room004 = Room("Room 4", 4) # the self within the unittesting really throws me, i think i need further talk over this.
self.room005 = Room("Room 5", 7)
self.room006 = Room("Room 6", 15)
self.front_desk = Room("Front Desk", 30)
# setup a main test bar with rooms
self.tone_deaf = KaraokeBar("Tone Deaf", [
self.front_desk,
self.room001,
self.room002,
self.room003,
self.room004,
self.room005,
self.room006
]
)
# guests setup
self.mark = Guest("Mark")
self.ben = Guest("Ben", 1)
self.jane = Guest("Jane", 2, "Wonderwall", 100.00)
self.peter = Guest("Peter")
self.john = Guest("John", 3)
self.sally = Guest("Sally")
self.dan = Guest("Dan")
# songs setup
self.song001 = Song("Shake It Off", "Taylor Swift", "2000", "Pop", 150)
self.song002 = Song("Stronger", "Kelly Clarkson", "2000", "Pop", 145)
self.song003 = Song("I Will Survive", "Gloria Gaynor", "1970", "Disco", 140)
self.song004 = Song("It’s Raining Men", "The Weather Girls", "1980", "Disco", 150)
self.song005 = Song("Single Ladies", "Beyoncé", "2000", "R&B", 157)
self.song006 = Song("Like a Virgin", "Madonna", "1980", "Pop", 150)
self.song007 = Song("Wrecking Ball", "Miley Cyrus", "2010", "Pop", 150)
self.song008 = Song("Emotions", "Mariah Carey", "1990", "Pop", 150)
self.song009 = Song("Rehab", "Amy Winehouse", "2000", "Indie", 167)
self.song010 = Song("Black Velvet", "Alannah Myles", "1980", "Country", 150)
self.song011 = Song("Son of a Preacher Man","Dusty Springfield,", "1990", "Pop", 134)
self.song012 = Song("Sweet Caroline", "Neil Diamond", "1990", "Rock", 150)
self.song013 = Song("Don’t Stop Believin’","Journey", "1980", "Rock", 150)
self.song014 = Song("Bohemian Rhapsody", "Queen", "1970", "Rock", 152)
self.song015 = Song("Wonderwall", "Oasis", "1990", "Pop", 136)
self.song016 = Song("My Way", "Frank Sinatra", "1960", "Pop", 150)
self.song017 = Song("I Wanna Be Sedated","the Ramones", "1970", "Rock", 194)
self.song018 = Song("Losing My Religion", "R.E.M.", "1990", "Pop", 150)
self.song019 = Song("Never Gonna Give You Up","Rick Astley", "1980", "Pop", 150)
self.song020 = Song("Mack the Knife", "Bobby Darin", "1955", "Pop", 150)
self.song021 = Song("If I Was Your Girlfriend","Prince", "1980", "Funk", 175)
self.song022 = Song("500 Miles", "The Proclaimers","1980", "Classic", 151)
self.song023 = Song("These Boots Are Made for Walking","Nancy Sinatra", "1960", "Classic", 150)
self.song024 = Song("Crazy", "Patsy Cline", "1960", "Classic", 150)
self.song025 = Song("Happy", "Pharrell Williams","2010", "Classic", 150)
self.song026 = Song("Copacabana", "Barry Manilow","1970", "Classic", 150)
self.song027 = Song("That’s the Way (I Like It)","KC and the Sunshine Band", "1990", "Classic", 150)
self.song028 = Song("Celebration", "Kool and the Gang", "1960", "Classic", 167)
self.song029 = Song("Funkytown", "Lipps, Inc", "1970", "Classic", 123)
self.song030 = Song("Don’t Worry, Be Happy","Bobby McFerrin", "1960", "Classic", 143)
self.song031 = Song("Eye of the Tiger", "Survivor","1980", "Classic", 150)
# Test room name exists
def test_room_name_exists(self):
self.assertEqual("Front Desk", self.tone_deaf.rooms_list[0].name)
# Test room creation
def test_room_creation(self):
test_room_1 = Room("Test Name 1", 100)
test_room_2 = Room("Galaxy far far away", 1000)
test_karaoke_bar = KaraokeBar("All ears", [test_room_1, test_room_2])
self.assertEqual("Galaxy far far away",
test_karaoke_bar.rooms_list[1].name)
# Test search for guest
def test_search_guest_return_room(self):
self.tone_deaf.rooms_list[0] = self.room001
self.tone_deaf.rooms_list[1] = self.room002
self.tone_deaf.rooms_list[2] = self.room003
self.tone_deaf.rooms_list[1].occupants.append(self.mark)
self.assertEqual(self.room002, self.tone_deaf.search_for_guest(self.mark))
# Test add a guest to a room they aren't in
def test_add_remove_guest__to_empty_room(self):
self.assertEqual("Added to Room 2", self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.room002))
self.assertEqual(self.room002, self.tone_deaf.search_for_guest(self.peter))
# test adding 7 guests spread across several rooms
def test_add_7_people_to_a_room(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.ben, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.room003)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.john, self.room004)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.room005)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.sally, self.room006)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.dan, self.room002)
self.assertEqual(self.room002, self.tone_deaf.search_for_guest(self.mark))
# Test add the same guest twice, which should remove them.
def test_add_remove_guest__to_room_with_them_in(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, self.room001)
self.assertEqual("Removed from Room 1", self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, None))
#def test_add_guest_to_full_room(self): - ext
def test_add_guest_to_full_room(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.ben, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.john, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.sally, self.room002) # room is already at capacity (5),
self.tone_deaf.add_remove_guest_to_room_by_guest(self.dan, self.room002) # so neither of these should be added
self.assertEqual(5,len(self.tone_deaf.roll_call()))
# move guests between rooms test
def test_move_guests_between_rooms(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.ben, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.john, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.room002)
self.assertEqual(False, self.tone_deaf.move_guests_between_rooms(self.room002, self.room004)) # this should be rejected
self.assertEqual(True, self.tone_deaf.move_guests_between_rooms(self.room002, self.front_desk)) # but then this should be allowed
self.assertEqual(True, self.tone_deaf.move_guests_between_rooms(self.front_desk, self.room005)) # but then this should be allowed
# Test Add a song to a room it isn't in
def test_add_song__to_room_by_song(self):
self.tone_deaf.add_remove_song_to_room_by_song(self.song016, self.room002)
self.assertEqual(True, self.song016 in self.tone_deaf.rooms_list[2].songs_list)
# Test the same with a few songs
def test_add_a_few_songs__to_room_by_song(self):
self.tone_deaf.add_remove_song_to_room_by_song(self.song010, self.room003)
self.assertEqual("Added Shake It Off",self.tone_deaf.add_remove_song_to_room_by_song(self.song001, self.room003))
self.tone_deaf.add_remove_song_to_room_by_song(self.song016, self.room002)
self.assertEqual(True, self.song016 in self.tone_deaf.rooms_list[2].songs_list)
# Test Remove a song from a room it is already in
def test_remove_song__to_room_by_song(self):
self.tone_deaf.add_remove_song_to_room_by_song(self.song010, self.room003)
self.tone_deaf.add_remove_song_to_room_by_song(self.song001, self.room003)
self.tone_deaf.add_remove_song_to_room_by_song(self.song016, self.room002)
self.assertEqual("Removed My Way", self.tone_deaf.add_remove_song_to_room_by_song(self.song016, self.room002))
self.assertEqual(False, self.song016 in self.tone_deaf.rooms_list[2].songs_list)
# Test a roll call of guests
def test_roll_call_of_guests(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, self.room001)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.ben, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.room003)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.john, self.room004)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.room005)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.sally, self.room006)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.dan, self.room002)
self.assertEqual(7,len(self.tone_deaf.roll_call()))
# Test add a guest with friends to a room
# Test room empty
def test_empty_room(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, self.room001)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.ben, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.john, self.room004)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.room005)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.sally, self.room006)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.dan, self.room002)
self.assertEqual(7,len(self.tone_deaf.roll_call()))
self.tone_deaf.empty_room(self.room002)
self.assertEqual(4,len(self.tone_deaf.roll_call()))
# Test list of Empty rooms
def test_return_empty_rooms(self):
self.assertEqual(6,len(self.tone_deaf.find_empty_rooms()))
# test empty rooms after adding and removing and then emptying a room
def test_return_empty_rooms_better(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.mark, self.room001)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.ben, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.room002)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.john, self.room004)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.room005)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.sally, self.room006)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.dan, self.room002)
self.tone_deaf.empty_room(self.room002)
self.assertEqual([self.room002,self.room003],self.tone_deaf.find_empty_rooms())
#favourite song test
def test_guests_fav_song_is_in_the_room(self):
self.tone_deaf.add_remove_song_to_room_by_song(self.song010, self.room005)
self.tone_deaf.add_remove_song_to_room_by_song(self.song001, self.room005)
self.tone_deaf.add_remove_song_to_room_by_song(self.song016, self.room005)
self.tone_deaf.add_remove_song_to_room_by_song(self.song015, self.room005)
self.assertEqual("Whoo! Fav Track!",self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.room005))
# test main guest paying for 1 hour
def test_guest_paying_for_1_hour(self):
self.tone_deaf.add_remove_guest_to_room_by_guest(self.peter, self.front_desk)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.john, self.front_desk)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.jane, self.front_desk)
self.tone_deaf.add_remove_guest_to_room_by_guest(self.sally, self.front_desk)
self.tone_deaf.pay_for_time_and_room(self.jane, 1, self.room005, self.front_desk)
self.assertEqual(60.00, self.jane.wallet)
self.assertEqual(40.00, self.tone_deaf.till)
self.assertEqual(4, len(self.room005.occupants))
# Test adding a list of song IDs to a room
# Test checking the song list's total time, and don't allow more songs than the room's remaining time
# Test Add a genre to a room
# Test Add an artist to a room
# Test add a decade to a room
# Test adding a year group that doesn't exist
# Test itemised_tab as a dictionary of key: guest / value: money owed; its total feeds the current tab attribute
# test buying food/drink
|
[
"alexdodd@gmail.com"
] |
alexdodd@gmail.com
|
30318fa7ac460f2de3a4584845693a5c0513a403
|
abad7ebe878cfd15b68713d79be02afdc0327f87
|
/scripts/combine_reports.py
|
12ebdc252e2dd3753f7953ffdc955270a287a03c
|
[
"BSD-2-Clause"
] |
permissive
|
chrisspen/action-optimizer
|
22870a0827f5c9bfaed737046349c109aa097ec0
|
1bbc1b85452cbba9af6cad77aff96d189d783d9d
|
refs/heads/master
| 2023-08-17T20:48:51.940798
| 2023-07-18T17:05:51
| 2023-07-18T17:05:51
| 180,441,930
| 0
| 0
|
BSD-2-Clause
| 2023-07-06T23:25:13
| 2019-04-09T20:11:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,256
|
py
|
#!/usr/bin/env python
import os
from datetime import date
from collections import OrderedDict
import pathlib
from pyexcel_ods import save_data
from pandas_ods_reader import read_ods
current_path = pathlib.Path(__file__).parent.resolve()
pcc_path = os.path.abspath(f'{current_path}/../reports/{date.today()}/pcc.ods')
print(f'Using PCC path: {pcc_path}')
assert os.path.isfile(pcc_path)
ens_path = os.path.abspath(f'{current_path}/../reports/{date.today()}/ensemble.ods')
print(f'Using ENS path: {ens_path}')
assert os.path.isfile(ens_path)
pcc_df = read_ods(pcc_path, 1, headers=True)
ens_df = read_ods(ens_path, 1, headers=True)
feature_values = {} # {name: [values]}
pcc_confidences = {}
ens_confidences = {}
pcc_values = {}
ens_values = {}
for row in pcc_df.to_dict(orient='records'):
name = row['name']
confidence = row['pcc_zero']
best_value = row['best_value']
feature_values.setdefault(name, [])
feature_values[name].append(confidence)
pcc_values[name] = best_value
pcc_confidences[name] = confidence
for row in ens_df.to_dict(orient='records'):
name = row['name']
confidence = row['confidence']
best_value = row['best value']
if not best_value:
confidence = 1 - confidence
feature_values.setdefault(name, [])
feature_values[name].append(confidence)
ens_values[name] = best_value
ens_confidences[name] = confidence
final_values = [] # [(score, name)]
for name, confidences in feature_values.items():
mean_confidence = sum(confidences) / len(confidences)
final_values.append((mean_confidence, name))
final_values.sort()
for conf, name in final_values:  # tuples are (score, name)
print(name, conf)
combine_fn = os.path.abspath(f'{current_path}/../reports/{date.today()}/combine-{date.today()}.ods')
print(f'Saving {combine_fn}.')
data = OrderedDict()
data.update({"Sheet 1": [
['name', 'score', 'pcc_conf', 'ens_conf', 'human_conf', 'agg_conf', 'pcc_value', 'ens_value']] + \
[
[name, conf, pcc_confidences.get(name, 0.5), ens_confidences.get(name, 0.5), 0.5, f'=(C{i}+D{i}+E{i})/3',
pcc_values.get(name, ''), ens_values.get(name, '')] for i, (conf, name) in enumerate(final_values, 2)]
})
save_data(combine_fn, data)
print(f'Saved {combine_fn}.')
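# Note on the embedded formula: in the saved sheet, columns C, D and E hold
# pcc_conf, ens_conf and human_conf, so agg_conf (column F) is written as a
# live ODS formula averaging the three, starting from spreadsheet row 2.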
|
[
"chrisspen@gmail.com"
] |
chrisspen@gmail.com
|
39224ae101ec103e520dac01bb8256d5729e040b
|
0c325cf7a68ef51067ed8db566d525a20de5b635
|
/other/panda365/panda365/pd/payment/payssion/tests/test_notify.py
|
7777e556adf2c9c1a0088674cffd872501ca4946
|
[] |
no_license
|
alinzel/NOTES
|
2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241
|
3e0594641a605580e920d0b08a251fbc99f34e2f
|
refs/heads/master
| 2023-01-08T22:48:30.762625
| 2020-01-17T09:14:47
| 2020-01-17T09:14:47
| 175,339,492
| 0
| 0
| null | 2022-12-27T15:01:19
| 2019-03-13T03:28:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
import pytest
from pd.payment.models import PaymentStatus
from pd.payment.payssion.client import PaymentStatus as PayssionPaymentStatus
URL = '/v1/payments/payssion/notify'
@pytest.fixture
def post_data(payment):
return {
'pm_id': payment.method,
'transaction_id': payment.transaction_id,
'order_id': payment.ref_id,
'amount': str(payment.amount),
'currency': str(payment.currency),
'state': PayssionPaymentStatus.pending.name,
'notify_sig': 'notify_sig',
}
def test_notify(
client, payment, post_data, mock_signals, mock_payssion_notify_signature,
):
post_data['state'] = PayssionPaymentStatus.completed.name
for i in range(2): # should be idempotent for the same input
resp = client.post(URL, data=post_data)
assert resp.status_code == 200, resp.json
assert payment.status == PaymentStatus.success
assert mock_signals.payment_succeeded.send.call_count == 1
def test_notify_pending(
client, payment, post_data, mock_payssion_notify_signature,
):
resp = client.post(URL, data=post_data)
assert resp.status_code == 200
assert payment.status == PaymentStatus.pending
def test_notify_no_such_payment(
client, post_data, payment, mock_payssion_notify_signature,
):
post_data['order_id'] = '123'
post_data['state'] = PayssionPaymentStatus.completed.name
resp = client.post(URL, data=post_data)
assert resp.status_code == 404
def test_notify_amount_mismatch(
client, post_data, payment, mock_payssion_notify_signature,
):
post_data['amount'] = '1000000'
post_data['state'] = PayssionPaymentStatus.completed.name
resp = client.post(URL, data=post_data)
assert resp.status_code == 200 # should ack the notification
# should mark the status to be error
assert payment.status == PaymentStatus.error
def test_notify_currency_mismatch(
client, post_data, payment, mock_signals, mock_payssion_notify_signature
):
post_data['currency'] = 'MYR'
post_data['state'] = PayssionPaymentStatus.completed.name
resp = client.post(URL, data=post_data)
assert resp.status_code == 200 # should ack the notification
# should mark the status to be error
assert payment.status == PaymentStatus.error
assert mock_signals.payment_error.send.called
def test_invalid_signature(client, post_data):
resp = client.post(URL, data=post_data)
assert resp.status_code == 422
def test_invalid_state(client, post_data, mock_payssion_notify_signature):
post_data['state'] = 'a new state payssion defined but have not told us'
resp = client.post(URL, data=post_data)
assert resp.status_code == 422
def test_bad_request(client, post_data, mock_payssion_notify_signature):
for k in post_data:
data = dict(post_data)
data.pop(k)
resp = client.post(URL, data=data)
assert resp.status_code == 422
|
[
"944951481@qq.com"
] |
944951481@qq.com
|
b22bd1e8b2d7b82847082af4a842da0be1e342f3
|
0d02a984525be692c39644391aff56e248f8aa02
|
/2021/2021-01/beginner/solutions/power.py
|
1f8ecda2d1b6025dfc4726c89cc70764c36de054
|
[
"Unlicense"
] |
permissive
|
PythonAberdeen/user_group
|
19f9cb3ec37dafdeb3d4273b83a9aab6b10edea9
|
e0c4c58fe72a48e3f58df687493c84c8ebd71657
|
refs/heads/master
| 2023-07-20T00:01:43.531887
| 2023-07-12T18:56:01
| 2023-07-12T18:56:01
| 208,131,601
| 15
| 28
|
Unlicense
| 2023-06-15T14:46:55
| 2019-09-12T19:34:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 769
|
py
|
def parse_power_log(fn="../power.txt"):
with open(fn) as f:
log = [float(l.split()[3]) for l in f if l.strip()]  # skip blank lines, which would raise IndexError
return log
def on_off_points(log, cutoff):
for i in range(1, len(log)):
if log[i] < cutoff and log[i-1] > cutoff:
print(f"Switch off at {i}")
if log[i] > cutoff and log[i-1] < cutoff:
print(f"Switch on at {i}")
def average(l):
return sum(l) / len(l)
if __name__=="__main__":
log = parse_power_log()
avg = average(log)
on_off_points(log, avg)
on_vals = [p for p in log if p > avg]
off_vals = [p for p in log if p < avg]
lights_curr = average(on_vals) - average(off_vals)
print(f"Average current draw for lights: {lights_curr:.3f}A ({12*lights_curr:.3f}W)")
|
[
"rmcw@allmail.net"
] |
rmcw@allmail.net
|
61d209278088381d9794b82ef2cf7fe96f31b107
|
c8c95520fb1a17d627a0256df2c6702f4f53403a
|
/12.11.2b_exercises.py
|
2f50c9eb07dbc5061e36c13b9437317825bb656e
|
[] |
no_license
|
wsargeant/httlacs
|
608f1b4b34c95f18f934f10883beb56a2895c269
|
3337b369c541e18d5ed9ecbca35494c3ebcfa591
|
refs/heads/master
| 2023-02-21T21:10:03.228762
| 2021-01-25T08:44:46
| 2021-01-25T08:44:46
| 284,936,152
| 0
| 0
| null | 2020-08-28T10:35:17
| 2020-08-04T09:32:38
| null |
UTF-8
|
Python
| false
| false
| 61
|
py
|
import math
print(math.ceil(4.506))
print(math.floor(4.506))
|
[
"69194027+wsargeant@users.noreply.github.com"
] |
69194027+wsargeant@users.noreply.github.com
|
4c4680255beda441d17753ed435c8838e7bf9df1
|
d5ed6362732d31d07ddac3d0aef53910aa85c64a
|
/djangochallenge/manage.py
|
6954e89bc4ab42cac604e85c6d5d0c0190fea84d
|
[] |
no_license
|
mhdi01/django-challenge
|
9a44fde57d9115b3570b23b8c370d772e8571419
|
73c34dd7a86ac7e77f75745cb9207a686fd43325
|
refs/heads/main
| 2023-08-14T20:38:19.242783
| 2021-10-04T20:59:47
| 2021-10-04T20:59:47
| 413,062,548
| 0
| 0
| null | 2021-10-03T11:49:31
| 2021-10-03T11:49:31
| null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangochallenge.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"mhdi.gh75@gmail.com"
] |
mhdi.gh75@gmail.com
|
adf2b60a46576ac2f2a2186669ab4ddc1c820f4a
|
1de899061b0c7150791a4425b54d01ef0a8860c3
|
/itinerary/urls.py
|
483ca8f64f6338b3a1398b4163c206603dfad900
|
[] |
no_license
|
sjkingo-archive/itinerary-app
|
396878618ed28f6a4bc2fbcdeb94d11c205c8cf0
|
477e1735879fdac249594d2565cedb25c8353646
|
refs/heads/master
| 2020-04-16T04:26:32.617898
| 2013-06-07T23:02:05
| 2013-06-07T23:02:05
| 10,558,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('transit.urls')),
)
|
[
"sam@sjkwi.com.au"
] |
sam@sjkwi.com.au
|
4d22e9c0f1e4ff6ebebe1e642c3f6c3cec3d46ad
|
8a739f5a4a2e87103a6a4e723642d6bde00176d7
|
/311_Solution/swagger_client/models/internal_model.py
|
da7a53b445796971116e570ae8221de52e5c9d14
|
[] |
no_license
|
zhangwexxx/zhangwexxx
|
ec857192061afa0804bf0426ed70b6843e924610
|
41de08f7618cac88785f4932c0de7c1c6140a4ab
|
refs/heads/main
| 2023-04-21T18:01:10.023673
| 2021-05-10T16:32:15
| 2021-05-10T16:32:15
| 366,102,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,158
|
py
|
# coding: utf-8
"""
Speech to Text API v3.0
Speech to Text API v3.0. # noqa: E501
OpenAPI spec version: v3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class InternalModel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'storage_prefix': 'str',
'_self': 'str'
}
attribute_map = {
'storage_prefix': 'storagePrefix',
'_self': 'self'
}
def __init__(self, storage_prefix=None, _self=None, _configuration=None): # noqa: E501
"""InternalModel - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._storage_prefix = None
self.__self = None
self.discriminator = None
if storage_prefix is not None:
self.storage_prefix = storage_prefix
if _self is not None:
self._self = _self
@property
def storage_prefix(self):
"""Gets the storage_prefix of this InternalModel. # noqa: E501
:return: The storage_prefix of this InternalModel. # noqa: E501
:rtype: str
"""
return self._storage_prefix
@storage_prefix.setter
def storage_prefix(self, storage_prefix):
"""Sets the storage_prefix of this InternalModel.
:param storage_prefix: The storage_prefix of this InternalModel. # noqa: E501
:type: str
"""
self._storage_prefix = storage_prefix
@property
def _self(self):
"""Gets the _self of this InternalModel. # noqa: E501
The location of this entity. # noqa: E501
:return: The _self of this InternalModel. # noqa: E501
:rtype: str
"""
return self.__self
@_self.setter
def _self(self, _self):
"""Sets the _self of this InternalModel.
The location of this entity. # noqa: E501
:param _self: The _self of this InternalModel. # noqa: E501
:type: str
"""
self.__self = _self
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InternalModel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InternalModel):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InternalModel):
return True
return self.to_dict() != other.to_dict()
|
[
"wei.zhang@ottawa.ca"
] |
wei.zhang@ottawa.ca
|
812bb42913c37911176f71ead7157dd0be155e2c
|
259a3724e54989c2a89071495328b29198d3cdde
|
/avcon-service/src/controllers/convert_controller.py
|
6b13b5489881c6825924042003e86b8423ec6ea1
|
[] |
no_license
|
pablovicz/avcon
|
14f5f9d788978cf82e91f966368a96d8af661a66
|
91f0a3e0980493d7fc16275a2314aa8e4077875f
|
refs/heads/main
| 2023-08-14T04:15:49.705222
| 2021-09-13T10:54:55
| 2021-09-13T10:54:55
| 400,773,257
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,828
|
py
|
from src.utils.converters import AudioMediaConverter, VideoMediaConverter
import src.utils.application_utils as ut
from flask import make_response
from werkzeug.utils import secure_filename
import base64
import shutil
import time
import os
class ConvertController:
def __init__(self, request):
self.__media_type = request.form.get("media_type")
self.__source = request.form.get("source")
self.__target = request.form.get("target")
self.__file = request.files['file']
self.__original_filename = request.form.get("filename")
self.__filename = secure_filename(self.__original_filename)
#print(f'{self.__original_filename} ({self.__media_type}): {self.__source} -> {self.__target}')
self.__source_full_path = f'{ut.get_file_path("source")}.{self.__source}'
self.__converted_full_path = f'{ut.get_file_path("converted")}.{self.__target}'
self.__create_temp_path()
def execute(self):
self.__download_request_file()
self.__convert()
def __convert(self):
if self.__media_type == 'audio':
AudioMediaConverter(self.__source, self.__target).convert()
if self.__media_type == 'video' and self.__target.upper() in ["MP3", "WAV", "OGG", "FLAC", "AC3"]:
AudioMediaConverter(self.__source, self.__target).convert()
if self.__media_type == 'video' and self.__target.upper() in ["MP4", "MPEG", "FLV", "AVI", "MOV", "MKV", "WMV"]:
VideoMediaConverter(self.__source, self.__target).convert()
def get_response(self):
response = None
if os.path.exists(self.__converted_full_path):
response = make_response()
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.set('Content-Type', 'application/octet-stream')
response.headers['filename'] = f'{self.__filename.split(".")[0]}.{self.__target}'
response.data = self.__codec_file()
self.__clean_temp_path()
return response
def __download_request_file(self):
if self.__file and self.__allowed_file(self.__original_filename):
self.__file.save(self.__source_full_path)
while not os.path.exists(self.__source_full_path):
time.sleep(0.5)
def __codec_file(self):
with open(self.__converted_full_path, 'rb') as fh:
return base64.b64encode(fh.read())
@staticmethod
def __create_temp_path():
if not os.path.exists(ut.get_temp_path()):
os.system(f'mkdir {ut.get_temp_path()}')
@staticmethod
def __clean_temp_path():
shutil.rmtree(ut.get_temp_path())
@staticmethod
def __allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].upper() in ut.ALLOWED_EXTENSIONS
|
[
"pablowoina2205@gmail.com"
] |
pablowoina2205@gmail.com
|
a9e95064605cc0e71483b567144732c698fda5d7
|
611ce9c6023c2a3b63692c9357e04fb18b866090
|
/server.py
|
d22bde1865d5d2ff98cdb0429df0bc58433fd516
|
[] |
no_license
|
ritiksatokar/Dotrix
|
8397cf76ddcf327a37f80b230c6bc439da397663
|
2e0316be5d18596da3090393c4b5b01748ca38ac
|
refs/heads/master
| 2023-05-14T20:57:24.327463
| 2021-06-02T14:06:47
| 2021-06-02T14:10:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/joingame', methods=['GET'])
def join_room():
game_id = request.args.get('gameid', False)
print(game_id)
return {'msg': f'Congratulations! You are eligible to join the room {game_id}'}
@app.route('/newgame', methods=['POST'])
def create_new_game():
request_json = request.get_json()
dot_rows = request_json.get('rows', False)
dot_cols = request_json.get('columns', False)
print(f'{dot_rows} x {dot_cols}')
return {'game':f'this is your game is this and size is {dot_rows} x {dot_cols}'}
if __name__ == "__main__":
app.run(debug=True, port=5000)
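# Illustrative requests against the local dev server (URLs assume the defaults above):
# curl "http://localhost:5000/joingame?gameid=42"
# curl -X POST -H "Content-Type: application/json" -d '{"rows": 5, "columns": 5}' http://localhost:5000/newgame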
|
[
"sagar.singh0304@gmail.com"
] |
sagar.singh0304@gmail.com
|
234ceac2e5ea146e47aca95a83b2a88c2b7a9d6c
|
78b7c97a9020bd8bcd4f8cc10c3cf4efddeb00fd
|
/11_if_elif_else.py
|
781b9d5c3ccb14065418f7d04d760be8f23fcef1
|
[] |
no_license
|
9Brothers/Learn.Python
|
ca69269741fa286d8a5442708674de4c76589c65
|
af430564d0be8f05663167d3c26ca84fda897dc8
|
refs/heads/master
| 2021-07-18T17:57:47.238256
| 2020-05-15T17:05:37
| 2020-05-15T17:05:37
| 160,976,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
# a = 1
# b = 1
# bol = b is a
# if a is 2 :
# a += 10
# print(a)
notas = [10, 9, 8, 9, 5, 10, 7]
for nota in notas:
    if nota >= 9:
        print("Student with grade {n} passed".format(n=nota))
    elif nota >= 7:
        print("Student with grade {n} is in remediation".format(n=nota))
    else:
        print("Student with grade {n} failed".format(n=nota))
|
[
"heber.sousa@viavarejo.com.br"
] |
heber.sousa@viavarejo.com.br
|