blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6b153bf96ce71b451479b1de85e5d447c280b49 | d848bfea4045d3a4844298aec39e046d1345318a | /catkin_ws/devel/lib/python2.7/dist-packages/unitree_legged_msgs/msg/_IMU.py | 4b189cbbb6583edf67f84b6c1eb127f18b32b696 | [] | no_license | KyleM73/pronto | 37730ec478b9642ff616d61a6c50b1086f547c0f | 819f87fbc39004293413fb9fc137cdbce238f0db | refs/heads/main | 2023-06-02T08:51:44.126947 | 2021-06-18T20:50:09 | 2021-06-18T20:50:09 | 378,260,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | /home/ooboontoo/catkin_ws/devel/.private/unitree_legged_msgs/lib/python2.7/dist-packages/unitree_legged_msgs/msg/_IMU.py | [
"kyle.morgenstein@gmail.com"
] | kyle.morgenstein@gmail.com |
70243d88dd542f055a340a1e310647eb9f0dc9be | 18810b28505bf2625b40ebd9a673548203616261 | /Job Scraper/apply.py | 71784c63fa34df09b85c8e58f8fd794768b8ecd3 | [] | no_license | hzbrz/Scrapers | b7af43172218da32f0fcf605249803f082a8ec56 | 419341badcf6b0bc1fd01c403837c65ccb0492b5 | refs/heads/master | 2021-07-13T09:26:00.313902 | 2020-06-04T06:29:20 | 2020-06-04T06:29:20 | 136,759,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | def easy_app(easy_app_element):
print(easy_app_element) | [
"hasanthedev@gmail.com"
] | hasanthedev@gmail.com |
485c81b87d1eebfd760738dacd40b66dfdd2c946 | 74c8c890de1522cff4b8a30b54d801bbc440265f | /Desafios/desafio029.py | a1602a393ace223e763099e9b3764f43c6bcbb62 | [] | no_license | fndalemao/Python | b302d90bdc190c61304868b5c8906e723830220c | d402dc2896ece28e2f46771b279c85ff0296067f | refs/heads/master | 2020-04-03T15:48:18.435243 | 2019-04-11T04:01:06 | 2019-04-11T04:01:06 | 155,301,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | vel = int(input('Digite sua velocidade: '))
# Fine: R$ 7 for every km/h above the 80 km/h speed limit.
if vel > 80:
    print('Você foi multado em {} reais!'.format((vel-80)*7))
else:
print('Você está dentro do limite de velocidade!') | [
"fnd.alemao02@gmail.com"
] | fnd.alemao02@gmail.com |
96f6f306ef276af2e33a2bfaa7d6b1e25cb524ae | 516ddb388a2f32fa5faf2877d0842e8ad6f02550 | /task/bin/pip3 | 30d052acbb62a0461430fcc21f80a3c689d18f10 | [] | no_license | md131376st/task6 | cf49b770c29d51a5abaaa737b089fbce02210d38 | 7ddc384bcb2f700b10cbf2e2d19ecee899b3689e | refs/heads/master | 2020-03-28T12:21:30.112278 | 2018-09-18T16:17:47 | 2018-09-18T16:17:47 | 148,290,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | #!/Users/md/summer/task6/task/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix so pip sees its
    # own name in argv[0], then run the registered console_scripts callable.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"mona131376st@gmail.com"
] | mona131376st@gmail.com | |
3a9884fb534bd51716b75014723d49e7b5590761 | 59c55725576bbf0e2f6617507ba2f1db639abb3f | /analytic_billing_plan/wizard/analytic_billing_plan_line_make_sale.py | 921890470694b17358282339a41cfc55af455bcf | [] | no_license | bmya/eficent-odoo-addons | e3426ebaf1f59e52726253fc1dd36a09d9363059 | 5d8ddfa384ab4417f42bda103b71d926848035f6 | refs/heads/7.0 | 2021-01-21T16:48:55.312452 | 2015-11-04T14:11:19 | 2015-11-04T14:11:19 | 45,649,141 | 1 | 3 | null | 2015-11-06T00:35:17 | 2015-11-06T00:35:17 | null | UTF-8 | Python | false | false | 11,703 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class analytic_billing_plan_line_make_sale(orm.TransientModel):
    """Wizard that turns the selected analytic billing plan lines into
    one sale order (one order line per plan line)."""
    _name = "analytic.billing.plan.line.make.sale"
    _description = "Analytic billing plan line make sale"
def _get_order_lines(self, cr, uid, context=None):
"""
Returns the order lines associated to the analytic accounts selected.
"""
if context is None:
context = {}
record_ids = context and context.get('active_ids', False)
if record_ids:
order_line_ids = []
line_plan_obj = self.pool.get('analytic.billing.plan.line')
for line in line_plan_obj.browse(cr, uid, record_ids,
context=context):
for order_line in line.order_line_ids:
order_line_id = order_line and order_line.id
order_line_ids.extend([order_line_id])
if order_line_ids:
return order_line_ids
return False
def _get_default_shop(self, cr, uid, context=None):
company_id = self.pool.get('res.users').browse(
cr, uid, uid, context=context).company_id.id
shop_ids = self.pool.get('sale.shop').search(
cr, uid, [('company_id', '=', company_id)], context=context)
if not shop_ids:
raise osv.except_osv(_('Error!'),
_('There is no default shop '
'for the current user\'s company!'))
return shop_ids[0]
_columns = {
'order_line_ids': fields.many2many('sale.order.line',
'make_sale_order_line_rel',
'order_line_id',
'make_sale_order_id'),
'shop_id': fields.many2one('sale.shop', 'Shop', required=True),
'invoice_quantity': fields.selection([('order',
'Ordered Quantities')],
'Invoice on',
help="The sales order will "
"automatically create the "
"invoice proposition "
"(draft invoice).",
required=True),
'order_policy': fields.selection([('manual', 'On Demand')],
'Create Invoice',
help="""This field controls how
invoice and delivery
operations are synchronized.""",
required=True),
}
_defaults = {
'order_line_ids': _get_order_lines,
'shop_id': _get_default_shop,
'order_policy': 'manual',
'invoice_quantity': 'order',
}
    def make_sales_orders(self, cr, uid, ids, context=None):
        """Create one sale order from the selected billing plan lines.

        All selected lines must share the same customer, company and
        analytic account; a single sale order is created on the first
        line, then one order line per billing plan line is added and
        linked back to its plan line.

        :param ids: ids of this wizard record (only the first is used)
        :return: an ``ir.actions.act_window`` dict listing the created
                 sale order lines
        :raise osv.except_osv: when a line has no customer, or the lines
            mix customers, companies or analytic accounts
        """
        if context is None:
            context = {}
        record_ids = context and context.get('active_ids', False)
        make_order = self.browse(cr, uid, ids[0], context=context)
        res = []
        if record_ids:
            billing_plan_obj = self.pool.get('analytic.billing.plan.line')
            order_obj = self.pool.get('sale.order')
            order_line_obj = self.pool.get('sale.order.line')
            partner_obj = self.pool.get('res.partner')
            acc_pos_obj = self.pool.get('account.fiscal.position')
            list_line = []
            # False doubles as "not seen yet" for the consistency checks below.
            customer_data = False
            company_id = False
            sale_id = False
            account_id = False
            for line in billing_plan_obj.browse(cr, uid, record_ids,
                                                context=context):
                uom_id = line.product_uom_id
                if not line.customer_id:
                    raise osv.except_osv(
                        _('Could not create sale order !'),
                        _('You have to enter a customer.'))
                # All selected lines must belong to the same customer.
                if customer_data is not False \
                        and line.customer_id != customer_data:
                    raise osv.except_osv(
                        _('Could not create sale order !'),
                        _('You have to select lines '
                          'from the same customer.'))
                else:
                    customer_data = line.customer_id
                partner_addr = partner_obj.address_get(
                    cr, uid, [customer_data.id], ['default',
                                                  'invoice',
                                                  'delivery',
                                                  'contact'])
                newdate = datetime.today()
                partner = customer_data
                pricelist_id = partner.property_product_pricelist \
                    and partner.property_product_pricelist.id \
                    or False
                price_unit = line.price_unit
                line_company_id = line.company_id \
                    and line.company_id.id \
                    or False
                # All selected lines must belong to the same company.
                if company_id is not False \
                        and line_company_id != company_id:
                    raise osv.except_osv(
                        _('Could not create sale order !'),
                        _('You have to select lines '
                          'from the same company.'))
                else:
                    company_id = line_company_id
                shop_id = make_order.shop_id \
                    and make_order.shop_id.id \
                    or False
                line_account_id = line.account_id \
                    and line.account_id.id \
                    or False
                # All selected lines must share the same analytic account.
                if account_id is not False \
                        and line_account_id != account_id:
                    raise osv.except_osv(
                        _('Could not create billing request!'),
                        _('You have to select lines from the '
                          'same analytic account.'))
                else:
                    account_id = line_account_id
                sale_order_line = {
                    'name': line.name,
                    'product_uom_qty': line.unit_amount,
                    'product_id': line.product_id.id,
                    'product_uom': uom_id.id,
                    'price_unit': price_unit,
                    'notes': line.notes,
                }
                # Map the product's default taxes through the partner's
                # fiscal position, when the line has a product at all.
                taxes = False
                if line.product_id:
                    taxes_ids = line.product_id.product_tmpl_id.taxes_id
                    taxes = acc_pos_obj.map_tax(
                        cr, uid, partner.property_account_position,
                        taxes_ids)
                if taxes:
                    sale_order_line.update({
                        'tax_id': [(6, 0, taxes)]
                    })
                list_line.append(sale_order_line)
                # The sale order itself is created only once, on the first
                # iteration; subsequent lines are attached to it.
                if sale_id is False:
                    sale_id = order_obj.create(cr, uid, {
                        'origin': '',
                        'shop_id': shop_id,
                        'partner_id': customer_data.id,
                        'pricelist_id': pricelist_id,
                        'partner_invoice_id': partner_addr['invoice'],
                        'partner_order_id': partner_addr['contact'],
                        'partner_shipping_id': partner_addr['delivery'],
                        'date_order':
                            newdate.strftime('%Y-%m-%d %H:%M:%S'),
                        'fiscal_position':
                            partner.property_account_position and
                            partner.property_account_position.id or False,
                        'company_id': company_id,
                        'payment_term':
                            partner.property_payment_term and
                            partner.property_payment_term.id or False,
                        'project_id': account_id,
                        'invoice_quantity': make_order.invoice_quantity,
                        'order_policy': make_order.order_policy,
                    }, context=context)
                    # Subscribe the analytic account manager to the order.
                    if line.account_id.user_id:
                        order_obj.message_subscribe_users(
                            cr, uid, [sale_id],
                            user_ids=[line.account_id.user_id.id])
                sale_order_line.update({
                    'order_id': sale_id
                })
                order_line_id = order_line_obj.create(cr, uid,
                                                      sale_order_line,
                                                      context=context)
                # Link the created sale order line back to its plan line.
                values = {
                    'order_line_ids': [(4, order_line_id)]
                }
                billing_plan_obj.write(cr, uid, [line.id], values,
                                       context=context)
                res.append(order_line_id)
        return {
            'domain': "[('id','in', ["+','.join(map(str, res))+"])]",
            'name': _('Billing request lines'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'sale.order.line',
            'view_id': False,
            'context': False,
            'type': 'ir.actions.act_window'
        }
analytic_billing_plan_line_make_sale() | [
"jordi.ballester@eficent.com"
] | jordi.ballester@eficent.com |
c256fd9a9df13486844f0267178661e718862bac | 2caa155818c373fd5d9530e761bb7636255d4588 | /python_exs_curso_em_video/ex052.py | 986413a60540b5b6f85da16b65a2665f428fc48b | [
"MIT"
] | permissive | David-Matos-Sousa/David-no-GitHub | 8f64a5713d12a76d6a288363b8ade6de79f98fca | a46e762912f869db34705905b20f8221db2c22f6 | refs/heads/master | 2022-12-25T19:48:08.945055 | 2020-10-04T16:58:51 | 2020-10-04T16:58:51 | 291,156,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | num = int(input('Write a number:'))
# Count how many numbers in 1..num divide num evenly; each candidate is
# printed blue (ANSI \033[34m) when it divides num, red (\033[31m) otherwise.
div = 0
for c in range(1, num+1):
    if num % c == 0:
        print('\033[34m', end='')
        div = div + 1
    else:
        print('\033[31m', end='')
    print('{} '.format(c), end='')
# \033[m resets the colour; a prime has exactly two divisors (1 and itself).
print('\n\033[m The number {} was divisible {} times'.format(num, div))
if div == 2:
    print('It is a prime')
else:
print('It is not a prime')
| [
"david.matos.sousa@gmail.com"
] | david.matos.sousa@gmail.com |
394b9512d2800f6c8f99772dfb85f1a9e29e87b2 | d653c7f6667403ca587b6c68fc4af28b74adf083 | /blog/models/post.py | e399a54e1ded0a08e3de3667660873892049d8d9 | [] | no_license | JoshParinussa/django-blog | 5e1aafd6849f5c070ed5f4901d7b8d16df71d7cf | dde1d708617742b84e56d38678acad198eb51b4f | refs/heads/master | 2022-12-12T16:40:56.662777 | 2020-09-12T11:25:12 | 2020-09-12T11:25:12 | 294,923,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """Banner models."""
from django.db import models
from django.utils.translation import gettext as _
from sorl.thumbnail import ImageField
from blog.helpers import file as file_helper
from .base import BaseModel
class Post(BaseModel):
    """Blog post model."""
    # Headline; indexed to speed up title lookups.
    title = models.CharField(_("title"), max_length=255, db_index=True)
    # Deleting a category deletes its posts (CASCADE).
    category = models.ForeignKey('Category', on_delete=models.CASCADE, related_name=('blog'), verbose_name=_('category'))
    content = models.TextField(_("content"), blank=True, null=True)
    # Set once on creation; used for default newest-first ordering below.
    created_at = models.DateTimeField(auto_now_add=True)
    slug = models.SlugField(max_length=200, unique=True)
    # Thumbnail-capable image stored under a date-based path.
    image = ImageField(_("image"), upload_to=file_helper.DateUploadPath('blog/blog'))
    class Meta:
        ordering = ['-created_at']
    def __str__(self):
        return self.title
| [
"yehezkieljosh@gmail.com"
] | yehezkieljosh@gmail.com |
cf19035358b62ecd6f88a9ffd8a23464e7149a69 | 2863645627f099cad919c12dd1949bb3704923d2 | /tests/test_datasets/test_bottom_up_dataset.py | b218c4942ad7ae8b65bab8f7ea505b0d2bd7baf1 | [
"Apache-2.0"
] | permissive | zhangyu92/mmpose | d43c873111ac5a4614d3675495de3af771626eec | 17557522ce3e41f830973079c5b4321935c41439 | refs/heads/master | 2023-02-18T20:44:54.814555 | 2021-01-12T03:21:11 | 2021-01-12T03:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,707 | py | import tempfile
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from mmpose.datasets import DATASETS
def convert_coco_to_output(coco):
    """Build synthetic bottom-up predictions from COCO-style ground truth.

    For every image, each annotation with at least one labelled keypoint
    becomes an array of ``(x, y, score=1, tag=ann_id)`` rows; the per-image
    result is the tuple ``(preds, scores, image_path_chars, None)``.
    """
    results = []
    for img_id in coco.getImgIds():
        person_preds = []
        person_scores = []
        for ann_id in coco.getAnnIds(img_id):
            kpts = np.array(coco.anns[ann_id]['keypoints']).reshape((-1, 3))
            if sum(kpts[:, 2]) == 0:
                # Annotation without any visible keypoint: nothing to emit.
                continue
            num_kpts = kpts.shape[0]
            score_col = np.ones([num_kpts, 1])
            tag_col = np.ones([num_kpts, 1]) * ann_id
            person_preds.append(
                np.concatenate((kpts[:, :2], score_col, tag_col), axis=1))
            person_scores.append(1)
        # The evaluators expect the image path as a list of characters.
        path_chars = list(coco.imgs[img_id]['file_name'])
        results.append((np.stack(person_preds), person_scores, path_chars,
                        None))
    return results
def test_bottom_up_COCO_dataset():
    """Smoke-test BottomUpCocoDataset: build, index, and evaluate mAP."""
    dataset = 'BottomUpCocoDataset'
    # test COCO datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): 18 channel indices (0-17) are listed although
    # dataset_joints=17 -- confirm whether index 17 is intentional.
    channel_cfg = dict(
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
        ])
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128, 256],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=2,
        scale_aware_sigma=False)
    # Construct once in train mode and once in test mode.
    _ = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)
    custom_dataset = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]
    # Feeding ground truth back as predictions must give a perfect AP.
    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)
        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
def test_bottom_up_CrowdPose_dataset():
    """Smoke-test BottomUpCrowdPoseDataset: build, index, and evaluate mAP."""
    dataset = 'BottomUpCrowdPoseDataset'
    # test CrowdPose datasets
    dataset_class = DATASETS.get(dataset)
    channel_cfg = dict(
        num_output_channels=14,
        dataset_joints=14,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
        ],
        inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128, 256],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=2,
        scale_aware_sigma=False)
    # Construct once in train mode and once in test mode.
    _ = dataset_class(
        ann_file='tests/data/crowdpose/test_crowdpose.json',
        img_prefix='tests/data/crowdpose/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)
    custom_dataset = dataset_class(
        ann_file='tests/data/crowdpose/test_crowdpose.json',
        img_prefix='tests/data/crowdpose/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)
    image_id = 103319
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]
    # Feeding ground truth back as predictions must give a perfect AP.
    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)
        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
def test_bottom_up_MHP_dataset():
    """Smoke-test BottomUpMhpDataset: build, index, and evaluate mAP."""
    dataset = 'BottomUpMhpDataset'
    # test MHP datasets
    dataset_class = DATASETS.get(dataset)
    channel_cfg = dict(
        dataset_joints=16,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
        ])
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=1,
        scale_aware_sigma=False,
    )
    # Construct once in train mode and once in test mode.
    _ = dataset_class(
        ann_file='tests/data/mhp/test_mhp.json',
        img_prefix='tests/data/mhp/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)
    custom_dataset = dataset_class(
        ann_file='tests/data/mhp/test_mhp.json',
        img_prefix='tests/data/mhp/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)
    image_id = 2889
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]
    # Feeding ground truth back as predictions must give a perfect AP.
    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)
        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
| [
"noreply@github.com"
] | noreply@github.com |
10576346c4146798661b702e5f8b36a7e905b5f2 | 9f68ebfdfdc6d53f83552dcb221ae76d181790d3 | /sales/migrations/0007_invoice_becomes_receipt_too.py | bd1cf54803a022aa54c55d8ea506af4f5b7a18c3 | [] | no_license | marcor/silversly | 90695502cb4eeb5c66274275d22c6aaeec39dc04 | 68cdf30cdca5e475caae6dd1358ae75e0414c2b0 | refs/heads/master | 2023-04-30T22:55:05.203805 | 2021-05-21T17:59:18 | 2021-05-21T17:59:18 | 369,352,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,492 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Receipt.cart'
db.alter_column('sales_receipt', 'cart_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sales.Cart'], unique=True, null=True))
# Deleting field 'Invoice.cart'
db.delete_column('sales_invoice', 'cart_id')
# Deleting field 'Invoice.id'
db.delete_column('sales_invoice', 'id')
# Adding field 'Invoice.receipt_ptr'
db.add_column('sales_invoice', 'receipt_ptr', self.gf('django.db.models.fields.related.OneToOneField')(default=0, to=orm['sales.Receipt'], unique=True, primary_key=True), keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Receipt.cart'
raise RuntimeError("Cannot reverse this migration. 'Receipt.cart' and its values cannot be restored.")
# Adding field 'Invoice.cart'
db.add_column('sales_invoice', 'cart', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sales.Cart'], unique=True, null=True), keep_default=False)
# User chose to not deal with backwards NULL issues for 'Invoice.id'
raise RuntimeError("Cannot reverse this migration. 'Invoice.id' and its values cannot be restored.")
# Deleting field 'Invoice.receipt_ptr'
db.delete_column('sales_invoice', 'receipt_ptr_id')
models = {
'inventory.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Category']", 'null': 'True', 'blank': 'True'})
},
'inventory.price': {
'Meta': {'unique_together': "(('pricelist', 'product'),)", 'object_name': 'Price'},
'gross': ('common.models.FixedDecimalField', [], {'max_digits': '7', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markup': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'net': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'pricelist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Pricelist']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Product']"})
},
'inventory.pricelist': {
'Meta': {'object_name': 'Pricelist'},
'default_markup': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'default_method': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'primary_key': 'True'})
},
'inventory.product': {
'Meta': {'ordering': "['name', 'code']", 'object_name': 'Product'},
'base_price': ('common.models.FixedDecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '2'}),
'catalogue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Category']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '13'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_quantity': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '3'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'prices': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['inventory.Pricelist']", 'null': 'True', 'through': "orm['inventory.Price']", 'symmetrical': 'False'}),
'quantity': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '3'}),
'suppliers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['people.Supplier']", 'null': 'True', 'through': "orm['inventory.Supply']", 'symmetrical': 'False'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'inventory.supply': {
'Meta': {'unique_together': "(('product', 'supplier'),)", 'object_name': 'Supply'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('common.models.FixedDecimalField', [], {'max_digits': '8', 'decimal_places': '3'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Product']"}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Supplier']"}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'people.bank': {
'Meta': {'object_name': 'Bank'},
'abi': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'cab': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'people.customer': {
'Meta': {'object_name': 'Customer'},
'cf': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'due': ('common.models.FixedDecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'pricelist': ('django.db.models.fields.related.ForeignKey', [], {'default': "'Pubblico'", 'to': "orm['inventory.Pricelist']"})
},
'people.supplier': {
'Meta': {'ordering': "['name']", 'object_name': 'Supplier'},
'email': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '15', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '15', 'blank': 'True'})
},
'sales.cart': {
'Meta': {'object_name': 'Cart'},
'current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Customer']", 'null': 'True'}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'final_discount': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'final_total': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pricelist': ('django.db.models.fields.related.ForeignKey', [], {'default': "'Pubblico'", 'to': "orm['inventory.Pricelist']"}),
'rounded': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'sales.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Cart']"}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'final_discount': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'final_price': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'final_value': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Product']"}),
'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '7', 'decimal_places': '2'}),
'update': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'sales.ddt': {
'Meta': {'ordering': "['date', 'number']", 'object_name': 'Ddt', '_ormbases': ['sales.Receipt']},
'date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'main_address': ('django.db.models.fields.TextField', [], {}),
'number': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'receipt_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Receipt']", 'unique': 'True', 'primary_key': 'True'}),
'shipping_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'shipping_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 5, 21, 13, 41, 58, 142622)'}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'sales.invoice': {
'Meta': {'object_name': 'Invoice', '_ormbases': ['sales.Receipt']},
'bank': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['people.Bank']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'costs': ('common.models.FixedDecimalField', [], {'default': '0', 'max_digits': '7', 'decimal_places': '2'}),
'immediate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'number': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'payment_method': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'receipt_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Receipt']", 'unique': 'True', 'primary_key': 'True'}),
'receipts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'proxy_receipt'", 'null': 'True', 'to': "orm['sales.Receipt']"}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'sales.receipt': {
'Meta': {'object_name': 'Receipt'},
'cart': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Cart']", 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'sales.scontrino': {
'Meta': {'object_name': 'Scontrino', '_ormbases': ['sales.Receipt']},
'cf': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'unique': 'True', 'blank': 'True'}),
'due': ('common.models.FixedDecimalField', [], {'max_digits': '7', 'decimal_places': '2'}),
'receipt_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Receipt']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['sales']
| [
"marcor@users.noreply.github.com"
] | marcor@users.noreply.github.com |
eca7173612171328c19b0b90c0a3639ff207df10 | 1860918ce852099127e1109a470e131b49bb1992 | /bdrmapit/bin/cython | 4af1479af91c21da144f2558d3e29a161232d525 | [] | no_license | CAIDA/mapkit-traceroute-bdrmapit-pipeline | 55b002a0df56d2c41d458b9ad05126957f8be519 | 9b709b0f27bcf95c77b65d0b7df511b639503098 | refs/heads/master | 2023-08-28T17:14:51.332621 | 2021-10-10T20:34:09 | 2021-10-10T20:34:09 | 229,122,638 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | #!/project/mapkit/agamerog/country_asn_analysis/elverton/internet-flattening/aslinks_pipeline/bdrmapit/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from Cython.Compiler.Main import setuptools_main
if __name__ == '__main__':
    # Drop setuptools' "-script.py"/".exe" wrapper suffix from argv[0],
    # then hand control to Cython's compiler entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(setuptools_main())
| [
"agamerog.mit@gmail.com"
] | agamerog.mit@gmail.com | |
46037bb5c0b7e53e047bcfb8753331ef30f63ef7 | 2eb413eafcf7b7ea6aefa41420e6adfdebdb21cb | /scripts/xln/trimDerivedTables.py | f42828ed7427c6a64864bb3d5ed7ebfe4871760d | [] | no_license | SerhiiSkrypchenko/Scripts | 92493f3c1e66fab6f496160380094f2c63bf98f9 | 0a5b8f339a3c45d63955d75ec4752d0ac7323678 | refs/heads/master | 2023-02-06T08:47:54.385193 | 2023-01-30T12:50:30 | 2023-01-30T12:50:30 | 217,525,702 | 0 | 0 | null | 2023-01-30T10:29:15 | 2019-10-25T12:09:25 | Python | UTF-8 | Python | false | false | 553 | py | import requests
import config_Luna_Wallet
# Target node URL and admin credential come from the shared wallet config module.
url = config_Luna_Wallet.xln_mn_2
ADMIN_PASSWORD = config_Luna_Wallet.ADMIN_PASSWORD_T1


def trimDerivedTables(url):
    """Ask the node at *url* to trim its derived tables via the admin RPC API,
    logging begin/end markers and the raw response body."""
    print("---------- START trimDerivedTables on --->>> " + url + " <<< ----")
    params = {"requestType": "trimDerivedTables", "adminPassword": ADMIN_PASSWORD}
    reply = requests.request("POST", url + "/api/rpc", params=params)
    print(reply.text)
    print("--------END of trimDerivedTables proccess on peer --->>> " + url + " <<< --------")


trimDerivedTables(url)
| [
"43746242+SerhiiSkrypchenko@users.noreply.github.com"
] | 43746242+SerhiiSkrypchenko@users.noreply.github.com |
afaa879df31238594f246e718dd47f1d00af9697 | 2c6678a6e10852605b1ce57e8ab5962f25806877 | /eCommerce/store/admin.py | 2c644739d7c3b813cce8c18a97191f36e0e3723f | [] | no_license | Souvikkkk7/first-ecom | ed8c4d24235aa144d79ba8ab0bee41361bec65ba | 7558373b53fd5eed9df5517dd77cc12f3c5192c3 | refs/heads/master | 2022-11-17T23:29:07.985677 | 2020-07-16T19:07:11 | 2020-07-16T19:07:11 | 280,233,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from django.contrib import admin
# Explicit imports instead of the previous `from .models import *`: wildcard
# imports hide which names are used and can silently shadow other modules.
from .models import (
    Customer,
    Description,
    Image,
    Order,
    OrderItem,
    Product,
    ShippingAddress,
)

# Register your models here.
# Registering a model makes it manageable through the Django admin site.
for model in (Customer, Product, Image, Order, OrderItem, ShippingAddress, Description):
    admin.site.register(model)
"sdhali469@gmail.com"
] | sdhali469@gmail.com |
914db65235c0f013520dd3bda449daee33be2fff | 0e1c3903ef3b11c2c6fe52a8970c7ec3aed82288 | /posthog/management/commands/create_bulk_events.py | c0a1378511c78b107346ce5d4bd3ff93c72d1e7a | [
"MIT"
] | permissive | mindhash/posthog | 55021cce5b1cc30839296890e5e19b7e5ae6f4b1 | 904c4c5d17a2d50019717b169e107ce441cadac7 | refs/heads/master | 2022-11-21T18:20:37.006995 | 2020-07-13T08:36:38 | 2020-07-13T08:36:38 | 279,254,284 | 1 | 0 | MIT | 2020-07-13T09:15:47 | 2020-07-13T09:15:47 | null | UTF-8 | Python | false | false | 7,878 | py | import random
import io
import json
import time
import uuid
from pathlib import Path
from typing import Any, Iterator, List, Optional
from urllib.parse import urlparse

import psycopg2
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core import serializers
from django.core.management.base import BaseCommand
from django.utils.timezone import now

from posthog.models import (
    Event,
    Element,
    Team,
    Person,
    PersonDistinctId,
    Funnel,
    Action,
    ActionStep,
    FunnelStep,
)
def clean_csv_value(value: Optional[Any]) -> str:
    """Render *value* as one field of a PostgreSQL COPY text stream.

    ``None`` maps to the COPY null marker ``\\N``; everything else is
    stringified with literal newlines escaped so a value cannot span rows.
    (Fixes the annotation: the builtin ``any`` was used where ``typing.Any``
    was intended.)
    """
    if value is None:
        return r"\N"
    return str(value).replace("\n", "\\n")
class StringIteratorIO(io.TextIOBase):
    """Read-only text stream that lazily pulls data from an iterator of strings.

    Lets a generator of rows be handed to APIs expecting a file-like object
    (e.g. psycopg2's ``copy_from``) without materialising everything in memory.
    """

    def __init__(self, iter: Iterator[str]):
        self._source = iter
        self._pending = ""  # leftover text from the last chunk pulled

    def readable(self) -> bool:
        return True

    def _next_chunk(self, n: Optional[int] = None) -> str:
        """Return up to *n* characters (all pending text when *n* is None)."""
        while not self._pending:
            try:
                self._pending = next(self._source)
            except StopIteration:
                break
        taken = self._pending if n is None else self._pending[:n]
        self._pending = self._pending[len(taken):]
        return taken

    def read(self, n: Optional[int] = None) -> str:
        pieces = []
        if n is None or n < 0:
            # Drain the underlying iterator completely.
            piece = self._next_chunk()
            while piece:
                pieces.append(piece)
                piece = self._next_chunk()
        else:
            remaining = n
            while remaining > 0:
                piece = self._next_chunk(remaining)
                if not piece:
                    break
                remaining -= len(piece)
                pieces.append(piece)
        return "".join(pieces)
class Command(BaseCommand):
    """Management command that bulk-creates (or deletes) demo people/events.

    Events are streamed straight into the ``posthog_event`` table with
    PostgreSQL ``COPY`` for speed, bypassing the Django ORM.
    """

    help = "Create bulk events for testing"

    def add_arguments(self, parser):
        parser.add_argument("--team_id", nargs="+", type=int, help="specify the team id eg. --team_id 1")
        parser.add_argument(
            "--mode",
            nargs="+",
            default=["create"],
            help="""
            'delete' for deleting bulk demo data
            or 'create' for creating bulk demo data;
            default 'create'
            eg. --mode delete
            """,
        )

    def handle(self, *args, **options):
        """Entry point: dispatch to bulk delete, or wipe-and-recreate, for the team."""
        team_id = options["team_id"]
        mode = options["mode"][0]
        if not team_id:
            print("Please specify the --team id")
            return
        team = Team.objects.get(pk=team_id[0])
        with open(Path("posthog/demo_data.json").resolve(), "r") as demo_data_file:
            demo_data = json.load(demo_data_file)
        base_url = "127.0.0.1/bulk_demo/"
        if mode.lower() == "delete":
            start_time = time.time()
            self._delete_demo_data(team)
            print("--- %s seconds ---" % (time.time() - start_time))
        else:
            # Recreate from scratch: wipe any previous demo data first.
            self._delete_demo_data(team)
            self._create_funnel(base_url, team)
            start_time = time.time()
            self._create_events(demo_data, team, base_url)
            print("--- %s seconds ---" % (time.time() - start_time))

    def _connect(self):
        """Open an autocommitting psycopg2 connection to the app database."""
        result = urlparse(settings.DATABASE_URL)
        database = result.path[1:]
        hostname = result.hostname
        try:
            conn = psycopg2.connect(dbname=database, host=hostname)
        except psycopg2.Error:
            # The old bare `except:` swallowed the error and then crashed later
            # with a NameError on `conn`; fail loudly at the point of failure.
            print("Unable to connect to the database")
            raise
        conn.autocommit = True
        return conn

    def _create_events(self, demo_data, team, base_url):
        """Create 100 demo people and stream 10,000 fake events via COPY."""
        cur = self._connect().cursor()
        Person.objects.bulk_create([Person(team=team, properties={"is_demo": True}) for _ in range(0, 100)])
        distinct_ids: List[PersonDistinctId] = []
        demo_data_index = 0
        for index, person in enumerate(Person.objects.filter(team=team)):
            distinct_id = str(uuid.uuid4())
            distinct_ids.append(PersonDistinctId(team=team, person=person, distinct_id=distinct_id))
            # Enrich every third person with realistic demo properties.
            if index % 3 == 0:
                person.properties.update(demo_data[demo_data_index])
                person.save()
                demo_data_index += 1
        # Lazily renders 10,000 pipe-separated rows; StringIteratorIO makes the
        # generator look like a file for COPY without building a huge string.
        events_string_iterator = StringIteratorIO(
            (
                "|".join(
                    map(
                        clean_csv_value,
                        (
                            random.choice(["autocapture", "$pageview", "$hello"]),
                            json.dumps(
                                {
                                    "$current_url": base_url + random.choice(["", "1/", "2/"]),
                                    "$browser": random.choice(["Chrome", "Safari", "Firefox"]),
                                    "$lib": "web",
                                }
                            ),
                            json.dumps(
                                {
                                    "tag_name": random.choice(["a", "href"]),
                                    "attr_class": ["btn", "btn-success"],
                                    "attr_id": random.choice(["sign-up", "click"]),
                                    "text": random.choice(["Sign up", "Pay $10"]),
                                }
                            ),
                            now() - relativedelta(days=random.choice(range(7))) + relativedelta(seconds=15),
                            team.id,
                            distinct_id,
                        ),
                    )
                )
                + "\n"
                for _ in range(10000)
            )
        )
        cur.copy_from(
            events_string_iterator,
            "posthog_event",
            sep="|",
            columns=["event", "properties", "elements", "timestamp", "team_id", "distinct_id"],
        )
        PersonDistinctId.objects.bulk_create(distinct_ids)
        cur.close()

    def _delete_demo_data(self, team):
        """Delete demo events, demo people and HogFlix funnel/actions for *team*."""
        cur = self._connect().cursor()
        people = PersonDistinctId.objects.filter(team=team, person__properties__is_demo=True)
        distinct_ids = tuple(item["distinct_id"] for item in people.values("distinct_id"))
        if distinct_ids:
            # Parameterised query: psycopg2 expands a Python tuple into a valid
            # IN list. The previous str.format() version produced invalid SQL
            # for a single id (trailing comma in the tuple repr) and built SQL
            # by string interpolation.
            cur.execute("DELETE FROM posthog_event WHERE distinct_id IN %s", (distinct_ids,))
        cur.close()
        Person.objects.filter(team=team, properties__is_demo=True).delete()
        Funnel.objects.filter(team=team, name__contains="HogFlix").delete()
        Action.objects.filter(team=team, name__contains="HogFlix").delete()

    def _create_funnel(self, base_url, team):
        """Create the three HogFlix actions and a signup funnel over them."""
        homepage = Action.objects.create(team=team, name="HogFlix homepage view")
        ActionStep.objects.create(action=homepage, event="$pageview", url=base_url, url_matching="exact")
        user_signed_up = Action.objects.create(team=team, name="HogFlix signed up")
        # Bug fix: both steps below were attached to `homepage` by copy-paste,
        # leaving the signup/paid actions without any matching step.
        ActionStep.objects.create(
            action=user_signed_up, event="$autocapture", url="%s1/" % base_url, url_matching="exact",
        )
        user_paid = Action.objects.create(team=team, name="HogFlix paid")
        ActionStep.objects.create(
            action=user_paid, event="$autocapture", url="%s2/" % base_url, url_matching="exact",
        )
        funnel = Funnel.objects.create(team=team, name="HogFlix signup -> watching movie")
        FunnelStep.objects.create(funnel=funnel, action=homepage, order=0)
        FunnelStep.objects.create(funnel=funnel, action=user_signed_up, order=1)
        FunnelStep.objects.create(funnel=funnel, action=user_paid, order=2)
| [
"noreply@github.com"
] | noreply@github.com |
f24458a14352f36a4dc76daf6247758d8ae07d7f | d016e0c9a54e6e3d46b86894c99cc7739c02c7f5 | /tree.py | 39c72f9656aca2166fc707de71253d5a7a982770 | [] | no_license | rinkeshpanwar/datastructure | 31f25c036ac314867d3f93a1094d5232342d351b | cbd0656b163473ed8c906b539265874cf22231c9 | refs/heads/main | 2023-02-17T17:42:39.301587 | 2021-01-12T07:01:59 | 2021-01-12T07:01:59 | 328,899,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | import collections
class node:
    """A single binary-tree node: a payload plus links to its two children."""

    def __init__(self, data=None):
        # Children start out empty; callers wire them up directly.
        self.data = data
        self.left = None
        self.right = None
class tree:
    """Minimal binary-tree helper: node creation plus a few traversals."""

    def __init__(self):
        # Work queue reused by the breadth-first traversal.
        self.dq = collections.deque()

    def insert(self, data):
        """Create and return a detached node holding *data*."""
        return node(data)

    def inorder(self, root):
        """Print the subtree rooted at *root* in-order (left, node, right)."""
        if root is None:
            return
        self.inorder(root.left)
        print(root.data)
        self.inorder(root.right)

    def inverse(self, root):
        """Mirror the subtree rooted at *root* in place by swapping children."""
        if root is None:
            return
        self.inverse(root.left)
        self.inverse(root.right)
        root.left, root.right = root.right, root.left

    def bst(self, root):
        """Print node values in breadth-first (level) order starting at *root*.

        Bug fix: the old recursive version stopped the *entire* traversal as
        soon as it dequeued a missing (None) child, so any node queued after
        the first gap was never printed. None children are now skipped.
        """
        self.dq.append(root)
        while self.dq:
            current = self.dq.popleft()
            if current is None:
                continue
            print(current.data)
            self.dq.append(current.left)
            self.dq.append(current.right)
# Build a small demo tree by hand and exercise the traversals.
t = tree()
root = t.insert(10)
root.left = t.insert(20)
root.right = t.insert(30)
root.left.left = t.insert(40)
root.left.right = t.insert(50)
root.right.left = t.insert(60)
root.right.right = t.insert(70)
# NOTE(review): a second node also holds 70 — presumably intended to give the
# rightmost node a right child; confirm whether the duplicate value is wanted.
root.right.right.right = t.insert(70)
#t.inorder(root)
#t.inverse(root)
print("---------------------")
#t.inorder(root)
# Breadth-first print of the demo tree.
t.bst(root)
"rinkeshpanwar1997@gmail.com"
] | rinkeshpanwar1997@gmail.com |
e4fd0b88f086e8155bee37b5546c0096f7760d3e | e78154abbb8bacf5afccda9da371684cbeabad36 | /envs/ALPHA-POPEGO/lib/python2.5/site-packages/ipython-0.8.2-py2.5.egg/IPython/Release.py | c22250cf389d6cc8e86540e756de11ec217a66b1 | [
"BSD-3-Clause"
] | permissive | enterstudio/popego | 1a196fabc374c0f45764e5c74bd7752236424040 | 2d09e793d9d2f297139edb325b8a70ddda9b2705 | refs/heads/master | 2021-04-09T16:39:40.781634 | 2016-10-14T16:53:47 | 2016-10-14T16:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | # -*- coding: utf-8 -*-
"""Release data for the IPython project.
$Id: Release.py 2855 2007-11-06 06:53:49Z vivainio $"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Copyright (c) 2001 Janko Hauser <jhauser@zscout.de> and Nathaniel Gray
# <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
revision = '2876M'
#version = '0.8.2.svn.r' + revision.rstrip('M')
version = '0.8.2'
description = "An enhanced interactive Python shell."
long_description = \
"""
IPython provides a replacement for the interactive Python interpreter with
extra functionality.
Main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Readline based name completion.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* Configuration system with easy switching between different setups (simpler
than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available at the IPython subversion
repository_.
.. _repository: http://ipython.scipy.org/svn/ipython/ipython/trunk#egg=ipython-dev
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','fperez@colorado.edu'),
'Janko' : ('Janko Hauser','jhauser@zscout.de'),
'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
'Ville' : ('Ville Vainio','vivainio@gmail.com')
}
url = 'http://ipython.scipy.org'
download_url = 'http://ipython.scipy.org/dist'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
keywords = ['Interactive','Interpreter','Shell']
| [
"santisiri@gmail.com"
] | santisiri@gmail.com |
fad738ad7b13dcce721b2525ae36d3b0f26ae079 | 320b78882c944bad612f5da589ad6319b7339339 | /Aula22/Exercicio1.py | 7a5afcaf2b4f7ee2815ada89d6ce127101c7f91f | [] | no_license | talissad/TrabalhosPython | aed1d26b5a72b79d6b26aff9e62e7d76963ae081 | e7a890ef5695bf8eeb8929eb6fafafff4ed1b7b9 | refs/heads/master | 2022-04-10T09:49:04.749672 | 2020-02-27T14:46:20 | 2020-02-27T14:46:20 | 220,218,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,582 | py | # Aula 21 - 09-12-2019
# Crie uma classe cliente:
# 1) deve ter como atributos: codigo, cpf, nome, idade, sexo
# 2) como metodos: receber salario, comprar, pagar divida
# Quando recebe aumenta o dinheiro na carteira.
# quando compra aumenta os bens e diminui o dinheiro na carteira
# Se comprar e não tiver dinheiro o suficiente deve diminuir o dinheiro da carteira e aumentar a divida.
# Para pagar a divida tem que ter dinheiro o suficiente na carteira
# 3) atributos de estado: dinheiro na carteira, divida, bens
class Cliente:
    """A client per the exercise spec: wallet money, debt and owned goods.

    Receiving a salary adds money to the wallet; buying records the good and
    spends wallet money, with any shortfall becoming debt; the debt can only
    be paid when the wallet holds enough money.
    """

    def __init__(self, codigo, cpf, nome, idade, sexo):
        self.codigo = codigo
        self.cpf = cpf
        self.nome = nome
        self.idade = idade
        self.sexo = sexo
        # State attributes: outstanding debt, owned goods, money in the wallet.
        self.divida = 0
        self.bens = []
        self.dinheiro_carteira = 0

    def receber_salario(self, receber):
        """Add a salary payment to the wallet."""
        self.dinheiro_carteira = self.dinheiro_carteira + receber

    def compra(self, bem, valor):
        """Buy *bem* for *valor*; whatever the wallet cannot cover becomes debt."""
        self.bens.append(bem)
        if valor <= self.dinheiro_carteira:
            self.dinheiro_carteira = self.dinheiro_carteira - valor
        else:
            # Spend the whole wallet and register the shortfall as debt.
            self.divida = self.divida + (valor - self.dinheiro_carteira)
            self.dinheiro_carteira = 0

    def pagar_divida(self):
        """Settle the debt in full, but only when the wallet holds enough money.

        Bug fix: this method used to be called ``divida``, which collided with
        the ``self.divida`` attribute set in ``__init__`` (the attribute
        shadowed the method on every instance), and its body subtracted the
        wallet from the *goods list*, which could never work. It now actually
        pays the debt off, as the exercise spec requires.
        """
        if self.divida <= self.dinheiro_carteira:
            self.dinheiro_carteira = self.dinheiro_carteira - self.divida
            self.divida = 0
# Small demo of the Cliente class.
pessoa = Cliente(10, 8181548, 'Amanda', 18, 'f')
pessoa.receber_salario(8000)
# Bug fix: the original line was `pessoa.compra = ('casa', 90000)`, which
# *assigned a tuple over the method* instead of calling it, so the purchase
# was never recorded and the method was clobbered. Call the method instead.
pessoa.compra('casa', 90000)
print(pessoa.dinheiro_carteira)
print(pessoa.bens)
print(pessoa.divida)
| [
"900161@proway.treina"
] | 900161@proway.treina |
6a6ebe3550b44d0e3ce445ed0151ed8f95c18ec0 | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /2.1.2/_downloads/boxplot_demo1.py | aac441baa4f86269d657f3d8b96bfebf095017f7 | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 7,720 | py | """
========
Boxplots
========
Visualizing boxplots with matplotlib.
The following examples show off how to visualize boxplots with
Matplotlib. There are many options to control their appearance and
the statistics that they use to summarize the data.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
# Fixing random state for reproducibility
np.random.seed(19680801)
# fake up some data: a wide uniform spread, a tight centre cluster, and
# deliberate high/low outliers ("fliers")
spread = np.random.rand(50) * 100
center = np.ones(25) * 50
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
data = np.concatenate((spread, center, flier_high, flier_low), 0)
fig, axs = plt.subplots(2, 3)
# basic plot
axs[0, 0].boxplot(data)
axs[0, 0].set_title('basic plot')
# notched plot (second positional boxplot argument is `notch`)
axs[0, 1].boxplot(data, 1)
axs[0, 1].set_title('notched plot')
# change outlier point symbols (third positional argument is `sym`)
axs[0, 2].boxplot(data, 0, 'gD')
axs[0, 2].set_title('change outlier\npoint symbols')
# don't show outlier points (empty `sym` string suppresses fliers)
axs[1, 0].boxplot(data, 0, '')
axs[1, 0].set_title("don't show\noutlier points")
# horizontal boxes (fourth positional argument is `vert`)
axs[1, 1].boxplot(data, 0, 'rs', 0)
axs[1, 1].set_title('horizontal boxes')
# change whisker length (fifth positional argument is `whis`)
axs[1, 2].boxplot(data, 0, 'rs', 0, 0.75)
axs[1, 2].set_title('change whisker length')
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.05, top=0.9,
                    hspace=0.4, wspace=0.3)
# fake up some more data
spread = np.random.rand(50) * 100
center = np.ones(25) * 40
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
d2 = np.concatenate((spread, center, flier_high, flier_low), 0)
data.shape = (-1, 1)
d2.shape = (-1, 1)
# Making a 2-D array only works if all the columns are the
# same length.  If they are not, then use a list instead.
# This is actually more efficient because boxplot converts
# a 2-D array into a list of vectors internally anyway.
data = [data, d2, d2[::2, 0]]
# Multiple box plots on one Axes
fig, ax = plt.subplots()
ax.boxplot(data)
plt.show()
###############################################################################
# Below we'll generate data from five different probability distributions,
# each with different characteristics. We want to play with how an IID
# bootstrap resample of the data preserves the distributional
# properties of the original sample, and a boxplot is one visual tool
# to make this assessment
# Draw N samples from five differently-shaped distributions, plus an IID
# bootstrap resample of each, to compare in the styled boxplot below.
numDists = 5
randomDists = ['Normal(1,1)', ' Lognormal(1,1)', 'Exp(1)', 'Gumbel(6,4)',
               'Triangular(2,9,11)']
N = 500
norm = np.random.normal(1, 1, N)
logn = np.random.lognormal(1, 1, N)
expo = np.random.exponential(1, N)
gumb = np.random.gumbel(6, 4, N)
tria = np.random.triangular(2, 9, 11, N)
# Generate some random indices that we'll use to resample the original data
# arrays. For code brevity, just use the same random indices for each array.
# Fix: np.random.random_integers(0, N - 1, N) is deprecated (removed in
# modern NumPy); randint(0, N) draws from the same inclusive range [0, N-1]
# and, in legacy NumPy, produced an identical random stream.
bootstrapIndices = np.random.randint(0, N, N)
normBoot = norm[bootstrapIndices]
expoBoot = expo[bootstrapIndices]
gumbBoot = gumb[bootstrapIndices]
lognBoot = logn[bootstrapIndices]
triaBoot = tria[bootstrapIndices]
data = [norm, normBoot, logn, lognBoot, expo, expoBoot, gumb, gumbBoot,
        tria, triaBoot]
fig, ax1 = plt.subplots(figsize=(10, 6))
# NOTE(review): Figure.canvas.set_window_title is deprecated in newer
# Matplotlib releases; current code uses fig.canvas.manager.set_window_title.
fig.canvas.set_window_title('A Boxplot Example')
fig.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
               alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_title('Comparison of IID Bootstrap Resampling Across Five Distributions')
ax1.set_xlabel('Distribution')
ax1.set_ylabel('Value')
# Now fill the boxes with desired colors
boxColors = ['darkkhaki', 'royalblue']
numBoxes = numDists*2
# `medians` collects the y-value of each median line for the upper tick labels.
medians = list(range(numBoxes))
for i in range(numBoxes):
    box = bp['boxes'][i]
    boxX = []
    boxY = []
    # A box outline path has 5 vertices (first vertex repeated to close it).
    for j in range(5):
        boxX.append(box.get_xdata()[j])
        boxY.append(box.get_ydata()[j])
    boxCoords = list(zip(boxX, boxY))
    # Alternate between Dark Khaki and Royal Blue
    k = i % 2
    boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
    ax1.add_patch(boxPolygon)
    # Now draw the median lines back over what we just filled in
    med = bp['medians'][i]
    medianX = []
    medianY = []
    for j in range(2):
        medianX.append(med.get_xdata()[j])
        medianY.append(med.get_ydata()[j])
    ax1.plot(medianX, medianY, 'k')
    medians[i] = medianY[0]
    # Finally, overplot the sample averages, with horizontal alignment
    # in the center of each box
    ax1.plot([np.average(med.get_xdata())], [np.average(data[i])],
             color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes + 0.5)
top = 40
bottom = -5
ax1.set_ylim(bottom, top)
ax1.set_xticklabels(np.repeat(randomDists, 2),
                    rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(numBoxes) + 1
upperLabels = [str(np.round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(numBoxes), ax1.get_xticklabels()):
    k = tick % 2
    ax1.text(pos[tick], top - (top*0.05), upperLabels[tick],
             horizontalalignment='center', size='x-small', weight=weights[k],
             color=boxColors[k])
# Finally, add a basic legend
fig.text(0.80, 0.08, str(N) + ' Random Numbers',
         backgroundcolor=boxColors[0], color='black', weight='roman',
         size='x-small')
fig.text(0.80, 0.045, 'IID Bootstrap Resample',
         backgroundcolor=boxColors[1],
         color='white', weight='roman', size='x-small')
fig.text(0.80, 0.015, '*', color='white', backgroundcolor='silver',
         weight='roman', size='medium')
fig.text(0.815, 0.013, ' Average Value', color='black', weight='roman',
         size='x-small')
plt.show()
###############################################################################
# Here we write a custom function to bootstrap confidence intervals.
# We can then use the boxplot along with this function to show these intervals.
def fakeBootStrapper(n):
    '''
    Placeholder for a real bootstrap of the median and its confidence
    interval: returns a hard-coded (median, (ci_low, ci_high)) tuple so
    the demo can exercise `usermedians`/`conf_intervals` without doing
    any real statistics.
    '''
    if n == 1:
        return 0.1, (-0.25, 0.25)
    return 0.2, (-0.35, 0.50)
inc = 0.1
# Four "treatments" with slowly increasing spread.
e1 = np.random.normal(0, 1, size=(500,))
e2 = np.random.normal(0, 1, size=(500,))
e3 = np.random.normal(0, 1 + inc, size=(500,))
e4 = np.random.normal(0, 1 + 2*inc, size=(500,))
treatments = [e1, e2, e3, e4]
# Supply user-computed medians / confidence intervals for the last two boxes
# only; None entries let boxplot compute its own.
med1, CI1 = fakeBootStrapper(1)
med2, CI2 = fakeBootStrapper(2)
medians = [None, None, med1, med2]
conf_intervals = [None, None, CI1, CI2]
fig, ax = plt.subplots()
pos = np.array(range(len(treatments))) + 1
bp = ax.boxplot(treatments, sym='k+', positions=pos,
                notch=1, bootstrap=5000,
                usermedians=medians,
                conf_intervals=conf_intervals)
ax.set_xlabel('treatment')
ax.set_ylabel('response')
plt.setp(bp['whiskers'], color='k', linestyle='-')
plt.setp(bp['fliers'], markersize=3.0)
plt.show()
| [
"tcaswell@gmail.com"
] | tcaswell@gmail.com |
60daa405763d1b7b37c74167e6234513e70bff79 | 1d1ca8f286a49bc832745b3f50879bf2ea381e6b | /recognize.py | b57ad22dbfb02b3d71ee513fc320cc2118687ba8 | [
"Apache-2.0"
] | permissive | old-school-kid/Wavenet | f3937994d10d9fbd637bf9940b7c001f9212c87f | ada74d4752c0b35cc948723957de38bc708290a5 | refs/heads/main | 2023-06-09T21:17:06.259253 | 2021-07-08T16:25:05 | 2021-07-08T16:25:05 | 384,158,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | # -*- coding: utf-8 -*-
import sugartensor as tf
import numpy as np
import librosa
from model import *
import data
__author__ = 'namju.kim@kakaobrain.com'
# set log level to debug
tf.sg_verbosity(10)
#
# hyper parameters
#
batch_size = 1     # batch size
num_blocks = 3     # dilated blocks
num_dim = 128      # latent dimension
#
# inputs
#
# vocabulary size
voca_size = data.voca_size
# mfcc feature of audio
# NOTE(review): placeholder shape is (batch, time, 20) — 20 MFCC coefficients
# per frame; confirm against the mfcc transpose at the bottom of this file.
x = tf.placeholder(dtype=tf.sg_floatx, shape=(batch_size, None, 20))
# sequence length except zero-padding
seq_len = tf.not_equal(x.sg_sum(dims=2), 0.).sg_int().sg_sum(dims=1)
def res_block(tensor, size, rate, dim=num_dim):
    """Gated dilated-convolution residual block (WaveNet style).

    Returns a (residual, skip) pair: the block output added back onto the
    input, and the block output alone for the skip-connection sum.
    """
    # tanh "filter" path gated elementwise by a sigmoid path,
    # both dilated convolutions over the same input
    gated = (tensor.sg_aconv1d(size=size, rate=rate, act='tanh', bn=True)
             * tensor.sg_aconv1d(size=size, rate=rate, act='sigmoid', bn=True))
    # 1x1 convolution back to the residual width
    out = gated.sg_conv1d(size=1, dim=dim, act='tanh', bn=True)
    # residual and skip output
    return out + tensor, out
# expand dimension
z = x.sg_conv1d(size=1, dim=num_dim, act='tanh', bn=True)
# dilated conv block loop: num_blocks passes over dilation rates 1..16,
# accumulating every block's skip output
skip = 0  # skip connections
for i in range(num_blocks):
    for r in [1, 2, 4, 8, 16]:
        z, s = res_block(z, size=7, rate=r)
        skip += s
# final logit layers
logit = (skip
         .sg_conv1d(size=1, act='tanh', bn=True)
         .sg_conv1d(size=1, dim=voca_size))
# ctc decoding (the decoder expects time-major input, hence the transpose)
decoded, _ = tf.nn.ctc_beam_search_decoder(logit.sg_transpose(perm=[1, 0, 2]), seq_len, merge_repeated=False)
# to dense tensor; the +1 offset presumably re-bases CTC's label indices for
# data.print_index — confirm against the data module's vocabulary layout
y = tf.sparse_to_dense(decoded[0].indices, decoded[0].shape, decoded[0].values) + 1
#
# recognize wave file
#
# command line argument for input wave file path
tf.sg_arg_def(file=('', 'speech wave file to recognize.'))
# load wave file
wav, sr = librosa.load(tf.sg_arg().file, mono=True)
# get mfcc feature, reshaped to the placeholder layout (batch, time, coeffs)
mfcc = np.transpose(np.expand_dims(librosa.feature.mfcc(wav, sr), axis=0), [0, 2, 1])
# run network
with tf.Session() as sess:
    # init variables
    tf.sg_init(sess)
    # restore parameters from the latest training checkpoint
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('asset/train'))
    # run session
    label = sess.run(y, feed_dict={x: mfcc})
    # print label
    data.print_index(label)
| [
"mishrasp393@gmail.com"
] | mishrasp393@gmail.com |
31298541903089b84d357150a735501103053981 | 0a57f05221d425119cb2994c5686a95e01b33d46 | /ex21.py | 67a0f965521d5f8cce8027401d93c01786fc9214 | [] | no_license | luroto/lpthw | 371ad2de422e7656b9f18461808d28847d17971f | e89329477d0c5c5b34d7998832b395c05385876b | refs/heads/master | 2022-06-02T17:56:01.873932 | 2020-05-02T17:52:11 | 2020-05-02T17:52:11 | 260,742,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | def add(a,b):
print(f"ADDING {a} + {b}")
return a + b
def substract(a, b):
    """Print a trace line, then return a minus b."""
    difference = a - b
    print(f"SUBSRACTING {a} - {b}")
    return difference
def multiply(a, b):
    """Print a trace line, then return the product of a and b."""
    product = a * b
    print(f"MULTIPLYING {a} * {b}")
    return product
def divide(a, b):
    """Print a trace line, then return a divided by b (true division)."""
    print(f"DIVIDING {a} / {b}")
    return a / b
print("Let's do some math with just functions")
age = add(30, 5)
height = substract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print(f"Age: {age}, Height: {height}, Weight {weight}, IQ {iq}")
# A puzzle for the extra credit, type it in anyway
print("Here's a puzzle")
# Nested calls evaluate inside-out: divide, multiply, substract, then add.
what = add(age, substract(height, multiply(weight, divide(iq, 2))))
print("That becomes: ", what, "Can you do it by hand?")
| [
"774@holbertonschool.com"
] | 774@holbertonschool.com |
a4c42472fa3d15a356100869d5c0683205652cd8 | 8fed701e8e04c4bf84fe9e43bbeaf5f39d416664 | /sagemaker/keras/container/games/tictactoe/keras/NNet.py | 54e973aa59905d8f9f60213abb7a068442b3ced3 | [] | no_license | ggiallo28/neural-network-genetic-algorithm | c992f9f53c0802f4168c1d5419f560fdec051db1 | 0316bb91f5dbeee8f3c352694d27ab662824d13e | refs/heads/master | 2023-04-03T23:45:31.500461 | 2019-06-30T13:17:51 | 2019-06-30T13:17:51 | 172,666,707 | 0 | 0 | null | 2023-03-24T22:55:48 | 2019-02-26T08:12:06 | Python | UTF-8 | Python | false | false | 3,058 | py | import argparse
import os
import shutil
import time
import random
import numpy as np
import math
import sys
sys.path.append('..')
from utils import *
from NeuralNet import NeuralNet
import argparse
from .TicTacToeNNet import TicTacToeNNet as onnet
"""
NeuralNet wrapper class for the TicTacToeNNet.
Author: Evgeny Tyurin, github.com/evg-tyurin
Date: Jan 5, 2018.
Based on (copy-pasted from) the NNet by SourKream and Surag Nair.
"""
#args = dotdict({
# 'lr': 0.001,
# 'dropout': 0.3,
# 'epochs': 3,
# 'batch_size': 64,
# 'cuda': True,
# 'num_channels': 512,
#})
class NNetWrapper(NeuralNet):
    """Keras-backed NeuralNet implementation wrapping TicTacToeNNet."""

    def __init__(self, game, args):
        self.nnet = onnet(game, args)
        self.game = game
        # Unique-ish identifier, handy when tracking several wrapper instances.
        self.name = str(hex(id(self)))
        self.board_x, self.board_y = game.getBoardSize()
        self.action_size = game.getActionSize()
        # Sentinel "worst" loss until the first training round runs.
        self.loss = 99999999999
        self.args = args

    def train(self, examples):
        """
        examples: list of examples, each example is of form (board, pi, v)
        Fits the model and records the loss history on self.loss.
        """
        start = time.time()
        input_boards, target_pis, target_vs = list(zip(*examples))
        input_boards = np.asarray(input_boards)
        target_pis = np.asarray(target_pis)
        target_vs = np.asarray(target_vs)
        train_history = self.nnet.model.fit(x = input_boards, y = [target_pis, target_vs], batch_size = self.args.batch_size, epochs = self.args.epochs, verbose=0)
        self.loss = train_history.history['loss']
        v0 = len(examples)
        v1 = round(time.time()-start,2)
        v2 = round(train_history.history['loss'][0],5)
        v3 = round(train_history.history['pi_loss'][0],5)
        v4 = round(train_history.history['v_loss'][0],5)
        print('Examples {} | Time Total: {}s | loss {} | pi_loss {} | v_loss {}'.format(v0,v1,v2,v3,v4))

    def predict(self, board):
        """
        board: np array with board
        Returns the (policy, value) pair predicted for the single board.
        """
        # Add a batch dimension before running the model.
        board = board[np.newaxis, :, :]
        # Run inside this wrapper's own session/graph so several wrappers
        # can coexist in one process.
        with self.nnet.session.as_default():
            with self.nnet.graph.as_default():
                pi, v = self.nnet.model.predict(board)
        return pi[0], v[0]

    def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        """Save the model weights, creating the target folder when needed."""
        filepath = os.path.join(folder, filename)
        # exist_ok avoids the check-then-create race of the old exists()/mkdir().
        os.makedirs(folder, exist_ok=True)
        self.nnet.model.save_weights(filepath)

    def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        """Load model weights; raises FileNotFoundError when the file is missing."""
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py#L98
        filepath = os.path.join(folder, filename)
        if not os.path.exists(filepath):
            # Bug fix: the original did `raise("...")`, which itself raises
            # a TypeError because a plain string is not an exception.
            raise FileNotFoundError("No model in path '{}'".format(filepath))
        self.nnet.model.load_weights(filepath)
        return self

    def get_weights(self):
        # dtype=object keeps the ragged list of per-layer arrays intact;
        # modern NumPy refuses to build a ragged ndarray implicitly.
        return np.array(self.nnet.model.get_weights(), dtype=object)

    def set_weights(self, weights):
        self.nnet.model.set_weights(weights)
        return self

    def get_loss(self):
        return self.loss
| [
"gianluigi.mucciolo@xpeppers.com"
] | gianluigi.mucciolo@xpeppers.com |
e7eee4841d85a333ebb5e70c5cd6218388d11953 | 2ac0efad0ff5a1d9266119bded4a1120e6b19f1b | /Project_undefined/wechatMoney/wechat_red_envelope_2.py | d93eb0ec0b18f7699d7be3dfa32e9c942b462af2 | [] | no_license | boyac/pyUndefined | 3b91e4d1b0b8b544029ba1ffc8bc4ef0b2aa8f43 | 913d88eb0c1348d91dff025fa3b1f5643e583a75 | refs/heads/master | 2021-06-22T09:59:16.605391 | 2021-02-03T09:49:28 | 2021-02-03T09:49:28 | 62,711,768 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # -*- coding: utf-8 -*-
__author__ = 'Boya Chiou'
import numpy as np, numpy.random
class Lucky:
    """
    The user assigns a lump sum to a group red envelope, and the number of small red envelopes within it.
    """
    def __init__(self, amount, n):
        # Total money in the group envelope and how many sub-envelopes to cut.
        self.amount = amount
        self.n = n
    def money(self):
        # A flat Dirichlet draw splits the total into n random proportions.
        self.hong_bao = np.random.dirichlet(np.ones(self.n),size=1) * self.amount
        np.set_printoptions(precision=2)
        # NOTE(review): rounding each share to 2 decimals means the shares may
        # no longer sum exactly to `amount`; confirm whether the remainder
        # should be folded into one envelope.
        self.hong_bao = np.round(self.hong_bao, decimals=2).flatten()
        return self.hong_bao.tolist()
        #return reduce(lambda x, y: x+y, self.hong_bao) #works the same as flatten()
    def env(self):
        # Each call re-draws fresh amounts via money() (Python 2 print syntax).
        for index, element in enumerate(Lucky.money(self)):
            print '猴年發財 No.{}, {} 元'.format(index+1, element)
def main():
    # Demo: split 10 yuan into 3 random envelopes and print them.
    test = Lucky(10,3)
    print test.money()
    # env() prints its own lines and returns None, so this also prints "None".
    print test.env()
if __name__== "__main__":
    main()
| [
"chinawhitewhy@gmail.com"
] | chinawhitewhy@gmail.com |
d952153518b3548b45a3b32e997c5a804e137594 | 08fdbaab91f447a2c290bf2d084958476ebf3919 | /project/settings.py | 21a3b809dc9fcb66df98cf6b775608385d3a946f | [] | no_license | yhoiseth/database | 057dbe987545e577455b3e596411a7c52021bc7a | 531b6e37856c1a77a3686ff7d8730c4da34340eb | refs/heads/master | 2022-04-28T20:29:00.744181 | 2019-10-26T12:59:28 | 2019-10-26T12:59:28 | 216,885,249 | 0 | 0 | null | 2022-04-22T22:38:27 | 2019-10-22T18:41:29 | Python | UTF-8 | Python | false | false | 2,345 | py | import os
from typing import List
from django_heroku import settings
# Custom user model provided by the local "app" application.
AUTH_USER_MODEL = "app.User"
# Repository root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): placeholder key -- the django_heroku ``settings(locals())``
# call at the bottom may replace it; confirm a real secret is injected in
# production.
SECRET_KEY = "Dummy key"
# DEBUG is enabled only when ENVIRONMENT=development is set.
ENVIRONMENT = os.getenv("ENVIRONMENT", "production")
DEBUG = ENVIRONMENT == "development"
ALLOWED_HOSTS: List = []
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "app",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]
WSGI_APPLICATION = "project.wsgi.application"
# PostgreSQL connection; credentials come from the environment with
# local-development fall-backs.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": os.getenv("DATABASE_NAME", "database"),
        "USER": os.getenv("DATABASE_USER", "yngve"),
        "PASSWORD": os.getenv("DATABASE_PASSWORD", ""),
        "HOST": os.getenv("DATABASE_HOST", "localhost"),
        "PORT": "5432",
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "/static/"
# django_heroku applies Heroku-specific overrides to the names defined above
# (see the django_heroku documentation for exactly which ones).
settings(locals())
| [
"yngve@hoiseth.net"
] | yngve@hoiseth.net |
f9e03766b92f7594acf0b234d6939d7287936474 | f30286966b67ca71a717a702a86560a20ea0af9e | /migrations/0013_auto_20190617_1903.py | 7d8fa331043d3b7b851520bb8a1699dbfd7cecfd | [
"Apache-2.0"
] | permissive | j-ollivier/sonov-main | 9a2397bb619227d50b048a5a3f9b2a4e30f97b97 | 78123513fa73deae174363750fd64ab3e92a3d2a | refs/heads/master | 2020-03-18T03:06:33.682840 | 2019-10-20T15:49:12 | 2019-10-20T15:49:12 | 134,223,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.1.7 on 2019-06-17 17:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter ``Tag.category`` to a PositiveIntegerField with
    three fixed (French-labelled) choices."""
    dependencies = [
        ('main', '0012_tag_category'),
    ]
    operations = [
        migrations.AlterField(
            model_name='tag',
            name='category',
            field=models.PositiveIntegerField(choices=[(1, 'Style de musique'), (2, 'Type de performance'), (3, 'Ambiance')]),
        ),
    ]
| [
"pro.jollivier@gmail.com"
] | pro.jollivier@gmail.com |
990775905446504fb3fa261f8eacb2b0990801a4 | 5296b6666b3401ac532e9d1f6eba6549c48295c3 | /test.py | 2c7fe45d6039908eb9bf3f8bef82eb11b2fc06b2 | [] | no_license | skotin/germ | 5a2cffdc1667808647d067e400efc4c7929e0cab | e3418cb63b8d2e1bca97368d531cf245e30635be | refs/heads/master | 2020-05-31T04:47:06.607006 | 2013-01-04T04:24:57 | 2013-01-04T04:24:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | __author__ = 'Serg'
import math,pygame,time
#for i in range(0,20):
# print i%3%2
# Compare two swap idioms, each timed over ~1e6 iterations.
# (Python 2 syntax; note time.clock() was removed in Python 3.8.)
a=1
b=2
def myprint(a,b):
    # Show the current pair; the parameters shadow the globals of the same name.
    print "a=",a,", b=",b
myprint(a,b)
start = time.clock()
for i in range(1,1000000):
    # Classic three-step swap through a temporary.
    t=a
    a=b
    b=t
end = time.clock()
myprint(a,b)
print 'Time 1: %s' % (end - start)
myprint(a,b)
start = time.clock()
for i in range(1,1000000):
    # Tuple-unpacking swap.
    a,b=b,a
end = time.clock()
myprint(a,b)
print 'Time 2: %s' % (end - start)
| [
"jetbrains@Ret.local"
] | jetbrains@Ret.local |
009684393084452c5b0ff73051c5f4001fc8dcf9 | f30542a38c878de60587010cfbdfbec99acd5ba3 | /program.py | b37fe7f3cfcb7ae28db9f9b0ff8725986a301065 | [] | no_license | qetennyson/WeatherClient | 249b7f250c492113831cbf6f1c34376b41084bff | 7f2653a90c531e7f2d8cd09f28b34b3ef8c83775 | refs/heads/master | 2020-03-18T04:43:51.534031 | 2018-05-21T17:34:07 | 2018-05-21T17:34:07 | 134,302,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,535 | py | import requests
import bs4
import collections
# Record for one scraped observation: condition text, temperature (as text),
# unit scale, and the location header line.
WeatherReport = collections.namedtuple('WeatherReport',
                                       'cond, temp, scale, loc')
def main():
    """Drive the app: prompt for a zipcode, scrape the page, print a report."""
    print_the_header()
    zipcode = input("What zipcode do you want the weather for (ex. 42101)? ")
    page_html = get_html_from_web(zipcode)
    report = get_weather_from_html(page_html)
    # Positional access works, but the caller must remember the field order.
    print("The temp in this location is {}.".format(report[1]))
    # Deliberately scrambled indexes -- demonstrates how positional access
    # silently produces nonsense.
    print('The temp in {} is {} and {} {}'.format(
        report[2],
        report[0],
        report[1],
        report[3]
    ))
    # Named-field access is self-documenting and order-proof.
    print('The temp in {} is {} {} and {}'.format(
        report.loc,
        report.temp,
        report.scale,
        report.cond
    ))
def print_the_header():
    """Print the application banner (three lines plus a blank line)."""
    banner = (
        "--------------------------",
        "   LILOO'S WEATHER APP",
        "--------------------------",
        "",
    )
    print("\n".join(banner))
def get_html_from_web(zipcode):
    """Fetch the Wunderground forecast page for *zipcode* and return its HTML."""
    url = "https://www.wunderground.com/weather-forecast/{}".format(zipcode)
    return requests.get(url).text
def get_weather_from_html(html):
    """Parse the scraped page and assemble a WeatherReport."""
    soup = bs4.BeautifulSoup(html, 'html.parser')
    raw_loc = soup.find(class_='region-content-header').find('h1').get_text()
    raw_cond = soup.find(class_='condition-icon').get_text()
    # The temperature value and its unit label live under the same node.
    temp_node = soup.find(class_='wu-unit-temperature')
    raw_temp = temp_node.find(class_='wu-value').get_text()
    raw_scale = temp_node.find(class_='wu-label').get_text()
    loc = find_citystate_from_location(cleanup_text(raw_loc))
    return WeatherReport(cond=cleanup_text(raw_cond),
                         temp=cleanup_text(raw_temp),
                         scale=cleanup_text(raw_scale),
                         loc=loc)
def find_citystate_from_location(loc):
    """Return the first line of the scraped location header, trimmed."""
    first_line = loc.split("\n", 1)[0]
    return first_line.strip()
# Define a function using a text hint parameter (: str does not affect runtime)
def cleanup_text(text: str):
    """Strip surrounding whitespace; pass falsy values (None, "") through."""
    return text.strip() if text else text
if __name__ == '__main__':
    # Run the interactive CLI only when executed as a script.
    main()
| [
"quincytennyson8@gmail.com"
] | quincytennyson8@gmail.com |
def grammar(sentence):
    """Capitalize *sentence* and make sure it ends with a period.

    Bug fix: the original ``sentence[len(sentence) - 1] == '.'`` was a
    comparison, not an assignment (and str is immutable anyway), so a
    trailing comma was never replaced.  It is now swapped for a period.
    An empty string becomes ".".
    """
    sentence = sentence.capitalize()
    if sentence.endswith(','):
        # Strings are immutable: build a new string with '.' swapped in.
        sentence = sentence[:-1] + '.'
    else:
        sentence += '.'
    return sentence
| [
"studentloaner2@Admins-MacBook-Pro.local"
] | studentloaner2@Admins-MacBook-Pro.local |
04fbafe84494848f4aa99009abefa1c69bb5d05d | b8b6214ee3a21534d736b0c7ea42ab3d82394c15 | /python_fund/list_advanced/even_numbers.py | 3da254265d46162fd360edc7c7034dcb49940a10 | [] | no_license | calendula547/python_fundamentals_2020 | 10b232b75e4fb6e0f5a97dabf5b10d214b1e6b31 | bf735bdab22b4b2d3ea53c90786ad18051e22cb8 | refs/heads/main | 2023-04-11T23:25:16.826902 | 2021-04-28T07:18:48 | 2021-04-28T07:18:48 | 362,365,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | nums = list(map(int, (input().split(", "))))
# Indices of the even values; the comprehension replaces the manual
# range(len(...)) index loop (same output, clearer and idiomatic).
even_nums_indices = [i for i, num in enumerate(nums) if num % 2 == 0]
print(even_nums_indices)
| [
"nevzaharieva@gmail.com"
] | nevzaharieva@gmail.com |
233ed265bef7e632cc81bca6d416bb261f3e9c21 | e4689f203b51ea5f987caf3487a4dc51f770a0ae | /converter/data_check.py | 7adc87cf41f82f928e249a70f802a9399d1097e6 | [
"MIT"
] | permissive | shijun18/Spine_Seg | b93a1e4a9185b05b0cfdeaaf382b502c0e7763a5 | 90c41d8ee08235c43bd3a5236da5a0ee7066fced | refs/heads/main | 2023-06-10T11:04:43.420324 | 2021-07-04T08:00:39 | 2021-07-04T08:00:39 | 371,382,899 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | import os
import numpy as np
import json
import pandas as pd
from utils import hdf5_reader
def data_check(input_path, annotation_list):
    """Scan every HDF5 case, dump per-case stats to CSV and print label counts."""
    total_slices = 0
    rows = []
    label_values = []
    for entry in os.scandir(input_path):
        print(entry.name)
        img = hdf5_reader(entry.path, 'image')
        lab = hdf5_reader(entry.path, 'label')
        print(img.shape)
        total_slices += img.shape[0]
        print(np.max(img), np.min(img))
        print(np.unique(lab))
        # Keep every label value except the first (smallest) one --
        # presumably the background class; verify against the dataset.
        label_values.extend(list(np.unique(lab))[1:])
        rows.append([entry.name, img.shape[0], np.max(img), np.min(img)])
    csv_file = pd.DataFrame(columns=['id', 'slices_num', 'max', 'min'], data=rows)
    csv_file.to_csv('./data_check.csv', index=False)
    print('total slice: %d' % total_slices)
    for idx, name in enumerate(annotation_list):
        print('%s : %d' % (name, label_values.count(idx + 1)))
def cal_mean_std(data_path):
    """Print the global voxel mean/std over every 'image' volume in *data_path*.

    Performance fix: the original extended a Python list element by element
    with numpy scalars (huge memory/time cost on large datasets);
    concatenating the flattened arrays directly computes the same values.
    """
    arrays = [hdf5_reader(item.path, 'image').flatten()
              for item in os.scandir(data_path)]
    # Empty directory degrades to an empty array (mean/std print nan, as before).
    image = np.concatenate(arrays) if arrays else np.array([])
    print('mean:%.3f' % np.mean(image))
    print('std:%.3f' % np.std(image))
if __name__ == "__main__":
    # Dataset locations and annotation names come from the static config file.
    json_file = './static_files/spine.json'
    with open(json_file, 'r') as fp:
        info = json.load(fp)
    # data_check(info['npy_path'],info['annotation_list'])
cal_mean_std(info['npy_path']) | [
"46990068+shijun18@users.noreply.github.com"
] | 46990068+shijun18@users.noreply.github.com |
cd5164c69ee16da41cde15a50b45625ba1443c1f | 1d6c1eec6a8f6ba732c6dd5218f80403532313b8 | /nums.py | 7bbbed0312528b953279dab2feae74f5cd225730 | [] | no_license | Quitedeer/my_python | 816713a57194888f86a914a715c6e8f3e6aa9b58 | 2ac39087036400ce92c08c33c77aae1373b8e1ec | refs/heads/master | 2020-07-25T00:21:58.704160 | 2019-11-17T22:33:25 | 2019-11-17T22:33:25 | 208,097,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | s = 'В разные эпохи и у разных народов число\
Пи имело разное значение. Например,\
в Древнем Египте оно равнялось 3.1604 \
у индусов оно приобрело значение 3.162 \
китайцы пользовались числом, равным 3.1459 \
Буквенное обозначение число Пи получило только \
в 1706 году – оно происходит от начальных букв \
двух греческих слов, означающих окружность и \
периметр. Буквой π число наделил математик Джонс,\
а прочно вошла в математику она уже в 1737 году.'
# One-pass scanner that extracts every space-terminated number from ``s``.
# ``a`` is the state flag: 0 = looking for the first digit of a number,
# 1 = inside a number (copy every non-space character until the next space).
a=0
b=[]
# NOTE(review): the accumulator starts with a leading space; float() accepts
# it (" 3.16" parses fine), but an empty string would be cleaner.
s1 = ' '
for i in range(len(s)):
    if a == 0:
        if s[i].isdigit():
            # First digit found -- start collecting a new number.
            s1 += s[i]
            a=1
    else:
        if s[i] != ' ':
            # Still inside the number (any non-space character is copied).
            s1 += s[i]
        else:
            # A space ends the number: convert it and reset the scanner.
            # NOTE(review): a number at the very end of the string (with no
            # trailing space) would be dropped -- confirm that is intended.
            a=0
            b+=[float(s1)]
            s1=''
print(b)
print(len(b))
print(max(b))
| [
"varya.dementjeva@gmail.com"
] | varya.dementjeva@gmail.com |
aff3c2c6450bef98b974e283d127f004593fee34 | db918436073a11388d9f87f71c0567a40a15ff57 | /app/__init__.py | 830f20e2be2303dae5b2101bf680549cea718f4d | [] | no_license | githubdelegate/websocket_test | 822815f411926def0a7aea8f549546bb9f373dc3 | c408c84681702d2648527530f266e3565d47d49d | refs/heads/master | 2020-03-20T08:43:52.630897 | 2018-06-26T06:09:01 | 2018-06-26T06:09:01 | 137,317,353 | 0 | 0 | null | 2018-08-21T00:33:55 | 2018-06-14T06:43:25 | HTML | UTF-8 | Python | false | false | 479 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Single module-level Flask app shared by the blueprints below.
app = Flask(__name__)
app.debug = True
# NOTE(review): DB credentials are hard-coded here -- move to config/env vars.
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:root@127.0.0.1:3306/movie"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)
# NOTE(review): Flask-SQLAlchemy's create_all() normally takes the app as a
# keyword (or an app context); passing it positionally may bind to a different
# parameter -- confirm against the installed Flask-SQLAlchemy version.
db.create_all(app)
# Blueprint imports are placed after app/db creation -- presumably so the
# blueprint modules can import ``app``/``db`` without a circular import.
from app.home import home as home_blueprint
from app.admin import admin as admin_blueprint
app.register_blueprint(home_blueprint)
app.register_blueprint(admin_blueprint, url_prefix="/admin")
| [
"ostmail@163.com"
] | ostmail@163.com |
29aa7eefb7323c5953972bcecbf05797b238b684 | e42cce21fbb3c4fe3f271c2029d9659270a968ab | /vmrunapi/vmrunapi.py | cde0c05a165dbfc2cd3c7b87f6803f601bfd2453 | [] | no_license | cloudbase/maas-hacks | d086a91338e45121dafb33734ba4977e31851dbc | 0e2cc5537ff64376505c1e9e77dcdf3657fc4d78 | refs/heads/master | 2016-09-06T13:02:15.808249 | 2014-04-30T00:24:58 | 2014-04-30T00:24:58 | 17,869,386 | 5 | 0 | null | 2014-05-06T01:23:22 | 2014-03-18T14:43:58 | Python | UTF-8 | Python | false | false | 3,400 | py | #!/usr/bin/python
import flask
import os
import re
import subprocess
import sys
if sys.platform == 'win32':
from win32com.shell import shell
from win32com.shell import shellcon
app = flask.Flask(__name__)
STARTED = "started"
STOPPED = "stopped"
def _get_matching_vmx_path(path, mac_address):
mac_address_re = re.compile(r'^ethernet(\d+)\.address(\s*)=(\s*)\"%s\"$' %
mac_address.upper())
for root, dirs, file_names in os.walk(path):
for file_name in file_names:
if os.path.splitext(file_name)[1].lower() == '.vmx':
vmx_path = os.path.join(root, file_name)
with open(vmx_path, 'rb') as f:
for l in f:
if mac_address_re.match(l):
return vmx_path
def _get_vmx_base_path():
if sys.platform == 'darwin':
return os.path.expanduser("~/Documents/Virtual Machines")
elif sys.platform == 'win32':
documents_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL,
None, 0)
return os.path.join(documents_dir, "Virtual Machines")
else:
return os.path.expanduser("~/vmware")
def _get_vmrun():
if sys.platform == 'darwin':
return ("/Applications/VMware Fusion.app/Contents/Library/vmrun",
"fusion")
else:
# Make sure to have vmrun in the PATH
return ("vmrun", "ws")
def _execute_process(args):
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
(out, err) = p.communicate()
return (out, err, p.returncode)
def _exec_vmrun_cmd(cmd, vmx_path=None):
    """Run a vmrun subcommand, optionally targeting *vmx_path*; return stdout."""
    vmrun_path, vmrun_type = _get_vmrun()
    cmdline = [vmrun_path, "-T", vmrun_type, cmd]
    if vmx_path:
        cmdline.append(vmx_path)
    out, err, exit_code = _execute_process(cmdline)
    if exit_code:
        raise Exception("vmrun failed: %s" % out)
    return out
@app.route('/vmrun/vm/find_by_mac_address/<string:mac_address>',
           methods = ['GET'])
def get_vmx_path_bymac_address(mac_address):
    """Return the .vmx path whose NIC matches *mac_address*; 404 when absent."""
    vmx_path = _get_matching_vmx_path(_get_vmx_base_path(), mac_address)
    if vmx_path:
        return vmx_path
    flask.abort(404)
def _get_json_vmx_path():
    """Extract and validate the 'vmx_path' field from the JSON request body.

    Aborts with 400 for a missing body/field and 404 for a nonexistent path.
    """
    body = flask.request.json
    if not body:
        flask.abort(400)
    vmx_path = body.get('vmx_path')
    if not vmx_path:
        flask.abort(400)
    if not os.path.exists(vmx_path):
        flask.abort(404)
    return vmx_path
@app.route('/vmrun/vm/start', methods = ['POST'])
def start_vm():
    """Power on the VM named in the request body."""
    _exec_vmrun_cmd("start", _get_json_vmx_path())
    return STARTED
@app.route('/vmrun/vm/stop', methods = ['POST'])
def stop_vm():
    """Power off the VM named in the request body.

    Bug fix: this handler previously returned STARTED after a successful
    stop; it now reports STOPPED, matching what /status reports afterwards.
    """
    vmx_path = _get_json_vmx_path()
    _exec_vmrun_cmd("stop", vmx_path)
    return STOPPED
@app.route('/vmrun/vm/status', methods = ['POST'])
def get_vm_status():
    """Report whether the VM named in the request body is currently running."""
    vmx_path = _get_json_vmx_path()
    # Drop the first and last entries of 'vmrun list' output (mirrors the
    # original [1:-1] slicing of the line-split text).
    running = _exec_vmrun_cmd("list").split("\n")[1:-1]
    return STARTED if vmx_path in running else STOPPED
if __name__ == '__main__':
    # NOTE(review): binds on all interfaces with debug=True -- do not expose
    # this unauthenticated VM-control API on untrusted networks.
    app.run(host="0.0.0.0", port=6000, debug = True)
| [
"apilotti@cloudbasesolutions.com"
] | apilotti@cloudbasesolutions.com |
5cd4b51f31296a5d47ca6420b2a7f882817ae9de | ed29a3788ecd26e9e16a39ea0bf69ce25d286d3d | /Codewars/level7/remove_consecutive_duplicate_words.py | 17ffdee99b6a2b82b69be24b78841b5ddfb59f00 | [] | no_license | Bradley94/misc-theory-work | c01059e22cfb0c8f1223a935b2d44f7c33ae6e7b | 7f95ba25de5326d19ce2405afd3f6fbfbddb0b79 | refs/heads/master | 2023-01-03T05:03:25.470327 | 2020-11-02T16:48:17 | 2020-11-02T16:48:17 | 295,984,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | """
Your task is to remove all consecutive duplicate words from string, leaving only first words entries. For example:
"alpha beta beta gamma gamma gamma delta alpha beta beta gamma gamma gamma delta"
--> "alpha beta gamma delta alpha beta gamma delta"
"""
def remove_consecutive_duplicates(s):
    """Collapse runs of consecutive duplicate words, keeping the first of each.

    The original also tested ``word not in results`` (an O(n) scan per word,
    i.e. accidental quadratic time); the only condition that actually decides
    the outcome is whether the word repeats the immediately preceding one.
    """
    results = []
    for word in s.split():
        # Append unless it repeats the word just emitted.
        if not results or results[-1] != word:
            results.append(word)
    return ' '.join(results)
"""
import codewars_test as test
# TODO Write tests
import solution # or from solution import example
# test.assert_equals(actual, expected, [optional] message)
@test.describe("Example")
def test_group():
@test.it("test case")
def test_case():
test.assert_equals(remove_consecutive_duplicates('alpha beta beta gamma gamma gamma delta alpha beta beta gamma gamma gamma delta'), 'alpha beta gamma delta alpha beta gamma delta');
"""
| [
"noreply@github.com"
] | noreply@github.com |
b5f2d8b366ba2b11e7364e206c5f48149a027e36 | 9596182c6505cb4b2b746b501d799eb3e083d479 | /python/PZEntityManager.py | 35f29a00d900933950167a96edb5b480db6c01f8 | [] | no_license | ColinGilbert/projectzombie | 2b55361c6f1c5d41d82d2fb03d200fd4e3be2f19 | af772cd66fabf899eb124602dcbd4e6c4a21b467 | refs/heads/master | 2021-01-22T02:07:58.275092 | 2015-04-06T00:57:57 | 2015-04-06T00:57:57 | 33,460,671 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,744 | py | '''
Created on Jul 2, 2010
@author: gnulinux
'''
import ogre.renderer.OGRE as ogre
#from cgkit.cgtypes import vec3, quat
from PZEntity import Entity
import numpy as np
from array import array
from PFieldPathFinding import PFieldPathFinder
import copy
class EntityManager():
    '''
    Owns the simulation-side entity array: creation, per-frame updates via the
    potential-field path finder, and hand-off to the render manager.
    '''
    def __init__(self,scaleDict):
        '''
        Keep the map scale parameters; the path finder itself is built
        lazily in loadMap().
        '''
        #self._entList = []
        #self._rdrEntManager = renderEntityManager
        #self._pff = 
        self.scaleDict = scaleDict
    def update(self, sw, dt, thrustOffset):
        # Advance all entities through the potential-field solver, feeding in
        # the live bullet entities (setBulletManager() must be called first).
        self._pff.updateEnts(self._entArray, self._bulletMgr.bulletEnts, thrustOffset)
        #self._rdrEntManager.updateEnts(self._entList, dt)
    def update2(self, dt):
        # Variant update: path-field step, then a constant climb along each
        # entity's local z axis, then push the array to the render manager.
        # NOTE(review): self._rdrEntManager is never assigned anywhere in this
        # class (only in commented-out code) -- this method would raise
        # AttributeError as written; confirm who sets it.
        self._pff.updateEnts(self._entArray)
        for x in self._entArray:
            ent = x[0]
            #if ent._climb:
            # Move 2*130 units per second along the entity's local z axis.
            ent._worldPos = ent._worldPos + ent._worldOrient.zAxis()*2*130*dt
            #send out the msgs
            #a = [i, [ent._worldPos.x, ent._worldPos.y, ent._worldPos.z], [ent._worldOrient.x, ent._worldOrient.y, ent._worldOrient.z, ent._worldOrient.w]]
        self._rdrEntManager.update2(self._entArray)
    def setScaleDict(self, scaleDict):
        # NOTE(review): stores to self._scaleDict, but __init__ and loadMap()
        # use self.scaleDict -- this setter therefore never affects loadMap().
        self._scaleDict = scaleDict
    def reset(self):
        # Restore every entity's spawn position/orientation saved by
        # createEntities().
        for ent, copy in zip(self._entArray, self._entCopy):
            ent[0]._worldPos = copy[0]
            ent[0]._worldOrient = copy[1]
    def loadMap(self):
        # Build the path finder from the obstacle bitmap and register the
        # current entity array with it (createEntities() must run first).
        self._pff = PFieldPathFinder(self.scaleDict)
        self._pff.loadMaps("city3_obs.png")
        #self._pff.loadMaps("cityblockterrain_new_obs.png")
        self._pff.initEnts(self._entArray)
    def createEntities(self, pos, orient, resource="ninja.mesh"):
        # ``pos`` is a 3-sequence of per-axis coordinate arrays; one Entity is
        # spawned per (x, y, z) triple, all sharing the given orientation.
        numOfEnts = pos[0].shape[0]
        self._entArray = np.empty([numOfEnts, 1], type(Entity))
        self._entCopy = []
        for i, (ii, jj, kk) in enumerate(zip(pos[0], pos[1], pos[2])):
            wp = ogre.Vector3(float(ii), float(jj), float(kk))
            og = ogre.Quaternion(orient.w, orient.x, orient.y, orient.z)
            self._entArray[i] = Entity(resource, i, worldPos = wp, o=og)
            self._entCopy.append((wp, og))
        #self._rdrEntManager.createEntities(self._entArray)
    def setBulletManager(self, bulletManager):
        # Source of the bullet entities consumed by update().
        self._bulletMgr = bulletManager
    def createEntity(self,resource="ninja.mesh"):
        # Stubbed out -- single-entity creation is disabled in this build.
        #ent = Entity(resource,len(self._entList),worldPos=initParams[0],orient=initParams[1])
        #self._entList.append(ent)
        #self._rdrEntManager.insert(ent,inspect=False)
        return;
| [
"llwijk@200bf698-8c57-11dd-a017-db29c870d619"
] | llwijk@200bf698-8c57-11dd-a017-db29c870d619 |
8cc39834a3986a41c0b6c4717eda289d67aa0f2a | 7d3cb9e6ac0f2a0f217fb8ad77076fd4f719a437 | /xen_signature/apps/pdf_to_image/migrations/0003_auto_20181020_1658.py | 75cf4f4498115f51b134898cac32d0c1bc38dea3 | [] | no_license | FlashBanistan/django-xen-signature | b88b0698b00390e019ebb419d74043f1e36777ba | b390e9aa069c89021e63e41a554489ccf9d685a5 | refs/heads/master | 2020-04-02T06:11:24.486660 | 2018-10-26T17:17:20 | 2018-10-26T17:17:20 | 154,135,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # Generated by Django 2.1.2 on 2018-10-20 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: rename DocumentImage's image_height/image_width to
    height/width and point ImageField's height_field/width_field at them."""
    dependencies = [
        ('pdf_to_image', '0002_auto_20181020_1657'),
    ]
    operations = [
        migrations.RenameField(
            model_name='documentimage',
            old_name='image_height',
            new_name='height',
        ),
        migrations.RenameField(
            model_name='documentimage',
            old_name='image_width',
            new_name='width',
        ),
        migrations.AlterField(
            model_name='documentimage',
            name='image',
            field=models.ImageField(height_field='height', upload_to='', width_field='width'),
        ),
    ]
| [
"FlashBanistan66@gmail.com"
] | FlashBanistan66@gmail.com |
01b19daef8aab2a869f8f843620617c2c27f6912 | 529e713a78e82de2ae5d44cfb8ef209e0894d72a | /numpy-rng/arrays.py | 9dd8d7b53619dbb04a4f83b22c11f636bd268b8c | [
"MIT"
] | permissive | realpython/materials | cd2f548276be2c82f134ca03eadb1cd279e0f26e | d2d62756d3854f54a12a767f2bf9470486c0ceef | refs/heads/master | 2023-09-05T22:12:29.806738 | 2023-08-31T20:56:28 | 2023-08-31T20:56:28 | 132,374,697 | 4,678 | 6,482 | MIT | 2023-09-12T22:22:06 | 2018-05-06T20:46:18 | HTML | UTF-8 | Python | false | false | 145 | py | import numpy as np
# Draw uniform [0, 1) samples of three different shapes from one generator.
rng = np.random.default_rng()
for shape in [(5,), (5, 3), (3, 4, 2)]:
    print(rng.random(size=shape))
| [
"noreply@github.com"
] | noreply@github.com |
c8c91b8f93916d59a46e5052ed5bf42d766b5c99 | e914da03391c81b69ae47c3dfaabb119259eb66f | /aon_decoder.py | 4f5b351c967cae4c175b18aac6ed5d606fc3f548 | [] | no_license | koder-ua/python_practice | 25f67e7c2333c0f96a2a711947e87951769570db | a68b8fc9c12e841b7355c745db6d104205ea568f | refs/heads/master | 2021-01-22T04:22:58.642582 | 2015-12-15T14:16:40 | 2015-12-15T14:16:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | #!/usr/bin/env python
# -*- coding:utf8 -*-
"""
Homework for Automatic Number Identification (ANI)
https://github.com/koder-ua/python-classes/blob/master/slides/pdf/FF_tasks.pdf
Slide #7
"""
def decode(string):
    """
    ANI decoder:
    - combine repeated characters (2333# -> 3)
    - remove single characters (1234 -> None)
    - repeat last character before "##" (33## -> 33")
    :param string: string
    :return string: processed string, or None for an invalid sequence
    """
    from itertools import groupby
    # Collapse each run of equal characters to a single character and drop
    # length-1 runs entirely -- the stdlib equivalent of the hand-rolled
    # n/k index juggling (the original comment already hinted at groupby).
    string = "".join(char for char, run in groupby(string)
                     if len(list(run)) > 1)
    result = ""
    for i, v in enumerate(string):
        if v == "#":
            if i == 0 and len(string) > 1:  # a leading '#' is ignored
                continue
            elif i == 0:
                return None  # the whole sequence collapsed to '#'
            else:
                # '#' repeats the character that precedes it.
                result += string[i - 1]
        else:
            result += string[i]
    return result
def test_decode():
    """Regression cases for decode(); any mismatch raises AssertionError."""
    cases = [
        ("", ""),
        ("1", ""),
        ("11111", "1"),
        ("11#", "1"),
        ("11##", "11"),
        ("11122234###55", "1225"),
        ("##", None),
        ("12345##", None),
        ("221133444##", "21344"),
        ("###33###22##", "3322"),
        ("###33###22##1#", "3322"),
    ]
    for given, expected in cases:
        assert decode(given) == expected
    print("Passed successfully")
def main():
    """Entry point: run the self-tests and return a zero exit status."""
    # Any failing assertion inside test_decode() aborts before the return.
    test_decode()
    return 0
if __name__ == "__main__":
    exit(main())
| [
"vitaliy@kulanov.org.ua"
] | vitaliy@kulanov.org.ua |
c2fa064613a1f526d84a65e037a0b775ba7bfa1e | 97aa750d98b33fb015c5b01e1c9f1608d1bd2248 | /jollycoin/transaction.py | 707c60cc90ece59eb9deec3a42c0fe9fc679fa97 | [
"MIT"
] | permissive | jollycoin/jollycoin | 5e9cdc6a04e8378c67a305f475b793203aa46eb3 | 14151e4445948201bc8ab77e9a8d423ad16ae950 | refs/heads/master | 2022-01-07T06:55:00.379236 | 2019-05-23T20:00:09 | 2019-05-23T20:00:09 | 108,996,695 | 0 | 0 | MIT | 2019-05-23T20:00:10 | 2017-10-31T13:05:05 | null | UTF-8 | Python | false | false | 6,818 | py | from typing import TypeVar, Dict
from decimal import Decimal
from datetime import datetime
from collections import OrderedDict
import json
import random
from . import crypto
# we require it defined like this because of python3.6
# it will be overwritten once Transaction class is defined
Transaction = TypeVar('Transaction')
class TransactionError(Exception):
    """Raised when a transaction's hash or signature fails verification."""
    pass
class Transaction:
    """A single signed value transfer.

    The wire format is the JSON object produced by ``serialize()``.
    ``hash`` is the SHA-256 of the payload *including* the signature, while
    the signature itself covers the payload *without* signature/hash.
    """
    def __init__(self: Transaction,
                 version: str,
                 id_: str,
                 time_: str,
                 sender_address: str,
                 recipient_address: str,
                 sender_public_key: str,
                 amount: int,
                 fee: int,
                 signature: str,
                 hash_: str,
                 check: bool=True):
        # NOTE(review): assert is stripped under ``python -O``; raising
        # TransactionError would be a sturdier version check.
        assert version == '1.0'
        self.version = version
        self.id = id_
        self.time = time_
        self.sender_address = sender_address
        self.recipient_address = recipient_address
        self.sender_public_key = sender_public_key
        # Amounts are stored as ints; None is allowed for unfinished
        # transactions (see test helpers that pass check=False).
        self.amount = None if amount is None else int(amount)
        self.fee = None if fee is None else int(fee)
        self.signature = signature
        self.hash = hash_
        if check:
            if not self.verify_hash():
                raise TransactionError('invalid hash')
            if not self.verify_signature():
                raise TransactionError('invalid signature')
    @classmethod
    def gen_random_id(cls) -> str:
        """Return a random 64-hex-character transaction id."""
        # Bug fix: randint is inclusive, so the old upper bound 2**256 could
        # (with negligible but nonzero probability) produce a value that does
        # not fit into 32 bytes and makes to_bytes raise OverflowError.
        r = random.randint(0, 2 ** 256 - 1)
        r = r.to_bytes(32, byteorder='big')
        r = crypto.sha256(r)
        r = r.hexdigest()
        return r
    @classmethod
    def get_time_now(cls) -> str:
        """Return the current UTC time as an ISO-8601 string."""
        return datetime.utcnow().isoformat()
    def to_dict(self: Transaction) -> OrderedDict:
        """Return the full wire payload (all fields) as an ordered mapping."""
        data = OrderedDict([
            ['version', self.version],
            ['id', self.id],
            ['time', self.time],
            ['sender_address', self.sender_address],
            ['recipient_address', self.recipient_address],
            ['sender_public_key', self.sender_public_key],
            ['amount', int(self.amount)],
            ['fee', int(self.fee)],
            ['signature', self.signature],
            ['hash', self.hash],
        ])
        return data
    @classmethod
    def from_dict(cls: type, data: Dict, check: bool=True) -> Transaction:
        """Build a Transaction from ``to_dict()``-shaped data.

        Validates hash and signature unless ``check`` is False.
        """
        tx = Transaction(
            version=data['version'],
            id_=data['id'],
            time_=data['time'],
            sender_address=data['sender_address'],
            recipient_address=data['recipient_address'],
            sender_public_key=data['sender_public_key'],
            amount=data['amount'],
            fee=data['fee'],
            signature=data['signature'],
            hash_=data['hash'],
            check=check,
        )
        return tx
    def serialize(self: Transaction) -> str:
        """JSON-encode the full payload."""
        data = self.to_dict()
        message = json.dumps(data)
        return message
    @classmethod
    def deserialize(cls: type, message: str, check: bool=True) -> Transaction:
        """Parse JSON and build a (by default validated) Transaction."""
        data = json.loads(message)
        tx = Transaction.from_dict(data, check=check)
        return tx
    def verify(self: Transaction) -> bool:
        """Return True iff both the hash and the signature check out."""
        if not self.verify_hash():
            return False
        if not self.verify_signature():
            return False
        return True
    def verify_signature(self: Transaction) -> bool:
        """Check the signature over the payload without signature/hash."""
        data = OrderedDict([
            ['version', self.version],
            ['id', self.id],
            ['time', self.time],
            ['sender_address', self.sender_address],
            ['recipient_address', self.recipient_address],
            ['sender_public_key', self.sender_public_key],
            ['amount', self.amount],
            ['fee', self.fee],
            # without signature
            # without hash
        ])
        message = json.dumps(data)
        return crypto.verify_message(self.sender_public_key,
                                     self.signature,
                                     message)
    def verify_hash(self: Transaction) -> bool:
        """Return True iff the stored hash matches the recomputed one."""
        return self.hash == self.calc_hash()
    def calc_hash(self: Transaction) -> str:
        """SHA-256 (hex) of the payload including the signature, minus hash."""
        data = OrderedDict([
            ['version', self.version],
            ['id', self.id],
            ['time', self.time],
            ['sender_address', self.sender_address],
            ['recipient_address', self.recipient_address],
            ['sender_public_key', self.sender_public_key],
            ['amount', self.amount],
            ['fee', self.fee],
            ['signature', self.signature],
            # without hash
        ])
        message = json.dumps(data)
        message_bytes = message.encode()
        hash_ = crypto.sha256(message_bytes).hexdigest()
        return hash_
    def sign(self: Transaction, private_key: str) -> Transaction:
        """Sign in place with *private_key*: set signature and hash, return self.

        (Return annotation corrected from ``str`` -- this returns ``self``.)
        """
        data = OrderedDict([
            ['version', self.version],
            ['id', self.id],
            ['time', self.time],
            ['sender_address', self.sender_address],
            ['recipient_address', self.recipient_address],
            ['sender_public_key', self.sender_public_key],
            ['amount', self.amount],
            ['fee', self.fee],
            # without signature
            # without hash
        ])
        message = json.dumps(data)
        signature = crypto.sign_message(private_key, message)
        self.signature = signature
        self.hash = self.calc_hash()
        return self
def test1():
    """Smoke test: build, sign, serialize, round-trip and verify a transaction."""
    sk0, pk0, addr0 = crypto.generate_private_public_address_key()
    sk1, pk1, addr1 = crypto.generate_private_public_address_key()
    # check=False because hash/signature do not exist until sign() runs.
    tx0 = Transaction(
        version='1.0',
        id_=Transaction.gen_random_id(),
        time_=Transaction.get_time_now(),
        sender_address=addr0,
        recipient_address=addr1,
        sender_public_key=pk0,
        amount=1_000_000_000,
        fee=1_000,
        signature=None,
        hash_=None,
        check=False,
    ).sign(sk0)
    msg0 = tx0.serialize()
    print(msg0)
    tx1 = Transaction.deserialize(msg0)
    print(tx1.verify_signature())
def test2():
    """Smoke test, identical to test1.

    Consistency fix: the body was a byte-for-byte copy of test1; delegating
    removes the duplication so the two cannot drift apart, while keeping the
    behavior (and the ``test2`` entry point used by the __main__ guard).
    """
    test1()
if __name__ == '__main__':
    # Run the second (currently identical) smoke test when invoked directly.
    test2()
| [
"noreply@github.com"
] | noreply@github.com |
2cb549fab7ccf5db93a112f7980fa14fbc3ffbd0 | 8e7e51ff8b9c1103d10aa86c3d1cb446cfb25e4c | /djeniesecurity/djeniesecurity/urls.py | c409e1093e267c2e36d190bdc95028974c4ec905 | [] | no_license | huogerac/modulo4 | b2c6e07f5e2928182a03edac503d0a4468736007 | b30e056fb5a4703255982a349ed184beaea010fd | refs/heads/master | 2021-01-17T21:25:03.926382 | 2013-09-23T10:21:51 | 2013-09-23T10:21:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin registry from each installed app's admin module.
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'djeniesecurity.views.home', name='home'),
    # url(r'^djeniesecurity/', include('djeniesecurity.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
# Catch-all routes appended afterwards: cms urls first, then sms
# (legacy Django ``patterns`` helper).
urlpatterns += patterns('',
    url(r'', include('cms.urls')),
    url(r'', include('sms.urls')),
)
| [
"huogerac@gmail.com"
] | huogerac@gmail.com |
06a59e32f037c215fdd5e541b97856291bb4a2c7 | fded1e6c9cdd64bd96f0bad1d2877a65a0202631 | /src/simulacra/cluster/__init__.py | 4c576ebc5c4b160c199211499b0c315b87a4a462 | [
"MIT"
] | permissive | johnvonlzf/simulacra | 46100c33be6992b1f45d7272884689579c28bd37 | b89fd0abf59debf077a4ce4cc46d5e0c58f53b4d | refs/heads/master | 2022-01-23T23:04:08.191552 | 2018-10-14T15:53:23 | 2018-10-14T15:53:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | from .interface import *
from .job_creation import *
from .processing import *
| [
"josh.karpel@gmail.com"
] | josh.karpel@gmail.com |
cb613a9a3f6aa424cb036ac23144bd0df777b427 | e7422eae5799add31d929029cf71b4045a412dbd | /CG/lab2/lab2.py | 40e8cf9dd59dc41fd4068b6cf99e75f2ea71bb07 | [] | no_license | hhhhhhhhah/CG | a1edf5a0ac5b5b1754da231070026ae96e32e5ca | 5adb2d1c19f14101b68182e530654845cc80ea8b | refs/heads/master | 2021-08-31T23:42:50.556380 | 2017-12-23T14:56:38 | 2017-12-23T14:56:38 | 105,590,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import numpy as np
a = 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.axis('off')
# Vertices of the wedge: a square base in the z=0 plane plus one apex below
# and one above it.
v = np.array([[a, a, 0], [-a, a, 0], [-a, -a, 0], [a, -a, 0], [0, 0, -2*a/np.sqrt(2)], [0, 0, 2*a/np.sqrt(2)]])
ax.scatter3D(v[:, 0], v[:, 1], v[:, 2])
# Build the wedge faces: each triangle joins one base edge to an apex.
gr = [[v[0], v[1], v[4]], [v[0], v[3], v[4]], [v[2], v[1], v[4]], [v[2], v[3], v[4]], [v[0], v[1], v[5]], [v[0], v[3], v[5]],\
      [v[2], v[1], v[5]], [v[2], v[3], v[5]]]
face_color = [0.5, 0.5, 1]
# Add the wedge to the 3D plot.
ax.add_collection3d(Poly3DCollection(gr, facecolors=face_color, linewidths=1, edgecolors='red', alpha=0.1))
plt.show()
| [
"filbaun@gmail.com"
] | filbaun@gmail.com |
267fe225eba8e89d2d695df128a07d16d65a1f14 | 168322e08da047df57a0ea5dbecf985ee63e94a8 | /.~c9_invoke_t4F4Zb.py | a359c8dfeea3764b101c63ac100a0c2f930038ec | [] | no_license | SteveJerettMcQueen/recordtransactions | 286a4f0b4d8bb99e119864b1c9690eb730f131b9 | ce20e7c67885f6d30247957abb91d025988e652d | refs/heads/master | 2021-09-06T10:41:47.529056 | 2018-02-05T17:29:27 | 2018-02-05T17:29:27 | 112,973,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,599 | py | import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
import matplotlib.dates as mdates
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import seaborn as sns
import earnings as ear
from util import to_pivot_table
################################################################################
# Heatmaps
# Map month and year on sum
def save_heat_map_f():
fig = plt.figure(figsize=(7, 7))
d = to_pivot_table(ear.earns,'Month','Year','Net_Pay', np.sum)
ax = sns.heatmap(data=d, vmin=d.min().min(), vmax=d.max().max(),
annot=True, fmt='.2f', linewidths=.5,
cbar_kws={"shrink": .80}, cmap='BuPu')
ax.set_title('Sum Of Net Pay Per Month', fontsize=11)
ax.set_xlabel('Year', fontsize=11)
ax.set_ylabel('Month', fontsize=11)
fig = ax.get_figure()
fig.savefig('graphs/heatmap_f.svg')
# Line charts
# Chart on net pay
def save_line_chart_c():
fig, axes = plt.subplots(figsize=(15, 4))
for k, df in ear.by_workplace:
ax = df.plot(x='Check_Date', y='Net_Pay', kind='line', ax=axes, label=k)
ax.set_title('Net Pay over Time', fontsize=11)
ax.set_xlabel('Date', fontsize=11)
ax.set_ylabel('Net Pay', fontsize=11)
fig = ax.get_figure()
fig.savefig('graphs/line_chart_c.svg')
def save_line_chart_c_plotly():
data = []
for k, df in ear.by_workplace:
data.append(
go.Scatter(
name=k,
x=df['Check_Date'],
y=df['Net_Pay'],
fill='tozeroy'
)
)
layout = go.Layout(
title='Net Pay over Time',
xaxis=dict(title='Date'),
yaxis=dict(title='Net Pay'),
autosize=False,
width=1000,
height=500
)
py.image.save_as({'data': data, 'layout': layout}, 'graphs/line_chart_c_plotly.jpeg')
################################################################################
# Univariate histograms
x = ear.earns['Net_Pay']
# Histogram of net pay
def save_hist_e():
fig = plt.figure(figsize=(10, 7))
ax = sns.kdeplot(x, shade=True)
ax.set_xlabel('Net Pay', fontsize=11)
ax.set_title('Distribution of Net Pay', fontsize=11)
fig = ax.get_figure()
fig.savefig('graphs/histogram_e.svg')
################################################################################
def save_figs():
save_heat_map_f()
save_line_chart_c()
save_line_chart_c_plotly()
save_hist_e()
save_figs() | [
"sjmsj1321@yahoo.com"
] | sjmsj1321@yahoo.com |
57c5f0267b758e4eb4c42389e10c758178243ed3 | c703b8ac3b5545857f6c95efa2d61eaf7a664021 | /iPERCore/models/networks/discriminators/patch_dis.py | 83491547d5a6977f9e719ae8d16041dd07558ae4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | iPERDance/iPERCore | d29681d229b3098b3517b1abf4f7ea65f579de73 | fcf9a18ffd66bf3fdd3eea4153a3bc4785131848 | refs/heads/main | 2023-07-30T15:04:15.835396 | 2023-04-12T14:21:23 | 2023-04-12T14:21:23 | 313,664,064 | 2,520 | 339 | Apache-2.0 | 2023-05-12T03:26:52 | 2020-11-17T15:36:25 | Python | UTF-8 | Python | false | false | 2,757 | py | # Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import torch
import torch.nn as nn
import functools
class PatchDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=32, n_layers=3, max_nf_mult=8,
norm_type="batch", use_sigmoid=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(PatchDiscriminator, self).__init__()
norm_layer = self._get_norm_layer(norm_type)
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, max_nf_mult)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, max_nf_mult)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def _get_norm_layer(self, norm_type="batch"):
if norm_type == "batch":
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == "instance":
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == "batchnorm2d":
norm_layer = nn.BatchNorm2d
else:
raise NotImplementedError(f"normalization layer [{norm_type}] is not found")
return norm_layer
def forward(self, input):
"""Standard forward."""
return self.model(input)
| [
"liuwen@shanghaitech.edu.cn"
] | liuwen@shanghaitech.edu.cn |
ac134464a47b592c7910c67633b8e7a152effe85 | 0c91c7415e7c6dec237c60283e23abcc7b797fe1 | /python/python-practice/call_func.py | e8a688664e0223dd6ce0ce6ee5c647e5ad2cfe35 | [] | no_license | bookpark/Fastcampus | ac6c1280e0c91d33942717a50a52083284cf410b | 5eea00aa5bd41d1a7da75995899c05516a5f235c | refs/heads/master | 2018-07-11T08:38:59.633991 | 2018-06-01T08:02:17 | 2018-06-01T08:02:17 | 104,309,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def print_func():
print('call func')
def execute(f):
f()
print(print_func)
print(print_func())
def call_string():
return 'call string'
def capital(f):
capitalize.f()
print(execute)
| [
"bkbkgg@gmail.com"
] | bkbkgg@gmail.com |
8f01e04333af1cea09f3e51b223b8fc8dda8dd49 | 4cf04ef3c4b894270bd5ea93e8c104a08be055ef | /src/config.py | f74c386c3229cc8bca225ea4faee27a4999d20f5 | [] | no_license | CF-chen-feng-CF/yolov3.pytorch | 36df1d5827bad40da352e60252d89d9dd577e39d | a7a8a5b6b8e893651309d081fb4fcc5c4d06ce9c | refs/heads/master | 2021-05-17T19:34:23.776109 | 2019-02-21T08:31:15 | 2019-02-21T08:31:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | import os
import json
opj = os.path.join
ROOT = '/home/penggao/projects/detection/yolo3'
LOG_ROOT = opj(ROOT, 'logs')
CKPT_ROOT = opj(ROOT, 'checkpoints')
def parse_names(path):
"""Parse names .json"""
with open(path) as json_data:
d = json.load(json_data)
return d
def create_category_mapping(d):
mapping = dict()
for idx, id in enumerate(d):
mapping[id] = idx
return mapping
# datasets config
datasets = {
'coco': {
'num_classes': 80,
'train_imgs': '/media/data_2/COCO/2017/val2017',
'val_imgs': '/media/data_2/COCO/2017/val2017',
'train_anno': '/media/data_2/COCO/2017/annotations/instances_val2017.json',
'val_anno': '/media/data_2/COCO/2017/annotations/instances_val2017.json',
'category_id_mapping': create_category_mapping([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]),
'class_names': ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'sofa', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tvmonitor', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
},
'voc': {
'num_classes': 20,
'train_imgs': '/media/data_2/VOCdevkit/voc_train.txt',
'val_imgs': '/media/data_2/VOCdevkit/2007_test.txt',
'class_names': ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'],
'result_dir': opj(ROOT, 'metrics/voc/detections')
},
'linemod': {
'num_classes': 1,
'root': '/media/data_2/SIXDB/hinterstoisser/test/'
}
}
# network config
network = {
'voc': {
'cfg': opj(ROOT, 'lib/yolov3-voc.cfg')
},
'coco': {
'cfg': opj(ROOT, 'lib/yolov3-coco.cfg')
},
'linemod': {
'cfg': opj(ROOT, 'lib/yolov3-linemod.cfg')
}
}
# evaluation config
evaluate = {
'result_dir': opj(ROOT, 'assets/results')
}
colors = {} | [
"370095872@qq.com"
] | 370095872@qq.com |
5cc40f6f01d9530255a06e81239788b2eae2fb46 | 981ecc9cf59dd6f839c3e40d26601efb1d073558 | /src/face_recognition/youtube_dl/aes.py | c5bb3c4ef1561847a1025a0b35095a2224582efe | [
"MIT"
] | permissive | lodemo/CATANA | 469e0684b816f09ac74f186552b463cc77db369e | a349f460772511ccbb16429b40bfb50f774d45d4 | refs/heads/master | 2023-03-30T04:07:12.070332 | 2021-02-03T21:47:32 | 2021-02-03T21:47:32 | 102,767,095 | 12 | 6 | MIT | 2023-03-24T21:55:24 | 2017-09-07T17:36:45 | Jupyter Notebook | UTF-8 | Python | false | false | 16,123 | py | from __future__ import unicode_literals
import base64
from math import ceil
from .utils import bytes_to_intlist, intlist_to_bytes
BLOCK_SIZE_BYTES = 16
def aes_ctr_decrypt(data, key, counter):
"""
Decrypt with aes in counter mode
@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block)
returns the next counter block
@returns {int[]} decrypted data
"""
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
decrypted_data = []
for i in range(block_count):
counter_block = counter.next_value()
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
block += [0] * (BLOCK_SIZE_BYTES - len(block))
cipher_counter_block = aes_encrypt(counter_block, expanded_key)
decrypted_data += xor(block, cipher_counter_block)
decrypted_data = decrypted_data[:len(data)]
return decrypted_data
def aes_cbc_decrypt(data, key, iv):
"""
Decrypt with aes in CBC mode
@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte IV
@returns {int[]} decrypted data
"""
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
decrypted_data = []
previous_cipher_block = iv
for i in range(block_count):
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
block += [0] * (BLOCK_SIZE_BYTES - len(block))
decrypted_block = aes_decrypt(block, expanded_key)
decrypted_data += xor(decrypted_block, previous_cipher_block)
previous_cipher_block = block
decrypted_data = decrypted_data[:len(data)]
return decrypted_data
def aes_cbc_encrypt(data, key, iv):
"""
Encrypt with aes in CBC mode. Using PKCS#7 padding
@param {int[]} data cleartext
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte IV
@returns {int[]} encrypted data
"""
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
encrypted_data = []
previous_cipher_block = iv
for i in range(block_count):
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
remaining_length = BLOCK_SIZE_BYTES - len(block)
block += [remaining_length] * remaining_length
mixed_block = xor(block, previous_cipher_block)
encrypted_block = aes_encrypt(mixed_block, expanded_key)
encrypted_data += encrypted_block
previous_cipher_block = encrypted_block
return encrypted_data
def key_expansion(data):
"""
Generate key schedule
@param {int[]} data 16/24/32-Byte cipher key
@returns {int[]} 176/208/240-Byte expanded key
"""
data = data[:] # copy
rcon_iteration = 1
key_size_bytes = len(data)
expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
while len(data) < expanded_key_size_bytes:
temp = data[-4:]
temp = key_schedule_core(temp, rcon_iteration)
rcon_iteration += 1
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
for _ in range(3):
temp = data[-4:]
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
if key_size_bytes == 32:
temp = data[-4:]
temp = sub_bytes(temp)
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
temp = data[-4:]
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
data = data[:expanded_key_size_bytes]
return data
def aes_encrypt(data, expanded_key):
"""
Encrypt one block with aes
@param {int[]} data 16-Byte state
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte cipher
"""
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
for i in range(1, rounds + 1):
data = sub_bytes(data)
data = shift_rows(data)
if i != rounds:
data = mix_columns(data)
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
return data
def aes_decrypt(data, expanded_key):
"""
Decrypt one block with aes
@param {int[]} data 16-Byte cipher
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte state
"""
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
for i in range(rounds, 0, -1):
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
if i != rounds:
data = mix_columns_inv(data)
data = shift_rows_inv(data)
data = sub_bytes_inv(data)
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
return data
def aes_decrypt_text(data, password, key_size_bytes):
"""
Decrypt text
- The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
- The cipher key is retrieved by encrypting the first 16 Byte of 'password'
with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
- Mode of operation is 'counter'
@param {str} data Base64 encoded string
@param {str,unicode} password Password (will be encoded with utf-8)
@param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
@returns {str} Decrypted data
"""
NONCE_LENGTH_BYTES = 8
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
password = bytes_to_intlist(password.encode('utf-8'))
key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
nonce = data[:NONCE_LENGTH_BYTES]
cipher = data[NONCE_LENGTH_BYTES:]
class Counter(object):
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
def next_value(self):
temp = self.__value
self.__value = inc(self.__value)
return temp
decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
plaintext = intlist_to_bytes(decrypted_data)
return plaintext
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16)
SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
(0x1, 0x2, 0x3, 0x1),
(0x1, 0x1, 0x2, 0x3),
(0x3, 0x1, 0x1, 0x2))
MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
(0x9, 0xE, 0xB, 0xD),
(0xD, 0x9, 0xE, 0xB),
(0xB, 0xD, 0x9, 0xE))
RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
0x53, 0xF5, 0x04, 0x0C, 0x14, 0x3C, 0x44, 0xCC, 0x4F, 0xD1, 0x68, 0xB8, 0xD3, 0x6E, 0xB2, 0xCD,
0x4C, 0xD4, 0x67, 0xA9, 0xE0, 0x3B, 0x4D, 0xD7, 0x62, 0xA6, 0xF1, 0x08, 0x18, 0x28, 0x78, 0x88,
0x83, 0x9E, 0xB9, 0xD0, 0x6B, 0xBD, 0xDC, 0x7F, 0x81, 0x98, 0xB3, 0xCE, 0x49, 0xDB, 0x76, 0x9A,
0xB5, 0xC4, 0x57, 0xF9, 0x10, 0x30, 0x50, 0xF0, 0x0B, 0x1D, 0x27, 0x69, 0xBB, 0xD6, 0x61, 0xA3,
0xFE, 0x19, 0x2B, 0x7D, 0x87, 0x92, 0xAD, 0xEC, 0x2F, 0x71, 0x93, 0xAE, 0xE9, 0x20, 0x60, 0xA0,
0xFB, 0x16, 0x3A, 0x4E, 0xD2, 0x6D, 0xB7, 0xC2, 0x5D, 0xE7, 0x32, 0x56, 0xFA, 0x15, 0x3F, 0x41,
0xC3, 0x5E, 0xE2, 0x3D, 0x47, 0xC9, 0x40, 0xC0, 0x5B, 0xED, 0x2C, 0x74, 0x9C, 0xBF, 0xDA, 0x75,
0x9F, 0xBA, 0xD5, 0x64, 0xAC, 0xEF, 0x2A, 0x7E, 0x82, 0x9D, 0xBC, 0xDF, 0x7A, 0x8E, 0x89, 0x80,
0x9B, 0xB6, 0xC1, 0x58, 0xE8, 0x23, 0x65, 0xAF, 0xEA, 0x25, 0x6F, 0xB1, 0xC8, 0x43, 0xC5, 0x54,
0xFC, 0x1F, 0x21, 0x63, 0xA5, 0xF4, 0x07, 0x09, 0x1B, 0x2D, 0x77, 0x99, 0xB0, 0xCB, 0x46, 0xCA,
0x45, 0xCF, 0x4A, 0xDE, 0x79, 0x8B, 0x86, 0x91, 0xA8, 0xE3, 0x3E, 0x42, 0xC6, 0x51, 0xF3, 0x0E,
0x12, 0x36, 0x5A, 0xEE, 0x29, 0x7B, 0x8D, 0x8C, 0x8F, 0x8A, 0x85, 0x94, 0xA7, 0xF2, 0x0D, 0x17,
0x39, 0x4B, 0xDD, 0x7C, 0x84, 0x97, 0xA2, 0xFD, 0x1C, 0x24, 0x6C, 0xB4, 0xC7, 0x52, 0xF6, 0x01)
RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03,
0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1,
0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,
0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e,
0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38,
0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,
0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba,
0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57,
0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,
0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0,
0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7,
0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,
0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1,
0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab,
0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
def sub_bytes(data):
return [SBOX[x] for x in data]
def sub_bytes_inv(data):
return [SBOX_INV[x] for x in data]
def rotate(data):
return data[1:] + [data[0]]
def key_schedule_core(data, rcon_iteration):
data = rotate(data)
data = sub_bytes(data)
data[0] = data[0] ^ RCON[rcon_iteration]
return data
def xor(data1, data2):
return [x ^ y for x, y in zip(data1, data2)]
def rijndael_mul(a, b):
if(a == 0 or b == 0):
return 0
return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
def mix_column(data, matrix):
data_mixed = []
for row in range(4):
mixed = 0
for column in range(4):
# xor is (+) and (-)
mixed ^= rijndael_mul(data[column], matrix[row][column])
data_mixed.append(mixed)
return data_mixed
def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
data_mixed = []
for i in range(4):
column = data[i * 4: (i + 1) * 4]
data_mixed += mix_column(column, matrix)
return data_mixed
def mix_columns_inv(data):
return mix_columns(data, MIX_COLUMN_MATRIX_INV)
def shift_rows(data):
data_shifted = []
for column in range(4):
for row in range(4):
data_shifted.append(data[((column + row) & 0b11) * 4 + row])
return data_shifted
def shift_rows_inv(data):
data_shifted = []
for column in range(4):
for row in range(4):
data_shifted.append(data[((column - row) & 0b11) * 4 + row])
return data_shifted
def inc(data):
data = data[:] # copy
for i in range(len(data) - 1, -1, -1):
if data[i] == 255:
data[i] = 0
else:
data[i] = data[i] + 1
break
return data
__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
| [
"moritzlode@gmail.com"
] | moritzlode@gmail.com |
e0a7315e974496146f931f1dccb8aff89ce1264d | 1ca94f20401cc0bd33a7a935dea2f3c66776dbe4 | /users/models.py | b8680d9a1d986368544da5d9676214693646fa7a | [] | no_license | liangsongyou/news-18 | 468d06a854e3bf6b5389e6efbb2b1a812d45fef6 | 45619e32d7f950d75949912ee8c570903f6c39f3 | refs/heads/master | 2020-04-11T15:59:26.136085 | 2018-12-15T13:54:16 | 2018-12-15T13:54:16 | 161,909,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
age = models.PositiveIntegerField(default=0)
| [
"yuebei58@gmail.com"
] | yuebei58@gmail.com |
e1241643f1fdabd9675e8ec25ea0a5b2350349a4 | 62d6a37e1fb1b224b53e14a1cf151ef0571aa20f | /tests/fixtures/tests.py | abc94a63d35f2bfe008a3e1bdcf4d4b144ec1bb5 | [] | no_license | katrid/orun | 4fa0f291a1ef43f16bc1857a170fc0b2e5e06739 | bfc6dae06182124ba75b1f3761d81ba8ca387dea | refs/heads/master | 2023-08-30T03:58:34.570527 | 2023-08-09T04:05:30 | 2023-08-09T04:05:30 | 66,562,767 | 14 | 4 | null | 2023-01-06T22:29:37 | 2016-08-25T14:01:44 | Python | UTF-8 | Python | false | false | 2,024 | py | from orun.test import TestCase
from orun.apps import apps
from orun.db import connection
class FixturesTest(TestCase):
fixtures = {
'fixtures': [
'fixtures.author.csv', 'fixtures.author.tsv', 'data.xml', 'fixtures.book.tsv', 'fixtures.book.csv',
'metadata.%(db_vendor)s.sql',
],
}
def test_load_data(self):
Author = apps['fixtures.author']
Book = apps['fixtures.book']
objs = list(Author.objects.all())
self.assertEqual(len(objs), 9)
book = Book.objects.get(pk=1)
self.assertEqual(book.author.name, 'Xml Author 1')
book = Book.objects.get(pk=2)
self.assertEqual(book.author.name, 'Author 2')
def test_xml_objects(self):
Object = apps['ir.object']
obj1 = Object.objects.get_object('fixtures/xml/author/1')
self.assertEqual(obj1.name, 'fixtures/xml/author/1')
author1 = obj1.content_object
self.assertEqual(author1.name, 'Xml Author 1')
self.assertEqual(obj1.name, 'fixtures/xml/author/1')
obj2 = Object.objects.get_object('fixtures/xml/author/2')
author2 = obj2.content_object
self.assertEqual(obj2.name, 'fixtures/xml/author/2')
self.assertEqual(author2.name, 'Xml Author 2')
# test deleted
with self.assertRaises(Object.DoesNotExist):
Object.objects.get_object('fixtures/xml/author/4/delete')
Author = apps['fixtures.author']
with self.assertRaises(Author.DoesNotExist):
Author.objects.get(name='Xml Author 4')
def test_sql_fixtures(self):
with connection.cursor() as cursor:
# Testing created view
cursor.execute('''select * from books order by id''')
books = cursor.fetchall()
self.assertEqual(len(books), 2)
self.assertEqual(books[0][0], 1)
self.assertEqual(books[1][0], 2)
def test_web_fixtures(self):
View = apps['ui.view']
views = View.objects.all()
| [
"alexandre@katrid.com"
] | alexandre@katrid.com |
1e6895e6f359a03fff2e6129c7a5e162e1c1d48a | 4ad53199feb82d911bd2edbe0b5713da8c1909c1 | /pytablewriter/style/__init__.py | 6be6ff1844a0928139b11cb5ac086bac8216c4f9 | [
"MIT"
] | permissive | thombashi/pytablewriter | 9bf8b73da0eb18dba835e951021fd581958a4d12 | 49f9da777625a5b920c2c87c5e086d33d19a80d4 | refs/heads/master | 2023-08-19T05:13:15.333317 | 2023-07-01T08:03:47 | 2023-07-01T08:03:47 | 59,484,958 | 609 | 43 | MIT | 2021-09-20T15:26:45 | 2016-05-23T13:25:53 | Python | UTF-8 | Python | false | false | 1,006 | py | from dataproperty import Align, Format
from ._cell import Cell
from ._font import FontSize, FontStyle, FontWeight
from ._style import DecorationLine, Style, ThousandSeparator, VerticalAlign
from ._styler import (
GFMarkdownStyler,
HtmlStyler,
LatexStyler,
MarkdownStyler,
NullStyler,
ReStructuredTextStyler,
TextStyler,
get_align_char,
)
from ._styler_interface import StylerInterface
from ._theme import ColSeparatorStyleFilterFunc, StyleFilterFunc, Theme, fetch_theme, list_themes
__all__ = (
"Align",
"Format",
"Cell",
"FontSize",
"FontStyle",
"FontWeight",
"Style",
"ThousandSeparator",
"VerticalAlign",
"DecorationLine",
"GFMarkdownStyler",
"HtmlStyler",
"LatexStyler",
"MarkdownStyler",
"NullStyler",
"ReStructuredTextStyler",
"StylerInterface",
"TextStyler",
"ColSeparatorStyleFilterFunc",
"StyleFilterFunc",
"Theme",
"get_align_char",
"fetch_theme",
"list_themes",
)
| [
"tsuyoshi.hombashi@gmail.com"
] | tsuyoshi.hombashi@gmail.com |
08089a76a10f77d295414b1e754f50c25d3125b0 | 2f2d01957abfbbe3fc79d8727cf43a096cd454d8 | /for_opencv/document1_1.py | 5f47a319249173ac902793abb716595b42e9a74c | [] | no_license | bizhili/for_python | 61b26becd46e9ce519619e01d4a8525b5fa03519 | 727d6a384bc3f0af23c33643d35f5e20799d85ee | refs/heads/master | 2020-07-08T02:23:23.868222 | 2019-08-30T07:11:49 | 2019-08-30T07:11:49 | 203,539,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | import time
from math import sqrt
def is_prime(n):
for c in range(2,int(sqrt(n))+1):
if n%c==0:
return False
return True if n!=1 else False
def main():
filenames=('a.txt','b.txt','c.txt')
fs=[]
for f in filenames:
fs.append(open(f,'w'))
for number in range(1,10000):
if is_prime(number):
if number<100:
fs[0].write(str(number) + '\t')
elif number<1000:
fs[1].write(str(number)+'\t')
else:
fs[2].write(str(number)+'\t')
print('that s ok')
def main1():
try:
with open('li2.jpg','rb') as p1:
data=p1.read()
with open('li3.jpg','wb') as p2:
p2.write(data)
except FILENOTFOUND:
print('dadada')
print('ok')
if __name__=='__main__':
main1()
| [
"3067842904@qq.com"
] | 3067842904@qq.com |
51f25d94a6156be528cb69c1e4fa12a1e9105b24 | 752914aef43ee1f486ca4d8356a4c1f26d2a00f8 | /MLPonqm7/BOB/qm7BOB/mlp_bob_formatted.py | ec1c3fdbfb714270dca1374913d6e88def44f808 | [] | no_license | vinodrajendran001/Molecules-Prediction | 75a71465126285a3034111d9904b03788bd3b463 | 8dd8798397debdde9392c5a6c2aee2dcaa921d92 | refs/heads/master | 2021-01-10T02:22:40.918540 | 2016-02-16T10:42:47 | 2016-02-16T10:42:47 | 51,827,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,775 | py | __author__ = 'vinod'
import pickle
import cPickle as cp
import gzip
import time
import numpy as np
import theano.tensor as T
import climin.stops
import climin.initialize
import climin.project
import climin.schedule
import climin.mathadapt as ma
from breze.learn.mlpbobformatted import Mlp, FastDropoutNetwork
from sklearn.preprocessing import scale
from breze.learn.data import one_hot
import breze.learn.base
import os
import matplotlib.pyplot as plt
# Load the precomputed Bag-of-Bonds (BOB) representation of the QM7b dataset.
# The pickle holds 'B' (feature matrix), 'T' (targets) and 'P' (5 precomputed
# cross-validation folds of molecule indices).
datafile = '/home/hpc/pr63so/ga93yih2/Dataset/qm7b_bob_formatted.pkl'
dataset = pickle.load(open(datafile, 'r'))
# Fold `split` is held out for testing; the remaining 4 folds form the train set.
split = 1
P = np.hstack(dataset['P'][range(0, split)+ range(split+1, 5)].flatten())
X = dataset['B'][P]
Z = dataset['T'][P]
#only atomization energy
Z = Z[:,0]
Z = Z.reshape(Z.shape[0], 1)
# Keep an unscaled copy of the targets for reporting MAE/RMSE in original units
# (sklearn's scale() returns a new array, so this alias stays unscaled below).
train_labels = Z
Ptest = dataset['P'][split]
TX = dataset['B'][Ptest]
TZ = dataset['T'][Ptest]
TZ = TZ[:,0]
TZ = TZ.reshape(TZ.shape[0], 1)
test_labels = TZ
# Standardize the targets (zero mean, unit variance) for training stability.
Z = scale(Z, axis=0)
TZ = scale(TZ, axis=0)
# Weight matrices of the MLP layers; filled in after the model is built.
weights = []

batch_size = 25
#max_iter = max_passes * X.shape[ 0] / batch_size
max_iter = 75000000
# Report once per full pass over the training data (Python 2 integer division).
n_report = X.shape[0] / batch_size

stop = climin.stops.AfterNIterations(max_iter)
pause = climin.stops.ModuloNIterations(n_report)

# Plain gradient descent without momentum.
optimizer = 'gd', {'step_rate': 0.001, 'momentum': 0}
# 'plain' = deterministic MLP, 'fd' = fast-dropout variant (see below).
typ = 'plain'
m = Mlp(2099, [400, 100], 1, X, Z,
hidden_transfers=['tanh', 'tanh'], out_transfer='identity', loss='squared', optimizer=optimizer, batch_size=batch_size, max_iter=max_iter)
elif typ == 'fd':
m = FastDropoutNetwork(2099, [800, 800], 14, X, Z, TX, TZ,
hidden_transfers=['tanh', 'tanh'], out_transfer='identity', loss='squared',
p_dropout_inpt=.1,
p_dropout_hiddens=.2,
optimizer=optimizer, batch_size=batch_size, max_iter=max_iter)
#climin.initialize.randomize_normal(m.parameters.data, 0, 1 / np.sqrt(m.n_inpt))
m.init_weights()
#Transform the test data
#TX = m.transformedData(TX)
# Average 10 passes of the input transform over the test set to reduce
# variance -- transformedData appears to be stochastic (e.g. dropout-style);
# TODO confirm against the breze implementation.
TX = np.array([m.transformedData(TX) for _ in range(10)]).mean(axis=0)
print TX.shape

# (train_loss, val_loss) tuples collected during training.
losses = []
print 'max iter', max_iter

# Cast all arrays to the array/dtype breze expects locally (e.g. GPU arrays).
X, Z, TX, TZ = [breze.learn.base.cast_array_to_local_type(i) for i in (X, Z, TX, TZ)]
for layer in m.mlp.layers:
weights.append(m.parameters[layer.weights])
weight_decay = ((weights[0]**2).sum()
+ (weights[1]**2).sum()
+ (weights[2]**2).sum())
weight_decay /= m.exprs['inpt'].shape[0]
m.exprs['true_loss'] = m.exprs['loss']
c_wd = 0.1
m.exprs['loss'] = m.exprs['loss'] + c_wd * weight_decay
mae = T.abs_((m.exprs['output'] * np.std(train_labels, axis=0) + np.mean(train_labels, axis=0))- m.exprs['target']).mean(axis=0)
f_mae = m.function(['inpt', 'target'], mae)
rmse = T.sqrt(T.square((m.exprs['output'] * np.std(train_labels, axis=0) + np.mean(train_labels, axis=0))- m.exprs['target']).mean(axis=0))
f_rmse = m.function(['inpt', 'target'], rmse)
start = time.time()
# Set up a nice printout.
keys = '#', 'seconds', 'loss', 'val loss', 'mae_train', 'rmse_train', 'mae_test', 'rmse_test'
max_len = max(len(i) for i in keys)
header = '\t'.join(i for i in keys)
print header
print '-' * len(header)
results = open('result.txt', 'a')
results.write(header + '\n')
results.write('-' * len(header) + '\n')
results.close()
EXP_DIR = os.getcwd()
base_path = os.path.join(EXP_DIR, "pars.pkl")
base_path1 = os.path.join(EXP_DIR, "best_pars.pkl")
n_iter = 0
if os.path.isfile(base_path):
with open('pars.pkl', 'rb') as tp:
n_iter, best_pars = cp.load(tp)
m.parameters.data[...] = best_pars
for i, info in enumerate(m.powerfit((X, Z), (TX, TZ), stop, pause)):
if info['n_iter'] % n_report != 0:
continue
passed = time.time() - start
losses.append((info['loss'], info['val_loss']))
info.update({
'time': passed,
'mae_train': f_mae(m.transformedData(X), train_labels),
'rmse_train': f_rmse(m.transformedData(X), train_labels),
'mae_test': f_mae(TX, test_labels),
'rmse_test': f_rmse(TX, test_labels)
})
if os.path.isfile(base_path1):
info['n_iter'] += n_iter
row = '%(n_iter)i\t%(time)g\t%(loss)f\t%(val_loss)f\t%(mae_train)s\t%(rmse_train)s\t%(mae_test)s\t%(rmse_test)s' % info
#row = '%(n_iter)i\t%(mae_train)s' % info
results = open('result.txt','a')
print row
results.write(row + '\n')
results.close()
with open('pars.pkl', 'wb') as fp:
cp.dump((info['n_iter'], info['best_pars']), fp)
m.parameters.data[...] = info['best_pars']
with open('best_pars.pkl', 'wb') as bp:
cp.dump(info['best_pars'], bp)
Y = m.predict(m.transformedData(X))
TY = m.predict(TX)
output_train = Y * np.std(train_labels, axis=0) + np.mean(train_labels, axis=0)
output_test = TY * np.std(train_labels, axis=0) + np.mean(train_labels, axis=0)
print 'TRAINING SET\n'
print('MAE: %s kcal/mol'%np.abs(output_train - train_labels).mean(axis=0))
print('RMSE: %s kcal/mol'%np.square(output_train - train_labels).mean(axis=0) ** .5)
print 'TESTING SET\n'
print('MAE: %s kcal/mol'%np.abs(output_test - test_labels).mean(axis=0))
print('RMSE: %s kcal/mol'%np.square(output_test - test_labels).mean(axis=0) ** .5)
mae_train = np.abs(output_train - train_labels).mean(axis=0)
rmse_train = np.square(output_train - train_labels).mean(axis=0) ** .5
mae_test = np.abs(output_test - test_labels).mean(axis=0)
rmse_test = np.square(output_test - test_labels).mean(axis=0) ** .5
results = open('result.txt', 'a')
results.write('Training set:\n')
results.write('MAE:\n')
results.write("%s" %mae_train)
results.write('\nRMSE:\n')
results.write("%s" %rmse_train)
results.write('\nTesting set:\n')
results.write('MAE:\n')
results.write("%s" %mae_test)
results.write('\nRMSE:\n')
results.write("%s" %rmse_test)
results.close()
| [
"vinodr.msec@gmail.com"
] | vinodr.msec@gmail.com |
6609cf56139197e5b6ce8b7736b492dc732cad8d | 6c94a71b2e2757fd78cb0f78f00d6b656a474d7c | /2-6.py | 5a44fc105e90c84c760702315118475ec69dbfda | [] | no_license | albastienex/TEST | 23429304aac7d6eef444a9fecc9ecb4b22919b1a | 9c28df7d0adf39afeb74c742326451afb711b916 | refs/heads/master | 2023-03-01T09:14:21.804490 | 2021-01-30T07:08:20 | 2021-01-30T07:08:20 | 332,668,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | a=1.79
# Read a real number from standard input.
a=float(input())
# Print the digit in the tenths place: shift one decimal position left,
# truncate to an integer, keep the last digit.
# NOTE(review): relies on float arithmetic being exact enough for the given
# inputs (e.g. 1.79 * 10); verify against the exercise's expected inputs.
print(int(a*10)%10)
# Equivalent one-liner kept from the original author:
#print(int(float(input()) * 10) % 10)
"a980364@gmail.com"
] | a980364@gmail.com |
d8298e301e9ebd7c818b76c18f9cf8a0e5446e36 | 559166a4eb2ae1d51a1053e5f37b08c49a2adb3e | /Interface_MAB/test2.py | f3b00531c268833b866c109c0fbf9813c15929fe | [] | no_license | joker21663/Interface_MAB | 60ec14aa62d0b26a4d37ff469c2daab8572e4e18 | 4227aa9bca9ec68069067f7d323da10992021579 | refs/heads/master | 2021-01-17T15:36:14.539302 | 2016-06-21T08:52:07 | 2016-06-21T08:52:07 | 58,457,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gtk
class TestWindow:
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
box = gtk.VBox()
button0 = gtk.Button("Test Button")
label0 = button0.get_children()[0]
label0.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse('red'))
button1 = gtk.Button(stock=gtk.STOCK_ABOUT)
alignment = button1.get_children()[0]
hbox = alignment.get_children()[0]
image, label1 = hbox.get_children()
label1.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse('blue'))
box.add(button0)
box.add(button1)
window.add(box)
window.set_size_request(200, 200)
window.show_all()
def close_application(self, widget, event, data=None):
gtk.main_quit()
return False
if __name__ == "__main__":
TestWindow()
gtk.main() | [
"joker@localhost.localdomain"
] | joker@localhost.localdomain |
435b0627724efbcd7a0b660dfb6a13edf64dd46d | 93a56f93e547153d9664bb31124925fdda4e6780 | /dj_test1/dj_test1/wsgi.py | 692b1d642989fb05ea12c7b9d2205b2177cd6143 | [] | no_license | weizhibing/python_djangoPro | e0191eadd2c2e022c802178b45e3709b7c3c061a | e0f32cee023574861124b9794a31e5b555daa0d2 | refs/heads/master | 2020-05-17T09:46:00.990354 | 2019-04-26T14:26:08 | 2019-04-26T14:26:08 | 183,641,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # -*- coding: utf-8 -*-
"""
WSGI config for dj_test1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module (only if not already set).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_test1.settings")
# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
b12c0fb45f697b54880348bc5234ea5e8967228d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq1197.py | 1ebb841cfc54a0fc26e0f2bd3522d7dfdaa63405 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=51
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the 5-qubit benchmark circuit.

    An initial layer of Hadamards on all five qubits is followed by two
    repetitions of an auto-generated gate sequence (H/X/Y/CZ/CNOT; the
    trailing `# number=k` comments are generation bookkeeping), and a final
    measurement of all qubits under the key 'result'.

    :param n: qubit count (unused inside; the qubit list determines size)
    :param input_qubit: sequence of at least five cirq qubits
    :return: the assembled cirq.Circuit
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0])) # number=3
    c.append(cirq.H.on(input_qubit[1]))  # number=4
    c.append(cirq.H.on(input_qubit[2]))  # number=5
    c.append(cirq.H.on(input_qubit[3]))  # number=6
    c.append(cirq.H.on(input_qubit[4]))  # number=21
    # Two rounds of the generated sequence.
    for i in range(2):
        c.append(cirq.H.on(input_qubit[0])) # number=1
        c.append(cirq.H.on(input_qubit[1])) # number=2
        c.append(cirq.H.on(input_qubit[2])) # number=7
        c.append(cirq.H.on(input_qubit[3])) # number=8
        c.append(cirq.H.on(input_qubit[0])) # number=17
        c.append(cirq.H.on(input_qubit[1])) # number=18
        c.append(cirq.H.on(input_qubit[2])) # number=19
        c.append(cirq.H.on(input_qubit[3])) # number=20
        c.append(cirq.H.on(input_qubit[0])) # number=31
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=32
        c.append(cirq.H.on(input_qubit[0])) # number=33
        c.append(cirq.H.on(input_qubit[1])) # number=44
        c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=45
        c.append(cirq.H.on(input_qubit[1])) # number=46
        c.append(cirq.X.on(input_qubit[1])) # number=41
        c.append(cirq.H.on(input_qubit[1])) # number=48
        c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=49
        c.append(cirq.H.on(input_qubit[1])) # number=50
        c.append(cirq.X.on(input_qubit[0])) # number=26
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
        c.append(cirq.H.on(input_qubit[1])) # number=37
        c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=38
        c.append(cirq.H.on(input_qubit[1])) # number=39
        c.append(cirq.X.on(input_qubit[1])) # number=35
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=36
        c.append(cirq.X.on(input_qubit[2])) # number=11
        c.append(cirq.X.on(input_qubit[3])) # number=12
        c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=43
        c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=47
        c.append(cirq.X.on(input_qubit[0])) # number=13
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=22
        c.append(cirq.X.on(input_qubit[1])) # number=23
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
        c.append(cirq.X.on(input_qubit[2])) # number=15
        c.append(cirq.X.on(input_qubit[1])) # number=29
        c.append(cirq.Y.on(input_qubit[4])) # number=28
        c.append(cirq.X.on(input_qubit[3])) # number=16
    # circuit end

    # Measure every qubit; results are folded into bitstrings by the caller.
    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Fold an iterable of measurement bits into a '0'/'1' string."""
    chars = []
    for bit in bits:
        chars.append(str(int(bit)))
    return ''.join(chars)
if __name__ == '__main__':
    # Five qubits placed in a single column of the device grid.
    qubit_count = 5
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set using sqrt-iSWAP entanglers.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # Histogram of measured bitstrings over all repetitions.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq1197.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    # Moment count (circuit depth) followed by the circuit diagram itself.
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
2b1327c03bb9c5dc46f3c52af92183372bce9d3b | cc088b4cf927a0918a6187ac98c42e526d5da2d7 | /utils/utils.py | aabc68c0cf0a55eba87af60e5e5ab469c4bcc97a | [
"MIT"
] | permissive | hajungong007/SPRNet | e7a4274ea74b9dc99e2217088766b6988ea44b23 | 92bcbeedcd1ae9a3a1e9e9669a06998b78648465 | refs/heads/master | 2023-01-21T18:01:04.425289 | 2020-12-04T04:36:56 | 2020-12-04T04:36:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,272 | py | import os
import numpy as np
import torch
import math
import cv2
import torchvision.transforms.functional as F
irange = range
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, range=None, scale_each=False, pad_value=0):
    """Make a grid of images.

    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is (B / nrow, nrow). Default is 8.
        padding (int, optional): amount of padding around each tile. Default is 2.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by subtracting the minimum and dividing by the maximum pixel value.
        range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If True, scale each image in the batch
            separately rather than using the (min, max) over all images.
        pad_value (float, optional): Value for the padded pixels.

    Returns:
        A 3 x H' x W' tensor with the tiled images; for a single-image batch
        the (squeezed) image itself is returned instead of a grid.
    """
    if not (torch.is_tensor(tensor) or
            (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)

    if tensor.dim() == 2:  # single image H x W -> 1 x H x W
        tensor = tensor.view(1, tensor.size(0), tensor.size(1))
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2))

    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images -> 3-channel
        tensor = torch.cat((tensor, tensor, tensor), 1)

    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if range is not None:
            assert isinstance(range, tuple), \
                "range has to be a tuple (min, max) if specified. min and max are numbers"

        def norm_ip(img, min, max):
            # Clamp into [min, max], then rescale to [0, 1] in place
            # (the small epsilon avoids division by zero for flat images).
            img.clamp_(min=min, max=max)
            img.add_(-min).div_(max - min + 1e-5)

        def norm_range(t, range):
            if range is not None:
                norm_ip(t, range[0], range[1])
            else:
                norm_ip(t, float(t.min()), float(t.max()))

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, range)
        else:
            norm_range(tensor, range)

    # Single image: return it directly (no grid assembly, no padding border).
    if tensor.size(0) == 1:
        return tensor.squeeze()

    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
    grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)
    k = 0
    for y in irange(ymaps):
        for x in irange(xmaps):
            if k >= nmaps:
                break
            # Copy image k into its (y, x) cell, leaving `padding` pixels of
            # pad_value as a border on the top/left of every cell.
            grid.narrow(1, y * height + padding, height - padding) \
                .narrow(2, x * width + padding, width - padding) \
                .copy_(tensor[k])
            k = k + 1
    return grid
def make_all_grids(tensors, nrow=8, padding=2,
                   normalize=False, range=None, scale_each=False, pad_value=0):
    """Render several batches of images side by side as one uint8 array.

    Each element of *tensors* is tiled via ``make_grid`` and the resulting
    H x W x 3 uint8 arrays are stacked horizontally.

    Fixes over the original: the unused ``from PIL import Image`` (which
    crashed when PIL was not installed) is gone, and the panels are stacked
    with a single ``np.hstack`` instead of re-allocating a growing array on
    every iteration.

    Args:
        tensors (iterable): iterable of tensors accepted by ``make_grid``
            (typically 4D B x C x H x W mini-batches).
        nrow, padding, normalize, range, scale_each, pad_value: forwarded
            unchanged to ``make_grid``.

    Returns:
        numpy.ndarray of dtype uint8 and shape (H, W_total, 3), or ``None``
        when *tensors* is empty (matching the original behavior).
    """
    panels = []
    for tensor in tensors:
        grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
                         normalize=normalize, range=range, scale_each=scale_each)
        # Scale [0, 1] -> [0, 255], round to nearest integer (+0.5 then
        # truncate), move channels last, and convert to a CPU uint8 array.
        panels.append(grid.mul_(255).add_(0.5).clamp_(0, 255)
                          .permute(1, 2, 0).to('cpu', torch.uint8).numpy())
    if not panels:
        return None
    # Stack all panels left-to-right in one pass.
    return np.hstack(panels)
def save_image(tensors, filename, nrow=8, padding=2,
               normalize=False, range=None, scale_each=False, pad_value=0):
    """Save the horizontally stacked grids of *tensors* into an image file.

    This is the file-writing twin of ``make_all_grids``: the same panel
    construction is delegated to it (removing the duplicated loop and the
    unused ``from PIL import Image`` of the original), and the resulting
    H x W x 3 uint8 array is written with ``cv2.imwrite``.

    Args:
        tensors (iterable): iterable of tensors accepted by ``make_grid``.
        filename (str): destination path; the extension selects the format.
        nrow, padding, normalize, range, scale_each, pad_value: forwarded
            unchanged to ``make_grid`` via ``make_all_grids``.
    """
    ndarr = make_all_grids(tensors, nrow=nrow, padding=padding,
                           normalize=normalize, range=range,
                           scale_each=scale_each, pad_value=pad_value)
    # NOTE: cv2 interprets the last axis as BGR; the original code made the
    # same choice, so channel order is unchanged here.
    cv2.imwrite(filename, ndarr)
def save_networks(epoch, net, save_dir='models'):
    """Serialize *net* to '<save_dir>/<epoch>_net_latest.pth' with torch.save.

    Parameters:
        epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        net -- the (picklable) network object to persist
        save_dir (str) -- target directory (must already exist)
    """
    target_path = os.path.join(save_dir, '%s_net_%s.pth' % (epoch, 'latest'))
    torch.save(net, target_path)
def test_data_preprocess(img):
img = img.transpose((2, 0, 1))
img = img.astype("float32") / 255.
img = torch.from_numpy(img)
return img.to("cuda")
| [
"nglequocviet@gmail.com"
] | nglequocviet@gmail.com |
c6648052d504c66fdd82e66d076da7dc9f764989 | f2828d2adf11c6e7f7050812d45d7525306cd8f3 | /Aula 1/A1Ex3.py | 2c27f681570fd06db386ddce30ec9932b306b42e | [] | no_license | VitorSRamos/IntroPython | 9384d66994977c87fda91abbcd6b52af214e7870 | 342919a89dfa1521f5a86e16b6acd87ede32c9a9 | refs/heads/master | 2020-03-30T15:19:10.583649 | 2018-10-08T02:23:03 | 2018-10-08T02:23:03 | 151,357,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | #Exercício 3
a = 3
b = -4
c = -10
delta = (b**2)-(4*a*c)
y1 = (-b + (delta)**(0.5))/(2*a)
y2 = (-b - (delta)**(0.5))/(2*a)
print ('As raízes da função são' , y1 , 'e' , y2 ,)
| [
"noreply@github.com"
] | noreply@github.com |
c378f62bb39cc0420ff55a8945b7cf08be9326f4 | 4facdac2e8a2be71c319cb82b0aea89783be6aa9 | /python.venv/bin/kill_instance | 0f57b88160acfb04f094de69c6198bbbd459cb7a | [] | no_license | agilebeat/BDSO-text-analysis | 1c9e44b20923d3535100ab5febf50fee620279c1 | ac961b77dc3a0ed33fe3e49597115c741d1a04e8 | refs/heads/master | 2022-02-19T20:01:51.793807 | 2019-08-26T14:11:09 | 2019-08-26T14:11:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | #!/root/bdso/python.venv/bin/python3
import sys
from optparse import OptionParser
import boto
from boto.ec2 import regions
def kill_instance(region, ids):
    """Terminate the EC2 instances with the given instance IDs.

    :param region: boto region object to connect to
    :param ids: iterable of instance-id strings (e.g. 'i-0abc...')
    """
    # Connect the region
    ec2 = boto.connect_ec2(region=region)
    for instance_id in ids:
        print("Stopping instance: %s" % instance_id)
        # terminate_instances takes a list; one call per id here.
        ec2.terminate_instances([instance_id])

if __name__ == "__main__":
    parser = OptionParser(usage="kill_instance [-r] id [id ...]")
    parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
    (options, args) = parser.parse_args()
    if not args:
        # No instance ids given: show usage and exit with an error status.
        parser.print_help()
        sys.exit(1)
    # Resolve the region name to a boto region object; the for/else fires
    # only when no region matched.
    for r in regions():
        if r.name == options.region:
            region = r
            break
    else:
        print("Region %s not found." % options.region)
        sys.exit(1)
    kill_instance(region, args)
| [
"Seunghye.Wilson@agilebeat.com"
] | Seunghye.Wilson@agilebeat.com | |
23e1f4403f33f22267341c871867fca421ebeeac | 08b826c94a78dc1ceeba4e561237fef8cc5929f0 | /twitter_credentials.py | 42c1be1baf9a50c9c5a3b7886e06a83298b9e9bd | [] | no_license | bhadra28/Twitter_score_calculator | f9490cca28366a8c5b0bda6ae5eea82187c55442 | 8ce1a3b2ffae2489b29433f7437d93a94e248c7e | refs/heads/main | 2023-06-16T03:13:46.613237 | 2021-07-06T13:26:42 | 2021-07-06T13:26:42 | 383,467,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | # Variables that contains the user credentials to access Twitter API
ACCESS_TOKEN = "1395632396205268995-9CLMX3RpdybP72cJgCOS0Yq6UpcCYn"
ACCESS_TOKEN_SECRET = "a9EFJSI4SozGfGqT47N0I2uDoWKQUXFuBXYvsbWw1KF77"
CONSUMER_KEY = "b0OiZs1MrgbN51rwA8YNu8XUS"
CONSUMER_SECRET = "A2iIQFAiOZbC0LkbEvZypbK2RGGNqgnvtNMeAMbrP5VLJmRjEW"
| [
"noreply@github.com"
] | noreply@github.com |
63e2e817f780c3fe671c795146ef75a062a2ce01 | 23148187fbcda3dc6ec4883cc960a1c3d00f5844 | /tests/__init__.py | 7fb557f71f2819e3b27479b94bef968e28e7eae0 | [
"BSD-3-Clause"
] | permissive | yinxiaojing1/pre_epi_seizures | c1944dbd5bda82c820bccab874b67a0d4efcff31 | e4e83e10256a4ef402061ca8bcbc3331fd8fc431 | refs/heads/master | 2020-03-17T17:31:19.224073 | 2018-05-16T16:47:13 | 2018-05-16T16:47:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | # -*- coding: utf-8 -*-
"""Unit test package for pre_epi_seizures."""
| [
"fmsargo@gmail.com"
] | fmsargo@gmail.com |
ad3a293d4e7e01087f5620cbe77f946ebfbfb544 | ed4b1613c43112ed66d9017246e7862a97f25f6c | /testfrontdroana/testfrontdroana/wsgi.py | 22139567b34b4f8e49e7dfae41d7387b81aa4504 | [] | no_license | SeemaSP/sb-bootstrapfinalrepo | 9e9f1c24ec7201617081d4e578b23ce2aea813bd | 5e3deda70914aed9e5258fd125c1127c8765a5cc | refs/heads/master | 2020-07-23T07:57:26.163324 | 2016-11-15T12:57:16 | 2016-11-15T12:57:16 | 73,813,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
WSGI config for testfrontdroana project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module (only if not already set).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testfrontdroana.settings")
# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
d0a6d89461ffbf3a57691489e3c9c3e1b6d69ed8 | cfe1551cab9f24788dc4ee06d344e05c581c6566 | /predictvenv/Lib/site-packages/ttaa_utils/dictionaries.py | 5327e64b45730e0bb8e54daa4018fb6548504cae | [] | no_license | moshuixin/ETR_prediction | 5e38644a3760b2671dc3480794a3fb4ecdad0f45 | 17faeea197e28d8e2ce21d53caaba4c9cc3a5c52 | refs/heads/master | 2022-12-21T22:05:14.300322 | 2018-04-23T11:59:36 | 2018-04-23T11:59:36 | 130,687,314 | 0 | 1 | null | 2022-12-19T05:42:56 | 2018-04-23T11:38:29 | Python | UTF-8 | Python | false | false | 3,068 | py | """Tools made to work with dictionaries"""
class DicArrays:
"""
Basic tools to work with array of dictioanries
"""
def __init__(self, dics=(), **kwargs):
self.dics = dics
def list_key_values(self, key):
""" Returns a list that contains value=dict[key] for each dictionary
in the array. In other words, it groups by key. It assumes that the
dictionary has the same/equivalent type of elements under the given
key.
:param key: str : string for a dictioanary key
:return: list
"""
keys = []
for dic_ in self.dics:
if key in dic_.keys():
keys.append(dic_[key])
return keys
def sum_by_key(self, key):
"""Sums all the values under the given key from all the dictionaries
in the array. It assumes that the dictionary has the same/equivalent
type of elements and they can be sumed.
:param key: str : key within the dictionary
:return: int/double/whatever is in the key.
"""
_sum = 0
for dic_ in self.dics:
if key in dic_.keys():
_sum += dic_[key]
return _sum
def get_paired_keys(self, key1, key2):
""" Returns an array of tuples consisting of (dict[key1], dict[key2]).
Both keys must exist in all dictionaries at the same time.
:param key1: str: dictionary key
:param key2: str: dictionary key
:return: list
"""
pairs = []
for dic_ in self.dics:
if key1 not in dic_.keys():
continue
if key2 not in dic_.keys():
continue
pairs.append((dic_[key1], dic_[key2]))
return pairs
def list_grouped_by_key_values(self, key1, key2):
"""
groups in key[]a list with the key dic[key1] all the values of
dic[key2]
:param key1: str: dictionary key to group in
:param key2: str: dictionary key to group by
:return: dictionary
>>> test = [
{'id':1, 'status':0, 'desc': 'desc 1'},
{'id':2, 'status':1, 'desc': 'desc 2'},
{'id':3, 'status':2, 'desc': 'desc 3'},
{'id':4, 'status':0, 'desc': 'desc 4'},
{'id':5, 'status':4, 'desc': 'desc 5'},
{'id':6, 'status':5, 'desc': 'desc 6'},
]
>>> p = DicArrays(test)
>>> p.list_key_values('status', 'id')
{0: [1, 4], 1: [2], 2: [3], 4: [5], 5: [6]}
"""
mapped = {}
for dic_ in self.dics:
if key1 not in dic_.keys():
# if the searched key is not in the dictionary, nothing can
# be done... RIP
continue
# te key in the mapped dict should be the value from key1
key_entry = dic_[key1]
if key_entry not in mapped.keys():
mapped[key_entry] = []
mapped[key_entry].append(dic_[key2])
return mapped
| [
"xinxin.yang@de.ey.com"
] | xinxin.yang@de.ey.com |
e29b795b791a39c16bac4e8f849e8d67b243c733 | 18dca9a552f5aa9303536613ec39f19cebf6647c | /CreateTrainingFiles/ArxivMAG/prepare_arxiv_hd2v_file.py | b73e1d6597b04b0f270151a1756b21c7527be5e2 | [
"MIT"
] | permissive | ashwath92/MastersThesis | 9a39ed7eec825ed559d09507721c21bd12e2ab9c | f74755dc0c32f316da3c860dd5dbfa4c9cad97b3 | refs/heads/master | 2021-08-16T12:01:33.282459 | 2020-06-27T16:00:16 | 2020-06-27T16:00:16 | 197,282,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,315 | py | """ Prepapres a file from Arxiv data (as well as additional mag contexts) as per the hyperdoc2vec format.
All the citation markers in the output file are MAG Ids (for the ACL papers, the mapping from ACL to
MAG can be found in /home/ashwath/Programs/ArxivCS/SQLITEDB/arxivcs_mag_mapping.sqlite3 -- table name: arxivcs_mag).
Adjacent citations are not comma-separated, but instead just placed next to each other.
The input files have citation markers with UUIDs. These UUIDs, defined in /vol2/unarXive/arxiv-txt-data/metadata.db
and mapped to mag ids in the bibitemmagidmap table, have been preprocessed in read_bibitemmagidmap_into_pickle.py
and inserted into a dictionary in a pickle."""
import os
import re
import csv
import pickle
import sqlite3
import psycopg2
import psycopg2.extras
from time import time
from gensim.parsing import preprocessing
from gensim.utils import to_unicode
import contractions
import pandas as pd
from tqdm import tqdm
import concurrent.futures
from multiprocessing import Pool, cpu_count
basepath = '/home/ashwath/Programs'
dbpath = os.path.join(basepath, 'ArxivCS', 'SQLITEDB', 'arxivcs_mag_mapping.sqlite3')
def db_connect(set_params=False, path=dbpath):
    """Open the arxiv-to-MAG mapping SQLite database.

    The connection uses timeout=10 so concurrent inserts do not hit
    'database is locked' errors, and PARSE_DECLTYPES so date/datetime
    columns round-trip as Python objects.

    :param set_params: when True, apply write-speed PRAGMAs (WAL journal,
        larger cache, exclusive locking); only used while the database is
        being created.
    :param path: filesystem path of the sqlite3 database file.
    :return: an open sqlite3.Connection
    """
    connection = sqlite3.connect(
        path, timeout=10, detect_types=sqlite3.PARSE_DECLTYPES)
    if set_params is True:
        # Speed up insertions: only requested while creating the database.
        for pragma in ('PRAGMA main.journal_mode=WAL;',
                       'PRAGMA main.cache_size=10000;',
                       'PRAGMA main.locking_mode=EXCLUSIVE;'):
            connection.execute(pragma)
    return connection
# GLOBALS
# Hyperdoc2vec markers for citations
docid_prefix='=-='
docid_suffix='-=-'
# IMPORTANT: I need a set of mag ids which are cited so that i can use it to add extra mag content.
allmagpaperids = set()
# mag arxiv mapping db connection
sconn = db_connect()
scur = sconn.cursor()
# NOT BEING USED, I have now pre-loaded this into a Pandas series in a pickle
# context connection: for getting the mag id of the CITED papers
#meta_db_path = '/vol2/unarXive/arxiv-txt-data/metadata.db'
#cconn = db_connect(path=meta_db_path)
#ccur = cconn.cursor()
# Get the uuid_mag_id dict which has been precomputed into a pickle file (from the sqlite3 db)
with open('Pickles/uuid_magid_dict.pickle', 'rb') as picc:
uuid_magid_dict = pickle.load(picc)
# Some arxiv ids are mapped to 2 magids, keep only 1 (data problem)
# 72246 rows in the results (out of 72315): 69 duplicates
# Training set is all years until 2016 (2017 is the test set)
# Training set: 62296 papers
# Test set: 9954 papers
trainingquery = """select arxiv_id, mag_id
from arxivcs_mag
where arxiv_id not like '17%'
group by mag_id;
"""
# Write test set
testsetquery = """select arxiv_id, mag_id
from arxivcs_mag
where arxiv_id like '17%'
group by mag_id;
"""
# shape: (18642, 2)
testresdf = pd.read_sql_query(testsetquery, sconn)
testresdf.to_csv('AdditionalOutputs/test_ids.tsv', index=False, sep='\t')
# shape: (53614, 2)
trainresdf = pd.read_sql_query(trainingquery, sconn)
trainresdf.to_csv('AdditionalOutputs/training_ids.tsv', index=False, sep='\t')
# Get a Series of mag ids for which we have full text
mag_id_series = trainresdf['mag_id']
# IMP: There seems to be some problem with the data?? Multiple arxiv ids are mapped to the same mag id
# Doing select mag_id from arxivcs_mag, and read_sql_query, then
# df[df.isin(df[df.duplicated()])] gives 69 records.
# Get a set of mag ids (mapped from arxiv of course) which have full text
inarxiv_papers_set = set(mag_id_series.tolist())
# POSTGRES connection obj and cursor
pconn = psycopg2.connect("dbname=MAG19 user=mag password=1maG$ host=shetland.informatik.uni-freiburg.de")
pcur = pconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
# POSTGRES QUERY
magonly_query = """
SELECT titleandabstract.paperid, papertitle, abstract, contexts, referenceids
FROM
(
SELECT papers.paperid, papertitle, abstract FROM papers INNER JOIN paperabstracts
ON papers.paperid=paperabstracts.paperid
WHERE papers.paperid=%s) AS titleandabstract INNER JOIN
(
SELECT paperid, string_agg(paperreferenceid::character varying, ',') AS referenceids,
string_agg(citationcontext, ' ||--|| ') AS contexts
FROM papercitationcontexts
WHERE paperid=%s
GROUP BY paperid
) AS listofcontexts
ON titleandabstract.paperid=listofcontexts.paperid;"""
# Arxiv citing, cited list based on mag ids
arxiv_citing_cited_file = open('AdditionalOutputs/arxivmag_references.tsv', 'w')
fieldnames = ['citing_mag_id', 'cited_mag_id']
writer = csv.DictWriter(arxiv_citing_cited_file, delimiter="\t", fieldnames=fieldnames)
writer.writeheader()
citation_pattern = re.compile(r'(\{\{cite:)([a-zA-z0-9-]+)(\}\})')
replaced_citation_pattern = re.compile(r'(=-=)([0-9]+)(-=-)')
def get_mag_from_uuid(matchobject):
    """re.sub callback: rewrite a '{{cite:<uuid>}}' match as a hyperdoc2vec
    citation marker.

    matchobject.group(2) is the cited paper's UUID. When it maps to a MAG id
    in uuid_magid_dict the marker '=-=<magid>-=-' is returned and the MAG id
    is recorded in the global allmagpaperids set; otherwise the literal word
    'citation' is substituted instead.
    """
    cited_uuid = matchobject.group(2)
    fetched_mag_id = uuid_magid_dict.get(cited_uuid)
    if fetched_mag_id is None:
        # If the uuid does not map to a mag id, replace with the word citation.
        #wordindex_magid_dict[i] = 'citation'
        return 'citation'
    else:
        # Track every cited MAG id so extra MAG-only content can be added later.
        allmagpaperids.add(fetched_mag_id)
        return '{}{}{}'.format(docid_prefix, fetched_mag_id, docid_suffix)
def read_arxiv_addmagids(arxivfilename_plus_mag):
    """Read one arxiv full-text file and replace its citation markers with
    MAG-id markers.

    :param arxivfilename_plus_mag: 2-element sequence: [0] the path of the
        arxiv .txt file, [1] the MAG id mapped to that arxiv paper.
    :return: one hyperdoc2vec-format line '<citing magid> <content>' ending
        in a newline, in which every '{{cite:<uuid>}}' has been rewritten to
        '=-=<magid>-=-' (or the word 'citation' when the uuid is unmapped).

    Side effects: adds the citing MAG id to the global allmagpaperids set
    and appends all (citing, cited) pairs to the references TSV via
    write_refs_file.
    """
    # NOTE(review): leftover debug print -- consider removing for real runs.
    print(arxivfilename_plus_mag, 'here')
    arxiv_filepath = arxivfilename_plus_mag[0]
    mag_id = arxivfilename_plus_mag[1]
    allmagpaperids.add(mag_id)
    with open(arxiv_filepath, 'r') as arxivfile:
        # Collapse the document onto a single line.
        content = arxivfile.read().replace('\n', ' ')
    # Replace every '{{cite:<uuid>}}' marker via the regex callback, which
    # looks the uuid up in the precomputed uuid->MAG-id dictionary.
    content = citation_pattern.sub(get_mag_from_uuid, content)
    # Make sure to add the citing paper mag id as the first word in the line
    content = '{} {}\n'.format(mag_id, content)
    # Write to refs file:
    write_refs_file(content, mag_id)
    return content
def write_refs_file(content, mag_id):
    """Append (citing, cited) MAG-id pairs to the references TSV.

    Scans *content* for already-substituted citation markers of the form
    '=-=<magid>-=-' and writes one row per marker with *mag_id* as the
    citing paper (no de-duplication is performed).
    """
    for citationmarker in replaced_citation_pattern.finditer(content):
        # group(2) gets the magid from the match object
        fetched_mag_id = citationmarker.group(2)
        writer.writerow({'citing_mag_id': mag_id,'cited_mag_id': fetched_mag_id})
def clean_text(text):
    """ Cleans the text in the only argument in various steps
    ARGUMENTS: text: content/title, string
    RETURNS: cleaned text, string"""
    # Replace newlines by space (we want only one doc vector) and lowercase.
    text = text.replace('\n', ' ').lower()
    # Remove URLs
    #text = re.sub(r"http\S+", "", text)
    # Expand contractions: you're to you are and so on.
    text = contractions.fix(text)
    # Stop words are deliberately NOT removed (left commented by the author).
    #text = preprocessing.remove_stopwords(text)
    #text = preprocessing.strip_tags(text)
    # Remove punctuation -- all special characters -- then collapse any
    # resulting runs of whitespace into single spaces.
    text = preprocessing.strip_multiple_whitespaces(preprocessing.strip_punctuation(text))
    return text
def add_additional_papers(outfile):
""" Add additional papers for which full text from Arxiv is not present. Care is taken that while
adding references to THESE papers, these references should be in the set of papers stored
in the allmagpaperids set (otherwise, there will be additional papers in the reference part
of the concat contexts which are not in the files in the text.
ALSO NOTE that allmagpaperids contains all papers which either cite or are cited so far
inarxiv_papers_set contains the set of papers which are in arxiv (citing)
A set difference (allmagpaperids - inarxiv_papers_set) gives the set of mag_ids for which we
get additional text"""
additional_mag_ids = allmagpaperids - inarxiv_papers_set
for paperid in tqdm(additional_mag_ids):
pcur.execute(magonly_query, (paperid, paperid))
# Get paperid, contexts, abstract, title, refids of current paper id
for row in pcur:
# row is a dict with keys:
# dict_keys(['paperid', 'papertitle', 'abstract', 'contexts', 'referenceids'])
paperid = row.get('paperid')
# Get all contexts and reference ids (delimiters set in the pSQL query)
contexts = row.get('contexts').replace('\n', ' ')
referenceids = row.get('referenceids')
title = clean_text(row.get('papertitle'))
abstract = clean_text(row.get('abstract'))
print(title)
# Get a single string for all the contexts
if contexts is not None and referenceids is not None:
contexts = contexts.split(' ||--|| ')
referenceids = referenceids.split(',')
contexts_with_refs = []
# Go through context, refid pairs, one at a time
for context, referenceid in zip(contexts, referenceids):
# VERY VERY IMPORTANT: check if the referenceid is not present in the allmagpaperids set,
# IGNORE IT! DESIGN DECISION: the other choice is to have a LOT of passes.
if referenceid in allmagpaperids:
writer.writerow({'citing_mag_id': paperid,'cited_mag_id': referenceid})
contextlist = clean_text(context).split()
# Insert the reference id as the MIDDLE word of the context
# NOTE, when multiple reference ids are present, only 1 is inserted. Mag issue.
# In the eg. nips file, it's like this: this paper uses our previous work on weight space
# probabilities =-=nips05_0451-=- =-=nips05_0507-=-.
index_to_insert = len(contextlist) // 2
value_to_insert = docid_prefix + referenceid + docid_suffix
# Add the ref id with the prefix and suffix
contextlist.insert(index_to_insert, value_to_insert)
# Insert the context with ref id into the contexts_with_refs list
contexts_with_refs.append(' '.join(contextlist))
# else: do nothing, next iteration
# After all the contexts azre iterated to, make them a string.
contexts_concatenated = ' '.join(contexts_with_refs)
else:
contexts_concatenated = ''
# Do not write these to file????? OR
# Concatenate the paperid, title, abstract and the contexts together.
content = "{} {} {} {}\n".format(paperid, title, abstract, contexts_concatenated)
content = to_unicode(content)
if content.strip() != '':
outfile.write(content)
print("Written file for {}".format(paperid))
def run_multiprocessing_pool():
""" Uses all the cores to read the arxiv files, add the mag ids, and write to
a single consolidated output file. It also adds additional mag contexts+abstracts at the end"""
output_file = open('arxiv_hd2v_training.txt', 'w')
workers = cpu_count()
# Create a list of lists with [[arxivid, magid], [arxivid, magid], ...]
arxiv_filepath = '/vol2/unarXive/arxiv-txt-data'
trainresdf['arxiv_id'] = trainresdf['arxiv_id'].apply(lambda x: '{}/{}.txt'.format(arxiv_filepath, x))
# arxivmag_list is a list of lists
arxivmag_list = trainresdf.values.tolist()
#with Pool(processes=workers) as pool:
#with concurrent.futures.ProcessPoolExecutor(max_workers=64) as executor:
# VERY VERY VERY VERY IMPORTANT: ThreadPoolExecutor allows the concurrent child
# processes to update the global allmagids... variable together (they share state).
# Child processes do not share state in ProcessPool, any changes to global vars in
# the function are immutable.
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
# chunk size =1
# It writes in the same order as the iterable is called.
#content = pool.map(read_arxiv_addmagids, arxivmag_list, chunksize=len(arxivmag_list)//workers)
#output_file.write(content)
content = executor.map(read_arxiv_addmagids, arxivmag_list, chunksize=len(arxivmag_list)//workers)
# content is a generator
#print(content, type(content), 'outside')
# content is an iterable, a generator with all the content values returned from read_arxiv_addmagids
for text in content:
output_file.write(text)
# Add additional content : abstact + title + concatenated contexts from MAG
# Note that the citation marker (cited paper id) is always placed bang in the centre
# of the context.
add_additional_papers(output_file)
output_file.close()
def main():
"""Main function """
start = time()
run_multiprocessing_pool()
# Pickle the sets so that we can add additional contexts later from MAG based on them.
with open('Pickles/inarxiv_papers_set.pickle', 'wb') as picc:
pickle.dump(inarxiv_papers_set, picc)
with open('Pickles/allmagpapers_en_magcontexts.pickle', 'wb') as picc2:
pickle.dump(allmagpaperids, picc2)
# Close files and db connections
arxiv_citing_cited_file.close()
sconn.close()
pconn.close()
print("Time taken:{}".format(time() - start))
if __name__ == '__main__':
main() | [
"ashwath92@gmail.com"
] | ashwath92@gmail.com |
74b92f45f67cf407365824f65209050f0db4d914 | 29ae8c6bbb81c0fe2ba958fa51ec44c373c0aa16 | /week2/cdb.py | 729536de6c0320feb5316a8075d385acfe3a31c5 | [] | no_license | evadyadko/gevpro | ba8a6e9dddd90c91c937c73ee086f3b67959ed74 | 72a0f149c5ace7680ba1340f54488c3da8a69298 | refs/heads/main | 2023-03-13T06:55:56.402150 | 2021-02-28T22:27:59 | 2021-02-28T22:27:59 | 335,079,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from xml.etree import ElementTree as ET
def get_adjectives(database_path):
tree = ET.parse(database_path)
root = tree.getroot()
adj_set = set([cid.attrib['form'] for cid in root
if 'ADJ' in cid.attrib['pos']])
return adj_set
def main():
print(get_adjectives('cdb-sample.xml'))
if __name__ == "__main__":
main()
| [
"evadyadko@gmail.com"
] | evadyadko@gmail.com |
564264f489217b11582113729d08d6510332408b | 2dc2babb74118d1a4620aeb54112877bc6f44363 | /NNC.py | 7b73524071298b7f209348bd744e3da239fea5b0 | [] | no_license | zongxinwu92/stanford_cs231n_NNC | fe55871fab9833a430537a5be6cde42c5bfd99ce | cf8394bf976242b0c65237152e077752b6b71a31 | refs/heads/master | 2020-03-16T16:41:00.852364 | 2018-05-09T21:54:53 | 2018-05-09T21:54:53 | 132,799,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | import numpy as np
class NearestNeighbor(object):
def __index__(self):
pass
def train(self, X, y):
'''X is N*D where each row is an example, Y is one-D of size N'''
self.Xtr = X
self.ytr = y
def predict(self, X):
'''X is X*D where each row is an example we wish to predict label for '''
num_test = X.shape[0]
Ypred = np.zeros(num_test, dtype = self.ytr.dtype)
for i in range(num_test):
distance = np.sum(np.abs(self.Xtr - X[i, :]), axis = 1)
min_index = np.argmin(distance)
Ypred[i] = self.ytr[min_index]
print(i, num_test)
return Ypred
| [
"zongxinwu@Zongxins-MacBook-Pro.local"
] | zongxinwu@Zongxins-MacBook-Pro.local |
541093f02de52d797169a02ceaec446a12f89eb9 | 9a14c99da43b5d27f344e9dd5cd9cf88f6796268 | /Scrutiny/Dictionary/DictionaryIteration.py | f4c70ee7a6aaf31ccb4b42bd47a65c1865da45e4 | [] | no_license | ResearchInMotion/CloudForce-Python- | adb1a08aa52c7fa0140ba0ed51bc3b138d52a8a8 | a7b925e94d83712569dbb808d8aad0e6efbddf34 | refs/heads/master | 2021-01-20T11:38:40.700723 | 2018-07-28T07:30:51 | 2018-07-28T07:30:51 | 101,677,396 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | phonebook = {"John" : 938477566,"Jack" : 938377264,"Jill" : 947662781}
for name, number in phonebook.items():
print("Phone number of %s is %d" % (name, number))
check = {"sahil":8,"Nikki":9,"Vmal":9}
for name,number in check.items():
print("Age of %s is %d"%(name,number))
| [
"sahilnagpal@Sahils-MacBook-Air.local"
] | sahilnagpal@Sahils-MacBook-Air.local |
004878eee5cb4cdac1571456f0170a3c5841a3bd | c67ed725f9ea58f905f3b1224010ee8f7dcc0e14 | /manage.py | a8f6c68e8abfc81c8db51d05c5f479d81742cc90 | [
"MIT"
] | permissive | gonvaled/backend-django-ember-showcase | c34fee9795b6c0af10cabc576a5706a75028f4bc | 6445e80a062dd9dfe8840c43ef45c324914144ce | refs/heads/master | 2021-01-10T10:53:55.587302 | 2016-01-07T19:57:51 | 2016-01-07T19:57:51 | 49,221,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backend_django_ember_showcase.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"gonvaled@gonvaled.com"
] | gonvaled@gonvaled.com |
d9164c4b3811ee68c95e494d3120e1877b63ed24 | 426ea3c0ed0e2163a7a2af2ed2ac7084779fbedf | /IDE_Ver/Tool/__init__.py | bdf4c6e71a9a542fd3ec032efc2c5b31dd44e5dc | [] | no_license | kevin910162/Perpetual-Callable-Bond-Pricing | b303ebfe6c1009d7ad342c47419721ace770ee28 | 3048ff0b301b30c66cd916afe35c556bfb6780a2 | refs/heads/master | 2020-12-05T12:30:59.502082 | 2020-02-12T04:27:13 | 2020-02-12T04:27:13 | 232,110,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from .downloadinfo import DownLoadInfo
from .functions import Functions
from .loadinfo import LoadInfo
from .plot import Plot | [
"40070418+kevin910162@users.noreply.github.com"
] | 40070418+kevin910162@users.noreply.github.com |
a0115f2376d4b303c31a2fc2d41d8b84830d0916 | 710276d0c9521c4654bb163fb8ef1438bb8316cc | /setup.py | 51203bf16681c47568bbe72adddbbe4300da1b0d | [
"MIT"
] | permissive | djego/ecommerce-peru-scrap-cli | 5f4a452b5a8d1fab1d702bafffd2d1f9b81454e8 | 82ff1f91af7e105b80f30b1fd747a25ec296f2d4 | refs/heads/master | 2023-01-04T16:37:19.980907 | 2020-10-24T03:29:07 | 2020-10-24T03:29:07 | 295,941,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import setuptools
with open("readme.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='ecope',
version='0.1.2',
scripts=['./scripts/ecope'],
author='Diego Machaca',
author_email = 'diegomachaca@gmail.com',
url = 'https://github.com/djego/ecommerce-peru-scrap-cli',
keywords = ['scraping', 'peru', 'ecommerce','csv','json'],
description='Ecommerce Perú Scrap CLI is a project open source that extract products data by category and export to csv, json and other structure format files',
packages=['application'],
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
'setuptools',
'beautifulsoup4 == 4.9.1',
'html5lib == 1.1',
'lxml == 4.5.2',
'six == 1.15.0',
'soupsieve == 2.0.1',
'webencodings == 0.5.1',
'requests == 2.24.0'
],
python_requires='>=3.7'
) | [
"diego.machaca@gmail.com"
] | diego.machaca@gmail.com |
ffdd69291bd6db302ef39fc5a06f21f460375837 | 2c733aeaccae6ffb509a5b75b61fcf09252e899e | /test_server.py | 12e697853ef4dbf73eca368da645704dddc7c218 | [] | no_license | YouRancestor/WeChatGameServerTest | 7dbe71903e2959a54dad0fc4d1e309cfc022dabd | 687b610c5a2db3027fe43afbba421080da9ca834 | refs/heads/master | 2020-05-18T18:07:04.734686 | 2019-05-02T12:24:10 | 2019-05-02T12:24:10 | 184,576,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(80)
tornado.ioloop.IOLoop.current().start() | [
"zyr_zyr_@sohu.com"
] | zyr_zyr_@sohu.com |
77d950e90b279c3c0f8a7d1aa395e6b81e1a73ab | 06c1e0add693ba6d9fa3421f6e3713ea1df47036 | /snakeskin/path_finders/__init__.py | 8a012c900caefe2b281e754cabfa768043c569a9 | [
"Apache-2.0"
] | permissive | ewanbarr/snakeskin | 95dc9069c61de60097aa78b0d68218e3ed240445 | b41a5393e9b4ab42fd6245e022dd4923be01815b | refs/heads/master | 2020-04-15T07:20:24.400522 | 2015-12-07T04:55:34 | 2015-12-07T04:55:34 | 33,218,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | from base_path_finder import BasePathFinder
| [
"ewan.d.barr@googlemail.com"
] | ewan.d.barr@googlemail.com |
1c2afae869071dc954bc26d8167396d0e199b87c | 7284c72bbe38820230e1969eedda57698bad9d9d | /player.py | 0020b0f0130df25424c6181370670730b9def805 | [] | no_license | Clutterbutter/one_night_werewolf_python | 7f3fb3cda2212857251664e9cd5ee4a7d7346f21 | b7a240e75f365643bd49b0cf082c8784d2d6a3c8 | refs/heads/master | 2020-04-03T17:41:39.030224 | 2018-10-30T21:06:23 | 2018-10-30T21:06:23 | 155,455,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | #If you want to run the program, go to RUN ME
######
# This file contains the player class.
######
game_sizes = {
1 : 'one',
2 : 'two',
3 : 'three',
4 : 'four',
5 : 'five',
6 : 'six',
7 : 'seven',
8 : 'eight',
9 : 'nine',
10 : '10'
}
class Player:
def __init__(self, player_name, player_number):
self.player_name = player_name
self.player_number = player_number
self.new_role = ''
self.votes = 0
def assign_role(self, role_name):
self.role = role_name
def assign_end_of_night_role(self, new_role_name):
self.new_role = new_role_name
def add_vote(self):
self.votes = self.votes + 1
def display_end_of_night_role(self):
return self.new_role
def display_role(self):
return self.role
def display_name(self):
return self.player_name
def display_number(self):
return self.player_number | [
"0502866@mlsd.org"
] | 0502866@mlsd.org |
f91c3242a4878095551e3cadefe04006bd7cb9ca | 97f10c4614f5946fbffaf614bbdcd654156d73bd | /backend/post/models.py | a5358ebda697afb0a978af2303643e23ae4d6d02 | [
"Apache-2.0"
] | permissive | aryasoni98/Code-Kindle | 71e18a1da9bcf69be2eac168ba1c539f41ef1ebd | 856a3a5fcfe561df4d351f2a724d70f430a29954 | refs/heads/main | 2023-04-10T08:26:52.449561 | 2021-04-19T19:06:00 | 2021-04-19T19:06:00 | 358,722,732 | 0 | 0 | Apache-2.0 | 2021-04-19T19:06:01 | 2021-04-16T21:23:33 | Python | UTF-8 | Python | false | false | 205 | py | from django.db import models
# Create your models here.
class Post(models.Model):
files = models.FileField(upload_to='documents/')
uploaded_at = models.DateTimeField(auto_now_add=True)
| [
"bushra.akram999@hotmail.com"
] | bushra.akram999@hotmail.com |
a38f3f82f9156151b766513745c8d7a2c1d77e7c | d3df14eac415e1d593bd55cb3333a72f790caab2 | /Texy/Texy_event.py | 1eede7b78505b9116a4698ab4188df25124ff087 | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | Thirsty-Robot/Texy.py-Game-Engine | 03850cbc6115e6d4c9614b3812b5da25c289ee03 | 7dce4b6ac79ecf21e7908be007919799c5382d20 | refs/heads/master | 2020-03-23T11:56:07.827300 | 2018-08-07T08:52:59 | 2018-08-07T08:52:59 | 141,527,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from colorama import *
init(autoreset=True)
def text(string):
color = Fore.WHITE + string
return color
def alert(string):
color = Fore.RED + string
return color
def notification(string):
color = Fore.YELLOW + string
return color
def emphasis(string):
color = Fore.YELLOW + Back.BLUE + string
return color
def empasis2 (string):
color = Style.BRIGHT + string
return color
def dim(string):
color = Style.DIM + string
return color | [
"Thirsty-Robot@protonmail.com"
] | Thirsty-Robot@protonmail.com |
a62f1b08399dc567a10875ea06da7d1d25752cbf | 01f966d42235ec3a18609b20e0eb863d4c3f8946 | /windows_tools/office/__init__.py | 16e4179fcc34c542c00f3a120a554578e10d5924 | [
"BSD-3-Clause"
] | permissive | hpkumbhar/windows_tools | 562d8b1d1714b074f603679578665929cf7ea2b3 | 810f22038af4a6250976cd31cfcb4b85c8a79062 | refs/heads/master | 2023-08-15T15:46:32.992440 | 2021-10-11T18:14:09 | 2021-10-11T18:14:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,814 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of windows_tools module
"""
Microsoft Office identification
Versioning semantics:
Major version: backward compatibility breaking changes
Minor version: New functionality
Patch version: Backwards compatible bug fixes
"""
__intname__ = "windows_tools.office"
__author__ = "Orsiris de Jong"
__copyright__ = "Copyright (C) 2020 Orsiris de Jong"
__description__ = "MS Office identification, works for click and run, o365 and others"
__licence__ = "BSD 3 Clause"
__version__ = "0.1.4"
__build__ = "2021101002"
from typing import Tuple, Optional
from windows_tools import registry
# Let's make sure the dictionary goes from most recent to oldest
KNOWN_VERSIONS = {
"16.0": "2016/2019/O365",
"15.0": "2013",
"14.0": "2010",
"12.0": "2007",
"11.0": "2003",
"10.0": "2002",
"9.0": "2000",
"8.0": "97",
"7.0": "95",
}
def _get_office_click_and_run_ident():
# type: () -> Optional[str]
"""
Get ClickAndRun Product Id for Office 2016/2019/O365 detection
Example of result "ProPlus2019Volume,VisioPro2019Volume"
"""
try:
click_and_run_ident = registry.get_value(
registry.HKEY_LOCAL_MACHINE,
r"Software\Microsoft\Office\ClickToRun\Configuration",
"ProductReleaseIds",
arch=registry.KEY_WOW64_64KEY | registry.KEY_WOW64_32KEY,
)
except FileNotFoundError:
click_and_run_ident = None
return click_and_run_ident
def _get_used_word_version():
# type: () -> Optional[int]
"""
Try do determine which version of Word is used (in case multiple versions are installed)
"""
try:
word_ver = registry.get_value(
registry.HKEY_CLASSES_ROOT, r"Word.Application\CurVer", None
)
except FileNotFoundError:
word_ver = None
try:
version = int(word_ver.split(".")[2])
except (IndexError, ValueError, AttributeError):
version = None
return version
def _get_installed_office_version():
# type: () -> Optional[str, bool]
"""
Try do determine which is the highest current version of Office installed
"""
for possible_version, _ in KNOWN_VERSIONS.items():
try:
office_keys = registry.get_keys(
registry.HKEY_LOCAL_MACHINE,
r"SOFTWARE\Microsoft\Office\{}".format(possible_version),
recursion_level=2,
arch=registry.KEY_WOW64_64KEY | registry.KEY_WOW64_32KEY,
combine=True,
)
try:
is_click_and_run = (
True if office_keys["ClickToRunStore"] is not None else False
)
except (TypeError, KeyError):
is_click_and_run = False
try:
# Let's say word is the reference (since we could also have powerpoint viewer or so)
is_valid = True if office_keys["Word"] is not None else False
if is_valid:
return possible_version, is_click_and_run
except KeyError:
pass
except FileNotFoundError:
pass
return None, None
def get_office_version():
# type: () -> Tuple[str, Optional[str]]
"""
It's plain horrible to get the office version installed
Let's use some tricks, ie detect current Word used
"""
word_version = _get_used_word_version()
office_version, is_click_and_run = _get_installed_office_version()
# Prefer to get used word version instead of installed one
if word_version is not None:
office_version = word_version
if office_version is not None:
version = float(office_version)
else:
version = None
click_and_run_ident = _get_office_click_and_run_ident()
def _get_office_version():
# type: () -> Optional[str]
if version is not None:
if version < 16:
try:
return KNOWN_VERSIONS["{}".format(version)]
except KeyError:
pass
# Special hack to determine which of 2016, 2019 or O365 it is
if version == 16:
if isinstance(click_and_run_ident, str):
for ver in ["2016", "2019", "O365"]:
if ver in click_and_run_ident:
return ver
return "2016/2019/O365"
# Let's return whatever we found out
return "Unknown: {}".format(version)
return None
if isinstance(click_and_run_ident, str) or is_click_and_run:
click_and_run_suffix = "ClickAndRun"
else:
click_and_run_suffix = ""
return _get_office_version(), click_and_run_suffix
| [
"ozy@netpower.fr"
] | ozy@netpower.fr |
a29e5f81d40ccd06b6053ab1c38c7a185c9ec5fc | 7851871aa904c8e02b88690ef4423f8d988f8a90 | /square_no_list.py | a8a23de9e4c0dfc5702f6d0c31443e03f6b7e24a | [] | no_license | sharda2001/list | 98809d9e0913adf9691523eb380fef4aa13fb703 | 40c2328e7da6dd410945e9febf767ba78f66cea1 | refs/heads/main | 2023-06-17T09:54:10.122583 | 2021-07-13T03:52:00 | 2021-07-13T03:52:00 | 377,053,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | numbers = [1, 2, 3, 4, 5]
squared_numbers = [number ** 2 for number in numbers]
print(squared_numbers) | [
"noreply@github.com"
] | noreply@github.com |
829b6ee686c41e29153cc2e3e1f700ce9f61714d | 71f13647082c9690cb56688e792734e7b09b0b7d | /data.py | 87da597195f576390e22433e4bd6a98c64e8983d | [] | no_license | anushapv2000/data_class | 3116069aef0f8feee54a1a5699884626a31d5c15 | 3eafd5c3f197a2c5265e952f1cda4213aede5d87 | refs/heads/main | 2023-07-23T06:58:41.371007 | 2021-08-31T10:02:26 | 2021-08-31T10:02:26 | 401,652,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | print('kjsadfa i')
| [
"anusha.pv@btech.christuniversity.in"
] | anusha.pv@btech.christuniversity.in |
29120f7257dfbc2ee265a054bef335380f6ebefa | dcfa5d5d731381cb109d6efbc11c83648ea4fa7e | /Scripts/ScriptsHoras/10Proyecto_spark.py | 6252912a6402aea5f802292cc0b26420cd9c26b9 | [] | no_license | srgxv1/twitchAdsStudy | 798c97252f69522e9190418c264d139ff575608d | 2d834474ffec86a0f240b417abc44415e2b567a9 | refs/heads/master | 2020-04-10T06:28:11.588293 | 2018-12-17T20:19:41 | 2018-12-17T20:19:41 | 160,855,334 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | from pyspark import SparkConf, SparkContext, SQLContext, Row
from pyspark.sql.functions import col
import sys
conf = SparkConf().setMaster('local').setAppName('Twitch')
sc = SparkContext(conf = conf)
sqlContext = SQLContext(sc)
inputRDD = sc.textFile('10').map(lambda x: x.split('\t'))
ids = inputRDD.map(lambda p: Row(Game = p[3].lower(), CurrentViewer = p[1], Followers = float(p[7]), Partner = p[8], Language = p[9]))
df = ids.toDF()
df = df.filter(df['Language'] == "es").filter(df['Partner'] != "-1").filter(df['Game'] != "-1").filter(df['Followers'] >= 1000)
df.groupBy(col("Game")).agg({"CurrentViewer":"sum"}).orderBy("sum(CurrentViewer)", ascending=False).limit(10).coalesce(1).write.format("com.databricks.spark.csv").save('out10') | [
"rodrigo.manuel.perez@gmail.com"
] | rodrigo.manuel.perez@gmail.com |
ae825fe3516b3c4458a8137c101f289786af735c | 3ced55b04ec82df5257f0e3b500fba89ddf73a8a | /src/stk/molecular/topology_graphs/cage/two_plus_four/two_plus_four.py | 80aa18537329f8d918ee7fea003280f088245115 | [
"MIT"
] | permissive | rdguerrerom/stk | 317282d22f5c4c99a1a8452023c490fd2f711357 | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | refs/heads/master | 2023-08-23T21:04:46.854062 | 2021-10-16T14:01:38 | 2021-10-16T14:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,112 | py | """
Two Plus Four
=============
"""
from ..cage import Cage
from ..vertices import LinearVertex, NonLinearVertex
from ...topology_graph import Edge
class TwoPlusFour(Cage):
"""
Represents a capsule cage topology graph.
Unoptimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
)
bb2 = stk.BuildingBlock(
smiles='Brc1c(Br)cc(Br)c(Br)c1',
functional_groups=[stk.BromoFactory()],
)
cage = stk.ConstructedMolecule(
topology_graph=stk.cage.TwoPlusFour((bb1, bb2)),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cage.get_atoms(),
cage.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cage.get_bonds()
),
)
:class:`.Collapser` optimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
)
bb2 = stk.BuildingBlock(
smiles='Brc1c(Br)cc(Br)c(Br)c1',
functional_groups=[stk.BromoFactory()],
)
cage = stk.ConstructedMolecule(
topology_graph=stk.cage.TwoPlusFour(
building_blocks=(bb1, bb2),
optimizer=stk.Collapser(),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cage.get_atoms(),
cage.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cage.get_bonds()
),
)
Nonlinear building blocks with four functional groups are
required for this topology.
Linear building blocks with two functional groups are required for
this topology.
When using a :class:`dict` for the `building_blocks` parameter,
as in :ref:`cage-topology-graph-examples`:
*Multi-Building Block Cage Construction*, a
:class:`.BuildingBlock`, with the following number of functional
groups, needs to be assigned to each of the following vertex ids:
| 4-functional groups: (0, 1)
| 2-functional groups: (2, 3, 4, 5)
See :class:`.Cage` for more details and examples.
"""
_vertex_prototypes = (
NonLinearVertex(0, [0, 0, -1]),
NonLinearVertex(1, [0, 0, 1]),
LinearVertex(2, [2, 0, 0], False),
LinearVertex(3, [-2, 0, 0], False),
LinearVertex(4, [0, 2, 0], False),
LinearVertex(5, [0, -2, 0], False),
)
_edge_prototypes = (
Edge(0, _vertex_prototypes[2], _vertex_prototypes[0]),
Edge(1, _vertex_prototypes[2], _vertex_prototypes[1]),
Edge(2, _vertex_prototypes[3], _vertex_prototypes[0]),
Edge(3, _vertex_prototypes[3], _vertex_prototypes[1]),
Edge(4, _vertex_prototypes[4], _vertex_prototypes[0]),
Edge(5, _vertex_prototypes[4], _vertex_prototypes[1]),
Edge(6, _vertex_prototypes[5], _vertex_prototypes[0]),
Edge(7, _vertex_prototypes[5], _vertex_prototypes[1])
)
_num_windows = 4
_num_window_types = 1
| [
"noreply@github.com"
] | noreply@github.com |
c4ab791f131770d16025600c9969fa275bcb485e | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/recicler/localizaciones/l10n_be_invoice_bba/__init__.py | 8c3517b22a87f1e464f6866fc7d7621f263d5a7d | [] | no_license | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | # -*- encoding: utf-8 -*-
# Part of YuanCloud. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
import partner
import invoice
| [
"liuganghao@lztogether.com"
] | liuganghao@lztogether.com |
4703a772fb058f2ae5422adf59565dcc4c53ebd5 | 138a481380dae21341e5ad5f25d9f9a38451f217 | /01/1.py | 1f534b0795bbf72bad753a8817f425a14337df86 | [] | no_license | glee-/advent2020 | 456b855d008e784a1cd627dc274f2afd6ea3bd06 | acc5c9990c33c5428853e24608d542feaa42d032 | refs/heads/master | 2023-01-31T08:38:53.671050 | 2020-12-17T09:03:58 | 2020-12-17T09:03:58 | 321,848,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import sys
# two sum
f = open(sys.argv[1])
nums = []
for line in f.readlines():
nums.append(int(line.strip()))
setnums = set(nums)
for num in nums:
alt = 2020 - num
if alt in setnums:
print(num, alt)
print(num * alt)
| [
"gleeb@berkeley.edu"
] | gleeb@berkeley.edu |
409b1cba66bea305247d7f650e6ca420be6dfcd5 | 69f6f415469bca089f6d147767c13c401f142a31 | /flywheel_cli/monkey.py | 188f2dbdf9f0eb25bcddbaedca1b47fec45f3b75 | [
"MIT"
] | permissive | amitvakula/python-cli | 8d7e34a98237680db87099aa1d72db25c8391cbb | 0bdbd39c40cdb3fe4dbd3b0cb38abbce94242dac | refs/heads/master | 2020-06-25T09:48:46.362433 | 2019-07-16T18:21:16 | 2019-07-16T18:21:16 | 199,276,371 | 0 | 0 | MIT | 2019-07-28T11:02:30 | 2019-07-28T11:02:30 | null | UTF-8 | Python | false | false | 618 | py | """This module provides system-level monkey patches as needed"""
def patch_fs():
"""On windows, python 3.6.6 os.readlink errors if passed bytes instead of a string.
This monkey-patch fixes the case where pyfilesystem uses fsencode before calling readlink.
"""
import os
if os.name == 'nt':
from fs.osfs import OSFS
def _gettarget(self, sys_path):
try:
target = os.readlink(os.fsdecode(sys_path))
except OSError:
return None
else:
return os.fsencode(target)
OSFS._gettarget = _gettarget
| [
"justinehlert@flywheel.io"
] | justinehlert@flywheel.io |
2617cd1f3a647f2cf4ab00ef8bf59d44f9754cd1 | 2bfffc7aecf402e391a99118833fb392667b6665 | /cat.py | da5effe54063a959d5340a1bdb1746cc526c2d40 | [] | no_license | VladimirKozlov466/QAP_module_16 | 3064c8bb9abc47f7a2d6d4ee09cd46dca119fca0 | b84d2d90ab8f84286e258b1dbee71ffa479725b9 | refs/heads/master | 2023-07-07T22:15:34.048649 | 2021-09-03T13:26:34 | 2021-09-03T13:26:34 | 400,108,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | class Cat:
def __init__(self, name, gender, age):
self.name = name
self.gender = gender
self.age = age
def getName(self):
return self.name
def getGender(self):
return self.gender
def getAge(self):
return self.age
def pet_info(self):
return f'Имя питомца: {self.getName()}, Пол питомца: {self.getGender()}, Возраст: {self.getAge()}'
fedor = Cat(name="Федор", gender="мальчик", age=5) # тоже не понимаю почему ругается?!
print(fedor.pet_info())
| [
"noreply@github.com"
] | noreply@github.com |
a726034e9cd9ddef97e37184362bcc9a99f8446a | 525592619f62d5c782bbeac002e3472fafc05ecd | /blog_project/urls.py | 426b1b9d623fe587ed330e88b64536172a20e32c | [] | no_license | suelalleshaj/blog | eef9459568c8244f5142c9c6b6c6c3ef3bda2804 | 688158b2bf6c4f5b38f2ffd73a700a2ed953bdb4 | refs/heads/master | 2023-04-25T10:09:11.810655 | 2021-04-27T17:58:51 | 2021-04-27T17:58:51 | 362,208,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | """blog_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
path('', include('blog.urls')),
]
| [
"suelalleshaj"
] | suelalleshaj |
7fb479085e8c781abc3d68e21b41792dad0d3518 | 90e57d637193457f25faca09e3607919c79586f4 | /Text/File_reader.py | 14d6fe349581de010fc4ff32354cbe3caa33a8f2 | [] | no_license | Pelmondo/PythonPractice | eee7e04dd47d18f146280b6c216e9d44256e2083 | 72c60a978451819b1a26f07e95a8c8524c1f10b7 | refs/heads/master | 2020-05-03T04:48:27.748213 | 2019-03-29T15:33:44 | 2019-03-29T15:33:44 | 178,432,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # with open('pi_digits.txt') as file_object:
# contents = file_object.read()
# print(contents.rstrip())
file_name = 'pi_million_digits.txt'
# with open(file_name) as file_object:
# for line in file_object:
# print(line.rstrip())
with open(file_name) as file_object:
lines = file_object.readlines()
# print(lines[0].rstrip())
pi_string = ''
for line in lines:
pi_string += line.strip()
# print(pi_string)
print(len(pi_string))
| [
"studnetserg@gmail.com"
] | studnetserg@gmail.com |
ddb817066bd2ddac9614af88404c53d4467561fd | 015eeb251288d23a6ac2fee1c8f85a347f70b34c | /Util/Log.py | fb98fbe47e45cc004da4392df0f13137a4e9674b | [] | no_license | zhuhui2018/dataDrien | b137aaa23d7fe7d3ad9f495469be2a7044d4caaa | fb5fc725055d113a4d75a7eda81e2be78a043831 | refs/heads/master | 2022-04-21T13:38:41.288513 | 2020-04-20T12:47:21 | 2020-04-20T12:47:21 | 257,276,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | #encoding:utf-8
import logging.config
from Proj_Var.Var import *
#读取日志的配置文件
logging.config.fileConfig(LogPath)
#选择一个日志格式
logger=logging.getLogger("example02")
def error(message):
#打印error级别的信息
logger.error(message)
def info(message):
#打印info级别的信息
logger.info(message)
def warning(message):
#打印warnning级别的信息
logger.warning(message)
if __name__ == "__main__":
info("world")
warning("passwrod")
error("password") | [
"1137030459@qq.com"
] | 1137030459@qq.com |
ec08ce65e5c67bdf27a9cc5d8f54362885258908 | 5aa07a91341f48eb2278882fb7e53e4686c7640f | /Website/homepage/migrations/0018_auto_20180413_1306.py | 4c3425502012dd9012e44dfa755403b9c0bda3f7 | [] | no_license | hazellobo18/travel_agency.github.io | f724adbc054296c50ea138adac22ee18fcc25797 | 1f9d1e3eadc3430e22673548e9d601fc0446771b | refs/heads/master | 2020-03-10T22:28:21.387962 | 2018-04-15T14:47:52 | 2018-04-15T14:47:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # Generated by Django 2.0.3 on 2018-04-13 07:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('homepage', '0017_auto_20180412_2154'),
]
operations = [
migrations.AlterField(
model_name='userprofileinfo',
name='user',
field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| [
"hazellobo18@gmail.com"
] | hazellobo18@gmail.com |
8afa42adcb90a1783e1a39552cbf424e96241e96 | 593566486cb3eae8e049847fa9c78c3949a88f87 | /TodoList/views.py | 0ea095a7d9a274055812715ac1d6359d35c79870 | [] | no_license | DaKoala/Pre-Work---Todo-List | e6f13f251396d810b3804c218bf4aa5be36c1e04 | 4ac6226d983d1fe687301f68436db8222a811eb7 | refs/heads/master | 2021-01-01T18:18:59.785031 | 2017-07-25T12:40:35 | 2017-07-25T12:40:35 | 98,300,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | from django.shortcuts import render
from django.http import HttpResponse
from .forms import ToDoForm
from .models import *
import re
import datetime
# Create your views here.
def index(request):
to_do_list = ToDo.objects.all()
week_list = list()
for i in range(7):
week_list.append(datetime.date.today() + datetime.timedelta(days=i))
return render(request, 'index.html', locals())
def list_all(request):
to_do_list = ToDo.objects.all()
all_day_list = list()
for to_do in to_do_list:
all_day_list.append(to_do.date)
all_day_list.sort()
return render(request, 'list_all.html', locals())
def list_prior(request):
to_do_list = ToDo.objects.all()
all_day_list = list()
prior_list = [1, 2, 3, 4]
return render(request, 'list_prior.html', locals())
def list_expire(request):
to_do_list = ToDo.objects.all()
all_expire_day_list = list()
for to_do in to_do_list:
all_expire_day_list.append(to_do.expire_date)
all_expire_day_list.sort()
return render(request, 'list_expire.html', locals())
def add(request):
if request.method == 'POST':
form = ToDoForm(request.POST)
if form.is_valid():
to_do_info = form.save()
to_do_info.save()
return render(request, 'finish.html', locals())
else:
form = ToDoForm()
return render(request, 'add.html', {'form_info': form})
def delete(request):
url = request.get_full_path()
id_filter = re.compile(r'\d+')
delete_id = int(id_filter.findall(url)[-1])
ToDo.objects.filter(id=delete_id).delete()
return render(request, 'delete.html', locals())
def edit(request):
url = request.get_full_path()
id_filter = re.compile(r'\d+')
edit_id = int(id_filter.findall(url)[-1])
edit_object = ToDo.objects.filter(id=edit_id)
if request.method == 'POST':
form = ToDoForm(request.POST)
if form.is_valid():
new_date = form.cleaned_data['date']
new_content = form.cleaned_data['content']
new_prior = form.cleaned_data['prior']
new_is_finished = form.cleaned_data['is_finished']
new_expire_date = form.cleaned_data['expire_date']
edit_object.update(date=new_date,
content=new_content,
prior=new_prior,
is_finished=new_is_finished,
expire_date=new_expire_date)
return render(request, 'finish.html', locals())
else:
form = ToDoForm(initial={'content': ToDo.objects.get(id=edit_id).content,
'date': ToDo.objects.get(id=edit_id).date,
'prior': ToDo.objects.get(id=edit_id).prior,
'is_finished': ToDo.objects.get(id=edit_id).is_finished,
'expire_date': ToDo.objects.get(id=edit_id).expire_date, })
return render(request, 'edit.html', locals())
def complete(request):
url = request.get_full_path()
id_filter = re.compile(r'\d+')
complete_id = int(id_filter.findall(url)[-1])
ToDo.objects.filter(id=complete_id).update(is_finished=True)
return render(request, 'finish.html', locals()) | [
"noreply@github.com"
] | noreply@github.com |
0799da7f0ed0a7e68edd997eeaa9deedf6405066 | 8f70b40ef1c657ee14accfe6e2f8b1ebb1bebb7e | /schoolinfo/urls.py | e842061d58d48d531db89aafe7420297d52ef38e | [] | no_license | TejashviVerma/School_ERP | e3d6f1aabe92167c2b55c0b1682dde505bb04edd | 11406da8b1d8701b7ea55f75c76f1cbf44a72c53 | refs/heads/master | 2023-08-03T15:10:11.481306 | 2020-09-13T18:02:40 | 2020-09-13T18:02:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name="addSchoolInfo"),
]
| [
"yashboura303@gmail.com"
] | yashboura303@gmail.com |
61f7cc0d8f2595e32918fd8d26d5b20f718ccaec | fd20270ef3a1b9611a4f1572318e6cdfc3e776d8 | /webapp2/user/migrations/0004_auto_20150907_0713.py | 00a34c359dfa20f0984abfad787a9200f8c2898e | [] | no_license | moment-x/f | 78ed69dc1e690f255b8be6f9e63eb5befef256ba | 0bc921ac67c06a9f9e8f44381478618aec55f10b | refs/heads/master | 2021-01-25T00:11:37.935406 | 2015-09-09T02:28:30 | 2015-09-09T02:28:30 | 42,149,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20150907_0352'),
]
operations = [
migrations.AlterField(
model_name='user',
name='contact',
field=models.CharField(unique=True, null=True, max_length=15),
),
]
| [
"token@email.com"
] | token@email.com |
1460b3e9fde82391611fff364e1feede93cbbc11 | 28dce7cced29b2870e88535e4b0f6f9cbb02d746 | /task_18/get_data.py | 9a132fd6e09428bf5728f96570ac202818e56cca | [] | no_license | kuzzzko/python | d12f3721c9012bdfc16d85dde1ccf442542c2eed | 6be15e9f53a26b8305e3b91eaf97963fff676383 | refs/heads/master | 2020-04-22T05:46:29.822390 | 2019-04-20T19:40:31 | 2019-04-20T19:40:31 | 170,167,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import sqlite3
import sys
def output_with_2args(keys,cursor_obj):
for row in cursor_obj:
for k in keys:
print('{:12}: {}'.format(k, row[k]))
print('-' * 40)
def output_with_0args(cursor_obj):
for row in cursor_obj:
print('{:20} {:15} {:5} {:18} {}'.format(*row))
db_filename = 'dhcp_snooping.db'
conn = sqlite3.connect(db_filename)
if len(sys.argv) == 3:
key,value = sys.argv[1:]
keys = ['mac', 'ip', 'vlan', 'interface', 'switch']
if key not in keys:
print('Данный параметр не поддерживается.')
print('Допустимые значения параметров: {}'.format(', '.join(keys)))
else:
keys.remove(key)
conn.row_factory = sqlite3.Row
print('\nDetailed information for host(s) with', key, value)
print('-' * 40)
query_act = 'select * from dhcp where {} = ? and active = 1'.format(key)
output_with_2args(keys,conn.execute(query_act, (value, )))
print('\n' + '=' * 40)
print('\nInactive values:')
print('-' * 40)
query_inact = 'select * from dhcp where {} = ? and active = 0'.format(key)
output_with_2args(keys, conn.execute(query_inact, (value, )))
elif len(sys.argv) == 1:
print('-' * 70)
print('Active values:')
print('-' * 70)
output_with_0args(conn.execute('select * from dhcp where active = 1'))
print('-' * 70)
print('Inactive values:')
print('-' * 70)
output_with_0args(conn.execute('select * from dhcp where active = 0'))
else:
print('Пожалуйста, введите два или ноль аргументов')
| [
"kuzzzko@gmail.com"
] | kuzzzko@gmail.com |
8d26a6f969809cb725345cdc97e909cdc61f535b | 97a39cfdbd0ae4310eef729785630438278d3279 | /manage.py | 4dfa3b998a58a9b60a40062cf56854fe68d23419 | [
"Apache-2.0"
] | permissive | cvlucian/confidant | e9ddf15885ec6a4442422a00d7c9d2a84f8dfa20 | 8e273fb813d57ae831343f7d047b32a8f62458cb | refs/heads/master | 2021-01-13T09:37:39.757319 | 2020-09-23T14:35:53 | 2020-09-23T14:35:53 | 72,053,900 | 1 | 0 | NOASSERTION | 2020-09-23T14:36:19 | 2016-10-26T23:44:55 | Python | UTF-8 | Python | false | false | 809 | py | from flask.ext.script import Manager
import confidant.workarounds # noqa
from confidant import app
from scripts.utils import ManageGrants
from scripts.utils import RevokeGrants
from scripts.bootstrap import GenerateSecretsBootstrap
from scripts.bootstrap import DecryptSecretsBootstrap
manager = Manager(app.app)
# Ensure KMS grants are setup for services
manager.add_command("manage_kms_auth_grants", ManageGrants)
# Revoke all KMS grants
manager.add_command("revoke_all_kms_auth_grants", RevokeGrants)
# Generate encrypted blob from a file
manager.add_command("generate_secrets_bootstrap", GenerateSecretsBootstrap)
# Show the YAML formatted secrets_bootstrap in a decrypted form
manager.add_command("decrypt_secrets_bootstrap", DecryptSecretsBootstrap)
if __name__ == "__main__":
manager.run()
| [
"rlane@lyft.com"
] | rlane@lyft.com |
2117b794c1515992b0712439da36ddfd1ee9888c | 01be82f78a051e0fb50bc9bf4c581376536019b0 | /testdome/two_sums.py | ed4fa520d5ba8f990ec616e74470e2c79a707f53 | [
"MIT"
] | permissive | sgrade/pytest | 4aaaa2b215a685468f91390733b1d03669a3ce78 | eae5ee9dd6829d52644c4df489d5514a0e0c8728 | refs/heads/master | 2023-06-08T23:36:12.437741 | 2023-06-06T19:53:26 | 2023-06-06T19:53:26 | 124,512,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,985 | py | """
Write a function that, given a list and a target sum, returns zero-based indices of any two
distinct elements whose sum is equal to the target sum. If there are no such elements,
the function should return (-1, -1).
For example, find_two_sum([1, 3, 5, 7, 9], 12) should return a tuple containing any of the following pairs of indices:
1 and 4 (3 + 9 = 12)
2 and 3 (5 + 7 = 12)
3 and 2 (7 + 5 = 12)
4 and 1 (9 + 3 = 12)
"""
class TwoSum:
@staticmethod
def find_two_sum(numbers, target_sum):
"""
:param numbers: (list of ints) The list of numbers.
:param target_sum: (int) The required target sum.
:returns: (a tuple of 2 ints) The indices of the two elements whose sum is equal to target_sum
"""
"""
# Do it with generators
# It works, but only gets 25% marks (1 of 4 tests pass)
def gen_tuple(num_list, target_s):
for item in num_list:
complement = target_s - item
if complement in numbers:
target_t = (numbers.index(item), numbers.index(complement))
yield target_t
yield (-1, -1)
return next(gen_tuple(numbers, target_sum))
"""
# Below works, but not perfect: 3 of 4 tests pass, only performance test fails
'''for number in numbers:
supplement = target_sum - number
if supplement in numbers:
index1 = numbers.index(number)
if supplement != number:
index2 = numbers.index(supplement)
return index1, index2
else:
if numbers.count(number) >= 2:
numbers.pop(index1)
index2 = (numbers.index(supplement))+1
return index1, index2
else:
return -1, -1
'''
# same - little performance
# changed but not finished
"""
for index1 in range(len(numbers)):
number = numbers[index1]
supplement = target_sum - number
for index2 in range(len(numbers)):
return (index1, index2) if number and supplement in numbers
if supplement in numbers:
if supplement != number:
return index1, numbers.index(supplement)
else:
if numbers.count(number) >= 2:
numbers.pop(index1)
index2 = (numbers.index(supplement)) + 1
return index1, index2
else:
return -1, -1
"""
print('First run')
print(TwoSum.find_two_sum([1, 3, 5, 7, 9], 12))
print('Second run')
print(TwoSum.find_two_sum([9, 2, 12, 100, 100], 200))
print('Third run')
print(TwoSum.find_two_sum([1, 23, 54, 97, 19, 12, 234, 23423, 67, 789, 23, 234, 23423, 666, 3], 669))
print('Fourth run')
print(TwoSum.find_two_sum([1, 3, 5, 7, 9], 120))
| [
"romankmail@gmail.com"
] | romankmail@gmail.com |
6305425047bc6275d2a171616fbdffe8a360ec2c | 674f5dde693f1a60e4480e5b66fba8f24a9cb95d | /armulator/armv6/opcodes/concrete/rsb_register_shifted_register_a1.py | 584074016b2edaaf59d9ac2ff84cb51509bec935 | [
"MIT"
] | permissive | matan1008/armulator | 75211c18ebc9cd9d33a02890e76fc649483c3aad | 44f4275ab1cafff3cf7a1b760bff7f139dfffb07 | refs/heads/master | 2023-08-17T14:40:52.793120 | 2023-08-08T04:57:02 | 2023-08-08T04:57:02 | 91,716,042 | 29 | 7 | MIT | 2023-08-08T04:55:59 | 2017-05-18T16:37:55 | Python | UTF-8 | Python | false | false | 837 | py | from armulator.armv6.bits_ops import substring, bit_at
from armulator.armv6.opcodes.abstract_opcodes.rsb_register_shifted_register import RsbRegisterShiftedRegister
from armulator.armv6.shift import decode_reg_shift
class RsbRegisterShiftedRegisterA1(RsbRegisterShiftedRegister):
@staticmethod
def from_bitarray(instr, processor):
rm = substring(instr, 3, 0)
type_o = substring(instr, 6, 5)
rs = substring(instr, 11, 8)
rd = substring(instr, 15, 12)
rn = substring(instr, 19, 16)
s = bit_at(instr, 20)
if rd == 0b1111 or rn == 0b1111 or rm == 0b1111 or rs == 0b1111:
print('unpredictable')
else:
shift_t = decode_reg_shift(type_o)
return RsbRegisterShiftedRegisterA1(instr, setflags=s, m=rm, s=rs, d=rd, n=rn, shift_t=shift_t)
| [
"matan1008@gmail.com"
] | matan1008@gmail.com |
78df3320c27ab2b3e2c072df6c4e2ef16a3b7759 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1469.py | 8fbe59658076b2a46a7c77ed1bf039f34b16f0ae | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py | import numpy as np
def checkWin(p):
#2 is X, 3 is O, check using the MOD method
if p == 0:
return "no"
if p % 2 == 0 and p % 3 != 0:
return 'X'
if p % 2 != 0 and p % 3 == 0:
return 'O'
else:
return 'draw'
def solve(filename):
fin = open(filename + '.in', 'r')
fout = open(filename + '.out', 'w')
T = int(fin.readline())
for case in xrange(T):
answer = ""
board = np.zeros((4, 4), np.int)
for i in xrange(4):
line = fin.readline().strip()
for j in xrange(4):
if line[j] == 'X':
board[i, j] = 2
elif line[j] == 'O':
board[i, j] = 3
elif line[j] == 'T':
board[i, j] = 1
#check rows and columns
prods = []
for i in xrange(4):
row_prod = np.prod(board[i, :])
col_prod = np.prod(board[:, i])
prods.append(checkWin(row_prod))
prods.append(checkWin(col_prod))
#print checkWin(row_prod), checkWin(col_prod)
#diagonals
prod_diag1 = 1
prod_diag2 = 1
for i in xrange(4):
prod_diag1 *= board[i, i]
prod_diag2 *= board[i, 3 - i]
prods.append(checkWin(prod_diag1))
prods.append(checkWin(prod_diag2))
#check answers
if 'no' in prods:
if 'X' not in prods and 'O' not in prods:
answer = 'Game has not completed'
elif 'X' in prods and 'O' not in prods:
answer = 'X won'
elif 'X' not in prods and 'O' in prods:
answer = 'O won'
else:
if 'X' not in prods and 'O' not in prods:
answer = 'Draw'
elif 'X' in prods and 'O' not in prods:
answer = 'X won'
elif 'X' not in prods and 'O' in prods:
answer = 'O won'
print answer
fout.write(('Case #%d: ' % (case + 1)) + str(answer) + '\n')
fin.readline()
fin.close()
fout.close()
if __name__ == "__main__":
# solve("A-tiny")
# solve("A-small-attempt0")
solve("A-large")
#solve("input")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
24ba9691f4135c3792fffc6b6620cd106159e6aa | ecf7adcd4d6bce51ee7c36048ed895b97b135191 | /HcalNoiseAnalyzer/config/res/HcalNoiseTree_Cosmics_v3_231228.py | b80ab8a132aa6b7077ac57c051c979573c233c2c | [] | no_license | paktinat/HCAL | 54739e72a54b870da2cbd77952c17280a86062d6 | d6a760ff66fb880146fd0f0a18f5b5791456e121 | refs/heads/master | 2016-08-03T18:39:54.885419 | 2015-12-19T08:46:50 | 2015-12-19T08:46:50 | 35,083,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,753 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: RECO --data -s RAW2DIGI,RECO --scenario cosmics --filein file:5C1B1DE5-9B38-E211-A048-001D09F24FBA.root --fileout DummyOutput.root --conditions GR_R_72_V1::All --no_exec
import FWCore.ParameterSet.Config as cms
process = cms.Process('RECO')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContentCosmics_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.RawToDigi_Data_cff')
process.load('Configuration.StandardSequences.ReconstructionCosmics_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# in Gobinda's script
#process.load("Configuration.Geometry.GeometryIdeal_cff")
#process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
#process.load("Geometry.CommonDetUnit.globalTrackingGeometry_cfi")
#process.load("RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi")
#process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAny_cfi")
#process.load("TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
#input = cms.untracked.int32(10000)
)
#process.metFilter = cms.EDFilter("METFilter",
# CaloMETsrc = cms.InputTag("caloMet"),
# MinMET = cms.double(7.0)
#)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.MessageLogger = cms.Service("MessageLogger",
cout = cms.untracked.PSet(
default = cms.untracked.PSet( ## kill all messages in the log
limit = cms.untracked.int32(0)
),
FwkJob = cms.untracked.PSet( ## but FwkJob category - those unlimitted
limit = cms.untracked.int32(-1)
),
FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(100), ## print event record number
limit = cms.untracked.int32(-1)
),
FwkSummary = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
# reportEvery = cms.untracked.int32(100),
# limit = cms.untracked.int32(10000000)
)
),
categories = cms.untracked.vstring('FwkJob','FwkReport','FwkSummary'),
destinations = cms.untracked.vstring('cout')
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
#fileNames = cms.untracked.vstring('/store/data//Commissioning2014/Cosmics/RAW/v3/000/225/125/00000/888ADCC7-352D-E411-B401-02163E00A091.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/MinimumBias/RAW/v3/000/224/512/00000/3CC0EA67-1727-E411-B11A-02163E008EFD.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v1/000/221/107/00000/521DB9A3-1EC1-E311-A9E4-02163E00BA2A.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/227/391/00000/90DAFD68-0750-E411-8E0A-02163E008BE3.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/MinimumBias/RAW/v3/000/227/391/00000/A2CD1AE3-0650-E411-A39E-02163E008CFE.root')
# fileNames = cms.untracked.vstring('/store/data/Commissioning2014/HcalHPDNoise/RAW/v3/000/227/489/00000/0CF75C7A-5855-E411-92A1-02163E00A129.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/22D7FAD0-6490-E411-891E-02163E0104D6.root')
fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/265CEF40-5E90-E411-A5F4-02163E011C1F.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/26672730-5E90-E411-8C85-02163E011BE3.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/62B946C7-6490-E411-B25E-02163E01193D.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/6451164E-4790-E411-BB28-02163E011945.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/682BE4C5-6490-E411-8E8B-02163E011C45.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/6A76831A-6590-E411-B89D-02163E011BDE.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/8A0391CE-6490-E411-B4EF-02163E00FC3C.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/8E75352E-5E90-E411-9AD8-02163E00FB18.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/AA8D8C70-5E90-E411-9CAB-02163E00FC3C.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/B81762C5-6490-E411-A1CA-02163E0119E8.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/C21A4ECC-6490-E411-97E6-02163E011BE3.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/CAFE674F-5E90-E411-8A5F-02163E00FB9F.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/E06FFF2B-5E90-E411-8F72-02163E0104D6.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/E2F36DC2-6490-E411-84C7-02163E01192A.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning2014/Cosmics/RAW/v3/000/231/228/00000/FC8BBCC1-6490-E411-BBB7-02163E00FDB9.root')
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.19 $'),
annotation = cms.untracked.string('RECO nevts:1'),
name = cms.untracked.string('Applications')
)
# Output definition
#process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
# splitLevel = cms.untracked.int32(0),
# eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
# outputCommands = process.RECOSIMEventContent.outputCommands,
# fileName = cms.untracked.string('DummyOutput.root'),
# dataset = cms.untracked.PSet(
# filterName = cms.untracked.string(''),
# dataTier = cms.untracked.string('')
# )
#)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
#needed for 720 version
process.GlobalTag = GlobalTag(process.GlobalTag, 'GR_R_72_V1::All', '')
#needed for 703 version
#process.GlobalTag = GlobalTag(process.GlobalTag, 'GR_R_70_V2::All', '')
# Hcal noise analyzers
process.HBHENoiseFilterResultProducer = cms.EDProducer(
'HBHENoiseFilterResultProducer',
noiselabel = cms.InputTag('hcalnoise'),
minRatio = cms.double(-999),
maxRatio = cms.double(999),
minHPDHits = cms.int32(17),
minRBXHits = cms.int32(999),
minHPDNoOtherHits = cms.int32(10),
minZeros = cms.int32(10),
minHighEHitTime = cms.double(-9999.0),
maxHighEHitTime = cms.double(9999.0),
maxRBXEMF = cms.double(-999.0),
minNumIsolatedNoiseChannels = cms.int32(10),
minIsolatedNoiseSumE = cms.double(50.0),
minIsolatedNoiseSumEt = cms.double(25.0),
useTS4TS5 = cms.bool(False),
useRBXRechitR45Loose = cms.bool(False),
useRBXRechitR45Tight = cms.bool(False),
IgnoreTS4TS5ifJetInLowBVRegion = cms.bool(True),
jetlabel = cms.InputTag('ak5PFJets'),
maxjetindex = cms.int32(0),
maxNHF = cms.double(0.9)
)
process.TFileService = cms.Service("TFileService",
#fileName = cms.string("NoiseTree_Commissionig2014_HcalHPDNoise_v3_227489.root")
fileName = cms.string("/tmp/fahim/NoiseTree_Commissionig2014_Cosmics_v3_231228_02.root")
)
process.ExportTree = cms.EDAnalyzer("HcalNoiseAnalyzer",
HBHERecHitCollection = cms.untracked.string('hbhereco'),
IsCosmic = cms.untracked.bool(True)
)
process.hcalNoiseAna = cms.EDAnalyzer('HcalNoiseHistogrammer',
HBHERecHitCollection = cms.InputTag("hbhereco"),
HBHEDigiCollection = cms.InputTag("hcalDigis")
)
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.reconstruction_step = cms.Path(process.reconstructionCosmics * process.HBHENoiseFilterResultProducer * process.ExportTree)
#process.reconstruction_step = cms.Path(process.reconstructionCosmics * process.HBHENoiseFilterResultProducer * process.hcalNoiseAna)
process.endjob_step = cms.EndPath(process.endOfProcess)
#process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.reconstruction_step,process.endjob_step)
#===================== Message Logger =============================
##process.load("FWCore.MessageLogger.MessageLogger_cfi")
##process.MessageLogger.categories.append('PATSummaryTables')
##process.MessageLogger.cerr.PATSummaryTables = cms.untracked.PSet(
## limit = cms.untracked.int32(10),
## reportEvery = cms.untracked.int32(1)
## )
##process.options = cms.untracked.PSet(
## wantSummary = cms.untracked.bool(True)
## )
##process.MessageLogger.cerr.FwkReport.reportEvery = 1000
| [
"saeid.paktinat@cern.ch"
] | saeid.paktinat@cern.ch |
8de96ffa0c89cc9d5487a55588a2927367ce0f10 | 808035dee84363d9a53f740ae2918b39f07c448e | /topics/migrations/0008_topic_created_by.py | 3417de43ed5fca3d7286843008d43429c018d14d | [] | no_license | wandeei/QA | 5f706fa8717cbab4a3bdcd210c0e8fd61e8049e2 | 8ddb60f28cdade4b6825aeadf860079888778d14 | refs/heads/master | 2021-01-22T21:33:14.852560 | 2017-03-19T01:38:28 | 2017-03-19T01:38:28 | 85,443,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-01 20:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('topics', '0007_auto_20170228_2108'),
]
operations = [
migrations.AddField(
model_name='topic',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"fokeleji@gmail.com"
] | fokeleji@gmail.com |
aedb63d1e0dbac9808fffbec764639c2e106e2e3 | 7f0ad59f5d2e21438674362c5a1bb04f72f7b14c | /booktest/migrations/0002_auto_20190925_1832.py | d6528288a2067fa1eefdf0c77e5c0098d046c995 | [] | no_license | SHOST628/djangop1 | 1f3221b972dfccb8ea468b1c85b16426397b4cd6 | 95c3d891102d2e7431397fba479251060c64977a | refs/heads/master | 2023-04-30T02:17:37.724184 | 2019-09-28T05:14:28 | 2019-09-28T05:14:28 | 210,747,901 | 0 | 0 | null | 2023-04-21T20:37:30 | 2019-09-25T03:29:57 | Python | UTF-8 | Python | false | false | 356 | py | # Generated by Django 2.2.5 on 2019-09-25 10:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('booktest', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='bookinfo',
old_name='bititle',
new_name='btitle',
),
]
| [
"599531369@qq.com"
] | 599531369@qq.com |
7adb5d9b745908f403b296e7e0549b4dd2b044dc | c8bb38197d9fd24d6c633d605a9374e28eae40c9 | /Testing/X-ray SuiteTry/views/XRS_MainView.py | 3552fa5dbb1b7dda92b97dd1a07359dd26e6437b | [] | no_license | kif/Py2DeX | d42b3ceeebc9d104df4cd2996a0170f95cee29d7 | f149e07b5b5facfb7e622866d151f384184c1bc8 | refs/heads/master | 2020-04-16T01:23:10.794126 | 2014-07-09T15:21:04 | 2014-07-09T15:21:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,012 | py | import sys
from PyQt4 import QtGui
from UiFiles.XRS_Main import Ui_XRS_Main
import matplotlib as mpl
mpl.rcParams['font.size'] = 10
mpl.rcParams['lines.linewidth'] = 0.5
mpl.rcParams['lines.color'] = 'g'
mpl.rcParams['text.color'] = 'white'
mpl.rc('axes', facecolor='#1E1E1E', edgecolor='white', lw=1, labelcolor='white')
mpl.rc('xtick', color='white')
mpl.rc('ytick', color='white')
mpl.rc('figure', facecolor='#1E1E1E', edgecolor='black')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class XRS_MainView(QtGui.QWidget, Ui_XRS_Main):
def __init__(self, parent=None):
super(XRS_MainView, self).__init__(parent)
self.setupUi(self)
self.create_axes()
def create_axes(self):
self.image_axes = ImageAxes(self.image_frame)
self.graph_axes = GraphAxes(self.graph_frame)
def plot_image(self, img_data):
self.image_axes.show_image(img_data)
class MplAxes(object):
def __init__(self, parent):
self._parent = parent
self._parent.resizeEvent = self.resize_graph
self.create_axes()
self.redraw_figure()
def create_axes(self):
self.figure = Figure(None, dpi=100)
self.canvas = FigureCanvas(self.figure)
self.canvas.setParent(self._parent)
axes_layout = QtGui.QVBoxLayout(self._parent)
axes_layout.setContentsMargins(0, 0, 0, 0)
axes_layout.setSpacing(0)
axes_layout.setMargin(0)
axes_layout.addWidget(self.canvas)
self.canvas.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.axes = self.figure.add_subplot(111)
def resize_graph(self, event):
new_size = event.size()
self.figure.set_size_inches([new_size.width() / 100.0, new_size.height() / 100.0])
self.redraw_figure()
def redraw_figure(self):
self.figure.tight_layout(None, 0.8, None, None)
self.canvas.draw()
class ImageAxes(MplAxes):
def __init__(self, parent):
super(ImageAxes, self).__init__(parent)
self.axes.yaxis.set_visible(False)
self.axes.xaxis.set_visible(False)
def show_image(self, img_data):
self.axes.cla()
self.img_data = img_data
self.image = self.axes.imshow(self.img_data, aspect='auto', cmap='hot')
self.axes.set_ylim([0, len(self.img_data) - 1])
self.axes.set_xlim([0, len(self.img_data[0]) - 1])
self.axes.invert_yaxis()
self.redraw_figure()
class GraphAxes(MplAxes):
def __init__(self, parent):
MplAxes.__init__(self, parent)
def plot_graph(self, spectrum):
self.axes.cla()
self.spectrum = spectrum
self.graph = self.axes.plot(spectrum.x, spectrum.y)
self.redraw_figure()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
view = XRS_MainView()
view.show()
app.exec_() | [
"clemens.prescher@gmail.com"
] | clemens.prescher@gmail.com |
f8b31d25968d3fe677a466959b35b7d08a5de8db | e36aebe2b744e3d1105eed748f4162a59441dd6d | /0x01-python-if_else_loops_functions/6-print_comb3.py | c46681b48856916bb71cb9ba49c68b551e3b1cb4 | [] | no_license | mrfosse/holbertonschool-higher_level_programming | 98f52ddaccf3e666180b0339f79bef98b2d3702f | 4c3f1d123c4d5072b3b304e5bb714dc6b4c43324 | refs/heads/master | 2021-01-20T07:18:13.104848 | 2017-05-31T06:16:56 | 2017-05-31T06:16:56 | 78,599,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/python3
a = 0
b = 1
while a <= 9:
c = b
while c <= 9:
if a == 8 and c == 9:
print("{}{}".format(a, c))
else:
print("{}{}, ".format(a, c), end="")
c = c + 1
a = a + 1
b = b + 1
| [
"mrfosse99@gmail.com"
] | mrfosse99@gmail.com |
6972b90c9bf18e83f50c21fa104285ba31edeb2b | 12d5241367b1be119fb293705be99d931f04971c | /collective/carousel/tests/test_viewlet.py | c4ed82766d21c118538af69de77ab8b743bbfab6 | [] | no_license | djay/collective.carousel | e44bd753366f9073a7663333d68cbd3f126a6982 | 8f0c398e14be642b5db29d5019ad7190d90b26c0 | refs/heads/master | 2020-06-09T03:44:54.854385 | 2010-10-08T14:45:16 | 2010-10-08T14:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,296 | py | # Carousel is rendered through a viewlet in IAboveContent
# using items provided by the carousel provider added to the context
from zope.interface import alsoProvides, noLongerProvides
from collective.carousel.browser.viewlets import CarouselViewlet
from collective.carousel.testing import ICustomType
from collective.carousel.tests.base import TestCase
class ViewletTestCase(TestCase):
    """Integration tests for the carousel viewlet.

    The viewlet reads the context's ``carouselprovider`` reference field,
    which points at one or more Collections (Topics); each Collection's
    catalog results become the tiles rendered in the carousel.
    """

    def afterSetUp(self):
        # Fixture: a folder with one Collection matching Documents,
        # News Items and Events, registered as the folder's carousel
        # provider, plus one content object of each matching type.
        self.setRoles('Manager')
        self.folder.invokeFactory('Topic', 'collection')
        collection = getattr(self.folder, 'collection')
        crit = self.folder.collection.addCriterion('portal_type', 'ATSimpleStringCriterion')
        crit.setValue(['Document', 'News Item', 'Event'])
        field = self.folder.Schema().getField('carouselprovider')
        field.set(self.folder, collection)
        # add a few objects
        self.folder.invokeFactory('Document', 'carousel-doc')
        self.folder.invokeFactory('News Item', 'carousel-news-item')
        self.folder.invokeFactory('Event', 'carousel-event')

    def test_viewlet_is_available(self):
        """The viewlet can be instantiated against a folder context."""
        request = self.app.REQUEST
        context = self.folder
        viewlet = CarouselViewlet(context, request, None, None)
        self.failUnless(viewlet)

    def test_multiple_providers(self):
        """The provider field accepts a tuple of Collections and the
        viewlet reports all of them via getProviders()."""
        collections = []
        for i in range(3):
            self.folder.invokeFactory('Topic', 'collection_%s'%i)
            collection = getattr(self.folder, 'collection_%s'%i)
            crit = collection.addCriterion('portal_type', 'ATSimpleStringCriterion')
            crit.setValue('Document')
            collections.append(collection)
        field = self.folder.Schema().getField('carouselprovider')
        field.set(self.folder, tuple(collections))
        viewlet = CarouselViewlet(self.folder, self.app.REQUEST, None, None)
        # >= 3 rather than == 3: afterSetUp may already have registered
        # a provider on this folder.
        self.failUnless(len(viewlet.getProviders()) >= 3)

    def test_viewlet_rendering(self):
        """results() caps the tile count at 7 and get_tile() picks the
        per-type (or marker-interface) tile template for each item."""
        # add a few documents
        for i in range(10):
            self.folder.invokeFactory('Document', 'document_%s'%i)
            getattr(self.folder, 'document_%s'%i).reindexObject()
        collection_num_items = len(self.folder.collection.queryCatalog())
        # We better have some documents in the collection's results
        self.failUnless(collection_num_items >= 10)
        field = self.folder.Schema().getField('carouselprovider')
        # technically the following checkup is done in test_field, but we better check again
        self.assertEqual(field.get(self.folder), [self.folder.collection])
        viewlet = CarouselViewlet(self.folder, self.app.REQUEST, None, None)
        # first check getProviders()
        self.assertEqual(viewlet.getProviders(), [self.folder.collection])
        # check results(). We get not more than 7 items even though the collection returns >=7
        # results, don't we?
        self.failUnless(len(viewlet.results(viewlet.getProviders()[0])) == 7)
        results = [result.id for result in viewlet.results(viewlet.getProviders()[0])]
        doc_ids = [id for id in self.folder.contentIds()[:7] if 'document' in id]
        for doc_id in doc_ids:
            self.failUnless(doc_id in results)
        # Test that we get correct tiles in the carousel
        for result in viewlet.results(viewlet.getProviders()[0]):
            item_type = result.portal_type
            if item_type == 'Document':
                self.failUnless('<p>This is a PAGE tile</p>' in viewlet.get_tile(result.getObject()))
            if item_type == 'Event':
                self.failUnless('<p>This is a DEFAULT tile</p>' in viewlet.get_tile(result.getObject()))
            if item_type == 'News Item':
                self.failUnless('<p>This is a NEWS ITEM tile</p>' in viewlet.get_tile(result.getObject()))
        # Now we apply new custom tile registration for event object
        event = getattr(self.folder, 'carousel-event')
        alsoProvides(event, ICustomType)
        self.failIf('<p>This is a DEFAULT tile</p>' in viewlet.get_tile(event))
        # We revert event to standard state (without custom tile registration)
        noLongerProvides(event, ICustomType)
        # We should get our DEFAULT tile again
        self.failIf('<p>This is a CUSTOM DEFAULT tile</p>' in viewlet.get_tile(event))

    def test_edit_carousel_link(self):
        """editCarouselLink() points at the provider's criteria form and
        is only offered to users with edit rights."""
        viewlet = CarouselViewlet(self.folder, self.app.REQUEST, None, None)
        carousel_criteria = self.folder.collection.absolute_url() + '/criterion_edit_form'
        self.assertEqual(viewlet.editCarouselLink(viewlet.getProviders()[0]), carousel_criteria)
        # Check whether anonymous users get "edit" link. First check that Manager gets it:
        self.failUnless(viewlet.canSeeEditLink(viewlet.getProviders()[0]))
        # Then we switch user to Anonymous:
        self.setRoles([])
        self.logout()
        self.failIf(viewlet.canSeeEditLink(viewlet.getProviders()[0]))
def test_suite():
    """Collect every test in this module for the Zope test runner."""
    import unittest
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
"spliter@db7f04ef-aaf3-0310-a811-c281ed44c4ad"
] | spliter@db7f04ef-aaf3-0310-a811-c281ed44c4ad |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.