blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7a7dcff347cf0be58c7f7e31b6ad5683d3ad83bb
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/CJ_16_1/16_1_2_ramesh_srivatsan_file.py
|
48a62c972a704f3e67df69ac6179a26b29de87ff
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 454
|
py
|
# Google Code Jam helper: across 2n-1 shuffled input rows, the numbers that
# occur an odd number of times form the answer row.
# NOTE: Python 2 source (raw_input / dict.iteritems).
t = int(raw_input())
for case_idx in range(t):
    n = int(raw_input())
    freq = {}
    # Tally every token over the 2n-1 rows of this test case.
    for _row in range(2 * n - 1):
        for token in raw_input().split():
            freq[token] = freq.get(token, 0) + 1
    # Tokens with an odd count belong to the missing row; report them sorted.
    odd_values = sorted(int(key) for key, count in freq.iteritems() if count % 2 != 0)
    print('Case #' + str(case_idx + 1) + ': ' + ' '.join(str(v) for v in odd_values))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
fe67d0066e94d1d7a237475ee1a475f287a22508
|
4b87a0de0f43de2bde41f2590faac970c18fe482
|
/core/migrations/0123_auto_20210210_1037.py
|
460ef79d8183a699baef11dcc6f3debf78449c3e
|
[] |
no_license
|
krishSona/testbackend
|
d0bc325776537d9814b9022b3538b5e8a840e6a4
|
d87e050d02542c58876d4f81c2ea99815ab4160e
|
refs/heads/master
| 2023-04-08T01:26:42.070058
| 2021-04-03T06:08:54
| 2021-04-03T06:08:54
| 354,214,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# Generated by Django 3.0.5 on 2021-02-10 10:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``core`` app.

    Alters ``Domain.name`` to ``CharField(max_length=30)``.
    """

    # Must be applied after the migration that introduced the Domain model.
    dependencies = [
        ('core', '0122_domain'),
    ]

    operations = [
        # NOTE(review): AlterField does not truncate existing rows; if the
        # previous max_length was larger, verify stored values fit within 30
        # characters before deploying.
        migrations.AlterField(
            model_name='domain',
            name='name',
            field=models.CharField(max_length=30),
        ),
    ]
|
[
"kali@dailysalary.in"
] |
kali@dailysalary.in
|
995cc4b4e5c8fe123f68dc302262b15b94a96e8d
|
135aca3b7bab7c32d590986b0ac8cfe905c75657
|
/pytx/wsgi.py
|
6bd72939eb054853dc32c6bf1f974fc2ab870c46
|
[
"MIT"
] |
permissive
|
pytexas/PyTexas
|
9e083d4ad3ba72126babcf0df656f793a4439472
|
6ab364dee4f23de0b0fa78036a2428be7497fdf2
|
refs/heads/master
| 2022-01-12T03:35:15.409636
| 2019-05-05T14:31:44
| 2019-05-05T14:31:44
| 101,810,610
| 0
| 1
|
MIT
| 2019-04-05T02:57:54
| 2017-08-29T22:00:51
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
"""
WSGI config for pytx project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pytx.settings")
application = get_wsgi_application()
|
[
"paul.m.bailey@gmail.com"
] |
paul.m.bailey@gmail.com
|
702130c3e1067acdf330242f29c3a53c8bb944fb
|
e58df4aeee11f8a97bdeede6a75a776d130f86d2
|
/molpal/objectives/pyscreener/docking/__init__.py
|
0efcc759bcd9da402e1f0ed64dd4aa57d63ad4b9
|
[
"MIT"
] |
permissive
|
ashuein/molpal
|
6ecd79767d8ef254e2c852e20f77cd9338844f35
|
1e17a0c406516ceaeaf273a6983d06206bcfe76f
|
refs/heads/main
| 2023-01-29T03:23:10.525555
| 2020-12-15T14:17:56
| 2020-12-15T14:17:56
| 321,720,018
| 1
| 0
|
MIT
| 2020-12-15T16:09:48
| 2020-12-15T16:09:48
| null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from typing import Dict
from molpal.objectives.pyscreener.docking.base import Screener
def screener(software, **kwargs):
    """Build and return the Screener matching *software*.

    Backend modules are imported lazily so only the chosen docking engine's
    dependencies are loaded.

    Raises:
        ValueError: if *software* names no known docking backend.
    """
    vina_family = ('vina', 'qvina', 'smina', 'psovina')
    dock_family = ('dock', 'ucsfdock', 'DOCK')

    if software in vina_family:
        from molpal.objectives.pyscreener.docking.vina import Vina
        return Vina(software=software, **kwargs)

    if software in dock_family:
        from molpal.objectives.pyscreener.docking.dock import DOCK
        return DOCK(**kwargs)

    raise ValueError(f'Unrecognized docking software: "{software}"')
|
[
"deg711@g.harvard.edu"
] |
deg711@g.harvard.edu
|
1f4bd1ecea902d5847f4ab2e2dfa0f1d7161688b
|
1f8e15456b2b591ebdbdd123868d72995f801f5f
|
/single/NormalBLP3.py
|
9741f0d90bad8502a727abe60781cb4fc2b550e7
|
[] |
no_license
|
BaiLiping/ControllerBasedCoaching
|
bcc4104d42c310ccb234bd84ae9ef66a3ba74e78
|
c718ef9896dc36819a2970465f47187ff0c7a261
|
refs/heads/master
| 2023-02-09T01:29:21.814357
| 2021-01-06T01:42:58
| 2021-01-06T01:42:58
| 308,488,963
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
from tensorforce import Agent, Environment
import matplotlib.pyplot as plt
import numpy as np
import math
import pickle
from tqdm import tqdm
# --- Hyper-parameters -------------------------------------------------------
num_steps=10 #update exploration rate over n steps
initial_value=0.95 #initial exploration rate
decay_rate=0.5 #exploration rate decay rate
set_type='exponential' #type of decay schedule: linear or exponential
# Exploration-rate schedule handed to the tensorforce Agent.
exploration=dict(type=set_type, unit='timesteps',
        num_steps=num_steps,initial_value=initial_value,
        decay_rate=decay_rate)
episode_number=1000
evaluation_episode_number=5
# PD controller gains used to stabilise the pendulum near upright.
kp=1.77327564e+00
kd=-2.60674054e-02
# NOTE(review): prohibition_parameter/prohibition_position are never read in
# this script -- presumably kept for symmetry with the "with boundary"
# variants; confirm before removing.
prohibition_parameter=[0]
prohibition_position=[0.01,0.15]

# Pre-defined or custom environment
environment = Environment.create(environment='gym', level='InvertedPendulumBLP-v1')

# NOTE(review): `length` is allocated but never written below.
length=np.zeros(episode_number)
reward_record_without=[]
agent_without = Agent.create(agent='agent.json', environment=environment,exploration=exploration)
states=environment.reset()
terminal = False

print('training agent without boundary')
# NOTE(review): angle_record is never appended to, so 'angle3.png' saved
# below will plot an empty series -- likely a leftover from a sibling script.
angle_record=[]
for _ in tqdm(range(episode_number)):
    episode_reward=0
    states = environment.reset()
    terminal= False
    while not terminal:
        theta=states[1]  # pole-angle component of the observation
        actions = agent_without.act(states=states)
        if abs(theta)<=0.015:
            # Near upright: override the agent with the PD controller and let
            # the agent observe the resulting transition.
            actions=kp*states[1]+kd*states[3]
            states, terminal, reward = environment.execute(actions=actions)
            episode_reward+=reward
            agent_without.observe(terminal=terminal, reward=reward)
        else:
            # Away from upright: use the agent's own action, penalised by its
            # magnitude to discourage large control inputs.
            states, terminal, reward = environment.execute(actions=actions)
            reward-=abs(actions)
            agent_without.observe(terminal=terminal, reward=reward)
            episode_reward+=reward
    reward_record_without.append(episode_reward)
agent_without.save(directory='model3', format='numpy')

# --- Training reward plot ---------------------------------------------------
x=range(episode_number)
x_angle=range(len(angle_record))
plt.figure(figsize=(10,10))
plt.plot(x,reward_record_without,label='without prohibitive boundary',color='black')
plt.xlabel('episodes')
plt.ylabel('reward')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='center left',ncol=2,shadow=True, borderaxespad=0)
plt.savefig('plot3.png')
plt.figure(figsize=(30,10))
plt.plot(x_angle,angle_record)
plt.savefig('angle3.png')

# --- Evaluate the agent without boundary ------------------------------------
episode_reward = 0.0
evaluation_reward_record_without=[]
print('evaluating agent without boundary')
for _ in tqdm(range(evaluation_episode_number)):
    episode_reward=0
    states = environment.reset()
    internals = agent_without.initial_internals()
    terminal = False
    while not terminal:
        # Deterministic, independent evaluation: no exploration, no learning.
        actions, internals = agent_without.act(states=states, internals=internals, independent=True, deterministic=True)
        states, terminal, reward = environment.execute(actions=actions)
        episode_reward += reward
    evaluation_reward_record_without.append(episode_reward)
pickle.dump(evaluation_reward_record_without, open( "evaluation_without_record.p", "wb"))
environment.close()
|
[
"blp_engineer@outlook.com"
] |
blp_engineer@outlook.com
|
d3175d32a255ed2f4a57786732b6ab243f101446
|
0f16edb46a48f9b5a125abb56fc0545ede1d65aa
|
/client_cli/src/__init__.py
|
36ea25a06ca41f5a26e0c71ada5e8a185c1910f8
|
[
"Apache-2.0"
] |
permissive
|
DataONEorg/d1_python
|
5e685f1af0c356190f2d6df45d1ac849e2f56972
|
d72a9461894d9be7d71178fb7310101b8ef9066a
|
refs/heads/master
| 2023-08-29T03:16:38.131760
| 2023-06-27T21:59:37
| 2023-06-27T21:59:37
| 60,103,877
| 15
| 12
|
Apache-2.0
| 2023-09-06T18:27:53
| 2016-05-31T16:01:00
|
Python
|
UTF-8
|
Python
| false
| false
| 842
|
py
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from d1_cli.version import __version__ # noqa: F401
|
[
"git@dahlsys.com"
] |
git@dahlsys.com
|
6c12c0b8fdc44ffc4c2c3cee8a47b873de1de762
|
724e29f9984ca7f607356ce1783bc29d8cb8dae8
|
/lib/net/POINT2_model.py
|
49933907cdf0435de1de30d029af6ab380ac410d
|
[
"MIT"
] |
permissive
|
LiuLiluZJU/POINT2-pytorch
|
3d9e4d5552c757986106b3dd3ca0b828f09b73d6
|
c9f5fad59e2f7da2c169255de5a730d861a1a96e
|
refs/heads/master
| 2022-11-25T02:20:47.018315
| 2020-08-08T05:20:48
| 2020-08-08T05:20:48
| 266,246,266
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,709
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchviz import make_dot
from kornia import SpatialSoftArgmax2d
from .unet_model import UNet
from .FE_layer import FE_layer
from .POINT_model import PNet
from .triangulation_layer import triangulation_layer
import matplotlib.pyplot as plt
import math
from graphviz import Digraph
from torch.autograd import Variable, Function
class P2Net(nn.Module):
    """Two-view POINT^2-style registration network.

    Wraps two PNet branches -- one fed the AP (anterior-posterior) view and
    one the lateral view -- plus a triangulation layer that combines the two
    per-view score maps into 3D fiducial predictions. Both PNet branches are
    optimised jointly by a single Adam optimizer held on this module.
    """

    def __init__(self, device, n_channels=1, n_classes=64, bilinear=True, patch_neighbor_size=5):
        super(P2Net, self).__init__()
        self.device = device
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.patch_neighbor_size = patch_neighbor_size
        # One identically-configured PNet per view.
        self.PNet_ap = PNet(self.device, self.n_channels, self.n_classes, self.bilinear, self.patch_neighbor_size)
        self.PNet_lat = PNet(self.device, self.n_channels, self.n_classes, self.bilinear, self.patch_neighbor_size)
        self.triangulation_layer = triangulation_layer(self.device)
        # Joint optimizer over both branches; the triangulation layer
        # contributes no parameter group here.
        self.optimizer = torch.optim.Adam([
            {'params': self.PNet_ap.parameters()},
            {'params': self.PNet_lat.parameters()}
        ], lr=0.0002, weight_decay=1e-8)

    def set_input(self, input_drr_ap, input_xray_ap, correspondence_2D_ap,
                  input_drr_lat, input_xray_lat, correspondence_2D_lat, fiducial_3D):
        """Stash one batch of per-view inputs and forward each view's triple
        (DRR, X-ray, 2D correspondences) to its PNet branch.

        batch_size / point_num are derived from the AP correspondence tensor
        (shape assumed (batch, ?, points) -- TODO confirm against PNet).
        """
        self.input_drr_ap = input_drr_ap
        self.input_xray_ap = input_xray_ap
        self.correspondence_2D_ap = correspondence_2D_ap
        self.input_drr_lat = input_drr_lat
        self.input_xray_lat = input_xray_lat
        self.correspondence_2D_lat = correspondence_2D_lat
        self.fiducial_3D = fiducial_3D
        self.batch_size = self.correspondence_2D_ap.shape[0]
        self.point_num = self.correspondence_2D_ap.shape[2]
        self.PNet_ap.set_input(self.input_drr_ap, self.input_xray_ap, self.correspondence_2D_ap)
        self.PNet_lat.set_input(self.input_drr_lat, self.input_xray_lat, self.correspondence_2D_lat)

    def forward(self):
        """Run both PNet branches and triangulate their score maps into
        predicted 3D fiducials (stored on self, nothing returned)."""
        self.score_map_ap, self.score_map_gt_ap = self.PNet_ap()
        self.score_map_lat, self.score_map_gt_lat = self.PNet_lat()
        self.fiducial_3D_pred = self.triangulation_layer(self.score_map_ap, self.score_map_lat)
        # The block below is a disabled rigid-alignment (Kabsch/SVD) experiment
        # kept for reference.
        # center_volume = torch.tensor([127.5, 127.5, 127.5]).to(device=self.device, dtype=torch.float32)
        # fiducial_3D_pred_decentral = self.fiducial_3D_pred - center_volume
        # fiducial_3D_decentral = self.fiducial_3D - center_volume
        # for batch_index in range(self.batch_size):
        #     # R1 = torch.randn(3, 3).to(device=self.device)
        #     # t1 = torch.randn(3, 1).to(device=self.device)
        #     # U1, S1, Vt1 = torch.svd(R1)
        #     # R1 = torch.matmul(U1, Vt1)
        #     # if torch.det(R1) < 0:
        #     #     print("Reflection detected")
        #     #     Vt1[2, :] *= -1
        #     #     R1 = torch.matmul(Vt1.t(), U1.t())
        #     # fiducial_3D_decentral = torch.randn(20, 3).to(device=self.device)
        #     # fiducial_3D_pred_decentral = (torch.matmul(R1, fiducial_3D_decentral.t()) + t1.repeat(1, 20)).t()
        #     # fiducial_3D_decentral2 = fiducial_3D_decentral - torch.mean(fiducial_3D_decentral, dim=0).repeat(20, 1)
        #     # fiducial_3D_pred_decentral2 = fiducial_3D_pred_decentral - torch.mean(fiducial_3D_pred_decentral, dim=0).repeat(20, 1)
        #     # print(R1, t1)
        #
        #     fiducial_3D_pred_decentral2 = fiducial_3D_pred_decentral[batch_index] - torch.mean(fiducial_3D_pred_decentral[batch_index], dim=[0])
        #     fiducial_3D_decentral2 = fiducial_3D_decentral[batch_index] - torch.mean(fiducial_3D_decentral[batch_index], dim=[0])
        #     H = torch.matmul(fiducial_3D_decentral2.t(), fiducial_3D_pred_decentral2)
        #     U, S, V = torch.svd(H)  # V is different from numpy's. V in torch, V.t() in numpy
        #     R = torch.matmul(V, U.t())
        #     if torch.det(R) < 0:
        #         print("Reflection detected")
        #         V[2, :] *= -1
        #         R = torch.matmul(V, U.t())
        #     # print(fiducial_3D_pred_decentral[batch_index])
        #     # print(fiducial_3D_decentral[batch_index])
        #     # print(fiducial_3D_pred_decentral[batch_index] - fiducial_3D_decentral[batch_index])
        #     print(R)
        #     t = torch.mean(fiducial_3D_pred_decentral[batch_index], dim=[0]) - torch.matmul(R, torch.mean(fiducial_3D_decentral[batch_index], dim=[0]))
        #     print(t)
        #     k = 1

    def backward_basic(self):
        """Compute the composite loss and backpropagate.

        Loss = BCE(score_map_ap) + BCE(score_map_lat)
               + 0.001 * mean Euclidean error of the triangulated fiducials.
        """
        self.loss_bce_ap = F.binary_cross_entropy_with_logits(self.score_map_ap, self.score_map_gt_ap, reduction='mean')
        self.loss_bce_lat = F.binary_cross_entropy_with_logits(self.score_map_lat, self.score_map_gt_lat, reduction='mean')
        print(self.fiducial_3D_pred.shape)
        self.loss_eular = torch.mean(torch.norm(self.fiducial_3D_pred - self.fiducial_3D, dim=2), dim=[1, 0])
        print("self.loss_eular", self.loss_eular)
        self.loss_total = self.loss_bce_ap + self.loss_bce_lat + 0.001 * self.loss_eular
        # g = make_dot(self.loss_total)
        # g.view()
        print("loss:", self.loss_total)
        self.loss_total.backward()
        # print(self.UNet.up1.conv.double_conv[0].weight.grad)

    def optimize_parameters(self):
        """One training step: forward, backward, clip gradients, optimizer step.

        Assumes set_input() has been called with the current batch.
        """
        # forward (calls self.forward via nn.Module.__call__)
        self()
        self.optimizer.zero_grad()
        self.backward_basic()
        # Clip per-element gradient values to stabilise training.
        nn.utils.clip_grad_value_(self.PNet_ap.parameters(), 0.1)
        nn.utils.clip_grad_value_(self.PNet_lat.parameters(), 0.1)
        self.optimizer.step()
|
[
"email@example.com"
] |
email@example.com
|
de8ef6ccebcd187a581faf86b86c35701d6200f7
|
11e484590b27585facf758f0432eeebe66bf790a
|
/fal_invoice_milestone_purchase/wizard/purchase_make_invoice_advance.py
|
0691ee65cc3d7a53ce22b97a015a0d621c6c880b
|
[] |
no_license
|
jeanabreu/falinwa_branch
|
51b38ee5a3373d42417b84a0431bad9f7295f373
|
be96a209479259cd5b47dec73694938848a2db6c
|
refs/heads/master
| 2021-01-18T10:25:49.866747
| 2015-08-25T10:05:05
| 2015-08-25T10:05:05
| 41,369,368
| 0
| 1
| null | 2015-08-25T14:51:50
| 2015-08-25T14:51:50
| null |
UTF-8
|
Python
| false
| false
| 6,431
|
py
|
from openerp.osv import fields, orm
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class purchase_advance_payment_inv(orm.TransientModel):
    """Wizard creating advance (down-payment) supplier invoices for the
    purchase orders selected in the context (legacy OpenERP osv/orm API).
    """
    _name = "purchase.advance.payment.inv"
    _description = "Purchase Advance Payment Invoice"
    _columns = {
        # NOTE(review): despite the label, the value is interpreted as a
        # percentage of the order total in _prepare_advance_invoice_vals.
        'amount': fields.float('Advance Amount', digits_compute= dp.get_precision('Account'),
            help="The amount to be invoiced in advance."),
    }

    def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
        """Build advance-invoice values for every selected purchase order.

        Returns:
            list of (purchase_id, invoice_values) tuples.

        Raises:
            orm.except_orm: if no global expense account is configured, or the
                wizard amount is not strictly positive.
        """
        if context is None:
            context = {}
        purchase_obj = self.pool.get('purchase.order')
        ir_property_obj = self.pool.get('ir.property')
        fiscal_obj = self.pool.get('account.fiscal.position')
        # NOTE(review): inv_line_obj is fetched but never used in this method.
        inv_line_obj = self.pool.get('account.invoice.line')
        account_jrnl_obj = self.pool.get('account.journal')
        wizard = self.browse(cr, uid, ids[0], context)
        purchase_ids = context.get('active_ids', [])
        result = []
        for purchase in purchase_obj.browse(cr, uid, purchase_ids, context=context):
            res = {}
            # determine and check expense account (mapped through the order's
            # fiscal position)
            prop = ir_property_obj.get(cr, uid,
                'property_account_expense_categ', 'product.category', context=context)
            prop_id = prop and prop.id or False
            account_id = fiscal_obj.map_account(cr, uid, purchase.fiscal_position or False, prop_id)
            if not account_id:
                raise orm.except_orm(_('Configuration Error!'),
                    _('There is no expense account defined as global property.'))
            res['account_id'] = account_id
            # determine invoice amount: wizard.amount is a percentage of the
            # order total
            if wizard.amount <= 0.00:
                raise orm.except_orm(_('Incorrect Data'),
                    _('The value of Advance Amount must be positive.'))
            inv_amount = purchase.amount_total * wizard.amount / 100
            # NOTE(review): res has no 'name' key at this point, so this
            # branch always fires; kept as written.
            if not res.get('name'):
                res['name'] = _("Advance of %s %%") % (wizard.amount)
            # determine taxes
            # NOTE(review): res never carries 'invoice_line_tax_id' here, so
            # the else branch (False) is always taken; kept as written.
            if res.get('invoice_line_tax_id'):
                res['invoice_line_tax_id'] = [(6, 0, res.get('invoice_line_tax_id'))]
            else:
                res['invoice_line_tax_id'] = False
            # search the first journal of type 'purchase'
            journal_id = account_jrnl_obj.search(cr, uid, [('type', '=', 'purchase')], context=None)
            journal_id = journal_id and journal_id[0] or False
            # build the invoice values: a single line carrying the advance
            inv_line_values = {
                'name': res.get('name'),
                'origin': purchase.name,
                'account_id': res['account_id'],
                'price_unit': inv_amount,
                'quantity': 1.0,
                'discount': False,
                'uos_id': res.get('uos_id', False),
                'product_id': False,
                'invoice_line_tax_id': res.get('invoice_line_tax_id'),
                'account_analytic_id': purchase.order_line[0].account_analytic_id.id or False,
            }
            inv_values = {
                'name': purchase.partner_ref or purchase.name or '',
                'origin': purchase.name,
                'type': 'in_invoice',
                'reference': purchase.partner_ref or purchase.name or '',
                'account_id': purchase.partner_id.property_account_payable.id,
                'partner_id': purchase.partner_id.id,
                'invoice_line': [(0, 0, inv_line_values)],
                'currency_id': purchase.pricelist_id.currency_id.id,
                'comment': purchase.notes or '',
                'payment_term': purchase.payment_term_id and purchase.payment_term_id.id or False,
                'fiscal_position': purchase.fiscal_position.id or purchase.partner_id.property_account_position.id or False,
                'journal_id' : journal_id,
                'date_invoice': context.get('date_invoice', False),
                'company_id': purchase.company_id and purchase.company_id.id or False,
            }
            result.append((purchase.id, inv_values))
        return result

    def _create_invoices(self, cr, uid, inv_values, purchase_id, context=None):
        """Create one supplier invoice, recompute its taxes, and link it to
        the purchase order. Returns the new invoice id."""
        inv_obj = self.pool.get('account.invoice')
        purchase_obj = self.pool.get('purchase.order')
        inv_id = inv_obj.create(cr, uid, inv_values, context=context)
        inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)
        # add the invoice to the purchase order's invoices
        purchase_obj.write(cr, uid, purchase_id, {'invoice_ids': [(4, inv_id)]}, context=context)
        return inv_id

    def create_invoices(self, cr, uid, ids, context=None):
        """Create invoices for the active purchase orders.

        Returns an act_window on the created invoices when the context asks
        for 'open_invoices', otherwise closes the wizard window.
        """
        purchase_obj = self.pool.get('purchase.order')
        act_window = self.pool.get('ir.actions.act_window')
        wizard = self.browse(cr, uid, ids[0], context)
        purchase_ids = context.get('active_ids', [])
        inv_ids = []
        for purchase_id, inv_values in self._prepare_advance_invoice_vals(cr, uid, ids, context=context):
            inv_ids.append(self._create_invoices(cr, uid, inv_values, purchase_id, context=context))
        if context.get('open_invoices', False):
            return self.open_invoices( cr, uid, ids, inv_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
        """Open a form/tree act_window on the first of the given invoice_ids."""
        ir_model_data = self.pool.get('ir.model.data')
        form_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
        form_id = form_res and form_res[1] or False
        tree_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_tree')
        tree_id = tree_res and tree_res[1] or False
        return {
            'name': _('Advance Invoice'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'account.invoice',
            'res_id': invoice_ids[0],
            'view_id': False,
            'views': [(form_id, 'form'), (tree_id, 'tree')],
            'context': "{'type': 'in_invoice'}",
            'type': 'ir.actions.act_window',
        }
#end of purchase_advance_payment_inv()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"hans.yonathan@falinwa.com"
] |
hans.yonathan@falinwa.com
|
2fcec0007bb738adbc0f9bba1db2d6d3e174a56f
|
bacd13a19aa2cc0037961d101207afc5b26405ca
|
/configs/May_05_2018_Penalva_RPS05.py
|
e12fbc5446cda7c2f4d80a947b04c6fdb4898bc6
|
[
"BSD-3-Clause"
] |
permissive
|
saketkc/ribo-seq-snakemake
|
85f74eda7a9972ce02f3091c0d181be64c688384
|
06b56d7fa1119c483a9f807c23f31fb9efa00440
|
refs/heads/master
| 2021-09-15T10:52:54.488854
| 2018-05-31T04:25:36
| 2018-05-31T04:25:36
| 82,966,027
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
## Snakemake pipeline configuration: May_05_2018_Penalva_RPS05 (hg38).
## All values are absolute paths or tool flags consumed by the re-ribo rules.

## Absolute location where all raw files are
RAWDATA_DIR = '/auto/cmb-06/as/skchoudh/dna/May_05_2018_Penalva_RPS05/PenalvaL_05022018'
## Output directory
OUT_DIR = '/staging/as/skchoudh/rna/May_05_2018_Penalva_RPS05'
## Absolute location to 're-ribo/scripts' directory
SRC_DIR = '/auto/cmb-panasas2/skchoudh/github_projects/re-ribo-mine/scripts'
## Genome fasta location
GENOME_FASTA = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.fa'
## Chromosome sizes location
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'
## Path to STAR index (will be generated if it does not exist)
STAR_INDEX = '/home/cmb-panasas2/skchoudh/genomes/hg38/star_annotated'
## GTF path
GTF = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.annotation.without_rRNA_tRNA.gtf'
## GenePred bed downloaded from UCSC
## (this is used for inferring the type of experiment i.e. stranded/non-stranded
## and hence is not strictly required)
## NOTE(review): GENE_BED is gencode v24 while the other annotations are v25 --
## confirm the version mismatch is intentional.
GENE_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v24.genes.bed'
## Path to bed file with start codon coordinates
START_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.start_codon.bed'
## Path to bed file with stop codon coordinates
STOP_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.stop_codon.bed'
## Path to bed file containing CDS coordinates
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.cds.bed'
## UTR beds (the original note said to substitute the CDS bed when these are
## missing; here real UTR5/UTR3 files are configured).
UTR5_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR5.bed'
UTR3_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR3.bed'
## Name of python2 environment
## The following packages need to be installed in that environment:
## numpy scipy matplotlib seaborn pysam pybedtools htseq
## you can do: conda create -n python2 PYTHON=2 && source activate python2 && conda install numpy scipy matplotlib seaborn pysam pybedtools htseq
PYTHON2ENV = 'python2'

############################################Do Not Edit#############################################
## Fixed tool flags -- keep in sync with the snakemake rules.
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
|
[
"saketkc@gmail.com"
] |
saketkc@gmail.com
|
e81f3c205ce8eb12f67b402c0e69686aa7398c8c
|
9cc8b489018dc25781b0aaa407a9ddc2a5caa72b
|
/examples/animations/leaky_echo_state_network.py
|
8dce1fd3c088fdc0ccd48e68a4fa18fd481179bb
|
[
"Apache-2.0"
] |
permissive
|
SocratesNFR/EvoDynamic
|
2921fba58fec60ff8c3d239be4f84dced2f41d01
|
ddd93c999d0120349ab022e47fd370ca58ec5ce7
|
refs/heads/master
| 2023-03-30T21:31:55.031043
| 2023-03-29T12:15:15
| 2023-03-29T12:15:15
| 183,439,783
| 17
| 5
|
Apache-2.0
| 2020-09-07T13:08:13
| 2019-04-25T13:26:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,996
|
py
|
""" Simple animation of Echo State Network """
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import evodynamic.experiment as experiment
import evodynamic.connection.random as conn_random
import evodynamic.connection as connection
import evodynamic.cells.activation as act
import networkx as nx
# --- Echo State Network hyper-parameters -------------------------------------
width = 100                    # number of reservoir neurons
input_size = width // 10       # number of input neurons (10% of reservoir)
input_scaling = 0.6            # scale of the Gaussian input weights
input_sparsity = 0.9           # fraction of zeroed input weights
leaky_rate = 0.5               # leaky-integration rate of the reservoir units

exp = experiment.Experiment()
input_esn = exp.add_input(tf.float64, [input_size], "input_esn")

# Sparse Gaussian input weights (input -> reservoir).
g_input_real_conn = conn_random.create_gaussian_connection('g_input_real_conn',
        input_size, width,
        scale=input_scaling,
        sparsity=input_sparsity,
        is_sparse=True)

g_esn = exp.add_group_cells(name="g_esn", amount=width)
g_esn_real = g_esn.add_real_state(state_name='g_esn_real')

# Recurrent reservoir weights. NOTE(review): spectral_radius=1.3 exceeds the
# classical echo-state bound of 1 -- presumably intentional for richer
# dynamics; confirm.
g_esn_real_conn = conn_random.create_gaussian_matrix('g_esn_real_conn',width,
        spectral_radius=1.3,
        sparsity=0.95, is_sparse=True)

# Disabled bias connection kept for reference.
# g_esn_real_bias = conn_random.create_gaussian_connection('g_esn_real_bias',
#                                                          1, width,
#                                                          scale=1.0,
#                                                          is_sparse=False)

# Input drives the reservoir state through tanh.
exp.add_connection("input_conn",
                   connection.WeightedConnection(input_esn,
                                                 g_esn_real,act.tanh,
                                                 g_input_real_conn))

# Disabled leaky_sigmoid recurrent variant kept for reference.
# exp.add_connection("g_esn_conn",
#                    connection.WeightedConnection(g_esn_real,
#                                                  g_esn_real,act.leaky_sigmoid,
#                                                  g_esn_real_conn,
#                                                  fargs_list=[(leaky_rate,)]))

# Recurrent update: leaky tanh with the leaky_rate above.
exp.add_connection("g_esn_conn",
                   connection.WeightedConnection(g_esn_real,
                                                 g_esn_real,act.leaky_tanh,
                                                 g_esn_real_conn,
                                                 fargs_list=[(leaky_rate,)]))

exp.initialize_cells()
# Pull the realised recurrent weights back from the TF session and build a
# directed graph of the reservoir for visualisation.
weight_matrix = exp.session.run(exp.get_connection("g_esn_conn").w)
G = nx.DiGraph()
# NOTE(review): for a sparse tensor, element [0] is presumably the index
# pairs (edge list) -- confirm against evodynamic's sparse representation.
G.add_edges_from(weight_matrix[0])

# Pin the first input_size nodes on a vertical line at x=0 and lay out the
# remaining reservoir nodes with a spring layout around them.
pos_dict = {}
for i in range(width):
    if i < input_size:
        pos_dict[i] = (0,i)
pos = nx.spring_layout(G,pos=pos_dict, fixed=pos_dict.keys())

# Shift the pinned input nodes to the left of the reservoir for readability.
min_x_val = min([p[0] for p in pos.values()])
pos_new = {k: (pos[k][0]+min_x_val-1, pos[k][1]) if k<input_size else pos[k] for k in pos.keys()}

# Animation
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
plt.title('Step: 0')
# Initial frame: colour each node by its current (real-valued) state.
current_state = exp.get_group_cells_state("g_esn", "g_esn_real")[:,0]
node_color = [round(current_state[node],2) for node in G]
nx.draw(G.reverse(), node_color = node_color, pos=pos_new, cmap=plt.cm.coolwarm,
        vmin=-1, vmax=1,
        connectionstyle="arc3, rad=0.1")

idx_anim = 0  # frame counter shared with updatefig
def updatefig(*args):
    """FuncAnimation callback: advance the ESN one step and redraw the graph.

    Feeds random binary input for the first 6 frames, then zeros, so the
    animation shows the reservoir's echo decaying after the drive stops.
    Reads/updates the module-level frame counter ``idx_anim``.
    """
    global idx_anim
    ax.clear()
    input_esn_arr = np.random.randint(2, size=(input_size,1)) if idx_anim < 6 else np.zeros((input_size,1))
    exp.run_step(feed_dict={input_esn: input_esn_arr})
    # Recolour nodes from the freshly-computed reservoir state.
    current_state = exp.get_group_cells_state("g_esn", "g_esn_real")[:,0]
    node_color = [round(current_state[node],2) for node in G]
    nx.draw(G.reverse(), node_color = node_color, pos=pos_new, cmap=plt.cm.coolwarm,
            vmin=-1, vmax=1,
            connectionstyle="arc3, rad=0.1")
    plt.title('Step: '+str(idx_anim))
    idx_anim += 1
# Keep a reference to the animation object so it is not garbage-collected
# while the figure is displayed.
ani = animation.FuncAnimation(fig, updatefig, frames=30, interval=2000, blit=False)
plt.show()
# Bug fix: the original `plt.connect('close_event', exp.close())` CALLED
# exp.close() immediately and registered its return value (None) as the event
# handler -- not a callable, and connected only after the window was already
# gone. Since plt.show() blocks until the window closes, simply close the
# experiment once it returns.
exp.close()
|
[
"sidneypontesf@gmail.com"
] |
sidneypontesf@gmail.com
|
8247d63f1aa8f4e29ddecf52de7dbe954d7f95a3
|
6b3e8b4291c67195ad51e356ba46602a15d5fe38
|
/rastervision2/core/predictor.py
|
9b6af32dc47db091718b15160f358eb35de21eed
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
csaybar/raster-vision
|
4f5bb1125d4fb3ae5c455db603d8fb749221dd74
|
617ca15f64e3b8a391432306a743f7d0dfff352f
|
refs/heads/master
| 2021-02-26T19:02:53.752971
| 2020-02-27T17:25:31
| 2020-02-27T17:25:31
| 245,547,406
| 2
| 1
|
NOASSERTION
| 2020-03-07T01:24:09
| 2020-03-07T01:24:08
| null |
UTF-8
|
Python
| false
| false
| 4,141
|
py
|
from os.path import join
import zipfile
from rastervision2.pipeline import rv_config
from rastervision2.pipeline.config import build_config
from rastervision2.pipeline.filesystem.utils import (download_if_needed,
make_dir, file_to_json)
from rastervision2.core.data.raster_source import ChannelOrderError
from rastervision2.core.analyzer import StatsAnalyzerConfig
class Predictor():
    """Class for making predictions based off of a model bundle."""

    def __init__(self,
                 model_bundle_uri,
                 tmp_dir,
                 update_stats=False,
                 channel_order=None):
        """Creates a new Predictor.

        Args:
            model_bundle_uri: URI of the model bundle to use. Can be any
                type of URI that Raster Vision can read.
            tmp_dir: Temporary directory in which to store files that are used
                by the Predictor. This directory is not cleaned up by this
                class.
            update_stats: if True, run a stats analyzer over the prediction
                scene before predicting (writes stats.json into the unpacked
                bundle directory).
            channel_order: Option for a new channel order to use for the
                imagery being predicted against. If not present, the
                channel_order from the original configuration in the predict
                package will be used.
        """
        self.tmp_dir = tmp_dir
        self.update_stats = update_stats
        # NOTE(review): model_loaded is set but never read in this class --
        # possibly consumed by subclasses or callers; confirm.
        self.model_loaded = False

        # Download and unpack the model bundle under tmp_dir/bundle.
        bundle_path = download_if_needed(model_bundle_uri, tmp_dir)
        bundle_dir = join(tmp_dir, 'bundle')
        make_dir(bundle_dir)
        with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:
            bundle_zip.extractall(path=bundle_dir)

        # Rebuild the pipeline from the config shipped inside the bundle.
        config_path = join(bundle_dir, 'pipeline.json')
        config_dict = file_to_json(config_path)
        rv_config.reset(
            config_overrides=config_dict.get('rv_config'),
            verbosity=rv_config.verbosity,
            tmp_dir=rv_config.tmp_dir)
        self.pipeline = build_config(config_dict).build(tmp_dir)
        self.scene = None
        if not hasattr(self.pipeline, 'predict'):
            raise Exception(
                'pipeline in model bundle must have predict method')

        # Use the first validation scene as a template whose raster/label
        # URIs get swapped out on each predict() call.
        self.scene = self.pipeline.config.dataset.validation_scenes[0]
        if not hasattr(self.scene.raster_source, 'uris'):
            raise Exception(
                'raster_source in model bundle must have uris as field')
        if not hasattr(self.scene.label_store, 'uri'):
            raise Exception(
                'label_store in model bundle must have uri as field')

        # Point raster transformers at files unpacked from the bundle.
        for t in self.scene.raster_source.transformers:
            t.update_root(bundle_dir)

        if self.update_stats:
            stats_analyzer = StatsAnalyzerConfig(
                output_uri=join(bundle_dir, 'stats.json'))
            self.pipeline.config.analyzers = [stats_analyzer]

        # Strip training-only parts; the template scene becomes the single
        # scene of the rebuilt dataset.
        self.scene.label_source = None
        self.scene.aoi_uris = None
        self.pipeline.config.dataset.train_scenes = [self.scene]
        self.pipeline.config.dataset.validation_scenes = [self.scene]
        self.pipeline.config.dataset.test_scenes = None
        if channel_order is not None:
            self.scene.raster_source.channel_order = channel_order

    def predict(self, image_uris, label_uri):
        """Generate predictions for the given image.

        Args:
            image_uris: URIs of the images to make predictions against.
                This can be any type of URI readable by Raster Vision
                FileSystems.
            label_uri: URI to save labels off into.

        Raises:
            ValueError: if the bundle's channel_order references channels
                that are unavailable in the supplied imagery.
        """
        try:
            self.scene.raster_source.uris = image_uris
            self.scene.label_store.uri = label_uri
            if self.update_stats:
                self.pipeline.analyze()
            self.pipeline.predict()
        except ChannelOrderError:
            raise ValueError(
                'The predict package is using a channel_order '
                'with channels unavailable in the imagery.\nTo set a new '
                'channel_order that only uses channels available in the '
                'imagery, use the --channel-order option.')
|
[
"lewfish@gmail.com"
] |
lewfish@gmail.com
|
e3108d7acd8e38aaf51c1907544cd85f233fe97f
|
212724dd876c15ef801fb781e907b1c7dd08f4ae
|
/skyline/analyzer_dev/agent.py
|
119b1a148c34b95fdd80d9a272df3f2da99a8d70
|
[
"MIT"
] |
permissive
|
wfloutier/skyline
|
b9e769cddccdefeeb7c7cc258524bbf489f9d5eb
|
b12758dc11564de93c7ad76c1f8ed3327db78aa4
|
refs/heads/master
| 2020-08-08T03:19:40.283298
| 2019-10-09T11:05:13
| 2019-10-09T11:05:13
| 213,693,601
| 0
| 0
|
NOASSERTION
| 2019-10-08T16:20:15
| 2019-10-08T16:20:15
| null |
UTF-8
|
Python
| false
| false
| 7,067
|
py
|
import logging
import sys
import traceback
from os import getpid, kill
import signal
from os.path import dirname, abspath, isdir
from daemon import runner
from time import sleep, time
from logging.handlers import TimedRotatingFileHandler, MemoryHandler
import warnings
warnings.filterwarnings('error', 'a', RuntimeWarning, 'pandas', 0)
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
import settings
from validate_settings import validate_settings_variables
from analyzer_dev import AnalyzerDev
skyline_app = 'analyzer_dev'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
class AnalyzerDevAgent():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
self.stderr_path = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
self.pidfile_path = '%s/%s.pid' % (settings.PID_PATH, skyline_app)
self.pidfile_timeout = 5
def run(self):
if len(sys.argv) > 1 and sys.argv[1] == 'stop':
do_not_overwrite_log = True
# This should hopefully take care of a TODO from the bin files,
# TODO: write a real kill script
# This is basically from the python-daemon function:
# def _terminate_daemon_process from:
# https://github.com/elephantum/python-daemon/blob/a38aefd37d319586a9e7ab034435928b1c243e49/daemon/runner.py#L133
# logging with multiprocessing and log rotation is difficult. I am
# certain that many a people have been in a helpless and hopeless
# state when trying to debug Skyline, those python truncating log
# handlers, it is not easy. Many, many combinations of things have
# been attempted in this area to attempt to be able to have the
# agents just append the log. The TimedRotatingFileHandler does not
# help matters either as it has no mode='a'. It could be handled by
# normal log rotation but multiprocessing does not make that easy
# either. It is a difficult problem and adding a multiprocessing
# log Queue with the agent listening and writing has been consider
# too. It may work, but adds a lot more complexity, for me anyway.
# The ideal is to have to agent.py creating/appending to log
# and not overwriting the damn thing and TimedRotatingFileHandler,
# everything is possible :) And the new bin bash files do a pretty
# good job anyway and have for 2 years now, maybe havign lost 1 or 2
# in the 2 years that they have been managing the logs :)
# @earthgecko 20160520
pid = int(open(pidfile_path).read())
try:
kill(pid, signal.SIGTERM)
print '%s pid %s stopped' % (skyline_app, str(pid))
sys.exit(0)
except OSError, exc:
print 'Failed to kill pid %s' % str(pid)
sys.exit(1)
else:
logger.info('starting skyline ' + skyline_app)
Analyzer(getpid()).start()
while 1:
sleep(10)
def run():
"""
Check that all the `ALGORITHMS` can be run.
Start the AnalyzerAgent.
Start the logger.
"""
if not isdir(settings.PID_PATH):
print('pid directory does not exist at %s' % settings.PID_PATH)
sys.exit(1)
if not isdir(settings.LOG_PATH):
print('log directory does not exist at %s' % settings.LOG_PATH)
sys.exit(1)
if len(sys.argv) > 1 and sys.argv[1] == 'stop':
do_not_overwrite_log = True
# This should hopefully take care of a TODO from the bin files,
# TODO: write a real kill script
# as above @earthgecko 20160520
pidfile_path = settings.PID_PATH + '/' + skyline_app + '.pid'
pid = int(open(pidfile_path).read())
try:
kill(pid, signal.SIGTERM)
print '%s pid %s stopped' % (skyline_app, str(pid))
sys.exit(0)
except OSError, exc:
print 'Failed to kill pid %s' % str(pid)
sys.exit(1)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
if len(sys.argv) > 1 and sys.argv[1] == 'stop':
handler = logging.FileHandler(
settings.LOG_PATH + '/' + skyline_app + '.stop.log',
mode='a', delay=False)
else:
handler = logging.handlers.TimedRotatingFileHandler(
logfile,
when="midnight",
interval=1,
backupCount=5)
memory_handler = logging.handlers.MemoryHandler(256,
flushLevel=logging.DEBUG,
target=handler)
handler.setFormatter(formatter)
logger.addHandler(memory_handler)
# Validate settings variables
valid_settings = validate_settings_variables(skyline_app)
if not valid_settings:
print ('error :: invalid variables in settings.py - cannot start')
sys.exit(1)
if len(sys.argv) > 1 and sys.argv[1] == 'stop':
do_not_overwrite_log = True
else:
# Make sure we can run all the algorithms
try:
# from analyzer import algorithms
import algorithms_dev
logger.info('Testing algorithms')
timeseries = map(list, zip(map(float, range(int(time()) - 86400, int(time()) + 1)), [1] * 86401))
# ensemble = [globals()[algorithm](timeseries) for algorithm in settings.ALGORITHMS]
ensemble = [getattr(algorithms_dev, algorithm)(timeseries) for algorithm in settings.ALGORITHMS]
logger.info('Tested algorithms OK')
logger.info('ensemble: %s' % str(ensemble))
except KeyError as e:
print('Algorithm %s deprecated or not defined; check settings.ALGORITHMS' % e)
sys.exit(1)
except Exception as e:
print('Algorithm test run failed.')
traceback.print_exc()
sys.exit(1)
logger.info('Tested algorithms')
del timeseries
del ensemble
analyzer = AnalyzerDevAgent()
if len(sys.argv) > 1 and sys.argv[1] == 'stop':
do_not_overwrite_log = True
else:
logger.info('starting analyzer_dev.run')
memory_handler.flush
if len(sys.argv) > 1 and sys.argv[1] == 'run':
analyzer.run()
else:
daemon_runner = runner.DaemonRunner(analyzer)
daemon_runner.daemon_context.files_preserve = [handler.stream]
daemon_runner.do_action()
if len(sys.argv) > 1 and sys.argv[1] == 'stop':
do_not_overwrite_log = True
else:
logger.info('stopped analyzer_dev')
if __name__ == '__main__':
run()
|
[
"gary.wilson@of-networks.co.uk"
] |
gary.wilson@of-networks.co.uk
|
e8d681f0fd6132ac3ec5f78f2a19a7626eb73e81
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/file/formats/ios/apple8900/Apple8900Constants.pyi
|
f4484ba6bda351773a13dd061d24c65d6032375b
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
pyi
|
import java.lang
class Apple8900Constants(object):
    """Type stub (.pyi) for Ghidra's Apple8900Constants Java class.

    Declarations only - bodies are `...` by stub convention.  The constants
    appear to describe the Apple "8900" container format (magic bytes, the
    published AES key string, and encrypted/plain format codes) - confirm
    against the Java source.
    """
    AES_IV_ZERO_BYTES: List[int] = array('b', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    AES_KEY_BYTES: List[int] = array('b', [24, -124, 88, -90, -47, 80, 52, -33, -29, -122, -14, 59, 97, -44, 55, 116])
    AES_KEY_STRING: unicode = u'188458A6D15034DFE386F23B61D43774'
    FORMAT_ENCRYPTED: int = 3
    FORMAT_PLAIN: int = 4
    MAGIC: unicode = u'8900'
    MAGIC_BYTES: List[int] = array('b', [56, 57, 48, 48])
    MAGIC_LENGTH: int = 4
    def __init__(self): ...
    # Object-protocol methods inherited from java.lang.Object:
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def hashCode(self) -> int: ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
e9fa3b97e68a4174d68b1eb712a4ecfc00517d6a
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/vc/migrations/0026_vcdomainprovisioning_resource_group.py
|
f3e9edecabe1e4aaa2a15c894edf84d4149fce74
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 706
|
py
|
# ----------------------------------------------------------------------
# Add Resource Group to vc domainprovisioning
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Schema migration: add a `resource_group` column to the VC domain
    provisioning config table."""

    def migrate(self):
        # Nullable + blank so existing rows stay valid without a default.
        resource_group = models.CharField(
            "Resource Group", max_length=64, null=True, blank=True
        )
        self.db.add_column(
            "vc_vcdomainprovisioningconfig", "resource_group", resource_group
        )
|
[
"aversanta@gmail.com"
] |
aversanta@gmail.com
|
5cf693023453b8bd4cd23869f8c84666ede5542d
|
481daea741175413a840e922014505b0089dfd04
|
/processers/processer.py
|
c60964c6cf600594387bf9f42e0ac39550dd57d8
|
[] |
no_license
|
the16thpythonist/JTShell
|
b512813104e3ba5d19964bcb1dbb1e124adc6b5e
|
7b194b5132512aa9ed79a0c7d9757f24dbde9a5f
|
refs/heads/master
| 2021-05-30T02:19:48.182831
| 2015-11-07T00:43:35
| 2015-11-07T00:43:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
__author__ = 'Jonas'
from JTShell.util.message import Message
import time
class Processer:
    """Base class for every object taking part in the shell's command
    execution process.

    Subclasses gain access to the shell's writer and logger objects (when
    present) through the inherited :meth:`_write`, which forwards a message
    to both sinks.

    :var shell: (Shell) the shell this processer was created by; grants
        access to the shell's writer and logger objects
    :var name: (string) identifier of the concrete object, prepended to
        every message it emits
    """
    def __init__(self, shell):
        self.shell = shell
        self.name = ""

    def _write(self, typ, message):
        """Forward a message to the shell's writer and logger, if present.

        The writer displays the message to the user, the logger persists it
        to the log files.  The `typ` string ("error", "process", "output" or
        "warning") controls how the message is rendered.

        :param typ: (string) the type of message given
        :param message: (string) the message to be displayed
        :return: (void)
        """
        text = "{0}: ".format(self.name) + message
        # Writer first, then logger - same order the original sinks used.
        for sink in (self.shell.writer, self.shell.logger):
            if sink is not None:
                sink.write(Message(typ, text))
|
[
"jonseb1998@gmail.com"
] |
jonseb1998@gmail.com
|
9e599e8b5fd751743f099de05205732cd4bc081c
|
e77b92df446f0afed18a923846944b5fd3596bf9
|
/Programers_algo/Graph/kruskal.py
|
d1d113114a54256cb73edd9e6cc8c3d493ea7ea5
|
[] |
no_license
|
sds1vrk/Algo_Study
|
e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e
|
fbbc21bb06bb5dc08927b899ddc20e6cde9f0319
|
refs/heads/main
| 2023-06-27T05:49:15.351644
| 2021-08-01T12:43:06
| 2021-08-01T12:43:06
| 356,512,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
# 크루스칼 알고리즘
# 가장 적은 비용으로 모든 노드를 연결
# UnionFind
import sys
sys.stdin=open("input.txt","r")
def find_parent(parent, x):
    """Return the root (set representative) of x, compressing the path."""
    # Walk up until the root (roots satisfy parent[r] == r).
    root = x
    while parent[root] != root:
        root = parent[root]
    # Path compression: point every node on the walked path at the root so
    # later lookups are O(1).
    while parent[x] != root:
        parent[x], x = root, parent[x]
    return root
def union_parent(parent, a, b):
    """Merge the sets containing a and b.

    The smaller root index becomes the representative, making the result
    deterministic.
    """
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
# Read the number of nodes (v) and edges / union operations (e)
v,e=map(int,input().split())
parent=[0]*(v+1)
edges=[]
result=0  # accumulated weight of the minimum spanning tree
# Initially every node is its own parent (disjoint singleton sets)
for i in range(1,v+1):
    parent[i]=i
for _ in range(e):
    a,b,cost=map(int,input().split())
    # store cost first so the tuples sort by cost
    edges.append((cost,a,b))
# Kruskal: process edges in ascending cost order
edges.sort()
# examine the edges one by one
for edge in edges:
    cost,a,b=edge
    # add the edge only if it does not create a cycle
    if find_parent(parent,a)!=find_parent(parent,b):
        union_parent(parent,a,b)
        result+=cost
print(result)
|
[
"51287886+sds1vrk@users.noreply.github.com"
] |
51287886+sds1vrk@users.noreply.github.com
|
8b1cec12c2e95e9c6e1aa2b3a1dc3890902dccb6
|
6a69d57c782e0b1b993e876ad4ca2927a5f2e863
|
/vendor/samsung/common/packages/apps/SBrowser/src/media/cast/test/utility/utility.gyp
|
93e2f14842b82bec4a5561b751635ce8a87d4a77
|
[
"BSD-3-Clause"
] |
permissive
|
duki994/G900H-Platform-XXU1BOA7
|
c8411ef51f5f01defa96b3381f15ea741aa5bce2
|
4f9307e6ef21893c9a791c96a500dfad36e3b202
|
refs/heads/master
| 2020-05-16T20:57:07.585212
| 2015-05-11T11:03:16
| 2015-05-11T11:03:16
| 35,418,464
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 891
|
gyp
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'cast_test_utility',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
],
'dependencies': [
'<(DEPTH)/ui/gfx/gfx.gyp:gfx',
'<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
],
'sources': [
'<(DEPTH)/media/cast/test/fake_single_thread_task_runner.cc',
'<(DEPTH)/media/cast/test/fake_single_thread_task_runner.h',
'input_builder.cc',
'input_builder.h',
'audio_utility.cc',
'audio_utility.h',
'video_utility.cc',
'video_utility.h',
], # source
},
],
}
|
[
"duki994@gmail.com"
] |
duki994@gmail.com
|
b8ed59691aeefe3e4e9152934a20f1a38dd76ec1
|
135254b8c00935efd0efd33c708ce69470e23741
|
/Hard/149. Max Points on a Line.py
|
b3ccdb55fe8727f1317734272d23f558f8fa762e
|
[] |
no_license
|
MinecraftDawn/LeetCode
|
4974e6f96612f01e4774ecd5c30bc42dfff79467
|
0404bcce27ff363430e6ab71dbc27a69055fd261
|
refs/heads/master
| 2021-06-19T05:50:08.000396
| 2021-06-14T05:57:09
| 2021-06-14T05:57:09
| 188,446,485
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
class Solution:
    def maxPoints(self, points: list) -> int:
        """Return the maximum number of points lying on one straight line.

        Brute force: for every pair of points, count how many points fall
        on the line through them (O(n^3) overall).  Slopes are compared by
        cross-multiplication to avoid float division.
        """
        n = len(points)
        # Fewer than three points are always collinear.
        if n < 3:
            return n
        best = 0
        for i in range(n - 1):
            xi, yi = points[i]
            for j in range(i + 1, n):
                xj, yj = points[j]
                run = xj - xi
                rise = yj - yi
                if run == 0:
                    # Vertical line: collinear iff same x coordinate.
                    on_line = sum(1 for xk, _ in points if xk == xi)
                else:
                    on_line = sum(
                        1 for xk, yk in points
                        if rise * (xk - xi) == run * (yk - yi)
                    )
                best = max(best, on_line)
        return best
|
[
"eric4902077@gmail.com"
] |
eric4902077@gmail.com
|
a714c5b7c7804e9c67533f18dde8b0eedd53ddb4
|
ce196aba0adde47ea2767eae1d7983a1ef548bb8
|
/turtle_天花乱坠动画2.py
|
fcf7f1dd160a5abe55f5b10729cabd0a1f397bb2
|
[] |
no_license
|
xiang-daode/Python3_codes
|
5d2639ffd5d65065b98d029e79b8f3608a37cf0b
|
06c64f85ce2c299aef7f9311e9473e0203a05b09
|
refs/heads/main
| 2023-08-30T14:59:55.123128
| 2021-11-03T05:12:24
| 2021-11-03T05:12:24
| 333,632,892
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
# 在这里写上你的代码 :-)
from turtle import *
import math
# Current pen position (updated by Trgl after each segment is drawn).
x0,y0=0,0
# Phase angles of the three superimposed circular motions (advanced by Draw).
u,v,w=.1,.2,.3
def Trgl(x,y,q):
    """Draw a thick colored segment from the previous pen position to
    (x, y) and stamp a small filled triangle there; q drives the colors
    and triangle size deterministically."""
    global x0,y0
    # Derive pen/fill colors from q (hex color strings).
    rgb="#"+hex(0x100000+(q**3) % 0xEFFFFF)[2:]
    clr="#"+hex(0x100000+(q**2) % 0xEFFFFF)[2:]
    pu()
    goto(x0,y0)
    pd()
    pensize(6)
    pencolor(rgb)
    goto(x,y)
    lt(.1) # rotate slightly (degrees)
    fillcolor(clr) # fill color
    begin_fill() # start filling
    circle(5+(int)(q*q/1000) % 25, steps=3)  # steps=3 draws a triangle
    end_fill() # stop filling
    x0,y0=x,y
def Draw():
    """Render one animation frame and reschedule itself on the turtle timer."""
    ht()  # hide the turtle cursor
    tracer(False)  # draw immediately, without per-step animation
    global u,v,w,x0,y0
    # Advance the three phase angles at different speeds.
    u,v,w=u+.01,v+.02,w+.03
    clear()
    # Plot the parametric curve over the interval, one segment per q.
    for q in range(-360,366,6):
        x=200*math.cos(u+q/60)+100*math.cos(v-q/90)+50*math.cos(-w+q/45)
        y=200*math.sin(u+q/60)+100*math.sin(v-q/90)+50*math.sin(-w+q/45)
        Trgl((int)(x),(int)(y),q)
    update()
    # BUG FIX: was `ontimer(Draw(), 20)`, which CALLED Draw immediately and
    # recursed until the recursion limit instead of scheduling the next
    # frame; ontimer must be given the callable itself.
    ontimer(Draw, 20)
Draw()
|
[
"noreply@github.com"
] |
xiang-daode.noreply@github.com
|
af9f7a13d84ec8d9dcd93612ecfb7ab3e5a9bcd3
|
be0e0488a46b57bf6aff46c687d2a3080053e52d
|
/python/baekjoon/10991.py
|
8b5a24ec3813157fd199bc0c010da37a72249e58
|
[] |
no_license
|
syo0e/Algorithm
|
b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939
|
1ae754d5bb37d02f28cf1d50463a494896d5026f
|
refs/heads/master
| 2023-06-09T11:31:54.266900
| 2021-06-30T17:04:38
| 2021-06-30T17:04:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
# Read the height n, then print a right-aligned triangle: row i (0-based)
# gets (n - 1 - i) leading spaces followed by (i + 1) repetitions of "* ".
n = int(input())
for row in range(n):
    print(" " * (n - 1 - row) + "* " * (row + 1))
|
[
"kyun2dot@gmail.com"
] |
kyun2dot@gmail.com
|
7b218f14178cea2f621b90959936835954be0bd5
|
4a1b61cf551db7843050cc7080cec6fd60c4f8cc
|
/2020/SWEA_문제/swea_1211_Ladder2_풀이.py
|
8caa8091e792d5a7c8190b1981bcb752ee8bf14e
|
[] |
no_license
|
phoenix9373/Algorithm
|
4551692027ca60e714437fd3b0c86462f635d8ff
|
c66fd70e14bb8357318e8b8f386d2e968f0c4d98
|
refs/heads/master
| 2023-08-24T10:01:20.798430
| 2021-10-15T07:57:36
| 2021-10-15T07:57:36
| 288,092,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
dc = [-1, 1] # column offsets: left, right
def dir_check(r, c): # direction check
    """Return 0 if a rung extends left of (r, c), 1 if right, else 2.

    NOTE(review): reads the module-level 100x100 `ladder` grid.
    """
    for i in range(2):
        nc = c + dc[i]
        if 0 <= nc < 100 and ladder[r][nc] == 1:
            return i
    return 2
def go(st):
    """Walk the ladder down from top column `st_pos[st]` and return the
    total number of moves (one per row, plus the column distance crossed
    whenever a rung is taken).

    NOTE(review): relies on module-level `ladder`, `st_pos` and `dc`.
    """
    col = st_pos[st]
    cnt = 0
    idx = st
    for i in range(100):
        d = dir_check(i, col)
        if d < 2:
            # A rung: hop to the neighbouring vertical line and count the
            # horizontal distance crossed.
            idx += dc[d]
            cnt += abs(col - st_pos[idx])
            col = st_pos[idx]
        cnt += 1
    return cnt
for tc in range(10):
    # Read the test case number
    tc_num = int(input())
    # Read the 100 x 100 ladder grid
    ladder = [list(map(int, input().split())) for _ in range(100)]
    # Column indices of the ladder tops (cells equal to 1 in row 0)
    st_pos = []
    for i in range(100):
        if ladder[0][i] == 1:
            st_pos.append(i)
    # Start with an arbitrarily large sentinel minimum
    min_value = 987654321
    # Always overwritten below, so the initial value is irrelevant
    ans_idx = -1
    for i in range(len(st_pos)):
        tmp = go(i)
        if tmp <= min_value:
            min_value = tmp
            ans_idx = st_pos[i]
    print("#{} {}".format(tc_num, ans_idx))
|
[
"phoenix9373@naver.com"
] |
phoenix9373@naver.com
|
2fd557c72c6c708381632158e55b0b34053df367
|
7e93b1c33045b4c03054f42b6a2b800279b12a9b
|
/core/cache/backends/dummy.py
|
dd3b537c31f94fd76440864c0633ceb9028bcdbe
|
[
"MIT"
] |
permissive
|
anthill-arch/framework
|
6f8036980667843f2be1414850255cf6a10e2dcd
|
a6c238a62ae9c3fb319d12e77f7e9047aab75e8d
|
refs/heads/master
| 2020-05-09T06:01:31.186830
| 2019-08-23T13:52:43
| 2019-08-23T13:52:43
| 180,988,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
"""Dummy cache backend"""
from anthill.framework.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
class DummyCache(BaseCache):
    """A cache backend that never stores anything.

    Every key is still normalized and validated (so misuse surfaces during
    development), but writes are discarded and reads always miss.
    """

    def __init__(self, host, *args, **kwargs):
        # `host` is accepted only for constructor-signature compatibility
        # with the other backends; a dummy cache has nothing to connect to.
        super().__init__(*args, **kwargs)

    def _check_key(self, key, version):
        # Shared key normalization + validation used by every operation.
        key = self.make_key(key, version=version)
        self.validate_key(key)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._check_key(key, version)
        # Pretend the value was stored.
        return True

    def get(self, key, default=None, version=None):
        self._check_key(key, version)
        # Always a cache miss.
        return default

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._check_key(key, version)

    def delete(self, key, version=None):
        self._check_key(key, version)

    def has_key(self, key, version=None):
        self._check_key(key, version)
        return False

    def clear(self):
        pass
|
[
"x55aah@gmail.com"
] |
x55aah@gmail.com
|
990c86e2db482e655b196f5e1532da5ba6511e28
|
21b201ebf2ffbbc19fa8d74e5657e12ef597b02d
|
/research/attention_ocr/python/datasets/fsns_test.py
|
ae4bd198024cacb58466a58ae414192674610642
|
[] |
no_license
|
alhsnouf/model
|
fa619691ad9d0afc7ad849a9471e6bb0643a8d47
|
5fe429b115634e642a7469b3f1d4bc0c5cf98782
|
refs/heads/master
| 2021-04-12T11:16:02.150045
| 2018-03-27T15:19:18
| 2018-03-27T15:19:18
| 126,702,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:adecb7ba327b3d3e67e0b87d2739a254b0a9b16b81fbffc730ee0c03476db08d
size 3374
|
[
"alhanouf987@hotmail.com"
] |
alhanouf987@hotmail.com
|
7589cc724a6de9341855ab64e3e966192de85fb8
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/associate_router_request_body.py
|
1fee2936721baab02638a1cc01ff60243363cc0b
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,088
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
# NOTE(review): SDK model class - appears machine-generated (the docstrings
# and serialization helpers follow the HuaweiCloud SDK generator pattern),
# so prefer regenerating over hand-editing.
class AssociateRouterRequestBody:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'router': 'Router'
    }
    attribute_map = {
        'router': 'router'
    }
    def __init__(self, router=None):
        """AssociateRouterRequestBody

        The model defined in huaweicloud sdk

        :param router: The router to associate.
        :type router: :class:`huaweicloudsdkdns.v2.Router`
        """
        self._router = None
        self.discriminator = None
        self.router = router
    @property
    def router(self):
        """Gets the router of this AssociateRouterRequestBody.

        :return: The router of this AssociateRouterRequestBody.
        :rtype: :class:`huaweicloudsdkdns.v2.Router`
        """
        return self._router
    @router.setter
    def router(self, router):
        """Sets the router of this AssociateRouterRequestBody.

        :param router: The router of this AssociateRouterRequestBody.
        :type router: :class:`huaweicloudsdkdns.v2.Router`
        """
        self._router = router
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask attributes flagged as sensitive in serialized output.
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AssociateRouterRequestBody):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
861702d38884274207c83fc6a9221108052e545b
|
b3b68efa404a7034f0d5a1c10b281ef721f8321a
|
/Scripts/simulation/holidays/holiday_commands.py
|
d5c08c9ea3a12970f2e84cae9f7fa63bd14fa313
|
[
"Apache-2.0"
] |
permissive
|
velocist/TS4CheatsInfo
|
62195f3333076c148b2a59f926c9fb5202f1c6fb
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
refs/heads/main
| 2023-03-08T01:57:39.879485
| 2021-02-13T21:27:38
| 2021-02-13T21:27:38
| 337,543,310
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,539
|
py
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\holidays\holiday_commands.py
# Compiled at: 2018-03-22 23:30:43
# Size of source mod 2**32: 2819 bytes
from protocolbuffers import GameplaySaveData_pb2, DistributorOps_pb2
from google.protobuf import text_format
from seasons.seasons_enums import SeasonType
from server_commands.argument_helpers import TunableInstanceParam, OptionalSimInfoParam, get_optional_target
import services, sims4.commands
@sims4.commands.Command('holiday.get_holiday_data', command_type=(sims4.commands.CommandType.Live))
def get_holiday_data(holiday_id: int, _connection=None):
    """Console command: push info for the holiday with `holiday_id` to the client."""
    holiday_service = services.holiday_service()
    if holiday_service is None:
        # No holiday service available (e.g. system inactive): no-op.
        return
    holiday_service.send_holiday_info_message(holiday_id)
@sims4.commands.Command('holiday.get_active_holiday_data', command_type=(sims4.commands.CommandType.Live))
def get_active_holiday_data(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Console command: send active-holiday info for the target Sim's household."""
    sim_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if sim_info is None:
        sims4.commands.output('Failed to find SimInfo.')
        return
    sim_info.household.holiday_tracker.send_active_holiday_info_message(DistributorOps_pb2.SendActiveHolidayInfo.START)
@sims4.commands.Command('holiday.update_holiday', command_type=(sims4.commands.CommandType.Live))
def update_holiday(holiday_data: str, _connection=None):
    """Console command: modify an existing holiday from a text-format Holiday proto."""
    holiday_service = services.holiday_service()
    if holiday_service is None:
        return
    # Parse the protobuf text-format payload into a Holiday message.
    proto = GameplaySaveData_pb2.Holiday()
    text_format.Merge(holiday_data, proto)
    holiday_service.modify_holiday(proto)
@sims4.commands.Command('holiday.add_holiday', command_type=(sims4.commands.CommandType.Live))
def add_holiday(holiday_data: str, season: SeasonType, day: int, _connection=None):
    """Console command: add a holiday (text-format proto) on `day` of `season`."""
    holiday_service = services.holiday_service()
    if holiday_service is None:
        return
    # Parse the protobuf text-format payload into a Holiday message.
    proto = GameplaySaveData_pb2.Holiday()
    text_format.Merge(holiday_data, proto)
    holiday_service.add_a_holiday(proto, season, day)
@sims4.commands.Command('holiday.remove_holiday', command_type=(sims4.commands.CommandType.Live))
def remove_holiday(holiday_id: int, _connection=None):
    """Console command: remove the holiday with `holiday_id`."""
    holiday_service = services.holiday_service()
    if holiday_service is None:
        return
    holiday_service.remove_a_holiday(holiday_id)
|
[
"cristina.caballero2406@gmail.com"
] |
cristina.caballero2406@gmail.com
|
3eeadc182efdc464262b1666ed9e13d5177d14a7
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/Code/.history/listogram_20200211102152.py
|
dba8833384547273220a2bdcaa8ac891eb054bfd
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290
| 2020-08-04T23:19:23
| 2020-08-04T23:19:23
| 234,856,234
| 0
| 0
| null | 2020-06-05T21:13:04
| 2020-01-19T07:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,653
|
py
|
#!python
from __future__ import division, print_function # Python 2 and 3 compatibility
import random
from sample import prob_sample
class Listogram(list):
    """Histogram of word frequencies implemented as a subclass of list.

    Each entry is a two-item list ``[word, count]``; ``types`` counts the
    distinct words seen and ``tokens`` the total word occurrences.
    """

    def __init__(self, word_list=None):
        """Initialize this histogram as a new list and count given words."""
        super(Listogram, self).__init__()  # Initialize this as a new list
        self.types = 0   # Count of distinct word types in this histogram
        self.tokens = 0  # Total count of all word tokens in this histogram
        # Count words in given list, if any
        if word_list is not None:
            for word in word_list:
                self.add_count(word)

    def add_count(self, word, count=1):
        """Increase frequency count of given word by given count amount."""
        self.tokens += count
        index = self.index_of(word)
        if index is not None:
            # Word already present: bump its count in place.
            self[index][1] += count
        else:
            # First occurrence: record a new [word, count] entry.
            self.append([word, count])
            self.types += 1

    def frequency(self, word):
        """Return frequency count of given word, or 0 if word is not found."""
        index = self.index_of(word)
        return self[index][1] if index is not None else 0

    def __contains__(self, word):
        """Return boolean indicating if given word is in this histogram."""
        return self.index_of(word) is not None

    def index_of(self, target):
        """Return the index of the entry containing given target word if
        found in this histogram, or None if target word is not found.

        Linear search; the leftover debug print has been removed."""
        for index, (word, _) in enumerate(self):
            if word == target:
                return index
        return None

    def sample(self):
        """Return a word from this histogram, randomly sampled by weighting
        each word's probability of being chosen by its observed frequency."""
        return prob_sample(self)
def print_histogram(word_list):
    """Build a Listogram from `word_list` and print its contents, counts,
    and a sampling demonstration."""
    print()
    print('Histogram:')
    print('word list: {}'.format(word_list))
    # Create a listogram and display its contents
    histogram = Listogram(word_list)
    print('listogram: {}'.format(histogram))
    print('{} tokens, {} types'.format(histogram.tokens, histogram.types))
    # Show frequencies for the last two words as a spot check
    for word in word_list[-2:]:
        freq = histogram.frequency(word)
        print('{!r} occurs {} times'.format(word, freq))
    print()
    print_histogram_samples(histogram)
def print_histogram_samples(histogram):
    """Sample `histogram` 10,000 times and print a table comparing each
    word's sampled frequency with its observed frequency (with the error
    colorized by magnitude)."""
    print('Histogram samples:')
    # Sample the histogram 10,000 times and count frequency of results
    samples_list = [histogram.sample() for _ in range(10000)]
    samples_hist = Listogram(samples_list)
    print('samples: {}'.format(samples_hist))
    print()
    print('Sampled frequency and error from observed frequency:')
    header = '| word type | observed freq | sampled freq | error |'
    divider = '-' * len(header)
    print(divider)
    print(header)
    print(divider)
    # ANSI colors for error magnitude
    green = '\033[32m'
    yellow = '\033[33m'
    red = '\033[31m'
    reset = '\033[m'
    # Check each word in original histogram
    for word, count in histogram:
        # Calculate word's observed frequency
        observed_freq = count / histogram.tokens
        # Calculate word's sampled frequency
        samples = samples_hist.frequency(word)
        sampled_freq = samples / samples_hist.tokens
        # Calculate error between word's sampled and observed frequency
        error = (sampled_freq - observed_freq) / observed_freq
        # green < 5% error, yellow < 10%, red otherwise
        color = green if abs(error) < 0.05 else yellow if abs(error) < 0.1 else red
        print('| {!r:<9} '.format(word)
              + '| {:>4} = {:>6.2%} '.format(count, observed_freq)
              + '| {:>4} = {:>6.2%} '.format(samples, sampled_freq)
              + '| {}{:>+7.2%}{} |'.format(color, error, reset))
    print(divider)
    print()
def main():
    """Demo entry point: histogram the CLI arguments, or built-in sample
    texts when no arguments are given."""
    import sys
    arguments = sys.argv[1:]  # Exclude script name in first argument
    if len(arguments) >= 1:
        # Test histogram on given arguments
        print_histogram(arguments)
    else:
        # Test histogram on letters in a word
        word = 'abracadabra'
        print_histogram(list(word))
        # Test histogram on words in a classic book title
        fish_text = 'one fish two fish red fish blue fish'
        print_histogram(fish_text.split())
        # Test histogram on words in a long repetitive sentence
        woodchuck_text = ('how much wood would a wood chuck chuck'
                          ' if a wood chuck could chuck wood')
        print_histogram(woodchuck_text.split())
if __name__ == '__main__':
    main()
# BUG FIX: removed a stray trailing line,
#     print(listogram('one fish two fish red fish blue fish').items()
# It had unbalanced parentheses (a SyntaxError that prevented the whole
# module from importing), referenced an undefined lowercase `listogram`,
# and sat outside the __main__ guard so it would have run on import.
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
78e865b1517223315d05ca75adde29b5a753601b
|
acad86f0365aa0b5b613644a896bffb9d4dc533f
|
/django_session_jwt/test.py
|
8e931858e91b4e4cab19fa50500ef1161aa607b8
|
[
"MIT"
] |
permissive
|
willstott101/django-session-jwt
|
f85be9c9d5cefe26f5e6253886018baa256c2a83
|
ca5dc90c61190305f902ceab03a30abf0e184865
|
refs/heads/master
| 2023-08-29T13:42:35.587372
| 2021-09-23T19:18:26
| 2021-09-23T19:18:26
| 421,750,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
from importlib import import_module
from django.conf import settings
from django.test.client import Client as BaseClient
from django.contrib.auth import SESSION_KEY, get_user_model
from django_session_jwt.middleware import convert_cookie, verify_jwt
User = get_user_model()
class Client(BaseClient):
    """Django test client that mirrors django_session_jwt's JWT session cookie."""
    def login(self, **credentials):
        """Log in via the base client, then convert the session cookie to a
        JWT so this client's requests resemble real middleware traffic."""
        ret = super(Client, self).login(**credentials)
        if ret:
            # Look up the freshly logged-in user from the session's user id.
            user = User.objects.get(id=int(self.session[SESSION_KEY]))
            convert_cookie(self.cookies, user)
        return ret
    @property
    def session(self):
        """
        Obtains the current session variables.
        """
        engine = import_module(settings.SESSION_ENGINE)
        cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
        if cookie:
            # The cookie may be a JWT wrapping the session key under 'sk',
            # or a plain session key; fall back to the raw cookie value.
            sk = verify_jwt(cookie.value).get('sk', cookie.value)
            return engine.SessionStore(sk)
        # No cookie yet: create a fresh session and remember its key.
        session = engine.SessionStore()
        session.save()
        self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        return session
|
[
"btimby@gmail.com"
] |
btimby@gmail.com
|
9c3072e28bebc804c79f0b1fa0248796a4500f7e
|
82e8593a7e91a260a39b150e13f25741da4d6f8f
|
/pylib/cli.py
|
a4dbf7efd887a15e93d2dcb068d85565d0cc2891
|
[] |
no_license
|
grandi23/winterpy
|
5b9074c62d0f3da370705b99ae2a5f9d9f6ada59
|
989fe6595dc53dca9a0f6da3feb8e05d009d7958
|
refs/heads/master
| 2021-01-17T12:08:10.737815
| 2015-05-03T09:24:48
| 2015-05-03T09:24:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
# vim:fileencoding=utf-8
# This file is in the Public Domain
'''
Convenient functions for command-line use.
Python 2 & 3
'''
import os
import re
def repl(local, histfile=None, banner=None):
import readline
import rlcompleter
if 'libedit' in readline.__doc__:
readline.parse_and_bind('bind ^I rl_complete')
else:
readline.parse_and_bind('tab: complete')
if histfile is not None and os.path.exists(histfile):
# avoid duplicate reading
if readline.get_current_history_length() <= 0:
readline.set_history_length(10000)
readline.read_history_file(histfile)
import code
readline.set_completer(rlcompleter.Completer(local).complete)
code.interact(local=local, banner=banner)
if histfile is not None:
readline.write_history_file(histfile)
def repl_reset_stdin(*args, **kwargs):
fd = os.open('/dev/tty', os.O_RDONLY)
os.dup2(fd, 0)
os.close(fd)
repl(*args, **kwargs)
def _translate(m):
s = m.group(0)
type, code = s[1], int(s[2:], 16)
if type == 'x':
return chr(code)
else:
return unichr(code).encode('utf-8')
def unescape_py2(s):
return re.sub(r'\\x[0-9A-Fa-f]{2}|\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8}',
_translate, s)
def repl_py27(local, *args, **kwargs):
'''Fix unicode display in Python 2.x; Console encoding must be UTF-8'''
import re, sys
def displayfunc(value):
if value is None:
local['_'] = None
return
r = repr(value)
r = unescape_py2(r)
print(r)
local['_'] = value
old_displayhook = sys.displayhook
sys.displayhook = displayfunc
try:
repl(local, *args, **kwargs)
finally:
sys.displayhook = old_displayhook
if __name__ == '__main__':
import sys
if sys.version_info[0] == 3:
repl_func = repl
else:
repl_func = repl_py27
repl_func(vars(), os.path.expanduser('~/.pyhistory'))
|
[
"lilydjwg@gmail.com"
] |
lilydjwg@gmail.com
|
e8d8d9af59056af753ef58acd36c9b21d2d45cbf
|
39354dfc8f61f57f022522a3e3a880c73a540d0d
|
/shenfun/utilities/__init__.py
|
ab4cb0c3b03aa606b0854b11a59de03b0f3e750b
|
[
"BSD-2-Clause"
] |
permissive
|
mstf1985/shenfun
|
aab9cd416ea7cb549ef191ed9e32f4cd66d522d0
|
83a28b3f7142ef3bf60b20d707ba8c1d2f13a8ff
|
refs/heads/master
| 2022-12-10T20:27:51.825173
| 2020-08-28T13:58:58
| 2020-08-28T13:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,621
|
py
|
"""
Module for implementing helper functions.
"""
import types
from numbers import Number
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
from collections import defaultdict
import numpy as np
import sympy as sp
from scipy.fftpack import dct
from shenfun.optimization import optimizer
__all__ = ['dx', 'clenshaw_curtis1D', 'CachedArrayDict',
'outer', 'apply_mask', 'integrate_sympy', 'mayavi_show']
def dx(u):
r"""Compute integral of u over domain
.. math::
\int_{\Omega} u dx
Parameters
----------
u : Array
The Array to integrate
"""
T = u.function_space()
uc = u.copy()
dim = len(u.shape)
if dim == 1:
w = T.points_and_weights(weighted=False)[1]
return np.sum(uc*w).item()
for ax in range(dim):
uc = uc.redistribute(axis=ax)
w = T.bases[ax].points_and_weights(weighted=False)[1]
sl = [np.newaxis]*len(uc.shape)
sl[ax] = slice(None)
uu = np.sum(uc*w[tuple(sl)], axis=ax)
sl = [slice(None)]*len(uc.shape)
sl[ax] = np.newaxis
uc[:] = uu[tuple(sl)]
return uc.flat[0]
def clenshaw_curtis1D(u, quad="GC"): # pragma: no cover
"""Clenshaw-Curtis integration in 1D"""
assert u.ndim == 1
N = u.shape[0]
if quad == 'GL':
w = np.arange(0, N, 1, dtype=float)
w[2:] = 2./(1-w[2:]**2)
w[0] = 1
w[1::2] = 0
ak = dct(u, 1)
ak /= (N-1)
return np.sqrt(np.sum(ak*w))
assert quad == 'GC'
d = np.zeros(N)
k = 2*(1 + np.arange((N-1)//2))
d[::2] = (2./N)/np.hstack((1., 1.-k*k))
w = dct(d, type=3)
return np.sqrt(np.sum(u*w))
class CachedArrayDict(MutableMapping):
"""Dictionary for caching Numpy arrays (work arrays)
Example
-------
>>> import numpy as np
>>> from shenfun.utilities import CachedArrayDict
>>> work = CachedArrayDict()
>>> a = np.ones((3, 4), dtype=int)
>>> w = work[(a, 0, True)] # create work array with shape as a
>>> print(w.shape)
(3, 4)
>>> print(w)
[[0 0 0 0]
[0 0 0 0]
[0 0 0 0]]
>>> w2 = work[(a, 1, True)] # Get different(note 1!) array of same shape/dtype
"""
def __init__(self):
self._data = {}
def __getitem__(self, key):
newkey, fill = self.__keytransform__(key)
try:
value = self._data[newkey]
except KeyError:
shape, dtype, _ = newkey
value = np.zeros(shape, dtype=np.dtype(dtype, align=True))
self._data[newkey] = value
if fill:
value.fill(0)
return value
@staticmethod
def __keytransform__(key):
assert len(key) == 3
return (key[0].shape, key[0].dtype, key[1]), key[2]
def __len__(self):
return len(self._data)
def __setitem__(self, key, value):
self._data[self.__keytransform__(key)[0]] = value
def __delitem__(self, key):
del self._data[self.__keytransform__(key)[0]]
def __iter__(self):
return iter(self._data)
def values(self):
raise TypeError('Cached work arrays not iterable')
def outer(a, b, c):
r"""Return outer product $c_{i,j} = a_i b_j$
Parameters
----------
a : Array of shape (N, ...)
b : Array of shape (N, ...)
c : Array of shape (N*N, ...)
The outer product is taken over the first index of a and b,
for all remaining indices.
"""
av = a.v
bv = b.v
cv = c.v
symmetric = a is b
if av.shape[0] == 2:
outer2D(av, bv, cv, symmetric)
elif av.shape[0] == 3:
outer3D(av, bv, cv, symmetric)
return c
@optimizer
def outer2D(a, b, c, symmetric):
c[0] = a[0]*b[0]
c[1] = a[0]*b[1]
if symmetric:
c[2] = c[1]
else:
c[2] = a[1]*b[0]
c[3] = a[1]*b[1]
@optimizer
def outer3D(a, b, c, symmetric):
c[0] = a[0]*b[0]
c[1] = a[0]*b[1]
c[2] = a[0]*b[2]
if symmetric:
c[3] = c[1]
c[6] = c[2]
c[7] = c[5]
else:
c[3] = a[1]*b[0]
c[6] = a[2]*b[0]
c[7] = a[2]*b[1]
c[4] = a[1]*b[1]
c[5] = a[1]*b[2]
c[8] = a[2]*b[2]
@optimizer
def apply_mask(u_hat, mask):
if mask is not None:
u_hat *= mask
return u_hat
def integrate_sympy(f, d):
"""Exact definite integral using sympy
Try to convert expression `f` to a polynomial before integrating.
See sympy issue https://github.com/sympy/sympy/pull/18613 to why this is
needed. Poly().integrate() is much faster than sympy.integrate() when applicable.
Parameters
----------
f : sympy expression
d : 3-tuple
First item the symbol, next two the lower and upper integration limits
"""
try:
p = sp.Poly(f, d[0]).integrate()
return p(d[2]) - p(d[1])
except sp.PolynomialError:
#return sp.Integral(f, d).evalf()
return sp.integrate(f, d)
def split(measures):
#ms = sp.sympify(measures).expand()
#ms = ms if isinstance(ms, tuple) else [ms]
#result = []
#for m in ms:
# if sp.simplify(m) == 0:
# continue
# d = {'coeff': m} if isinstance(m, Number) else sp.separatevars(m, dict=True)
# d = defaultdict(lambda: 1, {str(k): sp.simplify(v) for k, v in d.items()})
# dc = d['coeff']
# d['coeff'] = int(dc) if isinstance(dc, (sp.Integer, int)) else float(dc)
# result.append(d)
#return result
def _split(mss, result):
for ms in mss:
ms = sp.sympify(ms)
if isinstance(ms, sp.Mul):
# Multiplication of two or more terms
result = _split(ms.args, result)
continue
# Something else with only one symbol
sym = ms.free_symbols
assert len(sym) <= 1
if len(sym) == 1:
sym = sym.pop()
result[str(sym)] *= ms
else:
ms = int(ms) if isinstance(ms, sp.Integer) else float(ms)
result['coeff'] *= ms
return result
ms = sp.sympify(measures).expand()
result = []
if isinstance(ms, sp.Add):
for arg in ms.args:
result.append(_split([arg], defaultdict(lambda: 1)))
else:
result.append(_split([ms], defaultdict(lambda: 1)))
return result
def mayavi_show():
"""
Return show function that updates the mayavi figure in the background.
"""
from pyface.api import GUI
from mayavi import mlab
return mlab.show(GUI().stop_event_loop)
|
[
"mikaem@math.uio.no"
] |
mikaem@math.uio.no
|
00f9596b9cdd8422623d02cb8b0bb31ff158a62c
|
27b4d1b7723845812111a0c6c659ef87c8da2755
|
/face_recognition/02.py
|
688d0fa1923af488a037fb7b805f09b4f024848a
|
[] |
no_license
|
NAMEs/Python_Note
|
59a6eff7b4287aaef04bd69fbd4af3faf56cccb4
|
f560e00af37c4f22546abc4c2756e7037adcc40c
|
refs/heads/master
| 2022-04-11T09:32:17.512962
| 2020-03-17T09:30:58
| 2020-03-17T09:30:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
'''
自动找到图像中的所有面孔
'''
from PIL import Image
import face_recognition
#加载图片
image1 = face_recognition.load_image_file('./unknow/5.jpg')
# 返回一个列表,列表中用元组保存图片中的所有人脸的位置
face_locations = face_recognition.face_locations(image1)
# print(face_locations)
print("There are {} people in the picture.".format(len(face_locations)))
for face_location in face_locations:
# 元组中图片坐标为上,右,下,左
top,right,bottom,left = face_location
face_image = image1[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
pil_image.show()
|
[
"1558255789@qq.com"
] |
1558255789@qq.com
|
4edf1fe1a5e22527573a5583c5b23eb261503734
|
308c6fb81f0023b9d5682731c10402ce6a2ebb49
|
/django-coreyschafer/bin/wheel
|
22071a9376b83a7a00b89f03cc28124ac54f7bc7
|
[] |
no_license
|
frclasso/django-project1
|
221646ddc9a6702ab8ab2b0e475f4eed09411457
|
9527e30f5f6d54be2a77524411f114441c968a92
|
refs/heads/master
| 2020-07-06T13:42:28.314546
| 2019-08-26T00:55:25
| 2019-08-26T00:55:25
| 203,033,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
#!/home/fabio-gurus/Desktop/repositories/django_projects/django-coreyschafer/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"frclasso@yahoo.com.br"
] |
frclasso@yahoo.com.br
|
|
f6d9cbd62c65febcdc5836464a6acb874d242b87
|
197420c1f28ccb98059888dff214c9fd7226e743
|
/Адаптивный_тренажёр/1.All_including/1.All_including.py
|
aa7587c23d75e4631ca3603a350790351678c14f
|
[] |
no_license
|
Vovanuch/python-basics-1
|
fc10b6f745defff31364b66c65a704a9cf05d076
|
a29affec12e8b80a1d3beda3a50cde4867b1dee2
|
refs/heads/master
| 2023-07-06T17:10:46.341121
| 2021-08-06T05:38:19
| 2021-08-06T05:38:19
| 267,504,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
'''
Напишите программу, которая находит все позиции вхождения подстроки в строку.
Формат ввода:
На первой строке содержится исходная строка, на второй строке ввода указана подстрока, позиции которой требуется найти. Строки состоят из символов латинского алфавита.
Формат вывода:
Строка, содержащая индексы (индексация начинается с нуля) вхождения подстроки в строку, разделённые пробелом или число -1 в случае, когда подстрока не найдена.
Sample Input 1:
abacabadaba
aba
Sample Output 1:
0 4 8
Sample Input 2:
aaaa
aa
Sample Output 2:
0 1 2
Sample Input 3:
abc
d
Sample Output 3:
-1
'''
s = input()
a = input()
is_part = False
for i in range(len(s)):
if s[i:].startswith(a):
print(i, end=' ')
is_part = True
if not is_part:
print(-1)
|
[
"vetohin.vladimir@gmail.com"
] |
vetohin.vladimir@gmail.com
|
50b9a5870c5e0c488973768abc415d1754612da4
|
2324dea2cb3003c8ab7e8fd80588d44973eb8c77
|
/Euler_6_273.py
|
28a9e0a2585490ed604b3493f38cb928c2fa2009
|
[] |
no_license
|
MikeOcc/MyProjectEulerFiles
|
5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56
|
4d066d52380aade215636953589bf56d6b88f745
|
refs/heads/master
| 2021-01-16T18:45:44.133229
| 2015-05-27T18:28:43
| 2015-05-27T18:28:43
| 5,876,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
#
#
# Euler Problem 273
#
#
from Functions import IsPrime,RetFact
from itertools import combinations
def Prod(f):
retval = 1
for vv in f:
retval *= vv
return retval
p=[]
S=set([])
for k in range(1,39):
v = 4*k + 1
if IsPrime(v): p+=[v]
print p
l = len(p)
summ = 0
cnt = 0
for i in range(1,3):
n = combinations(p,i)
for t in n:
N = Prod(t)
for c in xrange(1,int(N**.5)+1):
u = c**2
v = (N - u) **.5
if v == int(v) and c<v:
summ += c
#print "Found", N, c,v,t,RetFact(c),RetFact(v),(c*1.0)/N
if N not in S:S.add(N)
cnt += 1
print "Sum of S(N) is ", summ
print "number of items is ", cnt
print
print sorted(S)
|
[
"mike.occhipinti@mlsassistant.com"
] |
mike.occhipinti@mlsassistant.com
|
a467f80425555a9c632f0c29053bb48fe8de5aa0
|
af35f890c0c6a2fa531f47a4c2ed132e8920190d
|
/python/leetcode/string/1111_nesting_depth.py
|
bc58f9173f4a28b783a17a8182e19c032ec63a0c
|
[] |
no_license
|
Levintsky/topcoder
|
b1b17cd3fddef5a23297bcbe4e165508d09a655d
|
a5cb862f0c5a3cfd21468141800568c2dedded0a
|
refs/heads/master
| 2021-06-23T10:15:27.839199
| 2021-02-01T07:49:48
| 2021-02-01T07:49:48
| 188,175,357
| 0
| 1
| null | 2020-05-19T09:25:12
| 2019-05-23T06:33:38
|
C
|
UTF-8
|
Python
| false
| false
| 3,287
|
py
|
"""
1111. Maximum Nesting Depth of Two Valid Parentheses Strings (Medium)
A string is a valid parentheses string (denoted VPS) if and only if it consists of
"(" and ")" characters only, and:
It is the empty string, or
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0,
1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS seq, split it into two disjoint subsequences A and B, such that A
and B are VPS's (and A.length + B.length = seq.length).
Now choose any such A and B such that max(depth(A), depth(B)) is the
minimum possible value.
Return an answer array (of length seq.length) that encodes such a choice of
A and B: answer[i] = 0 if seq[i] is part of A, else answer[i] = 1. Note
that even though multiple answers may exist, you may return any of them.
Example 1:
Input: seq = "(()())"
Output: [0,1,1,1,1,0]
Example 2:
Input: seq = "()(())()"
Output: [0,0,0,1,1,0,1,1]
Constraints:
1 <= seq.size <= 10000
"""
class Solution(object):
def maxDepthAfterSplit(self, seq):
"""
:type seq: str
:rtype: List[int]
"""
n = len(seq)
if n == 0: return []
self.result = [-1] * n
self.parse(seq, 0, 0, n-1)
max_h = max(self.result) + 1
thr = (max_h - 1) // 2
for i in range(n):
if self.result[i] <= thr:
self.result[i] = 0
else:
self.result[i] = 1
return self.result
def parse(self, seq, depth, i, j):
if i >= j: return
# go through and split
slist = []
st = i
cnt = 0
for ii in range(i, j+1):
if seq[ii] == ")":
cnt -= 1
else:
cnt += 1
if cnt == 0:
slist.append([st, ii])
st = ii+1
# case 1:
if len(slist) == 1:
self.result[i] = depth
self.result[j] = depth
self.parse(seq, depth+1, i+1, j-1)
else:
for st, end in slist:
self.parse(seq, depth, st, end)
def solve2(self, seq):
n = len(seq)
res = [0] * n
cnt = 0
for i, c in enumerate(seq):
if i == 0:
res[i] = cnt
if c == "(":
cnt += 1
else:
cnt -= 1
else:
if c == "(":
res[i] = cnt
cnt += 1
else:
cnt -= 1
res[i] = cnt
thr = (max(res) - 1) // 2
for i in range(n):
if res[i] <= thr:
res[i] = 0
else:
res[i] = 1
return res
if __name__ == "__main__":
a = Solution()
# print(a.maxDepthAfterSplit("(()())"))
# rint(a.maxDepthAfterSplit("()(((())))()"))
print(a.solve2("(()())"))
print(a.solve2("()(())()"))
|
[
"zhuoyuanchen2014@u.northwestern.edu"
] |
zhuoyuanchen2014@u.northwestern.edu
|
7d9a15a515e928321da6c01ad4a4b0c6c281c704
|
e6a5fce33aad4fcba37842e135a51ba441b06f48
|
/Algorithms/Strings/BeautifulBinaryString.py
|
788b7087da8bc68a467ba09d592dd229b79f83b4
|
[
"MIT"
] |
permissive
|
pavstar619/HackerRank
|
6710ddd450b06fbb69da5abad9f570e5e26bbbc0
|
697ee46b6e621ad884a064047461d7707b1413cd
|
refs/heads/master
| 2020-06-18T18:53:53.421685
| 2020-02-18T09:35:48
| 2020-02-18T09:35:48
| 196,408,726
| 0
| 0
|
MIT
| 2019-07-11T14:18:16
| 2019-07-11T14:18:16
| null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
#!/bin/python3
class Main():
def __init__(self):
self.n = int(input())
self.s = input()
def output(self):
print(self.s.count('010'))
if __name__ == '__main__':
obj = Main()
obj.output()
|
[
"mokit.aust@gmail.com"
] |
mokit.aust@gmail.com
|
1d392f1ad1a591ca3f59ee411b29ba1720d86905
|
770d4df866b9e66a333f3ffeacdd659b8553923a
|
/results/0175/config.py
|
2eb890c054ba7a06a78c2e58b47304712cdacb6d
|
[] |
no_license
|
leojo/ResultsOverview
|
b2062244cbd81bc06b99963ae9b1695fa9718f90
|
a396abc7a5b4ab257150c0d37c40b646ebb13fcf
|
refs/heads/master
| 2020-03-20T19:52:37.217926
| 2018-08-05T12:50:27
| 2018-08-05T12:50:27
| 137,656,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,597
|
py
|
import os
import numpy as np
import waveUtils
class config(object):
def __init__(self):
self.prepare_data()
# Bsub arguments
bsub_mainfile = "main.py"
bsub_processors = 4
bsub_timeout = "4:00"
bsub_memory = 8000
# Epoch and batch config
batch_size = 128
latent_dim = 100
epochs = 100
epoch_updates = 100
# Network structure
input_s = 16000
n_ae = 5
n_conv_layers = 3
n_deconv_layers = 3
first_size = input_s // (2 ** n_deconv_layers)
final_decoder_filter_size = 3
# Model
load_model = False
model_path = os.path.join("models", "0103", "model") # only used if load_model=True
# Miscellaneous constants
sample_rate = 8000
reconstruction_mult = 1
learning_rate_min = 1e-4
learning_rate_max = 1e-4
learning_rate_scaling_factor = 0 # controlls the shape of the scaling curve from max to min learning rate
learning_rate = 1e-3 # legacy
kl_loss_mult = 1e-3
kl_extra_mult = 2
kl_extra_exponent = 2
keep_prob = 1
use_square = False
data_sources = ["clarinet","trumpet"]
data = None
# Functions
def prepare_data(self):
self.load_data()
def load_and_prepare_audio(self, source):
duration = self.input_s / float(self.sample_rate)
data_dir = os.path.join("wav_files", source)
waves, original_sample_rate = waveUtils.loadAudioFiles(data_dir)
cut_data = waveUtils.extractHighestMeanIntensities(waves, sample_rate=original_sample_rate, duration=duration)
del waves
data = waveUtils.reduceQuality(cut_data, self.sample_rate, duration)
del cut_data
return data
def load_data(self):
if self.data is None:
self.data = [self.load_and_prepare_audio(source) for source in self.data_sources]
def get_training_batch(self):
samples = []
originals = []
num_sources = len(self.data_sources)
sample_shape = self.data[0][0].shape
for _ in range(self.batch_size):
waves = []
sample = np.zeros(sample_shape)
for s in range(num_sources):
i = np.random.randint(len(self.data[s]))
wave = self.data[s][i]
waves.append(wave)
sample += wave
sample = sample/num_sources
samples.append(sample)
originals.append(waves)
samples = np.asarray(samples)
originals = np.asarray(originals)
return samples, originals
def normalize_batch(self, batch):
x = batch.astype(np.float32)
return x / np.max(np.abs(x))
def deconv_filter_size(self, i):
return (2 * (i + 1)) + 1
def deconv_channel_num(self, i):
return 2 ** (config.n_deconv_layers + 3 - i)
def conv_filter_size(self, i):
return (2 * (config.n_conv_layers - i)) + 1
def conv_channel_num(self, i):
return 2 ** (i + 4)
|
[
"leojohannsson91@gmail.com"
] |
leojohannsson91@gmail.com
|
1e0458e35481d98afa1fca38f79c63efacd7cb96
|
b24fa24a96036253a0cd168ac8f6dd41c9102b0a
|
/backend/test1_20020/urls.py
|
9496714a7230f5aec24f0c409c3dbda66af31988
|
[] |
no_license
|
crowdbotics-apps/test1-20020
|
a13c64c66d2028f99723f38e0341873a46d3ae3f
|
9d59b62483172e4900990c07afed60f443c211bc
|
refs/heads/master
| 2022-12-07T00:18:07.268473
| 2020-09-04T03:27:46
| 2020-09-04T03:27:46
| 292,737,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
"""test1_20020 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("taxi_profile.api.v1.urls")),
path("taxi_profile/", include("taxi_profile.urls")),
path("api/v1/", include("booking.api.v1.urls")),
path("booking/", include("booking.urls")),
path("api/v1/", include("location.api.v1.urls")),
path("location/", include("location.urls")),
path("api/v1/", include("vehicle.api.v1.urls")),
path("vehicle/", include("vehicle.urls")),
path("home/", include("home.urls")),
path("api/v1/", include("wallet.api.v1.urls")),
path("wallet/", include("wallet.urls")),
]
admin.site.site_header = "test1"
admin.site.site_title = "test1 Admin Portal"
admin.site.index_title = "test1 Admin"
# swagger
api_info = openapi.Info(
title="test1 API",
default_version="v1",
description="API documentation for test1 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
de97fb29bc50a39e1dba26ab527c2bf6030e1521
|
a392cd0963b030c934f2a9e329867a68515f4a5c
|
/cotoha/ne.py
|
acae889f794da0baf8c1d02b9350faa61b31d382
|
[] |
no_license
|
hatopoppoK3/COTOHA-Python
|
b89517bc6037c95a692dd85c98007727a712da24
|
df333167f775e7a550827d016bf1892e36ac5602
|
refs/heads/master
| 2021-01-16T02:14:44.716987
| 2020-05-20T15:22:31
| 2020-05-20T15:22:31
| 242,940,170
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,905
|
py
|
from cotoha.api import Cotoha
from cotoha.api import check_dic_class, get_sentence_class
class CotohaNe(Cotoha):
"""固有表現抽出に関するクラス.
"""
def __init__(self, sentence: str, kuzure_flag=False, dic_class=[]):
"""
Args:
sentence (str): 解析対象文.
sentence_class (bool, optional): 崩れ文かどうか. Defaults to False.
dic_class (list, optional): 専門用語辞書. Defaults to [].
Raises:
NeError: dic_classにエラーがある場合.
"""
super().__init__()
self.sentence = sentence
self.sentence_class = get_sentence_class(kuzure_flag)
if check_dic_class(dic_class):
self.dic_class = dic_class
else:
raise NeError('dic_classにエラーがあります.')
request_json = {'sentence': self.sentence,
'type': self.sentence_class,
'dic_type': self.dic_class}
response_dict = self.get_response_dict(
relative_url='nlp/v1/ne', request_body=request_json)
self.message = response_dict['message']
self.status = response_dict['status']
self.ne_result_list = []
for result_dict in response_dict['result']:
self.ne_result_list.append(NeResult(result_dict))
def __str__(self) -> str:
string = super().__str__()
string += 'sentence:{}\n'.format(self.sentence)
string += 'sentence_class:{}\n'.format(self.sentence_class)
string += 'dic_class:{}\n'.format(self.dic_class)
string += 'message:{}\n'.format(self.message)
string += 'status:{}\n'.format(self.status)
for ne_result in self.ne_result_list:
string += ne_result.__str__()
return string
class NeError(Exception):
"""固有表現抽出に関する例外クラス.
dic_classやsentence_classに関するエラーがある場合に呼ばれる.
"""
class NeResult(object):
"""固有表現抽出の結果に関するクラス.
"""
def __init__(self, result_dict: dict):
self.begin_pos = result_dict['begin_pos']
self.end_pos = result_dict['end_pos']
self.form = result_dict['form']
self.std_form = result_dict['std_form']
self.ne_class = result_dict['class']
self.extended_class = result_dict['extended_class']
self.source = result_dict['source']
def __str__(self):
string = 'begin_pos:{}\n'.format(self.begin_pos)
string += 'end_pos:{}\n'.format(self.end_pos)
string += 'form:{}\n'.format(self.form)
string += 'std_form:{}\n'.format(self.std_form)
string += 'ne_class:{}\n'.format(self.ne_class)
string += 'extended_class:{}\n'.format(self.extended_class)
string += 'source:{}\n'.format(self.source)
return string
|
[
"hatopoppo0320@gmail.com"
] |
hatopoppo0320@gmail.com
|
88a06957d542b33485cfa40edd4ee1565f4d15eb
|
c3cfd90531b560b9522a10089101dd09ed13233e
|
/arithmetic.py
|
c91f0e9764c593d10713fc7d9bd05f1a91842425
|
[] |
no_license
|
kateybatey/calculator-1
|
602368782265dd302fc4eee376e0d3568e826f2a
|
01b24c34d4d0c369279eea6e8ced9fd17bf35722
|
refs/heads/master
| 2021-01-22T02:04:27.807606
| 2017-05-24T19:48:10
| 2017-05-24T19:48:10
| 92,330,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
"""Math functions for calculator."""
def add(num1, num2):
"""Return the sum of the two input integers."""
total = num1 + num2
return total
def subtract(num1, num2):
"""Return the second number subtracted from the first."""
total = num1 - num2
return total
def multiply(num1, num2):
"""Multiply the two inputs together."""
total = num1 * num2
return total
def divide(num1, num2):
"""Divide the first input by the second, returning a floating point."""
total = num1 / num2
return total
def square(num1):
"""Return the square of the input."""
total = num1 ** 2
return total
def cube(num1):
"""Return the cube of the input."""
total = num1 ** 3
return total
def power(num1, num2):
"""Raise num1 to the power of num and return the value."""
total = num1 ** num2
return total
def mod(num1, num2):
"""Return the remainder of num / num2."""
total = num1 % num2
return total
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
567fa248d17de4c3fd124cb904e5495ef97e5a25
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_64/27.py
|
4c01b978bd8b3ea6271d01fc75a29607b2413799
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,003
|
py
|
import sys
import pdb
class Reader:
def __init__(self, filename):
self.fp = open(filename)
def read(self):
tokens = self.fp.readline().split()
result = []
for token in tokens:
try:
result.append(int(token, 10))
except ValueError:
result.append(token)
return result
def read_strings(self):
tokens = self.fp.readline().split()
return tokens
def bits(x, n):
result = []
for i in xrange(n - 1, -1, -1):
result.append((x & (1 << i)) != 0)
return result
class Board:
def __init__(self, board):
self.board = board
self.row_count = len(board)
self.col_count = len(board[0])
assert all(len(row) == self.col_count for row in board)
self.opt = [[None] * self.col_count for i in xrange(self.row_count)]
def remove_largest_square(self):
for i in xrange(self.row_count):
self.opt[i][0] = 1 if self.board[i][0] is not None else 0
for j in xrange(self.col_count):
self.opt[0][j] = 1 if self.board[0][j] is not None else 0
for i in xrange(1, self.row_count):
for j in xrange(1, self.col_count):
here = self.board[i][j]
up = self.board[i - 1][j]
left = self.board[i][j - 1]
upleft = self.board[i - 1][j - 1]
if here is None: # burned out
self.opt[i][j] = 0
elif (here is upleft is True and up is left is False) or \
(here is upleft is False and up is left is True):
self.opt[i][j] = min(self.opt[i - 1][j - 1],
self.opt[i][j - 1],
self.opt[i - 1][j]) + 1
assert self.opt[i][j] >= 2
else:
self.opt[i][j] = 1
square_max = 0
square_i = square_j = None
for i in xrange(self.row_count):
for j in xrange(self.col_count):
if self.opt[i][j] > square_max:
square_max = self.opt[i][j]
square_i, square_j = i, j
# return if no square to remove
if square_i is None:
return 0
# burn out the removed square
for i in xrange(square_i - square_max + 1, square_i + 1):
for j in xrange(square_j - square_max + 1, square_j + 1):
self.board[i][j] = None
# return the size of the largest square
return square_max
def tiles_left(self):
return sum(sum(x is not None for x in row) for row in self.board)
def dump(self):
tiles = {None: 'X', True: '@', False: '.'}
for row in self.board:
print ''.join(tiles[x] for x in row)
if __name__ == '__main__':
reader = Reader(sys.argv[1])
case_count, = reader.read()
for case in xrange(case_count):
# dynamic programming woo
row_count, col_count = reader.read()
board = []
for i in xrange(row_count):
row_hex, = reader.read_strings()
board.append(bits(int(row_hex, 16), col_count))
board = Board(board)
square_counts = {}
while True:
size = board.remove_largest_square()
if size == 0:
break
elif size == 1:
square_counts.setdefault(1, 0)
square_counts[1] += 1
square_counts[1] += board.tiles_left()
break
else:
square_counts.setdefault(size, 0)
square_counts[size] += 1
assert sum(size**2 * count for size, count in
square_counts.iteritems()) == row_count * col_count
print "Case #%d: %d" % (case + 1, len(square_counts))
for size in reversed(square_counts.keys()):
print "%d %d" % (size, square_counts[size])
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1bfa2e3038d4e8f3250045713006589dc9f8952b
|
2afb1095de2b03b05c8b96f98f38ddeca889fbff
|
/Python_Basic/def/decorators_2.py
|
33bb013abb5f1e24d9295d6568415f5872fd17b5
|
[] |
no_license
|
draganmoo/trypython
|
187316f8823296b12e1df60ef92c54b7a04aa3e7
|
90cb0fc8626e333c6ea430e32aa21af7d189d975
|
refs/heads/master
| 2023-09-03T16:24:33.548172
| 2021-11-04T21:21:12
| 2021-11-04T21:21:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,469
|
py
|
import time
''"""
添加会员,我们要先模拟一个会员库,比如有一个会员名字叫synferlo,密码是harry618
如果不通过装饰器,为了避免大规模修改源代码的函数,我们只能通过修改函数调用方式(即高阶函数)的方法达到效果。
也就是说,我们将一个函数作为参数传入到另一个函数中
"""''
"""
方法:
在log_in_system()函数中嵌套一个inner函数,然后return inner,这样整个log_in_system不会返回US,JP函数的结果,而是把inner函数执行后的内存地址返回
然后我们把它赋值给一个变量,在通过这个变量名+()的方式调用inner函数,通过闭包完成不修改源代码和调用方式的完成代码扩展
我们把一个函数的名字赋给一个变量,然后再调用这个变量就相当于调用这个函数:
在不改变源代码和调用方式前提下,把函数名赋值给新变量,然后再调用,这就是“函数装饰器”
又或者,你不想在后面添加:
US_EU = log_in_system(US_EU)
JP_KO = log_in_system(JP_KO)
则可以再想要装饰的函数上面添加 @log_in_system 来达到相同的效果,这就是装饰器的语法。
"""
data_base = {'is_log_in':False,
'user name':'synferlo',
'password':'harry618'}
def log_in_system(func):
##对字典内变量值得判断用方括号,后面加is或者==
def inner():
if data_base['is_log_in'] == False:
user_name = input('user name: ')
pass_word = input('password: ')
if user_name == data_base['user name'] and pass_word == data_base['password']:
print('Success! Welcome to Membership Section')
data_base['is_log_in'] = True
func()
else:
print('User name or password is not matched. Please try again')
else:
print('log in request has been approved')
func()
return inner
"""
添加了return inner后,后面US_EU = log_in_system(US_EU)返回的是inner的内存地址,而不是执行inner,即之前学的闭包现象
当我们真正想要调用inner的时候,需要执行US_EU() (即,代码原本的调用方式
这样我们在不修改原本调用方式和源代码的情况下完成了扩展代码的任务!!"""
def home_page():
print("""
--------------------Home Page------------------
this is the home page of this website
End
""")
def US_EU():
print("""
-----------------US and EU Section--------------
welcome to US and EU Membership section
End
""")
def JP_KO():
print("""
-----------------JP and KO Section--------------
welcome to JP and KO Membership section
End
""")
def domestic():
print("""
-----------------Domestic Section--------------
welcome to Domestic section
End
""")
home_page()
domestic()
"""##注意这里一定不能载US_EU和JP_KO后面加括号,加括号表示要启动这个函数,而我们这里是以索引,只是用他们的名字赋值"""
US_EU = log_in_system(US_EU)
JP_KO = log_in_system(JP_KO)
US_EU()
JP_KO()
"""
通过 @log_in_system 达成装饰器效果:
装饰器可以添加多个:执行时候从上到下执行:
@pay_money
@vip_level
@log_in_system
....
"""
data_base = {'is_log_in':False,
'user name':'synferlo',
'password':'harry618'}
def log_in_system(func):
##对字典内变量值得判断用方括号,后面加is或者==
def inner():
if data_base['is_log_in'] == False:
user_name = input('user name: ')
pass_word = input('password: ')
if user_name == data_base['user name'] and pass_word == data_base['password']:
print('Success! Welcome to Membership Section')
data_base['is_log_in'] = True
func()
else:
print('User name or password is not matched. Please try again')
else:
print('log in request has been approved')
func()
return inner
"""
添加了return inner后,后面US_EU = log_in_system(US_EU)返回的是inner的内存地址,而不是执行inner,即之前学的闭包现象
当我们真正想要调用inner的时候,需要执行US_EU() (即,代码原本的调用方式
这样我们在不修改原本调用方式和源代码的情况下完成了扩展代码的任务!!"""
def home_page():
    """Show the public landing-page banner (never login-guarded)."""
    banner = """
--------------------Home Page------------------
this is the home page of this website
End
"""
    print(banner)
@log_in_system
def US_EU():
    # Members-only page: the decorator above prompts for login on first call.
    print("""
-----------------US and EU Section--------------
welcome to US and EU Membership section
End
""")
@log_in_system
def JP_KO():
    # Members-only page: the decorator above prompts for login on first call.
    print("""
-----------------JP and KO Section--------------
welcome to JP and KO Membership section
End
""")
def domestic():
    """Show the domestic-section banner (always public, never decorated)."""
    banner = """
-----------------Domestic Section--------------
welcome to Domestic section
End
"""
    print(banner)
# Public pages run unconditionally; US_EU()/JP_KO() are wrapped by
# @log_in_system and go through the credential prompt on first call.
home_page()
domestic()
US_EU()
JP_KO()
|
[
"13701304462@163.com"
] |
13701304462@163.com
|
ce7ffbdec0eb226aa156c9473f3ba0daa63fab4c
|
63191be7f688591af69263972d68423d76fb5f74
|
/geekshop/adminapp/controllers/user.py
|
05aa0358e05154eb814c7758f5dea946a87f8647
|
[] |
no_license
|
IliaNiyazof/Django
|
5eee4c226a1f06178fdbb5626444fff406886de7
|
052cb4f3f142c4224454ebac9fb27f63de9cbc47
|
refs/heads/master
| 2021-07-19T05:52:56.620026
| 2020-06-05T16:17:47
| 2020-06-05T16:17:47
| 166,776,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse_lazy
from django.shortcuts import HttpResponseRedirect
from authapp.models import ShopUser
class UserListView(ListView):
    """Admin listing of all shop users; superusers only."""
    model = ShopUser
    template_name = 'adminapp/users/users_index.html'

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, request, *args, **kwargs):
        # Reject non-superusers before any handler method runs.
        return super(UserListView, self).dispatch(request, *args, **kwargs)
class UserCreateView(CreateView):
    """Create a new shop user from the admin panel.

    NOTE(review): 'password' appears as a plain form field, so the value is
    saved as entered (no hashing visible here) -- confirm set_password
    handling elsewhere.  Also no superuser check, unlike UserListView.
    """
    model = ShopUser
    template_name = 'adminapp/users/update.html'
    fields = ('username', 'age', 'password', 'email', 'first_name', 'avatar')
    success_url = reverse_lazy('admin:users_index')
class UserUpdateView(UpdateView):
    """Edit an existing shop user (password excluded from the form)."""
    model = ShopUser
    template_name = 'adminapp/users/update.html'
    fields = ('username', 'age', 'email', 'first_name', 'avatar')
    success_url = reverse_lazy('admin:users_index')

    def get_context_data(self, **kwargs):
        parent_context = super(UserUpdateView, self).get_context_data(**kwargs)
        # NOTE(review): the title literally reads "users/create" (Russian)
        # although this is the *update* view -- looks copy-pasted; confirm.
        parent_context['title'] = 'пользователи/создание'
        return parent_context
class UserDeleteView(DeleteView):
    """Soft-delete a shop user: mark inactive instead of removing the row."""
    model = ShopUser
    template_name = 'adminapp/users/delete.html'
    success_url = reverse_lazy('admin:users_index')

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()  # fetch the user being "deleted"
        self.object.is_active = False  # keep the record; just deactivate it
        self.object.save()
        return HttpResponseRedirect(self.get_success_url())
|
[
"IFHRJCFY@yandex.ru"
] |
IFHRJCFY@yandex.ru
|
452d3cb8763ef5630fe140977e5d2231bfa8a948
|
f0316e656767cf505b32c83eef4df13bb9f6b60c
|
/LeetCode/Python/Medium/113_path_sum_2.py
|
06fd48759de839c50e4e5968b112c39a587e6b81
|
[] |
no_license
|
AkshdeepSharma/Classroom
|
70ec46b35fab5fc4a9d2eac430659d7dafba93da
|
4e55799466c101c736de6c7e07d716ff147deb83
|
refs/heads/master
| 2022-06-13T18:14:03.236503
| 2022-05-17T20:16:28
| 2022-05-17T20:16:28
| 94,828,359
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 113: collect all root-to-leaf paths summing to a target."""

    def pathSum(self, root, wanted_sum):
        """
        :type root: TreeNode
        :type wanted_sum: int
        :rtype: List[List[int]]
        """
        found = []
        self.dfs(root, wanted_sum, [], found)
        return found

    def dfs(self, root, wanted_sum, temp, ans):
        """Depth-first walk; *temp* holds the path from the root so far."""
        if root is None:
            return
        path = temp + [root.val]
        at_leaf = root.left is None and root.right is None
        if at_leaf and wanted_sum == root.val:
            ans.append(path)
            return
        remaining = wanted_sum - root.val
        self.dfs(root.left, remaining, path, ans)
        self.dfs(root.right, remaining, path, ans)
|
[
"akshdeep.sharma1@gmail.com"
] |
akshdeep.sharma1@gmail.com
|
c0913253a2a5b39283be769ece6b4ad5d083e695
|
2c635d6b558a65e62a9d37c12abf9e4ecbe8938c
|
/Interleaving String/Interleaving String.py
|
078587fe65f8837338f6aae0a16ec2d3583abaf1
|
[] |
no_license
|
GreatStephen/MyLeetcodeSolutions
|
c698e13b7088fc9236250b6ec10331b88fe99ed1
|
73a8f79f2cd5c769b195c503f0346893b102acdc
|
refs/heads/master
| 2023-03-01T04:53:19.698040
| 2021-02-05T22:28:18
| 2021-02-05T22:28:18
| 284,350,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
class Solution:
    """LeetCode 97: decide whether s3 is an interleaving of s1 and s2."""

    def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
        # reach[i][j]: s1[:i] and s2[:j] can interleave to form s3[:i+j].
        n1, n2 = len(s1), len(s2)
        if n1 + n2 != len(s3):
            return False
        reach = [[False] * (n2 + 1) for _ in range(n1 + 1)]
        reach[0][0] = True
        # First column: prefixes of s3 built from s1 alone.
        for i in range(1, n1 + 1):
            reach[i][0] = reach[i - 1][0] and s1[i - 1] == s3[i - 1]
        # First row: prefixes of s3 built from s2 alone.
        for j in range(1, n2 + 1):
            reach[0][j] = reach[0][j - 1] and s2[j - 1] == s3[j - 1]
        for i in range(1, n1 + 1):
            for j in range(1, n2 + 1):
                take_s2 = reach[i][j - 1] and s2[j - 1] == s3[i + j - 1]
                take_s1 = reach[i - 1][j] and s1[i - 1] == s3[i + j - 1]
                reach[i][j] = take_s2 or take_s1
        return reach[n1][n2]
|
[
"litianyou97@gmail.com"
] |
litianyou97@gmail.com
|
2eda2055c3ab058a4a5366e1564e1d8455fe7dd3
|
e8ae11e5017507da59e2e92d423b6a1994490de4
|
/env/lib/python2.7/site-packages/azure/mgmt/compute/models/ssh_public_key.py
|
0eb39d154705f352f3944718f3a54a63168a0c13
|
[] |
no_license
|
teopeurt/ansible-ubuntu-server
|
613d00cea28bc6531acf4a39aeeb9cd0baa2a391
|
b5b6127d2ee9723c5088443efe2ffb8ae30cfea7
|
refs/heads/master
| 2021-06-28T12:49:50.935753
| 2017-07-31T17:34:33
| 2017-07-31T17:34:33
| 98,912,808
| 0
| 1
| null | 2020-07-24T00:05:31
| 2017-07-31T17:32:56
|
Makefile
|
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SshPublicKey(Model):
    """
    Contains information about SSH certificate public key and the path on the
    Linux VM where the public key is placed.

    :param path: Gets or sets the full path on the created VM where SSH
     public key is stored. If the file already exists, the specified key is
     appended to the file.
    :type path: str
    :param key_data: Gets or sets Certificate public key used to authenticate
     with VM through SSH.The certificate must be in Pem format with or
     without headers.
    :type key_data: str
    """

    # Maps Python attribute names to wire (JSON) keys for the msrest
    # serializer: 'key_data' is serialized as 'keyData'.
    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'key_data': {'key': 'keyData', 'type': 'str'},
    }

    def __init__(self, path=None, key_data=None):
        # Both fields optional; None means "not set" during serialization.
        self.path = path
        self.key_data = key_data
|
[
"me@teopeurt.com"
] |
me@teopeurt.com
|
31f3a8fd24cca93817cf22a6198a2aeb399d5091
|
099b4f825cf6ccf7a9795154f2d7a7daa64d4691
|
/Python_Programming_For_The_Absolute_Begginer_Scripts/HeroInventoryThree.py
|
a4f02b299117ad2f7f80a9e270f4c94c8a4214da
|
[] |
no_license
|
espiercy/py_junkyard
|
22637f4b1056cd7571d99dfc14e27a0590695733
|
48204ddd00a366e67e98e2d6a01921b659677d57
|
refs/heads/master
| 2020-03-23T11:27:46.779467
| 2018-09-22T12:55:08
| 2018-09-22T12:55:08
| 141,504,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
# Hero's Inventory 3.0
# Demonstrates list operations (indexing, slicing, concat, del) interactively.
# Evan Piercy
# 3.21.15

# Create a list with some items and display it with a for loop.
inventory = ["sword" , "armor" , "shield" , "healing potion"]
print("Your items: ")
for item in inventory:
    print(item)
input("\nPress the enter key to continue.")

# Get the length of the list.
print("You have " , len(inventory), "items in your possession.")
input("\nPress the enter key to continue.")

# Test for membership with `in`.
if "healing potion" in inventory:
    print("You will live to fight another day.")

# Display one item through an index.
# NOTE(review): no validation -- a non-integer or out-of-range index raises.
index = int(input("\nEnter the index number for an item in inventory: "))
print("At index " , index , " is " , inventory[index])

# Display a slice chosen by the user.
start = int(input("Enter the index number to begin a slice: "))
finish = int(input("Enter the index number to end the slice: "))
print("Inventory[" , start , ":" , finish , "] is", end=" ")
print(inventory[start:finish])
input("\nPress the enter key to continue.")

# Concatenate two lists.
chest = ["gold","gems"]
print("You find a chest wich contains: ")
print(chest)
inventory += chest
print("Your inventory is now: ")
print(inventory)

# Assign by index.
print("You trade your sword for a crossbow.")
inventory[0] = "crossbow"
print("Your inventory is now: ")
print(inventory)
input("\nPress the enter key to continue.")

# Assign by slice: two items collapse into one.
print("You use your gold and gems to buy an orb of future telling.")
inventory[4:6] = ["orb of future telling"]
print("Your inventory is now: ")
print(inventory)

# Delete an element.
print("In a great battle, your shield is destroyed.")
del inventory[2]
print("Your inventory is now: ")
print(inventory)
input("\nPress the enter key to continue.")

# Delete a slice.
print("Your crossbow and armor are stolen by thieves.")
del inventory[:2]
print("Your inventory is now: ")
print(inventory)
input("\nPress the enter key to exit.")
|
[
"evanpiercy@gmail.com"
] |
evanpiercy@gmail.com
|
d253cc3acea300934927e626015ca9d87ed976b8
|
5215ee22217a0bddc6a6eae3b0e49c1216a77bbc
|
/snippets/genomescan/extractHmmerResults.py
|
3b6b53509ce7664ff31e629035a853ba702a58e0
|
[
"Artistic-2.0"
] |
permissive
|
PapenfussLab/Mungo
|
5cda4d19a5ef4cb51da495f7abf259b4cd4d1697
|
02c5b0e48ecd28596cb9481b282753859f47fed6
|
refs/heads/master
| 2021-01-17T07:40:08.832760
| 2015-08-20T01:21:19
| 2015-08-20T01:21:19
| 41,067,597
| 1
| 3
| null | 2016-06-12T23:31:20
| 2015-08-20T01:02:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
#!/usr/bin/env python
"""
extractHmmerResults.py <ioDir>

Input: Hmmer output 'DEFB.txt'
Outputs:
- the genomic version (DEFB_genomic.txt)
- a summary (DEFB_summary.txt)
- extracted sequence (DEFB_extracted.fa)
- the translation of this (DEFB_extracted_pep.fa)

Author: Tony Papenfuss
Date: Tue Aug 15 10:18:46 EST 2006
"""

# NOTE(review): Python 2 script (uses the `print >> file` statement form).

import os, sys
import hmmer, fasta, sequence

homeDir = os.environ['HOME']
blastdb = os.path.join(homeDir, 'databases/opossum/assembly/blastdb/assembly')

# Work inside the user-supplied input/output directory.
ioDir = sys.argv[1]
os.chdir(ioDir)

genomicFile = open('DEFB_genomic.txt', 'w')
summaryFile = open('DEFB_summary.txt', 'w')
dnaWriter = fasta.MfaWriter('DEFB_extracted.fa')
pepWriter = fasta.MfaWriter('DEFB_extracted_pep.fa')

domains = hmmer.loadDomains('DEFB.txt', seqType='BlockSixFrame')
# Header row: original domain fields plus the four columns added below.
print >> genomicFile, '\t'.join(domains[0].fields
    + ['strand', 'lowScoring', 'pseudogene', 'nCysteines'])

for i,domain in enumerate(domains):
    if i>99: break  # cap at the first 100 domains
    domain.domain = 'DEFB_%0.2i' % (i+1)
    domain.toGenomic(relative=True)
    domain.addField('lowScoring', 'N')
    domain.addField('pseudogene', 'N')
    domain.addField('nCysteines', 0)

    summary = []
    h,s = fasta.getSequence(blastdb, domain.accession,
        start=domain.sStart, end=domain.sEnd, strand=domain.strand)
    pep = sequence.translate(s)
    # Domains ranked beyond 60 are flagged as low scoring.
    if i>59: domain.lowScoring = 'Y'
    # An in-frame stop codon marks the domain as a pseudogene.
    if '*' in pep:
        domain.pseudogene = 'Y'
        summary.append('Contains stops')
    for aa in pep:
        if aa=='C':
            domain.nCysteines += 1
    if domain.nCysteines!=6:
        summary.append('Has %i cysteines' % domain.nCysteines)
    print >> summaryFile, '%s\t%s' % (domain.domain, '; '.join(summary))

    # Keep only plausible defensins: no stops and 5-7 cysteines.
    if domain.pseudogene=='Y' or domain.nCysteines<5 or domain.nCysteines>7:
        print 'Skipped', i
    else:
        h2 = '%s %s:%s-%s(%s)' % (domain.domain, domain.accession, domain.sStart, domain.sEnd, domain.strand)
        dnaWriter.write(h2, s + '\n')
        pepWriter.write(h2, pep + '\n')
    print >> genomicFile, domain

genomicFile.close()
summaryFile.close()
dnaWriter.close()
pepWriter.close()
|
[
"papenfuss@mac2576.wehi.edu.au"
] |
papenfuss@mac2576.wehi.edu.au
|
c0251a93285a9c216b2e657cd6f5ee75e88fd87c
|
e2f507e0b434120e7f5d4f717540e5df2b1816da
|
/363-prime.py
|
eec06779fac42ecf5ef0cc3980108a841e115439
|
[] |
no_license
|
ash/amazing_python3
|
70984bd32ae325380382b1fe692c4b359ef23395
|
64c98940f8a8da18a8bf56f65cc8c8e09bd00e0c
|
refs/heads/master
| 2021-06-23T14:59:37.005280
| 2021-01-21T06:56:33
| 2021-01-21T06:56:33
| 182,626,874
| 76
| 25
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
def is_prime(n):
    """Return True if *n* is a prime number, False otherwise.

    Trial division only up to sqrt(n): any composite number has a divisor
    no larger than its square root, so this replaces the original O(n)
    scan with O(sqrt(n)) work while returning identical results.
    """
    if n < 2:  # 0, 1 and negatives are not prime
        return False
    m = 2
    while m * m <= n:
        if n % m == 0:
            return False
        m += 1
    return True
# Print every prime below 50, one per line.
for x in range(50):
    if is_prime(x):
        print(x)
|
[
"andy@shitov.ru"
] |
andy@shitov.ru
|
f41d9ea36dbee55843bb4d1ff6dba6377fb63e81
|
9d278285f2bc899ac93ec887b1c31880ed39bf56
|
/ondoc/common/migrations/0016_auto_20190206_2246.py
|
e67cae7f190201b42f384a4f3f519d771ff61775
|
[] |
no_license
|
ronit29/docprime
|
945c21f8787387b99e4916cb3ba1618bc2a85034
|
60d4caf6c52a8b70174a1f654bc792d825ba1054
|
refs/heads/master
| 2023-04-01T14:54:10.811765
| 2020-04-07T18:57:34
| 2020-04-07T18:57:34
| 353,953,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# Generated by Django 2.0.5 on 2019-02-06 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``common.Remark.content`` to a plain
    TextField (follows migration 0015_remark_status)."""

    dependencies = [
        ('common', '0015_remark_status'),
    ]

    operations = [
        migrations.AlterField(
            model_name='remark',
            name='content',
            field=models.TextField(),
        ),
    ]
|
[
"shashanks@policybazaar.com"
] |
shashanks@policybazaar.com
|
bc4fb90c32c6b9ad7010a25b5c8a0a524ed26ae7
|
277d4ee56616bb5930c57a57c68a202bf5085501
|
/stubs/torch/nn/parallel/replicate.pyi
|
f337d4b8bd699302940ca17613883b9bed788aa2
|
[
"MIT"
] |
permissive
|
miskolc/spacy-pytorch-transformers
|
fc502523644eb25cb293e0796b46535ba581a169
|
ab132b674c5a91510eb8cc472cdbdf5877d24145
|
refs/heads/master
| 2020-07-22T09:47:17.905850
| 2019-09-04T15:12:09
| 2019-09-04T15:12:09
| 207,156,566
| 1
| 0
|
MIT
| 2019-09-08T18:37:55
| 2019-09-08T18:37:55
| null |
UTF-8
|
Python
| false
| false
| 221
|
pyi
|
# Stubs for torch.nn.parallel.replicate (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.

from typing import Any

# Signature-only stub; the implementation lives in torch.nn.parallel.
def replicate(network: Any, devices: Any, detach: bool = ...): ...
|
[
"honnibal+gh@gmail.com"
] |
honnibal+gh@gmail.com
|
9650f1d30391a7d26762cbdb090a2f7a374bd8d4
|
b731d1b35a5416cdd73d421ea3b88a3a18e4c6d3
|
/ecliptic/support/sequtils.py
|
8f51fb3e102d63eb8f64f97e3d6b21a2e5d694ec
|
[] |
no_license
|
xflicsu/ecliptic
|
ad772d3563cff1875dddc7d29d156093e03afd07
|
e9d2e671bcabc5df30ada0cf42953769099ad5d7
|
refs/heads/master
| 2020-12-28T21:06:37.212834
| 2013-06-18T14:13:23
| 2013-06-18T14:13:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,702
|
py
|
# Copyright (c) 2011-2012 Hyeshik Chang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# - Hyeshik Chang <hyeshik@snu.ac.kr>
#
__all__ = [
'iterseq_sequential',
'GiantFASTAFile',
'reverse_complement',
'iter_windowed',
'iter_windowed_str',
'get_first_sequence_length',
]
from Bio import SeqIO
import string
import gzip
from collections import deque
import os
import re
def iterseq_sequential(fastapath):
    """Generator-coroutine: send() a sequence id, receive matching records.

    Scans the FASTA file once, in order, so ids must be requested in file
    order.  Prime with next() before the first send().
    """
    nextid = yield
    for seq in SeqIO.parse(open(fastapath, 'r'), format='fasta'):
        while seq.name == nextid:
            nextid = yield seq
import pysam

# provides an interface like 'samtools faidx'.
whitespace = re.compile('[ \t\r\n]')


class GiantFASTAFile(object):
    """Random access to a large FASTA file via a samtools-style .fai index."""

    def __init__(self, filename):
        # Build the index on first use, then keep the FASTA handle open.
        if not os.path.exists(filename + '.fai'):
            pysam.faidx(filename)
        self.fasta = open(filename)
        self.index = self.load_index(filename + '.fai')

    def load_index(self, filename):
        """Parse the .fai: name -> (length, offset, colwidth, linesize)."""
        index = {}
        for line in open(filename):
            fields = line[:-1].split('\t')
            index[fields[0]] = tuple(map(int, fields[1:]))
        return index

    def get(self, seqid, start=None, stop=None, strand='+'):  # zero-based, half-open
        """Return the (sub)sequence; reverse-complemented unless strand '+'."""
        length, filepos, colwidth, linesize = self.index[seqid]

        if start is None and stop is None:
            # Whole sequence: account for the newline bytes in every line.
            offset_st = filepos
            linenum_en = length // colwidth
            offset_en = filepos + length + linenum_en * (linesize - colwidth)
        else:
            # Clamp the window, then translate base coords to file offsets.
            start = max(0, start)
            stop = min(length, stop)
            linenum_st = start // colwidth
            offset_st = filepos + start + linenum_st * (linesize - colwidth)
            linenum_en = stop // colwidth
            offset_en = filepos + stop + linenum_en * (linesize - colwidth)

        self.fasta.seek(offset_st, 0)
        seq = whitespace.sub('', self.fasta.read(offset_en - offset_st))
        return seq if strand == '+' else reverse_complement(seq)
# NOTE(review): string.maketrans is Python 2 only (Python 3 would use
# str.maketrans); this module targets Python 2 throughout.
revcmptrans = string.maketrans('ATUGCatugc', 'TAACGtaacg')


def reverse_complement(seq):
    """Return the reverse complement of a DNA/RNA string (case preserved)."""
    return seq.translate(revcmptrans)[::-1]
def iter_windowed(it, width):
    """Yield successive overlapping tuples of *width* consecutive items.

    Python 2 only (uses iterator .next()).  If the input has fewer than
    *width* items the initial fill raises StopIteration.
    """
    i = iter(it)
    queue = deque()
    for _ in range(width):
        queue.append(i.next())
    yield tuple(queue)
    # NOTE(review): loop variable shadows the builtin name `next`.
    for next in i:
        queue.popleft()
        queue.append(next)
        yield tuple(queue)
def iter_windowed_str(it, width):
    """Like iter_windowed, but joins each window into a single string."""
    for r in iter_windowed(it, width):
        yield ''.join(r)
def get_first_sequence_length(path, format='fastq-illumina', gzipped=True):
    """Length of the first record's sequence in *path* (optionally gzipped).

    Python 2 only (uses iterator .next() on the SeqIO parser).
    """
    if gzipped:
        opener = gzip.open
    else:
        opener = open
    return len(SeqIO.parse(opener(path), format=format).next().seq)
|
[
"hyeshik@snu.ac.kr"
] |
hyeshik@snu.ac.kr
|
2cb7966f9848c20ac6a089497ae7bb6742d0382b
|
7d2ee33675a0b0bd3c25ee22766eca3b658efbab
|
/tests/validate_json_schemas.py
|
879f38c7c26e8804615b48b00ca61940d6ebae9e
|
[
"MIT"
] |
permissive
|
AD-SDL/polybot-schema
|
8caef7d6ae01f5723b01e620a6d364b4fcb2ebc9
|
8bc3dbf39e8d71ac6279baa40f08679a3bdbb80a
|
refs/heads/main
| 2023-06-27T03:50:20.856468
| 2021-07-29T17:30:45
| 2021-07-29T17:30:45
| 389,676,088
| 0
| 0
|
MIT
| 2021-07-26T17:38:07
| 2021-07-26T15:12:15
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
from jsonschema import Draft7Validator, RefResolver
from pathlib import Path
import json

# Find all the schemas under ../json relative to this test file.
schema_path = Path(__file__).parent / '..' / 'json'
schemas = schema_path.rglob('*.json')

# Loop through to make sure every schema is itself valid Draft-7.
for schema in schemas:
    print(f'Checking {schema.relative_to(schema_path)}...', end="")

    # Load in the schema.
    # NOTE(review): the loop variable `schema` is rebound here from Path to
    # dict -- intentional, but easy to misread.
    with open(schema) as fp:
        schema = json.load(fp)

    # Resolve $ref entries against sibling schema files on disk.
    validator = Draft7Validator(Draft7Validator.META_SCHEMA,
                                resolver=RefResolver('file:///{}/'.format(schema_path), schema))
    validator.validate(schema)
    print('OK')
|
[
"ward.logan.t@gmail.com"
] |
ward.logan.t@gmail.com
|
c30c48fa7dd725d285fdae6c1a5dd6d450f5beef
|
ce498e1b0fd9c2ae9ebd04c5834f4052a9a46219
|
/tuples/sorting-with-value.py
|
01e568039bac15ac185b8e92f489cf4c611c3c0e
|
[] |
no_license
|
wechuli/python
|
b32e84bb44646810f3f03f9fcfb67e7d8f54ebb6
|
40cc1782724179567128f6da202f166a0c9a0ea1
|
refs/heads/master
| 2022-02-20T19:18:03.578752
| 2019-09-24T17:19:27
| 2019-09-24T17:19:27
| 117,241,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# Demonstrate sorting a dict by value: flip each (key, value) pair so the
# value comes first, then order the pairs descending.
my_nary = {'The': 'Heart', 'is': "exceedingly", 'deceitful': 'and',
           'wicked': 'above', 'all': 'things'}
my_nary2 = {'The': 5, 'is': 85, 'deceitful': 2, 'wicked': -8,
            'all': 22, 'things': 85}

my_list = list(my_nary2.items())
my_lis2 = sorted(((vals, key) for key, vals in my_list), reverse=True)
print(my_lis2)
|
[
"wechulipaul@yahoo.com"
] |
wechulipaul@yahoo.com
|
9edcfd8e23912adb9f02ffa2158ad3e4a1c337a0
|
6cf0c21f14076979edafb401caf6f0be54377490
|
/vedastr_cstr/vedastr/models/bodies/rectificators/sspin.py
|
26c593457eb7e1d3db2b610c4456f3b659256f07
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
shekarneo/ocr-teamcode
|
abb23a6cb1df597ee0f8a45d1caf4c6374ce1d46
|
86d5070e8f907571a47967d64facaee246d92a35
|
refs/heads/main
| 2023-06-02T19:29:20.640271
| 2021-06-23T15:03:31
| 2021-06-23T15:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
# We implement a new module which has same property like spin to some extent.
# We think this manner can replace the GA-SPIN by enlarging output features
# of se layer, but we didn't do further experiments.
import torch.nn as nn
from vedastr.models.bodies.feature_extractors import build_feature_extractor
from vedastr.models.utils import SE
from vedastr.models.weight_init import init_weights
from .registry import RECTIFICATORS
@RECTIFICATORS.register_module
class SSPIN(nn.Module):
    """Rectificator: config-built feature extractor followed by an SE block.

    Intended as a simpler stand-in for SPIN (see module header comment).
    """

    def __init__(self, feature_cfg, se_cfgs):
        super(SSPIN, self).__init__()
        # Backbone from config; its output is re-weighted by the
        # squeeze-and-excitation layer.
        self.body = build_feature_extractor(feature_cfg)
        self.se = SE(**se_cfgs)

        init_weights(self.modules())

    def forward(self, x):
        x = self.body(x)
        x = self.se(x)
        return x
|
[
"coodingpenguin@gmail.com"
] |
coodingpenguin@gmail.com
|
0503c67578bed73112c1a1fd91865ab9220e8817
|
fd1a3d5f1eb2a85a204b9694f26e2168e1e2fa10
|
/msu/fall2013/ece867/project/src/coroutine.py
|
63c83844bcb50a519b15c68480dac7474e52f568
|
[] |
no_license
|
csboling/studies
|
61d3d31ce8ccd4c9370e19fa60fc4ad8fbc26e30
|
1a212d63420ff1602755950124136993bc967849
|
refs/heads/master
| 2021-01-21T14:08:59.397293
| 2016-03-30T17:49:21
| 2016-03-30T17:49:21
| 25,365,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
def coroutine(f):
    """Decorator: advance a generator to its first yield so it is send()-ready.

    Returns a wrapper that builds the generator and primes it, so callers can
    immediately use .send() without an initial next() call.
    """
    def start(*args, **kwargs):
        g = f(*args, **kwargs)
        # next(g) instead of g.next(): works on Python 2.6+ AND Python 3,
        # where generators no longer have a .next() method.
        next(g)
        return g
    return start
@coroutine
def consume():
    """Sink coroutine: silently discards everything sent to it."""
    while True:
        (yield)
@coroutine
def broadcast(targets):
    """Forward every received message to each coroutine in *targets*."""
    while True:
        msg = (yield)
        for target in targets:
            target.send(msg)
@coroutine
def accrue(depth, target):
    """Collect *depth* items via a circbuf, then send the window to *target*.

    Python 2 (xrange).
    """
    # Placeholder cells; circbuf overwrites them in place (v[i] = ...), so the
    # shared-reference [[ ]]*depth initialisation is harmless here.
    w = [[]]*depth
    window = circbuf(w)
    while True:
        for i in xrange(depth):
            window.send((yield))
        target.send(w)
@coroutine
def disperse(targets):
    """Scatter each element of a received sequence to the matching target.

    Python 2 (xrange).
    """
    while True:
        results = (yield)
        for i in xrange(len(results)):
            targets[i].send(results[i])
@coroutine
def printer():
    """Print every value sent to it."""
    while True:
        print (yield)
class Flush(Exception):
    """Thrown into circbuf (g.throw(Flush)) to drain or reset its contents."""
    pass
@coroutine
def circbuf(v, target=None):
    """Circular buffer over the caller-owned list *v* (Python 2: xrange).

    Sent values overwrite v cyclically; `count` tracks how many valid items
    are buffered (capped at len(v)).  Throwing Flush either restarts the fill
    (no target) or replays the buffered items, oldest first, into *target*.
    """
    size = len(v)
    count = 0
    while True:
        for i in xrange(len(v)):
            try:
                v[i] = (yield)
                if count < size:
                    count += 1
            except Flush:
                if target == None:
                    # No consumer: abandon this pass and start refilling.
                    break
                else:
                    # Oldest buffered item sits `count` slots behind i,
                    # wrapping around the ring.
                    tail = i - count
                    if tail < 0:
                        tail += size
                    while count:
                        target.send(v[tail])
                        tail += 1
                        if tail == size: tail = 0
                        count -= 1
|
[
"charles.samuel.boling@gmail.com"
] |
charles.samuel.boling@gmail.com
|
c7c8dbb3871913fc00fa414dd0a9cd0fb3f622c3
|
ef61c5f177ee44ac08325335fc28a12f3fccbb58
|
/resource_management/views/add_item/add_item.py
|
c02a116732e5c57847c249eb40a4f70e1961fbef
|
[] |
no_license
|
bammidichandini/resource_management-chandini
|
3c11c7b2eb5e2f8d3df5b55e4d3ee86a27ed5c3a
|
aa4ec50f0b36a818bebc2033cb39ee928e5be13c
|
refs/heads/master
| 2022-12-01T19:59:25.366843
| 2020-07-23T09:10:42
| 2020-07-23T09:10:42
| 269,610,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
from functools import reduce


def add_item(*args, **kwargs):  # pylint: disable=invalid-name
    """
    Note: replace below mock implementation with your actual implementation

    Request:
    kwargs["user"] -> request user
    kwargs["user_dto"] -> request user_dto
    kwargs["request_object"] -> request body type object
    kwargs["request_data"] -> request body data dict
    kwargs["request_headers_obj"] -> request headers object
    kwargs["request_query_params"] -> request query parameters object

    Response :
    return: tuple(response_status_code, response_object,
    response_headers_object)
    from django_swagger_utils.drf_server.utils.server_gen.endpoint_response \
    import endpoint_response
    return endpoint_response(response_object)
    """
    # Pull the bearer token out of "Authorization: <scheme> <token>";
    # args[0] is the Django request object (has .META).
    access_token = ''
    http_authorization = args[0].META.get("HTTP_AUTHORIZATION")
    if http_authorization is not None:
        if len(http_authorization.split(" ")) == 2:
            access_token = http_authorization.split(" ")[1]
    http_source = args[0].META.get("HTTP_X_SOURCE")
    kwargs.update({"access_token": access_token, 'source': http_source})

    from .api_wrapper import api_wrapper
    response_object = api_wrapper(*args, **kwargs)

    # NOTE(review): leading False seeds the `or` reduce; the fold answers
    # "is response_object an instance of str/int/float?".
    allowed_primitive_types = [False, str, int, float]
    from functools import reduce  # pylint: disable=redefined-builtin
    if response_object is None:
        from django.http.response import HttpResponse
        response_object = HttpResponse()
    elif reduce((lambda a, b: a or isinstance(response_object, b)),
                allowed_primitive_types):
        # Primitive results are stringified into a plain HTTP response.
        from django.http.response import HttpResponse
        response_object = HttpResponse(str(response_object))
    from django_swagger_utils.drf_server.utils.server_gen.endpoint_response \
        import endpoint_response
    return endpoint_response(response_object)
|
[
"chandini.bammidi123@gmail.com"
] |
chandini.bammidi123@gmail.com
|
b16c44dca67fbbadaa94bfac55dcddd6bcb70005
|
dbfeec6da584a41f8f341535728385db4d777ddf
|
/scripts/analysis/constant_accuracy_overall.py
|
60a0851b55bc554e5bc8a13d1d76b9a36a924242
|
[
"MIT"
] |
permissive
|
alan-turing-institute/CSV_Wrangling
|
7b0abfcfc0d3a7a91252da80064952ede4c71578
|
3e073b8eee906c8fc71a5ce4499b07bbe67e8807
|
refs/heads/master
| 2022-01-24T13:31:36.381401
| 2022-01-20T22:13:17
| 2022-01-20T22:13:17
| 158,363,564
| 29
| 10
|
MIT
| 2022-01-20T22:13:48
| 2018-11-20T09:28:05
|
TeX
|
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Overall accuracy of a method averaged over multiple corpora.
Author: Gertjan van den Burg
Copyright (c) 2018 - The Alan Turing Institute
License: See the LICENSE file.
"""
import argparse
import sys
from common.detector_result import Status
from .core import load_detector_results
def parse_args():
    """Define and parse the command-line interface (-r, -d, -o)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r",
        dest="reference",
        help="Reference file(s) with ground truth",
        required=True,
        nargs="+",
    )
    parser.add_argument(
        "-d",
        dest="detector",
        help="Detector result(s)",
        required=True,
        nargs="+",
    )
    parser.add_argument(
        "-o", dest="output", help="Output tex file to write to", required=True
    )
    return parser.parse_args()
def load_and_merge(filenames):
    """Merge detector results from several files into one dict keyed by file.

    Exits (SystemExit) if the same input file appears in more than one
    result file, to avoid silently overwriting results.
    """
    results = {}
    for res_file in filenames:
        _, res = load_detector_results(res_file)
        for fname in res:
            if fname in results:
                print(
                    "Error: duplicate result for file %s" % fname,
                    file=sys.stderr,
                )
                raise SystemExit
            results[fname] = res[fname]
    return results
def compute_accuracy_overall(ref_results, det_results):
    """Percentage of files whose detected dialect matches the reference.

    Only references with Status.OK count toward the total; a detector
    failure (non-OK status) counts as incorrect.
    NOTE(review): raises ZeroDivisionError if no reference has Status.OK.
    """
    total = 0
    correct = 0
    for fname in ref_results:
        ref = ref_results[fname]
        if not ref.status == Status.OK:
            continue
        total += 1
        det = det_results[fname]
        if not det.status == Status.OK:
            continue
        correct += ref.dialect == det.dialect
    return correct / total * 100
def main():
    """Entry point: compute overall accuracy and write it as a TeX snippet."""
    args = parse_args()
    reference_results = load_and_merge(args.reference)
    detector_results = load_and_merge(args.detector)
    acc = compute_accuracy_overall(reference_results, detector_results)
    with open(args.output, "w") as fid:
        # "%.0f\\%%%%" renders e.g. "85\%%": an escaped percent sign plus a
        # trailing "%" so TeX does not insert a space after \input.
        fid.write("%.0f\\%%%%" % acc)


if __name__ == "__main__":
    main()
|
[
"gertjanvandenburg@gmail.com"
] |
gertjanvandenburg@gmail.com
|
ee853b1f9ace43cba6d45251c3e5f56064033d04
|
a7d497669dc91e6432216145a550755d42f6bb69
|
/src/cogent3/util/transform.py
|
e6859405cf90f5a8c193163b7ae0393da4be8a4f
|
[
"BSD-3-Clause"
] |
permissive
|
jbw900/cogent3
|
8887ee4b8be02a2086ad8d6fcaa54afe462baee5
|
9168c9b86d851992d008c8f730a5516b89aef835
|
refs/heads/master
| 2020-09-10T19:19:36.443143
| 2019-11-14T23:48:34
| 2019-11-14T23:48:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,677
|
py
|
#!/usr/bin/env python
"""Provides transformations of functions and other objects.
Includes:
Standard combinatorial higher-order functions adapted from David Mertz (2003),
"Text Processing in Python", Chapter 1.
Functions for performing complex tests on strings, e.g. includes_any or
includes_all.
Functions for generating combinations, permutations, or cartesian products
of lists.
"""
__author__ = "Sandra Smit"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["Sandra Smit", "Rob Knight", "Zongzhi Liu"]
__license__ = "BSD-3"
__version__ = "2019.10.24a"
__maintainer__ = "Sandra Smit"
__email__ = "sandra.smit@colorado.edu"
__status__ = "Production"
maketrans = str.maketrans
# standard combinatorial HOF's from Mertz
def per_shortest(total, x, y):
    """Normalize *total* by the length of the shorter of *x* and *y*.

    Returns 0 whenever either sequence is empty, sidestepping a
    ZeroDivisionError.
    """
    denominator = min(len(x), len(y))
    return total / denominator if denominator else 0


def per_longest(total, x, y):
    """Normalize *total* by the length of the longer of *x* and *y*.

    Returns 0 when both sequences are empty, sidestepping a
    ZeroDivisionError.
    """
    denominator = max(len(x), len(y))
    return total / denominator if denominator else 0


class for_seq(object):
    """Factory for element-wise sequence comparisons.

    An instance applies ``f(i, j)`` to paired elements of two sequences,
    reduces the per-element results with *aggregator* (default: sum) and, by
    default, rescales the aggregate with ``normalizer(total, first, second)``
    (per_shortest, i.e. averaged over the shorter input).  Pass
    ``normalizer=None`` for the raw aggregate.  Pairs beyond the shorter
    sequence are ignored (zip semantics).
    """

    def __init__(self, f, aggregator=sum, normalizer=per_shortest):
        self.f = f
        self.aggregator = aggregator
        self.normalizer = normalizer

    def __call__(self, first, second):
        apply_f = self.f
        per_item = [apply_f(a, b) for a, b in zip(first, second)]
        total = self.aggregator(per_item)
        if self.normalizer is None:
            return total
        return self.normalizer(total, first, second)
# convenience functions for modifying objects
class KeepChars(object):
    """Callable filter: o(s) returns *s* stripped of everything not in *keep*.

    Case sensitive unless constructed with case_sens=False.
    """

    # Every possible byte value 0..255; only characters in this range are
    # candidates for deletion.
    allchars = bytes(range(256))

    def __init__(self, keep, case_sens=True):
        """Build the deletion table from the string of characters to keep."""
        if not case_sens:
            keep = keep.lower() + keep.upper()
        wanted = keep.encode("utf-8")
        self._strip_table = {
            code: None for code in self.allchars if code not in wanted
        }

    def __call__(self, s):
        """Return *s* (str, or utf-8 bytes) with unwanted characters removed."""
        if s is None:
            raise TypeError
        if isinstance(s, bytes):
            s = s.decode("utf8")
        return str(s).translate(self._strip_table)
def first_index_in_set(seq, items):
    """Return the index of the first occurrence of any of *items* in *seq*,
    or None if no element of *seq* is in *items*."""
    hits = (i for i, element in enumerate(seq) if element in items)
    return next(hits, None)
|
[
"Gavin.Huttley@anu.edu.au"
] |
Gavin.Huttley@anu.edu.au
|
2ed208d6f7e98069fb6da757364000b2bbd9ed4e
|
6b3cccf29a604cf6e433bf411f71c9f2692e1c93
|
/openapi_core/schema/schemas/unmarshallers.py
|
b3d0ece1a8b9e32093802a0be93761dfc1ecadd3
|
[
"BSD-3-Clause"
] |
permissive
|
srgkm/openapi-core
|
d826f942fb4551e4d7193e4cb7c156a48c2feb0b
|
4b712cb2b5d045166cecce89ca9e47eb3da5163f
|
refs/heads/master
| 2020-12-13T21:16:23.803664
| 2020-01-13T10:43:32
| 2020-01-13T10:43:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,844
|
py
|
from six import text_type, binary_type, integer_types
from openapi_core.schema.schemas.enums import SchemaFormat, SchemaType
from openapi_core.schema.schemas.exceptions import (
InvalidCustomFormatSchemaValue,
UnmarshallerStrictTypeError,
FormatterNotFoundError,
)
from openapi_core.schema.schemas.util import (
forcebool, format_date, format_datetime, format_byte, format_uuid,
format_number,
)
class StrictUnmarshaller(object):
STRICT_TYPES = ()
def __call__(self, value, type_format=SchemaFormat.NONE, strict=True):
if self.STRICT_TYPES and strict and not isinstance(
value, self.STRICT_TYPES):
raise UnmarshallerStrictTypeError(value, self.STRICT_TYPES)
return value
class PrimitiveTypeUnmarshaller(StrictUnmarshaller):
FORMATTERS = {
SchemaFormat.NONE: lambda x: x,
}
def __init__(self, custom_formatters=None):
if custom_formatters is None:
custom_formatters = {}
self.custom_formatters = custom_formatters
def __call__(self, value, type_format=SchemaFormat.NONE, strict=True):
value = super(PrimitiveTypeUnmarshaller, self).__call__(
value, type_format=type_format, strict=strict)
try:
schema_format = SchemaFormat(type_format)
except ValueError:
formatter = self.custom_formatters.get(type_format)
else:
formatters = self.get_formatters()
formatter = formatters.get(schema_format)
if formatter is None:
raise FormatterNotFoundError(value, type_format)
try:
return formatter(value)
except ValueError as exc:
raise InvalidCustomFormatSchemaValue(value, type_format, exc)
def get_formatters(self):
return self.FORMATTERS
class StringUnmarshaller(PrimitiveTypeUnmarshaller):
STRICT_TYPES = (text_type, binary_type)
FORMATTERS = {
SchemaFormat.NONE: text_type,
SchemaFormat.PASSWORD: text_type,
SchemaFormat.DATE: format_date,
SchemaFormat.DATETIME: format_datetime,
SchemaFormat.BINARY: binary_type,
SchemaFormat.UUID: format_uuid,
SchemaFormat.BYTE: format_byte,
}
class IntegerUnmarshaller(PrimitiveTypeUnmarshaller):
STRICT_TYPES = integer_types
FORMATTERS = {
SchemaFormat.NONE: int,
SchemaFormat.INT32: int,
SchemaFormat.INT64: int,
}
class NumberUnmarshaller(PrimitiveTypeUnmarshaller):
STRICT_TYPES = (float, ) + integer_types
FORMATTERS = {
SchemaFormat.NONE: format_number,
SchemaFormat.FLOAT: float,
SchemaFormat.DOUBLE: float,
}
class BooleanUnmarshaller(PrimitiveTypeUnmarshaller):
STRICT_TYPES = (bool, )
FORMATTERS = {
SchemaFormat.NONE: forcebool,
}
|
[
"maciag.artur@gmail.com"
] |
maciag.artur@gmail.com
|
748aab2283fd79854e093608d29d48ce9b13ead4
|
53c91272444bfab92e7e89e0358047b27eab1125
|
/03.代码/豆瓣评论/scrapydouban/scrapydouban/middlewares.py
|
61c53041598417f8298ce947bccbc3bfded65d56
|
[] |
no_license
|
MrFiona/python_module_summary
|
2bbf9f30e0fbfe302e7e6c429754fa7bf4bfc411
|
4e36f6f5f6abed10fc06b16b0ed7c12cde7746d0
|
refs/heads/master
| 2021-01-20T03:54:38.105298
| 2019-01-07T07:28:36
| 2019-01-07T07:28:36
| 101,373,212
| 2
| 0
| null | 2018-04-15T05:56:45
| 2017-08-25T06:28:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class ScrapydoubanSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"1160177283@qq.com"
] |
1160177283@qq.com
|
d2b1b651ebf6b8b808ed94202ed67b4c9425c296
|
e44d00ffcea03f8656c40b3d4d993d51a38af3b0
|
/leetcode/July/J15_ReverseWords.py
|
7c56ae8d6499aa463fa5c626d3acfd51ddc0ed3f
|
[] |
no_license
|
Ayushmanglani/competitive_coding
|
d6beec4f2b24aef34ea44c3a4a72074985b4a766
|
12325b09ae2bc6b169578b6a0a091069e14c9227
|
refs/heads/master
| 2023-06-12T04:43:41.130774
| 2021-07-03T13:01:37
| 2021-07-03T13:01:37
| 262,079,363
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
class Solution:
def reverseWords(self, s: str) -> str:
s = s.split()
return(" ".join(s[::-1]))
#method 2
class Solution:
def reverseWords(self, s: str) -> str:
s = s.split(" ")
s = s[::-1]
res = ""
for i in s:
if i != "":
res += i + " "
return(res.strip())
|
[
"ayush.manglani@gmail.com"
] |
ayush.manglani@gmail.com
|
b8d73b63c2697f898dd70e716bd7a8aef651e60c
|
6bd71bdfe9234e5e6de90bb40b6cd8d3e25ca6d2
|
/Tier3-Data/ViaSRM/copy-files-SRM-2-SRM.py
|
ff8770089328f651fa01741ca337dc1b036f041c
|
[] |
no_license
|
andres0sorio/CMSWork
|
f1f30a12bf43eb688ef9e95c53c94fe32fc7fe66
|
81e60a0a9b70cd2ae01d17b15be386a6cd925416
|
refs/heads/master
| 2021-01-22T13:12:16.094247
| 2015-10-26T04:47:12
| 2015-10-26T04:47:12
| 9,710,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
#!/usr/bin/python
import os,sys
import string
from optparse import OptionParser
# ...
# Andres Osorio - aosorio@uniandes.edu.co
# ...
#-----------------------------------------------------
#-----------------------------------------------------
parser = OptionParser()
parser.add_option("-f", type = "string", dest="infile",
help="List of LFNs" )
parser.add_option("-d", type = "string", dest="debug",
help="Are you in debug mode?" )
(options, args) = parser.parse_args()
if options.infile is None:
parser.error("please provide a file")
if options.debug is None:
parser.error("please, are you in debug mode?")
#-----------------------------------------------------
#-----------------------------------------------------
infile = options.infile
debug = options.debug
print 'Copying files: '
fh = open(infile)
nfiles = 0
#cpmethod = 'srmcp '
#cpopts = '-srm_protocol_version=2 -use_urlcopy_script=true -urlcopy= ${SRM_PATH}/sbin/url-copy.sh -debug'
cpmethod = 'lcg-cp '
cpopts = ''
#............... TO
tprefix = ' srm://moboro.uniandes.edu.co:8446/srm/managerv2\?SFN=/dpm/uniandes.edu.co/home/cms'
tdir = '/user/a/aosorio/gridfiles/RAW/Run2011B/L1Accept/'
#............... FROM
sprefix = ' srm://cmssrm.fnal.gov:8443/srm/managerv2\?SFN=/11'
while 1:
line = fh.readline()
if not line:
break
lfn = line[8:-3]
info = lfn.split('/')
filename = info[-1]
source = sprefix + lfn
target = tprefix + tdir + filename
command = cpmethod + cpopts + source + target
print command
if debug == 'no':
os.system(command)
nfiles+=1
if debug == 'yes':
break
print 'Done. Total files: ', (nfiles-1)
|
[
"osorio.af@gmail.com"
] |
osorio.af@gmail.com
|
9bf77a66acd94e715057b64921810fafc903c148
|
ce68e6f989ebf72becce3638dc920fc60edec45c
|
/SWEA/D3/SWEA_2814_최장경로.py
|
6082630ee9fd834346af54032643eb3679e71238
|
[] |
no_license
|
hyeinkim1305/Algorithm
|
103513e502241a619a60e6663ed8346e5c574ebc
|
93541b01fab0a6ceb6f9dd06a7c049c8b57d94f9
|
refs/heads/master
| 2023-05-09T19:07:58.280912
| 2021-06-10T01:51:29
| 2021-06-10T01:51:29
| 330,079,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
# D3
# 두 정점 사이에 여러 개의 간선 존재?
def dfs(idx, cnt):
global max_cnt
vis[idx] = 1
if cnt > max_cnt:
max_cnt = cnt
for j in range(1, N+1):
if idx != j and adj[idx][j] == 1:
if vis[j] == 0:
dfs(j, cnt+1)
# 아래 줄이 없으면 한번 씩만 방문함
# 아래 줄을 쓰게 되면 각 정점들을 시작점으로 돌 수 있음
vis[idx] = 0 # 두번째 예시에서, 3에서 시작해서 끝까지 가는 경로를 돌게 됨
#### 느낌에, 출발점이 달라질 수 있는 경우 방문배열을 취소하는 듯
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
adj = [[0] * (N+1) for _ in range(N+1)]
vis = [0] * (N+1) # 각 정점 방문 배열
max_cnt = -1
# 인접행렬 구성성
for _ in range(M):
u, v = map(int, input().split())
adj[u][v] = 1
adj[v][u] = 1
for i in range(1, N+1): # 각 정점에서 시작
if vis[i] == 0:
dfs(i, 1) # 1 : 정점 개수수
print('#{} {}'.format(tc, max_cnt))
'''
2
1 0
3 2
1 2
3 2
'''
'''
1
6 5
1 2
1 3
2 4
2 5
2 6
'''
|
[
"haileyyy1305@gmail.com"
] |
haileyyy1305@gmail.com
|
4727b92f82d006d2db8a575d76f06b2f66888c5a
|
09912a852e0e20d6a475ef904724f80072a68359
|
/eds/MemsIPE/server/openmtc-server/src/openmtc_server/Event.py
|
6a3ca161a5ec34a44d4d93e69312d5e4a98fb703
|
[
"Apache-2.0"
] |
permissive
|
elastest/elastest-device-emulator-service
|
034aa19438383df0975bf86d49e231342d63002f
|
f512355c5fde6bf027d23558e256b96e2296e0f2
|
refs/heads/master
| 2021-03-09T15:13:30.676138
| 2020-01-13T12:02:02
| 2020-01-13T12:02:02
| 91,440,225
| 3
| 9
|
Apache-2.0
| 2018-12-03T14:59:27
| 2017-05-16T09:26:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,910
|
py
|
from abc import abstractmethod, ABCMeta
from futile import issubclass as safe_issubclass
from futile.logging import LoggerMixin
from openmtc.model import Resource
class Event(LoggerMixin):
__metaclass__ = ABCMeta
@abstractmethod
def fire(self, *event_data):
raise NotImplementedError()
@abstractmethod
def register_handler(self, handler, *args, **kw):
raise NotImplementedError()
class EventSpec(object):
__metaclass__ = ABCMeta
@abstractmethod
def matches(self, item):
raise NotImplementedError()
class BasicEvent(Event):
def __init__(self):
super(BasicEvent, self).__init__()
self._handlers = []
def _add_handler_data(self, data):
handler = data
if handler in self._handlers:
self.logger.warn("Handler %s is already registered", handler)
else:
self._handlers.append(handler)
def register_handler(self, handler, **kw):
self._add_handler_data(handler)
def _execute_handler(self, handler, *event_data):
self.logger.debug("Running handler %s with %s", handler, event_data)
try:
handler(*event_data)
except Exception:
self.logger.exception("Error in event handler")
self.logger.debug("handler %s finished", handler)
def _fired(self, *event_data):
for handler in self._handlers:
self._execute_handler(handler, *event_data)
def fire(self, *event_data):
self.logger.debug("Fired: %s with %s", self, event_data)
self._fired(*event_data)
class ResourceTreeEvent(BasicEvent):
def _add_handler_data(self, data):
resource_type = data[0]
handler = data[1]
# TODO: kca: error messages
if resource_type is not None and not safe_issubclass(resource_type,
Resource):
raise TypeError(resource_type)
if not callable(handler):
raise TypeError(handler)
if data in self._handlers:
self.logger.warn("Handler %s is already registered for type %s",
handler, resource_type or "<all>")
else:
self._handlers.append(data)
def register_handler(self, handler, resource_type=None, **kw):
self._add_handler_data((resource_type, handler))
def _execute_handler(self, data, *event_data):
handler = data[1]
self.logger.debug("Running handler %s with %s", handler, event_data)
handler(*event_data)
self.logger.debug("handler finished")
def _fired(self, resource_type, *event_data):
for data in self._handlers:
handled_type = data[0]
if handled_type is None or issubclass(resource_type, handled_type):
self._execute_handler(data, *event_data)
else:
pass
|
[
"sro"
] |
sro
|
a4977e1faa3a4d326cda4f0f40893a477d82fba0
|
9613620632d8a60afa1ac66a752f2eb543a8530d
|
/src/posts/apps.py
|
139fec2d7e31d75c47e310e121e5c3e2843e2d5b
|
[] |
no_license
|
loristron/iSocial
|
72dc48b36ff1959d98fdb1d4df66495575f9d48a
|
417d7a344a9a078d76380c221d32860d8775f36d
|
refs/heads/master
| 2023-02-24T15:46:17.450470
| 2021-02-03T00:52:26
| 2021-02-03T00:52:26
| 335,385,276
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
from django.apps import AppConfig
class PostsConfig(AppConfig):
name = 'posts'
verbose_name = 'Posts, Comments and Likes'
|
[
"loremmiranda@gmail.com"
] |
loremmiranda@gmail.com
|
a9fd455aafe7b51c2e12403c2de811af29e4df85
|
a0947c2778742aec26b1c0600ceca17df42326cd
|
/Python/Web/72of79DB.py
|
9cd00e6e182ec0bd2fd0d3e52465a807cc2c731d
|
[] |
no_license
|
JohnCDunn/Course-Work-TTA
|
5758319d4607114914ba9723328658bed8fb2024
|
8c4f60d51007dac2ac4cceb84b0f9666e143c0d7
|
refs/heads/master
| 2021-01-10T16:37:02.609879
| 2016-02-01T18:05:38
| 2016-02-01T18:05:38
| 49,983,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
import sqlite3
conn = sqlite3.connect('time_database.db')
c = conn.cursor()
c.execute('''CREATE TABLE time_table (last_update text, last_timestamp)''')
c.execute("INSERT INTO time_table VALUES ('1', '20150101010101')")
conn.commit()
conn.close()
|
[
"JohnClydeDunn@Gmail.com"
] |
JohnClydeDunn@Gmail.com
|
5807b840bdeeec376fc32e131f19b981354fc4c6
|
735bc5756b554009c552844616183b2bcf9ac50b
|
/sauna/reload/interfaces.py
|
3b5b4b263f04e0e9abe866d5e57392c026b534f8
|
[
"ZPL-2.1"
] |
permissive
|
collective/sauna.reload
|
31a2e3be956a9ef2168016e9f199c4b3d1c0b052
|
e12a0a9e01204de324ab934aec5754773ac30bd6
|
refs/heads/master
| 2023-03-22T11:29:17.651087
| 2018-10-18T11:15:02
| 2018-10-18T11:15:02
| 2,267,806
| 10
| 1
| null | 2014-10-19T14:24:33
| 2011-08-25T12:40:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 University of Jyväskylä and Contributors.
#
# All Rights Reserved.
#
# Authors:
# Esa-Matti Suuronen <esa-matti@suuronen.org>
# Asko Soukka <asko.soukka@iki.fi>
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
"""ZCA Interface Definitions"""
from zope.interface import Interface, Attribute
class IDatabaseHooks(Interface):
"""
Provides storage-specific hooks to be called during the reload.
"""
def prepareForReload():
"""
Is called before the reload (before the process is killed)
to allow database connection be prepared for it.
"""
def resumeFromReload():
"""
Is called after the reload (after a new process has been spawned)
to allow database connection be restored.
"""
class INewChildForked(Interface):
"""
Emited immediately after new process is forked. No development packages
have been yet installed.
Useful if you want to do something before your code gets loaded.
Note that you cannot listen this event on a package that is marked for
reloading as it is not yet installed when this is fired.
"""
forkloop = Attribute('ForkLoop instance')
class INewChildIsReady(Interface):
"""
Emitted when all the development packages has been installed to the new
forked child.
Useful for notifications etc.
"""
forkloop = Attribute('ForkLoop instance')
|
[
"asko.soukka@iki.fi"
] |
asko.soukka@iki.fi
|
5e6b20b5c16d81d1f4479bc5620f2397b8e7486d
|
76b0fad21d63847896e09b0c4792637ae2b1c460
|
/src/cookbook_10.py
|
912825055e51f3438d430db57449e5901cd71756
|
[] |
no_license
|
hezhiqiang-book/python-cookbook
|
0718056aff37648246958fc38dffde66bc9df40a
|
a432835755a9a8703789890018561c4218a42259
|
refs/heads/master
| 2020-12-09T15:53:43.316368
| 2015-11-29T02:56:38
| 2015-11-29T02:56:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Counter(object):
def __init__(self):
self.dict = {}
def add(self, item):
count = self.dict.setdefault(item, 0)
self.dict[item] = count + 1
def counts(self, desc=None):
result = [[val, key] for (key, val) in self.dict.items()]
result.sort()
if desc:
result.reverse()
return result
if __name__ == '__main__':
'''Produces:
>>> Ascending count:
[[1, 'but'], [1, 'it'], [1, 'not.'], [1, 'now'], [1, 'test,'], [1, 'test.'], [1, 'was'], [2, 'Hello'], [2, 'a'], [2, 'is'], [2, 'there'], [2, 'this']]
Descending count:
[[2, 'this'], [2, 'there'], [2, 'is'], [2, 'a'], [2, 'Hello'], [1, 'was'], [1, 'test.'], [1, 'test,'], [1, 'now'], [1, 'not.'], [1, 'it'], [1, 'but']]
'''
sentence = "Hello there this is a test. Hello there this was a test, but now it is not."
words = sentence.split()
c = Counter()
for word in words:
c.add(word)
print "Ascending count:"
print c.counts()
print "Descending count:"
print c.counts(1)
|
[
"sixu05202004@gmail.com"
] |
sixu05202004@gmail.com
|
043315ef85ed72f41f1faddc9dab297d8fb220e9
|
bc435c8e7e8e418015e4848bb196adeac1343855
|
/tools/spider/getItem.py
|
b53c0b6bc94a57170f41aef5772d7fa577a6dee3
|
[] |
no_license
|
xiaoZ-hc/PwnMe
|
13a580268273dfaa2957c70fe485475fc8a42bb4
|
2269b9461f6bf8b5a6f600801a03bb60da3adadc
|
refs/heads/master
| 2020-09-05T00:58:28.343371
| 2018-08-01T08:47:15
| 2018-08-01T08:47:15
| 219,939,668
| 0
| 1
| null | 2019-11-06T07:44:20
| 2019-11-06T07:44:19
| null |
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
#!/usr/bin/env python
# encoding:utf-8
import requests
import bs4
import time
import sys
import os
headers = {
"Host" : "www.exploit-db.com",
"User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0",
"Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language" : "en-US,en;q=0.5",
"Accept-Encoding" : "gzip, deflate, br",
"Connection" : "keep-alive",
"Upgrade-Insecure-Requests" : "1",
"Pragma" : "no-cache",
"Cache-Control" : "no-cache"
}
url = sys.argv[1] # 第一个参数指定 url
pages_type = sys.argv[2] # 指定类型 主要为了确定要将文件保存在哪儿
# url = "https://www.exploit-db.com/exploits/39617/"
response = requests.get(url, headers=headers)
content = response.text.encode("UTF-8")
soup = bs4.BeautifulSoup(content, "html.parser")
def getType(soup):
result = ""
return result
def getCode(soup):
result = soup.find("pre").text
return result
def getAuthor(soup):
result = soup.findAll(attrs={"name":"author"})[0]["content"]
return result
def getDescribe(soup):
result = soup.findAll(attrs={"name":"description"})[0]["content"]
return result
def getTitle(soup):
result = soup.title.string
return result
def save(path, content):
codefile = open(path, 'w')
codefile.write(content.encode("UTF-8"))
codefile.close()
def formate(filename):
filename = filename.replace(" ","_")
filename = filename.replace("[","_")
filename = filename.replace("]","_")
filename = filename.replace("(","_")
filename = filename.replace(")","_")
filename = filename.replace("/","_")
filename = filename.replace("\\","_")
filename = filename.replace("{","_")
filename = filename.replace("}","_")
filename = filename.replace(".","_")
filename = filename.replace("\"","_")
filename = filename.replace("'","_")
if filename.endswith("_"):
filename = filename[:-1]
return filename
def compile(path):
command = "gcc -g -fno-stack-protector -z execstack " + path + " -o " + path[:-2] + ".out"
os.system(command)
def commit(comment):
command = "git add ."
os.system(command)
command = "git commit -m \"" + comment + "\""
print command
os.system(command)
print "Getting title..."
dirname = getTitle(soup)
dirname = formate(dirname)
dirname = "./cache/" + pages_type + "/" + dirname
file_content = getCode(soup)
print "Making dir..."
os.makedirs(dirname)
print "Saving file..."
save(dirname + "/" + "main.c", file_content)
comment = getDescribe(soup).replace("\"","")
commit(comment)
# print "Compiling..."
# compile(dirname + "/" + filename)
|
[
"wangyihanger@gmail.com"
] |
wangyihanger@gmail.com
|
a5a3e0a1a6d2bee081cc972ef984f9da33515eb9
|
63768dc92cde5515a96d774a32facb461a3bf6e9
|
/jacket/api/compute/openstack/compute/certificates.py
|
61aaa716f6f6ade322e2c850f6dd52375a387db5
|
[
"Apache-2.0"
] |
permissive
|
ljZM33nd/jacket
|
6fe9156f6f5789e5c24425afa7ce9237c302673d
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
refs/heads/master
| 2023-04-16T11:02:01.153751
| 2016-11-15T02:48:12
| 2016-11-15T02:48:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
import jacket.compute.cert.rpcapi
from jacket.compute import exception
from jacket.i18n import _
ALIAS = "os-certificates"
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_certificate_view(certificate, private_key=None):
return {
'data': certificate,
'private_key': private_key,
}
class CertificatesController(wsgi.Controller):
"""The x509 Certificates API controller for the OpenStack API."""
def __init__(self):
self.cert_rpcapi = jacket.compute.cert.rpcapi.CertAPI()
super(CertificatesController, self).__init__()
@extensions.expected_errors((404, 501))
def show(self, req, id):
"""Return certificate information."""
context = req.environ['compute.context']
authorize(context, action='show')
if id != 'root':
msg = _("Only root certificate can be retrieved.")
# TODO(oomichi): This seems a HTTPBadRequest case because of the
# above message. This will be changed with a microversion in the
# future.
common.raise_feature_not_supported(msg=msg)
try:
cert = self.cert_rpcapi.fetch_ca(context,
project_id=context.project_id)
except exception.CryptoCAFileNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'certificate': _translate_certificate_view(cert)}
# NOTE(gmann): Here should be 201 instead of 200 by v2.1
# +microversions because the resource certificate has been created
# completely when returning a response.
@extensions.expected_errors(())
def create(self, req, body=None):
"""Create a certificate."""
context = req.environ['compute.context']
authorize(context, action='create')
pk, cert = self.cert_rpcapi.generate_x509_cert(context,
user_id=context.user_id, project_id=context.project_id)
return {'certificate': _translate_certificate_view(cert, pk)}
class Certificates(extensions.V21APIExtensionBase):
"""Certificates support."""
name = "Certificates"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(ALIAS,
CertificatesController(),
member_actions={})]
return resources
def get_controller_extensions(self):
return []
|
[
"nkapotoxin@gmail.com"
] |
nkapotoxin@gmail.com
|
bcb2e8de18e2d1eac6fd24c457f468bfab851648
|
6be845bf70a8efaf390da28c811c52b35bf9e475
|
/windows/Resources/Dsz/PyScripts/Lib/dsz/mca_dsz/file/cmd/get/types.py
|
869ee75deb26cb39c8dca695a2111e7c46c17f43
|
[] |
no_license
|
kyeremalprime/ms
|
228194910bf2ed314d0492bc423cc687144bb459
|
47eea098ec735b2173ff0d4e5c493cb8f04e705d
|
refs/heads/master
| 2020-12-30T15:54:17.843982
| 2017-05-14T07:32:01
| 2017-05-14T07:32:01
| 91,180,709
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: types.py
from types import *
MSG_KEY_PARAMS = 65536
MSG_KEY_PARAMS_RAW_INDEX = 65537
MSG_KEY_PARAMS_OFFSET = 65538
MSG_KEY_PARAMS_BYTES_TO_READ = 65539
MSG_KEY_PARAMS_MAX_FILES = 65540
MSG_KEY_PARAMS_CHUNK_SIZE = 65541
MSG_KEY_PARAMS_FLAGS = 65542
MSG_KEY_PARAMS_DATE_TYPE = 65543
MSG_KEY_PARAMS_AGE = 65544
MSG_KEY_PARAMS_AFTER_TIME = 65545
MSG_KEY_PARAMS_BEFORE_TIME = 65546
MSG_KEY_PARAMS_MASK = 65547
MSG_KEY_PARAMS_PATH = 65548
MSG_KEY_PARAMS_MINIMUM_SIZE = 65549
MSG_KEY_PARAMS_MAXIMUM_SIZE = 65550
MSG_KEY_PARAMS_FILE_PROVIDER = 65551
MSG_KEY_RESULT_FILE_INFO = 131072
MSG_KEY_RESULT_FILE_INFO_INDEX = 131073
MSG_KEY_RESULT_FILE_INFO_FILE_SIZE = 131074
MSG_KEY_RESULT_FILE_INFO_CREATE_TIME = 131075
MSG_KEY_RESULT_FILE_INFO_ACCESS_TIME = 131076
MSG_KEY_RESULT_FILE_INFO_MODIFY_TIME = 131077
MSG_KEY_RESULT_FILE_INFO_OPEN_STATUS = 131078
MSG_KEY_RESULT_FILE_INFO_OFFSET = 131079
MSG_KEY_RESULT_FILE_INFO_NAME = 131080
MSG_KEY_RESULT_FILE_INFO_FLAGS = 131081
MSG_KEY_RESULT_DATA = 196608
MSG_KEY_RESULT_DATA_INDEX = 196609
MSG_KEY_RESULT_DATA_BUFFER = 196610
MSG_KEY_RESULT_DONE = 262144
MSG_KEY_RESULT_DONE_INDEX = 262145
|
[
"kyeremalprime@gmail.com"
] |
kyeremalprime@gmail.com
|
a9fb728e80d05f1c60a425baa7e4130d5f496aa1
|
22b363b0a4d67427d4746414090d6fef701fd3b9
|
/src/triage/__init__.py
|
f148179bcdd93eca35d0d477df9f079e43aba6e1
|
[] |
no_license
|
Barbarioli/triage
|
12557dd410def01291da355051966b4d2e5885b4
|
5d1ef41f1d89b150b30fae0c300028ef8dc39321
|
refs/heads/master
| 2020-03-13T10:35:48.469855
| 2018-04-14T02:29:03
| 2018-04-14T02:29:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
# -*- coding: utf-8 -*-
__author__ = """Center for Data Science and Public Policy"""
__email__ = 'datascifellows@gmail.com'
__version__ = '2.2.0'
|
[
"tristan.h.crockett@gmail.com"
] |
tristan.h.crockett@gmail.com
|
b13b2d3ef3319d540e3e1ead4ca55bb548c596ab
|
3bb4d1c4060a8d0f30cbfa01328a3b1520ce1bd5
|
/apps/system/migrations/0002_auto_20190404_2012.py
|
5c220e6021475f5576c584f224796a70d5180679
|
[] |
no_license
|
Chen320048/HuoChenGuang_Files
|
8750bc7afe871128d7ae526741da83a2ac485ce4
|
f4f847572a69ecad1f8ab22c7364cbffe70571fc
|
refs/heads/master
| 2020-05-31T09:05:37.257383
| 2019-06-04T13:18:47
| 2019-06-04T13:18:47
| 190,204,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2019-04-04 20:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('system', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bizlog',
options={'default_permissions': (), 'ordering': ['-id'], 'permissions': (('view_visitor', '\u8bbf\u95ee\u7ec8\u7aef\u7edf\u8ba1'), ('manage_version', '\u7248\u672c\u7ba1\u7406'), ('view_feedback', '\u5efa\u8bae\u53cd\u9988'), ('view_log', '\u7cfb\u7edf\u65e5\u5fd7'), ('manage_vcode', '\u7cfb\u7edf\u9a8c\u8bc1\u7801\u7ba1\u7406')), 'verbose_name': '\u7cfb\u7edf'},
),
migrations.AlterModelOptions(
name='feedback',
options={'default_permissions': (), 'ordering': ['-id'], 'verbose_name': '\u7528\u6237\u53cd\u9988'},
),
migrations.AlterModelOptions(
name='vcode',
options={'default_permissions': (), 'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='version',
options={'default_permissions': (), 'ordering': ['-id'], 'verbose_name': '\u7248\u672c\u4fe1\u606f'},
),
migrations.AlterModelOptions(
name='visitor',
options={'default_permissions': (), 'ordering': ['-id']},
),
]
|
[
"123456@qq.com"
] |
123456@qq.com
|
f462aa831af9c6ef63296255f49b337746e77814
|
a560269290749e10466b1a29584f06a2b8385a47
|
/Notebooks/py/yuikitaml/method-examples/method-examples.py
|
bbf85edccd7069000af9984fb7999be1f545bd7f
|
[] |
no_license
|
nischalshrestha/automatic_wat_discovery
|
c71befad1aa358ae876d5494a67b0f4aa1266f23
|
982e700d8e4698a501afffd6c3a2f35346c34f95
|
refs/heads/master
| 2022-04-07T12:40:24.376871
| 2020-03-15T22:27:39
| 2020-03-15T22:27:39
| 208,379,586
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
#!/usr/bin/env python
# coding: utf-8
# ## Method examples quick look
# Using Titanic dataset
# In[ ]:
import pandas as pd
import numpy as np
import re
import sklearn
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import warnings
warnings.filterwarnings('ignore')
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
GradientBoostingClassifier, ExtraTreesClassifier)
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
PassengerId = test['PassengerId']
train.head(3)
# In[ ]:
# Count NaN
train.Cabin.isnull().sum()
# In[ ]:
# Numerise Sex
result = train.Sex.map({'male': 0, 'female': 1})
result.head(3)
# In[ ]:
# Fill na
result = train.Cabin.fillna('S')
result.head(3)
# In[ ]:
|
[
"bitsorific@gmail.com"
] |
bitsorific@gmail.com
|
a813e9d7480febc65bed94dcd2c611f754601bb5
|
d28d47bc1d25161629d2174a465ae3eb38e02802
|
/myapp/migrations/0002_auto_20190904_1909.py
|
86cd65d9520221dee1b35b5c4957107f4966576a
|
[] |
no_license
|
Lider-neuromedia/opx-opensource-web
|
4df776607d701f0fe89ad1fb178502ec089678be
|
b44bb24830e52df9a641181f1ab644e9da08ff1d
|
refs/heads/master
| 2022-06-05T01:25:28.312145
| 2020-04-29T22:23:41
| 2020-04-29T22:23:41
| 260,053,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# Generated by Django 2.2.4 on 2019-09-04 19:09
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='usuario',
old_name='userpassword',
new_name='password',
),
migrations.AlterField(
model_name='accion',
name='accionid',
field=models.UUIDField(default=uuid.UUID('50258478-ee9f-42e7-90bc-4e70fc047c0b'), editable=False, primary_key=True, serialize=False),
),
]
|
[
"="
] |
=
|
ea8d95bb65ad2ff1b41bf10867f308283fee4d22
|
ee86ad4b38f6ba13f195246f14224ba781f933cc
|
/02_2차원배열/2차원 순회.py
|
bf7faa2b62022360c75ed078fa6eac2111d6d63e
|
[] |
no_license
|
yejikk/Algorithm
|
aed7adf00c1e32d21b735b3b34dc6cb75049f164
|
531f43305b3a23c824c9e153151b7280c1dc2535
|
refs/heads/master
| 2020-04-17T06:17:28.961656
| 2019-11-16T08:02:49
| 2019-11-16T08:02:49
| 166,318,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
arr = [[0,1,2,3],
[4,5,6,7],
[8,9,10,11]]
# i : 행의 좌표
n = len(arr)
# j : 열의 좌표,
m = len(arr[0])
# 행 우선 순회
for i in range(len(arr)):
for j in range(len(arr[i])):
print(arr[i][j], end=' ')
print()
print()
# 열 우선 순회
for j in range(len(arr[0])):
for i in range(len(arr)):
print(arr[i][j], end=' ')
print()
print()
# 지그재그 순회
for i in range(len(arr)) :
for j in range(len(arr[0])):
print(arr[i][j + (m-1-2*j) * (i%2)], end=' ')
print()
print()
|
[
"dpwl7484@gmail.com"
] |
dpwl7484@gmail.com
|
ded0ba095bd9d4078345e5fdce6768005fe044fe
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2453486_0/Python/Zolmeister1/tictac.py
|
2879fa0314f0c731fcb36909d595b1a0d320f8b9
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,997
|
py
|
# Google Code Jam "Tic-Tac-Toe-Tomek" checker (Python 2).
# Each game is a 4x4 board of 'X', 'O', 'T' (joker) and '.' (empty);
# games are separated by blank lines, and the leading [2:] drops the
# test-case-count line from the input.
fin = open('tictac.in').read()[2:].strip().split('\n\n')
fout = open('tictac.out','w')
def pp(s):  # print one case's verdict and mirror it to the output file
    print 'Case #%d: ' % (cnt) +s
    fout.write('Case #%d: ' % (cnt) +s+'\n')
cnt=0
for game in fin:
    cnt+=1
    rows = game.split('\n')
    cont = False
    # Row check: a side wins a line with 4 own marks, or 3 own marks
    # plus the single 'T' joker.
    for r in rows:
        os = r.count('O')
        xs = r.count('X')
        ts = r.count('T')
        if os==4 or os==3 and ts==1:
            pp('O won')
            cont = True
            break
        elif xs==4 or xs==3 and ts==1:
            pp('X won')
            cont = True
            break
    if cont:
        continue
    # Column check: transpose the board so each column becomes a tuple.
    cols = zip(*rows)
    for r in cols:
        os = r.count('O')
        xs = r.count('X')
        ts = r.count('T')
        if r.count('O')==4 or r.count('O')==3 and r.count('T')==1:
            pp('O won')
            cont = True
            break
        elif r.count('X')==4 or r.count('X')==3 and r.count('T')==1:
            pp('X won')
            cont=True
            break
    if cont:
        continue
    # Diagonal check: coordinates of the two main diagonals.
    leftDiag = [
        [0,0],
        [1,1],
        [2,2],
        [3,3]
    ]
    rightDiag = [
        [0,3],
        [1,2],
        [2,1],
        [3,0]
    ]
    left = []
    for pos in leftDiag:
        left.append(cols[pos[0]][pos[1]])
    right = []
    for pos in rightDiag:
        right.append(cols[pos[0]][pos[1]])
    osL = left.count('O')
    xsL = left.count('X')
    tsL = left.count('T')
    osR = right.count('O')
    xsR = right.count('X')
    tsR = right.count('T')
    if osL==4 or osL==3 and tsL==1:
        pp('O won')
        continue
    elif xsL==4 or xsL==3 and tsL==1:
        pp('X won')
        continue
    if osR==4 or osR==3 and tsR==1:
        pp('O won')
        continue
    elif xsR==4 or xsR==3 and tsR==1:
        pp('X won')
        continue
    # No winner: unfinished if any empty cell remains, otherwise a draw.
    for r in cols:
        if r.count('.') > 0:
            pp('Game has not completed')
            break
    else:
        pp('Draw')
fout.close()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
bece03e4588034620ff88252c2a43363cd91a680
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Lib/objc/_MaterialKit.py
|
e6f20a6022f0fd5e3b40448fc97fc45454fa77e7
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832
| 2022-07-20T14:38:45
| 2022-07-20T14:38:45
| 148,944,721
| 884
| 157
|
MIT
| 2023-02-26T21:34:04
| 2018-09-15T22:29:07
|
C
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
"""
Classes from the 'MaterialKit' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
MTVisualStyling = _Class("MTVisualStyling")
MTVisualStylingProvider = _Class("MTVisualStylingProvider")
MTMappedImageCache = _Class("MTMappedImageCache")
MTLumaDodgePillSettings = _Class("MTLumaDodgePillSettings")
MTLumaDodgePillStyleSettings = _Class("MTLumaDodgePillStyleSettings")
MTLumaDodgePillDomain = _Class("MTLumaDodgePillDomain")
_MTVisualStylingVibrancyEffect = _Class("_MTVisualStylingVibrancyEffect")
MTMaterialShadowView = _Class("MTMaterialShadowView")
MTMaterialView = _Class("MTMaterialView")
_MTStaticVibrantColorMaterialView = _Class("_MTStaticVibrantColorMaterialView")
MTStylingProvidingSolidColorView = _Class("MTStylingProvidingSolidColorView")
_MTLumaDodgePillLowQualityEffectView = _Class("_MTLumaDodgePillLowQualityEffectView")
MTPillView = _Class("MTPillView")
MTStaticColorPillView = _Class("MTStaticColorPillView")
MTLumaDodgePillView = _Class("MTLumaDodgePillView")
MTShadowView = _Class("MTShadowView")
|
[
"adrilabbelol@gmail.com"
] |
adrilabbelol@gmail.com
|
08786c00f9edae0282a030938082a24985a78bfb
|
3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d
|
/django-rest-framework/day132_01/venv/Scripts/pip-script.py
|
30ef622f420166e537def6d1a18be62e0beda706
|
[] |
no_license
|
yingkun1/python-django
|
a3084460a83682f3e0848d5b40c881f93961ecc2
|
08c9ed3771eb245ee9ff66f67cf28730d2675bbe
|
refs/heads/master
| 2022-12-11T12:33:20.788524
| 2019-06-12T09:30:59
| 2019-06-12T09:30:59
| 189,977,625
| 1
| 0
| null | 2022-11-22T02:57:01
| 2019-06-03T09:43:30
|
Python
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!E:\python-django\django-rest-framework\day132_01\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools console-script shim: resolves the pinned pip
# distribution and hands control to its 'pip' entry point.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py(w)" / ".exe" wrapper suffix so pip sees a
    # clean program name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
|
[
"925712087@qq.com"
] |
925712087@qq.com
|
10adf812c3b3204b61a65a1935b9b8ed4ed9a65d
|
2d1fdf69b3f00d1840f2e8956b91bd97c4777c24
|
/mohet/mohet_app/migrations/0002_auto_20210604_2126.py
|
210c1f080297d77c0f3631e679d1324652951377
|
[] |
no_license
|
DiyarBarham/project-mohet
|
d8a4df41ee19365dd9a6e4c756477f3ccaffd60f
|
03b92cd28fa315322d76f5e1632b5403f117fa25
|
refs/heads/master
| 2023-05-25T11:47:05.351526
| 2021-06-06T14:53:31
| 2021-06-06T14:53:31
| 374,387,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
# Generated by Django 2.2.4 on 2021-06-04 18:26

from django.db import migrations, models


class Migration(migrations.Migration):
    # Normalizes every model's primary key to an implicit AutoField 'id'
    # (same AlterField repeated per model).

    dependencies = [
        ('mohet_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='media',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='mediatype',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='role',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='subscription',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='user',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
[
"diyarbarham@gmail.com"
] |
diyarbarham@gmail.com
|
3463202e5e93b0f832989da3363bc365a09d764d
|
07917881310fc81d85a2cbdf27c9b3c4fa03c694
|
/python1812/python_4/四阶段/day01/code/python3code/job51.py
|
959f5d9501613b5eb6c690f4ff91b6599ea99e23
|
[] |
no_license
|
zaoyuaner/Learning-materials
|
9bc9a127d1c6478fb6cebbb6371b1fd85427c574
|
1f468a6f63158758f7cbfe7b5df17f51e3205f04
|
refs/heads/master
| 2020-05-18T11:38:45.771271
| 2019-05-20T09:07:44
| 2019-05-20T09:07:44
| 184,384,050
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,706
|
py
|
# Scrapes 51job.com search results for "python" jobs and prints each
# posting's title markup. Runs its network request at import time.
import re
import urllib.request

headers = {
    "User-Agent":"User-Agent, Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
}
# 51job search endpoint (pre-built query string for "python" jobs).
url = "https://search.51job.com/list/040000%252C010000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="
# Fetch the page: build the request object
req = urllib.request.Request(url,headers=headers)
# Get the server response
response = urllib.request.urlopen(req)
# print(response)
# Decode -- 51job serves GBK-encoded pages
html = response.read().decode('gbk')
# print(html)
# print(type(html))
# Parse: grab everything inside the result-count container
jobnum_re = '<div class="rt">(.*?)</div>'
coms = re.compile(jobnum_re,re.S)
strs = coms.findall(html)[0]
# print(strs)
# Greedy vs. non-greedy matching:
# appending ? turns a greedy quantifier into a non-greedy one
# Extract the bare digits (total job count)
num_re = '.*?(\d+).*'
num = re.findall(num_re,strs)
# print(num)
# print(int(num[0]))
# Grab each job-posting block
jobname_re = '<div class="el">(.*?)</div>'
joblist = re.findall(jobname_re,html,re.S)
# #print(joblist[0])  # first posting's raw markup (multiple tags)
#
# # Match the job title inside one posting
# jobnameone_re = 'onmousedown="">(.*?)</a>'
# jobnameone_list = re.findall(jobnameone_re,joblist[1],re.S)
# print(jobnameone_list[0].strip())
for job in joblist:
    jobnameone_re = 'onmousedown="">(.*?)</a>'
    jobnameone_list = re.findall(jobnameone_re, job, re.S)
    print(jobnameone_list)
    # print("Job title:", jobnameone_list[0].strip())
|
[
"13510647877@163.com"
] |
13510647877@163.com
|
685728b1b73a4e7b80a069a6247b6266c4e245ae
|
e16cf4d5dc7a0055892feee2397ac341a4c9b375
|
/data-structures/stacks_and_queues/test_stacks_and_queues.py
|
6ed1937c2e223db01050abe6469abfaee4309c5a
|
[
"MIT"
] |
permissive
|
arsummers/python-data-structures-and-algorithms
|
47935b75216594566a706083f91c6d71ae01a96c
|
30a488bd1100d8edac3b7fda73f7d7d999c61bfc
|
refs/heads/master
| 2021-06-18T08:33:36.210039
| 2020-09-24T23:23:32
| 2020-09-24T23:23:32
| 195,889,918
| 3
| 1
|
MIT
| 2021-04-20T18:35:42
| 2019-07-08T21:42:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,307
|
py
|
import pytest
from stacks_and_queues import Stack, Queue
# --- Stack tests ---

# Pushing a single value leaves it on top.
def test_stack_push_one():
    stack = Stack()
    stack.push('a')
    assert stack.peek() == 'a'


# Pushing several values leaves the last one on top.
def test_stack_push_multiple():
    stack = Stack()
    for value in ('a', 'b', 'c'):
        stack.push(value)
    assert stack.peek() == 'c'


# Popping exposes the next value.
def test_stack_pop():
    stack = Stack()
    for value in ('a', 'b', 'c'):
        stack.push(value)
    stack.pop('a')
    assert stack.peek() == 'b'


# Popping everything empties the stack.
def test_pop_stack_to_empty():
    stack = Stack()
    for value in ('a', 'b', 'c'):
        stack.push(value)
    for value in ('a', 'b', 'c'):
        stack.pop(value)
    assert stack.peek() is None


# Peek reports the top without removing it.
def test_peek_next_item():
    stack = Stack()
    for value in ('a', 'b', 'c'):
        stack.push(value)
    assert stack.peek() == 'c'


# A fresh stack peeks as empty.
def test_instantiate_empty_stack():
    stack = Stack()
    assert stack.peek() is None


# --- Queue tests ---

# Enqueuing a single value makes it the front.
def test_enqueue_single_value():
    queue = Queue()
    queue.enqueue('a')
    assert queue.peek() == 'a'


# Enqueuing several values keeps the first at the front.
def test_enqueue_multiple_values():
    queue = Queue()
    for value in ('a', 'b', 'c'):
        queue.enqueue(value)
    assert queue.peek() == 'a'


# Dequeue returns the front value (after a dequeue on the empty queue).
def test_dequeue():
    queue = Queue()
    queue.dequeue()
    for value in ('a', 'b', 'c', 'd', 'e', 'g'):
        queue.enqueue(value)
    assert queue.dequeue() == 'a'


# Peek reports the front without removing it.
def test_peek_into_queue():
    queue = Queue()
    for value in ('a', 'b', 'c', 'd', 'e', 'g'):
        queue.enqueue(value)
    assert queue.peek() == 'a'


# Dequeuing everything empties the queue.
def test_dequeue_queue_to_empty():
    queue = Queue()
    for value in ('a', 'b', 'c'):
        queue.enqueue(value)
    for _ in range(3):
        queue.dequeue()
    assert queue.peek() == None


# A fresh queue peeks as empty.
def test_instantiate_empty_queue():
    queue = Queue()
    assert queue.peek() is None
|
[
"aliyasummers1@gmail.com"
] |
aliyasummers1@gmail.com
|
88f49fdc3e4695c32dc863b2acf20931279d3de7
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part000476.py
|
79c655e3af493ac7bfbe5077aba07884817c5816
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,690
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher43221(CommutativeMatcher):
    # Machine-generated matchpy matcher (sympy rubi pipeline); do not edit
    # by hand -- the state numbers and variable names encode the generated
    # decision tree.
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, S(1)), Mul)
        ]),
        1: (1, Multiset({}), [
            (VariableWithCount('i2.2.1.2.2.2.1.0_1', 1, 1, S(1)), Mul),
            (VariableWithCount('i2.2.1.2.2.2.1.1', 1, 1, None), Mul)
        ]),
        2: (2, Multiset({}), [
            (VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.2.2.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Classic lazy singleton accessor.
        if CommutativeMatcher43221._instance is None:
            CommutativeMatcher43221._instance = CommutativeMatcher43221()
        return CommutativeMatcher43221._instance

    @staticmethod
    def get_match_iter(subject):
        # Generated state machine: tries to match `subject` against x**2,
        # yielding (pattern_index, substitution) pairs. The appendleft
        # calls restore the deques before backtracking/returning.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 43220
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 43222
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.2.2.2.1.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 43223
                    if len(subjects2) >= 1 and subjects2[0] == Integer(2):
                        tmp5 = subjects2.popleft()
                        # State 43224
                        if len(subjects2) == 0:
                            pass
                            # State 43225
                            if len(subjects) == 0:
                                pass
                                # 0: x**2
                                yield 0, subst1
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
861c710c6e89b0ffb3546483f63f4f2e2af85838
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/first_eye/world/old_problem/right_government/eye_or_case.py
|
e661a8e74622ff9a608787dce1a027c2b2be1f36
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
#! /usr/bin/env python


def small_company(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)


def government(str_arg):
    """Run the small_company step for *str_arg*, then print 'early_fact'."""
    small_company(str_arg)
    print('early_fact')


if __name__ == '__main__':
    government('week')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
aa00fe43e3785a75507fb2a0b3aaad991da96511
|
0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e
|
/regexlib/python_re2_test_file/regexlib_2890.py
|
bbf1cafd2ceabdcfdc8a3bcc7fb879bc369f6b85
|
[
"MIT"
] |
permissive
|
agentjacker/ReDoS-Benchmarks
|
c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
refs/heads/main
| 2023-05-10T13:57:48.491045
| 2021-05-21T11:19:39
| 2021-05-21T11:19:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
# 2890
# (([\w|\.]*)\s*={1}\s*(.*?))$
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"1"*10000+"!_1!1 _SLQ_1"
# ReDoS micro-benchmark: times regexlib pattern #2890 against growing
# attack strings, using the linear-time RE2 engine (aliased as `re`).
import re2 as re
from time import perf_counter

regex = """(([\w|\.]*)\s*={1}\s*(.*?))$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack input grows by 10000 chars per iteration; the suffix keeps
    # the match ambiguous so a backtracking engine would go polynomial.
    ATTACK = "" + "1" * i * 10000 + "!_1!1 _SLQ_1"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *10000}: took {DURATION} seconds!")
|
[
"liyt@ios.ac.cn"
] |
liyt@ios.ac.cn
|
e77c02bc990fe5e539985d34b266c7a0618a1fbb
|
b1ddae0f702f7af4a22ccf8e57eccb6778eaa8a5
|
/apps/users/migrations/0006_auto_20180703_0743.py
|
5623ef9fa7b291ce5b84c28a1155b44805a9c8f0
|
[] |
no_license
|
MoNaiZi/Mxoinline3
|
1cd1effa716bacbe4a7fc83c4687adc1fdbbea03
|
8d8ba1322fbaefcf8767160e1e2d05afc755fe5c
|
refs/heads/master
| 2020-03-17T00:03:41.868735
| 2018-07-11T00:48:09
| 2018-07-11T00:48:09
| 133,101,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-07-03 07:43
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Updates the choices on EmailVerifyRecord.send_type. The display
    # labels are Chinese for: register / reset password / change email
    # (runtime values, left untouched).

    dependencies = [
        ('users', '0005_auto_20180630_2043'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emailverifyrecord',
            name='send_type',
            field=models.CharField(choices=[('register', '注册'), ('forget', '找回密码'), ('update_email', '修改邮箱')], max_length=20),
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
6b65d35046b48be71a3df21e7ab47327a6e56a42
|
49c2492d91789b3c2def7d654a7396e8c6ce6d9f
|
/ROS/ros_archive/dyros_backup/original/catkin_ws_right/build/gps_test/catkin_generated/pkg.installspace.context.pc.py
|
562b7a65ea657b1e00388349bb7282ec2cc8d76f
|
[] |
no_license
|
DavidHan008/lockdpwn
|
edd571165f9188e0ee93da7222c0155abb427927
|
5078a1b08916b84c5c3723fc61a1964d7fb9ae20
|
refs/heads/master
| 2021-01-23T14:10:53.209406
| 2017-09-02T18:02:50
| 2017-09-02T18:02:50
| 102,670,531
| 0
| 2
| null | 2017-09-07T00:11:33
| 2017-09-07T00:11:33
| null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# CMake-substituted pkg-config context for the "gps_test" package
# (install space). The `... if "" != "" else []` guards collapse
# empty substitutions to an empty list.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgps_test".split(';') if "-lgps_test" != "" else []
PROJECT_NAME = "gps_test"
PROJECT_SPACE_DIR = "/home/dyros-vehicle/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"gyurse@gmail.com"
] |
gyurse@gmail.com
|
b5e7c3e35e02e537e325be2564d2639f88a3b296
|
98fe2f95d7fba400c592996681099014279bff70
|
/voomza/apps/books_common/tests/yearbook_signs.py
|
f38d5cbb32ad58c562791185ed125b22280af276
|
[] |
no_license
|
bcattle/monkeybook
|
51435a3c0416314597adcbb5f958c2761d92bfea
|
e01f2c371d124449f053ee37eb71ce9a64732e56
|
refs/heads/master
| 2016-09-06T13:28:23.332786
| 2013-03-22T02:04:15
| 2013-03-22T02:04:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
import random
from django.test import TestCase
class YearbookSignPaginationTestCase(TestCase):
    """Tests for paginating yearbook signs (short vs. long messages)."""

    short_sign = "Remember when we snuck into the pastry shop on 23rd street??? " \
                 "I wont' forget the epic nights out in 2012."
    long_sign = "Remember when we snuck into the pastry shop on 23rd street??? " \
                "I won't forget the epic nights out in 2012. I won't forget " \
                "the epic nights out in 2012. To many more in 2013!!"

    def get_signs(self, short=0, long=0):
        """Return a shuffled list with *short* short signs and *long* long signs."""
        signs = []
        # extend() instead of the original side-effect list comprehensions
        # ([signs.append(...) for n in ...]), which built throwaway lists.
        signs.extend(self.short_sign for _ in range(short))
        signs.extend(self.long_sign for _ in range(long))
        random.shuffle(signs)
        return signs

    def test_all_short(self):
        # Fixed: removed a leftover `import ipdb; ipdb.set_trace()` debugger
        # breakpoint that froze the test run waiting for interactive input
        # (and pulled in a non-stdlib dependency).
        signs = self.get_signs(short=8)
        # NOTE(review): `assign_signs_to_pages` is neither imported nor
        # defined in this module, so this test raises NameError as written.
        # TODO: import it from the pagination module under test.
        pages = assign_signs_to_pages(signs)
        # 2 pages: 6 on the first, 2 on the second
        self.assertEqual(len(pages), 2)
        self.assertEqual(len(pages[0]), 6)
        self.assertEqual(len(pages[1]), 2)

    def test_all_long(self):
        # Placeholder: real packing assertions not written yet.
        self.assertEqual(1 + 1, 2)

    def test_1_long(self):
        self.assertEqual(1 + 1, 2)

    def test_2_long(self):
        self.assertEqual(1 + 1, 2)

    def test_5_long(self):
        self.assertEqual(1 + 1, 2)
|
[
"bryan.cattle@gmail.com"
] |
bryan.cattle@gmail.com
|
9f0f9a18fd94e3ee3b6e016fcc1957d57a35bd41
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03805/s294798234.py
|
6299c897ff1f6ded8b87e4e179c3be4d195d7f46
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from itertools import permutations

# Count paths that visit every vertex exactly once, starting at vertex 1
# (0-indexed vertex 0). Competitive-programming script: reads all of
# stdin at import time.
N, M, *ab = map(int, open(0).read().split())  # N vertices, M edges, then M (a, b) pairs
g = [[0] * N for _ in range(N)]  # adjacency matrix, 0-indexed
for a, b in zip(*[iter(ab)] * 2):  # consume the flat edge list two ints at a time
    g[a - 1][b - 1] = 1
    g[b - 1][a - 1] = 1
ans = 0
for path in permutations(range(1, N)):
    path = [0] + list(path)  # fix the start at vertex 0
    # The permutation counts iff every consecutive pair is an edge.
    if all(g[v][nv] for v, nv in zip(path, path[1:])):
        ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ea31f37141bfa7d31525e6ecb85ea0031e0c6fa7
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.ILO/Sans_16/pdf_to_json_test_Latn.ILO_Sans_16.py
|
7498107868f61c7e89533ff42e20ed1ada2c1374
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# Smoke test: convert the UDHR sample PDF (Latn.ILO script, Sans 16pt)
# to JSON and dump the result. Runs at import time.
import pdf_to_json as p2j
import json

url = "file:data/multilingual/Latn.ILO/Sans_16/udhr_Latn.ILO_Sans_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True  # store image hashes only, keeping the JSON small
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
d94f312791ea3d44722cbec54149613205bd4fb5
|
1a2bf34d7fc1d227ceebf05edf00287de74259c5
|
/Django/Day08/Day08Django/Day08Django/wsgi.py
|
0968ce766de35b9635af8d718732d6bee6fea12b
|
[] |
no_license
|
lzn9423362/Django-
|
de69fee75160236e397b3bbc165281eadbe898f0
|
8c1656d20dcc4dfc29fb942b2db54ec07077e3ae
|
refs/heads/master
| 2020-03-29T18:03:47.323734
| 2018-11-28T12:07:12
| 2018-11-28T12:07:12
| 150,192,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
"""
WSGI config for Day08Django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Day08Django.settings")
application = get_wsgi_application()
|
[
"411121080@qq.com"
] |
411121080@qq.com
|
133c880358e31259569b80e438475e23bc74d029
|
172e2da04fc84d2fe366e79539c6ab06d50a580f
|
/backend/chocolate_23135/settings.py
|
804ba5930fcf2868173176c571c1fa7eafd441d5
|
[] |
no_license
|
crowdbotics-apps/chocolate-23135
|
ff3c3a0e0e9ae0eefb7811868cb9e1281af2981b
|
64060aef4a3a3bd1fb61c2fc457fdbd46b072ca6
|
refs/heads/master
| 2023-03-05T07:34:50.965348
| 2021-02-18T16:13:56
| 2021-02-18T16:13:56
| 317,985,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,018
|
py
|
"""
Django settings for chocolate_23135 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "chocolate_23135.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "chocolate_23135.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2786bd4521b775c949294da93fc52e7ff191eced
|
7ce2b2000cfefe8fbefc2271ebc7df2061c88194
|
/CAIL2020/sfzyzb/main.py
|
81c4fd79e0ccba4eebf1f25f8dfbbd916da8e606
|
[
"Apache-2.0"
] |
permissive
|
generalzgd/CAIL
|
f06d79acf42ac2188938c02087f7d07b9b43095c
|
57529e64ee2f602324a500ff9bed660ddcde10bb
|
refs/heads/master
| 2023-01-24T01:14:05.382525
| 2020-11-20T03:40:47
| 2020-11-20T03:40:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,135
|
py
|
"""Test model for SMP-CAIL2020-Argmine.
Author: Yixu GAO yxgao19@fudan.edu.cn
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import json
import os
from types import SimpleNamespace
import fire
import pandas
import pandas as pd
import torch
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate
from model import BertForClassification, RnnForSentencePairClassification, LogisticRegression
from utils import load_torch_model
LABELS = ['0', '1']
MODEL_MAP = {
'bert': BertForClassification,
'rnn': RnnForSentencePairClassification,
'lr': LogisticRegression
}
def main(in_file='/data/SMP-CAIL2020-test1.csv',
out_file='/output/result1.csv',
model_config='config/bert_config.json'):
"""Test model for given test set on 1 GPU or CPU.
Args:
in_file: file to be tested
out_file: output file
model_config: config file
"""
# 0. Load config
with open(model_config) as fin:
config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
device = torch.device('cuda')
# device = torch.device('cpu')
else:
device = torch.device('cpu')
#0. preprocess file
tag_sents = []
para_id = 0
with open(in_file, 'r', encoding='utf-8') as fin:
for line in fin:
sents = json.loads(line.strip())
text = sents['text']
sentences = [item['sentence'] for item in text]
for sent in sentences:
tag_sents.append((para_id, sent))
para_id += 1
df = pandas.DataFrame(tag_sents, columns=['para', 'content'])
df.to_csv("data/para_content_test.csv", columns=['para', 'content'], index=False)
# 1. Load data
data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
max_seq_len=config.max_seq_len,
model_type=config.model_type, config=config)
test_set = data.load_file("data/para_content_test.csv", train=False)
data_loader_test = DataLoader(
test_set, batch_size=config.batch_size, shuffle=False)
# 2. Load model
model = MODEL_MAP[config.model_type](config)
model = load_torch_model(
model, model_path=os.path.join(config.model_path, 'model.bin'))
model.to(device)
# 3. Evaluate
answer_list = evaluate(model, data_loader_test, device)
# 4. Write answers to file
df = pd.read_csv("data/para_content_test.csv")
idcontent_list = list(df.itertuples(index=False))
filter_list = [k for k,v in zip(idcontent_list, answer_list) if v]
df = pd.DataFrame(filter_list, columns=['para', 'content'])
df.to_csv(out_file, columns=['para', 'content'], index=False)
if __name__ == '__main__':
fire.Fire(main)
|
[
"bangtech@sina.com"
] |
bangtech@sina.com
|
4f14f1f037cb2454bf6a63efb237c3c8c970e2ab
|
26c4426d2c9cd10fd7d4a73609512e69e31b64ba
|
/html2ex/h2e_openpyxl.py
|
9dae85c7684b7fa2b0f2370c7e75e7a9298de7c7
|
[] |
no_license
|
KirillUdod/html2exc
|
550761213eb6edd7d3ea4787938cce65584606c3
|
60569f01822a15b2e5b6884a42774cd428953700
|
refs/heads/master
| 2021-01-15T17:07:05.906492
| 2016-01-06T11:51:38
| 2016-01-06T11:51:38
| 34,809,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,578
|
py
|
from openpyxl.cell import get_column_letter
from openpyxl.styles import Alignment, Font, Border, Side, PatternFill
from openpyxl.workbook import Workbook
from openpyxl import load_workbook, drawing
from lxml.html import document_fromstring, HTMLParser
# Mapping from the HTML <table border="..."> attribute value to an
# openpyxl border style name.
# Value must be one of set(['hair', 'medium', 'dashDot', 'dotted', 'mediumDashDot', 'dashed',
# 'mediumDashed', 'mediumDashDotDot', 'dashDotDot', 'slantDashDot', 'double', None, 'thick', 'thin'])
_BORDER_STYLE = {
    '0': None,
    '1': 'thin',
    '2': 'medium',
    '3': 'thick',
}

# Font size (in points) applied for each HTML heading tag found inside a cell.
_TEXT_SIZE = {
    'h1': 32,
    'h2': 24,
    'h3': 18,
    'h4': 16,
    'h5': 14,
    'h6': 11,
}

# RGB hex color strings (no leading '#') as expected by openpyxl styles.
BORDER_COLOR = '000000'
WHITE_COLOR = 'FFFFFF'
TH_COLOR = '00FFFF'  # background fill used for <th> header cells
BLACK_COLOR = '000000'
class Html2Excel(object):
    """Render HTML tables into a styled Excel worksheet using openpyxl.

    Typical usage: optionally load an existing workbook, then call
    append_html_table() one or more times, then save_wb().
    """

    def __init__(self):
        # (row, col) coordinates already occupied (including merged regions),
        # used to shift later cells of the same row to the right.
        self.list = []
        # NOTE(review): start_row/start_col appear unused inside this class;
        # kept for backward compatibility with any external callers.
        self.start_row = 0
        self.start_col = 0
        self.workbook = Workbook(encoding='utf8')
        self.worksheet = self.workbook.active

    def use_existing_wb(self, name_workbook):
        """Load workbook file *name_workbook* and make its active sheet current."""
        self.workbook = load_workbook(filename=name_workbook)
        self.worksheet = self.workbook.get_active_sheet()

    def create_new_sheet(self, name_sheet):
        """Append a new sheet titled *name_sheet* and make it the current sheet."""
        self.worksheet = self.workbook.create_sheet()
        self.worksheet.title = name_sheet

    def set_col_width(self, cols_width):
        """Apply column widths from a {1-based column index: width} mapping."""
        for col_i, width in cols_width.items():
            self.worksheet.column_dimensions[get_column_letter(col_i)].width = int(width)

    def add_logo(self, logo_filename):
        """Insert the image file *logo_filename* anchored at cell D3."""
        img = drawing.Image(logo_filename)
        img.anchor(self.worksheet.cell('D3'))
        self.worksheet.add_image(img)

    def append_html_table(self, html_string, start_row=1, start_col=1):
        """Write every <table> found in *html_string* onto the current sheet.

        :param html_string: raw HTML (bytes/str) containing one or more tables
        :param start_row: 1-based worksheet row where the first table begins
        :param start_col: 1-based worksheet column where each row begins
        :return: (last_row, last_col) — last row written and the start column
        """
        html_string = document_fromstring(html_string, HTMLParser(encoding='utf8'))
        last_row = start_row - 1
        last_col = start_col
        for table_el in html_string.xpath('//table'):
            # Border style depends only on the table element — hoist it out
            # of the per-cell loops.
            border_style = _BORDER_STYLE.get(table_el.get('border') or None)
            # Advance to the next free row before starting each table.
            last_row += 1
            for row_i, row in enumerate(table_el.xpath('./tr'), start=last_row):
                for col_i, col in enumerate(row.xpath('./td|./th'), start=last_col):
                    colspan = int(col.get('colspan', 0))
                    rowspan = int(col.get('rowspan', 0))
                    font_bold = False
                    font_size = 11
                    font_color = BLACK_COLOR
                    # HTML span attributes count the cell itself; convert to
                    # the number of *extra* rows/columns to merge.
                    if rowspan:
                        rowspan -= 1
                    if colspan:
                        colspan -= 1
                    col_data = col.text_content().encode("utf8")
                    valign = 'center' if col_i == start_col and col.tag != 'th' else 'top'
                    # Skip coordinates already covered by an earlier merge.
                    while (row_i, col_i) in self.list:
                        col_i += 1
                    cell = self.worksheet.cell(row=row_i, column=col_i)
                    if rowspan or colspan:
                        self.worksheet.merge_cells(start_row=row_i, end_row=row_i + rowspan,
                                                   start_column=col_i,
                                                   end_column=col_i + colspan)
                    cell.value = col_data
                    cell.alignment = Alignment(
                        horizontal=row.get('align', col.get('align')) or 'left',
                        vertical=row.get('valign', col.get('valign')) or valign,
                        shrink_to_fit=True, wrap_text=True
                    )
                    bgcolor = row.get('bgcolor', col.get('bgcolor'))
                    if bgcolor:
                        cell.fill = PatternFill(fill_type='solid', start_color=bgcolor, end_color=bgcolor)
                    # Derive font styling from inline markup inside the cell.
                    for el in col.iter():
                        if el.tag == 'font':
                            # Keep the current color when no explicit one given.
                            font_color = el.get('color') or font_color
                        elif el.tag == 'b':
                            font_bold = True
                        elif el.tag in _TEXT_SIZE:
                            # BUGFIX: was `font_bold = True,` (assigned the
                            # tuple (True,)) and `_TEXT_SIZE.get(el)` (used the
                            # lxml element, not its tag, as the dict key — the
                            # lookup always returned None).
                            font_bold = True
                            font_size = _TEXT_SIZE[el.tag]
                    cell.font = Font(
                        color=font_color,
                        bold=font_bold,
                        size=font_size,
                    )
                    if col.tag == 'th':
                        # Header cells: bold text on a highlighted background.
                        cell.font = Font(
                            bold=True
                        )
                        cell.fill = PatternFill(
                            fill_type='solid',
                            start_color=TH_COLOR,
                            end_color=TH_COLOR
                        )
                    for i in range(rowspan + 1):
                        for j in range(colspan + 1):
                            if i == rowspan:
                                last_row = row_i + i
                            # Remember every coordinate of the (possibly
                            # merged) region so later cells get shifted right.
                            self.list.append((row_i + i, col_i + j))
                            cell = self.worksheet.cell(row=row_i + i, column=col_i + j)
                            side = Side(border_style=border_style, color=BORDER_COLOR)
                            cell.border = Border(
                                left=side,
                                right=side,
                                top=side,
                                bottom=side,
                            )
        return last_row, last_col

    def save_wb(self, name):
        """Write the workbook to the file *name*."""
        self.workbook.save(name)
if __name__ == '__main__':
    # Demo/driver: decorate an existing workbook with column widths, a logo
    # image, and the content of an HTML table file.
    # html_filename = sys.argv[1]
    # xls_filename = sys.argv[2] if len(sys.argv) > 2 else (html_filename + ".xls")
    html_filename = '13.html'
    xls_filename = '22.xlsx'
    logo_filename = 'test.jpg'
    # converter = Html2Excel()
    # converter.create_new_sheet('1')
    # last_row, last_col = converter.append_html_table('1.html', 0, 1)
    # converter.append_html_table('2.html', last_row + 2, 1)
    # converter.save_wb(xls_filename)
    # Column widths keyed by 1-based column index.
    cols_width = {
        1: 5,
        2: 15,
        3: 25,
        4: 35,
        5: 5,
        6: 15,
    }
    converter = Html2Excel()
    converter.use_existing_wb(xls_filename)
    converter.set_col_width(cols_width)
    converter.add_logo(logo_filename)
    # Start writing the HTML table at row 8, column 2 of the active sheet.
    converter.append_html_table(open(html_filename, 'rb').read(), 8, 2)
    # converter.create_new_sheet('test')
    converter.save_wb(xls_filename)
|
[
"kirilludod@gmail.com"
] |
kirilludod@gmail.com
|
86ed9f46f1c2d2d11c6802437d45c5809fb30eeb
|
56befbe23e56995f9772010f47dcbf38c8c150bc
|
/0x04-python-more_data_structures/3-common_elements.py
|
eea467f21330bd685f084a863c29e797149a3f26
|
[] |
no_license
|
lemejiamo/holbertonschool-higher_level_programming
|
a48175156816a856dba0ddc6888b76f6e04b7223
|
3f756ed3a907044bfc811f108d5ae98bb0e9802c
|
refs/heads/main
| 2023-08-14T13:22:13.481356
| 2021-09-28T04:09:50
| 2021-09-28T04:09:50
| 361,910,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
#!/usr/bin/python3
def common_elements(set_1, set_2):
    """Return a list of the elements common to both sets.

    Args:
        set_1: first set.
        set_2: second set.

    Returns:
        list: elements that appear in both set_1 and set_2
        (in set_1's iteration order).
    """
    # Set membership tests are O(1), so this runs in O(len(set_1)) instead of
    # the original O(len(set_1) * len(set_2)) nested-loop scan; the result is
    # identical because set elements are unique.
    return [element for element in set_1 if element in set_2]
|
[
"luismejia69@gmail.com"
] |
luismejia69@gmail.com
|
d3f4ba1961c78164f76c03bb013ed36231123fa8
|
be471cdee10e2273ce41631c4a58f16227f18b5b
|
/server/walt/server/threads/blocking/images/publish.py
|
c0d87e832bd7edbf59ba092d26d354f78180beb8
|
[
"BSD-3-Clause"
] |
permissive
|
dia38/walt-python-packages
|
d91d477c90dbc4bd134fdcc31d7cb404ef9885b8
|
e6fa1f166f45e73173195d57840d22bef87b88f5
|
refs/heads/master
| 2020-04-29T17:41:19.936575
| 2019-11-26T10:11:58
| 2019-11-26T10:11:58
| 176,303,546
| 0
| 0
|
BSD-3-Clause
| 2019-03-18T14:27:56
| 2019-03-18T14:27:56
| null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
from walt.server.threads.blocking.images.metadata import \
update_user_metadata_for_image
# this implements walt image publish
def publish(requester, server, dh_peer, auth_conf, image_fullname):
    """Publish a WalT image: push it to the docker hub, then refresh the
    user's hub-side metadata so the hub account reflects the new image.

    :param requester: object representing the requesting client
        (presumably used for progress/error reporting — confirm in callers)
    :param server: WalT server object exposing the docker client (server.docker)
    :param dh_peer: docker hub peer/endpoint to push to
    :param auth_conf: docker hub authentication configuration
    :param image_fullname: full name of the image to publish
    """
    # push image
    server.docker.hub.push(image_fullname, dh_peer, auth_conf, requester)
    # update user metadata ('walt_metadata' image on user's hub account)
    update_user_metadata_for_image(server.docker, dh_peer, auth_conf,
                                   requester, image_fullname)
|
[
"etienne.duble@imag.fr"
] |
etienne.duble@imag.fr
|
484e6d6524860d2e105423312211ef8bece01452
|
e13542cc246837c0f91f87506fc75a2d946f115c
|
/aula1/beta/bin/wheel
|
0dc4c3b65acc995f2e7f7bec7b3abab2907ee749
|
[] |
no_license
|
Gamboua/python-520
|
a042d265cd1d1b9d6c0839d1d4dbdde3e7d401a6
|
52aa4f7c9688be88855e81c473e10cd88788dd64
|
refs/heads/master
| 2021-09-18T10:23:21.900421
| 2018-07-12T21:44:40
| 2018-07-12T21:44:40
| 109,701,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
#!/var/www/html/python/beta/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw" or ".exe" from argv[0] (setuptools adds
    # these suffixes to console-script wrappers on Windows) so that help and
    # usage output show the plain command name, then run the wheel CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"gabriel.bonfim@4linux.com.br"
] |
gabriel.bonfim@4linux.com.br
|
|
f501eef3959ee7b7694575e29d87512d48a2abc3
|
3a9f2b3d79cf214704829427ee280f4b49dca70a
|
/saigon/rat/RuckusAutoTest/tests/zd/CB_ZD_Get_Mgmt_IPV6_ACLs.py
|
eab30af67937b2aaea57f45e04f394791f085722
|
[] |
no_license
|
jichunwei/MyGitHub-1
|
ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791
|
f826fc89a030c6c4e08052d2d43af0b1b4b410e3
|
refs/heads/master
| 2021-01-21T10:19:22.900905
| 2016-08-20T03:34:52
| 2016-08-20T03:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
# Copyright (C) 2011 Ruckus Wireless, Inc. All rights reserved.
# Please make sure the following module docstring is accurate since it will be used in report generation.
"""
Description:
@author: Cherry Cheng
@contact: cherry.cheng@ruckuswireless.com
@since: Oct 2011
Prerequisite (Assumptions about the state of the test bed/DUT):
1. Build under test is loaded on the AP and Zone Director
Required components: 'ZoneDirectorCLI'
Test parameters:
- 'ap_mac_list': 'AP mac address list',
- 'ip_cfg': 'AP IP configuration.'
Test procedure:
1. Config:
- initialize test parameters
2. Test:
- Set AP device IP setting as specified via CLI
3. Cleanup:
- N/A
Result type: PASS/FAIL
Results: PASS: If set device IP setting successfully
FAIL: If any item is incorrect
Messages: If FAIL the test script returns a message related to the criterion that is not satisfied
"""
import logging
from RuckusAutoTest.models import Test
from RuckusAutoTest.components.lib.zd import mgmt_ip_acl
class CB_ZD_Get_Mgmt_IPV6_ACLs(Test):
required_components = ['ZoneDirector']
parameters_description = {}
def config(self, conf):
self._cfg_init_test_params(conf)
def test(self):
try:
logging.info("Get all management ipv6 acls via ZD GUI")
self.gui_all_acls_list = mgmt_ip_acl.get_all_mgmt_ipv6_acl(self.zd)
except Exception, ex:
self.errmsg = 'Get mgmt ipv6 acl failed:%s' % (ex.message)
if self.errmsg:
return self.returnResult("FAIL",self.errmsg)
else:
self._update_carrier_bag()
pass_msg = 'Get management ivp6 acl via ZD GUI successfully'
return self.returnResult('PASS', pass_msg)
def cleanup(self):
pass
def _update_carrier_bag(self):
self.carrierbag['gui_mgmt_ipv6_acl_list'] = self.gui_all_acls_list
def _cfg_init_test_params(self, conf):
'''
Mgmt acl dict sample:
{'name': 'mgmt ip acl name',
'type': 'single|prefix,
'addr': 'single addr|addr and prefix split with /',
}
'''
self.conf = {}
self.conf.update(conf)
self.errmsg = ''
self.zd = self.testbed.components['ZoneDirector']
|
[
"tan@xx.com"
] |
tan@xx.com
|
df48824f457902ed3e11b5e29e8b28c3cabb0ba9
|
2067c90b12a5b10fbb97bb2fe52a30f79d502b7b
|
/booking/migrations/0006_auto_20200216_1826.py
|
3c6f8b2d33482cc143f75329a8b5d7acec770d3f
|
[] |
no_license
|
info3g/payapal_integration
|
bc632841abd0284a6757f31cf13d9ee1946bba33
|
a8e745df3497f553d437988c799cd9882602d3b8
|
refs/heads/master
| 2022-12-10T01:15:46.167005
| 2020-03-03T16:36:48
| 2020-03-03T16:36:48
| 243,932,807
| 0
| 1
| null | 2022-12-08T03:43:42
| 2020-02-29T08:34:31
|
Python
|
UTF-8
|
Python
| false
| false
| 528
|
py
|
# Generated by Django 3.0.3 on 2020-02-16 17:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.3, 2020-02-16): redefine the
    'address' and 'price' fields on the 'customer' model in the 'booking' app.
    """

    # Must run after the migration that introduced Customer.price.
    dependencies = [
        ('booking', '0005_customer_price'),
    ]

    operations = [
        # 'address' becomes a CharField limited to 120 characters.
        migrations.AlterField(
            model_name='customer',
            name='address',
            field=models.CharField(max_length=120),
        ),
        # 'price' becomes a FloatField.
        migrations.AlterField(
            model_name='customer',
            name='price',
            field=models.FloatField(),
        ),
    ]
|
[
"infothreeg@gmail.com"
] |
infothreeg@gmail.com
|
d3fc4e749b5623648d7755c22b43adbca799339d
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/keras/datasets/cifar10.py
|
863c9df822d0ee351f438d2d076f8030cbbc3a5f
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:63d66a0e745552af02799b65d4289ea59046aeae4e0e92fffe1446ff879f32e4
size 3548
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.