blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-288) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-112) | license_type (stringclasses 2) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses 684) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses 22) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses 147) | src_encoding (stringclasses 25) | language (stringclasses 1) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 128 to 12.7k) | extension (stringclasses 142) | content (stringlengths 128 to 8.19k) | authors (listlengths 1-1) | author_id (stringlengths 1-132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
030305ab4f62d57c7c891748c1539069d222cb70
|
35b5f50626d33b17f38f311e9d5fc6b727d25765
|
/gui/kivy/uix/dialogs/invoices.py
|
8904edf6fbc412e1c895baa64e3ebd02c67d15b9
|
[
"MIT"
] |
permissive
|
bitcoinpostquantum/electrumpq
|
7fcf5a3fbda2b05f033340ba61fc23e46997f5ed
|
dbbc2a493aff904923cd8112fc5bb07802df272c
|
refs/heads/master
| 2020-04-10T10:09:24.239700
| 2018-12-29T09:49:58
| 2018-12-29T09:49:58
| 160,957,005
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,810
|
py
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from decimal import Decimal
Builder.load_string('''
<InvoicesLabel@Label>
    #color: .305, .309, .309, 1
    text_size: self.width, None
    halign: 'left'
    valign: 'top'

<InvoiceItem@CardItem>
    requestor: ''
    memo: ''
    amount: ''
    status: ''
    date: ''
    icon: 'atlas://gui/kivy/theming/light/important'
    Image:
        id: icon
        source: root.icon
        size_hint: None, 1
        width: self.height *.54
        mipmap: True
    BoxLayout:
        spacing: '8dp'
        height: '32dp'
        orientation: 'vertical'
        Widget
        InvoicesLabel:
            text: root.requestor
            shorten: True
        Widget
        InvoicesLabel:
            text: root.memo
            color: .699, .699, .699, 1
            font_size: '13sp'
            shorten: True
        Widget
    BoxLayout:
        spacing: '8dp'
        height: '32dp'
        orientation: 'vertical'
        Widget
        InvoicesLabel:
            text: root.amount
            font_size: '15sp'
            halign: 'right'
            width: '110sp'
        Widget
        InvoicesLabel:
            text: root.status
            font_size: '13sp'
            halign: 'right'
            color: .699, .699, .699, 1
        Widget

<InvoicesDialog@Popup>
    id: popup
    title: _('Invoices')
    BoxLayout:
        id: box
        orientation: 'vertical'
        spacing: '1dp'
        ScrollView:
            GridLayout:
                cols: 1
                id: invoices_container
                size_hint: 1, None
                height: self.minimum_height
                spacing: '2dp'
                padding: '12dp'
''')

from kivy.properties import BooleanProperty
from electrumpq_gui.kivy.i18n import _
from electrumpq.util import format_time
from electrumpq.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrumpq_gui.kivy.uix.context_menu import ContextMenu

invoice_text = {
    PR_UNPAID: _('Pending'),
    PR_UNKNOWN: _('Unknown'),
    PR_PAID: _('Paid'),
    PR_EXPIRED: _('Expired')
}

pr_icon = {
    PR_UNPAID: 'atlas://gui/kivy/theming/light/important',
    PR_UNKNOWN: 'atlas://gui/kivy/theming/light/important',
    PR_PAID: 'atlas://gui/kivy/theming/light/confirmed',
    PR_EXPIRED: 'atlas://gui/kivy/theming/light/close'
}


class InvoicesDialog(Factory.Popup):

    def __init__(self, app, screen, callback):
        Factory.Popup.__init__(self)
        self.app = app
        self.screen = screen
        self.callback = callback
        self.cards = {}
        self.context_menu = None

    def get_card(self, pr):
        key = pr.get_id()
        ci = self.cards.get(key)
        if ci is None:
            ci = Factory.InvoiceItem()
            ci.key = key
            ci.screen = self
            self.cards[key] = ci
        ci.requestor = pr.get_requestor()
        ci.memo = pr.get_memo()
        amount = pr.get_amount()
        if amount:
            ci.amount = self.app.format_amount_and_units(amount)
            status = self.app.wallet.invoices.get_status(ci.key)
            ci.status = invoice_text[status]
            ci.icon = pr_icon[status]
        else:
            ci.amount = _('No Amount')
            ci.status = ''
        exp = pr.get_expiration_date()
        ci.date = format_time(exp) if exp else _('Never')
        return ci

    def update(self):
        self.menu_actions = [('Pay', self.do_pay), ('Details', self.do_view), ('Delete', self.do_delete)]
        invoices_list = self.ids.invoices_container
        invoices_list.clear_widgets()
        _list = self.app.wallet.invoices.sorted_list()
        for pr in _list:
            ci = self.get_card(pr)
            invoices_list.add_widget(ci)

    def do_pay(self, obj):
        self.hide_menu()
        self.dismiss()
        pr = self.app.wallet.invoices.get(obj.key)
        self.app.on_pr(pr)

    def do_view(self, obj):
        pr = self.app.wallet.invoices.get(obj.key)
        pr.verify(self.app.wallet.contacts)
        self.app.show_pr_details(pr.get_dict(), obj.status, True)

    def do_delete(self, obj):
        from .question import Question

        def cb(result):
            if result:
                self.app.wallet.invoices.remove(obj.key)
                self.hide_menu()
                self.update()

        d = Question(_('Delete invoice?'), cb)
        d.open()

    def show_menu(self, obj):
        self.hide_menu()
        self.context_menu = ContextMenu(obj, self.menu_actions)
        self.ids.box.add_widget(self.context_menu)

    def hide_menu(self):
        if self.context_menu is not None:
            self.ids.box.remove_widget(self.context_menu)
            self.context_menu = None
|
[
"code@bitcoinpq.org"
] |
code@bitcoinpq.org
|
1c76e72cd8addfb1c576f4225f82d9d5ad24d572
|
f64f8a8827219371236f0e2ad3d5220ec1825cb2
|
/bux/_commands/_losers.py
|
32efa4c463b6d14b31e50be4b8aa634548f350b3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
orsinium-labs/bux
|
2b1c0c19aa06d480a90386cdda66af855a746f32
|
fbb5727b759719f15ec38dd4bf00e493690854b4
|
refs/heads/master
| 2023-07-12T19:35:14.768488
| 2021-08-30T11:19:03
| 2021-08-30T11:19:03
| 389,935,936
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from argparse import ArgumentParser
import bux
from ._base import Command, register
@register
class Losers(Command):
    name = 'losers'

    @staticmethod
    def init_parser(parser: ArgumentParser) -> None:
        parser.add_argument('--token', required=True)

    def run(self) -> int:
        api = bux.UserAPI(token=self.args.token)
        movers = api.securities().movers().requests()
        for stock in movers.losers:
            gain = (stock.bid.amount / stock.closing_bid.amount - 1) * 100
            self.print(f'{stock.id} {stock.name:25} {gain:+.02f}%')
        return 0
|
[
"mail@orsinium.dev"
] |
mail@orsinium.dev
|
d671f49d5bf51d6b819c2844d458b42b7ada6c94
|
17cad1d357380875243b804ffd13882f1a7d61a8
|
/0x0B-python-input_output/2-read_lines.py
|
f33d4e7521358ff0ab260754b7aa8efa81a6ae2c
|
[] |
no_license
|
Abdou-Hidoussi/holbertonschool-higher_level_programming
|
9a0c0714b63ccd9823798adb51eb4f395ab375dc
|
1dd37cc5f848d1f37884e6ffbe9598eae8c4f30e
|
refs/heads/master
| 2023-03-05T00:02:25.283646
| 2021-02-18T20:42:26
| 2021-02-18T20:42:26
| 291,713,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
#!/usr/bin/python3
"""
task 2
"""
def read_lines(filename="", nb_lines=0):
    """read line function"""
    with open(filename, encoding="utf-8") as f:
        if nb_lines <= 0:
            print(f.read(), end="")
            return
        for i in range(nb_lines):
            print(f.readline(), end="")
|
[
"hidoussiabdou5@gmail.com"
] |
hidoussiabdou5@gmail.com
|
4649092af7c99f1f913f1b6305e81c3ad84e7b26
|
2b4af8810511b5f1ed47fdf5662753b9b4af76b8
|
/corehq/apps/case_search/migrations/0004_auto_20170518_2018.py
|
90b4ceefdfb27e1e0f3dd0893ad81a5dd1d8a782
|
[] |
no_license
|
DeckOfPandas/commcare-wddcp
|
55bde89197ec5bc4a4b53d327ec6a811aec0d752
|
810d2e09d3890e3d0d70178745da5924c1db767b
|
refs/heads/dimagi
| 2020-12-02T19:19:53.992796
| 2017-06-30T15:18:16
| 2017-07-05T12:23:26
| 96,325,707
| 1
| 0
| null | 2017-07-05T14:02:49
| 2017-07-05T14:02:49
| null |
UTF-8
|
Python
| false
| false
| 1,843
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-18 20:18
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):

    dependencies = [
        ('case_search', '0003_casesearchqueryaddition'),
    ]

    operations = [
        migrations.CreateModel(
            name='FuzzyProperties',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(db_index=True, max_length=256)),
                ('case_type', models.CharField(db_index=True, max_length=256)),
                ('properties', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True, null=True), null=True, size=None)),
            ],
        ),
        migrations.AlterField(
            model_name='casesearchqueryaddition',
            name='query_addition',
            field=jsonfield.fields.JSONField(default=dict, help_text=b"More information about how this field is used can be found <a href='https://docs.google.com/document/d/1MKllkHZ6JlxhfqZLZKWAnfmlA3oUqCLOc7iKzxFTzdY/edit#heading=h.k5pky76mwwon'>here</a>. This ES <a href='https://www.elastic.co/guide/en/elasticsearch/guide/1.x/bool-query.html'>documentation</a> may also be useful. This JSON will be merged at the `query.filtered.query` path of the query JSON."),
        ),
        migrations.AlterUniqueTogether(
            name='fuzzyproperties',
            unique_together=set([('domain', 'case_type')]),
        ),
        migrations.AddField(
            model_name='casesearchconfig',
            name='fuzzy_properties',
            field=models.ManyToManyField(to='case_search.FuzzyProperties'),
        ),
    ]
|
[
"proteusvacuum@gmail.com"
] |
proteusvacuum@gmail.com
|
73ea9b5b72d472a84e080415378c392f17ac413d
|
34c84dc28ca8c62594ba74facc6cef4eacb2aad9
|
/examples/liquid-argon/utils.py
|
672f312f191d433c0811262db1d2a9b8692c8079
|
[] |
no_license
|
choderalab/automatic-equilibration-detection
|
f4102407db312402b30dceb1cee0ea0e698e46b1
|
9b5f096cd8e309bc1158f9eed5d8fd41f78312cc
|
refs/heads/master
| 2021-01-17T15:15:38.786759
| 2016-12-24T06:12:32
| 2016-12-24T06:12:32
| 19,412,535
| 15
| 9
| null | 2016-12-24T06:12:33
| 2014-05-03T21:28:09
|
TeX
|
UTF-8
|
Python
| false
| false
| 4,937
|
py
|
#!/usr/bin/env python
"""
Run a simulation of liquid argon at constant pressure.
"""
import os, os.path, copy
import netCDF4
from simtk import openmm, unit
from simtk.openmm import app
from openmmtools import testsystems, integrators
def minimize(system, positions):
    """
    Minimize the specified testsystem.

    Parameters
    ----------
    system : simtk.openmm.System
        The system to minimize
    positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
        The initial positions to be minimized.

    Returns
    -------
    minimized_positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
        Minimized positions.

    """
    integrator = openmm.VerletIntegrator(1.0 * unit.femtosecond)
    context = openmm.Context(system, integrator)
    context.setPositions(positions)
    openmm.LocalEnergyMinimizer.minimize(context)
    final_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
    del context, integrator
    return final_positions


def write_pdb(filename, positions):
    """
    Write PDB file for argon particles.

    Parameters
    ----------
    filename : str
        Filename to write PDB file to.
    positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
        Positions to write.

    """
    nparticles = positions.shape[0]
    outfile = open(filename, 'w')
    for particle in range(nparticles):
        outfile.write("ATOM %5d AR AR 1 %8.3f%8.3f%8.3f\n" % (particle, positions[particle,0]/unit.angstrom, positions[particle,1]/unit.angstrom, positions[particle,2]/unit.angstrom))
    outfile.close()
    return


def create_netcdf_datastore(filename, system, positions, nreplicates, niterations, observation_interval):
    """
    Create (or resume from) NetCDF data storage file.

    Parameters
    ----------
    filename : str
        Filename of NetCDF file.
    system : simtk.openmm.System
        The system to minimize
    positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
        The initial positions used for all simulations
    nreplicates : int
        The number of simulation replicates to be performed
    niterations : int
        The number of simulation iterations to be performed.
    observation_interval : simtk.unit.Quantity with units compatible with ps
        Observation interval between frames.

    Returns
    -------
    ncfile : netCDF4.Dataset

    """
    if os.path.exists(filename):
        raise Exception("Datafile '%s' already exists." % filename)

    # Create a new file.
    ncfile = netCDF4.Dataset(filename, 'w', version='NETCDF4')

    # Determine some extra dimensions
    nparticles = positions.shape[0]

    # Initialize NetCDF file.
    ncfile.createDimension('replicate', 0)      # unlimited number of replicates
    ncfile.createDimension('iteration', 0)      # unlimited number of iterations
    ncfile.createDimension('atom', nparticles)  # number of atoms in system
    ncfile.createDimension('spatial', 3)        # number of spatial dimensions
    ncfile.createDimension('singleton', 1)

    # Set global attributes.
    import time
    setattr(ncfile, 'title', 'liquid argon simulation density data')
    setattr(ncfile, 'CreationDate', time.ctime())

    # Store global data.
    ncvar = ncfile.createVariable('observation_interval', 'f4')
    ncvar.assignValue(observation_interval / unit.picoseconds)
    setattr(ncvar, 'units', 'ps')

    # Store initial positions.
    ncvar_positions = ncfile.createVariable('initial_positions', 'f4', ('atom','spatial'), zlib=True, chunksizes=(nparticles,3))
    setattr(ncvar_positions, 'units', 'nm')
    setattr(ncvar_positions, "long_name", "initial_positions[atom][spatial] is initial position of coordinate 'spatial' of atom 'atom' used for all simulations.")
    x = positions / unit.nanometers
    ncfile.variables['initial_positions'][:,:] = x[:,:]

    # Store system.
    ncvar_system = ncfile.createVariable('system', str, ('singleton',), zlib=True)
    setattr(ncvar_system, 'long_name', "system is the serialized OpenMM System used for all simulations")
    ncvar_system[0] = system.__getstate__()

    # Create storage for simulation data.
    ncvar_densities = ncfile.createVariable('reduced_density', 'f4', ('replicate','iteration'), zlib=True, chunksizes=(nreplicates,1))
    setattr(ncvar_densities, "long_name", "reduced_density[replicate][iteration] is the density (in reduced, dimensionless units) of iteration 'iteration' of replicate 'replicate'")
    ncvar_potential = ncfile.createVariable('reduced_potential', 'f4', ('replicate','iteration'), zlib=True, chunksizes=(1,niterations+1))
    setattr(ncvar_potential, "long_name", "reduced_potential[replicate][iteration] is the reduced potential (in kT) of iteration 'iteration' of replicate 'replicate'")

    ncfile.sync()
    return ncfile
|
[
"choderaj@mskcc.org"
] |
choderaj@mskcc.org
|
f4d4477c62a9b7c90942fce44f0792f8b0c019a1
|
c3a84a07539c33040376f2c1e140b1a1041f719e
|
/wagtail-stubs/admin/views/tags.pyi
|
55025030a9dcc5f24d36277a1b5c72bd7e56c396
|
[] |
no_license
|
tm-kn/tmp-wagtail-stubs
|
cc1a4434b7142cb91bf42efb7daad006c4a7dbf4
|
23ac96406610b87b2e7751bc18f0ccd27f17eb44
|
refs/heads/master
| 2023-01-20T14:41:33.962460
| 2020-11-30T23:15:38
| 2020-11-30T23:15:38
| 317,332,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
pyi
|
from typing import Any, Optional
def autocomplete(request: Any, app_name: Optional[Any] = ..., model_name: Optional[Any] = ...): ...
|
[
"hi@tmkn.org"
] |
hi@tmkn.org
|
020965ab409130059e4fb9e1e3a6cf4d39e75232
|
864755f7d733351b205e460ec54a5f6d13050037
|
/devilry/devilry_settings/views.py
|
602dd5a1b02bebe44232137c80586840865b0340
|
[] |
permissive
|
aless80/devilry-django
|
27fc14b7bb7356f5f9d168e435a84e7bb43a682a
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
refs/heads/master
| 2020-05-20T12:22:09.255393
| 2019-05-19T21:06:57
| 2019-05-19T21:06:57
| 185,568,847
| 0
| 0
|
BSD-3-Clause
| 2019-05-08T08:53:52
| 2019-05-08T08:53:51
| null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
from django.http import HttpResponse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
@login_required
def missing_setting(request, setting):
    message = """
    You have been redirected to this view because your local Devilry system administrator
    have not set the <strong>{setting}</strong>-setting. Please tell them to set it.""".format(setting=setting)
    return HttpResponse('<html><body>{message}</body></html>'.format(message=message))


def urlsetting_or_unsetview(settingname):
    setting = getattr(settings, settingname, None)
    if setting:
        return setting
    else:
        return reverse('devilry_settings_missing_setting', args=(settingname,))
|
[
"post@espenak.net"
] |
post@espenak.net
|
2155e9730a7adb5594c0a9c0e5138143f70e3f0e
|
f8cf0f8d3ca1784b59fff380b99c4fa4da225389
|
/ceshi/configparser/test.py
|
4369761d2860981c381117b763a2d97b2035bd9f
|
[] |
no_license
|
loveguan/mysite
|
6f3c10c9bd1780a6a3c789c03ef66a10b186da92
|
7217b0e111626af0e6afddd0bc405705cf9641ca
|
refs/heads/master
| 2021-01-01T20:49:45.367058
| 2018-02-05T14:19:42
| 2018-02-05T14:19:42
| 98,939,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: JOJ
@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
@contact: zhouguanjie@qq.com
@software: JOJ
@file: test.py
@time: 2017/12/14 15:42
@desc: Build a configuration file
'''
import configparser

config = configparser.ConfigParser()
config["DEFAULT"] = {'ServerAliveInterval': '45',
                     'Compression': 'yes',
                     'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Host Port'] = '50022'  # mutates the parser
topsecret['ForwardX11'] = 'no'    # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
    config.write(configfile)
|
[
"zhouguanjie@qq.com"
] |
zhouguanjie@qq.com
|
f3cd6766d23f03656ef3274d07cce9ea1489c132
|
350ecc8259bcad075bd376423335bb41cc8a533e
|
/classic_strategy1.py
|
50460f25736ecc5fcf66524f4c162073165d5ca1
|
[] |
no_license
|
CodedQuen/python_begin
|
39da66ecc4a77b94a5afbbf0900727c8156b85e1
|
1433c319b5d85520c50aee00dd4b6f21a7e6366a
|
refs/heads/master
| 2022-06-10T10:30:28.807874
| 2020-04-25T03:34:03
| 2020-04-25T03:34:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,457
|
py
|
# classic_strategy.py
# Strategy pattern -- classic implementation

"""
# BEGIN CLASSIC_STRATEGY_TESTS

    >>> joe = Customer('John Doe', 0)  # <1>
    >>> ann = Customer('Ann Smith', 1100)
    >>> cart = [LineItem('banana', 4, .5),  # <2>
    ...         LineItem('apple', 10, 1.5),
    ...         LineItem('watermellon', 5, 5.0)]
    >>> Order(joe, cart, FidelityPromo())  # <3>
    <Order total: 42.00 due: 42.00>
    >>> Order(ann, cart, FidelityPromo())  # <4>
    <Order total: 42.00 due: 39.90>
    >>> banana_cart = [LineItem('banana', 30, .5),  # <5>
    ...                LineItem('apple', 10, 1.5)]
    >>> Order(joe, banana_cart, BulkItemPromo())  # <6>
    <Order total: 30.00 due: 28.50>
    >>> long_order = [LineItem(str(item_code), 1, 1.0)  # <7>
    ...               for item_code in range(10)]
    >>> Order(joe, long_order, LargeOrderPromo())  # <8>
    <Order total: 10.00 due: 9.30>
    >>> Order(joe, cart, LargeOrderPromo())
    <Order total: 42.00 due: 42.00>

# END CLASSIC_STRATEGY_TESTS
"""
# BEGIN CLASSIC_STRATEGY

from abc import ABC, abstractmethod
from collections import namedtuple

Customer = namedtuple('Customer', 'name fidelity')


class LineItem:

    def __init__(self, product, quantity, price):
        self.product = product
        self.quantity = quantity
        self.price = price

    def total(self):
        return self.price * self.quantity


class Order:  # the Context

    def __init__(self, customer, cart, promotion=None):
        self.customer = customer
        self.cart = list(cart)
        self.promotion = promotion

    def total(self):
        if not hasattr(self, '__total'):
            self.__total = sum(item.total() for item in self.cart)
        return self.__total

    def due(self):
        if self.promotion is None:
            discount = 0
        else:
            discount = self.promotion.discount(self)
        return self.total() - discount

    def __repr__(self):
        fmt = '<Order total: {:.2f} due: {:.2f}>'
        return fmt.format(self.total(), self.due())


class Promotion(ABC):  # the Strategy: an Abstract Base Class

    @abstractmethod
    def discount(self, order):
        """Return discount as a positive dollar amount"""


class FidelityPromo(Promotion):  # first Concrete Strategy
    """5% discount for customers with 1000 or more fidelity points"""

    def discount(self, order):
        return order.total() * .05 if order.customer.fidelity >= 1000 else 0


class BulkItemPromo(Promotion):  # second Concrete Strategy
    """10% discount for each LineItem with 20 or more units"""

    def discount(self, order):
        discount = 0
        for item in order.cart:
            if item.quantity >= 20:
                discount += item.total() * .1
        return discount


class LargeOrderPromo(Promotion):  # third Concrete Strategy
    """7% discount for orders with 10 or more distinct items"""

    def discount(self, order):
        distinct_items = {item.product for item in order.cart}
        if len(distinct_items) >= 10:
            return order.total() * .07
        return 0


joe = Customer('John Doe', 0)
ann = Customer('Ann Smith', 1100)
cart = [LineItem('banana', 4, .5),
        LineItem('apple', 10, 1.5),
        LineItem('watermellon', 5, 5.0)]
print(Order(joe, cart, FidelityPromo()))
# END CLASSIC_STRATEGY
|
[
"noreply@github.com"
] |
CodedQuen.noreply@github.com
|
6f15e3cd583c011bb562a0b53d54bb954df4bb24
|
473507d7540ad1ee5ae2670ac18ace05cd50f6fa
|
/Math/excel_sheet_column_number.py
|
51d65533812a517cb65c1ea1328ec16f33f05486
|
[] |
no_license
|
JunctionChao/LeetCode
|
4558c9d053f4a4d003903d08fade9fd93e6d9658
|
10daf38e673e69922f4be7eadf4054810da8ae13
|
refs/heads/master
| 2023-02-04T06:59:35.833078
| 2020-12-18T14:32:03
| 2020-12-18T14:32:03
| 322,617,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Date : 2019-09-19
# Author : Yuanbo Zhao (chaojunction@gmail.com)
def titleToNumber(s: str) -> int:
    i = 0
    result = 0
    for char in s[::-1]:
        r = ord(char) - ord('A') + 1
        result += r * 26**i
        i += 1
    return result


if __name__ == '__main__':
    print(titleToNumber('AA'))
|
[
"1429004361@qq.com"
] |
1429004361@qq.com
|
4d2f0d2d4ca9497e547201b052c68f244f1836f0
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/units/format/__init__.py
|
bad82f9313aa459bb138f442ee33f3d00bf7bc45
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A collection of different unit formats.
"""
# This is pretty atrocious, but it will prevent a circular import for those
# formatters that need access to the units.core module An entry for it should
# exist in sys.modules since astropy.units.core imports this module
import sys
core = sys.modules['astropy.units.core']
from .base import Base
from .generic import Generic, Unscaled
from .cds import CDS
from .console import Console
from .fits import Fits
from .latex import Latex, LatexInline
from .ogip import OGIP
from .unicode_format import Unicode
from .vounit import VOUnit
__all__ = [
    'Base', 'Generic', 'CDS', 'Console', 'Fits', 'Latex', 'LatexInline',
    'OGIP', 'Unicode', 'Unscaled', 'VOUnit', 'get_format']


def get_format(format=None):
    """
    Get a formatter by name.

    Parameters
    ----------
    format : str or `astropy.units.format.Base` instance or subclass
        The name of the format, or the format instance or subclass
        itself.

    Returns
    -------
    format : `astropy.units.format.Base` instance
        The requested formatter.
    """
    if isinstance(format, type) and issubclass(format, Base):
        return format
    elif not (isinstance(format, str) or format is None):
        raise TypeError(
            "Formatter must a subclass or instance of a subclass of {!r} "
            "or a string giving the name of the formatter. Valid formatter "
            "names are: [{}]".format(Base, ', '.join(Base.registry)))

    if format is None:
        format = 'generic'

    format_lower = format.lower()

    if format_lower in Base.registry:
        return Base.registry[format_lower]

    raise ValueError("Unknown format {!r}. Valid formatter names are: "
                     "[{}]".format(format, ', '.join(Base.registry)))
|
[
"nicolas.holzschuch@inria.fr"
] |
nicolas.holzschuch@inria.fr
|
bb9fa8236399987f6814680af95a20481f9fc3d4
|
67b7e6d2c08f08403ec086c510622be48b8d26d8
|
/src/test/tinc/tincrepo/mpp/gpdb/tests/queries/basic/exttab/errlog/sql/datagen_first_errors.py
|
d83f25a62ae3d2681a21b0d929f720f87afcdc31
|
[
"Apache-2.0",
"PostgreSQL",
"LicenseRef-scancode-rsa-md4",
"OLDAP-2.8",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"BSD-3-Clause",
"Zlib",
"LicenseRef-scancode-zeusbench",
"LicenseRef-scancode-mit-modification-obligations",
"OpenSSL",
"MIT",
"LicenseRef-scancode-other-copyleft",
"bzip2-1.0.6",
"NTP",
"W3C",
"metamail",
"Beerware",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-stream-benchmark",
"LicenseRef-scancode-openssl",
"X11-distribute-modifications-variant",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-ssleay-windows",
"Spencer-94",
"ISC",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause",
"Python-2.0",
"curl",
"LicenseRef-scancode-sun-bcl-sdk-5.0",
"MIT-CMU",
"W3C-19980720"
] |
permissive
|
sshyran/gpdb
|
41012411d22b0294204dfb0fe67a1f4c8d1ecaf6
|
2d065ecdd2b5535cb42474f17a0ee6592b4e6837
|
refs/heads/master
| 2023-04-09T14:05:44.030212
| 2016-11-12T08:33:33
| 2016-11-12T08:34:36
| 73,544,159
| 0
| 0
|
Apache-2.0
| 2023-04-04T00:30:10
| 2016-11-12T09:43:54
|
PLpgSQL
|
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import sys
def main(total_rows, number_of_error_rows):
    error_count = 0
    for i in xrange(number_of_error_rows):
        print "error_%s" % str(error_count)
    for i in xrange(total_rows - number_of_error_rows):
        print "%s|%s_number" % (i, i)


if __name__ == '__main__':
    total_rows = 20
    error_rows = 0
    if len(sys.argv) > 1:
        total_rows = int(sys.argv[1])
        error_rows = int(sys.argv[2])
    main(total_rows, error_rows)
|
[
"jyih@pivotal.io"
] |
jyih@pivotal.io
|
a0924adb3c7da96cb655447e56114e94b508ac22
|
b68c92fe89b701297f76054b0f284df5466eb698
|
/Other/Companies/Microsoft/BenchmarkMatching.py
|
a00910c36d86cd8e47e303768e50f53753c80cee
|
[] |
no_license
|
makrandp/python-practice
|
32381a8c589f9b499ab6bde8184a847b066112f8
|
60218fd79248bf8138158811e6e1b03261fb38fa
|
refs/heads/master
| 2023-03-27T18:11:56.066535
| 2021-03-28T04:02:00
| 2021-03-28T04:02:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,409
|
py
|
'''
Programming challenge description:

We say a portfolio matches the benchmark when the number of shares of each asset in the portfolio matches the number of shares of each asset in the benchmark. Your question is to write a program that determines the transactions necessary to make a portfolio match a benchmark.

A portfolio is a collection of assets such as stocks and bonds. A portfolio could have 10 shares of Vodafone stock, 15 shares of Google stock and 15 shares of Microsoft bonds. A benchmark is also just a collection of assets. A benchmark could have 15 shares of Vodafone stock, 10 shares of Google stock and 15 shares of Microsoft bonds.

A transaction is when you “buy” or “sell” a particular asset of a certain asset type (“stock” or “bond”). For instance, you can decide to buy 5 shares of Vodafone stock which, given the portfolio described above, would result in you having 15 shares of Vodafone stock. Correspondingly, you decide to sell 5 shares of Microsoft bonds, which would result in 10 shares of Microsoft bonds in the above portfolio.

Assumptions:
Shares are positive decimals
There will always be at least 1 asset present in the Portfolio and Benchmark
A particular asset can be bond, stock, or both. For example, 5 shares of Microsoft bonds and 10 shares of Microsoft stock can both be present in the portfolio/benchmark
The trades should be sorted in alphabetical order based on the names of the assets; if both bonds and stock are present for an asset, list bonds first

Input:
The first part of the input is the Portfolio holdings (in the format Name,AssetType,Shares where each asset is separated by the ‘|’ symbol)
The second part of the input is the Benchmark holdings (in the format Name,AssetType,Shares where each asset is separated by the ‘|’ symbol)
Example input: Vodafone,STOCK,10|Google,STOCK,15|Microsoft,BOND,15:Vodafone,STOCK,15|Google,STOCK,10|Microsoft,BOND,15
Note that the two parts are separated by the ‘:’ symbol.

Output:
The output is a list of transactions (separated by new line) in the format TransactionType,Name,AssetType,Shares. Note that the TransactionType should only be BUY or SELL.
Example output:
SELL,Google,STOCK,5
BUY,Vodafone,STOCK,5

Test 1
Test Input:
Vodafone,STOCK,10|Google,STOCK,15|Microsoft,BOND,15:Vodafone,STOCK,15|Google,STOCK,10|Microsoft,BOND,15
Expected Output:
SELL,Google,STOCK,5
BUY,Vodafone,STOCK,5

Test 2
Test Input:
Vodafone,STOCK,10|Google,STOCK,15:Vodafone,STOCK,15|Vodafone,BOND,10|Google,STOCK,10
Expected Output:
SELL,Google,STOCK,5
BUY,Vodafone,BOND,10
BUY,Vodafone,STOCK,5
'''

"""
Super quick python answer based off of the information & test cases provided. Could/should heavily improve the string concatenation.

All we do is take the company name plus the share type (bond or stock) and use that as the key, with the amount as the value. We do this for both the current portfolio and the benchmark. For example "Vodafone,STOCK,10|Google,STOCK,15:Vodafone,STOCK,15|Vodafone,BOND,10|Google,STOCK,10" gives us:

currentShares = {
    "Vodafone,STOCK": 10,
    "Google,STOCK": 15
}
benchmarkShares = {
    "Vodafone,STOCK": 15,
    "Vodafone,BOND": 10,
    "Google,STOCK": 10
}

Then we iterate through the benchmark shares, outputting based off of the difference and removing the key from current shares. Of the remaining current shares, we simply sell them off.
"""
from typing import List
from collections import defaultdict


class Solution():
    def benchmarkMatching(self, data: str) -> List[str]:
        # Getting our current share prices
        currentShares, benchmarkShares = data.split(':')
        currentShares, benchmarkShares = currentShares.split('|'), benchmarkShares.split('|')
        currentSharesHash, outputBonds, outputShares = defaultdict(lambda: 0), list(), list()
        for c in currentShares:
            name, portType, amount = c.split(',')
            currentSharesHash[name + "," + portType] += int(amount)
        for c in benchmarkShares:
            name, portType, amount = c.split(',')
            diff = int(amount) if name + "," + portType not in currentSharesHash else (int(amount) - currentSharesHash[name + "," + portType])
            if diff != 0:
                s = ("SELL" if diff < 0 else "BUY") + "," + name + "," + portType + "," + str(abs(diff))
                if portType == "BOND":
                    outputBonds.append((name, s))
                else:
                    outputShares.append((name, s))
            if name + "," + portType in currentSharesHash:
                del currentSharesHash[name + "," + portType]
        for c in currentSharesHash.keys():
            name, portType = c.split(',')
            amount = currentSharesHash[c]
            # Remaining holdings are absent from the benchmark, so sell the full amount.
            s = "SELL," + name + "," + portType + "," + str(amount)
            if portType == "BOND":
                outputBonds.append((name, s))
            else:
                outputShares.append((name, s))
        # Sorting outputs
        output = list()
        for bond in sorted(outputBonds):
            output.append(bond[1])
        for share in sorted(outputShares):
            output.append(share[1])
        print(output)


s = Solution()
s.benchmarkMatching("Vodafone,STOCK,10|Google,STOCK,15:Vodafone,STOCK,15|Vodafone,BOND,10|Google,STOCK,10")
s.benchmarkMatching("Vodafone,STOCK,10|Google,STOCK,15|Microsoft,BOND,15:Vodafone,STOCK,15|Google,STOCK,10|Microsoft,BOND,15")
|
[
"awalexweber99@gmail.com"
] |
awalexweber99@gmail.com
|
50d8820972add7db1e12f143f05cf38d7f3ed8a2
|
548a5ed489f88b34f0dd31a2118cb1ce82155c99
|
/BOJ/2020_12/2562.py
|
fce608b3cdaa9bec2c41b134ed6c60e49a90d410
|
[] |
no_license
|
rkdtmddnjs97/algorithm
|
f44606f0f39c39af272ffef5373c801e7388d882
|
7cc3d20d654ea067502c3e60b706b0cb765784c0
|
refs/heads/main
| 2023-07-13T08:20:38.750770
| 2021-08-21T20:31:54
| 2021-08-21T20:31:54
| 398,647,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
a = [int(input()) for i in range(9)]
b = max(a)
print(b)
for key, value in enumerate(a):
    if value == b:
        print(key + 1)
|
[
"rkdtmddnjs97@gmail.com"
] |
rkdtmddnjs97@gmail.com
|
1509d9b68521df12375cdeb84a7ebe5c1ec96e76
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/elasticache_write_f/tags-to-resource_add.py
|
9d6e980bae8076a92de85796496e725ed2d6d06f
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    """
    write_parameter("elasticache", "add-tags-to-resource")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
bb15f4a2be885270cef4e62640a269481b8a03d3
|
8da4a294cd72d36f1f890148b859eee88fe270ac
|
/dev/local/data/external.py
|
1b49501bc88cbb32b8c1f668d3ef04f8e21d4687
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
neerajshukla1911/fastai_dev
|
3bf323e80d5594fb4bc543fe73de553e036b2ec2
|
768a5dac135a0f2ea91bc645ba279d3b1c5fd649
|
refs/heads/master
| 2020-07-16T00:47:11.761475
| 2019-09-01T14:10:32
| 2019-09-01T14:10:32
| 205,684,686
| 0
| 0
|
Apache-2.0
| 2019-09-01T14:04:13
| 2019-09-01T14:04:12
| null |
UTF-8
|
Python
| false
| false
| 8,016
|
py
|
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/04_data_external.ipynb (unless otherwise specified).
__all__ = ['download_url', 'URLs', 'get_path', 'ConfigKey', 'download_data', 'untar_data']
from ..imports import *
from ..test import *
from ..core import *
def download_url(url, dest, overwrite=False, pbar=None, show_progress=True, chunk_size=1024*1024,
                 timeout=4, retries=5):
    "Download `url` to `dest` unless it exists and not `overwrite`"
    if os.path.exists(dest) and not overwrite: return

    s = requests.Session()
    s.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
    u = s.get(url, stream=True, timeout=timeout)
    try: file_size = int(u.headers["Content-Length"])
    except: show_progress = False

    with open(dest, 'wb') as f:
        nbytes = 0
        if show_progress:
            pbar = progress_bar(range(file_size), auto_update=False, leave=False, parent=pbar)
        try:
            for chunk in u.iter_content(chunk_size=chunk_size):
                nbytes += len(chunk)
                if show_progress: pbar.update(nbytes)
                f.write(chunk)
        except requests.exceptions.ConnectionError as e:
            fname = url.split('/')[-1]
            from fastai.datasets import Config
            data_dir = dest.parent
            print(f'\n Download of {url} has failed after {retries} retries\n'
                  f' Fix the download manually:\n'
                  f'$ mkdir -p {data_dir}\n'
                  f'$ cd {data_dir}\n'
                  f'$ wget -c {url}\n'
                  f'$ tar -zxvf {fname}\n'
                  f' And re-run your code once the download is successful\n')


class URLs():
    "Global constants for dataset and model URLs."
    LOCAL_PATH = Path.cwd()
    URL = 'http://files.fast.ai/data/examples/'
    MDL = 'http://files.fast.ai/models/'
    S3 = 'https://s3.amazonaws.com/fast-ai-'

    S3_IMAGE = f'{S3}imageclas/'
    S3_IMAGELOC = f'{S3}imagelocal/'
    S3_NLP = f'{S3}nlp/'
    S3_COCO = f'{S3}coco/'
    S3_MODEL = f'{S3}modelzoo/'

    # main datasets
    ADULT_SAMPLE = f'{URL}adult_sample.tgz'
    BIWI_SAMPLE = f'{URL}biwi_sample.tgz'
    CIFAR = f'{URL}cifar10.tgz'
    COCO_SAMPLE = f'{S3_COCO}coco_sample.tgz'
    COCO_TINY = f'{URL}coco_tiny.tgz'
    HUMAN_NUMBERS = f'{URL}human_numbers.tgz'
    IMDB = f'{S3_NLP}imdb.tgz'
    IMDB_SAMPLE = f'{URL}imdb_sample.tgz'
    ML_SAMPLE = f'{URL}movie_lens_sample.tgz'
    MNIST_SAMPLE = f'{URL}mnist_sample.tgz'
    MNIST_TINY = f'{URL}mnist_tiny.tgz'
    MNIST_VAR_SIZE_TINY = f'{S3_IMAGE}mnist_var_size_tiny.tgz'
    PLANET_SAMPLE = f'{URL}planet_sample.tgz'
    PLANET_TINY = f'{URL}planet_tiny.tgz'
    IMAGENETTE = f'{S3_IMAGE}imagenette.tgz'
    IMAGENETTE_160 = f'{S3_IMAGE}imagenette-160.tgz'
    IMAGENETTE_320 = f'{S3_IMAGE}imagenette-320.tgz'
    IMAGEWOOF = f'{S3_IMAGE}imagewoof.tgz'
    IMAGEWOOF_160 = f'{S3_IMAGE}imagewoof-160.tgz'
    IMAGEWOOF_320 = f'{S3_IMAGE}imagewoof-320.tgz'

    # kaggle competitions download dogs-vs-cats -p {DOGS.absolute()}
    DOGS = f'{URL}dogscats.tgz'

    # image classification datasets
    CALTECH_101 = f'{S3_IMAGE}caltech_101.tgz'
    CARS = f'{S3_IMAGE}stanford-cars.tgz'
    CIFAR_100 = f'{S3_IMAGE}cifar100.tgz'
    CUB_200_2011 = f'{S3_IMAGE}CUB_200_2011.tgz'
    FLOWERS = f'{S3_IMAGE}oxford-102-flowers.tgz'
    FOOD = f'{S3_IMAGE}food-101.tgz'
    MNIST = f'{S3_IMAGE}mnist_png.tgz'
    PETS = f'{S3_IMAGE}oxford-iiit-pet.tgz'

    # NLP datasets
    AG_NEWS = f'{S3_NLP}ag_news_csv.tgz'
    AMAZON_REVIEWS = f'{S3_NLP}amazon_review_full_csv.tgz'
    AMAZON_REVIEWS_POLARITY = f'{S3_NLP}amazon_review_polarity_csv.tgz'
    DBPEDIA = f'{S3_NLP}dbpedia_csv.tgz'
    MT_ENG_FRA = f'{S3_NLP}giga-fren.tgz'
    SOGOU_NEWS = f'{S3_NLP}sogou_news_csv.tgz'
    WIKITEXT = f'{S3_NLP}wikitext-103.tgz'
    WIKITEXT_TINY = f'{S3_NLP}wikitext-2.tgz'
    YAHOO_ANSWERS = f'{S3_NLP}yahoo_answers_csv.tgz'
    YELP_REVIEWS = f'{S3_NLP}yelp_review_full_csv.tgz'
    YELP_REVIEWS_POLARITY = f'{S3_NLP}yelp_review_polarity_csv.tgz'

    # Image localization datasets
    BIWI_HEAD_POSE = f"{S3_IMAGELOC}biwi_head_pose.tgz"
    CAMVID = f'{S3_IMAGELOC}camvid.tgz'
    CAMVID_TINY = f'{URL}camvid_tiny.tgz'
    LSUN_BEDROOMS = f'{S3_IMAGE}bedroom.tgz'
    PASCAL_2007 = f'{S3_IMAGELOC}pascal_2007.tgz'
    PASCAL_2012 = f'{S3_IMAGELOC}pascal_2012.tgz'

    #Pretrained models
    OPENAI_TRANSFORMER = f'{S3_MODEL}transformer.tgz'
    WT103_FWD = f'{S3_MODEL}wt103-fwd'
    WT103_BWD = f'{S3_MODEL}wt103-bwd'


def _get_config():
    config_path = Path(os.getenv('FASTAI_HOME', '~/.fastai')).expanduser()
    config_file = config_path/'config.yml'
    if config_file.exists():
        with open(config_file, 'r') as yaml_file:
            config = yaml.safe_load(yaml_file)
            if 'version' in config and config['version'] == 1: return config
    else: config = {}
    #File inexistent or wrong version -> going to default
    config = {'data_path': str(config_path/'data'),
              'archive_path': str(config_path/'archive'),
              'model_path': str(config_path/'models'),
              'version': 1}
    with open(config_file, 'w') as yaml_file:
        yaml.dump(config, yaml_file, default_flow_style=False)
    return config


ConfigKey = Enum('ConfigKey', 'Data Archive Model')

def get_path(c_key=ConfigKey.Data):
    return Path(_get_config()[f"{c_key.name.lower()}_path"])

def _url2path(url, c_key=ConfigKey.Archive):
    fname = url.split('/')[-1]
    local_path = URLs.LOCAL_PATH/('models' if c_key==ConfigKey.Model else 'data')/fname
    if local_path.exists(): return local_path
    return get_path(c_key)/fname

def download_data(url, fname=None, c_key=ConfigKey.Archive, force_download=False):
    "Download `url` to `fname`."
    fname = Path(fname or _url2path(url, c_key=c_key))
    fname.parent.mkdir(parents=True, exist_ok=True)
    if not fname.exists() or force_download:
        print(f'Downloading {url}')
        download_url(url, fname, overwrite=force_download)
    return fname

def _get_check(url):
    checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
    return checks.get(url, None)

def _check_file(fname):
    size = os.path.getsize(fname)
    with open(fname, "rb") as f:
        hash_nb = hashlib.md5(f.read(2**20)).hexdigest()
    return [size,hash_nb]

def _add_check(url, fname):
    "Internal function to update the internal check file with `url` and check on `fname`."
    checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
    checks[url] = _check_file(fname)
    json.dump(checks, open(Path(__file__).parent/'checks.txt', 'w'), indent=2)

def untar_data(url, fname=None, dest=None, c_key=ConfigKey.Data, force_download=False):
    "Download `url` to `fname` if `dest` doesn't exist, and un-tgz to folder `dest`."
    default_dest = _url2path(url, c_key=c_key).with_suffix('')
    dest = default_dest if dest is None else Path(dest)/default_dest.name
    fname = Path(fname or _url2path(url))
    if fname.exists() and _get_check(url) and _check_file(fname) != _get_check(url):
        print("A new version of this is available, downloading...")
        force_download = True
    if force_download:
        if fname.exists(): os.remove(fname)
        if dest.exists(): shutil.rmtree(dest)
    if not dest.exists():
        fname = download_data(url, fname=fname, c_key=c_key)
        if _get_check(url) and _check_file(fname) != _get_check(url):
            print(f"File downloaded is broken. Remove {fname} and try again.")
        tarfile.open(fname, 'r:gz').extractall(dest.parent)
    return dest
|
[
"sylvain.gugger@gmail.com"
] |
sylvain.gugger@gmail.com
|
d630b1dd7a6eb1c30f516c89ef5c64aaeb65c260
|
6a0b5dd52577003eddeb45d1ccc173ff6de7e7ca
|
/director/users/api_v1_urls.py
|
5721468341cbb8e8260894fc59e968198546b117
|
[
"Apache-2.0"
] |
permissive
|
paulolimac/hub
|
98ab1fd0c60ccc28a5a887dc486119da066ced36
|
ce5d86343e340ff0bd734e49a48d0745ae88144d
|
refs/heads/master
| 2020-04-03T13:21:20.046117
| 2018-10-29T23:49:37
| 2018-10-29T23:49:37
| 155,282,253
| 0
| 0
| null | 2018-10-29T21:14:19
| 2018-10-29T21:14:18
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
from django.urls import path
from users.api_views import UserSearch
urlpatterns = [
    path('search', UserSearch.as_view())
]
|
[
"ben@beneboy.co.nz"
] |
ben@beneboy.co.nz
|
64724bb718e51f021275096a77275b11b6481ac4
|
4178f2916d2da72cbb45454fbed941dcfe8f6460
|
/POM_test/TestCase/Debug2.py
|
a6a55e7b2f21f479f7cacf5f29ff779122b450fc
|
[] |
no_license
|
maxcrup007/Selenium_Webdriver_Python
|
15196cb04ba5cafdc5b776c26d167f0b48fb0e14
|
6be7f0b9f53df1ba592957029e8a4d22e409d1c4
|
refs/heads/main
| 2023-03-24T21:04:31.976451
| 2021-03-22T09:16:04
| 2021-03-22T09:16:04
| 349,379,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
import os
os.chdir('C:/Users/voraw/Desktop/Working/Gaming & Hobby/Ma11/miKu-Doujin/Chane name')
print(os.getcwd())
COUNT = 1
def change_name():
    global COUNT
    COUNT = COUNT + 1


for f in os.listdir():
    f_name, f_ext = os.path.splitext(f)
    if COUNT >= 100:
        f_name = str(COUNT)
    elif COUNT >= 10 and COUNT < 100:
        f_name = "0" + str(COUNT)
    else:
        f_name = "00" + str(COUNT)
    change_name()

    new_name = '{} {}'.format(f_name, f_ext)
    os.rename(f, new_name)

#### Source Code
# import os
#
# os.chdir('D://Geeksforgeeks')
# print(os.getcwd())
# COUNT = 1
#
#
# # Function to increment count
# # to make the files sorted.
# def increment():
#     global COUNT
#     COUNT = COUNT + 1
#
#
# for f in os.listdir():
#     f_name, f_ext = os.path.splitext(f)
#     f_name = "geek" + str(COUNT)
#     increment()
#
#     new_name = '{} {}'.format(f_name, f_ext)
#     os.rename(f, new_name)
|
[
"36732487+maxcrup007@users.noreply.github.com"
] |
36732487+maxcrup007@users.noreply.github.com
|
887821cb1d3663aec05aded7090a12fbb0863b88
|
d9c95cd0efad0788bf17672f6a4ec3b29cfd2e86
|
/disturbance/migrations/0163_apiarysiteonapproval_site_category.py
|
b2f18b430255810ee5d6bbd518185236c84db914
|
[
"Apache-2.0"
] |
permissive
|
Djandwich/disturbance
|
cb1d25701b23414cd91e3ac5b0207618cd03a7e5
|
b1ba1404b9ca7c941891ea42c00b9ff9bcc41237
|
refs/heads/master
| 2023-05-05T19:52:36.124923
| 2021-06-03T06:37:53
| 2021-06-03T06:37:53
| 259,816,629
| 1
| 1
|
NOASSERTION
| 2021-06-03T09:46:46
| 2020-04-29T03:39:33
|
Python
|
UTF-8
|
Python
| false
| false
| 592
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-09-17 04:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    dependencies = [
        ('disturbance', '0162_auto_20200917_1209'),
    ]

    operations = [
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='site_category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='disturbance.SiteCategory'),
        ),
    ]
|
[
"katsufumi.shibata@dbca.wa.gov.au"
] |
katsufumi.shibata@dbca.wa.gov.au
|
782f7ad84a757286a8685de5ded3aa137187a6e8
|
f042383cbc9f10837ebdb5b9033a0263f6a43698
|
/examples/docs_snippets/docs_snippets/intro_tutorial/basics/e04_quality/custom_types_2.py
|
70e3912ca669f3083987bf7f01f0b16146f8993a
|
[
"Apache-2.0"
] |
permissive
|
helloworld/dagster
|
664e6636d68bafa5151418c9d4316a565717f5ee
|
779e27faa3e46b7d043cb9624617e655a9ed570c
|
refs/heads/master
| 2022-03-24T12:15:36.626783
| 2022-02-26T01:34:29
| 2022-02-26T01:34:29
| 464,019,094
| 0
| 0
|
Apache-2.0
| 2022-03-05T20:23:14
| 2022-02-27T02:38:17
| null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
import requests
from dagster import DagsterType, In, Out, get_dagster_logger, job, op
# start_custom_types_2_marker_0
def is_list_of_dicts(_, value):
    return isinstance(value, list) and all(
        isinstance(element, dict) for element in value
    )


SimpleDataFrame = DagsterType(
    name="SimpleDataFrame",
    type_check_fn=is_list_of_dicts,
    description="A naive representation of a data frame, e.g., as returned by csv.DictReader.",
)
# end_custom_types_2_marker_0


# start_custom_types_2_marker_1
@op(out=Out(SimpleDataFrame))
def bad_download_csv():
    response = requests.get("https://docs.dagster.io/assets/cereal.csv")
    lines = response.text.split("\n")
    get_dagster_logger().info(f"Read {len(lines)} lines")
    return ["not_a_dict"]
# end_custom_types_2_marker_1


@op(ins={"cereals": In(SimpleDataFrame)})
def sort_by_calories(cereals):
    sorted_cereals = sorted(cereals, key=lambda cereal: cereal["calories"])
    get_dagster_logger().info(
        f'Most caloric cereal: {sorted_cereals[-1]["name"]}'
    )


@job
def custom_type_job():
    sort_by_calories(bad_download_csv())
|
[
"noreply@github.com"
] |
helloworld.noreply@github.com
|
4cd457aae559324c28a76e8ff71688100483e7f2
|
f7a48634de139b7f5585c2bf3d3014605130428c
|
/ebedke/plugins/kompot.py
|
c41f831fd3d7bba415606e8c90bfa14c14c43220
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ijanos/ebedke
|
b72dcdef63c575eb4090661bab2e2c7a7864ab76
|
9a0f91cc6536a78d7da9aca1fab22924a56d38e2
|
refs/heads/master
| 2023-04-20T19:36:03.928669
| 2021-01-24T11:35:15
| 2021-01-24T11:35:15
| 99,848,492
| 35
| 11
|
Apache-2.0
| 2023-03-27T22:36:27
| 2017-08-09T20:08:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
from datetime import datetime, timedelta
from ebedke.utils.date import days_lower, on_workdays
from ebedke.utils.text import pattern_slice
from ebedke.utils import facebook
from ebedke.pluginmanager import EbedkePlugin
FB_PAGE = "https://www.facebook.com/pg/KompotBisztro/posts/"
FB_ID = "405687736167829"
@on_workdays
def getMenu(today):
    day = today.weekday()
    is_this_week = lambda date: datetime.strptime(date, '%Y-%m-%dT%H:%M:%S%z').date() > today.date() - timedelta(days=7)
    is_today = lambda date: datetime.strptime(date, '%Y-%m-%dT%H:%M:%S%z').date() == today.date()
    ignore_hashtags = lambda post: " ".join(word.lower() for word in post.split() if word[0] != "#")
    daily_menu_filter = lambda post: is_today(post['created_time']) \
                                     and "menü" in post['message'].lower()
    weekly_menu_filter = lambda post: is_this_week(post['created_time']) \
                                      and days_lower[day] in ignore_hashtags(post['message'])
    weekly_menu = facebook.get_filtered_post(FB_ID, weekly_menu_filter)
    if weekly_menu:
        menu = pattern_slice(weekly_menu.splitlines(), [days_lower[day]], days_lower + ["sütiket", "#", "jó étvágyat", "mai menü"])
    else:
        menu_post = facebook.get_filtered_post(FB_ID, daily_menu_filter).splitlines()
        menu = []
        for i, line in enumerate(menu_post):
            if "A:" in line:
                menu = list((menu_post[i - 1], menu_post[i], menu_post[i + 1]))
                break
    return menu


plugin = EbedkePlugin(
    enabled=True,
    groups=["corvin"],
    name='Kompót',
    id='kp',
    url=FB_PAGE,
    downloader=getMenu,
    ttl=timedelta(hours=24),
    cards=['szep'],
    coord=(47.485753, 19.075932)
)
|
[
"ijanos@gmail.com"
] |
ijanos@gmail.com
|
931300b3c495baff8b052bb61df42f03e9e0e772
|
1a758ef862f733d98ddd8ebc8ade5cefd95c24f2
|
/customers/migrations/0013_facebookcustomer.py
|
3abb34e6d2d0c3b6e1e3df3ca8e899dc7fe394e5
|
[] |
no_license
|
ajajul/ReactJS_Python
|
f116b35394666c5b3f2419eb5d8d7aeb077d4a24
|
08310d56fa88f326ddbfdd4b189f2a3a71f76d99
|
refs/heads/master
| 2020-03-19T03:16:57.510672
| 2018-06-01T10:36:36
| 2018-06-01T10:36:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('customers', '0012_auto_20160802_1628'),
    ]

    operations = [
        migrations.CreateModel(
            name='FacebookCustomer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('facebook_id', models.CharField(max_length=255, verbose_name=b'Facebook ID')),
                ('first_name', models.CharField(max_length=255, verbose_name=b'First name')),
                ('last_name', models.CharField(max_length=255, verbose_name=b'Last name')),
                ('email', models.EmailField(unique=True, max_length=255, verbose_name=b'Email address')),
                ('gender', models.CharField(max_length=255, verbose_name=b'Gender')),
                ('customer', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
    ]
|
[
"web.expert@aol.com"
] |
web.expert@aol.com
|
959d08c86faa5429545a51552270f33743c50c74
|
3485140792e9bae67499fef138d50d046cccb256
|
/datamining/AprioriProject/util/ProgressBar.py
|
5858e003c7264ec53dab5ae6a814fc400029c84a
|
[] |
no_license
|
ALREstevam/TopicosBD-DataMining-IBGE-Apriori
|
dc14a50ca8f3046b8125a183cdcb4e99d3c4c616
|
5bf8dee35df0f22902f7816b8738e585fdca3410
|
refs/heads/master
| 2020-03-17T04:38:08.111880
| 2018-06-14T12:14:11
| 2018-06-14T12:14:11
| 133,282,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
import sys
class ProgressBar(object):
    DEFAULT_BAR_LENGTH = 65
    DEFAULT_CHAR_ON = '█'
    DEFAULT_CHAR_OFF = '░'

    def __init__(self, end, start=0):
        self.end = end
        self.start = start
        self._barLength = self.__class__.DEFAULT_BAR_LENGTH

        self.setLevel(self.start)
        self._plotted = False

    def setLevel(self, level):
        self._level = level
        if level < self.start: self._level = self.start
        if level > self.end: self._level = self.end

        value = float(self.end - self.start)
        if (value == 0): value = 1
        self._ratio = float(self._level - self.start) / value
        self._levelChars = int(self._ratio * self._barLength)

    def plotProgress(self):
        sys.stdout.write("\r %3i%% [%s%s]" % (
            int(self._ratio * 100.0),
            self.__class__.DEFAULT_CHAR_ON * int(self._levelChars),
            self.__class__.DEFAULT_CHAR_OFF * int(self._barLength - self._levelChars),
        ))
        sys.stdout.flush()
        self._plotted = True

    def setAndPlot(self, level):
        oldChars = self._levelChars
        self.setLevel(level)
        if (not self._plotted) or (oldChars != self._levelChars):
            self.plotProgress()

    def __add__(self, other):
        assert type(other) in [float, int], "can only add a number"
        self.setAndPlot(self._level + other)
        return self

    def __sub__(self, other):
        return self.__add__(-other)

    def __iadd__(self, other):
        return self.__add__(other)

    def __isub__(self, other):
        return self.__add__(-other)

    def __del__(self):
        sys.stdout.write("\n")


'''
import time

for j in range(5):
    count = 1000
    pb = ProgressBar(count)
    # pb.plotProgress()

    for i in range(0, count):
        pb += 1
        # pb.setAndPlot(i + 1)
        time.sleep(0.01)

    print('\n\nSTEP {}'.format(j))
'''
|
[
"a166348@g.unicamp.com"
] |
a166348@g.unicamp.com
|
6e210328858f6452857a8f09f3486b78b2ddc68c
|
51fba32aca3114a6897e11b271ee29d3b038056c
|
/tests/08_test_patch.py
|
bc9a507c707597c9dbc29ad814635d12538e8e77
|
[] |
no_license
|
lamby/git-buildpackage
|
b2fbf08b93ed0520c8e5ba0c3eb66f15d7a64a41
|
c4bc6561c788f71b5131d0bd8e92478e83808200
|
refs/heads/master
| 2021-01-02T23:04:26.941635
| 2017-08-05T23:46:58
| 2017-08-06T00:55:37
| 75,486,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
# vim: set fileencoding=utf-8 :
"""Test L{Patch} class"""
from . import context # noqa: 401
import os
import unittest
from gbp.patch_series import Patch
class TestPatch(unittest.TestCase):
    data_dir = os.path.splitext(__file__)[0] + '_data'

    def test_filename(self):
        """Get patch information from the filename"""
        p = Patch(os.path.join(self.data_dir, "doesnotexist.diff"))
        self.assertEqual('doesnotexist', p.subject)
        self.assertEqual({}, p.info)
        p = Patch(os.path.join(self.data_dir, "doesnotexist.patch"))
        self.assertEqual('doesnotexist', p.subject)
        p = Patch(os.path.join(self.data_dir, "doesnotexist"))
        self.assertEqual('doesnotexist', p.subject)
        self.assertEqual(None, p.author)
        self.assertEqual(None, p.email)
        self.assertEqual(None, p.date)

    def test_header(self):
        """Get the patch information from a patch header"""
        patchfile = os.path.join(self.data_dir, "patch1.diff")
        self.assertTrue(os.path.exists(patchfile))
        p = Patch(patchfile)
        self.assertEqual('This is patch1', p.subject)
        self.assertEqual("foo", p.author)
        self.assertEqual("foo@example.com", p.email)
        self.assertEqual("This is the long description.\n"
                         "It can span several lines.\n",
                         p.long_desc)
        self.assertEqual('Sat, 24 Dec 2011 12:05:53 +0100', p.date)
|
[
"agx@sigxcpu.org"
] |
agx@sigxcpu.org
|
12d7d0236d58487ba5f9d74bafeeeaeb487401aa
|
3940b4a507789e1fbbaffeb200149aee215f655a
|
/lc/112.PathSum.py
|
14e465def2db1577e4b4e9af3851db0a471c4446
|
[] |
no_license
|
akimi-yano/algorithm-practice
|
15f52022ec79542d218c6f901a54396a62080445
|
1abc28919abb55b93d3879860ac9c1297d493d09
|
refs/heads/master
| 2023-06-11T13:17:56.971791
| 2023-06-10T05:17:56
| 2023-06-10T05:17:56
| 239,395,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
# 112. Path Sum
# Easy
# Given the root of a binary tree and an integer targetSum, return true if the tree has a root-to-leaf path such that adding up all the values along the path equals targetSum.
# A leaf is a node with no children.
# Example 1:
# Input: root = [5,4,8,11,null,13,4,7,2,null,null,null,1], targetSum = 22
# Output: true
# Example 2:
# Input: root = [1,2,3], targetSum = 5
# Output: false
# Example 3:
# Input: root = [1,2], targetSum = 0
# Output: false
# Constraints:
# The number of nodes in the tree is in the range [0, 5000].
# -1000 <= Node.val <= 1000
# -1000 <= targetSum <= 1000
# This solution works!:

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def hasPathSum(self, root: TreeNode, targetSum: int) -> bool:
        def helper(cur, total):
            nonlocal targetSum
            total += cur.val
            if not cur.left and not cur.right:
                return targetSum == total
            if cur.left and helper(cur.left, total):
                return True
            if cur.right and helper(cur.right, total):
                return True
            return False

        if not root:
            return False
        return helper(root, 0)


# This solution also works!:

# Definition for a binary tree node
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution:
    # @param root, a tree node
    # @param sum, an integer
    # @return a boolean
    # 1:27
    def hasPathSum(self, root, sum):
        if not root:
            return False
        if not root.left and not root.right and root.val == sum:
            return True
        sum -= root.val
        return self.hasPathSum(root.left, sum) or self.hasPathSum(root.right, sum)
|
[
"akimi.mimi.yano@gmail.com"
] |
akimi.mimi.yano@gmail.com
|
e87acb9a972fbc841b375cd19b7a3397f02cb1d5
|
920bc59a07adc65569ae2d6736388519b43cfa23
|
/business_logic/blockly/build.py
|
9f732ce12acd96a8f9d521cb445158fd5990988a
|
[
"MIT"
] |
permissive
|
glafira-ivanova/django-business-logic
|
e924ccabac6b5219fd87dabe60c6e0ecfaa40303
|
7cc0d0475815082e75a16201daf9865d08d3f281
|
refs/heads/master
| 2021-01-11T05:35:35.193191
| 2016-10-24T12:59:04
| 2016-10-24T12:59:04
| 71,771,078
| 0
| 0
| null | 2016-10-24T09:03:42
| 2016-10-24T09:03:42
| null |
UTF-8
|
Python
| false
| false
| 5,305
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import inspect
from lxml import etree
from django.db.models import Model
from ..models import *
from .data import OPERATOR_TABLE
from .exceptions import BlocklyXmlBuilderException
def camel_case_to_snake_case(name):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()


class BlocklyXmlBuilder(NodeCacheHolder):

    def build(self, tree_root):
        xml = etree.Element('xml')
        self.visit(tree_root, parent_xml=xml)
        return etree.tostring(xml, pretty_print=True).decode('utf-8')

    def visit(self, node, parent_xml):
        content_object = node.content_object
        if content_object is None:
            last_xml = None
            for child in self.get_children(node):
                if last_xml is not None:
                    next = etree.Element('next')
                    last_xml.append(next)
                    parent_xml = next
                last_xml = self.visit(child, parent_xml)
            return

        for cls in inspect.getmro(content_object.__class__):
            if cls == Model:
                break
            method_name = 'visit_{}'.format(camel_case_to_snake_case(cls.__name__))
            method = getattr(self, method_name, None)
            if not method:
                continue
            node_xml = method(node, parent_xml)
            if not getattr(method, 'process_children', None):
                for child in self.get_children(node):
                    self.visit(child, parent_xml)
            return node_xml

    def visit_constant(self, node, parent_xml):
        block_type = {
            NumberConstant: 'math_number',
            StringConstant: 'text',
            BooleanConstant: 'logic_boolean',
        }
        field_name = {
            NumberConstant: 'NUM',
            StringConstant: 'TEXT',
            BooleanConstant: 'BOOL',
        }
        content_object = node.content_object
        cls = content_object.__class__
        block = etree.SubElement(parent_xml, 'block', type=block_type[cls])
        field = etree.SubElement(block, 'field', name=field_name[cls])
        if isinstance(content_object, BooleanConstant):
            field.text = str(content_object).upper()
        else:
            field.text = str(content_object)
        return block

    def visit_variable(self, node, parent_xml):
        variables_get_block = etree.SubElement(parent_xml, 'block', type='variables_get')
        self._visit_variable(node, variables_get_block)

    def visit_assignment(self, node, parent_xml):
        lhs_node, rhs_node = self.get_children(node)
        variables_set = etree.SubElement(parent_xml, 'block', type='variables_set')
        self._visit_variable(lhs_node, variables_set)
        value = etree.SubElement(variables_set, 'value', name='VALUE')
        self.visit(rhs_node, value)
        return variables_set

    visit_assignment.process_children = True

    def _visit_variable(self, node, parent_xml):
        variable = node.content_object
        field = etree.SubElement(parent_xml, 'field', name='VAR')
        field.text = variable.definition.name

    def visit_binary_operator(self, node, parent_xml):
        # determine block_type
        operator = node.content_object.operator
        block_type = None
        table = None
        for block_type, table in OPERATOR_TABLE.items():
            if operator in table:
                break
        else:
            raise BlocklyXmlBuilderException('Invalid Operator: {}'.format(operator))

        block = etree.SubElement(parent_xml, 'block', type=block_type)
        field = etree.SubElement(block, 'field', name='OP')
        field.text = table[operator]
        lhs_node, rhs_node = self.get_children(node)
        for value_name, child_node in (('A', lhs_node), ('B', rhs_node)):
            value = etree.SubElement(block, 'value', name=value_name)
            self.visit(child_node, value)
        return block

    visit_binary_operator.process_children = True

    def visit_if_statement(self, node, parent_xml):
        children = self.get_children(node)
        block = etree.SubElement(parent_xml, 'block', type='controls_if')
        if len(children) > 2:
            mutation = etree.SubElement(block, 'mutation')
            if len(children) % 2:
                mutation.set('else', '1')
            elifs = (len(children) - 2 - len(children) % 2) / 2
            if elifs:
                mutation.set('elseif', str(int(elifs)))
        for i, pair in enumerate(pairs(children)):
            # last "else" branch
            if len(pair) == 1:
                statement = etree.SubElement(block, 'statement', name='ELSE')
                self.visit(pair[0], statement)
                break
            if_condition = pair[0]
            if_value = etree.SubElement(block, 'value', name='IF{}'.format(i))
            self.visit(if_condition, if_value)
            statement = etree.SubElement(block, 'statement', name='DO{}'.format(i))
            self.visit(pair[1], statement)

    visit_if_statement.process_children = True
def tree_to_blockly_xml(tree_root):
return BlocklyXmlBuilder().build(tree_root)
def blockly_xml_to_tree(xml):
pass
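# A quick illustration of the visitor dispatch used by BlocklyXmlBuilder
# above: a node's model class name is snake_cased and prefixed with
# 'visit_' to locate its handler (sketch; plain doctest-style checks).
assert camel_case_to_snake_case('IfStatement') == 'if_statement'
assert camel_case_to_snake_case('BinaryOperator') == 'binary_operator'
assert 'visit_{}'.format(camel_case_to_snake_case('IfStatement')) == 'visit_if_statement'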
|
[
"dgk@dgk.su"
] |
dgk@dgk.su
|
1549dbbffe60bde02cbe4a4dc8ded721bb9ac421
|
f56346f16477de58c5483ddbab63d3bff15801c6
|
/python_source/graph-tool/example2.py
|
a7571e1e5d9faf83e8928fe282b25427d72ff25a
|
[] |
no_license
|
jerryhan88/py_source
|
ca6afb6582777a444a19e33c832b638fc9e2fd52
|
e1500b1d2d4fa5f30e278422c5b1afa1d777f57f
|
refs/heads/master
| 2020-04-06T13:12:34.814275
| 2016-10-06T09:30:50
| 2016-10-06T09:30:50
| 40,874,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,517
|
py
|
#! /usr/bin/env python
# We will need some things from several places
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info < (3,):
range = xrange
import os
from pylab import * # for plotting
from numpy.random import * # for random sampling
seed(42)
# We need to import the graph_tool module itself
from graph_tool.all import *
def price_network():
# let's construct a Price network (the one that existed before Barabasi). It is
# a directed network, with preferential attachment. The algorithm below is
# very naive, and a bit slow, but quite simple.
# We start with an empty, directed graph
g = Graph()
# We want also to keep the age information for each vertex and edge. For that
# let's create some property maps
v_age = g.new_vertex_property("int")
e_age = g.new_edge_property("int")
# The final size of the network
N = 100
# We have to start with one vertex
v = g.add_vertex()
v_age[v] = 0
# we will keep a list of the vertices. The number of times a vertex is in this
# list will give the probability of it being selected.
vlist = [v]
# let's now add the new edges and vertices
for i in range(1, N):
# create our new vertex
v = g.add_vertex()
v_age[v] = i
# we need to sample a new vertex to be the target, based on its in-degree +
# 1. For that, we simply randomly sample it from vlist.
i = randint(0, len(vlist))
target = vlist[i]
# add edge
e = g.add_edge(v, target)
e_age[e] = i
# put v and target in the list
vlist.append(target)
vlist.append(v)
# now we have a graph!
# let's do a random walk on the graph and print the age of the vertices we find,
# just for fun.
v = g.vertex(randint(0, g.num_vertices()))
while True:
print("vertex:", int(v), "in-degree:", v.in_degree(), "out-degree:",
v.out_degree(), "age:", v_age[v])
if v.out_degree() == 0:
print("Nowhere else to go... We found the main hub!")
break
n_list = []
for w in v.out_neighbours():
n_list.append(w)
v = n_list[randint(0, len(n_list))]
# let's save our graph for posterity. We want to save the age properties as
# well... To do this, they must become "internal" properties:
g.vertex_properties["age"] = v_age
g.edge_properties["age"] = e_age
# now we can save it
g.save("price.xml.gz")
# Let's plot its in-degree distribution
in_hist = vertex_hist(g, "in")
y = in_hist[0]
err = sqrt(in_hist[0])
err[err >= y] = y[err >= y] - 1e-2
figure(figsize=(6,4))
errorbar(in_hist[1][:-1], in_hist[0], fmt="o", yerr=err,
label="in")
gca().set_yscale("log")
gca().set_xscale("log")
gca().set_ylim(1e-1, 1e5)
gca().set_xlim(0.8, 1e3)
subplots_adjust(left=0.2, bottom=0.2)
xlabel("$k_{in}$")
ylabel("$NP(k_{in})$")
tight_layout()
savefig("price-deg-dist.pdf")
savefig("price-deg-dist.png")
price_network()
g = load_graph("price.xml.gz")
age = g.vertex_properties["age"]
pos = sfdp_layout(g)
graph_draw(g, pos, output_size=(1000, 1000), vertex_color=[1,1,1,0],
vertex_fill_color=age, vertex_size=1, edge_pen_width=1.2,
vcmap=matplotlib.cm.gist_heat_r, output="price.png")
|
[
"jerryhan88@gmail.com"
] |
jerryhan88@gmail.com
|
6e248c7365a903010c866c0f556d026a124c56af
|
9c636aeed2fc0a591507fcf0a8a6124fae710c9b
|
/insertLL.py
|
3d8c20f19faad87ef2b54e0fea3e0ad01926eb92
|
[] |
no_license
|
ilkaynazli/challenges
|
4b2d1ac847b1761f98183457f8ea5bac6556eeff
|
f7c165fedbdc9811fb7f1d2a43c797f5b5ac5322
|
refs/heads/master
| 2020-04-07T01:03:18.625568
| 2019-04-25T19:40:22
| 2019-04-25T19:40:22
| 157,928,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
"""
Given a node from a cyclic linked list which is sorted in ascending order, write a function to insert a value into the list such that it remains a cyclic sorted list. The given node can be a reference to any single node in the list, and may not be necessarily the smallest value in the cyclic list.
If there are multiple suitable places for insertion, you may choose any place to insert the new value. After the insertion, the cyclic list should remain sorted.
If the list is empty (i.e., given node is null), you should create a new single cyclic list and return the reference to that single node. Otherwise, you should return the original given node.
"""
"""
# Definition for a Node.
class Node:
def __init__(self, val, next):
self.val = val
self.next = next
"""
class Solution:
def insert(self, head: 'Node', insertVal: 'int') -> 'Node':
new = Node(insertVal, None)
def insert_node(cur, new):
new.next = cur.next
cur.next = new
        if head is None:
            new.next = new  # a single node must point to itself to stay cyclic
            return new
current = head
while current:
if (current.val < new.val and current.next.val >= new.val):
insert_node(current, new)
break
if current.next.val < current.val:
if current.next.val >= new.val or current.val < new.val:
insert_node(current, new)
break
current = current.next
if current.next == head:
insert_node(current, new)
break
return head
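# A small usage sketch with a stand-in Node class (the real one is only
# described in the docstring above): build the cycle 1 -> 3 -> 4 -> 1,
# insert 2, then walk one lap to check the order.
class Node:
    def __init__(self, val, next=None):
        self.val, self.next = val, next
_a, _b, _c = Node(1), Node(3), Node(4)
_a.next, _b.next, _c.next = _b, _c, _a
_head = Solution().insert(_a, 2)
_vals, _cur = [], _head
for _ in range(4):
    _vals.append(_cur.val)
    _cur = _cur.next
print(_vals)  # expected: [1, 2, 3, 4]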
|
[
"ilkayncelik@gmail.com"
] |
ilkayncelik@gmail.com
|
040da08f98ffd9b102de0d8c3fb12f826ce7f563
|
523e24bd96d7de004a13e34a58f5c2d79c8222e0
|
/plugin.program.indigo/maintool.py
|
7517a338760875f088560711b832ad5ef1cff331
|
[] |
no_license
|
Bonitillo/Bonitillonew
|
ec281e5ab9d4fec83d88936e8d8ce32bad6a81c9
|
a8099e326dda297f66096480ec93def8a8c124a8
|
refs/heads/master
| 2022-10-13T05:39:01.126653
| 2017-03-21T16:47:23
| 2017-03-21T16:47:23
| 85,725,652
| 2
| 4
| null | 2022-09-30T21:18:58
| 2017-03-21T16:16:33
|
Python
|
UTF-8
|
Python
| false
| false
| 7,349
|
py
|
from urllib2 import Request, urlopen
import urllib2,urllib,re,os, shutil
import sys
import time,datetime
import xbmcplugin,xbmcgui,xbmc, xbmcaddon, downloader, extract, time
from libs import kodi
from libs import viewsetter
addon_id=kodi.addon_id
addon = (addon_id, sys.argv)
artwork = xbmc.translatePath(os.path.join('special://home','addons',addon_id,'art/'))
fanart = artwork+'fanart.jpg'
messages = xbmc.translatePath(os.path.join('special://home','addons',addon_id,'resources','messages/'))
execute = xbmc.executebuiltin
AddonTitle = 'Indigo'
########PATHS###############################################
addonPath=xbmcaddon.Addon(id=addon_id).getAddonInfo('path')
addonPath=xbmc.translatePath(addonPath)
xbmcPath=os.path.join(addonPath,"..","..")
KodiPath=os.path.abspath(xbmcPath)
############################################################
def tool_menu():
kodi.addItem("Clear Cache",'','clearcache',artwork+'clear_cache.png',description="Clear your device cache!")
kodi.addItem("Purge Packages",'','purgepackages',artwork+'purge_packages.png',description="Erase old addon update files!")
kodi.addItem("Wipe Addons",'','wipeaddons',artwork+'wipe_addons.png',description="Erase all your Kodi addons in one shot!")
kodi.addDir("Install Custom Keymaps",'','customkeys',artwork+'custom_keymaps.png',description="Get the best experience out of your device-specific remote control!")
if kodi.get_setting ('automain') == 'true':
kodi.addItem("Disable Auto Maintenance ",'','disablemain',artwork+'disable_AM.png',description="Disable the periodic automated erasing of cache and packages!")
if kodi.get_setting ('automain') == 'false':
kodi.addItem("Enable Auto Maintenance ",'','enablemain',artwork+'enable_AM.png',description="Enable the periodic automated erasing of cache and packages!")
if kodi.get_setting ('scriptblock') == 'true':
kodi.addItem("Disable Malicious Scripts Blocker",'','disableblocker',artwork+'disable_MSB.png',description="Disable protection against malicious scripts!")
if kodi.get_setting ('scriptblock') == 'false':
kodi.addItem("Enable Malicious Scripts Blocker",'','enableblocker',artwork+'enable_MSB.png',description="Enable protection against malicious scripts!")
viewsetter.set_view("sets")
################################
### Clear Cache ###
################################
def clear_cache():
kodi.log('CLEAR CACHE ACTIVATED')
xbmc_cache_path = os.path.join(xbmc.translatePath('special://home'), 'cache')
confirm=xbmcgui.Dialog().yesno("Please Confirm"," Please confirm that you wish to clear "," your Kodi application cache!"," ","Cancel","Clear")
if confirm:
if os.path.exists(xbmc_cache_path)==True:
for root, dirs, files in os.walk(xbmc_cache_path):
file_count = 0
file_count += len(files)
if file_count > 0:
for f in files:
try:
os.unlink(os.path.join(root, f))
except:
pass
for d in dirs:
try:
shutil.rmtree(os.path.join(root, d))
except:
pass
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, " Cache Cleared Successfully!")
xbmc.executebuiltin("Container.Refresh()")
################################
### End Clear Cache ###
################################
def purge_packages():
kodi.log('PURGE PACKAGES ACTIVATED')
packages_path = xbmc.translatePath(os.path.join('special://home/addons/packages', ''))
confirm=xbmcgui.Dialog().yesno("Please Confirm"," Please confirm that you wish to delete "," your old addon installation packages!"," ","Cancel","Delete")
if confirm:
try:
for root, dirs, files in os.walk(packages_path,topdown=False):
for name in files :
os.remove(os.path.join(root,name))
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, " Packages Folder Wiped Successfully!")
xbmc.executebuiltin("Container.Refresh()")
except:
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, "Error Deleting Packages please visit TVADDONS.AG forums")
def wipe_addons():
kodi.logInfo('WIPE ADDONS ACTIVATED')
confirm=xbmcgui.Dialog().yesno("Please Confirm"," Please confirm that you wish to uninstall "," all addons from your device!"," ","Cancel","Uninstall")
if confirm:
addonPath=xbmcaddon.Addon(id=addon_id).getAddonInfo('path')
addonPath=xbmc.translatePath(addonPath)
xbmcPath=os.path.join(addonPath,"..","..")
xbmcPath=os.path.abspath(xbmcPath);
addonpath = xbmcPath+'/addons/'
mediapath = xbmcPath+'/media/'
systempath = xbmcPath+'/system/'
userdatapath = xbmcPath+'/userdata/'
packagepath = xbmcPath+ '/addons/packages/'
try:
for root, dirs, files in os.walk(addonpath,topdown=False):
print root
if root != addonpath :
if 'plugin.program.indigo' not in root:
if 'metadata.album.universal' not in root:
if 'metadata.artists.universal' not in root:
if 'metadata.common.musicbrainz.org' not in root:
if 'service.xbmc.versioncheck' not in root:
shutil.rmtree(root)
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, "Addons Wiped Successfully! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
except:
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, "Error Wiping Addons please visit TVADDONS.AG forums")
def disable_main():
#kodi.log('DISABLE MAIN TOOL')
confirm=xbmcgui.Dialog();
if confirm.yesno('Automatic Maintenance ',"Please confirm that you wish to TURN OFF automatic maintenance! "," "):
kodi.log ("Disabled AUTOMAIN")
kodi.set_setting('automain','false')
dialog = xbmcgui.Dialog()
dialog.ok("Automatic Maintenance", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return
def enable_main():
#kodi.log('ENABLE MAIN TOOL')
confirm=xbmcgui.Dialog();
if confirm.yesno('Automatic Maintenance ',"Please confirm that you wish to TURN ON automatic maintenance! "," "):
kodi.log ("enabled AUTOMAIN")
kodi.set_setting('automain','true')
dialog = xbmcgui.Dialog()
dialog.ok("Automatic Maintenance", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return
def disable_blocker():
#kodi.log('DISABLE BLOCKER')
confirm=xbmcgui.Dialog();
if confirm.yesno('Malicious Script Blocker',"Please confirm that you wish to TURN OFF Malicious Script Blocker! "," "):
kodi.log ("Disable Script Block")
kodi.set_setting('scriptblock','false')
dialog = xbmcgui.Dialog()
dialog.ok("Script Blocker", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return
def enable_blocker():
#kodi.log('ENABLE BLOCKER')
confirm=xbmcgui.Dialog();
if confirm.yesno('Malicious Script Blocker',"Please confirm that you wish to TURN ON Malicious Script Blocker! "," "):
kodi.log ("Enable Script Block")
kodi.set_setting('scriptblock','true')
dialog = xbmcgui.Dialog()
dialog.ok("Script Blocker", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return
|
[
"richellizardo@Djs-MacBook-Pro.local"
] |
richellizardo@Djs-MacBook-Pro.local
|
3eb53c7799362cdc6c41647804734c03d62b2e4e
|
a3e52fbdfc81da3d17fee3d11b4451b330bfd592
|
/JudgeOnline/solution/hrank/algorithm/graph/shrotestReach.py
|
69b1ab517d2a6ef79f88316198cd25092699b26d
|
[] |
no_license
|
chrislucas/python
|
79633915dd0aa8724ae3dfc5a3a32053f7a4f1e0
|
d3cca374f87e134a7ddfc327a6daea983875ecac
|
refs/heads/master
| 2021-01-17T04:08:25.056580
| 2016-12-26T11:41:31
| 2016-12-26T11:41:31
| 42,319,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
'''
Created on 11 de dez de 2016
@author: C.Lucas
https://www.hackerrank.com/challenges/bfsshortreach
'''
if __name__ == '__main__':
pass
|
[
"christoffer.luccas@gmail.com"
] |
christoffer.luccas@gmail.com
|
0d5a4132c1a3c779a764137edb3a3e33431d8662
|
fa89836a6759151896a07650747462b8cda40610
|
/mse/about/migrations/0010_event_ordinal.py
|
60b846d5a892fc107b9a4bef92acd71c0bed9132
|
[] |
no_license
|
DigitalGizmo/mse21
|
334813bfebec9b78f0541744e54f218f9cc6936b
|
89f1c0f9c05cefaaa8c703732ee4e4642aecd3c9
|
refs/heads/master
| 2023-07-09T13:29:13.903900
| 2018-03-26T19:26:09
| 2018-03-26T19:26:09
| 126,878,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('about', '0009_single'),
]
operations = [
migrations.AddField(
model_name='event',
name='ordinal',
field=models.IntegerField(verbose_name='Order in Menu', default=99),
),
]
|
[
"donpublic@digitalgizmo.com"
] |
donpublic@digitalgizmo.com
|
e3205ca78ec9c5c4154d6e2bc096e8713b5deffc
|
78883afed6f95bc0aae9f48e9d20a4a7c77adb32
|
/plugins/secpicam480.py
|
d9c6855043be61e4c9b27797e8255abed9640c19
|
[] |
no_license
|
xe1gyq/speed-camera
|
f7da04162afaece15033971e23692f5f24a715ed
|
71306c058235bf1a7fb00c484c9d34f4ac0fefae
|
refs/heads/master
| 2021-03-30T21:18:50.236194
| 2018-02-26T20:07:13
| 2018-02-26T20:07:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
# ---------------- User Configuration Settings for speed-cam.py ---------------------------------
# Ver 8.4 speed-cam.py picam480 Stream Variable Configuration Settings
#######################################
# speed-cam.py plugin settings
#######################################
# Calibration Settings
# --------------------
calibrate = False # Create a calibration image file with calibration hash markers 10 px per mark
# Crop Area for motion detection Tracking
# ---------------------------------------
x_left = 150 # Default= 150 Exclude event if x less than this px position
x_right = 490 # Default= 490 Exclude event if x greater than this px position
y_upper = 140 # Default= 140 Exclude event if y less that this value
y_lower = 340 # Default= 340 Exclude event if y greater than this value
# Motion Event Settings
# ---------------------
SPEED_MPH = False # Set the speed conversion kph=False mph=True
MIN_AREA = 200 # Default= 200 Exclude all contours less than or equal to this sq-px Area
track_len_trig = 75 # Default= 75 Length of track to trigger speed photo
x_diff_max = 18 # Default= 18 Exclude if max px away >= last motion event x pos
x_diff_min = 1 # Default= 1 Exclude if min px away <= last event x pos
track_timeout = 0.0 # Default= 0.0 Optional seconds to wait after track End (Avoid dual tracking)
event_timeout = 0.3 # Default= 0.3 seconds to wait for next motion event before starting new track
log_data_to_CSV = False # Default= False True = Save log data as CSV comma separated values
# Camera Settings
# ---------------
WEBCAM = False # Default= False False=PiCamera True=USB WebCamera
# Pi Camera Settings
# ------------------
CAMERA_WIDTH = 640 # Default= 640 Image stream width for opencv motion scanning default=320
CAMERA_HEIGHT = 480 # Default= 480 Image stream height for opencv motion scanning default=240
CAMERA_FRAMERATE = 20 # Default = 30 Frame rate for video stream V2 picam can be higher
# Camera Image Settings
# ---------------------
image_path = "media/security" # folder name to store images
image_prefix = "scam-" # image name prefix security camera
image_show_motion_area = False # True= Display motion detection rectangle area on saved images
image_filename_speed = False # True= Prefix filename with speed value
image_text_on = False # True= Show Text on speed images False= No Text on images
image_bigger = 1.5 # Default= 1.5 Resize saved speed image by value
image_font_size = 18 # Default= 18 Font text height in px for text on images
imageRecentMax = 10 # 0=off Maintain specified number of most recent files in motionRecentDir
imageRecentDir = "media/recent/security" # default= "media/recent" save recent files directory path
# Optional Manage SubDir Creation by time, number of files or both
# ----------------------------------------------------------------
imageSubDirMaxHours = 0 # 0=off or specify MaxHours - Creates New dated sub-folder if MaxHours exceeded
imageSubDirMaxFiles = 0 # 0=off or specify MaxFiles - Creates New dated sub-folder if MaxFiles exceeded
# ---------------------------------------------- End of User Variables -----------------------------------------------------
|
[
"pageauc@gmail.com"
] |
pageauc@gmail.com
|
f94f70300297d6540a203b03e0a808f40fb78e99
|
3cedc7c1519d3b013aad9ec4e6a6ee7834da7589
|
/selenium_code/z_practise/001/sa2.py
|
f978ca4ea1b40e7eda5051133e473ae0a9999596
|
[] |
no_license
|
hzrg/songqin_course
|
53437100669ee93d2ac5ecae5de938b1a4007d7f
|
05e422ce34a42fd6d3819722a19252f8005e79ed
|
refs/heads/master
| 2022-02-09T13:27:59.871400
| 2019-06-13T06:08:45
| 2019-06-13T06:08:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
# coding:utf8
from selenium import webdriver
# path to the browser driver executable
executable_path = r"d:\tools\webdrivers\chromedriver.exe"
# specify the Chrome driver
# at this point Selenium launches the chromedriver binary found at the given path
driver = webdriver.Chrome(executable_path)
# ------------------------
driver.get('http://www.weather.com.cn/html/province/jiangsu.shtml')
# inspecting the HTML shows the temperature info sits in the dl children of forecastID
info = driver.find_element_by_id("forecastID")
# then fetch all dl child elements of forecastID
dls = info.find_elements_by_tag_name('dl')
# save city names and temperatures into the list citys
citys = []
for dl in dls:
    # print dl.get_attribute('innerHTML')
    name = dl.find_element_by_tag_name('dt').text
    # the high/low temperature positions vary; depending on position the element is a span or a b tag
    ltemp = dl.find_element_by_tag_name('b').text
    ltemp = int(ltemp.replace(u'℃',''))
    print(name, ltemp)
    citys.append([name, ltemp])
lowest = 100
lowestCitys = []  # list of cities with the lowest temperature
for one in citys:
    curcity = one[0]
    ltemp = one[1]
    curlowweather = ltemp
    # found a city with a lower temperature
    if curlowweather < lowest:
        lowest = curlowweather
        lowestCitys = [curcity]
    # temperature equals the current minimum: append to the list
    elif curlowweather == lowest:
        lowestCitys.append(curcity)
print('Lowest temperature is %s, cities: %s' % (lowest, ','.join(lowestCitys)))
# ------------------------
driver.quit()
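# A more compact variant of the minimum search above (sketch): one min()
# call plus a comprehension instead of manual bookkeeping.
if citys:
    lowest_temp = min(temp for _, temp in citys)
    lowest_cities = [name for name, temp in citys if temp == lowest_temp]
    print('Lowest temperature is %s, cities: %s' % (lowest_temp, ','.join(lowest_cities)))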
|
[
"1174497735@qq.com"
] |
1174497735@qq.com
|
eb1d56fce359772a0815850648aed190af310eb2
|
7c61922c2de52ea684a39a002355eff6551bf930
|
/getcount.py
|
33de2b911f0d1b8b0652e5e8f9650e44c86dcae2
|
[] |
no_license
|
DongDong-123/codewars
|
ac3e6b5d5dab78ef60140ac87b9c02cc8dba646c
|
723750fed649ea763a2363604dd6dea3359216a8
|
refs/heads/master
| 2020-03-21T15:48:19.316417
| 2019-01-04T14:44:54
| 2019-01-04T14:44:54
| 138,733,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
def getCount(inputStr):
    # count the vowels in inputStr
    inputStr = inputStr.lower()
    vowels = ['a', 'e', 'i', 'o', 'u']
    num_vowels = 0
    for v in vowels:
        num_vowels += inputStr.count(v)
    return num_vowels
#inputStr = "abracadabra"
a = getCount("abracadabra")
print(a)
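# For comparison, a one-pass version of the same count (sketch; get_count
# is a new helper name, not part of the original kata stub):
def get_count(input_str):
    return sum(1 for ch in input_str.lower() if ch in 'aeiou')
print(get_count("abracadabra"))  # 5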
|
[
"zisehaiyang04@163.com"
] |
zisehaiyang04@163.com
|
fa79b596babef682f3b5914ffcc30d799205917c
|
726ce8dddbb12af1662e002633bfe538ddf77708
|
/PyOpenGL-2.0.2.01-py2.5-win32.egg/OpenGL/GL/SGIS/_multitexture.py
|
f56fd6209fbb3f8208e4edbfaed99bcb96da0c30
|
[] |
no_license
|
bopopescu/BCPy2000-1
|
f9264bb020ba734be0bcc8e8173d2746b0f17eeb
|
0f877075a846d17e7593222628e9fe49ab863039
|
refs/heads/master
| 2022-11-26T07:58:03.493727
| 2019-06-02T20:25:58
| 2019-06-02T20:25:58
| 282,195,357
| 0
| 0
| null | 2020-07-24T10:52:24
| 2020-07-24T10:52:24
| null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'_multitexture.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"vanessa_kamara@my.uri.edu"
] |
vanessa_kamara@my.uri.edu
|
b2ab8d47998ea4e78f6b52de16d4b8b57ba4020a
|
cd052f960846ea33e22abdded3106fb492f16c31
|
/爬虫项目/code11/Tencent/Tencent/middlewares.py
|
f313fa007a063eec3e1f4f43302226f7dbe1aa01
|
[] |
no_license
|
byst4nder/his_spider
|
2d96457b70894c36506e8061d8a3201ac337a5d0
|
a51e31acff41292e568ac22b0e213e6cb48218fa
|
refs/heads/master
| 2020-07-21T12:06:28.952083
| 2019-09-06T14:25:58
| 2019-09-06T14:25:58
| 206,857,595
| 1
| 0
| null | 2019-09-06T19:04:02
| 2019-09-06T19:04:02
| null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
#coding:utf-8
import random
import requests
from fake_useragent import UserAgent
from settings import USER_AGENT_LIST
class RandomUserAgentMiddleware(object):
def __init__(self):
self.ua_obj = UserAgent()
def process_request(self, request, spider):
#user_agent = random.choice(USER_AGENT_LIST)
user_agent = self.ua_obj.random
request.headers["User-Agent"] = user_agent
print('---' * 10)
print(request.headers)
    # no return statement is needed in this middleware method
# return request
class RandomProxyMiddleware(object):
def __init__(self):
self.proxy_url = "http://kps.kdlapi.com/api/getkps/?orderid=914194268627142&num=1&pt=1&sep=1"
        # fetch the proxy offered by the proxy provider API
self.proxy_list = [requests.get(self.proxy_url).content]
self.count = 0
def process_request(self, request, spider):
if self.count < 20:
proxy = random.choice(self.proxy_list)
#http://47.99.65.91:16818
# http://maozhaojun:ntkn0npx@47.99.65.91:16818
request.meta['proxy'] = "http://maozhaojun:ntkn0npx@" + proxy
self.count += 1
else:
self.proxy_list = [requests.get(self.proxy_url).content]
self.count = 0
|
[
"mac@macdeMacBook-Pro.local"
] |
mac@macdeMacBook-Pro.local
|
d795d34961b9c42afe0703c20a4e6eeb5855f39a
|
21b39d50e4df56ea01453001845d1580729af1df
|
/jdcloud_sdk/services/cdn/apis/SetDomainConfigRequest.py
|
1e000f49f2dff6150f0a5cf3e6fc819eb5b40be3
|
[
"Apache-2.0"
] |
permissive
|
Tanc009/jdcloud-sdk-python
|
ef46eac7731aa8a1839b1fc1efd93249b7a977f0
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
refs/heads/master
| 2021-08-09T14:49:16.177709
| 2021-06-25T02:38:41
| 2021-06-25T02:38:41
| 141,714,695
| 0
| 0
|
Apache-2.0
| 2018-07-20T13:21:17
| 2018-07-20T13:21:16
| null |
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class SetDomainConfigRequest(JDCloudRequest):
    """
    Update domain configuration
    """
    def __init__(self, parameters, header=None, version="v1"):
        super(SetDomainConfigRequest, self).__init__(
            '/domain/{domain}/config', 'POST', header, version)
        self.parameters = parameters
class SetDomainConfigParameters(object):
    def __init__(self, domain, ):
        """
        :param domain: the user's domain name
        """
        self.domain = domain
        self.httpType = None
        self.backSourceType = None
        self.jumpType = None
        self.jcdnTimeAnti = None
        self.hdrCtrl = None
        self.toutiaoHeader = None
    def setHttpType(self, httpType):
        """
        :param httpType: (Optional) HTTP type; must be either http or https
        """
        self.httpType = httpType
    def setBackSourceType(self, backSourceType):
        """
        :param backSourceType: (Optional) back-to-origin type
        """
        self.backSourceType = backSourceType
    def setJumpType(self, jumpType):
        """
        :param jumpType: (Optional) one of three types: default, http, https
        """
        self.jumpType = jumpType
    def setJcdnTimeAnti(self, jcdnTimeAnti):
        """
        :param jcdnTimeAnti: (Optional) DASH authentication configuration
        """
        self.jcdnTimeAnti = jcdnTimeAnti
    def setHdrCtrl(self, hdrCtrl):
        """
        :param hdrCtrl: (Optional) back-to-origin authentication configuration
        """
        self.hdrCtrl = hdrCtrl
    def setToutiaoHeader(self, toutiaoHeader):
        """
        :param toutiaoHeader: (Optional) Toutiao header configuration
        """
        self.toutiaoHeader = toutiaoHeader
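# A usage sketch for the request class above; only classes defined in this
# file are used, and the final print stands in for the SDK client send
# step, which lives elsewhere in the SDK.
if __name__ == '__main__':
    params = SetDomainConfigParameters('www.example.com')
    params.setHttpType('https')
    params.setJumpType('default')
    request = SetDomainConfigRequest(params)
    print(request.parameters.domain, request.parameters.httpType)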
|
[
"tancong@jd.com"
] |
tancong@jd.com
|
e962b54ec262cb0e8a2b1e534a1193f362ac6c0e
|
6e8d58340f2be5f00d55e2629052c0bbc9dcf390
|
/lib/galaxy/datatypes/converters/fastqsolexa_to_fasta_converter.py
|
1b68b3f6a2a340f24a5357a700a8e9995715fcc1
|
[
"CC-BY-2.5",
"MIT"
] |
permissive
|
JCVI-Cloud/galaxy-tools-prok
|
e57389750d33ac766e1658838cdb0aaf9a59c106
|
3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c
|
refs/heads/master
| 2021-05-02T06:23:05.414371
| 2014-03-21T18:12:43
| 2014-03-21T18:12:43
| 6,092,693
| 0
| 2
|
NOASSERTION
| 2020-07-25T20:38:17
| 2012-10-05T15:57:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
#!/usr/bin/env python
"""
convert fastqsolexa file to separated sequence and quality files.
assume each sequence and quality score are contained in one line
the order should be:
1st line: @title_of_seq
2nd line: nucleotides
3rd line: +title_of_qualityscore (might be skipped)
4th line: quality scores
(in three forms: a. digits, b. ASCII codes, the first char as the coding base, c. ASCII codes without the first char.)
Usage:
%python fastqsolexa_to_fasta_converter.py <your_fastqsolexa_filename> <output_seq_filename> <output_score_filename>
"""
import sys, os
from math import *
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( "%s" % msg )
sys.exit()
def __main__():
infile_name = sys.argv[1]
outfile = open( sys.argv[2], 'w' )
fastq_block_lines = 0
seq_title_startswith = ''
for i, line in enumerate( file( infile_name ) ):
line = line.rstrip() # eliminate trailing space and new line characters
if not line or line.startswith( '#' ):
continue
fastq_block_lines = ( fastq_block_lines + 1 ) % 4
line_startswith = line[0:1]
if fastq_block_lines == 1:
# line 1 is sequence title
if not seq_title_startswith:
seq_title_startswith = line_startswith
if seq_title_startswith != line_startswith:
stop_err( 'Invalid fastqsolexa format at line %d: %s.' %( i + 1, line ) )
read_title = line[ 1: ]
outfile.write( '>%s\n' % line[1:] )
elif fastq_block_lines == 2:
# line 2 is nucleotides
read_length = len( line )
outfile.write( '%s\n' % line )
else:
pass
outfile.close()
if __name__ == "__main__": __main__()
|
[
"root@ip-10-118-137-129.ec2.internal"
] |
root@ip-10-118-137-129.ec2.internal
|
d224d3604bd4bf178bcc2ccbd591c0f88336a58b
|
77d808f47101202db6cec5a9eee6b38c55f73fde
|
/24. Regular Expressions/04.py
|
62ae2b81b1544adb49f3011abd21606be8b3f9cb
|
[] |
no_license
|
dimDamyanov/Py-Fundamentals
|
2ce5591fbfebf8d95c832e3f7109b24e53dd721b
|
5ccae5bfa456829d97e8773ee9f5eaa5f5051765
|
refs/heads/main
| 2023-01-29T22:21:07.788061
| 2020-12-13T08:11:04
| 2020-12-13T08:11:04
| 317,682,227
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
import re
data = input()
numbers = [n.group(0) for n in re.finditer(r'(^|(?<=\s))-*\d+(\.\d+)*($|(?=\s))', data)]
print(*numbers, sep=' ')
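# A quick demonstration of the pattern on a sample line (sketch): it keeps
# whitespace-delimited integers and decimals and skips digits glued to words.
sample = '1 -2 3.14 text 5words -0.5'
print([m.group(0) for m in re.finditer(r'(^|(?<=\s))-*\d+(\.\d+)*($|(?=\s))', sample)])
# expected: ['1', '-2', '3.14', '-0.5']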
|
[
"dim.damianov@gmail.com"
] |
dim.damianov@gmail.com
|
138ad6816981ced62f71bd3859116d1fa7ecfa16
|
e8d34c096f9df7f22ff5ccee34cf9f6e6a0adab4
|
/flask_login/test_gpios.py
|
3de4b6984d75c2f38c64ff8539dbc50799074af9
|
[] |
no_license
|
MarianoDel/coralpreto_py
|
50fed2bd4032d4e3adc29c06de4b096ee1b3833a
|
06bbe3f814fdbf80ae58b1ba6a53d0e96f0ec566
|
refs/heads/master
| 2023-03-07T14:19:19.074639
| 2022-03-25T17:34:38
| 2022-03-25T17:34:38
| 238,445,438
| 0
| 0
| null | 2023-03-05T06:04:33
| 2020-02-05T12:30:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
# -*- coding: utf-8 -*-
#usar python3
import time
RUNNING_ON_RASP = 0
if RUNNING_ON_RASP:
from gpios import *
GpiosInit()
def TestBlue():
print ("start blinking blue led for 10 secs")
if RUNNING_ON_RASP:
LedBlueToggleContinous('start')
time.sleep(10)
print ("ending toggling")
if RUNNING_ON_RASP:
LedBlueToggleContinous('stop')
print ("test ended!")
def TestChannel ():
channel = ['09', '12', '14', '71', '72', '74', '77', '81']
for i in range(len(channel)):
print ("memory: " + str(i) + " test channel: " + channel[i])
if RUNNING_ON_RASP:
            Channel_to_Memory(channel[i])
time.sleep(5)
print ("test ended!")
def TestPtt():
print ("PTT on for 5 secs")
if RUNNING_ON_RASP:
PttOn()
time.sleep(5)
if RUNNING_ON_RASP:
PttOff()
print ("Ptt off")
print ("test ended!")
def TestEncendido():
print ("Encendido on for 5 secs")
if RUNNING_ON_RASP:
OnOff_On()
time.sleep(5)
if RUNNING_ON_RASP:
OnOff_Off()
print ("Encendido off")
print ("test ended!")
def InitialValues ():
    if RUNNING_ON_RASP:
        LedBlueOff()
        PttOff()
        OnOff_Off()
        Bit0Off()
        Bit1Off()
        Bit2Off()
##############
# Main Tests #
##############
InitialValues()
TestBlue()
TestChannel()
TestPtt()
TestEncendido()
if RUNNING_ON_RASP:
    GpiosCleanUp()
|
[
"marianodeleu@yahoo.com.ar"
] |
marianodeleu@yahoo.com.ar
|
fe5008878edb08f5883649ab0765b19fdb4de0ce
|
3b944f1714c458c5d6d0e84d4b1498f2b59c4ef7
|
/581. Shortest Unsorted Continuous Subarray.py
|
3fa7d45d3112e56a100aa8150f35c38a0d623fae
|
[] |
no_license
|
shiannn/LeetCodePython
|
e4d66f108200d8329616b3e45b70c3f8fc4cd9ed
|
6e4472d41904e60ff9d70b5f3979c5dcae98c838
|
refs/heads/master
| 2021-06-26T03:24:03.079077
| 2021-02-24T16:54:18
| 2021-02-24T16:54:18
| 213,206,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
class Solution:
def findUnsortedSubarray(self, nums) -> int:
end = -2
start = -1
max_ = -float('inf')
for idx, num in enumerate(nums):
max_ = max(max_, num)
if max_ != num:
end = idx
min_ = float('inf')
for idx, num in reversed(list(enumerate(nums))):
#print(idx, num)
min_ = min(min_, num)
if min_ != num:
start = idx
#print(start, end)
return end - start + 1
if __name__ == '__main__':
sol = Solution()
nums = [2,6,4,8,10,9,15]
ret = sol.findUnsortedSubarray(nums)
print(ret)
|
[
"b05502087@ntu.edu.tw"
] |
b05502087@ntu.edu.tw
|
4911d82b51dc9ec4b68a07e2dc8f0b5229a842e6
|
099f8740e61878c92c067e96d76ccb014cd342c3
|
/robovat/simulation/__init__.py
|
718ea92343dbbbccf8f49643d7c02676671f222b
|
[
"MIT"
] |
permissive
|
UT-Austin-RPL/robovat
|
c52d7f0b5b4244ad19fc7c15c876e005626bf182
|
c333ce7f1d7b156bedf28c3b09793f5487b6690a
|
refs/heads/master
| 2023-01-06T12:32:39.304293
| 2020-11-12T20:12:25
| 2020-11-12T20:12:25
| 290,521,446
| 7
| 2
|
MIT
| 2020-08-28T17:33:52
| 2020-08-26T14:37:23
| null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
from robovat.simulation.base import Base
from robovat.simulation.body import Body
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.entity import Entity
from robovat.simulation.joint import Joint
from robovat.simulation.link import Link
from robovat.simulation.simulator import Simulator
|
[
"kuanfang@outlook.com"
] |
kuanfang@outlook.com
|
a3dafb3d4576186964f7d3265b17eb05cf0d5f78
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/359/usersdata/282/109815/submittedfiles/lecker.py
|
ac07eef3ffae2806101fd83deb886992b12a8634
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# -*- coding: utf-8 -*-
c = int(input('Enter the number of queries: '))
pedidos = []
fabricados = []
for i in range(0, c, 1):
    pedidos.append(int(input('Enter the taco size: ')))
for i in range(0, c, 1):
    if pedidos[i] not in fabricados:
        fabricados.append(pedidos[i])
        fabricados.append(pedidos[i])
print(len(fabricados))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1e5a9be74f78ccb91bf9ebd626783bf8123fcbaf
|
8e2e28a191fa5ec5a6c070ec7e9ccad98c8b4a0b
|
/jiaocheng/02-python核心编程/05-getattribute属性.py
|
0751dbd47c6556fed35b260ab822029c2dbcc613
|
[
"Apache-2.0"
] |
permissive
|
kellanfan/python
|
4cd61cbc062e2eee3a900fa7447ca5f0b8f1a999
|
912dc05a3bd0ded9544166a68da23ca0a97b84da
|
refs/heads/master
| 2023-04-06T03:04:38.851928
| 2023-04-01T02:45:56
| 2023-04-01T02:45:56
| 65,542,280
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
class Itcast(object):
    def __init__(self, subject1):
        self.subject1 = subject1
        self.subject2 = 'cpp'
    # attribute-access interceptor: log every lookup
    def __getattribute__(self, obj):
        print("====1>%s" % obj)
        if obj == 'subject1':
            print('log subject1')
            return 'redirect python'
        else:  # comment out these two lines while testing and subject2 will not be found
            temp = object.__getattribute__(self, obj)
            print("====2>%s" % str(temp))
            return temp
    def show(self):
        print('this is Itcast')
s = Itcast("python")
print(s.subject1)
print(s.subject2)
s.show()
# 1. first fetch the attribute named show -- it should be a method
# 2. then call it: method()
# in other words, everything on an object, attribute or method, is a
# reference; a method is just a variable pointing at a function
# import types
# p1.eat = types.MethodType(eat, p1)
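# A sketch of the dynamic binding mentioned in the closing comments above:
# attach a plain function to the existing instance s as a bound method.
import types
def eat(self):
    print('eating, subject1 =', self.subject1)
s.eat = types.MethodType(eat, s)
s.eat()  # the interceptor logs the lookup, then the bound method runs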
|
[
"icyfk1989@163.com"
] |
icyfk1989@163.com
|
5117dc2fd127111959aeb4c16a0827934522c3b0
|
9835b6949fe4c8018de57aee531dedf1509337cc
|
/September_2020/sep_11_Maximun_Product_Subarray.py
|
3c6c6472a9a78f7529c7993b6863e42fdb1b0150
|
[] |
no_license
|
jcai0o0/My_Leetcode_Solutions
|
f6edea0693d252a99e6507a1724a89763113f8a0
|
3fc909c01c6a345f625c9ab9e0f1584ea5fa8ab4
|
refs/heads/master
| 2023-01-01T04:08:33.929184
| 2020-10-17T02:01:56
| 2020-10-17T02:01:56
| 289,094,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from typing import List
class Solution:
def maxProduct(self, nums: List[int]) -> int:
if not nums:
return 0
N = len(nums)
f = [0] * N
g = [0] * N
f[0] = g[0] = res = nums[0]
for i in range(1, N):
f[i] = max(f[i-1] * nums[i], nums[i], g[i-1] * nums[i])
g[i] = min(f[i-1] * nums[i], nums[i], g[i-1] * nums[i])
res = max(res, f[i])
return res
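# Quick checks for the DP above (sketch): f[i] tracks the maximum product
# of a subarray ending at i, g[i] the minimum, so sign flips are handled.
print(Solution().maxProduct([2, 3, -2, 4]))  # 6, from subarray [2, 3]
print(Solution().maxProduct([-2, 0, -1]))    # 0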
|
[
"44845593+jcai0o0@users.noreply.github.com"
] |
44845593+jcai0o0@users.noreply.github.com
|
d4ef79f1d42135b241425cfb23eada729d85805d
|
420f974d85376031e66bb7241caedee1675b93ec
|
/init.py
|
a071836a49381b59b0ae48ee879ae0dacc8fbade
|
[] |
no_license
|
uiandwe/chatting
|
060c8b513ecd53db9519c97f99198c09cc918e0a
|
e8430cf4db173d44ee37601b96a8028271000cd1
|
refs/heads/master
| 2020-04-01T23:33:02.324646
| 2016-06-29T02:26:53
| 2016-06-29T02:26:53
| 62,188,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
__author__ = 'hyeonsj'
# db
host = '127.0.0.1'
user = 'root'
passwd = 'spoqa'
db = 'spoqa'
charset = 'utf8'
# logging level
# debug 10
# warning 30
# error 40
log_level = 10
|
[
"uiandwe@gmail.com"
] |
uiandwe@gmail.com
|
7b13f2453af39f2d8ce8980fb548903267988fb9
|
e47d5da2a947c3b3a834817d0b084ee65d302067
|
/atcoder.jp/aising2020/aising2020_b/Main.py
|
066248010306017828be4a1ada26949f6befc4c7
|
[] |
no_license
|
aki-nlp/AtCoder
|
3293b9b183c0a8cefbf20d7f4f491c6f1e7604b8
|
9385805cbb1fa158f6d3c4a2415cdf7ba94547e5
|
refs/heads/master
| 2023-02-25T06:04:10.913237
| 2020-10-03T12:02:00
| 2020-10-03T12:02:00
| 296,792,313
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
def main():
n = int(input())
a = list(map(int, input().split()))
a = a[::2]
ans = 0
for aa in a:
if aa%2 == 1:
ans += 1
print(ans)
if __name__ == '__main__':
main()
|
[
"akiuo.ou@gmail.com"
] |
akiuo.ou@gmail.com
|
5f0e5ecf4312f6a94fb5df3eca0368782d2e1f45
|
69889d51e933b4e8a1d4c8397a317aa1d1365a5a
|
/String/KMP/13506.py
|
ace84b813e5a35c288b459bc91aa9047c3fb07b6
|
[] |
no_license
|
ddraa/Algorithm
|
a35c87631420ceccec6f7094da6f2b22ddb66c8c
|
a97c6628d5389f7f93603a2e95ac3b569057f556
|
refs/heads/master
| 2023-06-25T17:12:39.925821
| 2021-07-18T05:53:28
| 2021-07-18T05:53:28
| 279,240,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
def failure_table(P):
    # KMP failure table: table[j] is the length of the longest proper
    # prefix of P[:j+1] that is also a suffix of it
    lp = len(P)
    table = [0] * lp
    i = 0
    for j in range(1, lp):
        while i > 0 and P[i] != P[j]:
            i = table[i - 1]
        if P[i] == P[j]:
            i += 1
        table[j] = i
    return table
print(failure_table("papapapap"))
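# A sketch of the full KMP substring search built on the table above
# (assumes a non-empty pattern):
def kmp_search(text, pattern):
    table, i = failure_table(pattern), 0
    for j, ch in enumerate(text):
        while i > 0 and pattern[i] != ch:
            i = table[i - 1]
        if pattern[i] == ch:
            i += 1
        if i == len(pattern):
            return j - i + 1  # index of the first occurrence
    return -1
print(kmp_search("ababcabcab", "abcab"))  # 2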
|
[
"ruuddyd@gmail.com"
] |
ruuddyd@gmail.com
|
ba1a284531e5e1f2b4e492eca0027f9a3e9bc9b6
|
102a33464fd3a16ceedd134e9c64fea554ca5273
|
/apps/shop/forms.py
|
22014c7b482f0b94dbeda97e4c41e71fdb9827e3
|
[] |
no_license
|
pythonguru101/django-ecommerce
|
b688bbe2b1a53c906aa80f86f764cf9787e6c2fe
|
f94de9c21223716db5ffcb86ba87219da88d2ff4
|
refs/heads/master
| 2020-07-24T14:57:02.047702
| 2020-06-10T06:06:23
| 2020-06-10T06:06:23
| 207,961,132
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,754
|
py
|
import re
from django import forms
from django.utils.translation import ugettext as _
from markdownx.widgets import MarkdownxWidget
from apps.shop.models import Product, ShippingType, Category
from .plugshop.forms import OrderForm as PlugshopOrderForm
class CategoryAdminForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
widgets = {
'short_description': MarkdownxWidget(),
'description': MarkdownxWidget(),
}
class ProductAdminForm(forms.ModelForm):
class Meta:
model = Product
fields = '__all__'
widgets = {
'short_description': MarkdownxWidget(),
'description': MarkdownxWidget(),
}
class OrderForm(PlugshopOrderForm):
    shipping_type = forms.ModelChoiceField(empty_label=None,
        queryset=ShippingType.objects.filter(is_active=True))
    name = forms.CharField(required=True, error_messages={
        'required': _(u'Please enter a name')
    })
    email = forms.EmailField(required=True, error_messages={
        'required': _(u'Please enter an email address')
    })
    phone = forms.CharField(required=True, error_messages={
        'required': _(u'Please enter a phone number')
    })
    def __require(self, name, error):
        value = self.cleaned_data.get(name, None)
        if len(value) == 0:
            self.errors[name] = [error]
    def clean_name(self):
        name = self.cleaned_data.get('name').strip().split()
        shipping_type = self.cleaned_data.get('shipping_type')
        if shipping_type.require_zip_code and len(name) < 3:
            raise forms.ValidationError(_(u'Please enter last name, first name and patronymic'))
        if len(name):
            self.cleaned_data['last_name'] = name[0]
            self.cleaned_data['first_name'] = " ".join(name[1:])
        else:
            raise forms.ValidationError(_(u'Please enter a name'))
        return " ".join(name)
    def clean(self):
        cleaned_data = self.cleaned_data
        shipping_type = cleaned_data.get('shipping_type')
        if shipping_type:
            if shipping_type.require_address:
                self.__require('address', _(u'Delivery address is missing'))
            if shipping_type.require_zip_code:
                self.__require('zip_code', _(u'Postal code is missing'))
                self.__require('city', _(u'City is missing'))
                zip_code = self.cleaned_data.get('zip_code', None)
                if re.search(r'^\d{6}$', zip_code) is None:
                    self.errors['zip_code'] = [_(u'The postal code consists of 6 digits')]
        return cleaned_data
|
[
"pythonguru101@gmail.com"
] |
pythonguru101@gmail.com
|
72ad00e39cc8e6c09b50e778412f8d9d2094a9e5
|
3996539eae965e8e3cf9bd194123989741825525
|
/EventFilter/Utilities/rawStreamFileWriterForBU_cfi.py
|
55b0b4128380e1fd75980e1887abc4c5ada3b947
|
[] |
no_license
|
cms-sw/cmssw-cfipython
|
01990ea8fcb97a57f0b0cc44a8bf5cde59af2d98
|
25ee4c810103c4a507ca1b949109399a23a524c5
|
refs/heads/CMSSW_11_2_X
| 2023-09-01T16:56:00.658845
| 2022-06-20T22:49:19
| 2022-06-20T22:49:19
| 136,184,115
| 1
| 0
| null | 2022-10-19T14:04:01
| 2018-06-05T13:47:28
|
Python
|
UTF-8
|
Python
| false
| false
| 291
|
py
|
import FWCore.ParameterSet.Config as cms
rawStreamFileWriterForBU = cms.OutputModule('RawStreamFileWriterForBU',
source = cms.InputTag('rawDataCollector'),
numEventsPerFile = cms.uint32(100),
frdVersion = cms.uint32(6),
microSleep = cms.int32(0),
frdFileVersion = cms.uint32(0)
)
|
[
"cmsbuild@cern.ch"
] |
cmsbuild@cern.ch
|
75909244f23ef13c6850631c801a95fcc525f524
|
e32ee307e4c59cc18f9dea18d797784a1b23148f
|
/calculate the number of local extrema in the given array..py
|
b2eb8e2bd69cb0f68b09931e45bd4707c0c00a29
|
[] |
no_license
|
GuhanSGCIT/SGCIT
|
f4ab44346186d45129c74cbad466c6614f9f0f08
|
8b2e5ccf693384aa22aa9d57f39b63e4659f6261
|
refs/heads/master
| 2020-07-11T05:47:54.033120
| 2020-07-07T05:02:41
| 2020-07-07T05:02:41
| 204,459,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
n = int(input())
l = [int(x) for x in input().split()]
count = 0
for i in range(1, n-1):
if (l[i]>l[i-1] and l[i]>l[i+1]) or (l[i]<l[i-1] and l[i]<l[i+1]):
count+=1
print(count)
|
[
"noreply@github.com"
] |
GuhanSGCIT.noreply@github.com
|
7138199d17ce5d21d5395a8ea2228f815ea2bb79
|
27acb207b21b4572561de4a5f7dfb9740318c0b8
|
/Python-Data-Representations/Week1/Ex6_W1_substring.py
|
b5a1afe3b91a4d51ec0978800eac5b19ff906c2d
|
[] |
no_license
|
iamieht/intro-scripting-in-python-specialization
|
ee836ef05b62f6c74fe8da3ee137687b4d0035cf
|
8ea4f85f0ed3dcd541f89521c013335e9eb32980
|
refs/heads/master
| 2021-01-16T05:35:51.616276
| 2020-06-08T18:39:45
| 2020-06-08T18:39:45
| 242,993,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
"""
Function that tests for substring
"""
def is_substring(example_string, test_string):
"""
Function that returns True if test_string
is a substring of example_string and False otherwise
"""
# enter one line of code for substring test here
return test_string in example_string
# Tests
example_string = "It's just a flesh wound."
print(is_substring(example_string, "just"))
print(is_substring(example_string, "flesh wound"))
print(is_substring(example_string, "piddog"))
print(is_substring(example_string, "it's"))
print(is_substring(example_string, "It's"))
# Output
#True
#True
#False
#False
#True
|
[
"iamieht@gmail.com"
] |
iamieht@gmail.com
|
6f13f1e1e5fad0a19e704f17be7866134efb141e
|
eda9187adfd53c03f55207ad05d09d2d118baa4f
|
/tensorboardX/demo.py
|
1fb77accb7db02f58576ac23e1ac78b36108156f
|
[] |
no_license
|
HuiZhaozh/python_tutorials
|
168761c9d21ad127a604512d7c6c6b38b4faa3c7
|
bde4245741081656875bcba2e4e4fcb6b711a3d9
|
refs/heads/master
| 2023-07-07T20:36:20.137647
| 2020-04-24T07:18:25
| 2020-04-24T07:18:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,306
|
py
|
# -*- coding:utf-8 -*-
# /usr/bin/python
'''
-------------------------------------------------
File Name : demo
Description :
Envs :
Author : yanerrol
Date : 2020/2/3 21:13
-------------------------------------------------
Change Activity:
2020/2/3 21:13:
-------------------------------------------------
'''
__author__ = 'yanerrol'
import torch
import torchvision.utils as vutils
import numpy as np
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter
import datetime
resnet18 = models.resnet18(False)
writer = SummaryWriter()
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
for n_iter in range(100):
s1 = torch.rand(1) # value to keep
s2 = torch.rand(1)
# data grouping by `slash`
writer.add_scalar('data/scalar_systemtime', s1[0], n_iter)
# data grouping by `slash`
writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter)
writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter),
"xcosx": n_iter * np.cos(n_iter),
"arctanx": np.arctan(n_iter)}, n_iter)
x = torch.rand(32, 3, 64, 64) # output from network
if n_iter % 10 == 0:
x = vutils.make_grid(x, normalize=True, scale_each=True)
writer.add_image('Image', x, n_iter) # Tensor
writer.add_image_with_boxes('imagebox_label', torch.ones(3, 240, 240) * 0.5,
torch.Tensor([[10, 10, 100, 100], [101, 101, 200, 200]]),
n_iter,
labels=['abcde' + str(n_iter), 'fgh' + str(n_iter)])
x = torch.zeros(sample_rate * 2)
for i in range(x.size(0)):
# sound amplitude should in [-1, 1]
x[i] = np.cos(freqs[n_iter // 10] * np.pi *
float(i) / float(sample_rate))
writer.add_audio('myAudio', x, n_iter)
writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)
for name, param in resnet18.named_parameters():
if 'bn' not in name:
writer.add_histogram(name, param, n_iter)
writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(
100), n_iter) # needs tensorboard 0.4RC or later
writer.add_pr_curve_raw('prcurve with raw data', true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall, n_iter)
# export scalar data to JSON for external processing
writer.export_scalars_to_json("./all_scalars.json")
dataset = datasets.MNIST('mnist', train=False, download=True)
images = dataset.test_data[:100].float()
label = dataset.test_labels[:100]
features = images.view(100, 784)
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))
writer.add_embedding(features, global_step=1, tag='noMetadata')
dataset = datasets.MNIST('mnist', train=True, download=True)
images_train = dataset.train_data[:100].float()
labels_train = dataset.train_labels[:100]
features_train = images_train.view(100, 784)
all_features = torch.cat((features, features_train))
all_labels = torch.cat((label, labels_train))
all_images = torch.cat((images, images_train))
dataset_label = ['test'] * 100 + ['train'] * 100
all_labels = list(zip(all_labels, dataset_label))
writer.add_embedding(all_features, metadata=all_labels, label_img=all_images.unsqueeze(1),
metadata_header=['digit', 'dataset'], global_step=2)
# VIDEO
vid_images = dataset.train_data[:16 * 48]
vid = vid_images.view(16, 48, 1, 28, 28) # BxTxCxHxW
writer.add_video('video', vid_tensor=vid)
writer.add_video('video_1_fps', vid_tensor=vid, fps=1)
writer.close()
|
[
"2681506@gmail.com"
] |
2681506@gmail.com
|
4a8a08909397b5d1c28e2f029ec69e5bba7a0535
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2437/60586/311745.py
|
df394328050a5b32f1a4d7b71b3a5abaa5a94c4e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
x=input()
if x=="6 2 ":
print(6,end="")
if x=="6 3 ":
print(1,end="")
elif x=="8 3 ":
print(3,end="")
elif x=="8 5 ":
print(0,end="")
else:
print(x)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d823fca9b27f34af478f6c88c97725a4014d1c14
|
c7aadaba9ee8f8f28cf1b2fc604d671f12675b49
|
/src/transient/diffusion/d3_d2D.py
|
2085a7f7796dc3b1d05dc6336268aa3832a7d63b
|
[] |
no_license
|
ellipsis14/fenics-tutorial
|
2147656822afa36e4e6b8d39e9728d63708d6c73
|
a1d9a7352675048b9d7f388b9b737701e7e78399
|
refs/heads/master
| 2021-01-15T23:45:09.826960
| 2015-03-04T10:46:33
| 2015-03-04T10:46:33
| 31,659,473
| 1
| 0
| null | 2015-03-04T13:54:36
| 2015-03-04T13:54:36
| null |
UTF-8
|
Python
| false
| false
| 3,107
|
py
|
"""
FEniCS tutorial demo program: Diffusion equation with Dirichlet
conditions and a solution that will be exact at all nodes.
As d2_d2D.py, but here we test various start vectors for iterative
solution of the linear system at each time level.
The script d3_d2D_script.py runs experiments with different start
vectors and prints out the number of iterations.
"""
from dolfin import *
import numpy, sys
numpy.random.seed(12)
# zero, random, default, last
initial_guess = 'zero' if len(sys.argv) == 1 else sys.argv[1]
# PETSc, Epetra, MTL4,
la_backend = 'PETSc' if len(sys.argv) <= 2 else sys.argv[2]
parameters['linear_algebra_backend'] = la_backend
# Create mesh and define function space
nx = ny = 40
mesh = UnitSquareMesh(nx, ny)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary conditions
alpha = 3; beta = 1.2
u0 = Expression('1 + x[0]*x[0] + alpha*x[1]*x[1] + beta*t',
alpha=alpha, beta=beta, t=0)
class Boundary(SubDomain): # define the Dirichlet boundary
def inside(self, x, on_boundary):
return on_boundary
boundary = Boundary()
bc = DirichletBC(V, u0, boundary)
# Initial condition
u_1 = interpolate(u0, V)
u_2 = Function(V)
#u_1 = project(u0, V) # will not result in exact solution!
dt = 0.9 # time step
T = 10*dt # total simulation time
# Define variational problem
# Laplace term
u = TrialFunction(V)
v = TestFunction(V)
a_K = inner(nabla_grad(u), nabla_grad(v))*dx
# "Mass matrix" term
a_M = u*v*dx
M = assemble(a_M)
K = assemble(a_K)
A = M + dt*K
bc.apply(A)
# f term
f = Expression('beta - 2 - 2*alpha', beta=beta, alpha=alpha)
# Linear solver initialization
#solver = KrylovSolver('cg', 'ilu')
solver = KrylovSolver('gmres', 'ilu')
#solver = KrylovSolver('gmres', 'none') # cg doesn't work, probably because matrix bc makes it nonsymmetric
solver.parameters['absolute_tolerance'] = 1E-5
solver.parameters['relative_tolerance'] = 1E-17 # irrelevant
solver.parameters['maximum_iterations'] = 10000
if initial_guess == 'default':
solver.parameters['nonzero_initial_guess'] = False
else:
solver.parameters['nonzero_initial_guess'] = True
u = Function(V)
set_log_level(DEBUG)
print 'nonzero initial guess:', solver.parameters['nonzero_initial_guess']
# Compute solution
u = Function(V)
t = dt
while t <= T:
print 'time =', t
# f.t = t # if time-dep f
f_k = interpolate(f, V)
F_k = f_k.vector()
b = M*u_1.vector() + dt*M*F_k
u0.t = t
bc.apply(b) # BIG POINT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if initial_guess == 'zero':
u.vector()[:] = 0
elif initial_guess == 'last':
pass
elif initial_guess == 'random':
u.vector()[:] = numpy.random.uniform(-1, 1, V.dim())
elif t >= 2*dt and initial_guess == 'extrapolate':
u.vector()[:] = 2*u_1.vector() - u_2.vector()
solver.solve(A, u.vector(), b)
# Verify
u_e = interpolate(u0, V)
u_e_array = u_e.vector().array()
u_array = u.vector().array()
print 'Max error, t=%-10.3f:' % t, numpy.abs(u_e_array - u_array).max()
t += dt
u_2.assign(u_1)
u_1.assign(u)
|
[
"hpl@simula.no"
] |
hpl@simula.no
|
0361b75dc0630118ca7291ef92d6eedb19e0f3ed
|
f0c35cd1d458f2f9ec1c605d73b9fc4738f62986
|
/web/admin/forms.py
|
59ea21852a00a9dacdc2d9f95b918f1dafa08ad3
|
[] |
no_license
|
dougmpx/xiaoli
|
9e57c7bdd1d6e9ab55adb657ad5fa9d10dbe2a50
|
88f28754d1a67351b90461ad004ca5d36dde1e02
|
refs/heads/master
| 2021-04-15T07:39:06.655988
| 2013-01-05T08:10:02
| 2013-01-05T08:10:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,099
|
py
|
#coding=utf-8
from tango.models import db, Category
from nodes.models import Vendor, Model
from .models import Miboid, Module
from flask_wtf import Form, TextField, PasswordField, HiddenField, SelectField, IntegerField, \
QuerySelectField, TextAreaField, widgets, ValidationError, required, equal_to, email
class SearchForm(Form):
keyword = TextField()
class CategoryForm(Form):
    id = TextField(validators=[required(message=u'Required')])
    obj = TextField(u'Group', [required(message=u'Required')])
    name = TextField(u'Name', [required(message=u'Required')])
    alias = TextField(u'Display name', [required(message=u'Required')])
    is_valid = SelectField(u'Validity', [required(message=u'Required')], choices=[(u'0', u'Invalid'), (u'1', u'Valid')])
class PermissionForm(Form):
    endpoint = TextField(u'Endpoint')
    module_text = TextField(u'Module display name')
    name = TextField(u'Submodule display name')
    operation = TextField(u'Operation name')
    default_permission = SelectField(u'Validity', [required(message=u'Required')], choices=[(u'0', u'No permission'), (u'1', u'Permitted')])
    next = HiddenField()
class VendorForm(Form):
    name = TextField(u'Name', [required(message=u'Required')])
    alias = TextField(u'Display name', [required(message=u'Required')])
    url = TextField(u'Vendor homepage')
    is_valid = SelectField(u'Validity', [required(message=u'Required')], choices=[(u'0', u'Invalid'), (u'1', u'Valid')])
class ModelForm(Form):
    category = QuerySelectField(u'Category', get_label=u'alias',
                                query_factory=lambda: Category.query.filter_by(obj='node'))
    name = TextField(u'Name', [required(message=u'Required')])
    alias = TextField(u'Display name', [required(message=u'Required')])
    sysoid = TextField(u'Sysoid')
    vendor = QuerySelectField(u'Vendor', get_label=u'alias',
                              query_factory=lambda: Vendor.query)
    is_valid = SelectField(u'Validity', [required(message=u'Required')], choices=[(u'0', u'Invalid'), (u'1', u'Valid')])
    remark = TextAreaField(u'Remarks')
class SysoidForm(Form):
    sysoid = TextField(u'SysOid', [required(message=u'Required')])
    model = QuerySelectField(u'Device model', get_label=u'alias',
                             query_factory=lambda: Model.query)
    disco = TextField(u'Discovery module')
    mib = QuerySelectField(u'MIB file', get_pk=lambda x: x, get_label=lambda x: x,
                           query_factory=lambda: [m[0] for m in db.session.query(Miboid.mib).distinct().all()])
    remark = TextAreaField(u'Remarks')
class ModuleForm(Form):
    name = TextField(u'Name', [required(message=u'Required')])
    alias = TextField(u'Display name', [required(message=u'Required')])
    period = IntegerField(u'Period (min)')
    retries = IntegerField(u'Retries')
    timeout = IntegerField(u'Timeout (s)')
    remark = TextAreaField(u'Remarks')
class MonitorForm(Form):
    category = TextField(u'Category')
    vendor = TextField(u'Vendor')
    sysoid = TextField(u'Sysoid')
    match = TextField(u'Match rule')
    module = QuerySelectField(u'Collection module', get_label=u'alias',
                              query_factory=lambda: Module.query)
    mib = QuerySelectField(u'MIB file', get_pk=lambda x: x, get_label=lambda x: x,
                           query_factory=lambda: [m[0] for m in db.session.query(Miboid.mib).distinct().all()])
    remark = TextAreaField(u'Remarks')
class MiboidForm(Form):
    mib = TextField(u'mib', [required(message=u'Required')])
    grp = TextField(u'Group', [required(message=u'Required')])
    name = TextField(u'Name', [required(message=u'Required')])
    alias = TextField(u'Display name', [required(message=u'Required')])
    oid = TextField(u'oid')
    is_valid = SelectField(u'Validity', [required(message=u'Required')], choices=[(u'0', u'Invalid'), (u'1', u'Valid')])
    remark = TextAreaField(u'Remarks')
|
[
"thewawar@gmail.com"
] |
thewawar@gmail.com
|
b088b7e8a4069b741246eaf5ac68d6faad85613b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04012/s874951633.py
|
7b934360297ee1e1391f1376a323f92dc1ecebb8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# coding: utf-8
w = list(input())
w_ = list(set(w))
flg = True
for a in w_:
if w.count(a)%2 != 0:
flg = False
if flg:
print("Yes")
else:
print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c5b5216e50a35624832cb3c83ef89b17bad936c6
|
fc3f784c8d00f419b11cbde660fe68a91fb080ca
|
/algoritm/20상반기 코딩테스트/보급로/1249.py
|
f8cb979771655a3bd22b8164a902086c5eea5c12
|
[] |
no_license
|
choo0618/TIL
|
09f09c89c8141ba75bf92657ac39978913703637
|
70437a58015aecee8f3d86e6bfd0aa8dc11b5447
|
refs/heads/master
| 2021-06-25T07:01:34.246642
| 2020-12-21T04:57:13
| 2020-12-21T04:57:13
| 163,782,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import sys
sys.stdin = open('1249.txt','r')
from collections import deque
dx=[1,0,-1,0]
dy=[0,1,0,-1]
def IS(y,x):
return -1<y<N and -1<x<N
for t in range(int(input())):
N=int(input())
A=[list(map(int,input()))for y in range(N)]
Map=[[10**9]*N for _ in range(N)] # best known cost to reach each cell
Q=deque([(0,0,0)]) # (cost, y, x): start at the top-left with cost 0
while Q:
c,y,x=Q.popleft()
if Map[y][x]<c:continue # skip stale queue entries already beaten by a cheaper path
for d in range(4):
Y,X=y+dy[d],x+dx[d]
if not IS(Y,X) or Map[Y][X]<=c+A[Y][X]:continue
Map[Y][X]=c+A[Y][X]
Q.append((c+A[Y][X],Y,X))
print('#%d %d'%(t+1,Map[N-1][N-1]))
|
[
"choo0618@naver.com"
] |
choo0618@naver.com
|
50f4218bab8cab402a3642b888fffb7a6a8f06f5
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-ief/huaweicloudsdkief/v1/model/update_edge_node_device_response.py
|
c0a8a018e150454b0fe2df63d8f1a2d583739033
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,496
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateEdgeNodeDeviceResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'delete_connector': 'bool',
'deploy_connector': 'bool',
'deployment_id': 'str',
'update_devices': 'NodeDevice'
}
attribute_map = {
'delete_connector': 'delete_connector',
'deploy_connector': 'deploy_connector',
'deployment_id': 'deployment_id',
'update_devices': 'update_devices'
}
def __init__(self, delete_connector=None, deploy_connector=None, deployment_id=None, update_devices=None):
"""UpdateEdgeNodeDeviceResponse
The model defined in huaweicloud sdk
:param delete_connector: 工业终端设备预留字段
:type delete_connector: bool
:param deploy_connector: 工业终端设备预留字段
:type deploy_connector: bool
:param deployment_id: 工业终端设备预留字段
:type deployment_id: str
:param update_devices:
:type update_devices: :class:`huaweicloudsdkief.v1.NodeDevice`
"""
super(UpdateEdgeNodeDeviceResponse, self).__init__()
self._delete_connector = None
self._deploy_connector = None
self._deployment_id = None
self._update_devices = None
self.discriminator = None
if delete_connector is not None:
self.delete_connector = delete_connector
if deploy_connector is not None:
self.deploy_connector = deploy_connector
if deployment_id is not None:
self.deployment_id = deployment_id
if update_devices is not None:
self.update_devices = update_devices
@property
def delete_connector(self):
"""Gets the delete_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:return: The delete_connector of this UpdateEdgeNodeDeviceResponse.
:rtype: bool
"""
return self._delete_connector
@delete_connector.setter
def delete_connector(self, delete_connector):
"""Sets the delete_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:param delete_connector: The delete_connector of this UpdateEdgeNodeDeviceResponse.
:type delete_connector: bool
"""
self._delete_connector = delete_connector
@property
def deploy_connector(self):
"""Gets the deploy_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:return: The deploy_connector of this UpdateEdgeNodeDeviceResponse.
:rtype: bool
"""
return self._deploy_connector
@deploy_connector.setter
def deploy_connector(self, deploy_connector):
"""Sets the deploy_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:param deploy_connector: The deploy_connector of this UpdateEdgeNodeDeviceResponse.
:type deploy_connector: bool
"""
self._deploy_connector = deploy_connector
@property
def deployment_id(self):
"""Gets the deployment_id of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:return: The deployment_id of this UpdateEdgeNodeDeviceResponse.
:rtype: str
"""
return self._deployment_id
@deployment_id.setter
def deployment_id(self, deployment_id):
"""Sets the deployment_id of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:param deployment_id: The deployment_id of this UpdateEdgeNodeDeviceResponse.
:type deployment_id: str
"""
self._deployment_id = deployment_id
@property
def update_devices(self):
"""Gets the update_devices of this UpdateEdgeNodeDeviceResponse.
:return: The update_devices of this UpdateEdgeNodeDeviceResponse.
:rtype: :class:`huaweicloudsdkief.v1.NodeDevice`
"""
return self._update_devices
@update_devices.setter
def update_devices(self, update_devices):
"""Sets the update_devices of this UpdateEdgeNodeDeviceResponse.
:param update_devices: The update_devices of this UpdateEdgeNodeDeviceResponse.
:type update_devices: :class:`huaweicloudsdkief.v1.NodeDevice`
"""
self._update_devices = update_devices
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateEdgeNodeDeviceResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
8e2285e97c33aaae42dc1d4463e35d6f6d1a9b56
|
dffee54c9c40b495e56cd56d191aef0e4ebe6064
|
/composer/core/algorithm.py
|
25317300f7dca6dce28ebd33f352a1721d4460c4
|
[
"Apache-2.0"
] |
permissive
|
zeeroocooll/composer
|
3afb0427e713c3e19197c780f03b510fbf6c936b
|
6dd0a0f297cafb404333d6280a5344bcb7f3bee6
|
refs/heads/main
| 2023-08-20T04:21:51.536149
| 2021-10-13T20:34:29
| 2021-10-13T20:34:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,933
|
py
|
# Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from composer.core.serializable import Serializable
if TYPE_CHECKING:
from composer.core import Event, Logger, State
class Algorithm(Serializable, ABC):
"""Base class for algorithms.
Algorithms are pieces of code which run at specific events in the training
loop. Algorithms modify the trainer's state, generally with the effect of
improving the model's quality, or
increasing the efficiency and throughput of the training loop.
Algorithms must implement two methods:
:func:`match`, which returns whether the algorithm should be run given
the current event and state, and :func:`apply`, which makes an in-place
change to the State.
"""
@property
def find_unused_parameters(self) -> bool:
"""Indicates that the effect of this algorithm may cause some model
parameters to be unused.
Used to tell DDP that some parameters will be frozen during
training and hence it should not expect gradients from them.
All algorithms which do any kind of parameter freezing should
override this function to return True.
"""
return False
@abstractmethod
def match(self, event: Event, state: State) -> bool:
"""Determines whether this algorithm should run, given the current
:class:`Event` and :class:`State`.
Examples:
To only run on a specific event:
>>> return event == Event.BEFORE_LOSS
Switching based on state attributes:
>>> return state.epoch > 30 and state.world_size == 1
See :class:`State` for accessible attributes.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
Returns:
bool: True if this algorithm should run now.
"""
raise NotImplementedError(f'implement match() required for {self.__class__.__name__}')
@abstractmethod
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
"""Applies the algorithm to make an in-place change to the State
Can optionally return an exit code to be stored in a :class:`Trace`.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
logger (:class:`Logger`): A logger to use for
logging algorithm-specific metrics.
Returns:
``int`` or ``None``: exit code that is stored in :class:`Trace`
and made accessible for debugging.
"""
raise NotImplementedError(f'implement apply() required for {self.__class__.__name__}')
def __str__(self) -> str:
"""Returns the class name."""
return self.__class__.__name__
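# Illustrative sketch only: a minimal hypothetical subclass showing the
# contract described in the docstrings above. The name LayerFreezer, the
# chosen event, and the freezing logic are invented for this example and are
# not part of composer.
from composer.core import Event  # runtime import; the one above is TYPE_CHECKING-only

class LayerFreezer(Algorithm):
    """Freezes all model parameters once training passes epoch 10."""

    @property
    def find_unused_parameters(self) -> bool:
        # Frozen parameters produce no gradients, so DDP must be told.
        return True

    def match(self, event: Event, state: State) -> bool:
        # Run once at the start of each epoch, after a warm-up period.
        return event == Event.EPOCH_START and state.epoch > 10

    def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
        # In-place change to the trainer's state: stop updating all weights.
        for parameter in state.model.parameters():
            parameter.requires_grad = False
        return 0  # optional exit code, to be stored in a Trace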
|
[
"averylamp@gmail.com"
] |
averylamp@gmail.com
|
5531e802e6e0131bfab313bbb6fe0f400f8fc8d2
|
698cb8d24879fe75669af6f2667c3f88660a0a1e
|
/FM/deepfm/deepfm_movielens_sample.py
|
4d5736c139d3a64e02b438bc0dbd2fbacb19ae68
|
[] |
no_license
|
HuichuanLI/Recommand-Algorithme
|
c83c5d34d75eebd127e2aef7abc8b7152fc54f96
|
302e14a3f7e5d72ded73b72a538596b6dc1233ff
|
refs/heads/master
| 2023-05-11T03:01:30.940242
| 2023-04-30T08:03:19
| 2023-04-30T08:03:19
| 187,097,782
| 71
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,684
|
py
|
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from deepctr.models import DeepFM
from deepctr.inputs import SparseFeat,get_feature_names
# load the dataset
data = pd.read_csv("movielens_sample.txt")
sparse_features = ["movie_id", "user_id", "gender", "age", "occupation", "zip"]
target = ['rating']
# label-encode the categorical features
for feature in sparse_features:
lbe = LabelEncoder()
data[feature] = lbe.fit_transform(data[feature])
# count the number of distinct values in each feature
fixlen_feature_columns = [SparseFeat(feature, data[feature].nunique()) for feature in sparse_features]
print(fixlen_feature_columns)
linear_feature_columns = fixlen_feature_columns
dnn_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
# split the dataset into training and test sets
train, test = train_test_split(data, test_size=0.2)
train_model_input = {name:train[name].values for name in feature_names}
test_model_input = {name:test[name].values for name in feature_names}
# train a DeepFM model
model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression')
model.compile("adam", "mse", metrics=['mse'], )
history = model.fit(train_model_input, train[target].values, batch_size=256, epochs=1, verbose=True, validation_split=0.2, )
# predict with the trained DeepFM model
pred_ans = model.predict(test_model_input, batch_size=256)
# report RMSE (derived from MSE)
mse = round(mean_squared_error(test[target].values, pred_ans), 4)
rmse = mse ** 0.5
print("test RMSE", rmse)
|
[
"lhc14124908@163.com"
] |
lhc14124908@163.com
|
babcd86669606969ca94181114c3944258ecfa56
|
6bdb32ddbd72c4337dab12002ff05d6966538448
|
/gridpack_folder/mc_request/LHEProducer/Spin-1/Wprime_WZ_WhadZlep/Wprime_WZ_WhadZlep_narrow_M2000_13TeV-madgraph_cff.py
|
aef83982aeb269928c449b90de344527b31a631c
|
[] |
no_license
|
cyrilbecot/DibosonBSMSignal_13TeV
|
71db480de274c893ba41453025d01bfafa19e340
|
d8e685c40b16cde68d25fef9af257c90bee635ba
|
refs/heads/master
| 2021-01-11T10:17:05.447035
| 2016-08-17T13:32:12
| 2016-08-17T13:32:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/master/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-1/Wprime_WZ_WhadZlep/Wprime_WZ_WhadZlep_narrow_M2000
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-1/Wprime_WZ_WhadZlep/narrow/v2/Wprime_WZ_WhadZlep_narrow_M2000_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
|
[
"syu@cern.ch"
] |
syu@cern.ch
|
f82d94ad5533aa17f9c433b5546780f562802e2a
|
d1507ee333bf9453a197fe997b58871b527811bf
|
/venv/bin/automat-visualize
|
51f0d1222abf19fd9b8ca755d742738686858191
|
[] |
no_license
|
hirossan4049/screenshare
|
a336f2cf0e0584866356a82f13683480d9d039f6
|
004f0e649116a6059af19d6489aeb13aed1741f3
|
refs/heads/master
| 2021-01-27T09:21:48.891153
| 2020-04-12T04:55:40
| 2020-04-12T04:55:40
| 243,476,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
#!/Users/linear/Documents/pg/pythonnnnn/screenshare/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from automat._visualize import tool
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(tool())
|
[
"haruto405329@gmail.com"
] |
haruto405329@gmail.com
|
|
7069d8dae75b1aa649b24c927694adb46dc57f3c
|
732e1285934470ae04b20d64921a8cba20932875
|
/neuedu_cnblogs_spider/pipelines.py
|
d19805a40bcea08c1a72fa65eb9c955cfba04a39
|
[] |
no_license
|
infant01han/neuedu_django_scrapy_es_cnblogs
|
69ee11c7840b25b8ae6d37b21324389dfdacf371
|
d293bae6ab5a7a360289afe35b7c3320dbce2dc8
|
refs/heads/master
| 2021-04-19T05:43:49.618157
| 2020-03-24T07:51:20
| 2020-03-24T07:51:20
| 249,584,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class NeueduCnblogsSpiderPipeline(object):
def process_item(self, item, spider):
item.save_to_es()
return item
|
[
"you@example.com"
] |
you@example.com
|
5649179f8c1bb20ed44f3c4504259fd0c3f51967
|
3c868540c8f5b0b9b46440e9b8e9160de9e8988f
|
/ch06/handle_with_condition.py
|
fe8d59c97207d94fc31608b8c1b50584d2ba69ac
|
[] |
no_license
|
sarte3/python
|
cc8f41b8b22b0a980252d6546358dd212324e2cd
|
15d984e5df03387950692092b6b5569adab845bb
|
refs/heads/master
| 2023-01-18T18:37:40.720326
| 2020-11-17T08:43:27
| 2020-11-17T08:43:27
| 304,824,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
user_input_a = input('정수 입력 > ')
if user_input_a.isdigit():
number_input_a = int(user_input_a)
print('원의 반지름 : ', number_input_a)
print('원의 둘레 : ', 2 * 3.14 * number_input_a)
print('원의 넓이 : ', 3.14 * number_input_a * number_input_a)
else:
print('정수를 입력하지 않았습니다')
|
[
"sarte@outlook.kr"
] |
sarte@outlook.kr
|
250f31b763d02f2dba25473438a3e6fdcc71ebc9
|
55a9b1b294d5a402c63848f9f7386e3bf93645da
|
/docker/src/clawpack-5.3.1/pyclaw/src/petclaw/tests/test_io.py
|
56c544ed1ff6d6cd39629552d19d32f8513d88d9
|
[
"LicenseRef-scancode-public-domain",
"CC-BY-4.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
geohackweek/visualization
|
b606cfade5d31f59cc38602df05930aed6e19b17
|
5d29fa5b69d69ee5c18ffaef2d902bd51f5807c8
|
refs/heads/gh-pages
| 2021-01-21T13:34:44.622039
| 2019-09-06T23:28:08
| 2019-09-06T23:28:08
| 68,648,198
| 11
| 13
|
NOASSERTION
| 2019-09-06T23:28:09
| 2016-09-19T21:27:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 509
|
py
|
from clawpack import pyclaw
from clawpack import petclaw
import os
class PetClawIOTest(pyclaw.IOTest):
@property
def solution(self):
return petclaw.Solution()
@property
def file_formats(self):
return ['hdf5']
@property
def this_dir(self):
return os.path.dirname(os.path.abspath(__file__))
@property
def test_data_dir(self):
return os.path.join(self.this_dir, '../../pyclaw/tests/test_data')
def test_io_from_binary(self):
return
|
[
"arendta@uw.edu"
] |
arendta@uw.edu
|
b5719efc41c1787dbdbf3f5fd14e1e331769b2cf
|
55a4d7ed3ad3bdf89e995eef2705719ecd989f25
|
/main/law/spark_short/spark_short_limai_and_wenshu_origin/lawlist_to_lawid_2018-05-10_imp_other_etl_online.py
|
e9734a7e27e63e8f7b1081c614d979c3b4078dbe
|
[] |
no_license
|
ichoukou/Bigdata
|
31c1169ca742de5ab8c5671d88198338b79ab901
|
537d90ad24eff4742689eeaeabe48c6ffd9fae16
|
refs/heads/master
| 2020-04-17T04:58:15.532811
| 2018-12-11T08:56:42
| 2018-12-11T08:56:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,190
|
py
|
# -*- coding: utf-8 -*-
from pyspark import SparkContext,SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import re
def p(x):
if x[1]:
print type(x)
print x
# print x[1]
# exit(0)
def filter_(x):
if x[1] and x[1] != '': # drop rows whose lawlist is NULL or '' in the database
return True
return False
def get_uuids(uuids):
l = []
for x in uuids:
l.append(x) # convert the grouped ResultIterable into a plain list
return "||".join(l) # a Python list cannot be stored into MySQL directly
def get_lawlist_ids(uuid_ids):
uuid,ids = uuid_ids[0],uuid_ids[1]
lawlist_id = []
for x in ids:
lawlist_id.append(x)
return (uuid,"||".join(lawlist_id))
def get_title_short_id(x): # keeps lawlist and law_id aligned and ordered!
k = x[0] + "|" + x[1]
v = str(x[2])
return (k,v)
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
# sc.setLogLevel("ERROR") # ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
# lawlist = sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/civil', table='uuid_reason_lawlist',column='id',lowerBound=0,upperBound=100000,numPartitions=70,properties={"user": "root", "password": "HHly2017."})
lawlist_id = sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_v3', table='(select id,title_short,art_num,lawlist_id from law_rule_result2) tmp',column='id',lowerBound=1,upperBound=2881160,numPartitions=30,properties={"user": "weiwc", "password": "HHly2017."})
# lawlist= sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/civil', table='uuid_reason_lawlist',predicates=["id >= 1 and id <= 100"],properties={"user": "root", "password": "HHly2017."})
lawlist= sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_imp_other', table='(select id,uuid,lawlist from imp_other_etl ) tmp2',column='id',lowerBound=1,upperBound=4733848,numPartitions=108,properties={"user": "weiwc", "password": "HHly2017."})
def etl_lawlist(p1, p2, lawlist):
if lawlist and lawlist.strip() != '':
# if not (lawlist.strip().startswith("[") and lawlist.strip().endswith("]")): # strip every leading/trailing "
r1 = re.findall(ur'"{0,5}\["{0,5}', lawlist.strip())
r2 = re.findall(ur'"{0,5}\]"{0,5}', lawlist.strip())
if r1 and r2:
start = r1.pop(0)
end = r2.pop()
lawlist = lawlist.strip().replace(start, "").replace(end, "")
# l = list(eval(lawlist.strip())) # dirty records make a bare eval() unusable
l = lawlist.split('", "') # lawlist looks like: 《最高人民法院关于审理建设工程施工合同纠纷案件适用法律问题的解释》第三条", "《中华人民共和国合同法》第九十七条", "最高人民法院关于审理建设工程施工合同纠纷案件适用法律问题的解释》第十条", "《中华人民共和国合同法》第九十八条
if l:
tl = []
for i in l:
r1 = re.split(p2, i)
if len(r1) > 2: # make sure both 《 and 》 are present
r2 = re.search(p1, r1[2])
if r2: # was an article number (第...条) found?
tl.append(r1[1] + "|" + r2.group(0))
return list(set(tl)) # deduplicate
return []
return []
return []
lawlist_id2 = lawlist_id.select('title_short','art_num','lawlist_id').map(lambda x:get_title_short_id(x))
p1 = ur'\u7b2c[\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d\u5341\u767e\u5343]{1,10}\u6761' # matches article numbers such as 第三条
p2 = ur'[\u300a\u300b]' # split on the 《 》 brackets
c = lawlist.select('uuid','lawlist').map(lambda x:(x[0],x[1])).flatMapValues(lambda x: etl_lawlist(p1, p2, x)).filter(filter_).map(lambda x: (x[1], x[0])).groupByKey().mapValues(lambda v: get_uuids(v))
# flatMapValues(lambda x: etl_lawlist(p1, p2, x)).filter(filter_).map(lambda x: (x[1].encode("utf-8"), x[0]))
# groupByKey().mapValues(lambda v: get_uuids(v))
# filter(filter_).map(lambda x: (x[1].encode("utf-8"), x[0])).groupByKey().mapValues(lambda v: get_uuids(v))
# print str(c.count()) + "======================"
# c.foreach(p)
lawlist_title_id_result = lawlist_id2.join(c).map(lambda x:x[1]).filter(filter_).flatMapValues(lambda x:(x.split("||"))).map(lambda x:(x[1],x[0])).groupByKey().map(lambda x:(get_lawlist_ids(x)))
schema = StructType([StructField("uuid", StringType(), False),StructField("law_id", StringType(), True)])
f = sqlContext.createDataFrame(lawlist_title_id_result, schema=schema)
# , mode = "overwrite"
# useUnicode=true&characterEncoding=utf8 sets the encoding used when writing to MySQL; without it the text is garbled.
# print str(f.count()) + "======================"
f.write.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_imp_other?useUnicode=true&characterEncoding=utf8', table='imp_other_uuid_law_id',properties={"user": "weiwc", "password": "HHly2017."})
sc.stop()
|
[
"985819225@qq.com"
] |
985819225@qq.com
|
265a5e2c314e412b545f2390b981e49d3b9d7a25
|
09ae3f372d1000f118ad80874870ae420a4be66f
|
/scikit-learn-master/examples/compose/plot_digits_pipe.py
|
c5b0fb2a136094f0d16c180883cdcc3175896a9d
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
lqkweb/learnMLflow
|
998f80c3828879b8d542125bc95c6345b8e9b29a
|
13c5decaebba95b1b90f92021be35e343b4764af
|
refs/heads/master
| 2022-10-18T06:17:23.584172
| 2019-01-18T09:51:38
| 2019-01-18T09:51:38
| 166,145,472
| 2
| 0
|
Apache-2.0
| 2022-09-30T18:26:17
| 2019-01-17T02:22:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,395
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Define a pipeline to search for the best combination of PCA truncation
# and classifier regularization.
logistic = SGDClassifier(loss='log', penalty='l2', early_stopping=True,
max_iter=10000, tol=1e-5, random_state=0)
pca = PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
'pca__n_components': [5, 20, 30, 40, 50, 64],
'logistic__alpha': np.logspace(-4, 4, 5),
}
search = GridSearchCV(pipe, param_grid, iid=False, cv=5)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
# Plot the PCA spectrum
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(pca.explained_variance_ratio_, linewidth=2)
ax0.set_ylabel('PCA explained variance')
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))
# For each number of components, find the best classifier results
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(
lambda g: g.nlargest(1, 'mean_test_score'))
best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score',
legend=False, ax=ax1)
ax1.set_ylabel('Classification accuracy (val)')
ax1.set_xlabel('n_components')
plt.tight_layout()
plt.show()
|
[
"leiqk@dxy.cn"
] |
leiqk@dxy.cn
|
4661f874d007a11754a46c3beedde6041690f9e9
|
f6fafa5ade66f3168a4c8960389d6fb75539cf9b
|
/authmobile/views.py
|
c589d37ac540e48d45157b3ada270cf700ef5c9a
|
[] |
no_license
|
tokibito/nullpobug-mobile-twitter-client
|
7fc6593bd086017eaa7fad96f60efa43193ff526
|
dbfb75a16d4020f471187bb1398e06ef42fc9862
|
refs/heads/master
| 2020-07-25T07:39:49.730289
| 2009-07-23T07:27:06
| 2009-07-23T07:27:06
| 208,217,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
# vim:fileencoding=utf8
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from authmobile.models import MobileUser
def login_easy(request):
"""
Easy login (かんたんログイン)
"""
if request.agent.is_nonmobile():
return HttpResponseBadRequest(u'モバイル端末でアクセスしてください')
# get the subscriber ID
if request.agent.is_docomo():
guid = request.agent.guid
else:
guid = request.agent.serialnumber
user = authenticate(subscriber_id=guid)
if not user:
return direct_to_template(request, 'authmobile/error.html', extra_context={
'message': u'ユーザが見つかりません。',
})
login(request, user)
return HttpResponseRedirect(reverse('site_index'))
|
[
"xxshss@yahoo.co.jp"
] |
xxshss@yahoo.co.jp
|
46a68cf8d816140c27a6905b438ef3b5390e2390
|
29ecf78ebd8fe26409db20f5a5ccbf40a0b7bf77
|
/posts/tests/test_api_views.py
|
10d12405755f41f59f77e32766cef9f8a3457530
|
[] |
no_license
|
pranavchandran/Django-Tests-unleashed
|
56225d1cdd6cca58df4e0fffec33b3d36cabbad7
|
dc76e6b87cea7842388cd90bbd5a45c563e4af3f
|
refs/heads/master
| 2022-09-29T11:11:10.517822
| 2020-06-10T06:21:29
| 2020-06-10T06:21:29
| 271,107,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,879
|
py
|
from rest_framework.test import APIRequestFactory,force_authenticate
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.contrib.auth.models import AnonymousUser,User
from posts.models import Post
from posts.api.views import (
PostCreateAPIView,
PostDeleteAPIView,
PostDetailAPIView,
PostListAPIView,
PostUpdateAPIView,
)
# User = get_user_model
class PostApiTest(TestCase):
def setUp(self):
self.data = {"title":"coming days","content":"time is","publish":timezone.now().date()}
self.factory = APIRequestFactory()
self.user = User.objects.create(
username='test1', email='test@neeps.in', password='top_secret',
is_staff=True,is_superuser=True)
def create_post(self,title='crucial'):
return Post.objects.create(title=title)
def test_get_data(self):
list_url = reverse("posts-api:list")
obj =self.create_post()
detail_url = reverse('posts-api:detail',kwargs={'slug':obj.slug})
request = self.factory.get(list_url)
response = PostListAPIView.as_view()(request)
self.assertEqual(response.status_code,200)
request = self.factory.get(detail_url)
response = PostDetailAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response.status_code,200)
def test_post_data(self):
create_url = reverse("posts-api:create")
request = self.factory.post(create_url,data=self.data)
response1 = PostCreateAPIView.as_view()(request)
self.assertEqual(response1.status_code,401)
force_authenticate(request,user=self.user)
response = PostCreateAPIView.as_view()(request)
self.assertEqual(response.status_code,201)
def test_update_data(self):
obj = self.create_post()
update_url = reverse("posts-api:update",kwargs={"slug":obj.slug})
request = self.factory.put(update_url,data=self.data)
# print(request)
response1 = PostUpdateAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response1.status_code,401)
force_authenticate(request,user=self.user)
response = PostUpdateAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response.status_code,200)
def test_delete_data(self):
obj = self.create_post()
delete_url = reverse("posts-api:delete",kwargs={"slug":obj.slug})
request = self.factory.delete(delete_url)
# print(request)
response1 = PostDeleteAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response1.status_code,401)
force_authenticate(request,user=self.user)
response = PostDeleteAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response.status_code,204)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
aea3200a6cf1ceec2a12eac766f221b4f85cb99d
|
03415e25427d9a17bada8fd75daadc45c093c377
|
/LST_Collect.py
|
7cf76c296c1d1f4a78e7ce9e9b0fd9243fd117e1
|
[] |
no_license
|
mwilensky768/MJW-HERA
|
472d639bd4086a31be112564be9b2b22e70e3e86
|
da1710a17123cc3ccd3e318e224712eb80bcb3bd
|
refs/heads/master
| 2021-08-10T22:32:15.391270
| 2017-11-13T01:45:48
| 2017-11-13T01:45:48
| 108,204,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
import pyuvdata
import glob
import numpy as np
from math import pi
inpath = '/data6/HERA/data/2458042/zen.2458042.'
pathlist = glob.glob(inpath + '*.xx*.uv')
obslist = np.sort(np.array([int(path[path.find('zen.') + 12:path.find('.xx')])
for path in pathlist]))
pathlist_sort = [inpath + str(obs) + '.xx.HH.uv' for obs in obslist]
UV = pyuvdata.UVData()
LST = []
for path in pathlist_sort:
UV.read_miriad(path)
LST.append(UV.lst_array[0])
np.save('/data4/mwilensky/GS_LST.npy', np.array(LST) * 23.934 / (2 * pi)) # convert LST from radians to sidereal hours
|
[
"mjw768@uw.edu"
] |
mjw768@uw.edu
|
c175141ce719e09b6cea9f37d217223ff7d6033a
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/H/hitkarsh/karnataka_2.py
|
6c81472eb4a4013c05dd3d24a663158f61abd084
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,692
|
py
|
import scraperwiki
import mechanize # added by Usha
import re # added by Usha
import lxml.html
url="http://censusindia.gov.in/Census_Data_2001/Village_Directory/List_of_Villages/List_of_Villages_Alphabetical.aspx?cki=&State_Code=29"
import string
#create list of upper case alphabets
l=list(string.ascii_uppercase)
#create list 1-35
l1=list(range(1,36))
l2=[]
s_no=0
#convert the numbers in l1 to strings and collect them in l2
for i in l1:
l2.append(str(i))
#append a 0 for single digit numbers
for i in range(10):
l2[i]='0'+l2[i]
state_count=0
c=1
data=[]
#run loop for all state and union territories
#while state_count<35:
while state_count<1:
#add state code to the url
#url1=url+l2[state_count]+"&SearchKey="
url1=url+"&SearchKey="
state_count+=1
count=16
l_c=0
#data=[]
row=[]
#run loop for alphabets
while count<26:
#while count<2:
#add search alphabet to the url
url2=url1+l[count]
# code added by Usha Nair
br = mechanize.Browser()
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
response = br.open(url2)
VAR1 = response.read() #reads the source file for the web page
br.select_form(nr=0)
br.set_all_readonly(False)
mnext = re.search("""<a id="lnkShowAll" href="javascript:__doPostBack\('(.*?)','(.*?)'\)" style="font-family:Verdana;font-size:Smaller;">Show All""", VAR1)
if not mnext:
count+=1
continue
br["__EVENTTARGET"] = mnext.group(1)
br["__EVENTARGUMENT"] = mnext.group(2)
#br.find_control("btnSearch").disabled = True
response = br.submit()
VAR2 = response.read() # source code after submitting show all
print "response"
print response
print "VAR2"
print VAR2
# Usha Nair till here
#html = scraperwiki.scrape(url2)
#root = lxml.html.fromstring(html)
root = lxml.html.fromstring(VAR2)
count+=1
#select div where data exists
for el in root.cssselect("div#printarea td"):
#select appropriate table row
for el2 in el.cssselect("tr.GridAlternativeRows td"):
if l_c<4:
row.append(el2.text_content())
l_c+=1
else:
row.append(el2.text_content())
l_c=0
data.append(row)
#save to database
scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
s_no+=1
row=[]
#select appropriate table row
for el2 in el.cssselect("tr.GridRows td"):
if l_c<4:
row.append(el2.text_content())
l_c+=1
else:
row.append(el2.text_content())
l_c=0
data.append(row)
#save to database
scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
s_no+=1
row=[]
print "completed scrapping"
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
a6c789b7be6e47e5a363cd0cc4b9e9d846ce4005
|
b3b443f0bc49bbb10c26b51fe89e6860d4ca3d3a
|
/ctreport_selenium/ctreport_html/scripts/detailmodal.py
|
d3f7cf88a94f4e60fc79f4cc43686715a63414b6
|
[
"MIT"
] |
permissive
|
naveens33/ctreport-selenium
|
6b3a1cc93a6741a1d493c2452c1cf56c6d85c052
|
9553b5c4b8deb52e46cf0fb3e1ea7092028cf090
|
refs/heads/master
| 2022-12-23T04:55:12.226339
| 2020-08-29T19:22:00
| 2020-08-29T19:22:00
| 228,779,087
| 2
| 2
|
MIT
| 2022-12-18T22:53:51
| 2019-12-18T07:03:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,082
|
py
|
def content(var):
c = '''
<script>
function createmodal(id) {
''' + var + '''
var content = '<table class="table table-bordered ">';
var footer = ''
if(Array.isArray(tests[id])){
content += '<tbody>\
<tr class="table-secondary"><td>Expected</td></tr>\
<tr class="align-middle">';
content += '<td>'+tests[id][0].join(", ")+'</td></tr>\
<tr class="table-secondary"><td>Actual</td></tr>\
<tr class="align-middle">';
content += '<td>'+tests[id][1].join(", ")+'</td></tr>';
}
else{
content += '<thead class="thead-light">\
<tr>\
<th class="align-middle text-sm-center">Status</th>\
<th class="align-middle text-sm-center">Key</th>\
<th class="align-middle text-sm-center">Expected</th>\
<th class="align-middle text-sm-center">Actual</th>\
</tr>\
</thead>\
<tbody>';
for(var key in tests[id]) {
status =''
key_='<td>'+key+'</td>'
expected='<td>'+tests[id][key][0]+'</td>';
actual='<td>'+tests[id][key][1]+'</td>';
if (tests[id][key][2]=='true'){
status='<i class="fa fa-check-circle align-middle text-sm-center" style="color:#00AF00; font-size: 18px;"></i>';
}
else{
status='<i class="fa-times-circle fa align-middle text-sm-center" style="color:#F7464A; font-size: 18px;"></i>';
if (tests[id][key][0]=="null"){
key_ = '<td style="background-color:rgb(247, 131, 134,0.3);">'+key+'</td>'
expected='<td></td>';
}
else if(tests[id][key][1]=="null"){
actual='<td style="color:#F7464A;">\
<i class="fas fa-ban" data-toggle="tooltip" data-placement="right" data-original-title="Key missing in actual data"></i>\
</td>';
}
else{
actual='<td style="background-color: #ffffb2">'+tests[id][key][1]+'</td>';
}
}
content += '<tr class="align-middle text-sm-center">\
<td>\
'+status+'\
</td>\
'+key_+'\
'+expected+'\
'+actual+'\
</tr>';
footer = '<div class="row">\
<div class="col-2"><i class="fas fa-square-full border border-secondary" style="color: #ffffb2"></i></div>\
<div class="col-10">\Actual is not same as Expected</div>\
</div>\
<div class="row">\
<div class="col-2"><i class="fas fa-square-full border border-secondary" style="color:rgb(247, 131, 134,0.3);"></i></div>\
<div class="col-10">New key found in actual</div>\
</div>\
<div class="row">\
<div class="col-2"><i class="fas fa-ban" style="color:#F7464A;"></i></div>\
<div class="col-10">Key missing in actual data</div>\
</div>';
}
}
content += '</tbody>\
</table>';
var header = "Expected vs Actual";
var html = '<div id="modalWindow" class="modal" data-keyboard="false" data-backdrop="static">';
html += '<div class="modal-dialog modal-dialog-scrollable ">\
<div class="modal-content">\
<div class="modal-header">\
<button type="button" id="closeModal" class="btn btn-danger" data-dismiss="modal" onclick=deletemodal("modalWindow") style="margin:auto 1rem auto auto; font-size: smaller;">Close</button>\
</div>\
<div class="modal-body">'
+content+'\
</div>\
<div class="modal-footer small">'\
+footer+'\
</div>\
</div>\
</div>\
</div>';
$("#myModal").html(html);
$("#modalWindow").modal();
}
function deletemodal(id) {
var element = document.getElementById(id);
element.parentNode.removeChild(element);
};
</script>
'''
return c
|
[
"naveensagayaselvaraj@gmail.com"
] |
naveensagayaselvaraj@gmail.com
|
607219c000f7f31a1333d2b772480f3aad169545
|
fea6e9d6b20b0c5f2a05a6f2433aae4176b2a00a
|
/server/applibs/account/tasks/fetch_status.py
|
1c80b02e381a041e1e063576ae4ca0441bcb6c7a
|
[] |
no_license
|
fanshuai/kubrick
|
fddf6c21bcd500223d9a05bd002e47eb1ecf8839
|
b7ed6588e13d2916a4162d56509d2794742a1eb1
|
refs/heads/main
| 2023-03-24T12:21:44.562850
| 2021-03-19T15:11:40
| 2021-03-19T15:11:40
| 349,445,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
"""
Sync the delivery status of SMS verification codes
"""
import logging
from kubrick.celery import app
from server.corelib.dealer import deal_time
from server.corelib.notice.async_tasks import send_dd_msg__task
from server.constant.djalias import CQueueAlias
logger = logging.getLogger('kubrick.celery')
@app.task(queue=CQueueAlias.Timed.value)
def fetch_status_pnverify(now=None):
""" 短信验证码状态检查 """
from server.constant import mochoice as mc
from server.applibs.account.models import PNVerify
time_start, time_end = deal_time.round_floor_ten_mins(now=now)
pnv_qs = PNVerify.objects.filter(
status=mc.SMSStatus.Waiting,
created_at__gte=time_start,
created_at__lt=time_end,
)
done_count = 0
waiting_count = pnv_qs.count()
for pnv in pnv_qs:
pnv.sms_code_query()
done_count += 1 if pnv.is_status_final else 0
done_info = f'{time_start} ~ {time_end}: {done_count}/{waiting_count}'
logger.info(f'fetch_status_pnverify__done {done_info}')
if done_count != waiting_count:
send_dd_msg__task(f'短信验证码状态检查:{done_info}')
result = dict(
task='fetch_status_pnverify',
done=done_count,
waiting=waiting_count,
end_at=time_end.isoformat(),
start_at=time_start.isoformat(),
)
return result
|
[
"zfaner@gmail.com"
] |
zfaner@gmail.com
|
10db09bd205a4767ad04c2ad9a7ae71e296af40f
|
296132d2c5d95440b3ce5f4401078a6d0f736f5a
|
/homeassistant/components/xiaomi_ble/sensor.py
|
831b5d0910be035820e0172f6706c2b06edb2f0c
|
[
"Apache-2.0"
] |
permissive
|
mezz64/home-assistant
|
5349a242fbfa182159e784deec580d2800173a3b
|
997d4fbe5308b01d14ceabcfe089c2bc511473dd
|
refs/heads/dev
| 2023-03-16T22:31:52.499528
| 2022-12-08T02:55:25
| 2022-12-08T02:55:25
| 68,411,158
| 2
| 1
|
Apache-2.0
| 2023-03-10T06:56:54
| 2016-09-16T20:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,812
|
py
|
"""Support for xiaomi ble sensors."""
from __future__ import annotations
from typing import Optional, Union
from xiaomi_ble import DeviceClass, SensorUpdate, Units
from homeassistant import config_entries
from homeassistant.components.bluetooth.passive_update_processor import (
PassiveBluetoothDataProcessor,
PassiveBluetoothDataUpdate,
PassiveBluetoothProcessorCoordinator,
PassiveBluetoothProcessorEntity,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONDUCTIVITY,
ELECTRIC_POTENTIAL_VOLT,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_MBAR,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.sensor import sensor_device_info_to_hass_device_info
from .const import DOMAIN
from .device import device_key_to_bluetooth_entity_key
SENSOR_DESCRIPTIONS = {
(DeviceClass.BATTERY, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.BATTERY}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.BATTERY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
(DeviceClass.CONDUCTIVITY, Units.CONDUCTIVITY): SensorEntityDescription(
key=str(Units.CONDUCTIVITY),
device_class=None,
native_unit_of_measurement=CONDUCTIVITY,
state_class=SensorStateClass.MEASUREMENT,
),
(
DeviceClass.FORMALDEHYDE,
Units.CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
): SensorEntityDescription(
key=f"{DeviceClass.FORMALDEHYDE}_{Units.CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER}",
native_unit_of_measurement=CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.HUMIDITY, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.HUMIDITY}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.ILLUMINANCE, Units.LIGHT_LUX): SensorEntityDescription(
key=f"{DeviceClass.ILLUMINANCE}_{Units.LIGHT_LUX}",
device_class=SensorDeviceClass.ILLUMINANCE,
native_unit_of_measurement=LIGHT_LUX,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.MOISTURE, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.MOISTURE}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.MOISTURE,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.PRESSURE, Units.PRESSURE_MBAR): SensorEntityDescription(
key=f"{DeviceClass.PRESSURE}_{Units.PRESSURE_MBAR}",
device_class=SensorDeviceClass.PRESSURE,
native_unit_of_measurement=PRESSURE_MBAR,
state_class=SensorStateClass.MEASUREMENT,
),
(
DeviceClass.SIGNAL_STRENGTH,
Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
): SensorEntityDescription(
key=f"{DeviceClass.SIGNAL_STRENGTH}_{Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT}",
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
state_class=SensorStateClass.MEASUREMENT,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
(DeviceClass.TEMPERATURE, Units.TEMP_CELSIUS): SensorEntityDescription(
key=f"{DeviceClass.TEMPERATURE}_{Units.TEMP_CELSIUS}",
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.VOLTAGE, Units.ELECTRIC_POTENTIAL_VOLT): SensorEntityDescription(
key=f"{DeviceClass.VOLTAGE}_{Units.ELECTRIC_POTENTIAL_VOLT}",
device_class=SensorDeviceClass.VOLTAGE,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
# Used for e.g. consumable sensor on WX08ZM
(None, Units.PERCENTAGE): SensorEntityDescription(
key=str(Units.PERCENTAGE),
device_class=None,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
}
def sensor_update_to_bluetooth_data_update(
sensor_update: SensorUpdate,
) -> PassiveBluetoothDataUpdate:
"""Convert a sensor update to a bluetooth data update."""
return PassiveBluetoothDataUpdate(
devices={
device_id: sensor_device_info_to_hass_device_info(device_info)
for device_id, device_info in sensor_update.devices.items()
},
entity_descriptions={
device_key_to_bluetooth_entity_key(device_key): SENSOR_DESCRIPTIONS[
(description.device_class, description.native_unit_of_measurement)
]
for device_key, description in sensor_update.entity_descriptions.items()
if description.native_unit_of_measurement
},
entity_data={
device_key_to_bluetooth_entity_key(device_key): sensor_values.native_value
for device_key, sensor_values in sensor_update.entity_values.items()
},
entity_names={
device_key_to_bluetooth_entity_key(device_key): sensor_values.name
for device_key, sensor_values in sensor_update.entity_values.items()
},
)
async def async_setup_entry(
hass: HomeAssistant,
entry: config_entries.ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Xiaomi BLE sensors."""
coordinator: PassiveBluetoothProcessorCoordinator = hass.data[DOMAIN][
entry.entry_id
]
processor = PassiveBluetoothDataProcessor(sensor_update_to_bluetooth_data_update)
entry.async_on_unload(
processor.async_add_entities_listener(
XiaomiBluetoothSensorEntity, async_add_entities
)
)
entry.async_on_unload(coordinator.async_register_processor(processor))
class XiaomiBluetoothSensorEntity(
PassiveBluetoothProcessorEntity[
PassiveBluetoothDataProcessor[Optional[Union[float, int]]]
],
SensorEntity,
):
"""Representation of a xiaomi ble sensor."""
@property
def native_value(self) -> int | float | None:
"""Return the native value."""
return self.processor.entity_data.get(self.entity_key)
|
[
"noreply@github.com"
] |
mezz64.noreply@github.com
|
27a49544c7c1b8f8f550a76bdb9f95675a635c6a
|
3cedb583e9f3dfcdf16aeba56a0b3ff7c6213e99
|
/python-codes/m3_curso_em_video_estruturas_compostas/ex101.0.py
|
b156f239ea79ed513ea7696f940a01732d28e535
|
[
"MIT"
] |
permissive
|
lucasportella/learning-python
|
0f39ae2389db6d07b5b8c14ebe0c24f1e93c77c5
|
a9449dffd489e7e1f1619e3acef86bc2c64f0f14
|
refs/heads/master
| 2022-12-26T15:04:12.806300
| 2020-10-14T23:17:47
| 2020-10-14T23:17:47
| 260,685,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
def voto(ano_nascimento):
from datetime import date
idade = date.today().year - ano_nascimento
if idade < 16:
return print(f"Com {idade} anos: VOTO NEGADO")
elif 16 <= idade < 18 or idade >= 70:
return print(f"Com {idade} anos: VOTO OPCIONAL")
elif idade >= 18 and idade < 70:
return print(f"Com {idade} anos: VOTO OBRIGATÓRIO")
print('--'*10)
voto(int(input("Em que ano você nasceu? ")))
|
[
"lucasportellaagu@gmail.com"
] |
lucasportellaagu@gmail.com
|
bdaf49b8f1852494947d57dd9f3e385d7cb21ecb
|
73c9211d5627594e0191510f0b4d70a907f5c4c5
|
/pytest/lesson6/TestXlsxReportdemo.py
|
2c2e3aef8262fceb1736ac41921a38a074af96c5
|
[] |
no_license
|
tigerxjtu/py3
|
35378f270363532fb30962da8674dbcee99eb5ff
|
5d24cd074f51bd0f17f6cc4f5f1a6e7cf0d48779
|
refs/heads/master
| 2021-07-13T05:34:15.080119
| 2020-06-24T09:36:33
| 2020-06-24T09:36:33
| 159,121,100
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,744
|
py
|
# -*- coding:utf-8 -*-
import xlsxwriter
import time
from pytest.lesson4.testrequest import *
from pytest.lesson4.testvote import *
from pytest.lesson4.testrequest import *
from pytest.testdata.getpath import GetTestDataPath
import xlrd
# write the GetTestReport method yourself
from pytest.testdata.getpath import GetTestReport
testurl="http://127.0.0.1:8000"
ReportPath=GetTestReport()
workbook = xlsxwriter.Workbook(ReportPath)
worksheet = workbook.add_worksheet("测试总结")
worksheet2 = workbook.add_worksheet("用例详情")
test_polls()
test_vote()
test_login()
TestReport = hlist # the collected test results
hpassnum = 0 # counter for the number of test cases that passed
def get_format(wd, option={}):
return wd.add_format(option)
# centered-cell format
def get_format_center(wd, num=1):
return wd.add_format({'align': 'center', 'valign': 'vcenter', 'border': num})
def set_border_(wd, num=1):
return wd.add_format({}).set_border(num)
# write data to a cell
def _write_center(worksheet, cl, data, wd):
return worksheet.write(cl, data, get_format_center(wd))
# generate the pie chart
def pie(workbook, worksheet):
chart1 = workbook.add_chart({'type': 'pie'})
chart1.add_series({
'name': '接口测试统计',
'categories': '=测试总结!$D$4:$D$5',
'values': '=测试总结!$E$4:$E$5',
})
chart1.set_title({'name': '接口测试统计'})
chart1.set_style(10)
worksheet.insert_chart('A9', chart1, {'x_offset': 25, 'y_offset': 10})
def init(worksheet):
global workbook
# set column widths and row heights
worksheet.set_column("A:A", 15)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_column("F:F", 20)
worksheet.set_row(1, 30)
worksheet.set_row(2, 30)
worksheet.set_row(3, 30)
worksheet.set_row(4, 30)
worksheet.set_row(5, 30)
# worksheet.set_row(0, 200)
define_format_H1 = get_format(workbook, {'bold': True, 'font_size': 18})
define_format_H2 = get_format(workbook, {'bold': True, 'font_size': 14})
define_format_H1.set_border(1)
define_format_H2.set_border(1)
define_format_H1.set_align("center")
define_format_H2.set_align("center")
define_format_H2.set_bg_color("blue")
define_format_H2.set_color("#ffffff")
# Create a new Chart object.
worksheet.merge_range('A1:F1', '接口自动化测试报告', define_format_H1)
worksheet.merge_range('A2:F2', '测试概括', define_format_H2)
worksheet.merge_range('A3:A6', '炼数成金', get_format_center(workbook))
# worksheet.insert_image('A1', GetLogoDataPath())
_write_center(worksheet, "B3", '项目名称', workbook)
_write_center(worksheet, "B4", '接口版本', workbook)
_write_center(worksheet, "B5", '脚本语言', workbook)
_write_center(worksheet, "B6", '测试地址', workbook)
data = {"test_name": "炼数成金项目接口", "test_version": "v1.0.0",
"test_pl": "Python3", "test_net": testurl}
_write_center(worksheet, "C3", data['test_name'], workbook)
_write_center(worksheet, "C4", data['test_version'], workbook)
_write_center(worksheet, "C5", data['test_pl'], workbook)
_write_center(worksheet, "C6", data['test_net'], workbook)
_write_center(worksheet, "D3", "测试用例总数", workbook)
_write_center(worksheet, "D4", "测试用例通过数", workbook)
_write_center(worksheet, "D5", "测试用例失败数", workbook)
_write_center(worksheet, "D6", "测试日期", workbook)
timenow = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
data1 = {"test_sum": len(TestReport),
"test_success": hpassnum,
"test_failed": len(TestReport) - hpassnum,
"test_date": timenow}
_write_center(worksheet, "E3", data1['test_sum'], workbook)
_write_center(worksheet, "E4", data1['test_success'], workbook)
_write_center(worksheet, "E5", data1['test_failed'], workbook)
_write_center(worksheet, "E6", data1['test_date'], workbook)
_write_center(worksheet, "F3", "测试用例通过率", workbook)
worksheet.merge_range('F4:F6', str(
(round(hpassnum / len(TestReport), 2)) * 100) + '%', get_format_center(workbook))
pie(workbook, worksheet)
def test_detail(worksheet):
# set column widths
worksheet.set_column("A:A", 30)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_column("F:F", 20)
worksheet.set_column("G:G", 20)
worksheet.set_column("H:H", 20)
# set row heights
for hrow in range(len(TestReport) + 2):
worksheet.set_row(hrow, 30)
worksheet.merge_range('A1:H1', '测试详情', get_format(workbook, {'bold': True,
'font_size': 18,
'align': 'center',
'valign': 'vcenter',
'bg_color': 'blue',
'font_color': '#ffffff'}))
_write_center(worksheet, "A2", '用例ID', workbook)
_write_center(worksheet, "B2", '接口名称', workbook)
_write_center(worksheet, "C2", '接口协议', workbook)
_write_center(worksheet, "D2", 'URL', workbook)
_write_center(worksheet, "E2", '参数', workbook)
_write_center(worksheet, "F2", '预期值', workbook)
_write_center(worksheet, "G2", '实际值', workbook)
_write_center(worksheet, "H2", '测试结果', workbook)
data = {"info": TestReport} # 获取测试结果被添加到测试报告里
temp = len(TestReport) + 2
global hpassnum
for item in data["info"]:
if item["t_result"] == "通过":
hpassnum += 1
else:
pass
_write_center(worksheet, "A" + str(temp), item["t_id"], workbook)
_write_center(worksheet, "B" + str(temp), item["t_name"], workbook)
_write_center(worksheet, "C" + str(temp), item["t_method"], workbook)
_write_center(worksheet, "D" + str(temp), item["t_url"], workbook)
_write_center(worksheet, "E" + str(temp), item["t_param"], workbook)
_write_center(worksheet, "F" + str(temp), item["t_hope"], workbook)
_write_center(worksheet, "G" + str(temp), item["t_actual"], workbook)
_write_center(worksheet, "H" + str(temp), item["t_result"], workbook)
temp = temp - 1
test_detail(worksheet2)
init(worksheet)
workbook.close()
|
[
"liyin@16010.net"
] |
liyin@16010.net
|
15cb6d7afdc7fc7eaaeaf492f771909ea8cda285
|
833b43575815ce6c5fa8cbac2628cb774331eda7
|
/chap14_p277_code1.py
|
ae943fb048c09744b8a7feb977edb8216aa7d722
|
[] |
no_license
|
ai-times/infinitybook_python
|
d9529dfe7d486bf5c713d52b530915a23cbf1812
|
1c011c31994d07fe959bba9b519c4365f5f40e7f
|
refs/heads/main
| 2023-03-01T12:18:20.695888
| 2021-02-14T04:22:40
| 2021-02-14T04:22:40
| 338,578,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
code = input("주민번호 앞자리 입력 : ")
y = "19" + code[0:2]
m = code[2:4]
d = code[4:6]
age = 2019-int(y)+1
print("당신은", y, "년에 태어났군요.")
print("당신의 생일은", m, "월", d, "일 이군요.")
print("당신의 올해", age, "살 이군요")
|
[
"wskim092@gmail.com"
] |
wskim092@gmail.com
|
af9738f6a4a38219406718a295ea78a732a3232d
|
a5205843ab0c6cff8f76f32436c580cfd523e9ad
|
/edit_sample_craps.py
|
cb01ef33a0829d35b2b1f5ee2d59d478e474790b
|
[] |
no_license
|
LRBeaver/Random
|
70194cde5d26b5e268d7c245056cedc8d0a6618d
|
90ec0036a4efb383d6496a7724a108aa1b2f2ddf
|
refs/heads/master
| 2020-12-24T18:42:37.716951
| 2016-04-14T12:52:56
| 2016-04-14T12:52:56
| 56,150,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
__author__ = 'lyndsay.beaver'
import random
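
# Rules encoded below (standard craps): on the come-out roll, 7 or 11 wins
# ("natural") and 2, 3 or 12 loses ("crap-out"); any other total becomes the
# "point", and the shooter then keeps rolling until hitting the point again
# (win) or rolling a 7 (lose, "seven-out").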
def playRound():
print("The come-out phase: ")
print()
rollDice = input("Hit ENTER to roll the dice...")
diceTotal = random.randint(1,6) + random.randint(1,6)
if diceTotal in (7,11):
print("You rolled a", diceTotal)
print("You Win: Natural!")
elif diceTotal in (2,3,12):
print("You rolled a", diceTotal)
print("You Lose: Crap-Out!")
else:
print("You rolled a", diceTotal)
pointPhase(diceTotal)
def pointPhase(diceTotal):
    print("The Point Phase:")
    rollDice = input("Hit ENTER to roll the dice...")
    # keep rolling until the point is hit (win) or a 7 comes up (lose)
    while True:
        diceTotalPoint = random.randint(1,6) + random.randint(1,6)
        print("You Rolled a", diceTotalPoint)
        if diceTotalPoint == diceTotal:
            print("You Win: Hit!")
            break
        elif diceTotalPoint == 7:
            print("You lose: Seven-Out!")
            break
        else:
            print("Keep Rolling")
def main():
playRound()
main()
|
[
"lrbeaver@gmail.com"
] |
lrbeaver@gmail.com
|
221f4c8150fddc906199d788e70ea2553500a8f7
|
2903ac66369b6bd45889b12629d8c8e34e6089b3
|
/frappe_training/config/desktop.py
|
60ea98f53064fec38a864b70c7e641453fb4dd78
|
[
"MIT"
] |
permissive
|
sivaranjanipalanivel/training
|
6fa50b5f97fb00894404fba11122599fd796623c
|
b177c56a319c07dc3467ce3113e332ecee9b81fa
|
refs/heads/master
| 2023-07-17T06:11:29.894363
| 2021-08-02T14:47:31
| 2021-08-02T14:47:31
| 391,987,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "frappe_training",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("frappe_training")
}
]
|
[
"you@example.com"
] |
you@example.com
|
a69a5ede8bc3f3237d149df470385eda0dce6cb6
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/YLf984Eod74ha4Tok_9.py
|
8d3ff278a5843fa0485c8620003772aaf0edbc8e
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
"""
A solar year is roughly 365.25 days long, but calendars count whole days. To
avoid the leftover fraction, the 0.25 days are added up over every four-year
cycle and folded into one year of 366 days (with February 29 as an
intercalary day), called a **leap year**; the other years of the cycle have
365 days and are **not leap years**.
In this challenge, (though quite repetitive), we'll take it to a new level,
where, you are to determine if it's a leap year or not without the use of the
**datetime** class, **if blocks** , **if-elif blocks** , **conditionals** (`a
if b else c`) nor the logical operators **AND** (`and`) and **OR** (`or`) with
the exemption of the **NOT** (`not`) operator.
Return `True` if it's a leap year, `False` otherwise.
### Examples
leap_year(1979) ➞ False
leap_year(2000) ➞ True
leap_year(2016) ➞ True
leap_year(1521) ➞ False
leap_year(1996) ➞ True
leap_year(1800) ➞ False
### Notes
You can't use the **datetime** class, **if statements** in general, the
**conditional** nor the **logical operators** (`and`, `or`).
"""
def leap_year(yr):
    # bool arithmetic stands in for the banned `if`/`and`/`or`:
    # leap iff divisible by 400, or divisible by 4 but not by 100
    return bool((yr % 400 == 0) + (yr % 4 == 0) * bool(yr % 100))
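
# Quick sanity check against the examples above (a hypothetical harness, not
# part of the original submission):
if __name__ == '__main__':
    cases = [(1979, False), (2000, True), (2016, True),
             (1521, False), (1996, True), (1800, False)]
    for year, expected in cases:
        assert leap_year(year) == expected, year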
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
85863f93c57442e96186df3112f03e59a994bebf
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/InnerDetector/InDetExample/InDetSLHC_Example/share/jobOptions_SLHC_nn_prodTrainingSample.py
|
f8455debc388b3c7208aa0f0ff0ccf73d99c6714
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,056
|
py
|
###############################################################################
# jobOptions_SLHC_nn_prodTrainingSample.py
#
# script that reads a series of simulated HIT files, runs digitization and
# clusterization and produces the Ntuples needed to train the cluster splitting
# neural network.
# The ntuples produced are stored in TrkValidation.root
# -Validation
# |-> PixelRIOs : Cluster info.
# |-> NNinput : Input to train the NN.
#
# Note: These jobOptions WILL NOT WORK as-is, for either SLHC or IBL.
# YOU NEED TO EDIT PixelClusterValidationNtupleWriter.cxx
# IN InnerDetector/InDetValidation/InDetTrackValidation/InDetTrackValidation/
# TO USE ToT INSTEAD OF CHARGE IN NNinput
#
# Note 2: These jobOptions are based on InDetSLHCExample options. There
# is also a stand-alone .py file in this dir.
#
# Author: Tiago Perez <tperez@cern.ch>
# Date: 9-Jan-2012
##############################################################################
#--------------------------------------------------------------
# Template jobOptions: SLHC
# - Digitization
#--------------------------------------------------------------
from AthenaCommon.GlobalFlags import globalflags
globalflags.ConditionsTag = "OFLCOND-SDR-BS14T-ATLAS-00"
include("InDetSLHC_Example/preInclude.SLHC.py")
include("InDetSLHC_Example/preInclude.SiliconOnly.py")
from AthenaCommon.AthenaCommonFlags import jobproperties
jobproperties.AthenaCommonFlags.EvtMax=-1
#
## Input data
DATADIR="root://eosatlas.cern.ch//eos/atlas/user/t/tperez/"
#
## MinBias
#FILEPATH+="mc11_slhcid.108119.Pythia8_minbias_Inelastic_high.merge.HITS.e876_s1333_s1335_tid514272_00/"
#FILEPATH+="HITS.514272._000030.pool.root.1"
#
## ttbar
FILEPATH=DATADIR+"mc11_slhcid.105568.ttbar_Pythia.simul.HITS.e842_s1333_tid510282_00/"
FILEPATH+="HITS.510282._000429.pool.root.1"
#
#
jobproperties.AthenaCommonFlags.PoolHitsInput=[FILEPATH]
jobproperties.AthenaCommonFlags.PoolRDOOutput=DATADIR+"ttbar.digit.RDO.pool.root"
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.DetDescrVersion='ATLAS-SLHC-01-00-00'
from Digitization.DigitizationFlags import jobproperties
jobproperties.Digitization.doInDetNoise=False
include ( "Digitization/Digitization.py" )
include("InDetSLHC_Example/postInclude.SLHC_Digitization.py")
#
# Start clusterization
#
#
# Suppress usage of pixel distortions when validating simulation
# (otherwise clusters are corrected for module bow while G4 is not)
#
from IOVDbSvc.CondDB import conddb
if not conddb.folderRequested('/Indet/PixelDist'):
conddb.addFolder('PIXEL_OFL','/Indet/PixelDist')
conddb.addOverride("/Indet/PixelDist","InDetPixelDist-nominal")
#
# Include clusterization
# (need to set up services not already configured for digitization)
#
#include ("PixelConditionsServices/PixelRecoDb_jobOptions.py")
#
## Disable some COOL queries ?
from PixelConditionsTools.PixelConditionsToolsConf import PixelRecoDbTool
ToolSvc += PixelRecoDbTool()
ToolSvc.PixelRecoDbTool.InputSource = 0
## Configure the clusterization tool
from SiClusterizationTool.SiClusterizationToolConf import InDet__ClusterMakerTool
ClusterMakerTool = InDet__ClusterMakerTool( name = "InDet::ClusterMakerTool",
UsePixelCalibCondDB = False )
ToolSvc += ClusterMakerTool
## Configure PixelConditionsSummarySvc
from PixelConditionsServices.PixelConditionsServicesConf import PixelConditionsSummarySvc
InDetPixelConditionsSummarySvc = PixelConditionsSummarySvc()
InDetPixelConditionsSummarySvc.UseSpecialPixelMap = False
InDetPixelConditionsSummarySvc.UseDCS = False
InDetPixelConditionsSummarySvc.UseByteStream = False
ServiceMgr += InDetPixelConditionsSummarySvc
print InDetPixelConditionsSummarySvc
from InDetPrepRawDataFormation.InDetPrepRawDataFormationConf import InDet__PixelClusterization
job += InDet__PixelClusterization("PixelClusterization")
#
# Include PixelValidationNtuple
# with some information about Geant4 hits
#
from InDetTrackValidation.InDetTrackValidationConf import InDet__PixelClusterValidationNtupleWriter
job += InDet__PixelClusterValidationNtupleWriter("PixelNtupleWriter",
NtupleFileName = 'TRKVAL',
NtupleDirectoryName = 'Validation',
NtupleTreeName = 'PixelRIOs',
PixelClusterContainer = 'PixelClusters',
WriteDetailedPixelInformation = False,
DoHits = True,
DoMC = True,
FindNotAssociatedParticle= False,
WriteNNTraining = True,
# Extra flags ONLY ON PRIVATE InDetTrackValidation/PixelClusterValidationNtupleWriter
UseToT = True,
DetGeo = 'SLHC')
print job.PixelNtupleWriter
theApp.HistogramPersistency = 'ROOT'
if not 'OutputNTpl' in dir():
OutputNTpl = "TrkValidation_noTrack_ttbar_.root"
# Root file definition
if not hasattr(ServiceMgr, 'THistSvc'):
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc()
ServiceMgr.THistSvc.Output += [ "TRKVAL DATAFILE='" + OutputNTpl + "' TYPE='ROOT' OPT='RECREATE'" ]
theApp.Dlls += [ 'RootHistCnv' ]
#
#
#
MessageSvc = Service( "MessageSvc" )
#increase the number of letter reserved to the alg/tool name from 18 to 30
MessageSvc.Format = "% F%50W%S%7W%R%T %0W%M"
# to change the default limit on number of message per alg
MessageSvc.defaultLimit = 9999999 # all messages
# Set output level threshold among DEBUG, INFO, WARNING, ERROR, FATAL
MessageSvc.OutputLevel = INFO
include("InDetSLHC_Example/postInclude.SLHC_Setup.py")
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
d49a088bb0cfd1df5be0927b59cd9782ace85d05
|
d0e83b3f551c6af16aa0c8ed4ff074b3ec268120
|
/processors/feat.py
|
e48cc144ee2ba12b7865cdbb61a44eb472849820
|
[] |
no_license
|
SamuelLAN/kaggle_SCTP
|
cfb0228a81d71b2f1c315352bd6435042066967f
|
50ff2895baa6de29bdb19bfb20ca76718079d188
|
refs/heads/master
| 2020-04-25T16:22:07.803524
| 2019-04-03T09:06:12
| 2019-04-03T09:06:12
| 172,909,260
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
#!/usr/bin/Python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
def lda(train_x, train_y, val_x, test_x):
    ''' Reduce the dimensionality of the features with LDA '''
_lda = LDA()
train_x = _lda.fit_transform(train_x, train_y)
val_x = _lda.transform(val_x)
test_x = _lda.transform(test_x)
return train_x, val_x, test_x
def add_lda(train_x, train_y, val_x, test_x):
    ''' Reduce the dimensionality of the features with LDA, and append the LDA feature to the original features '''
_lda = LDA()
train_lda = _lda.fit_transform(train_x, train_y)
val_lda = _lda.transform(val_x)
test_lda = _lda.transform(test_x)
train_x = np.hstack([train_x, train_lda])
val_x = np.hstack([val_x, val_lda])
test_x = np.hstack([test_x, test_lda])
return train_x, val_x, test_x
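
if __name__ == '__main__':
    # Minimal sanity check on random data; the shapes and the binary target
    # are assumptions for illustration, not taken from the original pipeline.
    rng = np.random.RandomState(0)
    train_x, train_y = rng.rand(20, 5), rng.randint(0, 2, 20)
    val_x, test_x = rng.rand(5, 5), rng.rand(5, 5)
    a, b, c = add_lda(train_x, train_y, val_x, test_x)
    print(a.shape, b.shape, c.shape)  # binary target -> one LDA component: (20, 6) (5, 6) (5, 6)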
|
[
"412206186@qq.com"
] |
412206186@qq.com
|
7e9dcb08a5d09de543ba08b0a18e43862bec4e80
|
8537ecfe2a23cfee7c9f86e2318501f745078d67
|
/Practise_stuff/nympy_commands/oo_numpy_array_manipulation2.py
|
2fd9ce51e253406e6f5724fd2fcd8efc7014909a
|
[] |
no_license
|
oolsson/oo_eclipse
|
91d33501d9ed6c6b3c51bb22b635eb75da88e4e1
|
1828866bc4e1f67b279c5a037e4a6a4439ddb090
|
refs/heads/master
| 2021-01-01T20:17:12.644890
| 2015-11-30T09:49:41
| 2015-11-30T09:49:41
| 23,485,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
'''
Created on Jan 22, 2012

@author: oo
'''
import numpy as np

A = np.array([1, 2, 3])
B = np.array([4, 5, 6])
c = np.concatenate((A, B))
print(c)
print('2------------')
c = np.column_stack((A, B))
print(c)
print('3------------')
c = np.hstack((A, B))
print(c)
c = np.vstack((A, B))
print(c)
print('4------------')
c = np.array_split(c, 1)
print(c)
print('5-----------')
d = np.tile(np.array([1]), 7)
print(d)
print('6-----------')
x = np.array([[1, 2], [3, 4]])
print(np.repeat(x, 1))
print(np.repeat(x, 3, axis=1))
print(np.repeat(x, [1, 2], axis=0))
|
[
"o.h.olsson@gmail.com"
] |
o.h.olsson@gmail.com
|
e3c3e76cd3f6345219ed73d91c75b8ea32a227b5
|
eab1756b01717e81537133400f36aea4d7a0876f
|
/dawn/launch-tong.py
|
cc90b2066a548a7ed4ba16879b0631e9ccd5a8e5
|
[] |
no_license
|
bearpelican/cluster
|
d677fe392ac1196b77e3f8fb79e530ec8371080f
|
2e316cf1def0b72b47f79a864ed3aa778c297b95
|
refs/heads/master
| 2020-03-21T06:52:57.514901
| 2018-08-10T10:20:26
| 2018-08-10T22:33:05
| 138,246,892
| 3
| 1
| null | 2018-06-22T02:51:07
| 2018-06-22T02:51:07
| null |
UTF-8
|
Python
| false
| false
| 2,593
|
py
|
#!/usr/bin/env python
# numpy01 image, see environment-numpy.org for construction
# (DL AMI v 3.0 based)
#
# us-east-1 AMIs
# numpy00: ami-f9d6dc83
# numpy01: ami-5b524f21
from collections import OrderedDict
import argparse
import os
import sys
import time
import boto3
module_path=os.path.dirname(os.path.abspath(__file__))
sys.path.append(module_path+'/..')
import util
util.install_pdb_handler()
parser = argparse.ArgumentParser(description='launch')
parser.add_argument('--ami', type=str, default='ami-5b524f21',
help="name of AMI to use ")
parser.add_argument('--group', type=str, default='dawn_runs',
                     help="name of the run group")
parser.add_argument('--name', type=str, default='baseline5-tong',
help="name of the current run")
parser.add_argument('--instance-type', type=str, default='p3.16xlarge',
help="type of instance")
parser.add_argument('--zone', type=str, default='us-east-1f',
help='which availability zone to use')
parser.add_argument('--linux-type', type=str, default='ubuntu',
help='which linux to use: ubuntu or amazon')
parser.add_argument('--role', type=str, default='launcher',
help='launcher or worker')
args = parser.parse_args()
def main():
import aws_backend
run = aws_backend.make_run(args.name, ami=args.ami,
availability_zone=args.zone,
linux_type=args.linux_type)
job = run.make_job('main', instance_type=args.instance_type)
job.wait_until_ready()
print(job.connect_instructions)
# if tensorboard is running, kill it, it will prevent efs logdir from being
# deleted
job.run("tmux kill-session -t tb || echo ok")
logdir = '/efs/runs/%s/%s'%(args.group, args.name)
job.run('rm -Rf %s || echo failed' % (logdir,)) # delete prev logs
# Launch tensorboard visualizer in separate tmux session
job.run("tmux new-session -s tb -n 0 -d")
job.run("tmux send-keys -t tb:0 'source activate mxnet_p36' Enter")
job.run("tmux send-keys -t tb:0 'tensorboard --logdir %s' Enter"%(logdir,))
job.run('source activate mxnet_p36')
job.run('killall python || echo failed') # kill previous run
job.run('pip install -U https://s3.amazonaws.com/inferno-dlami/tensorflow/p3/tensorflow-1.5.0-cp36-cp36m-linux_x86_64.whl')
job.upload('imagenet_utils.py')
job.upload('resnet_model.py')
job.upload('resnet.b512.baseline.py')
job.run_async('python resnet.b512.baseline.py --logdir=%s'%(logdir,))
if __name__=='__main__':
main()
|
[
"yaroslavvb@gmail.com"
] |
yaroslavvb@gmail.com
|
d6ec1defab5ed57216ed8a7c1927d4b569d4f5e7
|
f8af2d190600221b7a597ef4de8ee15137e01266
|
/django_mysite/polls/serializers.py
|
eef85178a606057b3aaaf04ed47a05c101d57c8e
|
[] |
no_license
|
rifqirosyidi/REST-Framework-Searching
|
3b4d64ca1d2217a48f1ec1c6591e1b7e1a42797d
|
25481026728edfd564bb6ba18c8ce73040e07543
|
refs/heads/master
| 2023-04-26T02:11:43.684540
| 2021-04-12T09:43:09
| 2021-04-12T09:43:09
| 206,774,068
| 1
| 0
| null | 2023-04-21T20:36:46
| 2019-09-06T10:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 202
|
py
|
from rest_framework import serializers
from .models import Question, Choice
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = '__all__'
|
[
"rief.rosyidi@gmail.com"
] |
rief.rosyidi@gmail.com
|
8d308bb5fcc1a686835c15b6f0c7d4dabfde7c44
|
f9b7930e6f43eca26abf87b39961fc2d022db54a
|
/Python/medium/338. Counting Bits.py
|
01ee506d021c0422aa75949e9d17355471bf95da
|
[] |
no_license
|
LRenascence/LeetCode
|
639452dd3bf65a14d0056c01e203a7082fbdc326
|
1a0e1d1503e0a7bff6917491a964a08c572827fb
|
refs/heads/master
| 2021-05-12T03:41:35.346377
| 2021-01-07T23:39:14
| 2021-01-07T23:39:14
| 117,622,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
"""
Given a non-negative integer num, for every number i in the range 0 ≤ i ≤ num calculate the number of 1's in its binary representation and return them as an array.
Example 1:
Input: 2
Output: [0,1,1]
Example 2:
Input: 5
Output: [0,1,1,2,1,2]
Follow up:
It is very easy to come up with a solution with run time O(n*sizeof(integer)). But can you do it in linear time O(n) /possibly in a single pass?
Space complexity should be O(n).
Can you do it like a boss? Do it without using any builtin function like __builtin_popcount in c++ or in any other language.
"""
from typing import List


class Solution:
    def countBits(self, num: int) -> List[int]:
        result = [0] * (num + 1)
        for i in range(num + 1):
            # i has the same set bits as i >> 1, plus its own lowest bit
            result[i] = result[i >> 1] + (i & 1)
        return result
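
# e.g. Solution().countBits(5) -> [0, 1, 1, 2, 1, 2], matching Example 2 above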
|
[
"im.renascence@gmail.com"
] |
im.renascence@gmail.com
|
3ef43777b05972b64a9d10046115d44bce3e8128
|
0c672b0b8431064617831d16bf0982d5d3ce6c27
|
/utils/proxy_api.py
|
bf5056d222433e6c27a71950ba9f9d043be6d898
|
[] |
no_license
|
buxuele/amazon_books
|
617327376044ffd4e760fdc1a71962119717cfe8
|
691bd3e48bd1730dbc4a4a855e84e0b1c3e9c2ec
|
refs/heads/master
| 2023-03-09T23:18:14.730828
| 2021-03-01T10:53:47
| 2021-03-01T10:53:47
| 342,610,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
import time
import requests
from utils.my_timer import timer
from utils.get_user_agent import get_a_ua
from utils.mongoDB import Mongo
import config  # root directory and database names
from pprint import pprint
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
class SmallProxy:
def __init__(self, china=True):
self.country = china
self.m = Mongo(config.proxy_db, config.proxy_coll)
self.url = "https://ip.jiangxianli.com/api/proxy_ips"
self.headers = {'User-Agent': get_a_ua()}
self.real_ip = self.find_myself()
@staticmethod
def find_myself():
target = 'http://httpbin.org/ip'
resp = requests.get(target)
return resp.json()["origin"]
    # Fetch more proxies. This part came out quite nicely, if I do say so myself.
def make_payloads(self):
nations = ["俄罗斯", "美国", "加拿大", "日本", "德国", "香港", "印度尼西亚", "法国"]
if self.country:
pay = [{"page": c, "country": "中国", "order_by": "speed"} for c in range(1, 5)]
else:
pay = [{"page": 1, "country": b, "order_by": "speed"} for b in nations]
return pay
def greet(self, pay):
resp = requests.get(self.url, params=pay, headers=self.headers)
if resp.status_code == 200:
return resp.json()
else:
print(f"Sorry! 这个代理网站有问题!")
return None
@timer
def get_all_proxy(self):
temp = []
for k in self.make_payloads():
d = self.greet(k) # d <dict>
if d:
all_data = d["data"]["data"]
for t in all_data:
# if t["anonymity"] == 2: # 按匿名度来排除。
a = t["protocol"] + "://" + t["ip"] + ":" + t["port"]
temp.append(a)
print(temp)
print(len(temp))
return temp
def speed_status(self, proxy=None):
url = "http://httpbin.org/ip"
resp = requests.get(url, proxies={"http": proxy}, timeout=1)
        # the proxy is only valid when the IP seen through it differs from our real IP
if resp.status_code == 200 and resp.json()["origin"] != self.real_ip:
print("test ip", proxy)
print("real ip : ", resp.json()["origin"])
self.m.add_to_db({"url": proxy})
@timer
def run(self):
fake_proxy = self.get_all_proxy()
        # setting this to about 20 is just right; too many workers is counterproductive
with ThreadPoolExecutor(max_workers=16) as executor:
future_tasks = [executor.submit(self.speed_status, p) for p in fake_proxy]
wait(future_tasks, return_when=ALL_COMPLETED)
def show_product(self):
self.m.get_unique(show=True)
if __name__ == '__main__':
p = SmallProxy(china=True)
# p.main()
p.run()
time.sleep(.1)
p.show_product()
|
[
"baogexuxuele@163.com"
] |
baogexuxuele@163.com
|
34322ab0be08ec02c0cf670b8835ce5086251b9a
|
add5ca4ed6f5a5030cfcd60a09e502390ffc4936
|
/full_code/paddle/conf/img_qa_gate2_gen.py
|
dd3f38a36d621d037da12a1a132552fe9d2eb6ae
|
[] |
no_license
|
yangyi02/vision_language
|
1f0b10e648a1ef0ea88edd30e41581d25969df27
|
9c55e5115d03bab58cf6165f63c9a6f426ed87ce
|
refs/heads/master
| 2020-04-02T19:45:25.051432
| 2018-10-25T22:32:39
| 2018-10-25T22:32:39
| 154,745,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,331
|
py
|
# -*- coding: utf-8 -*-
from math import sqrt
import os
import sys
from trainer.recurrent_units import LstmRecurrentUnit
model_type('recurrent_nn')
# data setting
gen_list = get_config_arg('gen_list', str, './gen.list')
result_file = get_config_arg('result_file', str, './result.txt')
# dictionary setting
dict_file = get_config_arg('dict_file', str, './dict.txt')
dict_pkl = get_config_arg('dict_pkl', str, './dict.pkl')
# image feature setting
img_feat_list = get_config_arg('img_feat_list', str, './img_feat.list')
# feature dimension setting
img_feat_dim = get_config_arg('img_feat_dim', int, 4096)
word_embedding_dim = 512
hidden_dim = 512
multimodal_dim = 1024
dict_dim = len(open(dict_file).readlines())
start_index = dict_dim-2
end_index = dict_dim-1
# hyperparameter setting
Settings(
    batch_size = 8, # this must equal trainer_count
learning_rate = 0,
)
# data provider setting
TestData(
PyData(
files = gen_list,
load_data_module = 'join_test',
load_data_object = 'processData',
load_data_args = ' '.join([dict_pkl, img_feat_list, str(img_feat_dim), '1.0'])
)
)
##### network #####
Inputs('question_id', 'img_feat', 'question')
Outputs('predict_word')
# data layers
DataLayer(name = 'question_id', size = 1)
DataLayer(name = 'img_feat', size = img_feat_dim)
DataLayer(name = 'question', size = dict_dim)
# question embedding input: question_embedding
MixedLayer(name = 'question_embedding',
size = word_embedding_dim,
bias = False,
inputs = TableProjection('question',
parameter_name = 'word_embedding',
),
)
# question hidden input
MixedLayer(name = 'question_input',
size = hidden_dim,
active_type = 'stanh',
inputs = FullMatrixProjection('question_embedding'),
)
# question hidden input: encoder
RecurrentLayerGroupBegin('encoder' + '_layer_group',
in_links = ['question_input'],
out_links = ['encoder'],
seq_reversed = False,
)
LstmRecurrentUnit(name = 'encoder',
size = hidden_dim/4,
active_type = 'relu',
state_active_type = 'linear',
gate_active_type = 'sigmoid',
inputs = [IdentityProjection('question_input')],
)
RecurrentLayerGroupEnd('encoder' + '_layer_group')
# get last of encoder
Layer(name = 'encoder_last',
type = 'seqlastins',
active_type = '',
bias = False,
inputs = [Input('encoder')],
)
# rnn1
RecurrentLayerGroupBegin('rnn1' + '_layer_group',
in_links = [],
out_links = ['predict_word'],
seq_reversed = False,
generator = Generator(
max_num_frames = 20,
beam_size = 5,
num_results_per_sample = 1,
),
)
img_feat_memory = Memory(name = 'img_feat_memory',
size = img_feat_dim,
boot_layer = 'img_feat',
is_sequence = False,
)
MixedLayer(name = 'img_feat_memory',
size = img_feat_dim,
bias = False,
inputs = IdentityProjection(img_feat_memory),
)
question_memory = Memory(name = 'question_memory',
size = hidden_dim/4,
boot_layer = 'encoder_last',
is_sequence = False,
)
MixedLayer(name = 'question_memory',
size = hidden_dim/4,
bias = False,
inputs = IdentityProjection(question_memory),
)
predict_word_memory = Memory(name = 'predict_word',
size = dict_dim,
boot_with_const_id = start_index,
)
MixedLayer(name = 'predict_word_embedding',
size = word_embedding_dim,
bias = False,
inputs = TableProjection(predict_word_memory,
parameter_name = 'word_embedding',
),
)
# hidden1
MixedLayer(name = 'hidden1',
size = hidden_dim,
active_type = 'stanh',
bias = Bias(parameter_name = '_hidden1.wbias'),
inputs = FullMatrixProjection('predict_word_embedding',
parameter_name = '_hidden1.w0'),
)
LstmRecurrentUnit(name = 'rnn1',
size = hidden_dim/4,
active_type = 'relu',
state_active_type = 'linear',
gate_active_type = 'sigmoid',
inputs = [IdentityProjection('hidden1')],
)
# language unit
MixedLayer(name = 'language',
size = multimodal_dim,
active_type = 'linear',
bias = Bias(parameter_name = '_language.wbias'),
inputs = [FullMatrixProjection(question_memory, parameter_name = '_language.w0'),
FullMatrixProjection('predict_word_embedding', parameter_name = '_language.w1'),
FullMatrixProjection('rnn1', parameter_name = '_language.w2'),
],
# drop_rate = 0.5,
)
MixedLayer(name = 'language_gate',
size = 1,
active_type = 'sigmoid',
bias = Bias(parameter_name = 'language_gate.b',
initial_std = 0.0, initial_mean = -2.0),
inputs = FullMatrixProjection('language',
parameter_name = 'language_gate_proj')
)
Layer(name = 'language_gate_expanded',
type = 'featmap_expand',
num_filters = multimodal_dim,
inputs = FullMatrixProjection('language_gate')
)
MixedLayer(name = 'gated_language',
size = multimodal_dim,
bias = False,
inputs = DotMulOperator(['language_gate_expanded', 'language'])
)
# hidden2
MixedLayer(name = 'hidden2',
size = multimodal_dim,
active_type = 'stanh',
bias = Bias(parameter_name = '_hidden2.wbias'),
inputs = [IdentityProjection('gated_language', parameter_name = '_hidden2.w0'),
FullMatrixProjection(img_feat_memory, parameter_name = '_hidden2.w1'),
],
# drop_rate = 0.5,
)
# hidden3
#Layer(
# name = 'hidden3',
# type = 'mixed',
# size = word_embedding_dim,
# active_type = 'stanh',
# inputs = FullMatrixProjection(
# 'hidden2',
# initial_std = sqrt(1. / multimodal_dim)),
#)
# output
Layer(name = 'output',
type = 'fc',
size = dict_dim,
active_type = 'softmax',
bias = Bias(parameter_name = '_output.wbias'),
inputs = [Input('hidden2', parameter_name = '_output.w0')],
#inputs = TransposedFullMatrixProjection(
# 'hidden3',
# parameter_name = 'wordvecs'),
)
Layer(
name = 'predict_word',
type = 'maxid',
inputs = 'output',
)
Layer(
name = 'eos_check',
type = 'eos_id',
eos_id = end_index,
inputs = ['predict_word'],
)
RecurrentLayerGroupEnd('rnn1' + '_layer_group')
# Write question and answer pairs to file
Evaluator(
name = 'caption_printer',
type = 'seq_text_printer',
dict_file = dict_file,
result_file = result_file,
#delimited = False,
inputs = ['question_id', 'question', 'predict_word'],
)
|
[
"yangyi02@gmail.com"
] |
yangyi02@gmail.com
|
eecde9e85f8bbc1b9eda6d9cab643cadd93edcab
|
d970e32d23e84fe0f6b5ba1694e2958d52fce586
|
/sample_scripts/sample_tokenization.py
|
f165ed859675d95ce1ca9d1aa24545228ddd3e2f
|
[
"MIT"
] |
permissive
|
Kensuke-Mitsuzawa/sample-codes-supporters-tutorial
|
8e6f1ed794732fa87176333286e65898e321f60f
|
ae9b544ddd3a782e76a30af257b43f88341ba696
|
refs/heads/master
| 2023-05-31T22:15:03.313349
| 2018-02-27T02:07:00
| 2018-02-27T02:07:00
| 79,502,186
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,661
|
py
|
from JapaneseTokenizer import MecabWrapper
from typing import List, Tuple, Dict, Union, Any
import json
import logging
import collections
import itertools
logger = logging.getLogger()
logger.setLevel(10)
SLEEP_TIME = 2
"""形態素分割のサンプルコードを示します
Python3.5.1の環境下で動作を確認しています。
"""
__author__ = "Kensuke Mitsuzawa"
__author_email__ = "kensuke.mit@gmail.com"
__license_name__ = "MIT"
def tokenize_text(input_text:str,
tokenizer_obj:MecabWrapper,
pos_condition:List[Tuple[str,...]])->List[str]:
"""* What you can do
- 1文書に対して、形態素分割を実施する
"""
### 形態素分割;tokenize() -> 品詞フィルタリング;filter() -> List[str]に変換;convert_list_object()
return tokenizer_obj.tokenize(input_text).filter(pos_condition=pos_condition).convert_list_object()
### 原型(辞書系)に変換せず、活用された状態のまま、欲しい場合は is_surface=True のフラグを与える
#return tokenizer_obj.tokenize(input_text, is_surface=True).filter(pos_condition=pos_condition).convert_list_object()
def aggregate_words(seq_tokenized:List[List[str]])->collections.Counter:
"""* What you can do
    - Count morpheme frequencies
* Params
- seq_tokenized
>>> [['スター・ウォーズ', 'エピソード4', '新たなる希望', 'スター・ウォーズ', 'エピソード4', 'なる', 'きぼう', 'STAR WARS', 'IV', 'A NEW HOPE', '1977年', 'する', 'アメリカ映画']]
"""
    ### flatten the 2-D list into 1-D: List[List[str]] -> List[str] ###
seq_words = itertools.chain.from_iterable(seq_tokenized)
word_frequency_obj = collections.Counter(seq_words)
return word_frequency_obj
def aggregate_words_by_label():
"""* What you can do
-
"""
pass
def main(tokenizer_obj:MecabWrapper,
seq_text_data:List[Dict[str,Any]],
pos_condition:List[Tuple[str,...]]):
"""* What you can do
    - Invoke the morphological analyzer
    - Aggregate word counts
"""
# --------------------------------------------------------------------------------------------------------------#
    # simple word counting
    ### use a Python list comprehension (a list comprehension runs faster than an explicit loop) ###
seq_tokenized_text = [
tokenize_text(input_text=wiki_text_obj['text'],tokenizer_obj=tokenizer_obj, pos_condition=pos_condition)
for wiki_text_obj in seq_text_data
]
    ### aggregate the word counts ###
    word_frequency_obj = aggregate_words(seq_tokenized_text)
    ### a Counter object can be converted to a dict with dict() ###
    dict(word_frequency_obj)
    ### reshape into [(word, frequency)] form so it can be sorted by frequency
    seq_word_frequency = [(word, frequency) for word, frequency in dict(word_frequency_obj).items()]
    ### sort by word frequency ###
print('Top 100 word frequency without label')
print(sorted(seq_word_frequency, key=lambda x:x[1], reverse=True)[:100])
# --------------------------------------------------------------------------------------------------------------#
    # aggregate words per label
    ### run tokenization while also keeping the label information ###
seq_tokenized_text = [
(wiki_text_obj['gold_label'], tokenize_text(input_text=wiki_text_obj['text'],tokenizer_obj=tokenizer_obj, pos_condition=pos_condition))
for wiki_text_obj in seq_text_data
]
    #### group by label ####
    ##### anonymous function returning the key used to group by label #####
    key_function= lambda x:x[0]
    #### always apply sorted() before groupby
g_object = itertools.groupby(sorted(seq_tokenized_text, key=key_function), key=key_function)
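    # note: itertools.groupby only groups *consecutive* items with equal keys
    # (e.g. ['a', 'b', 'a'] unsorted would give two separate 'a' groups),
    # which is why sorted() is applied just above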
    ### a list comprehension would work here too; a plain loop is used for readability ###
for label_name, element_in_label in g_object:
        ### element_in_label is a generator object with the structure [(label, [word])] ###
seq_list_tokens_with_label = list(element_in_label)
seq_list_tokens = [label_tokens[1] for label_tokens in seq_list_tokens_with_label]
word_frequency_obj_label = aggregate_words(seq_list_tokens)
seq_word_frequency_label = [(word, frequency) for word, frequency in dict(word_frequency_obj_label).items()]
print('*'*30)
print('Top 100 words For label = {}'.format(label_name))
print(sorted(seq_word_frequency_label, key=lambda x:x[1], reverse=True)[:100])
if __name__ == '__main__':
    ### create a MecabWrapper ###
    mecab_obj = MecabWrapper(dictType='ipadic')
    ### define only the POS tags to keep ###
    pos_condition = [('名詞', '固有名詞'), ('動詞', '自立'), ('形容詞', '自立')]  # proper nouns, independent verbs, independent adjectives
    ### load the wikipedia summary data ###
print('=' * 50)
path_wikipedia_summary_json = './wikipedia_data/wikipedia-summary.json'
with open(path_wikipedia_summary_json, 'r') as f:
seq_wiki_summary_text = json.load(f)
main(tokenizer_obj=mecab_obj,
pos_condition=pos_condition,
seq_text_data=seq_wiki_summary_text)
    ### load the wikipedia full data ###
print('=' * 50)
path_wikipedia_full_json = './wikipedia_data/wikipedia-full.json'
with open(path_wikipedia_full_json, 'r') as f:
seq_wiki_full_text = json.load(f)
main(tokenizer_obj=mecab_obj,
pos_condition=pos_condition,
seq_text_data=seq_wiki_full_text)
|
[
"kensuke.mit@gmail.com"
] |
kensuke.mit@gmail.com
|
691a09c696e5d06361215ef05998a05a23437589
|
6d1380a38aeb89df5db2f742ca0665f877a01133
|
/extract.py
|
294ccc36295e2534490615af52969899c62233dc
|
[] |
no_license
|
marijnkoolen/constitution-reference-parser
|
937ddbfdb56a1cba78093c7568e311ca6790f4f4
|
4083461abb4dd4cc8639625f9305b580eb69ec04
|
refs/heads/master
| 2021-01-02T09:27:17.951140
| 2015-09-29T12:47:49
| 2015-09-29T12:47:49
| 40,536,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,420
|
py
|
import re
import patterns
from document import ReferenceList, Reference
def extract_refs(document, sentence):
sentenceDone = 0
# returns a dictionary of document specific patterns
pattern = patterns.makeRefPatterns(document.RefUnits())
refList = ReferenceList(sentence, pattern)
while not sentenceDone:
# Start of a reference
matchStart = re.search(refList.pattern['refStart'], refList.sentence)
if matchStart:
extract_start_ref(matchStart, refList, document)
while pattern['refDummy'] in refList.sentence:
extract_sequence_refs(refList, document)
else:
# assumption: there is no reference in this sentence
# action: signal extraction is done
refList.FinishCurrent()
sentenceDone = 1
# check if this is a complex reference sequence
return refList
def extract_start_ref(matchStart, refList, document):
refList.sentence = re.sub(matchStart.group(0), refList.pattern['refDummy'], refList.sentence, 1)
refType, num1, rangeSymbol, num2 = matchStart.groups()
refType = refType.lower()
refNums = makeRange(num1, rangeSymbol, num2)
if refType in document.SkipUnits:
refList.sentence = re.sub(refList.pattern['refDummy'], "", refList.sentence, 1)
return 0
addToRefList(refType, refNums, refList)
refList.UpdatePrev(refType)
return 0
def extract_sequence_refs(refList, document):
refNums = []
refType = None
sep, conj, part, refType, refNums = findSequenceType(refList, document)
if refNums == []:
# assumption: if there is no next pattern, the sequence is done
# action: remove the reference dummy
refList.sentence = re.sub(refList.pattern['refDummy'], "", refList.sentence, 1)
refList.FinishCurrent()
refList.UpdatePrev('')
return 0
elif refType:
refType = refType.lower()
# if found type is too deep in hierarchy, ignore it
        # e.g. we don't consider paragraphs and sentences as part of the reference
if refType in document.SkipUnits:
refList.UpdatePrev(refType)
return 0
elif refType == None:
# if previous type is too deep in hierarchy, ignore it
        # e.g. we don't consider paragraphs and sentences as part of the reference
if refList.prevUnit in document.SkipUnits:
refNums = []
if sep:
parse_separator_ref(refType, refNums, refList, document)
elif conj:
parse_conjunction_ref(refType, refNums, refList, document)
elif part:
parse_part_of_ref(refType, refNums, refList)
if refType != None:
refList.UpdatePrev(refType)
def findSequenceType(refList, document):
mSepConjNumber = re.search(refList.pattern['refSepConjNumber'], refList.sentence)
mSepConjPartTypeNumber = re.search(refList.pattern['refSepConjPartTypeNumber'], refList.sentence)
sep = None
conj = None
part = None
refType = None
refNums = []
if mSepConjNumber:
refList.sentence = re.sub(mSepConjNumber.group(0), refList.pattern['refDummy'], refList.sentence, 1)
sep, conj, num1, rangeSymbol, num2 = mSepConjNumber.groups()
refNums = makeRange(num1, rangeSymbol, num2)
elif mSepConjPartTypeNumber:
refList.sentence = re.sub(mSepConjPartTypeNumber.group(0), refList.pattern['refDummy'], refList.sentence, 1)
sep, conj, part, refType, num1, rangeSymbol, num2 = mSepConjPartTypeNumber.groups()
refNums = makeRange(num1, rangeSymbol, num2)
return (sep, conj, part, refType, refNums)
def parse_separator_ref(refType, refNums, refList, document):
# 1. ref sep number -> new ref of same type
# assumption: type of new ref is implicit
# action: add refs similar to previous type
if refType == None:
addToRefList(None, refNums, refList)
# 2. ref sep type number -> new ref of same type
# assumption: type of new ref is explicit and of same type
elif refType == refList.prevUnit:
addToRefList(None, refNums, refList)
# 3. ref sep type number -> specification of existing ref
# assumption: hierarchical relations are written from high to low
# action: replace previous reference with hierarchical reference
elif refType in document.ContainedBy and refList.prevUnit in document.ContainedBy[refType]:
prevRef = refList.Last()
refList.RemoveLast()
for refNum in refNums:
reference = Reference()
reference.CopyFrom(prevRef)
reference.AddPart(refType, refNum)
refList.AddCurrent(reference)
# 4. ref sep type number -> new ref of different type
# assumption: previous ref was hierarchical, new ref is higher in hierarchy
# action: add refType as new reference
else:
addToRefList(refType, refNums, refList)
def parse_conjunction_ref(refType, refNums, refList, document):
# ref conj number -> ref
# assumptions:
# 1. no mention of type suggests these are
# references of the same type as the
# previous reference
if refType == None:
addToRefList(None, refNums, refList)
# ref conj type number -> ref
# previous reference has same type and higher
# level type
# assumptions:
# 2. explicit mention of type suggest this is a
# separate reference, but share higher level
# type
elif refType == refList.prevUnit:
prevRef = refList.Last()
for container in document.ContainedBy[refType]:
if container in prevRef.TargetParts:
for refNum in refNums:
reference = Reference()
reference.CopyFrom(prevRef)
reference.AddPart(refType, refNum)
refList.AddCurrent(reference)
break
# ref conj type number -> ref
# assumptions:
# 3. explicit mention of type suggests these are
# separate references
else:
addToRefList(refType, refNums, refList)
def parse_part_of_ref(refType, refNums, refList):
# ref part type number -> ref
# assumptions:
# 1. part of signals end of sequence
# 2. new type is container of all refs in sequence
for refNum in refNums:
for reference in refList.current:
reference.AddPart(refType, refNum)
refList.prevUnit = ''
refList.FinishCurrent()
# remove dummy reference
refList.sentence = re.sub(refList.pattern['refDummy'], "", refList.sentence, 1)
def addToRefList(refType, refNums, refList):
#print "DEBUG: addToRefList"
for refNum in refNums:
reference = Reference()
#print "adding reference of type {0} with number {1}".format(refType, refNum)
if refType == None:
reference.CopyFrom(refList.Last())
refType = refList.prevUnit
reference.AddPart(refType, refNum)
refList.AddCurrent(reference)
def makeRange(num1, rangeSymbol, num2):
if rangeSymbol and num2:
if int(num2) < int(num1):
return [num1]
return [unicode(num) for num in range(int(num1), int(num2)+1)]
return [num1]
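
# e.g. makeRange('3', '-', '5') -> [u'3', u'4', u'5'];
# makeRange('3', None, None) -> ['3']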
|
[
"marijn.koolen@gmail.com"
] |
marijn.koolen@gmail.com
|
fd7cdd39e9a8db86129719f700f436d19b4bc19f
|
1b36425f798f484eda964b10a5ad72b37b4da916
|
/posthog/models/event/event.py
|
2e6d0625403431f36a01778187c27ed6f634ddce
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dorucioclea/posthog
|
0408baa2a7ae98e5bea352c516f741ddc17c0a3e
|
8848981baf237117fb22d28af0770a0165881423
|
refs/heads/master
| 2023-01-23T11:01:57.942146
| 2023-01-13T09:03:00
| 2023-01-13T09:03:00
| 241,222,000
| 0
| 0
|
MIT
| 2020-02-17T22:34:37
| 2020-02-17T22:34:36
| null |
UTF-8
|
Python
| false
| false
| 5,830
|
py
|
import copy
import datetime
import re
from collections import defaultdict
from typing import Dict, List, Optional, Union
from dateutil.relativedelta import relativedelta
from django.db import models
from django.utils import timezone
from posthog.models.team import Team
SELECTOR_ATTRIBUTE_REGEX = r"([a-zA-Z]*)\[(.*)=[\'|\"](.*)[\'|\"]\]"
LAST_UPDATED_TEAM_ACTION: Dict[int, datetime.datetime] = {}
TEAM_EVENT_ACTION_QUERY_CACHE: Dict[int, Dict[str, tuple]] = defaultdict(dict)
# TEAM_EVENT_ACTION_QUERY_CACHE looks like team_id -> event ex('$pageview') -> query
TEAM_ACTION_QUERY_CACHE: Dict[int, str] = {}
DEFAULT_EARLIEST_TIME_DELTA = relativedelta(weeks=1)
class SelectorPart:
direct_descendant = False
unique_order = 0
def __init__(self, tag: str, direct_descendant: bool, escape_slashes: bool):
self.direct_descendant = direct_descendant
self.data: Dict[str, Union[str, List]] = {}
self.ch_attributes: Dict[str, Union[str, List]] = {} # attributes for CH
result = re.search(SELECTOR_ATTRIBUTE_REGEX, tag)
if result and "[id=" in tag:
self.data["attr_id"] = result[3]
self.ch_attributes["attr_id"] = result[3]
tag = result[1]
if result and "[" in tag:
self.data[f"attributes__attr__{result[2]}"] = result[3]
self.ch_attributes[result[2]] = result[3]
tag = result[1]
if "nth-child(" in tag:
parts = tag.split(":nth-child(")
self.data["nth_child"] = parts[1].replace(")", "")
self.ch_attributes["nth-child"] = self.data["nth_child"]
tag = parts[0]
if "." in tag:
parts = tag.split(".")
# Strip all slashes that are not followed by another slash
self.data["attr_class__contains"] = [self._unescape_class(p) if escape_slashes else p for p in parts[1:]]
tag = parts[0]
if tag:
self.data["tag_name"] = tag
@property
def extra_query(self) -> Dict[str, List[Union[str, List[str]]]]:
where: List[Union[str, List[str]]] = []
params: List[Union[str, List[str]]] = []
for key, value in self.data.items():
if "attr__" in key:
where.append(f"(attributes ->> 'attr__{key.split('attr__')[1]}') = %s")
else:
if "__contains" in key:
where.append(f"{key.replace('__contains', '')} @> %s::varchar(200)[]")
else:
where.append(f"{key} = %s")
params.append(value)
return {"where": where, "params": params}
def _unescape_class(self, class_name):
r"""Separate all double slashes "\\" (replace them with "\") and remove all single slashes between them."""
return "\\".join([p.replace("\\", "") for p in class_name.split("\\\\")])
class Selector:
parts: List[SelectorPart] = []
def __init__(self, selector: str, escape_slashes=True):
self.parts = []
# Sometimes people manually add *, just remove them as they don't do anything
selector = selector.replace("> * > ", "").replace("> *", "").strip()
tags = list(self._split(selector))
tags.reverse()
# Detecting selector parts
for index, tag in enumerate(tags):
if tag == ">" or tag == "":
continue
direct_descendant = index > 0 and tags[index - 1] == ">"
part = SelectorPart(tag, direct_descendant, escape_slashes)
part.unique_order = len([p for p in self.parts if p.data == part.data])
self.parts.append(copy.deepcopy(part))
def _split(self, selector):
in_attribute_selector = False
in_quotes: Optional[str] = None
part: List[str] = []
for char in selector:
if char == "[" and in_quotes is None:
in_attribute_selector = True
if char == "]" and in_quotes is None:
in_attribute_selector = False
if char in "\"'":
if in_quotes is not None:
if in_quotes == char:
in_quotes = None
else:
in_quotes = char
if char == " " and not in_attribute_selector:
yield "".join(part)
part = []
else:
part.append(char)
yield "".join(part)
class Event(models.Model):
class Meta:
indexes = [
models.Index(fields=["elements_hash"]),
models.Index(fields=["timestamp", "team_id", "event"]),
# Separately managed:
# models.Index(fields=["created_at"]),
# NOTE: The below index has been added as a manual migration in
# `posthog/migrations/0024_add_event_distinct_id_index.py, but I'm
# adding this here to improve visibility.
# models.Index(fields=["distinct_id"], name="idx_distinct_id"),
]
created_at: models.DateTimeField = models.DateTimeField(auto_now_add=True, null=True, blank=True)
team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)
event: models.CharField = models.CharField(max_length=200, null=True, blank=True)
distinct_id: models.CharField = models.CharField(max_length=200)
properties: models.JSONField = models.JSONField(default=dict)
timestamp: models.DateTimeField = models.DateTimeField(default=timezone.now, blank=True)
elements_hash: models.CharField = models.CharField(max_length=200, null=True, blank=True)
site_url: models.CharField = models.CharField(max_length=200, null=True, blank=True)
# DEPRECATED: elements are stored against element groups now
elements: models.JSONField = models.JSONField(default=list, null=True, blank=True)
|
[
"noreply@github.com"
] |
dorucioclea.noreply@github.com
|
110496e18fa67c64c20bfd271e9accc1b77ca647
|
615e9d142587c965d4f593ce68cae1811824026d
|
/19-functions/javoblar-19-07.py
|
3078014c425e95b4785cee83aa845fd53d1e7442
|
[] |
no_license
|
XurshidbekDavronov/python-darslar
|
0100bb8ea61c355949e81d1d3f3b923befeb80c9
|
4fcf9a3e0c2facdedaed9b53ef806cdc0095fd9d
|
refs/heads/main
| 2023-06-21T03:33:19.509225
| 2021-07-13T13:04:56
| 2021-07-13T13:04:56
| 377,176,205
| 1
| 0
| null | 2021-06-15T13:40:33
| 2021-06-15T13:40:32
| null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
"""
16/12/2020
Dasturlash asoslari
#19-dars: FUNCTIONS (FUNKSIYALAR)
Muallif: Anvar Narzullaev
Web sahifa: https://python.sariq.dev
"""
# Foydalanuvchidan son qabul qilib, sonni 2, 3, 4 va 5 ga qoldiqsiz bo'linishini tekshiruvchi
# funksiya yozing.
# Natijalarni konsolga chiqaring ("15 soni 3 ga qoldiqsiz bo'linadi" ko'rinishida)
def bolinish_alomatlari(son):
for n in range(2,11):
if not son%n:
print(f"{son} {n} ga qoldiqsiz bo'linadi")
bolinish_alomatlari(20)
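
# Expected output for 20 (20 % 3 != 0, so 3 is skipped):
# 20 is divisible by 2
# 20 is divisible by 4
# 20 is divisible by 5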
|
[
"anvarbek@gmail.com"
] |
anvarbek@gmail.com
|
804861121ec5dd38d2d654fa3b12e263b371c486
|
fa346a2d5886420e22707a7be03599e634b230a9
|
/temboo/Library/Amazon/IAM/__init__.py
|
59787a0664534645fc9a01dd8d74b838ef9e46c0
|
[] |
no_license
|
elihuvillaraus/entity-resolution
|
cebf937499ed270c3436b1dd25ab4aef687adc11
|
71dd49118a6e11b236861289dcf36436d31f06bc
|
refs/heads/master
| 2021-12-02T17:29:11.864065
| 2014-01-08T04:29:30
| 2014-01-08T04:29:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,056
|
py
|
from ChangePassword import *
from UpdateGroup import *
from CreateAccessKey import *
from ListRolePolicies import *
from UpdateLoginProfile import *
from GetUserPolicy import *
from UpdateServerCertificate import *
from DeleteServerCertificate import *
from DeactivateMFADevice import *
from UpdateAccountPasswordPolicy import *
from ListAccessKeys import *
from DeleteAccessKey import *
from GetUser import *
from DeleteInstanceProfile import *
from RemoveUserFromGroup import *
from DeleteGroup import *
from GetAccountPasswordPolicy import *
from CreateUser import *
from ListInstanceProfilesForRole import *
from ListGroups import *
from ResyncMFADevice import *
from GetAccountSummary import *
from ListMFADevices import *
from CreateGroup import *
from DeleteGroupPolicy import *
from CreateLoginProfile import *
from GetLoginProfile import *
from DeleteRolePolicy import *
from GetRole import *
from GetGroupPolicy import *
from ListUsers import *
from EnableMFADevice import *
from ListVirtualMFADevices import *
from DeleteRole import *
from UpdateAccessKey import *
from ListUserPolicies import *
from UploadSigningCertificate import *
from RemoveRoleFromInstanceProfile import *
from AddUserToGroup import *
from ListServerCertificates import *
from GetServerCertificate import *
from ListInstanceProfiles import *
from CreateInstanceProfile import *
from ListSigningCertificates import *
from AddRoleToInstanceProfile import *
from CreateAccountAlias import *
from ListGroupPolicies import *
from ListRoles import *
from ListGroupsForUser import *
from UpdateSigningCertificate import *
from DeleteAccountAlias import *
from ListAccountAliases import *
from DeleteUser import *
from DeleteAccountPasswordPolicy import *
from DeleteLoginProfile import *
from UploadServerCertificate import *
from GetInstanceProfile import *
from UpdateUser import *
from DeleteUserPolicy import *
from DeleteSigningCertificate import *
from GetRolePolicy import *
from GetGroup import *
from DeleteVirtualMFADevice import *
from CreateVirtualMFADevice import *
|
[
"cedric.warny@gmail.com"
] |
cedric.warny@gmail.com
|
22254545f9a1cc0c5bd2eb4c3f056ed34bc7a22d
|
bcddca991afe606180dbb5ce6c033d8fb611154c
|
/docs/idf_extensions/include_build_file.py
|
b11a2128667b50bd2c713b8038e7b3dbc90675fd
|
[
"Apache-2.0"
] |
permissive
|
EmbeddedSystemClass/esp-idf
|
8ac5a312be41936b1e2dc5c68b7b68c9b4c1e488
|
92db6a3dabc1106b72865b8bd91d9bdd54fbdf6c
|
refs/heads/master
| 2022-12-31T19:57:49.052365
| 2020-10-22T19:19:01
| 2020-10-22T19:19:01
| 259,859,439
| 0
| 0
|
Apache-2.0
| 2020-04-29T07:47:48
| 2020-04-29T07:47:47
| null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
import os.path
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.misc import Include as BaseInclude
from sphinx.util.docutils import SphinxDirective
class IncludeBuildFile(BaseInclude, SphinxDirective):
"""
Like the standard "Include" directive, but relative to the app
build directory
"""
def run(self):
abspath = os.path.join(self.env.config.build_dir, self.arguments[0])
self.arguments[0] = abspath
self.env.note_included(abspath)
return super(IncludeBuildFile, self).run()
def setup(app):
directives.register_directive('include-build-file', IncludeBuildFile)
return {'parallel_read_safe': True, 'parallel_write_safe': True, 'version': '0.1'}
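
# Usage from a .rst page (the file name below is illustrative, not from this
# repo):
#
#     .. include-build-file:: inc/example_header.inc
#
# The argument is resolved against the app build directory (config.build_dir)
# rather than the documentation source tree.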
|
[
"koson.trachu@gmail.com"
] |
koson.trachu@gmail.com
|
7df9dcc7b35ce702c5fdf33e237c3bb866b1708a
|
afbaa5685bf737ec7d16fee2bab54ae13caf96f9
|
/geekbang/core/ch17/Demo1.py
|
98dd62e83056057241e556d48e785f0e1f247874
|
[] |
no_license
|
ykdsg/myPython
|
9dcc9afe6f595e51b72257875d66ada1ba04bba6
|
77d2eaa2acb172664b632cc2720cef62dff8f235
|
refs/heads/master
| 2023-06-10T20:11:08.061075
| 2023-06-03T11:39:53
| 2023-06-03T11:39:53
| 10,655,956
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
def func(message):
print('got a message:{}'.format(message))
# assign the function to a variable
send_message = func
send_message('hello world')
def get_message(message):
return 'got a message:' + message
def root_call(func, message):
print(func(message))
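
# root_call takes a function as its first argument, e.g.:
# root_call(get_message, 'hello world')  # prints: got a message:hello world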
|
[
"17173as@163.com"
] |
17173as@163.com
|
f205af874bfd19c543b990383520db2dc51ce796
|
297c30dc0120c2920c86c8257bc530db1bb1114a
|
/Application/Application_Pandas/panda_DataFrame_Test_1.py
|
e7ad323507d330adb254dc3f79e9571b82741412
|
[] |
no_license
|
whoiszyc/Repo_python
|
76e248b350a3f109c53bfb1f3abe59b903a98e46
|
bdc3f39883aed5b2e85624525c662c00f60d35e3
|
refs/heads/master
| 2021-07-06T04:48:04.973680
| 2020-07-27T03:55:58
| 2020-07-27T03:55:58
| 139,599,645
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
import numpy as np
import pandas as pd
# use string as key
mydict0 = [{'a': 10, 'b': 20, 'c': 30, 'd': 40},{'a': 100, 'b': 200, 'c': 300, 'd': 400},{'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
# use int number as key
mydict1 = [{0: 10, 1: 20, 2: 30, 3: 40},{0: 100, 1: 200, 2: 300, 3: 400},{0: 1000, 1: 2000, 2: 3000, 3: 4000 }]
# test the first data frame
df=pd.DataFrame(mydict0)
print(df)
# general information of the data frame
print('Total number of data entries in the data frame is {}'.format(df.size))
print('Dimension of data entries in the data frame is {} by {}'.format(df.shape[0], df.shape[1]))
# get entry by location
print('Second column of the data frame')
print(df.iloc[:,1])
print('Second to third column of the data frame')
print(df.iloc[:, 1:3])  # iloc slices are end-exclusive
print('Second to third row of the data frame')
print(df.iloc[1:3, :])
# get entry by key
print('The column that key equals to "a" is:')
print(df['a'])
# save data frame to csv and excel
df.to_csv('test_1.csv')
df.to_excel('test_1.xls')
# test the second data frame
# get entry by key
df=pd.DataFrame(mydict1)
print(df)
print('The column that key equals to 0 is:')
print(df[0])
# save data frame to csv and excel
df.to_csv('test_2.csv', encoding='utf-8')
df.to_excel('test_2.xls')
|
[
"31248391+whoiszyc@users.noreply.github.com"
] |
31248391+whoiszyc@users.noreply.github.com
|
476e8f2d422cf9b9348b2be998dbf5b010ef7f87
|
620b58e17d4851e43bd1270cabc8c26f43629a7b
|
/lib/candy_editor/AssetEditor/EngineAsset/ShaderAsset.py
|
fd3756f724e956b29e72c34bc709b54269fc049f
|
[
"MIT"
] |
permissive
|
lihaochen910/Candy
|
78b9862cf06748b365b6fb35ac23f0e7a00ab558
|
d12cb964768459c22f30c22531d3e1734901e814
|
refs/heads/master
| 2022-11-25T19:12:34.533828
| 2021-11-07T16:11:07
| 2021-11-07T16:11:07
| 141,284,960
| 1
| 1
|
NOASSERTION
| 2022-11-22T09:20:08
| 2018-07-17T12:12:02
|
Lua
|
UTF-8
|
Python
| false
| false
| 2,551
|
py
|
import os.path
import logging
import subprocess
import shutil
import json
from candy_editor.core import *
from candy_editor.moai.MOAIRuntime import _CANDY
##----------------------------------------------------------------##
class ShaderAssetManager ( AssetManager ):
def getName ( self ):
return 'asset_manager.shader'
def getMetaType ( self ):
return 'script'
def acceptAssetFile ( self, filePath ):
if not os.path.isfile ( filePath ): return False
name, ext = os.path.splitext ( filePath )
if not ext in [ '.shader' ]: return False
return True
def importAsset ( self, node, reload = False ):
node.assetType = 'shader'
node.setObjectFile ( 'def', node.getFilePath () )
return True
# def onRegister ( self ):
#check builtin shaders
# def editAsset (self, node):
# editor = app.getModule ( 'framebuffer_editor' )
# if not editor:
# return alertMessage ( 'Editor not load', 'shader Editor not found!' )
# editor.openAsset ( node )
##----------------------------------------------------------------##
class ShaderAssetCreator ( AssetCreator ):
def getAssetType ( self ):
return 'shader'
def getLabel ( self ):
return 'Shader'
def createAsset ( self, name, contextNode, assetType ):
ext = '.shader'
filename = name + ext
if contextNode.isType ( 'folder' ):
nodepath = contextNode.getChildPath ( filename )
else:
nodepath = contextNode.getSiblingPath ( filename )
fullpath = AssetLibrary.get ().getAbsPath ( nodepath )
_CANDY.createEmptySerialization ( fullpath, 'candy.Shader' )
return nodepath
class ShaderScriptAssetManager ( AssetManager ):
def getName ( self ):
return 'asset_manager.shader_script'
def getMetaType ( self ):
return 'script'
def acceptAssetFile ( self, filePath ):
if not os.path.isfile ( filePath ): return False
name, ext = os.path.splitext ( filePath )
if not ext in [ '.vsh', '.fsh' ]: return False
return True
def importAsset ( self, node, reload = False ):
name, ext = os.path.splitext ( node.getFilePath () )
if ext == '.vsh':
node.assetType = 'vsh'
elif ext == '.fsh':
node.assetType = 'fsh'
node.setObjectFile ( 'src', node.getFilePath () )
return True
##----------------------------------------------------------------##
ShaderAssetManager ().register ()
ShaderAssetCreator ().register ()
ShaderScriptAssetManager ().register ()
AssetLibrary.get ().setAssetIcon ( 'shader', 'shader' )
AssetLibrary.get ().setAssetIcon ( 'vsh', 'text-red' )
AssetLibrary.get ().setAssetIcon ( 'fsh', 'text-yellow' )
|
[
"lihaochen910@hotmail.com"
] |
lihaochen910@hotmail.com
|
9840040315f9fdf4d3c22de338e2ace8d80de7a0
|
fad702beb35d587278010e570a923bc84a4dda4a
|
/code/pyorg/scripts/tests/uni_2nd_speedup.py
|
13519cc3f8dc80adcdd125cde94a260a0bee67ba
|
[
"Apache-2.0"
] |
permissive
|
anmartinezs/pyseg_system
|
f7769ec3dcaf243895ec1cf13ac6e1da1ab2a92a
|
1370bfedae2ad5e6cdd1dc08395eb9e95b4a8596
|
refs/heads/master
| 2023-02-23T06:23:10.087737
| 2023-01-30T13:24:36
| 2023-01-30T13:24:36
| 227,147,753
| 15
| 4
|
NOASSERTION
| 2023-02-10T17:18:20
| 2019-12-10T14:58:22
|
C
|
UTF-8
|
Python
| false
| false
| 7,837
|
py
|
"""
Measures the speed-up for computing univariate 2nd order models and simulating CSRV instances
"""
################# Package import
import os
import sys
import math
import time
import numpy as np
import multiprocessing as mp
from scipy.optimize import curve_fit
from pyorg.surf.model import ModelCSRV, gen_tlist
from pyorg.surf.utils import disperse_io
from matplotlib import pyplot as plt, rcParams
plt.switch_backend('agg')
###### Global variables
__author__ = 'Antonio Martinez-Sanchez'
########################################################################################
# PARAMETERS
########################################################################################
try:
root_path = sys.argv[1]
except IndexError:
root_path = os.path.split(os.path.abspath(__file__))[0] + '/../../../tests'
out_dir = root_path + '/results'
# Synthetic data generation variables
sdat_surf = root_path + '/../pyorg/surf/test/in/sph_rad_5_surf.vtp'
sdat_tomo_shape = (500, 500, 100)
sdat_n_tomos = 5
sdat_n_sims = None # 20
sdat_n_part_tomo = 600 # 200
# Analysis variables
ana_npr_rg = [1, 2, 4, 8, 16, 24, 32, 36] # [1, 2, 4, 16] # It must start with 1
ana_rad_rg = np.arange(4, 250, 1) # np.arange(4, 180, 3)
ana_shell_thick = None
ana_fmm = False # True
# Plotting settings
rcParams['axes.labelsize'] = 14
rcParams['xtick.labelsize'] = 14
rcParams['ytick.labelsize'] = 14
rcParams['patch.linewidth'] = 2
########################################################################################
# HELPING FUNCTIONS
########################################################################################
def gen_rect_voi_array(shape):
"""
Generates a rectangular array VOI
:param shape: 3-tuple with the length of the three rectangle sides
:return: a binary ndarray object
"""
seg = np.zeros(shape=np.asarray(shape) + 1, dtype=bool)
seg[1:shape[0], 1:shape[1], 1:shape[2]] = True
return seg
def amdahls(x, p):
"""
    Computes Amdahl's Law speed-up
    :param x: speedup of the part of the task that benefits from improved system resources
    :param p: proportion of execution time originally occupied by the part that benefits from improved resources
:return: the computed speed-up
"""
return 1. / (1. - p + p/x)
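
# Worked example (values chosen for illustration): with p = 0.95 and x = 8,
# amdahls(8, 0.95) = 1 / (0.05 + 0.95 / 8) ≈ 5.93, i.e. even a 95%-parallel
# task gets less than a 6x speed-up from 8 processes.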
########################################################################################
# MAIN ROUTINE
########################################################################################
########## Print initial message
print('Test for measuring the computation speed-up of univariate 2nd order models and CSRV simulations.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('\tSynthetic data generations settings: ')
print('\t\t-Particle surface path: ' + str(sdat_surf))
print('\t\t-Tomogram shape: ' + str(sdat_tomo_shape))
print('\t\t-Number of tomograms: ' + str(sdat_n_tomos))
if sdat_n_sims is None:
    print('\t\t-Number of simulations per tomogram is set to the number of processes.')
else:
print('\t\t-Number of simulations per tomogram: ' + str(sdat_n_sims))
print('\t\t-Number of particles per tomogram: ' + str(sdat_n_part_tomo))
print('\tAnalysis settings: ')
print('\t\t-Number of parallel processes to check: ' + str(ana_npr_rg))
print('\t\t-Scale samplings array: ' + str(ana_rad_rg))
if ana_shell_thick is None:
    print('\t\t-Function L is computed.')
else:
print('\t\t-Function O is computed with shell thickness: ' + str(ana_shell_thick))
if ana_fmm:
print('\t\t-Geodesic metric.')
else:
print('\t\t-Euclidean metric.')
print('')
######### Main process
print('Main Routine: ')
print('\t-Initialization...')
voi = gen_rect_voi_array(sdat_tomo_shape)
part = disperse_io.load_poly(sdat_surf)
model_csrv = ModelCSRV()
ltomos_csrv = gen_tlist(sdat_n_tomos, sdat_n_part_tomo, model_csrv, voi, sdat_surf, mode_emb='center',
                        npr=max(ana_npr_rg))  # fixed: npr is a process count, so take it from ana_npr_rg, not the radii range
cu_i = 1. / float(sdat_n_tomos * sdat_n_part_tomo)  # a computing unit (c.u.) is one particle in one tomogram
cpus = mp.cpu_count()
print('\t\t+CPUs found: ' + str(cpus))
# Loop over the number of processes
print('\t-Measurements loops: ')
comp_times = np.zeros(shape=len(ana_npr_rg), dtype=np.float32)
sim_times = np.zeros(shape=len(ana_npr_rg), dtype=np.float32)
for i, npr in enumerate(ana_npr_rg):
print('\t\t+Number of processes: ' + str(npr))
# Computations loop
comp_time, sim_time = 0, 0
for tkey in ltomos_csrv.get_tomo_fname_list():
hold_time = time.time()
hold_tomo = ltomos_csrv.get_tomo_by_key(tkey)
        hold_tomo.compute_uni_2nd_order(ana_rad_rg, thick=ana_shell_thick, border=True, conv_iter=None,
                                        max_iter=None, fmm=ana_fmm, npr=npr)  # thick wired to ana_shell_thick (was hard-coded None)
comp_time += (time.time() - hold_time)
if sdat_n_sims is None:
hold_n_sims = npr
else:
hold_n_sims = sdat_n_sims
cu_sim_i = 1. / float(sdat_n_tomos * sdat_n_part_tomo * hold_n_sims)
hold_time = time.time()
        hold_sim = hold_tomo.simulate_uni_2nd_order(hold_n_sims, model_csrv, part, 'center', ana_rad_rg,
                                                    thick=ana_shell_thick, border=True, conv_iter=None,
                                                    max_iter=None, fmm=ana_fmm, npr=npr)
sim_time += (time.time() - hold_time)
comp_times[i], sim_times[i] = comp_time * cu_i, sim_time * cu_sim_i
print('\t\t\t*Computation time per c.u.: ' + str(comp_times[i]) + ' [secs]')
    print('\t\t\t*Null-model simulation time per c.u.: ' + str(sim_times[i]) + ' [secs]')
print('\tPlotting: ')
# plt.figure()
# plt.xlabel('# processes')
# plt.ylabel('Time/c.u. [s]')
# plt.plot(ana_npr_rg, comp_times, linewidth=2.0, linestyle='-', color='b', label='C')
# plt.plot(ana_npr_rg, sim_times, linewidth=2.0, linestyle='-', color='g', label='C+S')
# plt.tight_layout()
# plt.legend(loc=0)
# if out_dir is not None:
# out_fig_times = out_dir + '/times.png'
#         print('\t\t-Storing the time figure in: ' + out_fig_times)
# plt.savefig(out_fig_times)
# else:
# plt.show(block=True)
# plt.close()
# Speed up fitting:
processes = np.asarray(ana_npr_rg, dtype=float)
processes_ex = np.logspace(0, np.log2(cpus), num=50, base=2)
sup_comp = comp_times[0] / comp_times
sup_sim = sim_times[0] / sim_times
popt_comp, pcov_comp = curve_fit(amdahls, processes, sup_comp)
popt_sim, pcov_sim = curve_fit(amdahls, processes, sup_sim)
sup_comp_f = amdahls(processes_ex, popt_comp)
sup_sim_f = amdahls(processes_ex, popt_sim)
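
# Added illustration: amdahls() has a single free parameter, so popt_*[0] is the fitted
# parallel fraction p, and 1 / (1 - p) is the asymptotic speed-up ceiling Amdahl's law
# predicts for this machine (it diverges to inf if the fit returns p = 1).
print('\t-Fitted parallel fraction (C): ' + str(popt_comp[0]) + ', predicted ceiling: ' + str(1. / (1. - popt_comp[0])))
print('\t-Fitted parallel fraction (C&S): ' + str(popt_sim[0]) + ', predicted ceiling: ' + str(1. / (1. - popt_sim[0])))
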
fig, ax1 = plt.subplots()
ax1.set_xlabel('# processes')
ax1.set_ylabel('Time/c.u. [s]')
# ax1.set_xlim((1, processes_ex.max()))
ax1.plot(ana_npr_rg, comp_times, linewidth=2.0, linestyle='--', color='b', label='C Time')
ax1.plot(ana_npr_rg, sim_times, linewidth=2.0, linestyle='--', color='g', label='C&S Time')
ax2 = ax1.twinx()
ax2.set_ylabel('Speedup')
# plt.plot(processes_ex, processes_ex, linewidth=1.0, linestyle='--', color='k', label='IDEAL')
# plt.plot((16, 16), (0, 16), linewidth=1.0, linestyle='-.', color='k')
# plt.plot((36, 36), (0, 36), linewidth=1.0, linestyle='-.', color='k')
ax2.plot(processes, sup_comp, linewidth=4.0, linestyle='-', marker='*', color='b', label='C Speedup')
# ax2.plot(processes_ex, sup_comp_f, linewidth=2.0, linestyle='-', color='b', label='C Speedup')
ax2.plot(processes, sup_sim, linewidth=4.0, linestyle='-', marker='s', color='g', label='C&S Speedup')
# ax2.plot(processes_ex, sup_sim_f, linewidth=2.0, linestyle='-', color='g', label='C&S Speedup')
# ax2.set_ylim((1, processes_ex.max()))
fig.tight_layout()
# fig.legend(loc=9)
if out_dir is not None:
out_fig_speed = out_dir + '/speed_up_time.png'
print('\t\t-Storing the time figure in: ' + out_fig_speed)
plt.savefig(out_fig_speed)
else:
plt.show(block=True)
plt.close()
print('Terminated. (' + time.strftime("%c") + ')')
authors: ["an.martinez.s.sw@gmail.com"]
author_id: an.martinez.s.sw@gmail.com

blob_id: 7a28f24d0a6faf49ea00304d8ca51cfb2d5b84ef
directory_id: f305f84ea6f721c2391300f0a60e21d2ce14f2a5
path: /6_tree/经典题/后序dfs统计信息/换根dp/hard/abc-233-G - Vertex Deletion-每个点是否在树的最大匹配中.py
content_id: 284d716c8e41ab42dbe6165859649c030080a298
detected_licenses: []
license_type: no_license
repo_name: 981377660LMT/algorithm-study
snapshot_id: f2ada3e6959338ae1bc21934a84f7314a8ecff82
revision_id: 7e79e26bb8f641868561b186e34c1127ed63c9e0
branch_name: refs/heads/master
visit_date: 2023-09-01T18:26:16.525579
revision_date: 2023-09-01T12:21:58
committer_date: 2023-09-01T12:21:58
github_id: 385861235
star_events_count: 225
fork_events_count: 24
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,506
extension: py
content:
# abc-223-G - Vertex Deletion - whether each vertex is needed in the tree's maximum matching
# https://atcoder.jp/contests/abc223/tasks/abc223_g
# Given a tree,
# for each vertex i taken as the root, delete all edges incident to the root;
# !the root qualifies if the maximum matching of the remaining tree equals that of the original tree.
# Count the number of such roots.
# !Answer: these are exactly the vertices that do not participate in a maximum matching of the bipartite graph.
# https://yukicoder.me/problems/2085
# Bipartite-graph game:
# Alice and Bob play on the tree.
# The first player places a token, then the second player places a token on an adjacent vertex.
# They alternate placing tokens; the player who cannot place one loses.
# !Is the first player guaranteed to win? => The first player wins iff the starting vertex is not in some maximum matching of the bipartite graph.
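# Added worked example (illustration): for the path 1-2-3 every maximum matching uses
# exactly one of the two edges, so vertex 2 is covered by all maximum matchings while
# vertices 1 and 3 are each avoided by some maximum matching. With the input
#   3
#   1 2
#   2 3
# the program below prints 2.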
from Rerooting import Rerooting
if __name__ == "__main__":
    E = int  # whether the current vertex participates in its subtree's maximum matching, 0: no, 1: yes
def e(root: int) -> E:
return 0
def op(childRes1: E, childRes2: E) -> E:
return childRes1 | childRes2
def composition(fromRes: E, parent: int, cur: int, direction: int) -> E:
"""direction: 0: cur -> parent, 1: parent -> cur"""
        return fromRes ^ 1  # if the child participates in the matching, the parent does not, and vice versa
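
        # Added note (illustration): combined with op() this realizes the greedy matching
        # from the leaves upward -- a vertex gets matched (1) iff at least one of its
        # children is still unmatched (0).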
n = int(input())
edges = []
for _ in range(n - 1):
u, v = map(int, input().split())
edges.append((u - 1, v - 1))
R = Rerooting(n)
for u, v in edges:
R.addEdge(u, v)
dp = R.rerooting(e=e, op=op, composition=composition, root=0)
    print(dp.count(0))  # number of vertices that are not in the maximum matching
authors: ["lmt2818088@gmail.com"]
author_id: lmt2818088@gmail.com