blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
66cd4531f9739fd1f61386fe7b7fddbd5984c01d
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/examples/v1/service-level-objective-corrections/ListSLOCorrection_2647266873.py
|
cf0d6f2f87fd6aab757bf5bbacf2f99c3ebb8689
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 514
|
py
|
"""
Get all SLO corrections returns "OK" response with pagination
"""
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.service_level_objective_corrections_api import ServiceLevelObjectiveCorrectionsApi
configuration = Configuration()
with ApiClient(configuration) as api_client:
api_instance = ServiceLevelObjectiveCorrectionsApi(api_client)
items = api_instance.list_slo_correction_with_pagination(
limit=2,
)
for item in items:
print(item)
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
87a9327b9cb6b76cb848b894aa4ab84fa356902f
|
60ccf143ae59bd2aeb6b831499ba0d4045025588
|
/Exercicios/Ex081.py
|
0f9ccdfe4f7c1d4a879cb65c8905eca4d045c0af
|
[
"MIT"
] |
permissive
|
RenanRibeiroDaSilva/Meu-Aprendizado-Python
|
3283fa644214149d41777d6b23f6e98804bf30de
|
280bf2ad132ae0d26255e70b894fa7dbb69a5d01
|
refs/heads/main
| 2023-07-07T22:59:11.725000
| 2021-08-11T16:47:32
| 2021-08-11T16:47:32
| 369,657,470
| 2
| 0
|
MIT
| 2021-06-01T17:51:28
| 2021-05-21T21:25:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
""" Ex - 081 - Crie um programa que vai ler vários números e colocar em uma lista.
Depois disso, mostre:
A) Quantos números foram digitados.
B) A lista de valores, ordenada de forma decrescente.
C) Se o valor 5 foi digitado e está ou não na lista."""
# Como eu fiz
# Lista:
lista_num = list()
c = 0
# Loop:
while True:
num = int(input('Digite um valor: '))
lista_num.append(num)
c += 1
res = str(input('Quer continuar? [S/N] ')).strip()[0]
if res in 'Nn':
break
print('~-' * 25)
print(f'Foram digitados {c} valores!')
lista_num.sort(reverse=True)
print(f'Os valores digitados foram {lista_num} em ordem drecesente!')
if 5 in lista_num:
print(f'O número 5 foi digitado na lista!')
else:
print('O número 5 não foi digitado!')
# Como o Guanabara fez
valores = []
while True:
valores.append(int(input('Digite um valor: ')))
resp = str(input('Quer continuar? [S/N] '))
if resp in 'Nn':
break
print('-=' * 30)
print(f'Você digitou {len(valores)} elementos.')
valores.sort(reverse=True)
print(f'Os valores em ordem decrescente são {valores}')
if 5 in valores:
print('O valor 5 faz parte da lista')
else:
print('O valor 5 não foi encontrado na lista!')
|
[
"84098891+RenanRibeiroDaSilva@users.noreply.github.com"
] |
84098891+RenanRibeiroDaSilva@users.noreply.github.com
|
94d1e06020318b09a89c9d4c41acb0483c13bd08
|
e5897d5b5eb3b018bec8703f01cfc666acea5b38
|
/isy994/items/scenes/scene_container.py
|
ae59b821b3b55b875757c06f7a97ad6bf95a8438
|
[
"MIT"
] |
permissive
|
mjcumming/ISY994v5
|
5de41ce7e12be44c35dc0818daf639bb8c0e5487
|
928d8359fd15363e15b8daa402fbb1f5f53f3c45
|
refs/heads/master
| 2022-05-19T06:10:59.788621
| 2022-05-08T13:16:29
| 2022-05-08T13:16:29
| 187,289,265
| 4
| 10
|
MIT
| 2021-06-26T13:34:23
| 2019-05-17T22:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
#! /usr/bin/env python
import xml.etree.ElementTree as ET
import traceback
from ..item_container import Item_Container
from .scene_info import Scene_Info
from .scene_insteon import Scene_Insteon
import logging
logger = logging.getLogger(__name__)
scene_classes = {
"6": Scene_Insteon,
}
class Scene_Container(Item_Container):
    """Container that retrieves and tracks ISY scene ("group") items.

    Fetches the scene list from the controller, instantiates a scene object
    for each supported family (see module-level ``scene_classes``) and relays
    device events to the contained scenes so they can track their state.
    """

    def __init__(self, controller):
        Item_Container.__init__(self, controller, "Scene")

    def start(self):
        """Fetch "nodes/scenes" and populate the container.

        Returns:
            bool: True when the list was retrieved and processed; False when
            the request failed or the XML could not be processed.
        """
        success, response = self.controller.send_request("nodes/scenes")
        if success:
            try:
                root = ET.fromstring(response)
                self.process_scene_nodes(root)
                self.items_retrieved = True
                return True
            except Exception as ex:
                logger.error("container manager Error {}".format(ex))
                traceback.print_exc()
                # Fix: previously fell through and returned None; return an
                # explicit False so the failure modes are consistent.
                return False
        else:
            return False

    def process_scene_nodes(self, root):
        """Process every <group> element of the scene-list document."""
        for scene in root.iter("group"):
            self.process_scene_node(scene)

    def process_scene_node(self, node):
        """Create and register one scene object from a <group> element.

        Skips nodes without a nodeDefId attribute or with invalid scene info;
        only families listed in ``scene_classes`` produce a scene object.
        """
        if "nodeDefId" in node.attrib:
            scene_info = Scene_Info(node)
            if scene_info.valid:  # make sure we have the info we need
                if scene_info.family in scene_classes:
                    scene_class = scene_classes[scene_info.family]
                    scene = scene_class(self, scene_info)
                    scene.update_onoff()
                    self.add(scene, scene.address)
            else:
                # logger.warn() is a deprecated alias; use warning().
                logger.warning("Invalid scene info {}".format(scene_info))
        else:
            logger.warning("Invalid scene info, nodeDefId {}".format(node))

    def device_event(
        self, device
    ):  # notification from controller about a device event, used to "track" scene state
        for address, scene in self.list.items():
            scene.device_event(device)

    def get_device(self, address):
        """Look up a device by address in the controller's device container."""
        return self.controller.device_container.get(address)
|
[
"mike@4831.com"
] |
mike@4831.com
|
a7b66fcea4a6778e70e3557ce2b745bc6c6c7e1a
|
4e2799eb806d66716283aa10be2682ea811a790c
|
/apps/exports/tests/test_scheduling.py
|
9c4488d7569f802dad1a5c4dde3536f4206bba7e
|
[] |
no_license
|
jhonandre/commcare-sync
|
37851a1e1127ee1691ab42fbccdc301c96c4e12e
|
28f07691bc26bb5d7a292f5201fe44fab739a1d5
|
refs/heads/master
| 2023-08-15T02:36:27.323577
| 2021-09-23T11:33:46
| 2021-09-23T11:33:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
from datetime import timedelta
from apps.exports.models import ExportRun
from apps.exports.tests.test_utils import BaseExportTestCase
from django.utils import timezone
class TestSchedule(BaseExportTestCase):
    """Tests for export-run scheduling decisions on the export config."""

    def test_export_is_scheduled_to_run(self):
        """is_scheduled_to_run() across the run lifecycle (order matters)."""
        # A config with no export runs should be scheduled
        self.assertTrue(self.export_config.is_scheduled_to_run())
        # A config that has an export_run in the QUEUED state should be seen as "scheduled"
        export_run = ExportRun.objects.create(
            base_export_config=self.export_config,
        )
        self.addCleanup(export_run.delete)
        self.assertTrue(self.export_config.is_scheduled_to_run())
        # A completed export that is failed shouldn't be rescheduled
        export_run.status = ExportRun.FAILED
        export_run.completed_at = timezone.now() - timedelta(minutes=5)
        export_run.save()
        self.assertFalse(self.export_config.is_scheduled_to_run())
        # Once time_between_runs delay has passed, the export should be scheduled to run again
        # (time_between_runs is presumably minutes — 15 min ago > 10 — TODO confirm)
        self.export_config.time_between_runs = 10
        export_run.completed_at = timezone.now() - timedelta(minutes=15)
        export_run.save()
        self.assertTrue(self.export_config.is_scheduled_to_run())

    def test_should_spawn_task(self):
        """An existing (queued) run suppresses creating another export run."""
        ExportRun.objects.create(
            base_export_config=self.export_config,
        )
        self.assertFalse(self.export_config.should_create_export_run())
|
[
"proteusvacuum@gmail.com"
] |
proteusvacuum@gmail.com
|
509b63483a3b8e451b0686b900f5b462f0f554f1
|
5db0a48428381223d2327b8ce17c5ba95f9fecf0
|
/college_football_risk/models/territory.py
|
9ea033d2d3ea1e27e034aa554ada9cb386f05e65
|
[] |
no_license
|
tuttlepower/college-football-risk-python
|
7349215c7f1e1c8512b74526193021b0af49bcfc
|
3014130991dc27eb69469a4ee2dac88b3f7ea498
|
refs/heads/master
| 2021-04-15T03:08:34.640525
| 2020-03-21T18:10:29
| 2020-03-21T18:10:29
| 249,290,397
| 0
| 0
| null | 2020-03-22T23:13:52
| 2020-03-22T23:13:52
| null |
UTF-8
|
Python
| false
| false
| 5,754
|
py
|
# coding: utf-8
"""
College Football Risk API
Companion API for College Football Risk # noqa: E501
The version of the OpenAPI document: 1.3.0
Contact: admin@collegefootballdata.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from college_football_risk.configuration import Configuration
class Territory(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # Second bare string below is generator boilerplate, not a docstring.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'id': 'int',
        'name': 'str',
        'short_name': 'str',
        'owner': 'str',
        'neighbors': 'list[TerritoryNeighbors]'
    }

    # Maps python attribute names to the JSON keys used on the wire.
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'short_name': 'shortName',
        'owner': 'owner',
        'neighbors': 'neighbors'
    }

    def __init__(self, id=None, name=None, short_name=None, owner=None, neighbors=None, local_vars_configuration=None):  # noqa: E501
        """Territory - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below; only attributes passed as
        # non-None are set through their setters.
        self._id = None
        self._name = None
        self._short_name = None
        self._owner = None
        self._neighbors = None
        self.discriminator = None

        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if short_name is not None:
            self.short_name = short_name
        if owner is not None:
            self.owner = owner
        if neighbors is not None:
            self.neighbors = neighbors

    @property
    def id(self):
        """Gets the id of this Territory.  # noqa: E501

        :return: The id of this Territory.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this Territory.

        :param id: The id of this Territory.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def name(self):
        """Gets the name of this Territory.  # noqa: E501

        :return: The name of this Territory.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this Territory.

        :param name: The name of this Territory.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def short_name(self):
        """Gets the short_name of this Territory.  # noqa: E501

        :return: The short_name of this Territory.  # noqa: E501
        :rtype: str
        """
        return self._short_name

    @short_name.setter
    def short_name(self, short_name):
        """Sets the short_name of this Territory.

        :param short_name: The short_name of this Territory.  # noqa: E501
        :type: str
        """
        self._short_name = short_name

    @property
    def owner(self):
        """Gets the owner of this Territory.  # noqa: E501

        :return: The owner of this Territory.  # noqa: E501
        :rtype: str
        """
        return self._owner

    @owner.setter
    def owner(self, owner):
        """Sets the owner of this Territory.

        :param owner: The owner of this Territory.  # noqa: E501
        :type: str
        """
        self._owner = owner

    @property
    def neighbors(self):
        """Gets the neighbors of this Territory.  # noqa: E501

        :return: The neighbors of this Territory.  # noqa: E501
        :rtype: list[TerritoryNeighbors]
        """
        return self._neighbors

    @neighbors.setter
    def neighbors(self, neighbors):
        """Sets the neighbors of this Territory.

        :param neighbors: The neighbors of this Territory.  # noqa: E501
        :type: list[TerritoryNeighbors]
        """
        self._neighbors = neighbors

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models: lists and dicts of models are
        # converted element-wise via their own to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Territory):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Territory):
            return True

        return self.to_dict() != other.to_dict()
|
[
"radjewwj@gmail.com"
] |
radjewwj@gmail.com
|
3339fa9fa7c973a4244174dc6ce138593c73b2f8
|
ccc4b6341676319c43a482d6322729d9172e8266
|
/extra_annos/migrations/0001_initial.py
|
db0fc197abf80c29e82f33b3c8e54e26d2ff3a5e
|
[
"MIT"
] |
permissive
|
Sumerian-Health/varfish-server
|
87278fcbd3c4289e63b6cbd8140d8a454fa94853
|
152b23fa93c2ea685f51622e94bc8790479c2336
|
refs/heads/master
| 2023-06-12T14:17:41.065266
| 2021-07-08T11:17:31
| 2021-07-08T11:28:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,760
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-07-13 14:26
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the extra_annos app: ExtraAnno + ExtraAnnoField."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="ExtraAnno",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("release", models.CharField(max_length=32)),
                ("chromosome", models.CharField(max_length=32)),
                ("start", models.IntegerField()),
                ("end", models.IntegerField()),
                ("bin", models.IntegerField()),
                ("reference", models.CharField(max_length=512)),
                ("alternative", models.CharField(max_length=512)),
                # Fix: default={} shares ONE mutable dict instance between all
                # field defaults (Django check fields.W019); use the `dict`
                # callable so each row gets a fresh empty dict.  Schema is
                # unchanged.
                ("anno_data", django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
            ],
        ),
        migrations.CreateModel(
            name="ExtraAnnoField",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("field", models.IntegerField()),
                ("label", models.CharField(max_length=128)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name="extraanno",
            unique_together=set([("release", "chromosome", "start", "reference", "alternative")]),
        ),
    ]
|
[
"manuel.holtgrewe@bihealth.de"
] |
manuel.holtgrewe@bihealth.de
|
7ce785ce5d5c5071581a2db86c31061bb9582cc0
|
18ea9b6e176be04f5d854dce1b75a9780d5052a7
|
/dataduct/steps/upsert.py
|
b24f8a9c4280451dc687908b2316ba642fc72c4d
|
[
"Apache-2.0"
] |
permissive
|
sungjuly/dataduct
|
fd89fbb82ae4cc87aa9651cdc8cd13c2c87c5212
|
3700d08a616820e5fecf22a6cf8aabac85a88cba
|
refs/heads/develop
| 2021-04-15T07:50:16.998950
| 2015-02-26T22:33:45
| 2015-02-26T22:33:45
| 30,907,001
| 0
| 0
| null | 2015-02-17T07:32:37
| 2015-02-17T07:32:37
| null |
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
"""ETL step wrapper for Upsert SQL script
"""
from .etl_step import ETLStep
from ..pipeline import SqlActivity
from ..database import Table
from ..database import SqlScript
from ..database import SelectStatement
from ..database import HistoryTable
from ..s3 import S3File
from ..utils.helpers import parse_path
from ..utils.helpers import exactly_one
class UpsertStep(ETLStep):
    """ETL step that upserts rows from a source relation into a destination
    table, optionally maintaining a history table.
    """
    def __init__(self, destination, redshift_database, sql=None,
                 script=None, source=None, enforce_primary_key=True,
                 delete_existing=False, history=None, **kwargs):
        """Constructor for the UpsertStep class

        Exactly one of sql / source / script must be given (asserted below).

        Args:
            destination: path to the SQL file defining the destination table
            redshift_database: database object passed to the SqlActivity
            sql(str, optional): literal SQL for the source SELECT
            script(str, optional): path to a SQL script for the source SELECT
            source(str, optional): path to a SQL file defining a source table
            enforce_primary_key(bool): forwarded to upsert_script
            delete_existing(bool): forwarded to upsert_script
            history(str, optional): path to a history-table definition
            **kwargs(optional): Keyword arguments directly passed to base class
        """
        assert exactly_one(sql, source, script), 'One of sql/source/script'
        super(UpsertStep, self).__init__(**kwargs)
        # Input formatting
        dest = Table(SqlScript(filename=parse_path(destination)))
        if source is not None:
            source_relation = Table(SqlScript(filename=parse_path(source)))
        else:
            source_relation = SelectStatement(
                SqlScript(sql=sql, filename=script).sql())
        # Create the destination table if doesn't exist
        # NOTE(review): `script` below shadows the `script` parameter (already
        # consumed above) — intentional-looking but easy to misread.
        script = dest.exists_clone_script()
        script.append(dest.upsert_script(
            source_relation, enforce_primary_key, delete_existing))
        if history:
            hist = HistoryTable(SqlScript(
                filename=parse_path(history)))
            script.append(hist.update_history_script(dest))
        # Build the pipeline activity that executes the assembled SQL.
        self.activity = self.create_pipeline_object(
            object_class=SqlActivity,
            resource=self.resource,
            schedule=self.schedule,
            depends_on=self.depends_on,
            database=redshift_database,
            max_retries=self.max_retries,
            script=self.create_script(S3File(text=script.sql())))

    @classmethod
    def arguments_processor(cls, etl, input_args):
        """Parse the step arguments according to the ETL pipeline

        Args:
            etl(ETLPipeline): Pipeline object containing resources and steps
            step_args(dict): Dictionary of the step arguments for the class
        """
        step_args = cls.base_arguments_processor(etl, input_args)
        # Upsert reads from SQL, not from pipeline inputs.
        cls.pop_inputs(step_args)
        step_args['resource'] = etl.ec2_resource
        step_args['redshift_database'] = etl.redshift_database
        return step_args
|
[
"sb2nov@gmail.com"
] |
sb2nov@gmail.com
|
0d9abe4cfde90729fa8def7c6aeeca70aa7f3509
|
16cc8f796eac98e9a475da11e4bc0aa26317e894
|
/panasonic3-14/a.py
|
8305feb550b7ca853ac551b0a9dcfc8c01baae23
|
[] |
no_license
|
amaguri0408/AtCoder-python
|
2f3fcdd82c52f5ddee88627fb99466c9e003164f
|
ab8ec04b8e434939e9f7035f3a280b30c0682427
|
refs/heads/master
| 2022-10-30T00:07:03.560011
| 2020-06-13T10:41:36
| 2020-06-13T10:41:36
| 271,954,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# Answer table indexed by k-1; values transcribed from the problem statement
# (presumably the number of groups of order n, OEIS A000001 — TODO confirm
# against the task).
lst = [1, 1, 1, 2, 1, 2, 1, 5, 2, 2, 1, 5, 1, 2, 1, 14, 1, 5, 1, 5, 2, 2, 1, 15, 2, 2, 5, 4, 1, 4, 1, 51]
k = int(input())
print(lst[k-1])  # input k is 1-based, the list is 0-based
|
[
"noreply@github.com"
] |
amaguri0408.noreply@github.com
|
95cc420ac962966131d93517456112712e8d5895
|
c7603730fe2e0615cb8af85360f4270c6e519dcd
|
/eu-structural-funds/common/processors/MT/mt_malta_scraper.py
|
08fdc146452c6d2de38c6d173f333a4873a0187f
|
[
"MIT"
] |
permissive
|
transpresupuestaria/os-data-importers
|
b58266d03274901bf6104dc10ab725fa97a22d18
|
929e07aefc98ae4788e75c682d4c3adc014bf6ce
|
refs/heads/master
| 2022-07-02T16:21:34.023556
| 2020-05-18T18:48:08
| 2020-05-18T18:48:08
| 112,221,613
| 0
| 0
|
MIT
| 2018-08-07T00:26:10
| 2017-11-27T16:40:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
"""A scraper for Malta 2007-2013."""
from datapackage_pipelines.wrapper import spew, ingest
from logging import info, debug
from lxml.html import fromstring
from requests import Session
BASE_URL = 'https://investinginyourfuture.gov.mt'
PAGINATION_URL = BASE_URL + '/ajax/loadProjects.ashx?page={counter}'
PROJECT_URLS_XPATH = './/div[@class="project-listing-item-title"]/a'
FIELD_XPATHS = {
'Code': './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectRefCode"]',
'Title': './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectTitle"]',
'Project Cost': ".//*[@id='mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectCostBeneficiaryItem_divCostValue']",
'Beneficiary': './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectCostBeneficiaryItem_divBeneficiaryValue"]',
'Line Ministry': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdLineMinistry"]',
'Start Date': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdStartDate"]',
'End Date': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdEndDate"]',
'Non Technical Short Summary Of Project': ".//*[@id='mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divNonTechnicalShortSummaryContent']/p",
'Operational Programme': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdOperationalProgramme"]',
'Fund': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdFund"]',
'Operational Objective': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdOperationalObjective"]/p',
'Priority Axis': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdPriorityAxis"]',
'Focus Area Of Intervention': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdFocusAreaOfIntervention1"]',
'Project Objectives': './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectObjectives"]/p',
'Project Results': './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectResults"]/p',
'Project Purpose': './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectPurpose"]/p',
}
session = Session()
def scrape_project(url):
    """Yield (field name, extracted text) tuples for one project page."""
    page = fromstring(session.get(url).content)

    def extract_text(element):
        # Missing elements yield None instead of raising.
        if element is not None:
            return element.text

    for field_name, xpath in FIELD_XPATHS.items():
        field_value = extract_text(page.find(xpath))
        debug('Extracted %s = %s', field_name, field_value)
        yield field_name, field_value
def scrape_projects(paths):
    """Yield one dictionary of scraped fields per project path."""
    for relative_path in paths:
        full_url = BASE_URL + relative_path
        row = dict(scrape_project(full_url))
        info('Scraped %s', row)
        yield row
def get_project_urls():
    """Return the complete list of project URL paths.

    Walks the AJAX pagination endpoint page by page until a page comes back
    with an empty body, which marks the end of the listing.
    """
    collected = []
    page_number = 0
    while True:
        page_number += 1
        response = session.get(PAGINATION_URL.format(counter=page_number))
        if not response.text:
            return collected
        document = fromstring(response.content)
        anchors = document.findall(PROJECT_URLS_XPATH)
        hrefs = [anchor.get('href') for anchor in anchors]
        collected.extend(hrefs)
        info('Collected %s urls on page %s', len(hrefs), page_number)
if __name__ == '__main__':
    # datapackage-pipelines entry point: read the incoming datapackage,
    # collect every project path, and emit one resource of scraped rows.
    _, datapackage, _ = ingest()
    project_paths = get_project_urls()
    project_rows = scrape_projects(project_paths)
    spew(datapackage, [project_rows])
|
[
"vitor@vitorbaptista.com"
] |
vitor@vitorbaptista.com
|
0d49d73ccb4f93db186fffd39c53b3d8f1cccc1b
|
1670dca534ef4fd7e8d9ca9e6d55b5885e4071f9
|
/CodeChef/CodeChef55.py
|
eb3b8f431333456c3d272f468c5b583c2b9a8353
|
[] |
no_license
|
Tejas1510/Pythonary
|
24512a6c5abfee17457397aa37849f3a5a739002
|
55c11f74d9f540bf696acecaa78febecd14d8422
|
refs/heads/master
| 2022-11-23T23:27:32.219513
| 2020-08-02T17:22:17
| 2020-08-02T17:22:17
| 264,151,076
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
""""""""""""""""""""""""""""""""""""""
Name of Question:Chef and his Student
Link of Question:https://www.codechef.com/problems/CHEFSTUD
"""""""""""""""""""""""""""""""""""""""
t=int(input())
for i in range(t):
s=input()
a=list(s)
for i in range(len(a)):
if(a[i]=="<"):
a[i]=">"
elif(a[i]==">"):
a[i]="<"
s="".join(a)
print(s.count("><"))
|
[
"noreply@github.com"
] |
Tejas1510.noreply@github.com
|
027a5979fa94e310120de50128b49b537fb1fa40
|
bec60c149e879666de11bd1bcf47ab0dc2225d49
|
/RF_MicroPython/main.py
|
ac8c2e72620466f6e070dbab064d15a2621d6230
|
[] |
no_license
|
KipCrossing/OpenEM
|
7fee5f3d98bb931209999a8dca41295c1412308e
|
0572d3697b1c8299c29e31840e6ec1f9e08c172c
|
refs/heads/master
| 2021-07-17T02:58:15.385369
| 2020-07-02T12:16:39
| 2020-07-02T12:16:39
| 186,344,444
| 0
| 0
| null | 2019-09-06T02:25:56
| 2019-05-13T04:19:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,823
|
py
|
import sht31
import machine
import pyb
import array
import math
import utime
from pyb import Pin, Timer
from ad9833 import AD9833
from pyb import Pin
from pyb import SPI
from specialmath import SpecialMath
print("(Main program started)")
blueled = pyb.LED(4)
# Wave gen
ss = Pin('Y5', Pin.OUT_PP)
spi = SPI(2, SPI.MASTER, baudrate=9600, polarity=1, phase=0, firstbit=SPI.MSB)
wave = AD9833(spi, ss)
# Bluetooth
blue_uart = pyb.UART(6, 9600)
blue_uart.init(9600, bits=8, stop=1, parity=None)
# Temp sensor
SCLpin = 'Y9'
SDApin = 'Y10'
i2c = machine.I2C(sda=machine.Pin(SDApin), scl=machine.Pin(SCLpin), freq=400000)
sht31sensor = sht31.SHT31(i2c)
# Initial variables
spw = 10 # Samples per wave
WAVES = 1000 # Number of waves to take an average from
freq = 16000 # Frequency in Hz
# send wave
wave.set_freq(freq)
wave.set_type(0)
wave.send()
wait = True
while wait:
print('Blue Out:')
if b'BTM-U' == blue_uart.read():
print("Start")
wait = False
pyb.delay(1000)
# pyb.repl_uart(blue_uart)
blue_uart.write("Warming up!")
blue_uart.write("Started")
utime.sleep(2)
wave.set_freq(freq)
wave.set_type(0)
wave.send()
# Timers for ADC's
adc1 = pyb.ADC(pyb.Pin.board.Y11) # create an ADC on pin X11
adc2 = pyb.ADC(pyb.Pin.board.X4) # create an ADC on pin X4
adc_voltage = pyb.ADC(pyb.Pin.board.Y12)
voltage = (adc_voltage.read()/4096)*14.12
adcall = pyb.ADCAll(12, 0x70000) # 12 bit resolution, internal channels
coretemp = adcall.read_core_temp()
# tim = pyb.Timer(8, freq=200000) # Create timer
# buf1 = bytearray(WAVES*spw) # create a buffer
# buf2 = bytearray(WAVES*spw) # create a buffe
# # read analog values into buffers at 100Hz (takes one second)
# pyb.ADC.read_timed_multi((adc1, adc2), (buf1, buf2), tim)
sm = SpecialMath()
(sm.hp_amp, sm.hp_sft) = (0, 0)
# Output File
outfile = open('out.csv', 'w')
outfile.write("i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,\n")
outfile.close()
def record(f):
    """Sample both ADCs at f*spw Hz, average the waves, and fit sines.

    Averages WAVES consecutive periods into one spw-sample period per
    channel, fits amplitude/phase via SpecialMath.fit_sin, appends the
    processed secondary wave to out.csv, and returns (a1, a2, shift) with
    the shift difference wrapped into [0, spw).  Relies on module globals:
    spw, WAVES, adc1, adc2, sm.
    """
    tim = pyb.Timer(8, freq=f*spw)  # timer paces the ADC sampling
    buf1 = bytearray(WAVES*spw)  # raw samples, channel 1
    buf2 = bytearray(WAVES*spw)  # raw samples, channel 2
    # read analog values into both buffers, one sample per timer tick
    pyb.ADC.read_timed_multi((adc1, adc2), (buf1, buf2), tim)
    # Fold the WAVES periods of channel 1 into one accumulated period
    listc = []
    for i in range(spw):
        listc.append(0)
    count = 0
    for n in range(len(buf1)):
        if count > spw-1:
            count = 0
        listc[count] += buf1[n]
        count += 1
    # Same folding for channel 2
    listd = []
    for i in range(spw):
        listd.append(0)
    count = 0
    for n in range(len(buf2)):
        if count > spw-1:
            count = 0
        listd[count] += buf2[n]
        count += 1
    # (a,s) = sm.fit_sin(listd,10)
    (a1, s1) = sm.fit_sin(listc, 3)
    # Remove the DC offset from channel 2 before fitting
    data_mean = sm.mean(listd)
    for d in range(0, len(listd)):
        listd[d] -= data_mean
    # total wave - Hp to get Hs (Hp subtraction currently disabled)
    # sm.hp = sm.gen_sin(10, sm.hp_amp, s1 + sm.hp_sft)
    listout = listd  # [x - y for x, y in zip(listd, sm.hp)]
    # Append the processed period to out.csv as one comma-separated row
    outtext = ''
    for d in listout:
        outtext += str(d)+','
    outfile = open('out.csv', 'a')
    outfile.write(outtext+"\n")
    outfile.close()
    (a2, s2) = sm.fit_sin(listout, 3)
    # print('Hp - Amp: %f Sft: %f' % (a1,s1))
    # print('Hs - Amp: %f Sft: %f' % (a2,s2))
    # Wrap a negative shift difference back into [0, spw)
    if s2-s1 < 0:
        return(a1, a2, s2-s1 + spw)
    else:
        return(a1, a2, s2-s1)
'''
outfile = open('RF_calibrate.csv', 'w')
outfile.write("Freq,Amp,Shift\n")
mul = 10
for i in range(900, 2000):
freq = i*mul
wave.set_freq(freq)
wave.send()
pyb.delay(50)
ampl = []
sftl = []
for j in range(4):
(or_amp, amp, sft) = record(freq)
ampl.append(amp)
sftl.append(sft)
output = "{},{},{}".format(wave.freq, int(sm.mean(ampl)), round(sm.mean(sftl), 3))
outfile.write(output+"\n")
blue_uart.write(output)
print(output)
blueled.toggle()
outfile.close()
'''
# Output File
outfile = open('OpenEM_data.csv', 'w')
outfile.write("ID,Amp,Shift,Shift_out,Voltage,Temp,Humidity,CoreTemp,Hs,Hp\n")
outfile.close()
count = 0
callibrate = []
Hp_prev = 0
calivbate = True
c_amp = 0
c_sft = 0
amp_roll = []
sft_roll = []
while True:
print("------------------------------" + str(freq))
blueled.toggle()
(or_amp, amp, sft) = record(freq)
sht31_t, sht31_h = sht31sensor.get_temp_humi()
coretemp = adcall.read_core_temp()
voltage = (adc_voltage.read()/4096)*14.12
sm.hp_sft = 9.54 - 0.25
if sft - sm.hp_sft < 0:
sft_out = sft - sm.hp_sft + spw
else:
sft_out = sft - sm.hp_sft
Hs = amp*math.sin(math.pi*2*sft_out/spw)
Hp = amp*math.cos(math.pi*2*sft_out/spw)
amp_roll.append(amp)
sft_roll.append(sft)
if len(amp_roll) > 4:
amp_roll.pop(0)
sft_roll.pop(0)
out_string = "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n" % (count,
amp,
sft,
sft_out,
voltage,
sht31_t,
sht31_h,
coretemp,
Hs,
Hp)
print(out_string)
outfile = open('OpenEM_data.csv', 'a')
outfile.write(out_string)
outfile.close()
blue_uart.write('%s, %s, %s' % (
count,
int(sm.mean(amp_roll)),
sm.mean(sft_roll)))
count += 1
|
[
"kip.crossing@gmail.com"
] |
kip.crossing@gmail.com
|
34767046d1b574b160cf38d2d476cabea85b10fa
|
03a79c4bef915a566f597d75d0d4a5bacc44c16e
|
/blog/posts/utils.py
|
6472726e6b46f32a26b82a86c24f8a8a488e7891
|
[] |
no_license
|
TarekCsePust/Blog-Apps-with-Django-Rest-Framework-Postgresql
|
d2bb77d4427b2dc791fc6761487d83b8821d8550
|
750a4918825100e2e3fd761844fa8b235bef687a
|
refs/heads/master
| 2020-04-02T04:38:27.263459
| 2019-01-24T07:08:20
| 2019-01-24T07:08:20
| 154,026,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
import datetime
import re
import math
from django.utils.html import strip_tags
def count_words(html_string):
    """Return the number of word tokens in *html_string*, markup excluded."""
    plain_text = strip_tags(html_string)
    return len(re.findall(r'\w+', plain_text))
def get_read_time(html_string):
    """Return the estimated reading time of *html_string* in whole minutes.

    Assumes a 200 words-per-minute reading speed and rounds up, so any
    non-empty text yields at least 1 minute.
    """
    count = count_words(html_string)
    # Cleanup: removed leftover debug print of the intermediate value.
    read_time_min = math.ceil(count / 200.0)  # assuming 200wpm reading
    return int(read_time_min)
|
[
"hasantarek12cse@gmail.com"
] |
hasantarek12cse@gmail.com
|
7771c441c900edf84030b5fa1d84a1b0c3051375
|
b110fdc592315daeeec7b0ce48535dfada995d68
|
/highlander/api/controllers/v1/validation.py
|
395e8d223f8bf47ba0ac5963159629aa0f9ee73f
|
[
"Apache-2.0"
] |
permissive
|
StephenTao/stephen
|
1ee5c77b2b4c96d6118911cc8a4458cb94735851
|
06da7cbc93b40fcd089eeed2972adc1fe6bd3cb9
|
refs/heads/master
| 2021-01-10T15:46:40.109013
| 2016-02-25T06:52:57
| 2016-02-25T06:52:57
| 52,503,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
from pecan import rest
from highlander import exceptions as exc
from highlander.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SpecValidationController(rest.RestController):
    """REST controller that validates a spec definition with a parser callable.

    The parser is injected at construction time; POSTing a definition returns
    a JSON object of the form {'valid': bool, 'error': str (on failure)}.
    """

    def __init__(self, parser):
        super(SpecValidationController, self).__init__()
        self._parse_func = parser

    @pecan.expose('json')
    def post(self):
        """Validate a spec."""
        definition = pecan.request.text

        try:
            self._parse_func(definition)
        except exc.DSLParsingException as e:
            # Fix: BaseException.message was deprecated in Python 2.6 and
            # removed in Python 3 — str(e) works on both.
            return {'valid': False, 'error': str(e)}

        return {'valid': True}
|
[
"stephenhuang@augmentum.com.cn"
] |
stephenhuang@augmentum.com.cn
|
29dd727b2f5a7952aa89561ac5cc127cab893549
|
81313cbd75bbd29cd48632d3cfc6b84884884650
|
/HistFitterNtuples/MakeArtificialDataTree.py
|
8988845de4059ffdce93eee5bd0f807f082a3fe8
|
[] |
no_license
|
UPenn-SUSY/PennSUSYFrame
|
ee93fd299e4e36ebc74e0065db0740451309682a
|
41303b163dbc05451b22c19b00b436cc25440cf6
|
refs/heads/master
| 2019-01-19T10:28:47.417027
| 2015-05-08T15:07:24
| 2015-05-08T15:07:24
| 13,934,522
| 2
| 0
| null | 2015-05-08T15:07:24
| 2013-10-28T18:23:35
|
C++
|
UTF-8
|
Python
| false
| false
| 4,295
|
py
|
import itertools
import ROOT
import array
import datetime
# ------------------------------------------------------------------------------
process_list = {'ttbar':1, 'ZGamma':1}
flavor_list = ['ee', 'mm', 'em']
region_list = ['cr_top', 'cr_z', 'sr']
hist_name = 'mbl_0'
hist_bins, hist_min, hist_max = 20, 0, 2000
lumi = 21.e3
rand = ROOT.TRandom3(datetime.datetime.now().microsecond)
# ------------------------------------------------------------------------------
def fillArtificialDataTree(in_file):
# create output file and tree
file_name_tag = '.'.join(['_'.join([k,str(v)]) for k, v in process_list.items()])
out_file_name = '.'.join(['ArtificialData', file_name_tag, 'root'])
out_file = ROOT.TFile(out_file_name, 'RECREATE')
# out_tree = ROOT.TTree('ArtificialData', 'ArtificialData')
out_tree = ROOT.TTree('data', 'data')
# create variables for branches
mbl_0 = array.array('d', [0])
is_ee = array.array('i', [0])
is_mm = array.array('i', [0])
is_em = array.array('i', [0])
is_sr = array.array('i', [0])
is_cr_top = array.array('i', [0])
is_cr_z = array.array('i', [0])
# connect branches
out_tree.Branch('mbl_0' , mbl_0 , 'mbl_0/D')
out_tree.Branch('is_ee' , is_ee , 'is_ee/I')
out_tree.Branch('is_mm' , is_mm , 'is_mm/I')
out_tree.Branch('is_em' , is_em , 'is_em/I')
out_tree.Branch('is_sr' , is_sr , 'is_sr/I')
out_tree.Branch('is_cr_top' , is_cr_top , 'is_cr_top/I')
out_tree.Branch('is_cr_z' , is_cr_z , 'is_cr_z/I')
# loop through processes and flavors
for pl, fl in itertools.product(process_list.keys(), flavor_list):
# get tree for this process and flavor channel
tree_name = '_'.join([fl, pl, 'NoSys'])
print 'process: ', pl, ' - flavor: ', fl, ' - tree: ', tree_name
t = in_file.Get(tree_name)
is_ee[0] = 1 if fl == 'ee' else 0
is_mm[0] = 1 if fl == 'mm' else 0
is_em[0] = 1 if fl == 'em' else 0
# loop through regions
for rl in region_list:
is_sr[0] = 1 if rl == 'sr' else 0
is_cr_top[0] = 1 if rl == 'cr_top' else 0
is_cr_z[0] = 1 if rl == 'cr_z' else 0
print 'is_sr[0] : ' , is_sr[0]
print 'is_cr_top[0]: ' , is_cr_top[0]
print 'is_cr_z[0] : ' , is_cr_z[0]
# create and fill histogram
this_hist_name = '_'.join([tree_name, rl, hist_name])
print ' region: ', rl, ' - hist name: ', this_hist_name
region_hist = ROOT.TH1F(this_hist_name,
'',
hist_bins,
hist_min,
hist_max)
t.Draw(' >> '.join([hist_name, this_hist_name]),
''.join([str(lumi),
'*weight*is_',
rl,
'*',
str(process_list[pl])]))
print ' integral: ', region_hist.Integral()
print ''
# find bin centers and content
bin_centers = [region_hist.GetBinCenter(this_bin) for this_bin in
xrange(hist_bins + 2)]
bin_content = [region_hist.GetBinContent(this_bin) for this_bin in
xrange(hist_bins + 2)]
print bin_centers
print bin_content
print sum(bin_content)
print ''
for center, content in itertools.izip(bin_centers, bin_content):
mbl_0[0] = center
print center, ' - ', content
num_events = rand.Poisson(content)
print ' bin center: ', center, ' - exp content: ', content, ' - gen content: ', num_events
# for i in xrange(int(content)):
for i in xrange(num_events):
# print ' - filling entry ', i
out_tree.Fill()
print ''
# write and close file
out_file.Write()
out_file.Close()
if __name__ == '__main__':
# file to extract samples
bkg_file = ROOT.TFile('BackgroundHistFitterTrees.root', 'r')
fillArtificialDataTree(bkg_file)
|
[
"bjack3@gmail.com"
] |
bjack3@gmail.com
|
1810565238029931f0f8d33d7f786dce3eb2940b
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/a5c70dc6725c422fcccd37ea07e8655b6ecbc833-<main>-fix.py
|
540ca1ec66339ceb0b9db883443a3a94f3ba9b5e
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,325
|
py
|
def main():
argument_spec = openstack_full_argument_spec(name=dict(required=True), password=dict(required=False, default=None, no_log=True), email=dict(required=False, default=None), default_project=dict(required=False, default=None), domain=dict(required=False, default=None), enabled=dict(default=True, type='bool'), state=dict(default='present', choices=['absent', 'present']), update_password=dict(default='always', choices=['always', 'on_create']))
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if (not HAS_SHADE):
module.fail_json(msg='shade is required for this module')
name = module.params['name']
password = module.params.pop('password')
email = module.params['email']
default_project = module.params['default_project']
domain = module.params['domain']
enabled = module.params['enabled']
state = module.params['state']
update_password = module.params['update_password']
try:
cloud = shade.openstack_cloud(**module.params)
user = cloud.get_user(name)
domain_id = None
if domain:
opcloud = shade.operator_cloud(**module.params)
domain_id = _get_domain_id(opcloud, domain)
if (state == 'present'):
if (update_password in ('always', 'on_create')):
if (not password):
msg = ('update_password is %s but a password value is missing' % update_password)
module.fail_json(msg=msg)
default_project_id = None
if default_project:
default_project_id = _get_default_project_id(cloud, default_project, module)
if (user is None):
user = cloud.create_user(name=name, password=password, email=email, default_project=default_project_id, domain_id=domain_id, enabled=enabled)
changed = True
else:
params_dict = {
'email': email,
'enabled': enabled,
'password': password,
'update_password': update_password,
}
if (domain_id is not None):
params_dict['domain_id'] = domain_id
if (default_project_id is not None):
params_dict['default_project_id'] = default_project_id
if _needs_update(params_dict, user):
if (update_password == 'always'):
user = cloud.update_user(user['id'], password=password, email=email, default_project=default_project_id, domain_id=domain_id, enabled=enabled)
else:
user = cloud.update_user(user['id'], email=email, default_project=default_project_id, domain_id=domain_id, enabled=enabled)
changed = True
else:
changed = False
module.exit_json(changed=changed, user=user)
elif (state == 'absent'):
if (user is None):
changed = False
else:
cloud.delete_user(user['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
6504bc2fece9e2db5ffca7ae1fc4cb9dcc612d74
|
7d90d2ce27c6ee0af74391b09909edbd45fdc2f0
|
/renix_py_api/api_gen/OfpMeterTableConfig_Autogen.py
|
d6dbee9cc6d4bfb5fc23919add3b4a32597c6f8d
|
[] |
no_license
|
gaoxingyu-hub/54testframework-master-e284
|
d7ea0d4a715b65c8652430e963a86b9522a7237a
|
57dd2197e7d91b8ad8fb2bd0e3503f10afa08544
|
refs/heads/master
| 2023-04-30T05:50:41.542402
| 2021-05-28T09:19:37
| 2021-05-28T09:19:37
| 309,922,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,840
|
py
|
"""
Auto-generated File
Create Time: 2019-12-27 02:33:27
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .OfpGlobalConfig_Autogen import OfpGlobalConfig
@rom_manager.rom
class OfpMeterTableConfig(OfpGlobalConfig):
def __init__(self, ID=None, BandUnit=None, EnableBurstSize=None, EnableStatistics=None, **kwargs):
self._ID = ID # ID
self._BandUnit = BandUnit # Band Unit
self._EnableBurstSize = EnableBurstSize # Enable Burst Size
self._EnableStatistics = EnableStatistics # Enable Statistics
properties = kwargs.copy()
if ID is not None:
properties['ID'] = ID
if BandUnit is not None:
properties['BandUnit'] = BandUnit
if EnableBurstSize is not None:
properties['EnableBurstSize'] = EnableBurstSize
if EnableStatistics is not None:
properties['EnableStatistics'] = EnableStatistics
# call base class function, and it will send message to renix server to create a class.
super(OfpMeterTableConfig, self).__init__(**properties)
def delete(self):
"""
call to delete itself
"""
return self._finalize()
def edit(self, ID=None, BandUnit=None, EnableBurstSize=None, EnableStatistics=None, **kwargs):
properties = kwargs.copy()
if ID is not None:
self._ID = ID
properties['ID'] = ID
if BandUnit is not None:
self._BandUnit = BandUnit
properties['BandUnit'] = BandUnit
if EnableBurstSize is not None:
self._EnableBurstSize = EnableBurstSize
properties['EnableBurstSize'] = EnableBurstSize
if EnableStatistics is not None:
self._EnableStatistics = EnableStatistics
properties['EnableStatistics'] = EnableStatistics
super(OfpMeterTableConfig, self).edit(**properties)
@property
def ID(self):
"""
get the value of property _ID
"""
if self.force_auto_sync:
self.get('ID')
return self._ID
@property
def BandUnit(self):
"""
get the value of property _BandUnit
"""
if self.force_auto_sync:
self.get('BandUnit')
return self._BandUnit
@property
def EnableBurstSize(self):
"""
get the value of property _EnableBurstSize
"""
if self.force_auto_sync:
self.get('EnableBurstSize')
return self._EnableBurstSize
@property
def EnableStatistics(self):
"""
get the value of property _EnableStatistics
"""
if self.force_auto_sync:
self.get('EnableStatistics')
return self._EnableStatistics
@ID.setter
def ID(self, value):
self._ID = value
self.edit(ID=value)
@BandUnit.setter
def BandUnit(self, value):
self._BandUnit = value
self.edit(BandUnit=value)
@EnableBurstSize.setter
def EnableBurstSize(self, value):
self._EnableBurstSize = value
self.edit(EnableBurstSize=value)
@EnableStatistics.setter
def EnableStatistics(self, value):
self._EnableStatistics = value
self.edit(EnableStatistics=value)
def _set_id_with_str(self, value):
try:
self._ID = int(value)
except ValueError:
self._ID = hex(int(value, 16))
def _set_bandunit_with_str(self, value):
seperate = value.find(':')
exec('self._BandUnit = EnumOfpBandUnit.%s' % value[:seperate])
def _set_enableburstsize_with_str(self, value):
self._EnableBurstSize = (value == 'True')
def _set_enablestatistics_with_str(self, value):
self._EnableStatistics = (value == 'True')
|
[
"gaoxingyu@example.com"
] |
gaoxingyu@example.com
|
e81cea9a4f0cb5a1ef73fcf0a2db186d9a8a2073
|
a2362576001e0f9e22dc69c623170e108908c1b4
|
/testing_sys/testsys/migrations/0047_auto_20190524_2057.py
|
e21d7976bb73ac2e309fbf7b49eb37d9c68f8c49
|
[] |
no_license
|
mdigbazova/TestSystem
|
c1a694eb1877567bcc63a2cc3f615469ba4f8fd9
|
e5cca7a3aa31f1af4e1f7807895124e36348b9af
|
refs/heads/master
| 2022-12-15T22:20:14.812166
| 2019-06-11T08:14:24
| 2019-06-11T08:14:24
| 183,647,017
| 0
| 1
| null | 2022-11-22T03:50:12
| 2019-04-26T14:53:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
# Generated by Django 2.2 on 2019-05-24 17:57
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('testsys', '0046_auto_20190524_2050'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='currentdefinitionsdate',
field=models.DateTimeField(default=datetime.datetime(2019, 5, 24, 17, 57, 33, 27108, tzinfo=utc), null=True, verbose_name='Current Definitions Date'),
),
migrations.AlterField(
model_name='alertsbody',
name='alerttimestamp',
field=models.DateTimeField(default=datetime.datetime(2019, 5, 24, 17, 57, 33, 28106, tzinfo=utc), verbose_name='Alert Timestamp'),
),
migrations.AlterField(
model_name='alertsbody',
name='createdat',
field=models.DateTimeField(default=datetime.datetime(2019, 5, 24, 17, 57, 33, 28106, tzinfo=utc), verbose_name='Creation Date'),
),
]
|
[
"mdigbazova@gmail.com"
] |
mdigbazova@gmail.com
|
36d556974768695b7e1e8d9f902557a81d9650f3
|
731c3f2f85f6002725322eedc0b2c8b5e74f610e
|
/sale_discount_total/reports/invoice_report.py
|
2a6626455f4295c09de5b56c9dd0dd2afffc9203
|
[] |
no_license
|
babarlhr/project-0021
|
1ac824657f893c8f25d6eb3b839051f350d7cc9d
|
e30b8a9f5d2147d3ca5b56b69ec5dbd22f712a91
|
refs/heads/master
| 2021-09-22T15:45:47.431000
| 2018-09-11T14:59:49
| 2018-09-11T14:59:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
from openerp import fields, models
class AccountInvoiceReport(models.Model):
_inherit = 'account.invoice.report'
discount = fields.Float('Discount', readonly=True)
def _select(self):
res = super(AccountInvoiceReport,self)._select()
select_str = res + """, sub.discount AS discount """
return select_str
def _sub_select(self):
res = super(AccountInvoiceReport,self)._sub_select()
select_str = res + """,SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ((ail.quantity / u.factor * u2.factor) * ail.price_unit * (ail.discount) / 100.0)
ELSE ((ail.quantity / u.factor * u2.factor) * ail.price_unit * (ail.discount) / 100.0) END) as discount"""
return select_str
|
[
"wahhid@gmail.com"
] |
wahhid@gmail.com
|
acfc8e328100a02bf944650a202675138090aec8
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.0_rd=0.65_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=32/sched.py
|
3f94907f550578e3ad9bd176c4a0232307ccaf22
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
-X FMLP -Q 0 -L 2 100 400
-X FMLP -Q 0 -L 2 70 250
-X FMLP -Q 0 -L 2 64 200
-X FMLP -Q 1 -L 1 53 175
-X FMLP -Q 1 -L 1 47 150
-X FMLP -Q 1 -L 1 42 200
-X FMLP -Q 2 -L 1 41 200
-X FMLP -Q 2 -L 1 40 200
-X FMLP -Q 3 -L 1 32 175
-X FMLP -Q 3 -L 1 25 100
22 150
21 200
16 200
12 150
9 125
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
bb05c6d8f5cdb8e988bbb9b22fd2ca62a282ec17
|
d2ec5cdf0c94ae429476b802f4ae133fc74d35c2
|
/documents/management/commands/fixdocuments_remove_phantoms.py
|
cf875ee035bf0558687471075ab5f9eb28a2222f
|
[
"MIT"
] |
permissive
|
Barolina/doc-versions
|
eb4e6f0ce087d7027dc1bbd0b5b53a7779efab8e
|
ae536892f6245206abb7145592cf61408bc1161c
|
refs/heads/master
| 2021-01-12T10:27:25.218122
| 2013-02-23T18:34:55
| 2013-02-23T18:34:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
# -*- encoding: utf-8 -*-
from django.core.management.base import NoArgsCommand
from django.db.models import get_models, F
from documents.models import Document
from documents.management.commands.documentscheck import \
info, warning, set_options
def fix_model(model):
mn = model.__name__
info('fixing model : ' + mn)
c = model.objects.filter(document_start__gte=F('document_end')).count()
if c:
model.objects.filter(document_start__gte=F('document_end')).delete()
warning(mn + ': %d phantom document(s) removed' % c)
else:
info(mn + ': no phantom documents found')
def fix(out, err, **options):
set_options(out, err, **options)
for m in get_models():
if issubclass(m, Document):
fix_model(m)
class Command(NoArgsCommand):
help = 'Remove all records with document_start >= document_end ' \
'on all Document subclasses'
def handle_noargs(self, **options):
fix(self.stdout, self.stderr, **options)
|
[
"kostia.lopuhin@gmail.com"
] |
kostia.lopuhin@gmail.com
|
5b84b46750948531241467dbff1f604ee2a07454
|
9cebe39a7ed1bb813b2aebe1ae923821f3c08394
|
/ndb/util.py
|
f5d250fd689812eb345c4479d626001a9c10ae0a
|
[] |
no_license
|
argeweb/core
|
1f6a53092544bc7b7c972d4aa505d5d6ef8f3b50
|
bf78434714cdb5242b9b3b345666482b27d73528
|
refs/heads/master
| 2020-12-25T13:33:24.689677
| 2018-04-18T00:29:35
| 2018-04-18T00:29:35
| 67,552,917
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
"""
Utilities for working with both db and ndb models
"""
from google.appengine.ext import db, ndb
def list(Model, *args, **kwargs):
"""
Returns a query object for a db or ndb Model
"""
if issubclass(Model, db.Model):
return Model.all()
else:
return Model.query()
def decode_key(str, kind=None):
"""
Makes a ndb Key object from the given data
and optionally a kind. Kind is only needed if
the str is an id.
"""
if isinstance(str, ndb.Key):
return str
str = str.lstrip(':')
try:
id = long(str)
return ndb.Key(kind, id)
except ValueError:
return ndb.Key(urlsafe=str)
def encode_key(ins):
"""
Gets the urlsafe of a key for either a db or ndb instance
"""
try:
return new_key(ins).urlsafe()
except AttributeError:
return new_key(ins.key).urlsafe()
def new_key(ins_or_key):
"""
Makes a ndb.Key from ndb or db instances or keys
"""
if isinstance(ins_or_key, ndb.Key):
return ins_or_key
elif isinstance(ins_or_key, db.Model):
return ndb.Key.from_old_key(ins_or_key.key())
elif isinstance(ins_or_key, db.Key):
return ndb.Key.from_old_key(ins_or_key)
elif isinstance(ins_or_key, ndb.Model):
return ins_or_key.key
return None
def old_key(ins_or_key):
"""
Makes a db.Key from ndb or db instances or keys
"""
if isinstance(ins_or_key, ndb.Model):
return ins_or_key.key.to_old_key()
elif isinstance(ins_or_key, ndb.Key):
return ins_or_key.to_old_key()
elif isinstance(ins_or_key, db.Model):
return ins_or_key.key()
else:
return ins_or_key
|
[
"cwen0708@gmail.com"
] |
cwen0708@gmail.com
|
5d189a253e3bc1ba72529d977c88c26e1a0f2eae
|
623c915efdad396b9d40d0c46c9aed532839a383
|
/sudoku/grid_values.py
|
43a32971354cf454262ebe30e036f90496992ef3
|
[] |
no_license
|
KeithYJohnson/aind
|
f997aa20da2878b76a2950bed1452a826bcb11b5
|
d70ca4fbf5a38e2aaddedfc1fb01b212c008309b
|
refs/heads/master
| 2021-01-21T19:57:53.828896
| 2017-06-16T23:13:35
| 2017-06-16T23:13:35
| 92,176,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
# from utils import *
boxes = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9',
'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B9',
'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9',
'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9',
'E1', 'E2', 'E3', 'E4', 'E5', 'E6', 'E7', 'E8', 'E9',
'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9',
'G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9',
'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9',
'I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9']
def grid_values(grid_string, boxes):
"""Convert grid string into {<box>: <value>} dict with '123456789' value for empties.
Args:
grid: Sudoku grid in string form, 81 characters long
Returns:
Sudoku grid in dictionary form:
- keys: Box labels, e.g. 'A1'
- values: Value in corresponding box, e.g. '8', or '123456789' if it is empty.
"""
grid_dict = {}
for idx, char in enumerate(grid_string):
if char == '.':
grid_dict[boxes[idx]] = '123456789'
else:
grid_dict[boxes[idx]] = char
return grid_dict
# Credit to the course provider
def slicker_implementation(grid_string, boxes):
assert len(grid) == 81, "Input grid must be a string of length 81 (9x9)"
return dict(zip(boxes, grid))
if __name__ == '__main__':
string_grid = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'
print(grid_values(string_grid, boxes))
|
[
"keithyjohnson@gmail.com"
] |
keithyjohnson@gmail.com
|
ae8352c390609e6e0fd1f97b581fdc749145f99b
|
d92c34d44d025ae7619bb3ec0e974647d86d715c
|
/02_gpio/gpio.py
|
abdb7cbed45345dc18faed606459a2751aea0340
|
[] |
no_license
|
braingram/bbb_pru_tests
|
317ca0f0867f94cc27e00d7036f510cbe5affa16
|
c19374251e4f628ed0fe78a88d7ce40057e78e41
|
refs/heads/master
| 2021-01-19T10:59:10.083272
| 2015-03-14T21:48:53
| 2015-03-14T21:48:53
| 31,833,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
''' gpio.py
blink a led for a certain number of times'''
import struct
import mmap
import pypruss
# count, duration
PRU_ICSS = 0x4A300000
PRU_ICSS_LEN = 512*1024
SHAREDRAM_START = 0x00012000
count_value = 4
#duration_value = 1000 * 1000 * 100 # 500 ms
duration_value = 1000 * 1000 * 10 # 50 ms
print("Count : %s" % count_value)
print("Duration: %s" % duration_value)
with open("/dev/mem", "r+b") as f:
ddr_mem = mmap.mmap(f.fileno(), PRU_ICSS_LEN, offset=PRU_ICSS)
ddr_mem[SHAREDRAM_START:SHAREDRAM_START+4] = struct.pack('L', count_value)
ddr_mem[SHAREDRAM_START+4:SHAREDRAM_START+8] = struct.pack('L', duration_value)
pypruss.modprobe() # This only has to be called once pr boot
pypruss.init() # Init the PRU
pypruss.open(0) # Open PRU event 0 which is PRU0_ARM_INTERRUPT
pypruss.pruintc_init() # Init the interrupt controller
pypruss.exec_program(0, "./gpio.bin") # Load firmware "blinkled.bin" on PRU 0
pypruss.wait_for_event(0) # Wait for event 0 which is connected to PRU0_ARM_INTERRUPT
pypruss.clear_event(0) # Clear the event
pypruss.pru_disable(0) # Disable PRU 0, this is already done by the firmware
pypruss.exit() # Exit, don't know what this does.
|
[
"root@beaglebone.(none)"
] |
root@beaglebone.(none)
|
4196ac2dc9cfce344ae991d7e8f49bd052ce3e5e
|
6c5ce1e621e0bd140d127527bf13be2093f4a016
|
/ex075/venv/Scripts/pip3.7-script.py
|
e452b3b38862c357a618f44dd9740312f44bd5ab
|
[
"MIT"
] |
permissive
|
ArthurAlesi/Python-Exercicios-CursoEmVideo
|
124e2ee82c3476a5a49baafed657788591a232c1
|
ed0f0086ddbc0092df9d16ec2d8fdbabcb480cdd
|
refs/heads/master
| 2022-12-31T13:21:30.001538
| 2020-09-24T02:09:23
| 2020-09-24T02:09:23
| 268,917,509
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
Python
| false
| false
| 467
|
py
|
#!C:\Users\User\Documents\github-MeusRepositórios\Python-Exercicios-CursoEmVideo\ex075\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
|
[
"54421573+ArthurAlesi@users.noreply.github.com"
] |
54421573+ArthurAlesi@users.noreply.github.com
|
58a810eb3bf799250724d7139f7bafde4a61ba14
|
3e35f5ab6e600d5c215eeecab8857ebebadf6ac4
|
/my_app/models.py
|
81d11b663f5810bb7dd6bd5dd09f301d0fc75288
|
[] |
no_license
|
jpisano99/my_app_template_r3
|
c14135d81b7f66a8b72305f16111d247b09dee49
|
dbdd9616c9cd86451e93a211a174a40dff31b3df
|
refs/heads/master
| 2023-02-22T07:06:34.852386
| 2022-07-25T17:51:05
| 2022-07-25T17:51:05
| 226,744,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
from my_app import db
class Test_Table(db.Model):
__tablename__ = 'test_table'
# Use this to specify a default schema/db for this table
# __table_args__ = {'schema': 'dev'}
# Us this to specify a different bind/sql server for this table
# __bind_key__ = 'dev'
id = db.Column(db.Integer(), primary_key=True)
first_name = db.Column(db.String(40))
last_name = db.Column(db.String(40))
qty_on_hand = db.Column(db.Integer)
cost = db.Column(db.Float)
date_added = db.Column(db.DateTime)
password_hash = db.Column(db.String(128))
@staticmethod
def newest():
return Test_Table.query.all()
def newest_name(num):
return Test_Table.query.order_by(Test_Table.first_name).limit(num)
def __repr__(self):
return "<name {}: '{} , {}'>".format(self.id, self.pss_name,self.tsa_name)
# class Bookings(db.Model):
# __tablename__ = 'bookings'
#
# erp_end_customer_name = db.Column(db.String(100))
# total_bookings = db.Column(db.Float)
# product_id = db.Column(db.String(25))
# date_added = db.Column(db.DateTime)
# hash_value = db.Column(db.String(50), primary_key=True)
# class Customers(db.Model):
# __tablename__ = 'customers'
#
# id = db.Column(db.Integer(), primary_key=True)
# last_name = db.Column(db.String(45))
# first_name = db.Column(db.String(45))
|
[
"jpisano@cisco.com"
] |
jpisano@cisco.com
|
5d3af36631918afa519eae61c95e01e084b19684
|
1e84a9fec36deaf9a55a2734749ea035f72ac869
|
/KAKAO BLIND RECRUITMENT/2017/3차/압축/main.py
|
636e20aa1a11fd3166c11bef8a77b1a406c6023d
|
[] |
no_license
|
mgh3326/programmers_algorithm
|
aa3afc91231550e1fec2d72d90e85b140f79d677
|
b62f08ccccbdcac71e484d508985a5a9ce5f2434
|
refs/heads/master
| 2022-08-31T04:19:15.728666
| 2022-07-31T14:02:26
| 2022-07-31T14:02:26
| 201,747,526
| 0
| 0
| null | 2022-07-23T10:19:13
| 2019-08-11T10:02:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
def solution(msg):
answer = []
my_dict = {}
dict_index = 1
for i in ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z"]:
my_dict[i] = dict_index
dict_index += 1
temp_idx = 2
for idx, my_msg in enumerate(msg):
if temp_idx > 2:
temp_idx -= 1
continue
temp_idx = 1
while True:
if idx + temp_idx > len(msg):
answer.append(out_idx)
break
find_msg = msg[idx:idx + temp_idx]
if find_msg in my_dict:
temp_idx += 1
out_idx = my_dict[find_msg]
continue
else:
answer.append(out_idx)
my_dict[find_msg] = dict_index
dict_index += 1
break
return answer
msg_list = [
"K",
"KAKAO",
"TOBEORNOTTOBEORTOBEORNOT",
"ABABABABABABABAB"
]
return_list = [
[11],
[11, 1, 27, 15],
[20, 15, 2, 5, 15, 18, 14, 15, 20, 27, 29, 31, 36, 30, 32, 34],
[1, 2, 27, 29, 28, 31, 30]
]
for _input_data in zip(msg_list, return_list):
_0 = _input_data[0]
_r = _input_data[-1]
print(msg_list.index(_0))
result = solution(_0)
print(result)
print(_r)
if result == _r:
print("맞음")
else:
print("틀림")
|
[
"mgh3326@naver.com"
] |
mgh3326@naver.com
|
1d4c47022930b5b454743b7015afc67a9b6eab89
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2697/60747/258513.py
|
e919dcb1081dd6fbc54e37ba84292fa5f160b216
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
s=input()
s=s[1:len(s)-1].split(",")
root=int(s[0])
a=0
i=1
for j in range(len(s)):
if s[j]!="null":
s[j]=int(s[j])
while i<len(s)/2-1:
if s[s.index(root)*2+1]=='null' or root>int(s[s.index(root)*2+1]):
if root=="null"or root<int(s[s.index(root)*2+2]) :
root = s[i]
else :
print("false")
a=-1
break
else:
print("false")
a=-1
break
i+=1
if a!=-1:
print("true")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
62d60230c1a889d8e64f09dc716744bb275ea099
|
0b79d66196e9bef7cf81c0c17b6baac025b0d7f1
|
/apps/property/inventory/models/trans.py
|
3878f3289ac3505812de4ef1c51e3ecffe04347e
|
[] |
no_license
|
tsevindik/sis-back
|
bf0244a803ba9432980844ff35498780ac664564
|
4ba942fe38cc150c70898db4daf211213b84a61a
|
refs/heads/master
| 2021-03-24T09:35:49.199712
| 2017-01-25T08:19:37
| 2017-01-25T08:19:37
| 73,540,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
from django.utils.translation import ugettext_lazy as _
from django.db import models
from utils.models import trans as trans_models
from . import main
class InventoryTypeTrans(trans_models.Translation):
neutral = models.ForeignKey(
main.Inventory
)
name = models.CharField(
max_length=50,
verbose_name=_("İsim")
)
class InventoryTrans(trans_models.Translation):
neutral = models.ForeignKey(
main.Inventory
)
name = models.CharField(
max_length=150,
verbose_name=_("İsim")
)
description = models.TextField(
verbose_name=_("Açıklama")
)
|
[
"abdullahsecer@std.sehir.edu.tr"
] |
abdullahsecer@std.sehir.edu.tr
|
f1e72430ddeb7762b293af65083afe0d2fab8a65
|
21b4585de4a0eacdb0d1e488dfae53684bb6564e
|
/62. Unique Paths.py
|
e249ce1880d51a1f8063a5a08d7fbd9ee3cb1af7
|
[] |
no_license
|
gaosq0604/LeetCode-in-Python
|
de8d0cec3ba349d6a6462f66286fb3ddda970bae
|
57ec95779a4109008dbd32e325cb407fcbfe5a52
|
refs/heads/master
| 2021-09-15T23:14:14.565865
| 2018-06-12T16:30:40
| 2018-06-12T16:30:40
| 113,881,474
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
class Solution:
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
res=[1]*n
for _ in range(m-1):
for i in range(1,n):
res[i]+=res[i-1]
return res[-1]
|
[
"gaosq0604@gmail.com"
] |
gaosq0604@gmail.com
|
57486d8950198e14b5fe481c20ed8c146bb9095e
|
a94089d207f9efc78d6d75736ba443f7b2d5f5b4
|
/authsys/migrations/0001_initial.py
|
756703c700f78e0a83def8299be51e8e8822e99c
|
[] |
no_license
|
Larionov0/PyTest
|
217526fcd19785d886d74d638173d3fc5f963b26
|
a4ab75d4868845890ca2ffc117230a0b346f9c43
|
refs/heads/master
| 2023-02-18T04:09:16.745759
| 2021-01-15T14:50:26
| 2021-01-15T14:50:26
| 217,780,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,869
|
py
|
# Generated by Django 2.2.6 on 2019-10-28 21:43
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('catalog', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Achievement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='name', max_length=100)),
('condition', models.TextField(default='text that describes achievement')),
],
),
migrations.CreateModel(
name='FailedPack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(default=datetime.datetime(2019, 10, 28, 23, 43, 27, 54674))),
('pack', models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to='catalog.Pack')),
],
),
migrations.CreateModel(
name='MoneyAchievement',
fields=[
('achievement_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='authsys.Achievement')),
('paisons', models.IntegerField(default=1000000)),
],
bases=('authsys.achievement',),
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paisons', models.IntegerField(default=0)),
('achievements', models.ManyToManyField(blank=True, to='authsys.Achievement')),
('completed_packs', models.ManyToManyField(blank=True, related_name='completed_users', to='catalog.Pack')),
('failed_packs', models.ManyToManyField(blank=True, related_name='failed_users', to='authsys.FailedPack')),
('user', models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PacksAchievement',
fields=[
('achievement_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='authsys.Achievement')),
('pack_set', models.ManyToManyField(to='catalog.Pack')),
],
bases=('authsys.achievement',),
),
]
|
[
"larionov1001@gmail.com"
] |
larionov1001@gmail.com
|
d006d0a8f2aa0dff9f11db31950f1157a03e345e
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/unsignedByte/Schema+Instance/NISTXML-SV-IV-atomic-unsignedByte-minExclusive-2-2.py
|
22a0009905fe1daee2186d1122a0e3f545d55a05
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
from output.models.nist_data.atomic.unsigned_byte.schema_instance.nistschema_sv_iv_atomic_unsigned_byte_min_exclusive_2_xsd.nistschema_sv_iv_atomic_unsigned_byte_min_exclusive_2 import NistschemaSvIvAtomicUnsignedByteMinExclusive2
obj = NistschemaSvIvAtomicUnsignedByteMinExclusive2(
value=190
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
99f39851b384d27161ca03df0afa00bc1feff198
|
95124b283d7f67b0a1b9737c921a1c80c3390b56
|
/cookbook/migrations/0004_alter_chef_options.py
|
5ae4bb611914b8670b26cbf4c4da0f351d5d85b4
|
[] |
no_license
|
Saviodiow95/Recipes
|
ad905605ee9f9c2fce2c2d7e3ed75e1b5dfa79d4
|
0e88968f92dde012c3eee3518367d7d9950d856a
|
refs/heads/main
| 2023-08-28T05:03:03.798398
| 2021-10-30T01:39:53
| 2021-10-30T01:39:53
| 422,679,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
# Generated by Django 3.2.8 on 2021-10-28 14:06
from django.db import migrations
class Migration(migrations.Migration):
    """Set human-readable verbose names ('Chef'/'Chefes') on the Chef model."""
    dependencies = [
        ('cookbook', '0003_auto_20211028_1048'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='chef',
            options={'verbose_name': 'Chef', 'verbose_name_plural': 'Chefes'},
        ),
    ]
|
[
"saviodiowflamengo@gmail.com"
] |
saviodiowflamengo@gmail.com
|
4a82e0f926a3e0cd84548bb25cce801091d6ee31
|
fe5b4e7af9a4504437d33734de0ea62baf454b69
|
/Learning/Python/Practices/mytimer.py
|
2ee9f967be12962d3f12cf066fefd1e21540ae51
|
[] |
no_license
|
FelicxFoster/Sources
|
937f2936b0fa3eef9dd2bbbde09e7f44755b8a8a
|
3750c393088c281c000228d84fe619ba321bd5bc
|
refs/heads/master
| 2020-04-22T09:37:05.191325
| 2016-08-06T07:02:50
| 2016-08-06T07:02:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
import time
class Mytimer:
    """Simple stopwatch: call start(), then stop(); repr() reports elapsed time.

    Fix: the original used time.clock(), which was deprecated in 3.3 and
    removed in Python 3.8; time.perf_counter() is the documented replacement
    for measuring elapsed wall time.
    """
    def __init__(self):
        # begin: counter reading taken at start(); 0 means "not started yet".
        self.begin = 0
        # time: elapsed seconds measured by the last stop(); 0 means "not stopped".
        self.time = 0
    def __repr__(self):
        if self.time == 0:
            return "请先调用stop结束计时!"
        else:
            return "总共运行了%.5f秒" % self.time
    def start(self):
        """Record the starting timestamp."""
        print("开始计时...")
        self.begin = time.perf_counter()
    def stop(self):
        """Compute elapsed seconds since start(); warn if start() was never called."""
        if self.begin == 0:
            print("请先调用start开始计时!")
        else:
            print("计时结束.")
            self.time = time.perf_counter() - self.begin
|
[
"zoro@onepiece.com"
] |
zoro@onepiece.com
|
0743849f184d5055155ee69ce3c1a52ebb1b4098
|
cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde
|
/Q/QueriesonaPermutationWithKey.py
|
5f50edc964410d94953829c8957acc876e3cc808
|
[] |
no_license
|
bssrdf/pyleet
|
8861bbac06dfe0f0f06f6ad1010d99f8def19b27
|
810575368ecffa97677bdb51744d1f716140bbb1
|
refs/heads/master
| 2023-08-20T05:44:30.130517
| 2023-08-19T21:54:34
| 2023-08-19T21:54:34
| 91,913,009
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,517
|
py
|
'''
-Medium-
Given the array queries of positive integers between 1 and m, you have to process all queries[i]
(from i=0 to i=queries.length-1) according to the following rules:
In the beginning, you have the permutation P=[1,2,3,...,m].
For the current i, find the position of queries[i] in the permutation P (indexing from 0) and then
move this at the beginning of the permutation P. Notice that the position of queries[i] in P is
the result for queries[i].
Return an array containing the result for the given queries.
Example 1:
Input: queries = [3,1,2,1], m = 5
Output: [2,1,2,1]
Explanation: The queries are processed as follow:
For i=0: queries[i]=3, P=[1,2,3,4,5], position of 3 in P is 2, then we move 3 to the beginning of P resulting in P=[3,1,2,4,5].
For i=1: queries[i]=1, P=[3,1,2,4,5], position of 1 in P is 1, then we move 1 to the beginning of P resulting in P=[1,3,2,4,5].
For i=2: queries[i]=2, P=[1,3,2,4,5], position of 2 in P is 2, then we move 2 to the beginning of P resulting in P=[2,1,3,4,5].
For i=3: queries[i]=1, P=[2,1,3,4,5], position of 1 in P is 1, then we move 1 to the beginning of P resulting in P=[1,2,3,4,5].
Therefore, the array containing the result is [2,1,2,1].
Example 2:
Input: queries = [4,1,2,2], m = 4
Output: [3,1,2,0]
Example 3:
Input: queries = [7,5,5,8,3], m = 8
Output: [6,5,0,7,5]
Constraints:
1 <= m <= 10^3
1 <= queries.length <= m
1 <= queries[i] <= m
'''
class Fenwick:
    """Binary indexed tree (1-based) supporting point add and prefix sum."""

    def __init__(self, n):
        # Capacity is the smallest power of two strictly greater than n.
        capacity = 1
        while capacity <= n:
            capacity *= 2
        self.size = capacity
        self.data = [0] * capacity

    def sum(self, i):
        """Return the sum of entries 1..i."""
        total = 0
        while i > 0:
            total += self.data[i]
            i &= i - 1  # strip the lowest set bit
        return total

    def add(self, i, x):
        """Add x to entry i."""
        while i < self.size:
            self.data[i] += x
            i += i & -i  # jump to the next responsible node
class Solution(object):
    def processQueries(self, queries, n):
        """
        :type queries: List[int]
        :type n: int
        :rtype: List[int]
        Positions live on a tape of length 2n: the initial permutation sits in
        the right half and each queried value moves to a fresh slot walking
        leftward from n.  A Fenwick tree counts occupied slots, so a value's
        rank is the number of occupied slots before its current position.
        """
        tree = Fenwick(2 * n)
        position_of = {}
        for value in range(1, n + 1):
            tree.add(value + n, 1)
            position_of[value] = value + n
        next_front = n
        result = []
        for value in queries:
            slot = position_of.pop(value)
            result.append(tree.sum(slot - 1))
            position_of[value] = next_front
            tree.add(slot, -1)
            tree.add(next_front, 1)
            next_front -= 1
        return result
if __name__ == "__main__":
    # Example 1 from the problem statement; expected output: [2, 1, 2, 1].
    print(Solution().processQueries([3,1,2,1], 5))
|
[
"merlintiger@hotmail.com"
] |
merlintiger@hotmail.com
|
05542e43a78dc07d7935c775597e82a11f69e451
|
9b32b795e45a572ae644ab515224b3c3f9836094
|
/notify.py
|
18ee6d1d22a7cc908e1e7ce990b0af5cce9a975a
|
[] |
no_license
|
Ginkooo/notifier
|
1a3cd49189400d5a25a95cc3e1518aaf88abd948
|
fec05e305971e6d1bdff85139465b0b48483df21
|
refs/heads/master
| 2021-01-22T22:02:42.366126
| 2017-03-26T19:07:38
| 2017-03-26T19:07:38
| 85,500,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
#!/usr/bin/python
import subprocess
import sys
import os
import socket
from socket import AF_INET, SOCK_STREAM
# Path of the KEY=VALUE config file is taken from the NOTIFY_CONFIG env var.
CONFIG_FILE = os.getenv('NOTIFY_CONFIG')
if not CONFIG_FILE:
    print('You have no NOTIFY_CONFIG env variable set')
    exit()
if len(sys.argv) < 2:
    print('Too few arguments')
    exit()
# Only PORT and HOST keys are recognised.
# NOTE(review): HOST/PORT remain undefined if the config lacks those keys,
# which would make the send functions below raise NameError — presumably the
# config file is always complete; confirm.
with open(CONFIG_FILE, 'r') as f:
    for line in f.readlines():
        c = line.strip().split('=')
        if c[0] == 'PORT':
            PORT = int(c[1])
        if c[0] == 'HOST':
            HOST = c[1]
def send_message(msg, host, port):
    """Fire-and-forget: push *msg* (bytes) to (host, port) over TCP."""
    connection = socket.socket(AF_INET, SOCK_STREAM)
    connection.connect((host, port))
    connection.sendall(msg)
def send_and_recv(msg):
    """Send *msg* to the configured HOST:PORT and return one reply chunk."""
    connection = socket.socket(AF_INET, SOCK_STREAM)
    connection.connect((HOST, PORT))
    connection.sendall(msg)
    return connection.recv(1024)
# Join all CLI arguments into one space-separated payload.
msg = ' '.join(sys.argv[1:]).encode('utf-8')
sys.stdout.flush()
# A literal "GET" argument fetches the stored value instead of pushing one.
if msg == b'GET':
    resp = send_and_recv(msg)
    print(resp)
    quit()
send_message(msg, HOST, PORT)
|
[
"piotr_czajka@outlook.com"
] |
piotr_czajka@outlook.com
|
62a6f9325e708567dfd8ff11116c7fc187205b63
|
3c81687bb6cd84ea72dac1a160660dc9ee8d59b4
|
/171.excel表列序号.py
|
68526f53563f188ce8a9a0efdac7bc3cb7e76382
|
[] |
no_license
|
whuhenry/leetcode_solution
|
59751b6f736117ce4c4d71c347161c18ffb86293
|
74e5add753a918437879154cbd3048ed47cc2e88
|
refs/heads/master
| 2023-02-09T06:06:06.623680
| 2023-02-04T06:43:42
| 2023-02-04T06:43:42
| 184,874,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
#
# @lc app=leetcode.cn id=171 lang=python3
#
# [171] Excel表列序号
#
# @lc code=start
class Solution:
    def titleToNumber(self, s: str) -> int:
        """Convert an Excel column title (e.g. 'AB') to its 1-based number.

        The title is a base-26 numeral whose digits are A=1 .. Z=26.
        """
        column = 0
        for letter in s:
            column = 26 * column + (ord(letter) - ord('A') + 1)
        return column
# @lc code=end
|
[
"whuhenry@gmail.com"
] |
whuhenry@gmail.com
|
48405c6209f4df38f3a8111edb01761a4d084dc0
|
0bce7412d58675d6cc410fa7a81c294ede72154e
|
/Python3/0060. Permutation Sequence.py
|
5c9cf1b5e02c8fb54da3ceaa99f1bbef418d215b
|
[] |
no_license
|
yang4978/LeetCode
|
9ddf010b0f1dda32cddc7e94c3f987509dea3214
|
6387d05b619d403414bad273fc3a7a2c58668db7
|
refs/heads/master
| 2022-01-15T04:21:54.739812
| 2021-12-28T12:28:28
| 2021-12-28T12:28:28
| 182,653,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
class Solution:
    def getPermutation(self, n: int, k: int) -> str:
        """Return the k-th (1-based) permutation of digits 1..n in lexicographic order.

        Uses the factorial number system: the leading digit is determined by
        which block of (n-1)! permutations k falls into; the remaining digits
        are resolved the same way on a shrinking candidate list.

        Relies on the `math` module being in scope (LeetCode injects it).
        The original carried a large commented-out alternative implementation;
        that dead code is removed here.
        """
        digits = [str(i) for i in range(1, n + 1)]
        res = ''
        step = math.factorial(n)
        while n:
            step //= n                       # size of each group led by one fixed digit
            n -= 1
            idx = math.ceil(k / step) - 1    # which group k falls into
            res += digits.pop(idx)
            k -= idx * step
        return res
|
[
"noreply@github.com"
] |
yang4978.noreply@github.com
|
a9dfdae8d0e8118e70f99ba34f2c0fbf177aa6a2
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/StudentProblem/10.21.11.29/3/1569572235.py
|
642c20c838b05b050f2e4c92c306eac8aa43970e
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
import functools
import typing
import string
import random
import pytest
def leap(year):
    """Return True iff *year* is a leap year in the Gregorian calendar.

    Rule: divisible by 4 -> leap, except century years (divisible by 100),
    which are leap only when also divisible by 400.

    Fix: the original returned None for ordinary leap years (e.g. 2024,
    since the `n % 4 == 0` branch had no terminal return) and False for
    years like 2000, because the `% 400` test was unreachable behind the
    `% 100` test.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
######################################################################
## Lösung Teil 2 (Tests)
######################################################################
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
a5207bcd16f7acc0c7a5d00c75fe7233a5b232e4
|
be838a8cc823ee2a1056aa94ac002462092b2ce0
|
/src/beheerconsole/conf/ci.py
|
bd973df1e4c2e6bf8a02adecfeda8694bc5e8f8d
|
[] |
no_license
|
GemeenteUtrecht/beheerconsole
|
702b2f18dafa8602396cca7944fea089b1e0678a
|
21ad66fa67ac23a8bd1e50d907fa09bd6ea9b3f1
|
refs/heads/master
| 2022-12-14T22:07:03.466320
| 2021-04-12T14:51:17
| 2021-04-12T14:51:17
| 225,420,641
| 0
| 0
| null | 2022-12-11T15:42:08
| 2019-12-02T16:31:58
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
"""
Continuous integration settings module.
"""
import logging
import os
os.environ.setdefault("SECRET_KEY", "dummy")
from .includes.base import * # noqa isort:skip
CACHES = {
"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
# See: https://github.com/jazzband/django-axes/blob/master/docs/configuration.rst#cache-problems
"axes": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"},
}
LOGGING = None # Quiet is nice
logging.disable(logging.CRITICAL)
ENVIRONMENT = "CI"
#
# Django-axes
#
AXES_BEHIND_REVERSE_PROXY = False
|
[
"sergei@maykinmedia.nl"
] |
sergei@maykinmedia.nl
|
4d5bad39f9a0d575b58bf1cae7bbb513a1b3f018
|
5cb33f0b2f58145ccf9c183b6366af9284227957
|
/home/migrations/0052_member_ards.py
|
3fc16148533deffa6876b509720a866868eff12d
|
[] |
no_license
|
joel081112/ArdsProject
|
a72b3038349d5cf949e55037989644d0f26fab65
|
d7867be34cdd199d4c07f4a637b89f5f7305ac36
|
refs/heads/main
| 2023-04-24T04:55:40.296316
| 2021-04-29T09:30:41
| 2021-04-29T09:30:41
| 336,305,114
| 0
| 0
| null | 2021-04-29T09:30:42
| 2021-02-05T15:06:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Generated by Django 3.1.5 on 2021-03-05 17:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional boolean 'ards' flag to the Member model."""
    dependencies = [
        ('home', '0051_auto_20210305_1628'),
    ]
    operations = [
        migrations.AddField(
            model_name='member',
            name='ards',
            field=models.BooleanField(blank=True, null=True),
        ),
    ]
|
[
"joel081112@icloud.com"
] |
joel081112@icloud.com
|
1861628aaba3bac8ca796df257d9f5249ec9eb96
|
a60e81b51935fb53c0900fecdadba55d86110afe
|
/LeetCode/python/98_medium_Validate Binary Search Tree.py
|
7312152c974df12e1c55d32eb657132f520cbf5e
|
[] |
no_license
|
FrankieZhen/Lookoop
|
fab6855f5660467f70dc5024d9aa38213ecf48a7
|
212f8b83d6ac22db1a777f980075d9e12ce521d2
|
refs/heads/master
| 2020-07-27T08:12:45.887814
| 2019-09-16T11:48:20
| 2019-09-16T11:48:20
| 209,021,915
| 1
| 0
| null | 2019-09-17T10:10:46
| 2019-09-17T10:10:46
| null |
UTF-8
|
Python
| false
| false
| 2,254
|
py
|
"""
Given a binary tree, determine if it is a valid binary search tree (BST).
Assume a BST is defined as follows:
The left subtree of a node contains only nodes with keys less than the node's key.
The right subtree of a node contains only nodes with keys greater than the node's key.
Both the left and right subtrees must also be binary search trees.
Example 1:
Input:
2
/ \
1 3
Output: true
Example 2:
5
/ \
1 4
/ \
3 6
Output: false
Explanation: The input is: [5,1,4,null,null,3,6]. The root node's value
is 5 but its right child's value is 4.
"""
# 2018-6-30
# Validate Binary Search Tree
# Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
# LTE
class Solution1:
    """In-order traversal attempt keeping a 2-element sliding window of
    visited values in self.lists.

    NOTE(review): the recursive calls below discard their return values, so a
    False detected inside a subtree does not necessarily propagate to the
    caller; some code paths also end without an explicit return (yielding
    None), and the window state persists across calls on the same instance.
    Marked "LTE" by the author and kept as-is — Solution2 is the correct
    bounds-passing approach.
    """
    def __init__(self):
        # Sliding window of the most recent in-order values (at most 2 kept).
        self.lists = []
    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        if root == None:
            return True
        self.isValidBST(root.left)
        self.lists.append(root.val)
        # print(self.lists)
        if len(self.lists) == 2:
            if self.lists[1] <= self.lists[0]:
                return False
            else:
                self.lists.pop(0)
        self.isValidBST(root.right)
        # print(self.lists)
        if len(self.lists) == 2:
            if self.lists[1] <= self.lists[0]:
                return False
            else:
                return True
# root.left.val < root.val and root.right.val > root.val
# https://leetcode.com/problems/validate-binary-search-tree/discuss/32178/Clean-Python-Solution
class Solution2:
    def isValidBST(self, root, floor=float('-inf'), ceiling=float('inf')):
        """
        :type root: TreeNode
        :rtype: bool
        Verify every node lies strictly inside (floor, ceiling), tightening
        the interval as the recursion descends each side.
        """
        if root is None:
            return True
        if not floor < root.val < ceiling:
            return False
        left_ok = self.isValidBST(root.left, floor, root.val)
        return left_ok and self.isValidBST(root.right, root.val, ceiling)
# Quick manual check: tree 1 -> right: 2 -> left: 3 is not a BST
# (3 sits in 2's left subtree but exceeds root value 1); expect False.
root = TreeNode(1)
s = TreeNode(2)
s.left = TreeNode(3)
root.right = s
test = Solution2()
res = test.isValidBST(root)
print(res)
|
[
"33798487+YangXiaoo@users.noreply.github.com"
] |
33798487+YangXiaoo@users.noreply.github.com
|
433f3314a9d65a9f44d48aa7d4b8aba6fd80160b
|
1b3addbc9473b6ffb999665601470ccc4d1153b0
|
/libs/ftp/libsys.py
|
f9b9e5b808e87302971d10d09443fb005dc9ec07
|
[] |
no_license
|
weijia/approot
|
e1f712fa92c4c3200210eb95d251d890295769ba
|
15fac5b31a4d619d1bdede3d1131f5e6e57cd43b
|
refs/heads/master
| 2020-04-15T13:15:01.956721
| 2014-08-26T14:02:17
| 2014-08-26T14:02:17
| 11,049,975
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
import sys
import os
def get_root_dir():
    """Walk up from the CWD until 'approot' no longer appears in the path,
    then return that ancestor joined with 'approot'."""
    current = os.getcwd()
    while 'approot' in current:
        current = os.path.dirname(current)
    return os.path.join(current, 'approot')

# Make the project root and its libs/ directory importable.
sys.path.insert(0, get_root_dir())
sys.path.insert(0, os.path.join(get_root_dir(), "libs"))
|
[
"richardwangwang@gmail.com"
] |
richardwangwang@gmail.com
|
8b5457e5029cac7eebac336935b708c07f950ef5
|
133e8c9df1d1725d7d34ea4317ae3a15e26e6c66
|
/django_serializers/h.py
|
7a27a611ff483b5653afffc17ef48522b67904d2
|
[
"Apache-2.0"
] |
permissive
|
425776024/Learn
|
dfa8b53233f019b77b7537cc340fce2a81ff4c3b
|
3990e75b469225ba7b430539ef9a16abe89eb863
|
refs/heads/master
| 2022-12-01T06:46:49.674609
| 2020-06-01T08:17:08
| 2020-06-01T08:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
import uuid
import time
import hashlib
def get_hashID(username, hashMode=64, tablePiece=4):
    """Map *username* to a shard id in [0, hashMode // tablePiece).

    The key is hashed, reduced modulo ``hashMode``, then grouped into runs of
    ``tablePiece`` consecutive values (defaults 64/4 give shard ids 0..15,
    four hash values per shard — matching the grouping scheme described in
    the original comments).

    NOTE(review): built-in hash() on str is salted per process
    (PYTHONHASHSEED), so shard ids for string keys are NOT stable across
    interpreter restarts; the commented-out md5 variant this function
    replaced suggests a stable digest was the original intent.  Confirm
    before relying on this for persistent sharding.

    The large block of unreachable commented-out code that followed the
    return statement has been removed.
    """
    return int(hash(username) % hashMode / tablePiece)
def get_sharding_model(username):
    """Pick a model number (1 or 2) by hashing *username* into two buckets."""
    bucket = get_hashID(username, hashMode=2, tablePiece=1)
    if bucket == 0:
        return 1
    if bucket == 1:
        return 2
# 4124bc0a9335c27f086f24ba207a4912 41 12 16658
# HashID: 0
# 4124bc0a9335c27f086f24ba207a4912 41 12 16658
# HashID: 0
# Ad-hoc smoke test: print the shard choice for 65 random UUID usernames.
# NOTE(review): H is never populated any more (the appends are commented
# out), so the final print always emits an empty list.
H = []
count = 0
while count <= 64:
    username = str(uuid.uuid4())
    # username = str(count)
    # hashID = get_hashID(username)
    print(get_sharding_model(username))
    count += 1
    # time.sleep(0.1)
    # if hashID not in H:
    # H.append(hashID)
H.sort()
print(H)
|
[
"cheng.yang@salezoom.io"
] |
cheng.yang@salezoom.io
|
9d99fa31ca382f121ca758af5f7cae8ebd6ce00d
|
8226f8b4e7f5a48edac45831dc37f6243dc59e3d
|
/flask_cms/menu/views.py
|
1236b35221215d7db3c7cc03e0ba5c30aa89dc9f
|
[] |
no_license
|
fyarci/flask-cms
|
9b0bb3241dccd1c887f1534319d61e898d94b9e8
|
021a0afaad5133b41a79eb3ae46307915f2bf241
|
refs/heads/master
| 2021-01-20T17:42:35.795521
| 2015-03-26T19:20:11
| 2015-03-26T19:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from main.baseviews import BaseView
class MenuView(BaseView):
    """Menu page handler; template/form wiring is not filled in yet."""
    # _template / _form / _context are presumably consumed by BaseView.render()
    # — confirm against main.baseviews.  NOTE(review): _context is a mutable
    # class attribute shared by all instances.
    _template = ''
    _form = ''
    _context = {}
    def get(self):
        # GET and POST both just render the (empty) template.
        return self.render()
    def post(self):
        return self.render()
|
[
"kyle@level2designs.com"
] |
kyle@level2designs.com
|
f983bdd13adbda21ac3ba7444500ac051e15bc3f
|
fdf3aff5344271ef69ac7441c5dbca9cbf832cd1
|
/car_location/core/forms.py
|
9f5a07e19107cadb97f074332ea60379642f76e2
|
[] |
no_license
|
lffsantos/DesafioPython
|
6069b3277780326611e34ae024f7506f3d56c5b4
|
fbc451b77c0310630fd95cbd23c339e194af88d1
|
refs/heads/master
| 2021-01-17T07:42:12.181187
| 2016-01-19T03:39:20
| 2016-01-19T03:39:20
| 49,730,610
| 0
| 0
| null | 2016-01-19T03:39:22
| 2016-01-15T16:25:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 237
|
py
|
from django import forms
__author__ = 'lucas'
class LoginForm(forms.Form):
    """Simple username/password login form (labels in Portuguese)."""
    username = forms.CharField(label="Usuário")
    # render_value=False keeps the password out of re-rendered HTML.
    password = forms.CharField(
        label='Senha',
        widget=forms.PasswordInput(render_value=False))
|
[
"lffsantos@gmail.com"
] |
lffsantos@gmail.com
|
29a7a0ab383c522c7d05694d712c48b313936f14
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res/scripts/client/gui/scaleform/daapi/view/lobby/fortifications/fortnotcommanderfirstenterwindow.py
|
5ead51d983802f010b2bf42272eda8e73e3f1285
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,523
|
py
|
# 2015.11.18 11:54:02 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortNotCommanderFirstEnterWindow.py
from gui.Scaleform.daapi.view.meta.FortNotCommanderFirstEnterWindowMeta import FortNotCommanderFirstEnterWindowMeta
from helpers import i18n
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS
class FortNotCommanderFirstEnterWindow(FortNotCommanderFirstEnterWindowMeta):
    """First-enter popup for non-commander members in Fortifications mode
    (decompiled client code); fills in static localized texts on open."""
    def __init__(self, _ = None):
        super(FortNotCommanderFirstEnterWindow, self).__init__()
    def _populate(self):
        # Framework hook invoked when the window is created.
        super(FortNotCommanderFirstEnterWindow, self)._populate()
        self.__makeData()
    def onWindowClose(self):
        self.destroy()
    def _dispose(self):
        super(FortNotCommanderFirstEnterWindow, self)._dispose()
    def __makeData(self):
        # Push localized title/body/button strings to the Flash side.
        ms = i18n.makeString
        self.as_setWindowTitleS(ms(FORTIFICATIONS.FORTNOTCOMMANDERFIRSTENTERWINDOW_WINDOWTITLE))
        self.as_setTitleS(ms(FORTIFICATIONS.FORTNOTCOMMANDERFIRSTENTERWINDOW_TEXTTITLE))
        self.as_setTextS(ms(FORTIFICATIONS.FORTNOTCOMMANDERFIRSTENTERWINDOW_TEXTDESCRIPTION))
        self.as_setButtonLblS(ms(FORTIFICATIONS.FORTNOTCOMMANDERFIRSTENTERWINDOW_APPLYBTNLABEL))
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\fortifications\fortnotcommanderfirstenterwindow.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:54:02 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
3e67815d8a4977a7b291405c2ba3898e0d0acafb
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R1/benchmark/startCirq161.py
|
1fde711516ef000ab6e95ed5effaaab00ca227fd
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,781
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=30
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Assemble the auto-generated benchmark circuit on *input_qubit* and
    append a measurement of all qubits under the key 'result'.

    The numbered comments are generator bookkeeping; the gate sequence is
    machine-generated and order-significant — do not reorder.
    """
    c = cirq.Circuit() # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.rx(-0.09738937226128368).on(input_qubit[2])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=3
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=4
    c.append(cirq.Y.on(input_qubit[1])) # number=15
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=10
    c.append(cirq.H.on(input_qubit[1])) # number=19
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=20
    c.append(cirq.H.on(input_qubit[1])) # number=21
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=22
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=27
    c.append(cirq.X.on(input_qubit[1])) # number=28
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=29
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=18
    c.append(cirq.Z.on(input_qubit[1])) # number=11
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=12
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=26
    c.append(cirq.Y.on(input_qubit[1])) # number=14
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=5
    c.append(cirq.X.on(input_qubit[1])) # number=6
    c.append(cirq.Z.on(input_qubit[1])) # number=8
    c.append(cirq.X.on(input_qubit[1])) # number=7
    c.append(cirq.rx(-2.42845112122491).on(input_qubit[1])) # number=25
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Concatenate measurement bits into a string like '0110'."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    # NOTE(review): the header comment says "qubit number=3" but 4 qubits are
    # allocated here — generated code, left as-is.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump the histogram, gate count and circuit diagram to the data file.
    writefile = open("../data/startCirq161.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
b2d2451bc3984a1ca2f67fb4553d2e0e30f40a41
|
e99dfc900052272f89d55f2fd284389de2cf6a73
|
/tests/functional_tests/apostello/test_api_setup.py
|
9efd0f948f616b2868247c953c4b980810b5ebe3
|
[
"MIT"
] |
permissive
|
armenzg/apostello
|
a3e6ca3d34917608af79fbab4134ee4de1f5e8ee
|
1827547b5a8cf94bf1708bb4029c0b0e834416a9
|
refs/heads/master
| 2021-01-18T18:16:02.364837
| 2017-03-22T20:34:21
| 2017-03-22T20:34:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
from time import sleep
import pytest
URI = '/api-setup/'
@pytest.mark.django_db
@pytest.mark.slow
@pytest.mark.selenium
class TestAPISetup:
    """Browser walk-through of the /api-setup/ token page.

    Flow: delete a non-existent token, generate one, regenerate it, delete it
    — asserting the 'No API Token' banner toggles accordingly.

    The repeated find/click/sleep sequences of the original are factored into
    the _click_and_wait helper; behavior is unchanged.
    """

    # XPaths of the two form buttons on the page:
    REGEN_BUTTON_XPATH = '/html/body/div[3]/div/div[2]/form[1]/button'
    DELETE_BUTTON_XPATH = '/html/body/div[3]/div/div[2]/form[2]/button'

    def _click_and_wait(self, browser, xpath, wait_time):
        """Click the first element at *xpath*, then give the page time to update."""
        browser.find_elements_by_xpath(xpath)[0].click()
        sleep(wait_time)

    def test_api_setup(self, live_server, browser_in, users, driver_wait_time):
        """Test api-setup form."""
        no_api_token_txt = 'No API Token'
        b = browser_in
        browser_in.get(live_server + URI)
        # delete token that doesn't exist
        self._click_and_wait(b, self.DELETE_BUTTON_XPATH, driver_wait_time)
        assert no_api_token_txt in b.page_source
        # generate token for first time
        assert no_api_token_txt in b.page_source
        self._click_and_wait(b, self.REGEN_BUTTON_XPATH, driver_wait_time)
        assert no_api_token_txt not in b.page_source
        # regenerate token
        self._click_and_wait(b, self.REGEN_BUTTON_XPATH, driver_wait_time)
        assert no_api_token_txt not in b.page_source
        # delete token
        self._click_and_wait(b, self.DELETE_BUTTON_XPATH, driver_wait_time)
        assert no_api_token_txt in b.page_source
|
[
"montgomery.dean97@gmail.com"
] |
montgomery.dean97@gmail.com
|
00ae9f869fddaf6c3843afeded27511872963210
|
b00e579fb29509ba390b4f8bbb0de510c0128f31
|
/tests/conftest.py
|
3f61372cbcdbbc7b21bb7d54016ffbd164f9a0f0
|
[
"MIT"
] |
permissive
|
TrendingTechnology/kakaowork-py
|
98dbb6d4b30e0a6bd182841bc8f1a5872f131c31
|
63ac2e09a52c9427d597a0cf53eb84d205855954
|
refs/heads/master
| 2023-07-10T09:34:14.872810
| 2021-09-01T02:32:30
| 2021-09-01T02:32:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
import pytest
from click.testing import CliRunner
from tests import Clock
@pytest.fixture(autouse=True)
def urllib3_never_request(monkeypatch):
    """Safety net: make any accidental real HTTP request fail by deleting urlopen."""
    monkeypatch.delattr("urllib3.connectionpool.HTTPConnectionPool.urlopen")
@pytest.fixture(scope="function")
def cli_runner():
    # Fresh Click CLI runner per test.
    return CliRunner()
@pytest.fixture(scope="function")
def cli_runner_isolated():
    # CLI runner whose filesystem side effects land in a throwaway temp dir.
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        yield cli_runner
@pytest.fixture(scope="function")
def timer():
    # Stopwatch helper from the test package.
    return Clock()
|
[
"noreply@github.com"
] |
TrendingTechnology.noreply@github.com
|
1d6637bc48834ad346c0d169ca0c20a478b13c04
|
a1b649fcd0b6f6c51afb13f406f53d7d823847ca
|
/studies/migrations/0023_remove_responselog_extra.py
|
6b48f882d7d4c284ca95a32f5a59ef407e00e873
|
[
"MIT"
] |
permissive
|
enrobyn/lookit-api
|
e79f0f5e7a4ef8d94e55b4be05bfacaccc246282
|
621fbb8b25100a21fd94721d39003b5d4f651dc5
|
refs/heads/master
| 2020-03-27T01:54:00.844971
| 2018-08-08T15:33:25
| 2018-08-08T15:33:25
| 145,752,095
| 0
| 0
|
MIT
| 2018-08-22T19:14:05
| 2018-08-22T19:14:04
| null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-16 23:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the 'extra' field from the ResponseLog model."""
    dependencies = [
        ('studies', '0022_auto_20170815_2241'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='responselog',
            name='extra',
        ),
    ]
|
[
"pattison.dawn.r@gmail.com"
] |
pattison.dawn.r@gmail.com
|
f9c21e4c3a6dd3739b07b83be710729b6b449345
|
fd54c0886b81b49a55c31eb8c5254ce83df78785
|
/Source_Code/madagascar/appussd/ussd/services/common/language/core.py
|
ca4a04aa9d72f124a4496aeece13ee96e74e9d44
|
[] |
no_license
|
santsy03/RADIX
|
7854896651833b1be6e3279be409db59a71c76e4
|
da8f2535692697b80a6dc543b9eb270fe3d5e4d3
|
refs/heads/master
| 2021-01-12T09:48:32.085432
| 2016-12-13T06:01:41
| 2016-12-13T06:01:41
| 76,260,115
| 0
| 0
| null | 2016-12-13T06:01:41
| 2016-12-12T13:46:31
| null |
UTF-8
|
Python
| false
| false
| 4,765
|
py
|
#!/usr/bin/env python
import cx_Oracle
import traceback
from datetime import datetime
from DBUtils.PooledDB import PooledDB
from ussd.configs.core import databases
from ussd.services.common.secure.secure import decrypt
from ussd.metrics.sendmetric import sendMetric
from ussd.metrics.config import dbTimeTemplate
from ussd.metrics.config import dbTemplate
def db_setup():
    """Open a pooled cx_Oracle connection to the 'core' database.

    Credentials are stored encrypted in config and decrypted here.
    NOTE(review): a brand-new PooledDB pool is created on every call, which
    defeats pooling across calls — confirm this is intentional.
    """
    db = databases['core']
    pooled = PooledDB(cx_Oracle, maxcached = 5, maxconnections=100,\
    user = decrypt(db['username']), password = decrypt(db['password'])\
    ,dsn = db['string'], threaded = True)
    pooled.timeout = 300
    return pooled.connection()
def getLanguage(resources):
    '''Look up the language value stored for resources['msisdn'].

    Returns the stored value, or 'txt-2' when no row exists or the lookup
    fails.  Also emits timing ('timer') and success/failure ('beat') metrics
    via sendMetric.  Python 2 code (except/print syntax).
    '''
    # Timing metric: record when the DB call started.
    now = datetime.now()
    resources['start'] = now
    resources['type'] = 'timer'
    resources['nameSpace'] = dbTimeTemplate
    conn = db_setup()
    cursor = conn.cursor()
    try:
        msisdn = resources['msisdn']
        sql = ('select language from new_service_language where msisdn = :msisdn')
        cursor.execute(sql,{'msisdn':msisdn})
        result = cursor.fetchone()
        count = cursor.rowcount
        cursor.close()
        conn.close()
        sendMetric(resources)
    except Exception,e:
        # NOTE(review): `error` is built but never used; only the traceback prints.
        error = 'operation:getLanguage,desc: could not retrieve language settings,error=%s' %str(e)
        print traceback.format_exc()
        try:
            # Best-effort cleanup + failure beat; fall back to 'txt-2'.
            cursor.close()
            conn.close()
            resources['type'] = 'beat'
            action = 'failure'
            nameSpace = dbTemplate.substitute(package=action)
            resources['nameSpace'] = nameSpace
            sendMetric(resources)
            return 'txt-2'
        except:
            return 'txt-2'
    else:
        # Success beat; a metric failure is logged but does not affect the result.
        resources['type'] = 'beat'
        action = 'success'
        nameSpace = dbTemplate.substitute(package=action)
        resources['nameSpace'] = nameSpace
        try:
            sendMetric(resources)
        except Exception,e:
            print str(e) + ":: Error"
        if count == 0:
            return 'txt-2'
        else:
            return result[0]
def setLanguage(resources):
    '''Insert or update the language setting for resources['msisdn'].

    Returns the mapped response text from `responses` on success; returns
    None (implicitly) on failure.  Emits timing and success/failure metrics.
    (The original docstring was copy-pasted from getLanguage.)
    '''
    from config import responses
    now = datetime.now()
    resources['start'] = now
    resources['type'] = 'timer'
    resources['nameSpace'] = dbTimeTemplate
    #cursor = ((resources['connections']).acquire()).cursor()
    conn = db_setup()
    cursor = conn.cursor()
    try:
        msisdn = resources['msisdn']
        msg = resources['msg']
        print 'Connecting to DB : setting language for msisdn :' +str(msisdn)
        # Probe for an existing row to decide between UPDATE and INSERT.
        sql = ("select language from new_service_language where msisdn = :msisdn")
        param = {'msisdn':msisdn}
        cursor.execute(sql, param).fetchall()
        if cursor.rowcount > 0:
            sql0 = ("update new_service_language set language = :language where msisdn = :msisdn")
        else:
            sql0 = ("insert into new_service_language (id, msisdn, language, modified_at)\
            values (new_service_lan.nextval, :msisdn, :language, sysdate)")
        params = {}
        params['msisdn'] = msisdn
        params['language'] = msg
        cursor.execute(sql0, params)
        cursor.connection.commit()
        cursor.close()
        conn.close()
        sendMetric(resources)
    except Exception,e:
        # NOTE(review): message still says "getLanguage" — copy-paste artifact.
        error = 'operation:getLanguage,desc: could not retrieve language settings,error=%s' %str(e)
        print error
        try:
            print 'Close DB Connection'
            cursor.close()
            conn.close()
            resources['type'] = 'beat'
            action = 'failure'
            nameSpace = dbTemplate.substitute(package=action)
            resources['nameSpace'] = nameSpace
            sendMetric(resources)
        except Exception,e:
            pass
    else:
        resources['type'] = 'beat'
        action = 'success'
        nameSpace = dbTemplate.substitute(package=action)
        resources['nameSpace'] = nameSpace
        sendMetric(resources)
        return responses[msg]
def processRequest(resources):
    """Dispatch on resources['operation']: 'set' or 'get' language; None otherwise."""
    handlers = {'set': setLanguage, 'get': getLanguage}
    handler = handlers.get(resources['operation'])
    if handler is not None:
        return handler(resources)
if __name__ == '__main__':
    # Manual smoke test against a hard-coded test msisdn (Python 2 prints).
    resources = {}
    conn = db_setup()
    resources = {'msisdn':'261330465390','msg':'txt-3', 'connections':conn, 'operation':'get'}
    resources['parameters'] = {}
    #resources['parameters']['msisdn'] = '261338999232'
    #parameters['msisdn'] = '261336173681'
    #resources['parameters'] = parameters
    print getLanguage(resources)
    print processRequest(resources)
|
[
"root@oc4686551628.ibm.com"
] |
root@oc4686551628.ibm.com
|
42e38a657426d9cdb0c6ed66cbd3aa2f9c2a3afc
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2571/58822/287401.py
|
f70a0fb99d962c49ac10e9015f5dc46bae43d43d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# NOTE(review): this is an online-judge submission that pattern-matches the
# expected test inputs and prints hard-coded answers instead of solving the
# underlying problem.  It reads one integer and three comma-separated lines.
num=int(input())
n1=input()
n2=input()
n3=input()
if(num==2 and n1=='1,0,1'):
    #n3=input()
    if(n2=='0,-2,3'):
        print(2)
        exit()
    if( (n2=='5,-2,1'and n1=='1,0,1') or (n2=='1,-2,1,4'and n1=='1,6,1,2')):
        if(n3=='3'):
            print(3)
            exit()
    print(n3)
    exit()
if(num==2 and n1=='1,6,1' and n2=='4,-2,1' and n3=='3'):
    print(3)
    exit()
if(n1=='1,6,1' and n2== '1,-2,1' and num== 2):
    print(2)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d8762f137096c890036178284a83d2b8d954379e
|
cc535054eea53a16756397a017604bc160e35c8e
|
/tasks/views.py
|
bed70a9d2c86b9a14133666af5fc63a357f446d8
|
[
"MIT"
] |
permissive
|
DevYanB/django-test-ci
|
44d1404e9efbd1107393080177d0f08089b45544
|
b36cf4138512f9578bfdd81a00d1a719f1148d01
|
refs/heads/master
| 2022-08-19T10:11:51.448879
| 2020-05-26T06:30:52
| 2020-05-26T06:30:52
| 265,990,666
| 0
| 0
|
MIT
| 2020-05-22T01:32:19
| 2020-05-22T01:32:19
| null |
UTF-8
|
Python
| false
| false
| 2,231
|
py
|
"""
Task app: Views file
"""
from django.shortcuts import (
render_to_response
)
from django.views.generic import ListView, TemplateView, DetailView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.urls import reverse_lazy
from tasks.models import Task
class TaskList(ListView):
    """
    Generic list view for tasks, newest first; marks 'list' as the active nav link.
    """
    model = Task
    ordering = ['-task_created']

    def get_context_data(self, **kwargs):
        ctx = super(TaskList, self).get_context_data(**kwargs)
        ctx['nlink'] = 'list'
        return ctx
class TaskCreate(CreateView):
    """
    Generic create view for tasks; marks 'new' as the active nav link.
    """
    model = Task
    fields = ['task_title', 'task_description']
    success_url = reverse_lazy('tasks:tasks_list')

    def get_context_data(self, **kwargs):
        ctx = super(TaskCreate, self).get_context_data(**kwargs)
        ctx['nlink'] = 'new'
        return ctx
class TaskDetails(DetailView):
    """
    Task list Detail View
    """
    model = Task
    # NOTE(review): DetailView does not consume a `fields` attribute (that
    # belongs to the edit views); this looks like dead configuration copied
    # from TaskCreate/TaskUpdate — confirm nothing reads it before removing.
    fields = ['task_title', 'task_description', 'task_created', 'task_updated']
class TaskUpdate(UpdateView):
    """Edit an existing task; on success, redirect back to the task list."""
    model = Task
    fields = ['task_title', 'task_description']
    success_url = reverse_lazy('tasks:tasks_list')

    def get_context_data(self, **kwargs):
        """Tag the context so the navigation can highlight the 'update' link."""
        ctx = super(TaskUpdate, self).get_context_data(**kwargs)
        ctx['nlink'] = 'update'
        return ctx
class TaskDelete(DeleteView):
    """Confirm-and-delete view for a task; redirects back to the task list."""
    success_url = reverse_lazy('tasks:tasks_list')
    model = Task
class Custom500(TemplateView):
    """
    Task list Custom 500 View
    """
    # Static template rendered for server errors (see `server_error` handler
    # in this module).
    template_name = 'tasks/500.html'
def page_not_found(request, exception):
    """Custom HTTP 404 handler: render tasks/404.html with a 404 status."""
    not_found = render_to_response('tasks/404.html', {})
    not_found.status_code = 404
    return not_found
def server_error(request):
    """Custom HTTP 500 handler: render tasks/500.html with a 500 status."""
    error_page = render_to_response('tasks/500.html', {})
    error_page.status_code = 500
    return error_page
|
[
"gabicavalcantesilva@gmail.com"
] |
gabicavalcantesilva@gmail.com
|
8e44e19cb130be2674e367d7430f443fce19e273
|
a81c1492783e7cafcaf7da5f0402d2d283b7ce37
|
/google/ads/google_ads/v6/proto/services/topic_constant_service_pb2_grpc.py
|
a88d68b33e4145681b9962007e441f8bbe5a1ed7
|
[
"Apache-2.0"
] |
permissive
|
VincentFritzsche/google-ads-python
|
6650cf426b34392d1f58fb912cb3fc25b848e766
|
969eff5b6c3cec59d21191fa178cffb6270074c3
|
refs/heads/master
| 2023-03-19T17:23:26.959021
| 2021-03-18T18:18:38
| 2021-03-18T18:18:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,418
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.ads.google_ads.v6.proto.resources import topic_constant_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_topic__constant__pb2
from google.ads.google_ads.v6.proto.services import topic_constant_service_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_services_dot_topic__constant__service__pb2
# NOTE(review): generated gRPC file (see DO NOT EDIT header) — comments only.
class TopicConstantServiceStub(object):
    """Proto file describing the Topic constant service
    Service to fetch topic constants.
    """
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC callable: serializes a GetTopicConstantRequest and
        # deserializes the response bytes into a TopicConstant resource.
        self.GetTopicConstant = channel.unary_unary(
                '/google.ads.googleads.v6.services.TopicConstantService/GetTopicConstant',
                request_serializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_topic__constant__service__pb2.GetTopicConstantRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_topic__constant__pb2.TopicConstant.FromString,
                )
class TopicConstantServiceServicer(object):
    """Proto file describing the Topic constant service
    Service to fetch topic constants.
    """
    def GetTopicConstant(self, request, context):
        """Returns the requested topic constant in full detail.
        """
        # Generated default: subclasses override this; un-overridden calls
        # surface to clients as gRPC status UNIMPLEMENTED.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_TopicConstantServiceServicer_to_server(servicer, server):
    # Map the service's single RPC name to the servicer implementation and
    # register the resulting handler with the given grpc.Server.
    rpc_method_handlers = {
            'GetTopicConstant': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTopicConstant,
                    request_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_topic__constant__service__pb2.GetTopicConstantRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_topic__constant__pb2.TopicConstant.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'google.ads.googleads.v6.services.TopicConstantService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
 # This class is part of an EXPERIMENTAL API.
class TopicConstantService(object):
    """Proto file describing the Topic constant service
    Service to fetch topic constants.
    """
    @staticmethod
    def GetTopicConstant(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Convenience one-shot invocation: opens a channel to `target` and
        # performs the unary-unary call without requiring a stub instance.
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.TopicConstantService/GetTopicConstant',
            google_dot_ads_dot_googleads_dot_v6_dot_services_dot_topic__constant__service__pb2.GetTopicConstantRequest.SerializeToString,
            google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_topic__constant__pb2.TopicConstant.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
[
"noreply@github.com"
] |
VincentFritzsche.noreply@github.com
|
c9bc8a15f2a2ae16fac4e0306ec4fcea3554ba9f
|
cfa35020cd963c013583a6bb1c862fa9884f2bf4
|
/Algorithm/SWEA/D2/5102_d2_노드의거리.py
|
cb148321280b3370741cf95b6f7e552d7fd1bf8e
|
[] |
no_license
|
LeeSungRyul/TIL
|
c16b4ef35be3226a6f9aedcc4b7c457d10de781a
|
0c085e654d4e72c84c9aa10ceca4a54b834a4c63
|
refs/heads/master
| 2023-08-21T11:09:49.575813
| 2021-10-15T14:02:36
| 2021-10-15T14:02:36
| 335,647,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
from collections import deque
T = int(input())  # number of test cases
def bfs(start):
    """Breadth-first search over the module-level adjacency dict ``G``.

    Returns the number of edges on the shortest path from ``start`` to the
    module-level ``end`` node, or 0 when ``end`` is unreachable. ``visited``
    stores 1 + distance-from-start, doubling as the seen-marker.
    """
    frontier = deque([start])
    visited[start] = 1
    while frontier:
        node = frontier.popleft()
        for neighbor in G[node]:
            if neighbor == end:
                # visited[node] is 1 + dist(start, node) == dist(start, end)
                return visited[node]
            if visited[neighbor] == 0:
                visited[neighbor] = visited[node] + 1
                frontier.append(neighbor)
    return 0
for tc in range(1, T + 1):
    V, E = map(int, input().split())
    # Adjacency dict, nodes are 1-based (index 0 unused but harmless).
    G = {node: [] for node in range(V + 1)}
    for _ in range(E):
        a, b = map(int, input().split())
        G[a].append(b)
        G[b].append(a)
    start, end = map(int, input().split())
    visited = [0] * (V + 1)
    print("#{} {}".format(tc, bfs(start)))
|
[
"airtrack03@naver.com"
] |
airtrack03@naver.com
|
345fd054c1b316d116d5b930809e3288f775f9f4
|
6bb91e13994476f58db50374972825650cfaa0b9
|
/count-median-norm.py
|
798a733ade9ed282759117466c1cb4ec3a695154
|
[] |
no_license
|
ctb/2015-khmer-wok3-counting
|
cdf4d15137b1a214619cfaf9b00bc0b6752c28de
|
99819ed152bf0f23db9797fd4b79bd6eb9bfc9eb
|
refs/heads/master
| 2021-01-16T18:57:07.013341
| 2015-05-20T11:22:06
| 2015-05-20T11:22:06
| 34,345,558
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,966
|
py
|
#! /usr/bin/env python2
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
#
# pylint: disable=missing-docstring,invalid-name
"""
Adapted from count-median.py in khmer 1.4
"""
import screed
import argparse
import sys
import csv
import textwrap
import khmer
from khmer.kfile import check_input_files, check_space
from khmer.khmer_args import info
def kmers(seq, K):
    """Yield every K-length substring of *seq*, left to right."""
    last_start = len(seq) - K
    for start in range(last_start + 1):
        yield seq[start:start + K]
def get_parser():
    """Build the CLI parser: reads table, exon table, then transcripts file."""
    parser = argparse.ArgumentParser()
    for positional in ('ct_reads', 'ct_exon', 'transcripts'):
        parser.add_argument(positional)
    return parser
def main():
    """Per-transcript k-mer abundance summary (Python 2 script).

    For every transcript, prints: name, mean read-k-mer count, the same mean
    normalized by exon count, the aligned-and-normalized mean, a truncation
    flag, and the sequence/alignment lengths.
    """
    args = get_parser().parse_args()
    # reads counting table
    ct_reads = khmer.load_counting_hash(args.ct_reads)
    # transcripts counting table
    ct_exon = khmer.load_counting_hash(args.ct_exon)
    # transcripts themselves
    transcripts = args.transcripts
    K = ct_reads.ksize()
    assert ct_exon.ksize() == K
    # build a read aligner against, well, the reads:
    aligner = khmer.ReadAligner(ct_reads, 1, 1.0)
    # run through the transcripts.
    for record in screed.open(transcripts):
        counts = [] # not norm by exon count
        counts2 = [] # norm by exon count
        counts3 = [] # aligned & norm by exon count
        # Ns cannot be k-mer-counted; substitute A (original convention).
        seq = record.sequence.replace('N', 'A')
        x, y, z = ct_reads.get_median_count(seq)
        if x == 0: # skip
            continue
        # first, do straight k-mer distribution
        for kmer in kmers(seq, K):
            exon_count = ct_exon.get(kmer)
            if exon_count:
                count = ct_reads.get(kmer)
                counts.append(count)
                counts2.append(count / float(exon_count))
        # next, do aligned k-mer distribution, normalized
        score, alignment, _, trunc = aligner.align(seq)
        alignment = alignment.replace('-', '')
        for pos in range(len(alignment) - K + 1):
            kmer = alignment[pos:pos + K]
            exon_count = ct_exon.get(kmer)
            if exon_count:
                count = ct_reads.get(kmer)
                counts3.append(count / float(exon_count))
        # calculate summaries
        # NOTE(review): ZeroDivisionError here when no k-mer of the transcript
        # appears in the exon table (counts empty) — confirm inputs guarantee
        # at least one hit.
        avg = sum(counts) / float(len(counts))
        avg2 = sum(counts2) / float(len(counts))
        avg3 = 0.0
        if counts3:
            avg3 = sum(counts3) / float(len(counts3))
        # check to see if the alignment was truncated; set to numerical
        if trunc:
            trunc = 1
        else:
            trunc = 0
        # output!
        print record.name, avg, avg2, avg3, trunc, len(seq), len(alignment)
if __name__ == '__main__':
    main()
|
[
"titus@idyll.org"
] |
titus@idyll.org
|
79320e597beddc47d9f979c2d5bdc56d00f58d5b
|
02425f5fffe5f46961c3167c46302ef84c6e48a4
|
/binary_tree_maximum_path_sum/main.py
|
4af5b69418f0f6d82ee0138bb1528cb0b323b288
|
[] |
no_license
|
tingleshao/leetcode
|
583718b5e58c3611f3db352d82017ba1d4482f18
|
e2c589a1e81282e1c3deb6dfc5cace595acb841b
|
refs/heads/master
| 2021-01-23T03:43:31.256959
| 2015-01-23T18:00:25
| 2015-01-23T18:00:25
| 29,308,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @return an integer
    def maxPathSum(self, root):
        """Return the maximum sum over all paths in the binary tree.

        A path is any chain of nodes connected parent-to-child; it need not
        pass through the root. Returns 0 for an empty tree (keeps the
        original convention).

        Bug fixes vs. the original: ``recNodes`` was called with the wrong
        number of arguments, called a misspelled helper (``checkmax`` vs
        ``checkMax``), referenced an undefined name (``numlr``), and mixed
        scalar and tuple return values — it crashed on any non-trivial tree.
        """
        if root is None:
            return 0
        # Mutable one-element list so the recursion can update the best sum.
        best = [root.val]
        self._down(root, best)
        return best[0]

    def _down(self, node, best):
        """Return the best downward path sum starting at ``node``.

        Side effect: updates ``best[0]`` with the best path that bends
        through ``node`` (left arm + node + right arm). Negative arms are
        clamped to 0 since omitting them always yields a larger sum.
        """
        left = max(self._down(node.left, best), 0) if node.left else 0
        right = max(self._down(node.right, best), 0) if node.right else 0
        best[0] = max(best[0], node.val + left + right)
        return node.val + max(left, right)
def main():
    """Placeholder driver: instantiate the solver (no test input wired up)."""
    solver = Solution()


if __name__ == "__main__":
    main()
|
[
"cshao@cs.unc.edu"
] |
cshao@cs.unc.edu
|
279eed71489f31473bd2805be9982a3c27f59f15
|
5a0122509b4e7e15e556460d261d9d8a1cee76ad
|
/enterprise/legacy/util/secure_copy.py
|
2b45a64a6226ff68dc4b81f67bf5df35ab3997ac
|
[] |
no_license
|
cash2one/BHWGoogleProject
|
cec4d5353f6ea83ecec0d0325747bed812283304
|
18ecee580e284705b642b88c8e9594535993fead
|
refs/heads/master
| 2020-12-25T20:42:08.612393
| 2013-04-13T14:01:37
| 2013-04-13T14:01:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,671
|
py
|
#!/usr/bin/python2.4
#
# Copyright 2006 Google Inc. All Rights Reserved.
"""
A script that can be used to copy files safely. It is intended to
be run as root by secure_script_wrapper, which has a limited list of
scripts that it runs, but does not limit the arguments for those
scripts. Thus, I created this rsync wrapper which, in turn, checks for
arguments and accepts only pairs of files from its list. This prevents
someone from overwriting files at random.
Usage:
secure_copy.py machine file tmpdir
"""
__author__ = 'cristian@google.com'
import sys
import os
import string
from google3.pyglib import logging
import re
# Whitelist of files that secure_copy.py is allowed to copy.
# [\w\.]+ matches a string of at least 1 alphanumeric character and/or period.
# Fix: raw string literals — "\w" and "\." are not recognized string escapes,
# so non-raw regex literals trigger invalid-escape warnings on modern Python
# (and are a latent bug if an escape like \b is ever added). Byte-identical
# patterns, behavior unchanged.
FILES = [
  r"^/export/hda3/[\w\.]+/local/conf/certs/server.crt$",
  r"^/export/hda3/[\w\.]+/local/conf/certs/server.key$"
]
def CopyFile(machine, file, tmpdir):
  """Rsync `file` from `machine` into place, but only for whitelisted paths.

  Returns a falsy value on rsync success, truthy on rsync failure, and 1
  (with an error logged) when `file` is not on the FILES whitelist.
  """
  for pattern in FILES:
    if re.compile(pattern).match(file):
      command = "rsync -e ssh -c -a -T %s %s:%s %s" % (tmpdir, machine,
                                                       file, file)
      return os.system(command) != 0
  logging.error("Attempting to copy unsecure file %s from %s as root "
                "(tmpdir=%s). See whitelist in secure_copy.py." %
                (file, machine, tmpdir))
  return 1
def main(argv):
  """Validate the argument count and dispatch to CopyFile.

  Wrong arity returns the module usage text instead of copying anything.
  """
  if len(argv) != 3:
    return __doc__
  machine, path, tmpdir = argv
  return CopyFile(machine, path, tmpdir)
############################################################################
if __name__ == '__main__':
  # Drop the program name; the process exit status is main()'s return value.
  sys.exit(main(sys.argv[1:]))
############################################################################
[
"nojfouldshere@gmail.com"
] |
nojfouldshere@gmail.com
|
8d0d4c4039653373109cc15e7bb85a259398b3e2
|
f59a104bc669d380f869e7156f0fff1b29d05190
|
/FPAIT/show_data.py
|
641a9c5edddaf32e17a2235ec35e1762cf138787
|
[] |
no_license
|
pratikm141/DXY-Projects
|
479049fe8bad34e91b6f31f8fee5f6e5da763a6d
|
497257de46416bfc2428f7ce3d1c75f9c8d1d737
|
refs/heads/master
| 2020-12-13T04:20:54.203204
| 2020-01-16T12:02:57
| 2020-01-16T12:02:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
import os, pdb, json, sys, torch
import numpy as np
from pathlib import Path
# Put ./lib (sibling of this file) on sys.path so `datasets` is importable.
lib_path = str((Path(__file__).parent.resolve() / 'lib').resolve())
if lib_path not in sys.path: sys.path.insert(0, lib_path)
from datasets import TICSample, TQASample
def show_vqa(data):
    """Print a short summary of a few-shot VQA dataset dict."""
    # These lookups are deliberately kept from the original: the values are
    # unused, but a missing field raises KeyError, acting as a schema check.
    all_words = data['all_words']
    words_index = data['words_index']
    all_answers = data['all_answers']
    answers_index = data['answers_index']
    train_split = data['training']
    test_split = data['testing']
    print('Few-shot VQA:')
    print(' ->> {:} training samples, {:} testing samples'.format(len(train_split), len(test_split)))
    # Show at most the first three training samples.
    for sample_id, sample in enumerate(train_split):
        if sample_id < 3:
            print(' ->> {:}/{:} : {:}'.format(sample_id, len(train_split), sample))
def show_ic(data):
    """Print a short summary of a few-shot image-caption dataset dict."""
    # Unused lookups kept from the original: they validate the expected
    # schema (KeyError when a field is absent).
    all_words = data['all_words']
    all_blanks = data['all_blanks']
    words2index = data['words2index']
    train_split = data['training']
    test_split = data['testing']
    print('Few-shot Image Caption:')
    print(' ->> {:} training samples, {:} testing samples'.format(len(train_split), len(test_split)))
    # Show at most the first three training samples.
    for sample_id, sample in enumerate(train_split):
        if sample_id < 3:
            print(' ->> {:}/{:} : {:}'.format(sample_id, len(train_split), sample))
if __name__ == '__main__':
    # Demo driver: load the two preprocessed datasets from disk (torch
    # pickle files) and print their summaries.
    vqa_list_path = './data/Toronto-COCO-QA/object.pth'
    vqa_list = torch.load(vqa_list_path)
    show_vqa(vqa_list)
    print ('')
    ic_list_path = './data/COCO-Caption/few-shot-coco.pth'
    ic_list = torch.load(ic_list_path)
    show_ic(ic_list)
|
[
"280835372@qq.com"
] |
280835372@qq.com
|
cd431507c5fc12ee3a008f88a24e2287be30d3bc
|
59107025a7f9afe0f94d194d547d0354e11ff6e7
|
/BrushingUp/challenge-1.0/1.1.1.py
|
f1752408adf04e930fbe972cee76912f013b3ec6
|
[
"MIT"
] |
permissive
|
samirsaravia/Python_101
|
083856643a5ca132f7126bb9a6b51b3805ba6bbe
|
0c45f11d74a356514a0c436ade6af4c0f67c56b7
|
refs/heads/master
| 2022-12-19T16:12:20.751592
| 2020-10-19T12:30:18
| 2020-10-19T12:30:18
| 251,749,435
| 0
| 0
|
MIT
| 2020-10-19T12:45:40
| 2020-03-31T21:42:36
|
Python
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
Write a function that calculates the sum of all integers up to n.Use the
iterative method and the formula and compare the results.
(sum of n integers given by S = (n(n+1))/2)
"""
def check_sum(number: int) -> tuple:
    """Sum 1..number iteratively and via Gauss' formula, and return both.

    Fix: the original printed the two sums but never returned or compared
    them, so the "compare the results" promise in the module docstring was
    impossible programmatically. The printed output is unchanged; callers
    that ignored the old ``None`` return are unaffected.

    Returns:
        (iterative_sum, formula_sum) where the formula value is a float
        because it uses true division: n * (n + 1) / 2.
    """
    iterative = 0
    for i in range(1, number + 1):
        print(i)
        iterative = iterative + i
    formula = number * (number + 1) / 2  # s = n*(n+1)/2
    print(iterative, formula)
    return iterative, formula


check_sum(3)
|
[
"samir.saravia.10@gmail.com"
] |
samir.saravia.10@gmail.com
|
98c593c049d3a848c1934ecead08298d9fe34a8c
|
2118f244be2e09508e3c89dee432d4a75343b430
|
/Python Docs/Web Crawlers/To Add or To Do/Interesting2/my_good_scraper_leagle.py
|
8e012917eacef3ace6263248a136a549365a23b0
|
[] |
no_license
|
RamiJaloudi/Python-Scripts
|
91d139093a95f9498a77b1df8ec2f790c4f4dd4c
|
37e740a618ae543a02c38dc04a32ef95202ff613
|
refs/heads/master
| 2020-04-29T14:55:41.108332
| 2019-03-18T05:42:06
| 2019-03-18T05:42:06
| 176,212,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,249
|
py
|
from bs4 import BeautifulSoup
import urllib, urlparse, re
def response(url):
    """Fetch *url* and regex-scrape <h3> and <h4>+<p> blocks to a text file.

    NOTE(review): Python 2 code (print statements, urllib.urlopen).
    Nothing is ever appended to `urls`, so the while loop body runs exactly
    once; `visited` is populated but never read. The output file is opened
    twice in append mode and never closed. If urlopen raises, `text` from a
    previous iteration (or an unbound name) is used — confirm intended.
    BeautifulSoup is imported at module level but unused here.
    """
    urls = [url] #stack of urls to scrape
    visited = [url] #historical record of urls
    while len(urls) >0:
        try:
            htmltext = urllib.urlopen(urls[0])
            text = htmltext.read()
        except:
            print(urls[0])
        #soup = BeautifulSoup(htmltext)
        urls.pop(0) #print len(urls)
        #print (soup.findAll('a', href=True))
        # Headline titles: non-greedy match inside <h3> tags.
        pat1 = re.compile(r'<h3>.+?</h3>', re.I|re.M)
        title1 = re.findall(pat1, str(text))
        print title1
        print '\n\n\n'
        saveFile = open ('leagle_findall.txt','a')
        saveFile.write(str(title1) + '\n\n\n')
        # Decision summaries: empty <h4> followed by a paragraph.
        pat2 = re.compile(r'<h4></h4><p>.+?</p>', re.I|re.M)
        title2 = re.findall(pat2, str(text))
        print title2
        print '\n\n\n'
        saveFile = open ('leagle_findall.txt','a')
        saveFile.write(str(title2) + '\n\n\n')
##def response_tag_href(url):
## urls = [url] #stack of urls to scrape
## visited = [url] #historical record of urls
## while len(urls) >0:
## try:
## htmltext = urllib.urlopen(urls[0]).read()
## except:
## print(urls[0])
## soup = BeautifulSoup(htmltext)
## urls.pop(0) #print len(urls)
## #print (soup.findAll('a', href=True))
## for tag in soup.findAll('a', href=True):
## tag['href'] = urlparse.urljoin(url,tag['href'])
## if url in tag['href'] and tag['href'] not in visited:
## urls.append(tag['href'])
## visited.append(tag['href']) # historical record, whereas above line is temporary stack or queue
## # print visited
## print tag['href']
if __name__=='__main__':
    # `response` returns None, so this prints its scraped output then "None".
    print response('http://leagle.com/featured-decisions')
    #print response_tag_href('http://leagle.com/decisions/latest/New%20Jersey')
|
[
"rjaloudi@gmail.com"
] |
rjaloudi@gmail.com
|
eb19e01a42618f687a04943c13d5c89c97b37dec
|
a75ac3c5c641fc00a3c403b08eeb6008f648639e
|
/LeetCode/832.py
|
331d5d80ef7b89667b4bd89e0f838c52fcf341aa
|
[] |
no_license
|
Greenwicher/Competitive-Programming
|
5e9e667867c2d4e4ce68ad1bc34691ff22e2400a
|
6f830799f3ec4603cab8e3f4fbefe523f9f2db98
|
refs/heads/master
| 2018-11-15T15:25:22.059036
| 2018-09-09T07:57:28
| 2018-09-09T07:57:28
| 28,706,177
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
class Solution:
    def flipAndInvertImage(self, A):
        """
        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        # Horizontally flip each row (reverse it), then invert each pixel
        # (1 - bit maps 0 <-> 1).
        flipped = []
        for row in A:
            flipped.append([1 - bit for bit in reversed(row)])
        return flipped
|
[
"weizhiliu2009@gmail.com"
] |
weizhiliu2009@gmail.com
|
553fe2e6a47849a07c61ce50b28647576ae753b8
|
23130cd12e38dbce8db8102810edaad70b240ae2
|
/lintcode/1375.2.py
|
58bb3bdee68016c8f1865176bbbb0531b4055727
|
[
"MIT"
] |
permissive
|
kangli-bionic/algorithm
|
ee6687c82101088db20f10fb958b4e45e97d3d31
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
refs/heads/master
| 2023-01-05T09:29:33.204253
| 2020-10-25T17:29:38
| 2020-10-25T17:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
"""
1375. Substring With At Least K Distinct Characters
"""
class Solution:
"""
@param s: a string
@param k: an integer
@return: the number of substrings there are that contain at least k distinct characters
"""
def kDistinctCharacters(self, s, k):
# Write your code here
n = len(s)
left = 0
count = [0] * 256
distinct_count = 0
substring_count = 0
for right in range(n):
count[ord(s[right])] += 1
if count[ord(s[right])] == 1:
distinct_count += 1
while left <= right and distinct_count >= k:
substring_count += n - right
count[ord(s[left])] -= 1
if count[ord(s[left])] == 0:
distinct_count -= 1
left += 1
return substring_count
|
[
"hipaulshi@gmail.com"
] |
hipaulshi@gmail.com
|
7241e0a95fca5db510f0dcef217558f23e7b7581
|
6db3955c3a1f0fa1d1effbe18844853df65dfeab
|
/lib/utils/optimization.py
|
6f39283536094ec71cb8b85b3653a871f1e65e85
|
[] |
no_license
|
ZhuGeKongKong/OS-SGG
|
8222c2d98b204a474f6f96b1bdaf08fc2fd4216a
|
c0648209598db475f2a369af833f26f4d6b50ddc
|
refs/heads/master
| 2023-09-05T01:30:11.202473
| 2021-10-09T08:35:15
| 2021-10-09T08:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,103
|
py
|
# coding=utf-8
# Copyright 2019 project LXRT
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
    """ Linearly increases over `warmup`, then follows a cosine decay.

    Bug fix: the original called ``torch.cos`` on a plain Python float;
    ``torch.cos`` requires a Tensor argument and raises ``TypeError``, so
    this schedule crashed as soon as warmup finished. ``math.cos`` is the
    correct scalar routine and keeps the return value a float. """
    if x < warmup:
        return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """ Linear ramp over the first `warmup` fraction of `t_total` training
    steps (as provided to BertAdam); a constant rate of 1.0 afterwards. """
    if x >= warmup:
        return 1.0
    return x/warmup
def warmup_linear(x, warmup=0.002):
    """ Triangular schedule: ramps linearly from 0 to 1 over the first
    `warmup` fraction of training, then decays linearly back to 0 at
    x == 1.0; clamped at 0 beyond the end of training. """
    if x < warmup:
        return x/warmup
    decay = (x-1.)/(warmup-1.)
    return max(decay, 0)
# Schedule name -> function; BertAdam looks these up via its `schedule` arg.
SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
}
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        # Validate hyper-parameters up front so a bad config fails fast.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        # Report the current scheduled learning rate for every parameter.
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    # step() has not run yet: no schedule position to report.
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        warned_for_t_total = False
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # LXRT: grad is clipped outside.
                # Add grad clipping
                # if group['max_grad_norm'] > 0:
                #     clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                next_m.mul_(beta1).add_(1 - beta1, grad)
                next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                update = next_m / (next_v.sqrt() + group['e'])
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    progress = state['step']/group['t_total']
                    lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
                    # warning for exceeding t_total (only active with warmup_linear
                    if group['schedule'] == "warmup_linear" and progress > 1. and not warned_for_t_total:
                        logger.warning(
                            "Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. "
                            "Please set 't_total' of {} correctly.".format(group['schedule'], lr_scheduled, self.__class__.__name__))
                        warned_for_t_total = True
                    # end warning
                else:
                    lr_scheduled = group['lr']
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                # NOTE: the step counter is incremented *after* the schedule
                # lookup above, so the first update uses progress 0.
                state['step'] += 1
                # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
                # No bias correction
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
        return loss
|
[
"842651507@qq.com"
] |
842651507@qq.com
|
b02ee6280076a1a7aba6bf14f701d852d0992453
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_81/296.py
|
97a24772277f1a41527323306b74fa44d458e410
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
#!/usr/bin/env python
# Code Jam solution: computes RPI (Rating Percentage Index) per team as
# 0.25*WP + 0.5*OWP + 0.25*OOWP from a round-robin results grid, where
# '.' marks an unplayed pairing and 0/1 a loss/win. Requires Python 3
# (relies on true division for the percentages).
import sys
sys.stdin = open('in.txt')        # redirect: read the downloaded input file
sys.stdout = open('out.txt', 'w') # redirect: write the answer file
cases = int(input())
for caseno in range(1, cases+1):
    teams = int(input())
    # table[i][j]: -1 unplayed, 1 win for i over j, 0 loss.
    table = [[-1 for i in range(teams)] for j in range(teams)]
    for n in range(teams):
        s = input()
        for i in range(teams):
            if s[i] != '.':
                num = int(s[i])
                table[n][i] = num
    wp = [-1 for i in range(teams)]    # winning percentage
    owp = [-1 for i in range(teams)]   # opponents' WP (excluding games vs i)
    oowp = [-1 for i in range(teams)]  # opponents' opponents' WP
    for i in range(teams):
        plays = 0
        wins = 0
        for j in table[i]:
            if j >= 0:
                plays += 1
                if j == 1:
                    wins += 1
        wp[i] = wins/plays
    for i in range(teams):
        wps = []
        t = table[i]
        for j in range(teams):
            # Opponent j's record, with games against team i removed.
            if t[j] >= 0 and i != j:
                plays = 0
                wins = 0
                for k in range(teams):
                    if table[j][k] >= 0 and k != i:
                        plays += 1
                        if table[j][k] == 1:
                            wins += 1
                wps.append(wins/plays)
        total = 0
        for w in wps:
            total += w
        owp[i] = total/(len(wps))
    for i in range(teams):
        owps = []
        for j in range(teams):
            if table[i][j] >= 0 and owp[j] >= 0:
                owps.append(owp[j])
        total = 0
        for j in owps:
            total += j
        assert len(owps) > 0
        oowp[i] = total / len(owps)
    print('Case #' + str(caseno) + ':')
    for i in range(teams):
        rpi = 0.25*wp[i] + 0.5*owp[i] + 0.25*oowp[i]
        print(rpi)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
69486fbac13fbdae54d2d9ea6909759c410de0e9
|
9252e8a6a0a042dcbf52ea744df8e708a83293ba
|
/Easy/Cells_with_Odd_Values_in_a_Matrix/Cells_with_Odd_Values_in_a_Matrix_optimized.py
|
53e96db1c231c792b39b89d5cad25ccb7fa500ec
|
[
"MIT"
] |
permissive
|
nitin3685/LeetCode_Solutions
|
e50a40b8202154d9a60ec4ec5f1673042e5c2a50
|
ab920e96cd27e0b2c3c895ce20853edceef0cce8
|
refs/heads/master
| 2020-09-10T16:01:49.202909
| 2020-01-07T06:54:40
| 2020-01-07T06:54:40
| 221,750,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
#Weird algorithm for matrix multiplication. just addition will produce result matrix
# Row/column flip counters suffice: cell (i, j) ends up odd exactly when
# row[i] + col[j] is odd, so the n x m matrix never needs to be built.
class Solution:
    def oddCells(self, n: int, m: int, indices: List[List[int]]) -> int:
        row_flips = [0] * n
        col_flips = [0] * m
        for r, c in indices:
            row_flips[r] += 1
            col_flips[c] += 1
        return sum((rf + cf) % 2 for rf in row_flips for cf in col_flips)
|
[
"noreply@github.com"
] |
nitin3685.noreply@github.com
|
38f849d9166e3168b74edf5dba5e77dd822e6d8f
|
0c8a267966edd260177106beb04daad8622ba07f
|
/outliers/outlier_removal_regression.py
|
6dacb7e863edc2c7f38226f50ce916c150858399
|
[] |
no_license
|
BrianSipple/Machine_Learning
|
9665cec5ba9cc94e0dc06db346ddf18cff19d6a6
|
f0848183dba64000ff26a32ec45e97531d2bc758
|
refs/heads/master
| 2021-01-19T00:44:21.123062
| 2015-02-09T04:17:40
| 2015-02-09T04:17:40
| 29,520,532
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
#!/usr/bin/python
import random
import numpy
import matplotlib.pyplot as plt
import pickle
from outlier_cleaner import clean_outliers
### load up some practice data with outliers in it
# NOTE(review): Python 2 script (print statements, text-mode pickle reads,
# sklearn.cross_validation which was removed in modern scikit-learn).
ages = pickle.load( open("practice_outliers_ages.pkl", "r") )
net_worths = pickle.load( open("practice_outliers_net_worths.pkl", "r") )
### ages and net_worths need to be reshaped into 2D numpy arrays
### second argument of reshape command is a tuple of integers: (n_rows, n_columns)
### by convention, n_rows is the number of data points
### and n_columns is the number of features
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
from sklearn.cross_validation import train_test_split
ages_train, ages_test, net_worths_train, net_worths_test = train_test_split(ages, net_worths, test_size=0.1, random_state=42)
### fill in a regression here!  Name the regression object reg so that
### the plotting code below works, and you can see what your regression looks like
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(ages_train, net_worths_train)
print "Initial slope: {}".format(reg.coef_[0])
print "Initial r-square: {}".format(reg.score(ages_test, net_worths_test))
try:
    plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
    pass
plt.scatter(ages, net_worths)
plt.show()
### identify and remove the most outlier-y points
try:
    cleaned_data = clean_outliers(
        reg.predict(ages_train),
        ages_train,
        net_worths_train
    )
except NameError as e:
    # NOTE(review): if this branch runs, `cleaned_data` is never bound and
    # the len() check below raises NameError itself — confirm intended.
    print e
    print "your regression object doesn't exist, or isn't name reg"
    print "can't make predictions to use in identifying outliers"
### only run this code if clean_outliers is returning data
if len(cleaned_data) > 0:
    new_ages, new_net_worths, errors = zip(*cleaned_data)
    new_ages = numpy.reshape( numpy.array(new_ages), (len(new_ages), 1))
    new_net_worths = numpy.reshape( numpy.array(new_net_worths), (len(new_net_worths), 1))
    ### refit your cleaned data!
    try:
        reg.fit(new_ages, new_net_worths)
        plt.plot(new_ages, reg.predict(new_ages), color="green")
    except NameError:
        print "you don't seem to have regression imported/created,"
        print "   or else your regression object isn't named reg"
        print "   either way, only draw the scatter plot of the cleaned data"
    plt.scatter(new_ages, new_net_worths)
    plt.xlabel("Ages after outlier cleaning")
    plt.ylabel("Net worths after outlier cleaning")
    plt.show()
    new_ages_train, new_ages_test, new_net_worths_train, new_net_worths_test = train_test_split(
        new_ages,
        new_net_worths,
        test_size=0.1,
        random_state=42
    )
    #reg.fit(new_ages_train, new_net_worths_train)
    # NOTE(review): the post-cleaning r-square is scored on the *original*
    # ages_test/net_worths_test split (not the new_* split built just above)
    # — verify this comparison against the initial score is what was meant.
    print "New slope after cleaning: {}".format(reg.coef_[0])
    print "New r-square after cleaning: {}".format(reg.score(ages_test, net_worths_test))
else:
    print "outlierCleaner() is returning an empty list, no refitting to be done"
|
[
"Bsipple57@gmail.com"
] |
Bsipple57@gmail.com
|
8a90607f501666c4167d56b1318788ad7de2622b
|
112882b8d6c5071e7d2610c595bfca9210c79a0a
|
/Python/leetcode.059.spiral-matrix-ii.py
|
e94e966ba8a1993a1e5de4cb00dc0482783c04d3
|
[
"MIT"
] |
permissive
|
tedye/leetcode
|
193b1900d98e35d5c402013cbe3bc993d0235da2
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
refs/heads/master
| 2021-01-01T19:06:06.408135
| 2015-10-24T06:44:40
| 2015-10-24T06:44:40
| 41,804,923
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
class Solution(object):
    def generateMatrix(self, n):
        """
        :type n: int
        :rtype: List[List[int]]

        Fill an n x n grid with 1..n*n in clockwise spiral order, one
        ring at a time: top row left-to-right, right column downward,
        bottom row right-to-left, left column upward.
        """
        if n <= 0:
            return []
        grid = [[0] * n for _ in range(n)]
        top, bottom, left, right = 0, n - 1, 0, n - 1
        value = 1
        while top <= bottom and left <= right:
            # Top edge, left -> right.
            for col in range(left, right + 1):
                grid[top][col] = value
                value += 1
            top += 1
            # Right edge, top -> bottom.
            for row in range(top, bottom + 1):
                grid[row][right] = value
                value += 1
            right -= 1
            # Bottom edge, right -> left (guard against a single-row ring).
            if top <= bottom:
                for col in range(right, left - 1, -1):
                    grid[bottom][col] = value
                    value += 1
                bottom -= 1
            # Left edge, bottom -> top (guard against a single-column ring).
            if left <= right:
                for row in range(bottom, top - 1, -1):
                    grid[row][left] = value
                    value += 1
                left += 1
        return grid
|
[
"tedye@bu.edu"
] |
tedye@bu.edu
|
15900d19d8a64750afdcbbffb1afc39a78fc04ed
|
4c8152b5abdefa8fe44cb4a423985b18a3175542
|
/Books/wxpython-28-application-development-cookbook/1780_Code/1780_12_Code/05/osxapp.py
|
b79b77118dfeb20192ff0a56c1e0a769787b40e0
|
[] |
no_license
|
mcmoralesr/Learning.Python
|
c5ed943a1e4eed774c1b626c52c450b48c1062b6
|
8ea6222d260989c6973d9a0cc494ff659a78ade6
|
refs/heads/master
| 2020-06-28T04:22:53.970613
| 2015-02-12T10:23:28
| 2015-02-12T10:23:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
# Chapter 12: Application Infrastructure,
# Building and Managing Applications for Distribution
# Recipe 5: Optimizing for OSX
#
import wx
import sys
class OSXApp(wx.App):
    """wx application object with macOS-specific behavior enabled."""

    def OnInit(self):
        """Create and show the main frame; return True to start the app."""
        # Enable native spell checking and right
        # click menu for Mac TextCtrl's
        if wx.Platform == '__WXMAC__':
            spellcheck = "mac.textcontrol-use-spell-checker"
            wx.SystemOptions.SetOptionInt(spellcheck, 1)
        self.frame = OSXFrame(None,
                              title="Optimize for OSX")
        self.frame.Show()
        return True

    def MacReopenApp(self):
        # Called by wx when the Dock icon is clicked while the app is already
        # running: bring the existing top window back to the front instead of
        # doing nothing.
        self.GetTopWindow().Raise()
class OSXFrame(wx.Frame):
    """Main application window"""

    def __init__(self, *args, **kwargs):
        super(OSXFrame, self).__init__(*args, **kwargs)
        # Attributes
        self.textctrl = wx.TextCtrl(self,
                                    style=wx.TE_MULTILINE)
        # Setup Menus.  Appending stock IDs (wx.ID_OPEN, wx.ID_EXIT, ...)
        # without a label lets wx supply the platform-standard text and
        # keyboard shortcuts; on macOS, Preferences/About/Exit are relocated
        # to the application menu automatically.
        mb = wx.MenuBar()
        fmenu = wx.Menu()
        fmenu.Append(wx.ID_OPEN)
        fmenu.Append(wx.ID_EXIT)
        mb.Append(fmenu, "&File")
        emenu = wx.Menu()
        emenu.Append(wx.ID_COPY)
        emenu.Append(wx.ID_PREFERENCES)
        mb.Append(emenu, "&Edit")
        hmenu = wx.Menu()
        hmenu.Append(wx.NewId(), "&Online Help...")
        hmenu.Append(wx.ID_ABOUT, "&About...")
        mb.Append(hmenu, "&Help")
        if wx.Platform == '__WXMAC__':
            # Make sure we don't get duplicate
            # Help menu since we used non standard name
            app = wx.GetApp()
            app.SetMacHelpMenuTitleName("&Help")
        self.SetMenuBar(mb)
        self.SetInitialSize()


if __name__ == '__main__':
    # False: don't redirect stdout/stderr to a wx window.
    app = OSXApp(False)
    app.MainLoop()
|
[
"sonhuytran@gmail.com"
] |
sonhuytran@gmail.com
|
3ed8cf2c724d861b36c3e9fce3019c5683c8331a
|
27089ed5ea5f81949a6d62d08465ed92d9194fdd
|
/allennlp/tests/data/dataset_readers/universal_dependencies_multilingual_dataset_reader_test.py
|
8bf51a05a0ccc44176c39258c653be1662a0c991
|
[
"Apache-2.0"
] |
permissive
|
Whu-wxy/allennlp
|
5c87bd0916cfea51ce7ceef45b9363579d19b670
|
c863900e3e1fe7be540b9a0632a7a032491fc3ab
|
refs/heads/master
| 2021-06-27T19:34:04.720649
| 2019-09-10T15:21:40
| 2019-09-10T15:21:40
| 168,892,873
| 6
| 3
|
Apache-2.0
| 2019-08-20T13:08:37
| 2019-02-03T00:17:13
|
Python
|
UTF-8
|
Python
| false
| false
| 6,961
|
py
|
# pylint: disable=no-self-use,invalid-name
from allennlp.data.dataset_readers import UniversalDependenciesMultiLangDatasetReader
from allennlp.common.testing import AllenNlpTestCase
class TestUniversalDependenciesMultilangDatasetReader(AllenNlpTestCase):
    # Glob over one fixture file per language (es / fr / it).
    data_path = AllenNlpTestCase.FIXTURES_ROOT / "data" / "dependencies_multilang" / "*"

    def check_two_instances(self, instance1, instance2):
        """Verify two consecutive instances share a language and match the
        expected fixture content for that language; return the language code."""
        fields1, fields2 = instance1.fields, instance2.fields
        assert fields1['metadata'].metadata['lang'] == fields2['metadata'].metadata['lang']
        lang = fields1['metadata'].metadata['lang']
        if lang == 'fr':
            assert fields1['metadata'].metadata['lang'] == 'fr'
            assert [t.text for t in fields1["words"].tokens] == ['Ses', 'habitants', 'sont', 'appelés', 'les',
                                                                 'Paydrets',
                                                                 'et', 'les', 'Paydrètes', ';']
            assert fields1["pos_tags"].labels == ['DET', 'NOUN', 'VERB', 'VERB', 'DET',
                                                  'NOUN', 'CONJ', 'DET', 'NOUN', '.']
            assert fields1["head_tags"].labels == ['poss', 'nsubjpass', 'auxpass', 'ROOT', 'det', 'attr',
                                                   'cc', 'det', 'conj', 'p']
            assert fields1["head_indices"].labels == [2, 4, 4, 0, 6, 4, 6, 9, 6, 4]
            assert fields2['metadata'].metadata['lang'] == 'fr'
            assert [t.text for t in fields2["words"].tokens] == ['Cette', 'tour', 'de', 'a',
                                                                 'été', 'achevée', 'en', '1962', '.']
            assert fields2["pos_tags"].labels == ['DET', 'NOUN', 'ADP', 'VERB', 'VERB',
                                                  'VERB', 'ADP', 'NUM', '.']
            assert fields2["head_tags"].labels == ['det', 'nsubjpass', 'adpmod', 'aux', 'auxpass', 'ROOT',
                                                   'adpmod', 'adpobj', 'p']
            assert fields2["head_indices"].labels == [2, 6, 2, 6, 6, 0, 6, 7, 6]
        elif lang == 'es':
            assert [t.text for t in fields1["words"].tokens] == ['Aclarando', 'hacia', 'todo', 'el', 'mundo',
                                                                 'Valderrama', 'Y', 'Eduardo', 'Son', 'La',
                                                                 'Misma', 'Persona', '.']
            assert fields1["pos_tags"].labels == ['VERB', 'ADP', 'DET', 'DET', 'NOUN', 'NOUN', 'CONJ',
                                                  'NOUN', 'NOUN', 'DET', 'ADJ', 'NOUN', '.']
            assert fields1["head_tags"].labels == ['ROOT', 'adpmod', 'det', 'det', 'adpobj', 'nsubj', 'cc', 'conj',
                                                   'xcomp',
                                                   'det', 'amod', 'attr', 'p']
            assert fields1["head_indices"].labels == [0, 1, 5, 5, 2, 9, 6, 6, 1, 12, 12, 9, 1]
            assert [t.text for t in fields2["words"].tokens] == ['Es', 'un', 'bar', 'disfrazado', 'de',
                                                                 'restaurante', 'la', 'comida', 'esta',
                                                                 'demasiado', 'salada', '.']
            assert fields2["pos_tags"].labels == ['VERB', 'DET', 'NOUN', 'VERB', 'ADP', 'NOUN',
                                                  'DET', 'NOUN', 'VERB', 'PRON', 'ADJ', '.']
            assert fields2["head_tags"].labels == ['ROOT', 'det', 'attr', 'partmod', 'adpmod', 'adpobj',
                                                   'det', 'nsubj', 'parataxis', 'nmod', 'acomp', 'p']
            assert fields2["head_indices"].labels == [0, 3, 1, 3, 4, 5, 8, 9, 1, 11, 9, 1]
        elif lang == 'it':
            assert fields1['metadata'].metadata['lang'] == 'it'
            assert [t.text for t in fields1["words"].tokens] == ['Inconsueto', 'allarme', 'alla', 'Tate',
                                                                 'Gallery', ':']
            assert fields1["pos_tags"].labels == ['ADJ', 'NOUN', 'ADP', 'NOUN', 'NOUN', '.']
            assert fields1["head_tags"].labels == ['amod', 'ROOT', 'adpmod', 'dep', 'adpobj', 'p']
            assert fields1["head_indices"].labels == [2, 0, 2, 5, 3, 2]
            assert fields2['metadata'].metadata['lang'] == 'it'
            assert [t.text for t in fields2["words"].tokens] == ['Hamad', 'Butt', 'è', 'morto', 'nel', '1994',
                                                                 'a', '32', 'anni', '.']
            assert fields2["pos_tags"].labels == ['NOUN', 'NOUN', 'VERB', 'VERB', 'ADP',
                                                  'NUM', 'ADP', 'NUM', 'NOUN', '.']
            assert fields2["head_tags"].labels == ['dep', 'nsubj', 'aux', 'ROOT', 'adpmod', 'adpobj',
                                                   'adpmod', 'num', 'adpobj', 'p']
            assert fields2["head_indices"].labels == [2, 4, 4, 0, 4, 5, 4, 9, 7, 4]
        return lang

    def test_iterate_once_per_file_when_first_pass_for_vocab_is_true(self):
        """In vocab-building mode the reader yields each file exactly once:
        3 languages x 2 sentences = 6 instances, in per-file pairs."""
        reader = UniversalDependenciesMultiLangDatasetReader(
            languages=['es', 'fr', 'it'], is_first_pass_for_vocab=True)
        instances = list(reader.read(str(self.data_path)))
        assert len(instances) == 6
        processed_langs = []
        processed_langs.append(self.check_two_instances(instances[0], instances[1]))
        processed_langs.append(self.check_two_instances(instances[2], instances[3]))
        processed_langs.append(self.check_two_instances(instances[4], instances[5]))
        assert 'es' in processed_langs and 'fr' in processed_langs and 'it' in processed_langs

    def test_iterate_forever_when_first_pass_for_vocab_is_false(self):
        '''
        Note: assumes that each data file contains no more than 20 trees.
        '''
        reader = UniversalDependenciesMultiLangDatasetReader(languages=['es', 'fr', 'it'],
                                                             is_first_pass_for_vocab=False,
                                                             instances_per_file=1,
                                                             lazy=True)
        # Count instances per language and bail out as soon as any language
        # produces more trees than a single file can contain — proof that the
        # reader cycled back to a file it had already consumed.
        counter_es, counter_fr, counter_it = 0, 0, 0
        for instance in reader.read(str(self.data_path)):
            lang = instance.fields['metadata'].metadata['lang']
            if lang == 'es':
                counter_es += 1
                if counter_es > 20:
                    break
            if lang == 'fr':
                counter_fr += 1
                if counter_fr > 20:
                    break
            if lang == 'it':
                counter_it += 1
                if counter_it > 20:
                    break
        # Asserting that the reader didn't stop after reading the three files once.
        assert (counter_es > 20 or counter_fr > 20 or counter_it > 20)
|
[
"mattg@allenai.org"
] |
mattg@allenai.org
|
f5b3c461ce5399f255d6a71987c9096f64d5f927
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/_nticks.py
|
ed0d7d4831c51818983ca85921168266da541e9f
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``histogram2dcontour.colorbar.nticks``."""

    def __init__(
        self, plotly_name="nticks", parent_name="histogram2dcontour.colorbar", **kwargs
    ):
        # Pull overridable metadata out of kwargs before delegating to the
        # generic integer validator.
        edit_type = kwargs.pop("edit_type", "colorbars")
        minimum = kwargs.pop("min", 0)
        super(NticksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs
        )
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
55c142fb77a38d5d16184d5d8309e2e0f55df2f5
|
7dd25a39d91d1f03791eeb2f39c8bdf825b24601
|
/test/input/only_describe.py
|
d2fda863179b3b0133448712f18e512f546add7e
|
[] |
no_license
|
rlgomes/pocha
|
a281736f35e6d53b0c56f5bca946bd494b0bb6cf
|
8be091fcf73b57e8c7e9efe17e8452d639e18dd7
|
refs/heads/master
| 2022-09-18T19:34:56.991444
| 2019-06-03T14:26:09
| 2019-06-03T14:26:09
| 62,520,589
| 51
| 12
| null | 2022-07-06T19:22:06
| 2016-07-04T00:20:44
|
Python
|
UTF-8
|
Python
| false
| false
| 326
|
py
|
from pocha import describe, it
# Fixture: two pocha suites where only the second is marked only=True, so a
# runner honoring `only` must execute just 'third it' and 'fourth it'.
@describe('first describe')
def describe1():
    @it('first it')
    def _():
        pass

    @it('second it')
    def _():
        pass


# NOTE(review): this function reuses the name `describe1`; pocha appears to
# register suites via the decorator's description string, so the duplicate
# Python name looks harmless — confirm it is intentional in this fixture.
@describe('second describe', only=True)
def describe1():
    @it('third it')
    def _():
        pass

    @it('fourth it')
    def _():
        pass
|
[
"rodneygomes@gmail.com"
] |
rodneygomes@gmail.com
|
d8a6a59cd6d3c2b41ee795ed6735211d1f58ba41
|
031d4491fcd2a9620e72710e000bae8afd92bbcb
|
/custom_components/xiaomi_miot_raw/water_heater.py
|
733f78a5484764a0a2ca8a518fa2a35093cce782
|
[
"Apache-2.0"
] |
permissive
|
hzh-hzh/xiaomi_miot_raw
|
76717aa4803cf4e006d39bc685fd225724692292
|
8f8de5cc6f0eeae55770ec08cb3ff9ebcb75c422
|
refs/heads/master
| 2023-07-12T19:13:11.206861
| 2021-08-13T14:46:31
| 2021-08-13T14:46:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,698
|
py
|
import asyncio
import json
import logging
from collections import OrderedDict
from datetime import timedelta
from functools import partial
from typing import Optional
import async_timeout
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from aiohttp import ClientSession
from homeassistant.components import water_heater
from homeassistant.components.water_heater import (
SUPPORT_AWAY_MODE,
SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE,
WaterHeaterEntity,
PLATFORM_SCHEMA,
)
from homeassistant.const import *
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import aiohttp_client
from miio.exceptions import DeviceException
from .deps.miio_new import MiotDevice
import copy
from . import GenericMiotDevice, ToggleableMiotDevice, dev_info, async_generic_setup_platform
from .climate import MiotClimate
from .deps.const import (
DOMAIN,
CONF_UPDATE_INSTANT,
CONF_MAPPING,
CONF_CONTROL_PARAMS,
CONF_CLOUD,
CONF_MODEL,
ATTR_STATE_VALUE,
ATTR_MODEL,
ATTR_FIRMWARE_VERSION,
ATTR_HARDWARE_VERSION,
SCHEMA,
MAP,
DUMMY_IP,
DUMMY_TOKEN,
)
TYPE = 'water_heater'
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Generic MIoT " + TYPE
DATA_KEY = TYPE + '.' + DOMAIN
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
SCHEMA
)
SCAN_INTERVAL = timedelta(seconds=10)
# pylint: disable=unused-argument
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the MIoT water heater platform from configuration.

    Delegates to the shared generic setup helper, registering
    ``MiotWaterHeater`` as the default entity class for this platform.

    Fix: dropped the ``@asyncio.coroutine`` decorator — it is redundant on a
    native ``async def`` function, was deprecated since Python 3.8, and was
    removed entirely in Python 3.11.
    """
    await async_generic_setup_platform(
        hass,
        config,
        async_add_devices,
        discovery_info,
        TYPE,
        {'default': MiotWaterHeater},
    )
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the platform from a config entry, preferring the cached config."""
    cached_configs = hass.data[DOMAIN]['configs']
    fallback = dict(config_entry.data)
    config = cached_configs.get(config_entry.entry_id, fallback)
    await async_setup_platform(hass, config, async_add_entities)


async def async_unload_entry(hass, config_entry, async_add_entities):
    """Unload a config entry; nothing platform-specific to tear down."""
    return True
class MiotWaterHeater(ToggleableMiotDevice, WaterHeaterEntity):
    """Generic MIoT water heater entity.

    Exposes a toggleable MIoT device through Home Assistant's water_heater
    platform: on/off plus device-specific operation modes, and target
    temperature when the device mapping provides one.
    """

    def __init__(self, device, config, device_info, hass, main_mi_type):
        ToggleableMiotDevice.__init__(self, device, config, device_info, hass, main_mi_type)
        self._target_temperature = None
        self._unit_of_measurement = TEMP_CELSIUS
        self._away = None
        self._current_operation = None
        self._current_temperature = None

    @property
    def supported_features(self):
        """Return the list of supported features."""
        s = SUPPORT_OPERATION_MODE
        # Target temperature only when the device mapping exposes it.
        if self._did_prefix + 'target_temperature' in self._mapping:
            s |= SUPPORT_TARGET_TEMPERATURE
        return s

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return self._current_operation

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    @property
    def min_temp(self):
        """Return the lower bound of the target temperature range."""
        return self._ctrl_params['target_temperature']['value_range'][0]

    @property
    def max_temp(self):
        """Return the upper bound of the target temperature range."""
        # Fix: docstring previously said "lowbound" (copy-paste from min_temp).
        return self._ctrl_params['target_temperature']['value_range'][1]

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        # "on"/"off" when the device has a switch, plus any named device modes.
        return (["on", "off"] if self._did_prefix + 'switch_status' in self._mapping else []) + (list(self._ctrl_params['mode'].keys()) if 'mode' in self._ctrl_params else [])

    async def async_set_temperature(self, **kwargs):
        """Set new target temperatures."""
        if kwargs.get(ATTR_TEMPERATURE) is not None:
            result = await self.set_property_new(self._did_prefix + "target_temperature", kwargs.get(ATTR_TEMPERATURE))
            if result:
                self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
            self.async_write_ha_state()

    async def async_set_operation_mode(self, operation_mode):
        """Set new operation mode."""
        if operation_mode == 'on':
            await self.async_turn_on()
            if self._state == True:
                self._current_operation = 'on'
        elif operation_mode == 'off':
            await self.async_turn_off()
            if self._state == False:
                self._current_operation = 'off'
        else:
            result = await self.set_property_new(self._did_prefix + "mode", self._ctrl_params['mode'][operation_mode])
            if result:
                self._current_operation = operation_mode
        self.async_write_ha_state()

    def _handle_platform_specific_attrs(self):
        """Refresh cached temperatures and operation mode from device state.

        Each read is best-effort: attributes may be absent while the device
        is offline or before the first poll.
        """
        super()._handle_platform_specific_attrs()
        # Fix: replaced bare `except:` clauses (which also swallow
        # KeyboardInterrupt/SystemExit) with `except Exception:`.
        try:
            self._target_temperature = self._state_attrs.get(self._did_prefix + 'target_temperature')
        except Exception:
            pass
        try:
            self._current_temperature = self._state_attrs.get(self._did_prefix + 'temperature')
        except Exception:
            pass
        try:
            o = self._state_attrs.get(self._did_prefix + 'mode')
            if o in ('on', 'off'):
                self._current_operation = o
            elif o is not None:
                # Fix: the looked-up mode name was previously computed and
                # discarded; store it so the entity reflects the
                # device-reported mode.
                self._current_operation = self.get_key_by_value(self._ctrl_params['mode'], o)
        except Exception:
            pass
|
[
"whyuu@qq.com"
] |
whyuu@qq.com
|
c238b36926219fbf87c188545260cdff53b761e8
|
4e8674d7c83254aba7f2d327f16d5ad202a189b6
|
/src/select_centered_text.py
|
b072d911fa1e942f3c5aedc591f19b9b7d3468da
|
[] |
no_license
|
raysmith619/dots
|
0f5e34b17675cfb0903a20eda86493d37676b500
|
c44ff3ebf57ec73c6fd8b7898cbc186668f83915
|
refs/heads/master
| 2021-06-17T02:34:48.850425
| 2021-04-27T13:54:24
| 2021-04-27T13:54:24
| 205,397,035
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
# select_centered_text.py
""" Centered Text near/within Part
"""
from pip._vendor.distlib import resources
from PIL._imaging import display
class CenteredText:
    """ Contents for text placed inside a region (centered on a part).

    Holds the text, optional placement/appearance attributes, and the
    canvas tags created when the text is rendered live.
    """
    def __init__(self, part, text, x=None, y=None,
                 font_name=None,
                 color=None, color_bg=None,
                 height=None, width=None):
        """ Setup instance of centered text
        :part: in which centered text is placed
        :text: text string to place
        :x:, :y: optional placement coordinates
        :font_name: font name
        :color:, :color_bg: foreground / background colors
        :height:, :width: optional size of the text area
        """
        self.part = part
        self.text = text
        self.x = x
        self.y = y
        self.font_name = font_name
        self.color = color
        self.color_bg = color_bg
        self.height = height
        self.width = width
        self.text_tag = None        # Canvas text tag, if live
        self.text_bg_tag = None     # Canvas text background
        self.image = None   # http://effbot.org/pyfaq/why-do-my-tkinter-images-not-appear.htm

    def __str__(self):
        """ Centered Text description
        """
        st = self.text
        if self.x is not None or self.y is not None:
            # Fix: report a missing coordinate as 0 WITHOUT mutating
            # self.x / self.y — __str__ previously overwrote them as a
            # side effect of merely printing the object.
            x = 0 if self.x is None else self.x
            y = 0 if self.y is None else self.y
            st += f" at:x={x} y={y}"
        if self.font_name is not None:
            st += f" font={self.font_name}"
        if self.color is not None:
            st += " %s" % self.color
        if self.color_bg is not None:
            st += " bg=%s" % self.color_bg
        if self.height is not None:
            st += " height=%d" % self.height
        if self.width is not None:
            # Fix: previously formatted self.height here, so the reported
            # width was wrong whenever width != height.
            st += " width=%d" % self.width
        if self.text_tag is not None:
            st += " text_tag=%d" % self.text_tag
        return st

    def delete(self):
        """ Delete centered text resources (remove live canvas items).
        """
        if self.text_tag is not None:
            self.part.sel_area.canvas.delete(self.text_tag)
            self.text_tag = None
        if self.text_bg_tag is not None:
            self.part.sel_area.canvas.delete(self.text_bg_tag)
            self.text_bg_tag = None

    def destroy(self):
        """ Remove object from display
        """
        self.delete()
|
[
"noreply@github.com"
] |
raysmith619.noreply@github.com
|
7939e6ea94738d2a078b9885647f393ef60e84d9
|
e982ad81d18e3a983756b4c90311b007b9d5e276
|
/pyspec/cui/api.py
|
6ff27e71232a9ebc13a3583cc49048a8f6fdffda
|
[
"MIT"
] |
permissive
|
jyotijaya/pyspec
|
2ca4428c3c9924154f7467edbdc1d8fddd59a817
|
ae7a4de39beb3cf2e0838b6c3a9ef73d082445eb
|
refs/heads/master
| 2022-12-27T20:42:15.818388
| 2020-10-01T11:50:19
| 2020-10-01T11:50:19
| 300,260,536
| 0
| 0
|
NOASSERTION
| 2020-10-01T11:49:40
| 2020-10-01T11:49:39
| null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
# -*- coding: ascii -*-
"""PySpec extension api.
This module enable following extension features:
- Add aspect to modules, classes, methods
- Add Add-in
"""
__pyspec = 1
import os
import sys
from pyspec.api import (ModuleAspect, ClassAspect, MethodAspect,
EventHandlerRegister)
def entry_point(method):
    """Notify the target method is a special function of a pyspec extension.

    Method name must be in the following list:
       add_trace()   : not implement yet
       add_profile() : not implement yet

    If the pyspec.addin module has not been loaded yet, this is a no-op.
    """
    addin_module = sys.modules.get("pyspec.addin")
    if addin_module is not None:
        addin_module.AddinLoaderBase.add_entry_point(method)
def event_handler(event_type):
    """Return an EventHandlerRegister for a supported event type.

    :param event_type: one of "init_optparse", "read_option",
        "on_run_test", "on_finish_test"
    :raises ValueError: if event_type is not a supported event.
    """
    if event_type not in ("init_optparse", "read_option", "on_run_test",
                          "on_finish_test"):
        # Fix: the ValueError was previously constructed but never raised,
        # so invalid event types were silently accepted.
        raise ValueError("Invalid event type: %s" % event_type)
    return EventHandlerRegister(event_type)
|
[
"yoshiki@shibu.jp"
] |
yoshiki@shibu.jp
|
d48f2013149fc23461f3610f581057cca53e282a
|
0fcc6353edee4eed7a1ea4b1c89a00bfcf03e851
|
/PIP/PIPOne.py
|
1f984b5b25866edabb8a0c8eb992db91bdd80bb5
|
[] |
no_license
|
GANESH0080/Python-Practice-Again
|
81d8048c23d338a99bb17fa86a9f87b3057bfe52
|
6565911d14a22d0f33a41b417026c31a0a066be5
|
refs/heads/master
| 2020-09-20T03:40:45.462869
| 2019-11-27T07:19:24
| 2019-11-27T07:19:24
| 224,368,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# A package contains all the files you need for a module.
# Modules are Python code libraries you can include in your project.
import camelcase

# NOTE(review): `camelcase` is a third-party package; CamelCase().hump()
# presumably capitalizes each word of the string — confirm against the
# package's documentation.
c = camelcase.CamelCase()
txt = "hello world sds"
print(c.hump(txt))
|
[
"ganusalunkhe@gmail.com"
] |
ganusalunkhe@gmail.com
|
a1534181fc0cd816f25bc60d6a459e6c443e1409
|
d59bad348c88026e444c084e6e68733bb0211bc2
|
/poo/todo_v1.py
|
8f1eccc7c345536890820623508af901ad2a3a7e
|
[] |
no_license
|
dersonf/udemy-python
|
f96ec883decb21a68233b2e158c82db1c8878c7a
|
92471c607d8324902902774284f7ca81d2f25888
|
refs/heads/master
| 2022-09-25T00:18:49.833210
| 2020-06-05T18:18:38
| 2020-06-05T18:18:38
| 262,049,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/python3
from datetime import datetime
class Tarefa:
    """A to-do item: a description, a done flag, and a creation timestamp."""

    def __init__(self, descricao):
        self.descricao = descricao     # task description
        self.feito = False             # completion flag, starts pending
        self.criacao = datetime.now()  # creation time

    def concluir(self):
        """Mark the task as done."""
        self.feito = True

    def __str__(self):
        suffix = ' (Concluída)' if self.feito else ''
        return self.descricao + suffix
def main():
    """Build a sample task list, complete one task, and print them all."""
    casa = [
        Tarefa('Passar roupa'),
        Tarefa('Lavar prato'),
        Tarefa('Lavar roupa'),
    ]
    # Mark the dish-washing task done.
    for tarefa in casa:
        if tarefa.descricao == 'Lavar prato':
            tarefa.concluir()
    for tarefa in casa:
        print(f'- {tarefa}')


if __name__ == '__main__':
    main()
|
[
"anderson@ferneda.com.br"
] |
anderson@ferneda.com.br
|
d15e95600618fea0344edfd9dd5bab824ec4c949
|
5517b91a4be684d822d35a6c2bd8d21f1098aebc
|
/tensorlayer/core/engine/layer.py
|
0064ab73e3bfb5f364568352e0f5f7aa2181fbd7
|
[
"BSD-2-Clause"
] |
permissive
|
zhengruiguo/dragon
|
785f52a2eaba0899b2e598a4365adf1b43e07b38
|
3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810
|
refs/heads/master
| 2023-05-31T22:48:42.157381
| 2021-06-29T01:59:24
| 2021-06-29T01:59:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,342
|
py
|
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
# Copyright (c) 2016-2018, The TensorLayer contributors.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.core.util import nest
from dragon.vm.tensorlayer.core import activations
from dragon.vm.tensorlayer.core.engine import module
from dragon.vm.tensorlayer.core.engine import node
class Layer(module.Module):
    """The base layer abstraction of a neural network.

    It should be subclassed when implementing new types of layers:

    ```python
    class MyLayer(tl.layers.Layer):
        def __init__(name=None, act=None):
            super(MyLayer, self).__init__(name=name, act=act)
    ```
    """

    def __init__(self, name=None, act=None, *args, **kwargs):
        """Create a new ``Layer``.

        Parameters
        ----------
        name : str, optional.
            The layer name.
        act : str or function, optional
            The optional activation.

        """
        super(Layer, self).__init__(name=name)
        # Lazily built: weights are created on the first __call__.
        self._built = False
        # Graph bookkeeping: one LayerNode per invocation until fixed.
        self._nodes = []
        self._nodes_fixed = False
        self.act = activations.get(act)

    @property
    def all_weights(self):
        """Return all the weights, both trainable and non-trainable.

        Returns
        -------
        Sequence[dragon.Tensor]
            The weights sequence.

        """
        return self.trainable_weights + self.nontrainable_weights

    @property
    def name(self):
        """Return the layer name.

        Returns
        -------
        str
            The layer name.

        """
        return super(Layer, self).name

    @property
    def nontrainable_weights(self):
        """Return the non-trainable weights.

        Returns
        -------
        Sequence[dragon.Tensor]
            The weights sequence.

        """
        return self._nontrainable_weights

    @property
    def trainable_weights(self):
        """Return the trainable weights.

        Returns
        -------
        Sequence[dragon.Tensor]
            The weights sequence.

        """
        return self._trainable_weights

    @module.Module.training.setter
    def training(self, mode):
        """Set the training mode.

        Parameters
        ----------
        mode : bool
            ``True`` for training otherwise evaluation.

        """
        self._training = mode

    def build(self, input_shapes):
        """Method to define the weights.

        Parameters
        ----------
        input_shapes : Sequence[Sequence[int]]
            The shape of inputs.

        """
        self._built = True

    def forward(self, inputs):
        """Method to define the forward operations.

        Subclasses override this; the base implementation does nothing
        (returns ``None``).

        Parameters
        ----------
        inputs : Sequence[dragon.Tensor]
            The inputs.

        Returns
        -------
        Sequence[dragon.Tensor]
            The outputs.

        """
        pass

    def _add_node(self, inputs, outputs):
        """Add a layer node for inputs and outputs.

        Parameters
        ----------
        inputs : Sequence[dragon.Tensor]
            The input tensors.
        outputs : Sequence[dragon.Tensor]
            The output tensors.

        """
        inputs = nest.flatten(inputs)
        outputs = nest.flatten(outputs)
        # Each input tensor may carry (producer_node, output_index) in _info;
        # tensors without it yield [None, None].
        input_info = [getattr(e, '_info', [None, None]) for e in inputs]
        self._nodes.append(
            node.LayerNode(
                self,
                node_index=len(self._nodes),
                in_nodes=[e[0] for e in input_info],
                in_tensor_idxes=[e[1] for e in input_info],
                in_tensors=inputs,
                out_tensors=outputs,
            )
        )
        # Tag outputs with their producing node so downstream layers can
        # reconstruct the graph.
        for idx, tensor in enumerate(outputs):
            tensor._info = (self._nodes[-1], idx)

    def _fix_nodes(self):
        """Fix layer nodes to stop growing."""
        self._nodes_fixed = True

    def __call__(self, inputs, **kwargs):
        """The preprocessor for ``self.forward(...)``."""
        # Maybe build the layer at the first time.
        if not self._built:
            input_list = nest.flatten(inputs)
            input_shapes = None
            if all(hasattr(x, 'shape') for x in input_list):
                input_shapes = [x.shape for x in input_list]
                if not nest.is_sequence(inputs):
                    input_shapes = input_shapes[0]
            self.build(input_shapes)
        # Call the forward implementation to get outputs.
        outputs = self.forward(inputs, **kwargs)
        # Record the nodes if necessary.
        if not self._nodes_fixed:
            self._add_node(inputs, outputs)
        return outputs

    def __delitem__(self, key):
        raise TypeError('The Layer API does not allow to use the method: `__delitem__`')

    def __repr__(self):
        return 'Layer'

    def __setitem__(self, key, item):
        raise TypeError('The Layer API does not allow to use the method: `__setitem__`')
|
[
"ting.pan@seetatech.com"
] |
ting.pan@seetatech.com
|
ee3872bc65c7073fd639374827e837a332116b94
|
caf0ba85f1c7a2b7208e7f0acebb3c047b17b0ba
|
/1-py/venv/PY_17_ARCHIVOS_XML.py
|
a877a3074ae9230424db9c4e7b17404af91df9ce
|
[] |
no_license
|
JAreina/python
|
12ca9bd5467420a813ac3f33b0adba6cd492f855
|
3b9ac8d37ab2abe70e34043857f96a76c19468c8
|
refs/heads/master
| 2020-03-22T07:57:31.675271
| 2018-09-12T06:38:49
| 2018-09-12T06:38:49
| 139,735,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
from xml.etree.ElementTree import parse

'''
XML files: parse a document, print its root, walk every element, then
look up specific tags.  (Translated from the original Spanish comment.)
'''
documento = parse("nuevo.xml")
print(documento.getroot())

# Fix: ElementTree.getiterator() was deprecated since Python 3.2 and removed
# in Python 3.9; iter() is the supported equivalent and yields the same
# elements in document order.
iterador = documento.iter()
print(iterador)
for i in iterador:
    print(i)
    print(i.text)
for elemento in documento.findall("funcion"):
    print(elemento.text)
|
[
"jareinafdez@gmail.com"
] |
jareinafdez@gmail.com
|
c430b520d8077c08d1f2dad4a4e255d079ae8da2
|
4546398a18590e4e182629fb55d185547dd6df0a
|
/2015/beta/problems/demantar/input_format_validators/validator.py
|
bff24165ef084214f3ab7989be22df6477c131b1
|
[] |
no_license
|
ForritunarkeppniFramhaldsskolanna/Keppnir
|
352341fa97c6349af65b513c03171f3e706f7db2
|
65c8eb5358d8a49f956edf76c2d47b9372accc3c
|
refs/heads/master
| 2023-04-28T15:33:36.396225
| 2023-04-23T15:00:15
| 2023-04-23T15:00:15
| 78,303,702
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
import sys
import re
import string

# Input-format validator (exit code 42 = input accepted).  The input must be
# a single line "<n> <c>" where 1 <= n <= 500 and c is one printable,
# non-whitespace character.
s = sys.stdin.readline()
# '.' in the pattern does not match '\n', so c itself cannot be a newline.
assert re.match('^[0-9]+ .\n$', s)
s = s[:-1]  # drop the trailing newline
n, c = s.split(' ', 1)
n = int(n)
assert 1 <= n <= 500
assert len(c) == 1
assert c in string.printable and c not in string.whitespace
# No trailing data allowed after the single line.
assert sys.stdin.read() == ''
sys.exit(42)
|
[
"suprdewd@gmail.com"
] |
suprdewd@gmail.com
|
3ac2d633ab9dcb1d8b617c819a20e45565fc5fa4
|
d01f9ff2d7ba3c7c99158678adeaf082f3f15dbc
|
/model/cpn/ablation_study/cityscapes.cpn.R101_v1c.v2/network.py
|
e1e8c9f9b049a87dd56a998ac77b7ba565d5ba93
|
[
"MIT"
] |
permissive
|
akinoriosamura/TorchSeg-mirror
|
d8e76d99e80d55c2555f4f8f7a7fc3f30ef5dec4
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
refs/heads/master
| 2021-06-18T15:47:00.946788
| 2019-10-26T04:46:07
| 2019-10-26T04:46:07
| 217,657,156
| 0
| 0
|
MIT
| 2021-06-08T20:36:44
| 2019-10-26T04:46:39
|
Python
|
UTF-8
|
Python
| false
| false
| 7,740
|
py
|
# encoding: utf-8
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import config
from base_model import resnet101
from seg_opr.seg_oprs import ConvBnRelu
from seg_opr.loss_opr import AntimagnetLossv6
class CPNet(nn.Module):
    """Context Prior Network for semantic segmentation.

    ResNet-101 backbone (dilated stages 3/4 for stride-8 output), an
    ObjectContext module producing intra-class similarity maps, a main
    segmentation head, and an auxiliary head on the penultimate stage.
    """

    def __init__(self, out_planes, criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(CPNet, self).__init__()
        self.backbone = resnet101(pretrained_model, norm_layer=norm_layer,
                                  bn_eps=config.bn_eps,
                                  bn_momentum=config.bn_momentum,
                                  deep_stem=True, stem_width=64)
        # Replace strides with dilations so the output stride stays at 8.
        self.generate_dilation(self.backbone.layer3, dilation=2)
        self.generate_dilation(self.backbone.layer4, dilation=4,
                               multi_grid=[1, 2, 4])

        # Layers listed here get the task-specific (non-backbone) LR policy
        # — presumably consumed by the trainer; confirm against caller.
        self.business_layer = []

        self.context = ObjectContext(2048, 512, norm_layer)

        self.head_layer = nn.Sequential(
            ConvBnRelu(2048 + 1024, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.aux_layer = nn.Sequential(
            ConvBnRelu(1024, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.business_layer.append(self.context)
        self.business_layer.append(self.head_layer)
        self.business_layer.append(self.aux_layer)

        self.criterion = criterion
        self.bce_criterion = nn.BCELoss(reduction='mean')
        self.antimagnet_criterion = AntimagnetLossv6()

    def forward(self, data, label=None, aux_label=None):
        """Training mode (label given): return the combined loss.
        Inference mode: return per-class softmax probabilities."""
        blocks = self.backbone(data)

        fm, intra_sim_map = self.context(blocks[-1])
        fm = self.head_layer(fm)
        # x8 upsampling undoes the stride-8 backbone output.
        fm = F.interpolate(fm, scale_factor=8, mode='bilinear',
                           align_corners=True)
        softmax_fm = F.softmax(fm, dim=1)

        aux_fm = self.aux_layer(blocks[-2])
        aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',
                               align_corners=True)

        if label is not None:
            main_loss = self.criterion(fm, label)
            aux_loss = self.criterion(aux_fm, label)
            # aux_label supervises the predicted intra-class affinity map.
            intra_sim_loss = self.bce_criterion(intra_sim_map, aux_label)
            antimagnet_loss = self.antimagnet_criterion(intra_sim_map,
                                                        aux_label)
            loss = main_loss + 0.4 * aux_loss + intra_sim_loss + antimagnet_loss
            return loss

        return softmax_fm

    def generate_dilation(self, module, dilation, multi_grid=None):
        """Convert a backbone stage to dilated convolutions.

        NOTE(review): `dilation` is reassigned cumulatively inside the loop
        (`dilation = dilation * grid`), so with multi_grid the per-block
        rates compound across blocks rather than being `base * grid` each —
        confirm this is the intended multi-grid scheme.
        """
        for idx, block in enumerate(module):
            if multi_grid is None:
                grid = 1
            else:
                grid = multi_grid[idx % len(multi_grid)]
            dilation = dilation * grid
            block.apply(partial(self._nostride_dilate, dilate=dilation))

    @staticmethod
    def _nostride_dilate(m, dilate):
        # Turn stride-2 convs into stride-1 and give 3x3 convs the requested
        # dilation/padding so the receptive field grows without downsampling.
        if isinstance(m, nn.Conv2d):
            if m.stride == (2, 2):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate // 2, dilate // 2)
                    m.padding = (dilate // 2, dilate // 2)
            else:
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate, dilate)
                    m.padding = (dilate, dilate)
class SymmetricConv(nn.Module):
    """Depthwise cross-shaped convolution block.

    Two separable depthwise branches — (k x 1 then 1 x k) and
    (1 x k then k x 1) — are summed and passed through BN + ReLU.
    Symmetric padding preserves the spatial size.
    """

    def __init__(self, in_channels, ksize, norm_layer=nn.BatchNorm2d):
        super(SymmetricConv, self).__init__()
        pad = ksize // 2
        tall = (ksize, 1)
        wide = (1, ksize)

        def _dwconv(kernel, padding):
            # Depthwise (groups == channels) conv, stride 1.
            return nn.Conv2d(in_channels, in_channels, groups=in_channels,
                             kernel_size=kernel, stride=1, padding=padding)

        # Submodule creation order matches the original so seeded parameter
        # initialization is reproducible.
        self.t1 = _dwconv(tall, (pad, 0))
        self.t2 = _dwconv(wide, (0, pad))
        self.p1 = _dwconv(wide, (0, pad))
        self.p2 = _dwconv(tall, (pad, 0))
        self.bn = norm_layer(in_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        branch_a = self.t2(self.t1(x))
        branch_b = self.p2(self.p1(x))
        return self.relu(self.bn(branch_a + branch_b))
class ObjectContext(nn.Module):
    """Aggregate intra-/inter-object context through a learned similarity map.

    NOTE(review): depends on the module-level ``config.prior_size`` and the
    ``ConvBnRelu`` helper defined elsewhere in this file.
    """
    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel
        # 3x3 projection from in_channels down to inner_channel.
        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      3, 1, 1,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)
        # Predicts a (prior_size ** 2)-dim similarity vector per position.
        self.intra_similarity_branch = nn.Sequential(
            SymmetricConv(self.inner_channel, 11, norm_layer),
            nn.Conv2d(self.inner_channel, config.prior_size ** 2, 1, 1, 0,
                      groups=16, bias=False),
            norm_layer(config.prior_size ** 2)
        )
        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
    def forward(self, x):
        """Return (concatenated context features, intra-similarity map).

        NOTE(review): the bmm shapes below assume h * w == prior_size ** 2
        for the input feature map — confirm against the caller.
        """
        b, h, w = x.size(0), x.size(2), x.size(3)
        value = self.reduce_conv(x)
        intra_similarity_map = self.intra_similarity_branch(value)
        # (b, h*w, prior^2) -> transpose -> sigmoid: soft object membership.
        intra_similarity_map = intra_similarity_map.view(b, h * w, -1)
        intra_similarity_map = intra_similarity_map.permute(0, 2, 1)
        intra_similarity_map = torch.sigmoid(intra_similarity_map)
        # Complement of the intra map stands in for "other objects".
        inter_similarity_map = 1 - intra_similarity_map
        value = value.view(b, self.inner_channel, -1)
        value = value.permute(0, 2, 1)
        # Weighted sums of position features, normalized by prior_size ** 2.
        intra_context = torch.bmm(intra_similarity_map, value)
        intra_context = intra_context.div(config.prior_size ** 2)
        intra_context = intra_context.permute(0, 2, 1).contiguous()
        intra_context = intra_context.view(b, self.inner_channel, *x.size()[2:])
        intra_context = self.intra_post_conv(intra_context)
        inter_context = torch.bmm(inter_similarity_map, value)
        inter_context = inter_context.div(config.prior_size ** 2)
        inter_context = inter_context.permute(0, 2, 1).contiguous()
        inter_context = inter_context.view(b, self.inner_channel, *x.size()[2:])
        inter_context = self.inter_post_conv(inter_context)
        output = torch.cat([x, intra_context, inter_context], dim=1)
        return output, intra_similarity_map
if __name__ == "__main__":
    # Smoke test: build a 150-class PSPNet without pretrained weights and dump it.
    model = PSPNet(150, None)
    print(model)
|
[
"osamura.akinori@gmail.com"
] |
osamura.akinori@gmail.com
|
9ba229cd899e1c098ef7cc34a315439025460288
|
1c6283303ceb883add8de4ee07c5ffcfc2e93fab
|
/Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/subtlv_7c94061598b794f7b720de3bb85f6cdb.py
|
e5a86fab5e949b33adb3d3d1d06e318bcc1731c0
|
[] |
no_license
|
pdobrinskiy/devcore
|
0f5b3dfc2f3bf1e44abd716f008a01c443e14f18
|
580c7df6f5db8c118990cf01bc2b986285b9718b
|
refs/heads/main
| 2023-07-29T20:28:49.035475
| 2021-09-14T10:02:16
| 2021-09-14T10:02:16
| 405,919,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,055
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class SubTlv(Base):
    """Sub Tlv container
    The SubTlv class encapsulates a list of subTlv resources that are managed by the system.
    A list of resources can be retrieved from the server using the SubTlv.find() method.
    """
    __slots__ = ()
    _SDM_NAME = 'subTlv'
    # Maps Python-facing attribute names to the REST/SDM attribute names.
    # NOTE: the keyword-argument names of update/add/find below must match
    # these keys exactly — _map_locals(self._SDM_ATT_MAP, locals()) pairs
    # them up by name.
    _SDM_ATT_MAP = {
        'Description': 'description',
        'EnablePerSession': 'enablePerSession',
        'IsEnabled': 'isEnabled',
        'Name': 'name',
    }
    _SDM_ENUM_MAP = {
    }
    def __init__(self, parent, list_op=False):
        super(SubTlv, self).__init__(parent, list_op)
    @property
    def Value(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.value_ac1d7b13584a86b9cf1c28dca3390bca.Value): An instance of the Value class
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Import is local to avoid a module-level import cycle in the
        # generated SDK package.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.value_ac1d7b13584a86b9cf1c28dca3390bca import Value
        # Reuse the cached child resource when present; otherwise select it
        # from the server.
        if self._properties.get('Value', None) is not None:
            return self._properties.get('Value')
        else:
            return Value(self)._select()
    @property
    def Description(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Description of the tlv
        """
        return self._get_attribute(self._SDM_ATT_MAP['Description'])
    @Description.setter
    def Description(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Description'], value)
    @property
    def EnablePerSession(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(ixnetwork_restpy.multivalue.Multivalue): Enable TLV per session
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnablePerSession']))
    @property
    def IsEnabled(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: Enables/disables this tlv
        """
        return self._get_attribute(self._SDM_ATT_MAP['IsEnabled'])
    @IsEnabled.setter
    def IsEnabled(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['IsEnabled'], value)
    @property
    def Name(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Name of the tlv
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    def update(self, Description=None, IsEnabled=None, Name=None):
        # type: (str, bool, str) -> SubTlv
        """Updates subTlv resource on the server.
        This method has some named parameters with a type: obj (Multivalue).
        The Multivalue class has documentation that details the possible values for those named parameters.
        Args
        ----
        - Description (str): Description of the tlv
        - IsEnabled (bool): Enables/disables this tlv
        - Name (str): Name of the tlv
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Parameter names intentionally mirror _SDM_ATT_MAP keys (see above).
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, Description=None, IsEnabled=None, Name=None):
        # type: (str, bool, str) -> SubTlv
        """Adds a new subTlv resource on the json, only valid with config assistant
        Args
        ----
        - Description (str): Description of the tlv
        - IsEnabled (bool): Enables/disables this tlv
        - Name (str): Name of the tlv
        Returns
        -------
        - self: This instance with all currently retrieved subTlv resources using find and the newly added subTlv resources available through an iterator or index
        Raises
        ------
        - Exception: if this function is not being used with config assistance
        """
        return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, Description=None, IsEnabled=None, Name=None):
        # type: (str, bool, str) -> SubTlv
        """Finds and retrieves subTlv resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve subTlv resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all subTlv resources from the server.
        Args
        ----
        - Description (str): Description of the tlv
        - IsEnabled (bool): Enables/disables this tlv
        - Name (str): Name of the tlv
        Returns
        -------
        - self: This instance with matching subTlv resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of subTlv data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the subTlv resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
    def get_device_ids(self, PortNames=None, EnablePerSession=None):
        """Base class infrastructure that gets a list of subTlv device ids encapsulated by this object.
        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
        Args
        ----
        - PortNames (str): optional regex of port names
        - EnablePerSession (str): optional regex of enablePerSession
        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())
|
[
"pdobrinskiy@yahoo.com"
] |
pdobrinskiy@yahoo.com
|
57b1d19b7a5e95e9e467c34419f47bb1f739192c
|
c124cd627d1cd2ecc2056a932db4c5c3203943f2
|
/MPSAppt/core/containers/attest.py
|
2536a3c36e1f8b87c51e74ab01d0f30f4dde7928
|
[] |
no_license
|
longooglite/mps
|
8fb2093b6a9f483a2ce4543949f7cbf0b280a1f1
|
fd8c0d1491b80074fdf5a8c923d50e55a1991ad0
|
refs/heads/master
| 2021-01-10T08:17:15.852252
| 2016-02-29T21:07:04
| 2016-02-29T21:07:04
| 52,824,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,311
|
py
|
# [Copyright]
# SmartPath v1.0
# Copyright 2014-2015 Mountain Pass Solutions, Inc.
# This unpublished material is proprietary to Mountain Pass Solutions, Inc.
# [End Copyright]
from MPSAppt.core.containers.task import Task
import MPSAppt.services.attestService as attestSvc
import MPSAppt.services.jobActionService as jaService
import MPSAppt.services.uberGapService as uberGapSvc
import MPSAppt.utilities.environmentUtils as envUtils
import MPSCore.utilities.stringUtilities as stringUtils
class Attest(Task):
    """Workflow task that captures an attestation for a job action.

    NOTE(review): relies on MPSAppt services (attest / jobAction / uberGap)
    and the Task base class defined outside this module.
    """
    def __init__(self, containerCode, parameterBlock):
        Task.__init__(self, containerCode, parameterBlock)
        self.setAttestation({})
    # Initialization.
    def initializeOnNewJobAction(self, _jobAction, _personDict, _profile, _now, _username, doCommit=True):
        """Seed this task for a new job action.

        When configured, a recent valid attestation for the same person is
        cloned onto the new job task; otherwise configured directive emails
        are sent. Always grants candidate access.
        """
        self.initializeItemSharingOnNewJobAction(_jobAction, _profile, _now, _username, doCommit=doCommit)
        actionInfo = {}
        if _personDict:
            initItems = self.getConfigDict().get('initItems',[])
            for initItem in initItems:
                # BUGFIX: dict.has_key() is Python-2-only; use `in`.
                if 'findValidAttestation' in initItem:
                    findValidAttestation = initItem.get('findValidAttestation',[])
                    for validAttestConfig in findValidAttestation:
                        lookbackDays = validAttestConfig.get('lookbackDays',0)
                        codes = validAttestConfig.get('codes')
                        attestService = attestSvc.AttestService(self.getWorkflow().getConnection())
                        validAttest = attestService.findViableAttest(codes,lookbackDays,_personDict)
                        if validAttest:
                            # Clone the prior attestation onto this job task.
                            validAttest['updated'] = _now
                            validAttest['id'] = None
                            jobTask = jaService.JobActionService(self.getWorkflow().getConnection()).getOrCreateJobTask(_jobAction,self,_now,_username)
                            validAttest['job_task_id'] = jobTask.get('id',-1)
                            attestService.updateAttestation(jobTask,validAttest)
                        else:
                            emailConfigs = validAttestConfig.get('emails')
                            if emailConfigs:
                                self.sendDirectiveEmail(emailConfigs,_personDict,_jobAction,self,_now,_profile)
        actionInfo['grantCandidateAccess'] = True
        return actionInfo
    # Getters/Setters.
    def getAttestation(self): return self._attestationDict
    def setAttestation(self, __attestationDict): self._attestationDict = __attestationDict
    # Data loading.
    def loadInstance(self):
        """Lazily load this task's attestation row (runs at most once)."""
        if self.getIsLoaded():
            return
        self.setIsLoaded(True)
        if not self.getIsEnabled():
            return
        jobTask = self.getPrimaryJobTaskDict()
        if jobTask:
            resultDict = attestSvc.AttestService(self.getWorkflow().getConnection()).getAttestation(jobTask.get('id',0))
            if resultDict:
                self.setAttestation(resultDict)
    # Directive emails.
    def extendEmailContext(self, _emailContext):
        """Add/Change elements in the given _emailContext (candidate URL)."""
        env = envUtils.getEnvironment()
        appCode = env.getAppCode()
        loginURI = env.getLoginUri()
        siteApplications = self.getWorkflow().getUserProfile().get('siteProfile',{}).get('siteApplications',[])
        urlPrefix = env.getApplicationURLPrefix(appCode, siteApplications)
        externalKey = self.getWorkflow().getJobActionDict().get('external_key', '')
        _emailContext['candidate_url'] = "%s%s/%s" % (urlPrefix, loginURI, externalKey)
    # Everything else.
    def getDataDict(self, _sitePreferences):
        """Summary dict (url / disabled) for task listings; {} when disabled."""
        self.loadInstance()
        if self.getIsEnabled():
            prefix = '/appt/jobaction/attest'
            jobActionIdStr = str(self.getWorkflow().getJobActionDict().get('id',0))
            argTuple = (prefix, jobActionIdStr, self.getCode())
            dataDict = {}
            dataDict['url'] = '%s/%s/%s' % argTuple
            dataDict['disabled'] = self.standardTaskDisabledCheck()
            return dataDict
        return {}
    def getEditContext(self, _sitePreferences,isForPrint = False):
        """Build the template context for the attest edit/print page."""
        self.loadInstance()
        if self.getIsEnabled():
            context = self.getCommonEditContext(_sitePreferences)
            context['url'] = self._getURL()
            context['enable_print'] = self.getConfigDict().get('print_enabled',True)
            context['button_text'] = 'Submit'
            context['button_url'] = self._getURL('/appt/jobaction/attest/complete')
            context['print_url'] = self._getURL('/appt/jobaction/attest/print')
            context['prompts'] = self.dictifyPromptsList(self.getConfigDict().get('prompts',{}))
            context.update(self.updateContextWithImage(self.getConfigDict().get('displayImage',False),
                                                       self.getConfigDict().get('displayImageScalePixelWidth',400),
                                                       self.getConfigDict().get('displayImageTaskCode',''),
                                                       isForPrint))
            # the old way, left here for backward compatibility
            # on new templates, use {tags} below
            fullName = ''
            candidate = jaService.JobActionService(self.getWorkflow().getConnection()).getCandidateDict(self.getWorkflow().getJobActionDict())
            if candidate:
                # BUGFIX: default '' so a missing name cannot crash .upper() below.
                fullName = candidate.get('full_name', '')
            context['candidate_name'] = fullName
            context['submitText'] = self.getConfigDict().get('submitText','')
            if '%s' in context['submitText']:
                context['submitText'] = context['submitText'] % fullName.upper()
            else:
                # the new way, allows for a candidate or a system user to attest
                # BUGFIX: dict.has_key() is Python-2-only; use `in`.
                if 'attestor_department' in self._attestationDict:
                    attestor_department = self._attestationDict.get('attestor_department','')
                    attestor_name = self._attestationDict.get('attestor_name','')
                else:
                    attestor_name,attestor_department = self.getNameAndDepartment()
                submitText = str(context['submitText'])
                if submitText.find('{attestor_department}') > -1:
                    submitText=submitText.replace('{attestor_department}',attestor_department)
                if submitText.find('{attestor_name}') > -1:
                    submitText=submitText.replace('{attestor_name}',attestor_name)
                context['submitText'] = submitText
            configKeyName = 'uberGapsConfig'
            uberGapsConfig = self.getConfigDict().get(configKeyName, [])
            if uberGapsConfig:
                gapService = uberGapSvc.UberGapService(self.getWorkflow().getConnection())
                gaps = gapService.processContainer(self, _sitePreferences, _configKeyName=configKeyName, _returnLocalizedDates=True)
                if gaps:
                    context['gapsList'] = gaps
                    context['gapsEnforced'] = self.getConfigDict().get('uberGapsEnforced', True)
                    context['gapsEnforcedDescr'] = self.getConfigDict().get('uberGapsEnforcedText', '')
                    context['gapsPrintIntroText'] = self.getConfigDict().get('uberGapsPrintIntroText', '')
                    if context['gapsEnforced']:
                        context['disabled'] = True
            return context
        return {}
    def getNameAndDepartment(self):
        """Resolve the attestor's display name and department (upper-cased).

        Candidates attest under their own name; staff users under their
        profile's full_name.
        """
        attestor_name = ''
        isCandidate = self.hasPermission('apptCandidate')
        if not isCandidate:
            # BUGFIX: original read .get('full_name''') — implicit string
            # concatenation swallowed the intended '' default, so a missing
            # key returned None and .upper() below crashed.
            attestor_name = self.workflow.userProfile.get('userProfile',{}).get('userPreferences',{}).get('full_name','')
        else:
            candidate = jaService.JobActionService(self.workflow.connection).getCandidateDict(self.workflow.getJobActionDict())
            if candidate:
                attestor_name = candidate.get('full_name','')
        attestor_department = self.workflow.department.get('full_descr','')
        return attestor_name.upper(),attestor_department.upper()
    def _getURL(self, _prefix='/appt/jobaction/attest'):
        # Build '<prefix>/<jobActionId>/<taskCode>'.
        jobActionIdStr = str(self.getWorkflow().getJobActionDict().get('id',0))
        return '%s/%s/%s' % (_prefix, jobActionIdStr, self.getCode())
    def isComplete(self):
        """True when attested (or when this task is disabled)."""
        self.loadInstance()
        if self.getIsEnabled():
            return self.getAttestation().get('complete', False)
        return True
|
[
"longuskikirk@gmail.com"
] |
longuskikirk@gmail.com
|
6d167be378b64da8b761152a806b3267181938ac
|
a9f7e40e18c935fb004fe813f98e298ded0581af
|
/Unrated/NIKKEI_2019_Ex/NIKKEI_2019_Ex-G.py
|
91c77f85fafefd28a617a6781617c33d96188fea
|
[] |
no_license
|
happa64/AtCoder_Beginner_Contest
|
2eb350f500f4bd65f5491b98cdf002ac9b174165
|
2526e72de9eb19d1e1c634dbd577816bfe39bc10
|
refs/heads/master
| 2023-07-08T15:13:51.449555
| 2021-08-11T14:18:09
| 2021-08-11T14:18:09
| 255,601,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
# https://atcoder.jp/contests/nikkei2019-ex/submissions/18385907
# G - 回文スコア
import sys
from collections import Counter
sys.setrecursionlimit(10 ** 7)  # contest-template boilerplate: allow deep recursion
f_inf = float('inf')  # not referenced by resolve(); template constant
mod = 10 ** 9 + 7  # standard competitive-programming modulus; unused by resolve()
def resolve():
    """Read S from stdin and print its palindrome score.

    Letters with even multiplicity all go into one big palindrome; each
    letter with odd multiplicity contributes its even part there plus one
    leftover character.
    """
    S = input()
    counts = Counter(S)
    even_total = 0
    odd_letters = 0
    for cnt in counts.values():
        if cnt % 2 == 0:
            even_total += cnt
        else:
            even_total += cnt - 1  # no-op when cnt == 1
            odd_letters += 1
    if odd_letters:
        # Spend one leftover character to make the big palindrome odd-length.
        score = (even_total + 1) ** 2 + odd_letters - 1
    else:
        score = even_total ** 2
    print(score)
# Script entry point.
if __name__ == '__main__':
    resolve()
|
[
"happa_iidx@yahoo.co.jp"
] |
happa_iidx@yahoo.co.jp
|
98a0437b02ec91f9eb46d2cdc1a194709887c950
|
ba9c4be2697d5299fee7adf1652152f92e789894
|
/__init__.py
|
37fc40a9722c542c9e6a902b473a04d95ad62b34
|
[] |
no_license
|
fabyc/nodux_in_invoice2note
|
8ec180a585f83d732c8819d21454403d6639c3e3
|
5b2fb6548149c0ace5cfc2c8a56237c1c372fb0b
|
refs/heads/master
| 2022-05-27T09:26:43.844978
| 2017-08-16T20:14:55
| 2017-08-16T20:14:55
| 260,091,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from trytond.pool import Pool
from .invoice import *
def register():
    """Register this module's model classes in the Tryton pool."""
    Pool.register(
        Invoice,
        module='nodux_in_invoice2note', type_='model')
|
[
"tatianaq@nodux.ec"
] |
tatianaq@nodux.ec
|
24848acbed36d1b8123122e0ce4c169f45274050
|
ee0b9cd2424e634a212a6d9734af1eaedd40dfe0
|
/jhub37_mantid_baseline/sasview-5.0.3/src/sas/qtgui/Plotting/SlicerModel.py
|
fcff73897d0b797ce5523cb2234f937919a6e72b
|
[
"BSD-3-Clause"
] |
permissive
|
moving-northwards/Docker
|
775755b4618c1a7946f540505b0178e119d294d1
|
8ef18fd8c6abb0608ce9b53187e53d00d3e4e9ae
|
refs/heads/master
| 2023-05-26T08:42:58.634525
| 2021-06-15T08:41:08
| 2021-06-15T08:41:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,547
|
py
|
from PyQt5 import QtGui
from PyQt5 import QtCore
import sas.qtgui.Utilities.GuiUtils as GuiUtils
class SlicerModel(object):
    """Mixin keeping slicer parameters mirrored in a Qt item model.

    Subclasses must implement getParams() / setParams(params) / validate().
    """
    def __init__(self):
        # Model representation of local parameters
        self._model = QtGui.QStandardItemModel()
        # Guard flag: set False while writing params back so the
        # itemChanged handler does not re-enter.
        self.update_model = True
        self._model.itemChanged.connect(self.setParamsFromModelItem)

    def setModelFromParams(self):
        """
        Set up the Qt model for data handling between controls
        """
        parameters = self.getParams()
        self._model.removeRows( 0, self._model.rowCount() )
        # Create/overwrite model items (one [name, value] row per parameter)
        for parameter in list(parameters.keys()):
            item1 = QtGui.QStandardItem(parameter)
            item2 = QtGui.QStandardItem(GuiUtils.formatNumber(parameters[parameter]))
            self._model.appendRow([item1, item2])
        self._model.setHeaderData(0, QtCore.Qt.Horizontal, "Parameter")
        self._model.setHeaderData(1, QtCore.Qt.Horizontal, "Value")

    def setParamsFromModel(self):
        """
        Set up the params dictionary based on the current model content.
        """
        params = self.getParams()
        for row_index in range(self._model.rowCount()):
            param_name = str(self._model.item(row_index, 0).text())
            params[param_name] = float(self._model.item(row_index, 1).text())
        self.update_model = False
        self.setParams(params)
        self.update_model = True

    def setParamsFromModelItem(self, item):
        """
        Set up the params dictionary for the parameter in item.
        """
        params = self.getParams()
        index = self._model.indexFromItem(item)
        row_index = index.row()
        param_name = str(self._model.item(row_index, 0).text())
        params[param_name] = float(self._model.item(row_index, 1).text())
        self.update_model = False
        self.setParams(params)
        self.update_model = True

    def model(self):
        '''getter for the model'''
        return self._model

    def getParams(self):
        ''' pure virtual '''
        raise NotImplementedError("Parameter getter must be implemented in derived class.")

    def setParams(self, params):
        # BUGFIX: the stub previously took no `params` argument, so the
        # internal self.setParams(params) calls above would raise TypeError
        # instead of NotImplementedError on a non-overriding subclass.
        ''' pure virtual '''
        raise NotImplementedError("Parameter setter must be implemented in derived class.")

    def validate(self):
        ''' pure virtual '''
        raise NotImplementedError("Validator must be implemented in derived class.")
|
[
"torben.nielsen@esss.se"
] |
torben.nielsen@esss.se
|
b34ec6fea05408a0f8b07b708fd7b9eb9aff1f36
|
54b8fa244ff0dae2018efedcb81e1bb03376e5e2
|
/test/functional/test_framework/bignum.py
|
5af132b8a1480c8b1b2ec58ba88da0a3e971c468
|
[
"MIT"
] |
permissive
|
afghany/castletmp
|
e15677a88f9a1878486b6becf93d26c0ee9dbeaf
|
9d0daed2a6abaf7d93f9308f5c602db6eeb42c8b
|
refs/heads/master
| 2022-11-27T14:58:47.802781
| 2020-08-08T21:26:12
| 2020-08-08T21:26:12
| 284,464,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
#!/usr/bin/env python3
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Big number routines.
This file is copied from python-bitcoinlib.
"""
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
    """Byte length of v's magnitude, plus one pad byte when have_ext."""
    return (v.bit_length() + 7) // 8 + (1 if have_ext else 0)
def bn2bin(v):
    """Serialize non-negative int *v* to a minimal big-endian bytearray.

    Returns an empty bytearray for 0 (matching the original loop).
    Uses int.to_bytes instead of the hand-rolled shift loop.
    """
    width = (v.bit_length() + 7) // 8
    return bytearray(v.to_bytes(width, "big"))
def bin2bn(s):
    """Deserialize big-endian bytes *s* into a non-negative int (0 for empty)."""
    return int.from_bytes(bytes(s), "big")
def bn2mpi(v):
    """Serialize int *v* in OpenSSL MPI format: 4-byte BE size + payload.

    A zero pad byte is prepended when the magnitude's top bit would
    otherwise collide with the sign flag; negatives set bit 7 of the
    first payload byte. Rewritten with int.to_bytes instead of the
    hand-rolled helpers.
    """
    magnitude = abs(v)
    nbits = magnitude.bit_length()
    # Extension byte needed when the magnitude exactly fills its bytes.
    pad = 1 if nbits and nbits % 8 == 0 else 0
    body = bytearray(pad) + bytearray(magnitude.to_bytes((nbits + 7) // 8, "big"))
    if v < 0:
        body[0] |= 0x80  # sign flag lives in the first payload byte
    return struct.pack(b">I", len(body)) + body
def mpi2bn(s):
    """Deserialize an OpenSSL-MPI blob back into an int.

    Returns None for malformed input (truncated header or a payload whose
    length disagrees with the declared size); 0 for an empty payload.
    Rewritten with int.from_bytes instead of the hand-rolled fold.
    """
    if len(s) < 4:
        return None
    declared_len = struct.unpack(b">I", bytes(s[:4]))[0]
    if len(s) - 4 != declared_len:
        return None
    if declared_len == 0:
        return 0
    payload = bytearray(s[4:])
    # Bit 7 of the first payload byte carries the sign.
    negative = bool(payload[0] & 0x80)
    payload[0] &= 0x7f
    magnitude = int.from_bytes(bytes(payload), "big")
    return -magnitude if negative else magnitude
# castle-specific little endian format, with implicit size
def mpi2vch(s):
    """Convert big-endian MPI data to castle's little-endian vch format."""
    # Drop the 4-byte size header and reverse BE -> LE in one slice.
    return s[:3:-1]
def bn2vch(v):
    """Serialize int *v* in the little-endian vch format."""
    mpi = bn2mpi(v)
    return bytes(mpi2vch(mpi))
def vch2mpi(s):
    """Convert little-endian vch data to big-endian MPI with a size header."""
    header = struct.pack(b">I", len(s))  # 4-byte big-endian size
    return header + s[::-1]              # reverse string, converting LE->BE
def vch2bn(s):
    """Deserialize a little-endian vch blob into an int."""
    mpi = vch2mpi(s)
    return mpi2bn(mpi)
|
[
"shagolo@gmail.com"
] |
shagolo@gmail.com
|
d2fbd07c92fe745b47740d4e7405fae4d80d92f0
|
a1bffcd8854e1843e56bb812d4d83b3161a5211e
|
/plugins/callback/oneline.py
|
2e5d07c5de90f3cd6d0c6d71b3ec59da0414ebd9
|
[] |
no_license
|
goneri/ansible.community
|
1a71f9d98c164b77f8ed2ed7f558b4963005ff8f
|
f26f612dd0a3154050d90b51a75502018c95f6e4
|
refs/heads/master
| 2020-12-29T07:47:35.353515
| 2020-01-22T17:43:18
| 2020-01-22T17:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,520
|
py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: oneline
type: stdout
short_description: oneline Ansible screen output
description:
- This is the output callback used by the -o/--one-line command line option.
'''
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.
    '''
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'ansible.community.oneline'
    def _command_generic_msg(self, hostname, result, caption):
        # Flatten stdout/stderr so the whole command result fits on one line.
        stdout = result.get('stdout', '').replace('\n', '\\n').replace('\r', '\\r')
        if 'stderr' in result and result['stderr']:
            stderr = result.get('stderr', '').replace('\n', '\\n').replace('\r', '\\r')
            return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc', -1), stdout, stderr)
        else:
            return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc', -1), stdout)
    def v2_runner_on_failed(self, result, ignore_errors=False):
        # One-line failure summary; full traceback only at verbosity >= 3.
        if 'exception' in result._result:
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result._result['exception'].strip().split('\n')[-1]
                msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n', '')
            if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
                self._display.display(self._command_generic_msg(result._host.get_name(), result._result, 'FAILED'), color=C.COLOR_ERROR)
            else:
                self._display.display(msg, color=C.COLOR_ERROR)
        # Always emit the flattened FAILED line, whether or not an
        # exception payload was present.
        self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n', '')),
                              color=C.COLOR_ERROR)
    def v2_runner_on_ok(self, result):
        # Color/label depend on whether the task reported a change.
        if result._result.get('changed', False):
            color = C.COLOR_CHANGED
            state = 'CHANGED'
        else:
            color = C.COLOR_OK
            state = 'SUCCESS'
        if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
            self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
        else:
            self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=0).replace('\n', '')),
                                  color=color)
    def v2_runner_on_unreachable(self, result):
        self._display.display("%s | UNREACHABLE!: %s" % (result._host.get_name(), result._result.get('msg', '')), color=C.COLOR_UNREACHABLE)
    def v2_runner_on_skipped(self, result):
        self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
b43c93f012b04f452b69ddcc2add80dc65a7ac0a
|
588f4991cad99f517ca5028e0e41c5b4d5252543
|
/contest/abc128/C.py
|
d515f27cb54e9ca99721b1cfb2e8eefcf66a93e6
|
[
"MIT"
] |
permissive
|
mola1129/atcoder
|
3002ff38cabf0ccb5142bd576ed90419fccde02e
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
refs/heads/master
| 2020-06-16T12:24:49.609707
| 2020-03-14T15:58:42
| 2020-03-14T15:58:42
| 195,571,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
# ABC128 C: count on/off assignments of N switches that light all M bulbs.
N, M = map(int, input().split())
switch_groups = []
for _ in range(M):
    row = list(map(int, input().split()))
    switch_groups.append(row[1:])  # drop the leading count k_i
p = list(map(int, input().split()))
answer = 0
# Bit (N - s) of `mask` encodes the state of switch s (1-based).
for mask in range(2 ** N):
    all_lit = False
    for i, group in enumerate(switch_groups, start=1):
        on_count = sum((mask >> (N - s)) & 1 for s in group)
        if on_count % 2 != p[i - 1]:
            break
        if i == M:
            all_lit = True
    if all_lit:
        answer += 1
print(answer)
|
[
"ms.mola1129@gmail.com"
] |
ms.mola1129@gmail.com
|
02bebd325a4f76630ff5661c2d8ce5290d849fec
|
6f0ceee714bccf2a89c34a06aabd3bcb781a2fa4
|
/python/mxnet/gluon/probability/distributions/chi2.py
|
7b74683cb09c074f9ac0c521dbf91f8135d864c2
|
[
"Apache-2.0",
"MIT",
"Unlicense",
"BSL-1.0",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"OFL-1.0",
"BSD-2-Clause-Views",
"Zlib"
] |
permissive
|
yajiedesign/mxnet
|
5a495fd06dd1730c17d2d27d7e46c8a770847f17
|
8e5a16cf673db5aceb48d2cf7a0fc1abd0ee5e51
|
refs/heads/master
| 2021-03-30T22:37:18.603396
| 2020-10-23T06:40:17
| 2020-10-23T06:40:17
| 43,763,550
| 214
| 59
|
Apache-2.0
| 2020-06-01T23:31:15
| 2015-10-06T16:36:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Chi-sqaure distribution"""
__all__ = ['Chi2']
from .gamma import Gamma
from .constraint import Positive
class Chi2(Gamma):
    r"""Create a Chi2 distribution object.
    Chi2(df) is equivalent to Gamma(shape=df / 2, scale=2)
    Parameters
    ----------
    df : Tensor or scalar
        Degrees of freedom (shape parameter) of the distribution.
    F : mx.ndarray or mx.symbol.numpy._Symbol or None
        Variable recording running mode, will be automatically
        inferred from parameters if declared None.
    """
    # pylint: disable=abstract-method
    arg_constraints = {'df': Positive()}
    def __init__(self, df, F=None, validate_args=None):
        # Delegate to Gamma with shape = df / 2 and scale = 2.
        super(Chi2, self).__init__(df / 2, 2, F, validate_args)
    @property
    def df(self):
        # Recover degrees of freedom from the underlying Gamma shape.
        return self.shape * 2
|
[
"noreply@github.com"
] |
yajiedesign.noreply@github.com
|
9e9f021e5c2d6c9d246028e2905aa22f9a704361
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R3/benchmark/startPyquil254.py
|
ae770cd6c05beda2d5d3a22c59f0cde3c69686d1
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
# qubit number=4
# total number=14
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the fixed 4-qubit demo circuit (14 numbered gates).

    Gate order is identical to the generated original; the trailing
    comments keep the generator's gate numbers.
    """
    prog = Program()  # circuit begin
    for qubit in (0, 1, 2):
        prog += H(qubit)      # numbers 1-3
    prog += CNOT(0, 2)        # number=11
    prog += X(2)              # number=12
    prog += CNOT(0, 2)        # number=13
    prog += H(3)              # number=4
    prog += Y(3)              # number=5
    prog += SWAP(1, 0)        # number=7
    prog += SWAP(1, 0)        # number=8
    prog += Y(0)              # number=9
    prog += Y(0)              # number=10
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count occurrences of each bitstring.

    Replaces the hand-rolled counting loop with collections.Counter;
    dict() preserves the first-encounter insertion order of the original.
    (Function name typo kept: it is the public interface.)
    """
    from collections import Counter  # stdlib; local import keeps module imports unchanged
    return dict(Counter(bitstrings))
if __name__ == '__main__':
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    # Sample the circuit 1024 times on a 4-qubit QVM.
    results = qvm.run_and_measure(prog,1024)
    # results maps qubit index -> per-shot outcomes; stack to shots x qubits.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil254.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
3ce1c4f46a991d070a0f622b26e7320bd65e0cc6
|
bc6492a9a30ac7228caad91643d58653b49ab9e3
|
/sympy/series/benchmarks/bench_limit.py
|
06fd12c97e72d64b577dd4d469f61cc9efec5885
|
[] |
no_license
|
cosmosZhou/sagemath
|
2c54ea04868882340c7ef981b7f499fb205095c9
|
0608b946174e86182c6d35d126cd89d819d1d0b8
|
refs/heads/master
| 2023-01-06T07:31:37.546716
| 2020-11-12T06:39:22
| 2020-11-12T06:39:22
| 311,177,322
| 1
| 0
| null | 2020-11-12T06:09:11
| 2020-11-08T23:42:40
|
Python
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
from __future__ import print_function, division
from sympy import Symbol, limit, oo
x = Symbol('x')
def timeit_limit_1x():
limit(1/x, x, oo)
|
[
"74498494@qq.com"
] |
74498494@qq.com
|
8d4245bbcb13287d8470f9f637b1762b06d211ae
|
8b0267e7e1ac5b7e762f705597406ef2673b4755
|
/segmentation/smp_local/encoders/__init__.py
|
1665d2724b70bfe72329ee64151569ccf99dd2c2
|
[] |
no_license
|
jionie/Ultra-high-Resolution-EM-Images-Segmentation
|
f2f33cb8c279ca698abb49cee2fd6872fbf66df0
|
b4389349e42310288bbc8704cdbdea6825598d8f
|
refs/heads/master
| 2020-09-19T12:12:57.260517
| 2019-12-18T09:00:43
| 2019-12-18T09:00:43
| 224,227,599
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
import functools
import torch.utils.model_zoo as model_zoo
from .resnet import resnet_encoders
from .dpn import dpn_encoders
from .vgg import vgg_encoders
from .senet import senet_encoders
from .densenet import densenet_encoders
from .inceptionresnetv2 import inceptionresnetv2_encoders
from .inceptionv4 import inceptionv4_encoders
from .efficientnet import efficient_net_encoders
from ._preprocessing import preprocess_input
encoders = {}
encoders.update(resnet_encoders)
encoders.update(dpn_encoders)
encoders.update(vgg_encoders)
encoders.update(senet_encoders)
encoders.update(densenet_encoders)
encoders.update(inceptionresnetv2_encoders)
encoders.update(inceptionv4_encoders)
encoders.update(efficient_net_encoders)
def get_encoder(name, encoder_weights=None):
Encoder = encoders[name]['encoder']
encoder = Encoder(**encoders[name]['params'])
encoder.out_shapes = encoders[name]['out_shapes']
if encoder_weights is not None:
settings = encoders[name]['pretrained_settings'][encoder_weights]
encoder.load_state_dict(model_zoo.load_url(settings['url']))
return encoder
def get_encoder_names():
return list(encoders.keys())
def get_preprocessing_params(encoder_name, pretrained='imagenet'):
settings = encoders[encoder_name]['pretrained_settings']
if pretrained not in settings.keys():
raise ValueError('Avaliable pretrained options {}'.format(settings.keys()))
formatted_settings = {}
formatted_settings['input_space'] = settings[pretrained].get('input_space')
formatted_settings['input_range'] = settings[pretrained].get('input_range')
formatted_settings['mean'] = settings[pretrained].get('mean')
formatted_settings['std'] = settings[pretrained].get('std')
return formatted_settings
def get_preprocessing_fn(encoder_name, pretrained='imagenet'):
params = get_preprocessing_params(encoder_name, pretrained=pretrained)
return functools.partial(preprocess_input, **params)
|
[
"jionie@sjtu.edu.cn"
] |
jionie@sjtu.edu.cn
|
dc993c018a89e7c21f48d7f0f3123050933aa7fe
|
6ddd74b228cebf0a09be584611e914391725f778
|
/readsource/bottle/bottlefirst.py
|
37bd284af1be0fdd350ea595a37880f4a4e1f569
|
[] |
no_license
|
kekefeng/gitproject
|
98435b89ae5bdc63ceed3981438ec3041927da87
|
920c0791e8db69b27d5cffc76c9dd8f90ca2525e
|
refs/heads/master
| 2020-04-22T03:26:00.135817
| 2019-03-18T04:58:06
| 2019-03-18T04:58:06
| 170,086,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
from bottle import Bottle, run, template, route
app = Bottle()
#@app.route('/hello/<name:re:[a-z]+>/zzx/kk/<id:int>')
@app.route('/hello/<name:int>')
def greet(name='Stranger'):
return template('Hello {{name}}, how are you?', name=name)
run(app, host='localhost', port=5000, server='wsgiref')
|
[
"you@example.com"
] |
you@example.com
|
557a7f5b19846ab4ef99b079dc66bebd3ca574d5
|
f3af143bada7f79db1e15b4386e5107bc99eb212
|
/ProjectBaseTest1/工具练习/10-文字聊天室/client.py
|
70ba7d7f25e6fa1e4bf62d351835bf7540105b10
|
[] |
no_license
|
xxxfly/PythonStudy
|
a5ceae1d2b16cfdba19871507458154fc292bca0
|
478d89ccefc91a84f935aebdca796c9d4c23ef61
|
refs/heads/master
| 2022-04-08T13:45:03.405768
| 2020-03-09T03:47:50
| 2020-03-09T03:47:50
| 45,187,749
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,604
|
py
|
#-*-coding:utf-8-*-
import wx
import telnetlib
from time import sleep
import _thread as thread
import random
import re
class LoginFrame(wx.Frame):
"""
登录窗口
"""
def __init__(self,parent,id,title,size):
#初始化,添加控件并绑定事件
wx.Frame.__init__(self,parent,id,title)
self.SetSize(size)
self.Center()
self.serverAddressLable=wx.StaticText(self,label="Server Address",pos=(10,50),size=(120,25))
self.userNameLabel=wx.StaticText(self,label="UserName",pos=(40,100),size=(120,25))
self.serverAddress=wx.TextCtrl(self,pos=(120,47),size=(150,25))
self.userName=wx.TextCtrl(self,pos=(120,97),size=(150,25))
self.loginButton=wx.Button(self,label="Login",pos=(80,145),size=(130,30))
#绑定登录方法
self.loginButton.Bind(wx.EVT_BUTTON,self.login)
self.Show()
def login(self,event):
#处理登录
try:
serverAddress=self.serverAddress.GetLineText(0).split(':')
address=serverAddress[0]
port=serverAddress[1]
con.open(address,port=int(port),timeout=10)
response=con.read_some()
if response!=b'Connect Success':
self.showDialog('Error','Connect Fail',(200,100))
return
con.write(('login '+str(self.userName.GetLineText(0))+'\n').encode('utf-8'))
response=con.read_some()
if response==b'UserName Empty':
self.showDialog('Error','UserName Empty!',(200,100))
elif response==b'UserName Empty':
self.showDialog('Error','UserName Exists!',(200,100))
else:
self.Close()
ChatFrame(None,2,title='Chat Client',size=(500,400))
except Exception as ex:
print(str(ex))
self.showDialog('Error','Connect Fail!',(95,20))
def showDialog(self,title,content,size):
#显示错误信息对话框
print('Error:'+content)
dialog=wx.Dialog(self,title=title,size=size)
dialog.Center()
wx.StaticText(dialog,label=content)
dialog.ShowModal()
class ChatFrame(wx.Frame):
"""
聊天窗口
"""
def __init__(self,parent,id,title,size):
#初始化,添加控件并绑定事件
wx.Frame.__init__(self,parent,id,title)
self.SetSize(size)
self.Center()
self.chatFrame=wx.TextCtrl(self,pos=(5,5),size=(490,310),style=wx.TE_MULTILINE|wx.TE_READONLY)
self.message = wx.TextCtrl(self, pos=(5, 320), size=(300, 25))
self.sendButton=wx.Button(self,label='Send',pos=(310,320),size=(58,25))
self.usersButton=wx.Button(self,label='Users',pos=(373,320),size=(58,25))
self.closeButton=wx.Button(self,label='Close',pos=(436,320),size=(58,25))
#发送按钮绑定发送消息的方法
self.sendButton.Bind(wx.EVT_BUTTON,self.send)
#Users按钮绑定获取在线用户数量的方法
self.usersButton.Bind(wx.EVT_BUTTON,self.lookUsers)
#关闭按钮绑定关闭方法
self.closeButton.Bind(wx.EVT_BUTTON,self.close)
thread.start_new_thread(self.receive,())
self.Show()
def send(self,event):
#发送消息
message=str(self.message.GetLineText(0)).strip()
if message!='':
con.write(('say '+message+'\n').encode('utf-8'))
self.message.Clear()
def lookUsers(self,event):
#查看当前在线用户
con.write(b'look\n')
def close(self,event):
#关闭窗口
con.write(b'logout\n')
con.close()
self.Close()
def receive(self):
#接收服务器的消息
while True:
sleep(0.6)
result=con.read_very_eager()
if result!='':
self.chatFrame.AppendText(result)
numChar='0123456789'
enChar='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def getRandom(length=8):
"""
获取随机码
@param {int} length 默认为8
@return {str} 随机码
"""
if length<4:
return None
randomDigit=''
numCount=random.randint(1,length-1) #数字出现的数量
enNumber=length-numCount
for i in range(length):
#随机添加数字或字母
if bool(random.randint(0,1)):
randomDigit+=random.choice(numChar)
numCount-=1
else:
randomDigit+=random.choice(enChar)
enNumber-=1
return randomDigit
if __name__ == '__main__':
app=wx.App()
con=telnetlib.Telnet()
LoginFrame(None,-1,title='Login',size=(320,250))
app.MainLoop()
# with open('内蒙电信红包随机码.txt', 'w',encoding='utf-8') as f:
# accTup=({'sum':100,'number':30},{'sum':30,'number':150},{'sum':5,'number':2500})
# charList=[]
# for i in range(3000):
# while True:
# char=getRandom(8)
# if char in charList:
# continue
# else:
# charList.append(char)
# break;
# for item in accTup:
# charCurList=charList[0:item['number']]
# for char in charCurList:
# line=char+'\t'+str(item['sum'])+'\n'
# f.write(line)
# charList=charList[item['number']:]
# for char in charList:
# line=char+'\t'+'预留'+'\n'
# f.write(line)
|
[
"970832396@qq.com"
] |
970832396@qq.com
|
bbdb15968f9fecce8a2c0b71d9694e7fb7ea60dd
|
34f1074602e711ad2311afb7e0bcd624013a954e
|
/setup.py
|
fbcce1aad21f5a3c534ac2be29a70dda74b943ca
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
tareqalam/websauna
|
e8363c7dfec7faf2a804d78bf095ed8c2fd74ca0
|
d6326efe3a35cb7cc16234b1ea64198e027ff257
|
refs/heads/master
| 2020-12-26T04:55:45.090210
| 2016-04-01T17:10:55
| 2016-04-01T17:10:55
| 55,261,614
| 0
| 0
| null | 2016-04-01T20:51:47
| 2016-04-01T20:51:46
| null |
UTF-8
|
Python
| false
| false
| 5,398
|
py
|
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path
assert sys.version_info >= (3,4), "Websauna needs Python 3.4 or newer, you have {}".format(sys.version_info)
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='websauna',
namespace_packages=["websauna"],
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0',
description=long_description.split()[0],
long_description=long_description,
# The project's main homepage.
url='https://github.com/websauna/websauna',
# Author details
author='Mikko Ohtamaa',
author_email='mikko@opensourcehacker.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='sqlalchemy postgresql pyramid pytest',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['docs']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
# Pyramid dependencies
'pyramid>=1.6',
'waitress',
'websauna.viewconfig',
'pyramid_redis_sessions',
'pyramid-layout',
"deform>=2.0a2",
'pyramid_deform',
"pyramid_debugtoolbar",
"pyramid_jinja2",
"ipython[notebook]<4",
"pyramid_ipython",
"scandir", # os.scandir backport for py3.4
"python-slugify", # ASCII slug generation
# Time handling
"arrow",
"pytz",
# SQLAlchemy and database support
"psycopg2",
"sqlalchemy",
"alembic",
"colanderalchemy",
"pyramid_tm",
"jsonpointer",
"pgcli",
# User management
"horus",
"authomatic",
# Email
'pyramid-mailer',
'premailer',
# Tasks
'pyramid_celery',
# Python 3.4 typing
"backports.typing",
# Needed by python_notebook etc. who call pyramid.paster module
"pyramid_notebook>=0.1.6",
"PasteDeploy",
# Console logging
"rainbow_logging_handler"
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest', 'Sphinx', 'setuptools_git', 'zest.releaser', 'sphinx-autodoc-typehints', 'pyramid_autodoc', "sphinx_rtd_theme", "sphinxcontrib-zopeext", "ruamel.yaml"],
'test': ['pytest>=2.8', 'coverage', 'webtest', 'pytest-splinter', 'pytest-timeout', 'pytest-cov', "codecov", "flaky"],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'ws-sync-db=websauna.system.devop.scripts.syncdb:main',
'ws-db-shell=websauna.system.devop.scripts.dbshell:main',
'ws-shell=websauna.system.devop.scripts.shell:main',
'ws-tweens=websauna.system.devop.scripts.tweens:main',
'ws-alembic=websauna.system.devop.scripts.alembic:main',
'ws-dump-db=websauna.system.devop.scripts.dumpdb:main',
'ws-create-user=websauna.system.devop.scripts.createuser:main',
'ws-celery=websauna.system.devop.scripts.celery:main',
'ws-pserve=websauna.system.devop.scripts.pserve:main',
'ws-create-table=websauna.system.devop.scripts.createtable:main',
'ws-sanity-check=websauna.system.devop.scripts.sanitycheck:main',
'ws-collect-static=websauna.system.devop.scripts.collectstatic:main',
],
'paste.app_factory': [
'main=websauna.system:main',
# Scheduler auomated test suite entry point
'scheduler_test=websauna.tests.test_scheduler:main',
'tutorial_test=websauna.tests.tutorial:main',
],
'pyramid.scaffold': [
"websauna_app=websauna.scaffolds:App",
"websauna_addon=websauna.scaffolds:Addon",
]
},
)
|
[
"mikko@opensourcehacker.com"
] |
mikko@opensourcehacker.com
|
de5585cdb7d57b5786ca3a57f62a92394188651f
|
b085a8631b20f5548627409e7c6d42557f0d4b7d
|
/libs/layers/assign.py
|
d3eaa1fd01e1891ed4b5a79dc9f859686a1f9e98
|
[
"Apache-2.0"
] |
permissive
|
abdulmuneer/FastMaskRCNN
|
cd810057b4d62b035f12078e19fce46ea9d1e30e
|
2bd65f0faf21e140040242d884f3e33a087e5b04
|
refs/heads/master
| 2021-01-20T03:41:51.455023
| 2017-04-26T10:06:41
| 2017-04-26T10:06:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import libs.boxes.cython_bbox as cython_bbox
import libs.configs.config_v1 as cfg
from libs.boxes.bbox_transform import bbox_transform, bbox_transform_inv, clip_boxes
from libs.boxes.anchor import anchors_plane
from libs.logs.log import LOG
# FLAGS = tf.app.flags.FLAGS
_DEBUG = False
def assign_boxes(gt_boxes, min_k=2, max_k=5):
"""assigning boxes to layers in a pyramid according to its area
Params
-----
gt_boxes: of shape (N, 5), each entry is [x1, y1, x2, y2, cls]
strides: the stride of each layer, like [4, 8, 16, 32]
Returns
-----
layer_ids: of shape (N,), each entry is a id indicating the assigned layer id
"""
k0 = 4
if gt_boxes.size > 0:
layer_ids = np.zeros((gt_boxes.shape[0], ), dtype=np.int32)
ws = gt_boxes[:, 2] - gt_boxes[:, 0]
hs = gt_boxes[:, 3] - gt_boxes[:, 1]
areas = ws * hs
k = np.floor(k0 + np.log2(np.sqrt(areas) / 224))
inds = np.where(k < min_k)[0]
k[inds] = min_k
inds = np.where(k > max_k)[0]
k[inds] = max_k
if _DEBUG:
print ("### boxes and layer ids")
print (np.hstack((gt_boxes[:, 0:4], k[:, np.newaxis])))
return k.astype(np.int32)
else:
return np.asarray([], dtype=np.int32)
|
[
"shangchong90@gmail.com"
] |
shangchong90@gmail.com
|
6b67fbc7d998de614a48c315933888a62a343846
|
6b033e3dddc280417bb97500f72e68d7378c69d6
|
/V. Algorithm/ii. Site/F. LeetCode/0241_different_ways_to_add_parentheses.py
|
bd5f9b246157a8f093769f995d43c706ba430806
|
[] |
no_license
|
inyong37/Study
|
e5cb7c23f7b70fbd525066b6e53b92352a5f00bc
|
e36252a89b68a5b05289196c03e91291dc726bc1
|
refs/heads/master
| 2023-08-17T11:35:01.443213
| 2023-08-11T04:02:49
| 2023-08-11T04:02:49
| 128,149,085
| 11
| 0
| null | 2022-10-07T02:03:09
| 2018-04-05T02:17:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
from typing import List
class Solution:
def diffWaysToCompute(self, expression: str) -> List[int]:
def compute(left, right, op):
results = []
for l in left:
for r in right:
results.append(eval(str(l) + op + str(r)))
return results
if expression.isdigit():
return [int(expression)]
results = []
for idx, val in enumerate(expression):
if val in "+-*":
left = self.diffWaysToCompute(expression[:idx])
right = self.diffWaysToCompute(expression[idx+1:])
results.extend(compute(left, right, val))
return results
|
[
"noreply@github.com"
] |
inyong37.noreply@github.com
|
a370b77af6502fb608bde82df099f9416b01af0f
|
06c0d8151983a7f16aa3d18b254b5f0ef012197e
|
/tests/conformance/conftest.py
|
e62b74e8bfadee13e226066ff225ddbf0bb12010
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-storage
|
3a15ece3cecb95ee8b9fb0b193757961f6e0f027
|
bdd7c6c19c96a4dbd2249ba39399f8f6ff799fe1
|
refs/heads/main
| 2023-09-03T13:02:17.527509
| 2023-08-31T20:09:41
| 2023-08-31T20:09:41
| 226,992,639
| 363
| 161
|
Apache-2.0
| 2023-09-02T01:01:14
| 2019-12-10T00:10:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,881
|
py
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import uuid
import pytest
from google.auth.credentials import AnonymousCredentials
from google.cloud import storage
from google.cloud.exceptions import NotFound
"""Environment variable or default host for Storage testbench emulator."""
_HOST = os.environ.get("STORAGE_EMULATOR_HOST", "http://localhost:9000")
"""Emulated project information for the storage testbench."""
_CONF_TEST_PROJECT_ID = "my-project-id"
_CONF_TEST_SERVICE_ACCOUNT_EMAIL = (
"my-service-account@my-project-id.iam.gserviceaccount.com"
)
_CONF_TEST_PUBSUB_TOPIC_NAME = "my-topic-name"
"""Create content payload in different sizes."""
def _create_block(desired_kib):
line = "abcdefXYZ123456789ADDINGrandom#" # len(line) = 31
multiplier = int(desired_kib / (len(line) + 1))
lines = "".join(
line + str(random.randint(0, 9)) for _ in range(multiplier)
) # add random single digit integers
return 1024 * lines
_STRING_CONTENT = "hello world"
_SIZE_9MB = 9216 # 9*1024 KiB
########################################################################################################################################
### Pytest Fixtures to Populate Retry Conformance Test Resources #######################################################################
########################################################################################################################################
@pytest.fixture
def client():
client = storage.Client(
project=_CONF_TEST_PROJECT_ID,
credentials=AnonymousCredentials(),
client_options={"api_endpoint": _HOST},
)
return client
@pytest.fixture
def bucket(client):
bucket = client.bucket(uuid.uuid4().hex)
client.create_bucket(bucket)
yield bucket
try:
bucket.delete(force=True)
except NotFound: # in cases where bucket is deleted within the test
pass
@pytest.fixture
def object(client, bucket):
blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
blob.upload_from_string(_STRING_CONTENT)
blob.reload()
yield blob
try:
blob.delete()
except NotFound: # in cases where object is deleted within the test
pass
@pytest.fixture
def notification(client, bucket):
notification = client.bucket(bucket.name).notification(
topic_name=_CONF_TEST_PUBSUB_TOPIC_NAME
)
notification.create()
notification.reload()
yield notification
try:
notification.delete()
except NotFound: # in cases where notification is deleted within the test
pass
@pytest.fixture
def hmac_key(client):
hmac_key, _secret = client.create_hmac_key(
service_account_email=_CONF_TEST_SERVICE_ACCOUNT_EMAIL,
project_id=_CONF_TEST_PROJECT_ID,
)
yield hmac_key
try:
hmac_key.state = "INACTIVE"
hmac_key.update()
hmac_key.delete()
except NotFound: # in cases where hmac_key is deleted within the test
pass
@pytest.fixture
def file_data(client, bucket):
blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
payload = _create_block(_SIZE_9MB)
blob.upload_from_string(payload)
yield blob, payload
try:
blob.delete()
except NotFound: # in cases where object is deleted within the test
pass
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
37b862cdca4f6aaadc98f6f52088f748238199fc
|
d4f2e2e3552ab4b111f78cfbad0d30c144201093
|
/2017-06-12/fib_pool.py
|
f13094e9069f611b6d36535aced6b7f77612ccaa
|
[
"Apache-2.0"
] |
permissive
|
dongweiming/mp
|
c1e9f6f2c1fd8adbd4d7b8ffc45c5cc288cdcd80
|
129c31c818e1f0c39c983aad1f2f1ad9fa7efb1c
|
refs/heads/master
| 2023-04-29T07:56:27.198574
| 2022-10-30T04:20:09
| 2022-10-30T04:21:27
| 75,051,758
| 96
| 35
|
Apache-2.0
| 2023-04-17T17:34:17
| 2016-11-29T06:44:53
|
Python
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
# coding=utf-8
import time
from multiprocessing.pool import Pool
NUMBERS = range(25, 38)
def fib(n):
if n<= 2:
return 1
return fib(n-1) + fib(n-2)
start = time.time()
pool = Pool(3)
for num, result in zip(NUMBERS, pool.map(fib, NUMBERS)):
print('fib({}) = {}'.format(num, result))
print('COST: {}'.format(time.time() - start))
|
[
"ciici1234@hotmail.com"
] |
ciici1234@hotmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.